From 60882d23f7fabbfee439c2872ebac85aaa8bae61 Mon Sep 17 00:00:00 2001
From: Maksym Kholiavchenko
Date: Sun, 27 Oct 2019 22:24:28 +0300
Subject: [PATCH] Update install process

---
 README.md                                     |   3 +-
 dependencies/scikit-tensor/LICENSE            | 674 ------
 dependencies/scikit-tensor/MANIFEST.in        |   6 -
 dependencies/scikit-tensor/setup.cfg          |   2 -
 dependencies/scikit-tensor/setup.py           | 115 ---
 .../scikit-tensor/sktensor/__init__.py        |  14 -
 dependencies/scikit-tensor/sktensor/core.py   | 407 -----
 dependencies/scikit-tensor/sktensor/cp.py     | 207 ------
 .../scikit-tensor/sktensor/dedicom.py         | 276 -------
 .../scikit-tensor/sktensor/dtensor.py         | 194 -----
 .../scikit-tensor/sktensor/indscal.py         |  75 --
 .../scikit-tensor/sktensor/ktensor.py         | 205 ------
 .../scikit-tensor/sktensor/pyutils.py         |  62 --
 dependencies/scikit-tensor/sktensor/rescal.py | 299 --------
 dependencies/scikit-tensor/sktensor/setup.py  |   7 -
 .../scikit-tensor/sktensor/sptensor.py        | 399 -----------
 .../scikit-tensor/sktensor/tests/__init__.py  |   0
 .../sktensor/tests/sptensor_fixture.py        |  21 -
 .../sktensor/tests/sptensor_rand_fixture.py   |  27 -
 .../scikit-tensor/sktensor/tests/test_base.py |  93 ---
 .../sktensor/tests/test_dtensor.py            |  53 --
 .../sktensor/tests/test_ktensor.py            |  14 -
 .../sktensor/tests/test_pyutils.py            |  11 -
 .../sktensor/tests/test_sptensor.py           | 183 -----
 .../sktensor/tests/test_tucker_hooi.py        |  48 --
 .../sktensor/tests/test_utils.py              |  20 -
 .../sktensor/tests/ttm_fixture.py             |  23 -
 dependencies/scikit-tensor/sktensor/tucker.py | 150 ----
 dependencies/scikit-tensor/sktensor/utils.py  |  38 -
 .../scikit-tensor/sktensor/version.py         |   1 -
 requirements.txt                              |   3 +-
 setup.py                                      |  12 +-
 32 files changed, 6 insertions(+), 3636 deletions(-)
 delete mode 100644 dependencies/scikit-tensor/LICENSE
 delete mode 100644 dependencies/scikit-tensor/MANIFEST.in
 delete mode 100644 dependencies/scikit-tensor/setup.cfg
 delete mode 100644 dependencies/scikit-tensor/setup.py
 delete mode 100644 dependencies/scikit-tensor/sktensor/__init__.py
 delete mode 100644 dependencies/scikit-tensor/sktensor/core.py
 delete mode 100644 dependencies/scikit-tensor/sktensor/cp.py
 delete mode 100644 dependencies/scikit-tensor/sktensor/dedicom.py
 delete mode 100644 dependencies/scikit-tensor/sktensor/dtensor.py
 delete mode 100644 dependencies/scikit-tensor/sktensor/indscal.py
 delete mode 100644 dependencies/scikit-tensor/sktensor/ktensor.py
 delete mode 100644 dependencies/scikit-tensor/sktensor/pyutils.py
 delete mode 100644 dependencies/scikit-tensor/sktensor/rescal.py
 delete mode 100644 dependencies/scikit-tensor/sktensor/setup.py
 delete mode 100644 dependencies/scikit-tensor/sktensor/sptensor.py
 delete mode 100644 dependencies/scikit-tensor/sktensor/tests/__init__.py
 delete mode 100644 dependencies/scikit-tensor/sktensor/tests/sptensor_fixture.py
 delete mode 100644 dependencies/scikit-tensor/sktensor/tests/sptensor_rand_fixture.py
 delete mode 100644 dependencies/scikit-tensor/sktensor/tests/test_base.py
 delete mode 100644 dependencies/scikit-tensor/sktensor/tests/test_dtensor.py
 delete mode 100644 dependencies/scikit-tensor/sktensor/tests/test_ktensor.py
 delete mode 100644 dependencies/scikit-tensor/sktensor/tests/test_pyutils.py
 delete mode 100644 dependencies/scikit-tensor/sktensor/tests/test_sptensor.py
 delete mode 100644 dependencies/scikit-tensor/sktensor/tests/test_tucker_hooi.py
 delete mode 100644 dependencies/scikit-tensor/sktensor/tests/test_utils.py
 delete mode 100644 dependencies/scikit-tensor/sktensor/tests/ttm_fixture.py
 delete mode 100644 dependencies/scikit-tensor/sktensor/tucker.py
 delete mode 100644 dependencies/scikit-tensor/sktensor/utils.py
 delete mode 100644 dependencies/scikit-tensor/sktensor/version.py
diff --git a/README.md b/README.md
index 8010c4d..4ed4abb 100644
--- a/README.md
+++ b/README.md
@@ -7,7 +7,8 @@ It demonstrates how a neural network with convolutional and fully connected laye
 ```
 numpy
 scipy
-tensorly
+scikit-tensor-py3
+tensorly-musco
 absl-py
 tqdm
 tensorflow-gpu (TensorRT support)
diff --git a/dependencies/scikit-tensor/LICENSE b/dependencies/scikit-tensor/LICENSE
deleted file mode 100644
index 94a9ed0..0000000
--- a/dependencies/scikit-tensor/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
[674 deleted lines: the verbatim GNU General Public License v3 text bundled with the vendored scikit-tensor copy; omitted here]
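The hunks that follow remove the vendored scikit-tensor sources; the updated requirements.txt and root setup.py hunks fall outside this excerpt. As a rough illustration of the install process this patch moves to, the sketch below builds dependency metadata from the README list above. It is an assumption, not taken from the patch: the project name and the decision to leave packages unpinned are hypothetical placeholders.

```python
# Illustrative sketch only: the real requirements.txt / setup.py hunks are not shown
# in this excerpt. Package names come from the README hunk above; "example-project"
# and the unpinned versions are hypothetical.
from setuptools import find_packages, setup

setup(
    name="example-project",   # placeholder; the actual project name is unchanged by this patch
    packages=find_packages(),
    install_requires=[
        "numpy",
        "scipy",
        "scikit-tensor-py3",  # PyPI replacement for the deleted dependencies/scikit-tensor copy
        "tensorly-musco",     # replaces the previously listed tensorly
        "absl-py",
        "tqdm",
        # tensorflow-gpu appears in the README for TensorRT support; it is usually
        # installed separately so that it matches the local CUDA toolkit.
    ],
)
```

With metadata along those lines, a plain `pip install -r requirements.txt` (or `pip install .`) would stand in for building the in-tree dependencies/scikit-tensor copy that this patch deletes.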
diff --git a/dependencies/scikit-tensor/MANIFEST.in b/dependencies/scikit-tensor/MANIFEST.in deleted file mode 100644 index c38a949..0000000 --- a/dependencies/scikit-tensor/MANIFEST.in +++ /dev/null @@ -1,6 +0,0 @@ -include *.rst -recursive-include docs * -recursive-include examples * -recursive-include sktensor *.c *.h *.pyx *.pxd -recursive-include sktensor/datasets *.csv *.csv.gz *.rst *.jpg *.txt -include LICENSE diff --git a/dependencies/scikit-tensor/setup.cfg b/dependencies/scikit-tensor/setup.cfg deleted file mode 100644 index b88034e..0000000 --- a/dependencies/scikit-tensor/setup.cfg +++ /dev/null @@ -1,2 +0,0 @@ -[metadata] -description-file = README.md diff --git a/dependencies/scikit-tensor/setup.py b/dependencies/scikit-tensor/setup.py deleted file mode 100644 index c70c9ad..0000000 --- a/dependencies/scikit-tensor/setup.py +++ /dev/null @@ -1,115 +0,0 @@ -#!/usr/bin/env python -descr = """Python module for multilinear algebra and tensor factorizations""" - -import os -import sys - -DISTNAME = 'scikit-tensor' -DESCRIPTION = descr -MAINTAINER = 'Maximilian Nickel', -MAINTAINER_EMAIL = 'mnick@mit.edu', -URL = 'http://github.com/mnick/scikit-tensor' -LICENSE = 'GPLv3' -DOWNLOAD_URL = URL -PACKAGE_NAME = 'sktensor' -EXTRA_INFO = dict( - classifiers=[ - "Development Status :: 3 - Alpha", - 'Intended Audience :: Developers', - 'Intended Audience :: Science/Research', - 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)', - 'Topic :: Scientific/Engineering', - 'Topic :: Software Development', - 'Operating System :: Microsoft :: Windows', - 'Operating System :: POSIX', - 'Operating System :: Unix', - 'Operating System :: MacOS', - 'Programming Language :: Python :: 2', - 'Programming Language :: Python :: 2.6', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.3', - ] -) - -try: - import setuptools # If you want to enable 'python setup.py develop' - EXTRA_INFO.update(dict( - zip_safe=False, # the package can run out of an .egg file - include_package_data=True, - )) -except: - print('setuptools module not found.') - print("Install setuptools if you want to enable 'python setup.py develop'.") - - -def configuration(parent_package='', top_path=None, package_name=DISTNAME): - if os.path.exists('MANIFEST'): - os.remove('MANIFEST') - - from numpy.distutils.misc_util import Configuration - config = Configuration(None, parent_package, top_path) - - # Avoid non-useful msg: "Ignoring attempt to set 'name' (from ... " - config.set_options( - ignore_setup_xxx_py=True, - assume_default_configuration=True, - delegate_options_to_subpackages=True, - quiet=True - ) - - config.add_subpackage(PACKAGE_NAME) - return config - - -def get_version(): - """Obtain the version number""" - import imp - mod = imp.load_source('version', os.path.join(PACKAGE_NAME, 'version.py')) - return mod.__version__ - - -def setup_package(): -# Call the setup function - metadata = dict( - name=DISTNAME, - maintainer=MAINTAINER, - maintainer_email=MAINTAINER_EMAIL, - description=DESCRIPTION, - license=LICENSE, - url=URL, - download_url=DOWNLOAD_URL, - version=get_version(), - install_requires=[ - 'numpy', - 'scipy', - 'nose' - ], - #test_suite="nose.collector", - **EXTRA_INFO - ) - - if (len(sys.argv) >= 2 - and ('--common' in sys.argv[1:] or sys.argv[1] - in ('--common-commands', 'egg_info', '--version', 'clean'))): - - # For these actions, NumPy is not required. 
- # - # They are required to succeed without Numpy for example when - # pip is used to install Scikit when Numpy is not yet present in - # the system. - try: - from setuptools import setup - except ImportError: - from distutils.core import setup - - metadata['version'] = get_version() - else: - metadata['configuration'] = configuration - from numpy.distutils.core import setup - - - setup(**metadata) - -if __name__ == "__main__": - setup_package() diff --git a/dependencies/scikit-tensor/sktensor/__init__.py b/dependencies/scikit-tensor/sktensor/__init__.py deleted file mode 100644 index 95b01ef..0000000 --- a/dependencies/scikit-tensor/sktensor/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -from .version import __version__ - -from .utils import * -from .core import * - -# data types -from .sptensor import sptensor, unfolded_sptensor -from .dtensor import dtensor, unfolded_dtensor -from .ktensor import ktensor - -# import algorithms -from .cp import als as cp_als -from .tucker import hooi as tucker_hooi -from .tucker import hooi as tucker_hosvd diff --git a/dependencies/scikit-tensor/sktensor/core.py b/dependencies/scikit-tensor/sktensor/core.py deleted file mode 100644 index 4935458..0000000 --- a/dependencies/scikit-tensor/sktensor/core.py +++ /dev/null @@ -1,407 +0,0 @@ -# Copyright (C) 2013 Maximilian Nickel -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -import numpy as np -from numpy import array, dot, zeros, ones, arange, kron -from numpy import setdiff1d -from scipy.linalg import eigh -from scipy.sparse import issparse as issparse_mat -from scipy.sparse import csr_matrix -from scipy.sparse.linalg import eigsh -from abc import ABCMeta, abstractmethod -from .pyutils import is_sequence, func_attr -#from coremod import khatrirao - -import sys -import types - -module_funs = [] - - -def modulefunction(func): - module_funs.append(func_attr(func, 'name')) - - -class tensor_mixin(object, metaclass=ABCMeta): - """ - Base tensor class from which all tensor classes are subclasses. - Can not be instaniated - - See also - -------- - sktensor.dtensor : Subclass for *dense* tensors. - sktensor.sptensor : Subclass for *sparse* tensors. 
- """ - - def ttm(self, V, mode=None, transp=False, without=False): - """ - Tensor times matrix product - - Parameters - ---------- - V : M x N array_like or list of M_i x N_i array_likes - Matrix or list of matrices for which the tensor times matrix - products should be performed - mode : int or list of int's, optional - Modes along which the tensor times matrix products should be - performed - transp: boolean, optional - If True, tensor times matrix products are computed with - transpositions of matrices - without: boolean, optional - It True, tensor times matrix products are performed along all - modes **except** the modes specified via parameter ``mode`` - - - Examples - -------- - Create dense tensor - - >>> T = zeros((3, 4, 2)) - >>> T[:, :, 0] = [[ 1, 4, 7, 10], [ 2, 5, 8, 11], [3, 6, 9, 12]] - >>> T[:, :, 1] = [[13, 16, 19, 22], [14, 17, 20, 23], [15, 18, 21, 24]] - >>> T = dtensor(T) - - Create matrix - - >>> V = array([[1, 3, 5], [2, 4, 6]]) - - Multiply tensor with matrix along mode 0 - - >>> Y = T.ttm(V, 0) - >>> Y[:, :, 0] - array([[ 22., 49., 76., 103.], - [ 28., 64., 100., 136.]]) - >>> Y[:, :, 1] - array([[ 130., 157., 184., 211.], - [ 172., 208., 244., 280.]]) - - """ - if mode is None: - mode = list(range(self.ndim)) - if isinstance(V, np.ndarray): - Y = self._ttm_compute(V, mode, transp) - elif is_sequence(V): - dims, vidx = check_multiplication_dims(mode, self.ndim, len(V), vidx=True, without=without) - Y = self._ttm_compute(V[vidx[0]], dims[0], transp) - for i in range(1, len(dims)): - Y = Y._ttm_compute(V[vidx[i]], dims[i], transp) - return Y - - def ttv(self, v, modes=[], without=False): - """ - Tensor times vector product - - Parameters - ---------- - v : 1-d array or tuple of 1-d arrays - Vector to be multiplied with tensor. - modes : array_like of integers, optional - Modes in which the vectors should be multiplied. - without : boolean, optional - If True, vectors are multiplied in all modes **except** the - modes specified in ``modes``. - - """ - if not isinstance(v, tuple): - v = (v, ) - dims, vidx = check_multiplication_dims(modes, self.ndim, len(v), vidx=True, without=without) - for i in range(len(dims)): - if not len(v[vidx[i]]) == self.shape[dims[i]]: - raise ValueError('Multiplicant is wrong size') - remdims = np.setdiff1d(list(range(self.ndim)), dims) - return self._ttv_compute(v, dims, vidx, remdims) - - #@abstractmethod - #def ttt(self, other, modes=None): - # pass - - @abstractmethod - def _ttm_compute(self, V, mode, transp): - pass - - @abstractmethod - def _ttv_compute(self, v, dims, vidx, remdims): - pass - - @abstractmethod - def unfold(self, rdims, cdims=None, transp=False): - pass - - @abstractmethod - def uttkrp(self, U, mode): - """ - Unfolded tensor times Khatri-Rao product: - :math:`M = \\unfold{X}{3} (U_1 \kr \cdots \kr U_N)` - - Computes the _matrix_ product of the unfolding - of a tensor and the Khatri-Rao product of multiple matrices. - Efficient computations are perfomed by the respective - tensor implementations. - - Parameters - ---------- - U : list of array-likes - Matrices for which the Khatri-Rao product is computed and - which are multiplied with the tensor in mode ``mode``. - mode: int - Mode in which the Khatri-Rao product of ``U`` is multiplied - with the tensor. 
- - Returns - ------- - M : np.ndarray - Matrix which is the result of the matrix product of the unfolding of - the tensor and the Khatri-Rao product of ``U`` - - See also - -------- - For efficient computations of unfolded tensor times Khatri-Rao products - for specialiized tensors see also - dtensor.uttkrp, sptensor.uttkrp, ktensor.uttkrp, ttensor.uttkrp - - References - ---------- - .. [1] B.W. Bader, T.G. Kolda - Efficient Matlab Computations With Sparse and Factored Tensors - SIAM J. Sci. Comput, Vol 30, No. 1, pp. 205--231, 2007 - """ - pass - - @abstractmethod - def transpose(self, axes=None): - """ - Compute transpose of tensors. - - Parameters - ---------- - axes : array_like of ints, optional - Permute the axes according to the values given. - - Returns - ------- - d : tensor_mixin - tensor with axes permuted. - - See also - -------- - dtensor.transpose, sptensor.transpose - """ - pass - - -def istensor(X): - return isinstance(X, tensor_mixin) - - -# dynamically create module level functions -conv_funcs = [ - 'norm', - 'transpose', - 'ttm', - 'ttv', - 'unfold', -] - -for fname in conv_funcs: - def call_on_me(obj, *args, **kwargs): - if not istensor(obj): - raise ValueError('%s() object must be tensor (%s)' % (fname, type(obj))) - func = getattr(obj, fname) - return func(*args, **kwargs) - - nfunc = types.FunctionType( - func_attr(call_on_me, 'code'), - { - 'getattr': getattr, - 'fname': fname, - 'istensor': istensor, - 'ValueError': ValueError, - 'type': type - }, - name=fname, - argdefs=func_attr(call_on_me, 'defaults'), - closure=func_attr(call_on_me, 'closure') - ) - setattr(sys.modules[__name__], fname, nfunc) - - -def check_multiplication_dims(dims, N, M, vidx=False, without=False): - dims = array(dims, ndmin=1) - if len(dims) == 0: - dims = arange(N) - if without: - dims = setdiff1d(list(range(N)), dims) - if not np.in1d(dims, arange(N)).all(): - raise ValueError('Invalid dimensions') - P = len(dims) - sidx = np.argsort(dims) - sdims = dims[sidx] - if vidx: - if M > N: - raise ValueError('More multiplicants than dimensions') - if M != N and M != P: - raise ValueError('Invalid number of multiplicants') - if P == M: - vidx = sidx - else: - vidx = sdims - return sdims, vidx - else: - return sdims - - -def innerprod(X, Y): - """ - Inner prodcut with a Tensor - """ - return dot(X.flatten(), Y.flatten()) - - -def nvecs(X, n, rank, do_flipsign=True, dtype=np.float): - """ - Eigendecomposition of mode-n unfolding of a tensor - """ - Xn = X.unfold(n) - if issparse_mat(Xn): - Xn = csr_matrix(Xn, dtype=dtype) - Y = Xn.dot(Xn.T) - _, U = eigsh(Y, rank, which='LM') - else: - Y = Xn.dot(Xn.T) - N = Y.shape[0] - _, U = eigh(Y, eigvals=(N - rank, N - 1)) - #_, U = eigsh(Y, rank, which='LM') - # reverse order of eigenvectors such that eigenvalues are decreasing - U = array(U[:, ::-1]) - # flip sign - if do_flipsign: - U = flipsign(U) - return U - - -def flipsign(U): - """ - Flip sign of factor matrices such that largest magnitude - element will be positive - """ - midx = abs(U).argmax(axis=0) - for i in range(U.shape[1]): - if U[midx[i], i] < 0: - U[:, i] = -U[:, i] - return U - - -def center(X, n): - Xn = unfold(X, n) - N = Xn.shape[0] - m = Xn.sum(axis=0) / N - m = kron(m, ones((N, 1))) - Xn = Xn - m - return fold(Xn, n) - - -def center_matrix(X): - m = X.mean(axis=0) - return X - m - - -def scale(X, n): - Xn = unfold(X, n) - m = np.float_(np.sqrt((Xn ** 2).sum(axis=1))) - m[m == 0] = 1 - for i in range(Xn.shape[0]): - Xn[i, :] = Xn[i] / m[i] - return fold(Xn, n, X.shape) - - -# TODO more 
efficient cython implementation -def khatrirao(A, reverse=False): - """ - Compute the columnwise Khatri-Rao product. - - Parameters - ---------- - A : tuple of ndarrays - Matrices for which the columnwise Khatri-Rao product should be computed - - reverse : boolean - Compute Khatri-Rao product in reverse order - - Examples - -------- - >>> A = np.random.randn(5, 2) - >>> B = np.random.randn(4, 2) - >>> C = khatrirao((A, B)) - >>> C.shape - (20, 2) - >>> (C[:, 0] == np.kron(A[:, 0], B[:, 0])).all() - true - >>> (C[:, 1] == np.kron(A[:, 1], B[:, 1])).all() - true - """ - - if not isinstance(A, tuple): - raise ValueError('A must be a tuple of array likes') - N = A[0].shape[1] - M = 1 - for i in range(len(A)): - if A[i].ndim != 2: - raise ValueError('A must be a tuple of matrices (A[%d].ndim = %d)' % (i, A[i].ndim)) - elif N != A[i].shape[1]: - raise ValueError('All matrices must have same number of columns') - M *= A[i].shape[0] - matorder = arange(len(A)) - if reverse: - matorder = matorder[::-1] - # preallocate - P = np.zeros((M, N), dtype=A[0].dtype) - for n in range(N): - ab = A[matorder[0]][:, n] - for j in range(1, len(matorder)): - ab = np.kron(ab, A[matorder[j]][:, n]) - P[:, n] = ab - return P - - -def teneye(dim, order): - """ - Create tensor with superdiagonal all one, rest zeros - """ - I = zeros(dim ** order) - for f in range(dim): - idd = f - for i in range(1, order): - idd = idd + dim ** (i - 1) * (f - 1) - I[idd] = 1 - return I.reshape(ones(order) * dim) - - -def tvecmat(m, n): - d = m * n - i2 = arange(d).reshape(m, n).T.flatten() - Tmn = zeros((d, d)) - Tmn[arange(d), i2] = 1 - return Tmn - - #i = arange(d); - #rI = m * (i-1)-(m*n-1) * floor((i-1)/n) - #print rI - #I1s = s2i((d,d), rI, arange(d)) - #print I1s - #Tmn[I1s] = 1 - #return Tmn.reshape((d,d)).T - -# vim: set et: diff --git a/dependencies/scikit-tensor/sktensor/cp.py b/dependencies/scikit-tensor/sktensor/cp.py deleted file mode 100644 index d710b73..0000000 --- a/dependencies/scikit-tensor/sktensor/cp.py +++ /dev/null @@ -1,207 +0,0 @@ -# coding: utf-8 -# Copyright (C) 2013 Maximilian Nickel -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -""" -This module holds diffent algorithms to compute the CP decompositions, i.e. -algorithms where - -.. math:: \\ten{X} \\approx \sum_{r=1}^{rank} \\vec{u}_r^{(1)} \outer \cdots \outer \\vec{u}_r^{(N)} - -""" -import logging -import time -import numpy as np -from numpy import array, dot, ones, sqrt -from scipy.linalg import pinv -from numpy.random import rand -from .core import nvecs, norm -from .ktensor import ktensor - -_log = logging.getLogger('CP') -_DEF_MAXITER = 500 -_DEF_INIT = 'nvecs' -_DEF_CONV = 1e-5 -_DEF_FIT_METHOD = 'full' -_DEF_TYPE = np.float - -__all__ = [ - 'als', - 'opt', - 'wopt' -] - - -def als(X, rank, **kwargs): - """ - Alternating least-sqaures algorithm to compute the CP decompositions. - - Parameters - ---------- - X : tensor_mixin - The tensor to be decomposed. 
- rank : int - Tensor rank of the decompositions. - init : {'random', 'nvecs'}, optional - The initialization method to use. - - random : Factor matrices are initialized randomly. - - nvecs : Factor matrices are initialzed via HOSVD. - (default 'nvecs') - max_iter : int, optional - Maximium number of iterations of the ALS algorithm. - (default 500) - fit_method : {'full', None} - The method to compute the fit of the factorization - - 'full' : Compute least-squares fit of the dense approximation of. - X and X. - - None : Do not compute the fit of the factorization, but iterate - until ``max_iter`` (Useful for large-scale tensors). - (default 'full') - conv : float - Convergence tolerance on difference of fit between iterations - (default 1e-5) - - Returns - ------- - P : ktensor - Rank ``rank`` factorization of X. ``P.U[i]`` corresponds to the factor - matrix for the i-th mode. ``P.lambda[i]`` corresponds to the weight - of the i-th mode. - fit : float - Fit of the factorization compared to ``X`` - itr : int - Number of iterations that were needed until convergence - exectimes : ndarray of floats - Time needed for each single iteration - - Examples - -------- - Create random dense tensor - - >>> from sktensor import dtensor, ktensor - >>> U = [np.random.rand(i,3) for i in (20, 10, 14)] - >>> T = dtensor(ktensor(U).toarray()) - - Compute rank-3 CP decompositions of ``T`` with ALS - - >>> P, fit, itr, _ = als(T, 3) - - Result is a decomposed tensor stored as a Kruskal operator - - >>> type(P) - - - Factorization should be close to original data - - >>> np.allclose(T, P.totensor()) - True - - References - ---------- - .. [1] Kolda, T. G. & Bader, B. W. - Tensor Decompositions and Applications. - SIAM Rev. 51, 455–500 (2009). - .. [2] Harshman, R. A. - Foundations of the PARAFAC procedure: models and conditions for an 'explanatory' multimodal factor analysis. - UCLA Working Papers in Phonetics 16, (1970). - .. [3] Carroll, J. D., Chang, J. J. - Analysis of individual differences in multidimensional scaling via an N-way generalization of 'Eckart-Young' decompositions. - Psychometrika 35, 283–319 (1970). 
- """ - - # init options - ainit = kwargs.pop('init', _DEF_INIT) - maxiter = kwargs.pop('max_iter', _DEF_MAXITER) - fit_method = kwargs.pop('fit_method', _DEF_FIT_METHOD) - conv = kwargs.pop('conv', _DEF_CONV) - dtype = kwargs.pop('dtype', _DEF_TYPE) - if not len(kwargs) == 0: - raise ValueError('Unknown keywords (%s)' % (list(kwargs.keys()))) - - N = X.ndim - normX = norm(X) - - U = _init(ainit, X, N, rank, dtype) - fit = 0 - exectimes = [] - for itr in range(maxiter): - tic = time.clock() - fitold = fit - - for n in range(N): - Unew = X.uttkrp(U, n) - Y = ones((rank, rank), dtype=dtype) - for i in (list(range(n)) + list(range(n + 1, N))): - Y = Y * dot(U[i].T, U[i]) - Unew = Unew.dot(pinv(Y)) - # Normalize - if itr == 0: - lmbda = sqrt((Unew ** 2).sum(axis=0)) - else: - lmbda = Unew.max(axis=0) - lmbda[lmbda < 1] = 1 - U[n] = Unew / lmbda - - P = ktensor(U, lmbda) - if fit_method == 'full': - normresidual = normX ** 2 + P.norm() ** 2 - 2 * P.innerprod(X) - fit = 1 - (normresidual / normX ** 2) - else: - fit = itr - fitchange = abs(fitold - fit) - exectimes.append(time.clock() - tic) - _log.debug( - '[%3d] fit: %.5f | delta: %7.1e | secs: %.5f' % - (itr, fit, fitchange, exectimes[-1]) - ) - if itr > 0 and fitchange < conv: - break - - return P, fit, itr, array(exectimes) - - -def opt(X, rank, **kwargs): - ainit = kwargs.pop('init', _DEF_INIT) - maxiter = kwargs.pop('maxIter', _DEF_MAXITER) - conv = kwargs.pop('conv', _DEF_CONV) - dtype = kwargs.pop('dtype', _DEF_TYPE) - if not len(kwargs) == 0: - raise ValueError('Unknown keywords (%s)' % (list(kwargs.keys()))) - - N = X.ndim - U = _init(ainit, X, N, rank, dtype) - - -def wopt(X, rank, **kwargs): - raise NotImplementedError() - - -def _init(init, X, N, rank, dtype): - """ - Initialization for CP models - """ - Uinit = [None for _ in range(N)] - if isinstance(init, list): - Uinit = init - elif init == 'random': - for n in range(1, N): - Uinit[n] = array(rand(X.shape[n], rank), dtype=dtype) - elif init == 'nvecs': - for n in range(1, N): - Uinit[n] = array(nvecs(X, n, rank), dtype=dtype) - else: - raise 'Unknown option (init=%s)' % str(init) - return Uinit - -# vim: set et: diff --git a/dependencies/scikit-tensor/sktensor/dedicom.py b/dependencies/scikit-tensor/sktensor/dedicom.py deleted file mode 100644 index c4311f7..0000000 --- a/dependencies/scikit-tensor/sktensor/dedicom.py +++ /dev/null @@ -1,276 +0,0 @@ -# Copyright (C) 2013 Maximilian Nickel -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
- -import logging -import time -import numpy as np -from numpy import dot, ones, zeros, diag, kron, outer, array, prod, eye -from numpy.linalg import norm, solve, eigvals -from numpy.random import rand -from scipy.linalg import qr -from scipy.sparse.linalg import eigsh -from scipy.optimize import fmin_l_bfgs_b, fmin_ncg, fmin_tnc -from scipy.sparse import issparse - -_DEF_MAXITER = 500 -_DEF_INIT = 'nvecs' -_DEF_PROJ = True -_DEF_CONV = 1e-5 -_DEF_NNE = -1 -_DEF_OPTFUNC = 'lbfgs' - -_log = logging.getLogger('DEDICOM') -np.seterr(invalid='raise') - - -def asalsan(X, rank, **kwargs): - """ - ASALSAN algorithm to compute the three-way DEDICOM decompositions - of a tensor - - See - --- - .. [1] Brett W. Bader, Richard A. Harshman, Tamara G. Kolda - "Temporal analysis of semantic graphs using ASALSAN" - 7th International Conference on Data Mining, 2007 - - .. [2] Brett W. Bader, Richard A. Harshman, Tamara G. Kolda - "Temporal analysis of Social Networks using Three-way DEDICOM" - Technical Report, 2006 - """ - # init options - ainit = kwargs.pop('init', _DEF_INIT) - proj = kwargs.pop('proj', _DEF_PROJ) - maxIter = kwargs.pop('maxIter', _DEF_MAXITER) - conv = kwargs.pop('conv', _DEF_CONV) - nne = kwargs.pop('nne', _DEF_NNE) - optfunc = kwargs.pop('optfunc', _DEF_OPTFUNC) - if not len(kwargs) == 0: - raise BaseException('Unknown keywords (%s)' % (list(kwargs.keys()))) - - # init starting points - D = ones((len(X), rank)) - sz = X[0].shape - n = sz[0] - R = rand(rank, rank) - if ainit == 'random': - A = rand(n, rank) - elif ainit == 'nvecs': - S = zeros((n, n)) - T = zeros((n, n)) - for i in range(len(X)): - T = X[i] - S = S + T + T.T - evals, A = eigsh(S, rank) - if nne > 0: - A[A < 0] = 0 - if proj: - Q, A2 = qr(A) - X2 = __projectSlices(X, Q) - R = __updateR(X2, A2, D, R, nne) - else: - R = __updateR(X, A, D, R, nne) - elif isinstance(ainit, np.ndarray): - A = ainit - else: - raise 'Unknown init option ("%s")' % ainit - - # perform decompositions - if issparse(X[0]): - normX = [norm(M.data) ** 2 for M in X] - Xflat = [M.tolil().reshape((1, prod(M.shape))).tocsr() for M in X] - else: - normX = [norm(M) ** 2 for M in X] - Xflat = [M.flatten() for M in X] - M = zeros((n, n)) - normXSum = sum(normX) - #normX = norm(X)**2 - fit = fitold = f = fitchange = 0 - exectimes = [] - for iters in range(maxIter): - tic = time.clock() - fitold = fit - A = __updateA(X, A, D, R, nne) - if proj: - Q, A2 = qr(A) - X2 = __projectSlices(X, Q) - R = __updateR(X2, A2, D, R, nne) - D, f = __updateD(X2, A2, D, R, nne, optfunc) - else: - R = __updateR(X, A, D, R, nne) - D, f = __updateD(X, A, D, R, nne, optfunc) - - # compute fit - f = 0 - for i in range(len(X)): - AD = dot(A, diag(D[i, :])) - M = dot(dot(AD, R), AD.T) - f += normX[i] + norm(M) ** 2 - 2 * Xflat[i].dot(M.flatten()) - f *= 0.5 - fit = 1 - (f / normXSum) - fitchange = abs(fitold - fit) - - exectimes.append(time.clock() - tic) - - # print iter info when debugging is enabled - _log.debug('[%3d] fit: %.5f | delta: %7.1e | secs: %.5f' % ( - iters, fit, fitchange, exectimes[-1] - )) - - if iters > 1 and fitchange < conv: - break - return A, R, D, fit, iters, array(exectimes) - - -def __updateA(X, A, D, R, nne): - rank = A.shape[1] - F = zeros((X[0].shape[0], rank)) - E = zeros((rank, rank)) - - AtA = dot(A.T, A) - for i in range(len(X)): - Dk = diag(D[i, :]) - DRD = dot(Dk, dot(R, Dk)) - DRtD = DRD.T - F += X[i].dot(dot(A, DRtD)) + X[i].T.dot(dot(A, DRD)) - E += dot(DRD, dot(AtA, DRtD)) + dot(DRtD, dot(AtA, DRD)) - if nne > 0: - E = dot(A, E) + nne - A = A * (F 
/ E) - else: - A = solve(E.T, F.T).T - return A - - -def __updateR(X, A, D, R, nne): - r = A.shape[1] ** 2 - T = zeros((r, r)) - t = zeros(r) - for i in range(len(X)): - AD = dot(A, diag(D[i, :])) - ADt = AD.T - tmp = dot(ADt, AD) - T = T + kron(tmp, tmp) - tmp = dot(ADt, X[i].dot(AD)) - t = t + tmp.flatten() - r = A.shape[1] - if nne > 0: - Rflat = R.flatten() - T = dot(T, Rflat) + nne - R = (Rflat * t / T).reshape(r, r) - else: - # TODO check if this is correct - R = solve(T, t).reshape(r, r) - #R = (pinv(T + eye(r ** 2)).dot(t)).reshape(r, r) - return R - - -def __updateD(X, A, D, R, nne, optfunc): - f = 0 - for i in range(len(X)): - d = D[i, :] - u = Updater(X[i], A, R) - if nne > 0: - bounds = len(d) * [(0, None)] - res = fmin_l_bfgs_b( - u.updateD_F, d, u.updateD_G, factr=1e12, bounds=bounds - ) - else: - if optfunc == 'lbfgs': - res = fmin_l_bfgs_b(u.updateD_F, d, u.updateD_G, factr=1e12) - D[i, :] = res[0] - f += res[1] - elif optfunc == 'ncg': - res = fmin_ncg( - u.updateD_F, d, u.updateD_G, fhess=u.updateD_H, - full_output=True, disp=False - ) - # TODO: check return value of ncg and update D, f - raise NotImplementedError() - elif optfunc == 'tnc': - res = fmin_tnc(u.updateD_F, d, u.updateD_G, disp=False) - # TODO: check return value of tnc and update D, f - raise NotImplementedError() - return D, f - - -class Updater: - def __init__(self, Z, A, R): - self.Z = Z - self.A = A - self.R = R - self.x = None - - def precompute(self, x, cache=True): - if not cache or self.x is None or (x != self.x).any(): - self.AD = dot(self.A, diag(x)) - self.ADt = self.AD.T - self.E = self.Z - dot(self.AD, dot(self.R, self.ADt)) - - def updateD_F(self, x): - self.precompute(x) - return norm(self.E, 'fro') ** 2 - - def updateD_G(self, x): - """ - Compute Gradient for update of D - - See [2] for derivation of Gradient - """ - self.precompute(x) - g = zeros(len(x)) - Ai = zeros(self.A.shape[0]) - for i in range(len(g)): - Ai = self.A[:, i] - g[i] = (self.E * (dot(self.AD, outer(self.R[:, i], Ai)) + - dot(outer(Ai, self.R[i, :]), self.ADt))).sum() - return -2 * g - - def updateD_H(self, x): - """ - Compute Hessian for update of D - - See [2] for derivation of Hessian - """ - self.precompute(x) - H = zeros((len(x), len(x))) - Ai = zeros(self.A.shape[0]) - Aj = zeros(Ai.shape) - for i in range(len(x)): - Ai = self.A[:, i] - ti = dot(self.AD, outer(self.R[:, i], Ai)) + dot(outer(Ai, self.R[i, :]), self.ADt) - - for j in range(i, len(x)): - Aj = self.A[:, j] - tj = outer(Ai, Aj) - H[i, j] = ( - self.E * (self.R[i, j] * tj + self.R[j, i] * tj.T) - - ti * ( - dot(self.AD, outer(self.R[:, j], Aj)) + - dot(outer(Aj, self.R[j, :]), self.ADt) - ) - ).sum() - H[j, i] = H[i, j] - H *= -2 - e = eigvals(H).min() - H = H + (eye(H.shape[0]) * e) - return H - - -def __projectSlices(X, Q): - X2 = [] - for i in range(len(X)): - X2.append(Q.T.dot(X[i].dot(Q))) - return X2 diff --git a/dependencies/scikit-tensor/sktensor/dtensor.py b/dependencies/scikit-tensor/sktensor/dtensor.py deleted file mode 100644 index 45f4ed9..0000000 --- a/dependencies/scikit-tensor/sktensor/dtensor.py +++ /dev/null @@ -1,194 +0,0 @@ -# sktensor.dtensor - base class for dense tensors -# Copyright (C) 2013 Maximilian Nickel -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
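The ASALSAN updates above all serve the three-way DEDICOM model, in which every frontal slice shares one factor matrix A and one interaction matrix R, weighted per slice by a diagonal D_k. A small NumPy sketch of the model form only (random data, hypothetical sizes, no fitting):

```
import numpy as np

n, rank, K = 8, 3, 4                 # entities, latent rank, number of slices
rng = np.random.default_rng(0)
A = rng.random((n, rank))            # shared factor matrix
R = rng.random((rank, rank))         # global interaction matrix
D = rng.random((K, rank))            # per-slice diagonal weights

# Three-way DEDICOM models each frontal slice as X_k ~ A D_k R D_k A^T.
Dk = np.diag(D[0])
X0_hat = A @ Dk @ R @ Dk @ A.T
assert X0_hat.shape == (n, n)
```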
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -import numpy as np -from numpy import array, prod, argsort -from .core import tensor_mixin, khatrirao -from .pyutils import inherit_docstring_from, from_to_without - - -__all__ = [ - 'dtensor', - 'unfolded_dtensor', -] - - -class dtensor(tensor_mixin, np.ndarray): - """ - Class to store **dense** tensors - - Parameters - ---------- - input_array : np.ndarray - Multidimenional numpy array which holds the entries of the tensor - - Examples - -------- - Create dense tensor from numpy array - - >>> T = np.zeros((3, 4, 2)) - >>> T[:, :, 0] = [[ 1, 4, 7, 10], [ 2, 5, 8, 11], [3, 6, 9, 12]] - >>> T[:, :, 1] = [[13, 16, 19, 22], [14, 17, 20, 23], [15, 18, 21, 24]] - >>> T = dtensor(T) - """ - - def __new__(cls, input_array): - obj = np.asarray(input_array).view(cls) - return obj - - def __array_wrap__(self, out_arr, context=None): - return np.ndarray.__array_wrap__(self, out_arr, context) - - def __eq__(self, other): - return np.equal(self, other) - - def _ttm_compute(self, V, mode, transp): - sz = array(self.shape) - r1, r2 = from_to_without(0, self.ndim, mode, separate=True) - #r1 = list(range(0, mode)) - #r2 = list(range(mode + 1, self.ndim)) - order = [mode] + r1 + r2 - newT = self.transpose(axes=order) - newT = newT.reshape(sz[mode], prod(sz[r1 + list(range(mode + 1, len(sz)))])) - if transp: - newT = V.T.dot(newT) - p = V.shape[1] - else: - newT = V.dot(newT) - p = V.shape[0] - newsz = [p] + list(sz[:mode]) + list(sz[mode + 1:]) - newT = newT.reshape(newsz) - # transpose + argsort(order) equals ipermute - newT = newT.transpose(argsort(order)) - return dtensor(newT) - - def _ttv_compute(self, v, dims, vidx, remdims): - """ - Tensor times vector product - - Parameter - --------- - """ - if not isinstance(v, tuple): - raise ValueError('v must be a tuple of vectors') - ndim = self.ndim - order = list(remdims) + list(dims) - if ndim > 1: - T = self.transpose(order) - sz = array(self.shape)[order] - for i in np.arange(len(dims), 0, -1): - T = T.reshape((sz[:ndim - 1].prod(), sz[ndim - 1])) - T = T.dot(v[vidx[i - 1]]) - ndim -= 1 - if ndim > 0: - T = T.reshape(sz[:ndim]) - return T - - def ttt(self, other, modes=None): - pass - - def unfold(self, mode): - """ - Unfolds a dense tensor in mode n. 
- - Parameters - ---------- - mode : int - Mode in which tensor is unfolded - - Returns - ------- - unfolded_dtensor : unfolded_dtensor object - Tensor unfolded along mode - - Examples - -------- - Create dense tensor from numpy array - - >>> T = np.zeros((3, 4, 2)) - >>> T[:, :, 0] = [[ 1, 4, 7, 10], [ 2, 5, 8, 11], [3, 6, 9, 12]] - >>> T[:, :, 1] = [[13, 16, 19, 22], [14, 17, 20, 23], [15, 18, 21, 24]] - >>> T = dtensor(T) - - Unfolding of dense tensors - - >>> T.unfold(0) - array([[ 1., 4., 7., 10., 13., 16., 19., 22.], - [ 2., 5., 8., 11., 14., 17., 20., 23.], - [ 3., 6., 9., 12., 15., 18., 21., 24.]]) - >>> T.unfold(1) - array([[ 1., 2., 3., 13., 14., 15.], - [ 4., 5., 6., 16., 17., 18.], - [ 7., 8., 9., 19., 20., 21.], - [ 10., 11., 12., 22., 23., 24.]]) - >>> T.unfold(2) - array([[ 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., - 12.], - [ 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., - 24.]]) - """ - - sz = array(self.shape) - N = len(sz) - order = ([mode], from_to_without(N - 1, -1, mode, step=-1, skip=-1)) - newsz = (sz[order[0]][0], prod(sz[order[1]])) - arr = self.transpose(axes=(order[0] + order[1])) - arr = arr.reshape(newsz) - return unfolded_dtensor(arr, mode, self.shape) - - def norm(self): - """ - Computes the Frobenius norm for dense tensors - :math:`norm(X) = \sqrt{\sum_{i_1,\ldots,i_N} x_{i_1,\ldots,i_N}^2}` - - References - ---------- - [Kolda and Bader, 2009; p.457] - """ - return np.linalg.norm(self) - - @inherit_docstring_from(tensor_mixin) - def uttkrp(self, U, n): - order = list(range(n)) + list(range(n + 1, self.ndim)) - Z = khatrirao(tuple(U[i] for i in order), reverse=True) - return self.unfold(n).dot(Z) - - @inherit_docstring_from(tensor_mixin) - def transpose(self, axes=None): - return dtensor(np.transpose(array(self), axes=axes)) - - -class unfolded_dtensor(np.ndarray): - - def __new__(cls, input_array, mode, ten_shape): - obj = np.asarray(input_array).view(cls) - obj.ten_shape = ten_shape - obj.mode = mode - return obj - - def __array_finalize__(self, obj): - if obj is None: - return - self.ten_shape = getattr(obj, 'ten_shape', None) - self.mode = getattr(obj, 'mode', None) - - def fold(self): - shape = array(self.ten_shape) - N = len(shape) - order = ([self.mode], from_to_without(0, N, self.mode, reverse=True)) - arr = self.reshape(tuple(shape[order[0]],) + tuple(shape[order[1]])) - arr = np.transpose(arr, argsort(order[0] + order[1])) - return dtensor(arr) diff --git a/dependencies/scikit-tensor/sktensor/indscal.py b/dependencies/scikit-tensor/sktensor/indscal.py deleted file mode 100644 index bf0d712..0000000 --- a/dependencies/scikit-tensor/sktensor/indscal.py +++ /dev/null @@ -1,75 +0,0 @@ -from numpy import zeros, dot, diag -from numpy.random import rand -from scipy.linalg import svd, norm, orth -from scipy.sparse.linalg import eigsh -import time -import logging - -_log = logging.getLogger('INDSCAL') - -_DEF_MAXITER = 50 -_DEF_INIT = 'random' -_DEF_CONV = 1e-7 - - -def orth_als(X, ncomp, **kwargs): - - ainit = kwargs.pop('init', _DEF_INIT) - maxiter = kwargs.pop('max_iter', _DEF_MAXITER) - conv = kwargs.pop('conv', _DEF_CONV) - if not len(kwargs) == 0: - raise ValueError('Unknown keywords (%s)' % (list(kwargs.keys()))) - - K = len(X) - normX = sum([norm(Xk)**2 for Xk in X]) - - A = init(X, ainit, ncomp) - fit = 0 - exectimes = [] - for itr in range(maxiter): - tic = time.time() - fitold = fit - D = _updateD(X, A) - A = _updateA(X, A, D) - - fit = sum([norm(X[k] - dot(A, dot(diag(D[k, :]), A.T)))**2 for k in range(K)]) - fit = 1 - fit / normX 
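The column ordering used in the `dtensor.unfold` examples above can be reproduced with a one-line NumPy helper. This is a sketch for cross-checking the doctest values, not the removed implementation.

```
import numpy as np

def unfold(T, mode):
    """Mode-n unfolding in the column ordering shown above (sketch)."""
    return np.reshape(np.moveaxis(T, mode, 0), (T.shape[mode], -1), order='F')

T = np.zeros((3, 4, 2))
T[:, :, 0] = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]
T[:, :, 1] = [[13, 16, 19, 22], [14, 17, 20, 23], [15, 18, 21, 24]]

assert unfold(T, 0)[0].tolist() == [1, 4, 7, 10, 13, 16, 19, 22]
assert unfold(T, 1)[0].tolist() == [1, 2, 3, 13, 14, 15]
assert unfold(T, 2)[0].tolist() == list(range(1, 13))
```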
- fitchange = abs(fitold - fit) - - exectimes.append(time.time() - tic) - _log.info('[%3d] fit: %0.5f | delta: %7.1e | secs: %.5f' % ( - itr, fit, fitchange, exectimes[-1] - )) - if itr > 0 and fitchange < conv: - break - return A, D - - -def _updateA(X, A, D): - G = zeros(A.shape) - for k in range(len(X)): - G = G + dot(X[k], dot(A, diag(D[k, :]))) - U, _, Vt = svd(G, full_matrices=0) - A = dot(U, Vt) - return A - - -def _updateD(X, A): - K, R = len(X), A.shape[1] - D = zeros((K, R)) - for k in range(K): - D[k, :] = diag(dot(A.T, dot(X[k], A))) - D[D < 0] = 0 - return D - - -def init(X, init, ncomp): - N, K = X[0].shape[0], len(X) - if init == 'random': - A = orth(rand(N, ncomp)) - elif init == 'nvecs': - S = zeros(N, N) - for k in range(K): - S = S + X[k] + X[k].T - _, A = eigsh(S, ncomp) - return A diff --git a/dependencies/scikit-tensor/sktensor/ktensor.py b/dependencies/scikit-tensor/sktensor/ktensor.py deleted file mode 100644 index 4803228..0000000 --- a/dependencies/scikit-tensor/sktensor/ktensor.py +++ /dev/null @@ -1,205 +0,0 @@ -# Copyright (C) 2013 Maximilian Nickel -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -import numpy as np -from numpy import dot, ones, array, outer, zeros, prod, sum -from sktensor.core import khatrirao, tensor_mixin -from sktensor.dtensor import dtensor - -__all__ = [ - 'ktensor', - 'vectorized_ktensor', -] - - -class ktensor(object): - """ - Tensor stored in decomposed form as a Kruskal operator. - - Intended Usage - The Kruskal operator is particularly useful to store - the results of a CP decompositions. - - Parameters - ---------- - U : list of ndarrays - Factor matrices from which the tensor representation - is created. All factor matrices ``U[i]`` must have the - same number of columns, but can have different - number of rows. - lmbda : array_like of floats, optional - Weights for each dimension of the Kruskal operator. - ``len(lambda)`` must be equal to ``U[i].shape[1]`` - - See also - -------- - sktensor.dtensor : Dense tensors - sktensor.sptensor : Sparse tensors - sktensor.ttensor : Tensors stored in form of the Tucker operator - - References - ---------- - .. [1] B.W. Bader, T.G. Kolda - Efficient Matlab Computations With Sparse and Factored Tensors - SIAM J. Sci. Comput, Vol 30, No. 1, pp. 
205--231, 2007 - """ - - def __init__(self, U, lmbda=None): - self.U = U - self.shape = tuple(Ui.shape[0] for Ui in U) - self.ndim = len(self.shape) - self.rank = U[0].shape[1] - self.lmbda = lmbda - if not all(array([Ui.shape[1] for Ui in U]) == self.rank): - raise ValueError('Dimension mismatch of factor matrices') - if lmbda is None: - self.lmbda = ones(self.rank) - - def __eq__(self, other): - if isinstance(other, ktensor): - # avoid costly elementwise comparison for obvious cases - if self.ndim != other.ndim or self.shape != other.shape: - return False - # do elementwise comparison - return all( - [(self.U[i] == other.U[i]).all() for i in range(self.ndim)] + - [(self.lmbda == other.lmbda).all()] - ) - else: - # TODO implement __eq__ for tensor_mixins and ndarrays - raise NotImplementedError() - - def uttkrp(self, U, mode): - - """ - Unfolded tensor times Khatri-Rao product for Kruskal tensors - - Parameters - ---------- - X : tensor_mixin - Tensor whose unfolding should be multiplied. - U : list of array_like - Matrices whose Khatri-Rao product should be multiplied. - mode : int - Mode in which X should be unfolded. - - See also - -------- - sktensor.sptensor.uttkrp : Efficient computation of uttkrp for sparse tensors - ttensor.uttkrp : Efficient computation of uttkrp for Tucker operators - """ - N = self.ndim - if mode == 1: - R = U[1].shape[1] - else: - R = U[0].shape[1] - W = np.tile(self.lmbda, 1, R) - for i in list(range(mode)) + list(range(mode + 1, N)): - W = W * dot(self.U[i].T, U[i]) - return dot(self.U[mode], W) - - def norm(self): - """ - Efficient computation of the Frobenius norm for ktensors - - Returns - ------- - norm : float - Frobenius norm of the ktensor - """ - N = len(self.shape) - coef = outer(self.lmbda, self.lmbda) - for i in range(N): - coef = coef * dot(self.U[i].T, self.U[i]) - return np.sqrt(coef.sum()) - - def innerprod(self, X): - """ - Efficient computation of the inner product of a ktensor with another tensor - - Parameters - ---------- - X : tensor_mixin - Tensor to compute the inner product with. - - Returns - ------- - p : float - Inner product between ktensor and X. - """ - N = len(self.shape) - R = len(self.lmbda) - res = 0 - for r in range(R): - vecs = [] - for n in range(N): - vecs.append(self.U[n][:, r]) - res += self.lmbda[r] * X.ttv(tuple(vecs)) - return res - - def toarray(self): - """ - Converts a ktensor into a dense multidimensional ndarray - - Returns - ------- - arr : np.ndarray - Fully computed multidimensional array whose shape matches - the original ktensor. - """ - A = dot(self.lmbda, khatrirao(tuple(self.U)).T) - return A.reshape(self.shape) - - def totensor(self): - """ - Converts a ktensor into a dense tensor - - Returns - ------- - arr : dtensor - Fully computed multidimensional array whose shape matches - the original ktensor. 
- """ - return dtensor(self.toarray()) - - def tovec(self): - v = zeros(sum([s * self.rank for s in self.shape])) - offset = 0 - for M in self.U: - noff = offset + prod(M.shape) - v[offset:noff] = M.flatten() - offset = noff - return vectorized_ktensor(v, self.shape, self.lmbda) - - -class vectorized_ktensor(object): - - def __init__(self, v, shape, lmbda): - self.v = v - self.shape = shape - self.lmbda = lmbda - - def toktensor(self): - order = len(self.shape) - rank = len(self.v) / sum(self.shape) - U = [None for _ in range(order)] - offset = 0 - for i in range(order): - noff = offset + self.shape[i] * rank - U[i] = self.v[offset:noff].reshape((self.shape[i], rank)) - offset = noff - return ktensor(U, self.lmbda) - -# vim: set et: diff --git a/dependencies/scikit-tensor/sktensor/pyutils.py b/dependencies/scikit-tensor/sktensor/pyutils.py deleted file mode 100644 index d6be0e0..0000000 --- a/dependencies/scikit-tensor/sktensor/pyutils.py +++ /dev/null @@ -1,62 +0,0 @@ -def inherit_docstring_from(cls): - def docstring_inheriting_decorator(fn): - fn.__doc__ = getattr(cls, fn.__name__).__doc__ - return fn - return docstring_inheriting_decorator - - -def is_sequence(obj): - """ - Helper function to determine sequences - across Python 2.x and 3.x - """ - try: - from collections import Sequence - except ImportError: - from operator import isSequenceType - return isSequenceType(obj) - else: - return isinstance(obj, Sequence) - - -def is_number(obj): - """ - Helper function to determine numbers - across Python 2.x and 3.x - """ - try: - from numbers import Number - except ImportError: - from operator import isNumberType - return isNumberType(obj) - else: - return isinstance(obj, Number) - - -def func_attr(f, attr): - """ - Helper function to get the attribute of a function - like, name, code, defaults across Python 2.x and 3.x - """ - if hasattr(f, 'func_%s' % attr): - return getattr(f, 'func_%s' % attr) - elif hasattr(f, '__%s__' % attr): - return getattr(f, '__%s__' % attr) - else: - raise ValueError('Object %s has no attr' % (str(f), attr)) - - -def from_to_without(frm, to, without, step=1, skip=1, reverse=False, separate=False): - """ - Helper function to create ranges with missing entries - """ - if reverse: - frm, to = (to - 1), (frm - 1) - step *= -1 - skip *= -1 - a = list(range(frm, without, step)) - b = list(range(without + skip, to, step)) - if separate: - return a, b - else: - return a + b diff --git a/dependencies/scikit-tensor/sktensor/rescal.py b/dependencies/scikit-tensor/sktensor/rescal.py deleted file mode 100644 index ed2647a..0000000 --- a/dependencies/scikit-tensor/sktensor/rescal.py +++ /dev/null @@ -1,299 +0,0 @@ -# coding: utf-8 -# rescal.py - python script to compute the RESCAL tensor factorization -# Copyright (C) 2013 Maximilian Nickel -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
- -import logging -import time -import numpy as np -from numpy import dot, zeros, array, eye, kron, prod -from numpy.linalg import norm, solve, inv, svd -from scipy.sparse import csr_matrix, issparse -from scipy.sparse.linalg import eigsh -from numpy.random import rand - -__version__ = "0.5" -__all__ = ['als'] - -_DEF_MAXITER = 100 -_DEF_INIT = 'nvecs' -_DEF_CONV = 1e-4 -_DEF_LMBDA = 0 -_DEF_ATTR = [] -_DEF_NO_FIT = 1e9 -_DEF_FIT_METHOD = None - -_log = logging.getLogger('RESCAL') - - -def als(X, rank, **kwargs): - """ - RESCAL-ALS algorithm to compute the RESCAL tensor factorization. - - - Parameters - ---------- - X : list - List of frontal slices X_k of the tensor X. - The shape of each X_k is ('N', 'N'). - X_k's are expected to be instances of scipy.sparse.csr_matrix - rank : int - Rank of the factorization - lmbdaA : float, optional - Regularization parameter for A factor matrix. 0 by default - lmbdaR : float, optional - Regularization parameter for R_k factor matrices. 0 by default - lmbdaV : float, optional - Regularization parameter for V_l factor matrices. 0 by default - attr : list, optional - List of sparse ('N', 'L_l') attribute matrices. 'L_l' may be different - for each attribute - init : string, optional - Initialization method of the factor matrices. 'nvecs' (default) - initializes A based on the eigenvectors of X. 'random' initializes - the factor matrices randomly. - compute_fit : boolean, optional - If true, compute the fit of the factorization compared to X. - For large sparse tensors this should be turned of. None by default. - maxIter : int, optional - Maximium number of iterations of the ALS algorithm. 500 by default. - conv : float, optional - Stop when residual of factorization is less than conv. 1e-5 by default - - Returns - ------- - A : ndarray - array of shape ('N', 'rank') corresponding to the factor matrix A - R : list - list of 'M' arrays of shape ('rank', 'rank') corresponding to the - factor matrices R_k - fval : float - function value of the factorization - itr : int - number of iterations until convergence - exectimes : ndarray - execution times to compute the updates in each iteration - - Examples - -------- - >>> X1 = csr_matrix(([1,1,1], ([2,1,3], [0,2,3])), shape=(4, 4)) - >>> X2 = csr_matrix(([1,1,1,1], ([0,2,3,3], [0,1,2,3])), shape=(4, 4)) - >>> A, R, fval, iter, exectimes = rescal([X1, X2], 2) - - See - --- - For a full description of the algorithm see: - .. [1] Maximilian Nickel, Volker Tresp, Hans-Peter-Kriegel, - "A Three-Way Model for Collective Learning on Multi-Relational Data", - ICML 2011, Bellevue, WA, USA - - .. 
[2] Maximilian Nickel, Volker Tresp, Hans-Peter-Kriegel, - "Factorizing YAGO: Scalable Machine Learning for Linked Data" - WWW 2012, Lyon, France - """ - - # ------------ init options ---------------------------------------------- - ainit = kwargs.pop('init', _DEF_INIT) - maxIter = kwargs.pop('maxIter', _DEF_MAXITER) - conv = kwargs.pop('conv', _DEF_CONV) - lmbdaA = kwargs.pop('lambda_A', _DEF_LMBDA) - lmbdaR = kwargs.pop('lambda_R', _DEF_LMBDA) - lmbdaV = kwargs.pop('lambda_V', _DEF_LMBDA) - func_compute_fval = kwargs.pop('compute_fval', _DEF_FIT_METHOD) - orthogonalize = kwargs.pop('orthogonalize', False) - P = kwargs.pop('attr', _DEF_ATTR) - dtype = kwargs.pop('dtype', np.float) - - # ------------- check input ---------------------------------------------- - if not len(kwargs) == 0: - raise ValueError('Unknown keywords (%s)' % (list(kwargs.keys()))) - - # check frontal slices have same size and are matrices - sz = X[0].shape - for i in range(len(X)): - if X[i].ndim != 2: - raise ValueError('Frontal slices of X must be matrices') - if X[i].shape != sz: - raise ValueError('Frontal slices of X must be all of same shape') - #if not issparse(X[i]): - #raise ValueError('X[%d] is not a sparse matrix' % i) - - if func_compute_fval is None: - if orthogonalize: - func_compute_fval = _compute_fval_orth - elif prod(X[0].shape) * len(X) > _DEF_NO_FIT: - _log.warn('For large tensors automatic computation of fit is disabled by default\nTo compute the fit, call rescal.als with "compute_fit=True"\nPlease note that this might cause memory and runtime problems') - func_compute_fval = None - else: - func_compute_fval = _compute_fval - - n = sz[0] - k = len(X) - - _log.debug( - '[Config] rank: %d | maxIter: %d | conv: %7.1e | lmbda: %7.1e' % - (rank, maxIter, conv, lmbdaA) - ) - _log.debug('[Config] dtype: %s / %s' % (dtype, X[0].dtype)) - - # ------- convert X and P to CSR ------------------------------------------ - for i in range(k): - if issparse(X[i]): - X[i] = X[i].tocsr() - X[i].sort_indices() - for i in range(len(P)): - if issparse(P[i]): - P[i] = P[i].tocoo().tocsr() - P[i].sort_indices() - - # ---------- initialize A ------------------------------------------------ - _log.debug('Initializing A') - if ainit == 'random': - A = array(rand(n, rank), dtype=dtype) - elif ainit == 'nvecs': - S = csr_matrix((n, n), dtype=dtype) - for i in range(k): - S = S + X[i] - S = S + X[i].T - _, A = eigsh(csr_matrix(S, dtype=dtype, shape=(n, n)), rank) - A = array(A, dtype=dtype) - else: - raise ValueError('Unknown init option ("%s")' % ainit) - - # ------- initialize R and Z --------------------------------------------- - R = _updateR(X, A, lmbdaR) - Z = _updateZ(A, P, lmbdaV) - - # precompute norms of X - normX = [sum(M.data ** 2) for M in X] - - # ------ compute factorization ------------------------------------------ - fit = fitchange = fitold = f = 0 - exectimes = [] - for itr in range(maxIter): - tic = time.time() - fitold = fit - A = _updateA(X, A, R, P, Z, lmbdaA, orthogonalize) - R = _updateR(X, A, lmbdaR) - Z = _updateZ(A, P, lmbdaV) - - # compute fit value - if func_compute_fval is not None: - fit = func_compute_fval(X, A, R, P, Z, lmbdaA, lmbdaR, lmbdaV, normX) - else: - fit = np.Inf - - fitchange = abs(fitold - fit) - - toc = time.time() - exectimes.append(toc - tic) - - _log.debug('[%3d] fval: %0.5f | delta: %7.1e | secs: %.5f' % ( - itr, fit, fitchange, exectimes[-1] - )) - if itr > 0 and fitchange < conv: - break - return A, R, f, itr + 1, array(exectimes) - - -# ------------------ Update A 
------------------------------------------------ -def _updateA(X, A, R, P, Z, lmbdaA, orthogonalize): - """Update step for A""" - n, rank = A.shape - F = zeros((n, rank), dtype=A.dtype) - E = zeros((rank, rank), dtype=A.dtype) - - AtA = dot(A.T, A) - - for i in range(len(X)): - F += X[i].dot(dot(A, R[i].T)) + X[i].T.dot(dot(A, R[i])) - E += dot(R[i], dot(AtA, R[i].T)) + dot(R[i].T, dot(AtA, R[i])) - - # regularization - I = lmbdaA * eye(rank, dtype=A.dtype) - - # attributes - for i in range(len(Z)): - F += P[i].dot(Z[i].T) - E += dot(Z[i], Z[i].T) - - # finally compute update for A - A = solve(I + E.T, F.T).T - return orth(A) if orthogonalize else A - - -# ------------------ Update R ------------------------------------------------ -def _updateR(X, A, lmbdaR): - rank = A.shape[1] - U, S, Vt = svd(A, full_matrices=False) - Shat = kron(S, S) - Shat = (Shat / (Shat ** 2 + lmbdaR)).reshape(rank, rank) - R = [] - for i in range(len(X)): - Rn = Shat * dot(U.T, X[i].dot(U)) - Rn = dot(Vt.T, dot(Rn, Vt)) - R.append(Rn) - return R - - -# ------------------ Update Z ------------------------------------------------ -def _updateZ(A, P, lmbdaZ): - Z = [] - if len(P) == 0: - return Z - #_log.debug('Updating Z (Norm EQ, %d)' % len(P)) - pinvAt = inv(dot(A.T, A) + lmbdaZ * eye(A.shape[1], dtype=A.dtype)) - pinvAt = dot(pinvAt, A.T).T - for i in range(len(P)): - if issparse(P[i]): - Zn = P[i].tocoo().T.tocsr().dot(pinvAt).T - else: - Zn = dot(pinvAt.T, P[i]) - Z.append(Zn) - return Z - - -def _compute_fval(X, A, R, P, Z, lmbdaA, lmbdaR, lmbdaZ, normX): - """Compute fit for full slices""" - f = lmbdaA * norm(A) ** 2 - for i in range(len(X)): - ARAt = dot(A, dot(R[i], A.T)) - f += (norm(X[i] - ARAt) ** 2) / normX[i] + lmbdaR * norm(R[i]) ** 2 - return f - - -def _compute_fval_orth(X, A, R, P, Z, lmbdaA, lmbdaR, lmbdaZ, normX): - f = lmbdaA * norm(A) ** 2 - for i in range(len(X)): - f += (normX[i] - norm(R[i]) ** 2) / normX[i] + lmbdaR * norm(R[i]) ** 2 - return f - - -def sptensor_to_list(X): - from scipy.sparse import lil_matrix - if X.ndim != 3: - raise ValueError('Only third-order tensors are supported (ndim=%d)' % X.ndim) - if X.shape[0] != X.shape[1]: - raise ValueError('First and second mode must be of identical length') - N = X.shape[0] - K = X.shape[2] - res = [lil_matrix((N, N)) for _ in range(K)] - for n in range(X.nnz()): - res[X.subs[2][n]][X.subs[0][n], X.subs[1][n]] = X.vals[n] - return res - -def orth(A): - [U, _, Vt] = svd(A, full_matrices=0) - return dot(U, Vt) diff --git a/dependencies/scikit-tensor/sktensor/setup.py b/dependencies/scikit-tensor/sktensor/setup.py deleted file mode 100644 index 1f272ce..0000000 --- a/dependencies/scikit-tensor/sktensor/setup.py +++ /dev/null @@ -1,7 +0,0 @@ -def configuration(parent_package='', top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('sktensor', parent_package, top_path) - - config.add_subpackage('tests') - - return config diff --git a/dependencies/scikit-tensor/sktensor/sptensor.py b/dependencies/scikit-tensor/sktensor/sptensor.py deleted file mode 100644 index 9c4c8a4..0000000 --- a/dependencies/scikit-tensor/sktensor/sptensor.py +++ /dev/null @@ -1,399 +0,0 @@ -# sktensor.sptensor - base module for sparse tensors -# Copyright (C) 2013 Maximilian Nickel -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later 
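The RESCAL-ALS code above factorizes each relation slice X_k as A R_k A^T with a shared entity matrix A; the fit term in `_compute_fval` is built from exactly this reconstruction. A minimal NumPy sketch of the model form (random factors, hypothetical indices, no fitting):

```
import numpy as np

n, rank = 4, 2
rng = np.random.default_rng(0)
A = rng.random((n, rank))                          # one row per entity
R = [rng.random((rank, rank)) for _ in range(2)]   # one mixing matrix per relation

# Predicted score for the triple (subject i, relation k, object j):
i, k, j = 2, 0, 3
score = A[i] @ R[k] @ A[j]

# Dense reconstruction of a single relation slice:
X0_hat = A @ R[0] @ A.T
assert X0_hat.shape == (n, n)
```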
version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -import numpy as np -from numpy import zeros, ones, array, arange, copy, ravel_multi_index, unravel_index -from numpy import setdiff1d, hstack, hsplit, vsplit, sort, prod, lexsort, unique, bincount -from scipy.sparse import coo_matrix -from scipy.sparse import issparse as issparse_mat -from sktensor.core import tensor_mixin -from sktensor.utils import accum -from sktensor.dtensor import unfolded_dtensor -from sktensor.pyutils import inherit_docstring_from, from_to_without - - -__all__ = [ - 'concatenate', - 'fromarray', - 'sptensor', - 'unfolded_sptensor', -] - - -class sptensor(tensor_mixin): - """ - A sparse tensor. - - Data is stored in COOrdinate format. - - Sparse tensors can be instantiated via - - Parameters - ---------- - subs : n-tuple of array-likes - Subscripts of the nonzero entries in the tensor. - Length of tuple n must be equal to dimension of tensor. - vals : array-like - Values of the nonzero entries in the tensor. - shape : n-tuple, optional - Shape of the sparse tensor. - Length of tuple n must be equal to dimension of tensor. - dtype : dtype, optional - Type of the entries in the tensor - accumfun : function pointer - Function to be accumulate duplicate entries - - Examples - -------- - >>> S = sptensor(([0,1,2], [3,2,0], [2,2,2]), [1,1,1], shape=(10, 20, 5), dtype=np.float) - >>> S.shape - (10, 20, 5) - >>> S.dtype - - """ - - def __init__(self, subs, vals, shape=None, dtype=None, accumfun=None, issorted=False): - if not isinstance(subs, tuple): - raise ValueError('Subscripts must be a tuple of array-likes') - if len(subs[0]) != len(vals): - raise ValueError('Subscripts and values must be of equal length') - if dtype is None: - dtype = array(vals).dtype - for i in range(len(subs)): - if array(subs[i]).dtype.kind != 'i': - raise ValueError('Subscripts must be integers') - - vals = array(vals, dtype=dtype) - if accumfun is not None: - vals, subs = accum( - subs, vals, - issorted=False, with_subs=True, func=accumfun - ) - self.subs = subs - self.vals = vals - self.dtype = dtype - self.issorted = issorted - self.accumfun = accumfun - - if shape is None: - self.shape = tuple(array(subs).max(axis=1).flatten() + 1) - else: - self.shape = tuple(int(d) for d in shape) - self.ndim = len(subs) - - def __eq__(self, other): - if isinstance(other, sptensor): - self._sort() - other._sort() - return (self.vals == other.vals).all() and (array(self.subs) == array(other.subs)).all() - elif isinstance(other, np.ndarray): - return (self.toarray() == other).all() - else: - raise NotImplementedError('Unsupported object class for sptensor.__eq__ (%s)' % type(other)) - - def __getitem__(self, idx): - # TODO check performance - if len(idx) != self.ndim: - raise ValueError('subscripts must be complete') - sidx = ones(len(self.vals)) - for i in range(self.ndim): - sidx = np.logical_and(self.subs[i] == idx[i], sidx) - vals = self.vals[sidx] - if len(vals) == 0: - vals = 0 - elif len(vals) > 1: - if self.accumfun is None: - raise ValueError('Duplicate entries without specified accumulation function') - vals = self.accumfun(vals) - return vals - - def __sub__(self, other): - if isinstance(other, np.ndarray): - res = -other - 
res[self.subs] += self.vals - else: - raise NotImplementedError() - return res - - def _sort(self): - # TODO check performance - subs = array(self.subs) - sidx = lexsort(subs) - self.subs = tuple(z.flatten()[sidx] for z in vsplit(subs, len(self.shape))) - self.vals = self.vals[sidx] - self.issorted = True - - def _ttm_compute(self, V, mode, transp): - Z = self.unfold(mode, transp=True).tocsr() - if transp: - V = V.T - Z = Z.dot(V.T) - shape = copy(self.shape) - shape[mode] = V.shape[0] - if issparse_mat(Z): - newT = unfolded_sptensor((Z.data, (Z.row, Z.col)), [mode], None, shape=shape).fold() - else: - newT = unfolded_dtensor(Z.T, mode, shape).fold() - - return newT - - def _ttv_compute(self, v, dims, vidx, remdims): - nvals = self.vals - nsubs = self.subs - for i in range(len(dims)): - idx = nsubs[dims[i]] - w = v[vidx[i]] - nvals = nvals * w[idx] - - # Case 1: all dimensions used -> return sum - if len(remdims) == 0: - return nvals.sum() - - nsubs = tuple(self.subs[i] for i in remdims) - nshp = tuple(self.shape[i] for i in remdims) - - # Case 2: result is a vector - if len(remdims) == 1: - usubs = unique(nsubs[0]) - bins = usubs.searchsorted(nsubs[0]) - c = bincount(bins, weights=nvals) - (nz,) = c.nonzero() - return sptensor((usubs[nz],), c[nz], nshp) - - # Case 3: result is an array - return sptensor(nsubs, nvals, shape=nshp, accumfun=np.sum) - - def _ttm_me_compute(self, V, edims, sdims, transp): - """ - Assume Y = T x_i V_i for i = 1...n can fit into memory - """ - shapeY = np.copy(self.shape) - - # Determine size of Y - for n in np.union1d(edims, sdims): - shapeY[n] = V[n].shape[1] if transp else V[n].shape[0] - - # Allocate Y (final result) and v (vectors for elementwise computations) - Y = zeros(shapeY) - shapeY = array(shapeY) - v = [None for _ in range(len(edims))] - - for i in range(np.prod(shapeY[edims])): - rsubs = unravel_index(shapeY[edims], i) - - def unfold(self, rdims, cdims=None, transp=False): - if isinstance(rdims, type(1)): - rdims = [rdims] - if transp: - cdims = rdims - rdims = setdiff1d(list(range(self.ndim)), cdims)[::-1] - elif cdims is None: - cdims = setdiff1d(list(range(self.ndim)), rdims)[::-1] - if not (arange(self.ndim) == sort(hstack((rdims, cdims)))).all(): - raise ValueError( - 'Incorrect specification of dimensions (rdims: %s, cdims: %s)' - % (str(rdims), str(cdims)) - ) - M = prod([self.shape[r] for r in rdims]) - N = prod([self.shape[c] for c in cdims]) - ridx = _build_idx(self.subs, self.vals, rdims, self.shape) - cidx = _build_idx(self.subs, self.vals, cdims, self.shape) - return unfolded_sptensor((self.vals, (ridx, cidx)), (M, N), rdims, cdims, self.shape) - - @inherit_docstring_from(tensor_mixin) - def uttkrp(self, U, mode): - R = U[1].shape[1] if mode == 0 else U[0].shape[1] - #dims = list(range(0, mode)) + list(range(mode + 1, self.ndim)) - dims = from_to_without(0, self.ndim, mode) - V = zeros((self.shape[mode], R)) - for r in range(R): - Z = tuple(U[n][:, r] for n in dims) - TZ = self.ttv(Z, mode, without=True) - if isinstance(TZ, sptensor): - V[TZ.subs, r] = TZ.vals - else: - V[:, r] = self.ttv(Z, mode, without=True) - return V - - @inherit_docstring_from(tensor_mixin) - def transpose(self, axes=None): - """ - Compute transpose of sparse tensors. - - Parameters - ---------- - axes : array_like of ints, optional - Permute the axes according to the values given. - - Returns - ------- - d : dtensor - dtensor with axes permuted. 
- """ - if axes is None: - raise NotImplementedError( - 'Sparse tensor transposition without axes argument is not supported' - ) - nsubs = tuple([self.subs[idx] for idx in axes]) - nshape = [self.shape[idx] for idx in axes] - return sptensor(nsubs, self.vals, nshape) - - def concatenate(self, tpl, axis=None): - """ - Concatenates sparse tensors. - - Parameters - ---------- - tpl : tuple of sparse tensors - Tensors to be concatenated. - axis : int, optional - Axis along which concatenation should take place - """ - if axis is None: - raise NotImplementedError( - 'Sparse tensor concatenation without axis argument is not supported' - ) - T = self - for i in range(1, len(tpl)): - T = _single_concatenate(T, tpl[i], axis=axis) - return T - - def norm(self): - """ - Frobenius norm for tensors - - References - ---------- - [Kolda and Bader, 2009; p.457] - """ - return np.linalg.norm(self.vals) - - def toarray(self): - A = zeros(self.shape) - A.put(ravel_multi_index(self.subs, tuple(self.shape)), self.vals) - return A - - -class unfolded_sptensor(coo_matrix): - """ - An unfolded sparse tensor. - - Data is stored in form of a sparse COO matrix. - Unfolded_sptensor objects additionall hold information about the - original tensor, such that re-folding the tensor into its original - shape can be done easily. - - Unfolded_sptensor objects can be instantiated via - - Parameters - ---------- - tpl : (data, (i, j)) tuple - Construct sparse matrix from three arrays: - 1. ``data[:]`` the entries of the matrix, in any order - 2. ``i[:]`` the row indices of the matrix entries - 3. ``j[:]`` the column indices of the matrix entries - where ``A[i[k], j[k]] = data[k]``. - shape : tuple of integers - Shape of the unfolded tensor. - rdims : array_like of integers - Modes of the original tensor that are mapped onto rows. - cdims : array_like of integers - Modes of the original tensor that are mapped onto columns. - ten_shape : tuple of integers - Shape of the original tensor. - dtype : np.dtype, optional - Data type of the unfolded tensor. - copy : boolean, optional - If true, data and subscripts are copied. - - Returns - ------- - M : unfolded_sptensor - Sparse matrix in COO format where ``rdims`` are mapped to rows and - ``cdims`` are mapped to columns of the matrix. - """ - - def __init__(self, tpl, shape, rdims, cdims, ten_shape, dtype=None, copy=False): - self.ten_shape = array(ten_shape) - if isinstance(rdims, int): - rdims = [rdims] - if cdims is None: - cdims = setdiff1d(list(range(len(self.ten_shape))), rdims)[::-1] - self.rdims = rdims - self.cdims = cdims - super(unfolded_sptensor, self).__init__(tpl, shape=shape, dtype=dtype, copy=copy) - - def fold(self): - """ - Recreate original tensor by folding unfolded_sptensor according toc - ``ten_shape``. - - Returns - ------- - T : sptensor - Sparse tensor that is created by refolding according to ``ten_shape``. 
- """ - nsubs = zeros((len(self.data), len(self.ten_shape)), dtype=np.int) - if len(self.rdims) > 0: - nidx = unravel_index(self.row, self.ten_shape[self.rdims]) - for i in range(len(self.rdims)): - nsubs[:, self.rdims[i]] = nidx[i] - if len(self.cdims) > 0: - nidx = unravel_index(self.col, self.ten_shape[self.cdims]) - for i in range(len(self.cdims)): - nsubs[:, self.cdims[i]] = nidx[i] - nsubs = [z.flatten() for z in hsplit(nsubs, len(self.ten_shape))] - return sptensor(tuple(nsubs), self.data, self.ten_shape) - - -def fromarray(A): - """Create a sptensor from a dense numpy array""" - subs = np.nonzero(A) - vals = A[subs] - return sptensor(subs, vals, shape=A.shape, dtype=A.dtype) - - -def _single_concatenate(ten, other, axis): - tshape = ten.shape - oshape = other.shape - if len(tshape) != len(oshape): - raise ValueError("len(tshape) != len(oshape") - oaxes = setdiff1d(list(range(len(tshape))), [axis]) - for i in oaxes: - if tshape[i] != oshape[i]: - raise ValueError("Dimensions must match") - nsubs = [None for _ in range(len(tshape))] - for i in oaxes: - nsubs[i] = np.concatenate((ten.subs[i], other.subs[i])) - nsubs[axis] = np.concatenate(( - ten.subs[axis], other.subs[axis] + tshape[axis] - )) - nvals = np.concatenate((ten.vals, other.vals)) - nshape = np.copy(tshape) - nshape[axis] = tshape[axis] + oshape[axis] - return sptensor(nsubs, nvals, nshape) - - -def _build_idx(subs, vals, dims, tshape): - shape = array([tshape[d] for d in dims], ndmin=1) - dims = array(dims, ndmin=1) - if len(shape) == 0: - idx = ones(len(vals), dtype=vals.dtype) - elif len(subs) == 0: - idx = array(tuple()) - else: - idx = ravel_multi_index(tuple(subs[i] for i in dims), shape) - return idx diff --git a/dependencies/scikit-tensor/sktensor/tests/__init__.py b/dependencies/scikit-tensor/sktensor/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/dependencies/scikit-tensor/sktensor/tests/sptensor_fixture.py b/dependencies/scikit-tensor/sktensor/tests/sptensor_fixture.py deleted file mode 100644 index e3da1db..0000000 --- a/dependencies/scikit-tensor/sktensor/tests/sptensor_fixture.py +++ /dev/null @@ -1,21 +0,0 @@ -from numpy import array -import pytest - - -@pytest.fixture -def subs(): - return ( - array([0, 1, 0, 5, 7, 8]), - array([2, 0, 4, 5, 3, 9]), - array([0, 1, 2, 2, 1, 0]) - ) - - -@pytest.fixture -def vals(): - return array([1, 2, 3, 4, 5, 6.1]) - - -@pytest.fixture -def shape(): - return (10, 12, 3) diff --git a/dependencies/scikit-tensor/sktensor/tests/sptensor_rand_fixture.py b/dependencies/scikit-tensor/sktensor/tests/sptensor_rand_fixture.py deleted file mode 100644 index 2f47ca3..0000000 --- a/dependencies/scikit-tensor/sktensor/tests/sptensor_rand_fixture.py +++ /dev/null @@ -1,27 +0,0 @@ -from numpy.random import randint, seed -import pytest - - -@pytest.fixture -def sptensor_seed(): - return seed(5) - - -@pytest.fixture -def sz(): - return 100 - - -@pytest.fixture -def vals(sptensor_seed, sz): - return randint(0, 100, sz) - - -@pytest.fixture -def shape(): - return (25, 11, 18, 7, 2) - - -@pytest.fixture -def subs(sptensor_seed, shape, sz): - return tuple(randint(0, shape[i], sz) for i in range(len(shape))) diff --git a/dependencies/scikit-tensor/sktensor/tests/test_base.py b/dependencies/scikit-tensor/sktensor/tests/test_base.py deleted file mode 100644 index 11f99ba..0000000 --- a/dependencies/scikit-tensor/sktensor/tests/test_base.py +++ /dev/null @@ -1,93 +0,0 @@ -from numpy import array -from numpy.random import randn -from sktensor.core import * -from 
sktensor import dtensor, sptensor, ktensor -from .ttm_fixture import T, U, Y -from .sptensor_fixture import shape, vals, subs - - -def test_check_multiplication_dims(): - ndims = 3 - M = 2 - assert ([1, 2] == check_multiplication_dims(0, ndims, M, without=True)).all() - assert ([0, 2] == check_multiplication_dims(1, ndims, M, without=True)).all() - assert ([0, 1] == check_multiplication_dims(2, ndims, M, without=True)).all() - - -def test_khatrirao(): - A = array([ - [1, 2, 3], - [4, 5, 6], - [7, 8, 9] - ]) - B = array([ - [1, 4, 7], - [2, 5, 8], - [3, 6, 9] - ]) - C = array([ - [1, 8, 21], - [2, 10, 24], - [3, 12, 27], - [4, 20, 42], - [8, 25, 48], - [12, 30, 54], - [7, 32, 63], - [14, 40, 72], - [21, 48, 81] - ]) - - D = khatrirao((A, B)) - assert C.shape == D.shape - assert (C == D).all() - - -def test_dense_fold(T): - X = dtensor(T) - I, J, K = T.shape - X1 = X[:, :, 0] - X2 = X[:, :, 1] - - U = X.unfold(0) - assert (3, 8) == U.shape - for j in range(J): - assert (U[:, j] == X1[:, j]).all() - assert (U[:, j + J] == X2[:, j]).all() - - U = X.unfold(1) - assert (4, 6) == U.shape - for i in range(I): - assert (U[:, i] == X1[i, :]).all() - assert (U[:, i + I] == X2[i, :]).all() - - U = X.unfold(2) - assert (2, 12) == U.shape - for k in range(U.shape[1]): - assert (U[:, k] == array([X1.flatten('F')[k], X2.flatten('F')[k]])).all() - - -def test_dtensor_fold_unfold(): - sz = (10, 35, 3, 12) - X = dtensor(randn(*sz)) - for i in range(4): - U = X.unfold(i).fold() - assert (X == U).all() - - -def test_dtensor_ttm(T, U, Y): - X = dtensor(T) - Y2 = X.ttm(U, 0) - assert (2, 4, 2) == Y2.shape - assert (Y == Y2).all() - - -def test_spttv(subs, vals, shape): - #subs = ( - # array([0, 1, 0, 5, 7, 8]), - # array([2, 0, 4, 5, 3, 9]), - # array([0, 1, 2, 2, 1, 0]) - #) - #vals = array([1, 1, 1, 1, 1, 1]) - S = sptensor(subs, vals, shape=shape) - K = ktensor([randn(shape[0], 2), randn(shape[1], 2), randn(shape[2], 2)]) - K.innerprod(S) diff --git a/dependencies/scikit-tensor/sktensor/tests/test_dtensor.py b/dependencies/scikit-tensor/sktensor/tests/test_dtensor.py deleted file mode 100644 index cd02da9..0000000 --- a/dependencies/scikit-tensor/sktensor/tests/test_dtensor.py +++ /dev/null @@ -1,53 +0,0 @@ -from numpy import array -from numpy.random import randn -from sktensor.dtensor import dtensor -from .ttm_fixture import T, U, Y - - -def test_new(): - sz = (10, 23, 5) - A = randn(*sz) - T = dtensor(A) - assert A.ndim == T.ndim - assert A.shape == T.shape - assert (A == T).all() - assert (T == A).all() - - -def test_dense_fold(T): - X = dtensor(T) - I, J, K = T.shape - X1 = X[:, :, 0] - X2 = X[:, :, 1] - - U = X.unfold(0) - assert (3, 8) == U.shape - for j in range(J): - assert (U[:, j] == X1[:, j]).all() - assert (U[:, j + J] == X2[:, j]).all() - - U = X.unfold(1) - assert (4, 6) == U.shape - for i in range(I): - assert (U[:, i] == X1[i, :]).all() - assert (U[:, i + I] == X2[i, :]).all() - - U = X.unfold(2) - assert (2, 12) == U.shape - for k in range(U.shape[1]): - assert (U[:, k] == array([X1.flatten('F')[k], X2.flatten('F')[k]])).all() - - -def test_dtensor_fold_unfold(): - sz = (10, 35, 3, 12) - X = dtensor(randn(*sz)) - for i in range(4): - U = X.unfold(i).fold() - assert (X == U).all() - - -def test_dtensor_ttm(T, Y, U): - X = dtensor(T) - Y2 = X.ttm(U, 0) - assert (2, 4, 2) == Y2.shape - assert (Y == Y2).all() diff --git a/dependencies/scikit-tensor/sktensor/tests/test_ktensor.py b/dependencies/scikit-tensor/sktensor/tests/test_ktensor.py deleted file mode 100644 index b05eaef..0000000 --- 
a/dependencies/scikit-tensor/sktensor/tests/test_ktensor.py +++ /dev/null @@ -1,14 +0,0 @@ -from numpy.random import randn -from sktensor import ktensor - - -def test_vectorization(): - rank = 5 - shape = (5, 27, 3, 13) - U = [randn(s, rank) for s in shape] - K = ktensor(U) - v = K.tovec() - K2 = v.toktensor() - - assert sum([s * rank for s in shape]) == len(v.v) - assert K == K2 diff --git a/dependencies/scikit-tensor/sktensor/tests/test_pyutils.py b/dependencies/scikit-tensor/sktensor/tests/test_pyutils.py deleted file mode 100644 index 71db480..0000000 --- a/dependencies/scikit-tensor/sktensor/tests/test_pyutils.py +++ /dev/null @@ -1,11 +0,0 @@ -from sktensor.pyutils import * - - -def test_from_to_without(): - frm, to, without = 2, 88, 47 - lst = list(range(frm, without)) + list(range(without + 1, to)) - assert lst == from_to_without(frm, to, without) - - rlst = list(range(to - 1, without, -1)) + list(range(without - 1, frm - 1,-1)) - assert rlst == from_to_without(frm, to, without, reverse=True) - assert lst[::-1] == from_to_without(frm, to, without, reverse=True) diff --git a/dependencies/scikit-tensor/sktensor/tests/test_sptensor.py b/dependencies/scikit-tensor/sktensor/tests/test_sptensor.py deleted file mode 100644 index c22046e..0000000 --- a/dependencies/scikit-tensor/sktensor/tests/test_sptensor.py +++ /dev/null @@ -1,183 +0,0 @@ -import pytest -import numpy as np -from numpy import ones, zeros, array, setdiff1d, allclose -from numpy.random import randint -from sktensor.dtensor import dtensor -from sktensor.sptensor import sptensor, fromarray -from .ttm_fixture import T, U, Y -from .sptensor_rand_fixture import subs, vals, shape, sptensor_seed, sz - - -def setup_diagonal(): - """ - Setup data for a 20x20x20 diagonal tensor - """ - n = 20 - shape = (n, n, n) - subs = [np.arange(0, shape[i]) for i in range(len(shape))] - vals = ones(n) - return tuple(subs), vals, shape - - -def test_init(subs, vals, shape): - """ - Creation of new sptensor objects - """ - T = sptensor(subs, vals, shape) - assert len(shape) == T.ndim - assert (array(shape) == T.shape).all() - - T = sptensor(subs, vals) - tshape = array(subs).max(axis=1) + 1 - assert len(subs) == len(T.shape) - assert (tshape == array(T.shape)).all() - - -def test_init_diagonal(): - subs, vals, shape = setup_diagonal() - T = sptensor(subs, vals, shape) - assert len(shape) == T.ndim - assert (array(shape) == T.shape).all() - - T = sptensor(subs, vals) - assert len(subs) == len(T.shape) - assert (shape == array(T.shape)).all() - - -def test_non2Dsubs(): - with pytest.raises(ValueError): - sptensor(randint(0, 10, 18).reshape(3, 3, 2), ones(10)) - - -def test_nonEqualLength(subs): - with pytest.raises(ValueError): - sptensor(subs, ones(len(subs) + 1)) - - -def test_unfold(T, subs, vals, shape): - Td = dtensor(zeros(shape, dtype=np.float32)) - Td[subs] = vals - - for i in range(len(shape)): - rdims = [i] - cdims = setdiff1d(list(range(len(shape))), rdims)[::-1] - Md = Td.unfold(i) - - T = sptensor(subs, vals, shape, accumfun=lambda l: l[-1]) - - Ms = T.unfold(rdims, cdims) - assert Md.shape == Ms.shape - assert (allclose(Md, Ms.toarray())) - - Ms = T.unfold(rdims) - assert Md.shape == Ms.shape - assert (allclose(Md, Ms.toarray())) - - Md = Md.T - Ms = T.unfold(rdims, cdims, transp=True) - assert Md.shape == Ms.shape - assert (allclose(Md, Ms.toarray())) - - -def test_fold(subs, vals, shape): - T = sptensor(subs, vals, shape) - for i in range(len(shape)): - X = T.unfold([i]).fold() - assert shape == tuple(T.shape) - assert len(shape) == 
len(T.subs) - assert len(subs) == len(T.subs) - assert X == T - for j in range(len(subs)): - subs[j].sort() - T.subs[j].sort() - assert (subs[j] == T.subs[j]).all() - - -def test_ttm(T, Y, U): - S = sptensor(T.nonzero(), T.flatten(), T.shape) - Y2 = S.ttm(U, 0) - assert (2, 4, 2) == Y2.shape - assert (Y == Y2).all() - - -def test_ttv_sparse_result(): - # Test case by Andre Panisson to check return type of sptensor.ttv - subs = ( - array([0, 1, 0, 5, 7, 8]), - array([2, 0, 4, 5, 3, 9]), - array([0, 1, 2, 2, 1, 0]) - ) - vals = array([1, 1, 1, 1, 1, 1]) - S = sptensor(subs, vals, shape=[10, 10, 3]) - - sttv = S.ttv((zeros(10), zeros(10)), modes=[0, 1]) - assert type(sttv) == sptensor - # sparse tensor should return only nonzero vals - assert (allclose(np.array([]), sttv.vals)) - assert (allclose(np.array([]), sttv.subs)) - assert sttv.shape == (3,) - - -def test_ttv(T): - result = array([ - [70, 190], - [80, 200], - [90, 210] - ]) - - X = fromarray(T) - v = array([1, 2, 3, 4]) - Xv = X.ttv(v, 1) - - assert (3, 2) == Xv.shape - assert (Xv == result).all() - - -def test_sttm_me(T, U): - S = sptensor(T.nonzero(), T.flatten(), T.shape) - S._ttm_me_compute(U, [1], [0], False) - - -def test_sp_uttkrp(subs, vals, shape): - # Test case by Andre Panisson, sparse ttv - # see issue #3 - S = sptensor(subs, vals, shape) - U = [] - for shp in shape: - U.append(np.zeros((shp, 5))) - SU = S.uttkrp(U, mode=0) - assert SU.shape == (25, 5) - - -def test_getitem(): - subs = ( - array([0, 1, 0, 5, 7, 8]), - array([2, 0, 4, 5, 3, 9]), - array([0, 1, 2, 2, 1, 0]) - ) - vals = array([1, 2, 3, 4, 5, 6]) - S = sptensor(subs, vals, shape=[10, 10, 3]) - assert 0 == S[1, 1, 1] - assert 0 == S[1, 2, 3] - assert 1 == S[0, 2, 0] - assert 2 == S[1, 0, 1] - assert 3 == S[0, 4, 2] - assert 4 == S[5, 5, 2] - assert 5 == S[7, 3, 1] - assert 6 == S[8, 9, 0] - - -def test_add(): - subs = ( - array([0, 1, 0]), - array([2, 0, 2]), - array([0, 1, 2]) - ) - vals = array([1, 2, 3]) - S = sptensor(subs, vals, shape=[3, 3, 3]) - D = np.arange(27).reshape(3, 3, 3) - T = S - D - for i in range(3): - for j in range(3): - for k in range(3): - assert S[i, j, k] - D[i, j, k] == T[i, j, k] diff --git a/dependencies/scikit-tensor/sktensor/tests/test_tucker_hooi.py b/dependencies/scikit-tensor/sktensor/tests/test_tucker_hooi.py deleted file mode 100644 index 99852b2..0000000 --- a/dependencies/scikit-tensor/sktensor/tests/test_tucker_hooi.py +++ /dev/null @@ -1,48 +0,0 @@ -import pytest -import logging -from numpy import allclose -from numpy.random import randn -from scipy.sparse import rand as sprand -from sktensor import tucker -from sktensor.core import ttm -from sktensor.dtensor import dtensor, unfolded_dtensor -from sktensor.sptensor import unfolded_sptensor -#from sktensor.rotation import orthomax - -logging.basicConfig(level=logging.INFO) - - -def normalize(X): - return X / X.sum(axis=0) - - -def disabled_test_factorization(): - I, J, K, rank = 10, 20, 75, 5 - A = orthomax(randn(I, rank)) - B = orthomax(randn(J, rank)) - C = orthomax(randn(K, rank)) - - core_real = dtensor(randn(rank, rank, rank)) - T = core_real.ttm([A, B, C]) - core, U = tucker.hooi(T, rank) - - assert allclose(T, ttm(core, U)) - assert allclose(A, orthomax(U[0])) - assert allclose(B, orthomax(U[1])) - assert allclose(C, orthomax(U[2])) - assert allclose(core_real, core) - - -def disabled_test_factorization_sparse(): - I, J, K, rank = 10, 20, 75, 5 - Tmat = sprand(I, J * K, 0.1).tocoo() - T = unfolded_sptensor((Tmat.data, (Tmat.row, Tmat.col)), None, 0, [], (I, J, 
K)).fold() - core, U = tucker.hooi(T, rank, maxIter=20) - - Tmat = Tmat.toarray() - T = unfolded_dtensor(Tmat, 0, (I, J, K)).fold() - core2, U2 = tucker.hooi(T, rank, maxIter=20) - - assert allclose(core2, core) - for i in range(len(U)): - assert allclose(U2[i], U[i]) diff --git a/dependencies/scikit-tensor/sktensor/tests/test_utils.py b/dependencies/scikit-tensor/sktensor/tests/test_utils.py deleted file mode 100644 index df24ee4..0000000 --- a/dependencies/scikit-tensor/sktensor/tests/test_utils.py +++ /dev/null @@ -1,20 +0,0 @@ -from ..utils import accum -from numpy import array, allclose - - -def test_accum(): - subs1 = array([0, 1, 1, 2, 2, 2]) - subs2 = array([0, 1, 1, 1, 2, 2]) - vals = array([1, 2, 3, 4, 5, 6]) - nvals, nsubs = accum((subs1, subs2), vals, with_subs=True) - assert allclose(nvals, array([1, 5, 4, 11])) - assert allclose(nsubs[0], array([0, 1, 2, 2])) - assert allclose(nsubs[1], array([0, 1, 1, 2])) - - subs1 = array([0, 0, 1]) - subs2 = array([0, 0, 1]) - vals = array([1, 2, 3]) - nvals, nsubs = accum((subs1, subs2), vals, with_subs=True) - assert allclose(nvals, array([3, 3])) - assert allclose(nsubs[0], array([0, 1])) - assert allclose(nsubs[1], array([0, 1])) diff --git a/dependencies/scikit-tensor/sktensor/tests/ttm_fixture.py b/dependencies/scikit-tensor/sktensor/tests/ttm_fixture.py deleted file mode 100644 index 0026f55..0000000 --- a/dependencies/scikit-tensor/sktensor/tests/ttm_fixture.py +++ /dev/null @@ -1,23 +0,0 @@ -from numpy import array, zeros -import pytest - - -@pytest.fixture -def T(): - T = zeros((3, 4, 2)) - T[:, :, 0] = array([[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]) - T[:, :, 1] = array([[13, 16, 19, 22], [14, 17, 20, 23], [15, 18, 21, 24]]) - return T - - -@pytest.fixture -def Y(): - Y = zeros((2, 4, 2)) - Y[:, :, 0] = array([[22, 49, 76, 103], [28, 64, 100, 136]]) - Y[:, :, 1] = array([[130, 157, 184, 211], [172, 208, 244, 280]]) - return Y - - -@pytest.fixture -def U(): - return array([[1, 3, 5], [2, 4, 6]]) diff --git a/dependencies/scikit-tensor/sktensor/tucker.py b/dependencies/scikit-tensor/sktensor/tucker.py deleted file mode 100644 index 5c91706..0000000 --- a/dependencies/scikit-tensor/sktensor/tucker.py +++ /dev/null @@ -1,150 +0,0 @@ -# sktensor.tucker - Algorithms to compute Tucker decompositions -# Copyright (C) 2013 Maximilian Nickel -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -import logging -import time -import numpy as np -from numpy import array, ones, sqrt -from numpy.random import rand -from .pyutils import is_number -from .core import ttm, nvecs, norm - -__all__ = [ - 'hooi', - 'hosvd', -] - -_log = logging.getLogger('TUCKER') -__DEF_MAXITER = 500 -__DEF_INIT = 'nvecs' -__DEF_CONV = 1e-7 - - -def hooi(X, rank, **kwargs): - """ - Compute Tucker decompositions of a tensor using Higher-Order Orthogonal - Iterations. 
- - Parameters - ---------- - X : tensor_mixin - The tensor to be decomposed - rank : array_like - The rank of the decompositions for each mode of the tensor. - The length of ``rank`` must match the number of modes of ``X``. - init : {'random', 'nvecs'}, optional - The initialization method to use. - - random : Factor matrices are initialized randomly. - - nvecs : Factor matrices are initialzed via HOSVD. - default : 'nvecs' - - Examples - -------- - Create dense tensor - - >>> T = np.zeros((3, 4, 2)) - >>> T[:, :, 0] = [[ 1, 4, 7, 10], [ 2, 5, 8, 11], [3, 6, 9, 12]] - >>> T[:, :, 1] = [[13, 16, 19, 22], [14, 17, 20, 23], [15, 18, 21, 24]] - >>> T = dtensor(T) - - Compute Tucker decompositions of ``T`` with n-rank [2, 3, 1] via higher-order - orthogonal iterations - - >>> Y = hooi(T, [2, 3, 1], init='nvecs') - - Shape of the core tensor matches n-rank of the decompositions. - - >>> Y['core'].shape - (2, 3, 1) - >>> Y['U'][1].shape - (3, 2) - - References - ---------- - .. [1] L. De Lathauwer, B. De Moor, J. Vandewalle: On the best rank-1 and - rank-(R_1, R_2, \ldots, R_N) approximation of higher order tensors; - IEEE Trans. Signal Process. 49 (2001), pp. 2262-2271 - """ - # init options - ainit = kwargs.pop('init', __DEF_INIT) - maxIter = kwargs.pop('maxIter', __DEF_MAXITER) - conv = kwargs.pop('conv', __DEF_CONV) - dtype = kwargs.pop('dtype', X.dtype) - if not len(kwargs) == 0: - raise ValueError('Unknown keywords (%s)' % (list(kwargs.keys()))) - - ndims = X.ndim - if is_number(rank): - rank = rank * ones(ndims) - - normX = norm(X) - - U = __init(ainit, X, ndims, rank, dtype) - fit = 0 - exectimes = [] - for itr in range(maxIter): - tic = time.clock() - fitold = fit - - for n in range(ndims): - Utilde = ttm(X, U, n, transp=True, without=True) - U[n] = nvecs(Utilde, n, rank[n]) - - # compute core tensor to get fit - core = ttm(Utilde, U, n, transp=True) - - # since factors are orthonormal, compute fit on core tensor - normresidual = sqrt(normX ** 2 - norm(core) ** 2) - - # fraction explained by model - fit = 1 - (normresidual / normX) - fitchange = abs(fitold - fit) - exectimes.append(time.clock() - tic) - - _log.debug( - '[%3d] fit: %.5f | delta: %7.1e | secs: %.5f' - % (itr, fit, fitchange, exectimes[-1]) - ) - if itr > 1 and fitchange < conv: - break - return core, U - -def hosvd(X, rank, dims=None, dtype=None, compute_core=True): - U = [None for _ in range(X.ndim)] - if dims is None: - dims = list(range(X.ndim)) - if dtype is None: - dtype = X.dtype - for d in dims: - U[d] = array(nvecs(X, d, rank[d]), dtype=dtype) - if compute_core: - core = X.ttm(U, transp=True) - return U, core - else: - return U - -def __init(init, X, N, rank, dtype): - # Don't compute initial factor for first index, gets computed in - # first iteration - Uinit = [None] - if isinstance(init, list): - Uinit = init - elif init == 'random': - for n in range(1, N): - Uinit.append(array(rand(X.shape[n], rank[n]), dtype=dtype)) - elif init == 'nvecs': - Uinit = hosvd(X, rank, list(range(1, N)), dtype=dtype, compute_core=False) - return Uinit diff --git a/dependencies/scikit-tensor/sktensor/utils.py b/dependencies/scikit-tensor/sktensor/utils.py deleted file mode 100644 index f35922e..0000000 --- a/dependencies/scikit-tensor/sktensor/utils.py +++ /dev/null @@ -1,38 +0,0 @@ -import numpy as np -from numpy import cumprod, array, arange, zeros, floor, lexsort - - -def accum(subs, vals, func=np.sum, issorted=False, with_subs=False): - """ - NumPy implementation for Matlab's accumarray - """ - # sort accmap for ediff if not 
sorted
-    if not issorted:
-        sidx = lexsort(subs, axis=0)
-        subs = [sub[sidx] for sub in subs]
-        vals = vals[sidx]
-    idx = np.where(np.diff(subs).any(axis=0))[0] + 1
-    idx = np.concatenate(([0], idx, [subs[0].shape[0]]))
-
-    # create values array
-    nvals = np.zeros(len(idx) - 1)
-    for i in range(len(idx) - 1):
-        nvals[i] = func(vals[idx[i]:idx[i + 1]])
-
-    # return results
-    if with_subs:
-        return nvals, tuple(sub[idx[:-1]] for sub in subs)
-    else:
-        return nvals
-
-
-def unravel_dimension(shape, idx):
-    if isinstance(idx, type(1)):
-        idx = array([idx])
-    k = [1] + list(cumprod(shape[:-1]))
-    n = len(shape)
-    subs = zeros((len(idx), n), dtype=np.int)
-    for i in arange(n - 1, -1, -1):
-        subs[:, i] = floor(idx / k[i])
-        idx = idx % k[i]
-    return subs
diff --git a/dependencies/scikit-tensor/sktensor/version.py b/dependencies/scikit-tensor/sktensor/version.py
deleted file mode 100644
index 11d27f8..0000000
--- a/dependencies/scikit-tensor/sktensor/version.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = '0.1'
diff --git a/requirements.txt b/requirements.txt
index d53af0c..ecbd861 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,6 @@
 numpy
 scipy
-tensorly
+scikit-tensor-py3
+tensorly-musco
 absl-py
 tqdm
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 4262dee..dded4ad 100644
--- a/setup.py
+++ b/setup.py
@@ -2,13 +2,6 @@
 from setuptools import setup, find_packages
 from setuptools.command.install import install
 
-
-class InstallLocalPackage(install):
-    def run(self):
-        install.run(self)
-        subprocess.call("cd dependencies/scikit-tensor && python setup.py install && cd ../..", shell=True)
-
-
 try:
     from pip._internal.req import parse_requirements
 except ImportError:
@@ -22,14 +15,13 @@ def load_requirements(file_name):
 
 setup(
     name="musco-tf",
-    version="1.0.1",
+    version="1.0.2",
     description="MUSCO: Multi-Stage COmpression of neural networks",
     author="Julia Gusak, Maksym Kholiavchenko, Evgeny Ponomarev, Larisa Markeeva, Andrzej Cichocki, Ivan Oseledets",
     author_email="m.kholyavchenko@innopolis.ru",
     url="https://github.com/musco-ai/musco-tf",
-    download_url="https://github.com/musco-ai/musco-tf/archive/1.0.1.tar.gz",
+    download_url="https://github.com/musco-ai/musco-tf/archive/1.0.2.tar.gz",
     license="Apache-2.0",
     packages=find_packages(),
-    cmdclass={"install": InstallLocalPackage},
    install_requires=load_requirements("requirements.txt")
 )
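
With the vendored copy of scikit-tensor deleted and the custom `InstallLocalPackage` step removed from setup.py, the same tensor routines are expected to come from the `scikit-tensor-py3` package on PyPI, pulled in via requirements.txt by a plain `pip install musco-tf`. As a minimal sanity check (not part of the patch), a sketch like the one below can confirm that the new dependency resolves and exposes the API the removed tests relied on; the module and function names (`sktensor.dtensor`, `sktensor.tucker.hooi`) are assumed to match the deleted vendored code and are not verified here.

```
# Minimal sanity-check sketch: assumes scikit-tensor-py3 installs an `sktensor`
# package with the same dtensor / tucker.hooi API as the vendored copy deleted above.
import numpy as np
from sktensor.dtensor import dtensor
from sktensor import tucker

# Same 3x4x2 example tensor used in the removed ttm_fixture.py and hooi docstring.
T = np.zeros((3, 4, 2))
T[:, :, 0] = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]
T[:, :, 1] = [[13, 16, 19, 22], [14, 17, 20, 23], [15, 18, 21, 24]]

core, U = tucker.hooi(dtensor(T), [2, 3, 1], init='nvecs')
print(core.shape)              # core shape should follow the requested n-rank (2, 3, 1)
print([u.shape for u in U])    # one factor matrix per mode
```

If the import fails, the updated dependency list in requirements.txt (`scikit-tensor-py3`, `tensorly-musco`) is the first place to look, since no local scikit-tensor build runs at install time anymore.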