diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/ntheory/tests/test_bbp_pi.py b/llmeval-env/lib/python3.10/site-packages/sympy/ntheory/tests/test_bbp_pi.py new file mode 100644 index 0000000000000000000000000000000000000000..c18188e3c8a308c102e97434078915317763b33c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/ntheory/tests/test_bbp_pi.py @@ -0,0 +1,133 @@ +from sympy.core.random import randint + +from sympy.ntheory.bbp_pi import pi_hex_digits +from
sympy.testing.pytest import raises + + +# http://www.herongyang.com/Cryptography/Blowfish-First-8366-Hex-Digits-of-PI.html +# There are actually 8336 listed there; with the prepended 3 there are 8337 +# below +dig=''.join(''' +3243f6a8885a308d313198a2e03707344a4093822299f31d0082efa98ec4e6c89452821e638d013 +77be5466cf34e90c6cc0ac29b7c97c50dd3f84d5b5b54709179216d5d98979fb1bd1310ba698dfb5 +ac2ffd72dbd01adfb7b8e1afed6a267e96ba7c9045f12c7f9924a19947b3916cf70801f2e2858efc +16636920d871574e69a458fea3f4933d7e0d95748f728eb658718bcd5882154aee7b54a41dc25a59 +b59c30d5392af26013c5d1b023286085f0ca417918b8db38ef8e79dcb0603a180e6c9e0e8bb01e8a +3ed71577c1bd314b2778af2fda55605c60e65525f3aa55ab945748986263e8144055ca396a2aab10 +b6b4cc5c341141e8cea15486af7c72e993b3ee1411636fbc2a2ba9c55d741831f6ce5c3e169b8793 +1eafd6ba336c24cf5c7a325381289586773b8f48986b4bb9afc4bfe81b6628219361d809ccfb21a9 +91487cac605dec8032ef845d5de98575b1dc262302eb651b8823893e81d396acc50f6d6ff383f442 +392e0b4482a484200469c8f04a9e1f9b5e21c66842f6e96c9a670c9c61abd388f06a51a0d2d8542f +68960fa728ab5133a36eef0b6c137a3be4ba3bf0507efb2a98a1f1651d39af017666ca593e82430e +888cee8619456f9fb47d84a5c33b8b5ebee06f75d885c12073401a449f56c16aa64ed3aa62363f77 +061bfedf72429b023d37d0d724d00a1248db0fead349f1c09b075372c980991b7b25d479d8f6e8de +f7e3fe501ab6794c3b976ce0bd04c006bac1a94fb6409f60c45e5c9ec2196a246368fb6faf3e6c53 +b51339b2eb3b52ec6f6dfc511f9b30952ccc814544af5ebd09bee3d004de334afd660f2807192e4b +b3c0cba85745c8740fd20b5f39b9d3fbdb5579c0bd1a60320ad6a100c6402c7279679f25fefb1fa3 +cc8ea5e9f8db3222f83c7516dffd616b152f501ec8ad0552ab323db5fafd23876053317b483e00df +829e5c57bbca6f8ca01a87562edf1769dbd542a8f6287effc3ac6732c68c4f5573695b27b0bbca58 +c8e1ffa35db8f011a010fa3d98fd2183b84afcb56c2dd1d35b9a53e479b6f84565d28e49bc4bfb97 +90e1ddf2daa4cb7e3362fb1341cee4c6e8ef20cada36774c01d07e9efe2bf11fb495dbda4dae9091 +98eaad8e716b93d5a0d08ed1d0afc725e08e3c5b2f8e7594b78ff6e2fbf2122b648888b812900df0 +1c4fad5ea0688fc31cd1cff191b3a8c1ad2f2f2218be0e1777ea752dfe8b021fa1e5a0cc0fb56f74 +e818acf3d6ce89e299b4a84fe0fd13e0b77cc43b81d2ada8d9165fa2668095770593cc7314211a14 +77e6ad206577b5fa86c75442f5fb9d35cfebcdaf0c7b3e89a0d6411bd3ae1e7e4900250e2d2071b3 +5e226800bb57b8e0af2464369bf009b91e5563911d59dfa6aa78c14389d95a537f207d5ba202e5b9 +c5832603766295cfa911c819684e734a41b3472dca7b14a94a1b5100529a532915d60f573fbc9bc6 +e42b60a47681e6740008ba6fb5571be91ff296ec6b2a0dd915b6636521e7b9f9b6ff34052ec58556 +6453b02d5da99f8fa108ba47996e85076a4b7a70e9b5b32944db75092ec4192623ad6ea6b049a7df +7d9cee60b88fedb266ecaa8c71699a17ff5664526cc2b19ee1193602a575094c29a0591340e4183a +3e3f54989a5b429d656b8fe4d699f73fd6a1d29c07efe830f54d2d38e6f0255dc14cdd20868470eb +266382e9c6021ecc5e09686b3f3ebaefc93c9718146b6a70a1687f358452a0e286b79c5305aa5007 +373e07841c7fdeae5c8e7d44ec5716f2b8b03ada37f0500c0df01c1f040200b3ffae0cf51a3cb574 +b225837a58dc0921bdd19113f97ca92ff69432477322f547013ae5e58137c2dadcc8b576349af3dd +a7a94461460fd0030eecc8c73ea4751e41e238cd993bea0e2f3280bba1183eb3314e548b384f6db9 +086f420d03f60a04bf2cb8129024977c795679b072bcaf89afde9a771fd9930810b38bae12dccf3f +2e5512721f2e6b7124501adde69f84cd877a5847187408da17bc9f9abce94b7d8cec7aec3adb851d +fa63094366c464c3d2ef1c18473215d908dd433b3724c2ba1612a14d432a65c45150940002133ae4 +dd71dff89e10314e5581ac77d65f11199b043556f1d7a3c76b3c11183b5924a509f28fe6ed97f1fb +fa9ebabf2c1e153c6e86e34570eae96fb1860e5e0a5a3e2ab3771fe71c4e3d06fa2965dcb999e71d +0f803e89d65266c8252e4cc9789c10b36ac6150eba94e2ea78a5fc3c531e0a2df4f2f74ea7361d2b 
+3d1939260f19c279605223a708f71312b6ebadfe6eeac31f66e3bc4595a67bc883b17f37d1018cff +28c332ddefbe6c5aa56558218568ab9802eecea50fdb2f953b2aef7dad5b6e2f841521b628290761 +70ecdd4775619f151013cca830eb61bd960334fe1eaa0363cfb5735c904c70a239d59e9e0bcbaade +14eecc86bc60622ca79cab5cabb2f3846e648b1eaf19bdf0caa02369b9655abb5040685a323c2ab4 +b3319ee9d5c021b8f79b540b19875fa09995f7997e623d7da8f837889a97e32d7711ed935f166812 +810e358829c7e61fd696dedfa17858ba9957f584a51b2272639b83c3ff1ac24696cdb30aeb532e30 +548fd948e46dbc312858ebf2ef34c6ffeafe28ed61ee7c3c735d4a14d9e864b7e342105d14203e13 +e045eee2b6a3aaabeadb6c4f15facb4fd0c742f442ef6abbb5654f3b1d41cd2105d81e799e86854d +c7e44b476a3d816250cf62a1f25b8d2646fc8883a0c1c7b6a37f1524c369cb749247848a0b5692b2 +85095bbf00ad19489d1462b17423820e0058428d2a0c55f5ea1dadf43e233f70613372f0928d937e +41d65fecf16c223bdb7cde3759cbee74604085f2a7ce77326ea607808419f8509ee8efd85561d997 +35a969a7aac50c06c25a04abfc800bcadc9e447a2ec3453484fdd567050e1e9ec9db73dbd3105588 +cd675fda79e3674340c5c43465713e38d83d28f89ef16dff20153e21e78fb03d4ae6e39f2bdb83ad +f7e93d5a68948140f7f64c261c94692934411520f77602d4f7bcf46b2ed4a20068d40824713320f4 +6a43b7d4b7500061af1e39f62e9724454614214f74bf8b88404d95fc1d96b591af70f4ddd366a02f +45bfbc09ec03bd97857fac6dd031cb850496eb27b355fd3941da2547e6abca0a9a28507825530429 +f40a2c86dae9b66dfb68dc1462d7486900680ec0a427a18dee4f3ffea2e887ad8cb58ce0067af4d6 +b6aace1e7cd3375fecce78a399406b2a4220fe9e35d9f385b9ee39d7ab3b124e8b1dc9faf74b6d18 +5626a36631eae397b23a6efa74dd5b43326841e7f7ca7820fbfb0af54ed8feb397454056acba4895 +2755533a3a20838d87fe6ba9b7d096954b55a867bca1159a58cca9296399e1db33a62a4a563f3125 +f95ef47e1c9029317cfdf8e80204272f7080bb155c05282ce395c11548e4c66d2248c1133fc70f86 +dc07f9c9ee41041f0f404779a45d886e17325f51ebd59bc0d1f2bcc18f41113564257b7834602a9c +60dff8e8a31f636c1b0e12b4c202e1329eaf664fd1cad181156b2395e0333e92e13b240b62eebeb9 +2285b2a20ee6ba0d99de720c8c2da2f728d012784595b794fd647d0862e7ccf5f05449a36f877d48 +fac39dfd27f33e8d1e0a476341992eff743a6f6eabf4f8fd37a812dc60a1ebddf8991be14cdb6e6b +0dc67b55106d672c372765d43bdcd0e804f1290dc7cc00ffa3b5390f92690fed0b667b9ffbcedb7d +9ca091cf0bd9155ea3bb132f88515bad247b9479bf763bd6eb37392eb3cc1159798026e297f42e31 +2d6842ada7c66a2b3b12754ccc782ef11c6a124237b79251e706a1bbe64bfb63501a6b101811caed +fa3d25bdd8e2e1c3c9444216590a121386d90cec6ed5abea2a64af674eda86a85fbebfe98864e4c3 +fe9dbc8057f0f7c08660787bf86003604dd1fd8346f6381fb07745ae04d736fccc83426b33f01eab +71b08041873c005e5f77a057bebde8ae2455464299bf582e614e58f48ff2ddfda2f474ef388789bd +c25366f9c3c8b38e74b475f25546fcd9b97aeb26618b1ddf84846a0e79915f95e2466e598e20b457 +708cd55591c902de4cb90bace1bb8205d011a862487574a99eb77f19b6e0a9dc09662d09a1c43246 +33e85a1f0209f0be8c4a99a0251d6efe101ab93d1d0ba5a4dfa186f20f2868f169dcb7da83573906 +fea1e2ce9b4fcd7f5250115e01a70683faa002b5c40de6d0279af88c27773f8641c3604c0661a806 +b5f0177a28c0f586e0006058aa30dc7d6211e69ed72338ea6353c2dd94c2c21634bbcbee5690bcb6 +deebfc7da1ce591d766f05e4094b7c018839720a3d7c927c2486e3725f724d9db91ac15bb4d39eb8 +fced54557808fca5b5d83d7cd34dad0fc41e50ef5eb161e6f8a28514d96c51133c6fd5c7e756e14e +c4362abfceddc6c837d79a323492638212670efa8e406000e03a39ce37d3faf5cfabc277375ac52d +1b5cb0679e4fa33742d382274099bc9bbed5118e9dbf0f7315d62d1c7ec700c47bb78c1b6b21a190 +45b26eb1be6a366eb45748ab2fbc946e79c6a376d26549c2c8530ff8ee468dde7dd5730a1d4cd04d +c62939bbdba9ba4650ac9526e8be5ee304a1fad5f06a2d519a63ef8ce29a86ee22c089c2b843242e +f6a51e03aa9cf2d0a483c061ba9be96a4d8fe51550ba645bd62826a2f9a73a3ae14ba99586ef5562 
+e9c72fefd3f752f7da3f046f6977fa0a5980e4a91587b086019b09e6ad3b3ee593e990fd5a9e34d7 +972cf0b7d9022b8b5196d5ac3a017da67dd1cf3ed67c7d2d281f9f25cfadf2b89b5ad6b4725a88f5 +4ce029ac71e019a5e647b0acfded93fa9be8d3c48d283b57ccf8d5662979132e28785f0191ed7560 +55f7960e44e3d35e8c15056dd488f46dba03a161250564f0bdc3eb9e153c9057a297271aeca93a07 +2a1b3f6d9b1e6321f5f59c66fb26dcf3197533d928b155fdf5035634828aba3cbb28517711c20ad9 +f8abcc5167ccad925f4de817513830dc8e379d58629320f991ea7a90c2fb3e7bce5121ce64774fbe +32a8b6e37ec3293d4648de53696413e680a2ae0810dd6db22469852dfd09072166b39a460a6445c0 +dd586cdecf1c20c8ae5bbef7dd1b588d40ccd2017f6bb4e3bbdda26a7e3a59ff453e350a44bcb4cd +d572eacea8fa6484bb8d6612aebf3c6f47d29be463542f5d9eaec2771bf64e6370740e0d8de75b13 +57f8721671af537d5d4040cb084eb4e2cc34d2466a0115af84e1b0042895983a1d06b89fb4ce6ea0 +486f3f3b823520ab82011a1d4b277227f8611560b1e7933fdcbb3a792b344525bda08839e151ce79 +4b2f32c9b7a01fbac9e01cc87ebcc7d1f6cf0111c3a1e8aac71a908749d44fbd9ad0dadecbd50ada +380339c32ac69136678df9317ce0b12b4ff79e59b743f5bb3af2d519ff27d9459cbf97222c15e6fc +2a0f91fc719b941525fae59361ceb69cebc2a8645912baa8d1b6c1075ee3056a0c10d25065cb03a4 +42e0ec6e0e1698db3b4c98a0be3278e9649f1f9532e0d392dfd3a0342b8971f21e1b0a74414ba334 +8cc5be7120c37632d8df359f8d9b992f2ee60b6f470fe3f11de54cda541edad891ce6279cfcd3e7e +6f1618b166fd2c1d05848fd2c5f6fb2299f523f357a632762393a8353156cccd02acf081625a75eb +b56e16369788d273ccde96629281b949d04c50901b71c65614e6c6c7bd327a140a45e1d006c3f27b +9ac9aa53fd62a80f00bb25bfe235bdd2f671126905b2040222b6cbcf7ccd769c2b53113ec01640e3 +d338abbd602547adf0ba38209cf746ce7677afa1c52075606085cbfe4e8ae88dd87aaaf9b04cf9aa +7e1948c25c02fb8a8c01c36ae4d6ebe1f990d4f869a65cdea03f09252dc208e69fb74e6132ce77e2 +5b578fdfe33ac372e6'''.split()) + + +def test_hex_pi_nth_digits(): + assert pi_hex_digits(0) == '3243f6a8885a30' + assert pi_hex_digits(1) == '243f6a8885a308' + assert pi_hex_digits(10000) == '68ac8fcfb8016c' + assert pi_hex_digits(13) == '08d313198a2e03' + assert pi_hex_digits(0, 3) == '324' + assert pi_hex_digits(0, 0) == '' + raises(ValueError, lambda: pi_hex_digits(-1)) + raises(ValueError, lambda: pi_hex_digits(3.14)) + + # this will pick a random segment to compute every time + # it is run. If it ever fails, there is an error in the + # computation. 
+ n = randint(0, len(dig)) + prec = randint(0, len(dig) - n) + assert pi_hex_digits(n, prec) == dig[n: n + prec] diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/ntheory/tests/test_elliptic_curve.py b/llmeval-env/lib/python3.10/site-packages/sympy/ntheory/tests/test_elliptic_curve.py new file mode 100644 index 0000000000000000000000000000000000000000..7d49d8eac72cc622fb92dfca8c54e5cc6c8dfb8f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/ntheory/tests/test_elliptic_curve.py @@ -0,0 +1,20 @@ +from sympy.ntheory.elliptic_curve import EllipticCurve + + +def test_elliptic_curve(): + # Point addition and multiplication + e3 = EllipticCurve(-1, 9) + p = e3(0, 3) + q = e3(-1, 3) + r = p + q + assert r.x == 1 and r.y == -3 + r = 2*p + q + assert r.x == 35 and r.y == 207 + r = -p + q + assert r.x == 37 and r.y == 225 + # Verify result in http://www.lmfdb.org/EllipticCurve/Q + # Discriminant + assert EllipticCurve(-1, 9).discriminant == -34928 + assert EllipticCurve(-2731, -55146, 1, 0, 1).discriminant == 25088 + # Torsion points + assert len(EllipticCurve(0, 1).torsion_points()) == 6 diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/ntheory/tests/test_generate.py b/llmeval-env/lib/python3.10/site-packages/sympy/ntheory/tests/test_generate.py new file mode 100644 index 0000000000000000000000000000000000000000..80a5dfda389e7595b666e11b97e608786f239df5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/ntheory/tests/test_generate.py @@ -0,0 +1,250 @@ +from sympy.core.numbers import (I, Rational, nan, zoo) +from sympy.core.singleton import S +from sympy.core.symbol import Symbol +from sympy.ntheory.generate import (sieve, Sieve) +from sympy.series.limits import limit + +from sympy.ntheory import isprime, totient, mobius, randprime, nextprime, prevprime, \ + primerange, primepi, prime, primorial, composite, compositepi, reduced_totient +from sympy.ntheory.generate import cycle_length +from sympy.ntheory.primetest import mr +from sympy.testing.pytest import raises + +def test_prime(): + assert prime(1) == 2 + assert prime(2) == 3 + assert prime(5) == 11 + assert prime(11) == 31 + assert prime(57) == 269 + assert prime(296) == 1949 + assert prime(559) == 4051 + assert prime(3000) == 27449 + assert prime(4096) == 38873 + assert prime(9096) == 94321 + assert prime(25023) == 287341 + assert prime(10000000) == 179424673 # issue #20951 + assert prime(99999999) == 2038074739 + raises(ValueError, lambda: prime(0)) + sieve.extend(3000) + assert prime(401) == 2749 + raises(ValueError, lambda: prime(-1)) + + +def test_primepi(): + assert primepi(-1) == 0 + assert primepi(1) == 0 + assert primepi(2) == 1 + assert primepi(Rational(7, 2)) == 2 + assert primepi(3.5) == 2 + assert primepi(5) == 3 + assert primepi(11) == 5 + assert primepi(57) == 16 + assert primepi(296) == 62 + assert primepi(559) == 102 + assert primepi(3000) == 430 + assert primepi(4096) == 564 + assert primepi(9096) == 1128 + assert primepi(25023) == 2763 + assert primepi(10**8) == 5761455 + assert primepi(253425253) == 13856396 + assert primepi(8769575643) == 401464322 + sieve.extend(3000) + assert primepi(2000) == 303 + + n = Symbol('n') + assert primepi(n).subs(n, 2) == 1 + + r = Symbol('r', real=True) + assert primepi(r).subs(r, 2) == 1 + + assert primepi(S.Infinity) is S.Infinity + assert primepi(S.NegativeInfinity) == 0 + + assert limit(primepi(n), n, 100) == 25 + + raises(ValueError, lambda: primepi(I)) + raises(ValueError, lambda: primepi(1 + I)) + raises(ValueError, lambda: 
primepi(zoo)) + raises(ValueError, lambda: primepi(nan)) + + +def test_composite(): + from sympy.ntheory.generate import sieve + sieve._reset() + assert composite(1) == 4 + assert composite(2) == 6 + assert composite(5) == 10 + assert composite(11) == 20 + assert composite(41) == 58 + assert composite(57) == 80 + assert composite(296) == 370 + assert composite(559) == 684 + assert composite(3000) == 3488 + assert composite(4096) == 4736 + assert composite(9096) == 10368 + assert composite(25023) == 28088 + sieve.extend(3000) + assert composite(1957) == 2300 + assert composite(2568) == 2998 + raises(ValueError, lambda: composite(0)) + + +def test_compositepi(): + assert compositepi(1) == 0 + assert compositepi(2) == 0 + assert compositepi(5) == 1 + assert compositepi(11) == 5 + assert compositepi(57) == 40 + assert compositepi(296) == 233 + assert compositepi(559) == 456 + assert compositepi(3000) == 2569 + assert compositepi(4096) == 3531 + assert compositepi(9096) == 7967 + assert compositepi(25023) == 22259 + assert compositepi(10**8) == 94238544 + assert compositepi(253425253) == 239568856 + assert compositepi(8769575643) == 8368111320 + sieve.extend(3000) + assert compositepi(2321) == 1976 + + +def test_generate(): + from sympy.ntheory.generate import sieve + sieve._reset() + assert nextprime(-4) == 2 + assert nextprime(2) == 3 + assert nextprime(5) == 7 + assert nextprime(12) == 13 + assert prevprime(3) == 2 + assert prevprime(7) == 5 + assert prevprime(13) == 11 + assert prevprime(19) == 17 + assert prevprime(20) == 19 + + sieve.extend_to_no(9) + assert sieve._list[-1] == 23 + + assert sieve._list[-1] < 31 + assert 31 in sieve + + assert nextprime(90) == 97 + assert nextprime(10**40) == (10**40 + 121) + assert prevprime(97) == 89 + assert prevprime(10**40) == (10**40 - 17) + + assert list(sieve.primerange(10, 1)) == [] + assert list(sieve.primerange(5, 9)) == [5, 7] + sieve._reset(prime=True) + assert list(sieve.primerange(2, 13)) == [2, 3, 5, 7, 11] + assert list(sieve.primerange(13)) == [2, 3, 5, 7, 11] + assert list(sieve.primerange(8)) == [2, 3, 5, 7] + assert list(sieve.primerange(-2)) == [] + assert list(sieve.primerange(29)) == [2, 3, 5, 7, 11, 13, 17, 19, 23] + assert list(sieve.primerange(34)) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31] + + assert list(sieve.totientrange(5, 15)) == [4, 2, 6, 4, 6, 4, 10, 4, 12, 6] + sieve._reset(totient=True) + assert list(sieve.totientrange(3, 13)) == [2, 2, 4, 2, 6, 4, 6, 4, 10, 4] + assert list(sieve.totientrange(900, 1000)) == [totient(x) for x in range(900, 1000)] + assert list(sieve.totientrange(0, 1)) == [] + assert list(sieve.totientrange(1, 2)) == [1] + + assert list(sieve.mobiusrange(5, 15)) == [-1, 1, -1, 0, 0, 1, -1, 0, -1, 1] + sieve._reset(mobius=True) + assert list(sieve.mobiusrange(3, 13)) == [-1, 0, -1, 1, -1, 0, 0, 1, -1, 0] + assert list(sieve.mobiusrange(1050, 1100)) == [mobius(x) for x in range(1050, 1100)] + assert list(sieve.mobiusrange(0, 1)) == [] + assert list(sieve.mobiusrange(1, 2)) == [1] + + assert list(primerange(10, 1)) == [] + assert list(primerange(2, 7)) == [2, 3, 5] + assert list(primerange(2, 10)) == [2, 3, 5, 7] + assert list(primerange(1050, 1100)) == [1051, 1061, + 1063, 1069, 1087, 1091, 1093, 1097] + s = Sieve() + for i in range(30, 2350, 376): + for j in range(2, 5096, 1139): + A = list(s.primerange(i, i + j)) + B = list(primerange(i, i + j)) + assert A == B + s = Sieve() + assert s[10] == 29 + + assert nextprime(2, 2) == 5 + + raises(ValueError, lambda: totient(0)) + + raises(ValueError, lambda: 
reduced_totient(0)) + + raises(ValueError, lambda: primorial(0)) + + assert mr(1, [2]) is False + + func = lambda i: (i**2 + 1) % 51 + assert next(cycle_length(func, 4)) == (6, 2) + assert list(cycle_length(func, 4, values=True)) == \ + [17, 35, 2, 5, 26, 14, 44, 50, 2, 5, 26, 14] + assert next(cycle_length(func, 4, nmax=5)) == (5, None) + assert list(cycle_length(func, 4, nmax=5, values=True)) == \ + [17, 35, 2, 5, 26] + sieve.extend(3000) + assert nextprime(2968) == 2969 + assert prevprime(2930) == 2927 + raises(ValueError, lambda: prevprime(1)) + raises(ValueError, lambda: prevprime(-4)) + + +def test_randprime(): + assert randprime(10, 1) is None + assert randprime(3, -3) is None + assert randprime(2, 3) == 2 + assert randprime(1, 3) == 2 + assert randprime(3, 5) == 3 + raises(ValueError, lambda: randprime(-12, -2)) + raises(ValueError, lambda: randprime(-10, 0)) + raises(ValueError, lambda: randprime(20, 22)) + raises(ValueError, lambda: randprime(0, 2)) + raises(ValueError, lambda: randprime(1, 2)) + for a in [100, 300, 500, 250000]: + for b in [100, 300, 500, 250000]: + p = randprime(a, a + b) + assert a <= p < (a + b) and isprime(p) + + +def test_primorial(): + assert primorial(1) == 2 + assert primorial(1, nth=0) == 1 + assert primorial(2) == 6 + assert primorial(2, nth=0) == 2 + assert primorial(4, nth=0) == 6 + + +def test_search(): + assert 2 in sieve + assert 2.1 not in sieve + assert 1 not in sieve + assert 2**1000 not in sieve + raises(ValueError, lambda: sieve.search(1)) + + +def test_sieve_slice(): + assert sieve[5] == 11 + assert list(sieve[5:10]) == [sieve[x] for x in range(5, 10)] + assert list(sieve[5:10:2]) == [sieve[x] for x in range(5, 10, 2)] + assert list(sieve[1:5]) == [2, 3, 5, 7] + raises(IndexError, lambda: sieve[:5]) + raises(IndexError, lambda: sieve[0]) + raises(IndexError, lambda: sieve[0:5]) + +def test_sieve_iter(): + values = [] + for value in sieve: + if value > 7: + break + values.append(value) + assert values == list(sieve[1:5]) + + +def test_sieve_repr(): + assert "sieve" in repr(sieve) + assert "prime" in repr(sieve) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/ntheory/tests/test_primetest.py b/llmeval-env/lib/python3.10/site-packages/sympy/ntheory/tests/test_primetest.py new file mode 100644 index 0000000000000000000000000000000000000000..8817b645a0ffe1ccb35af9171d2234f9260b833c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/ntheory/tests/test_primetest.py @@ -0,0 +1,159 @@ +from sympy.ntheory.generate import Sieve, sieve +from sympy.ntheory.primetest import (mr, is_lucas_prp, is_square, + is_strong_lucas_prp, is_extra_strong_lucas_prp, isprime, is_euler_pseudoprime, + is_gaussian_prime) + +from sympy.testing.pytest import slow +from sympy.core.numbers import I + +def test_euler_pseudoprimes(): + assert is_euler_pseudoprime(9, 1) == True + assert is_euler_pseudoprime(341, 2) == False + assert is_euler_pseudoprime(121, 3) == True + assert is_euler_pseudoprime(341, 4) == True + assert is_euler_pseudoprime(217, 5) == False + assert is_euler_pseudoprime(185, 6) == False + assert is_euler_pseudoprime(55, 111) == True + assert is_euler_pseudoprime(115, 114) == True + assert is_euler_pseudoprime(49, 117) == True + assert is_euler_pseudoprime(85, 84) == True + assert is_euler_pseudoprime(87, 88) == True + assert is_euler_pseudoprime(49, 128) == True + assert is_euler_pseudoprime(39, 77) == True + assert is_euler_pseudoprime(9881, 30) == True + assert is_euler_pseudoprime(8841, 29) == False + assert 
is_euler_pseudoprime(8421, 29) == False + assert is_euler_pseudoprime(9997, 19) == True + +def test_is_extra_strong_lucas_prp(): + assert is_extra_strong_lucas_prp(4) == False + assert is_extra_strong_lucas_prp(989) == True + assert is_extra_strong_lucas_prp(10877) == True + assert is_extra_strong_lucas_prp(9) == False + assert is_extra_strong_lucas_prp(16) == False + assert is_extra_strong_lucas_prp(169) == False + +@slow +def test_prps(): + oddcomposites = [n for n in range(1, 10**5) if + n % 2 and not isprime(n)] + # A checksum would be better. + assert sum(oddcomposites) == 2045603465 + assert [n for n in oddcomposites if mr(n, [2])] == [ + 2047, 3277, 4033, 4681, 8321, 15841, 29341, 42799, 49141, + 52633, 65281, 74665, 80581, 85489, 88357, 90751] + assert [n for n in oddcomposites if mr(n, [3])] == [ + 121, 703, 1891, 3281, 8401, 8911, 10585, 12403, 16531, + 18721, 19345, 23521, 31621, 44287, 47197, 55969, 63139, + 74593, 79003, 82513, 87913, 88573, 97567] + assert [n for n in oddcomposites if mr(n, [325])] == [ + 9, 25, 27, 49, 65, 81, 325, 341, 343, 697, 1141, 2059, + 2149, 3097, 3537, 4033, 4681, 4941, 5833, 6517, 7987, 8911, + 12403, 12913, 15043, 16021, 20017, 22261, 23221, 24649, + 24929, 31841, 35371, 38503, 43213, 44173, 47197, 50041, + 55909, 56033, 58969, 59089, 61337, 65441, 68823, 72641, + 76793, 78409, 85879] + assert not any(mr(n, [9345883071009581737]) for n in oddcomposites) + assert [n for n in oddcomposites if is_lucas_prp(n)] == [ + 323, 377, 1159, 1829, 3827, 5459, 5777, 9071, 9179, 10877, + 11419, 11663, 13919, 14839, 16109, 16211, 18407, 18971, + 19043, 22499, 23407, 24569, 25199, 25877, 26069, 27323, + 32759, 34943, 35207, 39059, 39203, 39689, 40309, 44099, + 46979, 47879, 50183, 51983, 53663, 56279, 58519, 60377, + 63881, 69509, 72389, 73919, 75077, 77219, 79547, 79799, + 82983, 84419, 86063, 90287, 94667, 97019, 97439] + assert [n for n in oddcomposites if is_strong_lucas_prp(n)] == [ + 5459, 5777, 10877, 16109, 18971, 22499, 24569, 25199, 40309, + 58519, 75077, 97439] + assert [n for n in oddcomposites if is_extra_strong_lucas_prp(n) + ] == [ + 989, 3239, 5777, 10877, 27971, 29681, 30739, 31631, 39059, + 72389, 73919, 75077] + + +def test_isprime(): + s = Sieve() + s.extend(100000) + ps = set(s.primerange(2, 100001)) + for n in range(100001): + # if (n in ps) != isprime(n): print n + assert (n in ps) == isprime(n) + assert isprime(179424673) + assert isprime(20678048681) + assert isprime(1968188556461) + assert isprime(2614941710599) + assert isprime(65635624165761929287) + assert isprime(1162566711635022452267983) + assert isprime(77123077103005189615466924501) + assert isprime(3991617775553178702574451996736229) + assert isprime(273952953553395851092382714516720001799) + assert isprime(int(''' +531137992816767098689588206552468627329593117727031923199444138200403\ +559860852242739162502265229285668889329486246501015346579337652707239\ +409519978766587351943831270835393219031728127''')) + + # Some Mersenne primes + assert isprime(2**61 - 1) + assert isprime(2**89 - 1) + assert isprime(2**607 - 1) + # (but not all Mersenne's are primes + assert not isprime(2**601 - 1) + + # pseudoprimes + #------------- + # to some small bases + assert not isprime(2152302898747) + assert not isprime(3474749660383) + assert not isprime(341550071728321) + assert not isprime(3825123056546413051) + # passes the base set [2, 3, 7, 61, 24251] + assert not isprime(9188353522314541) + # large examples + assert not isprime(877777777777777777777777) + # conjectured psi_12 given at 
http://mathworld.wolfram.com/StrongPseudoprime.html + assert not isprime(318665857834031151167461) + # conjectured psi_17 given at http://mathworld.wolfram.com/StrongPseudoprime.html + assert not isprime(564132928021909221014087501701) + # Arnault's 1993 number; a factor of it is + # 400958216639499605418306452084546853005188166041132508774506\ + # 204738003217070119624271622319159721973358216316508535816696\ + # 9145233813917169287527980445796800452592031836601 + assert not isprime(int(''' +803837457453639491257079614341942108138837688287558145837488917522297\ +427376533365218650233616396004545791504202360320876656996676098728404\ +396540823292873879185086916685732826776177102938969773947016708230428\ +687109997439976544144845341155872450633409279022275296229414984230688\ +1685404326457534018329786111298960644845216191652872597534901''')) + # Arnault's 1995 number; can be factored as + # p1*(313*(p1 - 1) + 1)*(353*(p1 - 1) + 1) where p1 is + # 296744956686855105501541746429053327307719917998530433509950\ + # 755312768387531717701995942385964281211880336647542183455624\ + # 93168782883 + assert not isprime(int(''' +288714823805077121267142959713039399197760945927972270092651602419743\ +230379915273311632898314463922594197780311092934965557841894944174093\ +380561511397999942154241693397290542371100275104208013496673175515285\ +922696291677532547504444585610194940420003990443211677661994962953925\ +045269871932907037356403227370127845389912612030924484149472897688540\ +6024976768122077071687938121709811322297802059565867''')) + sieve.extend(3000) + assert isprime(2819) + assert not isprime(2931) + assert not isprime(2.0) + + +def test_is_square(): + assert [i for i in range(25) if is_square(i)] == [0, 1, 4, 9, 16] + + # issue #17044 + assert not is_square(60 ** 3) + assert not is_square(60 ** 5) + assert not is_square(84 ** 7) + assert not is_square(105 ** 9) + assert not is_square(120 ** 3) + +def test_is_gaussianprime(): + assert is_gaussian_prime(7*I) + assert is_gaussian_prime(7) + assert is_gaussian_prime(2 + 3*I) + assert not is_gaussian_prime(2 + 2*I) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/ntheory/tests/test_qs.py b/llmeval-env/lib/python3.10/site-packages/sympy/ntheory/tests/test_qs.py new file mode 100644 index 0000000000000000000000000000000000000000..19ee1782ce4658d48cf7f3451f15bec8e827b76a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/ntheory/tests/test_qs.py @@ -0,0 +1,124 @@ +from __future__ import annotations + +from sympy.ntheory import qs +from sympy.ntheory.qs import SievePolynomial, _generate_factor_base, \ + _initialize_first_polynomial, _initialize_ith_poly, \ + _gen_sieve_array, _check_smoothness, _trial_division_stage, _gauss_mod_2, \ + _build_matrix, _find_factor +from sympy.testing.pytest import slow + + +@slow +def test_qs_1(): + assert qs(10009202107, 100, 10000) == {100043, 100049} + assert qs(211107295182713951054568361, 1000, 10000) == \ + {13791315212531, 15307263442931} + assert qs(980835832582657*990377764891511, 3000, 50000) == \ + {980835832582657, 990377764891511} + assert qs(18640889198609*20991129234731, 1000, 50000) == \ + {18640889198609, 20991129234731} + + +def test_qs_2() -> None: + n = 10009202107 + M = 50 + # a = 10, b = 15, modified_coeff = [a**2, 2*a*b, b**2 - N] + sieve_poly = SievePolynomial([100, 1600, -10009195707], 10, 80) + assert sieve_poly.eval(10) == -10009169707 + assert sieve_poly.eval(5) == -10009185207 + + idx_1000, idx_5000, factor_base = _generate_factor_base(2000, n) + assert idx_1000 == 
82 + assert [factor_base[i].prime for i in range(15)] == \ + [2, 3, 7, 11, 17, 19, 29, 31, 43, 59, 61, 67, 71, 73, 79] + assert [factor_base[i].tmem_p for i in range(15)] == \ + [1, 1, 3, 5, 3, 6, 6, 14, 1, 16, 24, 22, 18, 22, 15] + assert [factor_base[i].log_p for i in range(5)] == \ + [710, 1125, 1993, 2455, 2901] + + g, B = _initialize_first_polynomial( + n, M, factor_base, idx_1000, idx_5000, seed=0) + assert g.a == 1133107 + assert g.b == 682543 + assert B == [272889, 409654] + assert [factor_base[i].soln1 for i in range(15)] == \ + [0, 0, 3, 7, 13, 0, 8, 19, 9, 43, 27, 25, 63, 29, 19] + assert [factor_base[i].soln2 for i in range(15)] == \ + [0, 1, 1, 3, 12, 16, 15, 6, 15, 1, 56, 55, 61, 58, 16] + assert [factor_base[i].a_inv for i in range(15)] == \ + [1, 1, 5, 7, 3, 5, 26, 6, 40, 5, 21, 45, 4, 1, 8] + assert [factor_base[i].b_ainv for i in range(5)] == \ + [[0, 0], [0, 2], [3, 0], [3, 9], [13, 13]] + + g_1 = _initialize_ith_poly(n, factor_base, 1, g, B) + assert g_1.a == 1133107 + assert g_1.b == 136765 + + sieve_array = _gen_sieve_array(M, factor_base) + assert sieve_array[0:5] == [8424, 13603, 1835, 5335, 710] + + assert _check_smoothness(9645, factor_base) == (5, False) + assert _check_smoothness(210313, factor_base)[0][0:15] == \ + [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1] + assert _check_smoothness(210313, factor_base)[1] + + partial_relations: dict[int, tuple[int, int]] = {} + smooth_relation, partial_relation = _trial_division_stage( + n, M, factor_base, sieve_array, sieve_poly, partial_relations, + ERROR_TERM=25*2**10) + + assert partial_relations == { + 8699: (440, -10009008507), + 166741: (490, -10008962007), + 131449: (530, -10008921207), + 6653: (550, -10008899607) + } + assert [smooth_relation[i][0] for i in range(5)] == [ + -250, -670615476700, -45211565844500, -231723037747200, -1811665537200] + assert [smooth_relation[i][1] for i in range(5)] == [ + -10009139607, 1133094251961, 5302606761, 53804049849, 1950723889] + assert smooth_relation[0][2][0:15] == [ + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + + assert _gauss_mod_2( + [[0, 0, 1], [1, 0, 1], [0, 1, 0], [0, 1, 1], [0, 1, 1]] + ) == ( + [[[0, 1, 1], 3], [[0, 1, 1], 4]], + [True, True, True, False, False], + [[0, 0, 1], [1, 0, 0], [0, 1, 0], [0, 1, 1], [0, 1, 1]] + ) + + +def test_qs_3(): + N = 1817 + smooth_relations = [ + (2455024, 637, [0, 0, 0, 1]), + (-27993000, 81536, [0, 1, 0, 1]), + (11461840, 12544, [0, 0, 0, 0]), + (149, 20384, [0, 1, 0, 1]), + (-31138074, 19208, [0, 1, 0, 0]) + ] + + matrix = _build_matrix(smooth_relations) + assert matrix == [ + [0, 0, 0, 1], + [0, 1, 0, 1], + [0, 0, 0, 0], + [0, 1, 0, 1], + [0, 1, 0, 0] + ] + + dependent_row, mark, gauss_matrix = _gauss_mod_2(matrix) + assert dependent_row == [[[0, 0, 0, 0], 2], [[0, 1, 0, 0], 3]] + assert mark == [True, True, False, False, True] + assert gauss_matrix == [ + [0, 0, 0, 1], + [0, 1, 0, 0], + [0, 0, 0, 0], + [0, 1, 0, 0], + [0, 1, 0, 1] + ] + + factor = _find_factor( + dependent_row, mark, gauss_matrix, 0, smooth_relations, N) + assert factor == 23 diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/tensor/__init__.py b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a832614b1d48e26bf01e16f040f34dd412e8e32b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/__init__.py @@ -0,0 +1,23 @@ +"""A module to manipulate symbolic objects with indices including tensors + +""" +from .indexed import IndexedBase, Idx, Indexed 
+from .index_methods import get_contraction_structure, get_indices +from .functions import shape +from .array import (MutableDenseNDimArray, ImmutableDenseNDimArray, + MutableSparseNDimArray, ImmutableSparseNDimArray, NDimArray, tensorproduct, + tensorcontraction, tensordiagonal, derive_by_array, permutedims, Array, + DenseNDimArray, SparseNDimArray,) + +__all__ = [ + 'IndexedBase', 'Idx', 'Indexed', + + 'get_contraction_structure', 'get_indices', + + 'shape', + + 'MutableDenseNDimArray', 'ImmutableDenseNDimArray', + 'MutableSparseNDimArray', 'ImmutableSparseNDimArray', 'NDimArray', + 'tensorproduct', 'tensorcontraction', 'tensordiagonal', 'derive_by_array', 'permutedims', + 'Array', 'DenseNDimArray', 'SparseNDimArray', +] diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/__init__.py b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/__init__.py new file mode 100644 index
0000000000000000000000000000000000000000..eca2eb4c6c58cb113517b6e41737e9d97abbb84e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/__init__.py
@@ -0,0 +1,271 @@
+r"""
+N-dim array module for SymPy.
+
+Four classes are provided to handle N-dim arrays, given by the combinations
+dense/sparse (i.e. whether to store all elements or only the non-zero ones in
+memory) and mutable/immutable (immutable classes are SymPy objects, but cannot
+change after they have been created).
+
+Examples
+========
+
+The following examples show the usage of ``Array``. This is an abbreviation for
+``ImmutableDenseNDimArray``, that is, an immutable dense N-dim array; the
+other classes are analogous. For mutable classes it is also possible to change
+element values after the object has been constructed.
+
+Array construction can detect the shape of nested lists and tuples:
+
+>>> from sympy import Array
+>>> a1 = Array([[1, 2], [3, 4], [5, 6]])
+>>> a1
+[[1, 2], [3, 4], [5, 6]]
+>>> a1.shape
+(3, 2)
+>>> a1.rank()
+2
+>>> from sympy.abc import x, y, z
+>>> a2 = Array([[[x, y], [z, x*z]], [[1, x*y], [1/x, x/y]]])
+>>> a2
+[[[x, y], [z, x*z]], [[1, x*y], [1/x, x/y]]]
+>>> a2.shape
+(2, 2, 2)
+>>> a2.rank()
+3
+
+Otherwise one could pass a 1-dim array followed by a shape tuple:
+
+>>> m1 = Array(range(12), (3, 4))
+>>> m1
+[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]
+>>> m2 = Array(range(12), (3, 2, 2))
+>>> m2
+[[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8, 9], [10, 11]]]
+>>> m2[1,1,1]
+7
+>>> m2.reshape(4, 3)
+[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]]
+
+Slice support:
+
+>>> m2[:, 1, 1]
+[3, 7, 11]
+
+Elementwise derivative:
+
+>>> from sympy.abc import x, y, z
+>>> m3 = Array([x**3, x*y, z])
+>>> m3.diff(x)
+[3*x**2, y, 0]
+>>> m3.diff(z)
+[0, 0, 1]
+
+Multiplication with other SymPy expressions is applied elementwise:
+
+>>> (1+x)*m3
+[x**3*(x + 1), x*y*(x + 1), z*(x + 1)]
+
+To apply a function to each element of the N-dim array, use ``applyfunc``:
+
+>>> m3.applyfunc(lambda x: x/2)
+[x**3/2, x*y/2, z/2]
+
+N-dim arrays can be converted to nested lists by the ``tolist()`` method:
+
+>>> m2.tolist()
+[[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8, 9], [10, 11]]]
+>>> isinstance(m2.tolist(), list)
+True
+
+If the rank is 2, it is possible to convert them to matrices with ``tomatrix()``:
+
+>>> m1.tomatrix()
+Matrix([
+[0, 1, 2, 3],
+[4, 5, 6, 7],
+[8, 9, 10, 11]])
+
+Products and contractions
+-------------------------
+
+Tensor product between arrays `A_{i_1,\ldots,i_n}` and `B_{j_1,\ldots,j_m}`
+creates the combined array `P = A \otimes B` defined as
+
+`P_{i_1,\ldots,i_n,j_1,\ldots,j_m} := A_{i_1,\ldots,i_n}\cdot B_{j_1,\ldots,j_m}.`
+
+It is available through ``tensorproduct(...)``:
+
+>>> from sympy import Array, tensorproduct
+>>> from sympy.abc import x,y,z,t
+>>> A = Array([x, y, z, t])
+>>> B = Array([1, 2, 3, 4])
+>>> tensorproduct(A, B)
+[[x, 2*x, 3*x, 4*x], [y, 2*y, 3*y, 4*y], [z, 2*z, 3*z, 4*z], [t, 2*t, 3*t, 4*t]]
+
+In case you don't want to evaluate the tensor product immediately, you can use
+``ArrayTensorProduct``, which creates an unevaluated tensor product expression:
+
+>>> from sympy.tensor.array.expressions import ArrayTensorProduct
+>>> ArrayTensorProduct(A, B)
+ArrayTensorProduct([x, y, z, t], [1, 2, 3, 4])
+
+Calling ``.as_explicit()`` on ``ArrayTensorProduct`` is equivalent to just calling
+``tensorproduct(...)``:
+
+>>> ArrayTensorProduct(A, B).as_explicit()
+[[x, 2*x, 3*x, 4*x], [y, 2*y, 3*y, 4*y], [z, 2*z, 3*z, 4*z], [t, 2*t, 3*t, 4*t]]
+
+Tensor product between a rank-1 array and a matrix creates a rank-3 array:
+
+>>> from sympy import eye
+>>> p1 = tensorproduct(A, eye(4))
+>>> p1
+[[[x, 0, 0, 0], [0, x, 0, 0], [0, 0, x, 0], [0, 0, 0, x]], [[y, 0, 0, 0], [0, y, 0, 0], [0, 0, y, 0], [0, 0, 0, y]], [[z, 0, 0, 0], [0, z, 0, 0], [0, 0, z, 0], [0, 0, 0, z]], [[t, 0, 0, 0], [0, t, 0, 0], [0, 0, t, 0], [0, 0, 0, t]]]
+
+Now, to get back `A_0 \otimes \mathbf{1}` one can access `p_{0,m,n}` by slicing:
+
+>>> p1[0,:,:]
+[[x, 0, 0, 0], [0, x, 0, 0], [0, 0, x, 0], [0, 0, 0, x]]
+
+Tensor contraction sums over the specified axes, for example contracting
+positions `a` and `b` means
+
+`A_{i_1,\ldots,i_a,\ldots,i_b,\ldots,i_n} \implies \sum_k A_{i_1,\ldots,k,\ldots,k,\ldots,i_n}`
+
+Remember that Python indexing is zero-based, so to contract the a-th and b-th
+axes it is necessary to specify `a-1` and `b-1`
+
+>>> from sympy import tensorcontraction
+>>> C = Array([[x, y], [z, t]])
+
+The matrix trace is equivalent to the contraction of a rank-2 array:
+
+`A_{m,n} \implies \sum_k A_{k,k}`
+
+>>> tensorcontraction(C, (0, 1))
+t + x
+
+To create an expression representing a tensor contraction that does not get
+evaluated immediately, use ``ArrayContraction``, which is equivalent to
+``tensorcontraction(...)`` if it is followed by ``.as_explicit()``:
+
+>>> from sympy.tensor.array.expressions import ArrayContraction
+>>> ArrayContraction(C, (0, 1))
+ArrayContraction([[x, y], [z, t]], (0, 1))
+>>> ArrayContraction(C, (0, 1)).as_explicit()
+t + x
+
+Matrix product is equivalent to a tensor product of two rank-2 arrays, followed
+by a contraction of the 2nd and 3rd axes (in Python indexing axes number 1, 2).
+
+`A_{m,n}\cdot B_{i,j} \implies \sum_k A_{m, k}\cdot B_{k, j}`
+
+>>> D = Array([[2, 1], [0, -1]])
+>>> tensorcontraction(tensorproduct(C, D), (1, 2))
+[[2*x, x - y], [2*z, -t + z]]
+
+One may verify that the matrix product is equivalent:
+
+>>> from sympy import Matrix
+>>> Matrix([[x, y], [z, t]])*Matrix([[2, 1], [0, -1]])
+Matrix([
+[2*x, x - y],
+[2*z, -t + z]])
+
+or equivalently
+
+>>> C.tomatrix()*D.tomatrix()
+Matrix([
+[2*x, x - y],
+[2*z, -t + z]])
+
+Diagonal operator
+-----------------
+
+The ``tensordiagonal`` function acts in a similar manner to ``tensorcontraction``,
+but the joined indices are not summed over, for example diagonalizing
+positions `a` and `b` means
+
+`A_{i_1,\ldots,i_a,\ldots,i_b,\ldots,i_n} \implies A_{i_1,\ldots,k,\ldots,k,\ldots,i_n}
+\implies \tilde{A}_{i_1,\ldots,i_{a-1},i_{a+1},\ldots,i_{b-1},i_{b+1},\ldots,i_n,k}`
+
+where `\tilde{A}` is the array equivalent to the diagonal of `A` at positions
+`a` and `b` moved to the last index slot.
+
+Compare the difference between contraction and diagonal operators:
+
+>>> from sympy import tensordiagonal
+>>> from sympy.abc import a, b, c, d
+>>> m = Matrix([[a, b], [c, d]])
+>>> tensorcontraction(m, [0, 1])
+a + d
+>>> tensordiagonal(m, [0, 1])
+[a, d]
+
+In short, no summation occurs with ``tensordiagonal``.
+
+
+Derivatives by array
+--------------------
+
+The usual derivative operation may be extended to support differentiation with
+respect to arrays, provided that all elements in that array are symbols or
+expressions suitable for differentiation.
+
+The definition of a derivative by an array is as follows: given the array
+`A_{i_1, \ldots, i_N}` and the array `X_{j_1, \ldots, j_M}`,
+the derivative of `A` with respect to `X` is a new array `B` defined by
+
+`B_{j_1,\ldots,j_M,i_1,\ldots,i_N} := \frac{\partial A_{i_1,\ldots,i_N}}{\partial X_{j_1,\ldots,j_M}}`
+
+The function ``derive_by_array`` performs such an operation:
+
+>>> from sympy import derive_by_array
+>>> from sympy.abc import x, y, z, t
+>>> from sympy import sin, exp
+
+With scalars, it behaves exactly as the ordinary derivative:
+
+>>> derive_by_array(sin(x*y), x)
+y*cos(x*y)
+
+Scalar derived by an array basis:
+
+>>> derive_by_array(sin(x*y), [x, y, z])
+[y*cos(x*y), x*cos(x*y), 0]
+
+Deriving an array by an array basis: `B^{nm} := \frac{\partial A^m}{\partial x^n}`
+
+>>> basis = [x, y, z]
+>>> ax = derive_by_array([exp(x), sin(y*z), t], basis)
+>>> ax
+[[exp(x), 0, 0], [0, z*cos(y*z), 0], [0, y*cos(y*z), 0]]
+
+Contraction of the resulting array: `\sum_m \frac{\partial A^m}{\partial x^m}`
+
+>>> tensorcontraction(ax, (0, 1))
+z*cos(y*z) + exp(x)
+
+"""
+
+from .dense_ndim_array import MutableDenseNDimArray, ImmutableDenseNDimArray, DenseNDimArray
+from .sparse_ndim_array import MutableSparseNDimArray, ImmutableSparseNDimArray, SparseNDimArray
+from .ndim_array import NDimArray, ArrayKind
+from .arrayop import tensorproduct, tensorcontraction, tensordiagonal, derive_by_array, permutedims
+from .array_comprehension import ArrayComprehension, ArrayComprehensionMap
+
+Array = ImmutableDenseNDimArray
+
+__all__ = [
+    'MutableDenseNDimArray', 'ImmutableDenseNDimArray', 'DenseNDimArray',
+
+    'MutableSparseNDimArray', 'ImmutableSparseNDimArray', 'SparseNDimArray',
+
+    'NDimArray', 'ArrayKind',
+
+    'tensorproduct', 'tensorcontraction', 'tensordiagonal', 'derive_by_array',
+
+    'permutedims', 'ArrayComprehension', 'ArrayComprehensionMap',
+
+    'Array',
+]
diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/array_comprehension.py b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/array_comprehension.py
new file mode 100644
index 0000000000000000000000000000000000000000..95702f499f3e40597fd0144929138ac1329962ee
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/array_comprehension.py
@@ -0,0 +1,399 @@
+import functools, itertools
+from sympy.core.sympify import _sympify, sympify
+from sympy.core.expr import Expr
+from sympy.core import Basic, Tuple
+from sympy.tensor.array import ImmutableDenseNDimArray
+from sympy.core.symbol import Symbol
+from sympy.core.numbers import Integer
+
+
+class ArrayComprehension(Basic):
+    """
+    Generate a list comprehension.
+
+    Explanation
+    ===========
+
+    If there is a symbolic dimension, for example, say [i for i in range(1, N)] where
+    N is a Symbol, then the expression will not be expanded to an array. Otherwise,
+    calling the doit() function will launch the expansion.
+ + Examples + ======== + + >>> from sympy.tensor.array import ArrayComprehension + >>> from sympy import symbols + >>> i, j, k = symbols('i j k') + >>> a = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3)) + >>> a + ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3)) + >>> a.doit() + [[11, 12, 13], [21, 22, 23], [31, 32, 33], [41, 42, 43]] + >>> b = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, k)) + >>> b.doit() + ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, k)) + """ + def __new__(cls, function, *symbols, **assumptions): + if any(len(l) != 3 or None for l in symbols): + raise ValueError('ArrayComprehension requires values lower and upper bound' + ' for the expression') + arglist = [sympify(function)] + arglist.extend(cls._check_limits_validity(function, symbols)) + obj = Basic.__new__(cls, *arglist, **assumptions) + obj._limits = obj._args[1:] + obj._shape = cls._calculate_shape_from_limits(obj._limits) + obj._rank = len(obj._shape) + obj._loop_size = cls._calculate_loop_size(obj._shape) + return obj + + @property + def function(self): + """The function applied across limits. + + Examples + ======== + + >>> from sympy.tensor.array import ArrayComprehension + >>> from sympy import symbols + >>> i, j = symbols('i j') + >>> a = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3)) + >>> a.function + 10*i + j + """ + return self._args[0] + + @property + def limits(self): + """ + The list of limits that will be applied while expanding the array. + + Examples + ======== + + >>> from sympy.tensor.array import ArrayComprehension + >>> from sympy import symbols + >>> i, j = symbols('i j') + >>> a = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3)) + >>> a.limits + ((i, 1, 4), (j, 1, 3)) + """ + return self._limits + + @property + def free_symbols(self): + """ + The set of the free_symbols in the array. + Variables appeared in the bounds are supposed to be excluded + from the free symbol set. + + Examples + ======== + + >>> from sympy.tensor.array import ArrayComprehension + >>> from sympy import symbols + >>> i, j, k = symbols('i j k') + >>> a = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3)) + >>> a.free_symbols + set() + >>> b = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, k+3)) + >>> b.free_symbols + {k} + """ + expr_free_sym = self.function.free_symbols + for var, inf, sup in self._limits: + expr_free_sym.discard(var) + curr_free_syms = inf.free_symbols.union(sup.free_symbols) + expr_free_sym = expr_free_sym.union(curr_free_syms) + return expr_free_sym + + @property + def variables(self): + """The tuples of the variables in the limits. + + Examples + ======== + + >>> from sympy.tensor.array import ArrayComprehension + >>> from sympy import symbols + >>> i, j, k = symbols('i j k') + >>> a = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3)) + >>> a.variables + [i, j] + """ + return [l[0] for l in self._limits] + + @property + def bound_symbols(self): + """The list of dummy variables. + + Note + ==== + + Note that all variables are dummy variables since a limit without + lower bound or upper bound is not accepted. + """ + return [l[0] for l in self._limits if len(l) != 1] + + @property + def shape(self): + """ + The shape of the expanded array, which may have symbols. + + Note + ==== + + Both the lower and the upper bounds are included while + calculating the shape. 
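+ + Each limit ``(var, inf, sup)`` therefore contributes a dimension of + ``sup - inf + 1``; for instance, ``(i, 1, 4)`` gives ``4 - 1 + 1 = 4``.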
+ + Examples + ======== + + >>> from sympy.tensor.array import ArrayComprehension + >>> from sympy import symbols + >>> i, j, k = symbols('i j k') + >>> a = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3)) + >>> a.shape + (4, 3) + >>> b = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, k+3)) + >>> b.shape + (4, k + 3) + """ + return self._shape + + @property + def is_shape_numeric(self): + """ + Test if the array is shape-numeric which means there is no symbolic + dimension. + + Examples + ======== + + >>> from sympy.tensor.array import ArrayComprehension + >>> from sympy import symbols + >>> i, j, k = symbols('i j k') + >>> a = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3)) + >>> a.is_shape_numeric + True + >>> b = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, k+3)) + >>> b.is_shape_numeric + False + """ + for _, inf, sup in self._limits: + if Basic(inf, sup).atoms(Symbol): + return False + return True + + def rank(self): + """The rank of the expanded array. + + Examples + ======== + + >>> from sympy.tensor.array import ArrayComprehension + >>> from sympy import symbols + >>> i, j, k = symbols('i j k') + >>> a = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3)) + >>> a.rank() + 2 + """ + return self._rank + + def __len__(self): + """ + The length of the expanded array which means the number + of elements in the array. + + Raises + ====== + + ValueError : When the length of the array is symbolic + + Examples + ======== + + >>> from sympy.tensor.array import ArrayComprehension + >>> from sympy import symbols + >>> i, j = symbols('i j') + >>> a = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3)) + >>> len(a) + 12 + """ + if self._loop_size.free_symbols: + raise ValueError('Symbolic length is not supported') + return self._loop_size + + @classmethod + def _check_limits_validity(cls, function, limits): + #limits = sympify(limits) + new_limits = [] + for var, inf, sup in limits: + var = _sympify(var) + inf = _sympify(inf) + #since this is stored as an argument, it should be + #a Tuple + if isinstance(sup, list): + sup = Tuple(*sup) + else: + sup = _sympify(sup) + new_limits.append(Tuple(var, inf, sup)) + if any((not isinstance(i, Expr)) or i.atoms(Symbol, Integer) != i.atoms() + for i in [inf, sup]): + raise TypeError('Bounds should be an Expression(combination of Integer and Symbol)') + if (inf > sup) == True: + raise ValueError('Lower bound should be inferior to upper bound') + if var in inf.free_symbols or var in sup.free_symbols: + raise ValueError('Variable should not be part of its bounds') + return new_limits + + @classmethod + def _calculate_shape_from_limits(cls, limits): + return tuple([sup - inf + 1 for _, inf, sup in limits]) + + @classmethod + def _calculate_loop_size(cls, shape): + if not shape: + return 0 + loop_size = 1 + for l in shape: + loop_size = loop_size * l + + return loop_size + + def doit(self, **hints): + if not self.is_shape_numeric: + return self + + return self._expand_array() + + def _expand_array(self): + res = [] + for values in itertools.product(*[range(inf, sup+1) + for var, inf, sup + in self._limits]): + res.append(self._get_element(values)) + + return ImmutableDenseNDimArray(res, self.shape) + + def _get_element(self, values): + temp = self.function + for var, val in zip(self.variables, values): + temp = temp.subs(var, val) + return temp + + def tolist(self): + """Transform the expanded array to a list. 
+ + Raises + ====== + + ValueError : When there is a symbolic dimension + + Examples + ======== + + >>> from sympy.tensor.array import ArrayComprehension + >>> from sympy import symbols + >>> i, j = symbols('i j') + >>> a = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3)) + >>> a.tolist() + [[11, 12, 13], [21, 22, 23], [31, 32, 33], [41, 42, 43]] + """ + if self.is_shape_numeric: + return self._expand_array().tolist() + + raise ValueError("A symbolic array cannot be expanded to a list") + + def tomatrix(self): + """Transform the expanded array to a matrix. + + Raises + ====== + + ValueError : When there is a symbolic dimension + ValueError : When the rank of the expanded array is not equal to 2 + + Examples + ======== + + >>> from sympy.tensor.array import ArrayComprehension + >>> from sympy import symbols + >>> i, j = symbols('i j') + >>> a = ArrayComprehension(10*i + j, (i, 1, 4), (j, 1, 3)) + >>> a.tomatrix() + Matrix([ + [11, 12, 13], + [21, 22, 23], + [31, 32, 33], + [41, 42, 43]]) + """ + from sympy.matrices import Matrix + + if not self.is_shape_numeric: + raise ValueError("A symbolic array cannot be expanded to a matrix") + if self._rank != 2: + raise ValueError('Dimensions must be of size of 2') + + return Matrix(self._expand_array().tomatrix()) + + +def isLambda(v): + LAMBDA = lambda: 0 + return isinstance(v, type(LAMBDA)) and v.__name__ == LAMBDA.__name__ + +class ArrayComprehensionMap(ArrayComprehension): + ''' + A subclass of ArrayComprehension dedicated to map external function lambda. + + Notes + ===== + + Only the lambda function is considered. + At most one argument in lambda function is accepted in order to avoid ambiguity + in value assignment. + + Examples + ======== + + >>> from sympy.tensor.array import ArrayComprehensionMap + >>> from sympy import symbols + >>> i, j, k = symbols('i j k') + >>> a = ArrayComprehensionMap(lambda: 1, (i, 1, 4)) + >>> a.doit() + [1, 1, 1, 1] + >>> b = ArrayComprehensionMap(lambda a: a+1, (j, 1, 4)) + >>> b.doit() + [2, 3, 4, 5] + + ''' + def __new__(cls, function, *symbols, **assumptions): + if any(len(l) != 3 or None for l in symbols): + raise ValueError('ArrayComprehension requires values lower and upper bound' + ' for the expression') + + if not isLambda(function): + raise ValueError('Data type not supported') + + arglist = cls._check_limits_validity(function, symbols) + obj = Basic.__new__(cls, *arglist, **assumptions) + obj._limits = obj._args + obj._shape = cls._calculate_shape_from_limits(obj._limits) + obj._rank = len(obj._shape) + obj._loop_size = cls._calculate_loop_size(obj._shape) + obj._lambda = function + return obj + + @property + def func(self): + class _(ArrayComprehensionMap): + def __new__(cls, *args, **kwargs): + return ArrayComprehensionMap(self._lambda, *args, **kwargs) + return _ + + def _get_element(self, values): + temp = self._lambda + if self._lambda.__code__.co_argcount == 0: + temp = temp() + elif self._lambda.__code__.co_argcount == 1: + temp = temp(functools.reduce(lambda a, b: a*b, values)) + return temp diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/array_derivatives.py b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/array_derivatives.py new file mode 100644 index 0000000000000000000000000000000000000000..797d5c1f6b3e95aa36c40d205bab5e3f8c9fceec --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/array_derivatives.py @@ -0,0 +1,129 @@ +from __future__ import annotations + +from sympy.core.expr import Expr +from sympy.core.function import 
Derivative +from sympy.core.numbers import Integer +from sympy.matrices.common import MatrixCommon +from .ndim_array import NDimArray +from .arrayop import derive_by_array +from sympy.matrices.expressions.matexpr import MatrixExpr +from sympy.matrices.expressions.special import ZeroMatrix +from sympy.matrices.expressions.matexpr import _matrix_derivative + + +class ArrayDerivative(Derivative): + + is_scalar = False + + def __new__(cls, expr, *variables, **kwargs): + obj = super().__new__(cls, expr, *variables, **kwargs) + if isinstance(obj, ArrayDerivative): + obj._shape = obj._get_shape() + return obj + + def _get_shape(self): + shape = () + for v, count in self.variable_count: + if hasattr(v, "shape"): + for i in range(count): + shape += v.shape + if hasattr(self.expr, "shape"): + shape += self.expr.shape + return shape + + @property + def shape(self): + return self._shape + + @classmethod + def _get_zero_with_shape_like(cls, expr): + if isinstance(expr, (MatrixCommon, NDimArray)): + return expr.zeros(*expr.shape) + elif isinstance(expr, MatrixExpr): + return ZeroMatrix(*expr.shape) + else: + raise RuntimeError("Unable to determine shape of array-derivative.") + + @staticmethod + def _call_derive_scalar_by_matrix(expr: Expr, v: MatrixCommon) -> Expr: + return v.applyfunc(lambda x: expr.diff(x)) + + @staticmethod + def _call_derive_scalar_by_matexpr(expr: Expr, v: MatrixExpr) -> Expr: + if expr.has(v): + return _matrix_derivative(expr, v) + else: + return ZeroMatrix(*v.shape) + + @staticmethod + def _call_derive_scalar_by_array(expr: Expr, v: NDimArray) -> Expr: + return v.applyfunc(lambda x: expr.diff(x)) + + @staticmethod + def _call_derive_matrix_by_scalar(expr: MatrixCommon, v: Expr) -> Expr: + return _matrix_derivative(expr, v) + + @staticmethod + def _call_derive_matexpr_by_scalar(expr: MatrixExpr, v: Expr) -> Expr: + return expr._eval_derivative(v) + + @staticmethod + def _call_derive_array_by_scalar(expr: NDimArray, v: Expr) -> Expr: + return expr.applyfunc(lambda x: x.diff(v)) + + @staticmethod + def _call_derive_default(expr: Expr, v: Expr) -> Expr | None: + if expr.has(v): + return _matrix_derivative(expr, v) + else: + return None + + @classmethod + def _dispatch_eval_derivative_n_times(cls, expr, v, count): + # Evaluate the derivative `n` times. 
If + # `_eval_derivative_n_times` is not overridden by the current + # object, the default in `Basic` will fall back to a loop over + # `_eval_derivative`: + + if not isinstance(count, (int, Integer)) or ((count <= 0) == True): + return None + + # TODO: this could be done with multiple-dispatching: + if expr.is_scalar: + if isinstance(v, MatrixCommon): + result = cls._call_derive_scalar_by_matrix(expr, v) + elif isinstance(v, MatrixExpr): + result = cls._call_derive_scalar_by_matexpr(expr, v) + elif isinstance(v, NDimArray): + result = cls._call_derive_scalar_by_array(expr, v) + elif v.is_scalar: + # scalar by scalar has a special handling, deferred to the parent class: + return super()._dispatch_eval_derivative_n_times(expr, v, count) + else: + return None + elif v.is_scalar: + if isinstance(expr, MatrixCommon): + result = cls._call_derive_matrix_by_scalar(expr, v) + elif isinstance(expr, MatrixExpr): + result = cls._call_derive_matexpr_by_scalar(expr, v) + elif isinstance(expr, NDimArray): + result = cls._call_derive_array_by_scalar(expr, v) + else: + return None + else: + # Both `expr` and `v` are some array/matrix type: + if isinstance(expr, MatrixCommon) or isinstance(v, MatrixCommon): + result = derive_by_array(expr, v) + elif isinstance(expr, MatrixExpr) and isinstance(v, MatrixExpr): + result = cls._call_derive_default(expr, v) + elif isinstance(expr, MatrixExpr) or isinstance(v, MatrixExpr): + # if one expression is a symbolic matrix expression while the other isn't, don't evaluate: + return None + else: + result = derive_by_array(expr, v) + if result is None: + return None + if count == 1: + return result + else: + return cls._dispatch_eval_derivative_n_times(result, v, count - 1) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/arrayop.py b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/arrayop.py new file mode 100644 index 0000000000000000000000000000000000000000..2bac3a0af88ac83c624c79f16db7d88d8d74a7db --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/arrayop.py @@ -0,0 +1,528 @@ +import itertools +from collections.abc import Iterable + +from sympy.core._print_helpers import Printable +from sympy.core.containers import Tuple +from sympy.core.function import diff +from sympy.core.singleton import S +from sympy.core.sympify import _sympify + +from sympy.tensor.array.ndim_array import NDimArray +from sympy.tensor.array.dense_ndim_array import DenseNDimArray, ImmutableDenseNDimArray +from sympy.tensor.array.sparse_ndim_array import SparseNDimArray + + +def _arrayfy(a): + from sympy.matrices import MatrixBase + + if isinstance(a, NDimArray): + return a + if isinstance(a, (MatrixBase, list, tuple, Tuple)): + return ImmutableDenseNDimArray(a) + return a + + +def tensorproduct(*args): + """ + Tensor product among scalars or array-like objects. + + The equivalent operator for array expressions is ``ArrayTensorProduct``, + which can be used to keep the expression unevaluated. + + Examples + ======== + + >>> from sympy.tensor.array import tensorproduct, Array + >>> from sympy.abc import x, y, z, t + >>> A = Array([[1, 2], [3, 4]]) + >>> B = Array([x, y]) + >>> tensorproduct(A, B) + [[[x, y], [2*x, 2*y]], [[3*x, 3*y], [4*x, 4*y]]] + >>> tensorproduct(A, x) + [[x, 2*x], [3*x, 4*x]] + >>> tensorproduct(A, B, B) + [[[[x**2, x*y], [x*y, y**2]], [[2*x**2, 2*x*y], [2*x*y, 2*y**2]]], [[[3*x**2, 3*x*y], [3*x*y, 3*y**2]], [[4*x**2, 4*x*y], [4*x*y, 4*y**2]]]] + + Applying this function on two matrices will result in a rank 4 array.
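+ + More generally, the rank of the result is the sum of the ranks of the + arguments, as can be checked with the arrays defined above: + + >>> tensorproduct(A, B).rank() + 3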
+ + >>> from sympy import Matrix, eye + >>> m = Matrix([[x, y], [z, t]]) + >>> p = tensorproduct(eye(3), m) + >>> p + [[[[x, y], [z, t]], [[0, 0], [0, 0]], [[0, 0], [0, 0]]], [[[0, 0], [0, 0]], [[x, y], [z, t]], [[0, 0], [0, 0]]], [[[0, 0], [0, 0]], [[0, 0], [0, 0]], [[x, y], [z, t]]]] + + See Also + ======== + + sympy.tensor.array.expressions.array_expressions.ArrayTensorProduct + + """ + from sympy.tensor.array import SparseNDimArray, ImmutableSparseNDimArray + + if len(args) == 0: + return S.One + if len(args) == 1: + return _arrayfy(args[0]) + from sympy.tensor.array.expressions.array_expressions import _CodegenArrayAbstract + from sympy.tensor.array.expressions.array_expressions import ArrayTensorProduct + from sympy.tensor.array.expressions.array_expressions import _ArrayExpr + from sympy.matrices.expressions.matexpr import MatrixSymbol + if any(isinstance(arg, (_ArrayExpr, _CodegenArrayAbstract, MatrixSymbol)) for arg in args): + return ArrayTensorProduct(*args) + if len(args) > 2: + return tensorproduct(tensorproduct(args[0], args[1]), *args[2:]) + + # length of args is 2: + a, b = map(_arrayfy, args) + + if not isinstance(a, NDimArray) or not isinstance(b, NDimArray): + return a*b + + if isinstance(a, SparseNDimArray) and isinstance(b, SparseNDimArray): + lp = len(b) + new_array = {k1*lp + k2: v1*v2 for k1, v1 in a._sparse_array.items() for k2, v2 in b._sparse_array.items()} + return ImmutableSparseNDimArray(new_array, a.shape + b.shape) + + product_list = [i*j for i in Flatten(a) for j in Flatten(b)] + return ImmutableDenseNDimArray(product_list, a.shape + b.shape) + + +def _util_contraction_diagonal(array, *contraction_or_diagonal_axes): + array = _arrayfy(array) + + # Verify contraction_axes: + taken_dims = set() + for axes_group in contraction_or_diagonal_axes: + if not isinstance(axes_group, Iterable): + raise ValueError("collections of contraction/diagonal axes expected") + + dim = array.shape[axes_group[0]] + + for d in axes_group: + if d in taken_dims: + raise ValueError("dimension specified more than once") + if dim != array.shape[d]: + raise ValueError("cannot contract or diagonalize between axes of different dimension") + taken_dims.add(d) + + rank = array.rank() + + remaining_shape = [dim for i, dim in enumerate(array.shape) if i not in taken_dims] + cum_shape = [0]*rank + _cumul = 1 + for i in range(rank): + cum_shape[rank - i - 1] = _cumul + _cumul *= int(array.shape[rank - i - 1]) + + # DEFINITION: by absolute position it is meant the position along the one + # dimensional array containing all the tensor components. + + # Possible future work on this module: move computation of absolute + # positions to a class method. + + # Determine absolute positions of the uncontracted indices: + remaining_indices = [[cum_shape[i]*j for j in range(array.shape[i])] + for i in range(rank) if i not in taken_dims] + + # Determine absolute positions of the contracted indices: + summed_deltas = [] + for axes_group in contraction_or_diagonal_axes: + lidx = [] + for js in range(array.shape[axes_group[0]]): + lidx.append(sum([cum_shape[ig] * js for ig in axes_group])) + summed_deltas.append(lidx) + + return array, remaining_indices, remaining_shape, summed_deltas + + +def tensorcontraction(array, *contraction_axes): + """ + Contraction of an array-like object on the specified axes. + + The equivalent operator for array expressions is ``ArrayContraction``, + which can be used to keep the expression unevaluated. 
+ + Examples + ======== + + >>> from sympy import Array, tensorcontraction + >>> from sympy import Matrix, eye + >>> tensorcontraction(eye(3), (0, 1)) + 3 + >>> A = Array(range(18), (3, 2, 3)) + >>> A + [[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]], [[12, 13, 14], [15, 16, 17]]] + >>> tensorcontraction(A, (0, 2)) + [21, 30] + + Matrix multiplication may be emulated with a proper combination of + ``tensorcontraction`` and ``tensorproduct`` + + >>> from sympy import tensorproduct + >>> from sympy.abc import a,b,c,d,e,f,g,h + >>> m1 = Matrix([[a, b], [c, d]]) + >>> m2 = Matrix([[e, f], [g, h]]) + >>> p = tensorproduct(m1, m2) + >>> p + [[[[a*e, a*f], [a*g, a*h]], [[b*e, b*f], [b*g, b*h]]], [[[c*e, c*f], [c*g, c*h]], [[d*e, d*f], [d*g, d*h]]]] + >>> tensorcontraction(p, (1, 2)) + [[a*e + b*g, a*f + b*h], [c*e + d*g, c*f + d*h]] + >>> m1*m2 + Matrix([ + [a*e + b*g, a*f + b*h], + [c*e + d*g, c*f + d*h]]) + + See Also + ======== + + sympy.tensor.array.expressions.array_expressions.ArrayContraction + + """ + from sympy.tensor.array.expressions.array_expressions import _array_contraction + from sympy.tensor.array.expressions.array_expressions import _CodegenArrayAbstract + from sympy.tensor.array.expressions.array_expressions import _ArrayExpr + from sympy.matrices.expressions.matexpr import MatrixSymbol + if isinstance(array, (_ArrayExpr, _CodegenArrayAbstract, MatrixSymbol)): + return _array_contraction(array, *contraction_axes) + + array, remaining_indices, remaining_shape, summed_deltas = _util_contraction_diagonal(array, *contraction_axes) + + # Compute the contracted array: + # + # 1. external for loops on all uncontracted indices. + # Uncontracted indices are determined by the combinatorial product of + # the absolute positions of the remaining indices. + # 2. internal loop on all contracted indices. + # It sums the values of the absolute contracted index and the absolute + # uncontracted index for the external loop. + contracted_array = [] + for icontrib in itertools.product(*remaining_indices): + index_base_position = sum(icontrib) + isum = S.Zero + for sum_to_index in itertools.product(*summed_deltas): + idx = array._get_tuple_index(index_base_position + sum(sum_to_index)) + isum += array[idx] + + contracted_array.append(isum) + + if len(remaining_indices) == 0: + assert len(contracted_array) == 1 + return contracted_array[0] + + return type(array)(contracted_array, remaining_shape) + + +def tensordiagonal(array, *diagonal_axes): + """ + Diagonalization of an array-like object on the specified axes. + + This is equivalent to multiplying the expression by Kronecker deltas + uniting the axes. + + The diagonal indices are put at the end of the axes. + + The equivalent operator for array expressions is ``ArrayDiagonal``, which + can be used to keep the expression unevaluated. 
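+ + In index notation, diagonalizing axes 0 and 1 of a rank-2 array `A` gives + the vector `B_k = A_{k, k}`, without the summation that + ``tensorcontraction`` would perform.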
+ + Examples + ======== + + ``tensordiagonal`` acting on a 2-dimensional array on axes 0 and 1 is + equivalent to the diagonal of the matrix: + + >>> from sympy import Array, tensordiagonal + >>> from sympy import Matrix, eye + >>> tensordiagonal(eye(3), (0, 1)) + [1, 1, 1] + + >>> from sympy.abc import a,b,c,d + >>> m1 = Matrix([[a, b], [c, d]]) + >>> tensordiagonal(m1, [0, 1]) + [a, d] + + In case of higher dimensional arrays, the diagonalized out dimensions + are removed and appended as a single dimension at the end: + + >>> A = Array(range(18), (3, 2, 3)) + >>> A + [[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]], [[12, 13, 14], [15, 16, 17]]] + >>> tensordiagonal(A, (0, 2)) + [[0, 7, 14], [3, 10, 17]] + >>> from sympy import permutedims + >>> tensordiagonal(A, (0, 2)) == permutedims(Array([A[0, :, 0], A[1, :, 1], A[2, :, 2]]), [1, 0]) + True + + See Also + ======== + + sympy.tensor.array.expressions.array_expressions.ArrayDiagonal + + """ + if any(len(i) <= 1 for i in diagonal_axes): + raise ValueError("need at least two axes to diagonalize") + + from sympy.tensor.array.expressions.array_expressions import _ArrayExpr + from sympy.tensor.array.expressions.array_expressions import _CodegenArrayAbstract + from sympy.tensor.array.expressions.array_expressions import ArrayDiagonal, _array_diagonal + from sympy.matrices.expressions.matexpr import MatrixSymbol + if isinstance(array, (_ArrayExpr, _CodegenArrayAbstract, MatrixSymbol)): + return _array_diagonal(array, *diagonal_axes) + + ArrayDiagonal._validate(array, *diagonal_axes) + + array, remaining_indices, remaining_shape, diagonal_deltas = _util_contraction_diagonal(array, *diagonal_axes) + + # Compute the diagonalized array: + # + # 1. external for loops on all undiagonalized indices. + # Undiagonalized indices are determined by the combinatorial product of + # the absolute positions of the remaining indices. + # 2. internal loop on all diagonal indices. + # It collects the values at the absolute diagonalized index and the absolute + # undiagonalized index for the external loop. + diagonalized_array = [] + diagonal_shape = [len(i) for i in diagonal_deltas] + for icontrib in itertools.product(*remaining_indices): + index_base_position = sum(icontrib) + isum = [] + for sum_to_index in itertools.product(*diagonal_deltas): + idx = array._get_tuple_index(index_base_position + sum(sum_to_index)) + isum.append(array[idx]) + + isum = type(array)(isum).reshape(*diagonal_shape) + diagonalized_array.append(isum) + + return type(array)(diagonalized_array, remaining_shape + diagonal_shape) + + +def derive_by_array(expr, dx): + r""" + Derivative by arrays. Supports both arrays and scalars. + + The equivalent operator for array expressions is ``array_derive``.
+ + Explanation + =========== + + Given the array `A_{i_1, \ldots, i_N}` and the array `X_{j_1, \ldots, j_M}` + this function will return a new array `B` defined by + + `B_{j_1,\ldots,j_M,i_1,\ldots,i_N} := \frac{\partial A_{i_1,\ldots,i_N}}{\partial X_{j_1,\ldots,j_M}}` + + Examples + ======== + + >>> from sympy import derive_by_array + >>> from sympy.abc import x, y, z, t + >>> from sympy import cos + >>> derive_by_array(cos(x*t), x) + -t*sin(t*x) + >>> derive_by_array(cos(x*t), [x, y, z, t]) + [-t*sin(t*x), 0, 0, -x*sin(t*x)] + >>> derive_by_array([x, y**2*z], [[x, y], [z, t]]) + [[[1, 0], [0, 2*y*z]], [[0, y**2], [0, 0]]] + + """ + from sympy.matrices import MatrixBase + from sympy.tensor.array import SparseNDimArray + array_types = (Iterable, MatrixBase, NDimArray) + + if isinstance(dx, array_types): + dx = ImmutableDenseNDimArray(dx) + for i in dx: + if not i._diff_wrt: + raise ValueError("cannot derive by this array") + + if isinstance(expr, array_types): + if isinstance(expr, NDimArray): + expr = expr.as_immutable() + else: + expr = ImmutableDenseNDimArray(expr) + + if isinstance(dx, array_types): + if isinstance(expr, SparseNDimArray): + lp = len(expr) + new_array = {k + i*lp: v + for i, x in enumerate(Flatten(dx)) + for k, v in expr.diff(x)._sparse_array.items()} + else: + new_array = [[y.diff(x) for y in Flatten(expr)] for x in Flatten(dx)] + return type(expr)(new_array, dx.shape + expr.shape) + else: + return expr.diff(dx) + else: + expr = _sympify(expr) + if isinstance(dx, array_types): + return ImmutableDenseNDimArray([expr.diff(i) for i in Flatten(dx)], dx.shape) + else: + dx = _sympify(dx) + return diff(expr, dx) + + +def permutedims(expr, perm=None, index_order_old=None, index_order_new=None): + """ + Permutes the indices of an array. + + Parameter specifies the permutation of the indices. + + The equivalent operator for array expressions is ``PermuteDims``, which can + be used to keep the expression unevaluated. 
+ + Examples + ======== + + >>> from sympy.abc import x, y, z, t + >>> from sympy import sin + >>> from sympy import Array, permutedims + >>> a = Array([[x, y, z], [t, sin(x), 0]]) + >>> a + [[x, y, z], [t, sin(x), 0]] + >>> permutedims(a, (1, 0)) + [[x, t], [y, sin(x)], [z, 0]] + + If the array is of second order, ``transpose`` can be used: + + >>> from sympy import transpose + >>> transpose(a) + [[x, t], [y, sin(x)], [z, 0]] + + Examples on higher dimensions: + + >>> b = Array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) + >>> permutedims(b, (2, 1, 0)) + [[[1, 5], [3, 7]], [[2, 6], [4, 8]]] + >>> permutedims(b, (1, 2, 0)) + [[[1, 5], [2, 6]], [[3, 7], [4, 8]]] + + An alternative way to specify the same permutations as in the previous + lines involves passing the *old* and *new* indices, either as a list or as + a string: + + >>> permutedims(b, index_order_old="cba", index_order_new="abc") + [[[1, 5], [3, 7]], [[2, 6], [4, 8]]] + >>> permutedims(b, index_order_old="cab", index_order_new="abc") + [[[1, 5], [2, 6]], [[3, 7], [4, 8]]] + + ``Permutation`` objects are also allowed: + + >>> from sympy.combinatorics import Permutation + >>> permutedims(b, Permutation([1, 2, 0])) + [[[1, 5], [2, 6]], [[3, 7], [4, 8]]] + + See Also + ======== + + sympy.tensor.array.expressions.array_expressions.PermuteDims + + """ + from sympy.tensor.array import SparseNDimArray + + from sympy.tensor.array.expressions.array_expressions import _ArrayExpr + from sympy.tensor.array.expressions.array_expressions import _CodegenArrayAbstract + from sympy.tensor.array.expressions.array_expressions import _permute_dims + from sympy.matrices.expressions.matexpr import MatrixSymbol + from sympy.tensor.array.expressions import PermuteDims + from sympy.tensor.array.expressions.array_expressions import get_rank + perm = PermuteDims._get_permutation_from_arguments(perm, index_order_old, index_order_new, get_rank(expr)) + if isinstance(expr, (_ArrayExpr, _CodegenArrayAbstract, MatrixSymbol)): + return _permute_dims(expr, perm) + + if not isinstance(expr, NDimArray): + expr = ImmutableDenseNDimArray(expr) + + from sympy.combinatorics import Permutation + if not isinstance(perm, Permutation): + perm = Permutation(list(perm)) + + if perm.size != expr.rank(): + raise ValueError("wrong permutation size") + + # Get the inverse permutation: + iperm = ~perm + new_shape = perm(expr.shape) + + if isinstance(expr, SparseNDimArray): + return type(expr)({tuple(perm(expr._get_tuple_index(k))): v + for k, v in expr._sparse_array.items()}, new_shape) + + indices_span = perm([range(i) for i in expr.shape]) + + new_array = [None]*len(expr) + for i, idx in enumerate(itertools.product(*indices_span)): + t = iperm(idx) + new_array[i] = expr[t] + + return type(expr)(new_array, new_shape) + + +class Flatten(Printable): + """ + Flatten an iterable object to a list in a lazy-evaluation way. + + Notes + ===== + + This class is an iterator with which the memory cost can be economised. + Optimisation has been considered to ameliorate the performance for some + specific data types like DenseNDimArray and SparseNDimArray. 
+ + Examples + ======== + + >>> from sympy.tensor.array.arrayop import Flatten + >>> from sympy.tensor.array import Array + >>> A = Array(range(6)).reshape(2, 3) + >>> Flatten(A) + Flatten([[0, 1, 2], [3, 4, 5]]) + >>> [i for i in Flatten(A)] + [0, 1, 2, 3, 4, 5] + """ + def __init__(self, iterable): + from sympy.matrices.matrices import MatrixBase + from sympy.tensor.array import NDimArray + + if not isinstance(iterable, (Iterable, MatrixBase)): + raise NotImplementedError("Data type not yet supported") + + if isinstance(iterable, list): + iterable = NDimArray(iterable) + + self._iter = iterable + self._idx = 0 + + def __iter__(self): + return self + + def __next__(self): + from sympy.matrices.matrices import MatrixBase + + if len(self._iter) > self._idx: + if isinstance(self._iter, DenseNDimArray): + result = self._iter._array[self._idx] + + elif isinstance(self._iter, SparseNDimArray): + if self._idx in self._iter._sparse_array: + result = self._iter._sparse_array[self._idx] + else: + result = 0 + + elif isinstance(self._iter, MatrixBase): + result = self._iter[self._idx] + + elif hasattr(self._iter, '__next__'): + result = next(self._iter) + + else: + result = self._iter[self._idx] + + else: + raise StopIteration + + self._idx += 1 + return result + + def next(self): + return self.__next__() + + def _sympystr(self, printer): + return type(self).__name__ + '(' + printer._print(self._iter) + ')' diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/dense_ndim_array.py b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/dense_ndim_array.py new file mode 100644 index 0000000000000000000000000000000000000000..576e452c55d8d374ca1f72c553f3a64de7227d43 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/dense_ndim_array.py @@ -0,0 +1,206 @@ +import functools +from typing import List + +from sympy.core.basic import Basic +from sympy.core.containers import Tuple +from sympy.core.singleton import S +from sympy.core.sympify import _sympify +from sympy.tensor.array.mutable_ndim_array import MutableNDimArray +from sympy.tensor.array.ndim_array import NDimArray, ImmutableNDimArray, ArrayKind +from sympy.utilities.iterables import flatten + + +class DenseNDimArray(NDimArray): + + _array: List[Basic] + + def __new__(self, *args, **kwargs): + return ImmutableDenseNDimArray(*args, **kwargs) + + @property + def kind(self) -> ArrayKind: + return ArrayKind._union(self._array) + + def __getitem__(self, index): + """ + Allows to get items from N-dim array. 
+ + Examples + ======== + + >>> from sympy import MutableDenseNDimArray + >>> a = MutableDenseNDimArray([0, 1, 2, 3], (2, 2)) + >>> a + [[0, 1], [2, 3]] + >>> a[0, 0] + 0 + >>> a[1, 1] + 3 + >>> a[0] + [0, 1] + >>> a[1] + [2, 3] + + + Symbolic index: + + >>> from sympy.abc import i, j + >>> a[i, j] + [[0, 1], [2, 3]][i, j] + + Replace `i` and `j` to get element `(1, 1)`: + + >>> a[i, j].subs({i: 1, j: 1}) + 3 + + """ + syindex = self._check_symbolic_index(index) + if syindex is not None: + return syindex + + index = self._check_index_for_getitem(index) + + if isinstance(index, tuple) and any(isinstance(i, slice) for i in index): + sl_factors, eindices = self._get_slice_data_for_array_access(index) + array = [self._array[self._parse_index(i)] for i in eindices] + nshape = [len(el) for i, el in enumerate(sl_factors) if isinstance(index[i], slice)] + return type(self)(array, nshape) + else: + index = self._parse_index(index) + return self._array[index] + + @classmethod + def zeros(cls, *shape): + list_length = functools.reduce(lambda x, y: x*y, shape, S.One) + return cls._new(([0]*list_length,), shape) + + def tomatrix(self): + """ + Converts MutableDenseNDimArray to Matrix. Can convert only 2-dim array, else will raise error. + + Examples + ======== + + >>> from sympy import MutableDenseNDimArray + >>> a = MutableDenseNDimArray([1 for i in range(9)], (3, 3)) + >>> b = a.tomatrix() + >>> b + Matrix([ + [1, 1, 1], + [1, 1, 1], + [1, 1, 1]]) + + """ + from sympy.matrices import Matrix + + if self.rank() != 2: + raise ValueError('Dimensions must be of size of 2') + + return Matrix(self.shape[0], self.shape[1], self._array) + + def reshape(self, *newshape): + """ + Returns MutableDenseNDimArray instance with new shape. Elements number + must be suitable to new shape. The only argument of method sets + new shape. 
+ + Examples + ======== + + >>> from sympy import MutableDenseNDimArray + >>> a = MutableDenseNDimArray([1, 2, 3, 4, 5, 6], (2, 3)) + >>> a.shape + (2, 3) + >>> a + [[1, 2, 3], [4, 5, 6]] + >>> b = a.reshape(3, 2) + >>> b.shape + (3, 2) + >>> b + [[1, 2], [3, 4], [5, 6]] + + """ + new_total_size = functools.reduce(lambda x,y: x*y, newshape) + if new_total_size != self._loop_size: + raise ValueError('Expecting reshape size to %d but got prod(%s) = %d' % ( + self._loop_size, str(newshape), new_total_size)) + + # there is no `.func` as this class does not subtype `Basic`: + return type(self)(self._array, newshape) + + +class ImmutableDenseNDimArray(DenseNDimArray, ImmutableNDimArray): # type: ignore + def __new__(cls, iterable, shape=None, **kwargs): + return cls._new(iterable, shape, **kwargs) + + @classmethod + def _new(cls, iterable, shape, **kwargs): + shape, flat_list = cls._handle_ndarray_creation_inputs(iterable, shape, **kwargs) + shape = Tuple(*map(_sympify, shape)) + cls._check_special_bounds(flat_list, shape) + flat_list = flatten(flat_list) + flat_list = Tuple(*flat_list) + self = Basic.__new__(cls, flat_list, shape, **kwargs) + self._shape = shape + self._array = list(flat_list) + self._rank = len(shape) + self._loop_size = functools.reduce(lambda x,y: x*y, shape, 1) + return self + + def __setitem__(self, index, value): + raise TypeError('immutable N-dim array') + + def as_mutable(self): + return MutableDenseNDimArray(self) + + def _eval_simplify(self, **kwargs): + from sympy.simplify.simplify import simplify + return self.applyfunc(simplify) + +class MutableDenseNDimArray(DenseNDimArray, MutableNDimArray): + + def __new__(cls, iterable=None, shape=None, **kwargs): + return cls._new(iterable, shape, **kwargs) + + @classmethod + def _new(cls, iterable, shape, **kwargs): + shape, flat_list = cls._handle_ndarray_creation_inputs(iterable, shape, **kwargs) + flat_list = flatten(flat_list) + self = object.__new__(cls) + self._shape = shape + self._array = list(flat_list) + self._rank = len(shape) + self._loop_size = functools.reduce(lambda x,y: x*y, shape) if shape else len(flat_list) + return self + + def __setitem__(self, index, value): + """Allows to set items to MutableDenseNDimArray. 
+ + Examples + ======== + + >>> from sympy import MutableDenseNDimArray + >>> a = MutableDenseNDimArray.zeros(2, 2) + >>> a[0,0] = 1 + >>> a[1,1] = 1 + >>> a + [[1, 0], [0, 1]] + + """ + if isinstance(index, tuple) and any(isinstance(i, slice) for i in index): + value, eindices, slice_offsets = self._get_slice_data_for_array_assignment(index, value) + for i in eindices: + other_i = [ind - j for ind, j in zip(i, slice_offsets) if j is not None] + self._array[self._parse_index(i)] = value[other_i] + else: + index = self._parse_index(index) + self._setter_iterable_check(value) + value = _sympify(value) + self._array[index] = value + + def as_immutable(self): + return ImmutableDenseNDimArray(self) + + @property + def free_symbols(self): + return {i for j in self._array for i in j.free_symbols} diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/mutable_ndim_array.py b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/mutable_ndim_array.py new file mode 100644 index 0000000000000000000000000000000000000000..e1eaaf7241bc3b4a48234178d18da3aa5736e189 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/mutable_ndim_array.py @@ -0,0 +1,13 @@ +from sympy.tensor.array.ndim_array import NDimArray + + +class MutableNDimArray(NDimArray): + + def as_immutable(self): + raise NotImplementedError("abstract method") + + def as_mutable(self): + return self + + def _sympy_(self): + return self.as_immutable() diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/ndim_array.py b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/ndim_array.py new file mode 100644 index 0000000000000000000000000000000000000000..dbb1e0ed2cd2db942ef5e510291485411982e35c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/ndim_array.py @@ -0,0 +1,600 @@ +from sympy.core.basic import Basic +from sympy.core.containers import (Dict, Tuple) +from sympy.core.expr import Expr +from sympy.core.kind import Kind, NumberKind, UndefinedKind +from sympy.core.numbers import Integer +from sympy.core.singleton import S +from sympy.core.sympify import sympify +from sympy.external.gmpy import SYMPY_INTS +from sympy.printing.defaults import Printable + +import itertools +from collections.abc import Iterable + + +class ArrayKind(Kind): + """ + Kind for N-dimensional array in SymPy. + + This kind represents a multidimensional array on which algebraic + operations are defined. The base class for this kind is ``NDimArray``, + but any expression representing an array can have this kind. + + Parameters + ========== + + element_kind : Kind + Kind of the element. Default is :obj:`NumberKind`, + which means that the array contains only numbers. + + Examples + ======== + + Any instance of an array class has ``ArrayKind``. + + >>> from sympy import NDimArray + >>> NDimArray([1,2,3]).kind + ArrayKind(NumberKind) + + Although an expression representing an array may not be an instance of + an array class, it will have ``ArrayKind`` as well. + + >>> from sympy import Integral + >>> from sympy.tensor.array import NDimArray + >>> from sympy.abc import x + >>> intA = Integral(NDimArray([1,2,3]), x) + >>> isinstance(intA, NDimArray) + False + >>> intA.kind + ArrayKind(NumberKind) + + Use ``isinstance()`` to check for ``ArrayKind`` without specifying + the element kind. Use ``is`` to check with a specific element kind.
+ + >>> from sympy.tensor.array import ArrayKind + >>> from sympy.core import NumberKind + >>> boolA = NDimArray([True, False]) + >>> isinstance(boolA.kind, ArrayKind) + True + >>> boolA.kind is ArrayKind(NumberKind) + False + + See Also + ======== + + shape : Function to return the shape of objects with ``MatrixKind``. + + """ + def __new__(cls, element_kind=NumberKind): + obj = super().__new__(cls, element_kind) + obj.element_kind = element_kind + return obj + + def __repr__(self): + return "ArrayKind(%s)" % self.element_kind + + @classmethod + def _union(cls, kinds) -> 'ArrayKind': + elem_kinds = {e.kind for e in kinds} + if len(elem_kinds) == 1: + elemkind, = elem_kinds + else: + elemkind = UndefinedKind + return ArrayKind(elemkind) + + +class NDimArray(Printable): + """N-dimensional array. + + Examples + ======== + + Create an N-dim array of zeros: + + >>> from sympy import MutableDenseNDimArray + >>> a = MutableDenseNDimArray.zeros(2, 3, 4) + >>> a + [[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]] + + Create an N-dim array from a list; + + >>> a = MutableDenseNDimArray([[2, 3], [4, 5]]) + >>> a + [[2, 3], [4, 5]] + + >>> b = MutableDenseNDimArray([[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [11, 12]]]) + >>> b + [[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [11, 12]]] + + Create an N-dim array from a flat list with dimension shape: + + >>> a = MutableDenseNDimArray([1, 2, 3, 4, 5, 6], (2, 3)) + >>> a + [[1, 2, 3], [4, 5, 6]] + + Create an N-dim array from a matrix: + + >>> from sympy import Matrix + >>> a = Matrix([[1,2],[3,4]]) + >>> a + Matrix([ + [1, 2], + [3, 4]]) + >>> b = MutableDenseNDimArray(a) + >>> b + [[1, 2], [3, 4]] + + Arithmetic operations on N-dim arrays + + >>> a = MutableDenseNDimArray([1, 1, 1, 1], (2, 2)) + >>> b = MutableDenseNDimArray([4, 4, 4, 4], (2, 2)) + >>> c = a + b + >>> c + [[5, 5], [5, 5]] + >>> a - b + [[-3, -3], [-3, -3]] + + """ + + _diff_wrt = True + is_scalar = False + + def __new__(cls, iterable, shape=None, **kwargs): + from sympy.tensor.array import ImmutableDenseNDimArray + return ImmutableDenseNDimArray(iterable, shape, **kwargs) + + def __getitem__(self, index): + raise NotImplementedError("A subclass of NDimArray should implement __getitem__") + + def _parse_index(self, index): + if isinstance(index, (SYMPY_INTS, Integer)): + if index >= self._loop_size: + raise ValueError("Only a tuple index is accepted") + return index + + if self._loop_size == 0: + raise ValueError("Index not valid with an empty array") + + if len(index) != self._rank: + raise ValueError('Wrong number of array axes') + + real_index = 0 + # check if input index can exist in current indexing + for i in range(self._rank): + if (index[i] >= self.shape[i]) or (index[i] < -self.shape[i]): + raise ValueError('Index ' + str(index) + ' out of border') + if index[i] < 0: + real_index += 1 + real_index = real_index*self.shape[i] + index[i] + + return real_index + + def _get_tuple_index(self, integer_index): + index = [] + for i, sh in enumerate(reversed(self.shape)): + index.append(integer_index % sh) + integer_index //= sh + index.reverse() + return tuple(index) + + def _check_symbolic_index(self, index): + # Check if any index is symbolic: + tuple_index = (index if isinstance(index, tuple) else (index,)) + if any((isinstance(i, Expr) and (not i.is_number)) for i in tuple_index): + for i, nth_dim in zip(tuple_index, self.shape): + if ((i < 0) == True) or ((i >= nth_dim) == True): + raise ValueError("index out of range") + from sympy.tensor 
import Indexed + return Indexed(self, *tuple_index) + return None + + def _setter_iterable_check(self, value): + from sympy.matrices.matrices import MatrixBase + if isinstance(value, (Iterable, MatrixBase, NDimArray)): + raise NotImplementedError + + @classmethod + def _scan_iterable_shape(cls, iterable): + def f(pointer): + if not isinstance(pointer, Iterable): + return [pointer], () + + if len(pointer) == 0: + return [], (0,) + + result = [] + elems, shapes = zip(*[f(i) for i in pointer]) + if len(set(shapes)) != 1: + raise ValueError("could not determine shape unambiguously") + for i in elems: + result.extend(i) + return result, (len(shapes),)+shapes[0] + + return f(iterable) + + @classmethod + def _handle_ndarray_creation_inputs(cls, iterable=None, shape=None, **kwargs): + from sympy.matrices.matrices import MatrixBase + from sympy.tensor.array import SparseNDimArray + + if shape is None: + if iterable is None: + shape = () + iterable = () + # Construction of a sparse array from a sparse array + elif isinstance(iterable, SparseNDimArray): + return iterable._shape, iterable._sparse_array + + # Construct N-dim array from another N-dim array: + elif isinstance(iterable, NDimArray): + shape = iterable.shape + + # Construct N-dim array from an iterable (numpy arrays included): + elif isinstance(iterable, Iterable): + iterable, shape = cls._scan_iterable_shape(iterable) + + # Construct N-dim array from a Matrix: + elif isinstance(iterable, MatrixBase): + shape = iterable.shape + + else: + shape = () + iterable = (iterable,) + + if isinstance(iterable, (Dict, dict)) and shape is not None: + new_dict = iterable.copy() + for k, v in new_dict.items(): + if isinstance(k, (tuple, Tuple)): + new_key = 0 + for i, idx in enumerate(k): + new_key = new_key * shape[i] + idx + iterable[new_key] = iterable[k] + del iterable[k] + + if isinstance(shape, (SYMPY_INTS, Integer)): + shape = (shape,) + + if not all(isinstance(dim, (SYMPY_INTS, Integer)) for dim in shape): + raise TypeError("Shape should contain integers only.") + + return tuple(shape), iterable + + def __len__(self): + """Overload common function len(). Returns number of elements in array. + + Examples + ======== + + >>> from sympy import MutableDenseNDimArray + >>> a = MutableDenseNDimArray.zeros(3, 3) + >>> a + [[0, 0, 0], [0, 0, 0], [0, 0, 0]] + >>> len(a) + 9 + + """ + return self._loop_size + + @property + def shape(self): + """ + Returns array shape (dimension). + + Examples + ======== + + >>> from sympy import MutableDenseNDimArray + >>> a = MutableDenseNDimArray.zeros(3, 3) + >>> a.shape + (3, 3) + + """ + return self._shape + + def rank(self): + """ + Returns rank of array. + + Examples + ======== + + >>> from sympy import MutableDenseNDimArray + >>> a = MutableDenseNDimArray.zeros(3,4,5,6,3) + >>> a.rank() + 5 + + """ + return self._rank + + def diff(self, *args, **kwargs): + """ + Calculate the derivative of each element in the array. 
+ + Examples + ======== + + >>> from sympy import ImmutableDenseNDimArray + >>> from sympy.abc import x, y + >>> M = ImmutableDenseNDimArray([[x, y], [1, x*y]]) + >>> M.diff(x) + [[1, 0], [0, y]] + + """ + from sympy.tensor.array.array_derivatives import ArrayDerivative + kwargs.setdefault('evaluate', True) + return ArrayDerivative(self.as_immutable(), *args, **kwargs) + + def _eval_derivative(self, base): + # Types are (base: scalar, self: array) + return self.applyfunc(lambda x: base.diff(x)) + + def _eval_derivative_n_times(self, s, n): + return Basic._eval_derivative_n_times(self, s, n) + + def applyfunc(self, f): + """Apply a function to each element of the N-dim array. + + Examples + ======== + + >>> from sympy import ImmutableDenseNDimArray + >>> m = ImmutableDenseNDimArray([i*2+j for i in range(2) for j in range(2)], (2, 2)) + >>> m + [[0, 1], [2, 3]] + >>> m.applyfunc(lambda i: 2*i) + [[0, 2], [4, 6]] + """ + from sympy.tensor.array import SparseNDimArray + from sympy.tensor.array.arrayop import Flatten + + if isinstance(self, SparseNDimArray) and f(S.Zero) == 0: + return type(self)({k: f(v) for k, v in self._sparse_array.items() if f(v) != 0}, self.shape) + + return type(self)(map(f, Flatten(self)), self.shape) + + def _sympystr(self, printer): + def f(sh, shape_left, i, j): + if len(shape_left) == 1: + return "["+", ".join([printer._print(self[self._get_tuple_index(e)]) for e in range(i, j)])+"]" + + sh //= shape_left[0] + return "[" + ", ".join([f(sh, shape_left[1:], i+e*sh, i+(e+1)*sh) for e in range(shape_left[0])]) + "]" # + "\n"*len(shape_left) + + if self.rank() == 0: + return printer._print(self[()]) + + return f(self._loop_size, self.shape, 0, self._loop_size) + + def tolist(self): + """ + Convert MutableDenseNDimArray to a nested list. + + Examples + ======== + + >>> from sympy import MutableDenseNDimArray + >>> a = MutableDenseNDimArray([1, 2, 3, 4], (2, 2)) + >>> a + [[1, 2], [3, 4]] + >>> b = a.tolist() + >>> b + [[1, 2], [3, 4]] + """ + + def f(sh, shape_left, i, j): + if len(shape_left) == 1: + return [self[self._get_tuple_index(e)] for e in range(i, j)] + result = [] + sh //= shape_left[0] + for e in range(shape_left[0]): + result.append(f(sh, shape_left[1:], i+e*sh, i+(e+1)*sh)) + return result + + return f(self._loop_size, self.shape, 0, self._loop_size) + + def __add__(self, other): + from sympy.tensor.array.arrayop import Flatten + + if not isinstance(other, NDimArray): + return NotImplemented + + if self.shape != other.shape: + raise ValueError("array shape mismatch") + result_list = [i+j for i,j in zip(Flatten(self), Flatten(other))] + + return type(self)(result_list, self.shape) + + def __sub__(self, other): + from sympy.tensor.array.arrayop import Flatten + + if not isinstance(other, NDimArray): + return NotImplemented + + if self.shape != other.shape: + raise ValueError("array shape mismatch") + result_list = [i-j for i,j in zip(Flatten(self), Flatten(other))] + + return type(self)(result_list, self.shape) + + def __mul__(self, other): + from sympy.matrices.matrices import MatrixBase + from sympy.tensor.array import SparseNDimArray + from sympy.tensor.array.arrayop import Flatten + + if isinstance(other, (Iterable, NDimArray, MatrixBase)): + raise ValueError("scalar expected, use tensorproduct(...)
for tensorial product") + + other = sympify(other) + if isinstance(self, SparseNDimArray): + if other.is_zero: + return type(self)({}, self.shape) + return type(self)({k: other*v for (k, v) in self._sparse_array.items()}, self.shape) + + result_list = [i*other for i in Flatten(self)] + return type(self)(result_list, self.shape) + + def __rmul__(self, other): + from sympy.matrices.matrices import MatrixBase + from sympy.tensor.array import SparseNDimArray + from sympy.tensor.array.arrayop import Flatten + + if isinstance(other, (Iterable, NDimArray, MatrixBase)): + raise ValueError("scalar expected, use tensorproduct(...) for tensorial product") + + other = sympify(other) + if isinstance(self, SparseNDimArray): + if other.is_zero: + return type(self)({}, self.shape) + return type(self)({k: other*v for (k, v) in self._sparse_array.items()}, self.shape) + + result_list = [other*i for i in Flatten(self)] + return type(self)(result_list, self.shape) + + def __truediv__(self, other): + from sympy.matrices.matrices import MatrixBase + from sympy.tensor.array import SparseNDimArray + from sympy.tensor.array.arrayop import Flatten + + if isinstance(other, (Iterable, NDimArray, MatrixBase)): + raise ValueError("scalar expected") + + other = sympify(other) + if isinstance(self, SparseNDimArray) and other != S.Zero: + return type(self)({k: v/other for (k, v) in self._sparse_array.items()}, self.shape) + + result_list = [i/other for i in Flatten(self)] + return type(self)(result_list, self.shape) + + def __rtruediv__(self, other): + raise NotImplementedError('unsupported operation on NDimArray') + + def __neg__(self): + from sympy.tensor.array import SparseNDimArray + from sympy.tensor.array.arrayop import Flatten + + if isinstance(self, SparseNDimArray): + return type(self)({k: -v for (k, v) in self._sparse_array.items()}, self.shape) + + result_list = [-i for i in Flatten(self)] + return type(self)(result_list, self.shape) + + def __iter__(self): + def iterator(): + if self._shape: + for i in range(self._shape[0]): + yield self[i] + else: + yield self[()] + + return iterator() + + def __eq__(self, other): + """ + NDimArray instances can be compared to each other. + Instances equal if they have same shape and data. 
+ + Examples + ======== + + >>> from sympy import MutableDenseNDimArray + >>> a = MutableDenseNDimArray.zeros(2, 3) + >>> b = MutableDenseNDimArray.zeros(2, 3) + >>> a == b + True + >>> c = a.reshape(3, 2) + >>> c == b + False + >>> a[0,0] = 1 + >>> b[0,0] = 2 + >>> a == b + False + """ + from sympy.tensor.array import SparseNDimArray + if not isinstance(other, NDimArray): + return False + + if not self.shape == other.shape: + return False + + if isinstance(self, SparseNDimArray) and isinstance(other, SparseNDimArray): + return dict(self._sparse_array) == dict(other._sparse_array) + + return list(self) == list(other) + + def __ne__(self, other): + return not self == other + + def _eval_transpose(self): + if self.rank() != 2: + raise ValueError("array rank not 2") + from .arrayop import permutedims + return permutedims(self, (1, 0)) + + def transpose(self): + return self._eval_transpose() + + def _eval_conjugate(self): + from sympy.tensor.array.arrayop import Flatten + + return self.func([i.conjugate() for i in Flatten(self)], self.shape) + + def conjugate(self): + return self._eval_conjugate() + + def _eval_adjoint(self): + return self.transpose().conjugate() + + def adjoint(self): + return self._eval_adjoint() + + def _slice_expand(self, s, dim): + if not isinstance(s, slice): + return (s,) + start, stop, step = s.indices(dim) + return [start + i*step for i in range((stop-start)//step)] + + def _get_slice_data_for_array_access(self, index): + sl_factors = [self._slice_expand(i, dim) for (i, dim) in zip(index, self.shape)] + eindices = itertools.product(*sl_factors) + return sl_factors, eindices + + def _get_slice_data_for_array_assignment(self, index, value): + if not isinstance(value, NDimArray): + value = type(self)(value) + sl_factors, eindices = self._get_slice_data_for_array_access(index) + slice_offsets = [min(i) if isinstance(i, list) else None for i in sl_factors] + # TODO: add checks for dimensions for `value`? 
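+ # `slice_offsets` stores, for each axis, the start of the slice (or None + # for an integer index) so that `__setitem__` can re-index the assigned + # value relative to the slice origin.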
return value, eindices, slice_offsets + + @classmethod + def _check_special_bounds(cls, flat_list, shape): + if shape == () and len(flat_list) != 1: + raise ValueError("arrays without shape need one scalar value") + if shape == (0,) and len(flat_list) > 0: + raise ValueError("if array shape is (0,) there cannot be elements") + + def _check_index_for_getitem(self, index): + if isinstance(index, (SYMPY_INTS, Integer, slice)): + index = (index,) + + if len(index) < self.rank(): + index = tuple(index) + \ + tuple(slice(None) for i in range(len(index), self.rank())) + + if len(index) > self.rank(): + raise ValueError('Dimension of index greater than rank of array') + + return index + + +class ImmutableNDimArray(NDimArray, Basic): + _op_priority = 11.0 + + def __hash__(self): + return Basic.__hash__(self) + + def as_immutable(self): + return self + + def as_mutable(self): + raise NotImplementedError("abstract method") diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/sparse_ndim_array.py b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/sparse_ndim_array.py new file mode 100644 index 0000000000000000000000000000000000000000..f11aa95be8ec9d10a9104d48fb28f406fe43845e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/sparse_ndim_array.py @@ -0,0 +1,196 @@ +from sympy.core.basic import Basic +from sympy.core.containers import (Dict, Tuple) +from sympy.core.singleton import S +from sympy.core.sympify import _sympify +from sympy.tensor.array.mutable_ndim_array import MutableNDimArray +from sympy.tensor.array.ndim_array import NDimArray, ImmutableNDimArray +from sympy.utilities.iterables import flatten + +import functools + +class SparseNDimArray(NDimArray): + + def __new__(self, *args, **kwargs): + return ImmutableSparseNDimArray(*args, **kwargs) + + def __getitem__(self, index): + """ + Get an element from a sparse N-dim array. + + Examples + ======== + + >>> from sympy import MutableSparseNDimArray + >>> a = MutableSparseNDimArray(range(4), (2, 2)) + >>> a + [[0, 1], [2, 3]] + >>> a[0, 0] + 0 + >>> a[1, 1] + 3 + >>> a[0] + [0, 1] + >>> a[1] + [2, 3] + + Symbolic indexing: + + >>> from sympy.abc import i, j + >>> a[i, j] + [[0, 1], [2, 3]][i, j] + + Replace `i` and `j` to get element `(0, 0)`: + + >>> a[i, j].subs({i: 0, j: 0}) + 0 + + """ + syindex = self._check_symbolic_index(index) + if syindex is not None: + return syindex + + index = self._check_index_for_getitem(index) + + # `index` is a tuple with one or more slices: + if isinstance(index, tuple) and any(isinstance(i, slice) for i in index): + sl_factors, eindices = self._get_slice_data_for_array_access(index) + array = [self._sparse_array.get(self._parse_index(i), S.Zero) for i in eindices] + nshape = [len(el) for i, el in enumerate(sl_factors) if isinstance(index[i], slice)] + return type(self)(array, nshape) + else: + index = self._parse_index(index) + return self._sparse_array.get(index, S.Zero) + + @classmethod + def zeros(cls, *shape): + """ + Return a sparse N-dim array of zeros. + """ + return cls({}, shape) + + def tomatrix(self): + """ + Converts a MutableSparseNDimArray to a Matrix. Only a 2-dimensional array can be converted; otherwise an error is raised.
+ + Examples + ======== + + >>> from sympy import MutableSparseNDimArray + >>> a = MutableSparseNDimArray([1 for i in range(9)], (3, 3)) + >>> b = a.tomatrix() + >>> b + Matrix([ + [1, 1, 1], + [1, 1, 1], + [1, 1, 1]]) + """ + from sympy.matrices import SparseMatrix + if self.rank() != 2: + raise ValueError('Rank must be 2 to convert to a Matrix') + + mat_sparse = {} + for key, value in self._sparse_array.items(): + mat_sparse[self._get_tuple_index(key)] = value + + return SparseMatrix(self.shape[0], self.shape[1], mat_sparse) + + def reshape(self, *newshape): + new_total_size = functools.reduce(lambda x, y: x*y, newshape) + if new_total_size != self._loop_size: + raise ValueError("Invalid reshape parameters " + str(newshape)) + + return type(self)(self._sparse_array, newshape) + +class ImmutableSparseNDimArray(SparseNDimArray, ImmutableNDimArray): # type: ignore + + def __new__(cls, iterable=None, shape=None, **kwargs): + shape, flat_list = cls._handle_ndarray_creation_inputs(iterable, shape, **kwargs) + shape = Tuple(*map(_sympify, shape)) + cls._check_special_bounds(flat_list, shape) + loop_size = functools.reduce(lambda x, y: x*y, shape) if shape else len(flat_list) + + # Sparse array: + if isinstance(flat_list, (dict, Dict)): + sparse_array = Dict(flat_list) + else: + sparse_array = {} + for i, el in enumerate(flatten(flat_list)): + if el != 0: + sparse_array[i] = _sympify(el) + + sparse_array = Dict(sparse_array) + + self = Basic.__new__(cls, sparse_array, shape, **kwargs) + self._shape = shape + self._rank = len(shape) + self._loop_size = loop_size + self._sparse_array = sparse_array + + return self + + def __setitem__(self, index, value): + raise TypeError("immutable N-dim array") + + def as_mutable(self): + return MutableSparseNDimArray(self) + + +class MutableSparseNDimArray(MutableNDimArray, SparseNDimArray): + + def __new__(cls, iterable=None, shape=None, **kwargs): + shape, flat_list = cls._handle_ndarray_creation_inputs(iterable, shape, **kwargs) + self = object.__new__(cls) + self._shape = shape + self._rank = len(shape) + self._loop_size = functools.reduce(lambda x, y: x*y, shape) if shape else len(flat_list) + + # Sparse array: + if isinstance(flat_list, (dict, Dict)): + self._sparse_array = dict(flat_list) + return self + + self._sparse_array = {} + + for i, el in enumerate(flatten(flat_list)): + if el != 0: + self._sparse_array[i] = _sympify(el) + + return self + + def __setitem__(self, index, value): + """Allows setting items of a MutableSparseNDimArray.
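+ + Assigning a value of zero removes the corresponding entry from the + backing dictionary, so only non-zero entries are ever stored.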
+ + Examples + ======== + + >>> from sympy import MutableSparseNDimArray + >>> a = MutableSparseNDimArray.zeros(2, 2) + >>> a[0, 0] = 1 + >>> a[1, 1] = 1 + >>> a + [[1, 0], [0, 1]] + """ + if isinstance(index, tuple) and any(isinstance(i, slice) for i in index): + value, eindices, slice_offsets = self._get_slice_data_for_array_assignment(index, value) + for i in eindices: + other_i = [ind - j for ind, j in zip(i, slice_offsets) if j is not None] + other_value = value[other_i] + complete_index = self._parse_index(i) + if other_value != 0: + self._sparse_array[complete_index] = other_value + elif complete_index in self._sparse_array: + self._sparse_array.pop(complete_index) + else: + index = self._parse_index(index) + value = _sympify(value) + if value == 0 and index in self._sparse_array: + self._sparse_array.pop(index) + else: + self._sparse_array[index] = value + + def as_immutable(self): + return ImmutableSparseNDimArray(self) + + @property + def free_symbols(self): + return {i for j in self._sparse_array.values() for i in j.free_symbols} diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/tests/test_array_comprehension.py b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/tests/test_array_comprehension.py new file mode 100644 index 0000000000000000000000000000000000000000..019743edf678de5a9aa8f71ac626cca40a32dc1d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/tests/test_array_comprehension.py @@ -0,0 +1,81 @@ +from sympy.tensor.array.array_comprehension import ArrayComprehension, ArrayComprehensionMap +from sympy.tensor.array import ImmutableDenseNDimArray +from sympy.abc import i, j, k, l +from sympy.testing.pytest import raises, warns +from sympy.utilities.exceptions import SymPyDeprecationWarning +from sympy.matrices import Matrix + + +def test_array_comprehension(): + a = ArrayComprehension(i*j, (i, 1, 3), (j, 2, 4)) + b = ArrayComprehension(i, (i, 1, j+1)) + c = ArrayComprehension(i+j+k+l, (i, 1, 2), (j, 1, 3), (k, 1, 4), (l, 1, 5)) + d = ArrayComprehension(k, (i, 1, 5)) + e = ArrayComprehension(i, (j, k+1, k+5)) + assert a.doit().tolist() == [[2, 3, 4], [4, 6, 8], [6, 9, 12]] + assert a.shape == (3, 3) + assert a.is_shape_numeric == True + assert a.tolist() == [[2, 3, 4], [4, 6, 8], [6, 9, 12]] + assert a.tomatrix() == Matrix([ + [2, 3, 4], + [4, 6, 8], + [6, 9, 12]]) + assert len(a) == 9 + assert isinstance(b.doit(), ArrayComprehension) + assert isinstance(a.doit(), ImmutableDenseNDimArray) + assert b.subs(j, 3) == ArrayComprehension(i, (i, 1, 4)) + assert b.free_symbols == {j} + assert b.shape == (j + 1,) + assert b.rank() == 1 + assert b.is_shape_numeric == False + assert c.free_symbols == set() + assert c.function == i + j + k + l + assert c.limits == ((i, 1, 2), (j, 1, 3), (k, 1, 4), (l, 1, 5)) + assert c.doit().tolist() == [[[[4, 5, 6, 7, 8], [5, 6, 7, 8, 9], [6, 7, 8, 9, 10], [7, 8, 9, 10, 11]], + [[5, 6, 7, 8, 9], [6, 7, 8, 9, 10], [7, 8, 9, 10, 11], [8, 9, 10, 11, 12]], + [[6, 7, 8, 9, 10], [7, 8, 9, 10, 11], [8, 9, 10, 11, 12], [9, 10, 11, 12, 13]]], + [[[5, 6, 7, 8, 9], [6, 7, 8, 9, 10], [7, 8, 9, 10, 11], [8, 9, 10, 11, 12]], + [[6, 7, 8, 9, 10], [7, 8, 9, 10, 11], [8, 9, 10, 11, 12], [9, 10, 11, 12, 13]], + [[7, 8, 9, 10, 11], [8, 9, 10, 11, 12], [9, 10, 11, 12, 13], [10, 11, 12, 13, 14]]]] + assert c.free_symbols == set() + assert c.variables == [i, j, k, l] + assert c.bound_symbols == [i, j, k, l] + assert d.doit().tolist() == [k, k, k, k, k] + assert len(e) == 5 + raises(TypeError, lambda: 
ArrayComprehension(i*j, (i, 1, 3), (j, 2, [1, 3, 2]))) + raises(ValueError, lambda: ArrayComprehension(i*j, (i, 1, 3), (j, 2, 1))) + raises(ValueError, lambda: ArrayComprehension(i*j, (i, 1, 3), (j, 2, j+1))) + raises(ValueError, lambda: len(ArrayComprehension(i*j, (i, 1, 3), (j, 2, j+4)))) + raises(TypeError, lambda: ArrayComprehension(i*j, (i, 0, i + 1.5), (j, 0, 2))) + raises(ValueError, lambda: b.tolist()) + raises(ValueError, lambda: b.tomatrix()) + raises(ValueError, lambda: c.tomatrix()) + +def test_arraycomprehensionmap(): + a = ArrayComprehensionMap(lambda i: i+1, (i, 1, 5)) + assert a.doit().tolist() == [2, 3, 4, 5, 6] + assert a.shape == (5,) + assert a.is_shape_numeric + assert a.tolist() == [2, 3, 4, 5, 6] + assert len(a) == 5 + assert isinstance(a.doit(), ImmutableDenseNDimArray) + expr = ArrayComprehensionMap(lambda i: i+1, (i, 1, k)) + assert expr.doit() == expr + assert expr.subs(k, 4) == ArrayComprehensionMap(lambda i: i+1, (i, 1, 4)) + assert expr.subs(k, 4).doit() == ImmutableDenseNDimArray([2, 3, 4, 5]) + b = ArrayComprehensionMap(lambda i: i+1, (i, 1, 2), (i, 1, 3), (i, 1, 4), (i, 1, 5)) + assert b.doit().tolist() == [[[[2, 3, 4, 5, 6], [3, 5, 7, 9, 11], [4, 7, 10, 13, 16], [5, 9, 13, 17, 21]], + [[3, 5, 7, 9, 11], [5, 9, 13, 17, 21], [7, 13, 19, 25, 31], [9, 17, 25, 33, 41]], + [[4, 7, 10, 13, 16], [7, 13, 19, 25, 31], [10, 19, 28, 37, 46], [13, 25, 37, 49, 61]]], + [[[3, 5, 7, 9, 11], [5, 9, 13, 17, 21], [7, 13, 19, 25, 31], [9, 17, 25, 33, 41]], + [[5, 9, 13, 17, 21], [9, 17, 25, 33, 41], [13, 25, 37, 49, 61], [17, 33, 49, 65, 81]], + [[7, 13, 19, 25, 31], [13, 25, 37, 49, 61], [19, 37, 55, 73, 91], [25, 49, 73, 97, 121]]]] + + # tests about lambda expression + assert ArrayComprehensionMap(lambda: 3, (i, 1, 5)).doit().tolist() == [3, 3, 3, 3, 3] + assert ArrayComprehensionMap(lambda i: i+1, (i, 1, 5)).doit().tolist() == [2, 3, 4, 5, 6] + raises(ValueError, lambda: ArrayComprehensionMap(i*j, (i, 1, 3), (j, 2, 4))) + # The use of a function here triggers a deprecation warning from sympify() + with warns(SymPyDeprecationWarning, test_stacklevel=False): + a = ArrayComprehensionMap(lambda i, j: i+j, (i, 1, 5)) + raises(ValueError, lambda: a.doit()) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/tests/test_array_derivatives.py b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/tests/test_array_derivatives.py new file mode 100644 index 0000000000000000000000000000000000000000..cc220c0d33bad82be2c05a7b52f0e1ec32f310d4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/tests/test_array_derivatives.py @@ -0,0 +1,52 @@ +from sympy.core.symbol import symbols +from sympy.matrices.dense import Matrix +from sympy.matrices.expressions.matexpr import MatrixSymbol +from sympy.tensor.array.ndim_array import NDimArray +from sympy.matrices.common import MatrixCommon +from sympy.tensor.array.array_derivatives import ArrayDerivative + +x, y, z, t = symbols("x y z t") + +m = Matrix([[x, y], [z, t]]) + +M = MatrixSymbol("M", 3, 2) +N = MatrixSymbol("N", 4, 3) + + +def test_array_derivative_construction(): + + d = ArrayDerivative(x, m, evaluate=False) + assert d.shape == (2, 2) + expr = d.doit() + assert isinstance(expr, MatrixCommon) + assert expr.shape == (2, 2) + + d = ArrayDerivative(m, m, evaluate=False) + assert d.shape == (2, 2, 2, 2) + expr = d.doit() + assert isinstance(expr, NDimArray) + assert expr.shape == (2, 2, 2, 2) + + d = ArrayDerivative(m, x, evaluate=False) + assert d.shape == (2, 2) + expr = d.doit() + 
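# The assertions below check the layout convention that d(expr)/d(var) + # carries the derivative (variable) indices first, i.e. its shape is + # var.shape + expr.shape (repeated n times for a (var, n) argument). +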
assert isinstance(expr, MatrixCommon) + assert expr.shape == (2, 2) + + d = ArrayDerivative(M, N, evaluate=False) + assert d.shape == (4, 3, 3, 2) + expr = d.doit() + assert isinstance(expr, ArrayDerivative) + assert expr.shape == (4, 3, 3, 2) + + d = ArrayDerivative(M, (N, 2), evaluate=False) + assert d.shape == (4, 3, 4, 3, 3, 2) + expr = d.doit() + assert isinstance(expr, ArrayDerivative) + assert expr.shape == (4, 3, 4, 3, 3, 2) + + d = ArrayDerivative(M.as_explicit(), (N.as_explicit(), 2), evaluate=False) + assert d.doit().shape == (4, 3, 4, 3, 3, 2) + expr = d.doit() + assert isinstance(expr, ArrayDerivative) + assert expr.shape == (4, 3, 4, 3, 3, 2) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/tests/test_arrayop.py b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/tests/test_arrayop.py new file mode 100644 index 0000000000000000000000000000000000000000..de56e81e0064f1e303a7a58e41932d15f2d0b41e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/tests/test_arrayop.py @@ -0,0 +1,361 @@ +import itertools +import random + +from sympy.combinatorics import Permutation +from sympy.combinatorics.permutations import _af_invert +from sympy.testing.pytest import raises + +from sympy.core.function import diff +from sympy.core.symbol import symbols +from sympy.functions.elementary.complexes import (adjoint, conjugate, transpose) +from sympy.functions.elementary.exponential import (exp, log) +from sympy.functions.elementary.trigonometric import (cos, sin) +from sympy.tensor.array import Array, ImmutableDenseNDimArray, ImmutableSparseNDimArray, MutableSparseNDimArray + +from sympy.tensor.array.arrayop import tensorproduct, tensorcontraction, derive_by_array, permutedims, Flatten, \ + tensordiagonal + + +def test_import_NDimArray(): + from sympy.tensor.array import NDimArray + del NDimArray + + +def test_tensorproduct(): + x,y,z,t = symbols('x y z t') + from sympy.abc import a,b,c,d + assert tensorproduct() == 1 + assert tensorproduct([x]) == Array([x]) + assert tensorproduct([x], [y]) == Array([[x*y]]) + assert tensorproduct([x], [y], [z]) == Array([[[x*y*z]]]) + assert tensorproduct([x], [y], [z], [t]) == Array([[[[x*y*z*t]]]]) + + assert tensorproduct(x) == x + assert tensorproduct(x, y) == x*y + assert tensorproduct(x, y, z) == x*y*z + assert tensorproduct(x, y, z, t) == x*y*z*t + + for ArrayType in [ImmutableDenseNDimArray, ImmutableSparseNDimArray]: + A = ArrayType([x, y]) + B = ArrayType([1, 2, 3]) + C = ArrayType([a, b, c, d]) + + assert tensorproduct(A, B, C) == ArrayType([[[a*x, b*x, c*x, d*x], [2*a*x, 2*b*x, 2*c*x, 2*d*x], [3*a*x, 3*b*x, 3*c*x, 3*d*x]], + [[a*y, b*y, c*y, d*y], [2*a*y, 2*b*y, 2*c*y, 2*d*y], [3*a*y, 3*b*y, 3*c*y, 3*d*y]]]) + + assert tensorproduct([x, y], [1, 2, 3]) == tensorproduct(A, B) + + assert tensorproduct(A, 2) == ArrayType([2*x, 2*y]) + assert tensorproduct(A, [2]) == ArrayType([[2*x], [2*y]]) + assert tensorproduct([2], A) == ArrayType([[2*x, 2*y]]) + assert tensorproduct(a, A) == ArrayType([a*x, a*y]) + assert tensorproduct(a, A, B) == ArrayType([[a*x, 2*a*x, 3*a*x], [a*y, 2*a*y, 3*a*y]]) + assert tensorproduct(A, B, a) == ArrayType([[a*x, 2*a*x, 3*a*x], [a*y, 2*a*y, 3*a*y]]) + assert tensorproduct(B, a, A) == ArrayType([[a*x, a*y], [2*a*x, 2*a*y], [3*a*x, 3*a*y]]) + + # tests for large scale sparse array + for SparseArrayType in [ImmutableSparseNDimArray, MutableSparseNDimArray]: + a = SparseArrayType({1:2, 3:4},(1000, 2000)) + b = SparseArrayType({1:2, 3:4},(1000, 2000)) + assert 
tensorproduct(a, b) == ImmutableSparseNDimArray({2000001: 4, 2000003: 8, 6000001: 8, 6000003: 16}, (1000, 2000, 1000, 2000)) + + +def test_tensorcontraction(): + from sympy.abc import a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x + B = Array(range(18), (2, 3, 3)) + assert tensorcontraction(B, (1, 2)) == Array([12, 39]) + C1 = Array([a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x], (2, 3, 2, 2)) + + assert tensorcontraction(C1, (0, 2)) == Array([[a + o, b + p], [e + s, f + t], [i + w, j + x]]) + assert tensorcontraction(C1, (0, 2, 3)) == Array([a + p, e + t, i + x]) + assert tensorcontraction(C1, (2, 3)) == Array([[a + d, e + h, i + l], [m + p, q + t, u + x]]) + + +def test_derivative_by_array(): + from sympy.abc import i, j, t, x, y, z + + bexpr = x*y**2*exp(z)*log(t) + sexpr = sin(bexpr) + cexpr = cos(bexpr) + + a = Array([sexpr]) + + assert derive_by_array(sexpr, t) == x*y**2*exp(z)*cos(x*y**2*exp(z)*log(t))/t + assert derive_by_array(sexpr, [x, y, z]) == Array([bexpr/x*cexpr, 2*y*bexpr/y**2*cexpr, bexpr*cexpr]) + assert derive_by_array(a, [x, y, z]) == Array([[bexpr/x*cexpr], [2*y*bexpr/y**2*cexpr], [bexpr*cexpr]]) + + assert derive_by_array(sexpr, [[x, y], [z, t]]) == Array([[bexpr/x*cexpr, 2*y*bexpr/y**2*cexpr], [bexpr*cexpr, bexpr/log(t)/t*cexpr]]) + assert derive_by_array(a, [[x, y], [z, t]]) == Array([[[bexpr/x*cexpr], [2*y*bexpr/y**2*cexpr]], [[bexpr*cexpr], [bexpr/log(t)/t*cexpr]]]) + assert derive_by_array([[x, y], [z, t]], [x, y]) == Array([[[1, 0], [0, 0]], [[0, 1], [0, 0]]]) + assert derive_by_array([[x, y], [z, t]], [[x, y], [z, t]]) == Array([[[[1, 0], [0, 0]], [[0, 1], [0, 0]]], + [[[0, 0], [1, 0]], [[0, 0], [0, 1]]]]) + + assert diff(sexpr, t) == x*y**2*exp(z)*cos(x*y**2*exp(z)*log(t))/t + assert diff(sexpr, Array([x, y, z])) == Array([bexpr/x*cexpr, 2*y*bexpr/y**2*cexpr, bexpr*cexpr]) + assert diff(a, Array([x, y, z])) == Array([[bexpr/x*cexpr], [2*y*bexpr/y**2*cexpr], [bexpr*cexpr]]) + + assert diff(sexpr, Array([[x, y], [z, t]])) == Array([[bexpr/x*cexpr, 2*y*bexpr/y**2*cexpr], [bexpr*cexpr, bexpr/log(t)/t*cexpr]]) + assert diff(a, Array([[x, y], [z, t]])) == Array([[[bexpr/x*cexpr], [2*y*bexpr/y**2*cexpr]], [[bexpr*cexpr], [bexpr/log(t)/t*cexpr]]]) + assert diff(Array([[x, y], [z, t]]), Array([x, y])) == Array([[[1, 0], [0, 0]], [[0, 1], [0, 0]]]) + assert diff(Array([[x, y], [z, t]]), Array([[x, y], [z, t]])) == Array([[[[1, 0], [0, 0]], [[0, 1], [0, 0]]], + [[[0, 0], [1, 0]], [[0, 0], [0, 1]]]]) + + # test for large scale sparse array + for SparseArrayType in [ImmutableSparseNDimArray, MutableSparseNDimArray]: + b = MutableSparseNDimArray({0:i, 1:j}, (10000, 20000)) + assert derive_by_array(b, i) == ImmutableSparseNDimArray({0: 1}, (10000, 20000)) + assert derive_by_array(b, (i, j)) == ImmutableSparseNDimArray({0: 1, 200000001: 1}, (2, 10000, 20000)) + + #https://github.com/sympy/sympy/issues/20655 + U = Array([x, y, z]) + E = 2 + assert derive_by_array(E, U) == ImmutableDenseNDimArray([0, 0, 0]) + + +def test_issue_emerged_while_discussing_10972(): + ua = Array([-1,0]) + Fa = Array([[0, 1], [-1, 0]]) + po = tensorproduct(Fa, ua, Fa, ua) + assert tensorcontraction(po, (1, 2), (4, 5)) == Array([[0, 0], [0, 1]]) + + sa = symbols('a0:144') + po = Array(sa, [2, 2, 3, 3, 2, 2]) + assert tensorcontraction(po, (0, 1), (2, 3), (4, 5)) == sa[0] + sa[108] + sa[111] + sa[124] + sa[127] + sa[140] + sa[143] + sa[16] + sa[19] + sa[3] + sa[32] + sa[35] + assert tensorcontraction(po, (0, 1, 4, 5), (2, 3)) == sa[0] + sa[111] + sa[127] + sa[143] + sa[16] + 
sa[32] + assert tensorcontraction(po, (0, 1), (4, 5)) == Array([[sa[0] + sa[108] + sa[111] + sa[3], sa[112] + sa[115] + sa[4] + sa[7], + sa[11] + sa[116] + sa[119] + sa[8]], [sa[12] + sa[120] + sa[123] + sa[15], + sa[124] + sa[127] + sa[16] + sa[19], sa[128] + sa[131] + sa[20] + sa[23]], + [sa[132] + sa[135] + sa[24] + sa[27], sa[136] + sa[139] + sa[28] + sa[31], + sa[140] + sa[143] + sa[32] + sa[35]]]) + assert tensorcontraction(po, (0, 1), (2, 3)) == Array([[sa[0] + sa[108] + sa[124] + sa[140] + sa[16] + sa[32], sa[1] + sa[109] + sa[125] + sa[141] + sa[17] + sa[33]], + [sa[110] + sa[126] + sa[142] + sa[18] + sa[2] + sa[34], sa[111] + sa[127] + sa[143] + sa[19] + sa[3] + sa[35]]]) + + +def test_array_permutedims(): + sa = symbols('a0:144') + + for ArrayType in [ImmutableDenseNDimArray, ImmutableSparseNDimArray]: + m1 = ArrayType(sa[:6], (2, 3)) + assert permutedims(m1, (1, 0)) == transpose(m1) + assert m1.tomatrix().T == permutedims(m1, (1, 0)).tomatrix() + + assert m1.tomatrix().T == transpose(m1).tomatrix() + assert m1.tomatrix().C == conjugate(m1).tomatrix() + assert m1.tomatrix().H == adjoint(m1).tomatrix() + + assert m1.tomatrix().T == m1.transpose().tomatrix() + assert m1.tomatrix().C == m1.conjugate().tomatrix() + assert m1.tomatrix().H == m1.adjoint().tomatrix() + + raises(ValueError, lambda: permutedims(m1, (0,))) + raises(ValueError, lambda: permutedims(m1, (0, 0))) + raises(ValueError, lambda: permutedims(m1, (1, 2, 0))) + + # Some tests with random arrays: + dims = 6 + shape = [random.randint(1,5) for i in range(dims)] + elems = [random.random() for i in range(tensorproduct(*shape))] + ra = ArrayType(elems, shape) + perm = list(range(dims)) + # Randomize the permutation: + random.shuffle(perm) + # Test inverse permutation: + assert permutedims(permutedims(ra, perm), _af_invert(perm)) == ra + # Test that permuted shape corresponds to action by `Permutation`: + assert permutedims(ra, perm).shape == tuple(Permutation(perm)(shape)) + + z = ArrayType.zeros(4,5,6,7) + + assert permutedims(z, (2, 3, 1, 0)).shape == (6, 7, 5, 4) + assert permutedims(z, [2, 3, 1, 0]).shape == (6, 7, 5, 4) + assert permutedims(z, Permutation([2, 3, 1, 0])).shape == (6, 7, 5, 4) + + po = ArrayType(sa, [2, 2, 3, 3, 2, 2]) + + raises(ValueError, lambda: permutedims(po, (1, 1))) + raises(ValueError, lambda: po.transpose()) + raises(ValueError, lambda: po.adjoint()) + + assert permutedims(po, reversed(range(po.rank()))) == ArrayType( + [[[[[[sa[0], sa[72]], [sa[36], sa[108]]], [[sa[12], sa[84]], [sa[48], sa[120]]], [[sa[24], + sa[96]], [sa[60], sa[132]]]], + [[[sa[4], sa[76]], [sa[40], sa[112]]], [[sa[16], + sa[88]], [sa[52], sa[124]]], + [[sa[28], sa[100]], [sa[64], sa[136]]]], + [[[sa[8], + sa[80]], [sa[44], sa[116]]], [[sa[20], sa[92]], [sa[56], sa[128]]], [[sa[32], + sa[104]], [sa[68], sa[140]]]]], + [[[[sa[2], sa[74]], [sa[38], sa[110]]], [[sa[14], + sa[86]], [sa[50], sa[122]]], [[sa[26], sa[98]], [sa[62], sa[134]]]], + [[[sa[6], + sa[78]], [sa[42], sa[114]]], [[sa[18], sa[90]], [sa[54], sa[126]]], [[sa[30], + sa[102]], [sa[66], sa[138]]]], + [[[sa[10], sa[82]], [sa[46], sa[118]]], [[sa[22], + sa[94]], [sa[58], sa[130]]], + [[sa[34], sa[106]], [sa[70], sa[142]]]]]], + [[[[[sa[1], + sa[73]], [sa[37], sa[109]]], [[sa[13], sa[85]], [sa[49], sa[121]]], [[sa[25], + sa[97]], [sa[61], sa[133]]]], + [[[sa[5], sa[77]], [sa[41], sa[113]]], [[sa[17], + sa[89]], [sa[53], sa[125]]], + [[sa[29], sa[101]], [sa[65], sa[137]]]], + [[[sa[9], + sa[81]], [sa[45], sa[117]]], [[sa[21], sa[93]], [sa[57], sa[129]]], [[sa[33], 
+ sa[105]], [sa[69], sa[141]]]]], + [[[[sa[3], sa[75]], [sa[39], sa[111]]], [[sa[15], + sa[87]], [sa[51], sa[123]]], [[sa[27], sa[99]], [sa[63], sa[135]]]], + [[[sa[7], + sa[79]], [sa[43], sa[115]]], [[sa[19], sa[91]], [sa[55], sa[127]]], [[sa[31], + sa[103]], [sa[67], sa[139]]]], + [[[sa[11], sa[83]], [sa[47], sa[119]]], [[sa[23], + sa[95]], [sa[59], sa[131]]], + [[sa[35], sa[107]], [sa[71], sa[143]]]]]]]) + + assert permutedims(po, (1, 0, 2, 3, 4, 5)) == ArrayType( + [[[[[[sa[0], sa[1]], [sa[2], sa[3]]], [[sa[4], sa[5]], [sa[6], sa[7]]], [[sa[8], sa[9]], [sa[10], + sa[11]]]], + [[[sa[12], sa[13]], [sa[14], sa[15]]], [[sa[16], sa[17]], [sa[18], + sa[19]]], [[sa[20], sa[21]], [sa[22], sa[23]]]], + [[[sa[24], sa[25]], [sa[26], + sa[27]]], [[sa[28], sa[29]], [sa[30], sa[31]]], [[sa[32], sa[33]], [sa[34], + sa[35]]]]], + [[[[sa[72], sa[73]], [sa[74], sa[75]]], [[sa[76], sa[77]], [sa[78], + sa[79]]], [[sa[80], sa[81]], [sa[82], sa[83]]]], + [[[sa[84], sa[85]], [sa[86], + sa[87]]], [[sa[88], sa[89]], [sa[90], sa[91]]], [[sa[92], sa[93]], [sa[94], + sa[95]]]], + [[[sa[96], sa[97]], [sa[98], sa[99]]], [[sa[100], sa[101]], [sa[102], + sa[103]]], + [[sa[104], sa[105]], [sa[106], sa[107]]]]]], [[[[[sa[36], sa[37]], [sa[38], + sa[39]]], + [[sa[40], sa[41]], [sa[42], sa[43]]], + [[sa[44], sa[45]], [sa[46], + sa[47]]]], + [[[sa[48], sa[49]], [sa[50], sa[51]]], + [[sa[52], sa[53]], [sa[54], + sa[55]]], + [[sa[56], sa[57]], [sa[58], sa[59]]]], + [[[sa[60], sa[61]], [sa[62], + sa[63]]], + [[sa[64], sa[65]], [sa[66], sa[67]]], + [[sa[68], sa[69]], [sa[70], + sa[71]]]]], [ + [[[sa[108], sa[109]], [sa[110], sa[111]]], + [[sa[112], sa[113]], [sa[114], + sa[115]]], + [[sa[116], sa[117]], [sa[118], sa[119]]]], + [[[sa[120], sa[121]], [sa[122], + sa[123]]], + [[sa[124], sa[125]], [sa[126], sa[127]]], + [[sa[128], sa[129]], [sa[130], + sa[131]]]], + [[[sa[132], sa[133]], [sa[134], sa[135]]], + [[sa[136], sa[137]], [sa[138], + sa[139]]], + [[sa[140], sa[141]], [sa[142], sa[143]]]]]]]) + + assert permutedims(po, (0, 2, 1, 4, 3, 5)) == ArrayType( + [[[[[[sa[0], sa[1]], [sa[4], sa[5]], [sa[8], sa[9]]], [[sa[2], sa[3]], [sa[6], sa[7]], [sa[10], + sa[11]]]], + [[[sa[36], sa[37]], [sa[40], sa[41]], [sa[44], sa[45]]], [[sa[38], + sa[39]], [sa[42], sa[43]], [sa[46], sa[47]]]]], + [[[[sa[12], sa[13]], [sa[16], + sa[17]], [sa[20], sa[21]]], [[sa[14], sa[15]], [sa[18], sa[19]], [sa[22], + sa[23]]]], + [[[sa[48], sa[49]], [sa[52], sa[53]], [sa[56], sa[57]]], [[sa[50], + sa[51]], [sa[54], sa[55]], [sa[58], sa[59]]]]], + [[[[sa[24], sa[25]], [sa[28], + sa[29]], [sa[32], sa[33]]], [[sa[26], sa[27]], [sa[30], sa[31]], [sa[34], + sa[35]]]], + [[[sa[60], sa[61]], [sa[64], sa[65]], [sa[68], sa[69]]], [[sa[62], + sa[63]], [sa[66], sa[67]], [sa[70], sa[71]]]]]], + [[[[[sa[72], sa[73]], [sa[76], + sa[77]], [sa[80], sa[81]]], [[sa[74], sa[75]], [sa[78], sa[79]], [sa[82], + sa[83]]]], + [[[sa[108], sa[109]], [sa[112], sa[113]], [sa[116], sa[117]]], [[sa[110], + sa[111]], [sa[114], sa[115]], + [sa[118], sa[119]]]]], + [[[[sa[84], sa[85]], [sa[88], + sa[89]], [sa[92], sa[93]]], [[sa[86], sa[87]], [sa[90], sa[91]], [sa[94], + sa[95]]]], + [[[sa[120], sa[121]], [sa[124], sa[125]], [sa[128], sa[129]]], [[sa[122], + sa[123]], [sa[126], sa[127]], + [sa[130], sa[131]]]]], + [[[[sa[96], sa[97]], [sa[100], + sa[101]], [sa[104], sa[105]]], [[sa[98], sa[99]], [sa[102], sa[103]], [sa[106], + sa[107]]]], + [[[sa[132], sa[133]], [sa[136], sa[137]], [sa[140], sa[141]]], [[sa[134], + sa[135]], [sa[138], sa[139]], + [sa[142], sa[143]]]]]]]) + + po2 = 
po.reshape(4, 9, 2, 2) + assert po2 == ArrayType([[[[sa[0], sa[1]], [sa[2], sa[3]]], [[sa[4], sa[5]], [sa[6], sa[7]]], [[sa[8], sa[9]], [sa[10], sa[11]]], [[sa[12], sa[13]], [sa[14], sa[15]]], [[sa[16], sa[17]], [sa[18], sa[19]]], [[sa[20], sa[21]], [sa[22], sa[23]]], [[sa[24], sa[25]], [sa[26], sa[27]]], [[sa[28], sa[29]], [sa[30], sa[31]]], [[sa[32], sa[33]], [sa[34], sa[35]]]], [[[sa[36], sa[37]], [sa[38], sa[39]]], [[sa[40], sa[41]], [sa[42], sa[43]]], [[sa[44], sa[45]], [sa[46], sa[47]]], [[sa[48], sa[49]], [sa[50], sa[51]]], [[sa[52], sa[53]], [sa[54], sa[55]]], [[sa[56], sa[57]], [sa[58], sa[59]]], [[sa[60], sa[61]], [sa[62], sa[63]]], [[sa[64], sa[65]], [sa[66], sa[67]]], [[sa[68], sa[69]], [sa[70], sa[71]]]], [[[sa[72], sa[73]], [sa[74], sa[75]]], [[sa[76], sa[77]], [sa[78], sa[79]]], [[sa[80], sa[81]], [sa[82], sa[83]]], [[sa[84], sa[85]], [sa[86], sa[87]]], [[sa[88], sa[89]], [sa[90], sa[91]]], [[sa[92], sa[93]], [sa[94], sa[95]]], [[sa[96], sa[97]], [sa[98], sa[99]]], [[sa[100], sa[101]], [sa[102], sa[103]]], [[sa[104], sa[105]], [sa[106], sa[107]]]], [[[sa[108], sa[109]], [sa[110], sa[111]]], [[sa[112], sa[113]], [sa[114], sa[115]]], [[sa[116], sa[117]], [sa[118], sa[119]]], [[sa[120], sa[121]], [sa[122], sa[123]]], [[sa[124], sa[125]], [sa[126], sa[127]]], [[sa[128], sa[129]], [sa[130], sa[131]]], [[sa[132], sa[133]], [sa[134], sa[135]]], [[sa[136], sa[137]], [sa[138], sa[139]]], [[sa[140], sa[141]], [sa[142], sa[143]]]]]) + + assert permutedims(po2, (3, 2, 0, 1)) == ArrayType([[[[sa[0], sa[4], sa[8], sa[12], sa[16], sa[20], sa[24], sa[28], sa[32]], [sa[36], sa[40], sa[44], sa[48], sa[52], sa[56], sa[60], sa[64], sa[68]], [sa[72], sa[76], sa[80], sa[84], sa[88], sa[92], sa[96], sa[100], sa[104]], [sa[108], sa[112], sa[116], sa[120], sa[124], sa[128], sa[132], sa[136], sa[140]]], [[sa[2], sa[6], sa[10], sa[14], sa[18], sa[22], sa[26], sa[30], sa[34]], [sa[38], sa[42], sa[46], sa[50], sa[54], sa[58], sa[62], sa[66], sa[70]], [sa[74], sa[78], sa[82], sa[86], sa[90], sa[94], sa[98], sa[102], sa[106]], [sa[110], sa[114], sa[118], sa[122], sa[126], sa[130], sa[134], sa[138], sa[142]]]], [[[sa[1], sa[5], sa[9], sa[13], sa[17], sa[21], sa[25], sa[29], sa[33]], [sa[37], sa[41], sa[45], sa[49], sa[53], sa[57], sa[61], sa[65], sa[69]], [sa[73], sa[77], sa[81], sa[85], sa[89], sa[93], sa[97], sa[101], sa[105]], [sa[109], sa[113], sa[117], sa[121], sa[125], sa[129], sa[133], sa[137], sa[141]]], [[sa[3], sa[7], sa[11], sa[15], sa[19], sa[23], sa[27], sa[31], sa[35]], [sa[39], sa[43], sa[47], sa[51], sa[55], sa[59], sa[63], sa[67], sa[71]], [sa[75], sa[79], sa[83], sa[87], sa[91], sa[95], sa[99], sa[103], sa[107]], [sa[111], sa[115], sa[119], sa[123], sa[127], sa[131], sa[135], sa[139], sa[143]]]]]) + + # test for large scale sparse array + for SparseArrayType in [ImmutableSparseNDimArray, MutableSparseNDimArray]: + A = SparseArrayType({1:1, 10000:2}, (10000, 20000, 10000)) + assert permutedims(A, (0, 1, 2)) == A + assert permutedims(A, (1, 0, 2)) == SparseArrayType({1: 1, 100000000: 2}, (20000, 10000, 10000)) + B = SparseArrayType({1:1, 20000:2}, (10000, 20000)) + assert B.transpose() == SparseArrayType({10000: 1, 1: 2}, (20000, 10000)) + + +def test_permutedims_with_indices(): + A = Array(range(32)).reshape(2, 2, 2, 2, 2) + indices_new = list("abcde") + indices_old = list("ebdac") + new_A = permutedims(A, index_order_new=indices_new, index_order_old=indices_old) + for a, b, c, d, e in itertools.product(range(2), range(2), range(2), range(2), range(2)): + assert new_A[a, b, c, d, e] == 
A[e, b, d, a, c] + indices_old = list("cabed") + new_A = permutedims(A, index_order_new=indices_new, index_order_old=indices_old) + for a, b, c, d, e in itertools.product(range(2), range(2), range(2), range(2), range(2)): + assert new_A[a, b, c, d, e] == A[c, a, b, e, d] + raises(ValueError, lambda: permutedims(A, index_order_old=list("aacde"), index_order_new=list("abcde"))) + raises(ValueError, lambda: permutedims(A, index_order_old=list("abcde"), index_order_new=list("abcce"))) + raises(ValueError, lambda: permutedims(A, index_order_old=list("abcde"), index_order_new=list("abce"))) + raises(ValueError, lambda: permutedims(A, index_order_old=list("abce"), index_order_new=list("abce"))) + raises(ValueError, lambda: permutedims(A, [2, 1, 0, 3, 4], index_order_old=list("abcde"))) + raises(ValueError, lambda: permutedims(A, [2, 1, 0, 3, 4], index_order_new=list("abcde"))) + + +def test_flatten(): + from sympy.matrices.dense import Matrix + for ArrayType in [ImmutableDenseNDimArray, ImmutableSparseNDimArray, Matrix]: + A = ArrayType(range(24)).reshape(4, 6) + assert list(Flatten(A)) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] + + for i, v in enumerate(Flatten(A)): + assert i == v + + +def test_tensordiagonal(): + from sympy.matrices.dense import eye + expr = Array(range(9)).reshape(3, 3) + raises(ValueError, lambda: tensordiagonal(expr, [0], [1])) + raises(ValueError, lambda: tensordiagonal(expr, [0, 0])) + assert tensordiagonal(eye(3), [0, 1]) == Array([1, 1, 1]) + assert tensordiagonal(expr, [0, 1]) == Array([0, 4, 8]) + x, y, z = symbols("x y z") + expr2 = tensorproduct([x, y, z], expr) + assert tensordiagonal(expr2, [1, 2]) == Array([[0, 4*x, 8*x], [0, 4*y, 8*y], [0, 4*z, 8*z]]) + assert tensordiagonal(expr2, [0, 1]) == Array([[0, 3*y, 6*z], [x, 4*y, 7*z], [2*x, 5*y, 8*z]]) + assert tensordiagonal(expr2, [0, 1, 2]) == Array([0, 4*y, 8*z]) + # assert tensordiagonal(expr2, [0]) == permutedims(expr2, [1, 2, 0]) + # assert tensordiagonal(expr2, [1]) == permutedims(expr2, [0, 2, 1]) + # assert tensordiagonal(expr2, [2]) == expr2 + # assert tensordiagonal(expr2, [1], [2]) == expr2 + # assert tensordiagonal(expr2, [0], [1]) == permutedims(expr2, [2, 0, 1]) + + a, b, c, X, Y, Z = symbols("a b c X Y Z") + expr3 = tensorproduct([x, y, z], [1, 2, 3], [a, b, c], [X, Y, Z]) + assert tensordiagonal(expr3, [0, 1, 2, 3]) == Array([x*a*X, 2*y*b*Y, 3*z*c*Z]) + assert tensordiagonal(expr3, [0, 1], [2, 3]) == tensorproduct([x, 2*y, 3*z], [a*X, b*Y, c*Z]) + + # assert tensordiagonal(expr3, [0], [1, 2], [3]) == tensorproduct([x, y, z], [a, 2*b, 3*c], [X, Y, Z]) + assert tensordiagonal(tensordiagonal(expr3, [2, 3]), [0, 1]) == tensorproduct([a*X, b*Y, c*Z], [x, 2*y, 3*z]) + + raises(ValueError, lambda: tensordiagonal([[1, 2, 3], [4, 5, 6]], [0, 1])) + raises(ValueError, lambda: tensordiagonal(expr3.reshape(3, 3, 9), [1, 2])) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/tests/test_mutable_ndim_array.py b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/tests/test_mutable_ndim_array.py new file mode 100644 index 0000000000000000000000000000000000000000..9a232f399bbc0639d326217975fb0a12e645a984 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/array/tests/test_mutable_ndim_array.py @@ -0,0 +1,374 @@ +from copy import copy + +from sympy.tensor.array.dense_ndim_array import MutableDenseNDimArray +from sympy.core.function import diff +from sympy.core.numbers import Rational +from sympy.core.singleton import S 
+from sympy.core.symbol import Symbol +from sympy.core.sympify import sympify +from sympy.matrices import SparseMatrix +from sympy.matrices import Matrix +from sympy.tensor.array.sparse_ndim_array import MutableSparseNDimArray +from sympy.testing.pytest import raises + + +def test_ndim_array_initiation(): + arr_with_one_element = MutableDenseNDimArray([23]) + assert len(arr_with_one_element) == 1 + assert arr_with_one_element[0] == 23 + assert arr_with_one_element.rank() == 1 + raises(ValueError, lambda: arr_with_one_element[1]) + + arr_with_symbol_element = MutableDenseNDimArray([Symbol('x')]) + assert len(arr_with_symbol_element) == 1 + assert arr_with_symbol_element[0] == Symbol('x') + assert arr_with_symbol_element.rank() == 1 + + number5 = 5 + vector = MutableDenseNDimArray.zeros(number5) + assert len(vector) == number5 + assert vector.shape == (number5,) + assert vector.rank() == 1 + raises(ValueError, lambda: arr_with_one_element[5]) + + vector = MutableSparseNDimArray.zeros(number5) + assert len(vector) == number5 + assert vector.shape == (number5,) + assert vector._sparse_array == {} + assert vector.rank() == 1 + + n_dim_array = MutableDenseNDimArray(range(3**4), (3, 3, 3, 3,)) + assert len(n_dim_array) == 3 * 3 * 3 * 3 + assert n_dim_array.shape == (3, 3, 3, 3) + assert n_dim_array.rank() == 4 + raises(ValueError, lambda: n_dim_array[0, 0, 0, 3]) + raises(ValueError, lambda: n_dim_array[3, 0, 0, 0]) + raises(ValueError, lambda: n_dim_array[3**4]) + + array_shape = (3, 3, 3, 3) + sparse_array = MutableSparseNDimArray.zeros(*array_shape) + assert len(sparse_array._sparse_array) == 0 + assert len(sparse_array) == 3 * 3 * 3 * 3 + assert n_dim_array.shape == array_shape + assert n_dim_array.rank() == 4 + + one_dim_array = MutableDenseNDimArray([2, 3, 1]) + assert len(one_dim_array) == 3 + assert one_dim_array.shape == (3,) + assert one_dim_array.rank() == 1 + assert one_dim_array.tolist() == [2, 3, 1] + + shape = (3, 3) + array_with_many_args = MutableSparseNDimArray.zeros(*shape) + assert len(array_with_many_args) == 3 * 3 + assert array_with_many_args.shape == shape + assert array_with_many_args[0, 0] == 0 + assert array_with_many_args.rank() == 2 + + shape = (int(3), int(3)) + array_with_long_shape = MutableSparseNDimArray.zeros(*shape) + assert len(array_with_long_shape) == 3 * 3 + assert array_with_long_shape.shape == shape + assert array_with_long_shape[int(0), int(0)] == 0 + assert array_with_long_shape.rank() == 2 + + vector_with_long_shape = MutableDenseNDimArray(range(5), int(5)) + assert len(vector_with_long_shape) == 5 + assert vector_with_long_shape.shape == (int(5),) + assert vector_with_long_shape.rank() == 1 + raises(ValueError, lambda: vector_with_long_shape[int(5)]) + + from sympy.abc import x + for ArrayType in [MutableDenseNDimArray, MutableSparseNDimArray]: + rank_zero_array = ArrayType(x) + assert len(rank_zero_array) == 1 + assert rank_zero_array.shape == () + assert rank_zero_array.rank() == 0 + assert rank_zero_array[()] == x + raises(ValueError, lambda: rank_zero_array[0]) + +def test_sympify(): + from sympy.abc import x, y, z, t + arr = MutableDenseNDimArray([[x, y], [1, z*t]]) + arr_other = sympify(arr) + assert arr_other.shape == (2, 2) + assert arr_other == arr + + +def test_reshape(): + array = MutableDenseNDimArray(range(50), 50) + assert array.shape == (50,) + assert array.rank() == 1 + + array = array.reshape(5, 5, 2) + assert array.shape == (5, 5, 2) + assert array.rank() == 3 + assert len(array) == 50 + + +def test_iterator(): + array = 
MutableDenseNDimArray(range(4), (2, 2)) + assert array[0] == MutableDenseNDimArray([0, 1]) + assert array[1] == MutableDenseNDimArray([2, 3]) + + array = array.reshape(4) + j = 0 + for i in array: + assert i == j + j += 1 + + +def test_getitem(): + for ArrayType in [MutableDenseNDimArray, MutableSparseNDimArray]: + array = ArrayType(range(24)).reshape(2, 3, 4) + assert array.tolist() == [[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]], [[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]]] + assert array[0] == ArrayType([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]) + assert array[0, 0] == ArrayType([0, 1, 2, 3]) + value = 0 + for i in range(2): + for j in range(3): + for k in range(4): + assert array[i, j, k] == value + value += 1 + + raises(ValueError, lambda: array[3, 4, 5]) + raises(ValueError, lambda: array[3, 4, 5, 6]) + raises(ValueError, lambda: array[3, 4, 5, 3:4]) + + +def test_sparse(): + sparse_array = MutableSparseNDimArray([0, 0, 0, 1], (2, 2)) + assert len(sparse_array) == 2 * 2 + # only non-zero entries are actually stored in the backing dictionary: + assert len(sparse_array._sparse_array) == 1 + + assert sparse_array.tolist() == [[0, 0], [0, 1]] + + for i, j in zip(sparse_array, [[0, 0], [0, 1]]): + assert i == MutableSparseNDimArray(j) + + sparse_array[0, 0] = 123 + assert len(sparse_array._sparse_array) == 2 + assert sparse_array[0, 0] == 123 + assert sparse_array/0 == MutableSparseNDimArray([[S.ComplexInfinity, S.NaN], [S.NaN, S.ComplexInfinity]], (2, 2)) + + # when an element of a sparse array becomes zero it disappears from + # the dictionary + sparse_array[0, 0] = 0 + assert len(sparse_array._sparse_array) == 1 + sparse_array[1, 1] = 0 + assert len(sparse_array._sparse_array) == 0 + assert sparse_array[0, 0] == 0 + + # test for large scale sparse array + # equality test + a = MutableSparseNDimArray.zeros(100000, 200000) + b = MutableSparseNDimArray.zeros(100000, 200000) + assert a == b + a[1, 1] = 1 + b[1, 1] = 2 + assert a != b + + # __mul__ and __rmul__ + assert a * 3 == MutableSparseNDimArray({200001: 3}, (100000, 200000)) + assert 3 * a == MutableSparseNDimArray({200001: 3}, (100000, 200000)) + assert a * 0 == MutableSparseNDimArray({}, (100000, 200000)) + assert 0 * a == MutableSparseNDimArray({}, (100000, 200000)) + + # __truediv__ + assert a/3 == MutableSparseNDimArray({200001: Rational(1, 3)}, (100000, 200000)) + + # __neg__ + assert -a == MutableSparseNDimArray({200001: -1}, (100000, 200000)) + + +def test_calculation(): + + a = MutableDenseNDimArray([1]*9, (3, 3)) + b = MutableDenseNDimArray([9]*9, (3, 3)) + + c = a + b + for i in c: + assert i == MutableDenseNDimArray([10, 10, 10]) + + assert c == MutableDenseNDimArray([10]*9, (3, 3)) + assert c == MutableSparseNDimArray([10]*9, (3, 3)) + + c = b - a + for i in c: + assert i == MutableSparseNDimArray([8, 8, 8]) + + assert c == MutableDenseNDimArray([8]*9, (3, 3)) + assert c == MutableSparseNDimArray([8]*9, (3, 3)) + + +def test_ndim_array_converting(): + dense_array = MutableDenseNDimArray([1, 2, 3, 4], (2, 2)) + alist = dense_array.tolist() + + assert alist == [[1, 2], [3, 4]] + + matrix = dense_array.tomatrix() + assert isinstance(matrix, Matrix) + + for i in range(len(dense_array)): + assert dense_array[dense_array._get_tuple_index(i)] == matrix[i] + assert matrix.shape == dense_array.shape + + assert MutableDenseNDimArray(matrix) == dense_array + assert MutableDenseNDimArray(matrix.as_immutable()) == dense_array + assert MutableDenseNDimArray(matrix.as_mutable()) == dense_array + + sparse_array =
MutableSparseNDimArray([1, 2, 3, 4], (2, 2)) + alist = sparse_array.tolist() + + assert alist == [[1, 2], [3, 4]] + + matrix = sparse_array.tomatrix() + assert isinstance(matrix, SparseMatrix) + + for i in range(len(sparse_array)): + assert sparse_array[sparse_array._get_tuple_index(i)] == matrix[i] + assert matrix.shape == sparse_array.shape + + assert MutableSparseNDimArray(matrix) == sparse_array + assert MutableSparseNDimArray(matrix.as_immutable()) == sparse_array + assert MutableSparseNDimArray(matrix.as_mutable()) == sparse_array + + +def test_converting_functions(): + arr_list = [1, 2, 3, 4] + arr_matrix = Matrix(((1, 2), (3, 4))) + + # list + arr_ndim_array = MutableDenseNDimArray(arr_list, (2, 2)) + assert isinstance(arr_ndim_array, MutableDenseNDimArray) + assert arr_matrix.tolist() == arr_ndim_array.tolist() + + # Matrix + arr_ndim_array = MutableDenseNDimArray(arr_matrix) + assert isinstance(arr_ndim_array, MutableDenseNDimArray) + assert arr_matrix.tolist() == arr_ndim_array.tolist() + assert arr_matrix.shape == arr_ndim_array.shape + + +def test_equality(): + first_list = [1, 2, 3, 4] + second_list = [1, 2, 3, 4] + third_list = [4, 3, 2, 1] + assert first_list == second_list + assert first_list != third_list + + first_ndim_array = MutableDenseNDimArray(first_list, (2, 2)) + second_ndim_array = MutableDenseNDimArray(second_list, (2, 2)) + third_ndim_array = MutableDenseNDimArray(third_list, (2, 2)) + fourth_ndim_array = MutableDenseNDimArray(first_list, (2, 2)) + + assert first_ndim_array == second_ndim_array + second_ndim_array[0, 0] = 0 + assert first_ndim_array != second_ndim_array + assert first_ndim_array != third_ndim_array + assert first_ndim_array == fourth_ndim_array + + +def test_arithmetic(): + a = MutableDenseNDimArray([3 for i in range(9)], (3, 3)) + b = MutableDenseNDimArray([7 for i in range(9)], (3, 3)) + + c1 = a + b + c2 = b + a + assert c1 == c2 + + d1 = a - b + d2 = b - a + assert d1 == d2 * (-1) + + e1 = a * 5 + e2 = 5 * a + e3 = copy(a) + e3 *= 5 + assert e1 == e2 == e3 + + f1 = a / 5 + f2 = copy(a) + f2 /= 5 + assert f1 == f2 + assert f1[0, 0] == f1[0, 1] == f1[0, 2] == f1[1, 0] == f1[1, 1] == \ + f1[1, 2] == f1[2, 0] == f1[2, 1] == f1[2, 2] == Rational(3, 5) + + assert type(a) == type(b) == type(c1) == type(c2) == type(d1) == type(d2) \ + == type(e1) == type(e2) == type(e3) == type(f1) + + z0 = -a + assert z0 == MutableDenseNDimArray([-3 for i in range(9)], (3, 3)) + + +def test_higher_dimensions(): + m3 = MutableDenseNDimArray(range(10, 34), (2, 3, 4)) + + assert m3.tolist() == [[[10, 11, 12, 13], + [14, 15, 16, 17], + [18, 19, 20, 21]], + + [[22, 23, 24, 25], + [26, 27, 28, 29], + [30, 31, 32, 33]]] + + assert m3._get_tuple_index(0) == (0, 0, 0) + assert m3._get_tuple_index(1) == (0, 0, 1) + assert m3._get_tuple_index(4) == (0, 1, 0) + assert m3._get_tuple_index(12) == (1, 0, 0) + + assert str(m3) == '[[[10, 11, 12, 13], [14, 15, 16, 17], [18, 19, 20, 21]], [[22, 23, 24, 25], [26, 27, 28, 29], [30, 31, 32, 33]]]' + + m3_rebuilt = MutableDenseNDimArray([[[10, 11, 12, 13], [14, 15, 16, 17], [18, 19, 20, 21]], [[22, 23, 24, 25], [26, 27, 28, 29], [30, 31, 32, 33]]]) + assert m3 == m3_rebuilt + + m3_other = MutableDenseNDimArray([[[10, 11, 12, 13], [14, 15, 16, 17], [18, 19, 20, 21]], [[22, 23, 24, 25], [26, 27, 28, 29], [30, 31, 32, 33]]], (2, 3, 4)) + + assert m3 == m3_other + + +def test_slices(): + md = MutableDenseNDimArray(range(10, 34), (2, 3, 4)) + + assert md[:] == MutableDenseNDimArray(range(10, 34), (2, 3, 4)) + assert md[:, :,
0].tomatrix() == Matrix([[10, 14, 18], [22, 26, 30]]) + assert md[0, 1:2, :].tomatrix() == Matrix([[14, 15, 16, 17]]) + assert md[0, 1:3, :].tomatrix() == Matrix([[14, 15, 16, 17], [18, 19, 20, 21]]) + assert md[:, :, :] == md + + sd = MutableSparseNDimArray(range(10, 34), (2, 3, 4)) + assert sd == MutableSparseNDimArray(md) + + assert sd[:] == MutableSparseNDimArray(range(10, 34), (2, 3, 4)) + assert sd[:, :, 0].tomatrix() == Matrix([[10, 14, 18], [22, 26, 30]]) + assert sd[0, 1:2, :].tomatrix() == Matrix([[14, 15, 16, 17]]) + assert sd[0, 1:3, :].tomatrix() == Matrix([[14, 15, 16, 17], [18, 19, 20, 21]]) + assert sd[:, :, :] == sd + + +def test_slices_assign(): + a = MutableDenseNDimArray(range(12), shape=(4, 3)) + b = MutableSparseNDimArray(range(12), shape=(4, 3)) + + for i in [a, b]: + assert i.tolist() == [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]] + i[0, :] = [2, 2, 2] + assert i.tolist() == [[2, 2, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]] + i[0, 1:] = [8, 8] + assert i.tolist() == [[2, 8, 8], [3, 4, 5], [6, 7, 8], [9, 10, 11]] + i[1:3, 1] = [20, 44] + assert i.tolist() == [[2, 8, 8], [3, 20, 5], [6, 44, 8], [9, 10, 11]] + + +def test_diff(): + from sympy.abc import x, y, z + md = MutableDenseNDimArray([[x, y], [x*z, x*y*z]]) + assert md.diff(x) == MutableDenseNDimArray([[1, 0], [z, y*z]]) + assert diff(md, x) == MutableDenseNDimArray([[1, 0], [z, y*z]]) + + sd = MutableSparseNDimArray(md) + assert sd == MutableSparseNDimArray([x, y, x*z, x*y*z], (2, 2)) + assert sd.diff(x) == MutableSparseNDimArray([[1, 0], [z, y*z]]) + assert diff(sd, x) == MutableSparseNDimArray([[1, 0], [z, y*z]]) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/tensor/functions.py b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/functions.py new file mode 100644 index 0000000000000000000000000000000000000000..9434b2035ef17f66e80c032a3d513dd07dc2e79f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/functions.py @@ -0,0 +1,154 @@ +from collections.abc import Iterable +from functools import singledispatch + +from sympy.core.expr import Expr +from sympy.core.mul import Mul +from sympy.core.singleton import S +from sympy.core.sympify import sympify +from sympy.core.parameters import global_parameters + + +class TensorProduct(Expr): + """ + Generic class for tensor products. 
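+ + A minimal illustrative example (array arguments are combined with + ``tensorproduct``, as the constructor below shows): + + >>> from sympy.tensor.functions import TensorProduct + >>> from sympy import Array + >>> TensorProduct(Array([1, 2]), Array([1, 1])) + [[1, 1], [2, 2]]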
+ """ + is_number = False + + def __new__(cls, *args, **kwargs): + from sympy.tensor.array import NDimArray, tensorproduct, Array + from sympy.matrices.expressions.matexpr import MatrixExpr + from sympy.matrices.matrices import MatrixBase + from sympy.strategies import flatten + + args = [sympify(arg) for arg in args] + evaluate = kwargs.get("evaluate", global_parameters.evaluate) + + if not evaluate: + obj = Expr.__new__(cls, *args) + return obj + + arrays = [] + other = [] + scalar = S.One + for arg in args: + if isinstance(arg, (Iterable, MatrixBase, NDimArray)): + arrays.append(Array(arg)) + elif isinstance(arg, (MatrixExpr,)): + other.append(arg) + else: + scalar *= arg + + coeff = scalar*tensorproduct(*arrays) + if len(other) == 0: + return coeff + if coeff != 1: + newargs = [coeff] + other + else: + newargs = other + obj = Expr.__new__(cls, *newargs, **kwargs) + return flatten(obj) + + def rank(self): + return len(self.shape) + + def _get_args_shapes(self): + from sympy.tensor.array import Array + return [i.shape if hasattr(i, "shape") else Array(i).shape for i in self.args] + + @property + def shape(self): + shape_list = self._get_args_shapes() + return sum(shape_list, ()) + + def __getitem__(self, index): + index = iter(index) + return Mul.fromiter( + arg.__getitem__(tuple(next(index) for i in shp)) + for arg, shp in zip(self.args, self._get_args_shapes()) + ) + + +@singledispatch +def shape(expr): + """ + Return the shape of the *expr* as a tuple. *expr* should represent + suitable object such as matrix or array. + + Parameters + ========== + + expr : SymPy object having ``MatrixKind`` or ``ArrayKind``. + + Raises + ====== + + NoShapeError : Raised when object with wrong kind is passed. + + Examples + ======== + + This function returns the shape of any object representing matrix or array. + + >>> from sympy import shape, Array, ImmutableDenseMatrix, Integral + >>> from sympy.abc import x + >>> A = Array([1, 2]) + >>> shape(A) + (2,) + >>> shape(Integral(A, x)) + (2,) + >>> M = ImmutableDenseMatrix([1, 2]) + >>> shape(M) + (2, 1) + >>> shape(Integral(M, x)) + (2, 1) + + You can support new type by dispatching. + + >>> from sympy import Expr + >>> class NewExpr(Expr): + ... pass + >>> @shape.register(NewExpr) + ... def _(expr): + ... return shape(expr.args[0]) + >>> shape(NewExpr(M)) + (2, 1) + + If unsuitable expression is passed, ``NoShapeError()`` will be raised. + + >>> shape(Integral(x, x)) + Traceback (most recent call last): + ... + sympy.tensor.functions.NoShapeError: shape() called on non-array object: Integral(x, x) + + Notes + ===== + + Array-like classes (such as ``Matrix`` or ``NDimArray``) has ``shape`` + property which returns its shape, but it cannot be used for non-array + classes containing array. This function returns the shape of any + registered object representing array. + + """ + if hasattr(expr, "shape"): + return expr.shape + raise NoShapeError( + "%s does not have shape, or its type is not registered to shape()." % expr) + + +class NoShapeError(Exception): + """ + Raised when ``shape()`` is called on non-array object. + + This error can be imported from ``sympy.tensor.functions``. + + Examples + ======== + + >>> from sympy import shape + >>> from sympy.abc import x + >>> shape(x) + Traceback (most recent call last): + ... 
+ sympy.tensor.functions.NoShapeError: shape() called on non-array object: x + """ + pass diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/tensor/index_methods.py b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/index_methods.py new file mode 100644 index 0000000000000000000000000000000000000000..12f707b60b4ad0bcadc35a222d9abe0cc5e033fc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/index_methods.py @@ -0,0 +1,469 @@ +"""Module with functions operating on IndexedBase, Indexed and Idx objects + + - Check shape conformance + - Determine indices in resulting expression + + etc. + + Methods in this module could be implemented by calling methods on Expr + objects instead. When things stabilize this could be a useful + refactoring. +""" + +from functools import reduce + +from sympy.core.function import Function +from sympy.functions import exp, Piecewise +from sympy.tensor.indexed import Idx, Indexed +from sympy.utilities import sift + +from collections import OrderedDict + +class IndexConformanceException(Exception): + pass + +def _unique_and_repeated(inds): + """ + Returns the unique and repeated indices. Note from the examples below + that the order of the indices is maintained as given in the input. + + Examples + ======== + + >>> from sympy.tensor.index_methods import _unique_and_repeated + >>> _unique_and_repeated([2, 3, 1, 3, 0, 4, 0]) + ([2, 1, 4], [3, 0]) + """ + uniq = OrderedDict() + for i in inds: + if i in uniq: + uniq[i] = 0 + else: + uniq[i] = 1 + return sift(uniq, lambda x: uniq[x], binary=True) + +def _remove_repeated(inds): + """ + Removes repeated objects from sequences + + Returns a set of the unique objects and a tuple of all that have been + removed. + + Examples + ======== + + >>> from sympy.tensor.index_methods import _remove_repeated + >>> l1 = [1, 2, 3, 2] + >>> _remove_repeated(l1) + ({1, 3}, (2,)) + + """ + u, r = _unique_and_repeated(inds) + return set(u), tuple(r) + + +def _get_indices_Mul(expr, return_dummies=False): + """Determine the outer indices of a Mul object. + + Examples + ======== + + >>> from sympy.tensor.index_methods import _get_indices_Mul + >>> from sympy.tensor.indexed import IndexedBase, Idx + >>> i, j, k = map(Idx, ['i', 'j', 'k']) + >>> x = IndexedBase('x') + >>> y = IndexedBase('y') + >>> _get_indices_Mul(x[i, k]*y[j, k]) + ({i, j}, {}) + >>> _get_indices_Mul(x[i, k]*y[j, k], return_dummies=True) + ({i, j}, {}, (k,)) + + """ + + inds = list(map(get_indices, expr.args)) + inds, syms = list(zip(*inds)) + + inds = list(map(list, inds)) + inds = list(reduce(lambda x, y: x + y, inds)) + inds, dummies = _remove_repeated(inds) + + symmetry = {} + for s in syms: + for pair in s: + if pair in symmetry: + symmetry[pair] *= s[pair] + else: + symmetry[pair] = s[pair] + + if return_dummies: + return inds, symmetry, dummies + else: + return inds, symmetry + + +def _get_indices_Pow(expr): + """Determine outer indices of a power or an exponential. + + A power is considered a universal function, so that the indices of a Pow + are just the collection of indices present in the expression. This may be + viewed as a bit inconsistent in the special case: + + x[i]**2 = x[i]*x[i] (1) + + The above expression could have been interpreted as the contraction of x[i] + with itself, but we choose instead to interpret it as a function + + lambda y: y**2 + + applied to each element of x (a universal function in numpy terms).
In + order to allow an interpretation of (1) as a contraction, we need + contravariant and covariant Idx subclasses. (FIXME: this is not yet + implemented) + + Expressions in the base or exponent are subject to contraction as usual, + but an index that is present in the exponent will not be considered + contractable with its own base. Note, however, that indices in the same + exponent can be contracted with each other. + + Examples + ======== + + >>> from sympy.tensor.index_methods import _get_indices_Pow + >>> from sympy import Pow, exp, IndexedBase, Idx + >>> A = IndexedBase('A') + >>> x = IndexedBase('x') + >>> i, j, k = map(Idx, ['i', 'j', 'k']) + >>> _get_indices_Pow(exp(A[i, j]*x[j])) + ({i}, {}) + >>> _get_indices_Pow(Pow(x[i], x[i])) + ({i}, {}) + >>> _get_indices_Pow(Pow(A[i, j]*x[j], x[i])) + ({i}, {}) + + """ + base, exp = expr.as_base_exp() + binds, bsyms = get_indices(base) + einds, esyms = get_indices(exp) + + inds = binds | einds + + # FIXME: symmetries from powers need to check special cases, else nothing + symmetries = {} + + return inds, symmetries + + +def _get_indices_Add(expr): + """Determine outer indices of an Add object. + + In a sum, each term must have the same set of outer indices. A valid + expression could be + + x(i)*y(j) - x(j)*y(i) + + But we do not allow expressions like: + + x(i)*y(j) - z(j)*z(j) + + FIXME: Add support for Numpy broadcasting + + Examples + ======== + + >>> from sympy.tensor.index_methods import _get_indices_Add + >>> from sympy.tensor.indexed import IndexedBase, Idx + >>> i, j, k = map(Idx, ['i', 'j', 'k']) + >>> x = IndexedBase('x') + >>> y = IndexedBase('y') + >>> _get_indices_Add(x[i] + x[k]*y[i, k]) + ({i}, {}) + + """ + + inds = list(map(get_indices, expr.args)) + inds, syms = list(zip(*inds)) + + # allow broadcast of scalars + non_scalars = [x for x in inds if x != set()] + if not non_scalars: + return set(), {} + + if not all(x == non_scalars[0] for x in non_scalars[1:]): + raise IndexConformanceException("Indices are not consistent: %s" % expr) + if not reduce(lambda x, y: x != y or y, syms): + symmetries = syms[0] + else: + # FIXME: search for symmetries + symmetries = {} + + return non_scalars[0], symmetries + + +def get_indices(expr): + """Determine the outer indices of expression ``expr`` + + By *outer* we mean indices that are not summation indices. Returns a set + and a dict. The set contains outer indices and the dict contains + information about index symmetries. + + Examples + ======== + + >>> from sympy.tensor.index_methods import get_indices + >>> from sympy import symbols + >>> from sympy.tensor import IndexedBase + >>> x, y, A = map(IndexedBase, ['x', 'y', 'A']) + >>> i, j, a, z = symbols('i j a z', integer=True) + + The indices of the total expression are determined. Repeated indices imply + a summation; for instance, the trace of a matrix A: + + >>> get_indices(A[i, i]) + (set(), {}) + + In the case of many terms, the terms are required to have identical + outer indices. Otherwise an IndexConformanceException is raised. + + >>> get_indices(x[i] + A[i, j]*y[j]) + ({i}, {}) + + :Exceptions: + + An IndexConformanceException means that the terms are not compatible, e.g. + + >>> get_indices(x[i] + y[j]) #doctest: +SKIP + (...) + IndexConformanceException: Indices are not consistent: x(i) + y(j) + + .. warning:: + The concept of *outer* indices applies recursively, starting on the deepest + level.
This implies that dummies inside parentheses are assumed to be + summed first, so that the following expression is handled gracefully: + + >>> get_indices((x[i] + A[i, j]*y[j])*x[j]) + ({i, j}, {}) + + This is correct and may appear convenient, but you need to be careful + with this as SymPy will happily .expand() the product, if requested. The + resulting expression would mix the outer ``j`` with the dummies inside + the parentheses, which makes it a different expression. To be on the + safe side, it is best to avoid such ambiguities by using unique indices + for all contractions that should be held separate. + + """ + # We call ourselves recursively to determine indices of sub expressions. + + # break recursion + if isinstance(expr, Indexed): + c = expr.indices + inds, dummies = _remove_repeated(c) + return inds, {} + elif expr is None: + return set(), {} + elif isinstance(expr, Idx): + return {expr}, {} + elif expr.is_Atom: + return set(), {} + + + # recurse via specialized functions + else: + if expr.is_Mul: + return _get_indices_Mul(expr) + elif expr.is_Add: + return _get_indices_Add(expr) + elif expr.is_Pow or isinstance(expr, exp): + return _get_indices_Pow(expr) + + elif isinstance(expr, Piecewise): + # FIXME: No support for Piecewise yet + return set(), {} + elif isinstance(expr, Function): + # Support ufunc-like behaviour by returning indices from arguments. + # Functions do not interpret repeated indices across arguments + # as summation + ind0 = set() + for arg in expr.args: + ind, sym = get_indices(arg) + ind0 |= ind + return ind0, sym + + # this test is expensive, so it should be at the end + elif not expr.has(Indexed): + return set(), {} + raise NotImplementedError( + "FIXME: No specialized handling of type %s" % type(expr)) + + +def get_contraction_structure(expr): + """Determine dummy indices of ``expr`` and describe its structure + + By *dummy* we mean indices that are summation indices. + + The structure of the expression is determined and described as follows: + + 1) A conforming summation of Indexed objects is described with a dict where + the keys are summation indices and the corresponding values are sets + containing all terms for which the summation applies. All Add objects + in the SymPy expression tree are described like this. + + 2) For all nodes in the SymPy expression tree that are *not* of type Add, the + following applies: + + If a node discovers contractions in one of its arguments, the node + itself will be stored as a key in the dict. For that key, the + corresponding value is a list of dicts, each of which is the result of a + recursive call to get_contraction_structure(). The list contains only + dicts for the non-trivial deeper contractions, omitting dicts with None + as the one and only key. + + .. Note:: The presence of expressions among the dictionary keys indicates + multiple levels of index contractions. A nested dict displays nested + contractions and may itself contain dicts from a deeper level. In + practical calculations the summation in the deepest nested level must be + calculated first so that the outer expression can access the resulting + indexed object.
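+ For example, in ``x[i, i]*y[j, j]`` below, each factor carries its own + internal contraction, so those contractions appear in nested dicts + stored under the product itself as a key.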
+ + Examples + ======== + + >>> from sympy.tensor.index_methods import get_contraction_structure + >>> from sympy import default_sort_key + >>> from sympy.tensor import IndexedBase, Idx + >>> x, y, A = map(IndexedBase, ['x', 'y', 'A']) + >>> i, j, k, l = map(Idx, ['i', 'j', 'k', 'l']) + >>> get_contraction_structure(x[i]*y[i] + A[j, j]) + {(i,): {x[i]*y[i]}, (j,): {A[j, j]}} + >>> get_contraction_structure(x[i]*y[j]) + {None: {x[i]*y[j]}} + + A multiplication of contracted factors results in nested dicts representing + the internal contractions. + + >>> d = get_contraction_structure(x[i, i]*y[j, j]) + >>> sorted(d.keys(), key=default_sort_key) + [None, x[i, i]*y[j, j]] + + In this case, the product has no contractions: + + >>> d[None] + {x[i, i]*y[j, j]} + + Factors are contracted "first": + + >>> sorted(d[x[i, i]*y[j, j]], key=default_sort_key) + [{(i,): {x[i, i]}}, {(j,): {y[j, j]}}] + + A parenthesized Add object is also returned as a nested dictionary. The + term containing the parentheses is a Mul with a contraction among the + arguments, so it will be found as a key in the result. It stores the + dictionary resulting from a recursive call on the Add expression. + + >>> d = get_contraction_structure(x[i]*(y[i] + A[i, j]*x[j])) + >>> sorted(d.keys(), key=default_sort_key) + [(A[i, j]*x[j] + y[i])*x[i], (i,)] + >>> d[(i,)] + {(A[i, j]*x[j] + y[i])*x[i]} + >>> d[x[i]*(A[i, j]*x[j] + y[i])] + [{None: {y[i]}, (j,): {A[i, j]*x[j]}}] + + Powers with contractions in either base or exponent will also be found as + keys in the dictionary, mapping to a list of results from recursive calls: + + >>> d = get_contraction_structure(A[j, j]**A[i, i]) + >>> d[None] + {A[j, j]**A[i, i]} + >>> nested_contractions = d[A[j, j]**A[i, i]] + >>> nested_contractions[0] + {(j,): {A[j, j]}} + >>> nested_contractions[1] + {(i,): {A[i, i]}} + + The description of the contraction structure may appear complicated when + represented with a string in the above examples, but it is easy to iterate + over: + + >>> from sympy import Expr + >>> for key in d: + ... if isinstance(key, Expr): + ... continue + ... for term in d[key]: + ... if term in d: + ... # treat deepest contraction first + ... pass + ... # treat outermost contractions here + + """ + + # We call ourselves recursively to inspect subexpressions. + + if isinstance(expr, Indexed): + junk, key = _remove_repeated(expr.indices) + return {key or None: {expr}} + elif expr.is_Atom: + return {None: {expr}} + elif expr.is_Mul: + junk, junk, key = _get_indices_Mul(expr, return_dummies=True) + result = {key or None: {expr}} + # recurse on every factor + nested = [] + for fac in expr.args: + facd = get_contraction_structure(fac) + if not (None in facd and len(facd) == 1): + nested.append(facd) + if nested: + result[expr] = nested + return result + elif expr.is_Pow or isinstance(expr, exp): + # recurse in base and exp separately. If either has internal + # contractions, we must include ourselves as a key in the returned dict + b, e = expr.as_base_exp() + dbase = get_contraction_structure(b) + dexp = get_contraction_structure(e) + + dicts = [] + for d in dbase, dexp: + if not (None in d and len(d) == 1): + dicts.append(d) + result = {None: {expr}} + if dicts: + result[expr] = dicts + return result + elif expr.is_Add: + # Note: we just collect all terms with identical summation indices. We + # do nothing to identify equivalent terms here, as this would require + # substitutions or pattern matching in expressions of unknown + # complexity.
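+ # For example, in x[i]*y[i] + z[i]*w[i] both terms produce the dummy-index + # key (i,), so they are collected into a single set under that key, while + # x[i]*y[i] + A[j, j] keeps two separate keys, (i,) and (j,), as in the + # doctests above.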
+ result = {} + for term in expr.args: + # recurse on every term + d = get_contraction_structure(term) + for key in d: + if key in result: + result[key] |= d[key] + else: + result[key] = d[key] + return result + + elif isinstance(expr, Piecewise): + # FIXME: No support for Piecewise yet + return {None: expr} + elif isinstance(expr, Function): + # Collect non-trivial contraction structures in each argument + # We do not report repeated indices in separate arguments as a + # contraction + deeplist = [] + for arg in expr.args: + deep = get_contraction_structure(arg) + if not (None in deep and len(deep) == 1): + deeplist.append(deep) + d = {None: {expr}} + if deeplist: + d[expr] = deeplist + return d + + # this test is expensive, so it should be at the end + elif not expr.has(Indexed): + return {None: {expr}} + raise NotImplementedError( + "FIXME: No specialized handling of type %s" % type(expr)) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/tensor/indexed.py b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/indexed.py new file mode 100644 index 0000000000000000000000000000000000000000..4a050a24b241bb19604462fdf1bd17e43c83f354 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/indexed.py @@ -0,0 +1,797 @@ +r"""Module that defines indexed objects. + +The classes ``IndexedBase``, ``Indexed``, and ``Idx`` represent a +matrix element ``M[i, j]`` as in the following diagram:: + + 1) The Indexed class represents the entire indexed object. + | + ___|___ + ' ' + M[i, j] + / \__\______ + | | + | | + | 2) The Idx class represents indices; each Idx can + | optionally contain information about its range. + | + 3) IndexedBase represents the 'stem' of an indexed object, here `M`. + The stem used by itself is usually taken to represent the entire + array. + +There can be any number of indices on an Indexed object. No +transformation properties are implemented in these Base objects, but +implicit contraction of repeated indices is supported. + +Note that the support for complicated (i.e. non-atomic) integer +expressions as indices is limited. (This should be improved in +future releases.) + +Examples +======== + +To express the above matrix element example you would write: + +>>> from sympy import symbols, IndexedBase, Idx +>>> M = IndexedBase('M') +>>> i, j = symbols('i j', cls=Idx) +>>> M[i, j] +M[i, j] + +Repeated indices in a product imply a summation, so to express a +matrix-vector product in terms of Indexed objects: + +>>> x = IndexedBase('x') +>>> M[i, j]*x[j] +M[i, j]*x[j] + +If the indexed objects will be converted to component-based arrays, e.g. +with the code printers or the autowrap framework, you also need to provide +(symbolic or numerical) dimensions.
This can be done by passing an +optional shape parameter to IndexedBase upon construction: + +>>> dim1, dim2 = symbols('dim1 dim2', integer=True) +>>> A = IndexedBase('A', shape=(dim1, 2*dim1, dim2)) +>>> A.shape +(dim1, 2*dim1, dim2) +>>> A[i, j, 3].shape +(dim1, 2*dim1, dim2) + +If an IndexedBase object has no shape information, it is assumed that the +array is as large as the ranges of its indices: + +>>> n, m = symbols('n m', integer=True) +>>> i = Idx('i', m) +>>> j = Idx('j', n) +>>> M[i, j].shape +(m, n) +>>> M[i, j].ranges +[(0, m - 1), (0, n - 1)] + +The above can be compared with the following: + +>>> A[i, 2, j].shape +(dim1, 2*dim1, dim2) +>>> A[i, 2, j].ranges +[(0, m - 1), None, (0, n - 1)] + +To analyze the structure of indexed expressions, you can use the methods +get_indices() and get_contraction_structure(): + +>>> from sympy.tensor import get_indices, get_contraction_structure +>>> get_indices(A[i, j, j]) +({i}, {}) +>>> get_contraction_structure(A[i, j, j]) +{(j,): {A[i, j, j]}} + +See the appropriate docstrings for a detailed explanation of the output. +""" + +# TODO: (some ideas for improvement) +# +# o test and guarantee numpy compatibility +# - implement full support for broadcasting +# - strided arrays +# +# o more functions to analyze indexed expressions +# - identify standard constructs, e.g matrix-vector product in a subexpression +# +# o functions to generate component based arrays (numpy and sympy.Matrix) +# - generate a single array directly from Indexed +# - convert simple sub-expressions +# +# o sophisticated indexing (possibly in subclasses to preserve simplicity) +# - Idx with range smaller than dimension of Indexed +# - Idx with stepsize != 1 +# - Idx with step determined by function call +from collections.abc import Iterable + +from sympy.core.numbers import Number +from sympy.core.assumptions import StdFactKB +from sympy.core import Expr, Tuple, sympify, S +from sympy.core.symbol import _filter_assumptions, Symbol +from sympy.core.logic import fuzzy_bool, fuzzy_not +from sympy.core.sympify import _sympify +from sympy.functions.special.tensor_functions import KroneckerDelta +from sympy.multipledispatch import dispatch +from sympy.utilities.iterables import is_sequence, NotIterable +from sympy.utilities.misc import filldedent + + +class IndexException(Exception): + pass + + +class Indexed(Expr): + """Represents a mathematical object with indices. + + >>> from sympy import Indexed, IndexedBase, Idx, symbols + >>> i, j = symbols('i j', cls=Idx) + >>> Indexed('A', i, j) + A[i, j] + + It is recommended that ``Indexed`` objects be created by indexing ``IndexedBase``: + ``IndexedBase('A')[i, j]`` instead of ``Indexed(IndexedBase('A'), i, j)``. + + >>> A = IndexedBase('A') + >>> a_ij = A[i, j] # Prefer this, + >>> b_ij = Indexed(A, i, j) # over this. + >>> a_ij == b_ij + True + + """ + is_commutative = True + is_Indexed = True + is_symbol = True + is_Atom = True + + def __new__(cls, base, *args, **kw_args): + from sympy.tensor.array.ndim_array import NDimArray + from sympy.matrices.matrices import MatrixBase + + if not args: + raise IndexException("Indexed needs at least one index.") + if isinstance(base, (str, Symbol)): + base = IndexedBase(base) + elif not hasattr(base, '__getitem__') and not isinstance(base, IndexedBase): + raise TypeError(filldedent(""" + The base can only be replaced with a string, Symbol, + IndexedBase or an object with a method for getting + items (i.e. an object with a `__getitem__` method). 
+ """)) + args = list(map(sympify, args)) + if isinstance(base, (NDimArray, Iterable, Tuple, MatrixBase)) and all(i.is_number for i in args): + if len(args) == 1: + return base[args[0]] + else: + return base[args] + + base = _sympify(base) + + obj = Expr.__new__(cls, base, *args, **kw_args) + + try: + IndexedBase._set_assumptions(obj, base.assumptions0) + except AttributeError: + IndexedBase._set_assumptions(obj, {}) + return obj + + def _hashable_content(self): + return super()._hashable_content() + tuple(sorted(self.assumptions0.items())) + + @property + def name(self): + return str(self) + + @property + def _diff_wrt(self): + """Allow derivatives with respect to an ``Indexed`` object.""" + return True + + def _eval_derivative(self, wrt): + from sympy.tensor.array.ndim_array import NDimArray + + if isinstance(wrt, Indexed) and wrt.base == self.base: + if len(self.indices) != len(wrt.indices): + msg = "Different # of indices: d({!s})/d({!s})".format(self, + wrt) + raise IndexException(msg) + result = S.One + for index1, index2 in zip(self.indices, wrt.indices): + result *= KroneckerDelta(index1, index2) + return result + elif isinstance(self.base, NDimArray): + from sympy.tensor.array import derive_by_array + return Indexed(derive_by_array(self.base, wrt), *self.args[1:]) + else: + if Tuple(self.indices).has(wrt): + return S.NaN + return S.Zero + + @property + def assumptions0(self): + return {k: v for k, v in self._assumptions.items() if v is not None} + + @property + def base(self): + """Returns the ``IndexedBase`` of the ``Indexed`` object. + + Examples + ======== + + >>> from sympy import Indexed, IndexedBase, Idx, symbols + >>> i, j = symbols('i j', cls=Idx) + >>> Indexed('A', i, j).base + A + >>> B = IndexedBase('B') + >>> B == B[i, j].base + True + + """ + return self.args[0] + + @property + def indices(self): + """ + Returns the indices of the ``Indexed`` object. + + Examples + ======== + + >>> from sympy import Indexed, Idx, symbols + >>> i, j = symbols('i j', cls=Idx) + >>> Indexed('A', i, j).indices + (i, j) + + """ + return self.args[1:] + + @property + def rank(self): + """ + Returns the rank of the ``Indexed`` object. + + Examples + ======== + + >>> from sympy import Indexed, Idx, symbols + >>> i, j, k, l, m = symbols('i:m', cls=Idx) + >>> Indexed('A', i, j).rank + 2 + >>> q = Indexed('A', i, j, k, l, m) + >>> q.rank + 5 + >>> q.rank == len(q.indices) + True + + """ + return len(self.args) - 1 + + @property + def shape(self): + """Returns a list with dimensions of each index. + + Dimensions is a property of the array, not of the indices. Still, if + the ``IndexedBase`` does not define a shape attribute, it is assumed + that the ranges of the indices correspond to the shape of the array. 
+ + >>> from sympy import IndexedBase, Idx, symbols + >>> n, m = symbols('n m', integer=True) + >>> i = Idx('i', m) + >>> j = Idx('j', m) + >>> A = IndexedBase('A', shape=(n, n)) + >>> B = IndexedBase('B') + >>> A[i, j].shape + (n, n) + >>> B[i, j].shape + (m, m) + """ + + if self.base.shape: + return self.base.shape + sizes = [] + for i in self.indices: + upper = getattr(i, 'upper', None) + lower = getattr(i, 'lower', None) + if None in (upper, lower): + raise IndexException(filldedent(""" + Range is not defined for all indices in: %s""" % self)) + try: + size = upper - lower + 1 + except TypeError: + raise IndexException(filldedent(""" + Shape cannot be inferred from Idx with + undefined range: %s""" % self)) + sizes.append(size) + return Tuple(*sizes) + + @property + def ranges(self): + """Returns a list of tuples with lower and upper range of each index. + + If an index does not define the data members upper and lower, the + corresponding slot in the list contains ``None`` instead of a tuple. + + Examples + ======== + + >>> from sympy import Indexed, Idx, symbols + >>> Indexed('A', Idx('i', 2), Idx('j', 4), Idx('k', 8)).ranges + [(0, 1), (0, 3), (0, 7)] + >>> Indexed('A', Idx('i', 3), Idx('j', 3), Idx('k', 3)).ranges + [(0, 2), (0, 2), (0, 2)] + >>> x, y, z = symbols('x y z', integer=True) + >>> Indexed('A', x, y, z).ranges + [None, None, None] + + """ + ranges = [] + sentinel = object() + for i in self.indices: + upper = getattr(i, 'upper', sentinel) + lower = getattr(i, 'lower', sentinel) + if sentinel not in (upper, lower): + ranges.append((lower, upper)) + else: + ranges.append(None) + return ranges + + def _sympystr(self, p): + indices = list(map(p.doprint, self.indices)) + return "%s[%s]" % (p.doprint(self.base), ", ".join(indices)) + + @property + def free_symbols(self): + base_free_symbols = self.base.free_symbols + indices_free_symbols = { + fs for i in self.indices for fs in i.free_symbols} + if base_free_symbols: + return {self} | base_free_symbols | indices_free_symbols + else: + return indices_free_symbols + + @property + def expr_free_symbols(self): + from sympy.utilities.exceptions import sympy_deprecation_warning + sympy_deprecation_warning(""" + The expr_free_symbols property is deprecated. Use free_symbols to get + the free symbols of an expression. + """, + deprecated_since_version="1.9", + active_deprecations_target="deprecated-expr-free-symbols") + + return {self} + + + class IndexedBase(Expr, NotIterable): + """Represent the base or stem of an indexed object + + The IndexedBase class represents an array that contains elements. The main purpose + of this class is to allow the convenient creation of objects of the Indexed + class. The __getitem__ method of IndexedBase returns an instance of + Indexed. Alone, without indices, the IndexedBase class can be used as a + notation for e.g. matrix equations, resembling what you could do with the + Symbol class. But the IndexedBase class adds functionality that is not + available for Symbol instances: + + - An IndexedBase object can optionally store shape information. This can + be used to check array conformance and conditions for numpy + broadcasting. (TODO) + - An IndexedBase object implements syntactic sugar that allows easy symbolic + representation of array operations, using implicit summation of + repeated indices. + - The IndexedBase object symbolizes a mathematical structure equivalent + to arrays, and is recognized as such for code generation and automatic + compilation and wrapping (a brief sketch of this follows below).
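+ + As a quick sketch of the last point, the C code printer renders an + ``Indexed`` element as an array access (``ccode`` is the same printer + used in the ``offset`` examples further down; the names here are + illustrative only): + + >>> from sympy import IndexedBase, Idx, symbols + >>> from sympy.printing import ccode + >>> m = symbols('m', integer=True) + >>> x = IndexedBase('x') + >>> i = Idx('i', m) + >>> ccode(x[i]) + 'x[i]'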
+ + >>> from sympy.tensor import IndexedBase, Idx + >>> from sympy import symbols + >>> A = IndexedBase('A'); A + A + >>> type(A) + <class 'sympy.tensor.indexed.IndexedBase'> + + When an IndexedBase object receives indices, it returns an array with named + axes, represented by an Indexed object: + + >>> i, j = symbols('i j', integer=True) + >>> A[i, j, 2] + A[i, j, 2] + >>> type(A[i, j, 2]) + <class 'sympy.tensor.indexed.Indexed'> + + The IndexedBase constructor takes an optional shape argument. If given, + it overrides any shape information in the indices. (But not the index + ranges!) + + >>> m, n, o, p = symbols('m n o p', integer=True) + >>> i = Idx('i', m) + >>> j = Idx('j', n) + >>> A[i, j].shape + (m, n) + >>> B = IndexedBase('B', shape=(o, p)) + >>> B[i, j].shape + (o, p) + + Assumptions can be specified with keyword arguments the same way as for Symbol: + + >>> A_real = IndexedBase('A', real=True) + >>> A_real.is_real + True + >>> A != A_real + True + + Assumptions can also be inherited if a Symbol is used to initialize the IndexedBase: + + >>> I = symbols('I', integer=True) + >>> C_inherit = IndexedBase(I) + >>> C_explicit = IndexedBase('I', integer=True) + >>> C_inherit == C_explicit + True + """ + is_commutative = True + is_symbol = True + is_Atom = True + + @staticmethod + def _set_assumptions(obj, assumptions): + """Set assumptions on obj, making sure to apply consistent values.""" + tmp_asm_copy = assumptions.copy() + is_commutative = fuzzy_bool(assumptions.get('commutative', True)) + assumptions['commutative'] = is_commutative + obj._assumptions = StdFactKB(assumptions) + obj._assumptions._generator = tmp_asm_copy # Issue #8873 + + def __new__(cls, label, shape=None, *, offset=S.Zero, strides=None, **kw_args): + from sympy.matrices.matrices import MatrixBase + from sympy.tensor.array.ndim_array import NDimArray + + assumptions, kw_args = _filter_assumptions(kw_args) + if isinstance(label, str): + label = Symbol(label, **assumptions) + elif isinstance(label, Symbol): + assumptions = label._merge(assumptions) + elif isinstance(label, (MatrixBase, NDimArray)): + return label + elif isinstance(label, Iterable): + return _sympify(label) + else: + label = _sympify(label) + + if is_sequence(shape): + shape = Tuple(*shape) + elif shape is not None: + shape = Tuple(shape) + + if shape is not None: + obj = Expr.__new__(cls, label, shape) + else: + obj = Expr.__new__(cls, label) + obj._shape = shape + obj._offset = offset + obj._strides = strides + obj._name = str(label) + + IndexedBase._set_assumptions(obj, assumptions) + return obj + + @property + def name(self): + return self._name + + def _hashable_content(self): + return super()._hashable_content() + tuple(sorted(self.assumptions0.items())) + + @property + def assumptions0(self): + return {k: v for k, v in self._assumptions.items() if v is not None} + + def __getitem__(self, indices, **kw_args): + if is_sequence(indices): + # Special case needed because M[*my_tuple] is a syntax error. + if self.shape and len(self.shape) != len(indices): + raise IndexException("Rank mismatch.") + return Indexed(self, *indices, **kw_args) + else: + if self.shape and len(self.shape) != 1: + raise IndexException("Rank mismatch.") + return Indexed(self, indices, **kw_args) + + @property + def shape(self): + """Returns the shape of the ``IndexedBase`` object. + + Examples + ======== + + >>> from sympy import IndexedBase, Idx + >>> from sympy.abc import x, y + >>> IndexedBase('A', shape=(x, y)).shape + (x, y) + + Note: If the shape of the ``IndexedBase`` is specified, it will override + any shape information given by the indices.
+ + >>> A = IndexedBase('A', shape=(x, y)) + >>> B = IndexedBase('B') + >>> i = Idx('i', 2) + >>> j = Idx('j', 1) + >>> A[i, j].shape + (x, y) + >>> B[i, j].shape + (2, 1) + + """ + return self._shape + + @property + def strides(self): + """Returns the strided scheme for the ``IndexedBase`` object. + + Normally this is a tuple denoting the number of + steps to take in the respective dimension when traversing + an array. For code generation purposes strides='C' and + strides='F' can also be used. + + strides='C' means that the code printer unrolls + in row-major order, while 'F' means it unrolls in column-major + order. + + """ + + return self._strides + + @property + def offset(self): + """Returns the offset for the ``IndexedBase`` object. + + This is the value added to the resulting index when the + 2D Indexed object is unrolled to a 1D form. Used in code + generation. + + Examples + ======== + + >>> from sympy.printing import ccode + >>> from sympy.tensor import IndexedBase, Idx + >>> from sympy import symbols + >>> l, m, n, o = symbols('l m n o', integer=True) + >>> A = IndexedBase('A', strides=(l, m, n), offset=o) + >>> i, j, k = map(Idx, 'ijk') + >>> ccode(A[i, j, k]) + 'A[l*i + m*j + n*k + o]' + + """ + return self._offset + + @property + def label(self): + """Returns the label of the ``IndexedBase`` object. + + Examples + ======== + + >>> from sympy import IndexedBase + >>> from sympy.abc import x, y + >>> IndexedBase('A', shape=(x, y)).label + A + + """ + return self.args[0] + + def _sympystr(self, p): + return p.doprint(self.label) + + + class Idx(Expr): + """Represents an integer index as an ``Integer`` or integer expression. + + There are a number of ways to create an ``Idx`` object. The constructor + takes two arguments: + + ``label`` + An integer or a symbol that labels the index. + ``range`` + Optionally you can specify a range as either + + * ``Symbol`` or integer: This is interpreted as a dimension. Lower and + upper bounds are set to ``0`` and ``range - 1``, respectively. + * ``tuple``: The two elements are interpreted as the lower and upper + bounds of the range, respectively. + + Note: bounds of the range are assumed to be either integer or infinite (oo + and -oo are allowed to specify an unbounded range). If ``n`` is given as a + bound, then ``n.is_integer`` must not return false. + + For convenience, if the label is given as a string it is automatically + converted to an integer symbol. (Note: this conversion is not done for + range or dimension arguments.)
+ + Examples + ======== + + >>> from sympy import Idx, symbols, oo + >>> n, i, L, U = symbols('n i L U', integer=True) + + If a string is given for the label an integer ``Symbol`` is created and the + bounds are both ``None``: + + >>> idx = Idx('qwerty'); idx + qwerty + >>> idx.lower, idx.upper + (None, None) + + Both upper and lower bounds can be specified: + + >>> idx = Idx(i, (L, U)); idx + i + >>> idx.lower, idx.upper + (L, U) + + When only a single bound is given it is interpreted as the dimension + and the lower bound defaults to 0: + + >>> idx = Idx(i, n); idx.lower, idx.upper + (0, n - 1) + >>> idx = Idx(i, 4); idx.lower, idx.upper + (0, 3) + >>> idx = Idx(i, oo); idx.lower, idx.upper + (0, oo) + + """ + + is_integer = True + is_finite = True + is_real = True + is_symbol = True + is_Atom = True + _diff_wrt = True + + def __new__(cls, label, range=None, **kw_args): + + if isinstance(label, str): + label = Symbol(label, integer=True) + label, range = list(map(sympify, (label, range))) + + if label.is_Number: + if not label.is_integer: + raise TypeError("Index is not an integer number.") + return label + + if not label.is_integer: + raise TypeError("Idx object requires an integer label.") + + elif is_sequence(range): + if len(range) != 2: + raise ValueError(filldedent(""" + Idx range tuple must have length 2, but got %s""" % len(range))) + for bound in range: + if (bound.is_integer is False and bound is not S.Infinity + and bound is not S.NegativeInfinity): + raise TypeError("Idx object requires integer bounds.") + args = label, Tuple(*range) + elif isinstance(range, Expr): + if range is not S.Infinity and fuzzy_not(range.is_integer): + raise TypeError("Idx object requires an integer dimension.") + args = label, Tuple(0, range - 1) + elif range: + raise TypeError(filldedent(""" + The range must be an ordered iterable or + integer SymPy expression.""")) + else: + args = label, + + obj = Expr.__new__(cls, *args, **kw_args) + obj._assumptions["finite"] = True + obj._assumptions["real"] = True + return obj + + @property + def label(self): + """Returns the label (Integer or integer expression) of the Idx object. + + Examples + ======== + + >>> from sympy import Idx, Symbol + >>> x = Symbol('x', integer=True) + >>> Idx(x).label + x + >>> j = Symbol('j', integer=True) + >>> Idx(j).label + j + >>> Idx(j + 1).label + j + 1 + + """ + return self.args[0] + + @property + def lower(self): + """Returns the lower bound of the ``Idx``. + + Examples + ======== + + >>> from sympy import Idx + >>> Idx('j', 2).lower + 0 + >>> Idx('j', 5).lower + 0 + >>> Idx('j').lower is None + True + + """ + try: + return self.args[1][0] + except IndexError: + return + + @property + def upper(self): + """Returns the upper bound of the ``Idx``. 
+ + Examples + ======== + + >>> from sympy import Idx + >>> Idx('j', 2).upper + 1 + >>> Idx('j', 5).upper + 4 + >>> Idx('j').upper is None + True + + """ + try: + return self.args[1][1] + except IndexError: + return + + def _sympystr(self, p): + return p.doprint(self.label) + + @property + def name(self): + return self.label.name if self.label.is_Symbol else str(self.label) + + @property + def free_symbols(self): + return {self} + + + @dispatch(Idx, Idx) + def _eval_is_ge(lhs, rhs): # noqa:F811 + + other_upper = rhs if rhs.upper is None else rhs.upper + other_lower = rhs if rhs.lower is None else rhs.lower + + if lhs.lower is not None and (lhs.lower >= other_upper) == True: + return True + if lhs.upper is not None and (lhs.upper < other_lower) == True: + return False + return None + + + @dispatch(Idx, Number) # type:ignore + def _eval_is_ge(lhs, rhs): # noqa:F811 + + other_upper = rhs + other_lower = rhs + + if lhs.lower is not None and (lhs.lower >= other_upper) == True: + return True + if lhs.upper is not None and (lhs.upper < other_lower) == True: + return False + return None + + + @dispatch(Number, Idx) # type:ignore + def _eval_is_ge(lhs, rhs): # noqa:F811 + + other_upper = lhs + other_lower = lhs + + if rhs.upper is not None and (rhs.upper <= other_lower) == True: + return True + if rhs.lower is not None and (rhs.lower > other_upper) == True: + return False + return None diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/tensor/tensor.py b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..c917f4be9eb9043aa347828fc4daed766c59911d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/tensor.py @@ -0,0 +1,4863 @@ +""" +This module defines tensors with abstract index notation. + +The abstract index notation was first formalized by Penrose. + +Tensor indices are formal objects, with a tensor type; there is no +notion of index range; it is only possible to assign the dimension, +used to trace the Kronecker delta; the dimension can be a Symbol. + +The Einstein summation convention is used. +The covariant indices are indicated with a minus sign in front of the index. + +For instance the tensor ``t = p(a)*A(b,c)*q(-c)`` has the index ``c`` +contracted. + +A tensor expression ``t`` can be called; called with its +indices in sorted order it is equal to itself: +in the above example ``t(a, b) == t``; +one can call ``t`` with different indices; ``t(c, d) == p(c)*A(d,a)*q(-a)``. + +The contracted indices are dummy indices; internally they have no name, +the indices being represented by a graph-like structure. + +Tensors are put in canonical form using ``canon_bp``, which uses +the Butler-Portugal algorithm for canonicalization using the monoterm +symmetries of the tensors. + +If there is an (anti)symmetric metric, the indices can be raised and +lowered when the tensor is put in canonical form.
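+ +For instance, with an antisymmetric tensor ``A``, ``canon_bp`` sorts the +indices and keeps track of the sign (a small illustrative sketch; +``TensorHead`` and ``TensorSymmetry`` are defined further down in this +module): + +>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead, TensorSymmetry +>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') +>>> m0, m1 = tensor_indices('m0,m1', Lorentz) +>>> A = TensorHead('A', [Lorentz]*2, TensorSymmetry.fully_symmetric(-2)) +>>> A(m1, m0).canon_bp() +-A(m0, m1)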
+""" + +from __future__ import annotations +from typing import Any +from functools import reduce +from math import prod + +from abc import abstractmethod, ABC +from collections import defaultdict +import operator +import itertools +from sympy.core.numbers import (Integer, Rational) +from sympy.combinatorics import Permutation +from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, \ + bsgs_direct_product, canonicalize, riemann_bsgs +from sympy.core import Basic, Expr, sympify, Add, Mul, S +from sympy.core.containers import Tuple, Dict +from sympy.core.sorting import default_sort_key +from sympy.core.symbol import Symbol, symbols +from sympy.core.sympify import CantSympify, _sympify +from sympy.core.operations import AssocOp +from sympy.external.gmpy import SYMPY_INTS +from sympy.matrices import eye +from sympy.utilities.exceptions import (sympy_deprecation_warning, + SymPyDeprecationWarning, + ignore_warnings) +from sympy.utilities.decorator import memoize_property, deprecated +from sympy.utilities.iterables import sift + + +def deprecate_data(): + sympy_deprecation_warning( + """ + The data attribute of TensorIndexType is deprecated. Use The + replace_with_arrays() method instead. + """, + deprecated_since_version="1.4", + active_deprecations_target="deprecated-tensorindextype-attrs", + stacklevel=4, + ) + +def deprecate_fun_eval(): + sympy_deprecation_warning( + """ + The Tensor.fun_eval() method is deprecated. Use + Tensor.substitute_indices() instead. + """, + deprecated_since_version="1.5", + active_deprecations_target="deprecated-tensor-fun-eval", + stacklevel=4, + ) + + +def deprecate_call(): + sympy_deprecation_warning( + """ + Calling a tensor like Tensor(*indices) is deprecated. Use + Tensor.substitute_indices() instead. + """, + deprecated_since_version="1.5", + active_deprecations_target="deprecated-tensor-fun-eval", + stacklevel=4, + ) + + +class _IndexStructure(CantSympify): + """ + This class handles the indices (free and dummy ones). It contains the + algorithms to manage the dummy indices replacements and contractions of + free indices under multiplications of tensor expressions, as well as stuff + related to canonicalization sorting, getting the permutation of the + expression and so on. It also includes tools to get the ``TensorIndex`` + objects corresponding to the given index structure. + """ + + def __init__(self, free, dum, index_types, indices, canon_bp=False): + self.free = free + self.dum = dum + self.index_types = index_types + self.indices = indices + self._ext_rank = len(self.free) + 2*len(self.dum) + self.dum.sort(key=lambda x: x[0]) + + @staticmethod + def from_indices(*indices): + """ + Create a new ``_IndexStructure`` object from a list of ``indices``. + + Explanation + =========== + + ``indices`` ``TensorIndex`` objects, the indices. Contractions are + detected upon construction. 
+ + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, _IndexStructure + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> m0, m1, m2, m3 = tensor_indices('m0,m1,m2,m3', Lorentz) + >>> _IndexStructure.from_indices(m0, m1, -m1, m3) + _IndexStructure([(m0, 0), (m3, 3)], [(1, 2)], [Lorentz, Lorentz, Lorentz, Lorentz]) + """ + + free, dum = _IndexStructure._free_dum_from_indices(*indices) + index_types = [i.tensor_index_type for i in indices] + indices = _IndexStructure._replace_dummy_names(indices, free, dum) + return _IndexStructure(free, dum, index_types, indices) + + @staticmethod + def from_components_free_dum(components, free, dum): + index_types = [] + for component in components: + index_types.extend(component.index_types) + indices = _IndexStructure.generate_indices_from_free_dum_index_types(free, dum, index_types) + return _IndexStructure(free, dum, index_types, indices) + + @staticmethod + def _free_dum_from_indices(*indices): + """ + Convert ``indices`` into ``free``, ``dum`` for a single-component tensor. + + Explanation + =========== + + ``free`` list of tuples ``(index, pos)``, + where ``pos`` is the position of the index in + the list of indices formed by the component tensors + + ``dum`` list of tuples ``(pos_contr, pos_cov)`` + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, \ + _IndexStructure + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> m0, m1, m2, m3 = tensor_indices('m0,m1,m2,m3', Lorentz) + >>> _IndexStructure._free_dum_from_indices(m0, m1, -m1, m3) + ([(m0, 0), (m3, 3)], [(1, 2)]) + """ + n = len(indices) + if n == 1: + return [(indices[0], 0)], [] + + # find the positions of the free indices and of the dummy indices + free = [True]*len(indices) + index_dict = {} + dum = [] + for i, index in enumerate(indices): + name = index.name + typ = index.tensor_index_type + contr = index.is_up + if (name, typ) in index_dict: + # found a pair of dummy indices + is_contr, pos = index_dict[(name, typ)] + # check consistency and update free + if is_contr: + if contr: + raise ValueError('two equal contravariant indices in slots %d and %d' %(pos, i)) + else: + free[pos] = False + free[i] = False + else: + if contr: + free[pos] = False + free[i] = False + else: + raise ValueError('two equal covariant indices in slots %d and %d' %(pos, i)) + if contr: + dum.append((i, pos)) + else: + dum.append((pos, i)) + else: + index_dict[(name, typ)] = index.is_up, i + + free = [(index, i) for i, index in enumerate(indices) if free[i]] + free.sort() + return free, dum + + def get_indices(self): + """ + Get a list of indices, creating new tensor indices to complete dummy indices.
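+ + For instance, continuing the ``from_indices`` example above, the + contracted pair is completed with generated dummy indices (a small + sketch; the dummy names are generated from the index type's + ``dummy_name``): + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, _IndexStructure + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> m0, m1, m3 = tensor_indices('m0,m1,m3', Lorentz) + >>> _IndexStructure.from_indices(m0, m1, -m1, m3).get_indices() + [m0, L_0, -L_0, m3]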
+ """ + return self.indices[:] + + @staticmethod + def generate_indices_from_free_dum_index_types(free, dum, index_types): + indices = [None]*(len(free)+2*len(dum)) + for idx, pos in free: + indices[pos] = idx + + generate_dummy_name = _IndexStructure._get_generator_for_dummy_indices(free) + for pos1, pos2 in dum: + typ1 = index_types[pos1] + indname = generate_dummy_name(typ1) + indices[pos1] = TensorIndex(indname, typ1, True) + indices[pos2] = TensorIndex(indname, typ1, False) + + return _IndexStructure._replace_dummy_names(indices, free, dum) + + @staticmethod + def _get_generator_for_dummy_indices(free): + cdt = defaultdict(int) + # if the free indices have names with dummy_name, start with an + # index higher than those for the dummy indices + # to avoid name collisions + for indx, ipos in free: + if indx.name.split('_')[0] == indx.tensor_index_type.dummy_name: + cdt[indx.tensor_index_type] = max(cdt[indx.tensor_index_type], int(indx.name.split('_')[1]) + 1) + + def dummy_name_gen(tensor_index_type): + nd = str(cdt[tensor_index_type]) + cdt[tensor_index_type] += 1 + return tensor_index_type.dummy_name + '_' + nd + + return dummy_name_gen + + @staticmethod + def _replace_dummy_names(indices, free, dum): + dum.sort(key=lambda x: x[0]) + new_indices = list(indices) + assert len(indices) == len(free) + 2*len(dum) + generate_dummy_name = _IndexStructure._get_generator_for_dummy_indices(free) + for ipos1, ipos2 in dum: + typ1 = new_indices[ipos1].tensor_index_type + indname = generate_dummy_name(typ1) + new_indices[ipos1] = TensorIndex(indname, typ1, True) + new_indices[ipos2] = TensorIndex(indname, typ1, False) + return new_indices + + def get_free_indices(self) -> list[TensorIndex]: + """ + Get a list of free indices. + """ + # get sorted indices according to their position: + free = sorted(self.free, key=lambda x: x[1]) + return [i[0] for i in free] + + def __str__(self): + return "_IndexStructure({}, {}, {})".format(self.free, self.dum, self.index_types) + + def __repr__(self): + return self.__str__() + + def _get_sorted_free_indices_for_canon(self): + sorted_free = self.free[:] + sorted_free.sort(key=lambda x: x[0]) + return sorted_free + + def _get_sorted_dum_indices_for_canon(self): + return sorted(self.dum, key=lambda x: x[0]) + + def _get_lexicographically_sorted_index_types(self): + permutation = self.indices_canon_args()[0] + index_types = [None]*self._ext_rank + for i, it in enumerate(self.index_types): + index_types[permutation(i)] = it + return index_types + + def _get_lexicographically_sorted_indices(self): + permutation = self.indices_canon_args()[0] + indices = [None]*self._ext_rank + for i, it in enumerate(self.indices): + indices[permutation(i)] = it + return indices + + def perm2tensor(self, g, is_canon_bp=False): + """ + Returns a ``_IndexStructure`` instance corresponding to the permutation ``g``. 
+ + Explanation + =========== + + ``g`` permutation corresponding to the tensor in the representation + used in canonicalization + + ``is_canon_bp`` if True, then ``g`` is the permutation + corresponding to the canonical form of the tensor + """ + sorted_free = [i[0] for i in self._get_sorted_free_indices_for_canon()] + lex_index_types = self._get_lexicographically_sorted_index_types() + lex_indices = self._get_lexicographically_sorted_indices() + nfree = len(sorted_free) + rank = self._ext_rank + dum = [[None]*2 for i in range((rank - nfree)//2)] + free = [] + + index_types = [None]*rank + indices = [None]*rank + for i in range(rank): + gi = g[i] + index_types[i] = lex_index_types[gi] + indices[i] = lex_indices[gi] + if gi < nfree: + ind = sorted_free[gi] + assert index_types[i] == sorted_free[gi].tensor_index_type + free.append((ind, i)) + else: + j = gi - nfree + idum, cov = divmod(j, 2) + if cov: + dum[idum][1] = i + else: + dum[idum][0] = i + dum = [tuple(x) for x in dum] + + return _IndexStructure(free, dum, index_types, indices) + + def indices_canon_args(self): + """ + Returns ``(g, dummies, msym)``, the entries of ``canonicalize`` + (the remaining entry ``v`` is supplied separately by + ``components_canon_args``) + + See ``canonicalize`` in ``tensor_can.py`` in combinatorics module. + """ + # to be called after sorted_components + from sympy.combinatorics.permutations import _af_new + n = self._ext_rank + g = [None]*n + [n, n+1] + + # Converts the symmetry of the metric into msym from .canonicalize() + # method in the combinatorics module + def metric_symmetry_to_msym(metric): + if metric is None: + return None + sym = metric.symmetry + if sym == TensorSymmetry.fully_symmetric(2): + return 0 + if sym == TensorSymmetry.fully_symmetric(-2): + return 1 + return None + + # ordered indices: first the free indices, ordered by types + # then the dummy indices, ordered by types and contravariant before + # covariant + # g[position in tensor] = position in ordered indices + for i, (indx, ipos) in enumerate(self._get_sorted_free_indices_for_canon()): + g[ipos] = i + pos = len(self.free) + j = len(self.free) + dummies = [] + prev = None + a = [] + msym = [] + for ipos1, ipos2 in self._get_sorted_dum_indices_for_canon(): + g[ipos1] = j + g[ipos2] = j + 1 + j += 2 + typ = self.index_types[ipos1] + if typ != prev: + if a: + dummies.append(a) + a = [pos, pos + 1] + prev = typ + msym.append(metric_symmetry_to_msym(typ.metric)) + else: + a.extend([pos, pos + 1]) + pos += 2 + if a: + dummies.append(a) + + return _af_new(g), dummies, msym + + +def components_canon_args(components): + numtyp = [] + prev = None + for t in components: + if t == prev: + numtyp[-1][1] += 1 + else: + prev = t + numtyp.append([prev, 1]) + v = [] + for h, n in numtyp: + if h.comm in (0, 1): + comm = h.comm + else: + comm = TensorManager.get_comm(h.comm, h.comm) + v.append((h.symmetry.base, h.symmetry.generators, n, comm)) + return v + + +class _TensorDataLazyEvaluator(CantSympify): + """ + EXPERIMENTAL: do not rely on this class; it may change without deprecation + warnings in future versions of SymPy. + + Explanation + =========== + + This object contains the logic to associate components data to a tensor + expression. Components data are set via the ``.data`` property of tensor + expressions, and are stored inside this class as a mapping between the tensor + expression and the ``ndarray``.
+ + Computations are executed lazily: whereas the tensor expressions can have + contractions, tensor products, and additions, components data are not + computed until they are accessed by reading the ``.data`` property + associated to the tensor expression. + """ + _substitutions_dict: dict[Any, Any] = {} + _substitutions_dict_tensmul: dict[Any, Any] = {} + + def __getitem__(self, key): + dat = self._get(key) + if dat is None: + return None + + from .array import NDimArray + if not isinstance(dat, NDimArray): + return dat + + if dat.rank() == 0: + return dat[()] + elif dat.rank() == 1 and len(dat) == 1: + return dat[0] + return dat + + def _get(self, key): + """ + Retrieve ``data`` associated with ``key``. + + Explanation + =========== + + This algorithm looks into ``self._substitutions_dict`` for all + ``TensorHead`` in the ``TensExpr`` (or just ``TensorHead`` if key is a + TensorHead instance). It reconstructs the components data that the + tensor expression should have by performing on components data the + operations that correspond to the abstract tensor operations applied. + + Metric tensor is handled in a different manner: it is pre-computed in + ``self._substitutions_dict_tensmul``. + """ + if key in self._substitutions_dict: + return self._substitutions_dict[key] + + if isinstance(key, TensorHead): + return None + + if isinstance(key, Tensor): + # special case to handle metrics. Metric tensors cannot be + # constructed through contraction by the metric, their + # components show if they are a matrix or its inverse. + signature = tuple([i.is_up for i in key.get_indices()]) + srch = (key.component,) + signature + if srch in self._substitutions_dict_tensmul: + return self._substitutions_dict_tensmul[srch] + array_list = [self.data_from_tensor(key)] + return self.data_contract_dum(array_list, key.dum, key.ext_rank) + + if isinstance(key, TensMul): + tensmul_args = key.args + if len(tensmul_args) == 1 and len(tensmul_args[0].components) == 1: + # special case to handle metrics. Metric tensors cannot be + # constructed through contraction by the metric, their + # components show if they are a matrix or its inverse. 
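+ # For instance, if key wraps the metric g with both indices lowered, + # the signature below is (False, False) and the lookup key becomes + # (g, False, False); add_metric_data() precomputes all four + # covariant/contravariant combinations of the metric components.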
+ signature = tuple([i.is_up for i in tensmul_args[0].get_indices()]) + srch = (tensmul_args[0].components[0],) + signature + if srch in self._substitutions_dict_tensmul: + return self._substitutions_dict_tensmul[srch] + #data_list = [self.data_from_tensor(i) for i in tensmul_args if isinstance(i, TensExpr)] + data_list = [self.data_from_tensor(i) if isinstance(i, Tensor) else i.data for i in tensmul_args if isinstance(i, TensExpr)] + coeff = prod([i for i in tensmul_args if not isinstance(i, TensExpr)]) + if all(i is None for i in data_list): + return None + if any(i is None for i in data_list): + raise ValueError("Mixing tensors with associated components "\ + "data with tensors without components data") + data_result = self.data_contract_dum(data_list, key.dum, key.ext_rank) + return coeff*data_result + + if isinstance(key, TensAdd): + data_list = [] + free_args_list = [] + for arg in key.args: + if isinstance(arg, TensExpr): + data_list.append(arg.data) + free_args_list.append([x[0] for x in arg.free]) + else: + data_list.append(arg) + free_args_list.append([]) + if all(i is None for i in data_list): + return None + if any(i is None for i in data_list): + raise ValueError("Mixing tensors with associated components "\ + "data with tensors without components data") + + sum_list = [] + from .array import permutedims + for data, free_args in zip(data_list, free_args_list): + if len(free_args) < 2: + sum_list.append(data) + else: + free_args_pos = {y: x for x, y in enumerate(free_args)} + axes = [free_args_pos[arg] for arg in key.free_args] + sum_list.append(permutedims(data, axes)) + return reduce(lambda x, y: x+y, sum_list) + + return None + + @staticmethod + def data_contract_dum(ndarray_list, dum, ext_rank): + from .array import tensorproduct, tensorcontraction, MutableDenseNDimArray + arrays = list(map(MutableDenseNDimArray, ndarray_list)) + prodarr = tensorproduct(*arrays) + return tensorcontraction(prodarr, *dum) + + def data_tensorhead_from_tensmul(self, data, tensmul, tensorhead): + """ + This method is used when assigning components data to a ``TensMul`` + object, it converts components data to a fully contravariant ndarray, + which is then stored according to the ``TensorHead`` key. + """ + if data is None: + return None + + return self._correct_signature_from_indices( + data, + tensmul.get_indices(), + tensmul.free, + tensmul.dum, + True) + + def data_from_tensor(self, tensor): + """ + This method corrects the components data to the right signature + (covariant/contravariant) using the metric associated with each + ``TensorIndexType``. + """ + tensorhead = tensor.component + + if tensorhead.data is None: + return None + + return self._correct_signature_from_indices( + tensorhead.data, + tensor.get_indices(), + tensor.free, + tensor.dum) + + def _assign_data_to_tensor_expr(self, key, data): + if isinstance(key, TensAdd): + raise ValueError('cannot assign data to TensAdd') + # here it is assumed that `key` is a `TensMul` instance. 
+ if len(key.components) != 1: + raise ValueError('cannot assign data to TensMul with multiple components') + tensorhead = key.components[0] + newdata = self.data_tensorhead_from_tensmul(data, key, tensorhead) + return tensorhead, newdata + + def _check_permutations_on_data(self, tens, data): + from .array import permutedims + from .array.arrayop import Flatten + + if isinstance(tens, TensorHead): + rank = tens.rank + generators = tens.symmetry.generators + elif isinstance(tens, Tensor): + rank = tens.rank + generators = tens.components[0].symmetry.generators + elif isinstance(tens, TensorIndexType): + rank = tens.metric.rank + generators = tens.metric.symmetry.generators + + # Every generator is a permutation; check that, after permuting the array + # by that permutation, the array stays the same, except for a + # possible sign change if the permutation admits it. + for gener in generators: + sign_change = +1 if (gener(rank) == rank) else -1 + data_swapped = data + last_data = data + permute_axes = list(map(gener, range(rank))) + # the order of a permutation is the number of times it must be + # applied to obtain the identity. + for i in range(gener.order()-1): + data_swapped = permutedims(data_swapped, permute_axes) + # if any value in the difference array is non-zero, raise an error: + if any(Flatten(last_data - sign_change*data_swapped)): + raise ValueError("Component data symmetry structure error") + last_data = data_swapped + + def __setitem__(self, key, value): + """ + Set the components data of a tensor object/expression. + + Explanation + =========== + + Components data are transformed to the all-contravariant form and stored + with the corresponding ``TensorHead`` object. If a ``TensorHead`` object + cannot be uniquely identified, it will raise an error. + """ + data = _TensorDataLazyEvaluator.parse_data(value) + self._check_permutations_on_data(key, data) + + # TensorHead and TensorIndexType can be assigned data directly, while + # TensMul must first convert data to a fully contravariant form, and + # assign it to its corresponding TensorHead single component. + if not isinstance(key, (TensorHead, TensorIndexType)): + key, data = self._assign_data_to_tensor_expr(key, data) + + if isinstance(key, TensorHead): + for dim, indextype in zip(data.shape, key.index_types): + if indextype.data is None: + raise ValueError("index type {} has no components data"\ + " associated (needed to raise/lower index)".format(indextype)) + if not indextype.dim.is_number: + continue + if dim != indextype.dim: + raise ValueError("wrong dimension of ndarray") + self._substitutions_dict[key] = data + + def __delitem__(self, key): + del self._substitutions_dict[key] + + def __contains__(self, key): + return key in self._substitutions_dict + + def add_metric_data(self, metric, data): + """ + Assign data to the ``metric`` tensor. The metric tensor behaves in an + anomalous way when raising and lowering indices. + + Explanation + =========== + + A fully covariant metric is the inverse transpose of the fully + contravariant metric (matrix inverse is meant). If the metric is + symmetric, the transpose is not necessary and mixed + covariant/contravariant metrics are Kronecker deltas. + """ + # hard assignment, data should not be added to `TensorHead` for metric: + # the problem with `TensorHead` is that the metric is anomalous, i.e. + # raising and lowering the index means considering the metric or its + # inverse; this is not the case for other tensors.
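+ # The four signature combinations stored below are keyed as + # (metric, is_up1, is_up2): (True, True) holds the given fully + # contravariant components; (False, False) holds the inverse transpose + # (fully covariant); the two mixed combinations hold their products, + # which reduce to Kronecker deltas when the metric is symmetric.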
+ self._substitutions_dict_tensmul[metric, True, True] = data + inverse_transpose = self.inverse_transpose_matrix(data) + # in symmetric spaces the transpose is the same as the original matrix; + # the fully covariant metric tensor is the inverse transpose, so this + # code is able to handle non-symmetric metrics as well. + self._substitutions_dict_tensmul[metric, False, False] = inverse_transpose + # now the mixed cases; these are identical to the unit matrix if the metric + # is symmetric. + m = data.tomatrix() + invt = inverse_transpose.tomatrix() + self._substitutions_dict_tensmul[metric, True, False] = m * invt + self._substitutions_dict_tensmul[metric, False, True] = invt * m + + @staticmethod + def _flip_index_by_metric(data, metric, pos): + from .array import tensorproduct, tensorcontraction + + mdim = metric.rank() + ddim = data.rank() + + if pos == 0: + data = tensorcontraction( + tensorproduct( + metric, + data + ), + (1, mdim+pos) + ) + else: + data = tensorcontraction( + tensorproduct( + data, + metric + ), + (pos, ddim) + ) + return data + + @staticmethod + def inverse_matrix(ndarray): + m = ndarray.tomatrix().inv() + return _TensorDataLazyEvaluator.parse_data(m) + + @staticmethod + def inverse_transpose_matrix(ndarray): + m = ndarray.tomatrix().inv().T + return _TensorDataLazyEvaluator.parse_data(m) + + @staticmethod + def _correct_signature_from_indices(data, indices, free, dum, inverse=False): + """ + Utility function to correct the values inside the components data + ndarray according to whether indices are covariant or contravariant. + + It uses the metric matrix to lower values of covariant indices. + """ + # change the ndarray values according to the covariance/contravariance + # of the indices, using the metric + for i, indx in enumerate(indices): + if not indx.is_up and not inverse: + data = _TensorDataLazyEvaluator._flip_index_by_metric(data, indx.tensor_index_type.data, i) + elif not indx.is_up and inverse: + data = _TensorDataLazyEvaluator._flip_index_by_metric( + data, + _TensorDataLazyEvaluator.inverse_matrix(indx.tensor_index_type.data), + i + ) + return data + + @staticmethod + def _sort_data_axes(old, new): + from .array import permutedims + + new_data = old.data.copy() + + old_free = [i[0] for i in old.free] + new_free = [i[0] for i in new.free] + + for i in range(len(new_free)): + for j in range(i, len(old_free)): + if old_free[j] == new_free[i]: + old_free[i], old_free[j] = old_free[j], old_free[i] + new_data = permutedims(new_data, (i, j)) + break + return new_data + + @staticmethod + def add_rearrange_tensmul_parts(new_tensmul, old_tensmul): + def sorted_compo(): + return _TensorDataLazyEvaluator._sort_data_axes(old_tensmul, new_tensmul) + + _TensorDataLazyEvaluator._substitutions_dict[new_tensmul] = sorted_compo() + + @staticmethod + def parse_data(data): + """ + Transform ``data`` to array. The parameter ``data`` may + contain data in various formats, e.g. nested lists, SymPy ``Matrix``, + and so on.
+ + Examples + ======== + + >>> from sympy.tensor.tensor import _TensorDataLazyEvaluator + >>> _TensorDataLazyEvaluator.parse_data([1, 3, -6, 12]) + [1, 3, -6, 12] + + >>> _TensorDataLazyEvaluator.parse_data([[1, 2], [4, 7]]) + [[1, 2], [4, 7]] + """ + from .array import MutableDenseNDimArray + + if not isinstance(data, MutableDenseNDimArray): + if len(data) == 2 and hasattr(data[0], '__call__'): + data = MutableDenseNDimArray(data[0], data[1]) + else: + data = MutableDenseNDimArray(data) + return data + +_tensor_data_substitution_dict = _TensorDataLazyEvaluator() + + +class _TensorManager: + """ + Class to manage tensor properties. + + Notes + ===== + + Tensors belong to tensor commutation groups; each group has a label + ``comm``; there are predefined labels: + + ``0`` tensors commuting with any other tensor + + ``1`` tensors anticommuting among themselves + + ``2`` tensors not commuting, apart from those with ``comm=0`` + + Other groups can be defined using ``set_comm``; tensors in those + groups commute with those with ``comm=0``; by default they + do not commute with any other group. + """ + def __init__(self): + self._comm_init() + + def _comm_init(self): + self._comm = [{} for i in range(3)] + for i in range(3): + self._comm[0][i] = 0 + self._comm[i][0] = 0 + self._comm[1][1] = 1 + self._comm[2][1] = None + self._comm[1][2] = None + self._comm_symbols2i = {0:0, 1:1, 2:2} + self._comm_i2symbol = {0:0, 1:1, 2:2} + + @property + def comm(self): + return self._comm + + def comm_symbols2i(self, i): + """ + Get the commutation group number corresponding to ``i``. + + ``i`` can be a symbol or a number or a string. + + If ``i`` is not already defined, its commutation group number + is set. + """ + if i not in self._comm_symbols2i: + n = len(self._comm) + self._comm.append({}) + self._comm[n][0] = 0 + self._comm[0][n] = 0 + self._comm_symbols2i[i] = n + self._comm_i2symbol[n] = i + return n + return self._comm_symbols2i[i] + + def comm_i2symbol(self, i): + """ + Returns the symbol corresponding to the commutation group number. + """ + return self._comm_i2symbol[i] + + def set_comm(self, i, j, c): + """ + Set the commutation parameter ``c`` for commutation groups ``i, j``. + + Parameters + ========== + + i, j : symbols representing commutation groups + + c : group commutation number + + Notes + ===== + + ``i, j`` can be symbols, strings or numbers, + apart from ``0``, ``1`` and ``2``, which are reserved respectively + for commuting tensors, anticommuting tensors and tensors not commuting + with any other group apart from the commuting tensors. + For the remaining cases, use this method to set the commutation rules; + by default ``c=None``. + + The group commutation number ``c`` is assigned in correspondence + to the group commutation symbols; it can be + + 0 commuting + + 1 anticommuting + + None no commutation property + + Examples + ======== + + ``G`` and ``GH`` do not commute with themselves and commute with + each other; A is commuting.
+ + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead, TensorManager, TensorSymmetry + >>> Lorentz = TensorIndexType('Lorentz') + >>> i0,i1,i2,i3,i4 = tensor_indices('i0:5', Lorentz) + >>> A = TensorHead('A', [Lorentz]) + >>> G = TensorHead('G', [Lorentz], TensorSymmetry.no_symmetry(1), 'Gcomm') + >>> GH = TensorHead('GH', [Lorentz], TensorSymmetry.no_symmetry(1), 'GHcomm') + >>> TensorManager.set_comm('Gcomm', 'GHcomm', 0) + >>> (GH(i1)*G(i0)).canon_bp() + G(i0)*GH(i1) + >>> (G(i1)*G(i0)).canon_bp() + G(i1)*G(i0) + >>> (G(i1)*A(i0)).canon_bp() + A(i0)*G(i1) + """ + if c not in (0, 1, None): + raise ValueError('`c` can assume only the values 0, 1 or None') + + if i not in self._comm_symbols2i: + n = len(self._comm) + self._comm.append({}) + self._comm[n][0] = 0 + self._comm[0][n] = 0 + self._comm_symbols2i[i] = n + self._comm_i2symbol[n] = i + if j not in self._comm_symbols2i: + n = len(self._comm) + self._comm.append({}) + self._comm[0][n] = 0 + self._comm[n][0] = 0 + self._comm_symbols2i[j] = n + self._comm_i2symbol[n] = j + ni = self._comm_symbols2i[i] + nj = self._comm_symbols2i[j] + self._comm[ni][nj] = c + self._comm[nj][ni] = c + + def set_comms(self, *args): + """ + Set the commutation group numbers ``c`` for symbols ``i, j``. + + Parameters + ========== + + args : sequence of ``(i, j, c)`` + """ + for i, j, c in args: + self.set_comm(i, j, c) + + def get_comm(self, i, j): + """ + Return the commutation parameter for commutation group numbers ``i, j``. + + See ``_TensorManager.set_comm``. + """ + return self._comm[i].get(j, 0 if i == 0 or j == 0 else None) + + def clear(self): + """ + Clear the TensorManager. + """ + self._comm_init() + + +TensorManager = _TensorManager() + + +class TensorIndexType(Basic): + """ + A TensorIndexType is characterized by its name and its metric. + + Parameters + ========== + + name : name of the tensor type + dummy_name : name of the head of dummy indices + dim : dimension; it can be a symbol, an integer or ``None`` + eps_dim : dimension of the epsilon tensor + metric_symmetry : integer that denotes metric symmetry or ``None`` for no metric + metric_name : string with the name of the metric tensor + + Attributes + ========== + + ``metric`` : the metric tensor + ``delta`` : ``Kronecker delta`` + ``epsilon`` : the ``Levi-Civita epsilon`` tensor + ``data`` : (deprecated) a property to add ``ndarray`` values, to work in a specified basis. + + Notes + ===== + + The possible values of the ``metric_symmetry`` parameter are: + + ``1`` : metric tensor is fully symmetric + ``0`` : metric tensor possesses no index symmetry + ``-1`` : metric tensor is fully antisymmetric + ``None``: there is no metric tensor (the metric equals ``None``) + + The metric is assumed to be symmetric by default. It can also be set + to a custom tensor by the ``.set_metric()`` method. + + If there is a metric, it is used to raise and lower indices. + + In the case of a non-symmetric metric, the following raising and + lowering conventions will be adopted: + + ``psi(a) = g(a, b)*psi(-b); chi(-a) = chi(b)*g(-b, -a)`` + + From these it is easy to find: + + ``g(-a, b) = delta(-a, b)`` + + where ``delta(-a, b) = delta(b, -a)`` is the ``Kronecker delta`` + (see ``TensorIndex`` for the conventions on indices). + For antisymmetric metrics there is also the following equality: + + ``g(a, -b) = -delta(a, -b)`` + + If there is no metric it is not possible to raise or lower indices; + e.g.
the index of the defining representation of ``SU(N)`` + is 'covariant' and the conjugate representation is + 'contravariant'; for ``N > 2`` they are linearly independent. + + ``eps_dim`` is by default equal to ``dim``, if the latter is an integer; + else it can be assigned (for use in naive dimensional regularization); + if ``eps_dim`` is not an integer ``epsilon`` is ``None``. + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> Lorentz.metric + metric(Lorentz,Lorentz) + """ + + def __new__(cls, name, dummy_name=None, dim=None, eps_dim=None, + metric_symmetry=1, metric_name='metric', **kwargs): + if 'dummy_fmt' in kwargs: + dummy_fmt = kwargs['dummy_fmt'] + sympy_deprecation_warning( + f""" + The dummy_fmt keyword to TensorIndexType is deprecated. Use + dummy_name={dummy_fmt} instead. + """, + deprecated_since_version="1.5", + active_deprecations_target="deprecated-tensorindextype-dummy-fmt", + ) + dummy_name = dummy_fmt + + if isinstance(name, str): + name = Symbol(name) + + if dummy_name is None: + dummy_name = str(name)[0] + if isinstance(dummy_name, str): + dummy_name = Symbol(dummy_name) + + if dim is None: + dim = Symbol("dim_" + dummy_name.name) + else: + dim = sympify(dim) + + if eps_dim is None: + eps_dim = dim + else: + eps_dim = sympify(eps_dim) + + metric_symmetry = sympify(metric_symmetry) + + if isinstance(metric_name, str): + metric_name = Symbol(metric_name) + + if 'metric' in kwargs: + SymPyDeprecationWarning( + """ + The 'metric' keyword argument to TensorIndexType is + deprecated. Use the 'metric_symmetry' keyword argument or the + TensorIndexType.set_metric() method instead. + """, + deprecated_since_version="1.5", + active_deprecations_target="deprecated-tensorindextype-metric", + ) + metric = kwargs.get('metric') + if metric is not None: + if metric in (True, False, 0, 1): + metric_name = 'metric' + #metric_antisym = metric + else: + metric_name = metric.name + #metric_antisym = metric.antisym + + if metric: + metric_symmetry = -1 + else: + metric_symmetry = 1 + + obj = Basic.__new__(cls, name, dummy_name, dim, eps_dim, + metric_symmetry, metric_name) + + obj._autogenerated = [] + return obj + + @property + def name(self): + return self.args[0].name + + @property + def dummy_name(self): + return self.args[1].name + + @property + def dim(self): + return self.args[2] + + @property + def eps_dim(self): + return self.args[3] + + @memoize_property + def metric(self): + metric_symmetry = self.args[4] + metric_name = self.args[5] + if metric_symmetry is None: + return None + + if metric_symmetry == 0: + symmetry = TensorSymmetry.no_symmetry(2) + elif metric_symmetry == 1: + symmetry = TensorSymmetry.fully_symmetric(2) + elif metric_symmetry == -1: + symmetry = TensorSymmetry.fully_symmetric(-2) + + return TensorHead(metric_name, [self]*2, symmetry) + + @memoize_property + def delta(self): + return TensorHead('KD', [self]*2, TensorSymmetry.fully_symmetric(2)) + + @memoize_property + def epsilon(self): + if not isinstance(self.eps_dim, (SYMPY_INTS, Integer)): + return None + symmetry = TensorSymmetry.fully_symmetric(-self.eps_dim) + return TensorHead('Eps', [self]*self.eps_dim, symmetry) + + def set_metric(self, tensor): + self._metric = tensor + + def __lt__(self, other): + return self.name < other.name + + def __str__(self): + return self.name + + __repr__ = __str__ + + # Everything below this line is deprecated + + @property + def data(self): + deprecate_data() + with 
ignore_warnings(SymPyDeprecationWarning): + return _tensor_data_substitution_dict[self] + + @data.setter + def data(self, data): + deprecate_data() + # This assignment is a bit controversial, should metric components be assigned + # to the metric only or also to the TensorIndexType object? The advantage here + # is the ability to assign a 1D array and transform it to a 2D diagonal array. + from .array import MutableDenseNDimArray + + data = _TensorDataLazyEvaluator.parse_data(data) + if data.rank() > 2: + raise ValueError("data have to be of rank 1 (diagonal metric) or 2.") + if data.rank() == 1: + if self.dim.is_number: + nda_dim = data.shape[0] + if nda_dim != self.dim: + raise ValueError("Dimension mismatch") + + dim = data.shape[0] + newndarray = MutableDenseNDimArray.zeros(dim, dim) + for i, val in enumerate(data): + newndarray[i, i] = val + data = newndarray + dim1, dim2 = data.shape + if dim1 != dim2: + raise ValueError("Non-square matrix tensor.") + if self.dim.is_number: + if self.dim != dim1: + raise ValueError("Dimension mismatch") + _tensor_data_substitution_dict[self] = data + _tensor_data_substitution_dict.add_metric_data(self.metric, data) + with ignore_warnings(SymPyDeprecationWarning): + delta = self.get_kronecker_delta() + i1 = TensorIndex('i1', self) + i2 = TensorIndex('i2', self) + with ignore_warnings(SymPyDeprecationWarning): + delta(i1, -i2).data = _TensorDataLazyEvaluator.parse_data(eye(dim1)) + + @data.deleter + def data(self): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + if self in _tensor_data_substitution_dict: + del _tensor_data_substitution_dict[self] + if self.metric in _tensor_data_substitution_dict: + del _tensor_data_substitution_dict[self.metric] + + @deprecated( + """ + The TensorIndexType.get_kronecker_delta() method is deprecated. Use + the TensorIndexType.delta attribute instead. + """, + deprecated_since_version="1.5", + active_deprecations_target="deprecated-tensorindextype-methods", + ) + def get_kronecker_delta(self): + sym2 = TensorSymmetry(get_symmetric_group_sgs(2)) + delta = TensorHead('KD', [self]*2, sym2) + return delta + + @deprecated( + """ + The TensorIndexType.get_epsilon() method is deprecated. Use + the TensorIndexType.epsilon attribute instead. + """, + deprecated_since_version="1.5", + active_deprecations_target="deprecated-tensorindextype-methods", + ) + def get_epsilon(self): + if not isinstance(self._eps_dim, (SYMPY_INTS, Integer)): + return None + sym = TensorSymmetry(get_symmetric_group_sgs(self._eps_dim, 1)) + epsilon = TensorHead('Eps', [self]*self._eps_dim, sym) + return epsilon + + def _components_data_full_destroy(self): + """ + EXPERIMENTAL: do not rely on this API method. 
+ + This destroys components data associated to the ``TensorIndexType``, if + any, specifically: + + * metric tensor data + * Kronecker tensor data + """ + if self in _tensor_data_substitution_dict: + del _tensor_data_substitution_dict[self] + + def delete_tensmul_data(key): + if key in _tensor_data_substitution_dict._substitutions_dict_tensmul: + del _tensor_data_substitution_dict._substitutions_dict_tensmul[key] + + # delete metric data: + delete_tensmul_data((self.metric, True, True)) + delete_tensmul_data((self.metric, True, False)) + delete_tensmul_data((self.metric, False, True)) + delete_tensmul_data((self.metric, False, False)) + + # delete delta tensor data: + delta = self.get_kronecker_delta() + if delta in _tensor_data_substitution_dict: + del _tensor_data_substitution_dict[delta] + + +class TensorIndex(Basic): + """ + Represents a tensor index. + + Parameters + ========== + + name : name of the index, or ``True`` if you want it to be automatically assigned + tensor_index_type : ``TensorIndexType`` of the index + is_up : flag for contravariant index (is_up=True by default) + + Attributes + ========== + + ``name`` + ``tensor_index_type`` + ``is_up`` + + Notes + ===== + + Tensor indices are contracted with the Einstein summation convention. + + An index can be in contravariant or in covariant form; in the latter + case it is represented prepending a ``-`` to the index name. Adding + ``-`` to a covariant (is_up=False) index makes it contravariant. + + Dummy indices have a name whose head is given by + ``tensor_index_type.dummy_name``, followed by an underscore and a number. + + Similar to ``symbols``, multiple contravariant indices can be created + at once using ``tensor_indices(s, typ)``, where ``s`` is a string + of names. + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, TensorIndex, TensorHead, tensor_indices + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> mu = TensorIndex('mu', Lorentz, is_up=False) + >>> nu, rho = tensor_indices('nu, rho', Lorentz) + >>> A = TensorHead('A', [Lorentz, Lorentz]) + >>> A(mu, nu) + A(-mu, nu) + >>> A(-mu, -rho) + A(mu, -rho) + >>> A(mu, -mu) + A(-L_0, L_0) + """ + def __new__(cls, name, tensor_index_type, is_up=True): + if isinstance(name, str): + name_symbol = Symbol(name) + elif isinstance(name, Symbol): + name_symbol = name + elif name is True: + name = "_i{}".format(len(tensor_index_type._autogenerated)) + name_symbol = Symbol(name) + tensor_index_type._autogenerated.append(name_symbol) + else: + raise ValueError("invalid name") + + is_up = sympify(is_up) + return Basic.__new__(cls, name_symbol, tensor_index_type, is_up) + + @property + def name(self): + return self.args[0].name + + @property + def tensor_index_type(self): + return self.args[1] + + @property + def is_up(self): + return self.args[2] + + def _print(self): + s = self.name + if not self.is_up: + s = '-%s' % s + return s + + def __lt__(self, other): + return ((self.tensor_index_type, self.name) < + (other.tensor_index_type, other.name)) + + def __neg__(self): + t1 = TensorIndex(self.name, self.tensor_index_type, + (not self.is_up)) + return t1 + + +def tensor_indices(s, typ): + """ + Returns a list of tensor indices given their names and their types.
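+ + If a single name is given, the single ``TensorIndex`` itself is returned + rather than a one-element list (a small illustrative sketch): + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> tensor_indices('a', Lorentz) + a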
+ + Parameters + ========== + + s : string of comma separated names of indices + + typ : ``TensorIndexType`` of the indices + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> a, b, c, d = tensor_indices('a,b,c,d', Lorentz) + """ + if isinstance(s, str): + a = [x.name for x in symbols(s, seq=True)] + else: + raise ValueError('expecting a string') + + tilist = [TensorIndex(i, typ) for i in a] + if len(tilist) == 1: + return tilist[0] + return tilist + + +class TensorSymmetry(Basic): + """ + Monoterm symmetry of a tensor (i.e. any symmetric or anti-symmetric + index permutation). For the relevant terminology see the ``tensor_can.py`` + section of the combinatorics module. + + Parameters + ========== + + bsgs : tuple ``(base, sgs)`` BSGS of the symmetry of the tensor + + Attributes + ========== + + ``base`` : base of the BSGS + ``generators`` : generators of the BSGS + ``rank`` : rank of the tensor + + Notes + ===== + + A tensor can have an arbitrary monoterm symmetry provided by its BSGS. + Multiterm symmetries, like the cyclic symmetry of the Riemann tensor + (i.e., the Bianchi identity), are not covered. See the combinatorics + module for information on how to generate a BSGS for a general index + permutation group. Simple symmetries can be generated using built-in + methods. + + See Also + ======== + + sympy.combinatorics.tensor_can.get_symmetric_group_sgs + + Examples + ======== + + Define a symmetric tensor of rank 2: + + >>> from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, get_symmetric_group_sgs, TensorHead + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> sym = TensorSymmetry(get_symmetric_group_sgs(2)) + >>> T = TensorHead('T', [Lorentz]*2, sym) + + Note that the same can also be done using built-in TensorSymmetry methods: + + >>> sym2 = TensorSymmetry.fully_symmetric(2) + >>> sym == sym2 + True + """ + def __new__(cls, *args, **kw_args): + if len(args) == 1: + base, generators = args[0] + elif len(args) == 2: + base, generators = args + else: + raise TypeError("bsgs required, either two separate parameters or one tuple") + + if not isinstance(base, Tuple): + base = Tuple(*base) + if not isinstance(generators, Tuple): + generators = Tuple(*generators) + + return Basic.__new__(cls, base, generators, **kw_args) + + @property + def base(self): + return self.args[0] + + @property + def generators(self): + return self.args[1] + + @property + def rank(self): + return self.generators[0].size - 2 + + @classmethod + def fully_symmetric(cls, rank): + """ + Returns a fully symmetric (antisymmetric if ``rank`` < 0) + TensorSymmetry object for ``abs(rank)`` indices. + """ + if rank > 0: + bsgs = get_symmetric_group_sgs(rank, False) + elif rank < 0: + bsgs = get_symmetric_group_sgs(-rank, True) + elif rank == 0: + bsgs = ([], [Permutation(1)]) + return TensorSymmetry(bsgs) + + @classmethod + def direct_product(cls, *args): + """ + Returns a TensorSymmetry object that is a direct product of + fully (anti-)symmetric index permutation groups.
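+ + For instance (a minimal sketch), the symmetry of a rank-4 tensor that is + symmetric in its first two indices and antisymmetric in its last two: + + >>> from sympy.tensor.tensor import TensorSymmetry + >>> sym = TensorSymmetry.direct_product(2, -2) + >>> sym.rank + 4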
+ + Notes + ===== + + Some examples for different values of ``(*args)``: + ``(1)`` vector, equivalent to ``TensorSymmetry.fully_symmetric(1)`` + ``(2)`` tensor with 2 symmetric indices, equivalent to ``.fully_symmetric(2)`` + ``(-2)`` tensor with 2 antisymmetric indices, equivalent to ``.fully_symmetric(-2)`` + ``(2, -2)`` tensor with the first 2 indices symmetric and the last 2 antisymmetric + ``(1, 1, 1)`` tensor with 3 indices without any symmetry + """ + base, sgs = [], [Permutation(1)] + for arg in args: + if arg > 0: + bsgs2 = get_symmetric_group_sgs(arg, False) + elif arg < 0: + bsgs2 = get_symmetric_group_sgs(-arg, True) + else: + continue + base, sgs = bsgs_direct_product(base, sgs, *bsgs2) + + return TensorSymmetry(base, sgs) + + @classmethod + def riemann(cls): + """ + Returns a monoterm symmetry of the Riemann tensor. + """ + return TensorSymmetry(riemann_bsgs) + + @classmethod + def no_symmetry(cls, rank): + """ + TensorSymmetry object for ``rank`` indices with no symmetry. + """ + return TensorSymmetry([], [Permutation(rank+1)]) + + +@deprecated( + """ + The tensorsymmetry() function is deprecated. Use the TensorSymmetry + constructor instead. + """, + deprecated_since_version="1.5", + active_deprecations_target="deprecated-tensorsymmetry", +) +def tensorsymmetry(*args): + """ + Returns a ``TensorSymmetry`` object. This method is deprecated, use + ``TensorSymmetry.direct_product()`` or ``.riemann()`` instead. + + Explanation + =========== + + One can represent a tensor with any monoterm slot symmetry group + using a BSGS. + + ``args`` can be a BSGS + ``args[0]`` base + ``args[1]`` sgs + + Usually tensors are in (direct products of) representations + of the symmetric group; + ``args`` can be a list of lists representing the shapes of Young tableaux + + Notes + ===== + + For instance: + ``[[1]]`` vector + ``[[1]*n]`` symmetric tensor of rank ``n`` + ``[[n]]`` antisymmetric tensor of rank ``n`` + ``[[2, 2]]`` monoterm slot symmetry of the Riemann tensor + ``[[1],[1]]`` vector*vector + ``[[2],[1],[1]]`` (antisymmetric tensor)*vector*vector + + Notice that with the shape ``[2, 2]`` we associate only the monoterm + symmetries of the Riemann tensor; this is an abuse of notation, + since the shape ``[2, 2]`` usually corresponds to the irreducible + representation characterized by the monoterm symmetries and by the + cyclic symmetry. + """ + from sympy.combinatorics import Permutation + + def tableau2bsgs(a): + if len(a) == 1: + # antisymmetric tensor of rank n + n = a[0] + bsgs = get_symmetric_group_sgs(n, 1) + else: + if all(x == 1 for x in a): + # symmetric tensor of rank n + n = len(a) + bsgs = get_symmetric_group_sgs(n) + elif a == [2, 2]: + bsgs = riemann_bsgs + else: + raise NotImplementedError + return bsgs + + if not args: + return TensorSymmetry(Tuple(), Tuple(Permutation(1))) + + if len(args) == 2 and isinstance(args[1][0], Permutation): + return TensorSymmetry(args) + base, sgs = tableau2bsgs(args[0]) + for a in args[1:]: + basex, sgsx = tableau2bsgs(a) + base, sgs = bsgs_direct_product(base, sgs, basex, sgsx) + return TensorSymmetry(Tuple(base, sgs)) + +@deprecated( + "TensorType is deprecated. Use tensor_heads() instead.", + deprecated_since_version="1.5", + active_deprecations_target="deprecated-tensortype", +) +class TensorType(Basic): + """ + Class of tensor types. Deprecated, use tensor_heads() instead.
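+ + For instance, instead of constructing heads through a ``TensorType``, one + can write directly (a minimal sketch of the replacement API): + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_heads + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> p, q = tensor_heads('p q', [Lorentz])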
+ + Parameters + ========== + + index_types : list of ``TensorIndexType`` of the tensor indices + symmetry : ``TensorSymmetry`` of the tensor + + Attributes + ========== + + ``index_types`` + ``symmetry`` + ``types`` : list of ``TensorIndexType`` without repetitions + """ + is_commutative = False + + def __new__(cls, index_types, symmetry, **kw_args): + assert symmetry.rank == len(index_types) + obj = Basic.__new__(cls, Tuple(*index_types), symmetry, **kw_args) + return obj + + @property + def index_types(self): + return self.args[0] + + @property + def symmetry(self): + return self.args[1] + + @property + def types(self): + return sorted(set(self.index_types), key=lambda x: x.name) + + def __str__(self): + return 'TensorType(%s)' % ([str(x) for x in self.index_types]) + + def __call__(self, s, comm=0): + """ + Return a TensorHead object or a list of TensorHead objects. + + Parameters + ========== + + s : name or string of names. + + comm : commutation group; + see ``_TensorManager.set_comm`` + """ + if isinstance(s, str): + names = [x.name for x in symbols(s, seq=True)] + else: + raise ValueError('expecting a string') + if len(names) == 1: + return TensorHead(names[0], self.index_types, self.symmetry, comm) + else: + return [TensorHead(name, self.index_types, self.symmetry, comm) for name in names] + + +@deprecated( + """ + The tensorhead() function is deprecated. Use tensor_heads() instead. + """, + deprecated_since_version="1.5", + active_deprecations_target="deprecated-tensorhead", +) +def tensorhead(name, typ, sym=None, comm=0): + """ + Function generating tensorhead(s). This method is deprecated, + use the TensorHead constructor or tensor_heads() instead. + + Parameters + ========== + + name : name or sequence of names (as in ``symbols``) + + typ : index types + + sym : same as ``*args`` in ``tensorsymmetry`` + + comm : commutation group number + see ``_TensorManager.set_comm`` + """ + if sym is None: + sym = [[1] for i in range(len(typ))] + with ignore_warnings(SymPyDeprecationWarning): + sym = tensorsymmetry(*sym) + return TensorHead(name, typ, sym, comm) + + +class TensorHead(Basic): + """ + Tensor head of a tensor. + + Parameters + ========== + + name : name of the tensor + index_types : list of TensorIndexType + symmetry : TensorSymmetry of the tensor + comm : commutation group number + + Attributes + ========== + + ``name`` + ``index_types`` + ``rank`` : total number of indices + ``symmetry`` + ``comm`` : commutation group + + Notes + ===== + + Similar to ``symbols``, multiple TensorHeads can be created at once using + the ``tensor_heads(s, index_types, symmetry, comm)`` function, where ``s`` + is the string of names. + + A ``TensorHead`` belongs to a commutation group, defined by a + symbol or number ``comm`` (see ``_TensorManager.set_comm``); + tensors in a commutation group have the same commutation properties; + by default ``comm`` is ``0``, the group of the commuting tensors. + + Examples + ======== + + Define a fully antisymmetric tensor of rank 2: + + >>> from sympy.tensor.tensor import TensorIndexType, TensorHead, TensorSymmetry + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> asym2 = TensorSymmetry.fully_symmetric(-2) + >>> A = TensorHead('A', [Lorentz, Lorentz], asym2) + + In the following examples with ndarray values, the components data assigned + to the ``TensorHead`` object are assumed to be in a fully-contravariant + representation.
In case it is necessary to assign components data which + represents the values of a non-fully covariant tensor, see the other + examples. + + >>> from sympy.tensor.tensor import tensor_indices + >>> from sympy import diag + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> i0, i1 = tensor_indices('i0:2', Lorentz) + + Specify a replacement dictionary to keep track of the arrays to use for + replacements in the tensorial expression. The ``TensorIndexType`` is + associated to the metric used for contractions (in fully covariant form): + + >>> repl = {Lorentz: diag(1, -1, -1, -1)} + + Let's see some examples of working with components with the electromagnetic + tensor: + + >>> from sympy import symbols + >>> Ex, Ey, Ez, Bx, By, Bz = symbols('E_x E_y E_z B_x B_y B_z') + >>> c = symbols('c', positive=True) + + Let's define `F`, an antisymmetric tensor: + + >>> F = TensorHead('F', [Lorentz, Lorentz], asym2) + + Let's update the dictionary to contain the matrix to use in the + replacements: + + >>> repl.update({F(-i0, -i1): [ + ... [0, Ex/c, Ey/c, Ez/c], + ... [-Ex/c, 0, -Bz, By], + ... [-Ey/c, Bz, 0, -Bx], + ... [-Ez/c, -By, Bx, 0]]}) + + Now it is possible to retrieve the contravariant form of the Electromagnetic + tensor: + + >>> F(i0, i1).replace_with_arrays(repl, [i0, i1]) + [[0, -E_x/c, -E_y/c, -E_z/c], [E_x/c, 0, -B_z, B_y], [E_y/c, B_z, 0, -B_x], [E_z/c, -B_y, B_x, 0]] + + and the mixed contravariant-covariant form: + + >>> F(i0, -i1).replace_with_arrays(repl, [i0, -i1]) + [[0, E_x/c, E_y/c, E_z/c], [E_x/c, 0, B_z, -B_y], [E_y/c, -B_z, 0, B_x], [E_z/c, B_y, -B_x, 0]] + + Energy-momentum of a particle may be represented as: + + >>> from sympy import symbols + >>> P = TensorHead('P', [Lorentz], TensorSymmetry.no_symmetry(1)) + >>> E, px, py, pz = symbols('E p_x p_y p_z', positive=True) + >>> repl.update({P(i0): [E, px, py, pz]}) + + The contravariant and covariant components are, respectively: + + >>> P(i0).replace_with_arrays(repl, [i0]) + [E, p_x, p_y, p_z] + >>> P(-i0).replace_with_arrays(repl, [-i0]) + [E, -p_x, -p_y, -p_z] + + The contraction of a 1-index tensor by itself: + + >>> expr = P(i0)*P(-i0) + >>> expr.replace_with_arrays(repl, []) + E**2 - p_x**2 - p_y**2 - p_z**2 + """ + is_commutative = False + + def __new__(cls, name, index_types, symmetry=None, comm=0): + if isinstance(name, str): + name_symbol = Symbol(name) + elif isinstance(name, Symbol): + name_symbol = name + else: + raise ValueError("invalid name") + + if symmetry is None: + symmetry = TensorSymmetry.no_symmetry(len(index_types)) + else: + assert symmetry.rank == len(index_types) + + obj = Basic.__new__(cls, name_symbol, Tuple(*index_types), symmetry) + obj.comm = TensorManager.comm_symbols2i(comm) + return obj + + @property + def name(self): + return self.args[0].name + + @property + def index_types(self): + return list(self.args[1]) + + @property + def symmetry(self): + return self.args[2] + + @property + def rank(self): + return len(self.index_types) + + def __lt__(self, other): + return (self.name, self.index_types) < (other.name, other.index_types) + + def commutes_with(self, other): + """ + Returns ``0`` if ``self`` and ``other`` commute, ``1`` if they anticommute. + + Returns ``None`` if ``self`` and ``other`` neither commute nor anticommute. + """ + r = TensorManager.get_comm(self.comm, other.comm) + return r + + def _print(self): + return '%s(%s)' %(self.name, ','.join([str(x) for x in self.index_types])) + + def __call__(self, *indices, **kw_args): + """ + Returns a tensor with indices. 
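+ + For instance (a small sketch; see also the Explanation below), indices may + be passed as strings: + + >>> from sympy.tensor.tensor import TensorIndexType, TensorHead + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> B = TensorHead('B', [Lorentz]*2) + >>> B('a', '-b') + B(a, -b)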
+ + Explanation + =========== + + There is a special behavior in case of indices denoted by ``True``: + they are considered auto-matrix indices, their slots are automatically + filled, and they confer to the tensor the behavior of a matrix or vector + upon multiplication with another tensor containing auto-matrix indices + of the same ``TensorIndexType``. This means indices get summed over the + same way as in matrix multiplication. For matrix behavior, define two + auto-matrix indices; for vector behavior, define just one. + + Indices can also be strings, in which case the attribute + ``index_types`` is used to convert them to proper ``TensorIndex``. + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorSymmetry, TensorHead + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> a, b = tensor_indices('a,b', Lorentz) + >>> A = TensorHead('A', [Lorentz]*2, TensorSymmetry.no_symmetry(2)) + >>> t = A(a, -b) + >>> t + A(a, -b) + + """ + + updated_indices = [] + for idx, typ in zip(indices, self.index_types): + if isinstance(idx, str): + idx = idx.strip().replace(" ", "") + if idx.startswith('-'): + updated_indices.append(TensorIndex(idx[1:], typ, + is_up=False)) + else: + updated_indices.append(TensorIndex(idx, typ)) + else: + updated_indices.append(idx) + + updated_indices += indices[len(updated_indices):] + + tensor = Tensor(self, updated_indices, **kw_args) + return tensor.doit() + + # Everything below this line is deprecated + + def __pow__(self, other): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + if self.data is None: + raise ValueError("No power on abstract tensors.") + from .array import tensorproduct, tensorcontraction + metrics = [_.data for _ in self.index_types] + + marray = self.data + marraydim = marray.rank() + for metric in metrics: + marray = tensorproduct(marray, metric, marray) + marray = tensorcontraction(marray, (0, marraydim), (marraydim+1, marraydim+2)) + + return marray ** (other * S.Half) + + @property + def data(self): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + return _tensor_data_substitution_dict[self] + + @data.setter + def data(self, data): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + _tensor_data_substitution_dict[self] = data + + @data.deleter + def data(self): + deprecate_data() + if self in _tensor_data_substitution_dict: + del _tensor_data_substitution_dict[self] + + def __iter__(self): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + return self.data.__iter__() + + def _components_data_full_destroy(self): + """ + EXPERIMENTAL: do not rely on this API method. + + Destroy the components data associated to the ``TensorHead`` object; + this checks for attached components data and destroys them too. + """ + # do not garbage collect Kronecker tensor (it should be done by + # ``TensorIndexType`` garbage collection) + deprecate_data() + if self.name == "KD": + return + + # the data attached to a tensor must be deleted only by the TensorHead + # destructor. If the TensorHead is deleted, it means that there are no + # more instances of that tensor anywhere.
+ if self in _tensor_data_substitution_dict: + del _tensor_data_substitution_dict[self] + + +def tensor_heads(s, index_types, symmetry=None, comm=0): + """ + Returns a sequence of TensorHeads from a string `s` + """ + if isinstance(s, str): + names = [x.name for x in symbols(s, seq=True)] + else: + raise ValueError('expecting a string') + + thlist = [TensorHead(name, index_types, symmetry, comm) for name in names] + if len(thlist) == 1: + return thlist[0] + return thlist + + +class TensExpr(Expr, ABC): + """ + Abstract base class for tensor expressions + + Notes + ===== + + A tensor expression is an expression formed by tensors; + currently the sums of tensors are distributed. + + A ``TensExpr`` can be a ``TensAdd`` or a ``TensMul``. + + ``TensMul`` objects are formed by products of component tensors, + and include a coefficient, which is a SymPy expression. + + + In the internal representation contracted indices are represented + by ``(ipos1, ipos2, icomp1, icomp2)``, where ``icomp1`` is the position + of the component tensor with contravariant index, ``ipos1`` is the + slot which the index occupies in that component tensor. + + Contracted indices are therefore nameless in the internal representation. + """ + + _op_priority = 12.0 + is_commutative = False + + def __neg__(self): + return self*S.NegativeOne + + def __abs__(self): + raise NotImplementedError + + def __add__(self, other): + return TensAdd(self, other).doit() + + def __radd__(self, other): + return TensAdd(other, self).doit() + + def __sub__(self, other): + return TensAdd(self, -other).doit() + + def __rsub__(self, other): + return TensAdd(other, -self).doit() + + def __mul__(self, other): + """ + Multiply two tensors using Einstein summation convention. + + Explanation + =========== + + If the two tensors have an index in common, one contravariant + and the other covariant, in their product the indices are summed + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz) + >>> g = Lorentz.metric + >>> p, q = tensor_heads('p,q', [Lorentz]) + >>> t1 = p(m0) + >>> t2 = q(-m0) + >>> t1*t2 + p(L_0)*q(-L_0) + """ + return TensMul(self, other).doit() + + def __rmul__(self, other): + return TensMul(other, self).doit() + + def __truediv__(self, other): + other = _sympify(other) + if isinstance(other, TensExpr): + raise ValueError('cannot divide by a tensor') + return TensMul(self, S.One/other).doit() + + def __rtruediv__(self, other): + raise ValueError('cannot divide by a tensor') + + def __pow__(self, other): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + if self.data is None: + raise ValueError("No power without ndarray data.") + from .array import tensorproduct, tensorcontraction + free = self.free + marray = self.data + mdim = marray.rank() + for metric in free: + marray = tensorcontraction( + tensorproduct( + marray, + metric[0].tensor_index_type.data, + marray), + (0, mdim), (mdim+1, mdim+2) + ) + return marray ** (other * S.Half) + + def __rpow__(self, other): + raise NotImplementedError + + @property + @abstractmethod + def nocoeff(self): + raise NotImplementedError("abstract method") + + @property + @abstractmethod + def coeff(self): + raise NotImplementedError("abstract method") + + @abstractmethod + def get_indices(self): + raise NotImplementedError("abstract method") + + @abstractmethod + def get_free_indices(self) -> list[TensorIndex]: 
+ raise NotImplementedError("abstract method") + + @abstractmethod + def _replace_indices(self, repl: dict[TensorIndex, TensorIndex]) -> TensExpr: + raise NotImplementedError("abstract method") + + def fun_eval(self, *index_tuples): + deprecate_fun_eval() + return self.substitute_indices(*index_tuples) + + def get_matrix(self): + """ + DEPRECATED: do not use. + + Returns ndarray components data as a matrix, if components data are + available and ndarray dimension does not exceed 2. + """ + from sympy.matrices.dense import Matrix + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + if 0 < self.rank <= 2: + rows = self.data.shape[0] + columns = self.data.shape[1] if self.rank == 2 else 1 + if self.rank == 2: + mat_list = [] * rows + for i in range(rows): + mat_list.append([]) + for j in range(columns): + mat_list[i].append(self[i, j]) + else: + mat_list = [None] * rows + for i in range(rows): + mat_list[i] = self[i] + return Matrix(mat_list) + else: + raise NotImplementedError( + "missing multidimensional reduction to matrix.") + + @staticmethod + def _get_indices_permutation(indices1, indices2): + return [indices1.index(i) for i in indices2] + + def expand(self, **hints): + return _expand(self, **hints).doit() + + def _expand(self, **kwargs): + return self + + def _get_free_indices_set(self): + indset = set() + for arg in self.args: + if isinstance(arg, TensExpr): + indset.update(arg._get_free_indices_set()) + return indset + + def _get_dummy_indices_set(self): + indset = set() + for arg in self.args: + if isinstance(arg, TensExpr): + indset.update(arg._get_dummy_indices_set()) + return indset + + def _get_indices_set(self): + indset = set() + for arg in self.args: + if isinstance(arg, TensExpr): + indset.update(arg._get_indices_set()) + return indset + + @property + def _iterate_dummy_indices(self): + dummy_set = self._get_dummy_indices_set() + + def recursor(expr, pos): + if isinstance(expr, TensorIndex): + if expr in dummy_set: + yield (expr, pos) + elif isinstance(expr, (Tuple, TensExpr)): + for p, arg in enumerate(expr.args): + yield from recursor(arg, pos+(p,)) + + return recursor(self, ()) + + @property + def _iterate_free_indices(self): + free_set = self._get_free_indices_set() + + def recursor(expr, pos): + if isinstance(expr, TensorIndex): + if expr in free_set: + yield (expr, pos) + elif isinstance(expr, (Tuple, TensExpr)): + for p, arg in enumerate(expr.args): + yield from recursor(arg, pos+(p,)) + + return recursor(self, ()) + + @property + def _iterate_indices(self): + def recursor(expr, pos): + if isinstance(expr, TensorIndex): + yield (expr, pos) + elif isinstance(expr, (Tuple, TensExpr)): + for p, arg in enumerate(expr.args): + yield from recursor(arg, pos+(p,)) + + return recursor(self, ()) + + @staticmethod + def _contract_and_permute_with_metric(metric, array, pos, dim): + # TODO: add possibility of metric after (spinors) + from .array import tensorcontraction, tensorproduct, permutedims + + array = tensorcontraction(tensorproduct(metric, array), (1, 2+pos)) + permu = list(range(dim)) + permu[0], permu[pos] = permu[pos], permu[0] + return permutedims(array, permu) + + @staticmethod + def _match_indices_with_other_tensor(array, free_ind1, free_ind2, replacement_dict): + from .array import permutedims + + index_types1 = [i.tensor_index_type for i in free_ind1] + + # Check if variance of indices needs to be fixed: + pos2up = [] + pos2down = [] + free2remaining = free_ind2[:] + for pos1, index1 in enumerate(free_ind1): + if index1 in free2remaining: + pos2 = 
free2remaining.index(index1) + free2remaining[pos2] = None + continue + if -index1 in free2remaining: + pos2 = free2remaining.index(-index1) + free2remaining[pos2] = None + free_ind2[pos2] = index1 + if index1.is_up: + pos2up.append(pos2) + else: + pos2down.append(pos2) + else: + index2 = free2remaining[pos1] + if index2 is None: + raise ValueError("incompatible indices: %s and %s" % (free_ind1, free_ind2)) + free2remaining[pos1] = None + free_ind2[pos1] = index1 + if index1.is_up ^ index2.is_up: + if index1.is_up: + pos2up.append(pos1) + else: + pos2down.append(pos1) + + if len(set(free_ind1) & set(free_ind2)) < len(free_ind1): + raise ValueError("incompatible indices: %s and %s" % (free_ind1, free_ind2)) + + # Raise indices: + for pos in pos2up: + index_type_pos = index_types1[pos] + if index_type_pos not in replacement_dict: + raise ValueError("No metric provided to raise index") + metric = replacement_dict[index_type_pos] + metric_inverse = _TensorDataLazyEvaluator.inverse_matrix(metric) + array = TensExpr._contract_and_permute_with_metric(metric_inverse, array, pos, len(free_ind1)) + # Lower indices: + for pos in pos2down: + index_type_pos = index_types1[pos] + if index_type_pos not in replacement_dict: + raise ValueError("No metric provided to lower index") + metric = replacement_dict[index_type_pos] + array = TensExpr._contract_and_permute_with_metric(metric, array, pos, len(free_ind1)) + + if free_ind1: + permutation = TensExpr._get_indices_permutation(free_ind2, free_ind1) + array = permutedims(array, permutation) + + if hasattr(array, "rank") and array.rank() == 0: + array = array[()] + + return free_ind2, array + + def replace_with_arrays(self, replacement_dict, indices=None): + """ + Replace the tensorial expressions with arrays. The final array will + correspond to the N-dimensional array with indices arranged according + to ``indices``. + + Parameters + ========== + + replacement_dict + dictionary containing the replacement rules for tensors. + indices + the index order with respect to which the array is read. The + original index order will be used if no value is passed.
+ + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices + >>> from sympy.tensor.tensor import TensorHead + >>> from sympy import symbols, diag + + >>> L = TensorIndexType("L") + >>> i, j = tensor_indices("i j", L) + >>> A = TensorHead("A", [L]) + >>> A(i).replace_with_arrays({A(i): [1, 2]}, [i]) + [1, 2] + + Since ``indices`` is optional, we can also call ``replace_with_arrays`` in + this way if no specific index order is needed: + + >>> A(i).replace_with_arrays({A(i): [1, 2]}) + [1, 2] + + >>> expr = A(i)*A(j) + >>> expr.replace_with_arrays({A(i): [1, 2]}) + [[1, 2], [2, 4]] + + For contractions, specify the metric of the ``TensorIndexType``, which + in this case is ``L``, in its covariant form: + + >>> expr = A(i)*A(-i) + >>> expr.replace_with_arrays({A(i): [1, 2], L: diag(1, -1)}) + -3 + + Symmetrization of an array: + + >>> H = TensorHead("H", [L, L]) + >>> a, b, c, d = symbols("a b c d") + >>> expr = H(i, j)/2 + H(j, i)/2 + >>> expr.replace_with_arrays({H(i, j): [[a, b], [c, d]]}) + [[a, b/2 + c/2], [b/2 + c/2, d]] + + Anti-symmetrization of an array: + + >>> expr = H(i, j)/2 - H(j, i)/2 + >>> repl = {H(i, j): [[a, b], [c, d]]} + >>> expr.replace_with_arrays(repl) + [[0, b/2 - c/2], [-b/2 + c/2, 0]] + + The same expression can be read as the transpose by inverting ``i`` and + ``j``: + + >>> expr.replace_with_arrays(repl, [j, i]) + [[0, -b/2 + c/2], [b/2 - c/2, 0]] + """ + from .array import Array + + indices = indices or [] + remap = {k.args[0] if k.is_up else -k.args[0]: k for k in self.get_free_indices()} + for i, index in enumerate(indices): + if isinstance(index, (Symbol, Mul)): + if index in remap: + indices[i] = remap[index] + else: + indices[i] = -remap[-index] + + replacement_dict = {tensor: Array(array) for tensor, array in replacement_dict.items()} + + # Check dimensions of replaced arrays: + for tensor, array in replacement_dict.items(): + if isinstance(tensor, TensorIndexType): + expected_shape = [tensor.dim for i in range(2)] + else: + expected_shape = [index_type.dim for index_type in tensor.index_types] + if len(expected_shape) != array.rank() or (not all(dim1 == dim2 if + dim1.is_number else True for dim1, dim2 in zip(expected_shape, + array.shape))): + raise ValueError("shapes for tensor %s expected to be %s, "\ "replacement array shape is %s" % (tensor, expected_shape, + array.shape)) + + ret_indices, array = self._extract_data(replacement_dict) + + last_indices, array = self._match_indices_with_other_tensor(array, indices, ret_indices, replacement_dict) + return array + + def _check_add_Sum(self, expr, index_symbols): + from sympy.concrete.summations import Sum + indices = self.get_indices() + dum = self.dum + sum_indices = [ (index_symbols[i], 0, + indices[i].tensor_index_type.dim-1) for i, j in dum] + if sum_indices: + expr = Sum(expr, *sum_indices) + return expr + + def _expand_partial_derivative(self): + # simply delegate the _expand_partial_derivative() to + # its arguments to expand a possibly found PartialDerivative + return self.func(*[ + a._expand_partial_derivative() + if isinstance(a, TensExpr) else a + for a in self.args]) + + +class TensAdd(TensExpr, AssocOp): + """ + Sum of tensors.
+ + Parameters + ========== + + args : addends (tensor expressions) + + Attributes + ========== + + ``args`` : tuple of addends + ``rank`` : rank of the tensor + ``free_args`` : list of the free indices in sorted order + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_heads, tensor_indices + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> a, b = tensor_indices('a,b', Lorentz) + >>> p, q = tensor_heads('p,q', [Lorentz]) + >>> t = p(a) + q(a); t + p(a) + q(a) + + Examples with components data added to the tensor expression: + + >>> from sympy import symbols, diag + >>> x, y, z, t = symbols("x y z t") + >>> repl = {} + >>> repl[Lorentz] = diag(1, -1, -1, -1) + >>> repl[p(a)] = [1, 2, 3, 4] + >>> repl[q(a)] = [x, y, z, t] + + The sum of the two tensors is then evaluated componentwise: + + >>> expr = p(a) + q(a) + >>> expr.replace_with_arrays(repl, [a]) + [x + 1, y + 2, z + 3, t + 4] + """ + + def __new__(cls, *args, **kw_args): + args = [_sympify(x) for x in args if x] + args = TensAdd._tensAdd_flatten(args) + args.sort(key=default_sort_key) + if not args: + return S.Zero + if len(args) == 1: + return args[0] + + return Basic.__new__(cls, *args, **kw_args) + + @property + def coeff(self): + return S.One + + @property + def nocoeff(self): + return self + + def get_free_indices(self) -> list[TensorIndex]: + return self.free_indices + + def _replace_indices(self, repl: dict[TensorIndex, TensorIndex]) -> TensExpr: + newargs = [arg._replace_indices(repl) if isinstance(arg, TensExpr) else arg for arg in self.args] + return self.func(*newargs) + + @memoize_property + def rank(self): + if isinstance(self.args[0], TensExpr): + return self.args[0].rank + else: + return 0 + + @memoize_property + def free_args(self): + if isinstance(self.args[0], TensExpr): + return self.args[0].free_args + else: + return [] + + @memoize_property + def free_indices(self): + if isinstance(self.args[0], TensExpr): + return self.args[0].get_free_indices() + else: + return set() + + def doit(self, **hints): + deep = hints.get('deep', True) + if deep: + args = [arg.doit(**hints) for arg in self.args] + else: + args = self.args + + # If any of the args are zero (after doit), drop them. Otherwise, _tensAdd_check will complain about non-matching indices, even though the TensAdd is correctly formed.
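+ # (e.g. an addend that only evaluates to zero during the deep doit call, + # such as a vanishing PartialDerivative, would otherwise make the + # free-index check below fail against the scalar zero)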
+ args = [arg for arg in args if arg != S.Zero] + + if len(args) == 0: + return S.Zero + elif len(args) == 1: + return args[0] + + # now check that all addends have the same indices: + TensAdd._tensAdd_check(args) + + # Collect terms appearing more than once, differing by their coefficients: + args = TensAdd._tensAdd_collect_terms(args) + + # collect canonicalized terms + def sort_key(t): + if not isinstance(t, TensExpr): + return [], [], [] + if hasattr(t, "_index_structure") and hasattr(t, "components"): + x = get_index_structure(t) + return t.components, x.free, x.dum + return [], [], [] + args.sort(key=sort_key) + + if not args: + return S.Zero + # if there is only one addend left, return it + if len(args) == 1: + return args[0] + + obj = self.func(*args) + return obj + + @staticmethod + def _tensAdd_flatten(args): + # flatten TensAdd, coerce terms which are not tensors to tensors + a = [] + for x in args: + if isinstance(x, (Add, TensAdd)): + a.extend(list(x.args)) + else: + a.append(x) + args = [x for x in a if x.coeff] + return args + + @staticmethod + def _tensAdd_check(args): + # check that all addends have the same free indices + + def get_indices_set(x: Expr) -> set[TensorIndex]: + if isinstance(x, TensExpr): + return set(x.get_free_indices()) + return set() + + indices0 = get_indices_set(args[0]) + list_indices = [get_indices_set(arg) for arg in args[1:]] + if not all(x == indices0 for x in list_indices): + raise ValueError('all tensors must have the same indices') + + @staticmethod + def _tensAdd_collect_terms(args): + # collect TensMul terms differing at most by their coefficient + terms_dict = defaultdict(list) + scalars = S.Zero + if isinstance(args[0], TensExpr): + free_indices = set(args[0].get_free_indices()) + else: + free_indices = set() + + for arg in args: + if not isinstance(arg, TensExpr): + if free_indices != set(): + raise ValueError("wrong valence") + scalars += arg + continue + if free_indices != set(arg.get_free_indices()): + raise ValueError("wrong valence") + # TODO: what is the part which is not a coeff? + # needs an implementation similar to .as_coeff_Mul() + terms_dict[arg.nocoeff].append(arg.coeff) + + new_args = [TensMul(Add(*coeff), t).doit() for t, coeff in terms_dict.items() if Add(*coeff) != 0] + if isinstance(scalars, Add): + new_args = list(scalars.args) + new_args + elif scalars != 0: + new_args = [scalars] + new_args + return new_args + + def get_indices(self): + indices = [] + for arg in self.args: + indices.extend([i for i in get_indices(arg) if i not in indices]) + return indices + + def _expand(self, **hints): + return TensAdd(*[_expand(i, **hints) for i in self.args]) + + def __call__(self, *indices): + deprecate_call() + free_args = self.free_args + indices = list(indices) + if [x.tensor_index_type for x in indices] != [x.tensor_index_type for x in free_args]: + raise ValueError('incompatible types') + if indices == free_args: + return self + index_tuples = list(zip(free_args, indices)) + a = [x.func(*x.substitute_indices(*index_tuples).args) for x in self.args] + res = TensAdd(*a).doit() + return res + + def canon_bp(self): + """ + Canonicalize using the Butler-Portugal algorithm for canonicalization + under monoterm symmetries.
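+ + Examples + ======== + + A minimal sketch with an antisymmetric tensor head: + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead, TensorSymmetry + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> m0, m1 = tensor_indices('m0 m1', Lorentz) + >>> A = TensorHead('A', [Lorentz]*2, TensorSymmetry.fully_symmetric(-2)) + >>> t = A(m0, m1) + A(m1, m0) + >>> t.canon_bp() + 0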
+ """ + expr = self.expand() + args = [canon_bp(x) for x in expr.args] + res = TensAdd(*args).doit() + return res + + def equals(self, other): + other = _sympify(other) + if isinstance(other, TensMul) and other.coeff == 0: + return all(x.coeff == 0 for x in self.args) + if isinstance(other, TensExpr): + if self.rank != other.rank: + return False + if isinstance(other, TensAdd): + if set(self.args) != set(other.args): + return False + else: + return True + t = self - other + if not isinstance(t, TensExpr): + return t == 0 + else: + if isinstance(t, TensMul): + return t.coeff == 0 + else: + return all(x.coeff == 0 for x in t.args) + + def __getitem__(self, item): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + return self.data[item] + + def contract_delta(self, delta): + args = [x.contract_delta(delta) for x in self.args] + t = TensAdd(*args).doit() + return canon_bp(t) + + def contract_metric(self, g): + """ + Raise or lower indices with the metric ``g``. + + Parameters + ========== + + g : metric + + contract_all : if True, eliminate all ``g`` which are contracted + + Notes + ===== + + see the ``TensorIndexType`` docstring for the contraction conventions + """ + + args = [contract_metric(x, g) for x in self.args] + t = TensAdd(*args).doit() + return canon_bp(t) + + def substitute_indices(self, *index_tuples): + new_args = [] + for arg in self.args: + if isinstance(arg, TensExpr): + arg = arg.substitute_indices(*index_tuples) + new_args.append(arg) + return TensAdd(*new_args).doit() + + def _print(self): + a = [] + args = self.args + for x in args: + a.append(str(x)) + s = ' + '.join(a) + s = s.replace('+ -', '- ') + return s + + def _extract_data(self, replacement_dict): + from sympy.tensor.array import Array, permutedims + args_indices, arrays = zip(*[ + arg._extract_data(replacement_dict) if + isinstance(arg, TensExpr) else ([], arg) for arg in self.args + ]) + arrays = [Array(i) for i in arrays] + ref_indices = args_indices[0] + for i in range(1, len(args_indices)): + indices = args_indices[i] + array = arrays[i] + permutation = TensMul._get_indices_permutation(indices, ref_indices) + arrays[i] = permutedims(array, permutation) + return ref_indices, sum(arrays, Array.zeros(*array.shape)) + + @property + def data(self): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + return _tensor_data_substitution_dict[self.expand()] + + @data.setter + def data(self, data): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + _tensor_data_substitution_dict[self] = data + + @data.deleter + def data(self): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + if self in _tensor_data_substitution_dict: + del _tensor_data_substitution_dict[self] + + def __iter__(self): + deprecate_data() + if not self.data: + raise ValueError("No iteration on abstract tensors") + return self.data.flatten().__iter__() + + def _eval_rewrite_as_Indexed(self, *args): + return Add.fromiter(args) + + def _eval_partial_derivative(self, s): + # Evaluation like Add + list_addends = [] + for a in self.args: + if isinstance(a, TensExpr): + list_addends.append(a._eval_partial_derivative(s)) + # do not call diff if s is no symbol + elif s._diff_wrt: + list_addends.append(a._eval_derivative(s)) + + return self.func(*list_addends) + + +class Tensor(TensExpr): + """ + Base tensor class, i.e. this represents a tensor, the single unit to be + put into an expression. 
+ + Explanation + =========== + + This object is usually created from a ``TensorHead``, by attaching indices + to it. Indices preceded by a minus sign are considered covariant, + otherwise contravariant. + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead + >>> Lorentz = TensorIndexType("Lorentz", dummy_name="L") + >>> mu, nu = tensor_indices('mu nu', Lorentz) + >>> A = TensorHead("A", [Lorentz, Lorentz]) + >>> A(mu, -nu) + A(mu, -nu) + >>> A(mu, -mu) + A(L_0, -L_0) + + It is also possible to use symbols instead of indices (appropriate indices + are then generated automatically). + + >>> from sympy import Symbol + >>> x = Symbol('x') + >>> A(x, mu) + A(x, mu) + >>> A(x, -x) + A(L_0, -L_0) + + """ + + is_commutative = False + + _index_structure = None # type: _IndexStructure + args: tuple[TensorHead, Tuple] + + def __new__(cls, tensor_head, indices, *, is_canon_bp=False, **kw_args): + indices = cls._parse_indices(tensor_head, indices) + obj = Basic.__new__(cls, tensor_head, Tuple(*indices), **kw_args) + obj._index_structure = _IndexStructure.from_indices(*indices) + obj._free = obj._index_structure.free[:] + obj._dum = obj._index_structure.dum[:] + obj._ext_rank = obj._index_structure._ext_rank + obj._coeff = S.One + obj._nocoeff = obj + obj._component = tensor_head + obj._components = [tensor_head] + if tensor_head.rank != len(indices): + raise ValueError("wrong number of indices") + obj.is_canon_bp = is_canon_bp + obj._index_map = Tensor._build_index_map(indices, obj._index_structure) + return obj + + @property + def free(self): + return self._free + + @property + def dum(self): + return self._dum + + @property + def ext_rank(self): + return self._ext_rank + + @property + def coeff(self): + return self._coeff + + @property + def nocoeff(self): + return self._nocoeff + + @property + def component(self): + return self._component + + @property + def components(self): + return self._components + + @property + def head(self): + return self.args[0] + + @property + def indices(self): + return self.args[1] + + @property + def free_indices(self): + return set(self._index_structure.get_free_indices()) + + @property + def index_types(self): + return self.head.index_types + + @property + def rank(self): + return len(self.free_indices) + + @staticmethod + def _build_index_map(indices, index_structure): + index_map = {} + for idx in indices: + index_map[idx] = (indices.index(idx),) + return index_map + + def doit(self, **hints): + args, indices, free, dum = TensMul._tensMul_contract_indices([self]) + return args[0] + + @staticmethod + def _parse_indices(tensor_head, indices): + if not isinstance(indices, (tuple, list, Tuple)): + raise TypeError("indices should be an array, got %s" % type(indices)) + indices = list(indices) + for i, index in enumerate(indices): + if isinstance(index, Symbol): + indices[i] = TensorIndex(index, tensor_head.index_types[i], True) + elif isinstance(index, Mul): + c, e = index.as_coeff_Mul() + if c == -1 and isinstance(e, Symbol): + indices[i] = TensorIndex(e, tensor_head.index_types[i], False) + else: + raise ValueError("index not understood: %s" % index) + elif not isinstance(index, TensorIndex): + raise TypeError("wrong type for index: %s is %s" % (index, type(index))) + return indices + + def _set_new_index_structure(self, im, is_canon_bp=False): + indices = im.get_indices() + return self._set_indices(*indices, is_canon_bp=is_canon_bp) + + def _set_indices(self, *indices, is_canon_bp=False, **kw_args): + if
len(indices) != self.ext_rank: + raise ValueError("indices length mismatch") + return self.func(self.args[0], indices, is_canon_bp=is_canon_bp).doit() + + def _get_free_indices_set(self): + return {i[0] for i in self._index_structure.free} + + def _get_dummy_indices_set(self): + dummy_pos = set(itertools.chain(*self._index_structure.dum)) + return {idx for i, idx in enumerate(self.args[1]) if i in dummy_pos} + + def _get_indices_set(self): + return set(self.args[1].args) + + @property + def free_in_args(self): + return [(ind, pos, 0) for ind, pos in self.free] + + @property + def dum_in_args(self): + return [(p1, p2, 0, 0) for p1, p2 in self.dum] + + @property + def free_args(self): + return sorted([x[0] for x in self.free]) + + def commutes_with(self, other): + """ + Returns ``0`` if ``self`` and ``other`` commute, ``1`` if they + anticommute, ``None`` if they neither commute nor anticommute. + """ + if not isinstance(other, TensExpr): + return 0 + elif isinstance(other, Tensor): + return self.component.commutes_with(other.component) + raise NotImplementedError + + def perm2tensor(self, g, is_canon_bp=False): + """ + Returns the tensor corresponding to the permutation ``g``. + + For further details, see the method in ``TIDS`` with the same name. + """ + return perm2tensor(self, g, is_canon_bp) + + def canon_bp(self): + if self.is_canon_bp: + return self + expr = self.expand() + g, dummies, msym = expr._index_structure.indices_canon_args() + v = components_canon_args([expr.component]) + can = canonicalize(g, dummies, msym, *v) + if can == 0: + return S.Zero + tensor = self.perm2tensor(can, True) + return tensor + + def split(self): + return [self] + + def _expand(self, **kwargs): + return self + + def sorted_components(self): + return self + + def get_indices(self) -> list[TensorIndex]: + """ + Get a list of indices, corresponding to those of the tensor. + """ + return list(self.args[1]) + + def get_free_indices(self) -> list[TensorIndex]: + """ + Get a list of free indices, corresponding to those of the tensor. + """ + return self._index_structure.get_free_indices() + + def _replace_indices(self, repl: dict[TensorIndex, TensorIndex]) -> TensExpr: + # TODO: this could be optimized by only swapping the indices + # instead of visiting the whole expression tree: + return self.xreplace(repl) + + def as_base_exp(self): + return self, S.One + + def substitute_indices(self, *index_tuples): + """ + Return a tensor with free indices substituted according to ``index_tuples``. + + ``index_tuples`` : list of tuples ``(old_index, new_index)``.
+ + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads, TensorSymmetry + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> i, j, k, l = tensor_indices('i,j,k,l', Lorentz) + >>> A, B = tensor_heads('A,B', [Lorentz]*2, TensorSymmetry.fully_symmetric(2)) + >>> t = A(i, k)*B(-k, -j); t + A(i, L_0)*B(-L_0, -j) + >>> t.substitute_indices((i, k),(-j, l)) + A(k, L_0)*B(-L_0, l) + """ + indices = [] + for index in self.indices: + for ind_old, ind_new in index_tuples: + if (index.name == ind_old.name and index.tensor_index_type == + ind_old.tensor_index_type): + if index.is_up == ind_old.is_up: + indices.append(ind_new) + else: + indices.append(-ind_new) + break + else: + indices.append(index) + return self.head(*indices) + + def __call__(self, *indices): + deprecate_call() + free_args = self.free_args + indices = list(indices) + if [x.tensor_index_type for x in indices] != [x.tensor_index_type for x in free_args]: + raise ValueError('incompatible types') + if indices == free_args: + return self + t = self.substitute_indices(*list(zip(free_args, indices))) + + # object is rebuilt in order to make sure that all contracted indices + # get recognized as dummies, but only if there are contracted indices. + if len({i if i.is_up else -i for i in indices}) != len(indices): + return t.func(*t.args) + return t + + # TODO: put this into TensExpr? + def __iter__(self): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + return self.data.__iter__() + + # TODO: put this into TensExpr? + def __getitem__(self, item): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + return self.data[item] + + def _extract_data(self, replacement_dict): + from .array import Array + for k, v in replacement_dict.items(): + if isinstance(k, Tensor) and k.args[0] == self.args[0]: + other = k + array = v + break + else: + raise ValueError("%s not found in %s" % (self, replacement_dict)) + + # TODO: inefficient, this should be done at root level only: + replacement_dict = {k: Array(v) for k, v in replacement_dict.items()} + array = Array(array) + + dum1 = self.dum + dum2 = other.dum + + if len(dum2) > 0: + for pair in dum2: + # allow `dum2` if the contained values are also in `dum1`. + if pair not in dum1: + raise NotImplementedError("%s with contractions is not implemented" % other) + # Remove elements in `dum2` from `dum1`: + dum1 = [pair for pair in dum1 if pair not in dum2] + if len(dum1) > 0: + indices1 = self.get_indices() + indices2 = other.get_indices() + repl = {} + for p1, p2 in dum1: + repl[indices2[p2]] = -indices2[p1] + for pos in (p1, p2): + if indices1[pos].is_up ^ indices2[pos].is_up: + metric = replacement_dict[indices1[pos].tensor_index_type] + if indices1[pos].is_up: + metric = _TensorDataLazyEvaluator.inverse_matrix(metric) + array = self._contract_and_permute_with_metric(metric, array, pos, len(indices2)) + other = other.xreplace(repl).doit() + array = _TensorDataLazyEvaluator.data_contract_dum([array], dum1, len(indices2)) + + free_ind1 = self.get_free_indices() + free_ind2 = other.get_free_indices() + + return self._match_indices_with_other_tensor(array, free_ind1, free_ind2, replacement_dict) + + @property + def data(self): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + return _tensor_data_substitution_dict[self] + + @data.setter + def data(self, data): + deprecate_data() + # TODO: check data compatibility with properties of tensor. 
+ with ignore_warnings(SymPyDeprecationWarning): + _tensor_data_substitution_dict[self] = data + + @data.deleter + def data(self): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + if self in _tensor_data_substitution_dict: + del _tensor_data_substitution_dict[self] + if self.metric in _tensor_data_substitution_dict: + del _tensor_data_substitution_dict[self.metric] + + def _print(self): + indices = [str(ind) for ind in self.indices] + component = self.component + if component.rank > 0: + return ('%s(%s)' % (component.name, ', '.join(indices))) + else: + return ('%s' % component.name) + + def equals(self, other): + if other == 0: + return self.coeff == 0 + other = _sympify(other) + if not isinstance(other, TensExpr): + assert not self.components + return S.One == other + + def _get_compar_comp(self): + t = self.canon_bp() + r = (t.coeff, tuple(t.components), \ + tuple(sorted(t.free)), tuple(sorted(t.dum))) + return r + + return _get_compar_comp(self) == _get_compar_comp(other) + + def contract_metric(self, g): + # if metric is not the same, ignore this step: + if self.component != g: + return self + # in case there are free components, do not perform anything: + if len(self.free) != 0: + return self + + #antisym = g.index_types[0].metric_antisym + if g.symmetry == TensorSymmetry.fully_symmetric(-2): + antisym = 1 + elif g.symmetry == TensorSymmetry.fully_symmetric(2): + antisym = 0 + elif g.symmetry == TensorSymmetry.no_symmetry(2): + antisym = None + else: + raise NotImplementedError + sign = S.One + typ = g.index_types[0] + + if not antisym: + # g(i, -i) + sign = sign*typ.dim + else: + # g(i, -i) + sign = sign*typ.dim + + dp0, dp1 = self.dum[0] + if dp0 < dp1: + # g(i, -i) = -D with antisymmetric metric + sign = -sign + + return sign + + def contract_delta(self, metric): + return self.contract_metric(metric) + + def _eval_rewrite_as_Indexed(self, tens, indices): + from sympy.tensor.indexed import Indexed + # TODO: replace .args[0] with .name: + index_symbols = [i.args[0] for i in self.get_indices()] + expr = Indexed(tens.args[0], *index_symbols) + return self._check_add_Sum(expr, index_symbols) + + def _eval_partial_derivative(self, s): # type: (Tensor) -> Expr + + if not isinstance(s, Tensor): + return S.Zero + else: + + # @a_i/@a_k = delta_i^k + # @a_i/@a^k = g_ij delta^j_k + # @a^i/@a^k = delta^i_k + # @a^i/@a_k = g^ij delta_j^k + # TODO: if there is no metric present, the derivative should be zero? 
+
+            if self.head != s.head:
+                return S.Zero
+
+            # if the heads are the same, provide delta and/or metric products
+            # for every free index pair in the appropriate tensor;
+            # it is assumed that the free indices are in the proper order.
+            # A contravariant index in the derivative becomes covariant
+            # after performing the derivative and vice versa.
+
+            kronecker_delta_list = [1]
+
+            # this does not guarantee a correct index order
+
+            for (count, (iself, iother)) in enumerate(zip(self.get_free_indices(), s.get_free_indices())):
+                if iself.tensor_index_type != iother.tensor_index_type:
+                    raise ValueError("index types not compatible")
+                else:
+                    tensor_index_type = iself.tensor_index_type
+                    tensor_metric = tensor_index_type.metric
+                    dummy = TensorIndex("d_" + str(count), tensor_index_type,
+                                        is_up=iself.is_up)
+                    if iself.is_up == iother.is_up:
+                        kroneckerdelta = tensor_index_type.delta(iself, -iother)
+                    else:
+                        kroneckerdelta = (
+                            TensMul(tensor_metric(iself, dummy),
+                                    tensor_index_type.delta(-dummy, -iother))
+                        )
+                    kronecker_delta_list.append(kroneckerdelta)
+            return TensMul.fromiter(kronecker_delta_list).doit()
+            # doit is necessary to rename the dummy indices accordingly
+
+
+class TensMul(TensExpr, AssocOp):
+    """
+    Product of tensors.
+
+    Parameters
+    ==========
+
+    coeff : SymPy coefficient of the tensor
+    args
+
+    Attributes
+    ==========
+
+    ``components`` : list of ``TensorHead`` of the component tensors
+    ``types`` : list of nonrepeated ``TensorIndexType``
+    ``free`` : list of ``(ind, ipos, icomp)``, see Notes
+    ``dum`` : list of ``(ipos1, ipos2, icomp1, icomp2)``, see Notes
+    ``ext_rank`` : rank of the tensor counting the dummy indices
+    ``rank`` : rank of the tensor
+    ``coeff`` : SymPy coefficient of the tensor
+    ``free_args`` : list of the free indices in sorted order
+    ``is_canon_bp`` : ``True`` if the tensor is in canonical form
+
+    Notes
+    =====
+
+    ``args[0]`` list of ``TensorHead`` of the component tensors.
+
+    ``args[1]`` list of ``(ind, ipos, icomp)``
+    where ``ind`` is a free index, ``ipos`` is the slot position
+    of ``ind`` in the ``icomp``-th component tensor.
+
+    ``args[2]`` list of tuples representing dummy indices.
+    ``(ipos1, ipos2, icomp1, icomp2)`` indicates that the contravariant
+    dummy index is in the ``ipos1``-th slot position in the ``icomp1``-th
+    component tensor; the corresponding covariant index is
+    in the ``ipos2``-th slot position in the ``icomp2``-th component tensor.
+
+    """
+    identity = S.One
+
+    _index_structure = None  # type: _IndexStructure
+
+    def __new__(cls, *args, **kw_args):
+        is_canon_bp = kw_args.get('is_canon_bp', False)
+        args = list(map(_sympify, args))
+
+        """
+        If the internal dummy indices in one arg conflict with the free indices
+        of the remaining args, we need to rename those internal dummy indices.
+ """ + free = [get_free_indices(arg) for arg in args] + free = set(itertools.chain(*free)) #flatten free + newargs = [] + for arg in args: + dum_this = set(get_dummy_indices(arg)) + dum_other = [get_dummy_indices(a) for a in newargs] + dum_other = set(itertools.chain(*dum_other)) #flatten dum_other + free_this = set(get_free_indices(arg)) + if len(dum_this.intersection(free)) > 0: + exclude = free_this.union(free, dum_other) + newarg = TensMul._dedupe_indices(arg, exclude) + else: + newarg = arg + newargs.append(newarg) + + args = newargs + + # Flatten: + args = [i for arg in args for i in (arg.args if isinstance(arg, (TensMul, Mul)) else [arg])] + + args, indices, free, dum = TensMul._tensMul_contract_indices(args, replace_indices=False) + + # Data for indices: + index_types = [i.tensor_index_type for i in indices] + index_structure = _IndexStructure(free, dum, index_types, indices, canon_bp=is_canon_bp) + + obj = TensExpr.__new__(cls, *args) + obj._indices = indices + obj._index_types = index_types[:] + obj._index_structure = index_structure + obj._free = index_structure.free[:] + obj._dum = index_structure.dum[:] + obj._free_indices = {x[0] for x in obj.free} + obj._rank = len(obj.free) + obj._ext_rank = len(obj._index_structure.free) + 2*len(obj._index_structure.dum) + obj._coeff = S.One + obj._is_canon_bp = is_canon_bp + return obj + + index_types = property(lambda self: self._index_types) + free = property(lambda self: self._free) + dum = property(lambda self: self._dum) + free_indices = property(lambda self: self._free_indices) + rank = property(lambda self: self._rank) + ext_rank = property(lambda self: self._ext_rank) + + @staticmethod + def _indices_to_free_dum(args_indices): + free2pos1 = {} + free2pos2 = {} + dummy_data = [] + indices = [] + + # Notation for positions (to better understand the code): + # `pos1`: position in the `args`. + # `pos2`: position in the indices. + + # Example: + # A(i, j)*B(k, m, n)*C(p) + # `pos1` of `n` is 1 because it's in `B` (second `args` of TensMul). + # `pos2` of `n` is 4 because it's the fifth overall index. 
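+        # As an illustrative sketch (not used by the code), the full
+        # position table for the example above would read:
+        #   index:  i  j  k  m  n  p
+        #   pos1:   0  0  1  1  1  2
+        #   pos2:   0  1  2  3  4  5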
+ + # Counter for the index position wrt the whole expression: + pos2 = 0 + + for pos1, arg_indices in enumerate(args_indices): + + for index_pos, index in enumerate(arg_indices): + if not isinstance(index, TensorIndex): + raise TypeError("expected TensorIndex") + if -index in free2pos1: + # Dummy index detected: + other_pos1 = free2pos1.pop(-index) + other_pos2 = free2pos2.pop(-index) + if index.is_up: + dummy_data.append((index, pos1, other_pos1, pos2, other_pos2)) + else: + dummy_data.append((-index, other_pos1, pos1, other_pos2, pos2)) + indices.append(index) + elif index in free2pos1: + raise ValueError("Repeated index: %s" % index) + else: + free2pos1[index] = pos1 + free2pos2[index] = pos2 + indices.append(index) + pos2 += 1 + + free = [(i, p) for (i, p) in free2pos2.items()] + free_names = [i.name for i in free2pos2.keys()] + + dummy_data.sort(key=lambda x: x[3]) + return indices, free, free_names, dummy_data + + @staticmethod + def _dummy_data_to_dum(dummy_data): + return [(p2a, p2b) for (i, p1a, p1b, p2a, p2b) in dummy_data] + + @staticmethod + def _tensMul_contract_indices(args, replace_indices=True): + replacements = [{} for _ in args] + + #_index_order = all(_has_index_order(arg) for arg in args) + + args_indices = [get_indices(arg) for arg in args] + indices, free, free_names, dummy_data = TensMul._indices_to_free_dum(args_indices) + + cdt = defaultdict(int) + + def dummy_name_gen(tensor_index_type): + nd = str(cdt[tensor_index_type]) + cdt[tensor_index_type] += 1 + return tensor_index_type.dummy_name + '_' + nd + + if replace_indices: + for old_index, pos1cov, pos1contra, pos2cov, pos2contra in dummy_data: + index_type = old_index.tensor_index_type + while True: + dummy_name = dummy_name_gen(index_type) + if dummy_name not in free_names: + break + dummy = TensorIndex(dummy_name, index_type, True) + replacements[pos1cov][old_index] = dummy + replacements[pos1contra][-old_index] = -dummy + indices[pos2cov] = dummy + indices[pos2contra] = -dummy + args = [ + arg._replace_indices(repl) if isinstance(arg, TensExpr) else arg + for arg, repl in zip(args, replacements)] + + dum = TensMul._dummy_data_to_dum(dummy_data) + return args, indices, free, dum + + @staticmethod + def _get_components_from_args(args): + """ + Get a list of ``Tensor`` objects having the same ``TIDS`` if multiplied + by one another. + """ + components = [] + for arg in args: + if not isinstance(arg, TensExpr): + continue + if isinstance(arg, TensAdd): + continue + components.extend(arg.components) + return components + + @staticmethod + def _rebuild_tensors_list(args, index_structure): + indices = index_structure.get_indices() + #tensors = [None for i in components] # pre-allocate list + ind_pos = 0 + for i, arg in enumerate(args): + if not isinstance(arg, TensExpr): + continue + prev_pos = ind_pos + ind_pos += arg.ext_rank + args[i] = Tensor(arg.component, indices[prev_pos:ind_pos]) + + def doit(self, **hints): + is_canon_bp = self._is_canon_bp + deep = hints.get('deep', True) + if deep: + args = [arg.doit(**hints) for arg in self.args] + + """ + There may now be conflicts between dummy indices of different args + (each arg's doit method does not have any information about which + dummy indices are already used in the other args), so we + deduplicate them. 
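+            The deduplication below walks the args in order, so each arg's
+            replacement also avoids the dummy names already claimed by the
+            preceding args.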
+ """ + rule = dict(zip(self.args, args)) + rule = self._dedupe_indices_in_rule(rule) + args = [rule[a] for a in self.args] + + else: + args = self.args + + args = [arg for arg in args if arg != self.identity] + + # Extract non-tensor coefficients: + coeff = reduce(lambda a, b: a*b, [arg for arg in args if not isinstance(arg, TensExpr)], S.One) + args = [arg for arg in args if isinstance(arg, TensExpr)] + + if len(args) == 0: + return coeff + + if coeff != self.identity: + args = [coeff] + args + if coeff == 0: + return S.Zero + + if len(args) == 1: + return args[0] + + args, indices, free, dum = TensMul._tensMul_contract_indices(args) + + # Data for indices: + index_types = [i.tensor_index_type for i in indices] + index_structure = _IndexStructure(free, dum, index_types, indices, canon_bp=is_canon_bp) + + obj = self.func(*args) + obj._index_types = index_types + obj._index_structure = index_structure + obj._ext_rank = len(obj._index_structure.free) + 2*len(obj._index_structure.dum) + obj._coeff = coeff + obj._is_canon_bp = is_canon_bp + return obj + + # TODO: this method should be private + # TODO: should this method be renamed _from_components_free_dum ? + @staticmethod + def from_data(coeff, components, free, dum, **kw_args): + return TensMul(coeff, *TensMul._get_tensors_from_components_free_dum(components, free, dum), **kw_args).doit() + + @staticmethod + def _get_tensors_from_components_free_dum(components, free, dum): + """ + Get a list of ``Tensor`` objects by distributing ``free`` and ``dum`` indices on the ``components``. + """ + index_structure = _IndexStructure.from_components_free_dum(components, free, dum) + indices = index_structure.get_indices() + tensors = [None for i in components] # pre-allocate list + + # distribute indices on components to build a list of tensors: + ind_pos = 0 + for i, component in enumerate(components): + prev_pos = ind_pos + ind_pos += component.rank + tensors[i] = Tensor(component, indices[prev_pos:ind_pos]) + return tensors + + def _get_free_indices_set(self): + return {i[0] for i in self.free} + + def _get_dummy_indices_set(self): + dummy_pos = set(itertools.chain(*self.dum)) + return {idx for i, idx in enumerate(self._index_structure.get_indices()) if i in dummy_pos} + + def _get_position_offset_for_indices(self): + arg_offset = [None for i in range(self.ext_rank)] + counter = 0 + for i, arg in enumerate(self.args): + if not isinstance(arg, TensExpr): + continue + for j in range(arg.ext_rank): + arg_offset[j + counter] = counter + counter += arg.ext_rank + return arg_offset + + @property + def free_args(self): + return sorted([x[0] for x in self.free]) + + @property + def components(self): + return self._get_components_from_args(self.args) + + @property + def free_in_args(self): + arg_offset = self._get_position_offset_for_indices() + argpos = self._get_indices_to_args_pos() + return [(ind, pos-arg_offset[pos], argpos[pos]) for (ind, pos) in self.free] + + @property + def coeff(self): + # return Mul.fromiter([c for c in self.args if not isinstance(c, TensExpr)]) + return self._coeff + + @property + def nocoeff(self): + return self.func(*[t for t in self.args if isinstance(t, TensExpr)]).doit() + + @property + def dum_in_args(self): + arg_offset = self._get_position_offset_for_indices() + argpos = self._get_indices_to_args_pos() + return [(p1-arg_offset[p1], p2-arg_offset[p2], argpos[p1], argpos[p2]) for p1, p2 in self.dum] + + def equals(self, other): + if other == 0: + return self.coeff == 0 + other = _sympify(other) + if not isinstance(other, 
TensExpr): + assert not self.components + return self.coeff == other + + return self.canon_bp() == other.canon_bp() + + def get_indices(self): + """ + Returns the list of indices of the tensor. + + Explanation + =========== + + The indices are listed in the order in which they appear in the + component tensors. + The dummy indices are given a name which does not collide with + the names of the free indices. + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz) + >>> g = Lorentz.metric + >>> p, q = tensor_heads('p,q', [Lorentz]) + >>> t = p(m1)*g(m0,m2) + >>> t.get_indices() + [m1, m0, m2] + >>> t2 = p(m1)*g(-m1, m2) + >>> t2.get_indices() + [L_0, -L_0, m2] + """ + return self._indices + + def get_free_indices(self) -> list[TensorIndex]: + """ + Returns the list of free indices of the tensor. + + Explanation + =========== + + The indices are listed in the order in which they appear in the + component tensors. + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz) + >>> g = Lorentz.metric + >>> p, q = tensor_heads('p,q', [Lorentz]) + >>> t = p(m1)*g(m0,m2) + >>> t.get_free_indices() + [m1, m0, m2] + >>> t2 = p(m1)*g(-m1, m2) + >>> t2.get_free_indices() + [m2] + """ + return self._index_structure.get_free_indices() + + def _replace_indices(self, repl: dict[TensorIndex, TensorIndex]) -> TensExpr: + return self.func(*[arg._replace_indices(repl) if isinstance(arg, TensExpr) else arg for arg in self.args]) + + def split(self): + """ + Returns a list of tensors, whose product is ``self``. + + Explanation + =========== + + Dummy indices contracted among different tensor components + become free indices with the same name as the one used to + represent the dummy indices. + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads, TensorSymmetry + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> a, b, c, d = tensor_indices('a,b,c,d', Lorentz) + >>> A, B = tensor_heads('A,B', [Lorentz]*2, TensorSymmetry.fully_symmetric(2)) + >>> t = A(a,b)*B(-b,c) + >>> t + A(a, L_0)*B(-L_0, c) + >>> t.split() + [A(a, L_0), B(-L_0, c)] + """ + if self.args == (): + return [self] + splitp = [] + res = 1 + for arg in self.args: + if isinstance(arg, Tensor): + splitp.append(res*arg) + res = 1 + else: + res *= arg + return splitp + + def _expand(self, **hints): + # TODO: temporary solution, in the future this should be linked to + # `Expr.expand`. 
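+        # Sketch of what follows: each factor is expanded, then the product
+        # is distributed over any sums among the factors, e.g. (schematically)
+        # A(i)*(B(j) + C(j)) -> A(i)*B(j) + A(i)*C(j); itertools.product
+        # enumerates one addend per factor for each resulting TensMul term.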
+        args = [_expand(arg, **hints) for arg in self.args]
+        args1 = [arg.args if isinstance(arg, (Add, TensAdd)) else (arg,) for arg in args]
+        return TensAdd(*[
+            TensMul(*i) for i in itertools.product(*args1)]
+        )
+
+    def __neg__(self):
+        return TensMul(S.NegativeOne, self, is_canon_bp=self._is_canon_bp).doit()
+
+    def __getitem__(self, item):
+        deprecate_data()
+        with ignore_warnings(SymPyDeprecationWarning):
+            return self.data[item]
+
+    def _get_args_for_traditional_printer(self):
+        args = list(self.args)
+        if self.coeff.could_extract_minus_sign():
+            # expressions like "-A(a)"
+            sign = "-"
+            if args[0] == S.NegativeOne:
+                args = args[1:]
+            else:
+                args[0] = -args[0]
+        else:
+            sign = ""
+        return sign, args
+
+    def _sort_args_for_sorted_components(self):
+        """
+        Returns the ``args`` sorted according to the components commutation
+        properties.
+
+        Explanation
+        ===========
+
+        The sorting is done taking into account the commutation group
+        of the component tensors.
+        """
+        cv = [arg for arg in self.args if isinstance(arg, TensExpr)]
+        sign = 1
+        n = len(cv) - 1
+        for i in range(n):
+            for j in range(n, i, -1):
+                c = cv[j-1].commutes_with(cv[j])
+                # if `c` is `None`, they neither commute nor anticommute; skip:
+                if c not in (0, 1):
+                    continue
+                typ1 = sorted(set(cv[j-1].component.index_types), key=lambda x: x.name)
+                typ2 = sorted(set(cv[j].component.index_types), key=lambda x: x.name)
+                if (typ1, cv[j-1].component.name) > (typ2, cv[j].component.name):
+                    cv[j-1], cv[j] = cv[j], cv[j-1]
+                    # if `c` is 1, they anticommute, so change the sign:
+                    if c:
+                        sign = -sign
+
+        coeff = sign * self.coeff
+        if coeff != 1:
+            return [coeff] + cv
+        return cv
+
+    def sorted_components(self):
+        """
+        Returns a tensor product with sorted components.
+        """
+        return TensMul(*self._sort_args_for_sorted_components()).doit()
+
+    def perm2tensor(self, g, is_canon_bp=False):
+        """
+        Returns the tensor corresponding to the permutation ``g``
+
+        For further details, see the method in ``TIDS`` with the same name.
+        """
+        return perm2tensor(self, g, is_canon_bp=is_canon_bp)
+
+    def canon_bp(self):
+        """
+        Canonicalize the tensor product using the Butler-Portugal algorithm
+        for canonicalization under monoterm symmetries.
+
+        Examples
+        ========
+
+        >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead, TensorSymmetry
+        >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
+        >>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz)
+        >>> A = TensorHead('A', [Lorentz]*2, TensorSymmetry.fully_symmetric(-2))
+        >>> t = A(m0,-m1)*A(m1,-m0)
+        >>> t.canon_bp()
+        -A(L_0, L_1)*A(-L_0, -L_1)
+        >>> t = A(m0,-m1)*A(m1,-m2)*A(m2,-m0)
+        >>> t.canon_bp()
+        0
+        """
+        if self._is_canon_bp:
+            return self
+        expr = self.expand()
+        if isinstance(expr, TensAdd):
+            return expr.canon_bp()
+        if not expr.components:
+            return expr
+        t = expr.sorted_components()
+        g, dummies, msym = t._index_structure.indices_canon_args()
+        v = components_canon_args(t.components)
+        can = canonicalize(g, dummies, msym, *v)
+        if can == 0:
+            return S.Zero
+        tmul = t.perm2tensor(can, True)
+        return tmul
+
+    def contract_delta(self, delta):
+        t = self.contract_metric(delta)
+        return t
+
+    def _get_indices_to_args_pos(self):
+        """
+        Get a dict mapping the index position to TensMul's argument number.
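+
+        For example (schematically), if ``self.args`` were
+        ``(3, A(i, j), B(k))``, the returned map would be
+        ``{0: 1, 1: 1, 2: 2}``: index slots 0 and 1 live in ``A`` at
+        argument position 1, slot 2 in ``B`` at argument position 2, and
+        the scalar coefficient ``3`` occupies argument position 0.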
+ """ + pos_map = {} + pos_counter = 0 + for arg_i, arg in enumerate(self.args): + if not isinstance(arg, TensExpr): + continue + assert isinstance(arg, Tensor) + for i in range(arg.ext_rank): + pos_map[pos_counter] = arg_i + pos_counter += 1 + return pos_map + + def contract_metric(self, g): + """ + Raise or lower indices with the metric ``g``. + + Parameters + ========== + + g : metric + + Notes + ===== + + See the ``TensorIndexType`` docstring for the contraction conventions. + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz) + >>> g = Lorentz.metric + >>> p, q = tensor_heads('p,q', [Lorentz]) + >>> t = p(m0)*q(m1)*g(-m0, -m1) + >>> t.canon_bp() + metric(L_0, L_1)*p(-L_0)*q(-L_1) + >>> t.contract_metric(g).canon_bp() + p(L_0)*q(-L_0) + """ + expr = self.expand() + if self != expr: + expr = canon_bp(expr) + return contract_metric(expr, g) + pos_map = self._get_indices_to_args_pos() + args = list(self.args) + + #antisym = g.index_types[0].metric_antisym + if g.symmetry == TensorSymmetry.fully_symmetric(-2): + antisym = 1 + elif g.symmetry == TensorSymmetry.fully_symmetric(2): + antisym = 0 + elif g.symmetry == TensorSymmetry.no_symmetry(2): + antisym = None + else: + raise NotImplementedError + + # list of positions of the metric ``g`` inside ``args`` + gpos = [i for i, x in enumerate(self.args) if isinstance(x, Tensor) and x.component == g] + if not gpos: + return self + + # Sign is either 1 or -1, to correct the sign after metric contraction + # (for spinor indices). + sign = 1 + dum = self.dum[:] + free = self.free[:] + elim = set() + for gposx in gpos: + if gposx in elim: + continue + free1 = [x for x in free if pos_map[x[1]] == gposx] + dum1 = [x for x in dum if pos_map[x[0]] == gposx or pos_map[x[1]] == gposx] + if not dum1: + continue + elim.add(gposx) + # subs with the multiplication neutral element, that is, remove it: + args[gposx] = 1 + if len(dum1) == 2: + if not antisym: + dum10, dum11 = dum1 + if pos_map[dum10[1]] == gposx: + # the index with pos p0 contravariant + p0 = dum10[0] + else: + # the index with pos p0 is covariant + p0 = dum10[1] + if pos_map[dum11[1]] == gposx: + # the index with pos p1 is contravariant + p1 = dum11[0] + else: + # the index with pos p1 is covariant + p1 = dum11[1] + + dum.append((p0, p1)) + else: + dum10, dum11 = dum1 + # change the sign to bring the indices of the metric to contravariant + # form; change the sign if dum10 has the metric index in position 0 + if pos_map[dum10[1]] == gposx: + # the index with pos p0 is contravariant + p0 = dum10[0] + if dum10[1] == 1: + sign = -sign + else: + # the index with pos p0 is covariant + p0 = dum10[1] + if dum10[0] == 0: + sign = -sign + if pos_map[dum11[1]] == gposx: + # the index with pos p1 is contravariant + p1 = dum11[0] + sign = -sign + else: + # the index with pos p1 is covariant + p1 = dum11[1] + + dum.append((p0, p1)) + + elif len(dum1) == 1: + if not antisym: + dp0, dp1 = dum1[0] + if pos_map[dp0] == pos_map[dp1]: + # g(i, -i) + typ = g.index_types[0] + sign = sign*typ.dim + + else: + # g(i0, i1)*p(-i1) + if pos_map[dp0] == gposx: + p1 = dp1 + else: + p1 = dp0 + + ind, p = free1[0] + free.append((ind, p1)) + else: + dp0, dp1 = dum1[0] + if pos_map[dp0] == pos_map[dp1]: + # g(i, -i) + typ = g.index_types[0] + sign = sign*typ.dim + + if dp0 < dp1: + # g(i, -i) = -D with antisymmetric metric + sign = -sign + else: + # g(i0, 
i1)*p(-i1) + if pos_map[dp0] == gposx: + p1 = dp1 + if dp0 == 0: + sign = -sign + else: + p1 = dp0 + ind, p = free1[0] + free.append((ind, p1)) + dum = [x for x in dum if x not in dum1] + free = [x for x in free if x not in free1] + + # shift positions: + shift = 0 + shifts = [0]*len(args) + for i in range(len(args)): + if i in elim: + shift += 2 + continue + shifts[i] = shift + free = [(ind, p - shifts[pos_map[p]]) for (ind, p) in free if pos_map[p] not in elim] + dum = [(p0 - shifts[pos_map[p0]], p1 - shifts[pos_map[p1]]) for i, (p0, p1) in enumerate(dum) if pos_map[p0] not in elim and pos_map[p1] not in elim] + + res = sign*TensMul(*args).doit() + if not isinstance(res, TensExpr): + return res + im = _IndexStructure.from_components_free_dum(res.components, free, dum) + return res._set_new_index_structure(im) + + def _set_new_index_structure(self, im, is_canon_bp=False): + indices = im.get_indices() + return self._set_indices(*indices, is_canon_bp=is_canon_bp) + + def _set_indices(self, *indices, is_canon_bp=False, **kw_args): + if len(indices) != self.ext_rank: + raise ValueError("indices length mismatch") + args = list(self.args)[:] + pos = 0 + for i, arg in enumerate(args): + if not isinstance(arg, TensExpr): + continue + assert isinstance(arg, Tensor) + ext_rank = arg.ext_rank + args[i] = arg._set_indices(*indices[pos:pos+ext_rank]) + pos += ext_rank + return TensMul(*args, is_canon_bp=is_canon_bp).doit() + + @staticmethod + def _index_replacement_for_contract_metric(args, free, dum): + for arg in args: + if not isinstance(arg, TensExpr): + continue + assert isinstance(arg, Tensor) + + def substitute_indices(self, *index_tuples): + new_args = [] + for arg in self.args: + if isinstance(arg, TensExpr): + arg = arg.substitute_indices(*index_tuples) + new_args.append(arg) + return TensMul(*new_args).doit() + + def __call__(self, *indices): + deprecate_call() + free_args = self.free_args + indices = list(indices) + if [x.tensor_index_type for x in indices] != [x.tensor_index_type for x in free_args]: + raise ValueError('incompatible types') + if indices == free_args: + return self + t = self.substitute_indices(*list(zip(free_args, indices))) + + # object is rebuilt in order to make sure that all contracted indices + # get recognized as dummies, but only if there are contracted indices. 
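+        # (e.g. substituting the free indices with (i, -i) turns two formerly
+        # free slots into a contracted pair that must be re-detected as dummy)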
+ if len({i if i.is_up else -i for i in indices}) != len(indices): + return t.func(*t.args) + return t + + def _extract_data(self, replacement_dict): + args_indices, arrays = zip(*[arg._extract_data(replacement_dict) for arg in self.args if isinstance(arg, TensExpr)]) + coeff = reduce(operator.mul, [a for a in self.args if not isinstance(a, TensExpr)], S.One) + indices, free, free_names, dummy_data = TensMul._indices_to_free_dum(args_indices) + dum = TensMul._dummy_data_to_dum(dummy_data) + ext_rank = self.ext_rank + free.sort(key=lambda x: x[1]) + free_indices = [i[0] for i in free] + return free_indices, coeff*_TensorDataLazyEvaluator.data_contract_dum(arrays, dum, ext_rank) + + @property + def data(self): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + dat = _tensor_data_substitution_dict[self.expand()] + return dat + + @data.setter + def data(self, data): + deprecate_data() + raise ValueError("Not possible to set component data to a tensor expression") + + @data.deleter + def data(self): + deprecate_data() + raise ValueError("Not possible to delete component data to a tensor expression") + + def __iter__(self): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + if self.data is None: + raise ValueError("No iteration on abstract tensors") + return self.data.__iter__() + + @staticmethod + def _dedupe_indices(new, exclude): + """ + exclude: set + new: TensExpr + + If ``new`` has any dummy indices that are in ``exclude``, return a version + of new with those indices replaced. If no replacements are needed, + return None + + """ + exclude = set(exclude) + dums_new = set(get_dummy_indices(new)) + free_new = set(get_free_indices(new)) + + conflicts = dums_new.intersection(exclude) + if len(conflicts) == 0: + return None + + """ + ``exclude_for_gen`` is to be passed to ``_IndexStructure._get_generator_for_dummy_indices()``. + Since the latter does not use the index position for anything, we just + set it as ``None`` here. + """ + exclude.update(dums_new) + exclude.update(free_new) + exclude_for_gen = [(i, None) for i in exclude] + gen = _IndexStructure._get_generator_for_dummy_indices(exclude_for_gen) + repl = {} + for d in conflicts: + if -d in repl.keys(): + continue + newname = gen(d.tensor_index_type) + new_d = d.func(newname, *d.args[1:]) + repl[d] = new_d + repl[-d] = -new_d + + if len(repl) == 0: + return None + + new_renamed = new._replace_indices(repl) + return new_renamed + + def _dedupe_indices_in_rule(self, rule): + """ + rule: dict + + This applies TensMul._dedupe_indices on all values of rule. 
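+
+        Values whose dummy indices would clash with the indices of ``self``,
+        or with indices already introduced by earlier values, are renamed
+        before being returned.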
+
+        """
+        index_rules = {k:v for k,v in rule.items() if isinstance(k, TensorIndex)}
+        other_rules = {k:v for k,v in rule.items() if k not in index_rules.keys()}
+        exclude = set(self.get_indices())
+
+        newrule = {}
+        newrule.update(index_rules)
+        exclude.update(index_rules.keys())
+        exclude.update(index_rules.values())
+        for old, new in other_rules.items():
+            new_renamed = TensMul._dedupe_indices(new, exclude)
+            if old == new or new_renamed is None:
+                newrule[old] = new
+            else:
+                newrule[old] = new_renamed
+                exclude.update(get_indices(new_renamed))
+        return newrule
+
+    def _eval_rewrite_as_Indexed(self, *args):
+        from sympy.concrete.summations import Sum
+        index_symbols = [i.args[0] for i in self.get_indices()]
+        args = [arg.args[0] if isinstance(arg, Sum) else arg for arg in args]
+        expr = Mul.fromiter(args)
+        return self._check_add_Sum(expr, index_symbols)
+
+    def _eval_partial_derivative(self, s):
+        # Evaluation like Mul
+        terms = []
+        for i, arg in enumerate(self.args):
+            # it is necessary (if ugly) to check whether the factor being
+            # differentiated is a tensor instance or something else
+            if isinstance(arg, TensExpr):
+                d = arg._eval_partial_derivative(s)
+            else:
+                # do not call diff if s is not a symbol
+                if s._diff_wrt:
+                    d = arg._eval_derivative(s)
+                else:
+                    d = S.Zero
+            if d:
+                terms.append(TensMul.fromiter(self.args[:i] + (d,) + self.args[i + 1:]))
+        return TensAdd.fromiter(terms)
+
+
+class TensorElement(TensExpr):
+    """
+    Tensor with evaluated components.
+
+    Examples
+    ========
+
+    >>> from sympy.tensor.tensor import TensorIndexType, TensorHead, TensorSymmetry
+    >>> from sympy import symbols
+    >>> L = TensorIndexType("L")
+    >>> i, j, k = symbols("i j k")
+    >>> A = TensorHead("A", [L, L], TensorSymmetry.fully_symmetric(2))
+    >>> A(i, j).get_free_indices()
+    [i, j]
+
+    If we want to set component ``i`` to a specific value, use the
+    ``TensorElement`` class:
+
+    >>> from sympy.tensor.tensor import TensorElement
+    >>> te = TensorElement(A(i, j), {i: 2})
+
+    As index ``i`` has been fixed (``{i: 2}`` selects the third element
+    along ``i``), the free indices will only contain ``j``:
+
+    >>> te.get_free_indices()
+    [j]
+    """
+
+    def __new__(cls, expr, index_map):
+        if not isinstance(expr, Tensor):
+            # remap
+            if not isinstance(expr, TensExpr):
+                raise TypeError("%s is not a tensor expression" % expr)
+            return expr.func(*[TensorElement(arg, index_map) for arg in expr.args])
+        expr_free_indices = expr.get_free_indices()
+        name_translation = {i.args[0]: i for i in expr_free_indices}
+        index_map = {name_translation.get(index, index): value for index, value in index_map.items()}
+        index_map = {index: value for index, value in index_map.items() if index in expr_free_indices}
+        if len(index_map) == 0:
+            return expr
+        free_indices = [i for i in expr_free_indices if i not in index_map.keys()]
+        index_map = Dict(index_map)
+        obj = TensExpr.__new__(cls, expr, index_map)
+        obj._free_indices = free_indices
+        return obj
+
+    @property
+    def free(self):
+        return [(index, i) for i, index in enumerate(self.get_free_indices())]
+
+    @property
+    def dum(self):
+        # TODO: inherit dummies from expr
+        return []
+
+    @property
+    def expr(self):
+        return self._args[0]
+
+    @property
+    def index_map(self):
+        return self._args[1]
+
+    @property
+    def coeff(self):
+        return S.One
+
+    @property
+    def nocoeff(self):
+        return self
+
+    def get_free_indices(self):
+        return self._free_indices
+
+    def _replace_indices(self, repl: dict[TensorIndex, TensorIndex]) -> TensExpr:
+        # TODO: can be improved:
+        return self.xreplace(repl)
+
+    def
get_indices(self):
+        return self.get_free_indices()
+
+    def _extract_data(self, replacement_dict):
+        ret_indices, array = self.expr._extract_data(replacement_dict)
+        index_map = self.index_map
+        slice_tuple = tuple(index_map.get(i, slice(None)) for i in ret_indices)
+        ret_indices = [i for i in ret_indices if i not in index_map]
+        array = array.__getitem__(slice_tuple)
+        return ret_indices, array
+
+
+class WildTensorHead(TensorHead):
+    """
+    A wild object that is used to create ``WildTensor`` instances
+
+    Examples
+    ========
+    >>> from sympy.tensor.tensor import TensorHead, TensorIndex, WildTensorHead, TensorIndexType
+    >>> R3 = TensorIndexType('R3', dim=3)
+    >>> p = TensorIndex('p', R3)
+    >>> q = TensorIndex('q', R3)
+
+    A WildTensorHead can be created without specifying a ``TensorIndexType``
+
+    >>> W = WildTensorHead("W")
+
+    Calling it with a ``TensorIndex`` creates a ``WildTensor`` instance.
+
+    >>> type(W(p))
+    <class 'sympy.tensor.tensor.WildTensor'>
+
+    The ``TensorIndexType`` is automatically detected from the index that is passed
+
+    >>> W(p).component
+    W(R3)
+
+    Calling it with no indices returns an object that can match tensors with any number of indices.
+
+    >>> K = TensorHead('K', [R3])
+    >>> Q = TensorHead('Q', [R3, R3])
+    >>> W().matches(K(p))
+    {W: K(p)}
+    >>> W().matches(Q(p,q))
+    {W: Q(p, q)}
+
+    If you want to ignore the order of indices while matching, pass ``unordered_indices=True``.
+
+    >>> U = WildTensorHead("U", unordered_indices=True)
+    >>> W(p,q).matches(Q(q,p))
+    >>> U(p,q).matches(Q(q,p))
+    {U(R3,R3): _WildTensExpr(Q(q, p))}
+
+    Parameters
+    ==========
+    name : name of the tensor
+    unordered_indices : whether the order of the indices matters for matching
+        (default: False)
+
+    See also
+    ========
+    ``WildTensor``
+    ``TensorHead``
+
+    """
+    def __new__(cls, name, index_types=None, symmetry=None, comm=0, unordered_indices=False):
+        if isinstance(name, str):
+            name_symbol = Symbol(name)
+        elif isinstance(name, Symbol):
+            name_symbol = name
+        else:
+            raise ValueError("invalid name")
+
+        if index_types is None:
+            index_types = []
+
+        if symmetry is None:
+            symmetry = TensorSymmetry.no_symmetry(len(index_types))
+        else:
+            assert symmetry.rank == len(index_types)
+
+        if symmetry != TensorSymmetry.no_symmetry(len(index_types)):
+            raise NotImplementedError("Wild matching based on symmetry is not implemented.")
+
+        obj = Basic.__new__(cls, name_symbol, Tuple(*index_types), sympify(symmetry), sympify(comm), sympify(unordered_indices))
+        obj.comm = TensorManager.comm_symbols2i(comm)
+        obj.unordered_indices = unordered_indices
+
+        return obj
+
+    def __call__(self, *indices, **kwargs):
+        tensor = WildTensor(self, indices, **kwargs)
+        return tensor.doit()
+
+
+class WildTensor(Tensor):
+    """
+    A wild object which matches ``Tensor`` instances
+
+    Explanation
+    ===========
+    This is instantiated by attaching indices to a ``WildTensorHead`` instance.
+
+    Examples
+    ========
+    >>> from sympy.tensor.tensor import TensorHead, TensorIndex, WildTensorHead, TensorIndexType
+    >>> W = WildTensorHead("W")
+    >>> R3 = TensorIndexType('R3', dim=3)
+    >>> p = TensorIndex('p', R3)
+    >>> q = TensorIndex('q', R3)
+    >>> K = TensorHead('K', [R3])
+    >>> Q = TensorHead('Q', [R3, R3])
+
+    Matching also takes the indices into account:
+
+    >>> W(p).matches(K(p))
+    {W(R3): _WildTensExpr(K(p))}
+    >>> W(p).matches(K(q))
+    >>> W(p).matches(K(-p))
+
+    If you want to match objects with any number of indices, just use a ``WildTensor`` with no indices.
+
+ >>> W().matches(K(p)) + {W: K(p)} + >>> W().matches(Q(p,q)) + {W: Q(p, q)} + + See Also + ======== + ``WildTensorHead`` + ``Tensor`` + + """ + def __new__(cls, tensor_head, indices, **kw_args): + is_canon_bp = kw_args.pop("is_canon_bp", False) + + if tensor_head.func == TensorHead: + """ + If someone tried to call WildTensor by supplying a TensorHead (not a WildTensorHead), return a normal tensor instead. This is helpful when using subs on an expression to replace occurrences of a WildTensorHead with a TensorHead. + """ + return Tensor(tensor_head, indices, is_canon_bp=is_canon_bp, **kw_args) + elif tensor_head.func == _WildTensExpr: + return tensor_head(*indices) + + indices = cls._parse_indices(tensor_head, indices) + index_types = [ind.tensor_index_type for ind in indices] + tensor_head = tensor_head.func( + tensor_head.name, + index_types, + symmetry=None, + comm=tensor_head.comm, + unordered_indices=tensor_head.unordered_indices, + ) + + obj = Basic.__new__(cls, tensor_head, Tuple(*indices)) + obj.name = tensor_head.name + obj._index_structure = _IndexStructure.from_indices(*indices) + obj._free = obj._index_structure.free[:] + obj._dum = obj._index_structure.dum[:] + obj._ext_rank = obj._index_structure._ext_rank + obj._coeff = S.One + obj._nocoeff = obj + obj._component = tensor_head + obj._components = [tensor_head] + if tensor_head.rank != len(indices): + raise ValueError("wrong number of indices") + obj.is_canon_bp = is_canon_bp + obj._index_map = obj._build_index_map(indices, obj._index_structure) + + return obj + + + def matches(self, expr, repl_dict=None, old=False): + if not isinstance(expr, TensExpr) and expr != S(1): + return None + + if repl_dict is None: + repl_dict = {} + else: + repl_dict = repl_dict.copy() + + if len(self.indices) > 0: + if not hasattr(expr, "get_free_indices"): + return None + expr_indices = expr.get_free_indices() + if len(expr_indices) != len(self.indices): + return None + if self._component.unordered_indices: + m = self._match_indices_ignoring_order(expr) + if m is None: + return None + else: + repl_dict.update(m) + else: + for i in range(len(expr_indices)): + m = self.indices[i].matches(expr_indices[i]) + if m is None: + return None + else: + repl_dict.update(m) + + repl_dict[self.component] = _WildTensExpr(expr) + else: + #If no indices were passed to the WildTensor, it may match tensors with any number of indices. + repl_dict[self] = expr + + return repl_dict + + def _match_indices_ignoring_order(self, expr, repl_dict=None, old=False): + """ + Helper method for matches. Checks if the indices of self and expr + match disregarding index ordering. 
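+
+        Non-wild indices are matched first, then plain wild indices, then
+        wild indices that ignore the up/down position, so the most
+        constrained indices claim slots before the most permissive ones.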
+ """ + if repl_dict is None: + repl_dict = {} + else: + repl_dict = repl_dict.copy() + + def siftkey(ind): + if isinstance(ind, WildTensorIndex): + if ind.ignore_updown: + return "wild, updown" + else: + return "wild" + else: + return "nonwild" + + indices_sifted = sift(self.indices, siftkey) + + matched_indices = [] + expr_indices_remaining = expr.get_indices() + for ind in indices_sifted["nonwild"]: + matched_this_ind = False + for e_ind in expr_indices_remaining: + if e_ind in matched_indices: + continue + m = ind.matches(e_ind) + if m is not None: + matched_this_ind = True + repl_dict.update(m) + matched_indices.append(e_ind) + break + if not matched_this_ind: + return None + + expr_indices_remaining = [i for i in expr_indices_remaining if i not in matched_indices] + for ind in indices_sifted["wild"]: + matched_this_ind = False + for e_ind in expr_indices_remaining: + m = ind.matches(e_ind) + if m is not None: + if -ind in repl_dict.keys() and -repl_dict[-ind] != m[ind]: + return None + matched_this_ind = True + repl_dict.update(m) + matched_indices.append(e_ind) + break + if not matched_this_ind: + return None + + expr_indices_remaining = [i for i in expr_indices_remaining if i not in matched_indices] + for ind in indices_sifted["wild, updown"]: + matched_this_ind = False + for e_ind in expr_indices_remaining: + m = ind.matches(e_ind) + if m is not None: + if -ind in repl_dict.keys() and -repl_dict[-ind] != m[ind]: + return None + matched_this_ind = True + repl_dict.update(m) + matched_indices.append(e_ind) + break + if not matched_this_ind: + return None + + if len(matched_indices) < len(self.indices): + return None + else: + return repl_dict + +class WildTensorIndex(TensorIndex): + """ + A wild object that matches TensorIndex instances. + + Examples + ======== + >>> from sympy.tensor.tensor import TensorIndex, TensorIndexType, WildTensorIndex + >>> R3 = TensorIndexType('R3', dim=3) + >>> p = TensorIndex("p", R3) + + By default, covariant indices only match with covariant indices (and + similarly for contravariant) + + >>> q = WildTensorIndex("q", R3) + >>> (q).matches(p) + {q: p} + >>> (q).matches(-p) + + If you want matching to ignore whether the index is co/contra-variant, set + ignore_updown=True + + >>> r = WildTensorIndex("r", R3, ignore_updown=True) + >>> (r).matches(-p) + {r: -p} + >>> (r).matches(p) + {r: p} + + Parameters + ========== + name : name of the index (string), or ``True`` if you want it to be + automatically assigned + tensor_index_type : ``TensorIndexType`` of the index + is_up : flag for contravariant index (is_up=True by default) + ignore_updown : bool, Whether this should match both co- and contra-variant + indices (default:False) + """ + def __new__(cls, name, tensor_index_type, is_up=True, ignore_updown=False): + if isinstance(name, str): + name_symbol = Symbol(name) + elif isinstance(name, Symbol): + name_symbol = name + elif name is True: + name = "_i{}".format(len(tensor_index_type._autogenerated)) + name_symbol = Symbol(name) + tensor_index_type._autogenerated.append(name_symbol) + else: + raise ValueError("invalid name") + + is_up = sympify(is_up) + ignore_updown = sympify(ignore_updown) + return Basic.__new__(cls, name_symbol, tensor_index_type, is_up, ignore_updown) + + @property + def ignore_updown(self): + return self.args[3] + + def __neg__(self): + t1 = WildTensorIndex(self.name, self.tensor_index_type, + (not self.is_up), self.ignore_updown) + return t1 + + def matches(self, expr, repl_dict=None, old=False): + if not isinstance(expr, 
TensorIndex): + return None + if self.tensor_index_type != expr.tensor_index_type: + return None + if not self.ignore_updown: + if self.is_up != expr.is_up: + return None + + if repl_dict is None: + repl_dict = {} + else: + repl_dict = repl_dict.copy() + + repl_dict[self] = expr + return repl_dict + + +class _WildTensExpr(Basic): + """ + INTERNAL USE ONLY + + This is an object that helps with replacement of WildTensors in expressions. + When this object is set as the tensor_head of a WildTensor, it replaces the + WildTensor by a TensExpr (passed when initializing this object). + + Examples + ======== + >>> from sympy.tensor.tensor import WildTensorHead, TensorIndex, TensorHead, TensorIndexType + >>> W = WildTensorHead("W") + >>> R3 = TensorIndexType('R3', dim=3) + >>> p = TensorIndex('p', R3) + >>> q = TensorIndex('q', R3) + >>> K = TensorHead('K', [R3]) + >>> print( ( K(p) ).replace( W(p), W(q)*W(-q)*W(p) ) ) + K(R_0)*K(-R_0)*K(p) + + """ + def __init__(self, expr): + if not isinstance(expr, TensExpr): + raise TypeError("_WildTensExpr expects a TensExpr as argument") + self.expr = expr + + def __call__(self, *indices): + return self.expr._replace_indices(dict(zip(self.expr.get_free_indices(), indices))) + + def __neg__(self): + return self.func(self.expr*S.NegativeOne) + + def __abs__(self): + raise NotImplementedError + + def __add__(self, other): + if other.func != self.func: + raise TypeError(f"Cannot add {self.func} to {other.func}") + return self.func(self.expr+other.expr) + + def __radd__(self, other): + if other.func != self.func: + raise TypeError(f"Cannot add {self.func} to {other.func}") + return self.func(other.expr+self.expr) + + def __sub__(self, other): + return self + (-other) + + def __rsub__(self, other): + return other + (-self) + + def __mul__(self, other): + raise NotImplementedError + + def __rmul__(self, other): + raise NotImplementedError + + def __truediv__(self, other): + raise NotImplementedError + + def __rtruediv__(self, other): + raise NotImplementedError + + def __pow__(self, other): + raise NotImplementedError + + def __rpow__(self, other): + raise NotImplementedError + + +def canon_bp(p): + """ + Butler-Portugal canonicalization. See ``tensor_can.py`` from the + combinatorics module for the details. + """ + if isinstance(p, TensExpr): + return p.canon_bp() + return p + + +def tensor_mul(*a): + """ + product of tensors + """ + if not a: + return TensMul.from_data(S.One, [], [], []) + t = a[0] + for tx in a[1:]: + t = t*tx + return t + + +def riemann_cyclic_replace(t_r): + """ + replace Riemann tensor with an equivalent expression + + ``R(m,n,p,q) -> 2/3*R(m,n,p,q) - 1/3*R(m,q,n,p) + 1/3*R(m,p,n,q)`` + + """ + free = sorted(t_r.free, key=lambda x: x[1]) + m, n, p, q = [x[0] for x in free] + t0 = t_r*Rational(2, 3) + t1 = -t_r.substitute_indices((m,m),(n,q),(p,n),(q,p))*Rational(1, 3) + t2 = t_r.substitute_indices((m,m),(n,p),(p,n),(q,q))*Rational(1, 3) + t3 = t0 + t1 + t2 + return t3 + +def riemann_cyclic(t2): + """ + Replace each Riemann tensor with an equivalent expression + satisfying the cyclic identity. + + This trick is discussed in the reference guide to Cadabra. 
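+
+    As a consequence, expressions that vanish by the cyclic (first Bianchi)
+    identity are reduced to zero, as in the example below.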
+
+    Examples
+    ========
+
+    >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead, riemann_cyclic, TensorSymmetry
+    >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
+    >>> i, j, k, l = tensor_indices('i,j,k,l', Lorentz)
+    >>> R = TensorHead('R', [Lorentz]*4, TensorSymmetry.riemann())
+    >>> t = R(i,j,k,l)*(R(-i,-j,-k,-l) - 2*R(-i,-k,-j,-l))
+    >>> riemann_cyclic(t)
+    0
+    """
+    t2 = t2.expand()
+    if isinstance(t2, (TensMul, Tensor)):
+        args = [t2]
+    else:
+        args = t2.args
+    a1 = [x.split() for x in args]
+    a2 = [[riemann_cyclic_replace(tx) for tx in y] for y in a1]
+    a3 = [tensor_mul(*v) for v in a2]
+    t3 = TensAdd(*a3).doit()
+    if not t3:
+        return t3
+    else:
+        return canon_bp(t3)
+
+
+def get_lines(ex, index_type):
+    """
+    Returns ``(lines, traces, rest)`` for an index type,
+    where ``lines`` is the list of list of positions of a matrix line,
+    ``traces`` is the list of list of traced matrix lines,
+    ``rest`` is the rest of the elements of the tensor.
+    """
+    def _join_lines(a):
+        i = 0
+        while i < len(a):
+            x = a[i]
+            xend = x[-1]
+            xstart = x[0]
+            hit = True
+            while hit:
+                hit = False
+                for j in range(i + 1, len(a)):
+                    if j >= len(a):
+                        break
+                    if a[j][0] == xend:
+                        hit = True
+                        x.extend(a[j][1:])
+                        xend = x[-1]
+                        a.pop(j)
+                        continue
+                    if a[j][0] == xstart:
+                        hit = True
+                        a[i] = list(reversed(a[j][1:])) + x
+                        x = a[i]
+                        xstart = a[i][0]
+                        a.pop(j)
+                        continue
+                    if a[j][-1] == xend:
+                        hit = True
+                        x.extend(reversed(a[j][:-1]))
+                        xend = x[-1]
+                        a.pop(j)
+                        continue
+                    if a[j][-1] == xstart:
+                        hit = True
+                        a[i] = a[j][:-1] + x
+                        x = a[i]
+                        xstart = x[0]
+                        a.pop(j)
+                        continue
+            i += 1
+        return a
+
+    arguments = ex.args
+    dt = {}
+    for c in ex.args:
+        if not isinstance(c, TensExpr):
+            continue
+        if c in dt:
+            continue
+        index_types = c.index_types
+        a = []
+        for i in range(len(index_types)):
+            if index_types[i] is index_type:
+                a.append(i)
+        if len(a) > 2:
+            raise ValueError('at most two indices of type %s allowed' % index_type)
+        if len(a) == 2:
+            dt[c] = a
+    #dum = ex.dum
+    lines = []
+    traces = []
+    traces1 = []
+    #indices_to_args_pos = ex._get_indices_to_args_pos()
+    # TODO: add a dum_to_components_map ?
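+    # Each contraction linking two matrix-type slots chains the two args
+    # together; open chains end up in ``lines``, closed chains in ``traces``.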
+ for p0, p1, c0, c1 in ex.dum_in_args: + if arguments[c0] not in dt: + continue + if c0 == c1: + traces.append([c0]) + continue + ta0 = dt[arguments[c0]] + ta1 = dt[arguments[c1]] + if p0 not in ta0: + continue + if ta0.index(p0) == ta1.index(p1): + # case gamma(i,s0,-s1) in c0, gamma(j,-s0,s2) in c1; + # to deal with this case one could add to the position + # a flag for transposition; + # one could write [(c0, False), (c1, True)] + raise NotImplementedError + # if p0 == ta0[1] then G in pos c0 is mult on the right by G in c1 + # if p0 == ta0[0] then G in pos c1 is mult on the right by G in c0 + ta0 = dt[arguments[c0]] + b0, b1 = (c0, c1) if p0 == ta0[1] else (c1, c0) + lines1 = lines[:] + for line in lines: + if line[-1] == b0: + if line[0] == b1: + n = line.index(min(line)) + traces1.append(line) + traces.append(line[n:] + line[:n]) + else: + line.append(b1) + break + elif line[0] == b1: + line.insert(0, b0) + break + else: + lines1.append([b0, b1]) + + lines = [x for x in lines1 if x not in traces1] + lines = _join_lines(lines) + rest = [] + for line in lines: + for y in line: + rest.append(y) + for line in traces: + for y in line: + rest.append(y) + rest = [x for x in range(len(arguments)) if x not in rest] + + return lines, traces, rest + + +def get_free_indices(t): + if not isinstance(t, TensExpr): + return () + return t.get_free_indices() + + +def get_indices(t): + if not isinstance(t, TensExpr): + return () + return t.get_indices() + +def get_dummy_indices(t): + if not isinstance(t, TensExpr): + return () + inds = t.get_indices() + free = t.get_free_indices() + return [i for i in inds if i not in free] + +def get_index_structure(t): + if isinstance(t, TensExpr): + return t._index_structure + return _IndexStructure([], [], [], []) + + +def get_coeff(t): + if isinstance(t, Tensor): + return S.One + if isinstance(t, TensMul): + return t.coeff + if isinstance(t, TensExpr): + raise ValueError("no coefficient associated to this tensor expression") + return t + +def contract_metric(t, g): + if isinstance(t, TensExpr): + return t.contract_metric(g) + return t + + +def perm2tensor(t, g, is_canon_bp=False): + """ + Returns the tensor corresponding to the permutation ``g`` + + For further details, see the method in ``TIDS`` with the same name. 
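+
+    If the last point of the permutation ``g`` differs from ``len(g) - 1``,
+    the overall sign of the resulting tensor is flipped.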
+ """ + if not isinstance(t, TensExpr): + return t + elif isinstance(t, (Tensor, TensMul)): + nim = get_index_structure(t).perm2tensor(g, is_canon_bp=is_canon_bp) + res = t._set_new_index_structure(nim, is_canon_bp=is_canon_bp) + if g[-1] != len(g) - 1: + return -res + + return res + raise NotImplementedError() + + +def substitute_indices(t, *index_tuples): + if not isinstance(t, TensExpr): + return t + return t.substitute_indices(*index_tuples) + + +def _expand(expr, **kwargs): + if isinstance(expr, TensExpr): + return expr._expand(**kwargs) + else: + return expr.expand(**kwargs) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/tensor/toperators.py b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/toperators.py new file mode 100644 index 0000000000000000000000000000000000000000..1bdd67c4f4a7e86b9821ee55b1d2f9bde29c96a8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/tensor/toperators.py @@ -0,0 +1,256 @@ +from sympy import permutedims +from sympy.core.numbers import Number +from sympy.core.singleton import S +from sympy.core.symbol import Symbol +from sympy.core.sympify import sympify +from sympy.tensor.tensor import Tensor, TensExpr, TensAdd, TensMul + + +class PartialDerivative(TensExpr): + """ + Partial derivative for tensor expressions. + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, TensorHead + >>> from sympy.tensor.toperators import PartialDerivative + >>> from sympy import symbols + >>> L = TensorIndexType("L") + >>> A = TensorHead("A", [L]) + >>> B = TensorHead("B", [L]) + >>> i, j, k = symbols("i j k") + + >>> expr = PartialDerivative(A(i), A(j)) + >>> expr + PartialDerivative(A(i), A(j)) + + The ``PartialDerivative`` object behaves like a tensorial expression: + + >>> expr.get_indices() + [i, -j] + + Notice that the deriving variables have opposite valence than the + printed one: ``A(j)`` is printed as covariant, but the index of the + derivative is actually contravariant, i.e. ``-j``. + + Indices can be contracted: + + >>> expr = PartialDerivative(A(i), A(i)) + >>> expr + PartialDerivative(A(L_0), A(L_0)) + >>> expr.get_indices() + [L_0, -L_0] + + The method ``.get_indices()`` always returns all indices (even the + contracted ones). If only uncontracted indices are needed, call + ``.get_free_indices()``: + + >>> expr.get_free_indices() + [] + + Nested partial derivatives are flattened: + + >>> expr = PartialDerivative(PartialDerivative(A(i), A(j)), A(k)) + >>> expr + PartialDerivative(A(i), A(j), A(k)) + >>> expr.get_indices() + [i, -j, -k] + + Replace a derivative with array values: + + >>> from sympy.abc import x, y + >>> from sympy import sin, log + >>> compA = [sin(x), log(x)*y**3] + >>> compB = [x, y] + >>> expr = PartialDerivative(A(i), B(j)) + >>> expr.replace_with_arrays({A(i): compA, B(i): compB}) + [[cos(x), 0], [y**3/x, 3*y**2*log(x)]] + + The returned array is indexed by `(i, -j)`. + + Be careful that other SymPy modules put the indices of the deriving + variables before the indices of the derivand in the derivative result. + For example: + + >>> expr.get_free_indices() + [i, -j] + + >>> from sympy import Matrix, Array + >>> Matrix(compA).diff(Matrix(compB)).reshape(2, 2) + [[cos(x), y**3/x], [0, 3*y**2*log(x)]] + >>> Array(compA).diff(Array(compB)) + [[cos(x), y**3/x], [0, 3*y**2*log(x)]] + + These are the transpose of the result of ``PartialDerivative``, + as the matrix and the array modules put the index `-j` before `i` in the + derivative result. 
An array read with index order `(-j, i)` is indeed the + transpose of the same array read with index order `(i, -j)`. By specifying + the index order to ``.replace_with_arrays`` one can get a compatible + expression: + + >>> expr.replace_with_arrays({A(i): compA, B(i): compB}, [-j, i]) + [[cos(x), y**3/x], [0, 3*y**2*log(x)]] + """ + + def __new__(cls, expr, *variables): + + # Flatten: + if isinstance(expr, PartialDerivative): + variables = expr.variables + variables + expr = expr.expr + + args, indices, free, dum = cls._contract_indices_for_derivative( + S(expr), variables) + + obj = TensExpr.__new__(cls, *args) + + obj._indices = indices + obj._free = free + obj._dum = dum + return obj + + @property + def coeff(self): + return S.One + + @property + def nocoeff(self): + return self + + @classmethod + def _contract_indices_for_derivative(cls, expr, variables): + variables_opposite_valence = [] + + for i in variables: + if isinstance(i, Tensor): + i_free_indices = i.get_free_indices() + variables_opposite_valence.append( + i.xreplace({k: -k for k in i_free_indices})) + elif isinstance(i, Symbol): + variables_opposite_valence.append(i) + + args, indices, free, dum = TensMul._tensMul_contract_indices( + [expr] + variables_opposite_valence, replace_indices=True) + + for i in range(1, len(args)): + args_i = args[i] + if isinstance(args_i, Tensor): + i_indices = args[i].get_free_indices() + args[i] = args[i].xreplace({k: -k for k in i_indices}) + + return args, indices, free, dum + + def doit(self, **hints): + args, indices, free, dum = self._contract_indices_for_derivative(self.expr, self.variables) + + obj = self.func(*args) + obj._indices = indices + obj._free = free + obj._dum = dum + + return obj + + def _expand_partial_derivative(self): + args, indices, free, dum = self._contract_indices_for_derivative(self.expr, self.variables) + + obj = self.func(*args) + obj._indices = indices + obj._free = free + obj._dum = dum + + result = obj + + if not args[0].free_symbols: + return S.Zero + elif isinstance(obj.expr, TensAdd): + # take care of sums of multi PDs + result = obj.expr.func(*[ + self.func(a, *obj.variables)._expand_partial_derivative() + for a in result.expr.args]) + elif isinstance(obj.expr, TensMul): + # take care of products of multi PDs + if len(obj.variables) == 1: + # derivative with respect to single variable + terms = [] + mulargs = list(obj.expr.args) + for ind in range(len(mulargs)): + if not isinstance(sympify(mulargs[ind]), Number): + # a number coefficient is not considered for + # expansion of PartialDerivative + d = self.func(mulargs[ind], *obj.variables)._expand_partial_derivative() + terms.append(TensMul(*(mulargs[:ind] + + [d] + + mulargs[(ind + 1):]))) + result = TensAdd.fromiter(terms) + else: + # derivative with respect to multiple variables + # decompose: + # partial(expr, (u, v)) + # = partial(partial(expr, u).doit(), v).doit() + result = obj.expr # init with expr + for v in obj.variables: + result = self.func(result, v)._expand_partial_derivative() + # then throw PD on it + + return result + + def _perform_derivative(self): + result = self.expr + for v in self.variables: + if isinstance(result, TensExpr): + result = result._eval_partial_derivative(v) + else: + if v._diff_wrt: + result = result._eval_derivative(v) + else: + result = S.Zero + return result + + def get_indices(self): + return self._indices + + def get_free_indices(self): + free = sorted(self._free, key=lambda x: x[1]) + return [i[0] for i in free] + + def _replace_indices(self, repl): + expr = 
self.expr.xreplace(repl) + mirrored = {-k: -v for k, v in repl.items()} + variables = [i.xreplace(mirrored) for i in self.variables] + return self.func(expr, *variables) + + @property + def expr(self): + return self.args[0] + + @property + def variables(self): + return self.args[1:] + + def _extract_data(self, replacement_dict): + from .array import derive_by_array, tensorcontraction + indices, array = self.expr._extract_data(replacement_dict) + for variable in self.variables: + var_indices, var_array = variable._extract_data(replacement_dict) + var_indices = [-i for i in var_indices] + coeff_array, var_array = zip(*[i.as_coeff_Mul() for i in var_array]) + dim_before = len(array.shape) + array = derive_by_array(array, var_array) + dim_after = len(array.shape) + dim_increase = dim_after - dim_before + array = permutedims(array, [i + dim_increase for i in range(dim_before)] + list(range(dim_increase))) + array = array.as_mutable() + varindex = var_indices[0] + # Remove coefficients of base vector: + coeff_index = [0] + [slice(None) for i in range(len(indices))] + for i, coeff in enumerate(coeff_array): + coeff_index[0] = i + array[tuple(coeff_index)] /= coeff + if -varindex in indices: + pos = indices.index(-varindex) + array = tensorcontraction(array, (0, pos+1)) + indices.pop(pos) + else: + indices.append(varindex) + return indices, array diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/testing/__init__.py b/llmeval-env/lib/python3.10/site-packages/sympy/testing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..92c50b991d76f0821556ddcd301eff99b1767d7a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/testing/__init__.py @@ -0,0 +1,7 @@ +"""This module contains code for running the tests in SymPy. 
+""" +from .runtests import test, doctest + +__all__ = [ + 'test', 'doctest', +] diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/testing/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/testing/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..362df2768fe6a650d6b38cbc8313464afa986cb0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/testing/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/testing/__pycache__/matrices.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/testing/__pycache__/matrices.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e87b7802ec8d994741c68cac5cb70208a9c5af5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/testing/__pycache__/matrices.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/testing/__pycache__/quality_unicode.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/testing/__pycache__/quality_unicode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d746c73914de8fd2dbad7e6355bde5ee1622df0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/testing/__pycache__/quality_unicode.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/testing/__pycache__/randtest.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/testing/__pycache__/randtest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..050b8094a2afee6aff9a75ebc344f7e4b8a7fdae Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/testing/__pycache__/randtest.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/testing/__pycache__/runtests.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sympy/testing/__pycache__/runtests.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bab4062d7b723904f894878f2a80b88a85578482 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sympy/testing/__pycache__/runtests.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/testing/matrices.py b/llmeval-env/lib/python3.10/site-packages/sympy/testing/matrices.py new file mode 100644 index 0000000000000000000000000000000000000000..236a384366df7f69d0d92f43f7e007e95c12388c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/testing/matrices.py @@ -0,0 +1,8 @@ +def allclose(A, B, rtol=1e-05, atol=1e-08): + if len(A) != len(B): + return False + + for x, y in zip(A, B): + if abs(x-y) > atol + rtol * max(abs(x), abs(y)): + return False + return True diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/testing/pytest.py b/llmeval-env/lib/python3.10/site-packages/sympy/testing/pytest.py new file mode 100644 index 0000000000000000000000000000000000000000..ff92cfa0029c4b0f4f76114277cb409153e1474e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/testing/pytest.py @@ -0,0 +1,388 @@ +"""py.test hacks to support XFAIL/XPASS""" + +import sys +import re +import functools +import os +import contextlib +import warnings +import inspect +import pathlib +from typing import Any, Callable + +from sympy.utilities.exceptions import SymPyDeprecationWarning +# Imported here for backwards compatibility. 
Note: do not import this from
+# here in library code (importing sympy.testing.pytest in library code will
+# break the pytest integration).
+from sympy.utilities.exceptions import ignore_warnings # noqa:F401
+
+ON_CI = os.getenv('CI', None) == "true"
+
+try:
+    import pytest
+    USE_PYTEST = getattr(sys, '_running_pytest', False)
+except ImportError:
+    USE_PYTEST = False
+
+
+raises: Callable[[Any, Any], Any]
+XFAIL: Callable[[Any], Any]
+skip: Callable[[Any], Any]
+SKIP: Callable[[Any], Any]
+slow: Callable[[Any], Any]
+nocache_fail: Callable[[Any], Any]
+
+
+if USE_PYTEST:
+    raises = pytest.raises
+    skip = pytest.skip
+    XFAIL = pytest.mark.xfail
+    SKIP = pytest.mark.skip
+    slow = pytest.mark.slow
+    nocache_fail = pytest.mark.nocache_fail
+    from _pytest.outcomes import Failed
+
+else:
+    # Not using pytest so define the things that would have been imported
+    # from there.
+
+    # _pytest._code.code.ExceptionInfo
+    class ExceptionInfo:
+        def __init__(self, value):
+            self.value = value
+
+        def __repr__(self):
+            return "<ExceptionInfo {!r}>".format(self.value)
+
+
+    def raises(expectedException, code=None):
+        """
+        Tests that ``code`` raises the exception ``expectedException``.
+
+        ``code`` may be a callable, such as a lambda expression or function
+        name.
+
+        If ``code`` is not given or None, ``raises`` will return a context
+        manager for use in ``with`` statements; the code to execute then
+        comes from the scope of the ``with``.
+
+        ``raises()`` does nothing if the callable raises the expected
+        exception, otherwise it raises a ``Failed`` exception.
+
+        Examples
+        ========
+
+        >>> from sympy.testing.pytest import raises
+
+        >>> raises(ZeroDivisionError, lambda: 1/0)
+        <ExceptionInfo ZeroDivisionError('division by zero')>
+        >>> raises(ZeroDivisionError, lambda: 1/2)
+        Traceback (most recent call last):
+        ...
+        Failed: DID NOT RAISE
+
+        >>> with raises(ZeroDivisionError):
+        ...     n = 1/0
+        >>> with raises(ZeroDivisionError):
+        ...     n = 1/2
+        Traceback (most recent call last):
+        ...
+        Failed: DID NOT RAISE
+
+        Note that you cannot test multiple statements via
+        ``with raises``:
+
+        >>> with raises(ZeroDivisionError):
+        ...     n = 1/0    # will execute and raise, aborting the ``with``
+        ...     n = 9999/0 # never executed
+
+        This is just what ``with`` is supposed to do: abort the
+        contained statement sequence at the first exception and let
+        the context manager deal with the exception.
+
+        To test multiple statements, you'll need a separate ``with``
+        for each:
+
+        >>> with raises(ZeroDivisionError):
+        ...     n = 1/0    # will execute and raise
+        >>> with raises(ZeroDivisionError):
+        ...
n = 9999/0 # will also execute and raise + + """ + if code is None: + return RaisesContext(expectedException) + elif callable(code): + try: + code() + except expectedException as e: + return ExceptionInfo(e) + raise Failed("DID NOT RAISE") + elif isinstance(code, str): + raise TypeError( + '\'raises(xxx, "code")\' has been phased out; ' + 'change \'raises(xxx, "expression")\' ' + 'to \'raises(xxx, lambda: expression)\', ' + '\'raises(xxx, "statement")\' ' + 'to \'with raises(xxx): statement\'') + else: + raise TypeError( + 'raises() expects a callable for the 2nd argument.') + + class RaisesContext: + def __init__(self, expectedException): + self.expectedException = expectedException + + def __enter__(self): + return None + + def __exit__(self, exc_type, exc_value, traceback): + if exc_type is None: + raise Failed("DID NOT RAISE") + return issubclass(exc_type, self.expectedException) + + class XFail(Exception): + pass + + class XPass(Exception): + pass + + class Skipped(Exception): + pass + + class Failed(Exception): # type: ignore + pass + + def XFAIL(func): + def wrapper(): + try: + func() + except Exception as e: + message = str(e) + if message != "Timeout": + raise XFail(func.__name__) + else: + raise Skipped("Timeout") + raise XPass(func.__name__) + + wrapper = functools.update_wrapper(wrapper, func) + return wrapper + + def skip(str): + raise Skipped(str) + + def SKIP(reason): + """Similar to ``skip()``, but this is a decorator. """ + def wrapper(func): + def func_wrapper(): + raise Skipped(reason) + + func_wrapper = functools.update_wrapper(func_wrapper, func) + return func_wrapper + + return wrapper + + def slow(func): + func._slow = True + + def func_wrapper(): + func() + + func_wrapper = functools.update_wrapper(func_wrapper, func) + func_wrapper.__wrapped__ = func + return func_wrapper + + def nocache_fail(func): + "Dummy decorator for marking tests that fail when cache is disabled" + return func + +@contextlib.contextmanager +def warns(warningcls, *, match='', test_stacklevel=True): + ''' + Like raises but tests that warnings are emitted. + + >>> from sympy.testing.pytest import warns + >>> import warnings + + >>> with warns(UserWarning): + ... warnings.warn('deprecated', UserWarning, stacklevel=2) + + >>> with warns(UserWarning): + ... pass + Traceback (most recent call last): + ... + Failed: DID NOT WARN. No warnings of type UserWarning\ + was emitted. The list of emitted warnings is: []. + + ``test_stacklevel`` makes it check that the ``stacklevel`` parameter to + ``warn()`` is set so that the warning shows the user line of code (the + code under the warns() context manager). Set this to False if this is + ambiguous or if the context manager does not test the direct user code + that emits the warning. + + If the warning is a ``SymPyDeprecationWarning``, this additionally tests + that the ``active_deprecations_target`` is a real target in the + ``active-deprecations.md`` file. + + ''' + # Absorbs all warnings in warnrec + with warnings.catch_warnings(record=True) as warnrec: + # Any warning other than the one we are looking for is an error + warnings.simplefilter("error") + warnings.filterwarnings("always", category=warningcls) + # Now run the test + yield warnrec + + # Raise if expected warning not found + if not any(issubclass(w.category, warningcls) for w in warnrec): + msg = ('Failed: DID NOT WARN.' + ' No warnings of type %s was emitted.' + ' The list of emitted warnings is: %s.' 
+            ) % (warningcls, [w.message for w in warnrec])
+            raise Failed(msg)
+
+    # We don't include the match in the filter above because it would then
+    # fall to the error filter, so we instead manually check that it matches
+    # here
+    for w in warnrec:
+        # Should always be true due to the filters above
+        assert issubclass(w.category, warningcls)
+        if not re.compile(match, re.I).match(str(w.message)):
+            raise Failed(f"Failed: WRONG MESSAGE. A warning of the correct category ({warningcls.__name__}) was issued, but it did not match the given match regex ({match!r})")
+
+    if test_stacklevel:
+        for f in inspect.stack():
+            thisfile = f.filename
+            file = os.path.split(thisfile)[1]
+            if file.startswith('test_'):
+                break
+            elif file == 'doctest.py':
+                # skip the stacklevel testing in the doctests of this
+                # function
+                return
+        else:
+            raise RuntimeError("Could not find the file for the given warning to test the stacklevel")
+        for w in warnrec:
+            if w.filename != thisfile:
+                msg = f'''\
+Failed: Warning has the wrong stacklevel. The warning stacklevel needs to be
+set so that the line of code shown in the warning message is user code that
+calls the deprecated code (the current stacklevel is showing code from
+{w.filename} (line {w.lineno}), expected {thisfile})'''.replace('\n', ' ')
+                raise Failed(msg)
+
+    if warningcls == SymPyDeprecationWarning:
+        this_file = pathlib.Path(__file__)
+        active_deprecations_file = (this_file.parent.parent.parent / 'doc' /
+                                    'src' / 'explanation' /
+                                    'active-deprecations.md')
+        if not active_deprecations_file.exists():
+            # We can only test that the active_deprecations_target works if
+            # we are in the git repo.
+            return
+        targets = []
+        for w in warnrec:
+            targets.append(w.message.active_deprecations_target)
+        with open(active_deprecations_file, encoding="utf-8") as f:
+            text = f.read()
+        for target in targets:
+            if f'({target})=' not in text:
+                raise Failed(f"The active deprecations target {target!r} does not appear to be a valid target in the active-deprecations.md file ({active_deprecations_file}).")
+
+def _both_exp_pow(func):
+    """
+    Decorator used to run the test twice: the first time ``e^x`` is
+    represented as ``Pow(E, x)``, the second time as ``exp(x)`` (the
+    exponential object is not a power).
+
+    This is a temporary trick helping to manage the elimination of the class
+    ``exp`` in favor of a replacement by ``Pow(E, ...)``.
+    """
+    from sympy.core.parameters import _exp_is_pow
+
+    def func_wrap():
+        with _exp_is_pow(True):
+            func()
+        with _exp_is_pow(False):
+            func()
+
+    wrapper = functools.update_wrapper(func_wrap, func)
+    return wrapper
+
+
+@contextlib.contextmanager
+def warns_deprecated_sympy():
+    '''
+    Shorthand for ``warns(SymPyDeprecationWarning)``.
+
+    This is the recommended way to test that ``SymPyDeprecationWarning`` is
+    emitted for deprecated features in SymPy. To test for other warnings use
+    ``warns``. To suppress warnings without asserting that they are emitted
+    use ``ignore_warnings``.
+
+    .. note::
+
+       ``warns_deprecated_sympy()`` is only intended for internal use in the
+       SymPy test suite to test that a deprecation warning triggers properly.
+       All other code in the SymPy codebase, including documentation
+       examples, should not use deprecated behavior.
+
+       If you are a user of SymPy and you want to disable
+       SymPyDeprecationWarnings, use ``warnings`` filters (see
+       :ref:`silencing-sympy-deprecation-warnings`).
+
+    >>> from sympy.testing.pytest import warns_deprecated_sympy
+    >>> from sympy.utilities.exceptions import sympy_deprecation_warning
+    >>> with warns_deprecated_sympy():
+    ...     sympy_deprecation_warning("Don't use",
+    ...         deprecated_since_version="1.0",
+    ...         active_deprecations_target="active-deprecations")
+
+    >>> with warns_deprecated_sympy():
+    ...     pass
+    Traceback (most recent call last):
+    ...
+    Failed: DID NOT WARN. No warnings of type \
+    SymPyDeprecationWarning was emitted. The list of emitted warnings is: [].
+
+    .. note::
+
+       Sometimes the stacklevel test will fail because the same warning is
+       emitted multiple times. In this case, you can use
+       :func:`sympy.utilities.exceptions.ignore_warnings` in the code to
+       prevent the ``SymPyDeprecationWarning`` from being emitted again
+       recursively. In rare cases it is impossible to have a consistent
+       ``stacklevel`` for deprecation warnings because different ways of
+       calling a function will produce different call stacks. In those
+       cases, use ``warns(SymPyDeprecationWarning)`` instead.
+
+    See Also
+    ========
+    sympy.utilities.exceptions.SymPyDeprecationWarning
+    sympy.utilities.exceptions.sympy_deprecation_warning
+    sympy.utilities.decorator.deprecated
+
+    '''
+    with warns(SymPyDeprecationWarning):
+        yield
+
+
+def _running_under_pyodide():
+    """Test if running under pyodide."""
+    try:
+        import pyodide_js # type: ignore # noqa
+    except ImportError:
+        return False
+    else:
+        return True
+
+
+def skip_under_pyodide(message):
+    """Decorator to skip a test if running under pyodide."""
+    def decorator(test_func):
+        @functools.wraps(test_func)
+        def test_wrapper():
+            if _running_under_pyodide():
+                skip(message)
+            return test_func()
+        return test_wrapper
+    return decorator
diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/testing/quality_unicode.py b/llmeval-env/lib/python3.10/site-packages/sympy/testing/quality_unicode.py
new file mode 100644
index 0000000000000000000000000000000000000000..de575b75e8d81f1995171ff4ed514628d98e39c7
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/sympy/testing/quality_unicode.py
@@ -0,0 +1,97 @@
+import re
+import fnmatch
+
+
+message_unicode_B = \
+    "File contains a unicode character: %s, line %s, " \
+    "but is not in the whitelist. " \
+    "Add the file to the whitelist in " + __file__
+message_unicode_D = \
+    "File does not contain a unicode character: %s, " \
+    "but is in the whitelist. " \
+    "Remove the file from the whitelist in " + __file__
+
+
+encoding_header_re = re.compile(
+    r'^[ \t\f]*#.*?coding[:=][ \t]*([-_.a-zA-Z0-9]+)')
+
+# Whitelist pattern for files which can have unicode.
+unicode_whitelist = [
+    # Author names can include non-ASCII characters
+    r'*/bin/authors_update.py',
+    r'*/bin/mailmap_check.py',
+
+    # These files have functions and test functions for unicode input and
+    # output.
+    r'*/sympy/testing/tests/test_code_quality.py',
+    r'*/sympy/physics/vector/tests/test_printing.py',
+    r'*/physics/quantum/tests/test_printing.py',
+    r'*/sympy/vector/tests/test_printing.py',
+    r'*/sympy/parsing/tests/test_sympy_parser.py',
+    r'*/sympy/printing/pretty/tests/test_pretty.py',
+    r'*/sympy/printing/tests/test_conventions.py',
+    r'*/sympy/printing/tests/test_preview.py',
+    r'*/liealgebras/type_g.py',
+    r'*/liealgebras/weyl_group.py',
+    r'*/liealgebras/tests/test_type_G.py',
+
+    # wigner.py and polarization.py have unicode doctests. These probably
+    # don't need to be there but some of the examples that are there are
+    # pretty ugly without use_unicode (matrices need to be wrapped across
+    # multiple lines etc)
+    r'*/sympy/physics/wigner.py',
+    r'*/sympy/physics/optics/polarization.py',
+
+    # joint.py uses some unicode for variable names in the docstrings
+    r'*/sympy/physics/mechanics/joint.py',
+
+    # lll method has unicode in docstring references and author name
+    r'*/sympy/polys/matrices/domainmatrix.py',
+]
+
+unicode_strict_whitelist = [
+    r'*/sympy/parsing/latex/_antlr/__init__.py',
+    # test_mathematica.py uses some unicode to test that Greek characters
+    # work (#24055)
+    r'*/sympy/parsing/tests/test_mathematica.py',
+]
+
+
+def _test_this_file_encoding(
+        fname, test_file,
+        unicode_whitelist=unicode_whitelist,
+        unicode_strict_whitelist=unicode_strict_whitelist):
+    """Test helper function for the unicode test
+
+    The test may have to operate in a filewise manner, so it has been
+    moved out to a separate process.
+    """
+    has_unicode = False
+
+    is_in_whitelist = False
+    is_in_strict_whitelist = False
+    for patt in unicode_whitelist:
+        if fnmatch.fnmatch(fname, patt):
+            is_in_whitelist = True
+            break
+    for patt in unicode_strict_whitelist:
+        if fnmatch.fnmatch(fname, patt):
+            is_in_strict_whitelist = True
+            is_in_whitelist = True
+            break
+
+    if is_in_whitelist:
+        for idx, line in enumerate(test_file):
+            try:
+                line.encode(encoding='ascii')
+            except (UnicodeEncodeError, UnicodeDecodeError):
+                has_unicode = True
+
+        if not has_unicode and not is_in_strict_whitelist:
+            assert False, message_unicode_D % fname
+
+    else:
+        for idx, line in enumerate(test_file):
+            try:
+                line.encode(encoding='ascii')
+            except (UnicodeEncodeError, UnicodeDecodeError):
+                assert False, message_unicode_B % (fname, idx + 1)
diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/testing/randtest.py b/llmeval-env/lib/python3.10/site-packages/sympy/testing/randtest.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ce2c8c031eec1c886532daba32c96d83e9cf85c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/sympy/testing/randtest.py
@@ -0,0 +1,19 @@
+"""
+.. deprecated:: 1.10
+
+   ``sympy.testing.randtest`` functions have been moved to
+   :mod:`sympy.core.random`.
+
+"""
+from sympy.utilities.exceptions import sympy_deprecation_warning
+
+sympy_deprecation_warning("The sympy.testing.randtest submodule is deprecated. Use sympy.core.random instead.",
+    deprecated_since_version="1.10",
+    active_deprecations_target="deprecated-sympy-testing-randtest")
+
+from sympy.core.random import ( # noqa:F401
+    random_complex_number,
+    verify_numerically,
+    test_derivative_numerically,
+    _randrange,
+    _randint)
diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/testing/runtests.py b/llmeval-env/lib/python3.10/site-packages/sympy/testing/runtests.py
new file mode 100644
index 0000000000000000000000000000000000000000..19a16e2436048c5fc044008fefe850e03764bd30
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/sympy/testing/runtests.py
@@ -0,0 +1,2387 @@
+"""
+This is our testing framework.
+ +Goals: + +* it should be compatible with py.test and operate very similarly + (or identically) +* does not require any external dependencies +* preferably all the functionality should be in this file only +* no magic, just import the test file and execute the test functions, that's it +* portable + +""" + +import os +import sys +import platform +import inspect +import traceback +import pdb +import re +import linecache +import time +from fnmatch import fnmatch +from timeit import default_timer as clock +import doctest as pdoctest # avoid clashing with our doctest() function +from doctest import DocTestFinder, DocTestRunner +import random +import subprocess +import shutil +import signal +import stat +import tempfile +import warnings +from contextlib import contextmanager +from inspect import unwrap + +from sympy.core.cache import clear_cache +from sympy.external import import_module +from sympy.external.gmpy import GROUND_TYPES, HAS_GMPY + +IS_WINDOWS = (os.name == 'nt') +ON_CI = os.getenv('CI', None) + +# empirically generated list of the proportion of time spent running +# an even split of tests. This should periodically be regenerated. +# A list of [.6, .1, .3] would mean that if the tests are evenly split +# into '1/3', '2/3', '3/3', the first split would take 60% of the time, +# the second 10% and the third 30%. These lists are normalized to sum +# to 1, so [60, 10, 30] has the same behavior as [6, 1, 3] or [.6, .1, .3]. +# +# This list can be generated with the code: +# from time import time +# import sympy +# import os +# os.environ["CI"] = 'true' # Mock CI to get more correct densities +# delays, num_splits = [], 30 +# for i in range(1, num_splits + 1): +# tic = time() +# sympy.test(split='{}/{}'.format(i, num_splits), time_balance=False) # Add slow=True for slow tests +# delays.append(time() - tic) +# tot = sum(delays) +# print([round(x / tot, 4) for x in delays]) +SPLIT_DENSITY = [ + 0.0059, 0.0027, 0.0068, 0.0011, 0.0006, + 0.0058, 0.0047, 0.0046, 0.004, 0.0257, + 0.0017, 0.0026, 0.004, 0.0032, 0.0016, + 0.0015, 0.0004, 0.0011, 0.0016, 0.0014, + 0.0077, 0.0137, 0.0217, 0.0074, 0.0043, + 0.0067, 0.0236, 0.0004, 0.1189, 0.0142, + 0.0234, 0.0003, 0.0003, 0.0047, 0.0006, + 0.0013, 0.0004, 0.0008, 0.0007, 0.0006, + 0.0139, 0.0013, 0.0007, 0.0051, 0.002, + 0.0004, 0.0005, 0.0213, 0.0048, 0.0016, + 0.0012, 0.0014, 0.0024, 0.0015, 0.0004, + 0.0005, 0.0007, 0.011, 0.0062, 0.0015, + 0.0021, 0.0049, 0.0006, 0.0006, 0.0011, + 0.0006, 0.0019, 0.003, 0.0044, 0.0054, + 0.0057, 0.0049, 0.0016, 0.0006, 0.0009, + 0.0006, 0.0012, 0.0006, 0.0149, 0.0532, + 0.0076, 0.0041, 0.0024, 0.0135, 0.0081, + 0.2209, 0.0459, 0.0438, 0.0488, 0.0137, + 0.002, 0.0003, 0.0008, 0.0039, 0.0024, + 0.0005, 0.0004, 0.003, 0.056, 0.0026] +SPLIT_DENSITY_SLOW = [0.0086, 0.0004, 0.0568, 0.0003, 0.0032, 0.0005, 0.0004, 0.0013, 0.0016, 0.0648, 0.0198, 0.1285, 0.098, 0.0005, 0.0064, 0.0003, 0.0004, 0.0026, 0.0007, 0.0051, 0.0089, 0.0024, 0.0033, 0.0057, 0.0005, 0.0003, 0.001, 0.0045, 0.0091, 0.0006, 0.0005, 0.0321, 0.0059, 0.1105, 0.216, 0.1489, 0.0004, 0.0003, 0.0006, 0.0483] + +class Skipped(Exception): + pass + +class TimeOutError(Exception): + pass + +class DependencyError(Exception): + pass + + +def _indent(s, indent=4): + """ + Add the given number of space characters to the beginning of + every non-blank line in ``s``, and return the result. + If the string ``s`` is Unicode, it is encoded using the stdout + encoding and the ``backslashreplace`` error handler. 
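+
+    For example, with the default ``indent=4``:
+
+    >>> from sympy.testing.runtests import _indent
+    >>> print(_indent('hello'))
+        hello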
+ """ + # This regexp matches the start of non-blank lines: + return re.sub('(?m)^(?!$)', indent*' ', s) + + +pdoctest._indent = _indent # type: ignore + +# override reporter to maintain windows and python3 + + +def _report_failure(self, out, test, example, got): + """ + Report that the given example failed. + """ + s = self._checker.output_difference(example, got, self.optionflags) + s = s.encode('raw_unicode_escape').decode('utf8', 'ignore') + out(self._failure_header(test, example) + s) + + +if IS_WINDOWS: + DocTestRunner.report_failure = _report_failure # type: ignore + + +def convert_to_native_paths(lst): + """ + Converts a list of '/' separated paths into a list of + native (os.sep separated) paths and converts to lowercase + if the system is case insensitive. + """ + newlst = [] + for i, rv in enumerate(lst): + rv = os.path.join(*rv.split("/")) + # on windows the slash after the colon is dropped + if sys.platform == "win32": + pos = rv.find(':') + if pos != -1: + if rv[pos + 1] != '\\': + rv = rv[:pos + 1] + '\\' + rv[pos + 1:] + newlst.append(os.path.normcase(rv)) + return newlst + + +def get_sympy_dir(): + """ + Returns the root SymPy directory and set the global value + indicating whether the system is case sensitive or not. + """ + this_file = os.path.abspath(__file__) + sympy_dir = os.path.join(os.path.dirname(this_file), "..", "..") + sympy_dir = os.path.normpath(sympy_dir) + return os.path.normcase(sympy_dir) + + +def setup_pprint(): + from sympy.interactive.printing import init_printing + from sympy.printing.pretty.pretty import pprint_use_unicode + import sympy.interactive.printing as interactive_printing + + # force pprint to be in ascii mode in doctests + use_unicode_prev = pprint_use_unicode(False) + + # hook our nice, hash-stable strprinter + init_printing(pretty_print=False) + + # Prevent init_printing() in doctests from affecting other doctests + interactive_printing.NO_GLOBAL = True + return use_unicode_prev + + +@contextmanager +def raise_on_deprecated(): + """Context manager to make DeprecationWarning raise an error + + This is to catch SymPyDeprecationWarning from library code while running + tests and doctests. It is important to use this context manager around + each individual test/doctest in case some tests modify the warning + filters. + """ + with warnings.catch_warnings(): + warnings.filterwarnings('error', '.*', DeprecationWarning, module='sympy.*') + yield + + +def run_in_subprocess_with_hash_randomization( + function, function_args=(), + function_kwargs=None, command=sys.executable, + module='sympy.testing.runtests', force=False): + """ + Run a function in a Python subprocess with hash randomization enabled. + + If hash randomization is not supported by the version of Python given, it + returns False. Otherwise, it returns the exit value of the command. The + function is passed to sys.exit(), so the return value of the function will + be the return value. + + The environment variable PYTHONHASHSEED is used to seed Python's hash + randomization. If it is set, this function will return False, because + starting a new subprocess is unnecessary in that case. If it is not set, + one is set at random, and the tests are run. Note that if this + environment variable is set when Python starts, hash randomization is + automatically enabled. To force a subprocess to be created even if + PYTHONHASHSEED is set, pass ``force=True``. 
This flag will not force a + subprocess in Python versions that do not support hash randomization (see + below), because those versions of Python do not support the ``-R`` flag. + + ``function`` should be a string name of a function that is importable from + the module ``module``, like "_test". The default for ``module`` is + "sympy.testing.runtests". ``function_args`` and ``function_kwargs`` + should be a repr-able tuple and dict, respectively. The default Python + command is sys.executable, which is the currently running Python command. + + This function is necessary because the seed for hash randomization must be + set by the environment variable before Python starts. Hence, in order to + use a predetermined seed for tests, we must start Python in a separate + subprocess. + + Hash randomization was added in the minor Python versions 2.6.8, 2.7.3, + 3.1.5, and 3.2.3, and is enabled by default in all Python versions after + and including 3.3.0. + + Examples + ======== + + >>> from sympy.testing.runtests import ( + ... run_in_subprocess_with_hash_randomization) + >>> # run the core tests in verbose mode + >>> run_in_subprocess_with_hash_randomization("_test", + ... function_args=("core",), + ... function_kwargs={'verbose': True}) # doctest: +SKIP + # Will return 0 if sys.executable supports hash randomization and tests + # pass, 1 if they fail, and False if it does not support hash + # randomization. + + """ + cwd = get_sympy_dir() + # Note, we must return False everywhere, not None, as subprocess.call will + # sometimes return None. + + # First check if the Python version supports hash randomization + # If it does not have this support, it won't recognize the -R flag + p = subprocess.Popen([command, "-RV"], stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, cwd=cwd) + p.communicate() + if p.returncode != 0: + return False + + hash_seed = os.getenv("PYTHONHASHSEED") + if not hash_seed: + os.environ["PYTHONHASHSEED"] = str(random.randrange(2**32)) + else: + if not force: + return False + + function_kwargs = function_kwargs or {} + + # Now run the command + commandstring = ("import sys; from %s import %s;sys.exit(%s(*%s, **%s))" % + (module, function, function, repr(function_args), + repr(function_kwargs))) + + try: + p = subprocess.Popen([command, "-R", "-c", commandstring], cwd=cwd) + p.communicate() + except KeyboardInterrupt: + p.wait() + finally: + # Put the environment variable back, so that it reads correctly for + # the current Python process. + if hash_seed is None: + del os.environ["PYTHONHASHSEED"] + else: + os.environ["PYTHONHASHSEED"] = hash_seed + return p.returncode + + +def run_all_tests(test_args=(), test_kwargs=None, + doctest_args=(), doctest_kwargs=None, + examples_args=(), examples_kwargs=None): + """ + Run all tests. + + Right now, this runs the regular tests (bin/test), the doctests + (bin/doctest), and the examples (examples/all.py). + + This is what ``setup.py test`` uses. + + You can pass arguments and keyword arguments to the test functions that + support them (for now, test, doctest, and the examples). See the + docstrings of those functions for a description of the available options. + + For example, to run the solvers tests with colors turned off: + + >>> from sympy.testing.runtests import run_all_tests + >>> run_all_tests(test_args=("solvers",), + ... 
test_kwargs={"colors:False"}) # doctest: +SKIP + + """ + tests_successful = True + + test_kwargs = test_kwargs or {} + doctest_kwargs = doctest_kwargs or {} + examples_kwargs = examples_kwargs or {'quiet': True} + + try: + # Regular tests + if not test(*test_args, **test_kwargs): + # some regular test fails, so set the tests_successful + # flag to false and continue running the doctests + tests_successful = False + + # Doctests + print() + if not doctest(*doctest_args, **doctest_kwargs): + tests_successful = False + + # Examples + print() + sys.path.append("examples") # examples/all.py + from all import run_examples # type: ignore + if not run_examples(*examples_args, **examples_kwargs): + tests_successful = False + + if tests_successful: + return + else: + # Return nonzero exit code + sys.exit(1) + except KeyboardInterrupt: + print() + print("DO *NOT* COMMIT!") + sys.exit(1) + + +def test(*paths, subprocess=True, rerun=0, **kwargs): + """ + Run tests in the specified test_*.py files. + + Tests in a particular test_*.py file are run if any of the given strings + in ``paths`` matches a part of the test file's path. If ``paths=[]``, + tests in all test_*.py files are run. + + Notes: + + - If sort=False, tests are run in random order (not default). + - Paths can be entered in native system format or in unix, + forward-slash format. + - Files that are on the blacklist can be tested by providing + their path; they are only excluded if no paths are given. + + **Explanation of test results** + + ====== =============================================================== + Output Meaning + ====== =============================================================== + . passed + F failed + X XPassed (expected to fail but passed) + f XFAILed (expected to fail and indeed failed) + s skipped + w slow + T timeout (e.g., when ``--timeout`` is used) + K KeyboardInterrupt (when running the slow tests with ``--slow``, + you can interrupt one of them without killing the test runner) + ====== =============================================================== + + + Colors have no additional meaning and are used just to facilitate + interpreting the output. + + Examples + ======== + + >>> import sympy + + Run all tests: + + >>> sympy.test() # doctest: +SKIP + + Run one file: + + >>> sympy.test("sympy/core/tests/test_basic.py") # doctest: +SKIP + >>> sympy.test("_basic") # doctest: +SKIP + + Run all tests in sympy/functions/ and some particular file: + + >>> sympy.test("sympy/core/tests/test_basic.py", + ... "sympy/functions") # doctest: +SKIP + + Run all tests in sympy/core and sympy/utilities: + + >>> sympy.test("/core", "/util") # doctest: +SKIP + + Run specific test from a file: + + >>> sympy.test("sympy/core/tests/test_basic.py", + ... 
kw="test_equality") # doctest: +SKIP + + Run specific test from any file: + + >>> sympy.test(kw="subs") # doctest: +SKIP + + Run the tests with verbose mode on: + + >>> sympy.test(verbose=True) # doctest: +SKIP + + Do not sort the test output: + + >>> sympy.test(sort=False) # doctest: +SKIP + + Turn on post-mortem pdb: + + >>> sympy.test(pdb=True) # doctest: +SKIP + + Turn off colors: + + >>> sympy.test(colors=False) # doctest: +SKIP + + Force colors, even when the output is not to a terminal (this is useful, + e.g., if you are piping to ``less -r`` and you still want colors) + + >>> sympy.test(force_colors=False) # doctest: +SKIP + + The traceback verboseness can be set to "short" or "no" (default is + "short") + + >>> sympy.test(tb='no') # doctest: +SKIP + + The ``split`` option can be passed to split the test run into parts. The + split currently only splits the test files, though this may change in the + future. ``split`` should be a string of the form 'a/b', which will run + part ``a`` of ``b``. For instance, to run the first half of the test suite: + + >>> sympy.test(split='1/2') # doctest: +SKIP + + The ``time_balance`` option can be passed in conjunction with ``split``. + If ``time_balance=True`` (the default for ``sympy.test``), SymPy will attempt + to split the tests such that each split takes equal time. This heuristic + for balancing is based on pre-recorded test data. + + >>> sympy.test(split='1/2', time_balance=True) # doctest: +SKIP + + You can disable running the tests in a separate subprocess using + ``subprocess=False``. This is done to support seeding hash randomization, + which is enabled by default in the Python versions where it is supported. + If subprocess=False, hash randomization is enabled/disabled according to + whether it has been enabled or not in the calling Python process. + However, even if it is enabled, the seed cannot be printed unless it is + called from a new Python process. + + Hash randomization was added in the minor Python versions 2.6.8, 2.7.3, + 3.1.5, and 3.2.3, and is enabled by default in all Python versions after + and including 3.3.0. + + If hash randomization is not supported ``subprocess=False`` is used + automatically. + + >>> sympy.test(subprocess=False) # doctest: +SKIP + + To set the hash randomization seed, set the environment variable + ``PYTHONHASHSEED`` before running the tests. This can be done from within + Python using + + >>> import os + >>> os.environ['PYTHONHASHSEED'] = '42' # doctest: +SKIP + + Or from the command line using + + $ PYTHONHASHSEED=42 ./bin/test + + If the seed is not set, a random seed will be chosen. + + Note that to reproduce the same hash values, you must use both the same seed + as well as the same architecture (32-bit vs. 64-bit). 
+ + """ + # count up from 0, do not print 0 + print_counter = lambda i : (print("rerun %d" % (rerun-i)) + if rerun-i else None) + + if subprocess: + # loop backwards so last i is 0 + for i in range(rerun, -1, -1): + print_counter(i) + ret = run_in_subprocess_with_hash_randomization("_test", + function_args=paths, function_kwargs=kwargs) + if ret is False: + break + val = not bool(ret) + # exit on the first failure or if done + if not val or i == 0: + return val + + # rerun even if hash randomization is not supported + for i in range(rerun, -1, -1): + print_counter(i) + val = not bool(_test(*paths, **kwargs)) + if not val or i == 0: + return val + + +def _test(*paths, + verbose=False, tb="short", kw=None, pdb=False, colors=True, + force_colors=False, sort=True, seed=None, timeout=False, + fail_on_timeout=False, slow=False, enhance_asserts=False, split=None, + time_balance=True, blacklist=(), + fast_threshold=None, slow_threshold=None): + """ + Internal function that actually runs the tests. + + All keyword arguments from ``test()`` are passed to this function except for + ``subprocess``. + + Returns 0 if tests passed and 1 if they failed. See the docstring of + ``test()`` for more information. + """ + kw = kw or () + # ensure that kw is a tuple + if isinstance(kw, str): + kw = (kw,) + post_mortem = pdb + if seed is None: + seed = random.randrange(100000000) + if ON_CI and timeout is False: + timeout = 595 + fail_on_timeout = True + if ON_CI: + blacklist = list(blacklist) + ['sympy/plotting/pygletplot/tests'] + blacklist = convert_to_native_paths(blacklist) + r = PyTestReporter(verbose=verbose, tb=tb, colors=colors, + force_colors=force_colors, split=split) + t = SymPyTests(r, kw, post_mortem, seed, + fast_threshold=fast_threshold, + slow_threshold=slow_threshold) + + test_files = t.get_test_files('sympy') + + not_blacklisted = [f for f in test_files + if not any(b in f for b in blacklist)] + + if len(paths) == 0: + matched = not_blacklisted + else: + paths = convert_to_native_paths(paths) + matched = [] + for f in not_blacklisted: + basename = os.path.basename(f) + for p in paths: + if p in f or fnmatch(basename, p): + matched.append(f) + break + + density = None + if time_balance: + if slow: + density = SPLIT_DENSITY_SLOW + else: + density = SPLIT_DENSITY + + if split: + matched = split_list(matched, split, density=density) + + t._testfiles.extend(matched) + + return int(not t.test(sort=sort, timeout=timeout, slow=slow, + enhance_asserts=enhance_asserts, fail_on_timeout=fail_on_timeout)) + + +def doctest(*paths, subprocess=True, rerun=0, **kwargs): + r""" + Runs doctests in all \*.py files in the SymPy directory which match + any of the given strings in ``paths`` or all tests if paths=[]. + + Notes: + + - Paths can be entered in native system format or in unix, + forward-slash format. + - Files that are on the blacklist can be tested by providing + their path; they are only excluded if no paths are given. 
+ + Examples + ======== + + >>> import sympy + + Run all tests: + + >>> sympy.doctest() # doctest: +SKIP + + Run one file: + + >>> sympy.doctest("sympy/core/basic.py") # doctest: +SKIP + >>> sympy.doctest("polynomial.rst") # doctest: +SKIP + + Run all tests in sympy/functions/ and some particular file: + + >>> sympy.doctest("/functions", "basic.py") # doctest: +SKIP + + Run any file having polynomial in its name, doc/src/modules/polynomial.rst, + sympy/functions/special/polynomials.py, and sympy/polys/polynomial.py: + + >>> sympy.doctest("polynomial") # doctest: +SKIP + + The ``split`` option can be passed to split the test run into parts. The + split currently only splits the test files, though this may change in the + future. ``split`` should be a string of the form 'a/b', which will run + part ``a`` of ``b``. Note that the regular doctests and the Sphinx + doctests are split independently. For instance, to run the first half of + the test suite: + + >>> sympy.doctest(split='1/2') # doctest: +SKIP + + The ``subprocess`` and ``verbose`` options are the same as with the function + ``test()`` (see the docstring of that function for more information) except + that ``verbose`` may also be set equal to ``2`` in order to print + individual doctest lines, as they are being tested. + """ + # count up from 0, do not print 0 + print_counter = lambda i : (print("rerun %d" % (rerun-i)) + if rerun-i else None) + + if subprocess: + # loop backwards so last i is 0 + for i in range(rerun, -1, -1): + print_counter(i) + ret = run_in_subprocess_with_hash_randomization("_doctest", + function_args=paths, function_kwargs=kwargs) + if ret is False: + break + val = not bool(ret) + # exit on the first failure or if done + if not val or i == 0: + return val + + # rerun even if hash randomization is not supported + for i in range(rerun, -1, -1): + print_counter(i) + val = not bool(_doctest(*paths, **kwargs)) + if not val or i == 0: + return val + + +def _get_doctest_blacklist(): + '''Get the default blacklist for the doctests''' + blacklist = [] + + blacklist.extend([ + "doc/src/modules/plotting.rst", # generates live plots + "doc/src/modules/physics/mechanics/autolev_parser.rst", + "sympy/codegen/array_utils.py", # raises deprecation warning + "sympy/core/compatibility.py", # backwards compatibility shim, importing it triggers a deprecation warning + "sympy/core/trace.py", # backwards compatibility shim, importing it triggers a deprecation warning + "sympy/galgebra.py", # no longer part of SymPy + "sympy/parsing/autolev/_antlr/autolevlexer.py", # generated code + "sympy/parsing/autolev/_antlr/autolevlistener.py", # generated code + "sympy/parsing/autolev/_antlr/autolevparser.py", # generated code + "sympy/parsing/latex/_antlr/latexlexer.py", # generated code + "sympy/parsing/latex/_antlr/latexparser.py", # generated code + "sympy/plotting/pygletplot/__init__.py", # crashes on some systems + "sympy/plotting/pygletplot/plot.py", # crashes on some systems + "sympy/printing/ccode.py", # backwards compatibility shim, importing it breaks the codegen doctests + "sympy/printing/cxxcode.py", # backwards compatibility shim, importing it breaks the codegen doctests + "sympy/printing/fcode.py", # backwards compatibility shim, importing it breaks the codegen doctests + "sympy/testing/randtest.py", # backwards compatibility shim, importing it triggers a deprecation warning + "sympy/this.py", # prints text + ]) + # autolev parser tests + num = 12 + for i in range (1, num+1): + 
blacklist.append("sympy/parsing/autolev/test-examples/ruletest" + str(i) + ".py") + blacklist.extend(["sympy/parsing/autolev/test-examples/pydy-example-repo/mass_spring_damper.py", + "sympy/parsing/autolev/test-examples/pydy-example-repo/chaos_pendulum.py", + "sympy/parsing/autolev/test-examples/pydy-example-repo/double_pendulum.py", + "sympy/parsing/autolev/test-examples/pydy-example-repo/non_min_pendulum.py"]) + + if import_module('numpy') is None: + blacklist.extend([ + "sympy/plotting/experimental_lambdify.py", + "sympy/plotting/plot_implicit.py", + "examples/advanced/autowrap_integrators.py", + "examples/advanced/autowrap_ufuncify.py", + "examples/intermediate/sample.py", + "examples/intermediate/mplot2d.py", + "examples/intermediate/mplot3d.py", + "doc/src/modules/numeric-computation.rst" + ]) + else: + if import_module('matplotlib') is None: + blacklist.extend([ + "examples/intermediate/mplot2d.py", + "examples/intermediate/mplot3d.py" + ]) + else: + # Use a non-windowed backend, so that the tests work on CI + import matplotlib + matplotlib.use('Agg') + + if ON_CI or import_module('pyglet') is None: + blacklist.extend(["sympy/plotting/pygletplot"]) + + if import_module('aesara') is None: + blacklist.extend([ + "sympy/printing/aesaracode.py", + "doc/src/modules/numeric-computation.rst", + ]) + + if import_module('cupy') is None: + blacklist.extend([ + "doc/src/modules/numeric-computation.rst", + ]) + + if import_module('jax') is None: + blacklist.extend([ + "doc/src/modules/numeric-computation.rst", + ]) + + if import_module('antlr4') is None: + blacklist.extend([ + "sympy/parsing/autolev/__init__.py", + "sympy/parsing/latex/_parse_latex_antlr.py", + ]) + + if import_module('lfortran') is None: + #throws ImportError when lfortran not installed + blacklist.extend([ + "sympy/parsing/sym_expr.py", + ]) + + if import_module("scipy") is None: + # throws ModuleNotFoundError when scipy not installed + blacklist.extend([ + "doc/src/guides/solving/solve-numerically.md", + "doc/src/guides/solving/solve-ode.md", + ]) + + if import_module("numpy") is None: + # throws ModuleNotFoundError when numpy not installed + blacklist.extend([ + "doc/src/guides/solving/solve-ode.md", + "doc/src/guides/solving/solve-numerically.md", + ]) + + # disabled because of doctest failures in asmeurer's bot + blacklist.extend([ + "sympy/utilities/autowrap.py", + "examples/advanced/autowrap_integrators.py", + "examples/advanced/autowrap_ufuncify.py" + ]) + + blacklist.extend([ + "sympy/conftest.py", # Depends on pytest + ]) + + # These are deprecated stubs to be removed: + blacklist.extend([ + "sympy/utilities/tmpfiles.py", + "sympy/utilities/pytest.py", + "sympy/utilities/runtests.py", + "sympy/utilities/quality_unicode.py", + "sympy/utilities/randtest.py", + ]) + + blacklist = convert_to_native_paths(blacklist) + return blacklist + + +def _doctest(*paths, **kwargs): + """ + Internal function that actually runs the doctests. + + All keyword arguments from ``doctest()`` are passed to this function + except for ``subprocess``. + + Returns 0 if tests passed and 1 if they failed. See the docstrings of + ``doctest()`` and ``test()`` for more information. 
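+
+    A direct call looks like the following sketch (normally this function is
+    reached through ``doctest(subprocess=False)`` rather than called
+    directly):
+
+    >>> from sympy.testing.runtests import _doctest
+    >>> _doctest('printing')  # doctest: +SKIP
+    0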
+ """ + from sympy.printing.pretty.pretty import pprint_use_unicode + + normal = kwargs.get("normal", False) + verbose = kwargs.get("verbose", False) + colors = kwargs.get("colors", True) + force_colors = kwargs.get("force_colors", False) + blacklist = kwargs.get("blacklist", []) + split = kwargs.get('split', None) + + blacklist.extend(_get_doctest_blacklist()) + + # Use a non-windowed backend, so that the tests work on CI + if import_module('matplotlib') is not None: + import matplotlib + matplotlib.use('Agg') + + # Disable warnings for external modules + import sympy.external + sympy.external.importtools.WARN_OLD_VERSION = False + sympy.external.importtools.WARN_NOT_INSTALLED = False + + # Disable showing up of plots + from sympy.plotting.plot import unset_show + unset_show() + + r = PyTestReporter(verbose, split=split, colors=colors,\ + force_colors=force_colors) + t = SymPyDocTests(r, normal) + + test_files = t.get_test_files('sympy') + test_files.extend(t.get_test_files('examples', init_only=False)) + + not_blacklisted = [f for f in test_files + if not any(b in f for b in blacklist)] + if len(paths) == 0: + matched = not_blacklisted + else: + # take only what was requested...but not blacklisted items + # and allow for partial match anywhere or fnmatch of name + paths = convert_to_native_paths(paths) + matched = [] + for f in not_blacklisted: + basename = os.path.basename(f) + for p in paths: + if p in f or fnmatch(basename, p): + matched.append(f) + break + + matched.sort() + + if split: + matched = split_list(matched, split) + + t._testfiles.extend(matched) + + # run the tests and record the result for this *py portion of the tests + if t._testfiles: + failed = not t.test() + else: + failed = False + + # N.B. + # -------------------------------------------------------------------- + # Here we test *.rst and *.md files at or below doc/src. Code from these + # must be self supporting in terms of imports since there is no importing + # of necessary modules by doctest.testfile. If you try to pass *.py files + # through this they might fail because they will lack the needed imports + # and smarter parsing that can be done with source code. + # + test_files_rst = t.get_test_files('doc/src', '*.rst', init_only=False) + test_files_md = t.get_test_files('doc/src', '*.md', init_only=False) + test_files = test_files_rst + test_files_md + test_files.sort() + + not_blacklisted = [f for f in test_files + if not any(b in f for b in blacklist)] + + if len(paths) == 0: + matched = not_blacklisted + else: + # Take only what was requested as long as it's not on the blacklist. + # Paths were already made native in *py tests so don't repeat here. + # There's no chance of having a *py file slip through since we + # only have *rst files in test_files. 
+ matched = [] + for f in not_blacklisted: + basename = os.path.basename(f) + for p in paths: + if p in f or fnmatch(basename, p): + matched.append(f) + break + + if split: + matched = split_list(matched, split) + + first_report = True + for rst_file in matched: + if not os.path.isfile(rst_file): + continue + old_displayhook = sys.displayhook + try: + use_unicode_prev = setup_pprint() + out = sympytestfile( + rst_file, module_relative=False, encoding='utf-8', + optionflags=pdoctest.ELLIPSIS | pdoctest.NORMALIZE_WHITESPACE | + pdoctest.IGNORE_EXCEPTION_DETAIL) + finally: + # make sure we return to the original displayhook in case some + # doctest has changed that + sys.displayhook = old_displayhook + # The NO_GLOBAL flag overrides the no_global flag to init_printing + # if True + import sympy.interactive.printing as interactive_printing + interactive_printing.NO_GLOBAL = False + pprint_use_unicode(use_unicode_prev) + + rstfailed, tested = out + if tested: + failed = rstfailed or failed + if first_report: + first_report = False + msg = 'rst/md doctests start' + if not t._testfiles: + r.start(msg=msg) + else: + r.write_center(msg) + print() + # use as the id, everything past the first 'sympy' + file_id = rst_file[rst_file.find('sympy') + len('sympy') + 1:] + print(file_id, end=" ") + # get at least the name out so it is know who is being tested + wid = r.terminal_width - len(file_id) - 1 # update width + test_file = '[%s]' % (tested) + report = '[%s]' % (rstfailed or 'OK') + print(''.join( + [test_file, ' '*(wid - len(test_file) - len(report)), report]) + ) + + # the doctests for *py will have printed this message already if there was + # a failure, so now only print it if there was intervening reporting by + # testing the *rst as evidenced by first_report no longer being True. + if not first_report and failed: + print() + print("DO *NOT* COMMIT!") + + return int(failed) + +sp = re.compile(r'([0-9]+)/([1-9][0-9]*)') + +def split_list(l, split, density=None): + """ + Splits a list into part a of b + + split should be a string of the form 'a/b'. For instance, '1/3' would give + the split one of three. + + If the length of the list is not divisible by the number of splits, the + last split will have more items. + + `density` may be specified as a list. If specified, + tests will be balanced so that each split has as equal-as-possible + amount of mass according to `density`. 
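+
+    With a ``density`` list, boundaries are chosen for equal mass rather
+    than equal length. For instance, if the first item carries half of the
+    total mass (an illustrative density, not one of the recorded
+    ``SPLIT_DENSITY`` lists), the first of two splits holds only that item
+    and the remaining nine items fall to the second split:
+
+    >>> from sympy.testing.runtests import split_list
+    >>> split_list(list(range(10)), '1/2', density=[9] + [1]*9)
+    [0]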
+ + >>> from sympy.testing.runtests import split_list + >>> a = list(range(10)) + >>> split_list(a, '1/3') + [0, 1, 2] + >>> split_list(a, '2/3') + [3, 4, 5] + >>> split_list(a, '3/3') + [6, 7, 8, 9] + """ + m = sp.match(split) + if not m: + raise ValueError("split must be a string of the form a/b where a and b are ints") + i, t = map(int, m.groups()) + + if not density: + return l[(i - 1)*len(l)//t : i*len(l)//t] + + # normalize density + tot = sum(density) + density = [x / tot for x in density] + + def density_inv(x): + """Interpolate the inverse to the cumulative + distribution function given by density""" + if x <= 0: + return 0 + if x >= sum(density): + return 1 + + # find the first time the cumulative sum surpasses x + # and linearly interpolate + cumm = 0 + for i, d in enumerate(density): + cumm += d + if cumm >= x: + break + frac = (d - (cumm - x)) / d + return (i + frac) / len(density) + + lower_frac = density_inv((i - 1) / t) + higher_frac = density_inv(i / t) + return l[int(lower_frac*len(l)) : int(higher_frac*len(l))] + +from collections import namedtuple +SymPyTestResults = namedtuple('SymPyTestResults', 'failed attempted') + +def sympytestfile(filename, module_relative=True, name=None, package=None, + globs=None, verbose=None, report=True, optionflags=0, + extraglobs=None, raise_on_error=False, + parser=pdoctest.DocTestParser(), encoding=None): + + """ + Test examples in the given file. Return (#failures, #tests). + + Optional keyword arg ``module_relative`` specifies how filenames + should be interpreted: + + - If ``module_relative`` is True (the default), then ``filename`` + specifies a module-relative path. By default, this path is + relative to the calling module's directory; but if the + ``package`` argument is specified, then it is relative to that + package. To ensure os-independence, ``filename`` should use + "/" characters to separate path segments, and should not + be an absolute path (i.e., it may not begin with "/"). + + - If ``module_relative`` is False, then ``filename`` specifies an + os-specific path. The path may be absolute or relative (to + the current working directory). + + Optional keyword arg ``name`` gives the name of the test; by default + use the file's basename. + + Optional keyword argument ``package`` is a Python package or the + name of a Python package whose directory should be used as the + base directory for a module relative filename. If no package is + specified, then the calling module's directory is used as the base + directory for module relative filenames. It is an error to + specify ``package`` if ``module_relative`` is False. + + Optional keyword arg ``globs`` gives a dict to be used as the globals + when executing examples; by default, use {}. A copy of this dict + is actually used for each docstring, so that each docstring's + examples start with a clean slate. + + Optional keyword arg ``extraglobs`` gives a dictionary that should be + merged into the globals that are used to execute examples. By + default, no extra globals are used. + + Optional keyword arg ``verbose`` prints lots of stuff if true, prints + only failures if false; by default, it's true iff "-v" is in sys.argv. + + Optional keyword arg ``report`` prints a summary at the end when true, + else prints nothing at the end. In verbose mode, the summary is + detailed, else very brief (in fact, empty if all tests passed). + + Optional keyword arg ``optionflags`` or's together module constants, + and defaults to 0. 
Possible values (see the docs for details): + + - DONT_ACCEPT_TRUE_FOR_1 + - DONT_ACCEPT_BLANKLINE + - NORMALIZE_WHITESPACE + - ELLIPSIS + - SKIP + - IGNORE_EXCEPTION_DETAIL + - REPORT_UDIFF + - REPORT_CDIFF + - REPORT_NDIFF + - REPORT_ONLY_FIRST_FAILURE + + Optional keyword arg ``raise_on_error`` raises an exception on the + first unexpected exception or failure. This allows failures to be + post-mortem debugged. + + Optional keyword arg ``parser`` specifies a DocTestParser (or + subclass) that should be used to extract tests from the files. + + Optional keyword arg ``encoding`` specifies an encoding that should + be used to convert the file to unicode. + + Advanced tomfoolery: testmod runs methods of a local instance of + class doctest.Tester, then merges the results into (or creates) + global Tester instance doctest.master. Methods of doctest.master + can be called directly too, if you want to do something unusual. + Passing report=0 to testmod is especially useful then, to delay + displaying a summary. Invoke doctest.master.summarize(verbose) + when you're done fiddling. + """ + if package and not module_relative: + raise ValueError("Package may only be specified for module-" + "relative paths.") + + # Relativize the path + text, filename = pdoctest._load_testfile( + filename, package, module_relative, encoding) + + # If no name was given, then use the file's name. + if name is None: + name = os.path.basename(filename) + + # Assemble the globals. + if globs is None: + globs = {} + else: + globs = globs.copy() + if extraglobs is not None: + globs.update(extraglobs) + if '__name__' not in globs: + globs['__name__'] = '__main__' + + if raise_on_error: + runner = pdoctest.DebugRunner(verbose=verbose, optionflags=optionflags) + else: + runner = SymPyDocTestRunner(verbose=verbose, optionflags=optionflags) + runner._checker = SymPyOutputChecker() + + # Read the file, convert it to a test, and run it. + test = parser.get_doctest(text, globs, name, filename, 0) + runner.run(test) + + if report: + runner.summarize() + + if pdoctest.master is None: + pdoctest.master = runner + else: + pdoctest.master.merge(runner) + + return SymPyTestResults(runner.failures, runner.tries) + + +class SymPyTests: + + def __init__(self, reporter, kw="", post_mortem=False, + seed=None, fast_threshold=None, slow_threshold=None): + self._post_mortem = post_mortem + self._kw = kw + self._count = 0 + self._root_dir = get_sympy_dir() + self._reporter = reporter + self._reporter.root_dir(self._root_dir) + self._testfiles = [] + self._seed = seed if seed is not None else random.random() + + # Defaults in seconds, from human / UX design limits + # http://www.nngroup.com/articles/response-times-3-important-limits/ + # + # These defaults are *NOT* set in stone as we are measuring different + # things, so others feel free to come up with a better yardstick :) + if fast_threshold: + self._fast_threshold = float(fast_threshold) + else: + self._fast_threshold = 8 + if slow_threshold: + self._slow_threshold = float(slow_threshold) + else: + self._slow_threshold = 10 + + def test(self, sort=False, timeout=False, slow=False, + enhance_asserts=False, fail_on_timeout=False): + """ + Runs the tests returning True if all tests pass, otherwise False. + + If sort=False run tests in random order. 
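+
+        The usual driver is ``_test()`` above; a manual setup follows the
+        same pattern (a sketch only: it assumes the reporter's keyword
+        defaults, hence the skips):
+
+        >>> from sympy.testing.runtests import SymPyTests, PyTestReporter
+        >>> t = SymPyTests(PyTestReporter())  # doctest: +SKIP
+        >>> t._testfiles.extend(t.get_test_files('sympy/core'))  # doctest: +SKIP
+        >>> t.test(sort=True)  # doctest: +SKIP
+        True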
+ """ + if sort: + self._testfiles.sort() + elif slow: + pass + else: + random.seed(self._seed) + random.shuffle(self._testfiles) + self._reporter.start(self._seed) + for f in self._testfiles: + try: + self.test_file(f, sort, timeout, slow, + enhance_asserts, fail_on_timeout) + except KeyboardInterrupt: + print(" interrupted by user") + self._reporter.finish() + raise + return self._reporter.finish() + + def _enhance_asserts(self, source): + from ast import (NodeTransformer, Compare, Name, Store, Load, Tuple, + Assign, BinOp, Str, Mod, Assert, parse, fix_missing_locations) + + ops = {"Eq": '==', "NotEq": '!=', "Lt": '<', "LtE": '<=', + "Gt": '>', "GtE": '>=', "Is": 'is', "IsNot": 'is not', + "In": 'in', "NotIn": 'not in'} + + class Transform(NodeTransformer): + def visit_Assert(self, stmt): + if isinstance(stmt.test, Compare): + compare = stmt.test + values = [compare.left] + compare.comparators + names = [ "_%s" % i for i, _ in enumerate(values) ] + names_store = [ Name(n, Store()) for n in names ] + names_load = [ Name(n, Load()) for n in names ] + target = Tuple(names_store, Store()) + value = Tuple(values, Load()) + assign = Assign([target], value) + new_compare = Compare(names_load[0], compare.ops, names_load[1:]) + msg_format = "\n%s " + "\n%s ".join([ ops[op.__class__.__name__] for op in compare.ops ]) + "\n%s" + msg = BinOp(Str(msg_format), Mod(), Tuple(names_load, Load())) + test = Assert(new_compare, msg, lineno=stmt.lineno, col_offset=stmt.col_offset) + return [assign, test] + else: + return stmt + + tree = parse(source) + new_tree = Transform().visit(tree) + return fix_missing_locations(new_tree) + + def test_file(self, filename, sort=True, timeout=False, slow=False, + enhance_asserts=False, fail_on_timeout=False): + reporter = self._reporter + funcs = [] + try: + gl = {'__file__': filename} + try: + open_file = lambda: open(filename, encoding="utf8") + + with open_file() as f: + source = f.read() + if self._kw: + for l in source.splitlines(): + if l.lstrip().startswith('def '): + if any(l.lower().find(k.lower()) != -1 for k in self._kw): + break + else: + return + + if enhance_asserts: + try: + source = self._enhance_asserts(source) + except ImportError: + pass + + code = compile(source, filename, "exec", flags=0, dont_inherit=True) + exec(code, gl) + except (SystemExit, KeyboardInterrupt): + raise + except ImportError: + reporter.import_error(filename, sys.exc_info()) + return + except Exception: + reporter.test_exception(sys.exc_info()) + + clear_cache() + self._count += 1 + random.seed(self._seed) + disabled = gl.get("disabled", False) + if not disabled: + # we need to filter only those functions that begin with 'test_' + # We have to be careful about decorated functions. As long as + # the decorator uses functools.wraps, we can detect it. + funcs = [] + for f in gl: + if (f.startswith("test_") and (inspect.isfunction(gl[f]) + or inspect.ismethod(gl[f]))): + func = gl[f] + # Handle multiple decorators + while hasattr(func, '__wrapped__'): + func = func.__wrapped__ + + if inspect.getsourcefile(func) == filename: + funcs.append(gl[f]) + if slow: + funcs = [f for f in funcs if getattr(f, '_slow', False)] + # Sorting of XFAILed functions isn't fixed yet :-( + funcs.sort(key=lambda x: inspect.getsourcelines(x)[1]) + i = 0 + while i < len(funcs): + if inspect.isgeneratorfunction(funcs[i]): + # some tests can be generators, that return the actual + # test functions. 
We unpack them below: + f = funcs.pop(i) + for fg in f(): + func = fg[0] + args = fg[1:] + # bind func and args via default arguments; a bare + # lambda would close over the loop variables and every + # wrapper would end up calling the last generated test + fgw = lambda func=func, args=args: func(*args) + funcs.insert(i, fgw) + i += 1 + else: + i += 1 + # drop functions that are not selected with the keyword expression: + funcs = [x for x in funcs if self.matches(x)] + + if not funcs: + return + except Exception: + reporter.entering_filename(filename, len(funcs)) + raise + + reporter.entering_filename(filename, len(funcs)) + if not sort: + random.shuffle(funcs) + + for f in funcs: + start = time.time() + reporter.entering_test(f) + try: + if getattr(f, '_slow', False) and not slow: + raise Skipped("Slow") + with raise_on_deprecated(): + if timeout: + self._timeout(f, timeout, fail_on_timeout) + else: + random.seed(self._seed) + f() + except KeyboardInterrupt: + if getattr(f, '_slow', False): + reporter.test_skip("KeyboardInterrupt") + else: + raise + except Exception: + if timeout: + signal.alarm(0) # Disable the alarm; it could not be cancelled earlier. + t, v, tr = sys.exc_info() + if t is AssertionError: + reporter.test_fail((t, v, tr)) + if self._post_mortem: + pdb.post_mortem(tr) + elif t.__name__ == "Skipped": + reporter.test_skip(v) + elif t.__name__ == "XFail": + reporter.test_xfail() + elif t.__name__ == "XPass": + reporter.test_xpass(v) + else: + reporter.test_exception((t, v, tr)) + if self._post_mortem: + pdb.post_mortem(tr) + else: + reporter.test_pass() + taken = time.time() - start + if taken > self._slow_threshold: + filename = os.path.relpath(filename, reporter._root_dir) + reporter.slow_test_functions.append( + (filename + "::" + f.__name__, taken)) + if getattr(f, '_slow', False) and slow: + if taken < self._fast_threshold: + filename = os.path.relpath(filename, reporter._root_dir) + reporter.fast_test_functions.append( + (filename + "::" + f.__name__, taken)) + reporter.leaving_filename() + + def _timeout(self, function, timeout, fail_on_timeout): + def callback(x, y): + signal.alarm(0) + if fail_on_timeout: + raise TimeOutError("Timed out after %d seconds" % timeout) + else: + raise Skipped("Timeout") + signal.signal(signal.SIGALRM, callback) + signal.alarm(timeout) # Set an alarm with a given timeout + function() + signal.alarm(0) # Disable the alarm + + def matches(self, x): + """ + Does the keyword expression self._kw match "x"? Returns True/False. + + Always returns True if self._kw is "". + """ + if not self._kw: + return True + for kw in self._kw: + if x.__name__.lower().find(kw.lower()) != -1: + return True + return False + + def get_test_files(self, dir, pat='test_*.py'): + """ + Returns the list of test_*.py (default) files at or below directory + ``dir`` relative to the SymPy home directory. + """ + dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0]) + + g = [] + for path, folders, files in os.walk(dir): + g.extend([os.path.join(path, f) for f in files if fnmatch(f, pat)]) + + return sorted([os.path.normcase(gi) for gi in g]) + + +class SymPyDocTests: + + def __init__(self, reporter, normal): + self._count = 0 + self._root_dir = get_sympy_dir() + self._reporter = reporter + self._reporter.root_dir(self._root_dir) + self._normal = normal + + self._testfiles = [] + + def test(self): + """ + Runs the tests and returns True if all tests pass, otherwise False.
+ """ + self._reporter.start() + for f in self._testfiles: + try: + self.test_file(f) + except KeyboardInterrupt: + print(" interrupted by user") + self._reporter.finish() + raise + return self._reporter.finish() + + def test_file(self, filename): + clear_cache() + + from io import StringIO + import sympy.interactive.printing as interactive_printing + from sympy.printing.pretty.pretty import pprint_use_unicode + + rel_name = filename[len(self._root_dir) + 1:] + dirname, file = os.path.split(filename) + module = rel_name.replace(os.sep, '.')[:-3] + + if rel_name.startswith("examples"): + # Examples files do not have __init__.py files, + # So we have to temporarily extend sys.path to import them + sys.path.insert(0, dirname) + module = file[:-3] # remove ".py" + try: + module = pdoctest._normalize_module(module) + tests = SymPyDocTestFinder().find(module) + except (SystemExit, KeyboardInterrupt): + raise + except ImportError: + self._reporter.import_error(filename, sys.exc_info()) + return + finally: + if rel_name.startswith("examples"): + del sys.path[0] + + tests = [test for test in tests if len(test.examples) > 0] + # By default tests are sorted by alphabetical order by function name. + # We sort by line number so one can edit the file sequentially from + # bottom to top. However, if there are decorated functions, their line + # numbers will be too large and for now one must just search for these + # by text and function name. + tests.sort(key=lambda x: -x.lineno) + + if not tests: + return + self._reporter.entering_filename(filename, len(tests)) + for test in tests: + assert len(test.examples) != 0 + + if self._reporter._verbose: + self._reporter.write("\n{} ".format(test.name)) + + # check if there are external dependencies which need to be met + if '_doctest_depends_on' in test.globs: + try: + self._check_dependencies(**test.globs['_doctest_depends_on']) + except DependencyError as e: + self._reporter.test_skip(v=str(e)) + continue + + runner = SymPyDocTestRunner(verbose=self._reporter._verbose==2, + optionflags=pdoctest.ELLIPSIS | + pdoctest.NORMALIZE_WHITESPACE | + pdoctest.IGNORE_EXCEPTION_DETAIL) + runner._checker = SymPyOutputChecker() + old = sys.stdout + new = old if self._reporter._verbose==2 else StringIO() + sys.stdout = new + # If the testing is normal, the doctests get importing magic to + # provide the global namespace. If not normal (the default) then + # then must run on their own; all imports must be explicit within + # a function's docstring. Once imported that import will be + # available to the rest of the tests in a given function's + # docstring (unless clear_globs=True below). + if not self._normal: + test.globs = {} + # if this is uncommented then all the test would get is what + # comes by default with a "from sympy import *" + #exec('from sympy import *') in test.globs + old_displayhook = sys.displayhook + use_unicode_prev = setup_pprint() + + try: + f, t = runner.run(test, + out=new.write, clear_globs=False) + except KeyboardInterrupt: + raise + finally: + sys.stdout = old + if f > 0: + self._reporter.doctest_fail(test.name, new.getvalue()) + else: + self._reporter.test_pass() + sys.displayhook = old_displayhook + interactive_printing.NO_GLOBAL = False + pprint_use_unicode(use_unicode_prev) + + self._reporter.leaving_filename() + + def get_test_files(self, dir, pat='*.py', init_only=True): + r""" + Returns the list of \*.py files (default) from which docstrings + will be tested which are at or below directory ``dir``. 
By default, + only those that have an __init__.py in their parent directory + and do not start with ``test_`` will be included. + """ + def importable(x): + """ + Checks if given pathname x is an importable module by checking for + an __init__.py file. + + Returns True/False. + + Currently we only test if the __init__.py file exists in the + directory with the file "x" (in theory we should also test all the + parent dirs). + """ + init_py = os.path.join(os.path.dirname(x), "__init__.py") + return os.path.exists(init_py) + + dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0]) + + g = [] + for path, folders, files in os.walk(dir): + g.extend([os.path.join(path, f) for f in files + if not f.startswith('test_') and fnmatch(f, pat)]) + if init_only: + # skip files that are not importable (i.e. missing __init__.py) + g = [x for x in g if importable(x)] + + return [os.path.normcase(gi) for gi in g] + + def _check_dependencies(self, + executables=(), + modules=(), + disable_viewers=(), + python_version=(3, 5)): + """ + Checks if the dependencies for the test are installed. + + Raises ``DependencyError`` if at least one dependency is not installed. + """ + + for executable in executables: + if not shutil.which(executable): + raise DependencyError("Could not find %s" % executable) + + for module in modules: + if module == 'matplotlib': + matplotlib = import_module( + 'matplotlib', + import_kwargs={'fromlist': + ['pyplot', 'cm', 'collections']}, + min_module_version='1.0.0', catch=(RuntimeError,)) + if matplotlib is None: + raise DependencyError("Could not import matplotlib") + else: + if not import_module(module): + raise DependencyError("Could not import %s" % module) + + if disable_viewers: + tempdir = tempfile.mkdtemp() + os.environ['PATH'] = '%s:%s' % (tempdir, os.environ['PATH']) + + vw = ('#!/usr/bin/env python3\n' + 'import sys\n' + 'if len(sys.argv) <= 1:\n' + ' exit("wrong number of args")\n') + + for viewer in disable_viewers: + with open(os.path.join(tempdir, viewer), 'w') as fh: + fh.write(vw) + + # make the file executable + os.chmod(os.path.join(tempdir, viewer), + stat.S_IREAD | stat.S_IWRITE | stat.S_IXUSR) + + if python_version: + if sys.version_info < python_version: + raise DependencyError("Requires Python >= " + '.'.join(map(str, python_version))) + + if 'pyglet' in modules: + # monkey-patch pyglet so that it does not open a window during + # doctesting + import pyglet + class DummyWindow: + def __init__(self, *args, **kwargs): + self.has_exit = True + self.width = 600 + self.height = 400 + + def set_vsync(self, x): + pass + + def switch_to(self): + pass + + def push_handlers(self, x): + pass + + def close(self): + pass + + pyglet.window.Window = DummyWindow + + +class SymPyDocTestFinder(DocTestFinder): + """ + A class used to extract the DocTests that are relevant to a given + object, from its docstring and the docstrings of its contained + objects. Doctests can currently be extracted from the following + object types: modules, functions, classes, methods, staticmethods, + classmethods, and properties. + + Modified from doctest's version to look harder for code that + appears to come from a different module. For example, the @vectorize + decorator makes it look like functions come from multidimensional.py + even though their code exists elsewhere. + """ + + def _find(self, tests, obj, name, module, source_lines, globs, seen): + """ + Find tests for the given object and any contained objects, and + add them to ``tests``.
+ """ + if self._verbose: + print('Finding tests in %s' % name) + + # If we've already processed this object, then ignore it. + if id(obj) in seen: + return + seen[id(obj)] = 1 + + # Make sure we don't run doctests for classes outside of sympy, such + # as in numpy or scipy. + if inspect.isclass(obj): + if obj.__module__.split('.')[0] != 'sympy': + return + + # Find a test for this object, and add it to the list of tests. + test = self._get_test(obj, name, module, globs, source_lines) + if test is not None: + tests.append(test) + + if not self._recurse: + return + + # Look for tests in a module's contained objects. + if inspect.ismodule(obj): + for rawname, val in obj.__dict__.items(): + # Recurse to functions & classes. + if inspect.isfunction(val) or inspect.isclass(val): + # Make sure we don't run doctests functions or classes + # from different modules + if val.__module__ != module.__name__: + continue + + assert self._from_module(module, val), \ + "%s is not in module %s (rawname %s)" % (val, module, rawname) + + try: + valname = '%s.%s' % (name, rawname) + self._find(tests, val, valname, module, + source_lines, globs, seen) + except KeyboardInterrupt: + raise + + # Look for tests in a module's __test__ dictionary. + for valname, val in getattr(obj, '__test__', {}).items(): + if not isinstance(valname, str): + raise ValueError("SymPyDocTestFinder.find: __test__ keys " + "must be strings: %r" % + (type(valname),)) + if not (inspect.isfunction(val) or inspect.isclass(val) or + inspect.ismethod(val) or inspect.ismodule(val) or + isinstance(val, str)): + raise ValueError("SymPyDocTestFinder.find: __test__ values " + "must be strings, functions, methods, " + "classes, or modules: %r" % + (type(val),)) + valname = '%s.__test__.%s' % (name, valname) + self._find(tests, val, valname, module, source_lines, + globs, seen) + + + # Look for tests in a class's contained objects. + if inspect.isclass(obj): + for valname, val in obj.__dict__.items(): + # Special handling for staticmethod/classmethod. + if isinstance(val, staticmethod): + val = getattr(obj, valname) + if isinstance(val, classmethod): + val = getattr(obj, valname).__func__ + + + # Recurse to methods, properties, and nested classes. + if ((inspect.isfunction(unwrap(val)) or + inspect.isclass(val) or + isinstance(val, property)) and + self._from_module(module, val)): + # Make sure we don't run doctests functions or classes + # from different modules + if isinstance(val, property): + if hasattr(val.fget, '__module__'): + if val.fget.__module__ != module.__name__: + continue + else: + if val.__module__ != module.__name__: + continue + + assert self._from_module(module, val), \ + "%s is not in module %s (valname %s)" % ( + val, module, valname) + + valname = '%s.%s' % (name, valname) + self._find(tests, val, valname, module, source_lines, + globs, seen) + + def _get_test(self, obj, name, module, globs, source_lines): + """ + Return a DocTest for the given object, if it defines a docstring; + otherwise, return None. + """ + + lineno = None + + # Extract the object's docstring. If it does not have one, + # then return None (no test for this object). + if isinstance(obj, str): + # obj is a string in the case for objects in the polys package. + # Note that source_lines is a binary string (compiled polys + # modules), which can't be handled by _find_lineno so determine + # the line number here. 
+ + docstring = obj + + matches = re.findall(r"line \d+", name) + assert len(matches) == 1, \ + "string '%s' does not contain lineno " % name + + # NOTE: this is not the exact line number but it's better than no + # lineno ;) + lineno = int(matches[0][5:]) + + else: + try: + if obj.__doc__ is None: + docstring = '' + else: + docstring = obj.__doc__ + if not isinstance(docstring, str): + docstring = str(docstring) + except (TypeError, AttributeError): + docstring = '' + + # Don't bother if the docstring is empty. + if self._exclude_empty and not docstring: + return None + + # check that properties have a docstring because _find_lineno + # assumes it + if isinstance(obj, property): + if obj.fget.__doc__ is None: + return None + + # Find the docstring's location in the file. + if lineno is None: + obj = unwrap(obj) + # handling of properties is not implemented in _find_lineno so do + # it here + if hasattr(obj, 'func_closure') and obj.func_closure is not None: + tobj = obj.func_closure[0].cell_contents + elif isinstance(obj, property): + tobj = obj.fget + else: + tobj = obj + lineno = self._find_lineno(tobj, source_lines) + + if lineno is None: + return None + + # Return a DocTest for this object. + if module is None: + filename = None + else: + filename = getattr(module, '__file__', module.__name__) + if filename[-4:] in (".pyc", ".pyo"): + filename = filename[:-1] + + globs['_doctest_depends_on'] = getattr(obj, '_doctest_depends_on', {}) + + return self._parser.get_doctest(docstring, globs, name, + filename, lineno) + + +class SymPyDocTestRunner(DocTestRunner): + """ + A class used to run DocTest test cases, and accumulate statistics. + The ``run`` method is used to process a single DocTest case. It + returns a tuple ``(f, t)``, where ``t`` is the number of test cases + tried, and ``f`` is the number of test cases that failed. + + Modified from the doctest version to not reset the sys.displayhook (see + issue 5140). + + See the docstring of the original DocTestRunner for more information. + """ + + def run(self, test, compileflags=None, out=None, clear_globs=True): + """ + Run the examples in ``test``, and display the results using the + writer function ``out``. + + The examples are run in the namespace ``test.globs``. If + ``clear_globs`` is true (the default), then this namespace will + be cleared after the test runs, to help with garbage + collection. If you would like to examine the namespace after + the test completes, then use ``clear_globs=False``. + + ``compileflags`` gives the set of flags that should be used by + the Python compiler when running the examples. If not + specified, then it will default to the set of future-import + flags that apply to ``globs``. + + The output of each example is checked using + ``SymPyDocTestRunner.check_output``, and the results are + formatted by the ``SymPyDocTestRunner.report_*`` methods. + """ + self.test = test + + # Remove ``` from the end of example, which may appear in Markdown + # files + for example in test.examples: + example.want = example.want.replace('```\n', '') + example.exc_msg = example.exc_msg and example.exc_msg.replace('```\n', '') + + + if compileflags is None: + compileflags = pdoctest._extract_future_flags(test.globs) + + save_stdout = sys.stdout + if out is None: + out = save_stdout.write + sys.stdout = self._fakeout + + # Patch pdb.set_trace to restore sys.stdout during interactive + # debugging (so it's not still redirected to self._fakeout).
+ # Note that the interactive output will go to *our* + # save_stdout, even if that's not the real sys.stdout; this + # allows us to write test cases for the set_trace behavior. + save_set_trace = pdb.set_trace + self.debugger = pdoctest._OutputRedirectingPdb(save_stdout) + self.debugger.reset() + pdb.set_trace = self.debugger.set_trace + + # Patch linecache.getlines, so we can see the example's source + # when we're inside the debugger. + self.save_linecache_getlines = pdoctest.linecache.getlines + linecache.getlines = self.__patched_linecache_getlines + + # Fail for deprecation warnings + with raise_on_deprecated(): + try: + return self.__run(test, compileflags, out) + finally: + sys.stdout = save_stdout + pdb.set_trace = save_set_trace + linecache.getlines = self.save_linecache_getlines + if clear_globs: + test.globs.clear() + + +# We have to override the name mangled methods. +monkeypatched_methods = [ + 'patched_linecache_getlines', + 'run', + 'record_outcome' +] +for method in monkeypatched_methods: + oldname = '_DocTestRunner__' + method + newname = '_SymPyDocTestRunner__' + method + setattr(SymPyDocTestRunner, newname, getattr(DocTestRunner, oldname)) + + +class SymPyOutputChecker(pdoctest.OutputChecker): + """ + Compared to the OutputChecker from the stdlib, our OutputChecker class + supports numerical comparison of floats occurring in the output of the + doctest examples. + """ + + def __init__(self): + # NOTE OutputChecker is an old-style class with no __init__ method, + # so we can't call the base class version of __init__ here + + got_floats = r'(\d+\.\d*|\.\d+)' + + # floats in the 'want' string may contain ellipses + want_floats = got_floats + r'(\.{3})?' + + front_sep = r'\s|\+|\-|\*|,' + back_sep = front_sep + r'|j|e' + + fbeg = r'^%s(?=%s|$)' % (got_floats, back_sep) + fmidend = r'(?<=%s)%s(?=%s|$)' % (front_sep, got_floats, back_sep) + self.num_got_rgx = re.compile(r'(%s|%s)' %(fbeg, fmidend)) + + fbeg = r'^%s(?=%s|$)' % (want_floats, back_sep) + fmidend = r'(?<=%s)%s(?=%s|$)' % (front_sep, want_floats, back_sep) + self.num_want_rgx = re.compile(r'(%s|%s)' %(fbeg, fmidend)) + + def check_output(self, want, got, optionflags): + """ + Return True iff the actual output from an example (`got`) + matches the expected output (`want`). These strings are + always considered to match if they are identical; but + depending on what option flags the test runner is using, + several non-exact match types are also possible. See the + documentation for `TestRunner` for more information about + option flags. + """ + # Handle the common case first, for efficiency: + # if they're string-identical, always return true. + if got == want: + return True + + # TODO parse integers as well ? + # Parse floats and compare them. If some of the parsed floats contain + # ellipses, skip the comparison. + matches = self.num_got_rgx.finditer(got) + numbers_got = [match.group(1) for match in matches] # list of strs + matches = self.num_want_rgx.finditer(want) + numbers_want = [match.group(1) for match in matches] # list of strs + if len(numbers_got) != len(numbers_want): + return False + + if len(numbers_got) > 0: + nw_ = [] + for ng, nw in zip(numbers_got, numbers_want): + if '...' in nw: + nw_.append(ng) + continue + else: + nw_.append(nw) + + if abs(float(ng)-float(nw)) > 1e-5: + return False + + got = self.num_got_rgx.sub(r'%s', got) + got = got % tuple(nw_) + + # <BLANKLINE> can be used as a special sequence to signify a + # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
+ if not (optionflags & pdoctest.DONT_ACCEPT_BLANKLINE): + # Replace <BLANKLINE> in want with a blank line. + want = re.sub(r'(?m)^%s\s*?$' % re.escape(pdoctest.BLANKLINE_MARKER), + '', want) + # If a line in got contains only spaces, then remove the + # spaces. + got = re.sub(r'(?m)^\s*?$', '', got) + if got == want: + return True + + # This flag causes doctest to ignore any differences in the + # contents of whitespace strings. Note that this can be used + # in conjunction with the ELLIPSIS flag. + if optionflags & pdoctest.NORMALIZE_WHITESPACE: + got = ' '.join(got.split()) + want = ' '.join(want.split()) + if got == want: + return True + + # The ELLIPSIS flag says to let the sequence "..." in `want` + # match any substring in `got`. + if optionflags & pdoctest.ELLIPSIS: + if pdoctest._ellipsis_match(want, got): + return True + + # We didn't find any match; return false. + return False + + +class Reporter: + """ + Parent class for all reporters. + """ + pass + + +class PyTestReporter(Reporter): + """ + Py.test-like reporter. Should produce output identical to py.test. + """ + + def __init__(self, verbose=False, tb="short", colors=True, + force_colors=False, split=None): + self._verbose = verbose + self._tb_style = tb + self._colors = colors + self._force_colors = force_colors + self._xfailed = 0 + self._xpassed = [] + self._failed = [] + self._failed_doctest = [] + self._passed = 0 + self._skipped = 0 + self._exceptions = [] + self._terminal_width = None + self._default_width = 80 + self._split = split + self._active_file = '' + self._active_f = None + + # TODO: Should these be protected? + self.slow_test_functions = [] + self.fast_test_functions = [] + + # this tracks the x-position of the cursor (useful for positioning + # things on the screen), without the need for any readline library: + self._write_pos = 0 + self._line_wrap = False + + def root_dir(self, dir): + self._root_dir = dir + + @property + def terminal_width(self): + if self._terminal_width is not None: + return self._terminal_width + + def findout_terminal_width(): + if sys.platform == "win32": + # Windows support is based on: + # + # http://code.activestate.com/recipes/ + # 440694-determine-size-of-console-window-on-windows/ + + from ctypes import windll, create_string_buffer + + h = windll.kernel32.GetStdHandle(-12) + csbi = create_string_buffer(22) + res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi) + + if res: + import struct + (_, _, _, _, _, left, _, right, _, _, _) = \ + struct.unpack("hhhhHhhhhhh", csbi.raw) + return right - left + else: + return self._default_width + + if hasattr(sys.stdout, 'isatty') and not sys.stdout.isatty(): + return self._default_width # leave PIPEs alone + + try: + process = subprocess.Popen(['stty', '-a'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + stdout, stderr = process.communicate() + stdout = stdout.decode("utf-8") + except OSError: + pass + else: + # We support the following output formats from stty: + # + # 1) Linux -> columns 80 + # 2) OS X -> 80 columns + # 3) Solaris -> columns = 80 + + re_linux = r"columns\s+(?P<columns>\d+);" + re_osx = r"(?P<columns>\d+)\s*columns;" + re_solaris = r"columns\s+=\s+(?P<columns>\d+);" + + for regex in (re_linux, re_osx, re_solaris): + match = re.search(regex, stdout) + + if match is not None: + columns = match.group('columns') + + try: + width = int(columns) + except ValueError: + pass + if width != 0: + return width + + return self._default_width + + width = findout_terminal_width() + self._terminal_width = width + + return width + + def write(self, text,
color="", align="left", width=None, + force_colors=False): + """ + Prints a text on the screen. + + It uses sys.stdout.write(), so no readline library is necessary. + + Parameters + ========== + + color : choose from the colors below, "" means default color + align : "left"/"right", "left" is a normal print, "right" is aligned on + the right-hand side of the screen, filled with spaces if + necessary + width : the screen width + + """ + color_templates = ( + ("Black", "0;30"), + ("Red", "0;31"), + ("Green", "0;32"), + ("Brown", "0;33"), + ("Blue", "0;34"), + ("Purple", "0;35"), + ("Cyan", "0;36"), + ("LightGray", "0;37"), + ("DarkGray", "1;30"), + ("LightRed", "1;31"), + ("LightGreen", "1;32"), + ("Yellow", "1;33"), + ("LightBlue", "1;34"), + ("LightPurple", "1;35"), + ("LightCyan", "1;36"), + ("White", "1;37"), + ) + + colors = {} + + for name, value in color_templates: + colors[name] = value + c_normal = '\033[0m' + c_color = '\033[%sm' + + if width is None: + width = self.terminal_width + + if align == "right": + if self._write_pos + len(text) > width: + # we don't fit on the current line, create a new line + self.write("\n") + self.write(" "*(width - self._write_pos - len(text))) + + if not self._force_colors and hasattr(sys.stdout, 'isatty') and not \ + sys.stdout.isatty(): + # the stdout is not a terminal, this for example happens if the + # output is piped to less, e.g. "bin/test | less". In this case, + # the terminal control sequences would be printed verbatim, so + # don't use any colors. + color = "" + elif sys.platform == "win32": + # Windows consoles don't support ANSI escape sequences + color = "" + elif not self._colors: + color = "" + + if self._line_wrap: + if text[0] != "\n": + sys.stdout.write("\n") + + # Avoid UnicodeEncodeError when printing out test failures + if IS_WINDOWS: + text = text.encode('raw_unicode_escape').decode('utf8', 'ignore') + elif not sys.stdout.encoding.lower().startswith('utf'): + text = text.encode(sys.stdout.encoding, 'backslashreplace' + ).decode(sys.stdout.encoding) + + if color == "": + sys.stdout.write(text) + else: + sys.stdout.write("%s%s%s" % + (c_color % colors[color], text, c_normal)) + sys.stdout.flush() + l = text.rfind("\n") + if l == -1: + self._write_pos += len(text) + else: + self._write_pos = len(text) - l - 1 + self._line_wrap = self._write_pos >= width + self._write_pos %= width + + def write_center(self, text, delim="="): + width = self.terminal_width + if text != "": + text = " %s " % text + idx = (width - len(text)) // 2 + t = delim*idx + text + delim*(width - idx - len(text)) + self.write(t + "\n") + + def write_exception(self, e, val, tb): + # remove the first item, as that is always runtests.py + tb = tb.tb_next + t = traceback.format_exception(e, val, tb) + self.write("".join(t)) + + def start(self, seed=None, msg="test process starts"): + self.write_center(msg) + executable = sys.executable + v = tuple(sys.version_info) + python_version = "%s.%s.%s-%s-%s" % v + implementation = platform.python_implementation() + if implementation == 'PyPy': + implementation += " %s.%s.%s-%s-%s" % sys.pypy_version_info + self.write("executable: %s (%s) [%s]\n" % + (executable, python_version, implementation)) + from sympy.utilities.misc import ARCH + self.write("architecture: %s\n" % ARCH) + from sympy.core.cache import USE_CACHE + self.write("cache: %s\n" % USE_CACHE) + version = '' + if GROUND_TYPES =='gmpy': + if HAS_GMPY == 1: + import gmpy + elif HAS_GMPY == 2: + import gmpy2 as gmpy + version = gmpy.version() + self.write("ground 
types: %s %s\n" % (GROUND_TYPES, version)) + numpy = import_module('numpy') + self.write("numpy: %s\n" % (None if not numpy else numpy.__version__)) + if seed is not None: + self.write("random seed: %d\n" % seed) + from sympy.utilities.misc import HASH_RANDOMIZATION + self.write("hash randomization: ") + hash_seed = os.getenv("PYTHONHASHSEED") or '0' + if HASH_RANDOMIZATION and (hash_seed == "random" or int(hash_seed)): + self.write("on (PYTHONHASHSEED=%s)\n" % hash_seed) + else: + self.write("off\n") + if self._split: + self.write("split: %s\n" % self._split) + self.write('\n') + self._t_start = clock() + + def finish(self): + self._t_end = clock() + self.write("\n") + global text, linelen + text = "tests finished: %d passed, " % self._passed + linelen = len(text) + + def add_text(mytext): + global text, linelen + """Break new text if too long.""" + if linelen + len(mytext) > self.terminal_width: + text += '\n' + linelen = 0 + text += mytext + linelen += len(mytext) + + if len(self._failed) > 0: + add_text("%d failed, " % len(self._failed)) + if len(self._failed_doctest) > 0: + add_text("%d failed, " % len(self._failed_doctest)) + if self._skipped > 0: + add_text("%d skipped, " % self._skipped) + if self._xfailed > 0: + add_text("%d expected to fail, " % self._xfailed) + if len(self._xpassed) > 0: + add_text("%d expected to fail but passed, " % len(self._xpassed)) + if len(self._exceptions) > 0: + add_text("%d exceptions, " % len(self._exceptions)) + add_text("in %.2f seconds" % (self._t_end - self._t_start)) + + if self.slow_test_functions: + self.write_center('slowest tests', '_') + sorted_slow = sorted(self.slow_test_functions, key=lambda r: r[1]) + for slow_func_name, taken in sorted_slow: + print('%s - Took %.3f seconds' % (slow_func_name, taken)) + + if self.fast_test_functions: + self.write_center('unexpectedly fast tests', '_') + sorted_fast = sorted(self.fast_test_functions, + key=lambda r: r[1]) + for fast_func_name, taken in sorted_fast: + print('%s - Took %.3f seconds' % (fast_func_name, taken)) + + if len(self._xpassed) > 0: + self.write_center("xpassed tests", "_") + for e in self._xpassed: + self.write("%s: %s\n" % (e[0], e[1])) + self.write("\n") + + if self._tb_style != "no" and len(self._exceptions) > 0: + for e in self._exceptions: + filename, f, (t, val, tb) = e + self.write_center("", "_") + if f is None: + s = "%s" % filename + else: + s = "%s:%s" % (filename, f.__name__) + self.write_center(s, "_") + self.write_exception(t, val, tb) + self.write("\n") + + if self._tb_style != "no" and len(self._failed) > 0: + for e in self._failed: + filename, f, (t, val, tb) = e + self.write_center("", "_") + self.write_center("%s:%s" % (filename, f.__name__), "_") + self.write_exception(t, val, tb) + self.write("\n") + + if self._tb_style != "no" and len(self._failed_doctest) > 0: + for e in self._failed_doctest: + filename, msg = e + self.write_center("", "_") + self.write_center("%s" % filename, "_") + self.write(msg) + self.write("\n") + + self.write_center(text) + ok = len(self._failed) == 0 and len(self._exceptions) == 0 and \ + len(self._failed_doctest) == 0 + if not ok: + self.write("DO *NOT* COMMIT!\n") + return ok + + def entering_filename(self, filename, n): + rel_name = filename[len(self._root_dir) + 1:] + self._active_file = rel_name + self._active_file_error = False + self.write(rel_name) + self.write("[%d] " % n) + + def leaving_filename(self): + self.write(" ") + if self._active_file_error: + self.write("[FAIL]", "Red", align="right") + else: + self.write("[OK]", 
"Green", align="right") + self.write("\n") + if self._verbose: + self.write("\n") + + def entering_test(self, f): + self._active_f = f + if self._verbose: + self.write("\n" + f.__name__ + " ") + + def test_xfail(self): + self._xfailed += 1 + self.write("f", "Green") + + def test_xpass(self, v): + message = str(v) + self._xpassed.append((self._active_file, message)) + self.write("X", "Green") + + def test_fail(self, exc_info): + self._failed.append((self._active_file, self._active_f, exc_info)) + self.write("F", "Red") + self._active_file_error = True + + def doctest_fail(self, name, error_msg): + # the first line contains "******", remove it: + error_msg = "\n".join(error_msg.split("\n")[1:]) + self._failed_doctest.append((name, error_msg)) + self.write("F", "Red") + self._active_file_error = True + + def test_pass(self, char="."): + self._passed += 1 + if self._verbose: + self.write("ok", "Green") + else: + self.write(char, "Green") + + def test_skip(self, v=None): + char = "s" + self._skipped += 1 + if v is not None: + message = str(v) + if message == "KeyboardInterrupt": + char = "K" + elif message == "Timeout": + char = "T" + elif message == "Slow": + char = "w" + if self._verbose: + if v is not None: + self.write(message + ' ', "Blue") + else: + self.write(" - ", "Blue") + self.write(char, "Blue") + + def test_exception(self, exc_info): + self._exceptions.append((self._active_file, self._active_f, exc_info)) + if exc_info[0] is TimeOutError: + self.write("T", "Red") + else: + self.write("E", "Red") + self._active_file_error = True + + def import_error(self, filename, exc_info): + self._exceptions.append((filename, None, exc_info)) + rel_name = filename[len(self._root_dir) + 1:] + self.write(rel_name) + self.write("[?] Failed to import", "Red") + self.write(" ") + self.write("[FAIL]", "Red", align="right") + self.write("\n") diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/testing/tests/__init__.py b/llmeval-env/lib/python3.10/site-packages/sympy/testing/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/testing/tests/diagnose_imports.py b/llmeval-env/lib/python3.10/site-packages/sympy/testing/tests/diagnose_imports.py new file mode 100644 index 0000000000000000000000000000000000000000..7ddad652f055d41950cb825eee3a943425fa2fa7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/testing/tests/diagnose_imports.py @@ -0,0 +1,220 @@ +#!/usr/bin/env python + +""" +Import diagnostics. Run bin/diagnose_imports.py --help for details. +""" + +from __future__ import annotations + +if __name__ == "__main__": + + import sys + import inspect + import builtins + + import optparse + + from os.path import abspath, dirname, join, normpath + this_file = abspath(__file__) + sympy_dir = join(dirname(this_file), '..', '..', '..') + sympy_dir = normpath(sympy_dir) + sys.path.insert(0, sympy_dir) + + option_parser = optparse.OptionParser( + usage= + "Usage: %prog option [options]\n" + "\n" + "Import analysis for imports between SymPy modules.") + option_group = optparse.OptionGroup( + option_parser, + 'Analysis options', + 'Options that define what to do. Exactly one of these must be given.') + option_group.add_option( + '--problems', + help= + 'Print all import problems, that is: ' + 'If an import pulls in a package instead of a module ' + '(e.g. 
sympy.core instead of sympy.core.add); ' # see ##PACKAGE## + 'if it imports a symbol that is already present; ' # see ##DUPLICATE## + 'if it imports a symbol ' + 'from somewhere other than the defining module.', # see ##ORIGIN## + action='count') + option_group.add_option( + '--origins', + help= + 'For each imported symbol in each module, ' + 'print the module that defined it. ' + '(This is useful for import refactoring.)', + action='count') + option_parser.add_option_group(option_group) + option_group = optparse.OptionGroup( + option_parser, + 'Sort options', + 'These options define the sort order for output lines. ' + 'At most one of these options is allowed. ' + 'Unsorted output will reflect the order in which imports happened.') + option_group.add_option( + '--by-importer', + help='Sort output lines by name of importing module.', + action='count') + option_group.add_option( + '--by-origin', + help='Sort output lines by name of imported module.', + action='count') + option_parser.add_option_group(option_group) + (options, args) = option_parser.parse_args() + if args: + option_parser.error( + 'Unexpected arguments %s (try %s --help)' % (args, sys.argv[0])) + # options set by action='count' default to None, so guard the + # comparisons below against None before checking the count + if options.problems and options.problems > 1: + option_parser.error('--problems must not be given more than once.') + if options.origins and options.origins > 1: + option_parser.error('--origins must not be given more than once.') + if options.by_importer and options.by_importer > 1: + option_parser.error('--by-importer must not be given more than once.') + if options.by_origin and options.by_origin > 1: + option_parser.error('--by-origin must not be given more than once.') + options.problems = options.problems == 1 + options.origins = options.origins == 1 + options.by_importer = options.by_importer == 1 + options.by_origin = options.by_origin == 1 + if not options.problems and not options.origins: + option_parser.error( + 'At least one of --problems and --origins is required') + if options.problems and options.origins: + option_parser.error( + 'At most one of --problems and --origins is allowed') + if options.by_importer and options.by_origin: + option_parser.error( + 'At most one of --by-importer and --by-origin is allowed') + options.by_process = not options.by_importer and not options.by_origin + + builtin_import = builtins.__import__ + + class Definition: + """Information about a symbol's definition.""" + def __init__(self, name, value, definer): + self.name = name + self.value = value + self.definer = definer + def __hash__(self): + return hash(self.name) + def __eq__(self, other): + return self.name == other.name and self.value == other.value + def __ne__(self, other): + return not (self == other) + def __repr__(self): + return 'Definition(%s, ..., %s)' % ( + repr(self.name), repr(self.definer)) + + # Maps each function/variable to the name of the module that defines it + symbol_definers: dict[Definition, str] = {} + + def in_module(a, b): + """Is a the same module as or a submodule of b?""" + return a == b or a is not None and b is not None and a.startswith(b + '.') + + def relevant(module): + """Is module relevant for import checking? + + Only imports between relevant modules will be checked.""" + return in_module(module, 'sympy') + + sorted_messages = [] + + def msg(msg, *args): + global options, sorted_messages + if options.by_process: + print(msg % args) + else: + sorted_messages.append(msg % args) + + def tracking_import(module, globals=globals(), locals=[], fromlist=None, level=0): + """__import__ wrapper - does not change imports at all, but tracks them. + + Default order is implemented by doing output directly.
+ All other orders are implemented by collecting output information into + a sorted list that will be emitted after all imports are processed. + + Indirect imports can only occur after the requested symbol has been + imported directly (because the indirect import would not have a module + to pick the symbol up from). + So this code detects indirect imports by checking whether the symbol in + question was already imported. + + Keeps the semantics of __import__ unchanged.""" + global options, symbol_definers + caller_frame = inspect.getframeinfo(sys._getframe(1)) + importer_filename = caller_frame.filename + importer_module = globals['__name__'] + if importer_filename == caller_frame.filename: + importer_reference = '%s line %s' % ( + importer_filename, str(caller_frame.lineno)) + else: + importer_reference = importer_filename + result = builtin_import(module, globals, locals, fromlist, level) + importee_module = result.__name__ + # We're only interested if importer and importee are in SymPy + if relevant(importer_module) and relevant(importee_module): + for symbol in result.__dict__.keys(): + definition = Definition( + symbol, result.__dict__[symbol], importer_module) + if definition not in symbol_definers: + symbol_definers[definition] = importee_module + if hasattr(result, '__path__'): + ##PACKAGE## + # The existence of __path__ is documented in the tutorial on modules. + # Python 3.3 documents this in http://docs.python.org/3.3/reference/import.html + if options.by_origin: + msg('Error: %s (a package) is imported by %s', + module, importer_reference) + else: + msg('Error: %s contains package import %s', + importer_reference, module) + if fromlist is not None: + symbol_list = fromlist + if '*' in symbol_list: + if (importer_filename.endswith('__init__.py') + or importer_filename.endswith('__init__.pyc') + or importer_filename.endswith('__init__.pyo')): + # We do not check starred imports inside __init__ + # That's the normal "please copy over its imports to my namespace" + symbol_list = [] + else: + symbol_list = result.__dict__.keys() + for symbol in symbol_list: + if symbol not in result.__dict__: + if options.by_origin: + msg('Error: %s.%s is not defined (yet), but %s tries to import it', + importee_module, symbol, importer_reference) + else: + msg('Error: %s tries to import %s.%s, which did not define it (yet)', + importer_reference, importee_module, symbol) + else: + definition = Definition( + symbol, result.__dict__[symbol], importer_module) + symbol_definer = symbol_definers[definition] + if symbol_definer == importee_module: + ##DUPLICATE## + if options.by_origin: + msg('Error: %s.%s is imported again into %s', + importee_module, symbol, importer_reference) + else: + msg('Error: %s imports %s.%s again', + importer_reference, importee_module, symbol) + else: + ##ORIGIN## + if options.by_origin: + msg('Error: %s.%s is imported by %s, which should import %s.%s instead', + importee_module, symbol, importer_reference, symbol_definer, symbol) + else: + msg('Error: %s imports %s.%s but should import %s.%s instead', + importer_reference, importee_module, symbol, symbol_definer, symbol) + return result + + builtins.__import__ = tracking_import + __import__('sympy') + + sorted_messages.sort() + for message in sorted_messages: + print(message) diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/testing/tests/test_deprecated.py b/llmeval-env/lib/python3.10/site-packages/sympy/testing/tests/test_deprecated.py new file mode 100644 index
0000000000000000000000000000000000000000..696933d96d6232ea869da1002ec9ebee5309724d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/testing/tests/test_deprecated.py @@ -0,0 +1,5 @@ +from sympy.testing.pytest import warns_deprecated_sympy + +def test_deprecated_testing_randtest(): + with warns_deprecated_sympy(): + import sympy.testing.randtest # noqa:F401 diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/testing/tests/test_module_imports.py b/llmeval-env/lib/python3.10/site-packages/sympy/testing/tests/test_module_imports.py new file mode 100644 index 0000000000000000000000000000000000000000..d16dbaa98156c287c18b46ff07c0ede5d26e069a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/testing/tests/test_module_imports.py @@ -0,0 +1,42 @@ +""" +Checks that SymPy does not contain indirect imports. + +An indirect import is importing a symbol from a module that itself imported the +symbol from elsewhere. Such a constellation makes it harder to diagnose +inter-module dependencies and import order problems, and is therefore strongly +discouraged. + +(Indirect imports from end-user code are fine and in fact a best practice.) + +Implementation note: Forcing Python into actually unloading already-imported +submodules is a tricky and partly undocumented process. To avoid these issues, +the actual diagnostic code is in bin/diagnose_imports, which is run as a +separate, pristine Python process. +""" + +import subprocess +import sys +from os.path import abspath, dirname, join, normpath +import inspect + +from sympy.testing.pytest import XFAIL + +@XFAIL +def test_module_imports_are_direct(): + my_filename = abspath(inspect.getfile(inspect.currentframe())) + my_dirname = dirname(my_filename) + diagnose_imports_filename = join(my_dirname, 'diagnose_imports.py') + diagnose_imports_filename = normpath(diagnose_imports_filename) + + process = subprocess.Popen( + [ + sys.executable, + normpath(diagnose_imports_filename), + '--problems', + '--by-importer' + ], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + bufsize=-1) + output, _ = process.communicate() + # communicate() returns bytes, so compare against a bytes literal + assert output == b'', "There are import problems:\n" + output.decode() diff --git a/llmeval-env/lib/python3.10/site-packages/sympy/testing/tmpfiles.py b/llmeval-env/lib/python3.10/site-packages/sympy/testing/tmpfiles.py new file mode 100644 index 0000000000000000000000000000000000000000..1d5c69cb58aa11f77679855f3df21f03a10d3b2b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy/testing/tmpfiles.py @@ -0,0 +1,46 @@ +""" +This module adds a context manager for temporary files generated by the tests. +""" + +import shutil +import os + + +class TmpFileManager: + """ + A class to keep a record of every temporary file created by the tests. + """ + tmp_files = set() + tmp_folders = set() + + @classmethod + def tmp_file(cls, name=''): + cls.tmp_files.add(name) + return name + + @classmethod + def tmp_folder(cls, name=''): + cls.tmp_folders.add(name) + return name + + @classmethod + def cleanup(cls): + while cls.tmp_files: + file = cls.tmp_files.pop() + if os.path.isfile(file): + os.remove(file) + while cls.tmp_folders: + folder = cls.tmp_folders.pop() + shutil.rmtree(folder) + +def cleanup_tmp_files(test_func): + """ + A decorator to help test code remove temporary files after the tests run. + """ + def wrapper_function(): + try: + test_func() + finally: + TmpFileManager.cleanup() + + return wrapper_function
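A short usage sketch may help clarify how ``cleanup_tmp_files`` and ``TmpFileManager`` from ``sympy/testing/tmpfiles.py`` above are meant to interact. This is a minimal, hypothetical example (the test name and scratch file path are invented for illustration and are not part of the diff): any path registered through ``TmpFileManager.tmp_file`` is deleted by ``TmpFileManager.cleanup`` once the wrapped test finishes, whether it returns normally or raises.

import os
import tempfile

from sympy.testing.tmpfiles import TmpFileManager, cleanup_tmp_files


@cleanup_tmp_files
def test_writes_scratch_file():
    # Hypothetical test: register the path so that
    # TmpFileManager.cleanup() removes it afterwards.
    path = TmpFileManager.tmp_file(
        os.path.join(tempfile.gettempdir(), 'sympy_scratch.txt'))
    with open(path, 'w') as fh:
        fh.write('scratch data')
    assert os.path.isfile(path)


test_writes_scratch_file()
# The registered file is gone after the decorated test returns,
# even though the test body never deleted it explicitly.
assert not os.path.isfile(
    os.path.join(tempfile.gettempdir(), 'sympy_scratch.txt'))

Because cleanup happens in a ``finally`` block inside the decorator, the removal also runs when the test body raises, which keeps failing tests from leaking files into later runs.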