diff --git a/env-llmeval/lib/python3.10/site-packages/certifi/cacert.pem b/env-llmeval/lib/python3.10/site-packages/certifi/cacert.pem
new file mode 100644
index 0000000000000000000000000000000000000000..fac3c31909bf46d527df7c89352e2a4e9d31c906
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/certifi/cacert.pem
@@ -0,0 +1,4814 @@
+
+# Issuer: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA
+# Subject: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA
+# Label: "GlobalSign Root CA"
+# Serial: 4835703278459707669005204
+# MD5 Fingerprint: 3e:45:52:15:09:51:92:e1:b7:5d:37:9f:b1:87:29:8a
+# SHA1 Fingerprint: b1:bc:96:8b:d4:f4:9d:62:2a:a8:9a:81:f2:15:01:52:a4:1d:82:9c
+# SHA256 Fingerprint: eb:d4:10:40:e4:bb:3e:c7:42:c9:e3:81:d3:1e:f2:a4:1a:48:b6:68:5c:96:e7:ce:f3:c1:df:6c:d4:33:1c:99
+-----BEGIN CERTIFICATE-----
+MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG
+A1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv
+b3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw
+MDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i
+YWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT
+aWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ
+jc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp
+xy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp
+1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG
+snUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ
+U26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8
+9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E
+BTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B
+AQUFAAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOz
+yj1hTdNGCbM+w6DjY1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE
+38NflNUVyRRBnMRddWQVDf9VMOyGj/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymP
+AbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhHhm4qxFYxldBniYUr+WymXUad
+DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME
+HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
+# Subject: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
+# Label: "Entrust.net Premium 2048 Secure Server CA"
+# Serial: 946069240
+# MD5 Fingerprint: ee:29:31:bc:32:7e:9a:e6:e8:b5:f7:51:b4:34:71:90
+# SHA1 Fingerprint: 50:30:06:09:1d:97:d4:f5:ae:39:f7:cb:e7:92:7d:7d:65:2d:34:31
+# SHA256 Fingerprint: 6d:c4:71:72:e0:1c:bc:b0:bf:62:58:0d:89:5f:e2:b8:ac:9a:d4:f8:73:80:1e:0c:10:b9:c8:37:d2:1e:b1:77
+-----BEGIN CERTIFICATE-----
+MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML
+RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp
+bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5
+IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp
+ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0yOTA3
+MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3
+LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp
+YWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG
+A1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq
+K0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe
+sYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX
+MlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT
+XTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/
+HoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH
+4QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
+HQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJKoZIhvcNAQEFBQADggEBADub
+j1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPyT/4xmf3IDExo
+U8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf
+zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5b
+u/8j72gZyxKTJ1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+
+bYQLCIt+jerXmCHG8+c8eS9enNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/Er
+fF6adulZkMV8gzURZVE=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust
+# Subject: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust
+# Label: "Baltimore CyberTrust Root"
+# Serial: 33554617
+# MD5 Fingerprint: ac:b6:94:a5:9c:17:e0:d7:91:52:9b:b1:97:06:a6:e4
+# SHA1 Fingerprint: d4:de:20:d0:5e:66:fc:53:fe:1a:50:88:2c:78:db:28:52:ca:e4:74
+# SHA256 Fingerprint: 16:af:57:a9:f6:76:b0:ab:12:60:95:aa:5e:ba:de:f2:2a:b3:11:19:d6:44:ac:95:cd:4b:93:db:f3:f2:6a:eb
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJ
+RTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYD
+VQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoX
+DTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMCSUUxEjAQBgNVBAoTCUJhbHRpbW9y
+ZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3JlIEN5YmVy
+VHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKr
+mD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjr
+IZ3AQSsBUnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeK
+mpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSu
+XmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xXtabz5OTZy
+dc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/ye
+jl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoIVDaGezq1
+BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3
+DQEBBQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT92
+9hkTI7gQCvlYpNRhcL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3Wgx
+jkzSswF07r51XgdIGn9w/xZchMB5hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0
+Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsaY71k5h+3zvDyny67G7fyUIhz
+ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS
+R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc.
+# Subject: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc.
+# Label: "Entrust Root Certification Authority"
+# Serial: 1164660820
+# MD5 Fingerprint: d6:a5:c3:ed:5d:dd:3e:00:c1:3d:87:92:1f:1d:3f:e4
+# SHA1 Fingerprint: b3:1e:b1:b7:40:e3:6c:84:02:da:dc:37:d4:4d:f5:d4:67:49:52:f9
+# SHA256 Fingerprint: 73:c1:76:43:4f:1b:c6:d5:ad:f4:5b:0e:76:e7:27:28:7c:8d:e5:76:16:c1:e6:e6:14:1a:2b:2c:bc:7d:8e:4c
+-----BEGIN CERTIFICATE-----
+MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMC
+VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0
+Lm5ldC9DUFMgaXMgaW5jb3Jwb3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMW
+KGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsGA1UEAxMkRW50cnVzdCBSb290IENl
+cnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0MloXDTI2MTEyNzIw
+NTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMTkw
+NwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSBy
+ZWZlcmVuY2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNV
+BAMTJEVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJ
+KoZIhvcNAQEBBQADggEPADCCAQoCggEBALaVtkNC+sZtKm9I35RMOVcF7sN5EUFo
+Nu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYszA9u3g3s+IIRe7bJWKKf4
+4LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOwwCj0Yzfv9
+KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGI
+rb68j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi
+94DkZfs0Nw4pgHBNrziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOB
+sDCBrTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAi
+gA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1MzQyWjAfBgNVHSMEGDAWgBRo
+kORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DHhmak8fdLQ/uE
+vW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA
+A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9t
+O1KzKtvn1ISMY/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6Zua
+AGAT/3B+XxFNSRuzFVJ7yVTav52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP
+9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTSW3iDVuycNsMm4hH2Z0kdkquM++v/
+eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m
+0vdXcDazv/wor3ElhVsT/h5/WrQ8
+-----END CERTIFICATE-----
+
+# Issuer: CN=AAA Certificate Services O=Comodo CA Limited
+# Subject: CN=AAA Certificate Services O=Comodo CA Limited
+# Label: "Comodo AAA Services root"
+# Serial: 1
+# MD5 Fingerprint: 49:79:04:b0:eb:87:19:ac:47:b0:bc:11:51:9b:74:d0
+# SHA1 Fingerprint: d1:eb:23:a4:6d:17:d6:8f:d9:25:64:c2:f1:f1:60:17:64:d8:e3:49
+# SHA256 Fingerprint: d7:a7:a0:fb:5d:7e:27:31:d7:71:e9:48:4e:bc:de:f7:1d:5f:0c:3e:0a:29:48:78:2b:c8:3e:e0:ea:69:9e:f4
+-----BEGIN CERTIFICATE-----
+MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEb
+MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow
+GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmlj
+YXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVowezEL
+MAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE
+BwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNVBAMM
+GEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP
+ADCCAQoCggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQua
+BtDFcCLNSS1UY8y2bmhGC1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe
+3M/vg4aijJRPn2jymJBGhCfHdr/jzDUsi14HZGWCwEiwqJH5YZ92IFCokcdmtet4
+YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszWY19zjNoFmag4qMsXeDZR
+rOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjHYpy+g8cm
+ez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQU
+oBEKIz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF
+MAMBAf8wewYDVR0fBHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20v
+QUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29t
+b2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2VzLmNybDANBgkqhkiG9w0BAQUF
+AAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm7l3sAg9g1o1Q
+GE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz
+Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2
+G9w84FoVxp7Z8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsi
+l2D4kF501KKaU73yqWjgom7C12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3
+smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 2 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 2 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 2"
+# Serial: 1289
+# MD5 Fingerprint: 5e:39:7b:dd:f8:ba:ec:82:e9:ac:62:ba:0c:54:00:2b
+# SHA1 Fingerprint: ca:3a:fb:cf:12:40:36:4b:44:b2:16:20:88:80:48:39:19:93:7c:f7
+# SHA256 Fingerprint: 85:a0:dd:7d:d7:20:ad:b7:ff:05:f8:3d:54:2b:20:9d:c7:ff:45:28:f7:d6:77:b1:83:89:fe:a5:e5:c4:9e:86
+-----BEGIN CERTIFICATE-----
+MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x
+GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv
+b3QgQ0EgMjAeFw0wNjExMjQxODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNV
+BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W
+YWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCa
+GMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6XJxg
+Fyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55J
+WpzmM+Yklvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bB
+rrcCaoF6qUWD4gXmuVbBlDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp
++ARz8un+XJiM9XOva7R+zdRcAitMOeGylZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1
+ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt66/3FsvbzSUr5R/7mp/i
+Ucw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1JdxnwQ5hYIiz
+PtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og
+/zOhD7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UH
+oycR7hYQe7xFSkyyBNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuI
+yV77zGHcizN300QyNQliBJIWENieJ0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1Ud
+EwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBQahGK8SEwzJQTU7tD2
+A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGUa6FJpEcwRTEL
+MAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT
+ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2f
+BluornFdLwUvZ+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzn
+g/iN/Ae42l9NLmeyhP3ZRPx3UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2Bl
+fF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodmVjB3pjd4M1IQWK4/YY7yarHvGH5K
+WWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK+JDSV6IZUaUtl0Ha
+B0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrWIozc
+hLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPR
+TUIZ3Ph1WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWD
+mbA4CD/pXvk1B+TJYm5Xf6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0Z
+ohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y
+4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8VCLAAVBpQ570su9t+Oza
+8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 3 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 3 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 3"
+# Serial: 1478
+# MD5 Fingerprint: 31:85:3c:62:94:97:63:b9:aa:fd:89:4e:af:6f:e0:cf
+# SHA1 Fingerprint: 1f:49:14:f7:d8:74:95:1d:dd:ae:02:c0:be:fd:3a:2d:82:75:51:85
+# SHA256 Fingerprint: 18:f1:fc:7f:20:5d:f8:ad:dd:eb:7f:e0:07:dd:57:e3:af:37:5a:9c:4d:8d:73:54:6b:f4:f1:fe:d1:e1:8d:35
+-----BEGIN CERTIFICATE-----
+MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x
+GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv
+b3QgQ0EgMzAeFw0wNjExMjQxOTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNV
+BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W
+YWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDM
+V0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNggDhoB
+4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUr
+H556VOijKTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd
+8lyyBTNvijbO0BNO/79KDDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9Cabwv
+vWhDFlaJKjdhkf2mrk7AyxRllDdLkgbvBNDInIjbC3uBr7E9KsRlOni27tyAsdLT
+mZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwpp5ijJUMv7/FfJuGITfhe
+btfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8nT8KKdjc
+T5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDt
+WAEXMJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZ
+c6tsgLjoC2SToJyMGf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A
+4iLItLRkT9a6fUg+qGkM17uGcclzuD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYD
+VR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHTBgkrBgEEAb5YAAMwgcUwgZMG
+CCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmljYXRlIGNvbnN0
+aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0
+aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVu
+dC4wLQYIKwYBBQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2Nw
+czALBgNVHQ8EBAMCAQYwHQYDVR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4G
+A1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4ywLQoUmkRzBFMQswCQYDVQQGEwJC
+TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UEAxMSUXVvVmFkaXMg
+Um9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZVqyM0
+7ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSem
+d1o417+shvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd
++LJ2w/w4E6oM3kJpK27zPOuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B
+4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadN
+t54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp8kokUvd0/bpO5qgdAm6x
+DYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBCbjPsMZ57
+k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6s
+zHXug/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0j
+Wy10QJLZYxkNc91pvGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeT
+mJlglFwjz1onl14LBQaTNx47aTbrqZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK
+4SVhM7JZG+Ju1zdXtg2pEto=
+-----END CERTIFICATE-----
+
+# Issuer: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com
+# Subject: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com
+# Label: "XRamp Global CA Root"
+# Serial: 107108908803651509692980124233745014957
+# MD5 Fingerprint: a1:0b:44:b3:ca:10:d8:00:6e:9d:0f:d8:0f:92:0a:d1
+# SHA1 Fingerprint: b8:01:86:d1:eb:9c:86:a5:41:04:cf:30:54:f3:4c:52:b7:e5:58:c6
+# SHA256 Fingerprint: ce:cd:dc:90:50:99:d8:da:df:c5:b1:d2:09:b7:37:cb:e2:c1:8c:fb:2c:10:c0:ff:0b:cf:0d:32:86:fc:1a:a2
+-----BEGIN CERTIFICATE-----
+MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCB
+gjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEk
+MCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRY
+UmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQxMTAxMTcx
+NDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3
+dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2Vy
+dmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB
+dXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS6
+38eMpSe2OAtp87ZOqCwuIR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCP
+KZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMxfoArtYzAQDsRhtDLooY2YKTVMIJt2W7Q
+DxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FEzG+gSqmUsE3a56k0enI4
+qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqsAxcZZPRa
+JSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNVi
+PvryxS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0P
+BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASs
+jVy16bYbMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0
+eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQEwDQYJKoZIhvcNAQEFBQAD
+ggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc/Kh4ZzXxHfAR
+vbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt
+qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLa
+IR9NmXmd4c8nnxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSy
+i6mx5O+aGtA9aZnuqCij4Tyz8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQ
+O+7ETPTsJ3xCwnR8gooJybQDJbw=
+-----END CERTIFICATE-----
+
+# Issuer: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority
+# Subject: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority
+# Label: "Go Daddy Class 2 CA"
+# Serial: 0
+# MD5 Fingerprint: 91:de:06:25:ab:da:fd:32:17:0c:bb:25:17:2a:84:67
+# SHA1 Fingerprint: 27:96:ba:e6:3f:18:01:e2:77:26:1b:a0:d7:77:70:02:8f:20:ee:e4
+# SHA256 Fingerprint: c3:84:6b:f2:4b:9e:93:ca:64:27:4c:0e:c6:7c:1e:cc:5e:02:4f:fc:ac:d2:d7:40:19:35:0e:81:fe:54:6a:e4
+-----BEGIN CERTIFICATE-----
+MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEh
+MB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBE
+YWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3
+MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkGA1UEBhMCVVMxITAfBgNVBAoTGFRo
+ZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28gRGFkZHkgQ2xhc3Mg
+MiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQADggEN
+ADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCA
+PVYYYwhv2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6w
+wdhFJ2+qN1j3hybX2C32qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXi
+EqITLdiOr18SPaAIBQi2XKVlOARFmR6jYGB0xUGlcmIbYsUfb18aQr4CUWWoriMY
+avx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmYvLEHZ6IVDd2gWMZEewo+
+YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0OBBYEFNLE
+sNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h
+/t2oatTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5
+IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmlj
+YXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD
+ggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wimPQoZ+YeAEW5p5JYXMP80kWNy
+OO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKtI3lpjbi2Tc7P
+TMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ
+HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mER
+dEr/VxqHD3VILs9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5Cuf
+ReYNnyicsbkqWletNw+vHX/bvZ8=
+-----END CERTIFICATE-----
+
+# Issuer: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority
+# Subject: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority
+# Label: "Starfield Class 2 CA"
+# Serial: 0
+# MD5 Fingerprint: 32:4a:4b:bb:c8:63:69:9b:be:74:9a:c6:dd:1d:46:24
+# SHA1 Fingerprint: ad:7e:1c:28:b0:64:ef:8f:60:03:40:20:14:c3:d0:e3:37:0e:b5:8a
+# SHA256 Fingerprint: 14:65:fa:20:53:97:b8:76:fa:a6:f0:a9:95:8e:55:90:e4:0f:cc:7f:aa:4f:b7:c2:c8:67:75:21:fb:5f:b6:58
+-----BEGIN CERTIFICATE-----
+MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl
+MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp
+U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw
+NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE
+ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp
+ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3
+DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf
+8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN
++lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0
+X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa
+K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA
+1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G
+A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR
+zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0
+YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD
+bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w
+DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3
+L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D
+eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl
+xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp
+VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY
+WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q=
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Assured ID Root CA"
+# Serial: 17154717934120587862167794914071425081
+# MD5 Fingerprint: 87:ce:0b:7b:2a:0e:49:00:e1:58:71:9b:37:a8:93:72
+# SHA1 Fingerprint: 05:63:b8:63:0d:62:d7:5a:bb:c8:ab:1e:4b:df:b5:a8:99:b2:4d:43
+# SHA256 Fingerprint: 3e:90:99:b5:01:5e:8f:48:6c:00:bc:ea:9d:11:1e:e7:21:fa:ba:35:5a:89:bc:f1:df:69:56:1e:3d:c6:32:5c
+-----BEGIN CERTIFICATE-----
+MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv
+b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG
+EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl
+cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c
+JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP
+mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+
+wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4
+VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/
+AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB
+AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
+BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun
+pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC
+dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf
+fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm
+NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx
+H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe
++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g==
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Global Root CA"
+# Serial: 10944719598952040374951832963794454346
+# MD5 Fingerprint: 79:e4:a9:84:0d:7d:3a:96:d7:c0:4f:e2:43:4c:89:2e
+# SHA1 Fingerprint: a8:98:5d:3a:65:e5:e5:c4:b2:d7:d6:6d:40:c6:dd:2f:b1:9c:54:36
+# SHA256 Fingerprint: 43:48:a0:e9:44:4c:78:cb:26:5e:05:8d:5e:89:44:b4:d8:4f:96:62:bd:26:db:25:7f:89:34:a4:43:c7:01:61
+-----BEGIN CERTIFICATE-----
+MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD
+QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT
+MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j
+b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB
+CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97
+nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt
+43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P
+T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4
+gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO
+BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR
+TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw
+DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr
+hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg
+06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF
+PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls
+YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk
+CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4=
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert High Assurance EV Root CA"
+# Serial: 3553400076410547919724730734378100087
+# MD5 Fingerprint: d4:74:de:57:5c:39:b2:d3:9c:85:83:c5:c0:65:49:8a
+# SHA1 Fingerprint: 5f:b7:ee:06:33:e2:59:db:ad:0c:4c:9a:e6:d3:8f:1a:61:c7:dc:25
+# SHA256 Fingerprint: 74:31:e5:f4:c3:c1:ce:46:90:77:4f:0b:61:e0:54:40:88:3b:a9:a0:1e:d0:0b:a6:ab:d7:80:6e:d3:b1:18:cf
+-----BEGIN CERTIFICATE-----
+MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j
+ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL
+MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3
+LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug
+RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm
++9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW
+PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM
+xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB
+Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3
+hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg
+EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF
+MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA
+FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec
+nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z
+eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF
+hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2
+Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe
+vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep
++OkuE6N36B9K
+-----END CERTIFICATE-----
+
+# Issuer: CN=SwissSign Gold CA - G2 O=SwissSign AG
+# Subject: CN=SwissSign Gold CA - G2 O=SwissSign AG
+# Label: "SwissSign Gold CA - G2"
+# Serial: 13492815561806991280
+# MD5 Fingerprint: 24:77:d9:a8:91:d1:3b:fa:88:2d:c2:ff:f8:cd:33:93
+# SHA1 Fingerprint: d8:c5:38:8a:b7:30:1b:1b:6e:d4:7a:e6:45:25:3a:6f:9f:1a:27:61
+# SHA256 Fingerprint: 62:dd:0b:e9:b9:f5:0a:16:3e:a0:f8:e7:5c:05:3b:1e:ca:57:ea:55:c8:68:8f:64:7c:68:81:f2:c8:35:7b:95
+-----BEGIN CERTIFICATE-----
+MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV
+BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2ln
+biBHb2xkIENBIC0gRzIwHhcNMDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBF
+MQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dpc3NTaWduIEFHMR8wHQYDVQQDExZT
+d2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC
+CgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUqt2/8
+76LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+
+bbqBHH5CjCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c
+6bM8K8vzARO/Ws/BtQpgvd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqE
+emA8atufK+ze3gE/bk3lUIbLtK/tREDFylqM2tIrfKjuvqblCqoOpd8FUrdVxyJd
+MmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvRAiTysybUa9oEVeXBCsdt
+MDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuendjIj3o02y
+MszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69y
+FGkOpeUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPi
+aG59je883WX0XaxR7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxM
+gI93e2CaHt+28kgeDrpOVG2Y4OGiGqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCB
+qTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWyV7
+lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64OfPAeGZe6Drn
+8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov
+L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe6
+45R88a7A3hfm5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczO
+UYrHUDFu4Up+GC9pWbY9ZIEr44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5
+O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOfMke6UiI0HTJ6CVanfCU2qT1L2sCC
+bwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6mGu6uLftIdxf+u+yv
+GPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxpmo/a
+77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCC
+hdiDyyJkvC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid3
+92qgQmwLOM7XdVAyksLfKzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEpp
+Ld6leNcG2mqeSz53OiATIgHQv2ieY2BrNU0LbbqhPcCT4H8js1WtciVORvnSFu+w
+ZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6LqjviOvrv1vA+ACOzB2+htt
+Qc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ
+-----END CERTIFICATE-----
+
+# Issuer: CN=SwissSign Silver CA - G2 O=SwissSign AG
+# Subject: CN=SwissSign Silver CA - G2 O=SwissSign AG
+# Label: "SwissSign Silver CA - G2"
+# Serial: 5700383053117599563
+# MD5 Fingerprint: e0:06:a1:c9:7d:cf:c9:fc:0d:c0:56:75:96:d8:62:13
+# SHA1 Fingerprint: 9b:aa:e5:9f:56:ee:21:cb:43:5a:be:25:93:df:a7:f0:40:d1:1d:cb
+# SHA256 Fingerprint: be:6c:4d:a2:bb:b9:ba:59:b6:f3:93:97:68:37:42:46:c3:c0:05:99:3f:a9:8f:02:0d:1d:ed:be:d4:8a:81:d5
+-----BEGIN CERTIFICATE-----
+MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UE
+BhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWdu
+IFNpbHZlciBDQSAtIEcyMB4XDTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0Nlow
+RzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMY
+U3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A
+MIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644N0Mv
+Fz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7br
+YT7QbNHm+/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieF
+nbAVlDLaYQ1HTWBCrpJH6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH
+6ATK72oxh9TAtvmUcXtnZLi2kUpCe2UuMGoM9ZDulebyzYLs2aFK7PayS+VFheZt
+eJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5hqAaEuSh6XzjZG6k4sIN/
+c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5FZGkECwJ
+MoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRH
+HTBsROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTf
+jNFusB3hB48IHpmccelM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb6
+5i/4z3GcRm25xBWNOHkDRUjvxF3XCO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOB
+rDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU
+F6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRBtjpbO8tFnb0c
+wpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0
+cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIB
+AHPGgeAn0i0P4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShp
+WJHckRE1qTodvBqlYJ7YH39FkWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9
+xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L3XWgwF15kIwb4FDm3jH+mHtwX6WQ
+2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx/uNncqCxv1yL5PqZ
+IseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFaDGi8
+aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2X
+em1ZqSqPe97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQR
+dAtq/gsD/KNVV4n+SsuuWxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/
+OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJDIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+
+hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ubDgEj8Z+7fNzcbBGXJbLy
+tGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u
+-----END CERTIFICATE-----
+
+# Issuer: CN=SecureTrust CA O=SecureTrust Corporation
+# Subject: CN=SecureTrust CA O=SecureTrust Corporation
+# Label: "SecureTrust CA"
+# Serial: 17199774589125277788362757014266862032
+# MD5 Fingerprint: dc:32:c3:a7:6d:25:57:c7:68:09:9d:ea:2d:a9:a2:d1
+# SHA1 Fingerprint: 87:82:c6:c3:04:35:3b:cf:d2:96:92:d2:59:3e:7d:44:d9:34:ff:11
+# SHA256 Fingerprint: f1:c1:b5:0a:e5:a2:0d:d8:03:0e:c9:f6:bc:24:82:3d:d3:67:b5:25:57:59:b4:e7:1b:61:fc:e9:f7:37:5d:73
+-----BEGIN CERTIFICATE-----
+MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBI
+MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x
+FzAVBgNVBAMTDlNlY3VyZVRydXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIz
+MTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAeBgNVBAoTF1NlY3VyZVRydXN0IENv
+cnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQXOZEz
+Zum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO
+0gMdA+9tDWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIao
+wW8xQmxSPmjL8xk037uHGFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj
+7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b01k/unK8RCSc43Oz969XL0Imnal0ugBS
+8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmHursCAwEAAaOBnTCBmjAT
+BgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB
+/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCeg
+JYYjaHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGC
+NxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt3
+6Z3q059c4EVlew3KW+JwULKUBRSuSceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/
+3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHfmbx8IVQr5Fiiu1cprp6poxkm
+D5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZnMUFdAvnZyPS
+CPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR
+3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Secure Global CA O=SecureTrust Corporation
+# Subject: CN=Secure Global CA O=SecureTrust Corporation
+# Label: "Secure Global CA"
+# Serial: 9751836167731051554232119481456978597
+# MD5 Fingerprint: cf:f4:27:0d:d4:ed:dc:65:16:49:6d:3d:da:bf:6e:de
+# SHA1 Fingerprint: 3a:44:73:5a:e5:81:90:1f:24:86:61:46:1e:3b:9c:c4:5f:f5:3a:1b
+# SHA256 Fingerprint: 42:00:f5:04:3a:c8:59:0e:bb:52:7d:20:9e:d1:50:30:29:fb:cb:d4:1c:a1:b5:06:ec:27:f1:5a:de:7d:ac:69
+-----BEGIN CERTIFICATE-----
+MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBK
+MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x
+GTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkx
+MjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3Qg
+Q29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwggEiMA0GCSqG
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jxYDiJ
+iQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa
+/FHtaMbQbqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJ
+jnIFHovdRIWCQtBJwB1g8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnI
+HmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYVHDGA76oYa8J719rO+TMg1fW9ajMtgQT7
+sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi0XPnj3pDAgMBAAGjgZ0w
+gZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1UdEwEB/wQF
+MAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCsw
+KaAnoCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsG
+AQQBgjcVAQQDAgEAMA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0L
+URYD7xh8yOOvaliTFGCRsoTciE6+OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXO
+H0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cnCDpOGR86p1hcF895P4vkp9Mm
+I50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/53CYNv6ZHdAbY
+iNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc
+f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW
+-----END CERTIFICATE-----
+
+# Issuer: CN=COMODO Certification Authority O=COMODO CA Limited
+# Subject: CN=COMODO Certification Authority O=COMODO CA Limited
+# Label: "COMODO Certification Authority"
+# Serial: 104350513648249232941998508985834464573
+# MD5 Fingerprint: 5c:48:dc:f7:42:72:ec:56:94:6d:1c:cc:71:35:80:75
+# SHA1 Fingerprint: 66:31:bf:9e:f7:4f:9e:b6:c9:d5:a6:0c:ba:6a:be:d1:f7:bd:ef:7b
+# SHA256 Fingerprint: 0c:2c:d6:3d:f7:80:6f:a3:99:ed:e8:09:11:6b:57:5b:f8:79:89:f0:65:18:f9:80:8c:86:05:03:17:8b:af:66
+-----BEGIN CERTIFICATE-----
+MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCB
+gTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
+A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNV
+BAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEyMDEwMDAw
+MDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEbMBkGA1UECBMSR3Jl
+YXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFDT01P
+RE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0
+aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3
+UcEbVASY06m/weaKXTuH+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI
+2GqGd0S7WWaXUF601CxwRM/aN5VCaTwwxHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8
+Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV4EajcNxo2f8ESIl33rXp
++2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA1KGzqSX+
+DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5O
+nKVIrLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW
+/zAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6g
+PKA6hjhodHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9u
+QXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOCAQEAPpiem/Yb6dc5t3iuHXIY
+SdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CPOGEIqB6BCsAv
+IC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/
+RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4
+zJVSk/BwJVmcIGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5dd
+BA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB
+ZQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=COMODO ECC Certification Authority O=COMODO CA Limited
+# Subject: CN=COMODO ECC Certification Authority O=COMODO CA Limited
+# Label: "COMODO ECC Certification Authority"
+# Serial: 41578283867086692638256921589707938090
+# MD5 Fingerprint: 7c:62:ff:74:9d:31:53:5e:68:4a:d5:78:aa:1e:bf:23
+# SHA1 Fingerprint: 9f:74:4e:9f:2b:4d:ba:ec:0f:31:2c:50:b6:56:3b:8e:2d:93:c3:11
+# SHA256 Fingerprint: 17:93:92:7a:06:14:54:97:89:ad:ce:2f:8f:34:f7:f0:b6:6d:0f:3a:e3:a3:b8:4d:21:ec:15:db:ba:4f:ad:c7
+-----BEGIN CERTIFICATE-----
+MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTEL
+MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE
+BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMT
+IkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwMzA2MDAw
+MDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdy
+ZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09N
+T0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlv
+biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSR
+FtSrYpn1PlILBs5BAH+X4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0J
+cfRK9ChQtP6IHG4/bC8vCVlbpVsLM5niwz2J+Wos77LTBumjQjBAMB0GA1UdDgQW
+BBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/
+BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VGFAkK+qDm
+fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv
+GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certigna O=Dhimyotis
+# Subject: CN=Certigna O=Dhimyotis
+# Label: "Certigna"
+# Serial: 18364802974209362175
+# MD5 Fingerprint: ab:57:a6:5b:7d:42:82:19:b5:d8:58:26:28:5e:fd:ff
+# SHA1 Fingerprint: b1:2e:13:63:45:86:a4:6f:1a:b2:60:68:37:58:2d:c4:ac:fd:94:97
+# SHA256 Fingerprint: e3:b6:a2:db:2e:d7:ce:48:84:2f:7a:c5:32:41:c7:b7:1d:54:14:4b:fb:40:c1:1f:3f:1d:0b:42:f5:ee:a1:2d
+-----BEGIN CERTIFICATE-----
+MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNV
+BAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4X
+DTA3MDYyOTE1MTMwNVoXDTI3MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQ
+BgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwIQ2VydGlnbmEwggEiMA0GCSqGSIb3
+DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7qXOEm7RFHYeGifBZ4
+QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyHGxny
+gQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbw
+zBfsV1/pogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q
+130yGLMLLGq/jj8UEYkgDncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2
+JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKfIrjxwo1p3Po6WAbfAgMBAAGjgbwwgbkw
+DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQtCRZvgHyUtVF9lo53BEw
+ZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJBgNVBAYT
+AkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzj
+AQ/JSP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG
+9w0BAQUFAAOCAQEAhQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8h
+bV6lUmPOEvjvKtpv6zf+EwLHyzs+ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFnc
+fca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1kluPBS1xp81HlDQwY9qcEQCYsuu
+HWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY1gkIl2PlwS6w
+t0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw
+WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg==
+-----END CERTIFICATE-----
+
+# Issuer: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority
+# Subject: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority
+# Label: "ePKI Root Certification Authority"
+# Serial: 28956088682735189655030529057352760477
+# MD5 Fingerprint: 1b:2e:00:ca:26:06:90:3d:ad:fe:6f:15:68:d3:6b:b3
+# SHA1 Fingerprint: 67:65:0d:f1:7e:8e:7e:5b:82:40:a4:f4:56:4b:cf:e2:3d:69:c6:f0
+# SHA256 Fingerprint: c0:a6:f4:dc:63:a2:4b:fd:cf:54:ef:2a:6a:08:2a:0a:72:de:35:80:3e:2f:f5:ff:52:7a:e5:d8:72:06:df:d5
+-----BEGIN CERTIFICATE-----
+MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBe
+MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0
+ZC4xKjAoBgNVBAsMIWVQS0kgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe
+Fw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMxMjdaMF4xCzAJBgNVBAYTAlRXMSMw
+IQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEqMCgGA1UECwwhZVBL
+SSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAH
+SyZbCUNsIZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAh
+ijHyl3SJCRImHJ7K2RKilTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3X
+DZoTM1PRYfl61dd4s5oz9wCGzh1NlDivqOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1
+TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX12ruOzjjK9SXDrkb5wdJ
+fzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0OWQqraffA
+sgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uU
+WH1+ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLS
+nT0IFaUQAS2zMnaolQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pH
+dmX2Os+PYhcZewoozRrSgx4hxyy/vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJip
+NiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXiZo1jDiVN1Rmy5nk3pyKdVDEC
+AwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/QkqiMAwGA1UdEwQF
+MAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH
+ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGB
+uvl2ICO1J2B01GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6Yl
+PwZpVnPDimZI+ymBV3QGypzqKOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkP
+JXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdVxrsStZf0X4OFunHB2WyBEXYKCrC/
+gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEPNXubrjlpC2JgQCA2
+j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+rGNm6
+5ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUB
+o2M3IUxExJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS
+/jQ6fbjpKdx2qcgw+BRxgMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2z
+Gp1iro2C6pSe3VkQw63d4k3jMdXH7OjysP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTE
+W9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmODBCEIZ43ygknQW/2xzQ+D
+hNQ+IIX3Sj0rnP0qCglN6oH4EZw=
+-----END CERTIFICATE-----
+
+# Issuer: O=certSIGN OU=certSIGN ROOT CA
+# Subject: O=certSIGN OU=certSIGN ROOT CA
+# Label: "certSIGN ROOT CA"
+# Serial: 35210227249154
+# MD5 Fingerprint: 18:98:c0:d6:e9:3a:fc:f9:b0:f5:0c:f7:4b:01:44:17
+# SHA1 Fingerprint: fa:b7:ee:36:97:26:62:fb:2d:b0:2a:f6:bf:03:fd:e8:7c:4b:2f:9b
+# SHA256 Fingerprint: ea:a9:62:c4:fa:4a:6b:af:eb:e4:15:19:6d:35:1c:cd:88:8d:4f:53:f3:fa:8a:e6:d7:c4:66:a9:4e:60:42:bb
+-----BEGIN CERTIFICATE-----
+MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYT
+AlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBD
+QTAeFw0wNjA3MDQxNzIwMDRaFw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJP
+MREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTCC
+ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7IJUqOtdu0KBuqV5Do
+0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHHrfAQ
+UySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5d
+RdY4zTW2ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQ
+OA7+j0xbm0bqQfWwCHTD0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwv
+JoIQ4uNllAoEwF73XVv4EOLQunpL+943AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08C
+AwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAcYwHQYDVR0O
+BBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IBAQA+0hyJ
+LjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecY
+MnQ8SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ
+44gx+FkagQnIl6Z0x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6I
+Jd1hJyMctTEHBDa0GpC9oHRxUIltvBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNw
+i/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7NzTogVZ96edhBiIL5VaZVDADlN
+9u6wWk5JRFRYX0KD
+-----END CERTIFICATE-----
+
+# Issuer: CN=NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny O=NetLock Kft. OU=Tan\xfas\xedtv\xe1nykiad\xf3k (Certification Services)
+# Subject: CN=NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny O=NetLock Kft. OU=Tan\xfas\xedtv\xe1nykiad\xf3k (Certification Services)
+# Label: "NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny"
+# Serial: 80544274841616
+# MD5 Fingerprint: c5:a1:b7:ff:73:dd:d6:d7:34:32:18:df:fc:3c:ad:88
+# SHA1 Fingerprint: 06:08:3f:59:3f:15:a1:04:a0:69:a4:6b:a9:03:d0:06:b7:97:09:91
+# SHA256 Fingerprint: 6c:61:da:c3:a2:de:f0:31:50:6b:e0:36:d2:a6:fe:40:19:94:fb:d1:3d:f9:c8:d4:66:59:92:74:c4:46:ec:98
+-----BEGIN CERTIFICATE-----
+MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQG
+EwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3
+MDUGA1UECwwuVGFuw7pzw610dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNl
+cnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBBcmFueSAoQ2xhc3MgR29sZCkgRsWR
+dGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgxMjA2MTUwODIxWjCB
+pzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxOZXRM
+b2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlm
+aWNhdGlvbiBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNz
+IEdvbGQpIEbFkXRhbsO6c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAxCRec75LbRTDofTjl5Bu0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrT
+lF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw/HpYzY6b7cNGbIRwXdrz
+AZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAkH3B5r9s5
+VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRG
+ILdwfzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2
+BJtr+UBdADTHLpl1neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAG
+AQH/AgEEMA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2M
+U9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwWqZw8UQCgwBEIBaeZ5m8BiFRh
+bvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTtaYtOUZcTh5m2C
++C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC
+bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2F
+uLjbvrW5KfnaNwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2
+XjG4Kvte9nHfRCaexOYNkbQudZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E=
+-----END CERTIFICATE-----
+
+# Issuer: CN=SecureSign RootCA11 O=Japan Certification Services, Inc.
+# Subject: CN=SecureSign RootCA11 O=Japan Certification Services, Inc.
+# Label: "SecureSign RootCA11"
+# Serial: 1
+# MD5 Fingerprint: b7:52:74:e2:92:b4:80:93:f2:75:e4:cc:d7:f2:ea:26
+# SHA1 Fingerprint: 3b:c4:9f:48:f8:f3:73:a0:9c:1e:bd:f8:5b:b1:c3:65:c7:d8:11:b3
+# SHA256 Fingerprint: bf:0f:ee:fb:9e:3a:58:1a:d5:f9:e9:db:75:89:98:57:43:d2:61:08:5c:4d:31:4f:6f:5d:72:59:aa:42:16:12
+-----BEGIN CERTIFICATE-----
+MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDEr
+MCkGA1UEChMiSmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoG
+A1UEAxMTU2VjdXJlU2lnbiBSb290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0
+MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSswKQYDVQQKEyJKYXBhbiBDZXJ0aWZp
+Y2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1cmVTaWduIFJvb3RD
+QTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvLTJsz
+i1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8
+h9uuywGOwvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOV
+MdrAG/LuYpmGYz+/3ZMqg6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9
+UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rPO7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni
+8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitAbpSACW22s293bzUIUPsC
+h8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZXt94wDgYD
+VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB
+AKChOBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xm
+KbabfSVSSUOrTC4rbnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQ
+X5Ucv+2rIrVls4W6ng+4reV6G4pQOh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWr
+QbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01y8hSyn+B/tlr0/cR7SXf+Of5
+pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061lgeLKBObjBmN
+QSdJQO7e5iNEOdyhIta6A/I=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd.
+# Subject: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd.
+# Label: "Microsec e-Szigno Root CA 2009"
+# Serial: 14014712776195784473
+# MD5 Fingerprint: f8:49:f4:03:bc:44:2d:83:be:48:69:7d:29:64:fc:b1
+# SHA1 Fingerprint: 89:df:74:fe:5c:f4:0f:4a:80:f9:e3:37:7d:54:da:91:e1:01:31:8e
+# SHA256 Fingerprint: 3c:5f:81:fe:a5:fa:b8:2c:64:bf:a2:ea:ec:af:cd:e8:e0:77:fc:86:20:a7:ca:e5:37:16:3d:f3:6e:db:f3:78
+-----BEGIN CERTIFICATE-----
+MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYD
+VQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0
+ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0G
+CSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTAeFw0wOTA2MTYxMTMwMThaFw0y
+OTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3Qx
+FjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3pp
+Z25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o
+dTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvP
+kd6mJviZpWNwrZuuyjNAfW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tc
+cbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG0IMZfcChEhyVbUr02MelTTMuhTlAdX4U
+fIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKApxn1ntxVUwOXewdI/5n7
+N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm1HxdrtbC
+xkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1
++rUCAwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G
+A1UdDgQWBBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPM
+Pcu1SCOhGnqmKrs0aDAbBgNVHREEFDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqG
+SIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0olZMEyL/azXm4Q5DwpL7v8u8h
+mLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfXI/OMn74dseGk
+ddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775
+tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c
+2Pm2G2JwCz02yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5t
+HMN1Rq41Bab2XD0h7lbwyYIiLXpUq3DDfSJlgnCW
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3
+# Label: "GlobalSign Root CA - R3"
+# Serial: 4835703278459759426209954
+# MD5 Fingerprint: c5:df:b8:49:ca:05:13:55:ee:2d:ba:1a:c3:3e:b0:28
+# SHA1 Fingerprint: d6:9b:56:11:48:f0:1c:77:c5:45:78:c1:09:26:df:5b:85:69:76:ad
+# SHA256 Fingerprint: cb:b5:22:d7:b7:f1:27:ad:6a:01:13:86:5b:df:1c:d4:10:2e:7d:07:59:af:63:5a:7c:f4:72:0d:c9:63:c5:3b
+-----BEGIN CERTIFICATE-----
+MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G
+A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp
+Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4
+MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG
+A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8
+RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT
+gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm
+KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd
+QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ
+XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw
+DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o
+LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU
+RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp
+jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK
+6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX
+mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs
+Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH
+WD9f
+-----END CERTIFICATE-----
+
+# Issuer: CN=Izenpe.com O=IZENPE S.A.
+# Subject: CN=Izenpe.com O=IZENPE S.A.
+# Label: "Izenpe.com"
+# Serial: 917563065490389241595536686991402621
+# MD5 Fingerprint: a6:b0:cd:85:80:da:5c:50:34:a3:39:90:2f:55:67:73
+# SHA1 Fingerprint: 2f:78:3d:25:52:18:a7:4a:65:39:71:b5:2c:a2:9c:45:15:6f:e9:19
+# SHA256 Fingerprint: 25:30:cc:8e:98:32:15:02:ba:d9:6f:9b:1f:ba:1b:09:9e:2d:29:9e:0f:45:48:bb:91:4f:36:3b:c0:d4:53:1f
+-----BEGIN CERTIFICATE-----
+MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4
+MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6
+ZW5wZS5jb20wHhcNMDcxMjEzMTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYD
+VQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5j
+b20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ03rKDx6sp4boFmVq
+scIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAKClaO
+xdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6H
+LmYRY2xU+zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFX
+uaOKmMPsOzTFlUFpfnXCPCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQD
+yCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxTOTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+
+JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbKF7jJeodWLBoBHmy+E60Q
+rLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK0GqfvEyN
+BjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8L
+hij+0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIB
+QFqNeb+Lz0vPqhbBleStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+
+HMh3/1uaD7euBUbl8agW7EekFwIDAQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2lu
+Zm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+SVpFTlBFIFMuQS4gLSBDSUYg
+QTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBGNjIgUzgxQzBB
+BgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx
+MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
+AQYwHQYDVR0OBBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUA
+A4ICAQB4pgwWSp9MiDrAyw6lFn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWb
+laQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbgakEyrkgPH7UIBzg/YsfqikuFgba56
+awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8qhT/AQKM6WfxZSzwo
+JNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Csg1lw
+LDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCT
+VyvehQP5aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGk
+LhObNA5me0mrZJfQRsN5nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJb
+UjWumDqtujWTI6cfSN01RpiyEGjkpTHCClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/
+QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZoQ0iy2+tzJOeRf1SktoA+
+naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1ZWrOZyGls
+QyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc.
+# Subject: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc.
+# Label: "Go Daddy Root Certificate Authority - G2"
+# Serial: 0
+# MD5 Fingerprint: 80:3a:bc:22:c1:e6:fb:8d:9b:3b:27:4a:32:1b:9a:01
+# SHA1 Fingerprint: 47:be:ab:c9:22:ea:e8:0e:78:78:34:62:a7:9f:45:c2:54:fd:e6:8b
+# SHA256 Fingerprint: 45:14:0b:32:47:eb:9c:c8:c5:b4:f0:d7:b5:30:91:f7:32:92:08:9e:6e:5a:63:e2:74:9d:d3:ac:a9:19:8e:da
+-----BEGIN CERTIFICATE-----
+MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT
+EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp
+ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz
+NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH
+EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE
+AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw
+DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD
+E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH
+/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy
+DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh
+GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR
+tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA
+AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE
+FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX
+WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu
+9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr
+gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo
+2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO
+LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI
+4uJEvlz36hz1
+-----END CERTIFICATE-----
+
+# Issuer: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc.
+# Subject: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc.
+# Label: "Starfield Root Certificate Authority - G2"
+# Serial: 0
+# MD5 Fingerprint: d6:39:81:c6:52:7e:96:69:fc:fc:ca:66:ed:05:f2:96
+# SHA1 Fingerprint: b5:1c:06:7c:ee:2b:0c:3d:f8:55:ab:2d:92:f4:fe:39:d4:e7:0f:0e
+# SHA256 Fingerprint: 2c:e1:cb:0b:f9:d2:f9:e1:02:99:3f:be:21:51:52:c3:b2:dd:0c:ab:de:1c:68:e5:31:9b:83:91:54:db:b7:f5
+-----BEGIN CERTIFICATE-----
+MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT
+HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs
+ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw
+MFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6
+b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj
+aG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp
+Y2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg
+nLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1
+HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N
+Hwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN
+dloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0
+HZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO
+BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G
+CSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU
+sHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3
+4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg
+8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K
+pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1
+mMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0
+-----END CERTIFICATE-----
+
+# Issuer: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc.
+# Subject: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc.
+# Label: "Starfield Services Root Certificate Authority - G2"
+# Serial: 0
+# MD5 Fingerprint: 17:35:74:af:7b:61:1c:eb:f4:f9:3c:e2:ee:40:f9:a2
+# SHA1 Fingerprint: 92:5a:8f:8d:2c:6d:04:e0:66:5f:59:6a:ff:22:d8:63:e8:25:6f:3f
+# SHA256 Fingerprint: 56:8d:69:05:a2:c8:87:08:a4:b3:02:51:90:ed:cf:ed:b1:97:4a:60:6a:13:c6:e5:29:0f:cb:2a:e6:3e:da:b5
+-----BEGIN CERTIFICATE-----
+MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT
+HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVs
+ZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5
+MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNVBAYTAlVTMRAwDgYD
+VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy
+ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2Vy
+dmljZXMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20p
+OsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm2
+8xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4PahHQUw2eeBGg6345AWh1K
+Ts9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLPLJGmpufe
+hRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk
+6mFBrMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAw
+DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+q
+AdcwKziIorhtSpzyEZGDMA0GCSqGSIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMI
+bw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPPE95Dz+I0swSdHynVv/heyNXB
+ve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTyxQGjhdByPq1z
+qwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd
+iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn
+0q23KXB56jzaYyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCN
+sSi6
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Commercial O=AffirmTrust
+# Subject: CN=AffirmTrust Commercial O=AffirmTrust
+# Label: "AffirmTrust Commercial"
+# Serial: 8608355977964138876
+# MD5 Fingerprint: 82:92:ba:5b:ef:cd:8a:6f:a6:3d:55:f9:84:f6:d6:b7
+# SHA1 Fingerprint: f9:b5:b6:32:45:5f:9c:be:ec:57:5f:80:dc:e9:6e:2c:c7:b2:78:b7
+# SHA256 Fingerprint: 03:76:ab:1d:54:c5:f9:80:3c:e4:b2:e2:01:a0:ee:7e:ef:7b:57:b6:36:e8:a9:3c:9b:8d:48:60:c9:6f:5f:a7
+-----BEGIN CERTIFICATE-----
+MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz
+dCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL
+MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp
+cm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP
+Hx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr
+ba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL
+MeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1
+yHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr
+VwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/
+nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ
+KoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG
+XUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj
+vbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt
+Z8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g
+N53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC
+nlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8=
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Networking O=AffirmTrust
+# Subject: CN=AffirmTrust Networking O=AffirmTrust
+# Label: "AffirmTrust Networking"
+# Serial: 8957382827206547757
+# MD5 Fingerprint: 42:65:ca:be:01:9a:9a:4c:a9:8c:41:49:cd:c0:d5:7f
+# SHA1 Fingerprint: 29:36:21:02:8b:20:ed:02:f5:66:c5:32:d1:d6:ed:90:9f:45:00:2f
+# SHA256 Fingerprint: 0a:81:ec:5a:92:97:77:f1:45:90:4a:f3:8d:5d:50:9f:66:b5:e2:c5:8f:cd:b5:31:05:8b:0e:17:f3:f0:b4:1b
+-----BEGIN CERTIFICATE-----
+MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz
+dCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL
+MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp
+cm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y
+YJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua
+kCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL
+QESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp
+6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG
+yH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i
+QLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ
+KoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO
+tDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu
+QY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ
+Lgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u
+olu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48
+x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s=
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Premium O=AffirmTrust
+# Subject: CN=AffirmTrust Premium O=AffirmTrust
+# Label: "AffirmTrust Premium"
+# Serial: 7893706540734352110
+# MD5 Fingerprint: c4:5d:0e:48:b6:ac:28:30:4e:0a:bc:f9:38:16:87:57
+# SHA1 Fingerprint: d8:a6:33:2c:e0:03:6f:b1:85:f6:63:4f:7d:6a:06:65:26:32:28:27
+# SHA256 Fingerprint: 70:a7:3f:7f:37:6b:60:07:42:48:90:45:34:b1:14:82:d5:bf:0e:69:8e:cc:49:8d:f5:25:77:eb:f2:e9:3b:9a
+-----BEGIN CERTIFICATE-----
+MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz
+dCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG
+A1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U
+cnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf
+qV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ
+JG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ
++jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS
+s8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5
+HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7
+70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG
+V+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S
+qHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S
+5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia
+C1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX
+OwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE
+FJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/
+BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2
+KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg
+Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B
+8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ
+MKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc
+0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ
+u4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF
+u+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH
+YoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8
+GKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO
+RtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e
+KeC2uAloGRwYQw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Premium ECC O=AffirmTrust
+# Subject: CN=AffirmTrust Premium ECC O=AffirmTrust
+# Label: "AffirmTrust Premium ECC"
+# Serial: 8401224907861490260
+# MD5 Fingerprint: 64:b0:09:55:cf:b1:d5:99:e2:be:13:ab:a6:5d:ea:4d
+# SHA1 Fingerprint: b8:23:6b:00:2f:1d:16:86:53:01:55:6c:11:a4:37:ca:eb:ff:c3:bb
+# SHA256 Fingerprint: bd:71:fd:f6:da:97:e4:cf:62:d1:64:7a:dd:25:81:b0:7d:79:ad:f8:39:7e:b4:ec:ba:9c:5e:84:88:82:14:23
+-----BEGIN CERTIFICATE-----
+MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC
+VVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ
+cmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ
+BgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt
+VHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D
+0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9
+ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G
+A1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G
+A1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs
+aobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I
+flc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority
+# Subject: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority
+# Label: "Certum Trusted Network CA"
+# Serial: 279744
+# MD5 Fingerprint: d5:e9:81:40:c5:18:69:fc:46:2c:89:75:62:0f:aa:78
+# SHA1 Fingerprint: 07:e0:32:e0:20:b7:2c:3f:19:2f:06:28:a2:59:3a:19:a7:0f:06:9e
+# SHA256 Fingerprint: 5c:58:46:8d:55:f5:8e:49:7e:74:39:82:d2:b5:00:10:b6:d1:65:37:4a:cf:83:a7:d4:a3:2d:b7:68:c4:40:8e
+-----BEGIN CERTIFICATE-----
+MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBM
+MSIwIAYDVQQKExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5D
+ZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBU
+cnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIyMTIwNzM3WhcNMjkxMjMxMTIwNzM3
+WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBUZWNobm9sb2dpZXMg
+Uy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MSIw
+IAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0B
+AQEFAAOCAQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rH
+UV+rpDKmYYe2bg+G0jACl/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LM
+TXPb865Px1bVWqeWifrzq2jUI4ZZJ88JJ7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVU
+BBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4fOQtf/WsX+sWn7Et0brM
+kUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0cvW0QM8x
+AcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNV
+HQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15y
+sHhE49wcrwn9I0j6vSrEuVUEtRCjjSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfL
+I9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1mS1FhIrlQgnXdAIv94nYmem8
+J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5ajZt3hrvJBW8qY
+VoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI
+03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw=
+-----END CERTIFICATE-----
+
+# Issuer: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA
+# Subject: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA
+# Label: "TWCA Root Certification Authority"
+# Serial: 1
+# MD5 Fingerprint: aa:08:8f:f6:f9:7b:b7:f2:b1:a7:1e:9b:ea:ea:bd:79
+# SHA1 Fingerprint: cf:9e:87:6d:d3:eb:fc:42:26:97:a3:b5:a3:7a:a0:76:a9:06:23:48
+# SHA256 Fingerprint: bf:d8:8f:e1:10:1c:41:ae:3e:80:1b:f8:be:56:35:0e:e9:ba:d1:a6:b9:bd:51:5e:dc:5c:6d:5b:87:11:ac:44
+-----BEGIN CERTIFICATE-----
+MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzES
+MBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFU
+V0NBIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMz
+WhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJVEFJV0FO
+LUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlm
+aWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
+AQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFE
+AcK0HMMxQhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HH
+K3XLfJ+utdGdIzdjp9xCoi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeX
+RfwZVzsrb+RH9JlF/h3x+JejiB03HFyP4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/z
+rX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1ry+UPizgN7gr8/g+YnzAx
+3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkq
+hkiG9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeC
+MErJk/9q56YAf4lCmtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdls
+XebQ79NqZp4VKIV66IIArB6nCWlWQtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62D
+lhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVYT0bf+215WfKEIlKuD8z7fDvn
+aspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocnyYh0igzyXxfkZ
+YiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw==
+-----END CERTIFICATE-----
+
+# Issuer: O=SECOM Trust Systems CO.,LTD. OU=Security Communication RootCA2
+# Subject: O=SECOM Trust Systems CO.,LTD. OU=Security Communication RootCA2
+# Label: "Security Communication RootCA2"
+# Serial: 0
+# MD5 Fingerprint: 6c:39:7d:a4:0e:55:59:b2:3f:d6:41:b1:12:50:de:43
+# SHA1 Fingerprint: 5f:3b:8c:f2:f8:10:b3:7d:78:b4:ce:ec:19:19:c3:73:34:b9:c7:74
+# SHA256 Fingerprint: 51:3b:2c:ec:b8:10:d4:cd:e5:dd:85:39:1a:df:c6:c2:dd:60:d8:7b:b7:36:d2:b5:21:48:4a:a4:7a:0e:be:f6
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDEl
+MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMe
+U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoX
+DTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRy
+dXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3VyaXR5IENvbW11bmlj
+YXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANAV
+OVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGr
+zbl+dp+++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVM
+VAX3NuRFg3sUZdbcDE3R3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQ
+hNBqyjoGADdH5H5XTz+L62e4iKrFvlNVspHEfbmwhRkGeC7bYRr6hfVKkaHnFtWO
+ojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1KEOtOghY6rCcMU/Gt1SSw
+awNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8QIH4D5cs
+OPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3
+DQEBCwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpF
+coJxDjrSzG+ntKEju/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXc
+okgfGT+Ok+vx+hfuzU7jBBJV1uXk3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8
+t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6qtnRGEmyR7jTV7JqR50S+kDFy
+1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29mvVXIwAHIRc/
+SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03
+-----END CERTIFICATE-----
+
+# Issuer: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967
+# Subject: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967
+# Label: "Actalis Authentication Root CA"
+# Serial: 6271844772424770508
+# MD5 Fingerprint: 69:c1:0d:4f:07:a3:1b:c3:fe:56:3d:04:bc:11:f6:a6
+# SHA1 Fingerprint: f3:73:b3:87:06:5a:28:84:8a:f2:f3:4a:ce:19:2b:dd:c7:8e:9c:ac
+# SHA256 Fingerprint: 55:92:60:84:ec:96:3a:64:b9:6e:2a:be:01:ce:0b:a8:6a:64:fb:fe:bc:c7:aa:b5:af:c1:55:b3:7f:d7:60:66
+-----BEGIN CERTIFICATE-----
+MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UE
+BhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8w
+MzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290
+IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDkyMjExMjIwMlowazELMAkGA1UEBhMC
+SVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1
+ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENB
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNv
+UTufClrJwkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX
+4ay8IMKx4INRimlNAJZaby/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9
+KK3giq0itFZljoZUj5NDKd45RnijMCO6zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/
+gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1fYVEiVRvjRuPjPdA1Yprb
+rxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2oxgkg4YQ
+51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2F
+be8lEfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxe
+KF+w6D9Fz8+vm2/7hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4F
+v6MGn8i1zeQf1xcGDXqVdFUNaBr8EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbn
+fpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5jF66CyCU3nuDuP/jVo23Eek7
+jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLYiDrIn3hm7Ynz
+ezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt
+ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAL
+e3KHwGCmSUyIWOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70
+jsNjLiNmsGe+b7bAEzlgqqI0JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDz
+WochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKxK3JCaKygvU5a2hi/a5iB0P2avl4V
+SM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+Xlff1ANATIGk0k9j
+pwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC4yyX
+X04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+Ok
+fcvHlXHo2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7R
+K4X9p2jIugErsWx0Hbhzlefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btU
+ZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXemOR/qnuOf0GZvBeyqdn6/axag67XH/JJU
+LysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9vwGYT7JZVEc+NHt4bVaT
+LnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Buypass Class 2 Root CA O=Buypass AS-983163327
+# Subject: CN=Buypass Class 2 Root CA O=Buypass AS-983163327
+# Label: "Buypass Class 2 Root CA"
+# Serial: 2
+# MD5 Fingerprint: 46:a7:d2:fe:45:fb:64:5a:a8:59:90:9b:78:44:9b:29
+# SHA1 Fingerprint: 49:0a:75:74:de:87:0a:47:fe:58:ee:f6:c7:6b:eb:c6:0b:12:40:99
+# SHA256 Fingerprint: 9a:11:40:25:19:7c:5b:b9:5d:94:e6:3d:55:cd:43:79:08:47:b6:46:b2:3c:df:11:ad:a4:a0:0e:ff:15:fb:48
+-----BEGIN CERTIFICATE-----
+MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd
+MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg
+Q2xhc3MgMiBSb290IENBMB4XDTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1ow
+TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw
+HgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB
+BQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1g1Lr
+6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPV
+L4O2fuPn9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC91
+1K2GScuVr1QGbNgGE41b/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHx
+MlAQTn/0hpPshNOOvEu/XAFOBz3cFIqUCqTqc/sLUegTBxj6DvEr0VQVfTzh97QZ
+QmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeffawrbD02TTqigzXsu8lkB
+arcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgIzRFo1clr
+Us3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLi
+FRhnBkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRS
+P/TizPJhk9H9Z2vXUq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN
+9SG9dKpN6nIDSdvHXx1iY8f93ZHsM+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxP
+AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMmAd+BikoL1Rpzz
+uvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAU18h
+9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s
+A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3t
+OluwlN5E40EIosHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo
++fsicdl9sz1Gv7SEr5AcD48Saq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7
+KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYdDnkM/crqJIByw5c/8nerQyIKx+u2
+DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWDLfJ6v9r9jv6ly0Us
+H8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0oyLQ
+I+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK7
+5t98biGCwWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h
+3PFaTWwyI0PurKju7koSCTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPz
+Y11aWOIv4x3kqdbQCtCev9eBCfHJxyYNrJgWVqA=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Buypass Class 3 Root CA O=Buypass AS-983163327
+# Subject: CN=Buypass Class 3 Root CA O=Buypass AS-983163327
+# Label: "Buypass Class 3 Root CA"
+# Serial: 2
+# MD5 Fingerprint: 3d:3b:18:9e:2c:64:5a:e8:d5:88:ce:0e:f9:37:c2:ec
+# SHA1 Fingerprint: da:fa:f7:fa:66:84:ec:06:8f:14:50:bd:c7:c2:81:a5:bc:a9:64:57
+# SHA256 Fingerprint: ed:f7:eb:bc:a2:7a:2a:38:4d:38:7b:7d:40:10:c6:66:e2:ed:b4:84:3e:4c:29:b4:ae:1d:5b:93:32:e6:b2:4d
+-----BEGIN CERTIFICATE-----
+MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd
+MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg
+Q2xhc3MgMyBSb290IENBMB4XDTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFow
+TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw
+HgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB
+BQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRHsJ8Y
+ZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3E
+N3coTRiR5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9
+tznDDgFHmV0ST9tD+leh7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX
+0DJq1l1sDPGzbjniazEuOQAnFN44wOwZZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c
+/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH2xc519woe2v1n/MuwU8X
+KhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV/afmiSTY
+zIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvS
+O1UQRwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D
+34xFMFbG02SrZvPAXpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgP
+K9Dx2hzLabjKSWJtyNBjYt1gD1iqj6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3
+AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFEe4zf/lb+74suwv
+Tg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAACAj
+QTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV
+cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXS
+IGrs/CIBKM+GuIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2
+HJLw5QY33KbmkJs4j1xrG0aGQ0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsa
+O5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8ZORK15FTAaggiG6cX0S5y2CBNOxv
+033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2KSb12tjE8nVhz36u
+dmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz6MkE
+kbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg41
+3OEMXbugUZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvD
+u79leNKGef9JOxqDDPDeeOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq
+4/g7u9xN12TyUb7mqqta6THuBrxzvxNiCp/HuZc=
+-----END CERTIFICATE-----
+
+# Issuer: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
+# Subject: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
+# Label: "T-TeleSec GlobalRoot Class 3"
+# Serial: 1
+# MD5 Fingerprint: ca:fb:40:a8:4e:39:92:8a:1d:fe:8e:2f:c4:27:ea:ef
+# SHA1 Fingerprint: 55:a6:72:3e:cb:f2:ec:cd:c3:23:74:70:19:9d:2a:be:11:e3:81:d1
+# SHA256 Fingerprint: fd:73:da:d3:1c:64:4f:f1:b4:3b:ef:0c:cd:da:96:71:0b:9c:d9:87:5e:ca:7e:31:70:7a:f3:e9:6d:52:2b:bd
+-----BEGIN CERTIFICATE-----
+MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx
+KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd
+BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl
+YyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgxMDAxMTAyOTU2WhcNMzMxMDAxMjM1
+OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy
+aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50
+ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0G
+CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN
+8ELg63iIVl6bmlQdTQyK9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/
+RLyTPWGrTs0NvvAgJ1gORH8EGoel15YUNpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4
+hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZFiP0Zf3WHHx+xGwpzJFu5
+ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W0eDrXltM
+EnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGj
+QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1
+A/d2O2GCahKqGFPrAyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOy
+WL6ukK2YJ5f+AbGwUgC4TeQbIXQbfsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ
+1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzTucpH9sry9uetuUg/vBa3wW30
+6gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7hP0HHRwA11fXT
+91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml
+e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4p
+TpPDpFQUWw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH
+# Subject: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH
+# Label: "D-TRUST Root Class 3 CA 2 2009"
+# Serial: 623603
+# MD5 Fingerprint: cd:e0:25:69:8d:47:ac:9c:89:35:90:f7:fd:51:3d:2f
+# SHA1 Fingerprint: 58:e8:ab:b0:36:15:33:fb:80:f7:9b:1b:6d:29:d3:ff:8d:5f:00:f0
+# SHA256 Fingerprint: 49:e7:a4:42:ac:f0:ea:62:87:05:00:54:b5:25:64:b6:50:e4:f4:9e:42:e3:48:d6:aa:38:e0:39:e9:57:b1:c1
+-----BEGIN CERTIFICATE-----
+MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRF
+MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBD
+bGFzcyAzIENBIDIgMjAwOTAeFw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NTha
+ME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMM
+HkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIwDQYJKoZIhvcNAQEB
+BQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOADER03
+UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42
+tSHKXzlABF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9R
+ySPocq60vFYJfxLLHLGvKZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsM
+lFqVlNpQmvH/pStmMaTJOKDfHR+4CS7zp+hnUquVH+BGPtikw8paxTGA6Eian5Rp
+/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUCAwEAAaOCARowggEWMA8G
+A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ4PGEMA4G
+A1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVj
+dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUy
+MENBJTIwMiUyMDIwMDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRl
+cmV2b2NhdGlvbmxpc3QwQ6BBoD+GPWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3Js
+L2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAwOS5jcmwwDQYJKoZIhvcNAQEL
+BQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm2H6NMLVwMeni
+acfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0
+o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4K
+zCUqNQT4YJEVdT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8
+PIWmawomDeCTmGCufsYkl4phX5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3Y
+Johw1+qRzT65ysCQblrGXnRl11z+o+I=
+-----END CERTIFICATE-----
+
+# Issuer: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH
+# Subject: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH
+# Label: "D-TRUST Root Class 3 CA 2 EV 2009"
+# Serial: 623604
+# MD5 Fingerprint: aa:c6:43:2c:5e:2d:cd:c4:34:c0:50:4f:11:02:4f:b6
+# SHA1 Fingerprint: 96:c9:1b:0b:95:b4:10:98:42:fa:d0:d8:22:79:fe:60:fa:b9:16:83
+# SHA256 Fingerprint: ee:c5:49:6b:98:8c:e9:86:25:b9:34:09:2e:ec:29:08:be:d0:b0:f3:16:c2:d4:73:0c:84:ea:f1:f3:d3:48:81
+-----BEGIN CERTIFICATE-----
+MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRF
+MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBD
+bGFzcyAzIENBIDIgRVYgMjAwOTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUw
+NDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNV
+BAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAwOTCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfSegpn
+ljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM0
+3TP1YtHhzRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6Z
+qQTMFexgaDbtCHu39b+T7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lR
+p75mpoo6Kr3HGrHhFPC+Oh25z1uxav60sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8
+HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure3511H3a6UCAwEAAaOCASQw
+ggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyvcop9Ntea
+HNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFw
+Oi8vZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xh
+c3MlMjAzJTIwQ0ElMjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1E
+RT9jZXJ0aWZpY2F0ZXJldm9jYXRpb25saXN0MEagRKBChkBodHRwOi8vd3d3LmQt
+dHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xhc3NfM19jYV8yX2V2XzIwMDku
+Y3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+PPoeUSbrh/Yp
+3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05
+nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNF
+CSuGdXzfX2lXANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7na
+xpeG0ILD5EJt/rDiZE4OJudANCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqX
+KVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVvw9y4AyHqnxbxLFS1
+-----END CERTIFICATE-----
+
+# Issuer: CN=CA Disig Root R2 O=Disig a.s.
+# Subject: CN=CA Disig Root R2 O=Disig a.s.
+# Label: "CA Disig Root R2"
+# Serial: 10572350602393338211
+# MD5 Fingerprint: 26:01:fb:d8:27:a7:17:9a:45:54:38:1a:43:01:3b:03
+# SHA1 Fingerprint: b5:61:eb:ea:a4:de:e4:25:4b:69:1a:98:a5:57:47:c2:34:c7:d9:71
+# SHA256 Fingerprint: e2:3d:4a:03:6d:7b:70:e9:f5:95:b1:42:20:79:d2:b9:1e:df:bb:1f:b6:51:a0:63:3e:aa:8a:9d:c5:f8:07:03
+-----BEGIN CERTIFICATE-----
+MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNV
+BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu
+MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQy
+MDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx
+EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjIw
+ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbCw3Oe
+NcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNH
+PWSb6WiaxswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3I
+x2ymrdMxp7zo5eFm1tL7A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbe
+QTg06ov80egEFGEtQX6sx3dOy1FU+16SGBsEWmjGycT6txOgmLcRK7fWV8x8nhfR
+yyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqVg8NTEQxzHQuyRpDRQjrO
+QG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa5Beny912
+H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJ
+QfYEkoopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUD
+i/ZnWejBBhG93c+AAk9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORs
+nLMOPReisjQS1n6yqEm70XooQL6iFh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1
+rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud
+DwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5uQu0wDQYJKoZI
+hvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM
+tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqf
+GopTpti72TVVsRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkb
+lvdhuDvEK7Z4bLQjb/D907JedR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka
++elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W81k/BfDxujRNt+3vrMNDcTa/F1bal
+TFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjxmHHEt38OFdAlab0i
+nSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01utI3
+gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18Dr
+G5gPcFw0sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3Os
+zMOl6W8KjptlwlCFtaOgUxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8x
+L4ysEr3vQCj8KWefshNPZiTEUxnpHikV7+ZtsH8tZ/3zbBt1RqPlShfppNcL
+-----END CERTIFICATE-----
+
+# Issuer: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV
+# Subject: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV
+# Label: "ACCVRAIZ1"
+# Serial: 6828503384748696800
+# MD5 Fingerprint: d0:a0:5a:ee:05:b6:09:94:21:a1:7d:f1:b2:29:82:02
+# SHA1 Fingerprint: 93:05:7a:88:15:c6:4f:ce:88:2f:fa:91:16:52:28:78:bc:53:64:17
+# SHA256 Fingerprint: 9a:6e:c0:12:e1:a7:da:9d:be:34:19:4d:47:8a:d7:c0:db:18:22:fb:07:1d:f1:29:81:49:6e:d1:04:38:41:13
+-----BEGIN CERTIFICATE-----
+MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UE
+AwwJQUNDVlJBSVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQsw
+CQYDVQQGEwJFUzAeFw0xMTA1MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQ
+BgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwHUEtJQUNDVjENMAsGA1UECgwEQUND
+VjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCb
+qau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gMjmoY
+HtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWo
+G2ioPej0RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpA
+lHPrzg5XPAOBOp0KoVdDaaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhr
+IA8wKFSVf+DuzgpmndFALW4ir50awQUZ0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/
+0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDGWuzndN9wrqODJerWx5eH
+k6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs78yM2x/47
+4KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMO
+m3WR5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpa
+cXpkatcnYGMN285J9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPl
+uUsXQA+xtrn13k/c4LOsOxFwYIRKQ26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYI
+KwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRwOi8vd3d3LmFjY3YuZXMvZmls
+ZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEuY3J0MB8GCCsG
+AQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2
+VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeT
+VfZW6oHlNsyMHj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIG
+CCsGAQUFBwICMIIBFB6CARAAQQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUA
+cgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBhAO0AegAgAGQAZQAgAGwAYQAgAEEA
+QwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUAYwBuAG8AbABvAGcA
+7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBjAHQA
+cgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAA
+QwBQAFMAIABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUA
+czAwBggrBgEFBQcCARYkaHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2Mu
+aHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRt
+aW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2MV9kZXIuY3JsMA4GA1Ud
+DwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZIhvcNAQEF
+BQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdp
+D70ER9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gU
+JyCpZET/LtZ1qmxNYEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+m
+AM/EKXMRNt6GGT6d7hmKG9Ww7Y49nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepD
+vV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJTS+xJlsndQAJxGJ3KQhfnlms
+tn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3sCPdK6jT2iWH
+7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h
+I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szA
+h1xA2syVP1XgNce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xF
+d3+YJ5oyXSrjhO7FmGYvliAd3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2H
+pPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3pEfbRD0tVNEYqi4Y7
+-----END CERTIFICATE-----
+
+# Issuer: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA
+# Subject: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA
+# Label: "TWCA Global Root CA"
+# Serial: 3262
+# MD5 Fingerprint: f9:03:7e:cf:e6:9e:3c:73:7a:2a:90:07:69:ff:2b:96
+# SHA1 Fingerprint: 9c:bb:48:53:f6:a4:f6:d3:52:a4:e8:32:52:55:60:13:f5:ad:af:65
+# SHA256 Fingerprint: 59:76:90:07:f7:68:5d:0f:cd:50:87:2f:9f:95:d5:75:5a:5b:2b:45:7d:81:f3:69:2b:61:0a:98:67:2f:0e:1b
+-----BEGIN CERTIFICATE-----
+MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcx
+EjAQBgNVBAoTCVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMT
+VFdDQSBHbG9iYWwgUm9vdCBDQTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5
+NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQKEwlUQUlXQU4tQ0ExEDAOBgNVBAsT
+B1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3QgQ0EwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2CnJfF
+10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz
+0ALfUPZVr2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfCh
+MBwqoJimFb3u/Rk28OKRQ4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbH
+zIh1HrtsBv+baz4X7GGqcXzGHaL3SekVtTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc
+46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1WKKD+u4ZqyPpcC1jcxkt2
+yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99sy2sbZCi
+laLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYP
+oA/pyJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQA
+BDzfuBSO6N+pjWxnkjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcE
+qYSjMq+u7msXi7Kx/mzhkIyIqJdIzshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm
+4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB
+/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6gcFGn90xHNcgL
+1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn
+LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WF
+H6vPNOw/KP4M8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNo
+RI2T9GRwoD2dKAXDOXC4Ynsg/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+
+nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlglPx4mI88k1HtQJAH32RjJMtOcQWh
+15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryPA9gK8kxkRr05YuWW
+6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3mi4TW
+nsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5j
+wa19hAM8EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWz
+aGHQRiapIVJpLesux+t3zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmy
+KwbQBM0=
+-----END CERTIFICATE-----
+
+# Issuer: CN=TeliaSonera Root CA v1 O=TeliaSonera
+# Subject: CN=TeliaSonera Root CA v1 O=TeliaSonera
+# Label: "TeliaSonera Root CA v1"
+# Serial: 199041966741090107964904287217786801558
+# MD5 Fingerprint: 37:41:49:1b:18:56:9a:26:f5:ad:c2:66:fb:40:a5:4c
+# SHA1 Fingerprint: 43:13:bb:96:f1:d5:86:9b:c1:4e:6a:92:f6:cf:f6:34:69:87:82:37
+# SHA256 Fingerprint: dd:69:36:fe:21:f8:f0:77:c1:23:a1:a5:21:c1:22:24:f7:22:55:b7:3e:03:a7:26:06:93:e8:a2:4b:0f:a3:89
+-----BEGIN CERTIFICATE-----
+MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAw
+NzEUMBIGA1UECgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJv
+b3QgQ0EgdjEwHhcNMDcxMDE4MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYD
+VQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwWVGVsaWFTb25lcmEgUm9vdCBDQSB2
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+6yfwIaPzaSZVfp3F
+VRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA3GV1
+7CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+X
+Z75Ljo1kB1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+
+/jXh7VB7qTCNGdMJjmhnXb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs
+81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxHoLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkm
+dtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3F0fUTPHSiXk+TT2YqGHe
+Oh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJoWjiUIMu
+sDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4
+pgd7gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fs
+slESl1MpWtTwEhDcTwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQ
+arMCpgKIv7NHfirZ1fpoeDVNAgMBAAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYD
+VR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qWDNXr+nuqF+gTEjANBgkqhkiG
+9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNmzqjMDfz1mgbl
+dxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx
+0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1Tj
+TQpgcmLNkQfWpb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBed
+Y2gea+zDTYa4EzAvXUYNR0PVG6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7
+Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpcc41teyWRyu5FrgZLAMzTsVlQ2jqI
+OylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOTJsjrDNYmiLbAJM+7
+vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2qReW
+t88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcn
+HL/EVlP6Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVx
+SK236thZiNSQvxaz2emsWWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY=
+-----END CERTIFICATE-----
+
+# Issuer: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
+# Subject: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
+# Label: "T-TeleSec GlobalRoot Class 2"
+# Serial: 1
+# MD5 Fingerprint: 2b:9b:9e:e4:7b:6c:1f:00:72:1a:cc:c1:77:79:df:6a
+# SHA1 Fingerprint: 59:0d:2d:7d:88:4f:40:2e:61:7e:a5:62:32:17:65:cf:17:d8:94:e9
+# SHA256 Fingerprint: 91:e2:f5:78:8d:58:10:eb:a7:ba:58:73:7d:e1:54:8a:8e:ca:cd:01:45:98:bc:0b:14:3e:04:1b:17:05:25:52
+-----BEGIN CERTIFICATE-----
+MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx
+KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd
+BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl
+YyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgxMDAxMTA0MDE0WhcNMzMxMDAxMjM1
+OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy
+aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50
+ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0G
+CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUd
+AqSzm1nzHoqvNK38DcLZSBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiC
+FoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/FvudocP05l03Sx5iRUKrERLMjfTlH6VJi
+1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx9702cu+fjOlbpSD8DT6Iavq
+jnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGVWOHAD3bZ
+wI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGj
+QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/
+WSA2AHmgoCJrjNXyYdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhy
+NsZt+U2e+iKo4YFWz827n+qrkRk4r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPAC
+uvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNfvNoBYimipidx5joifsFvHZVw
+IEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR3p1m0IvVVGb6
+g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN
+9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlP
+BSeOE6Fuwg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Atos TrustedRoot 2011 O=Atos
+# Subject: CN=Atos TrustedRoot 2011 O=Atos
+# Label: "Atos TrustedRoot 2011"
+# Serial: 6643877497813316402
+# MD5 Fingerprint: ae:b9:c4:32:4b:ac:7f:5d:66:cc:77:94:bb:2a:77:56
+# SHA1 Fingerprint: 2b:b1:f5:3e:55:0c:1d:c5:f1:d4:e6:b7:6a:46:4b:55:06:02:ac:21
+# SHA256 Fingerprint: f3:56:be:a2:44:b7:a9:1e:b3:5d:53:ca:9a:d7:86:4a:ce:01:8e:2d:35:d5:f8:f9:6d:df:68:a6:f4:1a:a4:74
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UE
+AwwVQXRvcyBUcnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQG
+EwJERTAeFw0xMTA3MDcxNDU4MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMM
+FUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsGA1UECgwEQXRvczELMAkGA1UEBhMC
+REUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCVhTuXbyo7LjvPpvMp
+Nb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr54rM
+VD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+
+SZFhyBH+DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ
+4J7sVaE3IqKHBAUsR320HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0L
+cp2AMBYHlT8oDv3FdU9T1nSatCQujgKRz3bFmx5VdJx4IbHwLfELn8LVlhgf8FQi
+eowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7Rl+lwrrw7GWzbITAPBgNV
+HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZbNshMBgG
+A1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3
+DQEBCwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8j
+vZfza1zv7v1Apt+hk6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kP
+DpFrdRbhIfzYJsdHt6bPWHJxfrrhTZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pc
+maHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a961qn8FYiqTxlVMYVqL2Gns2D
+lmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G3mB/ufNPRJLv
+KrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 1 G3"
+# Serial: 687049649626669250736271037606554624078720034195
+# MD5 Fingerprint: a4:bc:5b:3f:fe:37:9a:fa:64:f0:e2:fa:05:3d:0b:ab
+# SHA1 Fingerprint: 1b:8e:ea:57:96:29:1a:c9:39:ea:b8:0a:81:1a:73:73:c0:93:79:67
+# SHA256 Fingerprint: 8a:86:6f:d1:b2:76:b5:7e:57:8e:92:1c:65:82:8a:2b:ed:58:e9:f2:f2:88:05:41:34:b7:f1:f4:bf:c9:cc:74
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00
+MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEgRzMwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakEPBtV
+wedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWe
+rNrwU8lmPNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF341
+68Xfuw6cwI2H44g4hWf6Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh
+4Pw5qlPafX7PGglTvF0FBM+hSo+LdoINofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXp
+UhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/lg6AnhF4EwfWQvTA9xO+o
+abw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV7qJZjqlc
+3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/G
+KubX9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSt
+hfbZxbGL0eUQMk1fiyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KO
+Tk0k+17kBL5yG6YnLUlamXrXXAkgt3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOt
+zCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZIhvcNAQELBQAD
+ggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC
+MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2
+cDMT/uFPpiN3GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUN
+qXsCHKnQO18LwIE6PWThv6ctTr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5
+YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP+V04ikkwj+3x6xn0dxoxGE1nVGwv
+b2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh3jRJjehZrJ3ydlo2
+8hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fawx/k
+NSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNj
+ZgKAvQU6O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhp
+q1467HxpvMc7hU6eFbm0FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFt
+nh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOVhMJKzRwuJIczYOXD
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 2 G3"
+# Serial: 390156079458959257446133169266079962026824725800
+# MD5 Fingerprint: af:0c:86:6e:bf:40:2d:7f:0b:3e:12:50:ba:12:3d:06
+# SHA1 Fingerprint: 09:3c:61:f3:8b:8b:dc:7d:55:df:75:38:02:05:00:e1:25:f5:c8:36
+# SHA256 Fingerprint: 8f:e4:fb:0a:f9:3a:4d:0d:67:db:0b:eb:b2:3e:37:c7:1b:f3:25:dc:bc:dd:24:0e:a0:4d:af:58:b4:7e:18:40
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00
+MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf
+qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW
+n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym
+c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+
+O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1
+o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j
+IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq
+IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz
+8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh
+vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l
+7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG
+cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD
+ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66
+AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC
+roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga
+W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n
+lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE
++V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV
+csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd
+dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg
+KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM
+HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4
+WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 3 G3"
+# Serial: 268090761170461462463995952157327242137089239581
+# MD5 Fingerprint: df:7d:b9:ad:54:6f:68:a1:df:89:57:03:97:43:b0:d7
+# SHA1 Fingerprint: 48:12:bd:92:3c:a8:c4:39:06:e7:30:6d:27:96:e6:a4:cf:22:2e:7d
+# SHA256 Fingerprint: 88:ef:81:de:20:2e:b0:18:45:2e:43:f8:64:72:5c:ea:5f:bd:1f:c2:d9:d2:05:73:07:09:c5:d8:b8:69:0f:46
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00
+MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMgRzMwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286IxSR
+/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNu
+FoM7pmRLMon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXR
+U7Ox7sWTaYI+FrUoRqHe6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+c
+ra1AdHkrAj80//ogaX3T7mH1urPnMNA3I4ZyYUUpSFlob3emLoG+B01vr87ERROR
+FHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3UVDmrJqMz6nWB2i3ND0/k
+A9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f75li59wzw
+eyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634Ryl
+sSqiMd5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBp
+VzgeAVuNVejH38DMdyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0Q
+A4XN8f+MFrXBsj6IbGB/kE+V9/YtrQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+
+ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZIhvcNAQELBQAD
+ggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px
+KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnI
+FUBhynLWcKzSt/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5Wvv
+oxXqA/4Ti2Tk08HS6IT7SdEQTXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFg
+u/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9DuDcpmvJRPpq3t/O5jrFc/ZSXPsoaP
+0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGibIh6BJpsQBJFxwAYf
+3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmDhPbl
+8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+
+DhcI00iX0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HN
+PlopNLk9hM6xZdRZkZFWdSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/
+ywaZWWDYWGWVjUTR939+J399roD1B0y2PpxxVJkES/1Y+Zj0
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Assured ID Root G2"
+# Serial: 15385348160840213938643033620894905419
+# MD5 Fingerprint: 92:38:b9:f8:63:24:82:65:2c:57:33:e6:fe:81:8f:9d
+# SHA1 Fingerprint: a1:4b:48:d9:43:ee:0a:0e:40:90:4f:3c:e0:a4:c0:91:93:51:5d:3f
+# SHA256 Fingerprint: 7d:05:eb:b6:82:33:9f:8c:94:51:ee:09:4e:eb:fe:fa:79:53:a1:14:ed:b2:f4:49:49:45:2f:ab:7d:2f:c1:85
+-----BEGIN CERTIFICATE-----
+MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBl
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv
+b3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQG
+EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl
+cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSA
+n61UQbVH35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4Htecc
+biJVMWWXvdMX0h5i89vqbFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9Hp
+EgjAALAcKxHad3A2m67OeYfcgnDmCXRwVWmvo2ifv922ebPynXApVfSr/5Vh88lA
+bx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OPYLfykqGxvYmJHzDNw6Yu
+YjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+RnlTGNAgMB
+AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQW
+BBTOw0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPI
+QW5pJ6d1Ee88hjZv0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I
+0jJmwYrA8y8678Dj1JGG0VDjA9tzd29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4Gni
+lmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAWhsI6yLETcDbYz+70CjTVW0z9
+B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0MjomZmWzwPDCv
+ON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo
+IhNzbM8m9Yop5w==
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Assured ID Root G3"
+# Serial: 15459312981008553731928384953135426796
+# MD5 Fingerprint: 7c:7f:65:31:0c:81:df:8d:ba:3e:99:e2:5c:ad:6e:fb
+# SHA1 Fingerprint: f5:17:a2:4f:9a:48:c6:c9:f8:a2:00:26:9f:dc:0f:48:2c:ab:30:89
+# SHA256 Fingerprint: 7e:37:cb:8b:4c:47:09:0c:ab:36:55:1b:a6:f4:5d:b8:40:68:0f:ba:16:6a:95:2d:b1:00:71:7f:43:05:3f:c2
+-----BEGIN CERTIFICATE-----
+MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQsw
+CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu
+ZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3Qg
+RzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQGEwJV
+UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu
+Y29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQBgcq
+hkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJf
+Zn4f5dwbRXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17Q
+RSAPWXYQ1qAk8C3eNvJsKTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/
+BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgFUaFNN6KDec6NHSrkhDAKBggqhkjOPQQD
+AwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5FyYZ5eEJJZVrmDxxDnOOlY
+JjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy1vUhZscv
+6pZjamVFkpUBtA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Global Root G2"
+# Serial: 4293743540046975378534879503202253541
+# MD5 Fingerprint: e4:a6:8a:c8:54:ac:52:42:46:0a:fd:72:48:1b:2a:44
+# SHA1 Fingerprint: df:3c:24:f9:bf:d6:66:76:1b:26:80:73:fe:06:d1:cc:8d:4f:82:a4
+# SHA256 Fingerprint: cb:3c:cb:b7:60:31:e5:e0:13:8f:8d:d3:9a:23:f9:de:47:ff:c3:5e:43:c1:14:4c:ea:27:d4:6a:5a:b1:cb:5f
+-----BEGIN CERTIFICATE-----
+MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBh
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH
+MjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVT
+MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j
+b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI
+2/Ou8jqJkTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx
+1x7e/dfgy5SDN67sH0NO3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQ
+q2EGnI/yuum06ZIya7XzV+hdG82MHauVBJVJ8zUtluNJbd134/tJS7SsVQepj5Wz
+tCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyMUNGPHgm+F6HmIcr9g+UQ
+vIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQABo0IwQDAP
+BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV
+5uNu5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY
+1Yl9PMWLSn/pvtsrF9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4
+NeF22d+mQrvHRAiGfzZ0JFrabA0UWTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NG
+Fdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBHQRFXGU7Aj64GxJUTFy8bJZ91
+8rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/iyK5S9kJRaTe
+pLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl
+MrY=
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Global Root G3"
+# Serial: 7089244469030293291760083333884364146
+# MD5 Fingerprint: f5:5d:a4:50:a5:fb:28:7e:1e:0f:0d:cc:96:57:56:ca
+# SHA1 Fingerprint: 7e:04:de:89:6a:3e:66:6d:00:e6:87:d3:3f:fa:d9:3b:e8:3d:34:9e
+# SHA256 Fingerprint: 31:ad:66:48:f8:10:41:38:c7:38:f3:9e:a4:32:01:33:39:3e:3a:18:cc:02:29:6e:f9:7c:2a:c9:ef:67:31:d0
+-----BEGIN CERTIFICATE-----
+MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQsw
+CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu
+ZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAe
+Fw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVTMRUw
+EwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20x
+IDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0CAQYF
+K4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FG
+fp4tn+6OYwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPO
+Z9wj/wMco+I+o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAd
+BgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNpYim8S8YwCgYIKoZIzj0EAwMDaAAwZQIx
+AK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y3maTD/HMsQmP3Wyr+mt/
+oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34VOKa5Vt8
+sycX
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Trusted Root G4"
+# Serial: 7451500558977370777930084869016614236
+# MD5 Fingerprint: 78:f2:fc:aa:60:1f:2f:b4:eb:c9:37:ba:53:2e:75:49
+# SHA1 Fingerprint: dd:fb:16:cd:49:31:c9:73:a2:03:7d:3f:c8:3a:4d:7d:77:5d:05:e4
+# SHA256 Fingerprint: 55:2f:7b:dc:f1:a7:af:9e:6c:e6:72:01:7f:4f:12:ab:f7:72:40:c7:8e:76:1a:c2:03:d1:d9:d2:0a:c8:99:88
+-----BEGIN CERTIFICATE-----
+MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBi
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3Qg
+RzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBiMQswCQYDVQQGEwJV
+UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu
+Y29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3y
+ithZwuEppz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1If
+xp4VpX6+n6lXFllVcq9ok3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDV
+ySAdYyktzuxeTsiT+CFhmzTrBcZe7FsavOvJz82sNEBfsXpm7nfISKhmV1efVFiO
+DCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGYQJB5w3jHtrHEtWoYOAMQ
+jdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6MUSaM0C/
+CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCi
+EhtmmnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADM
+fRyVw4/3IbKyEbe7f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QY
+uKZ3AeEPlAwhHbJUKSWJbOUOUlFHdL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXK
+chYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8oR7FwI+isX4KJpn15GkvmB0t
+9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+hjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD
+ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2
+SV1EY+CtnJYYZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd
++SeuMIW59mdNOj6PWTkiU0TryF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWc
+fFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy7zBZLq7gcfJW5GqXb5JQbZaNaHqa
+sjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iahixTXTBmyUEFxPT9N
+cCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN5r5N
+0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie
+4u1Ki7wb/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mI
+r/OSmbaz5mEP0oUA51Aa5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1
+/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tKG48BtieVU+i2iW1bvGjUI+iLUaJW+fCm
+gKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP82Z+
+-----END CERTIFICATE-----
+
+# Issuer: CN=COMODO RSA Certification Authority O=COMODO CA Limited
+# Subject: CN=COMODO RSA Certification Authority O=COMODO CA Limited
+# Label: "COMODO RSA Certification Authority"
+# Serial: 101909084537582093308941363524873193117
+# MD5 Fingerprint: 1b:31:b0:71:40:36:cc:14:36:91:ad:c4:3e:fd:ec:18
+# SHA1 Fingerprint: af:e5:d2:44:a8:d1:19:42:30:ff:47:9f:e2:f8:97:bb:cd:7a:8c:b4
+# SHA256 Fingerprint: 52:f0:e1:c4:e5:8e:c6:29:29:1b:60:31:7f:07:46:71:b8:5d:7e:a8:0d:5b:07:27:34:63:53:4b:32:b4:02:34
+-----BEGIN CERTIFICATE-----
+MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCB
+hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
+A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNV
+BAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMTE5
+MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgT
+EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR
+Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNh
+dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR
+6FSS0gpWsawNJN3Fz0RndJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8X
+pz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZFGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC
+9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+5eNu/Nio5JIk2kNrYrhV
+/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pGx8cgoLEf
+Zd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z
++pUX2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7w
+qP/0uK3pN/u6uPQLOvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZah
+SL0896+1DSJMwBGB7FY79tOi4lu3sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVIC
+u9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+CGCe01a60y1Dma/RMhnEw6abf
+Fobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5WdYgGq/yapiq
+crxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E
+FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB
+/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvl
+wFTPoCWOAvn9sKIN9SCYPBMtrFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM
+4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+nq6PK7o9mfjYcwlYRm6mnPTXJ9OV
+2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSgtZx8jb8uk2Intzna
+FxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwWsRqZ
+CuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiK
+boHGhfKppC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmcke
+jkk9u+UJueBPSZI9FoJAzMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yL
+S0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHqZJx64SIDqZxubw5lT2yHh17zbqD5daWb
+QOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk527RH89elWsn2/x20Kk4yl
+0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7ILaZRfyHB
+NVOFBkpdn627G190
+-----END CERTIFICATE-----
+
+# Issuer: CN=USERTrust RSA Certification Authority O=The USERTRUST Network
+# Subject: CN=USERTrust RSA Certification Authority O=The USERTRUST Network
+# Label: "USERTrust RSA Certification Authority"
+# Serial: 2645093764781058787591871645665788717
+# MD5 Fingerprint: 1b:fe:69:d1:91:b7:19:33:a3:72:a8:0f:e1:55:e5:b5
+# SHA1 Fingerprint: 2b:8f:1b:57:33:0d:bb:a2:d0:7a:6c:51:f7:0e:e9:0d:da:b9:ad:8e
+# SHA256 Fingerprint: e7:93:c9:b0:2f:d8:aa:13:e2:1c:31:22:8a:cc:b0:81:19:64:3b:74:9c:89:89:64:b1:74:6d:46:c3:d4:cb:d2
+-----BEGIN CERTIFICATE-----
+MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB
+iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl
+cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV
+BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw
+MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV
+BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU
+aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy
+dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK
+AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B
+3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY
+tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/
+Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2
+VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT
+79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6
+c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT
+Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l
+c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee
+UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE
+Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd
+BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G
+A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF
+Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO
+VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3
+ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs
+8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR
+iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze
+Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ
+XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/
+qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB
+VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB
+L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG
+jjxDah2nGN59PRbxYvnKkKj9
+-----END CERTIFICATE-----
+
+# Issuer: CN=USERTrust ECC Certification Authority O=The USERTRUST Network
+# Subject: CN=USERTrust ECC Certification Authority O=The USERTRUST Network
+# Label: "USERTrust ECC Certification Authority"
+# Serial: 123013823720199481456569720443997572134
+# MD5 Fingerprint: fa:68:bc:d9:b5:7f:ad:fd:c9:1d:06:83:28:cc:24:c1
+# SHA1 Fingerprint: d1:cb:ca:5d:b2:d5:2a:7f:69:3b:67:4d:e5:f0:5a:1d:0c:95:7d:f0
+# SHA256 Fingerprint: 4f:f4:60:d5:4b:9c:86:da:bf:bc:fc:57:12:e0:40:0d:2b:ed:3f:bc:4d:4f:bd:aa:86:e0:6a:dc:d2:a9:ad:7a
+-----BEGIN CERTIFICATE-----
+MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDEL
+MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl
+eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT
+JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMjAx
+MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNVBAgT
+Ck5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVUaGUg
+VVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlm
+aWNhdGlvbiBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqflo
+I+d61SRvU8Za2EurxtW20eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinng
+o4N+LZfQYcTxmdwlkWOrfzCjtHDix6EznPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0G
+A1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNVHQ8BAf8EBAMCAQYwDwYD
+VR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBBHU6+4WMB
+zzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbW
+RNZu9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5
+# Label: "GlobalSign ECC Root CA - R5"
+# Serial: 32785792099990507226680698011560947931244
+# MD5 Fingerprint: 9f:ad:3b:1c:02:1e:8a:ba:17:74:38:81:0c:a2:bc:08
+# SHA1 Fingerprint: 1f:24:c6:30:cd:a4:18:ef:20:69:ff:ad:4f:dd:5f:46:3a:1b:69:aa
+# SHA256 Fingerprint: 17:9f:bc:14:8a:3d:d0:0f:d2:4e:a1:34:58:cc:43:bf:a7:f5:9c:81:82:d7:83:a5:13:f6:eb:ec:10:0c:89:24
+-----BEGIN CERTIFICATE-----
+MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEk
+MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpH
+bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX
+DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD
+QSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu
+MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6SFkc
+8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8ke
+hOvRnkmSh5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD
+VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYI
+KoZIzj0EAwMDaAAwZQIxAOVpEslu28YxuglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg
+515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7yFz9SO8NdCKoCOJuxUnO
+xwy8p2Fp8fc74SrL+SvzZpA3
+-----END CERTIFICATE-----
+
+# Issuer: CN=IdenTrust Commercial Root CA 1 O=IdenTrust
+# Subject: CN=IdenTrust Commercial Root CA 1 O=IdenTrust
+# Label: "IdenTrust Commercial Root CA 1"
+# Serial: 13298821034946342390520003877796839426
+# MD5 Fingerprint: b3:3e:77:73:75:ee:a0:d3:e3:7e:49:63:49:59:bb:c7
+# SHA1 Fingerprint: df:71:7e:aa:4a:d9:4e:c9:55:84:99:60:2d:48:de:5f:bc:f0:3a:25
+# SHA256 Fingerprint: 5d:56:49:9b:e4:d2:e0:8b:cf:ca:d0:8a:3e:38:72:3d:50:50:3b:de:70:69:48:e4:2f:55:60:30:19:e5:28:ae
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBK
+MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVu
+VHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQw
+MTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScw
+JQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ldhNlT
+3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU
++ehcCuz/mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gp
+S0l4PJNgiCL8mdo2yMKi1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1
+bVoE/c40yiTcdCMbXTMTEl3EASX2MN0CXZ/g1Ue9tOsbobtJSdifWwLziuQkkORi
+T0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl3ZBWzvurpWCdxJ35UrCL
+vYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzyNeVJSQjK
+Vsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZK
+dHzVWYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHT
+c+XvvqDtMwt0viAgxGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hv
+l7yTmvmcEpB4eoCHFddydJxVdHixuuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5N
+iGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB
+/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZIhvcNAQELBQAD
+ggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH
+6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwt
+LRvM7Kqas6pgghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93
+nAbowacYXVKV7cndJZ5t+qntozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3
++wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmVYjzlVYA211QC//G5Xc7UI2/YRYRK
+W2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUXfeu+h1sXIFRRk0pT
+AwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/rokTLq
+l1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG
+4iZZRHUe2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZ
+mUlO+KWA2yUPHGNiiskzZ2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A
+7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7RcGzM7vRX+Bi6hG6H
+-----END CERTIFICATE-----
+
+# Issuer: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust
+# Subject: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust
+# Label: "IdenTrust Public Sector Root CA 1"
+# Serial: 13298821034946342390521976156843933698
+# MD5 Fingerprint: 37:06:a5:b0:fc:89:9d:ba:f4:6b:8c:1a:64:cd:d5:ba
+# SHA1 Fingerprint: ba:29:41:60:77:98:3f:f4:f3:ef:f2:31:05:3b:2e:ea:6d:4d:45:fd
+# SHA256 Fingerprint: 30:d0:89:5a:9a:44:8a:26:20:91:63:55:22:d1:f5:20:10:b5:86:7a:ca:e1:2c:78:ef:95:8f:d4:f4:38:9f:2f
+-----BEGIN CERTIFICATE-----
+MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBN
+MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVu
+VHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcN
+MzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0
+MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTyP4o7
+ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGy
+RBb06tD6Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlS
+bdsHyo+1W/CD80/HLaXIrcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF
+/YTLNiCBWS2ab21ISGHKTN9T0a9SvESfqy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R
+3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoSmJxZZoY+rfGwyj4GD3vw
+EUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFnol57plzy
+9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9V
+GxyhLrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ
+2fjXctscvG29ZV/viDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsV
+WaFHVCkugyhfHMKiq3IXAAaOReyL4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gD
+W/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/
+BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMwDQYJKoZIhvcN
+AQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj
+t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHV
+DRDtfULAj+7AmgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9
+TaDKQGXSc3z1i9kKlT/YPyNtGtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8G
+lwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFtm6/n6J91eEyrRjuazr8FGF1NFTwW
+mhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMxNRF4eKLg6TCMf4Df
+WN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4Mhn5
++bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJ
+tshquDDIajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhA
+GaQdp/lLQzfcaFpPz+vCZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv
+8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ3Wl9af0AVqW3rLatt8o+Ae+c
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only
+# Subject: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only
+# Label: "Entrust Root Certification Authority - G2"
+# Serial: 1246989352
+# MD5 Fingerprint: 4b:e2:c9:91:96:65:0c:f4:0e:5a:93:92:a0:0a:fe:b2
+# SHA1 Fingerprint: 8c:f4:27:fd:79:0c:3a:d1:66:06:8d:e8:1e:57:ef:bb:93:22:72:d4
+# SHA256 Fingerprint: 43:df:57:74:b0:3e:7f:ef:5f:e4:0d:93:1a:7b:ed:f1:bb:2e:6b:42:73:8c:4e:6d:38:41:10:3d:3a:a7:f3:39
+-----BEGIN CERTIFICATE-----
+MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMC
+VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50
+cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3Qs
+IEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVz
+dCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwHhcNMDkwNzA3MTcy
+NTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUVu
+dHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwt
+dGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0
+aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmlj
+YXRpb24gQXV0aG9yaXR5IC0gRzIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
+AoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP/vaCeb9zYQYKpSfYs1/T
+RU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXzHHfV1IWN
+cCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hW
+wcKUs/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1
+U1+cPvQXLOZprE4yTGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0
+jaWvYkxN4FisZDQSA/i2jZRjJKRxAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP
+BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ60B7vfec7aVHUbI2fkBJmqzAN
+BgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5ZiXMRrEPR9RP/
+jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ
+Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v
+1fN2D807iDginWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4R
+nAuknZoh8/CbCzB428Hch0P+vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmH
+VHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xOe4pIb4tF9g==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only
+# Subject: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only
+# Label: "Entrust Root Certification Authority - EC1"
+# Serial: 51543124481930649114116133369
+# MD5 Fingerprint: b6:7e:1d:f0:58:c5:49:6c:24:3b:3d:ed:98:18:ed:bc
+# SHA1 Fingerprint: 20:d8:06:40:df:9b:25:f5:12:25:3a:11:ea:f7:59:8a:eb:14:b5:47
+# SHA256 Fingerprint: 02:ed:0e:b2:8c:14:da:45:16:5c:56:67:91:70:0d:64:51:d7:fb:56:f0:b2:ab:1d:3b:8e:b0:70:e5:6e:df:f5
+-----BEGIN CERTIFICATE-----
+MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkG
+A1UEBhMCVVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3
+d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVu
+dHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEzMDEGA1UEAxMq
+RW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRUMxMB4XDTEy
+MTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYwFAYD
+VQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0
+L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0g
+Zm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBD
+ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEVDMTB2MBAGByqGSM49AgEGBSuBBAAi
+A2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHyAsWfoPZb1YsGGYZPUxBt
+ByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef9eNi1KlH
+Bz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O
+BBYEFLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVC
+R98crlOZF7ZvHH3hvxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nX
+hTcGtXsI/esni0qU+eH6p44mCOh8kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G
+-----END CERTIFICATE-----
+
+# Issuer: CN=CFCA EV ROOT O=China Financial Certification Authority
+# Subject: CN=CFCA EV ROOT O=China Financial Certification Authority
+# Label: "CFCA EV ROOT"
+# Serial: 407555286
+# MD5 Fingerprint: 74:e1:b6:ed:26:7a:7a:44:30:33:94:ab:7b:27:81:30
+# SHA1 Fingerprint: e2:b8:29:4b:55:84:ab:6b:58:c2:90:46:6c:ac:3f:b8:39:8f:84:83
+# SHA256 Fingerprint: 5c:c3:d7:8e:4e:1d:5e:45:54:7a:04:e6:87:3e:64:f9:0c:f9:53:6d:1c:cc:2e:f8:00:f3:55:c4:c5:fd:70:fd
+-----BEGIN CERTIFICATE-----
+MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJD
+TjEwMC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9y
+aXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkx
+MjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEwMC4GA1UECgwnQ2hpbmEgRmluYW5j
+aWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJP
+T1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnVBU03
+sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpL
+TIpTUnrD7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5
+/ZOkVIBMUtRSqy5J35DNuF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp
+7hZZLDRJGqgG16iI0gNyejLi6mhNbiyWZXvKWfry4t3uMCz7zEasxGPrb382KzRz
+EpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7xzbh72fROdOXW3NiGUgt
+hxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9fpy25IGvP
+a931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqot
+aK8KgWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNg
+TnYGmE69g60dWIolhdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfV
+PKPtl8MeNPo4+QgO48BdK4PRVmrJtqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hv
+cWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAfBgNVHSMEGDAWgBTj/i39KNAL
+tbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAd
+BgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB
+ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObT
+ej/tUxPQ4i9qecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdL
+jOztUmCypAbqTuv0axn96/Ua4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBS
+ESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sGE5uPhnEFtC+NiWYzKXZUmhH4J/qy
+P5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfXBDrDMlI1Dlb4pd19
+xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjnaH9d
+Ci77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN
+5mydLIhyPDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe
+/v5WOaHIz16eGWRGENoXkbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+Z
+AAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3CekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ
+5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su
+-----END CERTIFICATE-----
+
+# Issuer: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed
+# Subject: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed
+# Label: "OISTE WISeKey Global Root GB CA"
+# Serial: 157768595616588414422159278966750757568
+# MD5 Fingerprint: a4:eb:b9:61:28:2e:b7:2f:98:b0:35:26:90:99:51:1d
+# SHA1 Fingerprint: 0f:f9:40:76:18:d3:d7:6a:4b:98:f0:a8:35:9e:0c:fd:27:ac:cc:ed
+# SHA256 Fingerprint: 6b:9c:08:e8:6e:b0:f7:67:cf:ad:65:cd:98:b6:21:49:e5:49:4a:67:f5:84:5e:7b:d1:ed:01:9f:27:b8:6b:d6
+-----BEGIN CERTIFICATE-----
+MIIDtTCCAp2gAwIBAgIQdrEgUnTwhYdGs/gjGvbCwDANBgkqhkiG9w0BAQsFADBt
+MQswCQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUg
+Rm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9i
+YWwgUm9vdCBHQiBDQTAeFw0xNDEyMDExNTAwMzJaFw0zOTEyMDExNTEwMzFaMG0x
+CzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBG
+b3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2Jh
+bCBSb290IEdCIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2Be3
+HEokKtaXscriHvt9OO+Y9bI5mE4nuBFde9IllIiCFSZqGzG7qFshISvYD06fWvGx
+WuR51jIjK+FTzJlFXHtPrby/h0oLS5daqPZI7H17Dc0hBt+eFf1Biki3IPShehtX
+1F1Q/7pn2COZH8g/497/b1t3sWtuuMlk9+HKQUYOKXHQuSP8yYFfTvdv37+ErXNk
+u7dCjmn21HYdfp2nuFeKUWdy19SouJVUQHMD9ur06/4oQnc/nSMbsrY9gBQHTC5P
+99UKFg29ZkM3fiNDecNAhvVMKdqOmq0NpQSHiB6F4+lT1ZvIiwNjeOvgGUpuuy9r
+M2RYk61pv48b74JIxwIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw
+AwEB/zAdBgNVHQ4EFgQUNQ/INmNe4qPs+TtmFc5RUuORmj0wEAYJKwYBBAGCNxUB
+BAMCAQAwDQYJKoZIhvcNAQELBQADggEBAEBM+4eymYGQfp3FsLAmzYh7KzKNbrgh
+cViXfa43FK8+5/ea4n32cZiZBKpDdHij40lhPnOMTZTg+XHEthYOU3gf1qKHLwI5
+gSk8rxWYITD+KJAAjNHhy/peyP34EEY7onhCkRd0VQreUGdNZtGn//3ZwLWoo4rO
+ZvUPQ82nK1d7Y0Zqqi5S2PTt4W2tKZB4SLrhI6qjiey1q5bAtEuiHZeeevJuQHHf
+aPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic
+Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM=
+-----END CERTIFICATE-----
+
+# Issuer: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A.
+# Subject: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A.
+# Label: "SZAFIR ROOT CA2"
+# Serial: 357043034767186914217277344587386743377558296292
+# MD5 Fingerprint: 11:64:c1:89:b0:24:b1:8c:b1:07:7e:89:9e:51:9e:99
+# SHA1 Fingerprint: e2:52:fa:95:3f:ed:db:24:60:bd:6e:28:f3:9c:cc:cf:5e:b3:3f:de
+# SHA256 Fingerprint: a1:33:9d:33:28:1a:0b:56:e5:57:d3:d3:2b:1c:e7:f9:36:7e:b0:94:bd:5f:a7:2a:7e:50:04:c8:de:d7:ca:fe
+-----BEGIN CERTIFICATE-----
+MIIDcjCCAlqgAwIBAgIUPopdB+xV0jLVt+O2XwHrLdzk1uQwDQYJKoZIhvcNAQEL
+BQAwUTELMAkGA1UEBhMCUEwxKDAmBgNVBAoMH0tyYWpvd2EgSXpiYSBSb3psaWN6
+ZW5pb3dhIFMuQS4xGDAWBgNVBAMMD1NaQUZJUiBST09UIENBMjAeFw0xNTEwMTkw
+NzQzMzBaFw0zNTEwMTkwNzQzMzBaMFExCzAJBgNVBAYTAlBMMSgwJgYDVQQKDB9L
+cmFqb3dhIEl6YmEgUm96bGljemVuaW93YSBTLkEuMRgwFgYDVQQDDA9TWkFGSVIg
+Uk9PVCBDQTIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC3vD5QqEvN
+QLXOYeeWyrSh2gwisPq1e3YAd4wLz32ohswmUeQgPYUM1ljj5/QqGJ3a0a4m7utT
+3PSQ1hNKDJA8w/Ta0o4NkjrcsbH/ON7Dui1fgLkCvUqdGw+0w8LBZwPd3BucPbOw
+3gAeqDRHu5rr/gsUvTaE2g0gv/pby6kWIK05YO4vdbbnl5z5Pv1+TW9NL++IDWr6
+3fE9biCloBK0TXC5ztdyO4mTp4CEHCdJckm1/zuVnsHMyAHs6A6KCpbns6aH5db5
+BSsNl0BwPLqsdVqc1U2dAgrSS5tmS0YHF2Wtn2yIANwiieDhZNRnvDF5YTy7ykHN
+XGoAyDw4jlivAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD
+AgEGMB0GA1UdDgQWBBQuFqlKGLXLzPVvUPMjX/hd56zwyDANBgkqhkiG9w0BAQsF
+AAOCAQEAtXP4A9xZWx126aMqe5Aosk3AM0+qmrHUuOQn/6mWmc5G4G18TKI4pAZw
+8PRBEew/R40/cof5O/2kbytTAOD/OblqBw7rHRz2onKQy4I9EYKL0rufKq8h5mOG
+nXkZ7/e7DDWQw4rtTw/1zBLZpD67oPwglV9PJi8RI4NOdQcPv5vRtB3pEAT+ymCP
+oky4rc/hkA/NrgrHXXu3UNLUYfrVFdvXn4dRVOul4+vJhaAlIDf7js4MNIThPIGy
+d05DpYhfhmehPea0XGG2Ptv+tyjFogeutcrKjSoS75ftwjCkySp6+/NNIxuZMzSg
+LvWpCz/UXeHPhJ/iGcJfitYgHuNztw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. OU=Certum Certification Authority
+# Subject: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. OU=Certum Certification Authority
+# Label: "Certum Trusted Network CA 2"
+# Serial: 44979900017204383099463764357512596969
+# MD5 Fingerprint: 6d:46:9e:d9:25:6d:08:23:5b:5e:74:7d:1e:27:db:f2
+# SHA1 Fingerprint: d3:dd:48:3e:2b:bf:4c:05:e8:af:10:f5:fa:76:26:cf:d3:dc:30:92
+# SHA256 Fingerprint: b6:76:f2:ed:da:e8:77:5c:d3:6c:b0:f6:3c:d1:d4:60:39:61:f4:9e:62:65:ba:01:3a:2f:03:07:b6:d0:b8:04
+-----BEGIN CERTIFICATE-----
+MIIF0jCCA7qgAwIBAgIQIdbQSk8lD8kyN/yqXhKN6TANBgkqhkiG9w0BAQ0FADCB
+gDELMAkGA1UEBhMCUEwxIjAgBgNVBAoTGVVuaXpldG8gVGVjaG5vbG9naWVzIFMu
+QS4xJzAlBgNVBAsTHkNlcnR1bSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEkMCIG
+A1UEAxMbQ2VydHVtIFRydXN0ZWQgTmV0d29yayBDQSAyMCIYDzIwMTExMDA2MDgz
+OTU2WhgPMjA0NjEwMDYwODM5NTZaMIGAMQswCQYDVQQGEwJQTDEiMCAGA1UEChMZ
+VW5pemV0byBUZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRp
+ZmljYXRpb24gQXV0aG9yaXR5MSQwIgYDVQQDExtDZXJ0dW0gVHJ1c3RlZCBOZXR3
+b3JrIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC9+Xj45tWA
+DGSdhhuWZGc/IjoedQF97/tcZ4zJzFxrqZHmuULlIEub2pt7uZld2ZuAS9eEQCsn
+0+i6MLs+CRqnSZXvK0AkwpfHp+6bJe+oCgCXhVqqndwpyeI1B+twTUrWwbNWuKFB
+OJvR+zF/j+Bf4bE/D44WSWDXBo0Y+aomEKsq09DRZ40bRr5HMNUuctHFY9rnY3lE
+fktjJImGLjQ/KUxSiyqnwOKRKIm5wFv5HdnnJ63/mgKXwcZQkpsCLL2puTRZCr+E
+Sv/f/rOf69me4Jgj7KZrdxYq28ytOxykh9xGc14ZYmhFV+SQgkK7QtbwYeDBoz1m
+o130GO6IyY0XRSmZMnUCMe4pJshrAua1YkV/NxVaI2iJ1D7eTiew8EAMvE0Xy02i
+sx7QBlrd9pPPV3WZ9fqGGmd4s7+W/jTcvedSVuWz5XV710GRBdxdaeOVDUO5/IOW
+OZV7bIBaTxNyxtd9KXpEulKkKtVBRgkg/iKgtlswjbyJDNXXcPiHUv3a76xRLgez
+Tv7QCdpw75j6VuZt27VXS9zlLCUVyJ4ueE742pyehizKV/Ma5ciSixqClnrDvFAS
+adgOWkaLOusm+iPJtrCBvkIApPjW/jAux9JG9uWOdf3yzLnQh1vMBhBgu4M1t15n
+3kfsmUjxpKEV/q2MYo45VU85FrmxY53/twIDAQABo0IwQDAPBgNVHRMBAf8EBTAD
+AQH/MB0GA1UdDgQWBBS2oVQ5AsOgP46KvPrU+Bym0ToO/TAOBgNVHQ8BAf8EBAMC
+AQYwDQYJKoZIhvcNAQENBQADggIBAHGlDs7k6b8/ONWJWsQCYftMxRQXLYtPU2sQ
+F/xlhMcQSZDe28cmk4gmb3DWAl45oPePq5a1pRNcgRRtDoGCERuKTsZPpd1iHkTf
+CVn0W3cLN+mLIMb4Ck4uWBzrM9DPhmDJ2vuAL55MYIR4PSFk1vtBHxgP58l1cb29
+XN40hz5BsA72udY/CROWFC/emh1auVbONTqwX3BNXuMp8SMoclm2q8KMZiYcdywm
+djWLKKdpoPk79SPdhRB0yZADVpHnr7pH1BKXESLjokmUbOe3lEu6LaTaM4tMpkT/
+WjzGHWTYtTHkpjx6qFcL2+1hGsvxznN3Y6SHb0xRONbkX8eftoEq5IVIeVheO/jb
+AoJnwTnbw3RLPTYe+SmTiGhbqEQZIfCn6IENLOiTNrQ3ssqwGyZ6miUfmpqAnksq
+P/ujmv5zMnHCnsZy4YpoJ/HkD7TETKVhk/iXEAcqMCWpuchxuO9ozC1+9eB+D4Ko
+b7a6bINDd82Kkhehnlt4Fj1F4jNy3eFmypnTycUm/Q1oBEauttmbjL4ZvrHG8hnj
+XALKLNhvSgfZyTXaQHXyxKcZb55CEJh15pWLYLztxRLXis7VmFxWlgPF7ncGNf/P
+5O4/E2Hu29othfDNrp2yGAlFw5Khchf8R7agCyzxxN5DaAhqXzvwdmP7zAYspsbi
+DrW5viSP
+-----END CERTIFICATE-----
+
+# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
+# Subject: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
+# Label: "Hellenic Academic and Research Institutions RootCA 2015"
+# Serial: 0
+# MD5 Fingerprint: ca:ff:e2:db:03:d9:cb:4b:e9:0f:ad:84:fd:7b:18:ce
+# SHA1 Fingerprint: 01:0c:06:95:a6:98:19:14:ff:bf:5f:c6:b0:b6:95:ea:29:e9:12:a6
+# SHA256 Fingerprint: a0:40:92:9a:02:ce:53:b4:ac:f4:f2:ff:c6:98:1c:e4:49:6f:75:5e:6d:45:fe:0b:2a:69:2b:cd:52:52:3f:36
+-----BEGIN CERTIFICATE-----
+MIIGCzCCA/OgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBpjELMAkGA1UEBhMCR1Ix
+DzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5k
+IFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxQDA+BgNVBAMT
+N0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgUm9v
+dENBIDIwMTUwHhcNMTUwNzA3MTAxMTIxWhcNNDAwNjMwMTAxMTIxWjCBpjELMAkG
+A1UEBhMCR1IxDzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNh
+ZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkx
+QDA+BgNVBAMTN0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1
+dGlvbnMgUm9vdENBIDIwMTUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC
+AQDC+Kk/G4n8PDwEXT2QNrCROnk8ZlrvbTkBSRq0t89/TSNTt5AA4xMqKKYx8ZEA
+4yjsriFBzh/a/X0SWwGDD7mwX5nh8hKDgE0GPt+sr+ehiGsxr/CL0BgzuNtFajT0
+AoAkKAoCFZVedioNmToUW/bLy1O8E00BiDeUJRtCvCLYjqOWXjrZMts+6PAQZe10
+4S+nfK8nNLspfZu2zwnI5dMK/IhlZXQK3HMcXM1AsRzUtoSMTFDPaI6oWa7CJ06C
+ojXdFPQf/7J31Ycvqm59JCfnxssm5uX+Zwdj2EUN3TpZZTlYepKZcj2chF6IIbjV
+9Cz82XBST3i4vTwri5WY9bPRaM8gFH5MXF/ni+X1NYEZN9cRCLdmvtNKzoNXADrD
+gfgXy5I2XdGj2HUb4Ysn6npIQf1FGQatJ5lOwXBH3bWfgVMS5bGMSF0xQxfjjMZ6
+Y5ZLKTBOhE5iGV48zpeQpX8B653g+IuJ3SWYPZK2fu/Z8VFRfS0myGlZYeCsargq
+NhEEelC9MoS+L9xy1dcdFkfkR2YgP/SWxa+OAXqlD3pk9Q0Yh9muiNX6hME6wGko
+LfINaFGq46V3xqSQDqE3izEjR8EJCOtu93ib14L8hCCZSRm2Ekax+0VVFqmjZayc
+Bw/qa9wfLgZy7IaIEuQt218FL+TwA9MmM+eAws1CoRc0CwIDAQABo0IwQDAPBgNV
+HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUcRVnyMjJvXVd
+ctA4GGqd83EkVAswDQYJKoZIhvcNAQELBQADggIBAHW7bVRLqhBYRjTyYtcWNl0I
+XtVsyIe9tC5G8jH4fOpCtZMWVdyhDBKg2mF+D1hYc2Ryx+hFjtyp8iY/xnmMsVMI
+M4GwVhO+5lFc2JsKT0ucVlMC6U/2DWDqTUJV6HwbISHTGzrMd/K4kPFox/la/vot
+9L/J9UUbzjgQKjeKeaO04wlshYaT/4mWJ3iBj2fjRnRUjtkNaeJK9E10A/+yd+2V
+Z5fkscWrv2oj6NSU4kQoYsRL4vDY4ilrGnB+JGGTe08DMiUNRSQrlrRGar9KC/ea
+j8GsGsVn82800vpzY4zvFrCopEYq+OsS7HK07/grfoxSwIuEVPkvPuNVqNxmsdnh
+X9izjFk0WaSrT2y7HxjbdavYy5LNlDhhDgcGH0tGEPEVvo2FXDtKK4F5D7Rpn0lQ
+l033DlZdwJVqwjbDG2jJ9SrcR5q+ss7FJej6A7na+RZukYT1HCjI/CbM1xyQVqdf
+bzoEvM14iQuODy+jqk+iGxI9FghAD/FGTNeqewjBCvVtJ94Cj8rDtSvK6evIIVM4
+pcw72Hc3MKJP2W/R8kCtQXoXxdZKNYm3QdV8hn9VTYNKpXMgwDqvkPGaJI7ZjnHK
+e7iG2rKPmT4dEw0SEe7Uq/DpFXYC5ODfqiAeW2GFZECpkJcNrVPSWh2HagCXZWK0
+vm9qp/UsQu0yrbYhnr68
+-----END CERTIFICATE-----
+
+# Issuer: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
+# Subject: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
+# Label: "Hellenic Academic and Research Institutions ECC RootCA 2015"
+# Serial: 0
+# MD5 Fingerprint: 81:e5:b4:17:eb:c2:f5:e1:4b:0d:41:7b:49:92:fe:ef
+# SHA1 Fingerprint: 9f:f1:71:8d:92:d5:9a:f3:7d:74:97:b4:bc:6f:84:68:0b:ba:b6:66
+# SHA256 Fingerprint: 44:b5:45:aa:8a:25:e6:5a:73:ca:15:dc:27:fc:36:d2:4c:1c:b9:95:3a:06:65:39:b1:15:82:dc:48:7b:48:33
+-----BEGIN CERTIFICATE-----
+MIICwzCCAkqgAwIBAgIBADAKBggqhkjOPQQDAjCBqjELMAkGA1UEBhMCR1IxDzAN
+BgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl
+c2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxRDBCBgNVBAMTO0hl
+bGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgRUNDIFJv
+b3RDQSAyMDE1MB4XDTE1MDcwNzEwMzcxMloXDTQwMDYzMDEwMzcxMlowgaoxCzAJ
+BgNVBAYTAkdSMQ8wDQYDVQQHEwZBdGhlbnMxRDBCBgNVBAoTO0hlbGxlbmljIEFj
+YWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9yaXR5
+MUQwQgYDVQQDEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0
+dXRpb25zIEVDQyBSb290Q0EgMjAxNTB2MBAGByqGSM49AgEGBSuBBAAiA2IABJKg
+QehLgoRc4vgxEZmGZE4JJS+dQS8KrjVPdJWyUWRrjWvmP3CV8AVER6ZyOFB2lQJa
+jq4onvktTpnvLEhvTCUp6NFxW98dwXU3tNf6e3pCnGoKVlp8aQuqgAkkbH7BRqNC
+MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFLQi
+C4KZJAEOnLvkDv2/+5cgk5kqMAoGCCqGSM49BAMCA2cAMGQCMGfOFmI4oqxiRaep
+lSTAGiecMjvAwNW6qef4BENThe5SId6d9SWDPp5YSy/XZxMOIQIwBeF1Ad5o7Sof
+TUwJCA3sS61kFyjndc5FZXIhF8siQQ6ME5g4mlRtm8rifOoCWCKR
+-----END CERTIFICATE-----
+
+# Issuer: CN=ISRG Root X1 O=Internet Security Research Group
+# Subject: CN=ISRG Root X1 O=Internet Security Research Group
+# Label: "ISRG Root X1"
+# Serial: 172886928669790476064670243504169061120
+# MD5 Fingerprint: 0c:d2:f9:e0:da:17:73:e9:ed:86:4d:a5:e3:70:e7:4e
+# SHA1 Fingerprint: ca:bd:2a:79:a1:07:6a:31:f2:1d:25:36:35:cb:03:9d:43:29:a5:e8
+# SHA256 Fingerprint: 96:bc:ec:06:26:49:76:f3:74:60:77:9a:cf:28:c5:a7:cf:e8:a3:c0:aa:e1:1a:8f:fc:ee:05:c0:bd:df:08:c6
+-----BEGIN CERTIFICATE-----
+MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
+TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
+cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
+WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
+ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
+h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
+0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
+A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
+T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
+B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
+B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
+KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
+OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
+jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
+qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
+rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
+hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
+ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
+3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
+NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
+ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
+TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
+jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
+4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
+mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
+emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
+-----END CERTIFICATE-----
+
+# Issuer: O=FNMT-RCM OU=AC RAIZ FNMT-RCM
+# Subject: O=FNMT-RCM OU=AC RAIZ FNMT-RCM
+# Label: "AC RAIZ FNMT-RCM"
+# Serial: 485876308206448804701554682760554759
+# MD5 Fingerprint: e2:09:04:b4:d3:bd:d1:a0:14:fd:1a:d2:47:c4:57:1d
+# SHA1 Fingerprint: ec:50:35:07:b2:15:c4:95:62:19:e2:a8:9a:5b:42:99:2c:4c:2c:20
+# SHA256 Fingerprint: eb:c5:57:0c:29:01:8c:4d:67:b1:aa:12:7b:af:12:f7:03:b4:61:1e:bc:17:b7:da:b5:57:38:94:17:9b:93:fa
+-----BEGIN CERTIFICATE-----
+MIIFgzCCA2ugAwIBAgIPXZONMGc2yAYdGsdUhGkHMA0GCSqGSIb3DQEBCwUAMDsx
+CzAJBgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJ
+WiBGTk1ULVJDTTAeFw0wODEwMjkxNTU5NTZaFw0zMDAxMDEwMDAwMDBaMDsxCzAJ
+BgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJWiBG
+Tk1ULVJDTTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALpxgHpMhm5/
+yBNtwMZ9HACXjywMI7sQmkCpGreHiPibVmr75nuOi5KOpyVdWRHbNi63URcfqQgf
+BBckWKo3Shjf5TnUV/3XwSyRAZHiItQDwFj8d0fsjz50Q7qsNI1NOHZnjrDIbzAz
+WHFctPVrbtQBULgTfmxKo0nRIBnuvMApGGWn3v7v3QqQIecaZ5JCEJhfTzC8PhxF
+tBDXaEAUwED653cXeuYLj2VbPNmaUtu1vZ5Gzz3rkQUCwJaydkxNEJY7kvqcfw+Z
+374jNUUeAlz+taibmSXaXvMiwzn15Cou08YfxGyqxRxqAQVKL9LFwag0Jl1mpdIC
+IfkYtwb1TplvqKtMUejPUBjFd8g5CSxJkjKZqLsXF3mwWsXmo8RZZUc1g16p6DUL
+mbvkzSDGm0oGObVo/CK67lWMK07q87Hj/LaZmtVC+nFNCM+HHmpxffnTtOmlcYF7
+wk5HlqX2doWjKI/pgG6BU6VtX7hI+cL5NqYuSf+4lsKMB7ObiFj86xsc3i1w4peS
+MKGJ47xVqCfWS+2QrYv6YyVZLag13cqXM7zlzced0ezvXg5KkAYmY6252TUtB7p2
+ZSysV4999AeU14ECll2jB0nVetBX+RvnU0Z1qrB5QstocQjpYL05ac70r8NWQMet
+UqIJ5G+GR4of6ygnXYMgrwTJbFaai0b1AgMBAAGjgYMwgYAwDwYDVR0TAQH/BAUw
+AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFPd9xf3E6Jobd2Sn9R2gzL+H
+YJptMD4GA1UdIAQ3MDUwMwYEVR0gADArMCkGCCsGAQUFBwIBFh1odHRwOi8vd3d3
+LmNlcnQuZm5tdC5lcy9kcGNzLzANBgkqhkiG9w0BAQsFAAOCAgEAB5BK3/MjTvDD
+nFFlm5wioooMhfNzKWtN/gHiqQxjAb8EZ6WdmF/9ARP67Jpi6Yb+tmLSbkyU+8B1
+RXxlDPiyN8+sD8+Nb/kZ94/sHvJwnvDKuO+3/3Y3dlv2bojzr2IyIpMNOmqOFGYM
+LVN0V2Ue1bLdI4E7pWYjJ2cJj+F3qkPNZVEI7VFY/uY5+ctHhKQV8Xa7pO6kO8Rf
+77IzlhEYt8llvhjho6Tc+hj507wTmzl6NLrTQfv6MooqtyuGC2mDOL7Nii4LcK2N
+JpLuHvUBKwrZ1pebbuCoGRw6IYsMHkCtA+fdZn71uSANA+iW+YJF1DngoABd15jm
+fZ5nc8OaKveri6E6FO80vFIOiZiaBECEHX5FaZNXzuvO+FB8TxxuBEOb+dY7Ixjp
+6o7RTUaN8Tvkasq6+yO3m/qZASlaWFot4/nUbQ4mrcFuNLwy+AwF+mWj2zs3gyLp
+1txyM/1d8iC9djwj2ij3+RvrWWTV3F9yfiD8zYm1kGdNYno/Tq0dwzn+evQoFt9B
+9kiABdcPUXmsEKvU7ANm5mqwujGSQkBqvjrTcuFqN1W8rB2Vt2lh8kORdOag0wok
+RqEIr9baRRmW1FMdW4R58MD3R++Lj8UGrp1MYp3/RgT408m2ECVAdf4WqslKYIYv
+uu8wd+RU4riEmViAqhOLUTpPSPaLtrM=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Amazon Root CA 1 O=Amazon
+# Subject: CN=Amazon Root CA 1 O=Amazon
+# Label: "Amazon Root CA 1"
+# Serial: 143266978916655856878034712317230054538369994
+# MD5 Fingerprint: 43:c6:bf:ae:ec:fe:ad:2f:18:c6:88:68:30:fc:c8:e6
+# SHA1 Fingerprint: 8d:a7:f9:65:ec:5e:fc:37:91:0f:1c:6e:59:fd:c1:cc:6a:6e:de:16
+# SHA256 Fingerprint: 8e:cd:e6:88:4f:3d:87:b1:12:5b:a3:1a:c3:fc:b1:3d:70:16:de:7f:57:cc:90:4f:e1:cb:97:c6:ae:98:19:6e
+-----BEGIN CERTIFICATE-----
+MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsF
+ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6
+b24gUm9vdCBDQSAxMB4XDTE1MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTEL
+MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv
+b3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALJ4gHHKeNXj
+ca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgHFzZM
+9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qw
+IFAGbHrQgLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6
+VOujw5H5SNz/0egwLX0tdHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L
+93FcXmn/6pUCyziKrlA4b9v7LWIbxcceVOF34GfID5yHI9Y/QCB/IIDEgEw+OyQm
+jgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
+AYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3DQEBCwUA
+A4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDI
+U5PMCCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUs
+N+gDS63pYaACbvXy8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vv
+o/ufQJVtMVT8QtPHRh8jrdkPSHCa2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU
+5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2xJNDd2ZhwLnoQdeXeGADbkpy
+rqXRfboQnoZsG4q5WTP468SQvvG5
+-----END CERTIFICATE-----
+
+# Issuer: CN=Amazon Root CA 2 O=Amazon
+# Subject: CN=Amazon Root CA 2 O=Amazon
+# Label: "Amazon Root CA 2"
+# Serial: 143266982885963551818349160658925006970653239
+# MD5 Fingerprint: c8:e5:8d:ce:a8:42:e2:7a:c0:2a:5c:7c:9e:26:bf:66
+# SHA1 Fingerprint: 5a:8c:ef:45:d7:a6:98:59:76:7a:8c:8b:44:96:b5:78:cf:47:4b:1a
+# SHA256 Fingerprint: 1b:a5:b2:aa:8c:65:40:1a:82:96:01:18:f8:0b:ec:4f:62:30:4d:83:ce:c4:71:3a:19:c3:9c:01:1e:a4:6d:b4
+-----BEGIN CERTIFICATE-----
+MIIFQTCCAymgAwIBAgITBmyf0pY1hp8KD+WGePhbJruKNzANBgkqhkiG9w0BAQwF
+ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6
+b24gUm9vdCBDQSAyMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTEL
+MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv
+b3QgQ0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK2Wny2cSkxK
+gXlRmeyKy2tgURO8TW0G/LAIjd0ZEGrHJgw12MBvIITplLGbhQPDW9tK6Mj4kHbZ
+W0/jTOgGNk3Mmqw9DJArktQGGWCsN0R5hYGCrVo34A3MnaZMUnbqQ523BNFQ9lXg
+1dKmSYXpN+nKfq5clU1Imj+uIFptiJXZNLhSGkOQsL9sBbm2eLfq0OQ6PBJTYv9K
+8nu+NQWpEjTj82R0Yiw9AElaKP4yRLuH3WUnAnE72kr3H9rN9yFVkE8P7K6C4Z9r
+2UXTu/Bfh+08LDmG2j/e7HJV63mjrdvdfLC6HM783k81ds8P+HgfajZRRidhW+me
+z/CiVX18JYpvL7TFz4QuK/0NURBs+18bvBt+xa47mAExkv8LV/SasrlX6avvDXbR
+8O70zoan4G7ptGmh32n2M8ZpLpcTnqWHsFcQgTfJU7O7f/aS0ZzQGPSSbtqDT6Zj
+mUyl+17vIWR6IF9sZIUVyzfpYgwLKhbcAS4y2j5L9Z469hdAlO+ekQiG+r5jqFoz
+7Mt0Q5X5bGlSNscpb/xVA1wf+5+9R+vnSUeVC06JIglJ4PVhHvG/LopyboBZ/1c6
++XUyo05f7O0oYtlNc/LMgRdg7c3r3NunysV+Ar3yVAhU/bQtCSwXVEqY0VThUWcI
+0u1ufm8/0i2BWSlmy5A5lREedCf+3euvAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMB
+Af8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSwDPBMMPQFWAJI/TPlUq9LhONm
+UjANBgkqhkiG9w0BAQwFAAOCAgEAqqiAjw54o+Ci1M3m9Zh6O+oAA7CXDpO8Wqj2
+LIxyh6mx/H9z/WNxeKWHWc8w4Q0QshNabYL1auaAn6AFC2jkR2vHat+2/XcycuUY
++gn0oJMsXdKMdYV2ZZAMA3m3MSNjrXiDCYZohMr/+c8mmpJ5581LxedhpxfL86kS
+k5Nrp+gvU5LEYFiwzAJRGFuFjWJZY7attN6a+yb3ACfAXVU3dJnJUH/jWS5E4ywl
+7uxMMne0nxrpS10gxdr9HIcWxkPo1LsmmkVwXqkLN1PiRnsn/eBG8om3zEK2yygm
+btmlyTrIQRNg91CMFa6ybRoVGld45pIq2WWQgj9sAq+uEjonljYE1x2igGOpm/Hl
+urR8FLBOybEfdF849lHqm/osohHUqS0nGkWxr7JOcQ3AWEbWaQbLU8uz/mtBzUF+
+fUwPfHJ5elnNXkoOrJupmHN5fLT0zLm4BwyydFy4x2+IoZCn9Kr5v2c69BoVYh63
+n749sSmvZ6ES8lgQGVMDMBu4Gon2nL2XA46jCfMdiyHxtN/kHNGfZQIG6lzWE7OE
+76KlXIx3KadowGuuQNKotOrN8I1LOJwZmhsoVLiJkO/KdYE+HvJkJMcYr07/R54H
+9jVlpNMKVv/1F2Rs76giJUmTtt8AF9pYfl3uxRuw0dFfIRDH+fO6AgonB8Xx1sfT
+4PsJYGw=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Amazon Root CA 3 O=Amazon
+# Subject: CN=Amazon Root CA 3 O=Amazon
+# Label: "Amazon Root CA 3"
+# Serial: 143266986699090766294700635381230934788665930
+# MD5 Fingerprint: a0:d4:ef:0b:f7:b5:d8:49:95:2a:ec:f5:c4:fc:81:87
+# SHA1 Fingerprint: 0d:44:dd:8c:3c:8c:1a:1a:58:75:64:81:e9:0f:2e:2a:ff:b3:d2:6e
+# SHA256 Fingerprint: 18:ce:6c:fe:7b:f1:4e:60:b2:e3:47:b8:df:e8:68:cb:31:d0:2e:bb:3a:da:27:15:69:f5:03:43:b4:6d:b3:a4
+-----BEGIN CERTIFICATE-----
+MIIBtjCCAVugAwIBAgITBmyf1XSXNmY/Owua2eiedgPySjAKBggqhkjOPQQDAjA5
+MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g
+Um9vdCBDQSAzMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG
+A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg
+Q0EgMzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCmXp8ZBf8ANm+gBG1bG8lKl
+ui2yEujSLtf6ycXYqm0fc4E7O5hrOXwzpcVOho6AF2hiRVd9RFgdszflZwjrZt6j
+QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSr
+ttvXBp43rDCGB5Fwx5zEGbF4wDAKBggqhkjOPQQDAgNJADBGAiEA4IWSoxe3jfkr
+BqWTrBqYaGFy+uGh0PsceGCmQ5nFuMQCIQCcAu/xlJyzlvnrxir4tiz+OpAUFteM
+YyRIHN8wfdVoOw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Amazon Root CA 4 O=Amazon
+# Subject: CN=Amazon Root CA 4 O=Amazon
+# Label: "Amazon Root CA 4"
+# Serial: 143266989758080763974105200630763877849284878
+# MD5 Fingerprint: 89:bc:27:d5:eb:17:8d:06:6a:69:d5:fd:89:47:b4:cd
+# SHA1 Fingerprint: f6:10:84:07:d6:f8:bb:67:98:0c:c2:e2:44:c2:eb:ae:1c:ef:63:be
+# SHA256 Fingerprint: e3:5d:28:41:9e:d0:20:25:cf:a6:90:38:cd:62:39:62:45:8d:a5:c6:95:fb:de:a3:c2:2b:0b:fb:25:89:70:92
+-----BEGIN CERTIFICATE-----
+MIIB8jCCAXigAwIBAgITBmyf18G7EEwpQ+Vxe3ssyBrBDjAKBggqhkjOPQQDAzA5
+MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g
+Um9vdCBDQSA0MB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG
+A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg
+Q0EgNDB2MBAGByqGSM49AgEGBSuBBAAiA2IABNKrijdPo1MN/sGKe0uoe0ZLY7Bi
+9i0b2whxIdIA6GO9mif78DluXeo9pcmBqqNbIJhFXRbb/egQbeOc4OO9X4Ri83Bk
+M6DLJC9wuoihKqB1+IGuYgbEgds5bimwHvouXKNCMEAwDwYDVR0TAQH/BAUwAwEB
+/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFNPsxzplbszh2naaVvuc84ZtV+WB
+MAoGCCqGSM49BAMDA2gAMGUCMDqLIfG9fhGt0O9Yli/W651+kI0rz2ZVwyzjKKlw
+CkcO8DdZEv8tmZQoTipPNU0zWgIxAOp1AE47xDqUEpHJWEadIRNyp4iciuRMStuW
+1KyLa2tJElMzrdfkviT8tQp21KW8EA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM
+# Subject: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM
+# Label: "TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1"
+# Serial: 1
+# MD5 Fingerprint: dc:00:81:dc:69:2f:3e:2f:b0:3b:f6:3d:5a:91:8e:49
+# SHA1 Fingerprint: 31:43:64:9b:ec:ce:27:ec:ed:3a:3f:0b:8f:0d:e4:e8:91:dd:ee:ca
+# SHA256 Fingerprint: 46:ed:c3:68:90:46:d5:3a:45:3f:b3:10:4a:b8:0d:ca:ec:65:8b:26:60:ea:16:29:dd:7e:86:79:90:64:87:16
+-----BEGIN CERTIFICATE-----
+MIIEYzCCA0ugAwIBAgIBATANBgkqhkiG9w0BAQsFADCB0jELMAkGA1UEBhMCVFIx
+GDAWBgNVBAcTD0dlYnplIC0gS29jYWVsaTFCMEAGA1UEChM5VHVya2l5ZSBCaWxp
+bXNlbCB2ZSBUZWtub2xvamlrIEFyYXN0aXJtYSBLdXJ1bXUgLSBUVUJJVEFLMS0w
+KwYDVQQLEyRLYW11IFNlcnRpZmlrYXN5b24gTWVya2V6aSAtIEthbXUgU00xNjA0
+BgNVBAMTLVRVQklUQUsgS2FtdSBTTSBTU0wgS29rIFNlcnRpZmlrYXNpIC0gU3Vy
+dW0gMTAeFw0xMzExMjUwODI1NTVaFw00MzEwMjUwODI1NTVaMIHSMQswCQYDVQQG
+EwJUUjEYMBYGA1UEBxMPR2ViemUgLSBLb2NhZWxpMUIwQAYDVQQKEzlUdXJraXll
+IEJpbGltc2VsIHZlIFRla25vbG9qaWsgQXJhc3Rpcm1hIEt1cnVtdSAtIFRVQklU
+QUsxLTArBgNVBAsTJEthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppIC0gS2FtdSBT
+TTE2MDQGA1UEAxMtVFVCSVRBSyBLYW11IFNNIFNTTCBLb2sgU2VydGlmaWthc2kg
+LSBTdXJ1bSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAr3UwM6q7
+a9OZLBI3hNmNe5eA027n/5tQlT6QlVZC1xl8JoSNkvoBHToP4mQ4t4y86Ij5iySr
+LqP1N+RAjhgleYN1Hzv/bKjFxlb4tO2KRKOrbEz8HdDc72i9z+SqzvBV96I01INr
+N3wcwv61A+xXzry0tcXtAA9TNypN9E8Mg/uGz8v+jE69h/mniyFXnHrfA2eJLJ2X
+YacQuFWQfw4tJzh03+f92k4S400VIgLI4OD8D62K18lUUMw7D8oWgITQUVbDjlZ/
+iSIzL+aFCr2lqBs23tPcLG07xxO9WSMs5uWk99gL7eqQQESolbuT1dCANLZGeA4f
+AJNG4e7p+exPFwIDAQABo0IwQDAdBgNVHQ4EFgQUZT/HiobGPN08VFw1+DrtUgxH
+V8gwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL
+BQADggEBACo/4fEyjq7hmFxLXs9rHmoJ0iKpEsdeV31zVmSAhHqT5Am5EM2fKifh
+AHe+SMg1qIGf5LgsyX8OsNJLN13qudULXjS99HMpw+0mFZx+CFOKWI3QSyjfwbPf
+IPP54+M638yclNhOT8NrF7f3cuitZjO1JVOr4PhMqZ398g26rrnZqsZr+ZO7rqu4
+lzwDGrpDxpa5RXI4s6ehlj2Re37AIVNMh+3yC1SVUZPVIqUNivGTDj5UDrDYyU7c
+8jEyVupk+eq1nRZmQnLzf9OxMUP8pI4X8W0jq5Rm+K37DwhuJi1/FwcJsoz7UMCf
+lo3Ptv0AnVoUmr8CRPXBwp8iXqIPoeM=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD.
+# Subject: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD.
+# Label: "GDCA TrustAUTH R5 ROOT"
+# Serial: 9009899650740120186
+# MD5 Fingerprint: 63:cc:d9:3d:34:35:5c:6f:53:a3:e2:08:70:48:1f:b4
+# SHA1 Fingerprint: 0f:36:38:5b:81:1a:25:c3:9b:31:4e:83:ca:e9:34:66:70:cc:74:b4
+# SHA256 Fingerprint: bf:ff:8f:d0:44:33:48:7d:6a:8a:a6:0c:1a:29:76:7a:9f:c2:bb:b0:5e:42:0f:71:3a:13:b9:92:89:1d:38:93
+-----BEGIN CERTIFICATE-----
+MIIFiDCCA3CgAwIBAgIIfQmX/vBH6nowDQYJKoZIhvcNAQELBQAwYjELMAkGA1UE
+BhMCQ04xMjAwBgNVBAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZ
+IENPLixMVEQuMR8wHQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMB4XDTE0
+MTEyNjA1MTMxNVoXDTQwMTIzMTE1NTk1OVowYjELMAkGA1UEBhMCQ04xMjAwBgNV
+BAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZIENPLixMVEQuMR8w
+HQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEA2aMW8Mh0dHeb7zMNOwZ+Vfy1YI92hhJCfVZmPoiC7XJj
+Dp6L3TQsAlFRwxn9WVSEyfFrs0yw6ehGXTjGoqcuEVe6ghWinI9tsJlKCvLriXBj
+TnnEt1u9ol2x8kECK62pOqPseQrsXzrj/e+APK00mxqriCZ7VqKChh/rNYmDf1+u
+KU49tm7srsHwJ5uu4/Ts765/94Y9cnrrpftZTqfrlYwiOXnhLQiPzLyRuEH3FMEj
+qcOtmkVEs7LXLM3GKeJQEK5cy4KOFxg2fZfmiJqwTTQJ9Cy5WmYqsBebnh52nUpm
+MUHfP/vFBu8btn4aRjb3ZGM74zkYI+dndRTVdVeSN72+ahsmUPI2JgaQxXABZG12
+ZuGR224HwGGALrIuL4xwp9E7PLOR5G62xDtw8mySlwnNR30YwPO7ng/Wi64HtloP
+zgsMR6flPri9fcebNaBhlzpBdRfMK5Z3KpIhHtmVdiBnaM8Nvd/WHwlqmuLMc3Gk
+L30SgLdTMEZeS1SZD2fJpcjyIMGC7J0R38IC+xo70e0gmu9lZJIQDSri3nDxGGeC
+jGHeuLzRL5z7D9Ar7Rt2ueQ5Vfj4oR24qoAATILnsn8JuLwwoC8N9VKejveSswoA
+HQBUlwbgsQfZxw9cZX08bVlX5O2ljelAU58VS6Bx9hoh49pwBiFYFIeFd3mqgnkC
+AwEAAaNCMEAwHQYDVR0OBBYEFOLJQJ9NzuiaoXzPDj9lxSmIahlRMA8GA1UdEwEB
+/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQDRSVfg
+p8xoWLoBDysZzY2wYUWsEe1jUGn4H3++Fo/9nesLqjJHdtJnJO29fDMylyrHBYZm
+DRd9FBUb1Ov9H5r2XpdptxolpAqzkT9fNqyL7FeoPueBihhXOYV0GkLH6VsTX4/5
+COmSdI31R9KrO9b7eGZONn356ZLpBN79SWP8bfsUcZNnL0dKt7n/HipzcEYwv1ry
+L3ml4Y0M2fmyYzeMN2WFcGpcWwlyua1jPLHd+PwyvzeG5LuOmCd+uh8W4XAR8gPf
+JWIyJyYYMoSf/wA6E7qaTfRPuBRwIrHKK5DOKcFw9C+df/KQHtZa37dG/OaG+svg
+IHZ6uqbL9XzeYqWxi+7egmaKTjowHz+Ay60nugxe19CxVsp3cbK1daFQqUBDF8Io
+2c9Si1vIY9RCPqAzekYu9wogRlR+ak8x8YF+QnQ4ZXMn7sZ8uI7XpTrXmKGcjBBV
+09tL7ECQ8s1uV9JiDnxXk7Gnbc2dg7sq5+W2O3FYrf3RRbxake5TFW/TRQl1brqQ
+XR4EzzffHqhmsYzmIGrv/EhOdJhCrylvLmrH+33RZjEizIYAfmaDDEL0vTSSwxrq
+T8p+ck0LcIymSLumoRT2+1hEmRSuqguTaaApJUqlyyvdimYHFngVV3Eb7PVHhPOe
+MTd61X8kreS8/f3MboPoDKi3QWwH3b08hpcv0g==
+-----END CERTIFICATE-----
+
+# Issuer: CN=SSL.com Root Certification Authority RSA O=SSL Corporation
+# Subject: CN=SSL.com Root Certification Authority RSA O=SSL Corporation
+# Label: "SSL.com Root Certification Authority RSA"
+# Serial: 8875640296558310041
+# MD5 Fingerprint: 86:69:12:c0:70:f1:ec:ac:ac:c2:d5:bc:a5:5b:a1:29
+# SHA1 Fingerprint: b7:ab:33:08:d1:ea:44:77:ba:14:80:12:5a:6f:bd:a9:36:49:0c:bb
+# SHA256 Fingerprint: 85:66:6a:56:2e:e0:be:5c:e9:25:c1:d8:89:0a:6f:76:a8:7e:c1:6d:4d:7d:5f:29:ea:74:19:cf:20:12:3b:69
+-----BEGIN CERTIFICATE-----
+MIIF3TCCA8WgAwIBAgIIeyyb0xaAMpkwDQYJKoZIhvcNAQELBQAwfDELMAkGA1UE
+BhMCVVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQK
+DA9TU0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZp
+Y2F0aW9uIEF1dGhvcml0eSBSU0EwHhcNMTYwMjEyMTczOTM5WhcNNDEwMjEyMTcz
+OTM5WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv
+dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNv
+bSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFJTQTCCAiIwDQYJKoZIhvcN
+AQEBBQADggIPADCCAgoCggIBAPkP3aMrfcvQKv7sZ4Wm5y4bunfh4/WvpOz6Sl2R
+xFdHaxh3a3by/ZPkPQ/CFp4LZsNWlJ4Xg4XOVu/yFv0AYvUiCVToZRdOQbngT0aX
+qhvIuG5iXmmxX9sqAn78bMrzQdjt0Oj8P2FI7bADFB0QDksZ4LtO7IZl/zbzXmcC
+C52GVWH9ejjt/uIZALdvoVBidXQ8oPrIJZK0bnoix/geoeOy3ZExqysdBP+lSgQ3
+6YWkMyv94tZVNHwZpEpox7Ko07fKoZOI68GXvIz5HdkihCR0xwQ9aqkpk8zruFvh
+/l8lqjRYyMEjVJ0bmBHDOJx+PYZspQ9AhnwC9FwCTyjLrnGfDzrIM/4RJTXq/LrF
+YD3ZfBjVsqnTdXgDciLKOsMf7yzlLqn6niy2UUb9rwPW6mBo6oUWNmuF6R7As93E
+JNyAKoFBbZQ+yODJgUEAnl6/f8UImKIYLEJAs/lvOCdLToD0PYFH4Ih86hzOtXVc
+US4cK38acijnALXRdMbX5J+tB5O2UzU1/Dfkw/ZdFr4hc96SCvigY2q8lpJqPvi8
+ZVWb3vUNiSYE/CUapiVpy8JtynziWV+XrOvvLsi81xtZPCvM8hnIk2snYxnP/Okm
++Mpxm3+T/jRnhE6Z6/yzeAkzcLpmpnbtG3PrGqUNxCITIJRWCk4sbE6x/c+cCbqi
+M+2HAgMBAAGjYzBhMB0GA1UdDgQWBBTdBAkHovV6fVJTEpKV7jiAJQ2mWTAPBgNV
+HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFN0ECQei9Xp9UlMSkpXuOIAlDaZZMA4G
+A1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAIBgRlCn7Jp0cHh5wYfGV
+cpNxJK1ok1iOMq8bs3AD/CUrdIWQPXhq9LmLpZc7tRiRux6n+UBbkflVma8eEdBc
+Hadm47GUBwwyOabqG7B52B2ccETjit3E+ZUfijhDPwGFpUenPUayvOUiaPd7nNgs
+PgohyC0zrL/FgZkxdMF1ccW+sfAjRfSda/wZY52jvATGGAslu1OJD7OAUN5F7kR/
+q5R4ZJjT9ijdh9hwZXT7DrkT66cPYakylszeu+1jTBi7qUD3oFRuIIhxdRjqerQ0
+cuAjJ3dctpDqhiVAq+8zD8ufgr6iIPv2tS0a5sKFsXQP+8hlAqRSAUfdSSLBv9jr
+a6x+3uxjMxW3IwiPxg+NQVrdjsW5j+VFP3jbutIbQLH+cU0/4IGiul607BXgk90I
+H37hVZkLId6Tngr75qNJvTYw/ud3sqB1l7UtgYgXZSD32pAAn8lSzDLKNXz1PQ/Y
+K9f1JmzJBjSWFupwWRoyeXkLtoh/D1JIPb9s2KJELtFOt3JY04kTlf5Eq/jXixtu
+nLwsoFvVagCvXzfh1foQC5ichucmj87w7G6KVwuA406ywKBjYZC6VWg3dGq2ktuf
+oYYitmUnDuy2n0Jg5GfCtdpBC8TTi2EbvPofkSvXRAdeuims2cXp71NIWuuA8ShY
+Ic2wBlX7Jz9TkHCpBB5XJ7k=
+-----END CERTIFICATE-----
+
+# Issuer: CN=SSL.com Root Certification Authority ECC O=SSL Corporation
+# Subject: CN=SSL.com Root Certification Authority ECC O=SSL Corporation
+# Label: "SSL.com Root Certification Authority ECC"
+# Serial: 8495723813297216424
+# MD5 Fingerprint: 2e:da:e4:39:7f:9c:8f:37:d1:70:9f:26:17:51:3a:8e
+# SHA1 Fingerprint: c3:19:7c:39:24:e6:54:af:1b:c4:ab:20:95:7a:e2:c3:0e:13:02:6a
+# SHA256 Fingerprint: 34:17:bb:06:cc:60:07:da:1b:96:1c:92:0b:8a:b4:ce:3f:ad:82:0e:4a:a3:0b:9a:cb:c4:a7:4e:bd:ce:bc:65
+-----BEGIN CERTIFICATE-----
+MIICjTCCAhSgAwIBAgIIdebfy8FoW6gwCgYIKoZIzj0EAwIwfDELMAkGA1UEBhMC
+VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T
+U0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZpY2F0
+aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNDAzWhcNNDEwMjEyMTgxNDAz
+WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hvdXN0
+b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNvbSBS
+b290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49AgEGBSuB
+BAAiA2IABEVuqVDEpiM2nl8ojRfLliJkP9x6jh3MCLOicSS6jkm5BBtHllirLZXI
+7Z4INcgn64mMU1jrYor+8FsPazFSY0E7ic3s7LaNGdM0B9y7xgZ/wkWV7Mt/qCPg
+CemB+vNH06NjMGEwHQYDVR0OBBYEFILRhXMw5zUE044CkvvlpNHEIejNMA8GA1Ud
+EwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUgtGFczDnNQTTjgKS++Wk0cQh6M0wDgYD
+VR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2cAMGQCMG/n61kRpGDPYbCWe+0F+S8T
+kdzt5fxQaxFGRrMcIQBiu77D5+jNB5n5DQtdcj7EqgIwH7y6C+IwJPt8bYBVCpk+
+gA0z5Wajs6O7pdWLjwkspl1+4vAHCGht0nxpbl/f5Wpl
+-----END CERTIFICATE-----
+
+# Issuer: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation
+# Subject: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation
+# Label: "SSL.com EV Root Certification Authority RSA R2"
+# Serial: 6248227494352943350
+# MD5 Fingerprint: e1:1e:31:58:1a:ae:54:53:02:f6:17:6a:11:7b:4d:95
+# SHA1 Fingerprint: 74:3a:f0:52:9b:d0:32:a0:f4:4a:83:cd:d4:ba:a9:7b:7c:2e:c4:9a
+# SHA256 Fingerprint: 2e:7b:f1:6c:c2:24:85:a7:bb:e2:aa:86:96:75:07:61:b0:ae:39:be:3b:2f:e9:d0:cc:6d:4e:f7:34:91:42:5c
+-----BEGIN CERTIFICATE-----
+MIIF6zCCA9OgAwIBAgIIVrYpzTS8ePYwDQYJKoZIhvcNAQELBQAwgYIxCzAJBgNV
+BAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4GA1UEBwwHSG91c3RvbjEYMBYGA1UE
+CgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQDDC5TU0wuY29tIEVWIFJvb3QgQ2Vy
+dGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIyMB4XDTE3MDUzMTE4MTQzN1oXDTQy
+MDUzMDE4MTQzN1owgYIxCzAJBgNVBAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4G
+A1UEBwwHSG91c3RvbjEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQD
+DC5TU0wuY29tIEVWIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIy
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAjzZlQOHWTcDXtOlG2mvq
+M0fNTPl9fb69LT3w23jhhqXZuglXaO1XPqDQCEGD5yhBJB/jchXQARr7XnAjssuf
+OePPxU7Gkm0mxnu7s9onnQqG6YE3Bf7wcXHswxzpY6IXFJ3vG2fThVUCAtZJycxa
+4bH3bzKfydQ7iEGonL3Lq9ttewkfokxykNorCPzPPFTOZw+oz12WGQvE43LrrdF9
+HSfvkusQv1vrO6/PgN3B0pYEW3p+pKk8OHakYo6gOV7qd89dAFmPZiw+B6KjBSYR
+aZfqhbcPlgtLyEDhULouisv3D5oi53+aNxPN8k0TayHRwMwi8qFG9kRpnMphNQcA
+b9ZhCBHqurj26bNg5U257J8UZslXWNvNh2n4ioYSA0e/ZhN2rHd9NCSFg83XqpyQ
+Gp8hLH94t2S42Oim9HizVcuE0jLEeK6jj2HdzghTreyI/BXkmg3mnxp3zkyPuBQV
+PWKchjgGAGYS5Fl2WlPAApiiECtoRHuOec4zSnaqW4EWG7WK2NAAe15itAnWhmMO
+pgWVSbooi4iTsjQc2KRVbrcc0N6ZVTsj9CLg+SlmJuwgUHfbSguPvuUCYHBBXtSu
+UDkiFCbLsjtzdFVHB3mBOagwE0TlBIqulhMlQg+5U8Sb/M3kHN48+qvWBkofZ6aY
+MBzdLNvcGJVXZsb/XItW9XcCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAfBgNV
+HSMEGDAWgBT5YLvU49U09rj1BoAlp3PbRmmonjAdBgNVHQ4EFgQU+WC71OPVNPa4
+9QaAJadz20ZpqJ4wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQBW
+s47LCp1Jjr+kxJG7ZhcFUZh1++VQLHqe8RT6q9OKPv+RKY9ji9i0qVQBDb6Thi/5
+Sm3HXvVX+cpVHBK+Rw82xd9qt9t1wkclf7nxY/hoLVUE0fKNsKTPvDxeH3jnpaAg
+cLAExbf3cqfeIg29MyVGjGSSJuM+LmOW2puMPfgYCdcDzH2GguDKBAdRUNf/ktUM
+79qGn5nX67evaOI5JpS6aLe/g9Pqemc9YmeuJeVy6OLk7K4S9ksrPJ/psEDzOFSz
+/bdoyNrGj1E8svuR3Bznm53htw1yj+KkxKl4+esUrMZDBcJlOSgYAsOCsp0FvmXt
+ll9ldDz7CTUue5wT/RsPXcdtgTpWD8w74a8CLyKsRspGPKAcTNZEtF4uXBVmCeEm
+Kf7GUmG6sXP/wwyc5WxqlD8UykAWlYTzWamsX0xhk23RO8yilQwipmdnRC652dKK
+QbNmC1r7fSOl8hqw/96bg5Qu0T/fkreRrwU7ZcegbLHNYhLDkBvjJc40vG93drEQ
+w/cFGsDWr3RiSBd3kmmQYRzelYB0VI8YHMPzA9C/pEN1hlMYegouCRw2n5H9gooi
+S9EOUCXdywMMF8mDAAhONU2Ki+3wApRmLER/y5UnlhetCTCstnEXbosX9hwJ1C07
+mKVx01QT2WDz9UtmT/rx7iASjbSsV7FFY6GsdqnC+w==
+-----END CERTIFICATE-----
+
+# Issuer: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation
+# Subject: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation
+# Label: "SSL.com EV Root Certification Authority ECC"
+# Serial: 3182246526754555285
+# MD5 Fingerprint: 59:53:22:65:83:42:01:54:c0:ce:42:b9:5a:7c:f2:90
+# SHA1 Fingerprint: 4c:dd:51:a3:d1:f5:20:32:14:b0:c6:c5:32:23:03:91:c7:46:42:6d
+# SHA256 Fingerprint: 22:a2:c1:f7:bd:ed:70:4c:c1:e7:01:b5:f4:08:c3:10:88:0f:e9:56:b5:de:2a:4a:44:f9:9c:87:3a:25:a7:c8
+-----BEGIN CERTIFICATE-----
+MIIClDCCAhqgAwIBAgIILCmcWxbtBZUwCgYIKoZIzj0EAwIwfzELMAkGA1UEBhMC
+VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T
+U0wgQ29ycG9yYXRpb24xNDAyBgNVBAMMK1NTTC5jb20gRVYgUm9vdCBDZXJ0aWZp
+Y2F0aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNTIzWhcNNDEwMjEyMTgx
+NTIzWjB/MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv
+dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrU1NMLmNv
+bSBFViBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49
+AgEGBSuBBAAiA2IABKoSR5CYG/vvw0AHgyBO8TCCogbR8pKGYfL2IWjKAMTH6kMA
+VIbc/R/fALhBYlzccBYy3h+Z1MzFB8gIH2EWB1E9fVwHU+M1OIzfzZ/ZLg1Kthku
+WnBaBu2+8KGwytAJKaNjMGEwHQYDVR0OBBYEFFvKXuXe0oGqzagtZFG22XKbl+ZP
+MA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUW8pe5d7SgarNqC1kUbbZcpuX
+5k8wDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2gAMGUCMQCK5kCJN+vp1RPZ
+ytRrJPOwPYdGWBrssd9v+1a6cGvHOMzosYxPD/fxZ3YOg9AeUY8CMD32IygmTMZg
+h5Mmm7I1HrrW9zzRHM76JTymGoEVW/MSD2zuZYrJh6j5B+BimoxcSg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6
+# Label: "GlobalSign Root CA - R6"
+# Serial: 1417766617973444989252670301619537
+# MD5 Fingerprint: 4f:dd:07:e4:d4:22:64:39:1e:0c:37:42:ea:d1:c6:ae
+# SHA1 Fingerprint: 80:94:64:0e:b5:a7:a1:ca:11:9c:1f:dd:d5:9f:81:02:63:a7:fb:d1
+# SHA256 Fingerprint: 2c:ab:ea:fe:37:d0:6c:a2:2a:ba:73:91:c0:03:3d:25:98:29:52:c4:53:64:73:49:76:3a:3a:b5:ad:6c:cf:69
+-----BEGIN CERTIFICATE-----
+MIIFgzCCA2ugAwIBAgIORea7A4Mzw4VlSOb/RVEwDQYJKoZIhvcNAQEMBQAwTDEg
+MB4GA1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjYxEzARBgNVBAoTCkdsb2Jh
+bFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMTQxMjEwMDAwMDAwWhcNMzQx
+MjEwMDAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSNjET
+MBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCAiIwDQYJ
+KoZIhvcNAQEBBQADggIPADCCAgoCggIBAJUH6HPKZvnsFMp7PPcNCPG0RQssgrRI
+xutbPK6DuEGSMxSkb3/pKszGsIhrxbaJ0cay/xTOURQh7ErdG1rG1ofuTToVBu1k
+ZguSgMpE3nOUTvOniX9PeGMIyBJQbUJmL025eShNUhqKGoC3GYEOfsSKvGRMIRxD
+aNc9PIrFsmbVkJq3MQbFvuJtMgamHvm566qjuL++gmNQ0PAYid/kD3n16qIfKtJw
+LnvnvJO7bVPiSHyMEAc4/2ayd2F+4OqMPKq0pPbzlUoSB239jLKJz9CgYXfIWHSw
+1CM69106yqLbnQneXUQtkPGBzVeS+n68UARjNN9rkxi+azayOeSsJDa38O+2HBNX
+k7besvjihbdzorg1qkXy4J02oW9UivFyVm4uiMVRQkQVlO6jxTiWm05OWgtH8wY2
+SXcwvHE35absIQh1/OZhFj931dmRl4QKbNQCTXTAFO39OfuD8l4UoQSwC+n+7o/h
+bguyCLNhZglqsQY6ZZZZwPA1/cnaKI0aEYdwgQqomnUdnjqGBQCe24DWJfncBZ4n
+WUx2OVvq+aWh2IMP0f/fMBH5hc8zSPXKbWQULHpYT9NLCEnFlWQaYw55PfWzjMpY
+rZxCRXluDocZXFSxZba/jJvcE+kNb7gu3GduyYsRtYQUigAZcIN5kZeR1Bonvzce
+MgfYFGM8KEyvAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTAD
+AQH/MB0GA1UdDgQWBBSubAWjkxPioufi1xzWx/B/yGdToDAfBgNVHSMEGDAWgBSu
+bAWjkxPioufi1xzWx/B/yGdToDANBgkqhkiG9w0BAQwFAAOCAgEAgyXt6NH9lVLN
+nsAEoJFp5lzQhN7craJP6Ed41mWYqVuoPId8AorRbrcWc+ZfwFSY1XS+wc3iEZGt
+Ixg93eFyRJa0lV7Ae46ZeBZDE1ZXs6KzO7V33EByrKPrmzU+sQghoefEQzd5Mr61
+55wsTLxDKZmOMNOsIeDjHfrYBzN2VAAiKrlNIC5waNrlU/yDXNOd8v9EDERm8tLj
+vUYAGm0CuiVdjaExUd1URhxN25mW7xocBFymFe944Hn+Xds+qkxV/ZoVqW/hpvvf
+cDDpw+5CRu3CkwWJ+n1jez/QcYF8AOiYrg54NMMl+68KnyBr3TsTjxKM4kEaSHpz
+oHdpx7Zcf4LIHv5YGygrqGytXm3ABdJ7t+uA/iU3/gKbaKxCXcPu9czc8FB10jZp
+nOZ7BN9uBmm23goJSFmH63sUYHpkqmlD75HHTOwY3WzvUy2MmeFe8nI+z1TIvWfs
+pA9MRf/TuTAjB0yPEL+GltmZWrSZVxykzLsViVO6LAUP5MSeGbEYNNVMnbrt9x+v
+JJUEeKgDu+6B5dpffItKoZB0JaezPkvILFa9x8jvOOJckvB595yEunQtYQEgfn7R
+8k8HWV+LLUNS60YMlOH1Zkd5d9VUWx+tJDfLRVpOoERIyNiwmcUVhAn21klJwGW4
+5hpxbqCo8YLoRT5s1gLXCmeDBVrJpBA=
+-----END CERTIFICATE-----
+
+# Issuer: CN=OISTE WISeKey Global Root GC CA O=WISeKey OU=OISTE Foundation Endorsed
+# Subject: CN=OISTE WISeKey Global Root GC CA O=WISeKey OU=OISTE Foundation Endorsed
+# Label: "OISTE WISeKey Global Root GC CA"
+# Serial: 44084345621038548146064804565436152554
+# MD5 Fingerprint: a9:d6:b9:2d:2f:93:64:f8:a5:69:ca:91:e9:68:07:23
+# SHA1 Fingerprint: e0:11:84:5e:34:de:be:88:81:b9:9c:f6:16:26:d1:96:1f:c3:b9:31
+# SHA256 Fingerprint: 85:60:f9:1c:36:24:da:ba:95:70:b5:fe:a0:db:e3:6f:f1:1a:83:23:be:94:86:85:4f:b3:f3:4a:55:71:19:8d
+-----BEGIN CERTIFICATE-----
+MIICaTCCAe+gAwIBAgIQISpWDK7aDKtARb8roi066jAKBggqhkjOPQQDAzBtMQsw
+CQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUgRm91
+bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwg
+Um9vdCBHQyBDQTAeFw0xNzA1MDkwOTQ4MzRaFw00MjA1MDkwOTU4MzNaMG0xCzAJ
+BgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBGb3Vu
+ZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2JhbCBS
+b290IEdDIENBMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAETOlQwMYPchi82PG6s4ni
+eUqjFqdrVCTbUf/q9Akkwwsin8tqJ4KBDdLArzHkdIJuyiXZjHWd8dvQmqJLIX4W
+p2OQ0jnUsYd4XxiWD1AbNTcPasbc2RNNpI6QN+a9WzGRo1QwUjAOBgNVHQ8BAf8E
+BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUSIcUrOPDnpBgOtfKie7T
+rYy0UGYwEAYJKwYBBAGCNxUBBAMCAQAwCgYIKoZIzj0EAwMDaAAwZQIwJsdpW9zV
+57LnyAyMjMPdeYwbY9XJUpROTYJKcx6ygISpJcBMWm1JKWB4E+J+SOtkAjEA2zQg
+Mgj/mkkCtojeFK9dbJlxjRo/i9fgojaGHAeCOnZT/cKi7e97sIBPWA9LUzm9
+-----END CERTIFICATE-----
+
+# Issuer: CN=UCA Global G2 Root O=UniTrust
+# Subject: CN=UCA Global G2 Root O=UniTrust
+# Label: "UCA Global G2 Root"
+# Serial: 124779693093741543919145257850076631279
+# MD5 Fingerprint: 80:fe:f0:c4:4a:f0:5c:62:32:9f:1c:ba:78:a9:50:f8
+# SHA1 Fingerprint: 28:f9:78:16:19:7a:ff:18:25:18:aa:44:fe:c1:a0:ce:5c:b6:4c:8a
+# SHA256 Fingerprint: 9b:ea:11:c9:76:fe:01:47:64:c1:be:56:a6:f9:14:b5:a5:60:31:7a:bd:99:88:39:33:82:e5:16:1a:a0:49:3c
+-----BEGIN CERTIFICATE-----
+MIIFRjCCAy6gAwIBAgIQXd+x2lqj7V2+WmUgZQOQ7zANBgkqhkiG9w0BAQsFADA9
+MQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxGzAZBgNVBAMMElVDQSBH
+bG9iYWwgRzIgUm9vdDAeFw0xNjAzMTEwMDAwMDBaFw00MDEyMzEwMDAwMDBaMD0x
+CzAJBgNVBAYTAkNOMREwDwYDVQQKDAhVbmlUcnVzdDEbMBkGA1UEAwwSVUNBIEds
+b2JhbCBHMiBSb290MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxeYr
+b3zvJgUno4Ek2m/LAfmZmqkywiKHYUGRO8vDaBsGxUypK8FnFyIdK+35KYmToni9
+kmugow2ifsqTs6bRjDXVdfkX9s9FxeV67HeToI8jrg4aA3++1NDtLnurRiNb/yzm
+VHqUwCoV8MmNsHo7JOHXaOIxPAYzRrZUEaalLyJUKlgNAQLx+hVRZ2zA+te2G3/R
+VogvGjqNO7uCEeBHANBSh6v7hn4PJGtAnTRnvI3HLYZveT6OqTwXS3+wmeOwcWDc
+C/Vkw85DvG1xudLeJ1uK6NjGruFZfc8oLTW4lVYa8bJYS7cSN8h8s+1LgOGN+jIj
+tm+3SJUIsUROhYw6AlQgL9+/V087OpAh18EmNVQg7Mc/R+zvWr9LesGtOxdQXGLY
+D0tK3Cv6brxzks3sx1DoQZbXqX5t2Okdj4q1uViSukqSKwxW/YDrCPBeKW4bHAyv
+j5OJrdu9o54hyokZ7N+1wxrrFv54NkzWbtA+FxyQF2smuvt6L78RHBgOLXMDj6Dl
+NaBa4kx1HXHhOThTeEDMg5PXCp6dW4+K5OXgSORIskfNTip1KnvyIvbJvgmRlld6
+iIis7nCs+dwp4wwcOxJORNanTrAmyPPZGpeRaOrvjUYG0lZFWJo8DA+DuAUlwznP
+O6Q0ibd5Ei9Hxeepl2n8pndntd978XplFeRhVmUCAwEAAaNCMEAwDgYDVR0PAQH/
+BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFIHEjMz15DD/pQwIX4wV
+ZyF0Ad/fMA0GCSqGSIb3DQEBCwUAA4ICAQATZSL1jiutROTL/7lo5sOASD0Ee/oj
+L3rtNtqyzm325p7lX1iPyzcyochltq44PTUbPrw7tgTQvPlJ9Zv3hcU2tsu8+Mg5
+1eRfB70VVJd0ysrtT7q6ZHafgbiERUlMjW+i67HM0cOU2kTC5uLqGOiiHycFutfl
+1qnN3e92mI0ADs0b+gO3joBYDic/UvuUospeZcnWhNq5NXHzJsBPd+aBJ9J3O5oU
+b3n09tDh05S60FdRvScFDcH9yBIw7m+NESsIndTUv4BFFJqIRNow6rSn4+7vW4LV
+PtateJLbXDzz2K36uGt/xDYotgIVilQsnLAXc47QN6MUPJiVAAwpBVueSUmxX8fj
+y88nZY41F7dXyDDZQVu5FLbowg+UMaeUmMxq67XhJ/UQqAHojhJi6IjMtX9Gl8Cb
+EGY4GjZGXyJoPd/JxhMnq1MGrKI8hgZlb7F+sSlEmqO6SWkoaY/X5V+tBIZkbxqg
+DMUIYs6Ao9Dz7GjevjPHF1t/gMRMTLGmhIrDO7gJzRSBuhjjVFc2/tsvfEehOjPI
++Vg7RE+xygKJBJYoaMVLuCaJu9YzL1DV/pqJuhgyklTGW+Cd+V7lDSKb9triyCGy
+YiGqhkCyLmTTX8jjfhFnRR8F/uOi77Oos/N9j/gMHyIfLXC0uAE0djAA5SN4p1bX
+UB+K+wb1whnw0A==
+-----END CERTIFICATE-----
+
+# Issuer: CN=UCA Extended Validation Root O=UniTrust
+# Subject: CN=UCA Extended Validation Root O=UniTrust
+# Label: "UCA Extended Validation Root"
+# Serial: 106100277556486529736699587978573607008
+# MD5 Fingerprint: a1:f3:5f:43:c6:34:9b:da:bf:8c:7e:05:53:ad:96:e2
+# SHA1 Fingerprint: a3:a1:b0:6f:24:61:23:4a:e3:36:a5:c2:37:fc:a6:ff:dd:f0:d7:3a
+# SHA256 Fingerprint: d4:3a:f9:b3:54:73:75:5c:96:84:fc:06:d7:d8:cb:70:ee:5c:28:e7:73:fb:29:4e:b4:1e:e7:17:22:92:4d:24
+-----BEGIN CERTIFICATE-----
+MIIFWjCCA0KgAwIBAgIQT9Irj/VkyDOeTzRYZiNwYDANBgkqhkiG9w0BAQsFADBH
+MQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNVBAMMHFVDQSBF
+eHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwHhcNMTUwMzEzMDAwMDAwWhcNMzgxMjMx
+MDAwMDAwWjBHMQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNV
+BAMMHFVDQSBFeHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwggIiMA0GCSqGSIb3DQEB
+AQUAA4ICDwAwggIKAoICAQCpCQcoEwKwmeBkqh5DFnpzsZGgdT6o+uM4AHrsiWog
+D4vFsJszA1qGxliG1cGFu0/GnEBNyr7uaZa4rYEwmnySBesFK5pI0Lh2PpbIILvS
+sPGP2KxFRv+qZ2C0d35qHzwaUnoEPQc8hQ2E0B92CvdqFN9y4zR8V05WAT558aop
+O2z6+I9tTcg1367r3CTueUWnhbYFiN6IXSV8l2RnCdm/WhUFhvMJHuxYMjMR83dk
+sHYf5BA1FxvyDrFspCqjc/wJHx4yGVMR59mzLC52LqGj3n5qiAno8geK+LLNEOfi
+c0CTuwjRP+H8C5SzJe98ptfRr5//lpr1kXuYC3fUfugH0mK1lTnj8/FtDw5lhIpj
+VMWAtuCeS31HJqcBCF3RiJ7XwzJE+oJKCmhUfzhTA8ykADNkUVkLo4KRel7sFsLz
+KuZi2irbWWIQJUoqgQtHB0MGcIfS+pMRKXpITeuUx3BNr2fVUbGAIAEBtHoIppB/
+TuDvB0GHr2qlXov7z1CymlSvw4m6WC31MJixNnI5fkkE/SmnTHnkBVfblLkWU41G
+sx2VYVdWf6/wFlthWG82UBEL2KwrlRYaDh8IzTY0ZRBiZtWAXxQgXy0MoHgKaNYs
+1+lvK9JKBZP8nm9rZ/+I8U6laUpSNwXqxhaN0sSZ0YIrO7o1dfdRUVjzyAfd5LQD
+fwIDAQABo0IwQDAdBgNVHQ4EFgQU2XQ65DA9DfcS3H5aBZ8eNJr34RQwDwYDVR0T
+AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQADggIBADaN
+l8xCFWQpN5smLNb7rhVpLGsaGvdftvkHTFnq88nIua7Mui563MD1sC3AO6+fcAUR
+ap8lTwEpcOPlDOHqWnzcSbvBHiqB9RZLcpHIojG5qtr8nR/zXUACE/xOHAbKsxSQ
+VBcZEhrxH9cMaVr2cXj0lH2RC47skFSOvG+hTKv8dGT9cZr4QQehzZHkPJrgmzI5
+c6sq1WnIeJEmMX3ixzDx/BR4dxIOE/TdFpS/S2d7cFOFyrC78zhNLJA5wA3CXWvp
+4uXViI3WLL+rG761KIcSF3Ru/H38j9CHJrAb+7lsq+KePRXBOy5nAliRn+/4Qh8s
+t2j1da3Ptfb/EX3C8CSlrdP6oDyp+l3cpaDvRKS+1ujl5BOWF3sGPjLtx7dCvHaj
+2GU4Kzg1USEODm8uNBNA4StnDG1KQTAYI1oyVZnJF+A83vbsea0rWBmirSwiGpWO
+vpaQXUJXxPkUAzUrHC1RVwinOt4/5Mi0A3PCwSaAuwtCH60NryZy2sy+s6ODWA2C
+xR9GUeOcGMyNm43sSet1UNWMKFnKdDTajAshqx7qG+XH/RU+wBeq+yNuJkbL+vmx
+cmtpzyKEC2IPrNkZAJSidjzULZrtBJ4tBmIQN1IchXIbJ+XMxjHsN+xjWZsLHXbM
+fjKaiJUINlK73nZfdklJrX+9ZSCyycErdhh2n1ax
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certigna Root CA O=Dhimyotis OU=0002 48146308100036
+# Subject: CN=Certigna Root CA O=Dhimyotis OU=0002 48146308100036
+# Label: "Certigna Root CA"
+# Serial: 269714418870597844693661054334862075617
+# MD5 Fingerprint: 0e:5c:30:62:27:eb:5b:bc:d7:ae:62:ba:e9:d5:df:77
+# SHA1 Fingerprint: 2d:0d:52:14:ff:9e:ad:99:24:01:74:20:47:6e:6c:85:27:27:f5:43
+# SHA256 Fingerprint: d4:8d:3d:23:ee:db:50:a4:59:e5:51:97:60:1c:27:77:4b:9d:7b:18:c9:4d:5a:05:95:11:a1:02:50:b9:31:68
+-----BEGIN CERTIFICATE-----
+MIIGWzCCBEOgAwIBAgIRAMrpG4nxVQMNo+ZBbcTjpuEwDQYJKoZIhvcNAQELBQAw
+WjELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczEcMBoGA1UECwwTMDAw
+MiA0ODE0NjMwODEwMDAzNjEZMBcGA1UEAwwQQ2VydGlnbmEgUm9vdCBDQTAeFw0x
+MzEwMDEwODMyMjdaFw0zMzEwMDEwODMyMjdaMFoxCzAJBgNVBAYTAkZSMRIwEAYD
+VQQKDAlEaGlteW90aXMxHDAaBgNVBAsMEzAwMDIgNDgxNDYzMDgxMDAwMzYxGTAX
+BgNVBAMMEENlcnRpZ25hIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw
+ggIKAoICAQDNGDllGlmx6mQWDoyUJJV8g9PFOSbcDO8WV43X2KyjQn+Cyu3NW9sO
+ty3tRQgXstmzy9YXUnIo245Onoq2C/mehJpNdt4iKVzSs9IGPjA5qXSjklYcoW9M
+CiBtnyN6tMbaLOQdLNyzKNAT8kxOAkmhVECe5uUFoC2EyP+YbNDrihqECB63aCPu
+I9Vwzm1RaRDuoXrC0SIxwoKF0vJVdlB8JXrJhFwLrN1CTivngqIkicuQstDuI7pm
+TLtipPlTWmR7fJj6o0ieD5Wupxj0auwuA0Wv8HT4Ks16XdG+RCYyKfHx9WzMfgIh
+C59vpD++nVPiz32pLHxYGpfhPTc3GGYo0kDFUYqMwy3OU4gkWGQwFsWq4NYKpkDf
+ePb1BHxpE4S80dGnBs8B92jAqFe7OmGtBIyT46388NtEbVncSVmurJqZNjBBe3Yz
+IoejwpKGbvlw7q6Hh5UbxHq9MfPU0uWZ/75I7HX1eBYdpnDBfzwboZL7z8g81sWT
+Co/1VTp2lc5ZmIoJlXcymoO6LAQ6l73UL77XbJuiyn1tJslV1c/DeVIICZkHJC1k
+JWumIWmbat10TWuXekG9qxf5kBdIjzb5LdXF2+6qhUVB+s06RbFo5jZMm5BX7CO5
+hwjCxAnxl4YqKE3idMDaxIzb3+KhF1nOJFl0Mdp//TBt2dzhauH8XwIDAQABo4IB
+GjCCARYwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE
+FBiHVuBud+4kNTxOc5of1uHieX4rMB8GA1UdIwQYMBaAFBiHVuBud+4kNTxOc5of
+1uHieX4rMEQGA1UdIAQ9MDswOQYEVR0gADAxMC8GCCsGAQUFBwIBFiNodHRwczov
+L3d3d3cuY2VydGlnbmEuZnIvYXV0b3JpdGVzLzBtBgNVHR8EZjBkMC+gLaArhilo
+dHRwOi8vY3JsLmNlcnRpZ25hLmZyL2NlcnRpZ25hcm9vdGNhLmNybDAxoC+gLYYr
+aHR0cDovL2NybC5kaGlteW90aXMuY29tL2NlcnRpZ25hcm9vdGNhLmNybDANBgkq
+hkiG9w0BAQsFAAOCAgEAlLieT/DjlQgi581oQfccVdV8AOItOoldaDgvUSILSo3L
+6btdPrtcPbEo/uRTVRPPoZAbAh1fZkYJMyjhDSSXcNMQH+pkV5a7XdrnxIxPTGRG
+HVyH41neQtGbqH6mid2PHMkwgu07nM3A6RngatgCdTer9zQoKJHyBApPNeNgJgH6
+0BGM+RFq7q89w1DTj18zeTyGqHNFkIwgtnJzFyO+B2XleJINugHA64wcZr+shncB
+lA2c5uk5jR+mUYyZDDl34bSb+hxnV29qao6pK0xXeXpXIs/NX2NGjVxZOob4Mkdi
+o2cNGJHc+6Zr9UhhcyNZjgKnvETq9Emd8VRY+WCv2hikLyhF3HqgiIZd8zvn/yk1
+gPxkQ5Tm4xxvvq0OKmOZK8l+hfZx6AYDlf7ej0gcWtSS6Cvu5zHbugRqh5jnxV/v
+faci9wHYTfmJ0A6aBVmknpjZbyvKcL5kwlWj9Omvw5Ip3IgWJJk8jSaYtlu3zM63
+Nwf9JtmYhST/WSMDmu2dnajkXjjO11INb9I/bbEFa0nOipFGc/T2L/Coc3cOZayh
+jWZSaX5LaAzHHjcng6WMxwLkFM1JAbBzs/3GkDpv0mztO+7skb6iQ12LAEpmJURw
+3kAP+HwV96LOPNdeE4yBFxgX0b3xdxA61GU5wSesVywlVP+i2k+KYTlerj1KjL0=
+-----END CERTIFICATE-----
+
+# Issuer: CN=emSign Root CA - G1 O=eMudhra Technologies Limited OU=emSign PKI
+# Subject: CN=emSign Root CA - G1 O=eMudhra Technologies Limited OU=emSign PKI
+# Label: "emSign Root CA - G1"
+# Serial: 235931866688319308814040
+# MD5 Fingerprint: 9c:42:84:57:dd:cb:0b:a7:2e:95:ad:b6:f3:da:bc:ac
+# SHA1 Fingerprint: 8a:c7:ad:8f:73:ac:4e:c1:b5:75:4d:a5:40:f4:fc:cf:7c:b5:8e:8c
+# SHA256 Fingerprint: 40:f6:af:03:46:a9:9a:a1:cd:1d:55:5a:4e:9c:ce:62:c7:f9:63:46:03:ee:40:66:15:83:3d:c8:c8:d0:03:67
+-----BEGIN CERTIFICATE-----
+MIIDlDCCAnygAwIBAgIKMfXkYgxsWO3W2DANBgkqhkiG9w0BAQsFADBnMQswCQYD
+VQQGEwJJTjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBU
+ZWNobm9sb2dpZXMgTGltaXRlZDEcMBoGA1UEAxMTZW1TaWduIFJvb3QgQ0EgLSBH
+MTAeFw0xODAyMTgxODMwMDBaFw00MzAyMTgxODMwMDBaMGcxCzAJBgNVBAYTAklO
+MRMwEQYDVQQLEwplbVNpZ24gUEtJMSUwIwYDVQQKExxlTXVkaHJhIFRlY2hub2xv
+Z2llcyBMaW1pdGVkMRwwGgYDVQQDExNlbVNpZ24gUm9vdCBDQSAtIEcxMIIBIjAN
+BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAk0u76WaK7p1b1TST0Bsew+eeuGQz
+f2N4aLTNLnF115sgxk0pvLZoYIr3IZpWNVrzdr3YzZr/k1ZLpVkGoZM0Kd0WNHVO
+8oG0x5ZOrRkVUkr+PHB1cM2vK6sVmjM8qrOLqs1D/fXqcP/tzxE7lM5OMhbTI0Aq
+d7OvPAEsbO2ZLIvZTmmYsvePQbAyeGHWDV/D+qJAkh1cF+ZwPjXnorfCYuKrpDhM
+tTk1b+oDafo6VGiFbdbyL0NVHpENDtjVaqSW0RM8LHhQ6DqS0hdW5TUaQBw+jSzt
+Od9C4INBdN+jzcKGYEho42kLVACL5HZpIQ15TjQIXhTCzLG3rdd8cIrHhQIDAQAB
+o0IwQDAdBgNVHQ4EFgQU++8Nhp6w492pufEhF38+/PB3KxowDgYDVR0PAQH/BAQD
+AgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAFn/8oz1h31x
+PaOfG1vR2vjTnGs2vZupYeveFix0PZ7mddrXuqe8QhfnPZHr5X3dPpzxz5KsbEjM
+wiI/aTvFthUvozXGaCocV685743QNcMYDHsAVhzNixl03r4PEuDQqqE/AjSxcM6d
+GNYIAwlG7mDgfrbESQRRfXBgvKqy/3lyeqYdPV8q+Mri/Tm3R7nrft8EI6/6nAYH
+6ftjk4BAtcZsCjEozgyfz7MjNYBBjWzEN3uBL4ChQEKF6dk4jeihU80Bv2noWgby
+RQuQ+q7hv53yrlc8pa6yVvSLZUDp/TGBLPQ5Cdjua6e0ph0VpZj3AYHYhX3zUVxx
+iN66zB+Afko=
+-----END CERTIFICATE-----
+
+# Issuer: CN=emSign ECC Root CA - G3 O=eMudhra Technologies Limited OU=emSign PKI
+# Subject: CN=emSign ECC Root CA - G3 O=eMudhra Technologies Limited OU=emSign PKI
+# Label: "emSign ECC Root CA - G3"
+# Serial: 287880440101571086945156
+# MD5 Fingerprint: ce:0b:72:d1:9f:88:8e:d0:50:03:e8:e3:b8:8b:67:40
+# SHA1 Fingerprint: 30:43:fa:4f:f2:57:dc:a0:c3:80:ee:2e:58:ea:78:b2:3f:e6:bb:c1
+# SHA256 Fingerprint: 86:a1:ec:ba:08:9c:4a:8d:3b:be:27:34:c6:12:ba:34:1d:81:3e:04:3c:f9:e8:a8:62:cd:5c:57:a3:6b:be:6b
+-----BEGIN CERTIFICATE-----
+MIICTjCCAdOgAwIBAgIKPPYHqWhwDtqLhDAKBggqhkjOPQQDAzBrMQswCQYDVQQG
+EwJJTjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBUZWNo
+bm9sb2dpZXMgTGltaXRlZDEgMB4GA1UEAxMXZW1TaWduIEVDQyBSb290IENBIC0g
+RzMwHhcNMTgwMjE4MTgzMDAwWhcNNDMwMjE4MTgzMDAwWjBrMQswCQYDVQQGEwJJ
+TjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBUZWNobm9s
+b2dpZXMgTGltaXRlZDEgMB4GA1UEAxMXZW1TaWduIEVDQyBSb290IENBIC0gRzMw
+djAQBgcqhkjOPQIBBgUrgQQAIgNiAAQjpQy4LRL1KPOxst3iAhKAnjlfSU2fySU0
+WXTsuwYc58Byr+iuL+FBVIcUqEqy6HyC5ltqtdyzdc6LBtCGI79G1Y4PPwT01xyS
+fvalY8L1X44uT6EYGQIrMgqCZH0Wk9GjQjBAMB0GA1UdDgQWBBR8XQKEE9TMipuB
+zhccLikenEhjQjAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggq
+hkjOPQQDAwNpADBmAjEAvvNhzwIQHWSVB7gYboiFBS+DCBeQyh+KTOgNG3qxrdWB
+CUfvO6wIBHxcmbHtRwfSAjEAnbpV/KlK6O3t5nYBQnvI+GDZjVGLVTv7jHvrZQnD
++JbNR6iC8hZVdyR+EhCVBCyj
+-----END CERTIFICATE-----
+
+# Issuer: CN=emSign Root CA - C1 O=eMudhra Inc OU=emSign PKI
+# Subject: CN=emSign Root CA - C1 O=eMudhra Inc OU=emSign PKI
+# Label: "emSign Root CA - C1"
+# Serial: 825510296613316004955058
+# MD5 Fingerprint: d8:e3:5d:01:21:fa:78:5a:b0:df:ba:d2:ee:2a:5f:68
+# SHA1 Fingerprint: e7:2e:f1:df:fc:b2:09:28:cf:5d:d4:d5:67:37:b1:51:cb:86:4f:01
+# SHA256 Fingerprint: 12:56:09:aa:30:1d:a0:a2:49:b9:7a:82:39:cb:6a:34:21:6f:44:dc:ac:9f:39:54:b1:42:92:f2:e8:c8:60:8f
+-----BEGIN CERTIFICATE-----
+MIIDczCCAlugAwIBAgILAK7PALrEzzL4Q7IwDQYJKoZIhvcNAQELBQAwVjELMAkG
+A1UEBhMCVVMxEzARBgNVBAsTCmVtU2lnbiBQS0kxFDASBgNVBAoTC2VNdWRocmEg
+SW5jMRwwGgYDVQQDExNlbVNpZ24gUm9vdCBDQSAtIEMxMB4XDTE4MDIxODE4MzAw
+MFoXDTQzMDIxODE4MzAwMFowVjELMAkGA1UEBhMCVVMxEzARBgNVBAsTCmVtU2ln
+biBQS0kxFDASBgNVBAoTC2VNdWRocmEgSW5jMRwwGgYDVQQDExNlbVNpZ24gUm9v
+dCBDQSAtIEMxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAz+upufGZ
+BczYKCFK83M0UYRWEPWgTywS4/oTmifQz/l5GnRfHXk5/Fv4cI7gklL35CX5VIPZ
+HdPIWoU/Xse2B+4+wM6ar6xWQio5JXDWv7V7Nq2s9nPczdcdioOl+yuQFTdrHCZH
+3DspVpNqs8FqOp099cGXOFgFixwR4+S0uF2FHYP+eF8LRWgYSKVGczQ7/g/IdrvH
+GPMF0Ybzhe3nudkyrVWIzqa2kbBPrH4VI5b2P/AgNBbeCsbEBEV5f6f9vtKppa+c
+xSMq9zwhbL2vj07FOrLzNBL834AaSaTUqZX3noleoomslMuoaJuvimUnzYnu3Yy1
+aylwQ6BpC+S5DwIDAQABo0IwQDAdBgNVHQ4EFgQU/qHgcB4qAzlSWkK+XJGFehiq
+TbUwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL
+BQADggEBAMJKVvoVIXsoounlHfv4LcQ5lkFMOycsxGwYFYDGrK9HWS8mC+M2sO87
+/kOXSTKZEhVb3xEp/6tT+LvBeA+snFOvV71ojD1pM/CjoCNjO2RnIkSt1XHLVip4
+kqNPEjE2NuLe/gDEo2APJ62gsIq1NnpSob0n9CAnYuhNlCQT5AoE6TyrLshDCUrG
+YQTlSTR+08TI9Q/Aqum6VF7zYytPT1DU/rl7mYw9wC68AivTxEDkigcxHpvOJpkT
++xHqmiIMERnHXhuBUDDIlhJu58tBf5E7oke3VIAb3ADMmpDqw8NQBmIMMMAVSKeo
+WXzhriKi4gp6D/piq1JM4fHfyr6DDUI=
+-----END CERTIFICATE-----
+
+# Issuer: CN=emSign ECC Root CA - C3 O=eMudhra Inc OU=emSign PKI
+# Subject: CN=emSign ECC Root CA - C3 O=eMudhra Inc OU=emSign PKI
+# Label: "emSign ECC Root CA - C3"
+# Serial: 582948710642506000014504
+# MD5 Fingerprint: 3e:53:b3:a3:81:ee:d7:10:f8:d3:b0:1d:17:92:f5:d5
+# SHA1 Fingerprint: b6:af:43:c2:9b:81:53:7d:f6:ef:6b:c3:1f:1f:60:15:0c:ee:48:66
+# SHA256 Fingerprint: bc:4d:80:9b:15:18:9d:78:db:3e:1d:8c:f4:f9:72:6a:79:5d:a1:64:3c:a5:f1:35:8e:1d:db:0e:dc:0d:7e:b3
+-----BEGIN CERTIFICATE-----
+MIICKzCCAbGgAwIBAgIKe3G2gla4EnycqDAKBggqhkjOPQQDAzBaMQswCQYDVQQG
+EwJVUzETMBEGA1UECxMKZW1TaWduIFBLSTEUMBIGA1UEChMLZU11ZGhyYSBJbmMx
+IDAeBgNVBAMTF2VtU2lnbiBFQ0MgUm9vdCBDQSAtIEMzMB4XDTE4MDIxODE4MzAw
+MFoXDTQzMDIxODE4MzAwMFowWjELMAkGA1UEBhMCVVMxEzARBgNVBAsTCmVtU2ln
+biBQS0kxFDASBgNVBAoTC2VNdWRocmEgSW5jMSAwHgYDVQQDExdlbVNpZ24gRUND
+IFJvb3QgQ0EgLSBDMzB2MBAGByqGSM49AgEGBSuBBAAiA2IABP2lYa57JhAd6bci
+MK4G9IGzsUJxlTm801Ljr6/58pc1kjZGDoeVjbk5Wum739D+yAdBPLtVb4Ojavti
+sIGJAnB9SMVK4+kiVCJNk7tCDK93nCOmfddhEc5lx/h//vXyqaNCMEAwHQYDVR0O
+BBYEFPtaSNCAIEDyqOkAB2kZd6fmw/TPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB
+Af8EBTADAQH/MAoGCCqGSM49BAMDA2gAMGUCMQC02C8Cif22TGK6Q04ThHK1rt0c
+3ta13FaPWEBaLd4gTCKDypOofu4SQMfWh0/434UCMBwUZOR8loMRnLDRWmFLpg9J
+0wD8ofzkpf9/rdcw0Md3f76BB1UwUCAU9Vc4CqgxUQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Hongkong Post Root CA 3 O=Hongkong Post
+# Subject: CN=Hongkong Post Root CA 3 O=Hongkong Post
+# Label: "Hongkong Post Root CA 3"
+# Serial: 46170865288971385588281144162979347873371282084
+# MD5 Fingerprint: 11:fc:9f:bd:73:30:02:8a:fd:3f:f3:58:b9:cb:20:f0
+# SHA1 Fingerprint: 58:a2:d0:ec:20:52:81:5b:c1:f3:f8:64:02:24:4e:c2:8e:02:4b:02
+# SHA256 Fingerprint: 5a:2f:c0:3f:0c:83:b0:90:bb:fa:40:60:4b:09:88:44:6c:76:36:18:3d:f9:84:6e:17:10:1a:44:7f:b8:ef:d6
+-----BEGIN CERTIFICATE-----
+MIIFzzCCA7egAwIBAgIUCBZfikyl7ADJk0DfxMauI7gcWqQwDQYJKoZIhvcNAQEL
+BQAwbzELMAkGA1UEBhMCSEsxEjAQBgNVBAgTCUhvbmcgS29uZzESMBAGA1UEBxMJ
+SG9uZyBLb25nMRYwFAYDVQQKEw1Ib25na29uZyBQb3N0MSAwHgYDVQQDExdIb25n
+a29uZyBQb3N0IFJvb3QgQ0EgMzAeFw0xNzA2MDMwMjI5NDZaFw00MjA2MDMwMjI5
+NDZaMG8xCzAJBgNVBAYTAkhLMRIwEAYDVQQIEwlIb25nIEtvbmcxEjAQBgNVBAcT
+CUhvbmcgS29uZzEWMBQGA1UEChMNSG9uZ2tvbmcgUG9zdDEgMB4GA1UEAxMXSG9u
+Z2tvbmcgUG9zdCBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK
+AoICAQCziNfqzg8gTr7m1gNt7ln8wlffKWihgw4+aMdoWJwcYEuJQwy51BWy7sFO
+dem1p+/l6TWZ5Mwc50tfjTMwIDNT2aa71T4Tjukfh0mtUC1Qyhi+AViiE3CWu4mI
+VoBc+L0sPOFMV4i707mV78vH9toxdCim5lSJ9UExyuUmGs2C4HDaOym71QP1mbpV
+9WTRYA6ziUm4ii8F0oRFKHyPaFASePwLtVPLwpgchKOesL4jpNrcyCse2m5FHomY
+2vkALgbpDDtw1VAliJnLzXNg99X/NWfFobxeq81KuEXryGgeDQ0URhLj0mRiikKY
+vLTGCAj4/ahMZJx2Ab0vqWwzD9g/KLg8aQFChn5pwckGyuV6RmXpwtZQQS4/t+Tt
+bNe/JgERohYpSms0BpDsE9K2+2p20jzt8NYt3eEV7KObLyzJPivkaTv/ciWxNoZb
+x39ri1UbSsUgYT2uy1DhCDq+sI9jQVMwCFk8mB13umOResoQUGC/8Ne8lYePl8X+
+l2oBlKN8W4UdKjk60FSh0Tlxnf0h+bV78OLgAo9uliQlLKAeLKjEiafv7ZkGL7YK
+TE/bosw3Gq9HhS2KX8Q0NEwA/RiTZxPRN+ZItIsGxVd7GYYKecsAyVKvQv83j+Gj
+Hno9UKtjBucVtT+2RTeUN7F+8kjDf8V1/peNRY8apxpyKBpADwIDAQABo2MwYTAP
+BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBQXnc0e
+i9Y5K3DTXNSguB+wAPzFYTAdBgNVHQ4EFgQUF53NHovWOStw01zUoLgfsAD8xWEw
+DQYJKoZIhvcNAQELBQADggIBAFbVe27mIgHSQpsY1Q7XZiNc4/6gx5LS6ZStS6LG
+7BJ8dNVI0lkUmcDrudHr9EgwW62nV3OZqdPlt9EuWSRY3GguLmLYauRwCy0gUCCk
+MpXRAJi70/33MvJJrsZ64Ee+bs7Lo3I6LWldy8joRTnU+kLBEUx3XZL7av9YROXr
+gZ6voJmtvqkBZss4HTzfQx/0TW60uhdG/H39h4F5ag0zD/ov+BS5gLNdTaqX4fnk
+GMX41TiMJjz98iji7lpJiCzfeT2OnpA8vUFKOt1b9pq0zj8lMH8yfaIDlNDceqFS
+3m6TjRgm/VWsvY+b0s+v54Ysyx8Jb6NvqYTUc79NoXQbTiNg8swOqn+knEwlqLJm
+Ozj/2ZQw9nKEvmhVEA/GcywWaZMH/rFF7buiVWqw2rVKAiUnhde3t4ZEFolsgCs+
+l6mc1X5VTMbeRRAc6uk7nwNT7u56AQIWeNTowr5GdogTPyK7SBIdUgC0An4hGh6c
+JfTzPV4e0hz5sy229zdcxsshTrD3mUcYhcErulWuBurQB7Lcq9CClnXO0lD+mefP
+L5/ndtFhKvshuzHQqp9HpLIiyhY6UFfEW0NnxWViA0kB60PZ2Pierc+xYw5F9KBa
+LJstxabArahH9CdMOA0uG0k7UvToiIMrVCjU8jVStDKDYmlkDJGcn5fqdBb9HxEG
+mpv0
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust Root Certification Authority - G4 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2015 Entrust, Inc. - for authorized use only
+# Subject: CN=Entrust Root Certification Authority - G4 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2015 Entrust, Inc. - for authorized use only
+# Label: "Entrust Root Certification Authority - G4"
+# Serial: 289383649854506086828220374796556676440
+# MD5 Fingerprint: 89:53:f1:83:23:b7:7c:8e:05:f1:8c:71:38:4e:1f:88
+# SHA1 Fingerprint: 14:88:4e:86:26:37:b0:26:af:59:62:5c:40:77:ec:35:29:ba:96:01
+# SHA256 Fingerprint: db:35:17:d1:f6:73:2a:2d:5a:b9:7c:53:3e:c7:07:79:ee:32:70:a6:2f:b4:ac:42:38:37:24:60:e6:f0:1e:88
+-----BEGIN CERTIFICATE-----
+MIIGSzCCBDOgAwIBAgIRANm1Q3+vqTkPAAAAAFVlrVgwDQYJKoZIhvcNAQELBQAw
+gb4xCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQL
+Ex9TZWUgd3d3LmVudHJ1c3QubmV0L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykg
+MjAxNSBFbnRydXN0LCBJbmMuIC0gZm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMjAw
+BgNVBAMTKUVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEc0
+MB4XDTE1MDUyNzExMTExNloXDTM3MTIyNzExNDExNlowgb4xCzAJBgNVBAYTAlVT
+MRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1
+c3QubmV0L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxNSBFbnRydXN0LCBJ
+bmMuIC0gZm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMjAwBgNVBAMTKUVudHJ1c3Qg
+Um9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEc0MIICIjANBgkqhkiG9w0B
+AQEFAAOCAg8AMIICCgKCAgEAsewsQu7i0TD/pZJH4i3DumSXbcr3DbVZwbPLqGgZ
+2K+EbTBwXX7zLtJTmeH+H17ZSK9dE43b/2MzTdMAArzE+NEGCJR5WIoV3imz/f3E
+T+iq4qA7ec2/a0My3dl0ELn39GjUu9CH1apLiipvKgS1sqbHoHrmSKvS0VnM1n4j
+5pds8ELl3FFLFUHtSUrJ3hCX1nbB76W1NhSXNdh4IjVS70O92yfbYVaCNNzLiGAM
+C1rlLAHGVK/XqsEQe9IFWrhAnoanw5CGAlZSCXqc0ieCU0plUmr1POeo8pyvi73T
+DtTUXm6Hnmo9RR3RXRv06QqsYJn7ibT/mCzPfB3pAqoEmh643IhuJbNsZvc8kPNX
+wbMv9W3y+8qh+CmdRouzavbmZwe+LGcKKh9asj5XxNMhIWNlUpEbsZmOeX7m640A
+2Vqq6nPopIICR5b+W45UYaPrL0swsIsjdXJ8ITzI9vF01Bx7owVV7rtNOzK+mndm
+nqxpkCIHH2E6lr7lmk/MBTwoWdPBDFSoWWG9yHJM6Nyfh3+9nEg2XpWjDrk4JFX8
+dWbrAuMINClKxuMrLzOg2qOGpRKX/YAr2hRC45K9PvJdXmd0LhyIRyk0X+IyqJwl
+N4y6mACXi0mWHv0liqzc2thddG5msP9E36EYxr5ILzeUePiVSj9/E15dWf10hkNj
+c0kCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD
+VR0OBBYEFJ84xFYjwznooHFs6FRM5Og6sb9nMA0GCSqGSIb3DQEBCwUAA4ICAQAS
+5UKme4sPDORGpbZgQIeMJX6tuGguW8ZAdjwD+MlZ9POrYs4QjbRaZIxowLByQzTS
+Gwv2LFPSypBLhmb8qoMi9IsabyZIrHZ3CL/FmFz0Jomee8O5ZDIBf9PD3Vht7LGr
+hFV0d4QEJ1JrhkzO3bll/9bGXp+aEJlLdWr+aumXIOTkdnrG0CSqkM0gkLpHZPt/
+B7NTeLUKYvJzQ85BK4FqLoUWlFPUa19yIqtRLULVAJyZv967lDtX/Zr1hstWO1uI
+AeV8KEsD+UmDfLJ/fOPtjqF/YFOOVZ1QNBIPt5d7bIdKROf1beyAN/BYGW5KaHbw
+H5Lk6rWS02FREAutp9lfx1/cH6NcjKF+m7ee01ZvZl4HliDtC3T7Zk6LERXpgUl+
+b7DUUH8i119lAg2m9IUe2K4GS0qn0jFmwvjO5QimpAKWRGhXxNUzzxkvFMSUHHuk
+2fCfDrGA4tGeEWSpiBE6doLlYsKA2KSD7ZPvfC+QsDJMlhVoSFLUmQjAJOgc47Ol
+IQ6SwJAfzyBfyjs4x7dtOvPmRLgOMWuIjnDrnBdSqEGULoe256YSxXXfW8AKbnuk
+5F6G+TaU33fD6Q3AOfF5u0aOq0NZJ7cguyPpVkAh7DE9ZapD8j3fcEThuk0mEDuY
+n/PIjhs4ViFqUZPTkcpG2om3PVODLAgfi49T3f+sHw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Microsoft ECC Root Certificate Authority 2017 O=Microsoft Corporation
+# Subject: CN=Microsoft ECC Root Certificate Authority 2017 O=Microsoft Corporation
+# Label: "Microsoft ECC Root Certificate Authority 2017"
+# Serial: 136839042543790627607696632466672567020
+# MD5 Fingerprint: dd:a1:03:e6:4a:93:10:d1:bf:f0:19:42:cb:fe:ed:67
+# SHA1 Fingerprint: 99:9a:64:c3:7f:f4:7d:9f:ab:95:f1:47:69:89:14:60:ee:c4:c3:c5
+# SHA256 Fingerprint: 35:8d:f3:9d:76:4a:f9:e1:b7:66:e9:c9:72:df:35:2e:e1:5c:fa:c2:27:af:6a:d1:d7:0e:8e:4a:6e:dc:ba:02
+-----BEGIN CERTIFICATE-----
+MIICWTCCAd+gAwIBAgIQZvI9r4fei7FK6gxXMQHC7DAKBggqhkjOPQQDAzBlMQsw
+CQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYD
+VQQDEy1NaWNyb3NvZnQgRUNDIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIw
+MTcwHhcNMTkxMjE4MjMwNjQ1WhcNNDIwNzE4MjMxNjA0WjBlMQswCQYDVQQGEwJV
+UzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYDVQQDEy1NaWNy
+b3NvZnQgRUNDIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTcwdjAQBgcq
+hkjOPQIBBgUrgQQAIgNiAATUvD0CQnVBEyPNgASGAlEvaqiBYgtlzPbKnR5vSmZR
+ogPZnZH6thaxjG7efM3beaYvzrvOcS/lpaso7GMEZpn4+vKTEAXhgShC48Zo9OYb
+hGBKia/teQ87zvH2RPUBeMCjVDBSMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8E
+BTADAQH/MB0GA1UdDgQWBBTIy5lycFIM+Oa+sgRXKSrPQhDtNTAQBgkrBgEEAYI3
+FQEEAwIBADAKBggqhkjOPQQDAwNoADBlAjBY8k3qDPlfXu5gKcs68tvWMoQZP3zV
+L8KxzJOuULsJMsbG7X7JNpQS5GiFBqIb0C8CMQCZ6Ra0DvpWSNSkMBaReNtUjGUB
+iudQZsIxtzm6uBoiB078a1QWIP8rtedMDE2mT3M=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Microsoft RSA Root Certificate Authority 2017 O=Microsoft Corporation
+# Subject: CN=Microsoft RSA Root Certificate Authority 2017 O=Microsoft Corporation
+# Label: "Microsoft RSA Root Certificate Authority 2017"
+# Serial: 40975477897264996090493496164228220339
+# MD5 Fingerprint: 10:ff:00:ff:cf:c9:f8:c7:7a:c0:ee:35:8e:c9:0f:47
+# SHA1 Fingerprint: 73:a5:e6:4a:3b:ff:83:16:ff:0e:dc:cc:61:8a:90:6e:4e:ae:4d:74
+# SHA256 Fingerprint: c7:41:f7:0f:4b:2a:8d:88:bf:2e:71:c1:41:22:ef:53:ef:10:eb:a0:cf:a5:e6:4c:fa:20:f4:18:85:30:73:e0
+-----BEGIN CERTIFICATE-----
+MIIFqDCCA5CgAwIBAgIQHtOXCV/YtLNHcB6qvn9FszANBgkqhkiG9w0BAQwFADBl
+MQswCQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYw
+NAYDVQQDEy1NaWNyb3NvZnQgUlNBIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5
+IDIwMTcwHhcNMTkxMjE4MjI1MTIyWhcNNDIwNzE4MjMwMDIzWjBlMQswCQYDVQQG
+EwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYDVQQDEy1N
+aWNyb3NvZnQgUlNBIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTcwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKW76UM4wplZEWCpW9R2LBifOZ
+Nt9GkMml7Xhqb0eRaPgnZ1AzHaGm++DlQ6OEAlcBXZxIQIJTELy/xztokLaCLeX0
+ZdDMbRnMlfl7rEqUrQ7eS0MdhweSE5CAg2Q1OQT85elss7YfUJQ4ZVBcF0a5toW1
+HLUX6NZFndiyJrDKxHBKrmCk3bPZ7Pw71VdyvD/IybLeS2v4I2wDwAW9lcfNcztm
+gGTjGqwu+UcF8ga2m3P1eDNbx6H7JyqhtJqRjJHTOoI+dkC0zVJhUXAoP8XFWvLJ
+jEm7FFtNyP9nTUwSlq31/niol4fX/V4ggNyhSyL71Imtus5Hl0dVe49FyGcohJUc
+aDDv70ngNXtk55iwlNpNhTs+VcQor1fznhPbRiefHqJeRIOkpcrVE7NLP8TjwuaG
+YaRSMLl6IE9vDzhTyzMMEyuP1pq9KsgtsRx9S1HKR9FIJ3Jdh+vVReZIZZ2vUpC6
+W6IYZVcSn2i51BVrlMRpIpj0M+Dt+VGOQVDJNE92kKz8OMHY4Xu54+OU4UZpyw4K
+UGsTuqwPN1q3ErWQgR5WrlcihtnJ0tHXUeOrO8ZV/R4O03QK0dqq6mm4lyiPSMQH
++FJDOvTKVTUssKZqwJz58oHhEmrARdlns87/I6KJClTUFLkqqNfs+avNJVgyeY+Q
+W5g5xAgGwax/Dj0ApQIDAQABo1QwUjAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/
+BAUwAwEB/zAdBgNVHQ4EFgQUCctZf4aycI8awznjwNnpv7tNsiMwEAYJKwYBBAGC
+NxUBBAMCAQAwDQYJKoZIhvcNAQEMBQADggIBAKyvPl3CEZaJjqPnktaXFbgToqZC
+LgLNFgVZJ8og6Lq46BrsTaiXVq5lQ7GPAJtSzVXNUzltYkyLDVt8LkS/gxCP81OC
+gMNPOsduET/m4xaRhPtthH80dK2Jp86519efhGSSvpWhrQlTM93uCupKUY5vVau6
+tZRGrox/2KJQJWVggEbbMwSubLWYdFQl3JPk+ONVFT24bcMKpBLBaYVu32TxU5nh
+SnUgnZUP5NbcA/FZGOhHibJXWpS2qdgXKxdJ5XbLwVaZOjex/2kskZGT4d9Mozd2
+TaGf+G0eHdP67Pv0RR0Tbc/3WeUiJ3IrhvNXuzDtJE3cfVa7o7P4NHmJweDyAmH3
+pvwPuxwXC65B2Xy9J6P9LjrRk5Sxcx0ki69bIImtt2dmefU6xqaWM/5TkshGsRGR
+xpl/j8nWZjEgQRCHLQzWwa80mMpkg/sTV9HB8Dx6jKXB/ZUhoHHBk2dxEuqPiApp
+GWSZI1b7rCoucL5mxAyE7+WL85MB+GqQk2dLsmijtWKP6T+MejteD+eMuMZ87zf9
+dOLITzNy4ZQ5bb0Sr74MTnB8G2+NszKTc0QWbej09+CVgI+WXTik9KveCjCHk9hN
+AHFiRSdLOkKEW39lt2c0Ui2cFmuqqNh7o0JMcccMyj6D5KbvtwEwXlGjefVwaaZB
+RA+GsCyRxj3qrg+E
+-----END CERTIFICATE-----
+
+# Issuer: CN=e-Szigno Root CA 2017 O=Microsec Ltd.
+# Subject: CN=e-Szigno Root CA 2017 O=Microsec Ltd.
+# Label: "e-Szigno Root CA 2017"
+# Serial: 411379200276854331539784714
+# MD5 Fingerprint: de:1f:f6:9e:84:ae:a7:b4:21:ce:1e:58:7d:d1:84:98
+# SHA1 Fingerprint: 89:d4:83:03:4f:9e:9a:48:80:5f:72:37:d4:a9:a6:ef:cb:7c:1f:d1
+# SHA256 Fingerprint: be:b0:0b:30:83:9b:9b:c3:2c:32:e4:44:79:05:95:06:41:f2:64:21:b1:5e:d0:89:19:8b:51:8a:e2:ea:1b:99
+-----BEGIN CERTIFICATE-----
+MIICQDCCAeWgAwIBAgIMAVRI7yH9l1kN9QQKMAoGCCqGSM49BAMCMHExCzAJBgNV
+BAYTAkhVMREwDwYDVQQHDAhCdWRhcGVzdDEWMBQGA1UECgwNTWljcm9zZWMgTHRk
+LjEXMBUGA1UEYQwOVkFUSFUtMjM1ODQ0OTcxHjAcBgNVBAMMFWUtU3ppZ25vIFJv
+b3QgQ0EgMjAxNzAeFw0xNzA4MjIxMjA3MDZaFw00MjA4MjIxMjA3MDZaMHExCzAJ
+BgNVBAYTAkhVMREwDwYDVQQHDAhCdWRhcGVzdDEWMBQGA1UECgwNTWljcm9zZWMg
+THRkLjEXMBUGA1UEYQwOVkFUSFUtMjM1ODQ0OTcxHjAcBgNVBAMMFWUtU3ppZ25v
+IFJvb3QgQ0EgMjAxNzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABJbcPYrYsHtv
+xie+RJCxs1YVe45DJH0ahFnuY2iyxl6H0BVIHqiQrb1TotreOpCmYF9oMrWGQd+H
+Wyx7xf58etqjYzBhMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G
+A1UdDgQWBBSHERUI0arBeAyxr87GyZDvvzAEwDAfBgNVHSMEGDAWgBSHERUI0arB
+eAyxr87GyZDvvzAEwDAKBggqhkjOPQQDAgNJADBGAiEAtVfd14pVCzbhhkT61Nlo
+jbjcI4qKDdQvfepz7L9NbKgCIQDLpbQS+ue16M9+k/zzNY9vTlp8tLxOsvxyqltZ
++efcMQ==
+-----END CERTIFICATE-----
+
+# Issuer: O=CERTSIGN SA OU=certSIGN ROOT CA G2
+# Subject: O=CERTSIGN SA OU=certSIGN ROOT CA G2
+# Label: "certSIGN Root CA G2"
+# Serial: 313609486401300475190
+# MD5 Fingerprint: 8c:f1:75:8a:c6:19:cf:94:b7:f7:65:20:87:c3:97:c7
+# SHA1 Fingerprint: 26:f9:93:b4:ed:3d:28:27:b0:b9:4b:a7:e9:15:1d:a3:8d:92:e5:32
+# SHA256 Fingerprint: 65:7c:fe:2f:a7:3f:aa:38:46:25:71:f3:32:a2:36:3a:46:fc:e7:02:09:51:71:07:02:cd:fb:b6:ee:da:33:05
+-----BEGIN CERTIFICATE-----
+MIIFRzCCAy+gAwIBAgIJEQA0tk7GNi02MA0GCSqGSIb3DQEBCwUAMEExCzAJBgNV
+BAYTAlJPMRQwEgYDVQQKEwtDRVJUU0lHTiBTQTEcMBoGA1UECxMTY2VydFNJR04g
+Uk9PVCBDQSBHMjAeFw0xNzAyMDYwOTI3MzVaFw00MjAyMDYwOTI3MzVaMEExCzAJ
+BgNVBAYTAlJPMRQwEgYDVQQKEwtDRVJUU0lHTiBTQTEcMBoGA1UECxMTY2VydFNJ
+R04gUk9PVCBDQSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMDF
+dRmRfUR0dIf+DjuW3NgBFszuY5HnC2/OOwppGnzC46+CjobXXo9X69MhWf05N0Iw
+vlDqtg+piNguLWkh59E3GE59kdUWX2tbAMI5Qw02hVK5U2UPHULlj88F0+7cDBrZ
+uIt4ImfkabBoxTzkbFpG583H+u/E7Eu9aqSs/cwoUe+StCmrqzWaTOTECMYmzPhp
+n+Sc8CnTXPnGFiWeI8MgwT0PPzhAsP6CRDiqWhqKa2NYOLQV07YRaXseVO6MGiKs
+cpc/I1mbySKEwQdPzH/iV8oScLumZfNpdWO9lfsbl83kqK/20U6o2YpxJM02PbyW
+xPFsqa7lzw1uKA2wDrXKUXt4FMMgL3/7FFXhEZn91QqhngLjYl/rNUssuHLoPj1P
+rCy7Lobio3aP5ZMqz6WryFyNSwb/EkaseMsUBzXgqd+L6a8VTxaJW732jcZZroiF
+DsGJ6x9nxUWO/203Nit4ZoORUSs9/1F3dmKh7Gc+PoGD4FapUB8fepmrY7+EF3fx
+DTvf95xhszWYijqy7DwaNz9+j5LP2RIUZNoQAhVB/0/E6xyjyfqZ90bp4RjZsbgy
+LcsUDFDYg2WD7rlcz8sFWkz6GZdr1l0T08JcVLwyc6B49fFtHsufpaafItzRUZ6C
+eWRgKRM+o/1Pcmqr4tTluCRVLERLiohEnMqE0yo7AgMBAAGjQjBAMA8GA1UdEwEB
+/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSCIS1mxteg4BXrzkwJ
+d8RgnlRuAzANBgkqhkiG9w0BAQsFAAOCAgEAYN4auOfyYILVAzOBywaK8SJJ6ejq
+kX/GM15oGQOGO0MBzwdw5AgeZYWR5hEit/UCI46uuR59H35s5r0l1ZUa8gWmr4UC
+b6741jH/JclKyMeKqdmfS0mbEVeZkkMR3rYzpMzXjWR91M08KCy0mpbqTfXERMQl
+qiCA2ClV9+BB/AYm/7k29UMUA2Z44RGx2iBfRgB4ACGlHgAoYXhvqAEBj500mv/0
+OJD7uNGzcgbJceaBxXntC6Z58hMLnPddDnskk7RI24Zf3lCGeOdA5jGokHZwYa+c
+NywRtYK3qq4kNFtyDGkNzVmf9nGvnAvRCjj5BiKDUyUM/FHE5r7iOZULJK2v0ZXk
+ltd0ZGtxTgI8qoXzIKNDOXZbbFD+mpwUHmUUihW9o4JFWklWatKcsWMy5WHgUyIO
+pwpJ6st+H6jiYoD2EEVSmAYY3qXNL3+q1Ok+CHLsIwMCPKaq2LxndD0UF/tUSxfj
+03k9bWtJySgOLnRQvwzZRjoQhsmnP+mg7H/rpXdYaXHmgwo38oZJar55CJD2AhZk
+PuXaTH4MNMn5X7azKFGnpyuqSfqNZSlO42sTp5SjLVFteAxEy9/eCG/Oo2Sr05WE
+1LlSVHJ7liXMvGnjSG4N0MedJ5qq+BOS3R7fY581qRY27Iy4g/Q9iY/NtBde17MX
+QRBdJ3NghVdJIgc=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Trustwave Global Certification Authority O=Trustwave Holdings, Inc.
+# Subject: CN=Trustwave Global Certification Authority O=Trustwave Holdings, Inc.
+# Label: "Trustwave Global Certification Authority"
+# Serial: 1846098327275375458322922162
+# MD5 Fingerprint: f8:1c:18:2d:2f:ba:5f:6d:a1:6c:bc:c7:ab:91:c7:0e
+# SHA1 Fingerprint: 2f:8f:36:4f:e1:58:97:44:21:59:87:a5:2a:9a:d0:69:95:26:7f:b5
+# SHA256 Fingerprint: 97:55:20:15:f5:dd:fc:3c:87:88:c0:06:94:45:55:40:88:94:45:00:84:f1:00:86:70:86:bc:1a:2b:b5:8d:c8
+-----BEGIN CERTIFICATE-----
+MIIF2jCCA8KgAwIBAgIMBfcOhtpJ80Y1LrqyMA0GCSqGSIb3DQEBCwUAMIGIMQsw
+CQYDVQQGEwJVUzERMA8GA1UECAwISWxsaW5vaXMxEDAOBgNVBAcMB0NoaWNhZ28x
+ITAfBgNVBAoMGFRydXN0d2F2ZSBIb2xkaW5ncywgSW5jLjExMC8GA1UEAwwoVHJ1
+c3R3YXZlIEdsb2JhbCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0xNzA4MjMx
+OTM0MTJaFw00MjA4MjMxOTM0MTJaMIGIMQswCQYDVQQGEwJVUzERMA8GA1UECAwI
+SWxsaW5vaXMxEDAOBgNVBAcMB0NoaWNhZ28xITAfBgNVBAoMGFRydXN0d2F2ZSBI
+b2xkaW5ncywgSW5jLjExMC8GA1UEAwwoVHJ1c3R3YXZlIEdsb2JhbCBDZXJ0aWZp
+Y2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB
+ALldUShLPDeS0YLOvR29zd24q88KPuFd5dyqCblXAj7mY2Hf8g+CY66j96xz0Xzn
+swuvCAAJWX/NKSqIk4cXGIDtiLK0thAfLdZfVaITXdHG6wZWiYj+rDKd/VzDBcdu
+7oaJuogDnXIhhpCujwOl3J+IKMujkkkP7NAP4m1ET4BqstTnoApTAbqOl5F2brz8
+1Ws25kCI1nsvXwXoLG0R8+eyvpJETNKXpP7ScoFDB5zpET71ixpZfR9oWN0EACyW
+80OzfpgZdNmcc9kYvkHHNHnZ9GLCQ7mzJ7Aiy/k9UscwR7PJPrhq4ufogXBeQotP
+JqX+OsIgbrv4Fo7NDKm0G2x2EOFYeUY+VM6AqFcJNykbmROPDMjWLBz7BegIlT1l
+RtzuzWniTY+HKE40Cz7PFNm73bZQmq131BnW2hqIyE4bJ3XYsgjxroMwuREOzYfw
+hI0Vcnyh78zyiGG69Gm7DIwLdVcEuE4qFC49DxweMqZiNu5m4iK4BUBjECLzMx10
+coos9TkpoNPnG4CELcU9402x/RpvumUHO1jsQkUm+9jaJXLE9gCxInm943xZYkqc
+BW89zubWR2OZxiRvchLIrH+QtAuRcOi35hYQcRfO3gZPSEF9NUqjifLJS3tBEW1n
+twiYTOURGa5CgNz7kAXU+FDKvuStx8KU1xad5hePrzb7AgMBAAGjQjBAMA8GA1Ud
+EwEB/wQFMAMBAf8wHQYDVR0OBBYEFJngGWcNYtt2s9o9uFvo/ULSMQ6HMA4GA1Ud
+DwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAmHNw4rDT7TnsTGDZqRKGFx6W
+0OhUKDtkLSGm+J1WE2pIPU/HPinbbViDVD2HfSMF1OQc3Og4ZYbFdada2zUFvXfe
+uyk3QAUHw5RSn8pk3fEbK9xGChACMf1KaA0HZJDmHvUqoai7PF35owgLEQzxPy0Q
+lG/+4jSHg9bP5Rs1bdID4bANqKCqRieCNqcVtgimQlRXtpla4gt5kNdXElE1GYhB
+aCXUNxeEFfsBctyV3lImIJgm4nb1J2/6ADtKYdkNy1GTKv0WBpanI5ojSP5RvbbE
+sLFUzt5sQa0WZ37b/TjNuThOssFgy50X31ieemKyJo90lZvkWx3SD92YHJtZuSPT
+MaCm/zjdzyBP6VhWOmfD0faZmZ26NraAL4hHT4a/RDqA5Dccprrql5gR0IRiR2Qe
+qu5AvzSxnI9O4fKSTx+O856X3vOmeWqJcU9LJxdI/uz0UA9PSX3MReO9ekDFQdxh
+VicGaeVyQYHTtgGJoC86cnn+OjC/QezHYj6RS8fZMXZC+fc8Y+wmjHMMfRod6qh8
+h6jCJ3zhM0EPz8/8AKAigJ5Kp28AsEFFtyLKaEjFQqKu3R3y4G5OBVixwJAWKqQ9
+EEC+j2Jjg6mcgn0tAumDMHzLJ8n9HmYAsC7TIS+OMxZsmO0QqAfWzJPP29FpHOTK
+yeC2nOnOcXHebD8WpHk=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Trustwave Global ECC P256 Certification Authority O=Trustwave Holdings, Inc.
+# Subject: CN=Trustwave Global ECC P256 Certification Authority O=Trustwave Holdings, Inc.
+# Label: "Trustwave Global ECC P256 Certification Authority"
+# Serial: 4151900041497450638097112925
+# MD5 Fingerprint: 5b:44:e3:8d:5d:36:86:26:e8:0d:05:d2:59:a7:83:54
+# SHA1 Fingerprint: b4:90:82:dd:45:0c:be:8b:5b:b1:66:d3:e2:a4:08:26:cd:ed:42:cf
+# SHA256 Fingerprint: 94:5b:bc:82:5e:a5:54:f4:89:d1:fd:51:a7:3d:df:2e:a6:24:ac:70:19:a0:52:05:22:5c:22:a7:8c:cf:a8:b4
+-----BEGIN CERTIFICATE-----
+MIICYDCCAgegAwIBAgIMDWpfCD8oXD5Rld9dMAoGCCqGSM49BAMCMIGRMQswCQYD
+VQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAf
+BgNVBAoTGFRydXN0d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3
+YXZlIEdsb2JhbCBFQ0MgUDI1NiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0x
+NzA4MjMxOTM1MTBaFw00MjA4MjMxOTM1MTBaMIGRMQswCQYDVQQGEwJVUzERMA8G
+A1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRydXN0
+d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBF
+Q0MgUDI1NiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTBZMBMGByqGSM49AgEGCCqG
+SM49AwEHA0IABH77bOYj43MyCMpg5lOcunSNGLB4kFKA3TjASh3RqMyTpJcGOMoN
+FWLGjgEqZZ2q3zSRLoHB5DOSMcT9CTqmP62jQzBBMA8GA1UdEwEB/wQFMAMBAf8w
+DwYDVR0PAQH/BAUDAwcGADAdBgNVHQ4EFgQUo0EGrJBt0UrrdaVKEJmzsaGLSvcw
+CgYIKoZIzj0EAwIDRwAwRAIgB+ZU2g6gWrKuEZ+Hxbb/ad4lvvigtwjzRM4q3wgh
+DDcCIC0mA6AFvWvR9lz4ZcyGbbOcNEhjhAnFjXca4syc4XR7
+-----END CERTIFICATE-----
+
+# Issuer: CN=Trustwave Global ECC P384 Certification Authority O=Trustwave Holdings, Inc.
+# Subject: CN=Trustwave Global ECC P384 Certification Authority O=Trustwave Holdings, Inc.
+# Label: "Trustwave Global ECC P384 Certification Authority"
+# Serial: 2704997926503831671788816187
+# MD5 Fingerprint: ea:cf:60:c4:3b:b9:15:29:40:a1:97:ed:78:27:93:d6
+# SHA1 Fingerprint: e7:f3:a3:c8:cf:6f:c3:04:2e:6d:0e:67:32:c5:9e:68:95:0d:5e:d2
+# SHA256 Fingerprint: 55:90:38:59:c8:c0:c3:eb:b8:75:9e:ce:4e:25:57:22:5f:f5:75:8b:bd:38:eb:d4:82:76:60:1e:1b:d5:80:97
+-----BEGIN CERTIFICATE-----
+MIICnTCCAiSgAwIBAgIMCL2Fl2yZJ6SAaEc7MAoGCCqGSM49BAMDMIGRMQswCQYD
+VQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAf
+BgNVBAoTGFRydXN0d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3
+YXZlIEdsb2JhbCBFQ0MgUDM4NCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0x
+NzA4MjMxOTM2NDNaFw00MjA4MjMxOTM2NDNaMIGRMQswCQYDVQQGEwJVUzERMA8G
+A1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRydXN0
+d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBF
+Q0MgUDM4NCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTB2MBAGByqGSM49AgEGBSuB
+BAAiA2IABGvaDXU1CDFHBa5FmVXxERMuSvgQMSOjfoPTfygIOiYaOs+Xgh+AtycJ
+j9GOMMQKmw6sWASr9zZ9lCOkmwqKi6vr/TklZvFe/oyujUF5nQlgziip04pt89ZF
+1PKYhDhloKNDMEEwDwYDVR0TAQH/BAUwAwEB/zAPBgNVHQ8BAf8EBQMDBwYAMB0G
+A1UdDgQWBBRVqYSJ0sEyvRjLbKYHTsjnnb6CkDAKBggqhkjOPQQDAwNnADBkAjA3
+AZKXRRJ+oPM+rRk6ct30UJMDEr5E0k9BpIycnR+j9sKS50gU/k6bpZFXrsY3crsC
+MGclCrEMXu6pY5Jv5ZAL/mYiykf9ijH3g/56vxC+GCsej/YpHpRZ744hN8tRmKVu
+Sw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=NAVER Global Root Certification Authority O=NAVER BUSINESS PLATFORM Corp.
+# Subject: CN=NAVER Global Root Certification Authority O=NAVER BUSINESS PLATFORM Corp.
+# Label: "NAVER Global Root Certification Authority"
+# Serial: 9013692873798656336226253319739695165984492813
+# MD5 Fingerprint: c8:7e:41:f6:25:3b:f5:09:b3:17:e8:46:3d:bf:d0:9b
+# SHA1 Fingerprint: 8f:6b:f2:a9:27:4a:da:14:a0:c4:f4:8e:61:27:f9:c0:1e:78:5d:d1
+# SHA256 Fingerprint: 88:f4:38:dc:f8:ff:d1:fa:8f:42:91:15:ff:e5:f8:2a:e1:e0:6e:0c:70:c3:75:fa:ad:71:7b:34:a4:9e:72:65
+-----BEGIN CERTIFICATE-----
+MIIFojCCA4qgAwIBAgIUAZQwHqIL3fXFMyqxQ0Rx+NZQTQ0wDQYJKoZIhvcNAQEM
+BQAwaTELMAkGA1UEBhMCS1IxJjAkBgNVBAoMHU5BVkVSIEJVU0lORVNTIFBMQVRG
+T1JNIENvcnAuMTIwMAYDVQQDDClOQVZFUiBHbG9iYWwgUm9vdCBDZXJ0aWZpY2F0
+aW9uIEF1dGhvcml0eTAeFw0xNzA4MTgwODU4NDJaFw0zNzA4MTgyMzU5NTlaMGkx
+CzAJBgNVBAYTAktSMSYwJAYDVQQKDB1OQVZFUiBCVVNJTkVTUyBQTEFURk9STSBD
+b3JwLjEyMDAGA1UEAwwpTkFWRVIgR2xvYmFsIFJvb3QgQ2VydGlmaWNhdGlvbiBB
+dXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC21PGTXLVA
+iQqrDZBbUGOukJR0F0Vy1ntlWilLp1agS7gvQnXp2XskWjFlqxcX0TM62RHcQDaH
+38dq6SZeWYp34+hInDEW+j6RscrJo+KfziFTowI2MMtSAuXaMl3Dxeb57hHHi8lE
+HoSTGEq0n+USZGnQJoViAbbJAh2+g1G7XNr4rRVqmfeSVPc0W+m/6imBEtRTkZaz
+kVrd/pBzKPswRrXKCAfHcXLJZtM0l/aM9BhK4dA9WkW2aacp+yPOiNgSnABIqKYP
+szuSjXEOdMWLyEz59JuOuDxp7W87UC9Y7cSw0BwbagzivESq2M0UXZR4Yb8Obtoq
+vC8MC3GmsxY/nOb5zJ9TNeIDoKAYv7vxvvTWjIcNQvcGufFt7QSUqP620wbGQGHf
+nZ3zVHbOUzoBppJB7ASjjw2i1QnK1sua8e9DXcCrpUHPXFNwcMmIpi3Ua2FzUCaG
+YQ5fG8Ir4ozVu53BA0K6lNpfqbDKzE0K70dpAy8i+/Eozr9dUGWokG2zdLAIx6yo
+0es+nPxdGoMuK8u180SdOqcXYZaicdNwlhVNt0xz7hlcxVs+Qf6sdWA7G2POAN3a
+CJBitOUt7kinaxeZVL6HSuOpXgRM6xBtVNbv8ejyYhbLgGvtPe31HzClrkvJE+2K
+AQHJuFFYwGY6sWZLxNUxAmLpdIQM201GLQIDAQABo0IwQDAdBgNVHQ4EFgQU0p+I
+36HNLL3s9TsBAZMzJ7LrYEswDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMB
+Af8wDQYJKoZIhvcNAQEMBQADggIBADLKgLOdPVQG3dLSLvCkASELZ0jKbY7gyKoN
+qo0hV4/GPnrK21HUUrPUloSlWGB/5QuOH/XcChWB5Tu2tyIvCZwTFrFsDDUIbatj
+cu3cvuzHV+YwIHHW1xDBE1UBjCpD5EHxzzp6U5LOogMFDTjfArsQLtk70pt6wKGm
++LUx5vR1yblTmXVHIloUFcd4G7ad6Qz4G3bxhYTeodoS76TiEJd6eN4MUZeoIUCL
+hr0N8F5OSza7OyAfikJW4Qsav3vQIkMsRIz75Sq0bBwcupTgE34h5prCy8VCZLQe
+lHsIJchxzIdFV4XTnyliIoNRlwAYl3dqmJLJfGBs32x9SuRwTMKeuB330DTHD8z7
+p/8Dvq1wkNoL3chtl1+afwkyQf3NosxabUzyqkn+Zvjp2DXrDige7kgvOtB5CTh8
+piKCk5XQA76+AqAF3SAi428diDRgxuYKuQl1C/AH6GmWNcf7I4GOODm4RStDeKLR
+LBT/DShycpWbXgnbiUSYqqFJu3FS8r/2/yehNq+4tneI3TqkbZs0kNwUXTC/t+sX
+5Ie3cdCh13cV1ELX8vMxmV2b3RZtP+oGI/hGoiLtk/bdmuYqh7GYVPEi92tF4+KO
+dh2ajcQGjTa3FPOdVGm3jjzVpG2Tgbet9r1ke8LJaDmgkpzNNIaRkPpkUZ3+/uul
+9XXeifdy
+-----END CERTIFICATE-----
+
+# Issuer: CN=AC RAIZ FNMT-RCM SERVIDORES SEGUROS O=FNMT-RCM OU=Ceres
+# Subject: CN=AC RAIZ FNMT-RCM SERVIDORES SEGUROS O=FNMT-RCM OU=Ceres
+# Label: "AC RAIZ FNMT-RCM SERVIDORES SEGUROS"
+# Serial: 131542671362353147877283741781055151509
+# MD5 Fingerprint: 19:36:9c:52:03:2f:d2:d1:bb:23:cc:dd:1e:12:55:bb
+# SHA1 Fingerprint: 62:ff:d9:9e:c0:65:0d:03:ce:75:93:d2:ed:3f:2d:32:c9:e3:e5:4a
+# SHA256 Fingerprint: 55:41:53:b1:3d:2c:f9:dd:b7:53:bf:be:1a:4e:0a:e0:8d:0a:a4:18:70:58:fe:60:a2:b8:62:b2:e4:b8:7b:cb
+-----BEGIN CERTIFICATE-----
+MIICbjCCAfOgAwIBAgIQYvYybOXE42hcG2LdnC6dlTAKBggqhkjOPQQDAzB4MQsw
+CQYDVQQGEwJFUzERMA8GA1UECgwIRk5NVC1SQ00xDjAMBgNVBAsMBUNlcmVzMRgw
+FgYDVQRhDA9WQVRFUy1RMjgyNjAwNEoxLDAqBgNVBAMMI0FDIFJBSVogRk5NVC1S
+Q00gU0VSVklET1JFUyBTRUdVUk9TMB4XDTE4MTIyMDA5MzczM1oXDTQzMTIyMDA5
+MzczM1oweDELMAkGA1UEBhMCRVMxETAPBgNVBAoMCEZOTVQtUkNNMQ4wDAYDVQQL
+DAVDZXJlczEYMBYGA1UEYQwPVkFURVMtUTI4MjYwMDRKMSwwKgYDVQQDDCNBQyBS
+QUlaIEZOTVQtUkNNIFNFUlZJRE9SRVMgU0VHVVJPUzB2MBAGByqGSM49AgEGBSuB
+BAAiA2IABPa6V1PIyqvfNkpSIeSX0oNnnvBlUdBeh8dHsVnyV0ebAAKTRBdp20LH
+sbI6GA60XYyzZl2hNPk2LEnb80b8s0RpRBNm/dfF/a82Tc4DTQdxz69qBdKiQ1oK
+Um8BA06Oi6NCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD
+VR0OBBYEFAG5L++/EYZg8k/QQW6rcx/n0m5JMAoGCCqGSM49BAMDA2kAMGYCMQCu
+SuMrQMN0EfKVrRYj3k4MGuZdpSRea0R7/DjiT8ucRRcRTBQnJlU5dUoDzBOQn5IC
+MQD6SmxgiHPz7riYYqnOK8LZiqZwMR2vsJRM60/G49HzYqc8/5MuB1xJAWdpEgJy
+v+c=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign Root R46 O=GlobalSign nv-sa
+# Subject: CN=GlobalSign Root R46 O=GlobalSign nv-sa
+# Label: "GlobalSign Root R46"
+# Serial: 1552617688466950547958867513931858518042577
+# MD5 Fingerprint: c4:14:30:e4:fa:66:43:94:2a:6a:1b:24:5f:19:d0:ef
+# SHA1 Fingerprint: 53:a2:b0:4b:ca:6b:d6:45:e6:39:8a:8e:c4:0d:d2:bf:77:c3:a2:90
+# SHA256 Fingerprint: 4f:a3:12:6d:8d:3a:11:d1:c4:85:5a:4f:80:7c:ba:d6:cf:91:9d:3a:5a:88:b0:3b:ea:2c:63:72:d9:3c:40:c9
+-----BEGIN CERTIFICATE-----
+MIIFWjCCA0KgAwIBAgISEdK7udcjGJ5AXwqdLdDfJWfRMA0GCSqGSIb3DQEBDAUA
+MEYxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRwwGgYD
+VQQDExNHbG9iYWxTaWduIFJvb3QgUjQ2MB4XDTE5MDMyMDAwMDAwMFoXDTQ2MDMy
+MDAwMDAwMFowRjELMAkGA1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYt
+c2ExHDAaBgNVBAMTE0dsb2JhbFNpZ24gUm9vdCBSNDYwggIiMA0GCSqGSIb3DQEB
+AQUAA4ICDwAwggIKAoICAQCsrHQy6LNl5brtQyYdpokNRbopiLKkHWPd08EsCVeJ
+OaFV6Wc0dwxu5FUdUiXSE2te4R2pt32JMl8Nnp8semNgQB+msLZ4j5lUlghYruQG
+vGIFAha/r6gjA7aUD7xubMLL1aa7DOn2wQL7Id5m3RerdELv8HQvJfTqa1VbkNud
+316HCkD7rRlr+/fKYIje2sGP1q7Vf9Q8g+7XFkyDRTNrJ9CG0Bwta/OrffGFqfUo
+0q3v84RLHIf8E6M6cqJaESvWJ3En7YEtbWaBkoe0G1h6zD8K+kZPTXhc+CtI4wSE
+y132tGqzZfxCnlEmIyDLPRT5ge1lFgBPGmSXZgjPjHvjK8Cd+RTyG/FWaha/LIWF
+zXg4mutCagI0GIMXTpRW+LaCtfOW3T3zvn8gdz57GSNrLNRyc0NXfeD412lPFzYE
++cCQYDdF3uYM2HSNrpyibXRdQr4G9dlkbgIQrImwTDsHTUB+JMWKmIJ5jqSngiCN
+I/onccnfxkF0oE32kRbcRoxfKWMxWXEM2G/CtjJ9++ZdU6Z+Ffy7dXxd7Pj2Fxzs
+x2sZy/N78CsHpdlseVR2bJ0cpm4O6XkMqCNqo98bMDGfsVR7/mrLZqrcZdCinkqa
+ByFrgY/bxFn63iLABJzjqls2k+g9vXqhnQt2sQvHnf3PmKgGwvgqo6GDoLclcqUC
+4wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
+HQ4EFgQUA1yrc4GHqMywptWU4jaWSf8FmSwwDQYJKoZIhvcNAQEMBQADggIBAHx4
+7PYCLLtbfpIrXTncvtgdokIzTfnvpCo7RGkerNlFo048p9gkUbJUHJNOxO97k4Vg
+JuoJSOD1u8fpaNK7ajFxzHmuEajwmf3lH7wvqMxX63bEIaZHU1VNaL8FpO7XJqti
+2kM3S+LGteWygxk6x9PbTZ4IevPuzz5i+6zoYMzRx6Fcg0XERczzF2sUyQQCPtIk
+pnnpHs6i58FZFZ8d4kuaPp92CC1r2LpXFNqD6v6MVenQTqnMdzGxRBF6XLE+0xRF
+FRhiJBPSy03OXIPBNvIQtQ6IbbjhVp+J3pZmOUdkLG5NrmJ7v2B0GbhWrJKsFjLt
+rWhV/pi60zTe9Mlhww6G9kuEYO4Ne7UyWHmRVSyBQ7N0H3qqJZ4d16GLuc1CLgSk
+ZoNNiTW2bKg2SnkheCLQQrzRQDGQob4Ez8pn7fXwgNNgyYMqIgXQBztSvwyeqiv5
+u+YfjyW6hY0XHgL+XVAEV8/+LbzvXMAaq7afJMbfc2hIkCwU9D9SGuTSyxTDYWnP
+4vkYxboznxSjBF25cfe1lNj2M8FawTSLfJvdkzrnE6JwYZ+vj+vYxXX4M2bUdGc6
+N3ec592kD3ZDZopD8p/7DEJ4Y9HiD2971KE9dJeFt0g5QdYg/NA6s/rob8SKunE3
+vouXsXgxT7PntgMTzlSdriVZzH81Xwj3QEUxeCp6
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign Root E46 O=GlobalSign nv-sa
+# Subject: CN=GlobalSign Root E46 O=GlobalSign nv-sa
+# Label: "GlobalSign Root E46"
+# Serial: 1552617690338932563915843282459653771421763
+# MD5 Fingerprint: b5:b8:66:ed:de:08:83:e3:c9:e2:01:34:06:ac:51:6f
+# SHA1 Fingerprint: 39:b4:6c:d5:fe:80:06:eb:e2:2f:4a:bb:08:33:a0:af:db:b9:dd:84
+# SHA256 Fingerprint: cb:b9:c4:4d:84:b8:04:3e:10:50:ea:31:a6:9f:51:49:55:d7:bf:d2:e2:c6:b4:93:01:01:9a:d6:1d:9f:50:58
+-----BEGIN CERTIFICATE-----
+MIICCzCCAZGgAwIBAgISEdK7ujNu1LzmJGjFDYQdmOhDMAoGCCqGSM49BAMDMEYx
+CzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRwwGgYDVQQD
+ExNHbG9iYWxTaWduIFJvb3QgRTQ2MB4XDTE5MDMyMDAwMDAwMFoXDTQ2MDMyMDAw
+MDAwMFowRjELMAkGA1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2Ex
+HDAaBgNVBAMTE0dsb2JhbFNpZ24gUm9vdCBFNDYwdjAQBgcqhkjOPQIBBgUrgQQA
+IgNiAAScDrHPt+ieUnd1NPqlRqetMhkytAepJ8qUuwzSChDH2omwlwxwEwkBjtjq
+R+q+soArzfwoDdusvKSGN+1wCAB16pMLey5SnCNoIwZD7JIvU4Tb+0cUB+hflGdd
+yXqBPCCjQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud
+DgQWBBQxCpCPtsad0kRLgLWi5h+xEk8blTAKBggqhkjOPQQDAwNoADBlAjEA31SQ
+7Zvvi5QCkxeCmb6zniz2C5GMn0oUsfZkvLtoURMMA/cVi4RguYv/Uo7njLwcAjA8
++RHUjE7AwWHCFUyqqx0LMV87HOIAl0Qx5v5zli/altP+CAezNIm8BZ/3Hobui3A=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GLOBALTRUST 2020 O=e-commerce monitoring GmbH
+# Subject: CN=GLOBALTRUST 2020 O=e-commerce monitoring GmbH
+# Label: "GLOBALTRUST 2020"
+# Serial: 109160994242082918454945253
+# MD5 Fingerprint: 8a:c7:6f:cb:6d:e3:cc:a2:f1:7c:83:fa:0e:78:d7:e8
+# SHA1 Fingerprint: d0:67:c1:13:51:01:0c:aa:d0:c7:6a:65:37:31:16:26:4f:53:71:a2
+# SHA256 Fingerprint: 9a:29:6a:51:82:d1:d4:51:a2:e3:7f:43:9b:74:da:af:a2:67:52:33:29:f9:0f:9a:0d:20:07:c3:34:e2:3c:9a
+-----BEGIN CERTIFICATE-----
+MIIFgjCCA2qgAwIBAgILWku9WvtPilv6ZeUwDQYJKoZIhvcNAQELBQAwTTELMAkG
+A1UEBhMCQVQxIzAhBgNVBAoTGmUtY29tbWVyY2UgbW9uaXRvcmluZyBHbWJIMRkw
+FwYDVQQDExBHTE9CQUxUUlVTVCAyMDIwMB4XDTIwMDIxMDAwMDAwMFoXDTQwMDYx
+MDAwMDAwMFowTTELMAkGA1UEBhMCQVQxIzAhBgNVBAoTGmUtY29tbWVyY2UgbW9u
+aXRvcmluZyBHbWJIMRkwFwYDVQQDExBHTE9CQUxUUlVTVCAyMDIwMIICIjANBgkq
+hkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAri5WrRsc7/aVj6B3GyvTY4+ETUWiD59b
+RatZe1E0+eyLinjF3WuvvcTfk0Uev5E4C64OFudBc/jbu9G4UeDLgztzOG53ig9Z
+YybNpyrOVPu44sB8R85gfD+yc/LAGbaKkoc1DZAoouQVBGM+uq/ufF7MpotQsjj3
+QWPKzv9pj2gOlTblzLmMCcpL3TGQlsjMH/1WljTbjhzqLL6FLmPdqqmV0/0plRPw
+yJiT2S0WR5ARg6I6IqIoV6Lr/sCMKKCmfecqQjuCgGOlYx8ZzHyyZqjC0203b+J+
+BlHZRYQfEs4kUmSFC0iAToexIiIwquuuvuAC4EDosEKAA1GqtH6qRNdDYfOiaxaJ
+SaSjpCuKAsR49GiKweR6NrFvG5Ybd0mN1MkGco/PU+PcF4UgStyYJ9ORJitHHmkH
+r96i5OTUawuzXnzUJIBHKWk7buis/UDr2O1xcSvy6Fgd60GXIsUf1DnQJ4+H4xj0
+4KlGDfV0OoIu0G4skaMxXDtG6nsEEFZegB31pWXogvziB4xiRfUg3kZwhqG8k9Me
+dKZssCz3AwyIDMvUclOGvGBG85hqwvG/Q/lwIHfKN0F5VVJjjVsSn8VoxIidrPIw
+q7ejMZdnrY8XD2zHc+0klGvIg5rQmjdJBKuxFshsSUktq6HQjJLyQUp5ISXbY9e2
+nKd+Qmn7OmMCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
+AQYwHQYDVR0OBBYEFNwuH9FhN3nkq9XVsxJxaD1qaJwiMB8GA1UdIwQYMBaAFNwu
+H9FhN3nkq9XVsxJxaD1qaJwiMA0GCSqGSIb3DQEBCwUAA4ICAQCR8EICaEDuw2jA
+VC/f7GLDw56KoDEoqoOOpFaWEhCGVrqXctJUMHytGdUdaG/7FELYjQ7ztdGl4wJC
+XtzoRlgHNQIw4Lx0SsFDKv/bGtCwr2zD/cuz9X9tAy5ZVp0tLTWMstZDFyySCstd
+6IwPS3BD0IL/qMy/pJTAvoe9iuOTe8aPmxadJ2W8esVCgmxcB9CpwYhgROmYhRZf
++I/KARDOJcP5YBugxZfD0yyIMaK9MOzQ0MAS8cE54+X1+NZK3TTN+2/BT+MAi1bi
+kvcoskJ3ciNnxz8RFbLEAwW+uxF7Cr+obuf/WEPPm2eggAe2HcqtbepBEX4tdJP7
+wry+UUTF72glJ4DjyKDUEuzZpTcdN3y0kcra1LGWge9oXHYQSa9+pTeAsRxSvTOB
+TI/53WXZFM2KJVj04sWDpQmQ1GwUY7VA3+vA/MRYfg0UFodUJ25W5HCEuGwyEn6C
+MUO+1918oa2u1qsgEu8KwxCMSZY13At1XrFP1U80DhEgB3VDRemjEdqso5nCtnkn
+4rnvyOL2NSl6dPrFf4IFYqYK6miyeUcGbvJXqBUzxvd4Sj1Ce2t+/vdG6tHrju+I
+aFvowdlxfv1k7/9nR4hYJS8+hge9+6jlgqispdNpQ80xiEmEU5LAsTkbOYMBMMTy
+qfrQA71yN2BWHzZ8vTmR9W0Nv3vXkg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=ANF Secure Server Root CA O=ANF Autoridad de Certificacion OU=ANF CA Raiz
+# Subject: CN=ANF Secure Server Root CA O=ANF Autoridad de Certificacion OU=ANF CA Raiz
+# Label: "ANF Secure Server Root CA"
+# Serial: 996390341000653745
+# MD5 Fingerprint: 26:a6:44:5a:d9:af:4e:2f:b2:1d:b6:65:b0:4e:e8:96
+# SHA1 Fingerprint: 5b:6e:68:d0:cc:15:b6:a0:5f:1e:c1:5f:ae:02:fc:6b:2f:5d:6f:74
+# SHA256 Fingerprint: fb:8f:ec:75:91:69:b9:10:6b:1e:51:16:44:c6:18:c5:13:04:37:3f:6c:06:43:08:8d:8b:ef:fd:1b:99:75:99
+-----BEGIN CERTIFICATE-----
+MIIF7zCCA9egAwIBAgIIDdPjvGz5a7EwDQYJKoZIhvcNAQELBQAwgYQxEjAQBgNV
+BAUTCUc2MzI4NzUxMDELMAkGA1UEBhMCRVMxJzAlBgNVBAoTHkFORiBBdXRvcmlk
+YWQgZGUgQ2VydGlmaWNhY2lvbjEUMBIGA1UECxMLQU5GIENBIFJhaXoxIjAgBgNV
+BAMTGUFORiBTZWN1cmUgU2VydmVyIFJvb3QgQ0EwHhcNMTkwOTA0MTAwMDM4WhcN
+MzkwODMwMTAwMDM4WjCBhDESMBAGA1UEBRMJRzYzMjg3NTEwMQswCQYDVQQGEwJF
+UzEnMCUGA1UEChMeQU5GIEF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uMRQwEgYD
+VQQLEwtBTkYgQ0EgUmFpejEiMCAGA1UEAxMZQU5GIFNlY3VyZSBTZXJ2ZXIgUm9v
+dCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANvrayvmZFSVgpCj
+cqQZAZ2cC4Ffc0m6p6zzBE57lgvsEeBbphzOG9INgxwruJ4dfkUyYA8H6XdYfp9q
+yGFOtibBTI3/TO80sh9l2Ll49a2pcbnvT1gdpd50IJeh7WhM3pIXS7yr/2WanvtH
+2Vdy8wmhrnZEE26cLUQ5vPnHO6RYPUG9tMJJo8gN0pcvB2VSAKduyK9o7PQUlrZX
+H1bDOZ8rbeTzPvY1ZNoMHKGESy9LS+IsJJ1tk0DrtSOOMspvRdOoiXsezx76W0OL
+zc2oD2rKDF65nkeP8Nm2CgtYZRczuSPkdxl9y0oukntPLxB3sY0vaJxizOBQ+OyR
+p1RMVwnVdmPF6GUe7m1qzwmd+nxPrWAI/VaZDxUse6mAq4xhj0oHdkLePfTdsiQz
+W7i1o0TJrH93PB0j7IKppuLIBkwC/qxcmZkLLxCKpvR/1Yd0DVlJRfbwcVw5Kda/
+SiOL9V8BY9KHcyi1Swr1+KuCLH5zJTIdC2MKF4EA/7Z2Xue0sUDKIbvVgFHlSFJn
+LNJhiQcND85Cd8BEc5xEUKDbEAotlRyBr+Qc5RQe8TZBAQIvfXOn3kLMTOmJDVb3
+n5HUA8ZsyY/b2BzgQJhdZpmYgG4t/wHFzstGH6wCxkPmrqKEPMVOHj1tyRRM4y5B
+u8o5vzY8KhmqQYdOpc5LMnndkEl/AgMBAAGjYzBhMB8GA1UdIwQYMBaAFJxf0Gxj
+o1+TypOYCK2Mh6UsXME3MB0GA1UdDgQWBBScX9BsY6Nfk8qTmAitjIelLFzBNzAO
+BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOC
+AgEATh65isagmD9uw2nAalxJUqzLK114OMHVVISfk/CHGT0sZonrDUL8zPB1hT+L
+9IBdeeUXZ701guLyPI59WzbLWoAAKfLOKyzxj6ptBZNscsdW699QIyjlRRA96Gej
+rw5VD5AJYu9LWaL2U/HANeQvwSS9eS9OICI7/RogsKQOLHDtdD+4E5UGUcjohybK
+pFtqFiGS3XNgnhAY3jyB6ugYw3yJ8otQPr0R4hUDqDZ9MwFsSBXXiJCZBMXM5gf0
+vPSQ7RPi6ovDj6MzD8EpTBNO2hVWcXNyglD2mjN8orGoGjR0ZVzO0eurU+AagNjq
+OknkJjCb5RyKqKkVMoaZkgoQI1YS4PbOTOK7vtuNknMBZi9iPrJyJ0U27U1W45eZ
+/zo1PqVUSlJZS2Db7v54EX9K3BR5YLZrZAPbFYPhor72I5dQ8AkzNqdxliXzuUJ9
+2zg/LFis6ELhDtjTO0wugumDLmsx2d1Hhk9tl5EuT+IocTUW0fJz/iUrB0ckYyfI
++PbZa/wSMVYIwFNCr5zQM378BvAxRAMU8Vjq8moNqRGyg77FGr8H6lnco4g175x2
+MjxNBiLOFeXdntiP2t7SxDnlF4HPOEfrf4htWRvfn0IUrn7PqLBmZdo3r5+qPeoo
+tt7VMVgWglvquxl1AnMaykgaIZOQCo6ThKd9OyMYkomgjaw=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certum EC-384 CA O=Asseco Data Systems S.A. OU=Certum Certification Authority
+# Subject: CN=Certum EC-384 CA O=Asseco Data Systems S.A. OU=Certum Certification Authority
+# Label: "Certum EC-384 CA"
+# Serial: 160250656287871593594747141429395092468
+# MD5 Fingerprint: b6:65:b3:96:60:97:12:a1:ec:4e:e1:3d:a3:c6:c9:f1
+# SHA1 Fingerprint: f3:3e:78:3c:ac:df:f4:a2:cc:ac:67:55:69:56:d7:e5:16:3c:e1:ed
+# SHA256 Fingerprint: 6b:32:80:85:62:53:18:aa:50:d1:73:c9:8d:8b:da:09:d5:7e:27:41:3d:11:4c:f7:87:a0:f5:d0:6c:03:0c:f6
+-----BEGIN CERTIFICATE-----
+MIICZTCCAeugAwIBAgIQeI8nXIESUiClBNAt3bpz9DAKBggqhkjOPQQDAzB0MQsw
+CQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBTLkEuMScw
+JQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxGTAXBgNVBAMT
+EENlcnR1bSBFQy0zODQgQ0EwHhcNMTgwMzI2MDcyNDU0WhcNNDMwMzI2MDcyNDU0
+WjB0MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBT
+LkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxGTAX
+BgNVBAMTEENlcnR1bSBFQy0zODQgQ0EwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAATE
+KI6rGFtqvm5kN2PkzeyrOvfMobgOgknXhimfoZTy42B4mIF4Bk3y7JoOV2CDn7Tm
+Fy8as10CW4kjPMIRBSqniBMY81CE1700LCeJVf/OTOffph8oxPBUw7l8t1Ot68Kj
+QjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI0GZnQkdjrzife81r1HfS+8
+EF9LMA4GA1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNoADBlAjADVS2m5hjEfO/J
+UG7BJw+ch69u1RsIGL2SKcHvlJF40jocVYli5RsJHrpka/F2tNQCMQC0QoSZ/6vn
+nvuRlydd3LBbMHHOXjgaatkl5+r3YZJW+OraNsKHZZYuciUvf9/DE8k=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certum Trusted Root CA O=Asseco Data Systems S.A. OU=Certum Certification Authority
+# Subject: CN=Certum Trusted Root CA O=Asseco Data Systems S.A. OU=Certum Certification Authority
+# Label: "Certum Trusted Root CA"
+# Serial: 40870380103424195783807378461123655149
+# MD5 Fingerprint: 51:e1:c2:e7:fe:4c:84:af:59:0e:2f:f4:54:6f:ea:29
+# SHA1 Fingerprint: c8:83:44:c0:18:ae:9f:cc:f1:87:b7:8f:22:d1:c5:d7:45:84:ba:e5
+# SHA256 Fingerprint: fe:76:96:57:38:55:77:3e:37:a9:5e:7a:d4:d9:cc:96:c3:01:57:c1:5d:31:76:5b:a9:b1:57:04:e1:ae:78:fd
+-----BEGIN CERTIFICATE-----
+MIIFwDCCA6igAwIBAgIQHr9ZULjJgDdMBvfrVU+17TANBgkqhkiG9w0BAQ0FADB6
+MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBTLkEu
+MScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxHzAdBgNV
+BAMTFkNlcnR1bSBUcnVzdGVkIFJvb3QgQ0EwHhcNMTgwMzE2MTIxMDEzWhcNNDMw
+MzE2MTIxMDEzWjB6MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEg
+U3lzdGVtcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRo
+b3JpdHkxHzAdBgNVBAMTFkNlcnR1bSBUcnVzdGVkIFJvb3QgQ0EwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQDRLY67tzbqbTeRn06TpwXkKQMlzhyC93yZ
+n0EGze2jusDbCSzBfN8pfktlL5On1AFrAygYo9idBcEq2EXxkd7fO9CAAozPOA/q
+p1x4EaTByIVcJdPTsuclzxFUl6s1wB52HO8AU5853BSlLCIls3Jy/I2z5T4IHhQq
+NwuIPMqw9MjCoa68wb4pZ1Xi/K1ZXP69VyywkI3C7Te2fJmItdUDmj0VDT06qKhF
+8JVOJVkdzZhpu9PMMsmN74H+rX2Ju7pgE8pllWeg8xn2A1bUatMn4qGtg/BKEiJ3
+HAVz4hlxQsDsdUaakFjgao4rpUYwBI4Zshfjvqm6f1bxJAPXsiEodg42MEx51UGa
+mqi4NboMOvJEGyCI98Ul1z3G4z5D3Yf+xOr1Uz5MZf87Sst4WmsXXw3Hw09Omiqi
+7VdNIuJGmj8PkTQkfVXjjJU30xrwCSss0smNtA0Aq2cpKNgB9RkEth2+dv5yXMSF
+ytKAQd8FqKPVhJBPC/PgP5sZ0jeJP/J7UhyM9uH3PAeXjA6iWYEMspA90+NZRu0P
+qafegGtaqge2Gcu8V/OXIXoMsSt0Puvap2ctTMSYnjYJdmZm/Bo/6khUHL4wvYBQ
+v3y1zgD2DGHZ5yQD4OMBgQ692IU0iL2yNqh7XAjlRICMb/gv1SHKHRzQ+8S1h9E6
+Tsd2tTVItQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSM+xx1
+vALTn04uSNn5YFSqxLNP+jAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQENBQAD
+ggIBAEii1QALLtA/vBzVtVRJHlpr9OTy4EA34MwUe7nJ+jW1dReTagVphZzNTxl4
+WxmB82M+w85bj/UvXgF2Ez8sALnNllI5SW0ETsXpD4YN4fqzX4IS8TrOZgYkNCvo
+zMrnadyHncI013nR03e4qllY/p0m+jiGPp2Kh2RX5Rc64vmNueMzeMGQ2Ljdt4NR
+5MTMI9UGfOZR0800McD2RrsLrfw9EAUqO0qRJe6M1ISHgCq8CYyqOhNf6DR5UMEQ
+GfnTKB7U0VEwKbOukGfWHwpjscWpxkIxYxeU72nLL/qMFH3EQxiJ2fAyQOaA4kZf
+5ePBAFmo+eggvIksDkc0C+pXwlM2/KfUrzHN/gLldfq5Jwn58/U7yn2fqSLLiMmq
+0Uc9NneoWWRrJ8/vJ8HjJLWG965+Mk2weWjROeiQWMODvA8s1pfrzgzhIMfatz7D
+P78v3DSk+yshzWePS/Tj6tQ/50+6uaWTRRxmHyH6ZF5v4HaUMst19W7l9o/HuKTM
+qJZ9ZPskWkoDbGs4xugDQ5r3V7mzKWmTOPQD8rv7gmsHINFSH5pkAnuYZttcTVoP
+0ISVoDwUQwbKytu4QTbaakRnh6+v40URFWkIsr4WOZckbxJF0WddCajJFdr60qZf
+E2Efv4WstK2tBZQIgx51F9NxO5NQI1mg7TyRVJ12AMXDuDjb
+-----END CERTIFICATE-----
+
+# Issuer: CN=TunTrust Root CA O=Agence Nationale de Certification Electronique
+# Subject: CN=TunTrust Root CA O=Agence Nationale de Certification Electronique
+# Label: "TunTrust Root CA"
+# Serial: 108534058042236574382096126452369648152337120275
+# MD5 Fingerprint: 85:13:b9:90:5b:36:5c:b6:5e:b8:5a:f8:e0:31:57:b4
+# SHA1 Fingerprint: cf:e9:70:84:0f:e0:73:0f:9d:f6:0c:7f:2c:4b:ee:20:46:34:9c:bb
+# SHA256 Fingerprint: 2e:44:10:2a:b5:8c:b8:54:19:45:1c:8e:19:d9:ac:f3:66:2c:af:bc:61:4b:6a:53:96:0a:30:f7:d0:e2:eb:41
+-----BEGIN CERTIFICATE-----
+MIIFszCCA5ugAwIBAgIUEwLV4kBMkkaGFmddtLu7sms+/BMwDQYJKoZIhvcNAQEL
+BQAwYTELMAkGA1UEBhMCVE4xNzA1BgNVBAoMLkFnZW5jZSBOYXRpb25hbGUgZGUg
+Q2VydGlmaWNhdGlvbiBFbGVjdHJvbmlxdWUxGTAXBgNVBAMMEFR1blRydXN0IFJv
+b3QgQ0EwHhcNMTkwNDI2MDg1NzU2WhcNNDQwNDI2MDg1NzU2WjBhMQswCQYDVQQG
+EwJUTjE3MDUGA1UECgwuQWdlbmNlIE5hdGlvbmFsZSBkZSBDZXJ0aWZpY2F0aW9u
+IEVsZWN0cm9uaXF1ZTEZMBcGA1UEAwwQVHVuVHJ1c3QgUm9vdCBDQTCCAiIwDQYJ
+KoZIhvcNAQEBBQADggIPADCCAgoCggIBAMPN0/y9BFPdDCA61YguBUtB9YOCfvdZ
+n56eY+hz2vYGqU8ftPkLHzmMmiDQfgbU7DTZhrx1W4eI8NLZ1KMKsmwb60ksPqxd
+2JQDoOw05TDENX37Jk0bbjBU2PWARZw5rZzJJQRNmpA+TkBuimvNKWfGzC3gdOgF
+VwpIUPp6Q9p+7FuaDmJ2/uqdHYVy7BG7NegfJ7/Boce7SBbdVtfMTqDhuazb1YMZ
+GoXRlJfXyqNlC/M4+QKu3fZnz8k/9YosRxqZbwUN/dAdgjH8KcwAWJeRTIAAHDOF
+li/LQcKLEITDCSSJH7UP2dl3RxiSlGBcx5kDPP73lad9UKGAwqmDrViWVSHbhlnU
+r8a83YFuB9tgYv7sEG7aaAH0gxupPqJbI9dkxt/con3YS7qC0lH4Zr8GRuR5KiY2
+eY8fTpkdso8MDhz/yV3A/ZAQprE38806JG60hZC/gLkMjNWb1sjxVj8agIl6qeIb
+MlEsPvLfe/ZdeikZjuXIvTZxi11Mwh0/rViizz1wTaZQmCXcI/m4WEEIcb9PuISg
+jwBUFfyRbVinljvrS5YnzWuioYasDXxU5mZMZl+QviGaAkYt5IPCgLnPSz7ofzwB
+7I9ezX/SKEIBlYrilz0QIX32nRzFNKHsLA4KUiwSVXAkPcvCFDVDXSdOvsC9qnyW
+5/yeYa1E0wCXAgMBAAGjYzBhMB0GA1UdDgQWBBQGmpsfU33x9aTI04Y+oXNZtPdE
+ITAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFAaamx9TffH1pMjThj6hc1m0
+90QhMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAqgVutt0Vyb+z
+xiD2BkewhpMl0425yAA/l/VSJ4hxyXT968pk21vvHl26v9Hr7lxpuhbI87mP0zYu
+QEkHDVneixCwSQXi/5E/S7fdAo74gShczNxtr18UnH1YeA32gAm56Q6XKRm4t+v4
+FstVEuTGfbvE7Pi1HE4+Z7/FXxttbUcoqgRYYdZ2vyJ/0Adqp2RT8JeNnYA/u8EH
+22Wv5psymsNUk8QcCMNE+3tjEUPRahphanltkE8pjkcFwRJpadbGNjHh/PqAulxP
+xOu3Mqz4dWEX1xAZufHSCe96Qp1bWgvUxpVOKs7/B9dPfhgGiPEZtdmYu65xxBzn
+dFlY7wyJz4sfdZMaBBSSSFCp61cpABbjNhzI+L/wM9VBD8TMPN3pM0MBkRArHtG5
+Xc0yGYuPjCB31yLEQtyEFpslbei0VXF/sHyz03FJuc9SpAQ/3D2gu68zngowYI7b
+nV2UqL1g52KAdoGDDIzMMEZJ4gzSqK/rYXHv5yJiqfdcZGyfFoxnNidF9Ql7v/YQ
+CvGwjVRDjAS6oz/v4jXH+XTgbzRB0L9zZVcg+ZtnemZoJE6AZb0QmQZZ8mWvuMZH
+u/2QeItBcy6vVR/cO5JyboTT0GFMDcx2V+IthSIVNg3rAZ3r2OvEhJn7wAzMMujj
+d9qDRIueVSjAi1jTkD5OGwDxFa2DK5o=
+-----END CERTIFICATE-----
+
+# Issuer: CN=HARICA TLS RSA Root CA 2021 O=Hellenic Academic and Research Institutions CA
+# Subject: CN=HARICA TLS RSA Root CA 2021 O=Hellenic Academic and Research Institutions CA
+# Label: "HARICA TLS RSA Root CA 2021"
+# Serial: 76817823531813593706434026085292783742
+# MD5 Fingerprint: 65:47:9b:58:86:dd:2c:f0:fc:a2:84:1f:1e:96:c4:91
+# SHA1 Fingerprint: 02:2d:05:82:fa:88:ce:14:0c:06:79:de:7f:14:10:e9:45:d7:a5:6d
+# SHA256 Fingerprint: d9:5d:0e:8e:da:79:52:5b:f9:be:b1:1b:14:d2:10:0d:32:94:98:5f:0c:62:d9:fa:bd:9c:d9:99:ec:cb:7b:1d
+-----BEGIN CERTIFICATE-----
+MIIFpDCCA4ygAwIBAgIQOcqTHO9D88aOk8f0ZIk4fjANBgkqhkiG9w0BAQsFADBs
+MQswCQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl
+c2VhcmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBSU0Eg
+Um9vdCBDQSAyMDIxMB4XDTIxMDIxOTEwNTUzOFoXDTQ1MDIxMzEwNTUzN1owbDEL
+MAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNl
+YXJjaCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgUlNBIFJv
+b3QgQ0EgMjAyMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAIvC569l
+mwVnlskNJLnQDmT8zuIkGCyEf3dRywQRNrhe7Wlxp57kJQmXZ8FHws+RFjZiPTgE
+4VGC/6zStGndLuwRo0Xua2s7TL+MjaQenRG56Tj5eg4MmOIjHdFOY9TnuEFE+2uv
+a9of08WRiFukiZLRgeaMOVig1mlDqa2YUlhu2wr7a89o+uOkXjpFc5gH6l8Cct4M
+pbOfrqkdtx2z/IpZ525yZa31MJQjB/OCFks1mJxTuy/K5FrZx40d/JiZ+yykgmvw
+Kh+OC19xXFyuQnspiYHLA6OZyoieC0AJQTPb5lh6/a6ZcMBaD9YThnEvdmn8kN3b
+LW7R8pv1GmuebxWMevBLKKAiOIAkbDakO/IwkfN4E8/BPzWr8R0RI7VDIp4BkrcY
+AuUR0YLbFQDMYTfBKnya4dC6s1BG7oKsnTH4+yPiAwBIcKMJJnkVU2DzOFytOOqB
+AGMUuTNe3QvboEUHGjMJ+E20pwKmafTCWQWIZYVWrkvL4N48fS0ayOn7H6NhStYq
+E613TBoYm5EPWNgGVMWX+Ko/IIqmhaZ39qb8HOLubpQzKoNQhArlT4b4UEV4AIHr
+W2jjJo3Me1xR9BQsQL4aYB16cmEdH2MtiKrOokWQCPxrvrNQKlr9qEgYRtaQQJKQ
+CoReaDH46+0N0x3GfZkYVVYnZS6NRcUk7M7jAgMBAAGjQjBAMA8GA1UdEwEB/wQF
+MAMBAf8wHQYDVR0OBBYEFApII6ZgpJIKM+qTW8VX6iVNvRLuMA4GA1UdDwEB/wQE
+AwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAPpBIqm5iFSVmewzVjIuJndftTgfvnNAU
+X15QvWiWkKQUEapobQk1OUAJ2vQJLDSle1mESSmXdMgHHkdt8s4cUCbjnj1AUz/3
+f5Z2EMVGpdAgS1D0NTsY9FVqQRtHBmg8uwkIYtlfVUKqrFOFrJVWNlar5AWMxaja
+H6NpvVMPxP/cyuN+8kyIhkdGGvMA9YCRotxDQpSbIPDRzbLrLFPCU3hKTwSUQZqP
+JzLB5UkZv/HywouoCjkxKLR9YjYsTewfM7Z+d21+UPCfDtcRj88YxeMn/ibvBZ3P
+zzfF0HvaO7AWhAw6k9a+F9sPPg4ZeAnHqQJyIkv3N3a6dcSFA1pj1bF1BcK5vZSt
+jBWZp5N99sXzqnTPBIWUmAD04vnKJGW/4GKvyMX6ssmeVkjaef2WdhW+o45WxLM0
+/L5H9MG0qPzVMIho7suuyWPEdr6sOBjhXlzPrjoiUevRi7PzKzMHVIf6tLITe7pT
+BGIBnfHAT+7hOtSLIBD6Alfm78ELt5BGnBkpjNxvoEppaZS3JGWg/6w/zgH7IS79
+aPib8qXPMThcFarmlwDB31qlpzmq6YR/PFGoOtmUW4y/Twhx5duoXNTSpv4Ao8YW
+xw/ogM4cKGR0GQjTQuPOAF1/sdwTsOEFy9EgqoZ0njnnkf3/W9b3raYvAwtt41dU
+63ZTGI0RmLo=
+-----END CERTIFICATE-----
+
+# Issuer: CN=HARICA TLS ECC Root CA 2021 O=Hellenic Academic and Research Institutions CA
+# Subject: CN=HARICA TLS ECC Root CA 2021 O=Hellenic Academic and Research Institutions CA
+# Label: "HARICA TLS ECC Root CA 2021"
+# Serial: 137515985548005187474074462014555733966
+# MD5 Fingerprint: ae:f7:4c:e5:66:35:d1:b7:9b:8c:22:93:74:d3:4b:b0
+# SHA1 Fingerprint: bc:b0:c1:9d:e9:98:92:70:19:38:57:e9:8d:a7:b4:5d:6e:ee:01:48
+# SHA256 Fingerprint: 3f:99:cc:47:4a:cf:ce:4d:fe:d5:87:94:66:5e:47:8d:15:47:73:9f:2e:78:0f:1b:b4:ca:9b:13:30:97:d4:01
+-----BEGIN CERTIFICATE-----
+MIICVDCCAdugAwIBAgIQZ3SdjXfYO2rbIvT/WeK/zjAKBggqhkjOPQQDAzBsMQsw
+CQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2Vh
+cmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBFQ0MgUm9v
+dCBDQSAyMDIxMB4XDTIxMDIxOTExMDExMFoXDTQ1MDIxMzExMDEwOVowbDELMAkG
+A1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJj
+aCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgRUNDIFJvb3Qg
+Q0EgMjAyMTB2MBAGByqGSM49AgEGBSuBBAAiA2IABDgI/rGgltJ6rK9JOtDA4MM7
+KKrxcm1lAEeIhPyaJmuqS7psBAqIXhfyVYf8MLA04jRYVxqEU+kw2anylnTDUR9Y
+STHMmE5gEYd103KUkE+bECUqqHgtvpBBWJAVcqeht6NCMEAwDwYDVR0TAQH/BAUw
+AwEB/zAdBgNVHQ4EFgQUyRtTgRL+BNUW0aq8mm+3oJUZbsowDgYDVR0PAQH/BAQD
+AgGGMAoGCCqGSM49BAMDA2cAMGQCMBHervjcToiwqfAircJRQO9gcS3ujwLEXQNw
+SaSS6sUUiHCm0w2wqsosQJz76YJumgIwK0eaB8bRwoF8yguWGEEbo/QwCZ61IygN
+nxS2PFOiTAZpffpskcYqSUXm7LcT4Tps
+-----END CERTIFICATE-----
+
+# Issuer: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068
+# Subject: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068
+# Label: "Autoridad de Certificacion Firmaprofesional CIF A62634068"
+# Serial: 1977337328857672817
+# MD5 Fingerprint: 4e:6e:9b:54:4c:ca:b7:fa:48:e4:90:b1:15:4b:1c:a3
+# SHA1 Fingerprint: 0b:be:c2:27:22:49:cb:39:aa:db:35:5c:53:e3:8c:ae:78:ff:b6:fe
+# SHA256 Fingerprint: 57:de:05:83:ef:d2:b2:6e:03:61:da:99:da:9d:f4:64:8d:ef:7e:e8:44:1c:3b:72:8a:fa:9b:cd:e0:f9:b2:6a
+-----BEGIN CERTIFICATE-----
+MIIGFDCCA/ygAwIBAgIIG3Dp0v+ubHEwDQYJKoZIhvcNAQELBQAwUTELMAkGA1UE
+BhMCRVMxQjBABgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1h
+cHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODAeFw0xNDA5MjMxNTIyMDdaFw0zNjA1
+MDUxNTIyMDdaMFExCzAJBgNVBAYTAkVTMUIwQAYDVQQDDDlBdXRvcmlkYWQgZGUg
+Q2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBBNjI2MzQwNjgwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDDUtd9
+thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQM
+cas9UX4PB99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefG
+L9ItWY16Ck6WaVICqjaY7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15i
+NA9wBj4gGFrO93IbJWyTdBSTo3OxDqqHECNZXyAFGUftaI6SEspd/NYrspI8IM/h
+X68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyIplD9amML9ZMWGxmPsu2b
+m8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctXMbScyJCy
+Z/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirja
+EbsXLZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/T
+KI8xWVvTyQKmtFLKbpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF
+6NkBiDkal4ZkQdU7hwxu+g/GvUgUvzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVh
+OSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMB0GA1UdDgQWBBRlzeurNR4APn7VdMAc
+tHNHDhpkLzASBgNVHRMBAf8ECDAGAQH/AgEBMIGmBgNVHSAEgZ4wgZswgZgGBFUd
+IAAwgY8wLwYIKwYBBQUHAgEWI2h0dHA6Ly93d3cuZmlybWFwcm9mZXNpb25hbC5j
+b20vY3BzMFwGCCsGAQUFBwICMFAeTgBQAGEAcwBlAG8AIABkAGUAIABsAGEAIABC
+AG8AbgBhAG4AbwB2AGEAIAA0ADcAIABCAGEAcgBjAGUAbABvAG4AYQAgADAAOAAw
+ADEANzAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQELBQADggIBAHSHKAIrdx9m
+iWTtj3QuRhy7qPj4Cx2Dtjqn6EWKB7fgPiDL4QjbEwj4KKE1soCzC1HA01aajTNF
+Sa9J8OA9B3pFE1r/yJfY0xgsfZb43aJlQ3CTkBW6kN/oGbDbLIpgD7dvlAceHabJ
+hfa9NPhAeGIQcDq+fUs5gakQ1JZBu/hfHAsdCPKxsIl68veg4MSPi3i1O1ilI45P
+Vf42O+AMt8oqMEEgtIDNrvx2ZnOorm7hfNoD6JQg5iKj0B+QXSBTFCZX2lSX3xZE
+EAEeiGaPcjiT3SC3NL7X8e5jjkd5KAb881lFJWAiMxujX6i6KtoaPc1A6ozuBRWV
+1aUsIC+nmCjuRfzxuIgALI9C2lHVnOUTaHFFQ4ueCyE8S1wF3BqfmI7avSKecs2t
+CsvMo2ebKHTEm9caPARYpoKdrcd7b/+Alun4jWq9GJAd/0kakFI3ky88Al2CdgtR
+5xbHV/g4+afNmyJU72OwFW1TZQNKXkqgsqeOSQBZONXH9IBk9W6VULgRfhVwOEqw
+f9DEMnDAGf/JOC0ULGb0QkTmVXYbgBVX/8Cnp6o5qtjTcNAuuuuUavpfNIbnYrX9
+ivAwhZTJryQCL2/W3Wf+47BVTwSYT6RBVuKT0Gro1vP7ZeDOdcQxWQzugsgMYDNK
+GbqEZycPvEJdvSRUDewdcAZfpLz6IHxV
+-----END CERTIFICATE-----
+
+# Issuer: CN=vTrus ECC Root CA O=iTrusChina Co.,Ltd.
+# Subject: CN=vTrus ECC Root CA O=iTrusChina Co.,Ltd.
+# Label: "vTrus ECC Root CA"
+# Serial: 630369271402956006249506845124680065938238527194
+# MD5 Fingerprint: de:4b:c1:f5:52:8c:9b:43:e1:3e:8f:55:54:17:8d:85
+# SHA1 Fingerprint: f6:9c:db:b0:fc:f6:02:13:b6:52:32:a6:a3:91:3f:16:70:da:c3:e1
+# SHA256 Fingerprint: 30:fb:ba:2c:32:23:8e:2a:98:54:7a:f9:79:31:e5:50:42:8b:9b:3f:1c:8e:eb:66:33:dc:fa:86:c5:b2:7d:d3
+-----BEGIN CERTIFICATE-----
+MIICDzCCAZWgAwIBAgIUbmq8WapTvpg5Z6LSa6Q75m0c1towCgYIKoZIzj0EAwMw
+RzELMAkGA1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28uLEx0ZC4xGjAY
+BgNVBAMTEXZUcnVzIEVDQyBSb290IENBMB4XDTE4MDczMTA3MjY0NFoXDTQzMDcz
+MTA3MjY0NFowRzELMAkGA1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28u
+LEx0ZC4xGjAYBgNVBAMTEXZUcnVzIEVDQyBSb290IENBMHYwEAYHKoZIzj0CAQYF
+K4EEACIDYgAEZVBKrox5lkqqHAjDo6LN/llWQXf9JpRCux3NCNtzslt188+cToL0
+v/hhJoVs1oVbcnDS/dtitN9Ti72xRFhiQgnH+n9bEOf+QP3A2MMrMudwpremIFUd
+e4BdS49nTPEQo0IwQDAdBgNVHQ4EFgQUmDnNvtiyjPeyq+GtJK97fKHbH88wDwYD
+VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwCgYIKoZIzj0EAwMDaAAwZQIw
+V53dVvHH4+m4SVBrm2nDb+zDfSXkV5UTQJtS0zvzQBm8JsctBp61ezaf9SXUY2sA
+AjEA6dPGnlaaKsyh2j/IZivTWJwghfqrkYpwcBE4YGQLYgmRWAD5Tfs0aNoJrSEG
+GJTO
+-----END CERTIFICATE-----
+
+# Issuer: CN=vTrus Root CA O=iTrusChina Co.,Ltd.
+# Subject: CN=vTrus Root CA O=iTrusChina Co.,Ltd.
+# Label: "vTrus Root CA"
+# Serial: 387574501246983434957692974888460947164905180485
+# MD5 Fingerprint: b8:c9:37:df:fa:6b:31:84:64:c5:ea:11:6a:1b:75:fc
+# SHA1 Fingerprint: 84:1a:69:fb:f5:cd:1a:25:34:13:3d:e3:f8:fc:b8:99:d0:c9:14:b7
+# SHA256 Fingerprint: 8a:71:de:65:59:33:6f:42:6c:26:e5:38:80:d0:0d:88:a1:8d:a4:c6:a9:1f:0d:cb:61:94:e2:06:c5:c9:63:87
+-----BEGIN CERTIFICATE-----
+MIIFVjCCAz6gAwIBAgIUQ+NxE9izWRRdt86M/TX9b7wFjUUwDQYJKoZIhvcNAQEL
+BQAwQzELMAkGA1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28uLEx0ZC4x
+FjAUBgNVBAMTDXZUcnVzIFJvb3QgQ0EwHhcNMTgwNzMxMDcyNDA1WhcNNDMwNzMx
+MDcyNDA1WjBDMQswCQYDVQQGEwJDTjEcMBoGA1UEChMTaVRydXNDaGluYSBDby4s
+THRkLjEWMBQGA1UEAxMNdlRydXMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQAD
+ggIPADCCAgoCggIBAL1VfGHTuB0EYgWgrmy3cLRB6ksDXhA/kFocizuwZotsSKYc
+IrrVQJLuM7IjWcmOvFjai57QGfIvWcaMY1q6n6MLsLOaXLoRuBLpDLvPbmyAhykU
+AyyNJJrIZIO1aqwTLDPxn9wsYTwaP3BVm60AUn/PBLn+NvqcwBauYv6WTEN+VRS+
+GrPSbcKvdmaVayqwlHeFXgQPYh1jdfdr58tbmnDsPmcF8P4HCIDPKNsFxhQnL4Z9
+8Cfe/+Z+M0jnCx5Y0ScrUw5XSmXX+6KAYPxMvDVTAWqXcoKv8R1w6Jz1717CbMdH
+flqUhSZNO7rrTOiwCcJlwp2dCZtOtZcFrPUGoPc2BX70kLJrxLT5ZOrpGgrIDajt
+J8nU57O5q4IikCc9Kuh8kO+8T/3iCiSn3mUkpF3qwHYw03dQ+A0Em5Q2AXPKBlim
+0zvc+gRGE1WKyURHuFE5Gi7oNOJ5y1lKCn+8pu8fA2dqWSslYpPZUxlmPCdiKYZN
+pGvu/9ROutW04o5IWgAZCfEF2c6Rsffr6TlP9m8EQ5pV9T4FFL2/s1m02I4zhKOQ
+UqqzApVg+QxMaPnu1RcN+HFXtSXkKe5lXa/R7jwXC1pDxaWG6iSe4gUH3DRCEpHW
+OXSuTEGC2/KmSNGzm/MzqvOmwMVO9fSddmPmAsYiS8GVP1BkLFTltvA8Kc9XAgMB
+AAGjQjBAMB0GA1UdDgQWBBRUYnBj8XWEQ1iO0RYgscasGrz2iTAPBgNVHRMBAf8E
+BTADAQH/MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAKbqSSaet
+8PFww+SX8J+pJdVrnjT+5hpk9jprUrIQeBqfTNqK2uwcN1LgQkv7bHbKJAs5EhWd
+nxEt/Hlk3ODg9d3gV8mlsnZwUKT+twpw1aA08XXXTUm6EdGz2OyC/+sOxL9kLX1j
+bhd47F18iMjrjld22VkE+rxSH0Ws8HqA7Oxvdq6R2xCOBNyS36D25q5J08FsEhvM
+Kar5CKXiNxTKsbhm7xqC5PD48acWabfbqWE8n/Uxy+QARsIvdLGx14HuqCaVvIiv
+TDUHKgLKeBRtRytAVunLKmChZwOgzoy8sHJnxDHO2zTlJQNgJXtxmOTAGytfdELS
+S8VZCAeHvsXDf+eW2eHcKJfWjwXj9ZtOyh1QRwVTsMo554WgicEFOwE30z9J4nfr
+I8iIZjs9OXYhRvHsXyO466JmdXTBQPfYaJqT4i2pLr0cox7IdMakLXogqzu4sEb9
+b91fUlV1YvCXoHzXOP0l382gmxDPi7g4Xl7FtKYCNqEeXxzP4padKar9mK5S4fNB
+UvupLnKWnyfjqnN9+BojZns7q2WwMgFLFT49ok8MKzWixtlnEjUwzXYuFrOZnk1P
+Ti07NEPhmg4NpGaXutIcSkwsKouLgU9xGqndXHt7CMUADTdA43x7VF8vhV929ven
+sBxXVsFy6K2ir40zSbofitzmdHxghm+Hl3s=
+-----END CERTIFICATE-----
+
+# Issuer: CN=ISRG Root X2 O=Internet Security Research Group
+# Subject: CN=ISRG Root X2 O=Internet Security Research Group
+# Label: "ISRG Root X2"
+# Serial: 87493402998870891108772069816698636114
+# MD5 Fingerprint: d3:9e:c4:1e:23:3c:a6:df:cf:a3:7e:6d:e0:14:e6:e5
+# SHA1 Fingerprint: bd:b1:b9:3c:d5:97:8d:45:c6:26:14:55:f8:db:95:c7:5a:d1:53:af
+# SHA256 Fingerprint: 69:72:9b:8e:15:a8:6e:fc:17:7a:57:af:b7:17:1d:fc:64:ad:d2:8c:2f:ca:8c:f1:50:7e:34:45:3c:cb:14:70
+-----BEGIN CERTIFICATE-----
+MIICGzCCAaGgAwIBAgIQQdKd0XLq7qeAwSxs6S+HUjAKBggqhkjOPQQDAzBPMQsw
+CQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJuZXQgU2VjdXJpdHkgUmVzZWFyY2gg
+R3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBYMjAeFw0yMDA5MDQwMDAwMDBaFw00
+MDA5MTcxNjAwMDBaME8xCzAJBgNVBAYTAlVTMSkwJwYDVQQKEyBJbnRlcm5ldCBT
+ZWN1cml0eSBSZXNlYXJjaCBHcm91cDEVMBMGA1UEAxMMSVNSRyBSb290IFgyMHYw
+EAYHKoZIzj0CAQYFK4EEACIDYgAEzZvVn4CDCuwJSvMWSj5cz3es3mcFDR0HttwW
++1qLFNvicWDEukWVEYmO6gbf9yoWHKS5xcUy4APgHoIYOIvXRdgKam7mAHf7AlF9
+ItgKbppbd9/w+kHsOdx1ymgHDB/qo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0T
+AQH/BAUwAwEB/zAdBgNVHQ4EFgQUfEKWrt5LSDv6kviejM9ti6lyN5UwCgYIKoZI
+zj0EAwMDaAAwZQIwe3lORlCEwkSHRhtFcP9Ymd70/aTSVaYgLXTWNLxBo1BfASdW
+tL4ndQavEi51mI38AjEAi/V3bNTIZargCyzuFJ0nN6T5U6VR5CmD1/iQMVtCnwr1
+/q4AaOeMSQ+2b1tbFfLn
+-----END CERTIFICATE-----
+
+# Issuer: CN=HiPKI Root CA - G1 O=Chunghwa Telecom Co., Ltd.
+# Subject: CN=HiPKI Root CA - G1 O=Chunghwa Telecom Co., Ltd.
+# Label: "HiPKI Root CA - G1"
+# Serial: 60966262342023497858655262305426234976
+# MD5 Fingerprint: 69:45:df:16:65:4b:e8:68:9a:8f:76:5f:ff:80:9e:d3
+# SHA1 Fingerprint: 6a:92:e4:a8:ee:1b:ec:96:45:37:e3:29:57:49:cd:96:e3:e5:d2:60
+# SHA256 Fingerprint: f0:15:ce:3c:c2:39:bf:ef:06:4b:e9:f1:d2:c4:17:e1:a0:26:4a:0a:94:be:1f:0c:8d:12:18:64:eb:69:49:cc
+-----BEGIN CERTIFICATE-----
+MIIFajCCA1KgAwIBAgIQLd2szmKXlKFD6LDNdmpeYDANBgkqhkiG9w0BAQsFADBP
+MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0
+ZC4xGzAZBgNVBAMMEkhpUEtJIFJvb3QgQ0EgLSBHMTAeFw0xOTAyMjIwOTQ2MDRa
+Fw0zNzEyMzExNTU5NTlaME8xCzAJBgNVBAYTAlRXMSMwIQYDVQQKDBpDaHVuZ2h3
+YSBUZWxlY29tIENvLiwgTHRkLjEbMBkGA1UEAwwSSGlQS0kgUm9vdCBDQSAtIEcx
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA9B5/UnMyDHPkvRN0o9Qw
+qNCuS9i233VHZvR85zkEHmpwINJaR3JnVfSl6J3VHiGh8Ge6zCFovkRTv4354twv
+Vcg3Px+kwJyz5HdcoEb+d/oaoDjq7Zpy3iu9lFc6uux55199QmQ5eiY29yTw1S+6
+lZgRZq2XNdZ1AYDgr/SEYYwNHl98h5ZeQa/rh+r4XfEuiAU+TCK72h8q3VJGZDnz
+Qs7ZngyzsHeXZJzA9KMuH5UHsBffMNsAGJZMoYFL3QRtU6M9/Aes1MU3guvklQgZ
+KILSQjqj2FPseYlgSGDIcpJQ3AOPgz+yQlda22rpEZfdhSi8MEyr48KxRURHH+CK
+FgeW0iEPU8DtqX7UTuybCeyvQqww1r/REEXgphaypcXTT3OUM3ECoWqj1jOXTyFj
+HluP2cFeRXF3D4FdXyGarYPM+l7WjSNfGz1BryB1ZlpK9p/7qxj3ccC2HTHsOyDr
+y+K49a6SsvfhhEvyovKTmiKe0xRvNlS9H15ZFblzqMF8b3ti6RZsR1pl8w4Rm0bZ
+/W3c1pzAtH2lsN0/Vm+h+fbkEkj9Bn8SV7apI09bA8PgcSojt/ewsTu8mL3WmKgM
+a/aOEmem8rJY5AIJEzypuxC00jBF8ez3ABHfZfjcK0NVvxaXxA/VLGGEqnKG/uY6
+fsI/fe78LxQ+5oXdUG+3Se0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
+HQ4EFgQU8ncX+l6o/vY9cdVouslGDDjYr7AwDgYDVR0PAQH/BAQDAgGGMA0GCSqG
+SIb3DQEBCwUAA4ICAQBQUfB13HAE4/+qddRxosuej6ip0691x1TPOhwEmSKsxBHi
+7zNKpiMdDg1H2DfHb680f0+BazVP6XKlMeJ45/dOlBhbQH3PayFUhuaVevvGyuqc
+SE5XCV0vrPSltJczWNWseanMX/mF+lLFjfiRFOs6DRfQUsJ748JzjkZ4Bjgs6Fza
+ZsT0pPBWGTMpWmWSBUdGSquEwx4noR8RkpkndZMPvDY7l1ePJlsMu5wP1G4wB9Tc
+XzZoZjmDlicmisjEOf6aIW/Vcobpf2Lll07QJNBAsNB1CI69aO4I1258EHBGG3zg
+iLKecoaZAeO/n0kZtCW+VmWuF2PlHt/o/0elv+EmBYTksMCv5wiZqAxeJoBF1Pho
+L5aPruJKHJwWDBNvOIf2u8g0X5IDUXlwpt/L9ZlNec1OvFefQ05rLisY+GpzjLrF
+Ne85akEez3GoorKGB1s6yeHvP2UEgEcyRHCVTjFnanRbEEV16rCf0OY1/k6fi8wr
+kkVbbiVghUbN0aqwdmaTd5a+g744tiROJgvM7XpWGuDpWsZkrUx6AEhEL7lAuxM+
+vhV4nYWBSipX3tUZQ9rbyltHhoMLP7YNdnhzeSJesYAfz77RP1YQmCuVh6EfnWQU
+YDksswBVLuT1sw5XxJFBAJw/6KXf6vb/yPCtbVKoF6ubYfwSUTXkJf2vqmqGOQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4
+# Label: "GlobalSign ECC Root CA - R4"
+# Serial: 159662223612894884239637590694
+# MD5 Fingerprint: 26:29:f8:6d:e1:88:bf:a2:65:7f:aa:c4:cd:0f:7f:fc
+# SHA1 Fingerprint: 6b:a0:b0:98:e1:71:ef:5a:ad:fe:48:15:80:77:10:f4:bd:6f:0b:28
+# SHA256 Fingerprint: b0:85:d7:0b:96:4f:19:1a:73:e4:af:0d:54:ae:7a:0e:07:aa:fd:af:9b:71:dd:08:62:13:8a:b7:32:5a:24:a2
+-----BEGIN CERTIFICATE-----
+MIIB3DCCAYOgAwIBAgINAgPlfvU/k/2lCSGypjAKBggqhkjOPQQDAjBQMSQwIgYD
+VQQLExtHbG9iYWxTaWduIEVDQyBSb290IENBIC0gUjQxEzARBgNVBAoTCkdsb2Jh
+bFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMTIxMTEzMDAwMDAwWhcNMzgw
+MTE5MDMxNDA3WjBQMSQwIgYDVQQLExtHbG9iYWxTaWduIEVDQyBSb290IENBIC0g
+UjQxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wWTAT
+BgcqhkjOPQIBBggqhkjOPQMBBwNCAAS4xnnTj2wlDp8uORkcA6SumuU5BwkWymOx
+uYb4ilfBV85C+nOh92VC/x7BALJucw7/xyHlGKSq2XE/qNS5zowdo0IwQDAOBgNV
+HQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUVLB7rUW44kB/
++wpu+74zyTyjhNUwCgYIKoZIzj0EAwIDRwAwRAIgIk90crlgr/HmnKAWBVBfw147
+bmF0774BxL4YSFlhgjICICadVGNA3jdgUM/I2O2dgq43mLyjj0xMqTQrbO/7lZsm
+-----END CERTIFICATE-----
+
+# Issuer: CN=GTS Root R1 O=Google Trust Services LLC
+# Subject: CN=GTS Root R1 O=Google Trust Services LLC
+# Label: "GTS Root R1"
+# Serial: 159662320309726417404178440727
+# MD5 Fingerprint: 05:fe:d0:bf:71:a8:a3:76:63:da:01:e0:d8:52:dc:40
+# SHA1 Fingerprint: e5:8c:1c:c4:91:3b:38:63:4b:e9:10:6e:e3:ad:8e:6b:9d:d9:81:4a
+# SHA256 Fingerprint: d9:47:43:2a:bd:e7:b7:fa:90:fc:2e:6b:59:10:1b:12:80:e0:e1:c7:e4:e4:0f:a3:c6:88:7f:ff:57:a7:f4:cf
+-----BEGIN CERTIFICATE-----
+MIIFVzCCAz+gAwIBAgINAgPlk28xsBNJiGuiFzANBgkqhkiG9w0BAQwFADBHMQsw
+CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU
+MBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw
+MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp
+Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEBAQUA
+A4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx9vaMf/vo
+27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vXmX7w
+Cl7raKb0xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7zUjw
+TcLCeoiKu7rPWRnWr4+wB7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0Pfybl
+qAj+lug8aJRT7oM6iCsVlgmy4HqMLnXWnOunVmSPlk9orj2XwoSPwLxAwAtcvfaH
+szVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly4cpk9+aCEI3oncKKiPo4Zor8
+Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr06zqkUspzBmk
+MiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOORc92
+wO1AK/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYWk70p
+aDPvOmbsB4om3xPXV2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+DVrN
+VjzRlwW5y0vtOUucxD/SVRNuJLDWcfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgFlQID
+AQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E
+FgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEMBQADggIBAJ+qQibb
+C5u+/x6Wki4+omVKapi6Ist9wTrYggoGxval3sBOh2Z5ofmmWJyq+bXmYOfg6LEe
+QkEzCzc9zolwFcq1JKjPa7XSQCGYzyI0zzvFIoTgxQ6KfF2I5DUkzps+GlQebtuy
+h6f88/qBVRRiClmpIgUxPoLW7ttXNLwzldMXG+gnoot7TiYaelpkttGsN/H9oPM4
+7HLwEXWdyzRSjeZ2axfG34arJ45JK3VmgRAhpuo+9K4l/3wV3s6MJT/KYnAK9y8J
+ZgfIPxz88NtFMN9iiMG1D53Dn0reWVlHxYciNuaCp+0KueIHoI17eko8cdLiA6Ef
+MgfdG+RCzgwARWGAtQsgWSl4vflVy2PFPEz0tv/bal8xa5meLMFrUKTX5hgUvYU/
+Z6tGn6D/Qqc6f1zLXbBwHSs09dR2CQzreExZBfMzQsNhFRAbd03OIozUhfJFfbdT
+6u9AWpQKXCBfTkBdYiJ23//OYb2MI3jSNwLgjt7RETeJ9r/tSQdirpLsQBqvFAnZ
+0E6yove+7u7Y/9waLd64NnHi/Hm3lCXRSHNboTXns5lndcEZOitHTtNCjv0xyBZm
+2tIMPNuzjsmhDYAPexZ3FL//2wmUspO8IFgV6dtxQ/PeEMMA3KgqlbbC1j+Qa3bb
+bP6MvPJwNQzcmRk13NfIRmPVNnGuV/u3gm3c
+-----END CERTIFICATE-----
+
+# Issuer: CN=GTS Root R2 O=Google Trust Services LLC
+# Subject: CN=GTS Root R2 O=Google Trust Services LLC
+# Label: "GTS Root R2"
+# Serial: 159662449406622349769042896298
+# MD5 Fingerprint: 1e:39:c0:53:e6:1e:29:82:0b:ca:52:55:36:5d:57:dc
+# SHA1 Fingerprint: 9a:44:49:76:32:db:de:fa:d0:bc:fb:5a:7b:17:bd:9e:56:09:24:94
+# SHA256 Fingerprint: 8d:25:cd:97:22:9d:bf:70:35:6b:da:4e:b3:cc:73:40:31:e2:4c:f0:0f:af:cf:d3:2d:c7:6e:b5:84:1c:7e:a8
+-----BEGIN CERTIFICATE-----
+MIIFVzCCAz+gAwIBAgINAgPlrsWNBCUaqxElqjANBgkqhkiG9w0BAQwFADBHMQsw
+CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU
+MBIGA1UEAxMLR1RTIFJvb3QgUjIwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw
+MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp
+Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEBAQUA
+A4ICDwAwggIKAoICAQDO3v2m++zsFDQ8BwZabFn3GTXd98GdVarTzTukk3LvCvpt
+nfbwhYBboUhSnznFt+4orO/LdmgUud+tAWyZH8QiHZ/+cnfgLFuv5AS/T3KgGjSY
+6Dlo7JUle3ah5mm5hRm9iYz+re026nO8/4Piy33B0s5Ks40FnotJk9/BW9BuXvAu
+MC6C/Pq8tBcKSOWIm8Wba96wyrQD8Nr0kLhlZPdcTK3ofmZemde4wj7I0BOdre7k
+RXuJVfeKH2JShBKzwkCX44ofR5GmdFrS+LFjKBC4swm4VndAoiaYecb+3yXuPuWg
+f9RhD1FLPD+M2uFwdNjCaKH5wQzpoeJ/u1U8dgbuak7MkogwTZq9TwtImoS1mKPV
++3PBV2HdKFZ1E66HjucMUQkQdYhMvI35ezzUIkgfKtzra7tEscszcTJGr61K8Yzo
+dDqs5xoic4DSMPclQsciOzsSrZYuxsN2B6ogtzVJV+mSSeh2FnIxZyuWfoqjx5RW
+Ir9qS34BIbIjMt/kmkRtWVtd9QCgHJvGeJeNkP+byKq0rxFROV7Z+2et1VsRnTKa
+G73VululycslaVNVJ1zgyjbLiGH7HrfQy+4W+9OmTN6SpdTi3/UGVN4unUu0kzCq
+gc7dGtxRcw1PcOnlthYhGXmy5okLdWTK1au8CcEYof/UVKGFPP0UJAOyh9OktwID
+AQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E
+FgQUu//KjiOfT5nK2+JopqUVJxce2Q4wDQYJKoZIhvcNAQEMBQADggIBAB/Kzt3H
+vqGf2SdMC9wXmBFqiN495nFWcrKeGk6c1SuYJF2ba3uwM4IJvd8lRuqYnrYb/oM8
+0mJhwQTtzuDFycgTE1XnqGOtjHsB/ncw4c5omwX4Eu55MaBBRTUoCnGkJE+M3DyC
+B19m3H0Q/gxhswWV7uGugQ+o+MePTagjAiZrHYNSVc61LwDKgEDg4XSsYPWHgJ2u
+NmSRXbBoGOqKYcl3qJfEycel/FVL8/B/uWU9J2jQzGv6U53hkRrJXRqWbTKH7QMg
+yALOWr7Z6v2yTcQvG99fevX4i8buMTolUVVnjWQye+mew4K6Ki3pHrTgSAai/Gev
+HyICc/sgCq+dVEuhzf9gR7A/Xe8bVr2XIZYtCtFenTgCR2y59PYjJbigapordwj6
+xLEokCZYCDzifqrXPW+6MYgKBesntaFJ7qBFVHvmJ2WZICGoo7z7GJa7Um8M7YNR
+TOlZ4iBgxcJlkoKM8xAfDoqXvneCbT+PHV28SSe9zE8P4c52hgQjxcCMElv924Sg
+JPFI/2R80L5cFtHvma3AH/vLrrw4IgYmZNralw4/KBVEqE8AyvCazM90arQ+POuV
+7LXTWtiBmelDGDfrs7vRWGJB82bSj6p4lVQgw1oudCvV0b4YacCs1aTPObpRhANl
+6WLAYv7YTVWW4tAR+kg0Eeye7QUd5MjWHYbL
+-----END CERTIFICATE-----
+
+# Issuer: CN=GTS Root R3 O=Google Trust Services LLC
+# Subject: CN=GTS Root R3 O=Google Trust Services LLC
+# Label: "GTS Root R3"
+# Serial: 159662495401136852707857743206
+# MD5 Fingerprint: 3e:e7:9d:58:02:94:46:51:94:e5:e0:22:4a:8b:e7:73
+# SHA1 Fingerprint: ed:e5:71:80:2b:c8:92:b9:5b:83:3c:d2:32:68:3f:09:cd:a0:1e:46
+# SHA256 Fingerprint: 34:d8:a7:3e:e2:08:d9:bc:db:0d:95:65:20:93:4b:4e:40:e6:94:82:59:6e:8b:6f:73:c8:42:6b:01:0a:6f:48
+-----BEGIN CERTIFICATE-----
+MIICCTCCAY6gAwIBAgINAgPluILrIPglJ209ZjAKBggqhkjOPQQDAzBHMQswCQYD
+VQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIG
+A1UEAxMLR1RTIFJvb3QgUjMwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAw
+WjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2Vz
+IExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjMwdjAQBgcqhkjOPQIBBgUrgQQAIgNi
+AAQfTzOHMymKoYTey8chWEGJ6ladK0uFxh1MJ7x/JlFyb+Kf1qPKzEUURout736G
+jOyxfi//qXGdGIRFBEFVbivqJn+7kAHjSxm65FSWRQmx1WyRRK2EE46ajA2ADDL2
+4CejQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
+BBTB8Sa6oC2uhYHP0/EqEr24Cmf9vDAKBggqhkjOPQQDAwNpADBmAjEA9uEglRR7
+VKOQFhG/hMjqb2sXnh5GmCCbn9MN2azTL818+FsuVbu/3ZL3pAzcMeGiAjEA/Jdm
+ZuVDFhOD3cffL74UOO0BzrEXGhF16b0DjyZ+hOXJYKaV11RZt+cRLInUue4X
+-----END CERTIFICATE-----
+
+# Issuer: CN=GTS Root R4 O=Google Trust Services LLC
+# Subject: CN=GTS Root R4 O=Google Trust Services LLC
+# Label: "GTS Root R4"
+# Serial: 159662532700760215368942768210
+# MD5 Fingerprint: 43:96:83:77:19:4d:76:b3:9d:65:52:e4:1d:22:a5:e8
+# SHA1 Fingerprint: 77:d3:03:67:b5:e0:0c:15:f6:0c:38:61:df:7c:e1:3b:92:46:4d:47
+# SHA256 Fingerprint: 34:9d:fa:40:58:c5:e2:63:12:3b:39:8a:e7:95:57:3c:4e:13:13:c8:3f:e6:8f:93:55:6c:d5:e8:03:1b:3c:7d
+-----BEGIN CERTIFICATE-----
+MIICCTCCAY6gAwIBAgINAgPlwGjvYxqccpBQUjAKBggqhkjOPQQDAzBHMQswCQYD
+VQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIG
+A1UEAxMLR1RTIFJvb3QgUjQwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAw
+WjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2Vz
+IExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjQwdjAQBgcqhkjOPQIBBgUrgQQAIgNi
+AATzdHOnaItgrkO4NcWBMHtLSZ37wWHO5t5GvWvVYRg1rkDdc/eJkTBa6zzuhXyi
+QHY7qca4R9gq55KRanPpsXI5nymfopjTX15YhmUPoYRlBtHci8nHc8iMai/lxKvR
+HYqjQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
+BBSATNbrdP9JNqPV2Py1PsVq8JQdjDAKBggqhkjOPQQDAwNpADBmAjEA6ED/g94D
+9J+uHXqnLrmvT/aDHQ4thQEd0dlq7A/Cr8deVl5c1RxYIigL9zC2L7F8AjEA8GE8
+p/SgguMh1YQdc4acLa/KNJvxn7kjNuK8YAOdgLOaVsjh4rsUecrNIdSUtUlD
+-----END CERTIFICATE-----
+
+# Issuer: CN=Telia Root CA v2 O=Telia Finland Oyj
+# Subject: CN=Telia Root CA v2 O=Telia Finland Oyj
+# Label: "Telia Root CA v2"
+# Serial: 7288924052977061235122729490515358
+# MD5 Fingerprint: 0e:8f:ac:aa:82:df:85:b1:f4:dc:10:1c:fc:99:d9:48
+# SHA1 Fingerprint: b9:99:cd:d1:73:50:8a:c4:47:05:08:9c:8c:88:fb:be:a0:2b:40:cd
+# SHA256 Fingerprint: 24:2b:69:74:2f:cb:1e:5b:2a:bf:98:89:8b:94:57:21:87:54:4e:5b:4d:99:11:78:65:73:62:1f:6a:74:b8:2c
+-----BEGIN CERTIFICATE-----
+MIIFdDCCA1ygAwIBAgIPAWdfJ9b+euPkrL4JWwWeMA0GCSqGSIb3DQEBCwUAMEQx
+CzAJBgNVBAYTAkZJMRowGAYDVQQKDBFUZWxpYSBGaW5sYW5kIE95ajEZMBcGA1UE
+AwwQVGVsaWEgUm9vdCBDQSB2MjAeFw0xODExMjkxMTU1NTRaFw00MzExMjkxMTU1
+NTRaMEQxCzAJBgNVBAYTAkZJMRowGAYDVQQKDBFUZWxpYSBGaW5sYW5kIE95ajEZ
+MBcGA1UEAwwQVGVsaWEgUm9vdCBDQSB2MjCCAiIwDQYJKoZIhvcNAQEBBQADggIP
+ADCCAgoCggIBALLQPwe84nvQa5n44ndp586dpAO8gm2h/oFlH0wnrI4AuhZ76zBq
+AMCzdGh+sq/H1WKzej9Qyow2RCRj0jbpDIX2Q3bVTKFgcmfiKDOlyzG4OiIjNLh9
+vVYiQJ3q9HsDrWj8soFPmNB06o3lfc1jw6P23pLCWBnglrvFxKk9pXSW/q/5iaq9
+lRdU2HhE8Qx3FZLgmEKnpNaqIJLNwaCzlrI6hEKNfdWV5Nbb6WLEWLN5xYzTNTOD
+n3WhUidhOPFZPY5Q4L15POdslv5e2QJltI5c0BE0312/UqeBAMN/mUWZFdUXyApT
+7GPzmX3MaRKGwhfwAZ6/hLzRUssbkmbOpFPlob/E2wnW5olWK8jjfN7j/4nlNW4o
+6GwLI1GpJQXrSPjdscr6bAhR77cYbETKJuFzxokGgeWKrLDiKca5JLNrRBH0pUPC
+TEPlcDaMtjNXepUugqD0XBCzYYP2AgWGLnwtbNwDRm41k9V6lS/eINhbfpSQBGq6
+WT0EBXWdN6IOLj3rwaRSg/7Qa9RmjtzG6RJOHSpXqhC8fF6CfaamyfItufUXJ63R
+DolUK5X6wK0dmBR4M0KGCqlztft0DbcbMBnEWg4cJ7faGND/isgFuvGqHKI3t+ZI
+pEYslOqodmJHixBTB0hXbOKSTbauBcvcwUpej6w9GU7C7WB1K9vBykLVAgMBAAGj
+YzBhMB8GA1UdIwQYMBaAFHKs5DN5qkWH9v2sHZ7Wxy+G2CQ5MB0GA1UdDgQWBBRy
+rOQzeapFh/b9rB2e1scvhtgkOTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUw
+AwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAoDtZpwmUPjaE0n4vOaWWl/oRrfxn83EJ
+8rKJhGdEr7nv7ZbsnGTbMjBvZ5qsfl+yqwE2foH65IRe0qw24GtixX1LDoJt0nZi
+0f6X+J8wfBj5tFJ3gh1229MdqfDBmgC9bXXYfef6xzijnHDoRnkDry5023X4blMM
+A8iZGok1GTzTyVR8qPAs5m4HeW9q4ebqkYJpCh3DflminmtGFZhb069GHWLIzoBS
+SRE/yQQSwxN8PzuKlts8oB4KtItUsiRnDe+Cy748fdHif64W1lZYudogsYMVoe+K
+TTJvQS8TUoKU1xrBeKJR3Stwbbca+few4GeXVtt8YVMJAygCQMez2P2ccGrGKMOF
+6eLtGpOg3kuYooQ+BXcBlj37tCAPnHICehIv1aO6UXivKitEZU61/Qrowc15h2Er
+3oBXRb9n8ZuRXqWk7FlIEA04x7D6w0RtBPV4UBySllva9bguulvP5fBqnUsvWHMt
+Ty3EHD70sz+rFQ47GUGKpMFXEmZxTPpT41frYpUJnlTd0cI8Vzy9OK2YZLe4A5pT
+VmBds9hCG1xLEooc6+t9xnppxyd/pPiL8uSUZodL6ZQHCRJ5irLrdATczvREWeAW
+ysUsWNc8e89ihmpQfTU2Zqf7N+cox9jQraVplI/owd8k+BsHMYeB2F326CjYSlKA
+rBPuUBQemMc=
+-----END CERTIFICATE-----
+
+# Issuer: CN=D-TRUST BR Root CA 1 2020 O=D-Trust GmbH
+# Subject: CN=D-TRUST BR Root CA 1 2020 O=D-Trust GmbH
+# Label: "D-TRUST BR Root CA 1 2020"
+# Serial: 165870826978392376648679885835942448534
+# MD5 Fingerprint: b5:aa:4b:d5:ed:f7:e3:55:2e:8f:72:0a:f3:75:b8:ed
+# SHA1 Fingerprint: 1f:5b:98:f0:e3:b5:f7:74:3c:ed:e6:b0:36:7d:32:cd:f4:09:41:67
+# SHA256 Fingerprint: e5:9a:aa:81:60:09:c2:2b:ff:5b:25:ba:d3:7d:f3:06:f0:49:79:7c:1f:81:d8:5a:b0:89:e6:57:bd:8f:00:44
+-----BEGIN CERTIFICATE-----
+MIIC2zCCAmCgAwIBAgIQfMmPK4TX3+oPyWWa00tNljAKBggqhkjOPQQDAzBIMQsw
+CQYDVQQGEwJERTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlELVRS
+VVNUIEJSIFJvb3QgQ0EgMSAyMDIwMB4XDTIwMDIxMTA5NDUwMFoXDTM1MDIxMTA5
+NDQ1OVowSDELMAkGA1UEBhMCREUxFTATBgNVBAoTDEQtVHJ1c3QgR21iSDEiMCAG
+A1UEAxMZRC1UUlVTVCBCUiBSb290IENBIDEgMjAyMDB2MBAGByqGSM49AgEGBSuB
+BAAiA2IABMbLxyjR+4T1mu9CFCDhQ2tuda38KwOE1HaTJddZO0Flax7mNCq7dPYS
+zuht56vkPE4/RAiLzRZxy7+SmfSk1zxQVFKQhYN4lGdnoxwJGT11NIXe7WB9xwy0
+QVK5buXuQqOCAQ0wggEJMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFHOREKv/
+VbNafAkl1bK6CKBrqx9tMA4GA1UdDwEB/wQEAwIBBjCBxgYDVR0fBIG+MIG7MD6g
+PKA6hjhodHRwOi8vY3JsLmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X2JyX3Jvb3Rf
+Y2FfMV8yMDIwLmNybDB5oHegdYZzbGRhcDovL2RpcmVjdG9yeS5kLXRydXN0Lm5l
+dC9DTj1ELVRSVVNUJTIwQlIlMjBSb290JTIwQ0ElMjAxJTIwMjAyMCxPPUQtVHJ1
+c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZvY2F0aW9ubGlzdDAKBggqhkjO
+PQQDAwNpADBmAjEAlJAtE/rhY/hhY+ithXhUkZy4kzg+GkHaQBZTQgjKL47xPoFW
+wKrY7RjEsK70PvomAjEA8yjixtsrmfu3Ubgko6SUeho/5jbiA1czijDLgsfWFBHV
+dWNbFJWcHwHP2NVypw87
+-----END CERTIFICATE-----
+
+# Issuer: CN=D-TRUST EV Root CA 1 2020 O=D-Trust GmbH
+# Subject: CN=D-TRUST EV Root CA 1 2020 O=D-Trust GmbH
+# Label: "D-TRUST EV Root CA 1 2020"
+# Serial: 126288379621884218666039612629459926992
+# MD5 Fingerprint: 8c:2d:9d:70:9f:48:99:11:06:11:fb:e9:cb:30:c0:6e
+# SHA1 Fingerprint: 61:db:8c:21:59:69:03:90:d8:7c:9c:12:86:54:cf:9d:3d:f4:dd:07
+# SHA256 Fingerprint: 08:17:0d:1a:a3:64:53:90:1a:2f:95:92:45:e3:47:db:0c:8d:37:ab:aa:bc:56:b8:1a:a1:00:dc:95:89:70:db
+-----BEGIN CERTIFICATE-----
+MIIC2zCCAmCgAwIBAgIQXwJB13qHfEwDo6yWjfv/0DAKBggqhkjOPQQDAzBIMQsw
+CQYDVQQGEwJERTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlELVRS
+VVNUIEVWIFJvb3QgQ0EgMSAyMDIwMB4XDTIwMDIxMTEwMDAwMFoXDTM1MDIxMTA5
+NTk1OVowSDELMAkGA1UEBhMCREUxFTATBgNVBAoTDEQtVHJ1c3QgR21iSDEiMCAG
+A1UEAxMZRC1UUlVTVCBFViBSb290IENBIDEgMjAyMDB2MBAGByqGSM49AgEGBSuB
+BAAiA2IABPEL3YZDIBnfl4XoIkqbz52Yv7QFJsnL46bSj8WeeHsxiamJrSc8ZRCC
+/N/DnU7wMyPE0jL1HLDfMxddxfCxivnvubcUyilKwg+pf3VlSSowZ/Rk99Yad9rD
+wpdhQntJraOCAQ0wggEJMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH8QARY3
+OqQo5FD4pPfsazK2/umLMA4GA1UdDwEB/wQEAwIBBjCBxgYDVR0fBIG+MIG7MD6g
+PKA6hjhodHRwOi8vY3JsLmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X2V2X3Jvb3Rf
+Y2FfMV8yMDIwLmNybDB5oHegdYZzbGRhcDovL2RpcmVjdG9yeS5kLXRydXN0Lm5l
+dC9DTj1ELVRSVVNUJTIwRVYlMjBSb290JTIwQ0ElMjAxJTIwMjAyMCxPPUQtVHJ1
+c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZvY2F0aW9ubGlzdDAKBggqhkjO
+PQQDAwNpADBmAjEAyjzGKnXCXnViOTYAYFqLwZOZzNnbQTs7h5kXO9XMT8oi96CA
+y/m0sRtW9XLS/BnRAjEAkfcwkz8QRitxpNA7RJvAKQIFskF3UfN5Wp6OFKBOQtJb
+gfM0agPnIjhQW+0ZT0MW
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert TLS ECC P384 Root G5 O=DigiCert, Inc.
+# Subject: CN=DigiCert TLS ECC P384 Root G5 O=DigiCert, Inc.
+# Label: "DigiCert TLS ECC P384 Root G5"
+# Serial: 13129116028163249804115411775095713523
+# MD5 Fingerprint: d3:71:04:6a:43:1c:db:a6:59:e1:a8:a3:aa:c5:71:ed
+# SHA1 Fingerprint: 17:f3:de:5e:9f:0f:19:e9:8e:f6:1f:32:26:6e:20:c4:07:ae:30:ee
+# SHA256 Fingerprint: 01:8e:13:f0:77:25:32:cf:80:9b:d1:b1:72:81:86:72:83:fc:48:c6:e1:3b:e9:c6:98:12:85:4a:49:0c:1b:05
+-----BEGIN CERTIFICATE-----
+MIICGTCCAZ+gAwIBAgIQCeCTZaz32ci5PhwLBCou8zAKBggqhkjOPQQDAzBOMQsw
+CQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xJjAkBgNVBAMTHURp
+Z2lDZXJ0IFRMUyBFQ0MgUDM4NCBSb290IEc1MB4XDTIxMDExNTAwMDAwMFoXDTQ2
+MDExNDIzNTk1OVowTjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDkRpZ2lDZXJ0LCBJ
+bmMuMSYwJAYDVQQDEx1EaWdpQ2VydCBUTFMgRUNDIFAzODQgUm9vdCBHNTB2MBAG
+ByqGSM49AgEGBSuBBAAiA2IABMFEoc8Rl1Ca3iOCNQfN0MsYndLxf3c1TzvdlHJS
+7cI7+Oz6e2tYIOyZrsn8aLN1udsJ7MgT9U7GCh1mMEy7H0cKPGEQQil8pQgO4CLp
+0zVozptjn4S1mU1YoI71VOeVyaNCMEAwHQYDVR0OBBYEFMFRRVBZqz7nLFr6ICIS
+B4CIfBFqMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MAoGCCqGSM49
+BAMDA2gAMGUCMQCJao1H5+z8blUD2WdsJk6Dxv3J+ysTvLd6jLRl0mlpYxNjOyZQ
+LgGheQaRnUi/wr4CMEfDFXuxoJGZSZOoPHzoRgaLLPIxAJSdYsiJvRmEFOml+wG4
+DXZDjC5Ty3zfDBeWUA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert TLS RSA4096 Root G5 O=DigiCert, Inc.
+# Subject: CN=DigiCert TLS RSA4096 Root G5 O=DigiCert, Inc.
+# Label: "DigiCert TLS RSA4096 Root G5"
+# Serial: 11930366277458970227240571539258396554
+# MD5 Fingerprint: ac:fe:f7:34:96:a9:f2:b3:b4:12:4b:e4:27:41:6f:e1
+# SHA1 Fingerprint: a7:88:49:dc:5d:7c:75:8c:8c:de:39:98:56:b3:aa:d0:b2:a5:71:35
+# SHA256 Fingerprint: 37:1a:00:dc:05:33:b3:72:1a:7e:eb:40:e8:41:9e:70:79:9d:2b:0a:0f:2c:1d:80:69:31:65:f7:ce:c4:ad:75
+-----BEGIN CERTIFICATE-----
+MIIFZjCCA06gAwIBAgIQCPm0eKj6ftpqMzeJ3nzPijANBgkqhkiG9w0BAQwFADBN
+MQswCQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xJTAjBgNVBAMT
+HERpZ2lDZXJ0IFRMUyBSU0E0MDk2IFJvb3QgRzUwHhcNMjEwMTE1MDAwMDAwWhcN
+NDYwMTE0MjM1OTU5WjBNMQswCQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQs
+IEluYy4xJTAjBgNVBAMTHERpZ2lDZXJ0IFRMUyBSU0E0MDk2IFJvb3QgRzUwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCz0PTJeRGd/fxmgefM1eS87IE+
+ajWOLrfn3q/5B03PMJ3qCQuZvWxX2hhKuHisOjmopkisLnLlvevxGs3npAOpPxG0
+2C+JFvuUAT27L/gTBaF4HI4o4EXgg/RZG5Wzrn4DReW+wkL+7vI8toUTmDKdFqgp
+wgscONyfMXdcvyej/Cestyu9dJsXLfKB2l2w4SMXPohKEiPQ6s+d3gMXsUJKoBZM
+pG2T6T867jp8nVid9E6P/DsjyG244gXazOvswzH016cpVIDPRFtMbzCe88zdH5RD
+nU1/cHAN1DrRN/BsnZvAFJNY781BOHW8EwOVfH/jXOnVDdXifBBiqmvwPXbzP6Po
+sMH976pXTayGpxi0KcEsDr9kvimM2AItzVwv8n/vFfQMFawKsPHTDU9qTXeXAaDx
+Zre3zu/O7Oyldcqs4+Fj97ihBMi8ez9dLRYiVu1ISf6nL3kwJZu6ay0/nTvEF+cd
+Lvvyz6b84xQslpghjLSR6Rlgg/IwKwZzUNWYOwbpx4oMYIwo+FKbbuH2TbsGJJvX
+KyY//SovcfXWJL5/MZ4PbeiPT02jP/816t9JXkGPhvnxd3lLG7SjXi/7RgLQZhNe
+XoVPzthwiHvOAbWWl9fNff2C+MIkwcoBOU+NosEUQB+cZtUMCUbW8tDRSHZWOkPL
+tgoRObqME2wGtZ7P6wIDAQABo0IwQDAdBgNVHQ4EFgQUUTMc7TZArxfTJc1paPKv
+TiM+s0EwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcN
+AQEMBQADggIBAGCmr1tfV9qJ20tQqcQjNSH/0GEwhJG3PxDPJY7Jv0Y02cEhJhxw
+GXIeo8mH/qlDZJY6yFMECrZBu8RHANmfGBg7sg7zNOok992vIGCukihfNudd5N7H
+PNtQOa27PShNlnx2xlv0wdsUpasZYgcYQF+Xkdycx6u1UQ3maVNVzDl92sURVXLF
+O4uJ+DQtpBflF+aZfTCIITfNMBc9uPK8qHWgQ9w+iUuQrm0D4ByjoJYJu32jtyoQ
+REtGBzRj7TG5BO6jm5qu5jF49OokYTurWGT/u4cnYiWB39yhL/btp/96j1EuMPik
+AdKFOV8BmZZvWltwGUb+hmA+rYAQCd05JS9Yf7vSdPD3Rh9GOUrYU9DzLjtxpdRv
+/PNn5AeP3SYZ4Y1b+qOTEZvpyDrDVWiakuFSdjjo4bq9+0/V77PnSIMx8IIh47a+
+p6tv75/fTM8BuGJqIz3nCU2AG3swpMPdB380vqQmsvZB6Akd4yCYqjdP//fx4ilw
+MUc/dNAUFvohigLVigmUdy7yWSiLfFCSCmZ4OIN1xLVaqBHG5cGdZlXPU8Sv13WF
+qUITVuwhd4GTWgzqltlJyqEI8pc7bZsEGCREjnwB8twl2F6GmrE52/WRMmrRpnCK
+ovfepEWFJqgejF0pW8hL2JpqA15w8oVPbEtoL8pU9ozaMv7Da4M/OMZ+
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certainly Root R1 O=Certainly
+# Subject: CN=Certainly Root R1 O=Certainly
+# Label: "Certainly Root R1"
+# Serial: 188833316161142517227353805653483829216
+# MD5 Fingerprint: 07:70:d4:3e:82:87:a0:fa:33:36:13:f4:fa:33:e7:12
+# SHA1 Fingerprint: a0:50:ee:0f:28:71:f4:27:b2:12:6d:6f:50:96:25:ba:cc:86:42:af
+# SHA256 Fingerprint: 77:b8:2c:d8:64:4c:43:05:f7:ac:c5:cb:15:6b:45:67:50:04:03:3d:51:c6:0c:62:02:a8:e0:c3:34:67:d3:a0
+-----BEGIN CERTIFICATE-----
+MIIFRzCCAy+gAwIBAgIRAI4P+UuQcWhlM1T01EQ5t+AwDQYJKoZIhvcNAQELBQAw
+PTELMAkGA1UEBhMCVVMxEjAQBgNVBAoTCUNlcnRhaW5seTEaMBgGA1UEAxMRQ2Vy
+dGFpbmx5IFJvb3QgUjEwHhcNMjEwNDAxMDAwMDAwWhcNNDYwNDAxMDAwMDAwWjA9
+MQswCQYDVQQGEwJVUzESMBAGA1UEChMJQ2VydGFpbmx5MRowGAYDVQQDExFDZXJ0
+YWlubHkgUm9vdCBSMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANA2
+1B/q3avk0bbm+yLA3RMNansiExyXPGhjZjKcA7WNpIGD2ngwEc/csiu+kr+O5MQT
+vqRoTNoCaBZ0vrLdBORrKt03H2As2/X3oXyVtwxwhi7xOu9S98zTm/mLvg7fMbed
+aFySpvXl8wo0tf97ouSHocavFwDvA5HtqRxOcT3Si2yJ9HiG5mpJoM610rCrm/b0
+1C7jcvk2xusVtyWMOvwlDbMicyF0yEqWYZL1LwsYpfSt4u5BvQF5+paMjRcCMLT5
+r3gajLQ2EBAHBXDQ9DGQilHFhiZ5shGIXsXwClTNSaa/ApzSRKft43jvRl5tcdF5
+cBxGX1HpyTfcX35pe0HfNEXgO4T0oYoKNp43zGJS4YkNKPl6I7ENPT2a/Z2B7yyQ
+wHtETrtJ4A5KVpK8y7XdeReJkd5hiXSSqOMyhb5OhaRLWcsrxXiOcVTQAjeZjOVJ
+6uBUcqQRBi8LjMFbvrWhsFNunLhgkR9Za/kt9JQKl7XsxXYDVBtlUrpMklZRNaBA
+2CnbrlJ2Oy0wQJuK0EJWtLeIAaSHO1OWzaMWj/Nmqhexx2DgwUMFDO6bW2BvBlyH
+Wyf5QBGenDPBt+U1VwV/J84XIIwc/PH72jEpSe31C4SnT8H2TsIonPru4K8H+zMR
+eiFPCyEQtkA6qyI6BJyLm4SGcprSp6XEtHWRqSsjAgMBAAGjQjBAMA4GA1UdDwEB
+/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTgqj8ljZ9EXME66C6u
+d0yEPmcM9DANBgkqhkiG9w0BAQsFAAOCAgEAuVevuBLaV4OPaAszHQNTVfSVcOQr
+PbA56/qJYv331hgELyE03fFo8NWWWt7CgKPBjcZq91l3rhVkz1t5BXdm6ozTaw3d
+8VkswTOlMIAVRQdFGjEitpIAq5lNOo93r6kiyi9jyhXWx8bwPWz8HA2YEGGeEaIi
+1wrykXprOQ4vMMM2SZ/g6Q8CRFA3lFV96p/2O7qUpUzpvD5RtOjKkjZUbVwlKNrd
+rRT90+7iIgXr0PK3aBLXWopBGsaSpVo7Y0VPv+E6dyIvXL9G+VoDhRNCX8reU9di
+taY1BMJH/5n9hN9czulegChB8n3nHpDYT3Y+gjwN/KUD+nsa2UUeYNrEjvn8K8l7
+lcUq/6qJ34IxD3L/DCfXCh5WAFAeDJDBlrXYFIW7pw0WwfgHJBu6haEaBQmAupVj
+yTrsJZ9/nbqkRxWbRHDxakvWOF5D8xh+UG7pWijmZeZ3Gzr9Hb4DJqPb1OG7fpYn
+Kx3upPvaJVQTA945xsMfTZDsjxtK0hzthZU4UHlG1sGQUDGpXJpuHfUzVounmdLy
+yCwzk5Iwx06MZTMQZBf9JBeW0Y3COmor6xOLRPIh80oat3df1+2IpHLlOR+Vnb5n
+wXARPbv0+Em34yaXOp/SX3z7wJl8OSngex2/DaeP0ik0biQVy96QXr8axGbqwua6
+OV+KmalBWQewLK8=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certainly Root E1 O=Certainly
+# Subject: CN=Certainly Root E1 O=Certainly
+# Label: "Certainly Root E1"
+# Serial: 8168531406727139161245376702891150584
+# MD5 Fingerprint: 0a:9e:ca:cd:3e:52:50:c6:36:f3:4b:a3:ed:a7:53:e9
+# SHA1 Fingerprint: f9:e1:6d:dc:01:89:cf:d5:82:45:63:3e:c5:37:7d:c2:eb:93:6f:2b
+# SHA256 Fingerprint: b4:58:5f:22:e4:ac:75:6a:4e:86:12:a1:36:1c:5d:9d:03:1a:93:fd:84:fe:bb:77:8f:a3:06:8b:0f:c4:2d:c2
+-----BEGIN CERTIFICATE-----
+MIIB9zCCAX2gAwIBAgIQBiUzsUcDMydc+Y2aub/M+DAKBggqhkjOPQQDAzA9MQsw
+CQYDVQQGEwJVUzESMBAGA1UEChMJQ2VydGFpbmx5MRowGAYDVQQDExFDZXJ0YWlu
+bHkgUm9vdCBFMTAeFw0yMTA0MDEwMDAwMDBaFw00NjA0MDEwMDAwMDBaMD0xCzAJ
+BgNVBAYTAlVTMRIwEAYDVQQKEwlDZXJ0YWlubHkxGjAYBgNVBAMTEUNlcnRhaW5s
+eSBSb290IEUxMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE3m/4fxzf7flHh4axpMCK
++IKXgOqPyEpeKn2IaKcBYhSRJHpcnqMXfYqGITQYUBsQ3tA3SybHGWCA6TS9YBk2
+QNYphwk8kXr2vBMj3VlOBF7PyAIcGFPBMdjaIOlEjeR2o0IwQDAOBgNVHQ8BAf8E
+BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU8ygYy2R17ikq6+2uI1g4
+hevIIgcwCgYIKoZIzj0EAwMDaAAwZQIxALGOWiDDshliTd6wT99u0nCK8Z9+aozm
+ut6Dacpps6kFtZaSF4fC0urQe87YQVt8rgIwRt7qy12a7DLCZRawTDBcMPPaTnOG
+BtjOiQRINzf43TNRnXCve1XYAS59BWQOhriR
+-----END CERTIFICATE-----
+
+# Issuer: CN=Security Communication RootCA3 O=SECOM Trust Systems CO.,LTD.
+# Subject: CN=Security Communication RootCA3 O=SECOM Trust Systems CO.,LTD.
+# Label: "Security Communication RootCA3"
+# Serial: 16247922307909811815
+# MD5 Fingerprint: 1c:9a:16:ff:9e:5c:e0:4d:8a:14:01:f4:35:5d:29:26
+# SHA1 Fingerprint: c3:03:c8:22:74:92:e5:61:a2:9c:5f:79:91:2b:1e:44:13:91:30:3a
+# SHA256 Fingerprint: 24:a5:5c:2a:b0:51:44:2d:06:17:76:65:41:23:9a:4a:d0:32:d7:c5:51:75:aa:34:ff:de:2f:bc:4f:5c:52:94
+-----BEGIN CERTIFICATE-----
+MIIFfzCCA2egAwIBAgIJAOF8N0D9G/5nMA0GCSqGSIb3DQEBDAUAMF0xCzAJBgNV
+BAYTAkpQMSUwIwYDVQQKExxTRUNPTSBUcnVzdCBTeXN0ZW1zIENPLixMVEQuMScw
+JQYDVQQDEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTMwHhcNMTYwNjE2
+MDYxNzE2WhcNMzgwMTE4MDYxNzE2WjBdMQswCQYDVQQGEwJKUDElMCMGA1UEChMc
+U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UEAxMeU2VjdXJpdHkg
+Q29tbXVuaWNhdGlvbiBSb290Q0EzMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC
+CgKCAgEA48lySfcw3gl8qUCBWNO0Ot26YQ+TUG5pPDXC7ltzkBtnTCHsXzW7OT4r
+CmDvu20rhvtxosis5FaU+cmvsXLUIKx00rgVrVH+hXShuRD+BYD5UpOzQD11EKzA
+lrenfna84xtSGc4RHwsENPXY9Wk8d/Nk9A2qhd7gCVAEF5aEt8iKvE1y/By7z/MG
+TfmfZPd+pmaGNXHIEYBMwXFAWB6+oHP2/D5Q4eAvJj1+XCO1eXDe+uDRpdYMQXF7
+9+qMHIjH7Iv10S9VlkZ8WjtYO/u62C21Jdp6Ts9EriGmnpjKIG58u4iFW/vAEGK7
+8vknR+/RiTlDxN/e4UG/VHMgly1s2vPUB6PmudhvrvyMGS7TZ2crldtYXLVqAvO4
+g160a75BflcJdURQVc1aEWEhCmHCqYj9E7wtiS/NYeCVvsq1e+F7NGcLH7YMx3we
+GVPKp7FKFSBWFHA9K4IsD50VHUeAR/94mQ4xr28+j+2GaR57GIgUssL8gjMunEst
++3A7caoreyYn8xrC3PsXuKHqy6C0rtOUfnrQq8PsOC0RLoi/1D+tEjtCrI8Cbn3M
+0V9hvqG8OmpI6iZVIhZdXw3/JzOfGAN0iltSIEdrRU0id4xVJ/CvHozJgyJUt5rQ
+T9nO/NkuHJYosQLTA70lUhw0Zk8jq/R3gpYd0VcwCBEF/VfR2ccCAwEAAaNCMEAw
+HQYDVR0OBBYEFGQUfPxYchamCik0FW8qy7z8r6irMA4GA1UdDwEB/wQEAwIBBjAP
+BgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBDAUAA4ICAQDcAiMI4u8hOscNtybS
+YpOnpSNyByCCYN8Y11StaSWSntkUz5m5UoHPrmyKO1o5yGwBQ8IibQLwYs1OY0PA
+FNr0Y/Dq9HHuTofjcan0yVflLl8cebsjqodEV+m9NU1Bu0soo5iyG9kLFwfl9+qd
+9XbXv8S2gVj/yP9kaWJ5rW4OH3/uHWnlt3Jxs/6lATWUVCvAUm2PVcTJ0rjLyjQI
+UYWg9by0F1jqClx6vWPGOi//lkkZhOpn2ASxYfQAW0q3nHE3GYV5v4GwxxMOdnE+
+OoAGrgYWp421wsTL/0ClXI2lyTrtcoHKXJg80jQDdwj98ClZXSEIx2C/pHF7uNke
+gr4Jr2VvKKu/S7XuPghHJ6APbw+LP6yVGPO5DtxnVW5inkYO0QR4ynKudtml+LLf
+iAlhi+8kTtFZP1rUPcmTPCtk9YENFpb3ksP+MW/oKjJ0DvRMmEoYDjBU1cXrvMUV
+nuiZIesnKwkK2/HmcBhWuwzkvvnoEKQTkrgc4NtnHVMDpCKn3F2SEDzq//wbEBrD
+2NCcnWXL0CsnMQMeNuE9dnUM/0Umud1RvCPHX9jYhxBAEg09ODfnRDwYwFMJZI//
+1ZqmfHAuc1Uh6N//g7kdPjIe1qZ9LPFm6Vwdp6POXiUyK+OVrCoHzrQoeIY8Laad
+TdJ0MN1kURXbg4NR16/9M51NZg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Security Communication ECC RootCA1 O=SECOM Trust Systems CO.,LTD.
+# Subject: CN=Security Communication ECC RootCA1 O=SECOM Trust Systems CO.,LTD.
+# Label: "Security Communication ECC RootCA1"
+# Serial: 15446673492073852651
+# MD5 Fingerprint: 7e:43:b0:92:68:ec:05:43:4c:98:ab:5d:35:2e:7e:86
+# SHA1 Fingerprint: b8:0e:26:a9:bf:d2:b2:3b:c0:ef:46:c9:ba:c7:bb:f6:1d:0d:41:41
+# SHA256 Fingerprint: e7:4f:bd:a5:5b:d5:64:c4:73:a3:6b:44:1a:a7:99:c8:a6:8e:07:74:40:e8:28:8b:9f:a1:e5:0e:4b:ba:ca:11
+-----BEGIN CERTIFICATE-----
+MIICODCCAb6gAwIBAgIJANZdm7N4gS7rMAoGCCqGSM49BAMDMGExCzAJBgNVBAYT
+AkpQMSUwIwYDVQQKExxTRUNPTSBUcnVzdCBTeXN0ZW1zIENPLixMVEQuMSswKQYD
+VQQDEyJTZWN1cml0eSBDb21tdW5pY2F0aW9uIEVDQyBSb290Q0ExMB4XDTE2MDYx
+NjA1MTUyOFoXDTM4MDExODA1MTUyOFowYTELMAkGA1UEBhMCSlAxJTAjBgNVBAoT
+HFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xKzApBgNVBAMTIlNlY3VyaXR5
+IENvbW11bmljYXRpb24gRUNDIFJvb3RDQTEwdjAQBgcqhkjOPQIBBgUrgQQAIgNi
+AASkpW9gAwPDvTH00xecK4R1rOX9PVdu12O/5gSJko6BnOPpR27KkBLIE+Cnnfdl
+dB9sELLo5OnvbYUymUSxXv3MdhDYW72ixvnWQuRXdtyQwjWpS4g8EkdtXP9JTxpK
+ULGjQjBAMB0GA1UdDgQWBBSGHOf+LaVKiwj+KBH6vqNm+GBZLzAOBgNVHQ8BAf8E
+BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjAVXUI9/Lbu
+9zuxNuie9sRGKEkz0FhDKmMpzE2xtHqiuQ04pV1IKv3LsnNdo4gIxwwCMQDAqy0O
+be0YottT6SXbVQjgUMzfRGEWgqtJsLKB7HOHeLRMsmIbEvoWTSVLY70eN9k=
+-----END CERTIFICATE-----
+
+# Issuer: CN=BJCA Global Root CA1 O=BEIJING CERTIFICATE AUTHORITY
+# Subject: CN=BJCA Global Root CA1 O=BEIJING CERTIFICATE AUTHORITY
+# Label: "BJCA Global Root CA1"
+# Serial: 113562791157148395269083148143378328608
+# MD5 Fingerprint: 42:32:99:76:43:33:36:24:35:07:82:9b:28:f9:d0:90
+# SHA1 Fingerprint: d5:ec:8d:7b:4c:ba:79:f4:e7:e8:cb:9d:6b:ae:77:83:10:03:21:6a
+# SHA256 Fingerprint: f3:89:6f:88:fe:7c:0a:88:27:66:a7:fa:6a:d2:74:9f:b5:7a:7f:3e:98:fb:76:9c:1f:a7:b0:9c:2c:44:d5:ae
+-----BEGIN CERTIFICATE-----
+MIIFdDCCA1ygAwIBAgIQVW9l47TZkGobCdFsPsBsIDANBgkqhkiG9w0BAQsFADBU
+MQswCQYDVQQGEwJDTjEmMCQGA1UECgwdQkVJSklORyBDRVJUSUZJQ0FURSBBVVRI
+T1JJVFkxHTAbBgNVBAMMFEJKQ0EgR2xvYmFsIFJvb3QgQ0ExMB4XDTE5MTIxOTAz
+MTYxN1oXDTQ0MTIxMjAzMTYxN1owVDELMAkGA1UEBhMCQ04xJjAkBgNVBAoMHUJF
+SUpJTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZMR0wGwYDVQQDDBRCSkNBIEdsb2Jh
+bCBSb290IENBMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAPFmCL3Z
+xRVhy4QEQaVpN3cdwbB7+sN3SJATcmTRuHyQNZ0YeYjjlwE8R4HyDqKYDZ4/N+AZ
+spDyRhySsTphzvq3Rp4Dhtczbu33RYx2N95ulpH3134rhxfVizXuhJFyV9xgw8O5
+58dnJCNPYwpj9mZ9S1WnP3hkSWkSl+BMDdMJoDIwOvqfwPKcxRIqLhy1BDPapDgR
+at7GGPZHOiJBhyL8xIkoVNiMpTAK+BcWyqw3/XmnkRd4OJmtWO2y3syJfQOcs4ll
+5+M7sSKGjwZteAf9kRJ/sGsciQ35uMt0WwfCyPQ10WRjeulumijWML3mG90Vr4Tq
+nMfK9Q7q8l0ph49pczm+LiRvRSGsxdRpJQaDrXpIhRMsDQa4bHlW/KNnMoH1V6XK
+V0Jp6VwkYe/iMBhORJhVb3rCk9gZtt58R4oRTklH2yiUAguUSiz5EtBP6DF+bHq/
+pj+bOT0CFqMYs2esWz8sgytnOYFcuX6U1WTdno9uruh8W7TXakdI136z1C2OVnZO
+z2nxbkRs1CTqjSShGL+9V/6pmTW12xB3uD1IutbB5/EjPtffhZ0nPNRAvQoMvfXn
+jSXWgXSHRtQpdaJCbPdzied9v3pKH9MiyRVVz99vfFXQpIsHETdfg6YmV6YBW37+
+WGgHqel62bno/1Afq8K0wM7o6v0PvY1NuLxxAgMBAAGjQjBAMB0GA1UdDgQWBBTF
+7+3M2I0hxkjk49cULqcWk+WYATAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQE
+AwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAUoKsITQfI/Ki2Pm4rzc2IInRNwPWaZ+4
+YRC6ojGYWUfo0Q0lHhVBDOAqVdVXUsv45Mdpox1NcQJeXyFFYEhcCY5JEMEE3Kli
+awLwQ8hOnThJdMkycFRtwUf8jrQ2ntScvd0g1lPJGKm1Vrl2i5VnZu69mP6u775u
++2D2/VnGKhs/I0qUJDAnyIm860Qkmss9vk/Ves6OF8tiwdneHg56/0OGNFK8YT88
+X7vZdrRTvJez/opMEi4r89fO4aL/3Xtw+zuhTaRjAv04l5U/BXCga99igUOLtFkN
+SoxUnMW7gZ/NfaXvCyUeOiDbHPwfmGcCCtRzRBPbUYQaVQNW4AB+dAb/OMRyHdOo
+P2gxXdMJxy6MW2Pg6Nwe0uxhHvLe5e/2mXZgLR6UcnHGCyoyx5JO1UbXHfmpGQrI
++pXObSOYqgs4rZpWDW+N8TEAiMEXnM0ZNjX+VVOg4DwzX5Ze4jLp3zO7Bkqp2IRz
+znfSxqxx4VyjHQy7Ct9f4qNx2No3WqB4K/TUfet27fJhcKVlmtOJNBir+3I+17Q9
+eVzYH6Eze9mCUAyTF6ps3MKCuwJXNq+YJyo5UOGwifUll35HaBC07HPKs5fRJNz2
+YqAo07WjuGS3iGJCz51TzZm+ZGiPTx4SSPfSKcOYKMryMguTjClPPGAyzQWWYezy
+r/6zcCwupvI=
+-----END CERTIFICATE-----
+
+# Issuer: CN=BJCA Global Root CA2 O=BEIJING CERTIFICATE AUTHORITY
+# Subject: CN=BJCA Global Root CA2 O=BEIJING CERTIFICATE AUTHORITY
+# Label: "BJCA Global Root CA2"
+# Serial: 58605626836079930195615843123109055211
+# MD5 Fingerprint: 5e:0a:f6:47:5f:a6:14:e8:11:01:95:3f:4d:01:eb:3c
+# SHA1 Fingerprint: f4:27:86:eb:6e:b8:6d:88:31:67:02:fb:ba:66:a4:53:00:aa:7a:a6
+# SHA256 Fingerprint: 57:4d:f6:93:1e:27:80:39:66:7b:72:0a:fd:c1:60:0f:c2:7e:b6:6d:d3:09:29:79:fb:73:85:64:87:21:28:82
+-----BEGIN CERTIFICATE-----
+MIICJTCCAaugAwIBAgIQLBcIfWQqwP6FGFkGz7RK6zAKBggqhkjOPQQDAzBUMQsw
+CQYDVQQGEwJDTjEmMCQGA1UECgwdQkVJSklORyBDRVJUSUZJQ0FURSBBVVRIT1JJ
+VFkxHTAbBgNVBAMMFEJKQ0EgR2xvYmFsIFJvb3QgQ0EyMB4XDTE5MTIxOTAzMTgy
+MVoXDTQ0MTIxMjAzMTgyMVowVDELMAkGA1UEBhMCQ04xJjAkBgNVBAoMHUJFSUpJ
+TkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZMR0wGwYDVQQDDBRCSkNBIEdsb2JhbCBS
+b290IENBMjB2MBAGByqGSM49AgEGBSuBBAAiA2IABJ3LgJGNU2e1uVCxA/jlSR9B
+IgmwUVJY1is0j8USRhTFiy8shP8sbqjV8QnjAyEUxEM9fMEsxEtqSs3ph+B99iK+
++kpRuDCK/eHeGBIK9ke35xe/J4rUQUyWPGCWwf0VHKNCMEAwHQYDVR0OBBYEFNJK
+sVF/BvDRgh9Obl+rg/xI1LCRMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD
+AgEGMAoGCCqGSM49BAMDA2gAMGUCMBq8W9f+qdJUDkpd0m2xQNz0Q9XSSpkZElaA
+94M04TVOSG0ED1cxMDAtsaqdAzjbBgIxAMvMh1PLet8gUXOQwKhbYdDFUDn9hf7B
+43j4ptZLvZuHjw/l1lOWqzzIQNph91Oj9w==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Sectigo Public Server Authentication Root E46 O=Sectigo Limited
+# Subject: CN=Sectigo Public Server Authentication Root E46 O=Sectigo Limited
+# Label: "Sectigo Public Server Authentication Root E46"
+# Serial: 88989738453351742415770396670917916916
+# MD5 Fingerprint: 28:23:f8:b2:98:5c:37:16:3b:3e:46:13:4e:b0:b3:01
+# SHA1 Fingerprint: ec:8a:39:6c:40:f0:2e:bc:42:75:d4:9f:ab:1c:1a:5b:67:be:d2:9a
+# SHA256 Fingerprint: c9:0f:26:f0:fb:1b:40:18:b2:22:27:51:9b:5c:a2:b5:3e:2c:a5:b3:be:5c:f1:8e:fe:1b:ef:47:38:0c:53:83
+-----BEGIN CERTIFICATE-----
+MIICOjCCAcGgAwIBAgIQQvLM2htpN0RfFf51KBC49DAKBggqhkjOPQQDAzBfMQsw
+CQYDVQQGEwJHQjEYMBYGA1UEChMPU2VjdGlnbyBMaW1pdGVkMTYwNAYDVQQDEy1T
+ZWN0aWdvIFB1YmxpYyBTZXJ2ZXIgQXV0aGVudGljYXRpb24gUm9vdCBFNDYwHhcN
+MjEwMzIyMDAwMDAwWhcNNDYwMzIxMjM1OTU5WjBfMQswCQYDVQQGEwJHQjEYMBYG
+A1UEChMPU2VjdGlnbyBMaW1pdGVkMTYwNAYDVQQDEy1TZWN0aWdvIFB1YmxpYyBT
+ZXJ2ZXIgQXV0aGVudGljYXRpb24gUm9vdCBFNDYwdjAQBgcqhkjOPQIBBgUrgQQA
+IgNiAAR2+pmpbiDt+dd34wc7qNs9Xzjoq1WmVk/WSOrsfy2qw7LFeeyZYX8QeccC
+WvkEN/U0NSt3zn8gj1KjAIns1aeibVvjS5KToID1AZTc8GgHHs3u/iVStSBDHBv+
+6xnOQ6OjQjBAMB0GA1UdDgQWBBTRItpMWfFLXyY4qp3W7usNw/upYTAOBgNVHQ8B
+Af8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNnADBkAjAn7qRa
+qCG76UeXlImldCBteU/IvZNeWBj7LRoAasm4PdCkT0RHlAFWovgzJQxC36oCMB3q
+4S6ILuH5px0CMk7yn2xVdOOurvulGu7t0vzCAxHrRVxgED1cf5kDW21USAGKcw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Sectigo Public Server Authentication Root R46 O=Sectigo Limited
+# Subject: CN=Sectigo Public Server Authentication Root R46 O=Sectigo Limited
+# Label: "Sectigo Public Server Authentication Root R46"
+# Serial: 156256931880233212765902055439220583700
+# MD5 Fingerprint: 32:10:09:52:00:d5:7e:6c:43:df:15:c0:b1:16:93:e5
+# SHA1 Fingerprint: ad:98:f9:f3:e4:7d:75:3b:65:d4:82:b3:a4:52:17:bb:6e:f5:e4:38
+# SHA256 Fingerprint: 7b:b6:47:a6:2a:ee:ac:88:bf:25:7a:a5:22:d0:1f:fe:a3:95:e0:ab:45:c7:3f:93:f6:56:54:ec:38:f2:5a:06
+-----BEGIN CERTIFICATE-----
+MIIFijCCA3KgAwIBAgIQdY39i658BwD6qSWn4cetFDANBgkqhkiG9w0BAQwFADBf
+MQswCQYDVQQGEwJHQjEYMBYGA1UEChMPU2VjdGlnbyBMaW1pdGVkMTYwNAYDVQQD
+Ey1TZWN0aWdvIFB1YmxpYyBTZXJ2ZXIgQXV0aGVudGljYXRpb24gUm9vdCBSNDYw
+HhcNMjEwMzIyMDAwMDAwWhcNNDYwMzIxMjM1OTU5WjBfMQswCQYDVQQGEwJHQjEY
+MBYGA1UEChMPU2VjdGlnbyBMaW1pdGVkMTYwNAYDVQQDEy1TZWN0aWdvIFB1Ymxp
+YyBTZXJ2ZXIgQXV0aGVudGljYXRpb24gUm9vdCBSNDYwggIiMA0GCSqGSIb3DQEB
+AQUAA4ICDwAwggIKAoICAQCTvtU2UnXYASOgHEdCSe5jtrch/cSV1UgrJnwUUxDa
+ef0rty2k1Cz66jLdScK5vQ9IPXtamFSvnl0xdE8H/FAh3aTPaE8bEmNtJZlMKpnz
+SDBh+oF8HqcIStw+KxwfGExxqjWMrfhu6DtK2eWUAtaJhBOqbchPM8xQljeSM9xf
+iOefVNlI8JhD1mb9nxc4Q8UBUQvX4yMPFF1bFOdLvt30yNoDN9HWOaEhUTCDsG3X
+ME6WW5HwcCSrv0WBZEMNvSE6Lzzpng3LILVCJ8zab5vuZDCQOc2TZYEhMbUjUDM3
+IuM47fgxMMxF/mL50V0yeUKH32rMVhlATc6qu/m1dkmU8Sf4kaWD5QazYw6A3OAS
+VYCmO2a0OYctyPDQ0RTp5A1NDvZdV3LFOxxHVp3i1fuBYYzMTYCQNFu31xR13NgE
+SJ/AwSiItOkcyqex8Va3e0lMWeUgFaiEAin6OJRpmkkGj80feRQXEgyDet4fsZfu
++Zd4KKTIRJLpfSYFplhym3kT2BFfrsU4YjRosoYwjviQYZ4ybPUHNs2iTG7sijbt
+8uaZFURww3y8nDnAtOFr94MlI1fZEoDlSfB1D++N6xybVCi0ITz8fAr/73trdf+L
+HaAZBav6+CuBQug4urv7qv094PPK306Xlynt8xhW6aWWrL3DkJiy4Pmi1KZHQ3xt
+zwIDAQABo0IwQDAdBgNVHQ4EFgQUVnNYZJX5khqwEioEYnmhQBWIIUkwDgYDVR0P
+AQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAC9c
+mTz8Bl6MlC5w6tIyMY208FHVvArzZJ8HXtXBc2hkeqK5Duj5XYUtqDdFqij0lgVQ
+YKlJfp/imTYpE0RHap1VIDzYm/EDMrraQKFz6oOht0SmDpkBm+S8f74TlH7Kph52
+gDY9hAaLMyZlbcp+nv4fjFg4exqDsQ+8FxG75gbMY/qB8oFM2gsQa6H61SilzwZA
+Fv97fRheORKkU55+MkIQpiGRqRxOF3yEvJ+M0ejf5lG5Nkc/kLnHvALcWxxPDkjB
+JYOcCj+esQMzEhonrPcibCTRAUH4WAP+JWgiH5paPHxsnnVI84HxZmduTILA7rpX
+DhjvLpr3Etiga+kFpaHpaPi8TD8SHkXoUsCjvxInebnMMTzD9joiFgOgyY9mpFui
+TdaBJQbpdqQACj7LzTWb4OE4y2BThihCQRxEV+ioratF4yUQvNs+ZUH7G6aXD+u5
+dHn5HrwdVw1Hr8Mvn4dGp+smWg9WY7ViYG4A++MnESLn/pmPNPW56MORcr3Ywx65
+LvKRRFHQV80MNNVIIb/bE/FmJUNS0nAiNs2fxBx1IK1jcmMGDw4nztJqDby1ORrp
+0XZ60Vzk50lJLVU3aPAaOpg+VBeHVOmmJ1CJeyAvP/+/oYtKR5j/K3tJPsMpRmAY
+QqszKbrAKbkTidOIijlBO8n9pu0f9GBj39ItVQGL
+-----END CERTIFICATE-----
+
+# Issuer: CN=SSL.com TLS RSA Root CA 2022 O=SSL Corporation
+# Subject: CN=SSL.com TLS RSA Root CA 2022 O=SSL Corporation
+# Label: "SSL.com TLS RSA Root CA 2022"
+# Serial: 148535279242832292258835760425842727825
+# MD5 Fingerprint: d8:4e:c6:59:30:d8:fe:a0:d6:7a:5a:2c:2c:69:78:da
+# SHA1 Fingerprint: ec:2c:83:40:72:af:26:95:10:ff:0e:f2:03:ee:31:70:f6:78:9d:ca
+# SHA256 Fingerprint: 8f:af:7d:2e:2c:b4:70:9b:b8:e0:b3:36:66:bf:75:a5:dd:45:b5:de:48:0f:8e:a8:d4:bf:e6:be:bc:17:f2:ed
+-----BEGIN CERTIFICATE-----
+MIIFiTCCA3GgAwIBAgIQb77arXO9CEDii02+1PdbkTANBgkqhkiG9w0BAQsFADBO
+MQswCQYDVQQGEwJVUzEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMSUwIwYDVQQD
+DBxTU0wuY29tIFRMUyBSU0EgUm9vdCBDQSAyMDIyMB4XDTIyMDgyNTE2MzQyMloX
+DTQ2MDgxOTE2MzQyMVowTjELMAkGA1UEBhMCVVMxGDAWBgNVBAoMD1NTTCBDb3Jw
+b3JhdGlvbjElMCMGA1UEAwwcU1NMLmNvbSBUTFMgUlNBIFJvb3QgQ0EgMjAyMjCC
+AiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANCkCXJPQIgSYT41I57u9nTP
+L3tYPc48DRAokC+X94xI2KDYJbFMsBFMF3NQ0CJKY7uB0ylu1bUJPiYYf7ISf5OY
+t6/wNr/y7hienDtSxUcZXXTzZGbVXcdotL8bHAajvI9AI7YexoS9UcQbOcGV0ins
+S657Lb85/bRi3pZ7QcacoOAGcvvwB5cJOYF0r/c0WRFXCsJbwST0MXMwgsadugL3
+PnxEX4MN8/HdIGkWCVDi1FW24IBydm5MR7d1VVm0U3TZlMZBrViKMWYPHqIbKUBO
+L9975hYsLfy/7PO0+r4Y9ptJ1O4Fbtk085zx7AGL0SDGD6C1vBdOSHtRwvzpXGk3
+R2azaPgVKPC506QVzFpPulJwoxJF3ca6TvvC0PeoUidtbnm1jPx7jMEWTO6Af77w
+dr5BUxIzrlo4QqvXDz5BjXYHMtWrifZOZ9mxQnUjbvPNQrL8VfVThxc7wDNY8VLS
++YCk8OjwO4s4zKTGkH8PnP2L0aPP2oOnaclQNtVcBdIKQXTbYxE3waWglksejBYS
+d66UNHsef8JmAOSqg+qKkK3ONkRN0VHpvB/zagX9wHQfJRlAUW7qglFA35u5CCoG
+AtUjHBPW6dvbxrB6y3snm/vg1UYk7RBLY0ulBY+6uB0rpvqR4pJSvezrZ5dtmi2f
+gTIFZzL7SAg/2SW4BCUvAgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0j
+BBgwFoAU+y437uOEeicuzRk1sTN8/9REQrkwHQYDVR0OBBYEFPsuN+7jhHonLs0Z
+NbEzfP/UREK5MA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAjYlt
+hEUY8U+zoO9opMAdrDC8Z2awms22qyIZZtM7QbUQnRC6cm4pJCAcAZli05bg4vsM
+QtfhWsSWTVTNj8pDU/0quOr4ZcoBwq1gaAafORpR2eCNJvkLTqVTJXojpBzOCBvf
+R4iyrT7gJ4eLSYwfqUdYe5byiB0YrrPRpgqU+tvT5TgKa3kSM/tKWTcWQA673vWJ
+DPFs0/dRa1419dvAJuoSc06pkZCmF8NsLzjUo3KUQyxi4U5cMj29TH0ZR6LDSeeW
+P4+a0zvkEdiLA9z2tmBVGKaBUfPhqBVq6+AL8BQx1rmMRTqoENjwuSfr98t67wVy
+lrXEj5ZzxOhWc5y8aVFjvO9nHEMaX3cZHxj4HCUp+UmZKbaSPaKDN7EgkaibMOlq
+bLQjk2UEqxHzDh1TJElTHaE/nUiSEeJ9DU/1172iWD54nR4fK/4huxoTtrEoZP2w
+AgDHbICivRZQIA9ygV/MlP+7mea6kMvq+cYMwq7FGc4zoWtcu358NFcXrfA/rs3q
+r5nsLFR+jM4uElZI7xc7P0peYNLcdDa8pUNjyw9bowJWCZ4kLOGGgYz+qxcs+sji
+Mho6/4UIyYOf8kpIEFR3N+2ivEC+5BB09+Rbu7nzifmPQdjH5FCQNYA+HLhNkNPU
+98OwoX6EyneSMSy4kLGCenROmxMmtNVQZlR4rmA=
+-----END CERTIFICATE-----
+
+# Issuer: CN=SSL.com TLS ECC Root CA 2022 O=SSL Corporation
+# Subject: CN=SSL.com TLS ECC Root CA 2022 O=SSL Corporation
+# Label: "SSL.com TLS ECC Root CA 2022"
+# Serial: 26605119622390491762507526719404364228
+# MD5 Fingerprint: 99:d7:5c:f1:51:36:cc:e9:ce:d9:19:2e:77:71:56:c5
+# SHA1 Fingerprint: 9f:5f:d9:1a:54:6d:f5:0c:71:f0:ee:7a:bd:17:49:98:84:73:e2:39
+# SHA256 Fingerprint: c3:2f:fd:9f:46:f9:36:d1:6c:36:73:99:09:59:43:4b:9a:d6:0a:af:bb:9e:7c:f3:36:54:f1:44:cc:1b:a1:43
+-----BEGIN CERTIFICATE-----
+MIICOjCCAcCgAwIBAgIQFAP1q/s3ixdAW+JDsqXRxDAKBggqhkjOPQQDAzBOMQsw
+CQYDVQQGEwJVUzEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMSUwIwYDVQQDDBxT
+U0wuY29tIFRMUyBFQ0MgUm9vdCBDQSAyMDIyMB4XDTIyMDgyNTE2MzM0OFoXDTQ2
+MDgxOTE2MzM0N1owTjELMAkGA1UEBhMCVVMxGDAWBgNVBAoMD1NTTCBDb3Jwb3Jh
+dGlvbjElMCMGA1UEAwwcU1NMLmNvbSBUTFMgRUNDIFJvb3QgQ0EgMjAyMjB2MBAG
+ByqGSM49AgEGBSuBBAAiA2IABEUpNXP6wrgjzhR9qLFNoFs27iosU8NgCTWyJGYm
+acCzldZdkkAZDsalE3D07xJRKF3nzL35PIXBz5SQySvOkkJYWWf9lCcQZIxPBLFN
+SeR7T5v15wj4A4j3p8OSSxlUgaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSME
+GDAWgBSJjy+j6CugFFR781a4Jl9nOAuc0DAdBgNVHQ4EFgQUiY8vo+groBRUe/NW
+uCZfZzgLnNAwDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMDA2gAMGUCMFXjIlbp
+15IkWE8elDIPDAI2wv2sdDJO4fscgIijzPvX6yv/N33w7deedWo1dlJF4AIxAMeN
+b0Igj762TVntd00pxCAgRWSGOlDGxK0tk/UYfXLtqc/ErFc2KAhl3zx5Zn6g6g==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Atos TrustedRoot Root CA ECC TLS 2021 O=Atos
+# Subject: CN=Atos TrustedRoot Root CA ECC TLS 2021 O=Atos
+# Label: "Atos TrustedRoot Root CA ECC TLS 2021"
+# Serial: 81873346711060652204712539181482831616
+# MD5 Fingerprint: 16:9f:ad:f1:70:ad:79:d6:ed:29:b4:d1:c5:79:70:a8
+# SHA1 Fingerprint: 9e:bc:75:10:42:b3:02:f3:81:f4:f7:30:62:d4:8f:c3:a7:51:b2:dd
+# SHA256 Fingerprint: b2:fa:e5:3e:14:cc:d7:ab:92:12:06:47:01:ae:27:9c:1d:89:88:fa:cb:77:5f:a8:a0:08:91:4e:66:39:88:a8
+-----BEGIN CERTIFICATE-----
+MIICFTCCAZugAwIBAgIQPZg7pmY9kGP3fiZXOATvADAKBggqhkjOPQQDAzBMMS4w
+LAYDVQQDDCVBdG9zIFRydXN0ZWRSb290IFJvb3QgQ0EgRUNDIFRMUyAyMDIxMQ0w
+CwYDVQQKDARBdG9zMQswCQYDVQQGEwJERTAeFw0yMTA0MjIwOTI2MjNaFw00MTA0
+MTcwOTI2MjJaMEwxLjAsBgNVBAMMJUF0b3MgVHJ1c3RlZFJvb3QgUm9vdCBDQSBF
+Q0MgVExTIDIwMjExDTALBgNVBAoMBEF0b3MxCzAJBgNVBAYTAkRFMHYwEAYHKoZI
+zj0CAQYFK4EEACIDYgAEloZYKDcKZ9Cg3iQZGeHkBQcfl+3oZIK59sRxUM6KDP/X
+tXa7oWyTbIOiaG6l2b4siJVBzV3dscqDY4PMwL502eCdpO5KTlbgmClBk1IQ1SQ4
+AjJn8ZQSb+/Xxd4u/RmAo0IwQDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBR2
+KCXWfeBmmnoJsmo7jjPXNtNPojAOBgNVHQ8BAf8EBAMCAYYwCgYIKoZIzj0EAwMD
+aAAwZQIwW5kp85wxtolrbNa9d+F851F+uDrNozZffPc8dz7kUK2o59JZDCaOMDtu
+CCrCp1rIAjEAmeMM56PDr9NJLkaCI2ZdyQAUEv049OGYa3cpetskz2VAv9LcjBHo
+9H1/IISpQuQo
+-----END CERTIFICATE-----
+
+# Issuer: CN=Atos TrustedRoot Root CA RSA TLS 2021 O=Atos
+# Subject: CN=Atos TrustedRoot Root CA RSA TLS 2021 O=Atos
+# Label: "Atos TrustedRoot Root CA RSA TLS 2021"
+# Serial: 111436099570196163832749341232207667876
+# MD5 Fingerprint: d4:d3:46:b8:9a:c0:9c:76:5d:9e:3a:c3:b9:99:31:d2
+# SHA1 Fingerprint: 18:52:3b:0d:06:37:e4:d6:3a:df:23:e4:98:fb:5b:16:fb:86:74:48
+# SHA256 Fingerprint: 81:a9:08:8e:a5:9f:b3:64:c5:48:a6:f8:55:59:09:9b:6f:04:05:ef:bf:18:e5:32:4e:c9:f4:57:ba:00:11:2f
+-----BEGIN CERTIFICATE-----
+MIIFZDCCA0ygAwIBAgIQU9XP5hmTC/srBRLYwiqipDANBgkqhkiG9w0BAQwFADBM
+MS4wLAYDVQQDDCVBdG9zIFRydXN0ZWRSb290IFJvb3QgQ0EgUlNBIFRMUyAyMDIx
+MQ0wCwYDVQQKDARBdG9zMQswCQYDVQQGEwJERTAeFw0yMTA0MjIwOTIxMTBaFw00
+MTA0MTcwOTIxMDlaMEwxLjAsBgNVBAMMJUF0b3MgVHJ1c3RlZFJvb3QgUm9vdCBD
+QSBSU0EgVExTIDIwMjExDTALBgNVBAoMBEF0b3MxCzAJBgNVBAYTAkRFMIICIjAN
+BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAtoAOxHm9BYx9sKOdTSJNy/BBl01Z
+4NH+VoyX8te9j2y3I49f1cTYQcvyAh5x5en2XssIKl4w8i1mx4QbZFc4nXUtVsYv
+Ye+W/CBGvevUez8/fEc4BKkbqlLfEzfTFRVOvV98r61jx3ncCHvVoOX3W3WsgFWZ
+kmGbzSoXfduP9LVq6hdKZChmFSlsAvFr1bqjM9xaZ6cF4r9lthawEO3NUDPJcFDs
+GY6wx/J0W2tExn2WuZgIWWbeKQGb9Cpt0xU6kGpn8bRrZtkh68rZYnxGEFzedUln
+nkL5/nWpo63/dgpnQOPF943HhZpZnmKaau1Fh5hnstVKPNe0OwANwI8f4UDErmwh
+3El+fsqyjW22v5MvoVw+j8rtgI5Y4dtXz4U2OLJxpAmMkokIiEjxQGMYsluMWuPD
+0xeqqxmjLBvk1cbiZnrXghmmOxYsL3GHX0WelXOTwkKBIROW1527k2gV+p2kHYzy
+geBYBr3JtuP2iV2J+axEoctr+hbxx1A9JNr3w+SH1VbxT5Aw+kUJWdo0zuATHAR8
+ANSbhqRAvNncTFd+rrcztl524WWLZt+NyteYr842mIycg5kDcPOvdO3GDjbnvezB
+c6eUWsuSZIKmAMFwoW4sKeFYV+xafJlrJaSQOoD0IJ2azsct+bJLKZWD6TWNp0lI
+pw9MGZHQ9b8Q4HECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU
+dEmZ0f+0emhFdcN+tNzMzjkz2ggwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEB
+DAUAA4ICAQAjQ1MkYlxt/T7Cz1UAbMVWiLkO3TriJQ2VSpfKgInuKs1l+NsW4AmS
+4BjHeJi78+xCUvuppILXTdiK/ORO/auQxDh1MoSf/7OwKwIzNsAQkG8dnK/haZPs
+o0UvFJ/1TCplQ3IM98P4lYsU84UgYt1UU90s3BiVaU+DR3BAM1h3Egyi61IxHkzJ
+qM7F78PRreBrAwA0JrRUITWXAdxfG/F851X6LWh3e9NpzNMOa7pNdkTWwhWaJuyw
+xfW70Xp0wmzNxbVe9kzmWy2B27O3Opee7c9GslA9hGCZcbUztVdF5kJHdWoOsAgM
+rr3e97sPWD2PAzHoPYJQyi9eDF20l74gNAf0xBLh7tew2VktafcxBPTy+av5EzH4
+AXcOPUIjJsyacmdRIXrMPIWo6iFqO9taPKU0nprALN+AnCng33eU0aKAQv9qTFsR
+0PXNor6uzFFcw9VUewyu1rkGd4Di7wcaaMxZUa1+XGdrudviB0JbuAEFWDlN5LuY
+o7Ey7Nmj1m+UI/87tyll5gfp77YZ6ufCOB0yiJA8EytuzO+rdwY0d4RPcuSBhPm5
+dDTedk+SKlOxJTnbPP/lPqYO5Wue/9vsL3SD3460s6neFE3/MaNFcyT6lSnMEpcE
+oji2jbDwN/zIIX8/syQbPYtuzE2wFg2WHYMfRsCbvUOZ58SWLs5fyQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=TrustAsia Global Root CA G3 O=TrustAsia Technologies, Inc.
+# Subject: CN=TrustAsia Global Root CA G3 O=TrustAsia Technologies, Inc.
+# Label: "TrustAsia Global Root CA G3"
+# Serial: 576386314500428537169965010905813481816650257167
+# MD5 Fingerprint: 30:42:1b:b7:bb:81:75:35:e4:16:4f:53:d2:94:de:04
+# SHA1 Fingerprint: 63:cf:b6:c1:27:2b:56:e4:88:8e:1c:23:9a:b6:2e:81:47:24:c3:c7
+# SHA256 Fingerprint: e0:d3:22:6a:eb:11:63:c2:e4:8f:f9:be:3b:50:b4:c6:43:1b:e7:bb:1e:ac:c5:c3:6b:5d:5e:c5:09:03:9a:08
+-----BEGIN CERTIFICATE-----
+MIIFpTCCA42gAwIBAgIUZPYOZXdhaqs7tOqFhLuxibhxkw8wDQYJKoZIhvcNAQEM
+BQAwWjELMAkGA1UEBhMCQ04xJTAjBgNVBAoMHFRydXN0QXNpYSBUZWNobm9sb2dp
+ZXMsIEluYy4xJDAiBgNVBAMMG1RydXN0QXNpYSBHbG9iYWwgUm9vdCBDQSBHMzAe
+Fw0yMTA1MjAwMjEwMTlaFw00NjA1MTkwMjEwMTlaMFoxCzAJBgNVBAYTAkNOMSUw
+IwYDVQQKDBxUcnVzdEFzaWEgVGVjaG5vbG9naWVzLCBJbmMuMSQwIgYDVQQDDBtU
+cnVzdEFzaWEgR2xvYmFsIFJvb3QgQ0EgRzMwggIiMA0GCSqGSIb3DQEBAQUAA4IC
+DwAwggIKAoICAQDAMYJhkuSUGwoqZdC+BqmHO1ES6nBBruL7dOoKjbmzTNyPtxNS
+T1QY4SxzlZHFZjtqz6xjbYdT8PfxObegQ2OwxANdV6nnRM7EoYNl9lA+sX4WuDqK
+AtCWHwDNBSHvBm3dIZwZQ0WhxeiAysKtQGIXBsaqvPPW5vxQfmZCHzyLpnl5hkA1
+nyDvP+uLRx+PjsXUjrYsyUQE49RDdT/VP68czH5GX6zfZBCK70bwkPAPLfSIC7Ep
+qq+FqklYqL9joDiR5rPmd2jE+SoZhLsO4fWvieylL1AgdB4SQXMeJNnKziyhWTXA
+yB1GJ2Faj/lN03J5Zh6fFZAhLf3ti1ZwA0pJPn9pMRJpxx5cynoTi+jm9WAPzJMs
+hH/x/Gr8m0ed262IPfN2dTPXS6TIi/n1Q1hPy8gDVI+lhXgEGvNz8teHHUGf59gX
+zhqcD0r83ERoVGjiQTz+LISGNzzNPy+i2+f3VANfWdP3kXjHi3dqFuVJhZBFcnAv
+kV34PmVACxmZySYgWmjBNb9Pp1Hx2BErW+Canig7CjoKH8GB5S7wprlppYiU5msT
+f9FkPz2ccEblooV7WIQn3MSAPmeamseaMQ4w7OYXQJXZRe0Blqq/DPNL0WP3E1jA
+uPP6Z92bfW1K/zJMtSU7/xxnD4UiWQWRkUF3gdCFTIcQcf+eQxuulXUtgQIDAQAB
+o2MwYTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFEDk5PIj7zjKsK5Xf/Ih
+MBY027ySMB0GA1UdDgQWBBRA5OTyI+84yrCuV3/yITAWNNu8kjAOBgNVHQ8BAf8E
+BAMCAQYwDQYJKoZIhvcNAQEMBQADggIBACY7UeFNOPMyGLS0XuFlXsSUT9SnYaP4
+wM8zAQLpw6o1D/GUE3d3NZ4tVlFEbuHGLige/9rsR82XRBf34EzC4Xx8MnpmyFq2
+XFNFV1pF1AWZLy4jVe5jaN/TG3inEpQGAHUNcoTpLrxaatXeL1nHo+zSh2bbt1S1
+JKv0Q3jbSwTEb93mPmY+KfJLaHEih6D4sTNjduMNhXJEIlU/HHzp/LgV6FL6qj6j
+ITk1dImmasI5+njPtqzn59ZW/yOSLlALqbUHM/Q4X6RJpstlcHboCoWASzY9M/eV
+VHUl2qzEc4Jl6VL1XP04lQJqaTDFHApXB64ipCz5xUG3uOyfT0gA+QEEVcys+TIx
+xHWVBqB/0Y0n3bOppHKH/lmLmnp0Ft0WpWIp6zqW3IunaFnT63eROfjXy9mPX1on
+AX1daBli2MjN9LdyR75bl87yraKZk62Uy5P2EgmVtqvXO9A/EcswFi55gORngS1d
+7XB4tmBZrOFdRWOPyN9yaFvqHbgB8X7754qz41SgOAngPN5C8sLtLpvzHzW2Ntjj
+gKGLzZlkD8Kqq7HK9W+eQ42EVJmzbsASZthwEPEGNTNDqJwuuhQxzhB/HIbjj9LV
++Hfsm6vxL2PZQl/gZ4FkkfGXL/xuJvYz+NO1+MRiqzFRJQJ6+N1rZdVtTTDIZbpo
+FGWsJwt0ivKH
+-----END CERTIFICATE-----
+
+# Issuer: CN=TrustAsia Global Root CA G4 O=TrustAsia Technologies, Inc.
+# Subject: CN=TrustAsia Global Root CA G4 O=TrustAsia Technologies, Inc.
+# Label: "TrustAsia Global Root CA G4"
+# Serial: 451799571007117016466790293371524403291602933463
+# MD5 Fingerprint: 54:dd:b2:d7:5f:d8:3e:ed:7c:e0:0b:2e:cc:ed:eb:eb
+# SHA1 Fingerprint: 57:73:a5:61:5d:80:b2:e6:ac:38:82:fc:68:07:31:ac:9f:b5:92:5a
+# SHA256 Fingerprint: be:4b:56:cb:50:56:c0:13:6a:52:6d:f4:44:50:8d:aa:36:a0:b5:4f:42:e4:ac:38:f7:2a:f4:70:e4:79:65:4c
+-----BEGIN CERTIFICATE-----
+MIICVTCCAdygAwIBAgIUTyNkuI6XY57GU4HBdk7LKnQV1tcwCgYIKoZIzj0EAwMw
+WjELMAkGA1UEBhMCQ04xJTAjBgNVBAoMHFRydXN0QXNpYSBUZWNobm9sb2dpZXMs
+IEluYy4xJDAiBgNVBAMMG1RydXN0QXNpYSBHbG9iYWwgUm9vdCBDQSBHNDAeFw0y
+MTA1MjAwMjEwMjJaFw00NjA1MTkwMjEwMjJaMFoxCzAJBgNVBAYTAkNOMSUwIwYD
+VQQKDBxUcnVzdEFzaWEgVGVjaG5vbG9naWVzLCBJbmMuMSQwIgYDVQQDDBtUcnVz
+dEFzaWEgR2xvYmFsIFJvb3QgQ0EgRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAATx
+s8045CVD5d4ZCbuBeaIVXxVjAd7Cq92zphtnS4CDr5nLrBfbK5bKfFJV4hrhPVbw
+LxYI+hW8m7tH5j/uqOFMjPXTNvk4XatwmkcN4oFBButJ+bAp3TPsUKV/eSm4IJij
+YzBhMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUpbtKl86zK3+kMd6Xg1mD
+pm9xy94wHQYDVR0OBBYEFKW7SpfOsyt/pDHel4NZg6ZvccveMA4GA1UdDwEB/wQE
+AwIBBjAKBggqhkjOPQQDAwNnADBkAjBe8usGzEkxn0AAbbd+NvBNEU/zy4k6LHiR
+UKNbwMp1JvK/kF0LgoxgKJ/GcJpo5PECMFxYDlZ2z1jD1xCMuo6u47xkdUfFVZDj
+/bpV6wfEU6s3qe4hsiFbYI89MvHVI5TWWA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=CommScope Public Trust ECC Root-01 O=CommScope
+# Subject: CN=CommScope Public Trust ECC Root-01 O=CommScope
+# Label: "CommScope Public Trust ECC Root-01"
+# Serial: 385011430473757362783587124273108818652468453534
+# MD5 Fingerprint: 3a:40:a7:fc:03:8c:9c:38:79:2f:3a:a2:6c:b6:0a:16
+# SHA1 Fingerprint: 07:86:c0:d8:dd:8e:c0:80:98:06:98:d0:58:7a:ef:de:a6:cc:a2:5d
+# SHA256 Fingerprint: 11:43:7c:da:7b:b4:5e:41:36:5f:45:b3:9a:38:98:6b:0d:e0:0d:ef:34:8e:0c:7b:b0:87:36:33:80:0b:c3:8b
+-----BEGIN CERTIFICATE-----
+MIICHTCCAaOgAwIBAgIUQ3CCd89NXTTxyq4yLzf39H91oJ4wCgYIKoZIzj0EAwMw
+TjELMAkGA1UEBhMCVVMxEjAQBgNVBAoMCUNvbW1TY29wZTErMCkGA1UEAwwiQ29t
+bVNjb3BlIFB1YmxpYyBUcnVzdCBFQ0MgUm9vdC0wMTAeFw0yMTA0MjgxNzM1NDNa
+Fw00NjA0MjgxNzM1NDJaME4xCzAJBgNVBAYTAlVTMRIwEAYDVQQKDAlDb21tU2Nv
+cGUxKzApBgNVBAMMIkNvbW1TY29wZSBQdWJsaWMgVHJ1c3QgRUNDIFJvb3QtMDEw
+djAQBgcqhkjOPQIBBgUrgQQAIgNiAARLNumuV16ocNfQj3Rid8NeeqrltqLxeP0C
+flfdkXmcbLlSiFS8LwS+uM32ENEp7LXQoMPwiXAZu1FlxUOcw5tjnSCDPgYLpkJE
+hRGnSjot6dZoL0hOUysHP029uax3OVejQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYD
+VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSOB2LAUN3GGQYARnQE9/OufXVNMDAKBggq
+hkjOPQQDAwNoADBlAjEAnDPfQeMjqEI2Jpc1XHvr20v4qotzVRVcrHgpD7oh2MSg
+2NED3W3ROT3Ek2DS43KyAjB8xX6I01D1HiXo+k515liWpDVfG2XqYZpwI7UNo5uS
+Um9poIyNStDuiw7LR47QjRE=
+-----END CERTIFICATE-----
+
+# Issuer: CN=CommScope Public Trust ECC Root-02 O=CommScope
+# Subject: CN=CommScope Public Trust ECC Root-02 O=CommScope
+# Label: "CommScope Public Trust ECC Root-02"
+# Serial: 234015080301808452132356021271193974922492992893
+# MD5 Fingerprint: 59:b0:44:d5:65:4d:b8:5c:55:19:92:02:b6:d1:94:b2
+# SHA1 Fingerprint: 3c:3f:ef:57:0f:fe:65:93:86:9e:a0:fe:b0:f6:ed:8e:d1:13:c7:e5
+# SHA256 Fingerprint: 2f:fb:7f:81:3b:bb:b3:c8:9a:b4:e8:16:2d:0f:16:d7:15:09:a8:30:cc:9d:73:c2:62:e5:14:08:75:d1:ad:4a
+-----BEGIN CERTIFICATE-----
+MIICHDCCAaOgAwIBAgIUKP2ZYEFHpgE6yhR7H+/5aAiDXX0wCgYIKoZIzj0EAwMw
+TjELMAkGA1UEBhMCVVMxEjAQBgNVBAoMCUNvbW1TY29wZTErMCkGA1UEAwwiQ29t
+bVNjb3BlIFB1YmxpYyBUcnVzdCBFQ0MgUm9vdC0wMjAeFw0yMTA0MjgxNzQ0NTRa
+Fw00NjA0MjgxNzQ0NTNaME4xCzAJBgNVBAYTAlVTMRIwEAYDVQQKDAlDb21tU2Nv
+cGUxKzApBgNVBAMMIkNvbW1TY29wZSBQdWJsaWMgVHJ1c3QgRUNDIFJvb3QtMDIw
+djAQBgcqhkjOPQIBBgUrgQQAIgNiAAR4MIHoYx7l63FRD/cHB8o5mXxO1Q/MMDAL
+j2aTPs+9xYa9+bG3tD60B8jzljHz7aRP+KNOjSkVWLjVb3/ubCK1sK9IRQq9qEmU
+v4RDsNuESgMjGWdqb8FuvAY5N9GIIvejQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYD
+VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTmGHX/72DehKT1RsfeSlXjMjZ59TAKBggq
+hkjOPQQDAwNnADBkAjAmc0l6tqvmSfR9Uj/UQQSugEODZXW5hYA4O9Zv5JOGq4/n
+ich/m35rChJVYaoR4HkCMHfoMXGsPHED1oQmHhS48zs73u1Z/GtMMH9ZzkXpc2AV
+mkzw5l4lIhVtwodZ0LKOag==
+-----END CERTIFICATE-----
+
+# Issuer: CN=CommScope Public Trust RSA Root-01 O=CommScope
+# Subject: CN=CommScope Public Trust RSA Root-01 O=CommScope
+# Label: "CommScope Public Trust RSA Root-01"
+# Serial: 354030733275608256394402989253558293562031411421
+# MD5 Fingerprint: 0e:b4:15:bc:87:63:5d:5d:02:73:d4:26:38:68:73:d8
+# SHA1 Fingerprint: 6d:0a:5f:f7:b4:23:06:b4:85:b3:b7:97:64:fc:ac:75:f5:33:f2:93
+# SHA256 Fingerprint: 02:bd:f9:6e:2a:45:dd:9b:f1:8f:c7:e1:db:df:21:a0:37:9b:a3:c9:c2:61:03:44:cf:d8:d6:06:fe:c1:ed:81
+-----BEGIN CERTIFICATE-----
+MIIFbDCCA1SgAwIBAgIUPgNJgXUWdDGOTKvVxZAplsU5EN0wDQYJKoZIhvcNAQEL
+BQAwTjELMAkGA1UEBhMCVVMxEjAQBgNVBAoMCUNvbW1TY29wZTErMCkGA1UEAwwi
+Q29tbVNjb3BlIFB1YmxpYyBUcnVzdCBSU0EgUm9vdC0wMTAeFw0yMTA0MjgxNjQ1
+NTRaFw00NjA0MjgxNjQ1NTNaME4xCzAJBgNVBAYTAlVTMRIwEAYDVQQKDAlDb21t
+U2NvcGUxKzApBgNVBAMMIkNvbW1TY29wZSBQdWJsaWMgVHJ1c3QgUlNBIFJvb3Qt
+MDEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwSGWjDR1C45FtnYSk
+YZYSwu3D2iM0GXb26v1VWvZVAVMP8syMl0+5UMuzAURWlv2bKOx7dAvnQmtVzslh
+suitQDy6uUEKBU8bJoWPQ7VAtYXR1HHcg0Hz9kXHgKKEUJdGzqAMxGBWBB0HW0al
+DrJLpA6lfO741GIDuZNqihS4cPgugkY4Iw50x2tBt9Apo52AsH53k2NC+zSDO3Oj
+WiE260f6GBfZumbCk6SP/F2krfxQapWsvCQz0b2If4b19bJzKo98rwjyGpg/qYFl
+P8GMicWWMJoKz/TUyDTtnS+8jTiGU+6Xn6myY5QXjQ/cZip8UlF1y5mO6D1cv547
+KI2DAg+pn3LiLCuz3GaXAEDQpFSOm117RTYm1nJD68/A6g3czhLmfTifBSeolz7p
+UcZsBSjBAg/pGG3svZwG1KdJ9FQFa2ww8esD1eo9anbCyxooSU1/ZOD6K9pzg4H/
+kQO9lLvkuI6cMmPNn7togbGEW682v3fuHX/3SZtS7NJ3Wn2RnU3COS3kuoL4b/JO
+Hg9O5j9ZpSPcPYeoKFgo0fEbNttPxP/hjFtyjMcmAyejOQoBqsCyMWCDIqFPEgkB
+Ea801M/XrmLTBQe0MXXgDW1XT2mH+VepuhX2yFJtocucH+X8eKg1mp9BFM6ltM6U
+CBwJrVbl2rZJmkrqYxhTnCwuwwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4G
+A1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUN12mmnQywsL5x6YVEFm45P3luG0wDQYJ
+KoZIhvcNAQELBQADggIBAK+nz97/4L1CjU3lIpbfaOp9TSp90K09FlxD533Ahuh6
+NWPxzIHIxgvoLlI1pKZJkGNRrDSsBTtXAOnTYtPZKdVUvhwQkZyybf5Z/Xn36lbQ
+nmhUQo8mUuJM3y+Xpi/SB5io82BdS5pYV4jvguX6r2yBS5KPQJqTRlnLX3gWsWc+
+QgvfKNmwrZggvkN80V4aCRckjXtdlemrwWCrWxhkgPut4AZ9HcpZuPN4KWfGVh2v
+trV0KnahP/t1MJ+UXjulYPPLXAziDslg+MkfFoom3ecnf+slpoq9uC02EJqxWE2a
+aE9gVOX2RhOOiKy8IUISrcZKiX2bwdgt6ZYD9KJ0DLwAHb/WNyVntHKLr4W96ioD
+j8z7PEQkguIBpQtZtjSNMgsSDesnwv1B10A8ckYpwIzqug/xBpMu95yo9GA+o/E4
+Xo4TwbM6l4c/ksp4qRyv0LAbJh6+cOx69TOY6lz/KwsETkPdY34Op054A5U+1C0w
+lREQKC6/oAI+/15Z0wUOlV9TRe9rh9VIzRamloPh37MG88EU26fsHItdkJANclHn
+YfkUyq+Dj7+vsQpZXdxc1+SWrVtgHdqul7I52Qb1dgAT+GhMIbA1xNxVssnBQVoc
+icCMb3SgazNNtQEo/a2tiRc7ppqEvOuM6sRxJKi6KfkIsidWNTJf6jn7MZrVGczw
+-----END CERTIFICATE-----
+
+# Issuer: CN=CommScope Public Trust RSA Root-02 O=CommScope
+# Subject: CN=CommScope Public Trust RSA Root-02 O=CommScope
+# Label: "CommScope Public Trust RSA Root-02"
+# Serial: 480062499834624527752716769107743131258796508494
+# MD5 Fingerprint: e1:29:f9:62:7b:76:e2:96:6d:f3:d4:d7:0f:ae:1f:aa
+# SHA1 Fingerprint: ea:b0:e2:52:1b:89:93:4c:11:68:f2:d8:9a:ac:22:4c:a3:8a:57:ae
+# SHA256 Fingerprint: ff:e9:43:d7:93:42:4b:4f:7c:44:0c:1c:3d:64:8d:53:63:f3:4b:82:dc:87:aa:7a:9f:11:8f:c5:de:e1:01:f1
+-----BEGIN CERTIFICATE-----
+MIIFbDCCA1SgAwIBAgIUVBa/O345lXGN0aoApYYNK496BU4wDQYJKoZIhvcNAQEL
+BQAwTjELMAkGA1UEBhMCVVMxEjAQBgNVBAoMCUNvbW1TY29wZTErMCkGA1UEAwwi
+Q29tbVNjb3BlIFB1YmxpYyBUcnVzdCBSU0EgUm9vdC0wMjAeFw0yMTA0MjgxNzE2
+NDNaFw00NjA0MjgxNzE2NDJaME4xCzAJBgNVBAYTAlVTMRIwEAYDVQQKDAlDb21t
+U2NvcGUxKzApBgNVBAMMIkNvbW1TY29wZSBQdWJsaWMgVHJ1c3QgUlNBIFJvb3Qt
+MDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDh+g77aAASyE3VrCLE
+NQE7xVTlWXZjpX/rwcRqmL0yjReA61260WI9JSMZNRTpf4mnG2I81lDnNJUDMrG0
+kyI9p+Kx7eZ7Ti6Hmw0zdQreqjXnfuU2mKKuJZ6VszKWpCtYHu8//mI0SFHRtI1C
+rWDaSWqVcN3SAOLMV2MCe5bdSZdbkk6V0/nLKR8YSvgBKtJjCW4k6YnS5cciTNxz
+hkcAqg2Ijq6FfUrpuzNPDlJwnZXjfG2WWy09X6GDRl224yW4fKcZgBzqZUPckXk2
+LHR88mcGyYnJ27/aaL8j7dxrrSiDeS/sOKUNNwFnJ5rpM9kzXzehxfCrPfp4sOcs
+n/Y+n2Dg70jpkEUeBVF4GiwSLFworA2iI540jwXmojPOEXcT1A6kHkIfhs1w/tku
+FT0du7jyU1fbzMZ0KZwYszZ1OC4PVKH4kh+Jlk+71O6d6Ts2QrUKOyrUZHk2EOH5
+kQMreyBUzQ0ZGshBMjTRsJnhkB4BQDa1t/qp5Xd1pCKBXbCL5CcSD1SIxtuFdOa3
+wNemKfrb3vOTlycEVS8KbzfFPROvCgCpLIscgSjX74Yxqa7ybrjKaixUR9gqiC6v
+wQcQeKwRoi9C8DfF8rhW3Q5iLc4tVn5V8qdE9isy9COoR+jUKgF4z2rDN6ieZdIs
+5fq6M8EGRPbmz6UNp2YINIos8wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4G
+A1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUR9DnsSL/nSz12Vdgs7GxcJXvYXowDQYJ
+KoZIhvcNAQELBQADggIBAIZpsU0v6Z9PIpNojuQhmaPORVMbc0RTAIFhzTHjCLqB
+KCh6krm2qMhDnscTJk3C2OVVnJJdUNjCK9v+5qiXz1I6JMNlZFxHMaNlNRPDk7n3
++VGXu6TwYofF1gbTl4MgqX67tiHCpQ2EAOHyJxCDut0DgdXdaMNmEMjRdrSzbyme
+APnCKfWxkxlSaRosTKCL4BWaMS/TiJVZbuXEs1DIFAhKm4sTg7GkcrI7djNB3Nyq
+pgdvHSQSn8h2vS/ZjvQs7rfSOBAkNlEv41xdgSGn2rtO/+YHqP65DSdsu3BaVXoT
+6fEqSWnHX4dXTEN5bTpl6TBcQe7rd6VzEojov32u5cSoHw2OHG1QAk8mGEPej1WF
+sQs3BWDJVTkSBKEqz3EWnzZRSb9wO55nnPt7eck5HHisd5FUmrh1CoFSl+NmYWvt
+PjgelmFV4ZFUjO2MJB+ByRCac5krFk5yAD9UG/iNuovnFNa2RU9g7Jauwy8CTl2d
+lklyALKrdVwPaFsdZcJfMw8eD/A7hvWwTruc9+olBdytoptLFwG+Qt81IR2tq670
+v64fG9PiO/yzcnMcmyiQiRM9HcEARwmWmjgb3bHPDcK0RPOWlc4yOo80nOAXx17O
+rg3bhzjlP1v9mxnhMUF6cKojawHhRUzNlM47ni3niAIi9G7oyOzWPPO5std3eqx7
+-----END CERTIFICATE-----
+
+# Issuer: CN=Telekom Security TLS ECC Root 2020 O=Deutsche Telekom Security GmbH
+# Subject: CN=Telekom Security TLS ECC Root 2020 O=Deutsche Telekom Security GmbH
+# Label: "Telekom Security TLS ECC Root 2020"
+# Serial: 72082518505882327255703894282316633856
+# MD5 Fingerprint: c1:ab:fe:6a:10:2c:03:8d:bc:1c:22:32:c0:85:a7:fd
+# SHA1 Fingerprint: c0:f8:96:c5:a9:3b:01:06:21:07:da:18:42:48:bc:e9:9d:88:d5:ec
+# SHA256 Fingerprint: 57:8a:f4:de:d0:85:3f:4e:59:98:db:4a:ea:f9:cb:ea:8d:94:5f:60:b6:20:a3:8d:1a:3c:13:b2:bc:7b:a8:e1
+-----BEGIN CERTIFICATE-----
+MIICQjCCAcmgAwIBAgIQNjqWjMlcsljN0AFdxeVXADAKBggqhkjOPQQDAzBjMQsw
+CQYDVQQGEwJERTEnMCUGA1UECgweRGV1dHNjaGUgVGVsZWtvbSBTZWN1cml0eSBH
+bWJIMSswKQYDVQQDDCJUZWxla29tIFNlY3VyaXR5IFRMUyBFQ0MgUm9vdCAyMDIw
+MB4XDTIwMDgyNTA3NDgyMFoXDTQ1MDgyNTIzNTk1OVowYzELMAkGA1UEBhMCREUx
+JzAlBgNVBAoMHkRldXRzY2hlIFRlbGVrb20gU2VjdXJpdHkgR21iSDErMCkGA1UE
+AwwiVGVsZWtvbSBTZWN1cml0eSBUTFMgRUNDIFJvb3QgMjAyMDB2MBAGByqGSM49
+AgEGBSuBBAAiA2IABM6//leov9Wq9xCazbzREaK9Z0LMkOsVGJDZos0MKiXrPk/O
+tdKPD/M12kOLAoC+b1EkHQ9rK8qfwm9QMuU3ILYg/4gND21Ju9sGpIeQkpT0CdDP
+f8iAC8GXs7s1J8nCG6NCMEAwHQYDVR0OBBYEFONyzG6VmUex5rNhTNHLq+O6zd6f
+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMAoGCCqGSM49BAMDA2cA
+MGQCMHVSi7ekEE+uShCLsoRbQuHmKjYC2qBuGT8lv9pZMo7k+5Dck2TOrbRBR2Di
+z6fLHgIwN0GMZt9Ba9aDAEH9L1r3ULRn0SyocddDypwnJJGDSA3PzfdUga/sf+Rn
+27iQ7t0l
+-----END CERTIFICATE-----
+
+# Issuer: CN=Telekom Security TLS RSA Root 2023 O=Deutsche Telekom Security GmbH
+# Subject: CN=Telekom Security TLS RSA Root 2023 O=Deutsche Telekom Security GmbH
+# Label: "Telekom Security TLS RSA Root 2023"
+# Serial: 44676229530606711399881795178081572759
+# MD5 Fingerprint: bf:5b:eb:54:40:cd:48:71:c4:20:8d:7d:de:0a:42:f2
+# SHA1 Fingerprint: 54:d3:ac:b3:bd:57:56:f6:85:9d:ce:e5:c3:21:e2:d4:ad:83:d0:93
+# SHA256 Fingerprint: ef:c6:5c:ad:bb:59:ad:b6:ef:e8:4d:a2:23:11:b3:56:24:b7:1b:3b:1e:a0:da:8b:66:55:17:4e:c8:97:86:46
+-----BEGIN CERTIFICATE-----
+MIIFszCCA5ugAwIBAgIQIZxULej27HF3+k7ow3BXlzANBgkqhkiG9w0BAQwFADBj
+MQswCQYDVQQGEwJERTEnMCUGA1UECgweRGV1dHNjaGUgVGVsZWtvbSBTZWN1cml0
+eSBHbWJIMSswKQYDVQQDDCJUZWxla29tIFNlY3VyaXR5IFRMUyBSU0EgUm9vdCAy
+MDIzMB4XDTIzMDMyODEyMTY0NVoXDTQ4MDMyNzIzNTk1OVowYzELMAkGA1UEBhMC
+REUxJzAlBgNVBAoMHkRldXRzY2hlIFRlbGVrb20gU2VjdXJpdHkgR21iSDErMCkG
+A1UEAwwiVGVsZWtvbSBTZWN1cml0eSBUTFMgUlNBIFJvb3QgMjAyMzCCAiIwDQYJ
+KoZIhvcNAQEBBQADggIPADCCAgoCggIBAO01oYGA88tKaVvC+1GDrib94W7zgRJ9
+cUD/h3VCKSHtgVIs3xLBGYSJwb3FKNXVS2xE1kzbB5ZKVXrKNoIENqil/Cf2SfHV
+cp6R+SPWcHu79ZvB7JPPGeplfohwoHP89v+1VmLhc2o0mD6CuKyVU/QBoCcHcqMA
+U6DksquDOFczJZSfvkgdmOGjup5czQRxUX11eKvzWarE4GC+j4NSuHUaQTXtvPM6
+Y+mpFEXX5lLRbtLevOP1Czvm4MS9Q2QTps70mDdsipWol8hHD/BeEIvnHRz+sTug
+BTNoBUGCwQMrAcjnj02r6LX2zWtEtefdi+zqJbQAIldNsLGyMcEWzv/9FIS3R/qy
+8XDe24tsNlikfLMR0cN3f1+2JeANxdKz+bi4d9s3cXFH42AYTyS2dTd4uaNir73J
+co4vzLuu2+QVUhkHM/tqty1LkCiCc/4YizWN26cEar7qwU02OxY2kTLvtkCJkUPg
+8qKrBC7m8kwOFjQgrIfBLX7JZkcXFBGk8/ehJImr2BrIoVyxo/eMbcgByU/J7MT8
+rFEz0ciD0cmfHdRHNCk+y7AO+oMLKFjlKdw/fKifybYKu6boRhYPluV75Gp6SG12
+mAWl3G0eQh5C2hrgUve1g8Aae3g1LDj1H/1Joy7SWWO/gLCMk3PLNaaZlSJhZQNg
++y+TS/qanIA7AgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUtqeX
+gj10hZv3PJ+TmpV5dVKMbUcwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSMEGDAWgBS2
+p5eCPXSFm/c8n5OalXl1UoxtRzANBgkqhkiG9w0BAQwFAAOCAgEAqMxhpr51nhVQ
+pGv7qHBFfLp+sVr8WyP6Cnf4mHGCDG3gXkaqk/QeoMPhk9tLrbKmXauw1GLLXrtm
+9S3ul0A8Yute1hTWjOKWi0FpkzXmuZlrYrShF2Y0pmtjxrlO8iLpWA1WQdH6DErw
+M807u20hOq6OcrXDSvvpfeWxm4bu4uB9tPcy/SKE8YXJN3nptT+/XOR0so8RYgDd
+GGah2XsjX/GO1WfoVNpbOms2b/mBsTNHM3dA+VKq3dSDz4V4mZqTuXNnQkYRIer+
+CqkbGmVps4+uFrb2S1ayLfmlyOw7YqPta9BO1UAJpB+Y1zqlklkg5LB9zVtzaL1t
+xKITDmcZuI1CfmwMmm6gJC3VRRvcxAIU/oVbZZfKTpBQCHpCNfnqwmbU+AGuHrS+
+w6jv/naaoqYfRvaE7fzbzsQCzndILIyy7MMAo+wsVRjBfhnu4S/yrYObnqsZ38aK
+L4x35bcF7DvB7L6Gs4a8wPfc5+pbrrLMtTWGS9DiP7bY+A4A7l3j941Y/8+LN+lj
+X273CXE2whJdV/LItM3z7gLfEdxquVeEHVlNjM7IDiPCtyaaEBRx/pOyiriA8A4Q
+ntOoUAw3gi/q4Iqd4Sw5/7W0cwDk90imc6y/st53BIe0o82bNSQ3+pCTE4FCxpgm
+dTdmQRCsu/WU48IxK63nI1bMNSWSs1A=
+-----END CERTIFICATE-----
diff --git a/env-llmeval/lib/python3.10/site-packages/certifi/core.py b/env-llmeval/lib/python3.10/site-packages/certifi/core.py
new file mode 100644
index 0000000000000000000000000000000000000000..91f538bb1fd2ce62632e475053dc000e7833d11b
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/certifi/core.py
@@ -0,0 +1,114 @@
+"""
+certifi.py
+~~~~~~~~~~
+
+This module returns the installation location of cacert.pem or its contents.
+"""
+import sys
+import atexit
+
+def exit_cacert_ctx() -> None:
+    """Close the context manager that extracted cacert.pem.
+
+    Registered with atexit by where(), so that any temporary copy of the
+    certificate bundle (created when certifi is imported from a zip) is
+    cleaned up when the interpreter exits.  _CACERT_CTX is only non-None
+    after where() has run, hence the type-ignore on the attribute access.
+    """
+    _CACERT_CTX.__exit__(None, None, None)  # type: ignore[union-attr]
+
+
+if sys.version_info >= (3, 11):
+
+    from importlib.resources import as_file, files
+
+    # Lazily populated by the first call to where(): the as_file() context
+    # manager (held globally so cleanup happens at interpreter exit rather
+    # than at garbage collection) and the resulting filesystem path.
+    _CACERT_CTX = None
+    _CACERT_PATH = None
+
+    def where() -> str:
+        """Return the filesystem path of the bundled cacert.pem.
+
+        The path is resolved once and cached in a module-level global;
+        subsequent calls return the cached value.
+        """
+        # This is slightly terrible, but we want to delay extracting the file
+        # in cases where we're inside of a zipimport situation until someone
+        # actually calls where(), but we don't want to re-extract the file
+        # on every call of where(), so we'll do it once then store it in a
+        # global variable.
+        global _CACERT_CTX
+        global _CACERT_PATH
+        if _CACERT_PATH is None:
+            # This is slightly janky, the importlib.resources API wants you to
+            # manage the cleanup of this file, so it doesn't actually return a
+            # path, it returns a context manager that will give you the path
+            # when you enter it and will do any cleanup when you leave it. In
+            # the common case of not needing a temporary file, it will just
+            # return the file system location and the __exit__() is a no-op.
+            #
+            # We also have to hold onto the actual context manager, because
+            # it will do the cleanup whenever it gets garbage collected, so
+            # we will also store that at the global level as well.
+            _CACERT_CTX = as_file(files("certifi").joinpath("cacert.pem"))
+            _CACERT_PATH = str(_CACERT_CTX.__enter__())
+            atexit.register(exit_cacert_ctx)
+
+        return _CACERT_PATH
+
+    def contents() -> str:
+        """Return the text of the bundled cacert.pem (PEM data is ASCII)."""
+        return files("certifi").joinpath("cacert.pem").read_text(encoding="ascii")
+
+elif sys.version_info >= (3, 7):
+
+    # NOTE(review): importlib.resources.path/read_text were deprecated in
+    # Python 3.11; this branch only runs on 3.7-3.10, where they are the
+    # supported API.
+    from importlib.resources import path as get_path, read_text
+
+    # Lazily populated by the first call to where(): the path() context
+    # manager (held globally so cleanup happens at interpreter exit rather
+    # than at garbage collection) and the resulting filesystem path.
+    _CACERT_CTX = None
+    _CACERT_PATH = None
+
+    def where() -> str:
+        """Return the filesystem path of the bundled cacert.pem.
+
+        The path is resolved once and cached in a module-level global;
+        subsequent calls return the cached value.
+        """
+        # This is slightly terrible, but we want to delay extracting the
+        # file in cases where we're inside of a zipimport situation until
+        # someone actually calls where(), but we don't want to re-extract
+        # the file on every call of where(), so we'll do it once then store
+        # it in a global variable.
+        global _CACERT_CTX
+        global _CACERT_PATH
+        if _CACERT_PATH is None:
+            # This is slightly janky, the importlib.resources API wants you
+            # to manage the cleanup of this file, so it doesn't actually
+            # return a path, it returns a context manager that will give
+            # you the path when you enter it and will do any cleanup when
+            # you leave it. In the common case of not needing a temporary
+            # file, it will just return the file system location and the
+            # __exit__() is a no-op.
+            #
+            # We also have to hold onto the actual context manager, because
+            # it will do the cleanup whenever it gets garbage collected, so
+            # we will also store that at the global level as well.
+            _CACERT_CTX = get_path("certifi", "cacert.pem")
+            _CACERT_PATH = str(_CACERT_CTX.__enter__())
+            atexit.register(exit_cacert_ctx)
+
+        return _CACERT_PATH
+
+    def contents() -> str:
+        """Return the text of the bundled cacert.pem (PEM data is ASCII)."""
+        return read_text("certifi", "cacert.pem", encoding="ascii")
+
+else:
+    import os
+    import types
+    from typing import Union
+
+    # Type aliases mirroring the importlib.resources signatures so the
+    # fallback read_text() below is call-compatible with the real one.
+    Package = Union[types.ModuleType, str]
+    Resource = Union[str, "os.PathLike"]
+
+    # This fallback will work for Python versions prior to 3.7 that lack the
+    # importlib.resources module but relies on the existing `where` function
+    # so won't address issues with environments like PyOxidizer that don't set
+    # __file__ on modules.
+    def read_text(
+        package: Package,
+        resource: Resource,
+        encoding: str = 'utf-8',
+        errors: str = 'strict'
+    ) -> str:
+        """Return the text of the bundled cacert.pem.
+
+        The ``package``, ``resource`` and ``errors`` arguments are accepted
+        only for signature compatibility with importlib.resources.read_text
+        and are ignored: this always opens the file located by where().
+        """
+        with open(where(), encoding=encoding) as data:
+            return data.read()
+
+    # If we don't have importlib.resources, then we will just do the old logic
+    # of assuming we're on the filesystem and munge the path directly.
+    def where() -> str:
+        """Return the path of cacert.pem next to this module on disk."""
+        f = os.path.dirname(__file__)
+
+        return os.path.join(f, "cacert.pem")
+
+    def contents() -> str:
+        """Return the text of the bundled cacert.pem (PEM data is ASCII)."""
+        return read_text("certifi", "cacert.pem", encoding="ascii")
diff --git a/env-llmeval/lib/python3.10/site-packages/certifi/py.typed b/env-llmeval/lib/python3.10/site-packages/certifi/py.typed
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/env-llmeval/lib/python3.10/site-packages/pip-22.0.2.dist-info/LICENSE.txt b/env-llmeval/lib/python3.10/site-packages/pip-22.0.2.dist-info/LICENSE.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8e7b65eaf628360e6f32f4140fcdd7ec7c2b7077
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pip-22.0.2.dist-info/LICENSE.txt
@@ -0,0 +1,20 @@
+Copyright (c) 2008-present The pip developers (see AUTHORS.txt file)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/env-llmeval/lib/python3.10/site-packages/pip-22.0.2.dist-info/WHEEL b/env-llmeval/lib/python3.10/site-packages/pip-22.0.2.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..becc9a66ea739ba941d48a749e248761cc6e658a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pip-22.0.2.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.37.1)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy-1.13.0.dist-info/INSTALLER b/env-llmeval/lib/python3.10/site-packages/scipy-1.13.0.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy-1.13.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy-1.13.0.dist-info/LICENSE.txt b/env-llmeval/lib/python3.10/site-packages/scipy-1.13.0.dist-info/LICENSE.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4ae44a0686912d59c1c4a58b858e6611e8b81958
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy-1.13.0.dist-info/LICENSE.txt
@@ -0,0 +1,933 @@
+Copyright (c) 2001-2002 Enthought, Inc. 2003-2024, SciPy Developers.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+----
+
+This binary distribution of SciPy also bundles the following software:
+
+
+Name: OpenBLAS
+Files: scipy.libs/libopenblas*.so
+Description: bundled as a dynamically linked library
+Availability: https://github.com/OpenMathLib/OpenBLAS/
+License: BSD-3-Clause-Attribution
+ Copyright (c) 2011-2014, The OpenBLAS Project
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ 3. Neither the name of the OpenBLAS project nor the names of
+ its contributors may be used to endorse or promote products
+ derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Name: LAPACK
+Files: scipy.libs/libopenblas*.so
+Description: bundled in OpenBLAS
+Availability: https://github.com/OpenMathLib/OpenBLAS/
+License: BSD-3-Clause-Attribution
+ Copyright (c) 1992-2013 The University of Tennessee and The University
+ of Tennessee Research Foundation. All rights
+ reserved.
+ Copyright (c) 2000-2013 The University of California Berkeley. All
+ rights reserved.
+ Copyright (c) 2006-2013 The University of Colorado Denver. All rights
+ reserved.
+
+ $COPYRIGHT$
+
+ Additional copyrights may follow
+
+ $HEADER$
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ - Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ - Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer listed
+ in this license in the documentation and/or other materials
+ provided with the distribution.
+
+ - Neither the name of the copyright holders nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ The copyright holders provide no reassurances that the source code
+ provided does not infringe any patent, copyright, or any other
+ intellectual property rights of third parties. The copyright holders
+ disclaim any liability to any recipient for claims brought against
+ recipient by any third party for infringement of that parties
+ intellectual property rights.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Name: GCC runtime library
+Files: scipy.libs/libgfortran*.so
+Description: dynamically linked to files compiled with gcc
+Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran
+License: GPL-3.0-with-GCC-exception
+ Copyright (C) 2002-2017 Free Software Foundation, Inc.
+
+ Libgfortran is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ Libgfortran is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>.
+
+----
+
+Full text of license texts referred to above follows (that they are
+listed below does not necessarily imply the conditions apply to the
+present binary release):
+
+----
+
+GCC RUNTIME LIBRARY EXCEPTION
+
+Version 3.1, 31 March 2009
+
+Copyright (C) 2009 Free Software Foundation, Inc.
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+This GCC Runtime Library Exception ("Exception") is an additional
+permission under section 7 of the GNU General Public License, version
+3 ("GPLv3"). It applies to a given file (the "Runtime Library") that
+bears a notice placed by the copyright holder of the file stating that
+the file is governed by GPLv3 along with this Exception.
+
+When you use GCC to compile a program, GCC may combine portions of
+certain GCC header files and runtime libraries with the compiled
+program. The purpose of this Exception is to allow compilation of
+non-GPL (including proprietary) programs to use, in this way, the
+header files and runtime libraries covered by this Exception.
+
+0. Definitions.
+
+A file is an "Independent Module" if it either requires the Runtime
+Library for execution after a Compilation Process, or makes use of an
+interface provided by the Runtime Library, but is not otherwise based
+on the Runtime Library.
+
+"GCC" means a version of the GNU Compiler Collection, with or without
+modifications, governed by version 3 (or a specified later version) of
+the GNU General Public License (GPL) with the option of using any
+subsequent versions published by the FSF.
+
+"GPL-compatible Software" is software whose conditions of propagation,
+modification and use would permit combination with GCC in accord with
+the license of GCC.
+
+"Target Code" refers to output from any compiler for a real or virtual
+target processor architecture, in executable form or suitable for
+input to an assembler, loader, linker and/or execution
+phase. Notwithstanding that, Target Code does not include data in any
+format that is used as a compiler intermediate representation, or used
+for producing a compiler intermediate representation.
+
+The "Compilation Process" transforms code entirely represented in
+non-intermediate languages designed for human-written code, and/or in
+Java Virtual Machine byte code, into Target Code. Thus, for example,
+use of source code generators and preprocessors need not be considered
+part of the Compilation Process, since the Compilation Process can be
+understood as starting with the output of the generators or
+preprocessors.
+
+A Compilation Process is "Eligible" if it is done using GCC, alone or
+with other GPL-compatible software, or if it is done without using any
+work based on GCC. For example, using non-GPL-compatible Software to
+optimize any GCC intermediate representations would not qualify as an
+Eligible Compilation Process.
+
+1. Grant of Additional Permission.
+
+You have permission to propagate a work of Target Code formed by
+combining the Runtime Library with Independent Modules, even if such
+propagation would otherwise violate the terms of GPLv3, provided that
+all Target Code was generated by Eligible Compilation Processes. You
+may then convey such a combination under terms of your choice,
+consistent with the licensing of the Independent Modules.
+
+2. No Weakening of GCC Copyleft.
+
+The availability of this Exception does not imply any general
+presumption that third-party software is unaffected by the copyleft
+requirements of the license of GCC.
+
+----
+
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
+
+
+Name: libquadmath
+Files: scipy.libs/libquadmath*.so
+Description: dynamically linked to files compiled with gcc
+Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libquadmath
+License: LGPL-2.1-or-later
+
+ GCC Quad-Precision Math Library
+ Copyright (C) 2010-2019 Free Software Foundation, Inc.
+  Written by Francois-Xavier Coudert  <fxcoudert@gcc.gnu.org>
+
+ This file is part of the libquadmath library.
+ Libquadmath is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ Libquadmath is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+ https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy-1.13.0.dist-info/METADATA b/env-llmeval/lib/python3.10/site-packages/scipy-1.13.0.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..b9b3bdbd0fa840b523e33652e3a880d2c9c24420
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy-1.13.0.dist-info/METADATA
@@ -0,0 +1,1074 @@
+Metadata-Version: 2.1
+Name: scipy
+Version: 1.13.0
+Summary: Fundamental algorithms for scientific computing in Python
+Home-page: https://scipy.org/
+Maintainer-Email: SciPy Developers
+License: Copyright (c) 2001-2002 Enthought, Inc. 2003-2024, SciPy Developers.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+ 3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ----
+
+ This binary distribution of SciPy also bundles the following software:
+
+
+ Name: OpenBLAS
+ Files: scipy.libs/libopenblas*.so
+ Description: bundled as a dynamically linked library
+ Availability: https://github.com/OpenMathLib/OpenBLAS/
+ License: BSD-3-Clause-Attribution
+ Copyright (c) 2011-2014, The OpenBLAS Project
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ 3. Neither the name of the OpenBLAS project nor the names of
+ its contributors may be used to endorse or promote products
+ derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+ Name: LAPACK
+ Files: scipy.libs/libopenblas*.so
+ Description: bundled in OpenBLAS
+ Availability: https://github.com/OpenMathLib/OpenBLAS/
+ License: BSD-3-Clause-Attribution
+ Copyright (c) 1992-2013 The University of Tennessee and The University
+ of Tennessee Research Foundation. All rights
+ reserved.
+ Copyright (c) 2000-2013 The University of California Berkeley. All
+ rights reserved.
+ Copyright (c) 2006-2013 The University of Colorado Denver. All rights
+ reserved.
+
+ $COPYRIGHT$
+
+ Additional copyrights may follow
+
+ $HEADER$
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ - Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ - Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer listed
+ in this license in the documentation and/or other materials
+ provided with the distribution.
+
+ - Neither the name of the copyright holders nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ The copyright holders provide no reassurances that the source code
+ provided does not infringe any patent, copyright, or any other
+ intellectual property rights of third parties. The copyright holders
+ disclaim any liability to any recipient for claims brought against
+ recipient by any third party for infringement of that parties
+ intellectual property rights.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+ Name: GCC runtime library
+ Files: scipy.libs/libgfortran*.so
+ Description: dynamically linked to files compiled with gcc
+ Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran
+ License: GPL-3.0-with-GCC-exception
+ Copyright (C) 2002-2017 Free Software Foundation, Inc.
+
+ Libgfortran is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ Libgfortran is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>.
+
+ ----
+
+ Full text of license texts referred to above follows (that they are
+ listed below does not necessarily imply the conditions apply to the
+ present binary release):
+
+ ----
+
+ GCC RUNTIME LIBRARY EXCEPTION
+
+ Version 3.1, 31 March 2009
+
+ Copyright (C) 2009 Free Software Foundation, Inc. <http://www.fsf.org/>
+
+ Everyone is permitted to copy and distribute verbatim copies of this
+ license document, but changing it is not allowed.
+
+ This GCC Runtime Library Exception ("Exception") is an additional
+ permission under section 7 of the GNU General Public License, version
+ 3 ("GPLv3"). It applies to a given file (the "Runtime Library") that
+ bears a notice placed by the copyright holder of the file stating that
+ the file is governed by GPLv3 along with this Exception.
+
+ When you use GCC to compile a program, GCC may combine portions of
+ certain GCC header files and runtime libraries with the compiled
+ program. The purpose of this Exception is to allow compilation of
+ non-GPL (including proprietary) programs to use, in this way, the
+ header files and runtime libraries covered by this Exception.
+
+ 0. Definitions.
+
+ A file is an "Independent Module" if it either requires the Runtime
+ Library for execution after a Compilation Process, or makes use of an
+ interface provided by the Runtime Library, but is not otherwise based
+ on the Runtime Library.
+
+ "GCC" means a version of the GNU Compiler Collection, with or without
+ modifications, governed by version 3 (or a specified later version) of
+ the GNU General Public License (GPL) with the option of using any
+ subsequent versions published by the FSF.
+
+ "GPL-compatible Software" is software whose conditions of propagation,
+ modification and use would permit combination with GCC in accord with
+ the license of GCC.
+
+ "Target Code" refers to output from any compiler for a real or virtual
+ target processor architecture, in executable form or suitable for
+ input to an assembler, loader, linker and/or execution
+ phase. Notwithstanding that, Target Code does not include data in any
+ format that is used as a compiler intermediate representation, or used
+ for producing a compiler intermediate representation.
+
+ The "Compilation Process" transforms code entirely represented in
+ non-intermediate languages designed for human-written code, and/or in
+ Java Virtual Machine byte code, into Target Code. Thus, for example,
+ use of source code generators and preprocessors need not be considered
+ part of the Compilation Process, since the Compilation Process can be
+ understood as starting with the output of the generators or
+ preprocessors.
+
+ A Compilation Process is "Eligible" if it is done using GCC, alone or
+ with other GPL-compatible software, or if it is done without using any
+ work based on GCC. For example, using non-GPL-compatible Software to
+ optimize any GCC intermediate representations would not qualify as an
+ Eligible Compilation Process.
+
+ 1. Grant of Additional Permission.
+
+ You have permission to propagate a work of Target Code formed by
+ combining the Runtime Library with Independent Modules, even if such
+ propagation would otherwise violate the terms of GPLv3, provided that
+ all Target Code was generated by Eligible Compilation Processes. You
+ may then convey such a combination under terms of your choice,
+ consistent with the licensing of the Independent Modules.
+
+ 2. No Weakening of GCC Copyleft.
+
+ The availability of this Exception does not imply any general
+ presumption that third-party software is unaffected by the copyleft
+ requirements of the license of GCC.
+
+ ----
+
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+ software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+ to take away your freedom to share and change the works. By contrast,
+ the GNU General Public License is intended to guarantee your freedom to
+ share and change all versions of a program--to make sure it remains free
+ software for all its users. We, the Free Software Foundation, use the
+ GNU General Public License for most of our software; it applies also to
+ any other work released this way by its authors. You can apply it to
+ your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+ price. Our General Public Licenses are designed to make sure that you
+ have the freedom to distribute copies of free software (and charge for
+ them if you wish), that you receive source code or can get it if you
+ want it, that you can change the software or use pieces of it in new
+ free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+ these rights or asking you to surrender the rights. Therefore, you have
+ certain responsibilities if you distribute copies of the software, or if
+ you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+ gratis or for a fee, you must pass on to the recipients the same
+ freedoms that you received. You must make sure that they, too, receive
+ or can get the source code. And you must show them these terms so they
+ know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+ (1) assert copyright on the software, and (2) offer you this License
+ giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+ that there is no warranty for this free software. For both users' and
+ authors' sake, the GPL requires that modified versions be marked as
+ changed, so that their problems will not be attributed erroneously to
+ authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+ modified versions of the software inside them, although the manufacturer
+ can do so. This is fundamentally incompatible with the aim of
+ protecting users' freedom to change the software. The systematic
+ pattern of such abuse occurs in the area of products for individuals to
+ use, which is precisely where it is most unacceptable. Therefore, we
+ have designed this version of the GPL to prohibit the practice for those
+ products. If such problems arise substantially in other domains, we
+ stand ready to extend this provision to those domains in future versions
+ of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+ States should not allow patents to restrict development and use of
+ software on general-purpose computers, but in those that do, we wish to
+ avoid the special danger that patents applied to a free program could
+ make it effectively proprietary. To prevent this, the GPL assures that
+ patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+ modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+ works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+ License. Each licensee is addressed as "you". "Licensees" and
+ "recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+ in a fashion requiring copyright permission, other than the making of an
+ exact copy. The resulting work is called a "modified version" of the
+ earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+ on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+ permission, would make you directly or secondarily liable for
+ infringement under applicable copyright law, except executing it on a
+ computer or modifying a private copy. Propagation includes copying,
+ distribution (with or without modification), making available to the
+ public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+ parties to make or receive copies. Mere interaction with a user through
+ a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+ to the extent that it includes a convenient and prominently visible
+ feature that (1) displays an appropriate copyright notice, and (2)
+ tells the user that there is no warranty for the work (except to the
+ extent that warranties are provided), that licensees may convey the
+ work under this License, and how to view a copy of this License. If
+ the interface presents a list of user commands or options, such as a
+ menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+ for making modifications to it. "Object code" means any non-source
+ form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+ standard defined by a recognized standards body, or, in the case of
+ interfaces specified for a particular programming language, one that
+ is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+ than the work as a whole, that (a) is included in the normal form of
+ packaging a Major Component, but which is not part of that Major
+ Component, and (b) serves only to enable use of the work with that
+ Major Component, or to implement a Standard Interface for which an
+ implementation is available to the public in source code form. A
+ "Major Component", in this context, means a major essential component
+ (kernel, window system, and so on) of the specific operating system
+ (if any) on which the executable work runs, or a compiler used to
+ produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+ the source code needed to generate, install, and (for an executable
+ work) run the object code and to modify the work, including scripts to
+ control those activities. However, it does not include the work's
+ System Libraries, or general-purpose tools or generally available free
+ programs which are used unmodified in performing those activities but
+ which are not part of the work. For example, Corresponding Source
+ includes interface definition files associated with source files for
+ the work, and the source code for shared libraries and dynamically
+ linked subprograms that the work is specifically designed to require,
+ such as by intimate data communication or control flow between those
+ subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+ can regenerate automatically from other parts of the Corresponding
+ Source.
+
+ The Corresponding Source for a work in source code form is that
+ same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+ copyright on the Program, and are irrevocable provided the stated
+ conditions are met. This License explicitly affirms your unlimited
+ permission to run the unmodified Program. The output from running a
+ covered work is covered by this License only if the output, given its
+ content, constitutes a covered work. This License acknowledges your
+ rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+ convey, without conditions so long as your license otherwise remains
+ in force. You may convey covered works to others for the sole purpose
+ of having them make modifications exclusively for you, or provide you
+ with facilities for running those works, provided that you comply with
+ the terms of this License in conveying all material for which you do
+ not control copyright. Those thus making or running the covered works
+ for you must do so exclusively on your behalf, under your direction
+ and control, on terms that prohibit them from making any copies of
+ your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+ the conditions stated below. Sublicensing is not allowed; section 10
+ makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+ measure under any applicable law fulfilling obligations under article
+ 11 of the WIPO copyright treaty adopted on 20 December 1996, or
+ similar laws prohibiting or restricting circumvention of such
+ measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+ circumvention of technological measures to the extent such circumvention
+ is effected by exercising rights under this License with respect to
+ the covered work, and you disclaim any intention to limit operation or
+ modification of the work as a means of enforcing, against the work's
+ users, your or third parties' legal rights to forbid circumvention of
+ technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+ receive it, in any medium, provided that you conspicuously and
+ appropriately publish on each copy an appropriate copyright notice;
+ keep intact all notices stating that this License and any
+ non-permissive terms added in accord with section 7 apply to the code;
+ keep intact all notices of the absence of any warranty; and give all
+ recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+ and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+ produce it from the Program, in the form of source code under the
+ terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+ works, which are not by their nature extensions of the covered work,
+ and which are not combined with it such as to form a larger program,
+ in or on a volume of a storage or distribution medium, is called an
+ "aggregate" if the compilation and its resulting copyright are not
+ used to limit the access or legal rights of the compilation's users
+ beyond what the individual works permit. Inclusion of a covered work
+ in an aggregate does not cause this License to apply to the other
+ parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+ of sections 4 and 5, provided that you also convey the
+ machine-readable Corresponding Source under the terms of this License,
+ in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+ from the Corresponding Source as a System Library, need not be
+ included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+ tangible personal property which is normally used for personal, family,
+ or household purposes, or (2) anything designed or sold for incorporation
+ into a dwelling. In determining whether a product is a consumer product,
+ doubtful cases shall be resolved in favor of coverage. For a particular
+ product received by a particular user, "normally used" refers to a
+ typical or common use of that class of product, regardless of the status
+ of the particular user or of the way in which the particular user
+ actually uses, or expects or is expected to use, the product. A product
+ is a consumer product regardless of whether the product has substantial
+ commercial, industrial or non-consumer uses, unless such uses represent
+ the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+ procedures, authorization keys, or other information required to install
+ and execute modified versions of a covered work in that User Product from
+ a modified version of its Corresponding Source. The information must
+ suffice to ensure that the continued functioning of the modified object
+ code is in no case prevented or interfered with solely because
+ modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+ specifically for use in, a User Product, and the conveying occurs as
+ part of a transaction in which the right of possession and use of the
+ User Product is transferred to the recipient in perpetuity or for a
+ fixed term (regardless of how the transaction is characterized), the
+ Corresponding Source conveyed under this section must be accompanied
+ by the Installation Information. But this requirement does not apply
+ if neither you nor any third party retains the ability to install
+ modified object code on the User Product (for example, the work has
+ been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+ requirement to continue to provide support service, warranty, or updates
+ for a work that has been modified or installed by the recipient, or for
+ the User Product in which it has been modified or installed. Access to a
+ network may be denied when the modification itself materially and
+ adversely affects the operation of the network or violates the rules and
+ protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+ in accord with this section must be in a format that is publicly
+ documented (and with an implementation available to the public in
+ source code form), and must require no special password or key for
+ unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+ License by making exceptions from one or more of its conditions.
+ Additional permissions that are applicable to the entire Program shall
+ be treated as though they were included in this License, to the extent
+ that they are valid under applicable law. If additional permissions
+ apply only to part of the Program, that part may be used separately
+ under those permissions, but the entire Program remains governed by
+ this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+ remove any additional permissions from that copy, or from any part of
+ it. (Additional permissions may be written to require their own
+ removal in certain cases when you modify the work.) You may place
+ additional permissions on material, added by you to a covered work,
+ for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+ add to a covered work, you may (if authorized by the copyright holders of
+ that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+ restrictions" within the meaning of section 10. If the Program as you
+ received it, or any part of it, contains a notice stating that it is
+ governed by this License along with a term that is a further
+ restriction, you may remove that term. If a license document contains
+ a further restriction but permits relicensing or conveying under this
+ License, you may add to a covered work material governed by the terms
+ of that license document, provided that the further restriction does
+ not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+ must place, in the relevant source files, a statement of the
+ additional terms that apply to those files, or a notice indicating
+ where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+ form of a separately written license, or stated as exceptions;
+ the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+ provided under this License. Any attempt otherwise to propagate or
+ modify it is void, and will automatically terminate your rights under
+ this License (including any patent licenses granted under the third
+ paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+ license from a particular copyright holder is reinstated (a)
+ provisionally, unless and until the copyright holder explicitly and
+ finally terminates your license, and (b) permanently, if the copyright
+ holder fails to notify you of the violation by some reasonable means
+ prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+ reinstated permanently if the copyright holder notifies you of the
+ violation by some reasonable means, this is the first time you have
+ received notice of violation of this License (for any work) from that
+ copyright holder, and you cure the violation prior to 30 days after
+ your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+ licenses of parties who have received copies or rights from you under
+ this License. If your rights have been terminated and not permanently
+ reinstated, you do not qualify to receive new licenses for the same
+ material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+ run a copy of the Program. Ancillary propagation of a covered work
+ occurring solely as a consequence of using peer-to-peer transmission
+ to receive a copy likewise does not require acceptance. However,
+ nothing other than this License grants you permission to propagate or
+ modify any covered work. These actions infringe copyright if you do
+ not accept this License. Therefore, by modifying or propagating a
+ covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+ receives a license from the original licensors, to run, modify and
+ propagate that work, subject to this License. You are not responsible
+ for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+ organization, or substantially all assets of one, or subdividing an
+ organization, or merging organizations. If propagation of a covered
+ work results from an entity transaction, each party to that
+ transaction who receives a copy of the work also receives whatever
+ licenses to the work the party's predecessor in interest had or could
+ give under the previous paragraph, plus a right to possession of the
+ Corresponding Source of the work from the predecessor in interest, if
+ the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+ rights granted or affirmed under this License. For example, you may
+ not impose a license fee, royalty, or other charge for exercise of
+ rights granted under this License, and you may not initiate litigation
+ (including a cross-claim or counterclaim in a lawsuit) alleging that
+ any patent claim is infringed by making, using, selling, offering for
+ sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+ License of the Program or a work on which the Program is based. The
+ work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+ owned or controlled by the contributor, whether already acquired or
+ hereafter acquired, that would be infringed by some manner, permitted
+ by this License, of making, using, or selling its contributor version,
+ but do not include claims that would be infringed only as a
+ consequence of further modification of the contributor version. For
+ purposes of this definition, "control" includes the right to grant
+ patent sublicenses in a manner consistent with the requirements of
+ this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+ patent license under the contributor's essential patent claims, to
+ make, use, sell, offer for sale, import and otherwise run, modify and
+ propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+ agreement or commitment, however denominated, not to enforce a patent
+ (such as an express permission to practice a patent or covenant not to
+ sue for patent infringement). To "grant" such a patent license to a
+ party means to make such an agreement or commitment not to enforce a
+ patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+ and the Corresponding Source of the work is not available for anyone
+ to copy, free of charge and under the terms of this License, through a
+ publicly available network server or other readily accessible means,
+ then you must either (1) cause the Corresponding Source to be so
+ available, or (2) arrange to deprive yourself of the benefit of the
+ patent license for this particular work, or (3) arrange, in a manner
+ consistent with the requirements of this License, to extend the patent
+ license to downstream recipients. "Knowingly relying" means you have
+ actual knowledge that, but for the patent license, your conveying the
+ covered work in a country, or your recipient's use of the covered work
+ in a country, would infringe one or more identifiable patents in that
+ country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+ arrangement, you convey, or propagate by procuring conveyance of, a
+ covered work, and grant a patent license to some of the parties
+ receiving the covered work authorizing them to use, propagate, modify
+ or convey a specific copy of the covered work, then the patent license
+ you grant is automatically extended to all recipients of the covered
+ work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+ the scope of its coverage, prohibits the exercise of, or is
+ conditioned on the non-exercise of one or more of the rights that are
+ specifically granted under this License. You may not convey a covered
+ work if you are a party to an arrangement with a third party that is
+ in the business of distributing software, under which you make payment
+ to the third party based on the extent of your activity of conveying
+ the work, and under which the third party grants, to any of the
+ parties who would receive the covered work from you, a discriminatory
+ patent license (a) in connection with copies of the covered work
+ conveyed by you (or copies made from those copies), or (b) primarily
+ for and in connection with specific products or compilations that
+ contain the covered work, unless you entered into that arrangement,
+ or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+ any implied license or other defenses to infringement that may
+ otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+ otherwise) that contradict the conditions of this License, they do not
+ excuse you from the conditions of this License. If you cannot convey a
+ covered work so as to satisfy simultaneously your obligations under this
+ License and any other pertinent obligations, then as a consequence you may
+ not convey it at all. For example, if you agree to terms that obligate you
+ to collect a royalty for further conveying from those to whom you convey
+ the Program, the only way you could satisfy both those terms and this
+ License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+ permission to link or combine any covered work with a work licensed
+ under version 3 of the GNU Affero General Public License into a single
+ combined work, and to convey the resulting work. The terms of this
+ License will continue to apply to the part which is the covered work,
+ but the special requirements of the GNU Affero General Public License,
+ section 13, concerning interaction through a network will apply to the
+ combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+ the GNU General Public License from time to time. Such new versions will
+ be similar in spirit to the present version, but may differ in detail to
+ address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+ Program specifies that a certain numbered version of the GNU General
+ Public License "or any later version" applies to it, you have the
+ option of following the terms and conditions either of that numbered
+ version or of any later version published by the Free Software
+ Foundation. If the Program does not specify a version number of the
+ GNU General Public License, you may choose any version ever published
+ by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+ versions of the GNU General Public License can be used, that proxy's
+ public statement of acceptance of a version permanently authorizes you
+ to choose that version for the Program.
+
+ Later license versions may give you additional or different
+ permissions. However, no additional obligations are imposed on any
+ author or copyright holder as a result of your choosing to follow a
+ later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+ APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+ HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+ OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+ IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+ THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+ GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+ USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+ DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+ PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+ EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+ SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+ above cannot be given local legal effect according to their terms,
+ reviewing courts shall apply local law that most closely approximates
+ an absolute waiver of all civil liability in connection with the
+ Program, unless a warranty or assumption of liability accompanies a
+ copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+ possible use to the public, the best way to achieve this is to make it
+ free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+ to attach them to the start of each source file to most effectively
+ state the exclusion of warranty; and each file should have at least
+ the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+ notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+ The hypothetical commands `show w' and `show c' should show the appropriate
+ parts of the General Public License. Of course, your program's commands
+ might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+ if any, to sign a "copyright disclaimer" for the program, if necessary.
+ For more information on this, and how to apply and follow the GNU GPL, see
+ <https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+ into proprietary programs. If your program is a subroutine library, you
+ may consider it more useful to permit linking proprietary applications with
+ the library. If this is what you want to do, use the GNU Lesser General
+ Public License instead of this License. But first, please read
+ <https://www.gnu.org/licenses/why-not-lgpl.html>.
+
+
+ Name: libquadmath
+ Files: scipy.libs/libquadmath*.so
+ Description: dynamically linked to files compiled with gcc
+ Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libquadmath
+ License: LGPL-2.1-or-later
+
+ GCC Quad-Precision Math Library
+ Copyright (C) 2010-2019 Free Software Foundation, Inc.
+ Written by Francois-Xavier Coudert <fxcoudert@gcc.gnu.org>
+
+ This file is part of the libquadmath library.
+ Libquadmath is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ Libquadmath is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+ https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Science/Research
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Programming Language :: C
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Scientific/Engineering
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX :: Linux
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: Unix
+Classifier: Operating System :: MacOS
+Project-URL: Homepage, https://scipy.org/
+Project-URL: Documentation, https://docs.scipy.org/doc/scipy/
+Project-URL: Source, https://github.com/scipy/scipy
+Project-URL: Download, https://github.com/scipy/scipy/releases
+Project-URL: Tracker, https://github.com/scipy/scipy/issues
+Requires-Python: >=3.9
+Requires-Dist: numpy<2.3,>=1.22.4
+Requires-Dist: pytest; extra == "test"
+Requires-Dist: pytest-cov; extra == "test"
+Requires-Dist: pytest-timeout; extra == "test"
+Requires-Dist: pytest-xdist; extra == "test"
+Requires-Dist: asv; extra == "test"
+Requires-Dist: mpmath; extra == "test"
+Requires-Dist: gmpy2; extra == "test"
+Requires-Dist: threadpoolctl; extra == "test"
+Requires-Dist: scikit-umfpack; extra == "test"
+Requires-Dist: pooch; extra == "test"
+Requires-Dist: hypothesis>=6.30; extra == "test"
+Requires-Dist: array-api-strict; extra == "test"
+Requires-Dist: sphinx>=5.0.0; extra == "doc"
+Requires-Dist: pydata-sphinx-theme>=0.15.2; extra == "doc"
+Requires-Dist: sphinx-design>=0.4.0; extra == "doc"
+Requires-Dist: matplotlib>=3.5; extra == "doc"
+Requires-Dist: numpydoc; extra == "doc"
+Requires-Dist: jupytext; extra == "doc"
+Requires-Dist: myst-nb; extra == "doc"
+Requires-Dist: pooch; extra == "doc"
+Requires-Dist: jupyterlite-sphinx>=0.12.0; extra == "doc"
+Requires-Dist: jupyterlite-pyodide-kernel; extra == "doc"
+Requires-Dist: mypy; extra == "dev"
+Requires-Dist: typing_extensions; extra == "dev"
+Requires-Dist: types-psutil; extra == "dev"
+Requires-Dist: pycodestyle; extra == "dev"
+Requires-Dist: ruff; extra == "dev"
+Requires-Dist: cython-lint>=0.12.2; extra == "dev"
+Requires-Dist: rich-click; extra == "dev"
+Requires-Dist: doit>=0.36.0; extra == "dev"
+Requires-Dist: pydevtool; extra == "dev"
+Provides-Extra: test
+Provides-Extra: doc
+Provides-Extra: dev
+Description-Content-Type: text/x-rst
+
+.. image:: https://raw.githubusercontent.com/scipy/scipy/main/doc/source/_static/logo.svg
+ :target: https://scipy.org
+ :width: 110
+ :height: 110
+ :align: left
+
+.. image:: https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A
+ :target: https://numfocus.org
+
+.. image:: https://img.shields.io/pypi/dm/scipy.svg?label=Pypi%20downloads
+ :target: https://pypi.org/project/scipy/
+
+.. image:: https://img.shields.io/conda/dn/conda-forge/scipy.svg?label=Conda%20downloads
+ :target: https://anaconda.org/conda-forge/scipy
+
+.. image:: https://img.shields.io/badge/stackoverflow-Ask%20questions-blue.svg
+ :target: https://stackoverflow.com/questions/tagged/scipy
+
+.. image:: https://img.shields.io/badge/DOI-10.1038%2Fs41592--019--0686--2-blue
+ :target: https://www.nature.com/articles/s41592-019-0686-2
+
+SciPy (pronounced "Sigh Pie") is an open-source software for mathematics,
+science, and engineering. It includes modules for statistics, optimization,
+integration, linear algebra, Fourier transforms, signal and image processing,
+ODE solvers, and more.
+
+- **Website:** https://scipy.org
+- **Documentation:** https://docs.scipy.org/doc/scipy/
+- **Development version of the documentation:** https://scipy.github.io/devdocs
+- **Mailing list:** https://mail.python.org/mailman3/lists/scipy-dev.python.org/
+- **Source code:** https://github.com/scipy/scipy
+- **Contributing:** https://scipy.github.io/devdocs/dev/index.html
+- **Bug reports:** https://github.com/scipy/scipy/issues
+- **Code of Conduct:** https://docs.scipy.org/doc/scipy/dev/conduct/code_of_conduct.html
+- **Report a security vulnerability:** https://tidelift.com/docs/security
+- **Citing in your work:** https://www.scipy.org/citing-scipy/
+
+SciPy is built to work with
+NumPy arrays, and provides many user-friendly and efficient numerical routines,
+such as routines for numerical integration and optimization. Together, they
+run on all popular operating systems, are quick to install, and are free of
+charge. NumPy and SciPy are easy to use, but powerful enough to be depended
+upon by some of the world's leading scientists and engineers. If you need to
+manipulate numbers on a computer and display or publish the results, give
+SciPy a try!
+
+For the installation instructions, see `our install
+ guide <https://scipy.org/install/>`__.
+
+
+Call for Contributions
+----------------------
+
+We appreciate and welcome contributions. Small improvements or fixes are always appreciated; issues labeled as "good
+first issue" may be a good starting point. Have a look at `our contributing
+ guide <https://scipy.github.io/devdocs/dev/index.html>`__.
+
+Writing code isn’t the only way to contribute to SciPy. You can also:
+
+- review pull requests
+- triage issues
+- develop tutorials, presentations, and other educational materials
+ - maintain and improve `our website <https://scipy.org/>`__
+- develop graphic design for our brand assets and promotional materials
+- help with outreach and onboard new contributors
+- write grant proposals and help with other fundraising efforts
+
+If you’re unsure where to start or how your skills fit in, reach out! You can
+ask on the mailing list or here, on GitHub, by leaving a
+comment on a relevant issue that is already open.
+
+If you are new to contributing to open source, `this
+ guide <https://opensource.guide/how-to-contribute/>`__ helps explain why, what,
+and how to get involved.
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy-1.13.0.dist-info/RECORD b/env-llmeval/lib/python3.10/site-packages/scipy-1.13.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..3b4d31004dc26960f394757d13db47949f604c93
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy-1.13.0.dist-info/RECORD
@@ -0,0 +1,2175 @@
+scipy-1.13.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+scipy-1.13.0.dist-info/LICENSE.txt,sha256=GBKL4U2eo7yUQAdaiYqUjjMn6WiG0BZ47o4FJRBXFYA,46805
+scipy-1.13.0.dist-info/METADATA,sha256=pupwB-tQg2FOnYO6j6nIUevhfbVQotRECCPdJznBYcE,60568
+scipy-1.13.0.dist-info/RECORD,,
+scipy-1.13.0.dist-info/WHEEL,sha256=sZM_NeUMz2G4fDenMf11eikcCxcLaQWiYRmjwQBavQs,137
+scipy.libs/libgfortran-040039e1.so.5.0.0,sha256=FK-zEpsai1C8QKOwggx_EVLqm8EBIaqxUpQ_cFdHKIY,2686065
+scipy.libs/libopenblasp-r0-24bff013.3.26.dev.so,sha256=CfADHQasbypnAQQRplB4SeqoJnVMpVOpVFmsGKR3Xl8,34990041
+scipy.libs/libquadmath-96973f99.so.0.0.0,sha256=k0wi3tDn0WnE1GeIdslgUa3z2UVF2pYvYLQWWbB12js,247609
+scipy/__config__.py,sha256=CJllCNEYJv3O910L7fzfB9zF2yCR0KTocgM88LMaPEI,5087
+scipy/__init__.py,sha256=8J2KNCrLUruYIHP76yWU2TY_9VQz091xAGYULbfAvuk,4144
+scipy/__pycache__/__config__.cpython-310.pyc,,
+scipy/__pycache__/__init__.cpython-310.pyc,,
+scipy/__pycache__/_distributor_init.cpython-310.pyc,,
+scipy/__pycache__/conftest.cpython-310.pyc,,
+scipy/__pycache__/version.cpython-310.pyc,,
+scipy/_distributor_init.py,sha256=zJThN3Fvof09h24804pNDPd2iN-lCHV3yPlZylSefgQ,611
+scipy/_lib/__init__.py,sha256=CXrH_YBpZ-HImHHrqXIhQt_vevp4P5NXClp7hnFMVLM,353
+scipy/_lib/__pycache__/__init__.cpython-310.pyc,,
+scipy/_lib/__pycache__/_array_api.cpython-310.pyc,,
+scipy/_lib/__pycache__/_bunch.cpython-310.pyc,,
+scipy/_lib/__pycache__/_ccallback.cpython-310.pyc,,
+scipy/_lib/__pycache__/_disjoint_set.cpython-310.pyc,,
+scipy/_lib/__pycache__/_docscrape.cpython-310.pyc,,
+scipy/_lib/__pycache__/_elementwise_iterative_method.cpython-310.pyc,,
+scipy/_lib/__pycache__/_finite_differences.cpython-310.pyc,,
+scipy/_lib/__pycache__/_gcutils.cpython-310.pyc,,
+scipy/_lib/__pycache__/_pep440.cpython-310.pyc,,
+scipy/_lib/__pycache__/_testutils.cpython-310.pyc,,
+scipy/_lib/__pycache__/_threadsafety.cpython-310.pyc,,
+scipy/_lib/__pycache__/_tmpdirs.cpython-310.pyc,,
+scipy/_lib/__pycache__/_util.cpython-310.pyc,,
+scipy/_lib/__pycache__/decorator.cpython-310.pyc,,
+scipy/_lib/__pycache__/deprecation.cpython-310.pyc,,
+scipy/_lib/__pycache__/doccer.cpython-310.pyc,,
+scipy/_lib/__pycache__/uarray.cpython-310.pyc,,
+scipy/_lib/_array_api.py,sha256=Ibx-wfA11m7xKtNIlvYhS4e71GyehsGnUVxlcLKF4Rs,12740
+scipy/_lib/_bunch.py,sha256=WooFxHL6t0SwjcwMDECM5wcWWLIS0St8zP3urDVK-V0,8120
+scipy/_lib/_ccallback.py,sha256=N9CO7kJYzk6IWQR5LHf_YA1-Oq48R38UIhJFIlJ2Qyc,7087
+scipy/_lib/_ccallback_c.cpython-310-x86_64-linux-gnu.so,sha256=5pAHAaCrTH-nc9QfyOTVZyhYQUhE91sN43PlNKSZ8kw,110000
+scipy/_lib/_disjoint_set.py,sha256=o_EUHZwnnI1m8nitEf8bSkF7TWZ65RSiklBN4daFruA,6160
+scipy/_lib/_docscrape.py,sha256=B4AzU5hrwyo8bJLBlNU-PQ0qCtgStZe_LasHc2Q9ZwE,21498
+scipy/_lib/_elementwise_iterative_method.py,sha256=w3qm_WWCu4nrtcbdnX8Wx2SKRYpamMfeyxjfmyvBONs,13509
+scipy/_lib/_finite_differences.py,sha256=llaIPvCOxpE4VA8O8EycPEU8i6LHJyOD-y7Y9OvQHt0,4172
+scipy/_lib/_fpumode.cpython-310-x86_64-linux-gnu.so,sha256=Kk1mpVY1lns4OpLjvNrW4B9W-nLAOgt6nH-0O5oSRTg,16400
+scipy/_lib/_gcutils.py,sha256=hajQd-HUw9ckK7QeBaqXVRpmnxPgyXO3QqqniEh7tRk,2669
+scipy/_lib/_pep440.py,sha256=vo3nxbfjtMfGq1ektYzHIzRbj8W-NHOMp5WBRjPlDTg,14005
+scipy/_lib/_test_ccallback.cpython-310-x86_64-linux-gnu.so,sha256=yGXELz3LHq-9jmN9DzA6APmFeL4wvY_rPypIid98qsg,23232
+scipy/_lib/_test_deprecation_call.cpython-310-x86_64-linux-gnu.so,sha256=gjamXXU2UkqozZOn9JfyjxoTO7zEtVUII1-TWVoc2B0,49544
+scipy/_lib/_test_deprecation_def.cpython-310-x86_64-linux-gnu.so,sha256=NN0_uZiXj6yZNa3FhUCTutwkPNT0atOxMSp0KGYi9og,34392
+scipy/_lib/_testutils.py,sha256=JtE6ksxrUr0E-A8sEXazvoXvnHympmXabXCys0dRtjU,8134
+scipy/_lib/_threadsafety.py,sha256=xuVqUS2jv46fOOQf7bcrhiYtnPVygqmrIVJc-7_LlI8,1455
+scipy/_lib/_tmpdirs.py,sha256=z3IYpzACnWdN_BMjOvqYbkTvYyUbfbQvfehq7idENSo,2374
+scipy/_lib/_uarray/LICENSE,sha256=yAw5tfzga6SJfhTgsKiLVEWDNNlR6xNhQC_60s-4Y7Q,1514
+scipy/_lib/_uarray/__init__.py,sha256=Rww7wLA7FH6Yong7oMgl_sHPpjcRslRaTjh61W_xVg4,4493
+scipy/_lib/_uarray/__pycache__/__init__.cpython-310.pyc,,
+scipy/_lib/_uarray/__pycache__/_backend.cpython-310.pyc,,
+scipy/_lib/_uarray/_backend.py,sha256=CeTV7H8oXRs7wrdBu9MXqz5-5EtRyzXnDrTlsMWtyt8,20432
+scipy/_lib/_uarray/_uarray.cpython-310-x86_64-linux-gnu.so,sha256=b_JP1CEg6jY9SIth1B8Rem111VgcmQHwa3_5ECGnq9M,173888
+scipy/_lib/_util.py,sha256=zPHnzzCxXrbHdiejH81_MRL6K0P84SG1S-Bq6sDN6j8,32217
+scipy/_lib/array_api_compat/__init__.py,sha256=sC0Ht3rsA1SxX6cuBmBSe2mJ8_m2SODKN29BjIxlwP8,946
+scipy/_lib/array_api_compat/__pycache__/__init__.cpython-310.pyc,,
+scipy/_lib/array_api_compat/__pycache__/_internal.cpython-310.pyc,,
+scipy/_lib/array_api_compat/_internal.py,sha256=RiQvh6ZoZLXw0l2CYKMG_6_PwmDO3qm7Hay8MMpgObc,987
+scipy/_lib/array_api_compat/common/__init__.py,sha256=fH4Ux-dWyQRkZ6WxqDTv-Bges_uKQ80TgTKOxvZ2MFE,24
+scipy/_lib/array_api_compat/common/__pycache__/__init__.cpython-310.pyc,,
+scipy/_lib/array_api_compat/common/__pycache__/_aliases.cpython-310.pyc,,
+scipy/_lib/array_api_compat/common/__pycache__/_helpers.cpython-310.pyc,,
+scipy/_lib/array_api_compat/common/__pycache__/_linalg.cpython-310.pyc,,
+scipy/_lib/array_api_compat/common/__pycache__/_typing.cpython-310.pyc,,
+scipy/_lib/array_api_compat/common/_aliases.py,sha256=P6-5PJI0ZzVPS58CwpAVh__B8TkVMK7_4DYy8SbpC3A,16263
+scipy/_lib/array_api_compat/common/_helpers.py,sha256=Rn-aG4Vu56auzREAnmkhEsQMr9z__4sgEUEQq2E0elA,8206
+scipy/_lib/array_api_compat/common/_linalg.py,sha256=4D1-ukLTf7s3t6LaFsoR_mMkblceSywx4cYXbeeqZ28,6301
+scipy/_lib/array_api_compat/common/_typing.py,sha256=Wfsx0DJSMTIGfMoj_tqH2-HjxPyVSbQ9aUB02FaEYsA,388
+scipy/_lib/array_api_compat/cupy/__init__.py,sha256=g9IFwPzeOhMXnR-c-Qf8QFXfAltPp6SlS9AtZrjKAQw,397
+scipy/_lib/array_api_compat/cupy/__pycache__/__init__.cpython-310.pyc,,
+scipy/_lib/array_api_compat/cupy/__pycache__/_aliases.cpython-310.pyc,,
+scipy/_lib/array_api_compat/cupy/__pycache__/_typing.cpython-310.pyc,,
+scipy/_lib/array_api_compat/cupy/__pycache__/linalg.cpython-310.pyc,,
+scipy/_lib/array_api_compat/cupy/_aliases.py,sha256=bKFKl2rLDX9r74Arv-HZg2yj-ZZqRwGbNoUZnsSORgM,2602
+scipy/_lib/array_api_compat/cupy/_typing.py,sha256=oDhrZB8R-D6wvee7tR4YkyBhTq93M0fFi3Tv-lpN_Dg,617
+scipy/_lib/array_api_compat/cupy/linalg.py,sha256=KidQHA9W3gBTRtWZ9963XiMXel-TvFCSecqB3Te0G9o,1358
+scipy/_lib/array_api_compat/numpy/__init__.py,sha256=bhqr1ecsSl-w5N_TnaaItHsT3eWnNtsC5H5C_6zFu7o,596
+scipy/_lib/array_api_compat/numpy/__pycache__/__init__.cpython-310.pyc,,
+scipy/_lib/array_api_compat/numpy/__pycache__/_aliases.cpython-310.pyc,,
+scipy/_lib/array_api_compat/numpy/__pycache__/_typing.cpython-310.pyc,,
+scipy/_lib/array_api_compat/numpy/__pycache__/linalg.cpython-310.pyc,,
+scipy/_lib/array_api_compat/numpy/_aliases.py,sha256=xmcLK4lvyXgrPQNnNuwXut0LYcKBzxruvcQxXcSEjOI,2606
+scipy/_lib/array_api_compat/numpy/_typing.py,sha256=OFRXfhT8-snL_4VeOjbOCd_yYIGqVS-IRrZoWNcL3v4,618
+scipy/_lib/array_api_compat/numpy/linalg.py,sha256=e3gqAyX01YCMHYrQ0rGZ8haub9ZhfHv8TZe1haaRkpE,1189
+scipy/_lib/array_api_compat/torch/__init__.py,sha256=MWtkg6kdsN8CaTgYQJvjVMZu3RQq2mUkyme7yfkUWSE,518
+scipy/_lib/array_api_compat/torch/__pycache__/__init__.cpython-310.pyc,,
+scipy/_lib/array_api_compat/torch/__pycache__/_aliases.cpython-310.pyc,,
+scipy/_lib/array_api_compat/torch/__pycache__/linalg.cpython-310.pyc,,
+scipy/_lib/array_api_compat/torch/_aliases.py,sha256=s-1HnikHDhbBGBDquuiulALiQohOthMOPbonWuV4Fuk,26792
+scipy/_lib/array_api_compat/torch/linalg.py,sha256=H6lb-umJYLcrGCEaaaH___3rJkk6dnfXNntU8tyt20E,2485
+scipy/_lib/decorator.py,sha256=ILVZlN5tlQGnmbgzNKH2TTcNzGKPlHwMuYZ8SbSEORA,15040
+scipy/_lib/deprecation.py,sha256=nAiyFAWEH2Bk5P5Hy_3HSUM3v792GS9muBKr-fdj3Yk,8074
+scipy/_lib/doccer.py,sha256=shdWIi3u7QBN5CyyKwqWW99qOEsiFewB8eH10FWhYLM,8362
+scipy/_lib/messagestream.cpython-310-x86_64-linux-gnu.so,sha256=eTSn1CnkC1JxrK6XqCuyWPjzXht_-pbNMSTXTlwapPQ,85664
+scipy/_lib/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/_lib/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/_lib/tests/__pycache__/test__gcutils.cpython-310.pyc,,
+scipy/_lib/tests/__pycache__/test__pep440.cpython-310.pyc,,
+scipy/_lib/tests/__pycache__/test__testutils.cpython-310.pyc,,
+scipy/_lib/tests/__pycache__/test__threadsafety.cpython-310.pyc,,
+scipy/_lib/tests/__pycache__/test__util.cpython-310.pyc,,
+scipy/_lib/tests/__pycache__/test_array_api.cpython-310.pyc,,
+scipy/_lib/tests/__pycache__/test_bunch.cpython-310.pyc,,
+scipy/_lib/tests/__pycache__/test_ccallback.cpython-310.pyc,,
+scipy/_lib/tests/__pycache__/test_deprecation.cpython-310.pyc,,
+scipy/_lib/tests/__pycache__/test_import_cycles.cpython-310.pyc,,
+scipy/_lib/tests/__pycache__/test_public_api.cpython-310.pyc,,
+scipy/_lib/tests/__pycache__/test_scipy_version.cpython-310.pyc,,
+scipy/_lib/tests/__pycache__/test_tmpdirs.cpython-310.pyc,,
+scipy/_lib/tests/__pycache__/test_warnings.cpython-310.pyc,,
+scipy/_lib/tests/test__gcutils.py,sha256=qvfxvemSmGvaqcpHwoEzdXYn5mrAf-B1X5qGGyasPC4,3416
+scipy/_lib/tests/test__pep440.py,sha256=u9hPoolK4AoIIS-Rq74Du5SJu5og2RxMwgaAvGgWvRo,2277
+scipy/_lib/tests/test__testutils.py,sha256=P4WDJpUgy19wD9tknQSjIivuQvZF7YUBGSBWlur2QRA,800
+scipy/_lib/tests/test__threadsafety.py,sha256=qSfCF5OG_5lbnSl-grmDN_QCU4QLe-fS3sqnwL04pf8,1322
+scipy/_lib/tests/test__util.py,sha256=lG711zcPwi8uNPrMkgwGHqIKbEPHhlU8lYj6gWVT9aA,14479
+scipy/_lib/tests/test_array_api.py,sha256=6y0vlLDf5UaMglwzdN-gWqp14EgT5N2blDYjR_5OYyE,4039
+scipy/_lib/tests/test_bunch.py,sha256=sViE5aFSmAccfk8kYvt6EmzR5hyQ9nOSWMcftaDYDBg,6168
+scipy/_lib/tests/test_ccallback.py,sha256=dy9g70zyd80KpawffSKgWbddsKUwNNeF5sbxMfCTk6w,6175
+scipy/_lib/tests/test_deprecation.py,sha256=a_3r_9pFx1sxJXeFgiTSV9DXYnktc4fio1hR0ITPywA,364
+scipy/_lib/tests/test_import_cycles.py,sha256=lsGEBuEMo4sbYdZNSOsxAQIJgquUIjcDhQjtr0cyFg4,500
+scipy/_lib/tests/test_public_api.py,sha256=vT2kkjgtkMhxPq3mAoQOZnoD5HEHabHMWrBVW4UsvvE,19234
+scipy/_lib/tests/test_scipy_version.py,sha256=jgo-2YhCkBksXHM6xKiN_iJJZkqz0CvXqn2jVxx1djA,606
+scipy/_lib/tests/test_tmpdirs.py,sha256=URQRnE_lTPw9MIJYBKXMfNATQ0mpsBDgoqAowkylbWQ,1240
+scipy/_lib/tests/test_warnings.py,sha256=MnTTTqcMhloMzL0BeZ2JN2oAL0JKzjZ7UY3IOjOrMQs,4546
+scipy/_lib/uarray.py,sha256=4X0D3FBQR6HOYcwMftjH-38Kt1nkrS-eD4c5lWL5DGo,815
+scipy/cluster/__init__.py,sha256=LNM_kFbT28cIYYgctilxYsxdjuF3KuiOaulZH4dFatE,876
+scipy/cluster/__pycache__/__init__.cpython-310.pyc,,
+scipy/cluster/__pycache__/hierarchy.cpython-310.pyc,,
+scipy/cluster/__pycache__/vq.cpython-310.pyc,,
+scipy/cluster/_hierarchy.cpython-310-x86_64-linux-gnu.so,sha256=gnVW1uPcgm7A7GKbjVh8mTAU8J6S5bogQVrvT9VzCcc,422992
+scipy/cluster/_optimal_leaf_ordering.cpython-310-x86_64-linux-gnu.so,sha256=x4gPXHf2lhC3HPwsoQa_tDJC3wEhkY8Rzl4ADWDygH8,355856
+scipy/cluster/_vq.cpython-310-x86_64-linux-gnu.so,sha256=Jj6cJ1TAj11XH4TfH2vtw47yy3q0_LVqDIwPtA96ZxY,127888
+scipy/cluster/hierarchy.py,sha256=XHNOlJBrIReWElJN1MfosbN12aE5jSxsZD-KtTKa-F0,148588
+scipy/cluster/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/cluster/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/cluster/tests/__pycache__/hierarchy_test_data.cpython-310.pyc,,
+scipy/cluster/tests/__pycache__/test_disjoint_set.cpython-310.pyc,,
+scipy/cluster/tests/__pycache__/test_hierarchy.cpython-310.pyc,,
+scipy/cluster/tests/__pycache__/test_vq.cpython-310.pyc,,
+scipy/cluster/tests/hierarchy_test_data.py,sha256=7syUYdIaDVr7hgvMliX0CW4386utjBJn1DOgX0USXls,6850
+scipy/cluster/tests/test_disjoint_set.py,sha256=EuHGBE3ZVEMnWFbCn8tjI-_6CWrNXfpnv5bUBa9qhWI,5525
+scipy/cluster/tests/test_hierarchy.py,sha256=qVwLvvVO7iJNfqWJWdXia1oXOY-T6s09Yf58IuNG6zc,48726
+scipy/cluster/tests/test_vq.py,sha256=pSUokcwvp50iWwyrlNN53VxCaShDCScjRMJ6hcISyWc,17609
+scipy/cluster/vq.py,sha256=abgPHLJDSEH8mwGaGMtMG1rmkI09P272ji0yfMcjmN4,30738
+scipy/conftest.py,sha256=7ocP1roANCCWR6A8lCUUGFoWHX-HAPEo2bUdvbvx-Ag,9034
+scipy/constants/__init__.py,sha256=Pvyiayo6WX0cVORlr-Ap0VacI5hu5C8PQ17HIwgLcTc,12437
+scipy/constants/__pycache__/__init__.cpython-310.pyc,,
+scipy/constants/__pycache__/_codata.cpython-310.pyc,,
+scipy/constants/__pycache__/_constants.cpython-310.pyc,,
+scipy/constants/__pycache__/codata.cpython-310.pyc,,
+scipy/constants/__pycache__/constants.cpython-310.pyc,,
+scipy/constants/_codata.py,sha256=AAXUgkUuVsGHJ0axSfGyxTd8MkPV6yiza-Q2MSJyt58,155635
+scipy/constants/_constants.py,sha256=CcZ7BBKx8NuVpvjBeS0lY0I1yg5lnhSVhLPKGjIMaPU,10376
+scipy/constants/codata.py,sha256=RMD4V770zdsftqP4MN559SKUq1J15dwWStdID0Z_URE,794
+scipy/constants/constants.py,sha256=w7sGxSidD2Q9Ged0Sn1pnL-qqD1ssEP1A8sZWeLWBeI,2250
+scipy/constants/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/constants/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/constants/tests/__pycache__/test_codata.cpython-310.pyc,,
+scipy/constants/tests/__pycache__/test_constants.cpython-310.pyc,,
+scipy/constants/tests/test_codata.py,sha256=ToO_lhQOsusJlP3QjrYqa1vw7x6wTCuKH17fg87tH08,1959
+scipy/constants/tests/test_constants.py,sha256=PY1oy6bbM2zoPAPgUeBqVThnVRuu4lBt_uMmxm7Ct38,1632
+scipy/datasets/__init__.py,sha256=7IzOi9gij2mhYCCMWJE1RiI22E1cVbe6exL9BRm1GXs,2802
+scipy/datasets/__pycache__/__init__.cpython-310.pyc,,
+scipy/datasets/__pycache__/_download_all.cpython-310.pyc,,
+scipy/datasets/__pycache__/_fetchers.cpython-310.pyc,,
+scipy/datasets/__pycache__/_registry.cpython-310.pyc,,
+scipy/datasets/__pycache__/_utils.cpython-310.pyc,,
+scipy/datasets/_download_all.py,sha256=iRPR2IUk6C3B5u2q77yOhac449MRSoRaTlCy2oCIknE,1701
+scipy/datasets/_fetchers.py,sha256=Jt8oklMEdZSKf0yJddYCarjlMcOl1XRsdv1LW8gfwE0,6760
+scipy/datasets/_registry.py,sha256=br0KfyalEbh5yrQLznQ_QvBtmN4rMsm0UxOjnsJp4OQ,1072
+scipy/datasets/_utils.py,sha256=kdZ-Opp7Dr1pCwM285p3GVjgZTx_mKWCvETur92FWg4,2967
+scipy/datasets/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/datasets/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/datasets/tests/__pycache__/test_data.cpython-310.pyc,,
+scipy/datasets/tests/test_data.py,sha256=GelFTF2yZqiiQkgTv8ukv8sKTJBdmpsyK5fr0G6z7Ls,4064
+scipy/fft/__init__.py,sha256=XjfuqqFtHktAmDhKoFSca5JoYqCaQxtZRdH0SlPNYjM,3513
+scipy/fft/__pycache__/__init__.cpython-310.pyc,,
+scipy/fft/__pycache__/_backend.cpython-310.pyc,,
+scipy/fft/__pycache__/_basic.cpython-310.pyc,,
+scipy/fft/__pycache__/_basic_backend.cpython-310.pyc,,
+scipy/fft/__pycache__/_debug_backends.cpython-310.pyc,,
+scipy/fft/__pycache__/_fftlog.cpython-310.pyc,,
+scipy/fft/__pycache__/_fftlog_backend.cpython-310.pyc,,
+scipy/fft/__pycache__/_helper.cpython-310.pyc,,
+scipy/fft/__pycache__/_realtransforms.cpython-310.pyc,,
+scipy/fft/__pycache__/_realtransforms_backend.cpython-310.pyc,,
+scipy/fft/_backend.py,sha256=5rBxK8GQtCMnuPHc-lNQdpH4uFFZ9_5vBukkDv6jRRA,6544
+scipy/fft/_basic.py,sha256=lGJ8qQTMXUJEbq_2vwfPPPlX7b4j358ks9LLretOtEY,62997
+scipy/fft/_basic_backend.py,sha256=BnexiVV20wvTXBPYbY89v_mCL6hzP7iF6w_ahG7EgHQ,6546
+scipy/fft/_debug_backends.py,sha256=RlvyunZNqaDDsI3-I6QH6GSBz_faT6EN4OONWsvMtR8,598
+scipy/fft/_fftlog.py,sha256=_ryVlUuSQp_J0hH8VFGMRn4ZvzudHqKDYCVbpV-WVsY,7866
+scipy/fft/_fftlog_backend.py,sha256=K-nbAr00YkJ0G5Y_WSe5aorImbnVswKQcRkGSaYLs38,5237
+scipy/fft/_helper.py,sha256=U47qLBvBl6cs6eicfdq1nldfUVs70Nw0ByOCZmuqAG0,10048
+scipy/fft/_pocketfft/LICENSE.md,sha256=wlSytf0wrjyJ02ugYXMFY7l2D8oE8bdGobLDFX2ix4k,1498
+scipy/fft/_pocketfft/__init__.py,sha256=dROVDi9kRvkbSdynd3L09tp9_exzQ4QqG3xnNx78JeU,207
+scipy/fft/_pocketfft/__pycache__/__init__.cpython-310.pyc,,
+scipy/fft/_pocketfft/__pycache__/basic.cpython-310.pyc,,
+scipy/fft/_pocketfft/__pycache__/helper.cpython-310.pyc,,
+scipy/fft/_pocketfft/__pycache__/realtransforms.cpython-310.pyc,,
+scipy/fft/_pocketfft/basic.py,sha256=4HR-eRDb6j4YR4sqKnTikFmG0tnUIXxa0uImnB6_JVs,8138
+scipy/fft/_pocketfft/helper.py,sha256=lVpf-oCVBU-TAcreDe15vfbZwpxbfvCGzut0w9cu-As,5807
+scipy/fft/_pocketfft/pypocketfft.cpython-310-x86_64-linux-gnu.so,sha256=n6qi8DOYhcVycyVM5IMTzmolFDQXyWGKY6Hql9-IY2k,1197600
+scipy/fft/_pocketfft/realtransforms.py,sha256=4TmqAkCDQK3gs1ddxXY4rOrVfvQqO8NyVtOzziUGw6E,3344
+scipy/fft/_pocketfft/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/fft/_pocketfft/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/fft/_pocketfft/tests/__pycache__/test_basic.cpython-310.pyc,,
+scipy/fft/_pocketfft/tests/__pycache__/test_real_transforms.cpython-310.pyc,,
+scipy/fft/_pocketfft/tests/test_basic.py,sha256=TviTxRl-MOQPcBgu-vvGU_wOunD59HQCc8k2-IdV3X4,35373
+scipy/fft/_pocketfft/tests/test_real_transforms.py,sha256=wn3Lgln-PL2OpSoWjKa4G4mXmngT-mLkOuZTZl3jxK0,16656
+scipy/fft/_realtransforms.py,sha256=QmO9CDqrAsvBcLNgIzFBIWBTYsSUCRJ_Cj1myv73KlE,25386
+scipy/fft/_realtransforms_backend.py,sha256=u4y4nBGCxpTLVqxK1J7xV6tcpeC3-8iiSEXLOcRM9wI,2389
+scipy/fft/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/fft/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/fft/tests/__pycache__/mock_backend.cpython-310.pyc,,
+scipy/fft/tests/__pycache__/test_backend.cpython-310.pyc,,
+scipy/fft/tests/__pycache__/test_basic.cpython-310.pyc,,
+scipy/fft/tests/__pycache__/test_fftlog.cpython-310.pyc,,
+scipy/fft/tests/__pycache__/test_helper.cpython-310.pyc,,
+scipy/fft/tests/__pycache__/test_multithreading.cpython-310.pyc,,
+scipy/fft/tests/__pycache__/test_real_transforms.cpython-310.pyc,,
+scipy/fft/tests/mock_backend.py,sha256=RAlVSy4Qtk1oTaEG9fl4WKonoSijVHIDfxqv5MbVBPY,2554
+scipy/fft/tests/test_backend.py,sha256=KnLuBO1gQcuaLlr2IP8ndhn2hNFe24EiKPvqbv4o1I4,4275
+scipy/fft/tests/test_basic.py,sha256=CRtrf1R8UoZiKrHKBgzyUK4jpAOkqmSXS55seksgHPI,21216
+scipy/fft/tests/test_fftlog.py,sha256=iRvVB54ZMJSJG52bE-t3mqfHDHesuxnfD1phNAScyGo,6173
+scipy/fft/tests/test_helper.py,sha256=8ynydSBXgDSA5uHjrSI891wYOpF7g4veIJ536Iv535Q,15436
+scipy/fft/tests/test_multithreading.py,sha256=Ub0qD3_iSApPT9E71i0dvKnsKrctLiwMq95y3370POE,2132
+scipy/fft/tests/test_real_transforms.py,sha256=sN5XJmLrnmlIBr7Z5GWYeOCZNQs3_8bAgVL44ShP0c8,8621
+scipy/fftpack/__init__.py,sha256=rLCBFC5Dx5ij_wmL7ChiGmScYlgu0mhaWtrJaz_rBt0,3155
+scipy/fftpack/__pycache__/__init__.cpython-310.pyc,,
+scipy/fftpack/__pycache__/_basic.cpython-310.pyc,,
+scipy/fftpack/__pycache__/_helper.cpython-310.pyc,,
+scipy/fftpack/__pycache__/_pseudo_diffs.cpython-310.pyc,,
+scipy/fftpack/__pycache__/_realtransforms.cpython-310.pyc,,
+scipy/fftpack/__pycache__/basic.cpython-310.pyc,,
+scipy/fftpack/__pycache__/helper.cpython-310.pyc,,
+scipy/fftpack/__pycache__/pseudo_diffs.cpython-310.pyc,,
+scipy/fftpack/__pycache__/realtransforms.cpython-310.pyc,,
+scipy/fftpack/_basic.py,sha256=Sk_gfswmWKb3za6wrU_mIrRVBl69qjzAu9ltznbDCKs,13098
+scipy/fftpack/_helper.py,sha256=g5DZnOVLyLw0BRm5w9viScU3GEPmHwRCwy5dcHdJKb4,3350
+scipy/fftpack/_pseudo_diffs.py,sha256=eCln0ZImNYr-wUWpOZ-SmKKIbhJsV8VBLmwT_C79RsQ,14200
+scipy/fftpack/_realtransforms.py,sha256=ledb21L13ofGnOU4pkx8uWuARCxsh3IFQrHctxTgzzw,19214
+scipy/fftpack/basic.py,sha256=i2CMMS__L3UtFFqe57E0cs7AZ4U6VO-Ted1KhU7_wNc,577
+scipy/fftpack/convolve.cpython-310-x86_64-linux-gnu.so,sha256=uF-nqpiMIPukki9UtxcVlw3ayffpXE7f8vQnt6fC0AA,272968
+scipy/fftpack/helper.py,sha256=M7jTN4gQIRWpkArQR13bI7WN6WcW-AabxKgrOHRvfeQ,580
+scipy/fftpack/pseudo_diffs.py,sha256=RqTDJRobZQGZg6vSNf4FBzFdLTttkqdWTGchttuQhDo,674
+scipy/fftpack/realtransforms.py,sha256=9-mR-VV3W14oTaD6pB5-RIDV3vkTBQmGCcxfbA8GYH0,595
+scipy/fftpack/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/fftpack/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/fftpack/tests/__pycache__/test_basic.cpython-310.pyc,,
+scipy/fftpack/tests/__pycache__/test_helper.cpython-310.pyc,,
+scipy/fftpack/tests/__pycache__/test_import.cpython-310.pyc,,
+scipy/fftpack/tests/__pycache__/test_pseudo_diffs.cpython-310.pyc,,
+scipy/fftpack/tests/__pycache__/test_real_transforms.cpython-310.pyc,,
+scipy/fftpack/tests/fftw_double_ref.npz,sha256=pgxklBW2RSI5JNg0LMxcCXgByGkBKHo2nlP8kln17E4,162120
+scipy/fftpack/tests/fftw_longdouble_ref.npz,sha256=pAbL1NrQTQxZ3Tj1RBb7SUJMgiKcGgdLakTsDN4gAOM,296072
+scipy/fftpack/tests/fftw_single_ref.npz,sha256=J2qRQTGOb8NuSrb_VKYbZAVO-ISbZg8XNZ5fVBtDxSY,95144
+scipy/fftpack/tests/test.npz,sha256=Nt6ASiLY_eoFRZDOSd3zyFmDi32JGTxWs7y2YMv0N5c,11968
+scipy/fftpack/tests/test_basic.py,sha256=nLMulUtVIcsVzahpYuSvuEqGHgLeCwpar5YhLbtiTxI,30307
+scipy/fftpack/tests/test_helper.py,sha256=8JaPSJOwsk5XXOf1zFahJ_ktUTfNGSk2-k3R6e420XI,1675
+scipy/fftpack/tests/test_import.py,sha256=Sz4ZZmQpz_BtiO0Gbtctt6WB398wB17oopv5mkfOh0U,1120
+scipy/fftpack/tests/test_pseudo_diffs.py,sha256=SEVPHPDdSxDSUCC8qkwuKD7mIX8rFIx9puxGzBYd1uk,13389
+scipy/fftpack/tests/test_real_transforms.py,sha256=W-gHxBHV3elIPFDOuZvSfZkEuMYJ6edjG7fL-3vVY1s,23971
+scipy/integrate/__init__.py,sha256=Nb06g1FvgETDPfultR4y_JGZCR31k9xrvpcq5VtoGPo,4236
+scipy/integrate/__pycache__/__init__.cpython-310.pyc,,
+scipy/integrate/__pycache__/_bvp.cpython-310.pyc,,
+scipy/integrate/__pycache__/_ode.cpython-310.pyc,,
+scipy/integrate/__pycache__/_odepack_py.cpython-310.pyc,,
+scipy/integrate/__pycache__/_quad_vec.cpython-310.pyc,,
+scipy/integrate/__pycache__/_quadpack_py.cpython-310.pyc,,
+scipy/integrate/__pycache__/_quadrature.cpython-310.pyc,,
+scipy/integrate/__pycache__/_tanhsinh.cpython-310.pyc,,
+scipy/integrate/__pycache__/dop.cpython-310.pyc,,
+scipy/integrate/__pycache__/lsoda.cpython-310.pyc,,
+scipy/integrate/__pycache__/odepack.cpython-310.pyc,,
+scipy/integrate/__pycache__/quadpack.cpython-310.pyc,,
+scipy/integrate/__pycache__/vode.cpython-310.pyc,,
+scipy/integrate/_bvp.py,sha256=7OiL3Kg7IZlmUkcrBy6qzyjhayV546_HlB6kb6o7zh4,40927
+scipy/integrate/_dop.cpython-310-x86_64-linux-gnu.so,sha256=vDU7-kaNDtANrwFHsf1mKG0KYq6K33OkR7CVYAmTLb8,116977
+scipy/integrate/_ivp/__init__.py,sha256=gKFR_pPjr8fRLgAGY5sOzYKGUFu2nGX8x1RrXT-GZZc,256
+scipy/integrate/_ivp/__pycache__/__init__.cpython-310.pyc,,
+scipy/integrate/_ivp/__pycache__/base.cpython-310.pyc,,
+scipy/integrate/_ivp/__pycache__/bdf.cpython-310.pyc,,
+scipy/integrate/_ivp/__pycache__/common.cpython-310.pyc,,
+scipy/integrate/_ivp/__pycache__/dop853_coefficients.cpython-310.pyc,,
+scipy/integrate/_ivp/__pycache__/ivp.cpython-310.pyc,,
+scipy/integrate/_ivp/__pycache__/lsoda.cpython-310.pyc,,
+scipy/integrate/_ivp/__pycache__/radau.cpython-310.pyc,,
+scipy/integrate/_ivp/__pycache__/rk.cpython-310.pyc,,
+scipy/integrate/_ivp/base.py,sha256=Mlef_dgmn0wzjFxZA3oBbtHrQgrfdZw_8k1mLYNZP4A,10295
+scipy/integrate/_ivp/bdf.py,sha256=deQVxWq58ihFDWKC8teztUbe8MYN4mNgLCU-6aq_z1U,17522
+scipy/integrate/_ivp/common.py,sha256=A6_X4WD0PwK-6MhOAmU8aj8CLuVdlxfBlKdPNxab-lE,15274
+scipy/integrate/_ivp/dop853_coefficients.py,sha256=OrYvW0Hu6X7sOh37FU58gNkgC77KVpYclewv_ARGMAE,7237
+scipy/integrate/_ivp/ivp.py,sha256=C5jQvVgpf0cBo_khaVO_bE9Mh8V-yOadv_xzc8FXKsQ,31472
+scipy/integrate/_ivp/lsoda.py,sha256=t5t2jZBgBPt0G20TOI4SVXuGFAZYAhfDlJZhfCzeeDo,9927
+scipy/integrate/_ivp/radau.py,sha256=7Ng-wYOdOBf4ke4-CYyNUQUH3jgYmDflpE1UXIYNOdU,19743
+scipy/integrate/_ivp/rk.py,sha256=kYWCzolgXwnDuDIqDViI2Exzu61JekmbbCYuQhGYsgA,22781
+scipy/integrate/_ivp/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/integrate/_ivp/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/integrate/_ivp/tests/__pycache__/test_ivp.cpython-310.pyc,,
+scipy/integrate/_ivp/tests/__pycache__/test_rk.cpython-310.pyc,,
+scipy/integrate/_ivp/tests/test_ivp.py,sha256=Y1pItTm6-38k1_nDMrWTKwa36vmxd2234gq4uDReUOs,37088
+scipy/integrate/_ivp/tests/test_rk.py,sha256=K9UxZghBzSL2BzmgLndPJcWOWV4Nr530TGKWakpsoeM,1326
+scipy/integrate/_lsoda.cpython-310-x86_64-linux-gnu.so,sha256=kx57YqVE3UeZ8-WerSmFuEdoKw9ksGYl866TbJuSizI,113129
+scipy/integrate/_ode.py,sha256=UBdaILr3TUmCPs-pg32Eni12Gb0WKmyqVp_C5fTVHZQ,48074
+scipy/integrate/_odepack.cpython-310-x86_64-linux-gnu.so,sha256=eSHckX_3y6otFz6AwgyRzvi2QuEt9C7HlBxBhP-LlDM,83577
+scipy/integrate/_odepack_py.py,sha256=ULRxBnl_FzZbmf_zfFMIK8r11puTTT37IzRy9rVONd8,10912
+scipy/integrate/_quad_vec.py,sha256=zJrfx12UOsyI2bY26BZclLsxhv42xUEZ3ZSDcAcHaog,21234
+scipy/integrate/_quadpack.cpython-310-x86_64-linux-gnu.so,sha256=2ET4zWnuL8B1NBcz8-XRcgCHlRtre207lGMticJsW3Y,116449
+scipy/integrate/_quadpack_py.py,sha256=RMY5JyhkDVESV4sZb2iUEBNezZ2Y-Z5dru5Bbx1k5Yk,53622
+scipy/integrate/_quadrature.py,sha256=27OnvuGOs0s1j60mkpD33NkvfqEDyRkZZ2SdtsGshqE,65061
+scipy/integrate/_tanhsinh.py,sha256=8bDtLU3cNHtHz2KZ_TDPEWlkaixUUeTZEfiCsTH2NJs,52905
+scipy/integrate/_test_multivariate.cpython-310-x86_64-linux-gnu.so,sha256=oCO9DKyKPy4ERYj4rP5sVzsJ2V1Goc521tLC5k-WlzE,16896
+scipy/integrate/_test_odeint_banded.cpython-310-x86_64-linux-gnu.so,sha256=tHI0zXWYhupvzXbVmoEsayJBo1ABVNEWNQ6BZtwgJEo,108745
+scipy/integrate/_vode.cpython-310-x86_64-linux-gnu.so,sha256=B4uehSegEEOvby4pRpU_g3IGvbLNI9IWgYOACRKpVAY,166393
+scipy/integrate/dop.py,sha256=EaxhHt4tzQjyQv6WBKqfeJtiBVQmhrcEIgkBzrTQ4Us,453
+scipy/integrate/lsoda.py,sha256=hUg4-tJcW3MjhLjLBsD88kzP7qGp_zLGw1AH2ZClHmw,436
+scipy/integrate/odepack.py,sha256=G5KiKninKFyYgF756_LtDGB68BGk7IwPidUOywFpLQo,545
+scipy/integrate/quadpack.py,sha256=OAAaraeGThs2xYYWqKIOHiTe73Qh6zr8aoI1t8cqpnk,617
+scipy/integrate/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/integrate/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/integrate/tests/__pycache__/test__quad_vec.cpython-310.pyc,,
+scipy/integrate/tests/__pycache__/test_banded_ode_solvers.cpython-310.pyc,,
+scipy/integrate/tests/__pycache__/test_bvp.cpython-310.pyc,,
+scipy/integrate/tests/__pycache__/test_integrate.cpython-310.pyc,,
+scipy/integrate/tests/__pycache__/test_odeint_jac.cpython-310.pyc,,
+scipy/integrate/tests/__pycache__/test_quadpack.cpython-310.pyc,,
+scipy/integrate/tests/__pycache__/test_quadrature.cpython-310.pyc,,
+scipy/integrate/tests/__pycache__/test_tanhsinh.cpython-310.pyc,,
+scipy/integrate/tests/test__quad_vec.py,sha256=-pcKFE_LsIiMx-bGJWztpib8uhwe8AyETTM8yvv9If0,6284
+scipy/integrate/tests/test_banded_ode_solvers.py,sha256=kJWirYckJ7k4tfweg1ds-Tozp3GEhxTbuXfgSdeJw7k,6687
+scipy/integrate/tests/test_bvp.py,sha256=Q3zw4r3lajNE9y2smIkAayRWrZ67r-yTuXODPeyvecY,20181
+scipy/integrate/tests/test_integrate.py,sha256=U-TlhrTUh8BnQ7SlW9enL5gvO15QcGlmfDEHhnjhct4,24400
+scipy/integrate/tests/test_odeint_jac.py,sha256=enXGyQQ4m-9kMPDaWvipIt3buYZ5jNjaxITP8GoS86s,1816
+scipy/integrate/tests/test_quadpack.py,sha256=e6dBmLYXrV_veLdsypR0fTs8JW_rTTAlSC5ue3vy_JA,27983
+scipy/integrate/tests/test_quadrature.py,sha256=_mQiQ1NizES6MYRUkNP1DlGssXp75aV61wajiSWEXuM,29999
+scipy/integrate/tests/test_tanhsinh.py,sha256=fWXykp3jX-lE9HLeaTaGLY2iHQ8sHIWQnsTmxSADq2k,34195
+scipy/integrate/vode.py,sha256=Jt60dcK-zXBgQF45FNRVtvyUbnkmaNWGbjX00I2mC3k,453
+scipy/interpolate/__init__.py,sha256=AULPLFlB27t4jwYSXN_vojbsO4QF_UiN1kGVsxWeCSs,3530
+scipy/interpolate/__pycache__/__init__.cpython-310.pyc,,
+scipy/interpolate/__pycache__/_bsplines.cpython-310.pyc,,
+scipy/interpolate/__pycache__/_cubic.cpython-310.pyc,,
+scipy/interpolate/__pycache__/_fitpack2.cpython-310.pyc,,
+scipy/interpolate/__pycache__/_fitpack_impl.cpython-310.pyc,,
+scipy/interpolate/__pycache__/_fitpack_py.cpython-310.pyc,,
+scipy/interpolate/__pycache__/_interpolate.cpython-310.pyc,,
+scipy/interpolate/__pycache__/_ndbspline.cpython-310.pyc,,
+scipy/interpolate/__pycache__/_ndgriddata.cpython-310.pyc,,
+scipy/interpolate/__pycache__/_pade.cpython-310.pyc,,
+scipy/interpolate/__pycache__/_polyint.cpython-310.pyc,,
+scipy/interpolate/__pycache__/_rbf.cpython-310.pyc,,
+scipy/interpolate/__pycache__/_rbfinterp.cpython-310.pyc,,
+scipy/interpolate/__pycache__/_rgi.cpython-310.pyc,,
+scipy/interpolate/__pycache__/fitpack.cpython-310.pyc,,
+scipy/interpolate/__pycache__/fitpack2.cpython-310.pyc,,
+scipy/interpolate/__pycache__/interpolate.cpython-310.pyc,,
+scipy/interpolate/__pycache__/ndgriddata.cpython-310.pyc,,
+scipy/interpolate/__pycache__/polyint.cpython-310.pyc,,
+scipy/interpolate/__pycache__/rbf.cpython-310.pyc,,
+scipy/interpolate/_bspl.cpython-310-x86_64-linux-gnu.so,sha256=9q6P0Lo0k8OVE34kb7GWzORo5pnd5Ff3lNundUqoAZA,617056
+scipy/interpolate/_bsplines.py,sha256=0UV-sSOfzePJI4wUP6R2rX4AfdOhocDRLhRDDokyJr0,75440
+scipy/interpolate/_cubic.py,sha256=iuDbeuOhlDYUzGNpvvlnPv6xiG5_8pZIONqQ4b6nPiQ,38162
+scipy/interpolate/_fitpack.cpython-310-x86_64-linux-gnu.so,sha256=Q6xkCivTDhfYysI9JJBaukVXyWakOeI76qsMb-OXVQ0,91409
+scipy/interpolate/_fitpack2.py,sha256=KFfeRremt7_PYekhXuH4rjlRrUvMw0pvKlxvgfHDFyE,89172
+scipy/interpolate/_fitpack_impl.py,sha256=oTxX0ZBw1eChL2gKyVnEIOjQhbOdHv1JAFXPCivVi8A,28669
+scipy/interpolate/_fitpack_py.py,sha256=HxdppqjgMmwwK-a2ZIoNSEjikbMlRLqWErKPdWoijSE,28064
+scipy/interpolate/_interpolate.py,sha256=eBpiTbpC4_9O-7pokew59fmtazbOYN1Se__7d32HG3k,88259
+scipy/interpolate/_ndbspline.py,sha256=rXABycf5_j8ESpY3DO_ysu76kxLKo1CawWUjbQzMSQk,12742
+scipy/interpolate/_ndgriddata.py,sha256=Piz6T2dSyv7ozsX_sn3K5DdEIa18I9UJca9V2NrF4Uc,12092
+scipy/interpolate/_pade.py,sha256=OBorKWc3vCSGlsWrajoF1_7WeNd9QtdbX0wOHLdRI2A,1827
+scipy/interpolate/_polyint.py,sha256=jcB08oyPsO71j7omBYaz-q0UbGfnxMJPzUik6lMgkD0,34983
+scipy/interpolate/_ppoly.cpython-310-x86_64-linux-gnu.so,sha256=xHUlm6LStHTmGXQPUOPR1_b8Ezk4LkcK3PajcL2okuw,470232
+scipy/interpolate/_rbf.py,sha256=tBeBsMEe_NO1yxEv8PsX8ngVearEn1VfOyrCqEfr_Uc,11674
+scipy/interpolate/_rbfinterp.py,sha256=bzuAuZpojP-cKCukD3jVekbQzZfHnrUT13Sex5pkKOI,19723
+scipy/interpolate/_rbfinterp_pythran.cpython-310-x86_64-linux-gnu.so,sha256=8AcYGq3EaX0OSiAOrQ029ZL7GJyldWVJHPec3gxh0Q0,261280
+scipy/interpolate/_rgi.py,sha256=zEKwwpQpvKU4j8NBc1SzPE61rdi_zACcZwPeqVTaPTk,31491
+scipy/interpolate/_rgi_cython.cpython-310-x86_64-linux-gnu.so,sha256=h4NpmHAmUh_fXagbo8NYUbcz_vN0I2htm3JH0rxnu1U,295704
+scipy/interpolate/dfitpack.cpython-310-x86_64-linux-gnu.so,sha256=OuDAHsFz09ayCBQPQy0SX3i2bI3aFziXHaA4MMlqe2c,338105
+scipy/interpolate/fitpack.py,sha256=VJP17JUH7I0hQhdGaOfhXpJkyUGYuKDfaZ0GGFdLE9o,716
+scipy/interpolate/fitpack2.py,sha256=34oNI8q0UKW6kLh0iLGToTKmen1CsKHKiendex3Fp9k,964
+scipy/interpolate/interpnd.cpython-310-x86_64-linux-gnu.so,sha256=rLGJfF7UUnMGYuVFCvv1VhSmYvcrOvtfuKSsEfU-6pA,484664
+scipy/interpolate/interpolate.py,sha256=pmWxfOOtaAvMKJvkO8oLvMGBZp1cEDvUM9PJWg2Cl2g,963
+scipy/interpolate/ndgriddata.py,sha256=F65cg9Tw-3LQy-G3V0YWFMN4yF23I6xOoQI3idK-sPg,677
+scipy/interpolate/polyint.py,sha256=-KGJfScIoqD3mTuR7FKS8MKWaE4EtPzomfB0Zoaa4f4,712
+scipy/interpolate/rbf.py,sha256=9AKQfUe99wmx8GaQoOd1sMo-o9yupBtvYBshimRqG9Y,597
+scipy/interpolate/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/interpolate/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/interpolate/tests/__pycache__/test_bsplines.cpython-310.pyc,,
+scipy/interpolate/tests/__pycache__/test_fitpack.cpython-310.pyc,,
+scipy/interpolate/tests/__pycache__/test_fitpack2.cpython-310.pyc,,
+scipy/interpolate/tests/__pycache__/test_gil.cpython-310.pyc,,
+scipy/interpolate/tests/__pycache__/test_interpnd.cpython-310.pyc,,
+scipy/interpolate/tests/__pycache__/test_interpolate.cpython-310.pyc,,
+scipy/interpolate/tests/__pycache__/test_ndgriddata.cpython-310.pyc,,
+scipy/interpolate/tests/__pycache__/test_pade.cpython-310.pyc,,
+scipy/interpolate/tests/__pycache__/test_polyint.cpython-310.pyc,,
+scipy/interpolate/tests/__pycache__/test_rbf.cpython-310.pyc,,
+scipy/interpolate/tests/__pycache__/test_rbfinterp.cpython-310.pyc,,
+scipy/interpolate/tests/__pycache__/test_rgi.cpython-310.pyc,,
+scipy/interpolate/tests/data/bug-1310.npz,sha256=jWgDwLOY8nBMI28dG56OXt4GvRZaCrsPIoKBq71FWuk,2648
+scipy/interpolate/tests/data/estimate_gradients_hang.npy,sha256=QGwQhXQX_16pjYzSiUXJ0OT1wk-SpIrQ6Pq5Vb8kd_E,35680
+scipy/interpolate/tests/data/gcvspl.npz,sha256=A86BVabLoMG_CiRBoQwigZH5Ft7DbLggcjQpgRKWu6g,3138
+scipy/interpolate/tests/test_bsplines.py,sha256=XoOzxITldFfd5JxbGa2M_v6AL3USCNsAkq5mJZBBzKI,93848
+scipy/interpolate/tests/test_fitpack.py,sha256=zkOUpis1bFPOiZSuBTcwOpM8TH8lYE37YhLlY_n_cdw,16057
+scipy/interpolate/tests/test_fitpack2.py,sha256=fyNnCzCp2V-OQ8hHuRtgeSEcBlB102KFTu1HeOXm2ik,58726
+scipy/interpolate/tests/test_gil.py,sha256=wt92CaxUlVgRGB-Wl2EuQxveqdARU8rZucD9IKl-pUE,1874
+scipy/interpolate/tests/test_interpnd.py,sha256=n-jvOfEyyPrA46HH43xT-5mH7jN8iICRz6Hou80aPog,13675
+scipy/interpolate/tests/test_interpolate.py,sha256=QkW9zZJzp-1sC-bBjbfUwpF9nsEEQhsyNXbKXCLm7U0,97533
+scipy/interpolate/tests/test_ndgriddata.py,sha256=2q-eRB6cvvRjtBaeFjjZJJXkkYA_ILXSecOZueT0Z3Q,10980
+scipy/interpolate/tests/test_pade.py,sha256=qtJfPaUxPCt2424CeYUCHIuofGGq0XAiyFCLYdkSMLg,3808
+scipy/interpolate/tests/test_polyint.py,sha256=q6S4LFc0aJjbxm4H0rP1NFspQ9QHvzT9E4ZJVJd6ujM,36326
+scipy/interpolate/tests/test_rbf.py,sha256=OitMk6wEbVeRS_TUeSa-ReWqR7apVez2n-wYOI08grg,6559
+scipy/interpolate/tests/test_rbfinterp.py,sha256=i-gJl0zAl5ctWj2cRU6Wi9kHOrnbbFuSeS_Ltr0Mog8,18529
+scipy/interpolate/tests/test_rgi.py,sha256=31AtLCmsfVXmg3JJllgFq0cPBx9_7yN8nkrR1FFGFbg,44604
+scipy/io/__init__.py,sha256=XegFIpTjKz9NXsHPLcvnYXT-mzUrMqPJUD7a8dhUK_0,2735
+scipy/io/__pycache__/__init__.cpython-310.pyc,,
+scipy/io/__pycache__/_fortran.cpython-310.pyc,,
+scipy/io/__pycache__/_idl.cpython-310.pyc,,
+scipy/io/__pycache__/_mmio.cpython-310.pyc,,
+scipy/io/__pycache__/_netcdf.cpython-310.pyc,,
+scipy/io/__pycache__/harwell_boeing.cpython-310.pyc,,
+scipy/io/__pycache__/idl.cpython-310.pyc,,
+scipy/io/__pycache__/mmio.cpython-310.pyc,,
+scipy/io/__pycache__/netcdf.cpython-310.pyc,,
+scipy/io/__pycache__/wavfile.cpython-310.pyc,,
+scipy/io/_fast_matrix_market/__init__.py,sha256=8okZpcBG5EjYz6kxS26Uxof9rk0YZcUb-3aT7dO_3SY,16876
+scipy/io/_fast_matrix_market/__pycache__/__init__.cpython-310.pyc,,
+scipy/io/_fast_matrix_market/_fmm_core.cpython-310-x86_64-linux-gnu.so,sha256=cZ-MTGi7t1EIxpaNK6QWCio11p63h-40iUOohOpimCc,3827072
+scipy/io/_fortran.py,sha256=ZWR385RMYQtcjgv2S9CCaRwOHPKf1kzD8dzAIqw55WE,10895
+scipy/io/_harwell_boeing/__init__.py,sha256=2iVxlj6ZquU8_XPA37npOdeHCXe8XbQrmMZO7k6Bzxs,574
+scipy/io/_harwell_boeing/__pycache__/__init__.cpython-310.pyc,,
+scipy/io/_harwell_boeing/__pycache__/_fortran_format_parser.cpython-310.pyc,,
+scipy/io/_harwell_boeing/__pycache__/hb.cpython-310.pyc,,
+scipy/io/_harwell_boeing/_fortran_format_parser.py,sha256=ykWecU9ysrCFRfeIdctaELnIDQMaCt6PjGwkxpljNzw,8917
+scipy/io/_harwell_boeing/hb.py,sha256=euxQyYRTvluzGUicNfEuyk4cOUCGLFCIs0r-8vjIZ-U,19177
+scipy/io/_harwell_boeing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/io/_harwell_boeing/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/io/_harwell_boeing/tests/__pycache__/test_fortran_format.cpython-310.pyc,,
+scipy/io/_harwell_boeing/tests/__pycache__/test_hb.cpython-310.pyc,,
+scipy/io/_harwell_boeing/tests/test_fortran_format.py,sha256=0LxOjUewBj1Fwf7EOxMWZG_PdzMbVrFYMUeGgs23VII,2360
+scipy/io/_harwell_boeing/tests/test_hb.py,sha256=3eLwxTSg_Ebt2pjBLvZhpq8WUMjkFhM1lsTu_mgvDTI,2284
+scipy/io/_idl.py,sha256=4oBvgwifLtx05eMKTNbYMfrOi1yi4poEM5scZb6J00w,27102
+scipy/io/_mmio.py,sha256=-SCJh-M8Zmh-UbBs8mbyFJhGP3eCRLbAknB0s0zl-rQ,31872
+scipy/io/_netcdf.py,sha256=dGNKBKWJ2ZcO5e5aQ1Z9oZW-n26clSweqv_bPhnSL78,39263
+scipy/io/_test_fortran.cpython-310-x86_64-linux-gnu.so,sha256=wSJC3OO9XilZ0iWqb2Q8s1XjuWkRfZx4HgfsX6zRtkE,63449
+scipy/io/arff/__init__.py,sha256=czaV8hvY6JnmEn2qyU3_fzcy_P55aXVT09OzGnhJT9I,805
+scipy/io/arff/__pycache__/__init__.cpython-310.pyc,,
+scipy/io/arff/__pycache__/_arffread.cpython-310.pyc,,
+scipy/io/arff/__pycache__/arffread.cpython-310.pyc,,
+scipy/io/arff/_arffread.py,sha256=iZgv9wiDI9oivXVd4lxhWgS1KPYS7sWvE9IV8bvlzPI,26560
+scipy/io/arff/arffread.py,sha256=q8OPAnQ_eP4K4ZyspmXOeaR-KwpiVvEKTntVPEWew3o,1145
+scipy/io/arff/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/io/arff/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/io/arff/tests/__pycache__/test_arffread.cpython-310.pyc,,
+scipy/io/arff/tests/data/iris.arff,sha256=fTS6VWSX6dwoM16mYoo30dvLoJChriDcLenHAy0ZkVM,7486
+scipy/io/arff/tests/data/missing.arff,sha256=ga__Te95i1Yf-yu2kmYDBVTz0xpSTemz7jS74_OfI4I,120
+scipy/io/arff/tests/data/nodata.arff,sha256=DBXdnIe28vrbf4C-ar7ZgeFIa0kGD4pDBJ4YP-z4QHQ,229
+scipy/io/arff/tests/data/quoted_nominal.arff,sha256=01mPSc-_OpcjXFy3EoIzKdHCmzWSag4oK1Ek2tUc6_U,286
+scipy/io/arff/tests/data/quoted_nominal_spaces.arff,sha256=bcMOl-E0I5uTT27E7bDTbW2mYOp9jS8Yrj0NfFjQdKU,292
+scipy/io/arff/tests/data/test1.arff,sha256=nUFDXUbV3sIkur55rL4qvvBdqUTbzSRrTiIPwmtmG8I,191
+scipy/io/arff/tests/data/test10.arff,sha256=va7cXiWX_AnHf-_yz25ychD8hOgf7-sEMJITGwQla30,199009
+scipy/io/arff/tests/data/test11.arff,sha256=G-cbOUUxuc3859vVkRDNjcLRSnUu8-T-Y8n0dSpvweo,241
+scipy/io/arff/tests/data/test2.arff,sha256=COGWCYV9peOGLqlYWhqG4ANT2UqlAtoVehbJLW6fxHw,300
+scipy/io/arff/tests/data/test3.arff,sha256=jUTWGaZbzoeGBneCmKu6V6RwsRPp9_0sJaSCdBg6tyI,72
+scipy/io/arff/tests/data/test4.arff,sha256=mtyuSFKUeiRR2o3mNlwvDCxWq4DsHEBHj_8IthNzp-M,238
+scipy/io/arff/tests/data/test5.arff,sha256=2Q_prOBCfM_ggsGRavlOaJ_qnWPFf2akFXJFz0NtTIE,365
+scipy/io/arff/tests/data/test6.arff,sha256=V8FNv-WUdurutFXKTOq8DADtNDrzfW65gyOlv-lquOU,195
+scipy/io/arff/tests/data/test7.arff,sha256=rxsqdev8WeqC_nKJNwetjVYXA1-qCzWmaHlMvSaVRGk,559
+scipy/io/arff/tests/data/test8.arff,sha256=c34srlkU8hkXYpdKXVozEutiPryR8bf_5qEmiGQBoG4,429
+scipy/io/arff/tests/data/test9.arff,sha256=ZuXQQzprgmTXxENW7we3wBJTpByBlpakrvRgG8n7fUk,311
+scipy/io/arff/tests/test_arffread.py,sha256=7L9m9tLfHz8moV8wJyLs1ob_gxFBCBr3SDpZXW1fgng,13104
+scipy/io/harwell_boeing.py,sha256=6cNioakGH8vMnjCt-k7W2vM5eq_L6ZMvnwpLB23KBoM,682
+scipy/io/idl.py,sha256=WWbkHVJPlPTH4XBQmts7g4ei1UBlZFvR9fJ79poHwzM,599
+scipy/io/matlab/__init__.py,sha256=YkLznYXgPaXmCNngcs9O9firIXLnM9Ez8iQC5luw2-Y,2028
+scipy/io/matlab/__pycache__/__init__.cpython-310.pyc,,
+scipy/io/matlab/__pycache__/_byteordercodes.cpython-310.pyc,,
+scipy/io/matlab/__pycache__/_mio.cpython-310.pyc,,
+scipy/io/matlab/__pycache__/_mio4.cpython-310.pyc,,
+scipy/io/matlab/__pycache__/_mio5.cpython-310.pyc,,
+scipy/io/matlab/__pycache__/_mio5_params.cpython-310.pyc,,
+scipy/io/matlab/__pycache__/_miobase.cpython-310.pyc,,
+scipy/io/matlab/__pycache__/byteordercodes.cpython-310.pyc,,
+scipy/io/matlab/__pycache__/mio.cpython-310.pyc,,
+scipy/io/matlab/__pycache__/mio4.cpython-310.pyc,,
+scipy/io/matlab/__pycache__/mio5.cpython-310.pyc,,
+scipy/io/matlab/__pycache__/mio5_params.cpython-310.pyc,,
+scipy/io/matlab/__pycache__/mio5_utils.cpython-310.pyc,,
+scipy/io/matlab/__pycache__/mio_utils.cpython-310.pyc,,
+scipy/io/matlab/__pycache__/miobase.cpython-310.pyc,,
+scipy/io/matlab/__pycache__/streams.cpython-310.pyc,,
+scipy/io/matlab/_byteordercodes.py,sha256=5mtMzDwNmpSWeEk901SKqwN2tIXSNIN1FBpmZ2Pn3XY,1985
+scipy/io/matlab/_mio.py,sha256=Bb4X8My32gDYfeZiRQuVzdJzjtGHJiwRYOxaQb3Z0Dg,12833
+scipy/io/matlab/_mio4.py,sha256=xSIrZ1BbIoxtoQqa44pu5LgvlCclehfUuoWR4Q1jZ4M,20713
+scipy/io/matlab/_mio5.py,sha256=28C22-ZpH782DqXyrpazkoEI6iCjnTcfXPWHZBstKB8,33580
+scipy/io/matlab/_mio5_params.py,sha256=skRcKG70vOlVMSb1TO67LB5312zuOUSrcOK7mOCcUss,8201
+scipy/io/matlab/_mio5_utils.cpython-310-x86_64-linux-gnu.so,sha256=9tssFfOqsNK1W7t81v3NDf5OjrqyCtX0jy5xVJPG9oQ,264600
+scipy/io/matlab/_mio_utils.cpython-310-x86_64-linux-gnu.so,sha256=STtjkyZ6zE3AnPPRavWcxkjgucxtneKEsk4Z88GQMCU,73280
+scipy/io/matlab/_miobase.py,sha256=xw8D9CU6Aajk6-hXhtAW5GKMkbkSdJxTx17qogpSxCA,12962
+scipy/io/matlab/_streams.cpython-310-x86_64-linux-gnu.so,sha256=gskAgdMqQQLu2ptdArq7apZjG7Q-Riz0xWd2v81EYX8,147488
+scipy/io/matlab/byteordercodes.py,sha256=TP6lKr_4_0aUVqX5flFI_w_NabnJF3xvbm6xK4qWIws,611
+scipy/io/matlab/mio.py,sha256=imPlshqcGZNEuWlzpYW-Y_JzUqcwdI9Z1SE3gjCzTWo,678
+scipy/io/matlab/mio4.py,sha256=53boJCNzXr3bRewVn5xtBqp_gFvb1fEUZobx-cbxpqY,983
+scipy/io/matlab/mio5.py,sha256=tcfrucXyoBq5OOSQWLpQvmlABq0ZhgKnnLK_-0ld-LQ,1217
+scipy/io/matlab/mio5_params.py,sha256=bPjuNDH79SW5p-L4RFEXFiokiynE1rqolR26-qVH0RE,1294
+scipy/io/matlab/mio5_utils.py,sha256=BrUSxwpJ2d32lW6Gjuuh5Sk7SeMQv-MS1r0sc-ZcaBo,661
+scipy/io/matlab/mio_utils.py,sha256=JZP2mnyDKjHzABKHAZ5Nmxt9FdnlM1lUV-Qe4Uju2yk,558
+scipy/io/matlab/miobase.py,sha256=JKUwT3HNlPzLFiigr3lPj9WB7yBx7mF8xitGuFwWu5E,764
+scipy/io/matlab/streams.py,sha256=sh2KA6Wl-56ghy15v2P2tmIrH-Tb8bGnTp7z22XTx-8,585
+scipy/io/matlab/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/io/matlab/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/io/matlab/tests/__pycache__/test_byteordercodes.cpython-310.pyc,,
+scipy/io/matlab/tests/__pycache__/test_mio.cpython-310.pyc,,
+scipy/io/matlab/tests/__pycache__/test_mio5_utils.cpython-310.pyc,,
+scipy/io/matlab/tests/__pycache__/test_mio_funcs.cpython-310.pyc,,
+scipy/io/matlab/tests/__pycache__/test_mio_utils.cpython-310.pyc,,
+scipy/io/matlab/tests/__pycache__/test_miobase.cpython-310.pyc,,
+scipy/io/matlab/tests/__pycache__/test_pathological.cpython-310.pyc,,
+scipy/io/matlab/tests/__pycache__/test_streams.cpython-310.pyc,,
+scipy/io/matlab/tests/data/bad_miuint32.mat,sha256=CVkYHp_U4jxYKRRHSuZ5fREop4tJjnZcQ02DKfObkRA,272
+scipy/io/matlab/tests/data/bad_miutf8_array_name.mat,sha256=V-jfVMkYyy8qRGcOIsNGcoO0GCgTxchrsQUBGBnfWHE,208
+scipy/io/matlab/tests/data/big_endian.mat,sha256=2ttpiaH2B6nmHnq-gsFeMvZ2ZSLOlpzt0IJiqBTcc8M,273
+scipy/io/matlab/tests/data/broken_utf8.mat,sha256=nm8aotRl6NIxlM3IgPegKR3EeevYZoJCrYpV4Sa1T5I,216
+scipy/io/matlab/tests/data/corrupted_zlib_checksum.mat,sha256=X4dvE7K9DmGEF3D6I-48hC86W41jB54H7bD8KTXjtYA,276
+scipy/io/matlab/tests/data/corrupted_zlib_data.mat,sha256=DfE1YBH-pYw-dAaEeKA6wZcyKeo9GlEfrzZtql-fO_w,3451
+scipy/io/matlab/tests/data/japanese_utf8.txt,sha256=rgxiBH7xmEKF91ZkB3oMLrqABBXINEMHPXDKdZXNBEY,270
+scipy/io/matlab/tests/data/little_endian.mat,sha256=FQP_2MNod-FFF-JefN7ZxovQ6QLCdHQ0DPL_qBCP44Y,265
+scipy/io/matlab/tests/data/logical_sparse.mat,sha256=qujUUpYewaNsFKAwGpYS05z7kdUv9TQZTHV5_lWhRrs,208
+scipy/io/matlab/tests/data/malformed1.mat,sha256=DTuTr1-IzpLMBf8u5DPb3HXmw9xJo1aWfayA5S_3zUI,2208
+scipy/io/matlab/tests/data/miuint32_for_miint32.mat,sha256=romrBP_BS46Sl2-pKWsUnxYDad2wehyjq4wwLaVqums,272
+scipy/io/matlab/tests/data/miutf8_array_name.mat,sha256=Vo8JptFr-Kg2f2cEoDg8LtELSjVNyccdJY74WP_kqtc,208
+scipy/io/matlab/tests/data/nasty_duplicate_fieldnames.mat,sha256=bvdmj6zDDUIpOfIP8J4Klo107RYCDd5VK5gtOYx3GsU,8168
+scipy/io/matlab/tests/data/one_by_zero_char.mat,sha256=Z3QdZjTlOojjUpS0cfBP4XfNQI3GTjqU0n_pnAzgQhU,184
+scipy/io/matlab/tests/data/parabola.mat,sha256=ENWuWX_uwo4Av16dIGOwnbMReAMrShDhalkq8QUI8Rg,729
+scipy/io/matlab/tests/data/single_empty_string.mat,sha256=4uTmX0oydTjmtnhxqi9SyPWCG2I24gj_5LarS80bPik,171
+scipy/io/matlab/tests/data/some_functions.mat,sha256=JA736oG3s8PPdKhdsYK-BndLUsGrJCJAIRBseSIEZtM,1397
+scipy/io/matlab/tests/data/sqr.mat,sha256=3DtGl_V4wABKCDQ0P3He5qfOzpUTC-mINdK73MKS7AM,679
+scipy/io/matlab/tests/data/test3dmatrix_6.1_SOL2.mat,sha256=-odiBIQAbOLERg0Vg682QHGfs7C8MaA_gY77OWR8x78,232
+scipy/io/matlab/tests/data/test3dmatrix_6.5.1_GLNX86.mat,sha256=G5siwvZ-7Uv5KJ6h7AA3OHL6eiFsd8Lnjx4IcoByzCU,232
+scipy/io/matlab/tests/data/test3dmatrix_7.1_GLNX86.mat,sha256=EVj1wPnoyWGIdTpkSj3YAwqzTAm27eqZNxCaJAs3pwU,213
+scipy/io/matlab/tests/data/test3dmatrix_7.4_GLNX86.mat,sha256=S_Sd3sxorDd8tZ5CxD5_J8vXbfcksLWzhUQY5b82L9g,213
+scipy/io/matlab/tests/data/test_empty_struct.mat,sha256=WoC7g7TyXqNr2T0d5xE3IUq5PRzatE0mxXjqoHX5Xec,173
+scipy/io/matlab/tests/data/test_mat4_le_floats.mat,sha256=2xvn3Cg4039shJl62T-bH-VeVP_bKtwdqvGfIxv8FJ4,38
+scipy/io/matlab/tests/data/test_skip_variable.mat,sha256=pJLVpdrdEb-9SMZxaDu-uryShlIi90l5LfXhvpVipJ0,20225
+scipy/io/matlab/tests/data/testbool_8_WIN64.mat,sha256=_xBw_2oZA7u9Xs6GJItUpSIEV4jVdfdcwzmLNFWM6ow,185
+scipy/io/matlab/tests/data/testcell_6.1_SOL2.mat,sha256=OWOBzNpWTyAHIcZABRytVMcABiRYgEoMyF9gDaIkFe4,536
+scipy/io/matlab/tests/data/testcell_6.5.1_GLNX86.mat,sha256=7111TN_sh1uMHmYx-bjd_v9uaAnWhJMhrQFAtAw6Nvk,536
+scipy/io/matlab/tests/data/testcell_7.1_GLNX86.mat,sha256=62p6LRW6PbM-Y16aUeGVhclTVqS5IxPUtsohe7MjrYo,283
+scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat,sha256=NkTA8UW98hIQ0t5hGx_leG-MzNroDelYwqx8MPnO63Q,283
+scipy/io/matlab/tests/data/testcellnest_6.1_SOL2.mat,sha256=AeNaog8HUDCVrIuGICAXYu9SGDsvV6qeGjgvWHrVQho,568
+scipy/io/matlab/tests/data/testcellnest_6.5.1_GLNX86.mat,sha256=Gl4QA0yYwGxjiajjgWS939WVAM-W2ahNIm9wwMaT5oc,568
+scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat,sha256=CUGtkwIU9CBa0Slx13mbaM67_ec0p-unZdu8Z4YYM3c,228
+scipy/io/matlab/tests/data/testcellnest_7.4_GLNX86.mat,sha256=TeTk5yjl5j_bcnmIkpzuYHxGGQXNu-rK6xOsN4t6lX8,228
+scipy/io/matlab/tests/data/testcomplex_4.2c_SOL2.mat,sha256=WOwauWInSVUFBuOJ1Bo3spmUQ3UWUIlsIe4tYGlrU7o,176
+scipy/io/matlab/tests/data/testcomplex_6.1_SOL2.mat,sha256=GpAEccizI8WvlrBPdvlKUv6uKbZOo_cjUK3WVVb2lo4,352
+scipy/io/matlab/tests/data/testcomplex_6.5.1_GLNX86.mat,sha256=3MEbf0zJdQGAO7x-pzFCup2QptfYJHQG59z0vVOdxl4,352
+scipy/io/matlab/tests/data/testcomplex_7.1_GLNX86.mat,sha256=VNHV2AIEkvPuhae1kKIqt5t8AMgUyr0L_CAp-ykLxt4,247
+scipy/io/matlab/tests/data/testcomplex_7.4_GLNX86.mat,sha256=8rWGf5bqY7_2mcd5w5gTYgMkXVePlLL8qT7lh8kApn0,247
+scipy/io/matlab/tests/data/testdouble_4.2c_SOL2.mat,sha256=MzT7OYPEUXHYNPBrVkyKEaG5Cas2aOA0xvrO7l4YTrQ,103
+scipy/io/matlab/tests/data/testdouble_6.1_SOL2.mat,sha256=DpB-mVKx1gsjl-3IbxfxHNuzU5dnuku-MDQCA8kALVI,272
+scipy/io/matlab/tests/data/testdouble_6.5.1_GLNX86.mat,sha256=4hY5VEubavNEv5KvcqQnd7MWWvFUzHXXpYIqUuUt-50,272
+scipy/io/matlab/tests/data/testdouble_7.1_GLNX86.mat,sha256=N2QOOIXPyy0zPZZ_qY7xIDaodMGrTq3oXNBEHZEscw0,232
+scipy/io/matlab/tests/data/testdouble_7.4_GLNX86.mat,sha256=TrkJ4Xx_dC9YrPdewlsOvYs_xag7gT3cN4HkDsJmT8I,232
+scipy/io/matlab/tests/data/testemptycell_5.3_SOL2.mat,sha256=g96Vh9FpNhkiWKsRm4U6KqeKd1hNAEyYSD7IVzdzwsU,472
+scipy/io/matlab/tests/data/testemptycell_6.5.1_GLNX86.mat,sha256=2Zw-cMv-Mjbs2HkSl0ubmh_htFUEpkn7XVHG8iM32o0,472
+scipy/io/matlab/tests/data/testemptycell_7.1_GLNX86.mat,sha256=t5Ar8EgjZ7fkTUHIVpdXg-yYWo_MBaigMDJUGWEIrmU,218
+scipy/io/matlab/tests/data/testemptycell_7.4_GLNX86.mat,sha256=5PPvfOoL-_Q5ou_2nIzIrHgeaOZGFXGxAFdYzCQuwEQ,218
+scipy/io/matlab/tests/data/testfunc_7.4_GLNX86.mat,sha256=ScTKftENe78imbMc0I5ouBlIMcEEmZgu8HVKWAMNr58,381
+scipy/io/matlab/tests/data/testhdf5_7.4_GLNX86.mat,sha256=ZoVbGk38_MCppZ0LRr6OE07HL8ZB4rHXgMj9LwUBgGg,4168
+scipy/io/matlab/tests/data/testmatrix_4.2c_SOL2.mat,sha256=14YMiKAN9JCPTqSDXxa58BK6Un7EM4hEoSGAUuwKWGQ,151
+scipy/io/matlab/tests/data/testmatrix_6.1_SOL2.mat,sha256=ZdjNbcIE75V5Aht5EVBvJX26aabvNqbUH0Q9VBnxBS4,216
+scipy/io/matlab/tests/data/testmatrix_6.5.1_GLNX86.mat,sha256=OB82QgB6SwtsxT4t453OVSj-B777XrHGEGOMgMD1XGc,216
+scipy/io/matlab/tests/data/testmatrix_7.1_GLNX86.mat,sha256=-TYB0kREY7i7gt5x15fOYjXi410pXuDWUFxPYuMwywI,193
+scipy/io/matlab/tests/data/testmatrix_7.4_GLNX86.mat,sha256=l9psDc5K1bpxNeuFlyYIYauswLnOB6dTX6-jvelW0kU,193
+scipy/io/matlab/tests/data/testminus_4.2c_SOL2.mat,sha256=2914WYQajPc9-Guy3jDOLU3YkuE4OXC_63FUSDzJzX0,38
+scipy/io/matlab/tests/data/testminus_6.1_SOL2.mat,sha256=2X2fZKomz0ktBvibj7jvHbEvt2HRA8D6hN9qA1IDicw,200
+scipy/io/matlab/tests/data/testminus_6.5.1_GLNX86.mat,sha256=i364SgUCLSYRjQsyygvY1ArjEaO5uLip3HyU-R7zaLo,200
+scipy/io/matlab/tests/data/testminus_7.1_GLNX86.mat,sha256=gtYNC9_TciYdq8X9IwyGEjiw2f1uCVTGgiOPFOiQbJc,184
+scipy/io/matlab/tests/data/testminus_7.4_GLNX86.mat,sha256=eXcoTM8vKuh4tQnl92lwdDaqssGB6G9boSHh3FOCkng,184
+scipy/io/matlab/tests/data/testmulti_4.2c_SOL2.mat,sha256=Zhyu2KCsseSJ5NARdS00uwddCs4wmjcWNP2LJFns2-Q,240
+scipy/io/matlab/tests/data/testmulti_7.1_GLNX86.mat,sha256=KI3H58BVj6k6MFsj8icSbjy_0Z-jOesWN5cafStLPG8,276
+scipy/io/matlab/tests/data/testmulti_7.4_GLNX86.mat,sha256=Yr4YKCP27yMWlK5UOK3BAEOAyMr-m0yYGcj8v1tCx-I,276
+scipy/io/matlab/tests/data/testobject_6.1_SOL2.mat,sha256=kzLxy_1o1HclPXWyA-SX5gl6LsG1ioHuN4eS6x5iZio,800
+scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat,sha256=dq_6_n0v7cUz9YziXn-gZFNc9xYtNxZ8exTsziWIM7s,672
+scipy/io/matlab/tests/data/testobject_7.1_GLNX86.mat,sha256=3z-boFw0SC5142YPOLo2JqdusPItVzjCFMhXAQNaQUQ,306
+scipy/io/matlab/tests/data/testobject_7.4_GLNX86.mat,sha256=5OwLTMgCBlxsDfiEUzlVjqcSbVQG-X5mIw5JfW3wQXA,306
+scipy/io/matlab/tests/data/testonechar_4.2c_SOL2.mat,sha256=BCvppGhO19-j-vxAvbdsORIiyuJqzCuQog9Ao8V1lvA,40
+scipy/io/matlab/tests/data/testonechar_6.1_SOL2.mat,sha256=ThppTHGJFrUfal5tewS70DL00dSwk1otazuVdJrTioE,200
+scipy/io/matlab/tests/data/testonechar_6.5.1_GLNX86.mat,sha256=SBfN6e7Vz1rAdi8HLguYXcHUHk1viaXTYccdEyhhob4,200
+scipy/io/matlab/tests/data/testonechar_7.1_GLNX86.mat,sha256=m8W9GqvflfAsizkhgAfT0lLcxuegZIWCLNuHVX69Jac,184
+scipy/io/matlab/tests/data/testonechar_7.4_GLNX86.mat,sha256=t9ObKZOLy3vufnER8TlvQcUkd_wmXbJSdQoG4f3rVKY,184
+scipy/io/matlab/tests/data/testscalarcell_7.4_GLNX86.mat,sha256=5LX9sLH7Y6h_N_a1XRN2GuMgp_P7ECpPsXGDOypAJg0,194
+scipy/io/matlab/tests/data/testsimplecell.mat,sha256=Aoeh0PX2yiLDTwkxMEyZ_CNX2mJHZvyfuFJl817pA1c,220
+scipy/io/matlab/tests/data/testsparse_4.2c_SOL2.mat,sha256=dFUcB1gunfWqexgR4YDZ_Ec0w0HffM1DUE1C5PVfDDc,223
+scipy/io/matlab/tests/data/testsparse_6.1_SOL2.mat,sha256=9Sgd_SPkGNim7ZL0xgD71qml3DK0yDHYC7VSNLNQEXA,280
+scipy/io/matlab/tests/data/testsparse_6.5.1_GLNX86.mat,sha256=jp1ILNxLyV6XmCCGxAz529XoZ9dhCqGEO-ExPH70_Pg,328
+scipy/io/matlab/tests/data/testsparse_7.1_GLNX86.mat,sha256=k8QuQ_4Zu7FWTzHjRnHCVZ9Yu5vwNP0WyNzu6TuiY-4,229
+scipy/io/matlab/tests/data/testsparse_7.4_GLNX86.mat,sha256=QbZOCqIvnaK0XOH3kaSXBe-m_1_Rb33psq8E-WMSBTU,229
+scipy/io/matlab/tests/data/testsparsecomplex_4.2c_SOL2.mat,sha256=QMVoBXVyl9RBGvAjLoiW85kAXYJ-hHprUMegEG69A5w,294
+scipy/io/matlab/tests/data/testsparsecomplex_6.1_SOL2.mat,sha256=WfEroAT5YF4HGAKq3jTJxlFrKaTCh3rwlSlKu__VjwA,304
+scipy/io/matlab/tests/data/testsparsecomplex_6.5.1_GLNX86.mat,sha256=e0s6cyoKJeYMArdceHpnKDvtCVcw7XuB44OBDHpoa6U,400
+scipy/io/matlab/tests/data/testsparsecomplex_7.1_GLNX86.mat,sha256=kgHcuq-deI2y8hfkGwlMOkW7lntexdPHfuz0ar6b3jo,241
+scipy/io/matlab/tests/data/testsparsecomplex_7.4_GLNX86.mat,sha256=rYCaWNLXK7f_jjMc6_UvZz6ZDuMCuVRmJV5RyeXiDm8,241
+scipy/io/matlab/tests/data/testsparsefloat_7.4_GLNX86.mat,sha256=hnNV6GZazEeqTXuA9vcOUo4xam_UnKRYGYH9PUGTLv8,219
+scipy/io/matlab/tests/data/teststring_4.2c_SOL2.mat,sha256=cAhec51DlqIYfDXXGaumOE3Hqb3cFWM1UsUK3K_lDP8,375
+scipy/io/matlab/tests/data/teststring_6.1_SOL2.mat,sha256=ciFzNGMO7gjYecony-E8vtOwBY4vXIUhyug6Euaz3Kg,288
+scipy/io/matlab/tests/data/teststring_6.5.1_GLNX86.mat,sha256=yrJrpLiwLvU_LI1D6rw1Pk1qJK1YlC7Cmw7lwyJVLtw,288
+scipy/io/matlab/tests/data/teststring_7.1_GLNX86.mat,sha256=zo7sh-8dMpGqhoNxLEnfz3Oc7RonxiY5j0B3lxk0e8o,224
+scipy/io/matlab/tests/data/teststring_7.4_GLNX86.mat,sha256=igL_CvtAcNEa1nxunDjQZY5wS0rJOlzsUkBiDreJssk,224
+scipy/io/matlab/tests/data/teststringarray_4.2c_SOL2.mat,sha256=pRldk-R0ig1k3ouvaR9oVtBwZsQcDW_b4RBEDYu1-Vk,156
+scipy/io/matlab/tests/data/teststringarray_6.1_SOL2.mat,sha256=B9IdaSsyb0wxjyYyHOj_GDO0laAeWDEJhoEhC9xdm1E,232
+scipy/io/matlab/tests/data/teststringarray_6.5.1_GLNX86.mat,sha256=t4tKGJg2NEg_Ar5MkOjCoQb2hVL8Q_Jdh9FF4TPL_4g,232
+scipy/io/matlab/tests/data/teststringarray_7.1_GLNX86.mat,sha256=lpYkBZX8K-c4FO5z0P9DMfYc7Y-yzyg11J6m-19uYTU,203
+scipy/io/matlab/tests/data/teststringarray_7.4_GLNX86.mat,sha256=lG-c7U-5Bo8j8xZLpd0JAsMYwewT6cAw4eJCZH5xf6E,203
+scipy/io/matlab/tests/data/teststruct_6.1_SOL2.mat,sha256=3GJbA4O7LP57J6IYzmJqTPeSJrEaiNSk-rg7h0ANR1w,608
+scipy/io/matlab/tests/data/teststruct_6.5.1_GLNX86.mat,sha256=fRbqAnzTeOU3dTQx7O24MfMVFr6pM5u594FRrPPkYJE,552
+scipy/io/matlab/tests/data/teststruct_7.1_GLNX86.mat,sha256=mCtI_Yot08NazvWHvehOZbTV4bW_I4-D5jBgJ6T9EbI,314
+scipy/io/matlab/tests/data/teststruct_7.4_GLNX86.mat,sha256=52qaF4HRCtPl1jE6ljbkEl2mofZVAPpmBxrm-J5OTTI,314
+scipy/io/matlab/tests/data/teststructarr_6.1_SOL2.mat,sha256=vneCpWBwApBGfeKzdZcybyajxjR-ZYf64j0l08_hU84,528
+scipy/io/matlab/tests/data/teststructarr_6.5.1_GLNX86.mat,sha256=gqhRpSfNNB5SR9sCp-wWrvokr5VV_heGnvco6dmfOvY,472
+scipy/io/matlab/tests/data/teststructarr_7.1_GLNX86.mat,sha256=6VDU0mtTBEG0bBHqKP1p8xq846eMhSZ_WvBZv8MzE7M,246
+scipy/io/matlab/tests/data/teststructarr_7.4_GLNX86.mat,sha256=ejtyxeeX_W1a2rNrEUUiG9txPW8_UtSgt8IaDOxE2pg,246
+scipy/io/matlab/tests/data/teststructnest_6.1_SOL2.mat,sha256=sbi0wUwOrbU-gBq3lyDwhAbvchdtOJkflOR_MU7uGKA,496
+scipy/io/matlab/tests/data/teststructnest_6.5.1_GLNX86.mat,sha256=uTkKtrYBTuz4kICVisEaG7V5C2nJDKjy92mPDswTLPE,416
+scipy/io/matlab/tests/data/teststructnest_7.1_GLNX86.mat,sha256=o4F2jOhYyNpJCo-BMg6v_ITZQvjenXfXHLq94e7iwRo,252
+scipy/io/matlab/tests/data/teststructnest_7.4_GLNX86.mat,sha256=CNXO12O6tedEuMG0jNma4qfbTgCswAbHwh49a3uE3Yk,252
+scipy/io/matlab/tests/data/testunicode_7.1_GLNX86.mat,sha256=KV97FCW-1XZiXrwXJoZPbgyAht79oIFHa917W1KFLwE,357
+scipy/io/matlab/tests/data/testunicode_7.4_GLNX86.mat,sha256=9-8xzACZleBkMjZnbr8t4Ncs9B6mbzrONDblPnteBPU,357
+scipy/io/matlab/tests/data/testvec_4_GLNX86.mat,sha256=GQzR3mBVS266_NBfrRC9X0dLgmeu8Jl4r4ZYMOrn1V0,93
+scipy/io/matlab/tests/test_byteordercodes.py,sha256=FCHBAxeQZlhvTXw-AO-ukwTWvpN7NzmncBEDJ1P4de4,938
+scipy/io/matlab/tests/test_mio.py,sha256=BcQlSLmQqqNv7CQa1HcLJYVp6OtlMig9FeliyRTc98Q,44810
+scipy/io/matlab/tests/test_mio5_utils.py,sha256=eacgGg0TaQXOkG7iaeYovtWyjPgYCY50mHPoPjnHMTI,5389
+scipy/io/matlab/tests/test_mio_funcs.py,sha256=fSDaeVPvCRBFzqjWtXR5xIv9UQ_yv6Y_Nl5D5u0HIGo,1392
+scipy/io/matlab/tests/test_mio_utils.py,sha256=GX85RuLqr2HxS5_f7ZgrxbhswJy2GPQQoQbiQYg0s14,1594
+scipy/io/matlab/tests/test_miobase.py,sha256=xH4ZOR_b25TJLyIGqYQdeSASpTi8j-oIkRcO4D-R4us,1464
+scipy/io/matlab/tests/test_pathological.py,sha256=-Efeq2x2yAaLK28EKpai1vh4HsZTCteF_hY_vEGWndA,1055
+scipy/io/matlab/tests/test_streams.py,sha256=dcirMJ5slCA3eIjB9VRcGG3U2htTtXL8BiYOLvHCfds,7406
+scipy/io/mmio.py,sha256=jT06sWGxdylPF_jBjbrqV2H5TXVUa04R-38OGrN8DZs,569
+scipy/io/netcdf.py,sha256=iDIpKlQcPWf2u-jIoYsqYx3a5oqWCy-54AcFW_muzU0,880
+scipy/io/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/io/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/io/tests/__pycache__/test_fortran.cpython-310.pyc,,
+scipy/io/tests/__pycache__/test_idl.cpython-310.pyc,,
+scipy/io/tests/__pycache__/test_mmio.cpython-310.pyc,,
+scipy/io/tests/__pycache__/test_netcdf.cpython-310.pyc,,
+scipy/io/tests/__pycache__/test_paths.cpython-310.pyc,,
+scipy/io/tests/__pycache__/test_wavfile.cpython-310.pyc,,
+scipy/io/tests/data/Transparent Busy.ani,sha256=vwoK3ysYo87-TwzvjerHjFjSPIGpw83jjiMDXcHPWjA,4362
+scipy/io/tests/data/array_float32_1d.sav,sha256=A_xXWkfS1sQCxP4ONezeEZvlKEXwZ1TPG2rCCFdmBNM,2628
+scipy/io/tests/data/array_float32_2d.sav,sha256=qJmN94pywXznXMHzt-L6DJgaIq_FfruVKJl_LMaI8UU,3192
+scipy/io/tests/data/array_float32_3d.sav,sha256=U7P6As7Nw6LdBY1pTOaW9C-O_NlXLXZwSgbT3H8Z8uk,13752
+scipy/io/tests/data/array_float32_4d.sav,sha256=Tl6erEw_Zq3dwVbVyPXRWqB83u_o4wkIVFOe3wQrSro,6616
+scipy/io/tests/data/array_float32_5d.sav,sha256=VmaBgCD854swYyLouDMHJf4LL6iUNgajEOQf0pUjHjg,7896
+scipy/io/tests/data/array_float32_6d.sav,sha256=lb7modI0OQDweJWbDxEV2OddffKgMgq1tvCy5EK6sOU,19416
+scipy/io/tests/data/array_float32_7d.sav,sha256=pqLWIoxev9sLCs9LLwxFlM4RCFwxHC4Q0dEEz578mpI,3288
+scipy/io/tests/data/array_float32_8d.sav,sha256=R8A004f9XLWvF6eKMNEqIrC6PGP1vLZr9sFqawqM8ZA,13656
+scipy/io/tests/data/array_float32_pointer_1d.sav,sha256=sV7qFNwHK-prG5vODa7m5HYK7HlH_lqdfsI5Y1RWDyg,2692
+scipy/io/tests/data/array_float32_pointer_2d.sav,sha256=b0brvK6xQeezoRuujmEcJNw2v6bfASLM3FSY9u5dMSg,3256
+scipy/io/tests/data/array_float32_pointer_3d.sav,sha256=a_Iyg1YjPBRh6B-N_n_BGIVjFje4K-EPibKV-bPbF7E,13816
+scipy/io/tests/data/array_float32_pointer_4d.sav,sha256=cXrkHHlPyoYstDL_OJ15-55sZOOeDNW2OJ3KWhBv-Kk,6680
+scipy/io/tests/data/array_float32_pointer_5d.sav,sha256=gRVAZ6jeqFZyIQI9JVBHed9Y0sjS-W4bLseb01rIcGs,7960
+scipy/io/tests/data/array_float32_pointer_6d.sav,sha256=9yic-CQiS0YR_ow2yUA2Nix0Nb_YCKMUsIgPhgcJT1c,19480
+scipy/io/tests/data/array_float32_pointer_7d.sav,sha256=Rp1s8RbW8eoEIRTqxba4opAyY0uhTuyy3YkwRlNspQU,3352
+scipy/io/tests/data/array_float32_pointer_8d.sav,sha256=Wk3Dd2ClAwWprXLKZon3blY7aMvMrJqz_NXzK0J5MFY,13720
+scipy/io/tests/data/example_1.nc,sha256=EkfC57dWXeljgXy5sidrJHJG12D1gmQUyPDK18WzlT4,1736
+scipy/io/tests/data/example_2.nc,sha256=wywMDspJ2QT431_sJUr_5DHqG3pt9VTvDJzfR9jeWCk,272
+scipy/io/tests/data/example_3_maskedvals.nc,sha256=P9N92jCJgKJo9VmNd7FeeJSvl4yUUFwBy6JpR4MeuME,1424
+scipy/io/tests/data/fortran-3x3d-2i.dat,sha256=oYCXgtY6qqIqLAhoh_46ob_RVQRcV4uu333pOiLKgRM,451
+scipy/io/tests/data/fortran-mixed.dat,sha256=zTi7RLEnyAat_DdC3iSEcSbyDtAu0aTKwUT-tExjasw,40
+scipy/io/tests/data/fortran-sf8-11x1x10.dat,sha256=KwaOrZOAe-wRhuxvmHIK-Wr59us40MmiA9QyWtIAUaA,888
+scipy/io/tests/data/fortran-sf8-15x10x22.dat,sha256=5ohvjjOUcIsGimSqDhpUUKwflyhVsfwKL5ElQe_SU0I,26408
+scipy/io/tests/data/fortran-sf8-1x1x1.dat,sha256=Djmoip8zn-UcxWGUPKV5wzKOYOf7pbU5L7HaR3BYlec,16
+scipy/io/tests/data/fortran-sf8-1x1x5.dat,sha256=Btgavm3w3c9md_5yFfq6Veo_5IK9KtlLF1JEPeHhZoU,48
+scipy/io/tests/data/fortran-sf8-1x1x7.dat,sha256=L0r9yAEMbfMwYQytzYsS45COqaVk-o_hi6zRY3yIiO4,64
+scipy/io/tests/data/fortran-sf8-1x3x5.dat,sha256=c2LTocHclwTIeaR1Pm3mVMyf5Pl_imfjIFwi4Lpv0Xs,128
+scipy/io/tests/data/fortran-si4-11x1x10.dat,sha256=OesvSIGsZjpKZlZsV74PNwy0Co0KH8-3gxL9-DWoa08,448
+scipy/io/tests/data/fortran-si4-15x10x22.dat,sha256=OJcKyw-GZmhHb8REXMsHDn7W5VP5bhmxgVPIAYG-Fj4,13208
+scipy/io/tests/data/fortran-si4-1x1x1.dat,sha256=1Lbx01wZPCOJHwg99MBDuc6QZKdMnccxNgICt4omfFM,12
+scipy/io/tests/data/fortran-si4-1x1x5.dat,sha256=L1St4yiHTA3v91JjnndYfUrdKfT1bWxckwnnrscEZXc,28
+scipy/io/tests/data/fortran-si4-1x1x7.dat,sha256=Dmqt-tD1v2DiPZkghGGZ9Ss-nJGfei-3yFXPO5Acpk4,36
+scipy/io/tests/data/fortran-si4-1x3x5.dat,sha256=3vl6q93m25jEcZVKD0CuKNHmhZwZKp-rv0tfHoPVP88,68
+scipy/io/tests/data/invalid_pointer.sav,sha256=JmgoISXC4r5fSmI5FqyapvmzQ4qpYLf-9N7_Et1p1HQ,1280
+scipy/io/tests/data/null_pointer.sav,sha256=P_3a_sU614F3InwM82jSMtWycSZkvqRn1apwd8XxbtE,2180
+scipy/io/tests/data/scalar_byte.sav,sha256=dNJbcE5OVDY_wHwN_UBUtfIRd13Oqu-RBEO74g5SsBA,2076
+scipy/io/tests/data/scalar_byte_descr.sav,sha256=DNTmDgDWOuzlQnrceER6YJ0NutUUwZ9tozVMBWQmuuY,2124
+scipy/io/tests/data/scalar_complex32.sav,sha256=NGd-EvmFZgt8Ko5MP3T_TLwyby6yS0BXM_OW8197hpU,2076
+scipy/io/tests/data/scalar_complex64.sav,sha256=gFBWtxuAajazupGFSbvlWUPDYK-JdWgZcEWih2-7IYU,2084
+scipy/io/tests/data/scalar_float32.sav,sha256=EwWQw2JTwq99CHVpDAh4R20R0jWaynXABaE2aTRmXrs,2072
+scipy/io/tests/data/scalar_float64.sav,sha256=iPcDlgF1t0HoabvNLWCbSiTPIa9rvVEbOGGmE_3Ilsk,2076
+scipy/io/tests/data/scalar_heap_pointer.sav,sha256=JXZbPmntXILsNOuLIKL8qdu8gDJekYrlN9DQxAWve0E,2204
+scipy/io/tests/data/scalar_int16.sav,sha256=kDBLbPYGo2pzmZDhyl8rlDv0l6TMEWLIoLtmgJXDMkk,2072
+scipy/io/tests/data/scalar_int32.sav,sha256=IzJwLvEoqWLO5JRaHp8qChfptlauU-ll3rb0TfDDM8Y,2072
+scipy/io/tests/data/scalar_int64.sav,sha256=-aSHQRiaE3wjAxINwuLX33_8qmWl4GUkTH45elTkA-8,2076
+scipy/io/tests/data/scalar_string.sav,sha256=AQ7iZ8dKk9QfnLdP9idKv1ojz0M_SwpL7XAUmbHodDQ,2124
+scipy/io/tests/data/scalar_uint16.sav,sha256=928fmxLsQM83ue4eUS3IEnsLSEzmHBklDA59JAUvGK8,2072
+scipy/io/tests/data/scalar_uint32.sav,sha256=X3RbPhS6_e-u-1S1gMyF7s9ys7oV6ZNwPrJqJ6zIJsk,2072
+scipy/io/tests/data/scalar_uint64.sav,sha256=ffVyS2oKn9PDtWjJdOjSRT2KZzy6Mscgd4u540MPHC4,2076
+scipy/io/tests/data/struct_arrays.sav,sha256=TzH-Gf0JgbP_OgeKYbV8ZbJXvWt1VetdUr6C_ziUlzg,2580
+scipy/io/tests/data/struct_arrays_byte_idl80.sav,sha256=oOmhTnmKlE60-JMJRRMv_zfFs4zqioMN8QA0ldlgQZo,1388
+scipy/io/tests/data/struct_arrays_replicated.sav,sha256=kXU8j9QI2Q8D22DVboH9fwwDQSLVvuWMJl3iIOhUAH8,2936
+scipy/io/tests/data/struct_arrays_replicated_3d.sav,sha256=s3ZUwhT6TfiVfk4AGBSyxYR4FRzo4sZQkTxFCJbIQMI,4608
+scipy/io/tests/data/struct_inherit.sav,sha256=4YajBZcIjqMQ4CI0lRUjXpYDY3rI5vzJJzOYpjWqOJk,2404
+scipy/io/tests/data/struct_pointer_arrays.sav,sha256=fkldO6-RO2uAN_AI9hM6SEaBPrBf8TfiodFGJpViaqg,2408
+scipy/io/tests/data/struct_pointer_arrays_replicated.sav,sha256=eKVerR0LoD9CuNlpwoBcn7BIdj3-8x56VNg--Qn7Hgc,2492
+scipy/io/tests/data/struct_pointer_arrays_replicated_3d.sav,sha256=vsqhGpn3YkZEYjQuI-GoX8Jg5Dv8A2uRtP0kzQkq4lg,2872
+scipy/io/tests/data/struct_pointers.sav,sha256=Zq6d5V9ZijpocxJpimrdFTQG827GADBkMB_-6AweDYI,2268
+scipy/io/tests/data/struct_pointers_replicated.sav,sha256=aIXPBIXTfPmd4IaLpYD5W_HUoIOdL5Y3Hj7WOeRM2sA,2304
+scipy/io/tests/data/struct_pointers_replicated_3d.sav,sha256=t1jhVXmhW6VotQMNZ0fv0sDO2pkN4EutGsx5No4VJQs,2456
+scipy/io/tests/data/struct_scalars.sav,sha256=LYICjERzGJ_VvYgtwJ_Up2svQTv8wBzNcVD3nsd_OPg,2316
+scipy/io/tests/data/struct_scalars_replicated.sav,sha256=lw3fC4kppi6BUWAd4n81h8_KgoUdiJl5UIt3CvJIuBs,2480
+scipy/io/tests/data/struct_scalars_replicated_3d.sav,sha256=xVAup6f1dSV_IsSwBQC3KVs0eLEZ6-o5EaZT9yUoDZI,3240
+scipy/io/tests/data/test-44100Hz-2ch-32bit-float-be.wav,sha256=gjv__ng9xH_sm34hyxCbCgO4AP--PZAfDOArH5omkjM,3586
+scipy/io/tests/data/test-44100Hz-2ch-32bit-float-le.wav,sha256=H0LLyv2lc2guzYGnx4DWXU6vB57JrRX-G9Dd4qGh0hM,3586
+scipy/io/tests/data/test-44100Hz-be-1ch-4bytes.wav,sha256=KKz9SXv_R3gX_AVeED2vyhYnj4BvD1uyDiKpCT3ulZ0,17720
+scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof-no-data.wav,sha256=YX1g8qdCOAG16vX9G6q4SsfCj2ZVk199jzDQ8S0zWYI,72
+scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof.wav,sha256=bFrsRqw0QXmsaDtjD6TFP8hZ5jEYMyaCmt-ka_C6GNk,1024
+scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav,sha256=zMnhvZvrP4kyOWKVKfbBneyv03xvzgqXYhHNxsAxDJ4,13
+scipy/io/tests/data/test-44100Hz-le-1ch-4bytes.wav,sha256=9qTCvpgdz3raecVN1ViggHPnQjBf47xmXod9iCDsEik,17720
+scipy/io/tests/data/test-48000Hz-2ch-64bit-float-le-wavex.wav,sha256=EqYBnEgTxTKvaTAtdA5HIl47CCFIje93y4hawR6Pyu0,7792
+scipy/io/tests/data/test-8000Hz-be-3ch-5S-24bit.wav,sha256=hGYchxQFjrtvZCBo0ULi-xdZ8krqXcKdTl3NSUfqe8k,90
+scipy/io/tests/data/test-8000Hz-le-1ch-10S-20bit-extra.wav,sha256=h8CXsW5_ShKR197t_d-TUTlgDqOZ-7wK_EcVGucR-aY,74
+scipy/io/tests/data/test-8000Hz-le-1ch-1byte-ulaw.wav,sha256=BoUCDct3GiY_JJV_HoghF3mzAebT18j02c-MOn19KxU,70
+scipy/io/tests/data/test-8000Hz-le-2ch-1byteu.wav,sha256=R6EJshvQp5YVR4GB9u4Khn5HM1VMfJUj082i8tkBIJ8,1644
+scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit-inconsistent.wav,sha256=t2Mgri3h6JLQDekrwIhDBOaG46OUzHynUz0pKbvOpNU,90
+scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit.wav,sha256=yCv0uh-ux_skJsxeOjzog0YBk3ZQO_kw5HJHMqtVyI0,90
+scipy/io/tests/data/test-8000Hz-le-3ch-5S-36bit.wav,sha256=oiMVsQV9-qGBz_ZwsfAkgA9BZXNjXbH4zxCGvvdT0RY,120
+scipy/io/tests/data/test-8000Hz-le-3ch-5S-45bit.wav,sha256=e97XoPrPGJDIh8nO6mii__ViY5yVlmt4OnPQoDN1djs,134
+scipy/io/tests/data/test-8000Hz-le-3ch-5S-53bit.wav,sha256=wbonKlzvzQ_bQYyBsj-GwnihZOhn0uxfKhL_nENCGNc,150
+scipy/io/tests/data/test-8000Hz-le-3ch-5S-64bit.wav,sha256=Uu5QPQcbtnFlnxOd4zFGxpiTC4wgdp6JOoYJ2VMZIU0,164
+scipy/io/tests/data/test-8000Hz-le-4ch-9S-12bit.wav,sha256=1F67h8tr2xz0C5K21T9y9gspcGA0qnSOzsl2vjArAMs,116
+scipy/io/tests/data/test-8000Hz-le-5ch-9S-5bit.wav,sha256=TJvGU7GpgXdCrdrjzMlDtpieDMnDK-lWMMqlWjT23BY,89
+scipy/io/tests/data/various_compressed.sav,sha256=H-7pc-RCQx5y6_IbHk1hB6OfnhvuPyW6EJq4EwI9iMc,1015
+scipy/io/tests/test_fortran.py,sha256=U8BS4PZxbnIzg8-GHYTXMDpHlKcDhu6-8GCbX6PVqho,7531
+scipy/io/tests/test_idl.py,sha256=Q1ekSAxQdXN-MailSNDqaKHAQvyP9BxtOwGM3NpYyrw,20511
+scipy/io/tests/test_mmio.py,sha256=GXrcNLv-2roKPaisWRyf6i9hG-EmmNkKqOX4HPx29WA,27874
+scipy/io/tests/test_netcdf.py,sha256=8BpKkEm-G0zymAjpvMS5doLLORwhnX35nzPaod4vMxM,19404
+scipy/io/tests/test_paths.py,sha256=3ewh_1yXujx3NIZ3deUjepFJgJDa5IHIugxupLDhHoU,3178
+scipy/io/tests/test_wavfile.py,sha256=LLYFtOeL4vPdk7221TcQ_J3aVPVe9IfV16GyHCSoeAo,15647
+scipy/io/wavfile.py,sha256=Jgz3Qi_6RXNphZVx6riCGK4qovdBbcnzI4726a0ex4I,26625
+scipy/linalg.pxd,sha256=0MlO-o_Kr8gg--_ipXEHFGtB8pZdHX8VX4wLYe_UzPg,53
+scipy/linalg/__init__.py,sha256=UOFZX4GCusrQjcaPB6NNNerhsVDe707BvlfE7XB8KzU,7517
+scipy/linalg/__pycache__/__init__.cpython-310.pyc,,
+scipy/linalg/__pycache__/_basic.cpython-310.pyc,,
+scipy/linalg/__pycache__/_decomp.cpython-310.pyc,,
+scipy/linalg/__pycache__/_decomp_cholesky.cpython-310.pyc,,
+scipy/linalg/__pycache__/_decomp_cossin.cpython-310.pyc,,
+scipy/linalg/__pycache__/_decomp_ldl.cpython-310.pyc,,
+scipy/linalg/__pycache__/_decomp_lu.cpython-310.pyc,,
+scipy/linalg/__pycache__/_decomp_polar.cpython-310.pyc,,
+scipy/linalg/__pycache__/_decomp_qr.cpython-310.pyc,,
+scipy/linalg/__pycache__/_decomp_qz.cpython-310.pyc,,
+scipy/linalg/__pycache__/_decomp_schur.cpython-310.pyc,,
+scipy/linalg/__pycache__/_decomp_svd.cpython-310.pyc,,
+scipy/linalg/__pycache__/_expm_frechet.cpython-310.pyc,,
+scipy/linalg/__pycache__/_interpolative_backend.cpython-310.pyc,,
+scipy/linalg/__pycache__/_matfuncs.cpython-310.pyc,,
+scipy/linalg/__pycache__/_matfuncs_inv_ssq.cpython-310.pyc,,
+scipy/linalg/__pycache__/_matfuncs_sqrtm.cpython-310.pyc,,
+scipy/linalg/__pycache__/_misc.cpython-310.pyc,,
+scipy/linalg/__pycache__/_procrustes.cpython-310.pyc,,
+scipy/linalg/__pycache__/_sketches.cpython-310.pyc,,
+scipy/linalg/__pycache__/_solvers.cpython-310.pyc,,
+scipy/linalg/__pycache__/_special_matrices.cpython-310.pyc,,
+scipy/linalg/__pycache__/_testutils.cpython-310.pyc,,
+scipy/linalg/__pycache__/basic.cpython-310.pyc,,
+scipy/linalg/__pycache__/blas.cpython-310.pyc,,
+scipy/linalg/__pycache__/decomp.cpython-310.pyc,,
+scipy/linalg/__pycache__/decomp_cholesky.cpython-310.pyc,,
+scipy/linalg/__pycache__/decomp_lu.cpython-310.pyc,,
+scipy/linalg/__pycache__/decomp_qr.cpython-310.pyc,,
+scipy/linalg/__pycache__/decomp_schur.cpython-310.pyc,,
+scipy/linalg/__pycache__/decomp_svd.cpython-310.pyc,,
+scipy/linalg/__pycache__/interpolative.cpython-310.pyc,,
+scipy/linalg/__pycache__/lapack.cpython-310.pyc,,
+scipy/linalg/__pycache__/matfuncs.cpython-310.pyc,,
+scipy/linalg/__pycache__/misc.cpython-310.pyc,,
+scipy/linalg/__pycache__/special_matrices.cpython-310.pyc,,
+scipy/linalg/_basic.py,sha256=bG3YlFR2vgoF8ijCkedBmEw4x0iAS_5-orpUdDxcE78,68914
+scipy/linalg/_blas_subroutines.h,sha256=3nanVNwivmwbWRd42BNZB4G2lH7i5nYnsvO3gEohZQE,18134
+scipy/linalg/_cythonized_array_utils.cpython-310-x86_64-linux-gnu.so,sha256=4QlArUqYu3kYG1frieTVJ27tDzvSomXNPsRMq1NXQHY,633088
+scipy/linalg/_cythonized_array_utils.pxd,sha256=OlWTbJt3gmdrfRFyx_Vz7GTmDTjr8dids5HA4TfC6R0,890
+scipy/linalg/_cythonized_array_utils.pyi,sha256=HZWXvJdpXGcydTEjkaL_kXIcxpcMqBBfFz7ZhscsRNo,340
+scipy/linalg/_decomp.py,sha256=ta_h9p6FoKFEe1pzV759Cinnrj00GsaHmGil6XIOf0Y,62177
+scipy/linalg/_decomp_cholesky.py,sha256=aOKQKj0WG6j-UBUifPwoSx6NFmUa5RftayITRrD_tAw,11815
+scipy/linalg/_decomp_cossin.py,sha256=N1TCrFf_-umaWn035E4CtxOBCkHROaFEhSqZLITLB3M,8973
+scipy/linalg/_decomp_ldl.py,sha256=HYzVUNZgEyuC2ZoFOGneas8ZkhhOFzUGcapL3Pos_cE,12535
+scipy/linalg/_decomp_lu.py,sha256=6KMcxOyCxLNFmzqh-DPmti8ck0gWQtSRdZmXUMMzzEs,12588
+scipy/linalg/_decomp_lu_cython.cpython-310-x86_64-linux-gnu.so,sha256=ens7MPKv-1i_5o3E4blXB-Cu1giUKhA3bJR2nz5RAXM,270816
+scipy/linalg/_decomp_lu_cython.pyi,sha256=EASCkhrbJcBHo4zMYCUl1qRJDvPrvCqxd1TfqMWEd_U,291
+scipy/linalg/_decomp_polar.py,sha256=arzJ40FP1-TFsRvXPCP1qdNTsT60lkBcKBHfhB2JxxY,3578
+scipy/linalg/_decomp_qr.py,sha256=n9241Aj2DY7RALMK4E22zApBppIMc-BV5P8mBOpML5g,13776
+scipy/linalg/_decomp_qz.py,sha256=uH93in1ikPR-Wgi1g49EPm2XXuhKOWBzPUJEahCotx8,16330
+scipy/linalg/_decomp_schur.py,sha256=yUUR-4mtWG0qjtz6UMhj5L0PMNGKLH5m12KElks4Gtk,10419
+scipy/linalg/_decomp_svd.py,sha256=Egoy9LMjsNsykHqPp584LT43sVAyHS8LEWM1wUF7LDg,15616
+scipy/linalg/_decomp_update.cpython-310-x86_64-linux-gnu.so,sha256=oOVzbfeQa7g-7aZYEy-kjcFOW8yxSnW8Ntn9PjHIAJU,372704
+scipy/linalg/_expm_frechet.py,sha256=efAQwON5vV4D_8NAe3EAM1NMNibQUlNZHjFmmp48Bs4,12328
+scipy/linalg/_fblas.cpython-310-x86_64-linux-gnu.so,sha256=PokAppTl9GvdssvRY8iy2pc50081fxdPeHQx9BJx154,642017
+scipy/linalg/_flapack.cpython-310-x86_64-linux-gnu.so,sha256=RWX6DQ3EQNB0GTqJdwxQWioXRj3P1jRug9oIWPIvGx4,2066281
+scipy/linalg/_interpolative.cpython-310-x86_64-linux-gnu.so,sha256=SdF0-ostVysuUhGw1RBMBrMKsrBio_hnp78RSnOq0IE,457113
+scipy/linalg/_interpolative_backend.py,sha256=yycf_ceX0dgf7Usjvtaxmkm_cT-2jmEMBuWY6tJST2g,45192
+scipy/linalg/_lapack_subroutines.h,sha256=E4T9vai7YJAJZ9HBMyGRpCm36NEufmTTdZDjWe-DwNA,239303
+scipy/linalg/_matfuncs.py,sha256=oD7Ni2R7EQsJNRiQRt_LvM6cz-DCWOYEzUeOm1e5pUE,24331
+scipy/linalg/_matfuncs_expm.cpython-310-x86_64-linux-gnu.so,sha256=jsajvaJRa66ms_qSo2IGaE19WDhYecbXFz3NcXhmxhk,525696
+scipy/linalg/_matfuncs_expm.pyi,sha256=GCTnQ9X_CNNpadcYhDFhjL2WBhzfdnt0mkW1ms34cjY,187
+scipy/linalg/_matfuncs_inv_ssq.py,sha256=THG87Ac9olliQ9tKjshCo1NRzb1QfgGHOOUomedP4eE,28059
+scipy/linalg/_matfuncs_sqrtm.py,sha256=ijwi8Kqx8n4EIbTThMcyyJfDjjK51B_dCBM27tZdQLQ,6820
+scipy/linalg/_matfuncs_sqrtm_triu.cpython-310-x86_64-linux-gnu.so,sha256=azWjVeOW-0rLYFs294LEGV9D4TADoni1RRcytPqTEV8,276432
+scipy/linalg/_misc.py,sha256=3IPq-LIQcxV7ELbtcgZK8Ri60YWbhpN_y7UYe6BKEgA,6283
+scipy/linalg/_procrustes.py,sha256=aa5KcFwCM0wcwnLhwwBq_pWIMhfZoB5wIHY2ocS7Xc0,2763
+scipy/linalg/_sketches.py,sha256=n6PEJILrFpzWhdf-sKFgGN-0elEwqvBlI0Z3H54tk0c,6145
+scipy/linalg/_solve_toeplitz.cpython-310-x86_64-linux-gnu.so,sha256=0aT_Xi4m-LM8tsmyxOueU3sr70Eb2RSgJPl1H-M6MGM,300152
+scipy/linalg/_solvers.py,sha256=q-bHb_WR4D3a_uOWpiD2zclBhotdxwPO8OwC4V0KGM4,28342
+scipy/linalg/_special_matrices.py,sha256=NieLFLp1O_6BlgAx_fVRr2bVrqaFFS5VySRVNBFnIbc,36865
+scipy/linalg/_testutils.py,sha256=oUEc8_lllXP18Ugrv3KlEcibTL6Mem5iEAyZJg4hNwE,1753
+scipy/linalg/basic.py,sha256=0uMJev4ZSqcrZ4FEV50FQyzf1U39QAhTu8gI_s_0R90,797
+scipy/linalg/blas.py,sha256=WcuILhaA_wqcz2NJRl8gNabzec8Xi-kj4HeRS-EJhYY,11697
+scipy/linalg/cython_blas.cpython-310-x86_64-linux-gnu.so,sha256=PG3NCTAQ1ZeRITbysRu-U4IljmguzLvjMmknDd91fH0,348849
+scipy/linalg/cython_blas.pxd,sha256=DCPBxNWP-BvdT_REj6_a4TjUrNaf6sCq_XoxU3pEbfc,15592
+scipy/linalg/cython_blas.pyx,sha256=DFCT-H2mDlf-KtVcTB4DQyCRSIIQjd1zB3r8NSUafrY,64918
+scipy/linalg/cython_lapack.cpython-310-x86_64-linux-gnu.so,sha256=8vkROhZcpQmId9eNfAcaJgSN2zrXtU6UXSrFXg-39pQ,837713
+scipy/linalg/cython_lapack.pxd,sha256=Ld5hPwcYxpOPahFNsfNomsp0_DY8BfG-W8TmZxh-iYM,204556
+scipy/linalg/cython_lapack.pyx,sha256=dLADFnGKlafqoLZOE7OqVmj2pzhWDNut0KJMzh_i9w4,706982
+scipy/linalg/decomp.py,sha256=imZLuEFtV2WakBzX1DPiWCgUw00t4bEXyMyjtyQu_B4,838
+scipy/linalg/decomp_cholesky.py,sha256=LfsMeb0QgOX2nLKgCsZCpi-mXBxGT596kPYVeRALok8,688
+scipy/linalg/decomp_lu.py,sha256=1KQnoczngZjaNxs_CAP6-eUcyw2igK1PrmNHm1vhRlk,614
+scipy/linalg/decomp_qr.py,sha256=QRjlkvSPo65naiTUDK823r6DnrcxDucOma6Z_DTLG0I,579
+scipy/linalg/decomp_schur.py,sha256=6GtwTodRgqTY9tsmPpdKtIIgOGSEYub4_F2tmCYChvw,660
+scipy/linalg/decomp_svd.py,sha256=HrJqbmgde7d7EWxCsa9XkS9QuWgPYMFOHiF4NcAL_Qg,631
+scipy/linalg/interpolative.py,sha256=tPB5mfxVk_g0VSP1Y6YG4cqUkCSNYg7eomlu5KzhiO0,32251
+scipy/linalg/lapack.py,sha256=1-XWvhL1N7R6vXQTturAC9CLEzoJSq0ata_molM_R2c,15667
+scipy/linalg/matfuncs.py,sha256=G21MOYFXuqlDzWdBWC6FQ_nh5Hv0QwZaDDJ3PTwtHmY,883
+scipy/linalg/misc.py,sha256=uxpR80jJ5w5mslplWlL6tIathas8mEXvRIwDXYMcTOk,592
+scipy/linalg/special_matrices.py,sha256=tLbqSB71b5ucf8nFIAmkKmnFLEZbZk8IXYl4zZs_30g,771
+scipy/linalg/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/linalg/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_basic.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_blas.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_cython_blas.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_cython_lapack.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_cythonized_array_utils.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_decomp.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_decomp_cholesky.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_decomp_cossin.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_decomp_ldl.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_decomp_lu.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_decomp_polar.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_decomp_update.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_fblas.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_interpolative.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_lapack.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_matfuncs.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_matmul_toeplitz.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_misc.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_procrustes.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_sketches.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_solve_toeplitz.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_solvers.cpython-310.pyc,,
+scipy/linalg/tests/__pycache__/test_special_matrices.cpython-310.pyc,,
+scipy/linalg/tests/data/carex_15_data.npz,sha256=E_PhSRqHa79Z1-oQrSnB-bWZaiq5khbzHVv81lkBLB4,34462
+scipy/linalg/tests/data/carex_18_data.npz,sha256=Wfg5Rn8nUrffb7bUCUOW7dMqWSm3ZPf_oeZmZDHmysY,161487
+scipy/linalg/tests/data/carex_19_data.npz,sha256=OOj8ewQd8LI9flyhXq0aBl5kZ2Ee-ahIzH25P4Ct_Yc,34050
+scipy/linalg/tests/data/carex_20_data.npz,sha256=FOIi00pxGMcoShZ1xv7O7ne4TflRpca6Kl7p_zBU-h0,31231
+scipy/linalg/tests/data/carex_6_data.npz,sha256=GyoHNrVB6_XEubTADW2rKB5zyfuZE8biWBp4Gze2Avk,15878
+scipy/linalg/tests/data/gendare_20170120_data.npz,sha256=o9-rRR2dXCAkPg7YXNi2yWV2afuaD4O1vhZVhXg9VbU,2164
+scipy/linalg/tests/test_basic.py,sha256=zia60-ir6RMT_f3dUwKZ32czTQR0GjmRQriQ7YBewfk,69951
+scipy/linalg/tests/test_blas.py,sha256=_egnuCdKf89WuIkm45pl_02wMoHV3c4mvZ3uUV4NoWA,40842
+scipy/linalg/tests/test_cython_blas.py,sha256=0Y2w1Btw6iatfodZE7z0lisJJLVCr70DAW-62he_sz4,4087
+scipy/linalg/tests/test_cython_lapack.py,sha256=McSFDUU4kgCavU1u3-uqBGlzUZiLGxM5qPfBFgPTqdE,796
+scipy/linalg/tests/test_cythonized_array_utils.py,sha256=O1EKWxsYt6k1zMWjFlQhTndQVOhHsJlSm-bHfPMny1U,3840
+scipy/linalg/tests/test_decomp.py,sha256=i_Yzs6RMKM1VdSPCSOGeYzueKO2iKbh0Ph8RBRItIaY,106420
+scipy/linalg/tests/test_decomp_cholesky.py,sha256=FKAGOFEcx3Bh8NvZHoUjaDov-a6VpLdjSAswaxjACLY,7857
+scipy/linalg/tests/test_decomp_cossin.py,sha256=Z9QpHHszBuZ-OorqILNK0Oly7sMvXNhbYLTZHNKd3YI,5955
+scipy/linalg/tests/test_decomp_ldl.py,sha256=9h96PmHpoXIbjzc5nPxA3Dzw4575IelqxXw2aiNjabo,4944
+scipy/linalg/tests/test_decomp_lu.py,sha256=i7K4zDx3PocMSPYJzaS0IiZuVRphC_CXzLreK1FNkIE,11186
+scipy/linalg/tests/test_decomp_polar.py,sha256=5x5vz9rJE2U2nvo0kx6xMX5Z9OcnqxayPZvAd4dwsUQ,2646
+scipy/linalg/tests/test_decomp_update.py,sha256=kPMpEe2ddl3rdEDhPlj-cdBL4BsPK3CAtf9g5k55vSo,68490
+scipy/linalg/tests/test_fblas.py,sha256=Ykb7LKjbxPXAdJD-IkXMAsbUmXMAkku2FQCr-jlDTUE,18687
+scipy/linalg/tests/test_interpolative.py,sha256=Y9yGVHR1OMZWHgrX_HmBx446TACjkARoxyHwT49iEuw,8969
+scipy/linalg/tests/test_lapack.py,sha256=4dBJoJkgtXWnuof3Xx8UTBqWZ6lrg8h7NUeihxKIgsY,129349
+scipy/linalg/tests/test_matfuncs.py,sha256=6b5wMGDvMI2PeimrjWastS3pZSE4f1-ETezFeJeyz6E,39926
+scipy/linalg/tests/test_matmul_toeplitz.py,sha256=Wd9T03zZRwX3M3ppkhYJiJbkWZ_xop4VKj57TjeozUs,3870
+scipy/linalg/tests/test_misc.py,sha256=HP9jfKohbJIaKVcBqov9hAOHYk5dZck497-V5DMHe6E,76
+scipy/linalg/tests/test_procrustes.py,sha256=WkNNarBf69izBmlOhu4-u0eWdzkSzYHQuDZh-w89fOU,6758
+scipy/linalg/tests/test_sketches.py,sha256=FVEcNV43JteZZU7GDdBjtl-_alYDimxnjgKvpmtzVsI,3960
+scipy/linalg/tests/test_solve_toeplitz.py,sha256=KuTAYh-8MRWjaHclgQuIaBBx8IBTGEzXgZnhM_gjWxo,4010
+scipy/linalg/tests/test_solvers.py,sha256=degoX4OXSpo_6F59TyHcNdtcY3HCbkkGJRHldDfgdPs,31642
+scipy/linalg/tests/test_special_matrices.py,sha256=7IbOPS0DyTC1zwEXbrjRr3NnctiTGlZsNRVqsJF17hQ,23596
+scipy/misc/__init__.py,sha256=CdX9k6HUYu_cqVF4l2X5h1eqd9xUCuKafO_0aIY5RNE,1726
+scipy/misc/__pycache__/__init__.cpython-310.pyc,,
+scipy/misc/__pycache__/_common.cpython-310.pyc,,
+scipy/misc/__pycache__/common.cpython-310.pyc,,
+scipy/misc/__pycache__/doccer.cpython-310.pyc,,
+scipy/misc/_common.py,sha256=4pb0UjMkG0GBlJ2IgZ4NDiu2vlPCxfL2r0BCOSpOFdE,11153
+scipy/misc/ascent.dat,sha256=6KhJOUhEY6uAUa7cW0CqJiqzOpHWRYps0TxqHK1aAj0,527630
+scipy/misc/common.py,sha256=V67COWNbYuMJwdPMypUiimxSShtUXaq8RSop35sOiuM,619
+scipy/misc/doccer.py,sha256=hUk7LlSlkTY28QjqyHv4HI8cWUDnZyg1PbMLvL3-Yso,1458
+scipy/misc/ecg.dat,sha256=8grTNl-5t_hF0OXEi2_mcIE3fuRmw6Igt_afNciVi68,119035
+scipy/misc/face.dat,sha256=nYsLTQgTE-K0hXSMdwRy5ale0XOBRog9hMcDBJPoKIY,1581821
+scipy/misc/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/misc/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/misc/tests/__pycache__/test_common.cpython-310.pyc,,
+scipy/misc/tests/__pycache__/test_config.cpython-310.pyc,,
+scipy/misc/tests/__pycache__/test_doccer.cpython-310.pyc,,
+scipy/misc/tests/test_common.py,sha256=0h_qT7hwQnqx4Oc6ccvM-U79EkbXPq5LNlC3QSvR88M,833
+scipy/misc/tests/test_config.py,sha256=j1Ppp6DCZy9wMxTmBEGxq4MScvsQXTQk7268EnNnPFQ,1244
+scipy/misc/tests/test_doccer.py,sha256=V1B5Z-XfIQFiSyRNo3PXG-AQfToFmoQ1oOBGjxK2zmo,3738
+scipy/ndimage/__init__.py,sha256=2dI3Sj1jF2AR1xSghzX4E5NFYxN9Z3-qd0a6YDRpPE4,4989
+scipy/ndimage/__pycache__/__init__.cpython-310.pyc,,
+scipy/ndimage/__pycache__/_filters.cpython-310.pyc,,
+scipy/ndimage/__pycache__/_fourier.cpython-310.pyc,,
+scipy/ndimage/__pycache__/_interpolation.cpython-310.pyc,,
+scipy/ndimage/__pycache__/_measurements.cpython-310.pyc,,
+scipy/ndimage/__pycache__/_morphology.cpython-310.pyc,,
+scipy/ndimage/__pycache__/_ni_docstrings.cpython-310.pyc,,
+scipy/ndimage/__pycache__/_ni_support.cpython-310.pyc,,
+scipy/ndimage/__pycache__/filters.cpython-310.pyc,,
+scipy/ndimage/__pycache__/fourier.cpython-310.pyc,,
+scipy/ndimage/__pycache__/interpolation.cpython-310.pyc,,
+scipy/ndimage/__pycache__/measurements.cpython-310.pyc,,
+scipy/ndimage/__pycache__/morphology.cpython-310.pyc,,
+scipy/ndimage/_ctest.cpython-310-x86_64-linux-gnu.so,sha256=h98uh-F0_Ywmq7sQkE-zVgPCuj5JX3uZqeFVBgpYS0A,17008
+scipy/ndimage/_cytest.cpython-310-x86_64-linux-gnu.so,sha256=foCM32TOb2d_cAnZNdKGZfl4ZDontGMixm5kiuVIQk4,90984
+scipy/ndimage/_filters.py,sha256=tF-yf0az51r2dPkhK2CatkGNc1vDUnQHWF1BHXi8l70,65695
+scipy/ndimage/_fourier.py,sha256=X-Y0EP59mH5ogqts58SpDhxA0dfqplwZQ8T0G6DzPos,11385
+scipy/ndimage/_interpolation.py,sha256=xtG_a3pksNFF1tm7gl-2v36Zy8fxN4iPn2-j348Obdw,37023
+scipy/ndimage/_measurements.py,sha256=7yn0c2ygTZm12oKUapXHT4r8MZ263ennI_qpEzXC8YM,56097
+scipy/ndimage/_morphology.py,sha256=HKKP__gdrLNYDtp6J1qIzrcmpq7MYO7DpGHYAgyHMrk,94913
+scipy/ndimage/_nd_image.cpython-310-x86_64-linux-gnu.so,sha256=riAtjLhu1Lpfxlq9tu_l6HVBP0P9LS2wWGWGe35yvOI,147184
+scipy/ndimage/_ni_docstrings.py,sha256=Pxf50i8Wzrm2M70NkUrbdv901hsJ5XcRHVwyxHmXQJk,8505
+scipy/ndimage/_ni_label.cpython-310-x86_64-linux-gnu.so,sha256=zE9mnBeiXgM-J-pGm-deH4TK1949x5XHUJUyA0WHaVs,428200
+scipy/ndimage/_ni_support.py,sha256=rO5ihuExCyN0o5mFUqU1ckg3pprTPpj8a1EZfIIdwqY,4646
+scipy/ndimage/filters.py,sha256=cAv2zezrTJEm9JzKPV_pmXzZcgczCK_VaYJ4mdNW3FM,976
+scipy/ndimage/fourier.py,sha256=gnifi4S_Epyu4DpNsebz4A5BKzBWoGf11FkXWeXsoqY,599
+scipy/ndimage/interpolation.py,sha256=KzQNWvuqSrUfGcfe7gFSX9bHo7jVy76fErfjnpqbIaM,680
+scipy/ndimage/measurements.py,sha256=xdSs52Y5RjURLP710iGURXWQFeS3ok4WjoYufKh9OeA,788
+scipy/ndimage/morphology.py,sha256=yFWSo7o_7PuYq61WGQOCIgMppneNLxqhJocyN0bMsVA,965
+scipy/ndimage/tests/__init__.py,sha256=LUFQT_tCLZ6noa1Myz-TwTfwRaSZ96zqJJUWNyMfb_k,395
+scipy/ndimage/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/ndimage/tests/__pycache__/test_c_api.cpython-310.pyc,,
+scipy/ndimage/tests/__pycache__/test_datatypes.cpython-310.pyc,,
+scipy/ndimage/tests/__pycache__/test_filters.cpython-310.pyc,,
+scipy/ndimage/tests/__pycache__/test_fourier.cpython-310.pyc,,
+scipy/ndimage/tests/__pycache__/test_interpolation.cpython-310.pyc,,
+scipy/ndimage/tests/__pycache__/test_measurements.cpython-310.pyc,,
+scipy/ndimage/tests/__pycache__/test_morphology.cpython-310.pyc,,
+scipy/ndimage/tests/__pycache__/test_ni_support.cpython-310.pyc,,
+scipy/ndimage/tests/__pycache__/test_splines.cpython-310.pyc,,
+scipy/ndimage/tests/data/label_inputs.txt,sha256=JPbEnncwUyhlAAv6grN8ysQW9w9M7ZSIn_NPopqU7z4,294
+scipy/ndimage/tests/data/label_results.txt,sha256=Cf2_l7FCWNjIkyi-XU1MaGzmLnf2J7NK2SZ_10O-8d0,4309
+scipy/ndimage/tests/data/label_strels.txt,sha256=AU2FUAg0WghfvnPDW6lhMB1kpNdfv3coCR8blcRNBJ8,252
+scipy/ndimage/tests/dots.png,sha256=sgtW-tx0ccBpTT6BSNniioPXlnusFr-IUglK_qOVBBQ,2114
+scipy/ndimage/tests/test_c_api.py,sha256=wZv9LUefK1Fnq__xemuxW2GDdRMdNN7gCqhWkdqZLZQ,3730
+scipy/ndimage/tests/test_datatypes.py,sha256=tpCXBY_MH-NcCuytUVVnLbDy1q_3NN7hH245cpqhvsI,2827
+scipy/ndimage/tests/test_filters.py,sha256=IisrzOqjhMwwRjxw05pUBqAHh_OSwLNla9_p1nZWlGo,93325
+scipy/ndimage/tests/test_fourier.py,sha256=DlD_Eb1jZF_3y2wxi1IJaXI3566da9fnbY7jVtUZ42o,6664
+scipy/ndimage/tests/test_interpolation.py,sha256=3kTKe5U76lDnEGTAWW9SzHyCnkbcr2KM1CluN_nUicc,54771
+scipy/ndimage/tests/test_measurements.py,sha256=vgGx-V5jTigVaKxE-dasZ5w9fUfRuzD0QszQV4lOM04,48181
+scipy/ndimage/tests/test_morphology.py,sha256=0qFGtsQkCn20vY9c4C10eeg44R4leNYO4F0BHAWSaNU,106687
+scipy/ndimage/tests/test_ni_support.py,sha256=kuf8otEyIlGVPzcEPekRK7lfXI8bVEvB2_YF6ko7jzg,2472
+scipy/ndimage/tests/test_splines.py,sha256=4dXpWNMKwb2vHMdbNc2jEvAHzStziq8WRh4PTUkoYpQ,2199
+scipy/odr/__init__.py,sha256=CErxMJ0yBfu_cvCoKJMu9WjqUaohLIqqf228Gm9XWJI,4325
+scipy/odr/__odrpack.cpython-310-x86_64-linux-gnu.so,sha256=SaDHNH3mKMIGrcVtc9ZgnwXi-rudrDM_oUq-_05ubdQ,222969
+scipy/odr/__pycache__/__init__.cpython-310.pyc,,
+scipy/odr/__pycache__/_add_newdocs.cpython-310.pyc,,
+scipy/odr/__pycache__/_models.cpython-310.pyc,,
+scipy/odr/__pycache__/_odrpack.cpython-310.pyc,,
+scipy/odr/__pycache__/models.cpython-310.pyc,,
+scipy/odr/__pycache__/odrpack.cpython-310.pyc,,
+scipy/odr/_add_newdocs.py,sha256=GeWL4oIb2ydph_K3qCjiIbPCM3QvpwP5EZwEJVOzJrQ,1128
+scipy/odr/_models.py,sha256=tfOLgqnV4LR3VKi7NAg1g1Jp_Zw8lG_PA5BHwU_pTH0,7800
+scipy/odr/_odrpack.py,sha256=SaYqOX4MwAOAGBxK8ICbu1wH6vaBJCqF1RQoqCTIoiM,42401
+scipy/odr/models.py,sha256=Fcdj-P9rJ_B-Ct8bh3RrusnapeHLysVaDsM26Q8fHFo,590
+scipy/odr/odrpack.py,sha256=OlRlBxKlzp5VDi2fnnA-Jdl6G0chDt95JNCvJYg2czs,632
+scipy/odr/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/odr/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/odr/tests/__pycache__/test_odr.cpython-310.pyc,,
+scipy/odr/tests/test_odr.py,sha256=ajJfXACR24a5cEqG7BiwAdoDYpAmvS1I6L7U3Gm-zL4,21011
+scipy/optimize.pxd,sha256=kFYBK9tveJXql1KXuOkKGvj4Fu67GmuyRP5kMVkMbyk,39
+scipy/optimize/README,sha256=FChXku722u0youZGhUoQg7VzDq0kOJ6MCohYcSQWSrg,3221
+scipy/optimize/__init__.py,sha256=YUWDGxYsG4UmFsjNTMi5yWxB3mdLQUh9wbcnz4ATG0g,13108
+scipy/optimize/__pycache__/__init__.cpython-310.pyc,,
+scipy/optimize/__pycache__/_basinhopping.cpython-310.pyc,,
+scipy/optimize/__pycache__/_bracket.cpython-310.pyc,,
+scipy/optimize/__pycache__/_chandrupatla.cpython-310.pyc,,
+scipy/optimize/__pycache__/_cobyla_py.cpython-310.pyc,,
+scipy/optimize/__pycache__/_constraints.cpython-310.pyc,,
+scipy/optimize/__pycache__/_dcsrch.cpython-310.pyc,,
+scipy/optimize/__pycache__/_differentiable_functions.cpython-310.pyc,,
+scipy/optimize/__pycache__/_differentialevolution.cpython-310.pyc,,
+scipy/optimize/__pycache__/_differentiate.cpython-310.pyc,,
+scipy/optimize/__pycache__/_direct_py.cpython-310.pyc,,
+scipy/optimize/__pycache__/_dual_annealing.cpython-310.pyc,,
+scipy/optimize/__pycache__/_hessian_update_strategy.cpython-310.pyc,,
+scipy/optimize/__pycache__/_isotonic.cpython-310.pyc,,
+scipy/optimize/__pycache__/_lbfgsb_py.cpython-310.pyc,,
+scipy/optimize/__pycache__/_linesearch.cpython-310.pyc,,
+scipy/optimize/__pycache__/_linprog.cpython-310.pyc,,
+scipy/optimize/__pycache__/_linprog_doc.cpython-310.pyc,,
+scipy/optimize/__pycache__/_linprog_highs.cpython-310.pyc,,
+scipy/optimize/__pycache__/_linprog_ip.cpython-310.pyc,,
+scipy/optimize/__pycache__/_linprog_rs.cpython-310.pyc,,
+scipy/optimize/__pycache__/_linprog_simplex.cpython-310.pyc,,
+scipy/optimize/__pycache__/_linprog_util.cpython-310.pyc,,
+scipy/optimize/__pycache__/_milp.cpython-310.pyc,,
+scipy/optimize/__pycache__/_minimize.cpython-310.pyc,,
+scipy/optimize/__pycache__/_minpack_py.cpython-310.pyc,,
+scipy/optimize/__pycache__/_nnls.cpython-310.pyc,,
+scipy/optimize/__pycache__/_nonlin.cpython-310.pyc,,
+scipy/optimize/__pycache__/_numdiff.cpython-310.pyc,,
+scipy/optimize/__pycache__/_optimize.cpython-310.pyc,,
+scipy/optimize/__pycache__/_qap.cpython-310.pyc,,
+scipy/optimize/__pycache__/_remove_redundancy.cpython-310.pyc,,
+scipy/optimize/__pycache__/_root.cpython-310.pyc,,
+scipy/optimize/__pycache__/_root_scalar.cpython-310.pyc,,
+scipy/optimize/__pycache__/_shgo.cpython-310.pyc,,
+scipy/optimize/__pycache__/_slsqp_py.cpython-310.pyc,,
+scipy/optimize/__pycache__/_spectral.cpython-310.pyc,,
+scipy/optimize/__pycache__/_tnc.cpython-310.pyc,,
+scipy/optimize/__pycache__/_trustregion.cpython-310.pyc,,
+scipy/optimize/__pycache__/_trustregion_dogleg.cpython-310.pyc,,
+scipy/optimize/__pycache__/_trustregion_exact.cpython-310.pyc,,
+scipy/optimize/__pycache__/_trustregion_krylov.cpython-310.pyc,,
+scipy/optimize/__pycache__/_trustregion_ncg.cpython-310.pyc,,
+scipy/optimize/__pycache__/_tstutils.cpython-310.pyc,,
+scipy/optimize/__pycache__/_zeros_py.cpython-310.pyc,,
+scipy/optimize/__pycache__/cobyla.cpython-310.pyc,,
+scipy/optimize/__pycache__/lbfgsb.cpython-310.pyc,,
+scipy/optimize/__pycache__/linesearch.cpython-310.pyc,,
+scipy/optimize/__pycache__/minpack.cpython-310.pyc,,
+scipy/optimize/__pycache__/minpack2.cpython-310.pyc,,
+scipy/optimize/__pycache__/moduleTNC.cpython-310.pyc,,
+scipy/optimize/__pycache__/nonlin.cpython-310.pyc,,
+scipy/optimize/__pycache__/optimize.cpython-310.pyc,,
+scipy/optimize/__pycache__/slsqp.cpython-310.pyc,,
+scipy/optimize/__pycache__/tnc.cpython-310.pyc,,
+scipy/optimize/__pycache__/zeros.cpython-310.pyc,,
+scipy/optimize/_basinhopping.py,sha256=ej5TxpHfW8-mH7rIsYtsaW9WGOj6FWmQUWab2YVlSNY,30691
+scipy/optimize/_bglu_dense.cpython-310-x86_64-linux-gnu.so,sha256=1n87nMOoMjskAlODyPZ3OF_N1On4IjO905OKEjYPWfE,364200
+scipy/optimize/_bracket.py,sha256=o-ZowrYRDTItOlCut9k0B60sjRbGH6R4bv5ScG0_Q14,28614
+scipy/optimize/_chandrupatla.py,sha256=SoGJwgIk3oWmRHG9EDgcG773fPdF1Z9SNJu2I3Hu2yA,23222
+scipy/optimize/_cobyla.cpython-310-x86_64-linux-gnu.so,sha256=cdQ0MuG9r-_thseq4yu11Vbzb3oKLhR9w3SHBzKL154,100545
+scipy/optimize/_cobyla_py.py,sha256=bLw81_uD6zBTLybEfJUA46_OMdnTmXObhGZcvgBARss,10869
+scipy/optimize/_constraints.py,sha256=_xlt1pkOpxXVJEj-yd_vkPfv20Pxt-us2yxlICngiY0,22854
+scipy/optimize/_dcsrch.py,sha256=D5I9G4oH5kFD2Rrb61gppXFMwwz6JiQBYPvW3vbR5Gs,25235
+scipy/optimize/_differentiable_functions.py,sha256=g-i-tnlS0RcWj6z8PF5cbNeYu_AjRjSbHmuewNN2juc,23665
+scipy/optimize/_differentialevolution.py,sha256=wCLdSrATmzlpyOn3oeoIx-GR2malvM3QZYkhRMgroqo,83206
+scipy/optimize/_differentiate.py,sha256=1cO7QcbxIs0g7gDl9Bo40X_c2PG13wWcYm4OpUHCGh8,30870
+scipy/optimize/_direct.cpython-310-x86_64-linux-gnu.so,sha256=eCBbO6L80921dGsv6Vc-JtNPd3nlmc3uzijfhTvxa0c,43480
+scipy/optimize/_direct_py.py,sha256=ShNGJHCdN02zGTQbBL5oEwxZ9yGH8dczXTsmnt1WJIg,11798
+scipy/optimize/_dual_annealing.py,sha256=23UWd8CkGU02s5TaYoiu8h3Tv4GZmaVKgvGFo685Wlc,30346
+scipy/optimize/_group_columns.cpython-310-x86_64-linux-gnu.so,sha256=BuL-HIRz4V5Jvpel1Ptz0xslrgMeUKSyXo3z9ynv4-U,96016
+scipy/optimize/_hessian_update_strategy.py,sha256=PBnp8tf7hHcXb7uOz-GLJpoB79TCmdQM2IIOVX6ubI0,15862
+scipy/optimize/_highs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/optimize/_highs/__pycache__/__init__.cpython-310.pyc,,
+scipy/optimize/_highs/_highs_constants.cpython-310-x86_64-linux-gnu.so,sha256=-sV4_-lkmk5KrdpTsJzCnJXX7OUS8lXC5tn-KP9aaMw,36072
+scipy/optimize/_highs/_highs_wrapper.cpython-310-x86_64-linux-gnu.so,sha256=nvY4f9awwbBFeIOnAVaUPK-DE4qhtV7IEYXybbMkv-4,4045920
+scipy/optimize/_highs/src/cython/HConst.pxd,sha256=ipav35Vt3T5POWpL3X0kGkXGMuDjfA8A61FPahnrRxI,5511
+scipy/optimize/_highs/src/cython/Highs.pxd,sha256=1fwhSznVl2Vl_XyXyUTmX8ajygpeJKSgWbkpHiH6QZo,2147
+scipy/optimize/_highs/src/cython/HighsIO.pxd,sha256=cnPDpEfuETXVLGdb4wgyVtQtKh5M2dd0rX9WidZG77U,705
+scipy/optimize/_highs/src/cython/HighsInfo.pxd,sha256=TKvi5wZQ5DH4trIw29PhGWHmMnb8Cz_zjrTBDoodtCM,735
+scipy/optimize/_highs/src/cython/HighsLp.pxd,sha256=ECXgv0gFOP2X12DPi1YWd_uybSAJ9hIll2SMUJ1DZjo,1106
+scipy/optimize/_highs/src/cython/HighsLpUtils.pxd,sha256=eEFgoY_td38M5baXYvvlyFM72x2b1VU_lMFV3Y7HL-8,289
+scipy/optimize/_highs/src/cython/HighsModelUtils.pxd,sha256=FzpoHqKLeMjwJCqM3qHWsxIZb69LNgfO9HsdwcbahZA,335
+scipy/optimize/_highs/src/cython/HighsOptions.pxd,sha256=XsDO_rR9Y-0yxKSstRuv6VffEKh6tqIxIuef1UuanuI,3160
+scipy/optimize/_highs/src/cython/HighsRuntimeOptions.pxd,sha256=MzjcGCorYJ9NbroJIyZDOM_v8RU4a1kjl1up4DPUicA,261
+scipy/optimize/_highs/src/cython/HighsStatus.pxd,sha256=_pXo59wMcXeIw9mvZSwe9N77w3TaCVALe8ZghhPCF2M,339
+scipy/optimize/_highs/src/cython/SimplexConst.pxd,sha256=hLhOZdBa0qfy_d8ZrXHbQiTfPx11V2xAiH-TGfTClEo,5018
+scipy/optimize/_highs/src/cython/highs_c_api.pxd,sha256=LssK9RFO3D9eGRy2YjdncfnJQfKJ_cRHT6IxS9iV3lw,332
+scipy/optimize/_isotonic.py,sha256=g4puoNqjJyDrJRoC0kvfG_I-0KNjeEfGpfZM5-Ltn48,6054
+scipy/optimize/_lbfgsb.cpython-310-x86_64-linux-gnu.so,sha256=aY78uxs5PK45b1DhJJF32YcixvQ7CKBhs0vi4muElRI,125145
+scipy/optimize/_lbfgsb_py.py,sha256=AR6PWfz5xgHBT6GEG_V5e7S9wqN8CKYDe9C_ShpT_uA,20718
+scipy/optimize/_linesearch.py,sha256=-OwKJ52xl-pKeRM1kiCVgHGFkGrXW8BEGxUOiGcfEYc,27282
+scipy/optimize/_linprog.py,sha256=EE4T4NoZoTtTbGvERlKilCLQs2uxxt65TgWnRSuUQME,29719
+scipy/optimize/_linprog_doc.py,sha256=ejVGlwlW7xF5T7UkBbRpJ9-dBm6rcEAjXPbz-gWtdLA,61945
+scipy/optimize/_linprog_highs.py,sha256=QbrJwka_Kz3xbpOZymQcm7NteXmzT9yxCskefrZNL58,17573
+scipy/optimize/_linprog_ip.py,sha256=t43a8xJd9Ms8PSIFmdzmT6Pggner7l-Y5bkubWhlAI8,45785
+scipy/optimize/_linprog_rs.py,sha256=5PhSblTUv5bgI9yW5BN1Rmy09gjZFA1tg1BXWxAKOQQ,23146
+scipy/optimize/_linprog_simplex.py,sha256=I3hKTW_BFX0URJkByvqFL6bVBP5X84bq9ilXa2NxViY,24716
+scipy/optimize/_linprog_util.py,sha256=3i_IjuXNBnz-F25qdW6VJLF8bKbG9_kOXCPwb1u2IHo,62749
+scipy/optimize/_lsap.cpython-310-x86_64-linux-gnu.so,sha256=_QJFQFXt3QnvlJeKxmEZilxTId4zEShI6oL3PteBWVI,27072
+scipy/optimize/_lsq/__init__.py,sha256=Yk4FSVEqe1h-qPqVX7XSkQNBYDtZO2veTmMAebCxhIQ,172
+scipy/optimize/_lsq/__pycache__/__init__.cpython-310.pyc,,
+scipy/optimize/_lsq/__pycache__/bvls.cpython-310.pyc,,
+scipy/optimize/_lsq/__pycache__/common.cpython-310.pyc,,
+scipy/optimize/_lsq/__pycache__/dogbox.cpython-310.pyc,,
+scipy/optimize/_lsq/__pycache__/least_squares.cpython-310.pyc,,
+scipy/optimize/_lsq/__pycache__/lsq_linear.cpython-310.pyc,,
+scipy/optimize/_lsq/__pycache__/trf.cpython-310.pyc,,
+scipy/optimize/_lsq/__pycache__/trf_linear.cpython-310.pyc,,
+scipy/optimize/_lsq/bvls.py,sha256=7u5B8LfUbv3ZRZ8DAZKuDTSNRfDEBmTsn25VZtMMsKk,5195
+scipy/optimize/_lsq/common.py,sha256=nSiCudLnGfw1fWXXnsl5G7BslkYCMAMoC91QZOoVjq0,20523
+scipy/optimize/_lsq/dogbox.py,sha256=97htRlr-Yt-u4Ob3ks7avAMdnjJsO83uHUMjMYrhyjc,11682
+scipy/optimize/_lsq/givens_elimination.cpython-310-x86_64-linux-gnu.so,sha256=Lz_f0muMyegWsHnqYCrZ-wfpLYZMM_sLqMfGJaJ3XlI,235776
+scipy/optimize/_lsq/least_squares.py,sha256=XiGlnKJod4UV2YYXXuiNe4TJoh270b7fOFLjs8txxMY,39672
+scipy/optimize/_lsq/lsq_linear.py,sha256=0Zpy7C0jdGLOE00NBohsu2iWq8hXMMI0FeA6oruZ-Co,15180
+scipy/optimize/_lsq/trf.py,sha256=ElVHnB2Un3eaQ4jJ8KHHp-hwXfYHMypnSthfRO33P90,19477
+scipy/optimize/_lsq/trf_linear.py,sha256=jIs7WviOu_8Kpb7sTln8W7YLgkcndv0eGIP15g_mC4g,7642
+scipy/optimize/_milp.py,sha256=7Giiq-GsySyJzPQmWjwmbuSJyI4ZLPOmzkCbC2AHy9o,15187
+scipy/optimize/_minimize.py,sha256=bGnVzGLCcPHNRgFeBhuvIeCRUo6rRkatHTcYijtv6_E,48221
+scipy/optimize/_minpack.cpython-310-x86_64-linux-gnu.so,sha256=BnjnQf-LaqWZ0pzhKBp1bJTibafj_afBFZCYzEN6NZM,78312
+scipy/optimize/_minpack2.cpython-310-x86_64-linux-gnu.so,sha256=XcH5OO0wa5JhtHmGgi_RD1xL5osseBLRA0bWRHRIWpA,61008
+scipy/optimize/_minpack_py.py,sha256=0lCQ_b1U8gFoaGs_6v_Mjq0QURPwyvS3L6x2LZWkOAA,44671
+scipy/optimize/_moduleTNC.cpython-310-x86_64-linux-gnu.so,sha256=WoQvywZTkiSxsfyZbtTHUZpNgpZO3N-hWsmqUaMjlH8,152168
+scipy/optimize/_nnls.py,sha256=0iAi7_xT306p9r674t0Yf5w-Czvzu7ki8hHTbKJZvk8,5484
+scipy/optimize/_nonlin.py,sha256=Om_vN7pckkm9wk_uBgmE5eQnv1Wk5RQ8Vk8P-aBH0rE,49821
+scipy/optimize/_numdiff.py,sha256=BEZjmEEVCv34UHth_JvDTICwhlJWKY6UdGcE0YVOgnc,28720
+scipy/optimize/_optimize.py,sha256=eOBZsdU17C6CwVEjjRMPEJiTBbv55Ts3VQ6F0_RY-Co,146575
+scipy/optimize/_pava_pybind.cpython-310-x86_64-linux-gnu.so,sha256=0KDFzcxQz_AgLD-mrZZOpb2wSeDK5805FuHDsoY5Eno,223832
+scipy/optimize/_qap.py,sha256=hFSa41-SnDya8Lro7UKViyx2Yz7ZpRfMKoaBTGNVqck,27831
+scipy/optimize/_remove_redundancy.py,sha256=JqaQo5XclDpilSzc1BFv4Elxr8CXlFlgV45ypUwALyc,18769
+scipy/optimize/_root.py,sha256=tsNdnGNqBlrXvzHR9yFYAebIX4C7Wwjwwx_sGXDcW0Y,27810
+scipy/optimize/_root_scalar.py,sha256=baTVT1Vi5ZeXLGxbxhbLkx4bRGA91uHfBzeiwcHUQpM,19595
+scipy/optimize/_shgo.py,sha256=bVUz409huFf-M6q5Rdyiap-NPusAdWyCHbo0rBZoDoQ,62257
+scipy/optimize/_shgo_lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/optimize/_shgo_lib/__pycache__/__init__.cpython-310.pyc,,
+scipy/optimize/_shgo_lib/__pycache__/_complex.cpython-310.pyc,,
+scipy/optimize/_shgo_lib/__pycache__/_vertex.cpython-310.pyc,,
+scipy/optimize/_shgo_lib/_complex.py,sha256=yzBQt3YjTcpw1PK4c_VJmi4CF94BZAiMMGDaTO1ai-8,50259
+scipy/optimize/_shgo_lib/_vertex.py,sha256=I2TAqEEdTK66Km6UIkrDm2-tKpeJUuFX7DAfTk3XvUg,13996
+scipy/optimize/_slsqp.cpython-310-x86_64-linux-gnu.so,sha256=pdoC9sJ2ip4zkjsokCcxbNDq245eZugiG3sU76FwNlA,86592
+scipy/optimize/_slsqp_py.py,sha256=cHOtSPw8AP50yoTCc2yl3EzkDKW-wa5XYdkRwaBRdm4,19088
+scipy/optimize/_spectral.py,sha256=cgBoHOh5FcTqQ0LD5rOx4K7ECc7sbnODvcrn15_QeTI,8132
+scipy/optimize/_tnc.py,sha256=Y6rzgteDEKU0sxJ9UOcEsgzTQ3PD6x0WNg4k2IBO-r0,16908
+scipy/optimize/_trlib/__init__.py,sha256=cNGWE1VffijqhPtSaqwagtBJvjJK-XrJ6K80RURLd48,524
+scipy/optimize/_trlib/__pycache__/__init__.cpython-310.pyc,,
+scipy/optimize/_trlib/_trlib.cpython-310-x86_64-linux-gnu.so,sha256=aFyKo2r0iTeqcsCasZwCdN5JXyPY4XLtEdjMKL1WVmE,380865
+scipy/optimize/_trustregion.py,sha256=r4CGiKYFqNKWDFA_XT23_d4oqscIm5eSnWQNyno85Ps,10801
+scipy/optimize/_trustregion_constr/__init__.py,sha256=c8J2wYGQZr9WpLIT4zE4MUgEj4YNbHEWYYYsFmxAeXI,180
+scipy/optimize/_trustregion_constr/__pycache__/__init__.cpython-310.pyc,,
+scipy/optimize/_trustregion_constr/__pycache__/canonical_constraint.cpython-310.pyc,,
+scipy/optimize/_trustregion_constr/__pycache__/equality_constrained_sqp.cpython-310.pyc,,
+scipy/optimize/_trustregion_constr/__pycache__/minimize_trustregion_constr.cpython-310.pyc,,
+scipy/optimize/_trustregion_constr/__pycache__/projections.cpython-310.pyc,,
+scipy/optimize/_trustregion_constr/__pycache__/qp_subproblem.cpython-310.pyc,,
+scipy/optimize/_trustregion_constr/__pycache__/report.cpython-310.pyc,,
+scipy/optimize/_trustregion_constr/__pycache__/tr_interior_point.cpython-310.pyc,,
+scipy/optimize/_trustregion_constr/canonical_constraint.py,sha256=690VxTb7JJ9RzGwa-LN2hASKlqQPmulyEDZA7I-XyLY,12538
+scipy/optimize/_trustregion_constr/equality_constrained_sqp.py,sha256=5NiEruWnhYL2zhhgZsuLMn-yb5NOFs_bX3sm5giG7I8,8592
+scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py,sha256=mWneWXy1bmte2nH_rq6VYPKXh9YlNIkiu3IG9uvRTck,25744
+scipy/optimize/_trustregion_constr/projections.py,sha256=EO0uHULrNw8pm99vY-gd3pOFQEqrqk_13lVde9iUjTA,13169
+scipy/optimize/_trustregion_constr/qp_subproblem.py,sha256=EtAhRcEtSnGsEeEZ2HGEzm-7r0pnXMCgl9NemKWvdzg,22592
+scipy/optimize/_trustregion_constr/report.py,sha256=_6b3C2G18tAgTstQSvqJbZVFYRxWKuUXFA1SAz95Y6k,1818
+scipy/optimize/_trustregion_constr/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/optimize/_trustregion_constr/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/optimize/_trustregion_constr/tests/__pycache__/test_canonical_constraint.cpython-310.pyc,,
+scipy/optimize/_trustregion_constr/tests/__pycache__/test_projections.cpython-310.pyc,,
+scipy/optimize/_trustregion_constr/tests/__pycache__/test_qp_subproblem.cpython-310.pyc,,
+scipy/optimize/_trustregion_constr/tests/__pycache__/test_report.cpython-310.pyc,,
+scipy/optimize/_trustregion_constr/tests/test_canonical_constraint.py,sha256=zVPxZDa0WkG_tw9Fm_eo_JzsQ8rQrUJyQicq4J12Nd4,9869
+scipy/optimize/_trustregion_constr/tests/test_projections.py,sha256=-UrTi0-lWm4hANoytCmyImSJUH9Ed4x3apHDyRdJg5o,8834
+scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py,sha256=7tapj8clx8M7K5imwnTA4t-_Jh_cAYeu6efbGg4PbSU,27723
+scipy/optimize/_trustregion_constr/tests/test_report.py,sha256=lbr947QQxz681HxTXEZZ0B6_2VNKiN85Inkz7XYhe4A,1070
+scipy/optimize/_trustregion_constr/tr_interior_point.py,sha256=HPyAfUzwu704yvplRMMMMvUKqBtC56gGUBvg218t-Zo,13798
+scipy/optimize/_trustregion_dogleg.py,sha256=HS783IZYHE-EEuF82c4rkFp9u3MNKUdCeynZ6ap8y8s,4389
+scipy/optimize/_trustregion_exact.py,sha256=s-X20WMrJhO36x3YEtxYepLqyxm1Chl7v8MjirrftUw,15555
+scipy/optimize/_trustregion_krylov.py,sha256=KGdudJsoXXROXAc82aZ8ACojD3rimvyx5PYitbo4UzQ,3030
+scipy/optimize/_trustregion_ncg.py,sha256=y7b7QjFBfnB1wDtbwnvKD9DYpz7y7NqVrJ9RhNPcipw,4580
+scipy/optimize/_tstutils.py,sha256=Q5dZTgMzvonIb2ggCU9a35M8k_iV6v8hK4HDdKE20PQ,33910
+scipy/optimize/_zeros.cpython-310-x86_64-linux-gnu.so,sha256=8wEb0RyMpKpbOPVfUTFDfN-vRonegrTDLDToQjlta0Y,21648
+scipy/optimize/_zeros_py.py,sha256=FLSkeAm2CoRkjLx37lKS6pMEvmlsZ8agt_ahA_rtwcM,52190
+scipy/optimize/cobyla.py,sha256=6FcM--HbgtHfOZt5QzGCcmyH2wRmDA73UxN8tO8aIqE,619
+scipy/optimize/cython_optimize.pxd,sha256=ecYJEpT0CXN-2vtaZfGCChD-oiIaJyRDIsTHE8eUG5M,442
+scipy/optimize/cython_optimize/__init__.py,sha256=eehEQNmLGy3e_XjNh6t5vQIC9l_OREeE4tYRRaFZdNs,4887
+scipy/optimize/cython_optimize/__pycache__/__init__.cpython-310.pyc,,
+scipy/optimize/cython_optimize/_zeros.cpython-310-x86_64-linux-gnu.so,sha256=lLKeI281vqnvLx3x-3XyEbDUgwdzZxXMlKwwFK3DrYw,115552
+scipy/optimize/cython_optimize/_zeros.pxd,sha256=anyu-MgWhq24f1bywI4TlohvJjOnpNpkCtSzpKBJSSo,1239
+scipy/optimize/cython_optimize/c_zeros.pxd,sha256=6Gc0l1q-1nlCO9uKrYeXFiHsbimRZzU3t6EoTa8MVvA,1118
+scipy/optimize/lbfgsb.py,sha256=VHujkuUaSo6g_uQ2k5MqY1tvWUZrs9eqoZTAWCpRMY0,708
+scipy/optimize/linesearch.py,sha256=HKsTaTIl0eE3ZZbPNf3T_ulRpsQVzj4MuQ3BROvBU14,781
+scipy/optimize/minpack.py,sha256=I559Oh_EXey3U0Ixtz4lajjZeexPHMwnXS0aGX1qkY8,1054
+scipy/optimize/minpack2.py,sha256=-GBMcSUKuDdYiS9JmGvwXMnzshmCErFE0E8G66nc9Bw,547
+scipy/optimize/moduleTNC.py,sha256=qTEQ4IWtv_LT6fH3-iYmYNwrtrjG1gS4KFbZ73iDcd0,507
+scipy/optimize/nonlin.py,sha256=Soe0x_9z4QyXdOGJxZ98pksET4H-mqauonpZk49WF-A,1200
+scipy/optimize/optimize.py,sha256=uydjzFbjWgAN_lDMfOwjyGD7FEEhEbZIx3gBiUGKlL0,1240
+scipy/optimize/slsqp.py,sha256=K9gVnto2Ol-0wzGisZXR9MxlGGFhjKIdhPfkUwkWLic,809
+scipy/optimize/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/optimize/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test__basinhopping.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test__differential_evolution.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test__dual_annealing.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test__linprog_clean_inputs.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test__numdiff.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test__remove_redundancy.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test__root.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test__shgo.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test__spectral.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_bracket.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_chandrupatla.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_cobyla.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_constraint_conversion.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_constraints.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_cython_optimize.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_differentiable_functions.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_differentiate.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_direct.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_hessian_update_strategy.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_isotonic_regression.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_lbfgsb_hessinv.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_lbfgsb_setulb.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_least_squares.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_linear_assignment.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_linesearch.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_linprog.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_lsq_common.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_lsq_linear.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_milp.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_minimize_constrained.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_minpack.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_nnls.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_nonlin.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_optimize.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_quadratic_assignment.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_regression.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_slsqp.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_tnc.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_trustregion.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_trustregion_exact.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_trustregion_krylov.cpython-310.pyc,,
+scipy/optimize/tests/__pycache__/test_zeros.cpython-310.pyc,,
+scipy/optimize/tests/test__basinhopping.py,sha256=QrDpRjbRnxgIDevxSovYFjC1UUrEr7g-goyzJHcFZms,18897
+scipy/optimize/tests/test__differential_evolution.py,sha256=sVjn7FKKbMUq64fkTDgmjVNvidMxhvh_hEogG8biVrQ,68844
+scipy/optimize/tests/test__dual_annealing.py,sha256=syotN4J2XhMSdTZaC95mlBRvzkh3Lce3mGtG05nH8dU,15173
+scipy/optimize/tests/test__linprog_clean_inputs.py,sha256=9HFrqlU1OHGTHCgy_R9w2rJ5A5xlu_3QpGbnzQezqXM,11678
+scipy/optimize/tests/test__numdiff.py,sha256=n0qb2yClsrDMNgrjvXqKZX_ww162ZF8C8_jbqvLrTiQ,31351
+scipy/optimize/tests/test__remove_redundancy.py,sha256=gwakPkJo8Y8aRL4son1bp8USfwc9uMrLLnZFrDmfvxY,6799
+scipy/optimize/tests/test__root.py,sha256=MvAzGJkaon4Hfk2BznRvFIVK05ezxezjvwmkEiEZFh8,4211
+scipy/optimize/tests/test__shgo.py,sha256=mUOxM4itGBJ025EOjzlbA1I_ncj3WDkO0j1MRxlptvM,40291
+scipy/optimize/tests/test__spectral.py,sha256=xh-4SMIAWkx_ND2nt7rGACy3ckfw_votfyfxMpQ8m2I,6664
+scipy/optimize/tests/test_bracket.py,sha256=tzlXzMl_36yeDtQV_oU5YH8IBzAJWPfss9QLc6JuqIs,30579
+scipy/optimize/tests/test_chandrupatla.py,sha256=04LrZHxJDpsSNDiZQg_0etOr1pChB-lP4_qLypTxJcA,30108
+scipy/optimize/tests/test_cobyla.py,sha256=PcQCKsaEsyEqgEzZ_T-eC5kTtSdfNekvapf6LeoZPJU,5271
+scipy/optimize/tests/test_constraint_conversion.py,sha256=vp-PUJNne1gnnvutl9mujO7HxnVcSMf5Ix3ti3AwDTI,11887
+scipy/optimize/tests/test_constraints.py,sha256=03SN10ubXpgrNq9Z4DEpPSC6hTXznW-YUF-nxdaxSQ4,9408
+scipy/optimize/tests/test_cython_optimize.py,sha256=n-HccBWoUmmBWq_OsNrAVnt4QrdssIYm4PWG29Ocias,2638
+scipy/optimize/tests/test_differentiable_functions.py,sha256=UtUepS5cJTIHZrSrX8g-74lP-aodwwgGRU0ShbBwf5E,27019
+scipy/optimize/tests/test_differentiate.py,sha256=Ide6nFAUD8KcWyQlV0SpF3PsmpZSPDlk7LI5LA1FEAs,15530
+scipy/optimize/tests/test_direct.py,sha256=dUfsmTx9phFmlwv93UYgjYBoHh-iuWUrdc_KBn7jGlY,13152
+scipy/optimize/tests/test_hessian_update_strategy.py,sha256=czoYotEPSbAfcKhjjf3a9BNJ7i78c4pWzBKCNifuPAY,10115
+scipy/optimize/tests/test_isotonic_regression.py,sha256=_qLmTpd3O9jI4qfFLYLxGiXAf3W5ON1xxro77Jr-GEM,7006
+scipy/optimize/tests/test_lbfgsb_hessinv.py,sha256=rpJbiCUfgJrjp-xVe4JiXjVNe6-l8-s8uPqzKROgmJQ,1137
+scipy/optimize/tests/test_lbfgsb_setulb.py,sha256=44caMVc_OSIthB1SLFPK-k2m0mMWxN4pMiJ-cDnqnLU,3599
+scipy/optimize/tests/test_least_squares.py,sha256=Ho5mgEuNB_t6Jj-M--wdN5e7SfgYnzXdZZZ3wOKETGQ,33951
+scipy/optimize/tests/test_linear_assignment.py,sha256=84d4YHCf9RzjYDKUujQe2GbudkP8dtlSpZtMBwCf_Oc,4085
+scipy/optimize/tests/test_linesearch.py,sha256=DVr7k5tkVpt2XkXwX2edFpRp1x15nfdcXFDK_Mb9XMk,10916
+scipy/optimize/tests/test_linprog.py,sha256=eizplKYRvUKzcXzmvA5n6wNoFN7wzQpCGxowmJl7TTY,96989
+scipy/optimize/tests/test_lsq_common.py,sha256=alCLPPQB4mrxLIAo_rn7eg9xrCEH7DerNBozSimOQRA,9500
+scipy/optimize/tests/test_lsq_linear.py,sha256=E41vtYzwf9Px1QZpm1ShC9GU_sU2X-Cn9apfn5pku6M,10861
+scipy/optimize/tests/test_milp.py,sha256=RDJe1CiL8-UMD8xqe4n2aVWp8qBe1hYufRx8qvad4wU,14553
+scipy/optimize/tests/test_minimize_constrained.py,sha256=c6_cxRer5aG0cXpBH7MwOfIjkPeyG7d5-bVnn9y_IjM,26520
+scipy/optimize/tests/test_minpack.py,sha256=EAarG7t3ucqklW4VWooF_7epPQcYdsocUmN5rjpuDMU,41341
+scipy/optimize/tests/test_nnls.py,sha256=McLnzzUcdj7qANpv1Ui3QQ4XPJfZvvhPtVSDOxU7zFU,19194
+scipy/optimize/tests/test_nonlin.py,sha256=IK7AjY9sWxEb6xwzE9IPnRi4VwQaCfTd9Wv0Pr7_lcs,18493
+scipy/optimize/tests/test_optimize.py,sha256=Qe1JeRz6sxM6Ndcoou_EvxPSzdB0TY3X3BhsYJcHRPs,123372
+scipy/optimize/tests/test_quadratic_assignment.py,sha256=zXttKYFREnrDhMExvBFNKzYb_77tFFsDlOPf-FP5XrA,16307
+scipy/optimize/tests/test_regression.py,sha256=CSg8X-hq6-6jW8vki6aVfEFYRUGTWOg58silM1XNXbU,1077
+scipy/optimize/tests/test_slsqp.py,sha256=KtqXxnMWsxI25GY-YT9BEZtgK9EkdLs_f5CRpXquiMQ,23258
+scipy/optimize/tests/test_tnc.py,sha256=ahSwu8F1tUcPV09l1MsbacUXXi1avQHzQNniYhZRf4s,12700
+scipy/optimize/tests/test_trustregion.py,sha256=HJtCc8Gdjznkzyn7Ei3XByBM_10pqv7VXgXBR9kCc8k,4701
+scipy/optimize/tests/test_trustregion_exact.py,sha256=DnuS71T8CyVKWOP6ib7jB2PQEjNf3O5r1DQ4fQCJSi0,12951
+scipy/optimize/tests/test_trustregion_krylov.py,sha256=DA169NkSqKMHdtDztMnlsrMZC3fnVlqkoKADMzGSWPg,6634
+scipy/optimize/tests/test_zeros.py,sha256=UzJWUB9wBdKpOAN0IQEMm3sYjANg9xtpQzqs_NV4Saw,35691
+scipy/optimize/tnc.py,sha256=5FKObWi_WSt7nFbOrt6MVkJQxZzCxZy_aStpnDV7okY,920
+scipy/optimize/zeros.py,sha256=cL-uiCpCIb28_C5a2O8oGOGC_5t836mICzkKDoMMgZY,789
+scipy/signal/__init__.py,sha256=Qi1hDJ8z3Zw5bdh3HK_Pj4H5aRgz7RML28_EqVC8ytY,13983
+scipy/signal/__pycache__/__init__.cpython-310.pyc,,
+scipy/signal/__pycache__/_arraytools.cpython-310.pyc,,
+scipy/signal/__pycache__/_bsplines.cpython-310.pyc,,
+scipy/signal/__pycache__/_czt.cpython-310.pyc,,
+scipy/signal/__pycache__/_filter_design.cpython-310.pyc,,
+scipy/signal/__pycache__/_fir_filter_design.cpython-310.pyc,,
+scipy/signal/__pycache__/_lti_conversion.cpython-310.pyc,,
+scipy/signal/__pycache__/_ltisys.cpython-310.pyc,,
+scipy/signal/__pycache__/_max_len_seq.cpython-310.pyc,,
+scipy/signal/__pycache__/_peak_finding.cpython-310.pyc,,
+scipy/signal/__pycache__/_savitzky_golay.cpython-310.pyc,,
+scipy/signal/__pycache__/_short_time_fft.cpython-310.pyc,,
+scipy/signal/__pycache__/_signaltools.cpython-310.pyc,,
+scipy/signal/__pycache__/_spectral_py.cpython-310.pyc,,
+scipy/signal/__pycache__/_upfirdn.cpython-310.pyc,,
+scipy/signal/__pycache__/_waveforms.cpython-310.pyc,,
+scipy/signal/__pycache__/_wavelets.cpython-310.pyc,,
+scipy/signal/__pycache__/bsplines.cpython-310.pyc,,
+scipy/signal/__pycache__/filter_design.cpython-310.pyc,,
+scipy/signal/__pycache__/fir_filter_design.cpython-310.pyc,,
+scipy/signal/__pycache__/lti_conversion.cpython-310.pyc,,
+scipy/signal/__pycache__/ltisys.cpython-310.pyc,,
+scipy/signal/__pycache__/signaltools.cpython-310.pyc,,
+scipy/signal/__pycache__/spectral.cpython-310.pyc,,
+scipy/signal/__pycache__/spline.cpython-310.pyc,,
+scipy/signal/__pycache__/waveforms.cpython-310.pyc,,
+scipy/signal/__pycache__/wavelets.cpython-310.pyc,,
+scipy/signal/_arraytools.py,sha256=k3kHbl9RzcqsyftIYSFJZvJFL4zlcMAHyaRFUkFxOXY,8294
+scipy/signal/_bsplines.py,sha256=84tNZ2SuCWbh810Xu4Q084zsLvBptHU7fNGV_gZTYhY,15731
+scipy/signal/_czt.py,sha256=t5P1kRCM3iw3eCaL9hTgctMfQKezkqnjbghLjCkffQE,19445
+scipy/signal/_filter_design.py,sha256=JgYGAcpX4uhomSfJU5zQ-25bomkD9PqnXfMovbg32Ps,186602
+scipy/signal/_fir_filter_design.py,sha256=lcCVdgZytsIhVE1GdzksJ5sD2YPmD1D7EwvYOO52BIo,49381
+scipy/signal/_lti_conversion.py,sha256=GDo7lUK9QLv7PCKoblyvHXaEVtYbuKTwAmJ3OAuy4Tw,16142
+scipy/signal/_ltisys.py,sha256=g1c1oPuplyaQY0tfGGbq3XKfPUHNP0PW_G2AHoqJSLY,116354
+scipy/signal/_max_len_seq.py,sha256=8QkMWoYY3qy3bCKfsuXaS93Bnb2zd-ue6j5i5-3_hi0,5060
+scipy/signal/_max_len_seq_inner.cpython-310-x86_64-linux-gnu.so,sha256=1zYLmm8tjUSq5ZFYuzXmlGhNu6vh7MooPWSp8CXLEw4,77848
+scipy/signal/_peak_finding.py,sha256=d4y3__VSe9hPIueLZ_9xRKB9EnonvUOS6g1xp_WuxAY,48892
+scipy/signal/_peak_finding_utils.cpython-310-x86_64-linux-gnu.so,sha256=junXT0_tsKF9WfjvK4TtoudFYKUKr1RPqrgfWnM034k,305464
+scipy/signal/_savitzky_golay.py,sha256=mnltOfknWRlNiZmNLLy-zKTCrw6nZSdJPEvpGi0kv8E,13417
+scipy/signal/_short_time_fft.py,sha256=jSd8xQrvHrJFyOVhcPJPduCThBvKXPLPuKcQDrOw5pE,73463
+scipy/signal/_signaltools.py,sha256=38oXczH1v4GT4pGVuI1WIYzOFYLHhO66C-SxGbV5ums,157590
+scipy/signal/_sigtools.cpython-310-x86_64-linux-gnu.so,sha256=_DTzyCLkXWw51j738x5PMSR4c4BVozpShS8nQ6e_QOk,109008
+scipy/signal/_sosfilt.cpython-310-x86_64-linux-gnu.so,sha256=bHwx8VcEnIY45Mw6fbe3xsj6SZNPcHSRqWyKxCZ_N-M,303376
+scipy/signal/_spectral.cpython-310-x86_64-linux-gnu.so,sha256=ZK9JCPq7mK4YRg6O7TC2VKUUI14nQYrpP23uhjYPzk0,78176
+scipy/signal/_spectral_py.py,sha256=xRwdztzKYeYv0xIGYfGdxVeW3-DN5L0XJYFlWZjWm7o,78406
+scipy/signal/_spline.cpython-310-x86_64-linux-gnu.so,sha256=Mz_jv3AkR6uNPMxR2jemmXQq1CJGLyOBvG8QLRdGW3A,85280
+scipy/signal/_upfirdn.py,sha256=ODSw2x1KHXN0vdKHm4vnovZxkoafcwIdUek0N8Edu5g,7882
+scipy/signal/_upfirdn_apply.cpython-310-x86_64-linux-gnu.so,sha256=wApFc2zPuMZHuqDuY21pR1X8iNVhbe5GaJ4LlIy4DVM,394672
+scipy/signal/_waveforms.py,sha256=Bm5WOBhk1nXwK0A6yFVTY7tCCv6trdrUjje_xmM878Y,20523
+scipy/signal/_wavelets.py,sha256=NzmN785S0xFdgFhC4Lv52BKrvw3q3wtyVZdCditpDG8,16095
+scipy/signal/bsplines.py,sha256=xpwI33IQDzkH6S5o8ZxDtNj40dDD1G_tkpG4MaMMxQ4,738
+scipy/signal/filter_design.py,sha256=TRo01JzmAh6zpgVgZi_8pHLPM2DKo9fA9yDXpU5AOCM,1471
+scipy/signal/fir_filter_design.py,sha256=m74z7fwTgiYFfHdYd0NYVfpUnDIkNRVCG8nBaOoPVZ8,766
+scipy/signal/lti_conversion.py,sha256=fhyTsetZE9Pe57f9DeBdOIZwc71Nxw7j2Ovn6m7w2W0,707
+scipy/signal/ltisys.py,sha256=E5t7vHjsj09EYmpd27aqtRvT8E8sDpH-5YOgcmeqypI,1146
+scipy/signal/signaltools.py,sha256=ZnV0ARj_8YPUZ7cIxpM2Ko5yuOkW7Ic-JxN5uLmGcj8,1179
+scipy/signal/spectral.py,sha256=m_Q-gzRpT6e_w2kIBFKPBLuDVj5If5zfVWbAViAQJsk,723
+scipy/signal/spline.py,sha256=iisoUmgbyuuEukQjBz99HM3SYao7j1ZsXXmtE-wo5cU,810
+scipy/signal/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/signal/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/_scipy_spectral_test_shim.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/mpsig.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_array_tools.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_bsplines.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_cont2discrete.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_czt.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_dltisys.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_filter_design.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_fir_filter_design.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_ltisys.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_max_len_seq.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_peak_finding.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_result_type.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_savitzky_golay.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_short_time_fft.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_signaltools.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_spectral.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_upfirdn.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_waveforms.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_wavelets.cpython-310.pyc,,
+scipy/signal/tests/__pycache__/test_windows.cpython-310.pyc,,
+scipy/signal/tests/_scipy_spectral_test_shim.py,sha256=qkEcaCK7_jPHA7sellidJJs6rS6wo9xO9f5YkFdqBOQ,19995
+scipy/signal/tests/mpsig.py,sha256=DHB3eHB0KYA-E0SBebKG36YLk-T5egbwwryne3RwIHM,3308
+scipy/signal/tests/test_array_tools.py,sha256=J9Mr5DtqmhiTReWvsk3YclL6Cnv32bDuklBnw2zprJY,3632
+scipy/signal/tests/test_bsplines.py,sha256=7nnnsABF-uwKj13_Vq-CSbZJeIqx22j4yYySw83Q40o,8855
+scipy/signal/tests/test_cont2discrete.py,sha256=3IkRfgGlgnX7X0bERpExPAxAkcGK0h6Ovy6GyrhnYS8,14605
+scipy/signal/tests/test_czt.py,sha256=3HxxWwOWIrIc0GC-K5h6f0NRjkLrWRA5OhoB5y0zbw0,6993
+scipy/signal/tests/test_dltisys.py,sha256=f4wDe0rF_FATRWHkHddbPDOsFGV-Kv2Unz8QeOUUs-k,21558
+scipy/signal/tests/test_filter_design.py,sha256=whn5g9GR7BcsFjSMJyCMQFkrSWJoGSr9bhwEwmOGKP8,193782
+scipy/signal/tests/test_fir_filter_design.py,sha256=77Dt5heM2m9QTQ9VUZTeeSWnTi9cOjFbL-51CfNX-_8,29941
+scipy/signal/tests/test_ltisys.py,sha256=MbFugdbcNFZuzxcpjcVldhpaR64E0AaOg0qEWgPSMQQ,45208
+scipy/signal/tests/test_max_len_seq.py,sha256=X9oyCvW0Ny8hOAVX22HmKaMgi2oioe1cZWO3PTgPOgw,3106
+scipy/signal/tests/test_peak_finding.py,sha256=03S223wQ6xcJ_VyO6WCxthrFjWgatAmGKm6uTIZOlfk,33863
+scipy/signal/tests/test_result_type.py,sha256=25ha15iRfFZxy3nDODyOuvaWequyBpA42YNiiU43iAc,1627
+scipy/signal/tests/test_savitzky_golay.py,sha256=hMD2YqRw3WypwzVQlHwAwa3s6yJHiujXd_Ccspk1yNs,12424
+scipy/signal/tests/test_short_time_fft.py,sha256=h1xMjXJKr9HO1FEElm-D60uKPjPOckL7XOWhGH-fKtY,34474
+scipy/signal/tests/test_signaltools.py,sha256=rW7rMh50nQxlBWeQW104HUQWI8x6z9Me4C3Eruh0tk8,141443
+scipy/signal/tests/test_spectral.py,sha256=9IwUmrhRIynmcuCr-24LMH3HN9rcf2-49tP6bixkFEg,63775
+scipy/signal/tests/test_upfirdn.py,sha256=i3EjQKnwS6FRRRPPzwl1B_zWsQ20Dfa_6WUUYH8I3xM,11240
+scipy/signal/tests/test_waveforms.py,sha256=sTT0DeOER5U9h8Xp54VGvGlbtcxhp_wjGNQXw1yOaGM,11975
+scipy/signal/tests/test_wavelets.py,sha256=BurB2_FZ9rnLVJVhItmaueAUqlnmXR2POtFAJ-h3FLU,6721
+scipy/signal/tests/test_windows.py,sha256=tLnQi4VyekCfhV3Bn1mCY9pCVcDH6TbuYa7yiUI8rak,40990
+scipy/signal/waveforms.py,sha256=HHwdsb-_WPvMhFLAUohMBByHP_kgCL3ZJPY7IZuwprA,672
+scipy/signal/wavelets.py,sha256=ItCm-1UJc8s9y-_wMECmVUePpjW8LMSJVtZB-lFwVao,612
+scipy/signal/windows/__init__.py,sha256=BUSXzc_D5Agp59RacDdG6EE9QjkXXtlcfQrTop_IJwo,2119
+scipy/signal/windows/__pycache__/__init__.cpython-310.pyc,,
+scipy/signal/windows/__pycache__/_windows.cpython-310.pyc,,
+scipy/signal/windows/__pycache__/windows.cpython-310.pyc,,
+scipy/signal/windows/_windows.py,sha256=F-9DNB-71WE3WQOxVfNESgmc4gG21rDFgD631Y9-E78,83607
+scipy/signal/windows/windows.py,sha256=OztcTMqgFMLguY9-hVUvSSPMYY4GYkbrFvtsRcktxC8,879
+scipy/sparse/__init__.py,sha256=WClFuFd1byUOWhYZ6ZrjBsnKTwXEvjUJpVoMzbAvvv4,9272
+scipy/sparse/__pycache__/__init__.cpython-310.pyc,,
+scipy/sparse/__pycache__/_base.cpython-310.pyc,,
+scipy/sparse/__pycache__/_bsr.cpython-310.pyc,,
+scipy/sparse/__pycache__/_compressed.cpython-310.pyc,,
+scipy/sparse/__pycache__/_construct.cpython-310.pyc,,
+scipy/sparse/__pycache__/_coo.cpython-310.pyc,,
+scipy/sparse/__pycache__/_csc.cpython-310.pyc,,
+scipy/sparse/__pycache__/_csr.cpython-310.pyc,,
+scipy/sparse/__pycache__/_data.cpython-310.pyc,,
+scipy/sparse/__pycache__/_dia.cpython-310.pyc,,
+scipy/sparse/__pycache__/_dok.cpython-310.pyc,,
+scipy/sparse/__pycache__/_extract.cpython-310.pyc,,
+scipy/sparse/__pycache__/_index.cpython-310.pyc,,
+scipy/sparse/__pycache__/_lil.cpython-310.pyc,,
+scipy/sparse/__pycache__/_matrix.cpython-310.pyc,,
+scipy/sparse/__pycache__/_matrix_io.cpython-310.pyc,,
+scipy/sparse/__pycache__/_spfuncs.cpython-310.pyc,,
+scipy/sparse/__pycache__/_sputils.cpython-310.pyc,,
+scipy/sparse/__pycache__/base.cpython-310.pyc,,
+scipy/sparse/__pycache__/bsr.cpython-310.pyc,,
+scipy/sparse/__pycache__/compressed.cpython-310.pyc,,
+scipy/sparse/__pycache__/construct.cpython-310.pyc,,
+scipy/sparse/__pycache__/coo.cpython-310.pyc,,
+scipy/sparse/__pycache__/csc.cpython-310.pyc,,
+scipy/sparse/__pycache__/csr.cpython-310.pyc,,
+scipy/sparse/__pycache__/data.cpython-310.pyc,,
+scipy/sparse/__pycache__/dia.cpython-310.pyc,,
+scipy/sparse/__pycache__/dok.cpython-310.pyc,,
+scipy/sparse/__pycache__/extract.cpython-310.pyc,,
+scipy/sparse/__pycache__/lil.cpython-310.pyc,,
+scipy/sparse/__pycache__/sparsetools.cpython-310.pyc,,
+scipy/sparse/__pycache__/spfuncs.cpython-310.pyc,,
+scipy/sparse/__pycache__/sputils.cpython-310.pyc,,
+scipy/sparse/_base.py,sha256=yXHwyNvhZYQ4JN7AxHwOR2zZPRzjBPzet_8Lv5WeKVE,52557
+scipy/sparse/_bsr.py,sha256=miltBWH6omnM8vuBeZqD9VoJ6xybgzRoz0F1xLLlbEs,30154
+scipy/sparse/_compressed.py,sha256=ul9lnyyKN2yaLKVs54CWIJYQYTlD6Seiftp_UXhxnok,53089
+scipy/sparse/_construct.py,sha256=S8avkP1bHGA5Hrufj2IldPqYXK1ls0GRUBdIRBpGfWw,47179
+scipy/sparse/_coo.py,sha256=A_6Le4-yfn20cx8rjSlzP1P-x6v7dptmNu-makDJoRk,31757
+scipy/sparse/_csc.py,sha256=oMNfti0VZ-OKJi-5THPcQCrj-vWFS3heJoGWUCyJ-EM,11057
+scipy/sparse/_csparsetools.cpython-310-x86_64-linux-gnu.so,sha256=i0Ju-q3-Rgwi4xEtHjyg4wov_ZDYUktLgO-SsOUC2ko,823376
+scipy/sparse/_csr.py,sha256=nM2lgWRujXz_PhoinsooCfn0iqkzGS9aNm-Mapi3bus,15675
+scipy/sparse/_data.py,sha256=CbZVzMgio3OLAlLl2_1SlGHO7A2oXcdpAhKu1VgTlTI,17219
+scipy/sparse/_dia.py,sha256=cihl_869L2DSqjslBanJGhBgCYmnezBC8ZSdEAkZD8I,18755
+scipy/sparse/_dok.py,sha256=rL11rshTMrbm-SxkTa4XD90JSjnRCjdU48WPLSNExH8,22220
+scipy/sparse/_extract.py,sha256=iIRSqqVMiXfiacfswDCWXTjZCFfRvOz1NFicLUMHSl4,4987
+scipy/sparse/_index.py,sha256=c_Wt3XdFl9Zd6bAnfZ-pOCYHZ6VaB1a1duIh9xvYO50,13279
+scipy/sparse/_lil.py,sha256=zMhN5b7M0Yk1j1M5CS1hUcq7mt1x5POGHPAuxQkfoo4,20521
+scipy/sparse/_matrix.py,sha256=cT7Piq0NYzvRouy3HksG7d063HTjRlauBheAAT9PzCI,3081
+scipy/sparse/_matrix_io.py,sha256=dHzwMMqkdhWA8YTonemaZmVT66i3GiG46FBcsIDBbAY,6005
+scipy/sparse/_sparsetools.cpython-310-x86_64-linux-gnu.so,sha256=7vpfiypFLkZVGMKWMGJu4XxaXv8CLhZVIDgs_ysZZqg,4466608
+scipy/sparse/_spfuncs.py,sha256=lDVTp6CiQIuMxTfSzOi3-k6p97ayXJxdKPTf7j_4GWc,1987
+scipy/sparse/_sputils.py,sha256=o3u434vbhJaoOE0ixhQQXJ_0T7ZqC-hmt5RmgFPm048,14545
+scipy/sparse/base.py,sha256=8Yx-QLKSRu9LJjgG-y8VqsRnsjImB2iKoJFxTgKGFsI,791
+scipy/sparse/bsr.py,sha256=CsYirxoLqHwBiEyNbOgGdZMx4Lt3adKZ-7uVv1gpzCY,811
+scipy/sparse/compressed.py,sha256=rbaz4AoTJvNnfnwEx4ocDXlkHJPOxe9DzqxCcJoHY2g,1009
+scipy/sparse/construct.py,sha256=i9lHBSRsDkvoNCbF9b7mZ0C2fHCjKU5CKCE30c-CxMc,925
+scipy/sparse/coo.py,sha256=VRF6kaYsVtyprwYrEuy1gRcCU5G7xsKyY0L1zJ_9JiQ,844
+scipy/sparse/csc.py,sha256=EV_LxYjPiRsTV6-J8kUefNna-R0tdI5uBt9Fj_XWlwc,609
+scipy/sparse/csgraph/__init__.py,sha256=VbNYhqSQ5ZPIPjU3Q9Q9MKTH1umiVu11GOjXNa1Cx68,7753
+scipy/sparse/csgraph/__pycache__/__init__.cpython-310.pyc,,
+scipy/sparse/csgraph/__pycache__/_laplacian.cpython-310.pyc,,
+scipy/sparse/csgraph/__pycache__/_validation.cpython-310.pyc,,
+scipy/sparse/csgraph/_flow.cpython-310-x86_64-linux-gnu.so,sha256=LAliLFRkNi9po0xA86_zsGl4OIgtrAOSG6d6RLZY4gw,344976
+scipy/sparse/csgraph/_laplacian.py,sha256=n5iodxzmVtvbpcFLld-y-ZG3539uebImpMfIfnMhMck,18209
+scipy/sparse/csgraph/_matching.cpython-310-x86_64-linux-gnu.so,sha256=2U1wR2L9IvxCUd8wwwA_WnUGmbZsJNGuItwVbmT8mrQ,347976
+scipy/sparse/csgraph/_min_spanning_tree.cpython-310-x86_64-linux-gnu.so,sha256=mnQ3Ijgjil1ybq7RD4a8KSBAavBONxvpcWMAttrdkac,259472
+scipy/sparse/csgraph/_reordering.cpython-310-x86_64-linux-gnu.so,sha256=0nvEhVx41cCZZZ3oeF_BunnkG7EeAY46T_6nBRA8fdA,331936
+scipy/sparse/csgraph/_shortest_path.cpython-310-x86_64-linux-gnu.so,sha256=myjTgnUn1FHAJRZjAD7AdIf4J9TSY-VEEgzWxRkvLwE,484824
+scipy/sparse/csgraph/_tools.cpython-310-x86_64-linux-gnu.so,sha256=ugHhgQ1RoYT6EXuEtxbEA9q2-531YUdVoir4Pgfy5qU,205312
+scipy/sparse/csgraph/_traversal.cpython-310-x86_64-linux-gnu.so,sha256=s_0NkoNPI5Hs13oLyDHZqsxzbCjeOauQ8__QIIMJdg8,658864
+scipy/sparse/csgraph/_validation.py,sha256=VQl6Aj3ns7AhLe3BDKp0-tRUXSzXOeD32wQ1eN7xnek,2476
+scipy/sparse/csgraph/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/sparse/csgraph/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/sparse/csgraph/tests/__pycache__/test_connected_components.cpython-310.pyc,,
+scipy/sparse/csgraph/tests/__pycache__/test_conversions.cpython-310.pyc,,
+scipy/sparse/csgraph/tests/__pycache__/test_flow.cpython-310.pyc,,
+scipy/sparse/csgraph/tests/__pycache__/test_graph_laplacian.cpython-310.pyc,,
+scipy/sparse/csgraph/tests/__pycache__/test_matching.cpython-310.pyc,,
+scipy/sparse/csgraph/tests/__pycache__/test_pydata_sparse.cpython-310.pyc,,
+scipy/sparse/csgraph/tests/__pycache__/test_reordering.cpython-310.pyc,,
+scipy/sparse/csgraph/tests/__pycache__/test_shortest_path.cpython-310.pyc,,
+scipy/sparse/csgraph/tests/__pycache__/test_spanning_tree.cpython-310.pyc,,
+scipy/sparse/csgraph/tests/__pycache__/test_traversal.cpython-310.pyc,,
+scipy/sparse/csgraph/tests/test_connected_components.py,sha256=a2HZjm7HsC0STqiDnhN6OJL4yIMcM28VNVtMXDI2BqE,3948
+scipy/sparse/csgraph/tests/test_conversions.py,sha256=KJ6jEAYl5C8APyH_WE5I1M8qGgxOyjGtNPf9rt4RYCo,1856
+scipy/sparse/csgraph/tests/test_flow.py,sha256=BXhx0qBT3Ijy9all5OhNVNVzMbdTPySQuaZ1ajK6DTs,7420
+scipy/sparse/csgraph/tests/test_graph_laplacian.py,sha256=6fDEldaGM_gEZk-NMHaeQMKjZRnz3J7R5kWqHhfchY0,10990
+scipy/sparse/csgraph/tests/test_matching.py,sha256=MkSKU_9_IIhRnhp5sbRbB8RYqVe_keS4xqhDVvV3EhM,11944
+scipy/sparse/csgraph/tests/test_pydata_sparse.py,sha256=eoiFT4O_myDq2hVHM3A2qkwL5t8hv3XwRLhXwC4ZmHE,3601
+scipy/sparse/csgraph/tests/test_reordering.py,sha256=by-44sshHL-yaYE23lDp1EqnG-72MRbExi_HYSMJEz8,2613
+scipy/sparse/csgraph/tests/test_shortest_path.py,sha256=RmRAk_RxMo3C9do0f01DsHSPyDUVEUZXuq4h6aALrDo,14441
+scipy/sparse/csgraph/tests/test_spanning_tree.py,sha256=7Zcbj_87eeAkm6RetgeO0wVp1EOIEjGxJLuGtw_H9qc,2168
+scipy/sparse/csgraph/tests/test_traversal.py,sha256=UNTZXJ9bjDHcji_vUa1Ye5Kbp6xLfyHBG9LusToGUSY,2840
+scipy/sparse/csr.py,sha256=9UrWUoq5-hSl9bcaVeWxN4tmPJisTQ_6JiISCyrlMCw,658
+scipy/sparse/data.py,sha256=qGDAuAvTASgQ7wXXZ9t2JPp0rNBNVxObTTzXNHDRSEo,573
+scipy/sparse/dia.py,sha256=0y5_QfvVeU5doVbngvf8G36qVGU-FlnUxRChQ43e1aU,689
+scipy/sparse/dok.py,sha256=LMnaLFd266EZ3p4D1ZgOICGRZkY6s7YM0Wvlr6ylRn0,733
+scipy/sparse/extract.py,sha256=6qT2PNOilsEhDWl6MhmgpveIuQr4QCs3LATwIrBroOQ,567
+scipy/sparse/lil.py,sha256=BbnMgvzMi33OqmBNYF_VDPeju2RcRs9OyZUUU3aZHcc,734
+scipy/sparse/linalg/__init__.py,sha256=_2NSGBqWo-MaV_ZiFDzXRYTM9eW8RfmtSWVp4WMESyw,3999
+scipy/sparse/linalg/__pycache__/__init__.cpython-310.pyc,,
+scipy/sparse/linalg/__pycache__/_expm_multiply.cpython-310.pyc,,
+scipy/sparse/linalg/__pycache__/_interface.cpython-310.pyc,,
+scipy/sparse/linalg/__pycache__/_matfuncs.cpython-310.pyc,,
+scipy/sparse/linalg/__pycache__/_norm.cpython-310.pyc,,
+scipy/sparse/linalg/__pycache__/_onenormest.cpython-310.pyc,,
+scipy/sparse/linalg/__pycache__/_special_sparse_arrays.cpython-310.pyc,,
+scipy/sparse/linalg/__pycache__/_svdp.cpython-310.pyc,,
+scipy/sparse/linalg/__pycache__/dsolve.cpython-310.pyc,,
+scipy/sparse/linalg/__pycache__/eigen.cpython-310.pyc,,
+scipy/sparse/linalg/__pycache__/interface.cpython-310.pyc,,
+scipy/sparse/linalg/__pycache__/isolve.cpython-310.pyc,,
+scipy/sparse/linalg/__pycache__/matfuncs.cpython-310.pyc,,
+scipy/sparse/linalg/_dsolve/__init__.py,sha256=YxlWZfj2dxiZrFLL6Oj6iWKEuC6OHXdRVRf9xCU_Zoo,1991
+scipy/sparse/linalg/_dsolve/__pycache__/__init__.cpython-310.pyc,,
+scipy/sparse/linalg/_dsolve/__pycache__/_add_newdocs.cpython-310.pyc,,
+scipy/sparse/linalg/_dsolve/__pycache__/linsolve.cpython-310.pyc,,
+scipy/sparse/linalg/_dsolve/_add_newdocs.py,sha256=ASCr6jhvN8hgJCEg9Qq685LXKJuGTvFQCZtUwzWphDk,3912
+scipy/sparse/linalg/_dsolve/_superlu.cpython-310-x86_64-linux-gnu.so,sha256=UOaqh-gu9w_8RGj1Pb4ZdPXjHOM9JvPy5o4vSWn42BI,378961
+scipy/sparse/linalg/_dsolve/linsolve.py,sha256=Iro6NQavwUGTmib9d3UOPBQAXXCVpplzfCiqRDS6nh0,26486
+scipy/sparse/linalg/_dsolve/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/sparse/linalg/_dsolve/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/sparse/linalg/_dsolve/tests/__pycache__/test_linsolve.cpython-310.pyc,,
+scipy/sparse/linalg/_dsolve/tests/test_linsolve.py,sha256=632NbRmJm2-8vbQ6g9pFiMsApZ01tIGveNfP0BUjVXo,27784
+scipy/sparse/linalg/_eigen/__init__.py,sha256=SwNho3iWZu_lJvcdSomA5cQdcDU8gocKbmRnm6Bf9-0,460
+scipy/sparse/linalg/_eigen/__pycache__/__init__.cpython-310.pyc,,
+scipy/sparse/linalg/_eigen/__pycache__/_svds.cpython-310.pyc,,
+scipy/sparse/linalg/_eigen/__pycache__/_svds_doc.cpython-310.pyc,,
+scipy/sparse/linalg/_eigen/_svds.py,sha256=3N36RCFHqkYraaY7Fc7WoN-w9_7c1cG0QnlWYAJaroA,20239
+scipy/sparse/linalg/_eigen/_svds_doc.py,sha256=3_mPNg5idszebdDr-3z_39dX3KBmX2ui1PCCP_hPF24,15605
+scipy/sparse/linalg/_eigen/arpack/COPYING,sha256=CSZWb59AYXjRIU-Mx5bhZrEhPdfAXgxbRhqLisnlC74,1892
+scipy/sparse/linalg/_eigen/arpack/__init__.py,sha256=zDxf9LokyPitn3_0d-PUXoBCh6tWK0eUSvsAj6nkXI0,562
+scipy/sparse/linalg/_eigen/arpack/__pycache__/__init__.cpython-310.pyc,,
+scipy/sparse/linalg/_eigen/arpack/__pycache__/arpack.cpython-310.pyc,,
+scipy/sparse/linalg/_eigen/arpack/_arpack.cpython-310-x86_64-linux-gnu.so,sha256=BLVsy5NYNQUJA_GGLA0Xt6Ovliza3b3pBham9OOqN28,486441
+scipy/sparse/linalg/_eigen/arpack/arpack.py,sha256=BSkXtfwvmUtmBejugJkE2LOPeGtV-Ms7TxXHIpD_Rx8,67401
+scipy/sparse/linalg/_eigen/arpack/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/sparse/linalg/_eigen/arpack/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/sparse/linalg/_eigen/arpack/tests/__pycache__/test_arpack.cpython-310.pyc,,
+scipy/sparse/linalg/_eigen/arpack/tests/test_arpack.py,sha256=R5FfNhm1CZNVMiP_ldOp5x_0pzpwCJlO68FPW_pR8vw,23750
+scipy/sparse/linalg/_eigen/lobpcg/__init__.py,sha256=E5JEPRoVz-TaLrj_rPm5LP3jCwei4XD-RxbcxYwf5lM,420
+scipy/sparse/linalg/_eigen/lobpcg/__pycache__/__init__.cpython-310.pyc,,
+scipy/sparse/linalg/_eigen/lobpcg/__pycache__/lobpcg.cpython-310.pyc,,
+scipy/sparse/linalg/_eigen/lobpcg/lobpcg.py,sha256=CdmO8VQrARiE1i8VJvE4O0tYytbzQCzDIf3eo1sWq6g,41905
+scipy/sparse/linalg/_eigen/lobpcg/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/sparse/linalg/_eigen/lobpcg/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/sparse/linalg/_eigen/lobpcg/tests/__pycache__/test_lobpcg.cpython-310.pyc,,
+scipy/sparse/linalg/_eigen/lobpcg/tests/test_lobpcg.py,sha256=TVAhSqfKVm-T05Nx-eIJfMMyf8P-XEyZv_r9YSrHuZo,23813
+scipy/sparse/linalg/_eigen/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/sparse/linalg/_eigen/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/sparse/linalg/_eigen/tests/__pycache__/test_svds.cpython-310.pyc,,
+scipy/sparse/linalg/_eigen/tests/test_svds.py,sha256=0fxAvOZN6Jet3H8dAlq0je1MS5THhGv0l4dv1ZYoUFw,36157
+scipy/sparse/linalg/_expm_multiply.py,sha256=enIS-h-6F6UQ6SQeR57bH8MYbM4XzwQv5dVqlWVqhJU,26312
+scipy/sparse/linalg/_interface.py,sha256=drcxlR1TUiZ1sEat2ke6bh62DPIe888Xd1QagqHMlq8,27979
+scipy/sparse/linalg/_isolve/__init__.py,sha256=Z_eQUYbe6RWMSNi09T9TfPEWm8RsVxcIKYAlihM-U-c,479
+scipy/sparse/linalg/_isolve/__pycache__/__init__.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/__pycache__/_gcrotmk.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/__pycache__/iterative.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/__pycache__/lgmres.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/__pycache__/lsmr.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/__pycache__/lsqr.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/__pycache__/minres.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/__pycache__/tfqmr.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/__pycache__/utils.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/_gcrotmk.py,sha256=j2JVJBMs8u72hwF0jueRIfkJlS4ZtUZHt0TXYzWXcUY,16212
+scipy/sparse/linalg/_isolve/iterative.py,sha256=T2ebi650XYuxLcE90_vvNhnmDKNn4yCMEHy2fQyqFMM,35768
+scipy/sparse/linalg/_isolve/lgmres.py,sha256=_HXq4vrLuoo2cvjZIgJ9_NJPQnpaQNoGcrUFQdhgQto,9159
+scipy/sparse/linalg/_isolve/lsmr.py,sha256=ej51ykzoqpWvyksTFISRN-lXce7InPpqyDT4N42QEpY,15653
+scipy/sparse/linalg/_isolve/lsqr.py,sha256=mJADMPk_aL_lf57tkaTydK4lYhkszmHf2-4jHJEe8Vs,21214
+scipy/sparse/linalg/_isolve/minres.py,sha256=lz5MBEKkTIjhiBnWoJ6WhNXGkKiYRKnt2FAI2MNvsmM,11611
+scipy/sparse/linalg/_isolve/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/sparse/linalg/_isolve/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/tests/__pycache__/test_gcrotmk.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/tests/__pycache__/test_iterative.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/tests/__pycache__/test_lgmres.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/tests/__pycache__/test_lsmr.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/tests/__pycache__/test_lsqr.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/tests/__pycache__/test_minres.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/tests/__pycache__/test_utils.cpython-310.pyc,,
+scipy/sparse/linalg/_isolve/tests/test_gcrotmk.py,sha256=M5lrn0JBRUmo6ug2p1SgDtm7PAbU6potiJzRy-wT68Q,5413
+scipy/sparse/linalg/_isolve/tests/test_iterative.py,sha256=g2dEqDPRJUuesDn8FrTOQxkZ2wMaOWGao7z7lShV1Ng,25626
+scipy/sparse/linalg/_isolve/tests/test_lgmres.py,sha256=hAjJLuBtyLMCCqK_uZbTVGnsFACsLZHgtiHdUABRO3Q,7064
+scipy/sparse/linalg/_isolve/tests/test_lsmr.py,sha256=6bQA3WdneycfXx6aZyFdPjWRUSXm_Smjh9YcJo8R-4E,6365
+scipy/sparse/linalg/_isolve/tests/test_lsqr.py,sha256=IG6FaJjYU_0QYYCBC4yjNiZldi1ZafIITDKnESTScCo,3754
+scipy/sparse/linalg/_isolve/tests/test_minres.py,sha256=7h3A3dzQV9_jqYrNdulAAJnzZ5icw_HBnTXNXnUdUto,2435
+scipy/sparse/linalg/_isolve/tests/test_utils.py,sha256=VlmvctRaQtjuYvQuoe2t2ufib74Tua_7qsiVrs3j-p0,265
+scipy/sparse/linalg/_isolve/tfqmr.py,sha256=SpMqzbNeYBgMU6DYgQyV2SbGlnal6d1iMysAILQj_pI,6689
+scipy/sparse/linalg/_isolve/utils.py,sha256=I-Fjco_b83YKUtZPVdobTjPyY41-2SHruVvKZVOIXaU,3598
+scipy/sparse/linalg/_matfuncs.py,sha256=wib0cFQFGX9CylfenGMGdDskE5XJ_LTC_OWpLJcfIZY,29385
+scipy/sparse/linalg/_norm.py,sha256=y4J98m4JBfHI67lZNsF93SUIiy4JHwhFElFjuZE_twg,6067
+scipy/sparse/linalg/_onenormest.py,sha256=47p9H_75GVy3AobAmpgYQQI3Nm7owHVil6ezu42PHsQ,15486
+scipy/sparse/linalg/_propack/_cpropack.cpython-310-x86_64-linux-gnu.so,sha256=SlYCZgv47DhHlVImmktzifsuFrQjbVrwSNNJnJoVeeo,158281
+scipy/sparse/linalg/_propack/_dpropack.cpython-310-x86_64-linux-gnu.so,sha256=POglN_81l6zE-ZLoBfFLvRzAplvMi448eie1-liotTQ,133633
+scipy/sparse/linalg/_propack/_spropack.cpython-310-x86_64-linux-gnu.so,sha256=h3dW6gZeeshDj26Qmz6aNcCZeWxo82isE4lkBtIxhyM,133633
+scipy/sparse/linalg/_propack/_zpropack.cpython-310-x86_64-linux-gnu.so,sha256=y9SyEYpI32lN02fWyYrRH_8jtGxPmFvc_Yru01nAJUI,150089
+scipy/sparse/linalg/_special_sparse_arrays.py,sha256=7jnMobVkXaYQeHODLmaTFwAL-uC-LVda5D1vz-vpz3A,34298
+scipy/sparse/linalg/_svdp.py,sha256=3_w6ECB1W0LiFoS400LCtx0NXwKPJETmoF9X1JZ07uI,11415
+scipy/sparse/linalg/dsolve.py,sha256=iR9kBE3U5eVFBVJW8bpEGEhFFfR6PiI-NIbqKzLT8U4,697
+scipy/sparse/linalg/eigen.py,sha256=SItXs6TCDv9zJFnj8_KyBzJakRC2oeIGDqVEs0sHmzQ,664
+scipy/sparse/linalg/interface.py,sha256=JHIM0cIQUEzMmUqhkU69hTy6seeG648_l2XI39nmLvs,682
+scipy/sparse/linalg/isolve.py,sha256=BWvUveL2QGKFxqVGDFq2PpGEggkq204uPYs5I83lzgY,671
+scipy/sparse/linalg/matfuncs.py,sha256=zwrqI0IwAPhQt6IIJ-oK5W_ixhGMGcYVGcSr2qU6lFI,697
+scipy/sparse/linalg/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/sparse/linalg/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/sparse/linalg/tests/__pycache__/test_expm_multiply.cpython-310.pyc,,
+scipy/sparse/linalg/tests/__pycache__/test_interface.cpython-310.pyc,,
+scipy/sparse/linalg/tests/__pycache__/test_matfuncs.cpython-310.pyc,,
+scipy/sparse/linalg/tests/__pycache__/test_norm.cpython-310.pyc,,
+scipy/sparse/linalg/tests/__pycache__/test_onenormest.cpython-310.pyc,,
+scipy/sparse/linalg/tests/__pycache__/test_propack.cpython-310.pyc,,
+scipy/sparse/linalg/tests/__pycache__/test_pydata_sparse.cpython-310.pyc,,
+scipy/sparse/linalg/tests/__pycache__/test_special_sparse_arrays.cpython-310.pyc,,
+scipy/sparse/linalg/tests/propack_test_data.npz,sha256=v-NNmpI1Pgj0APODcTblU6jpHUQRhpE9ObWb-KYnu6M,600350
+scipy/sparse/linalg/tests/test_expm_multiply.py,sha256=EN5HcjT92SgJuTHX89Ebh-OIgrrR0UVxjcrPYmNAN60,13955
+scipy/sparse/linalg/tests/test_interface.py,sha256=MmCzkRdcaIy2DUOYRFRv8px_Hk68AFdepBe8ivbSXLA,17953
+scipy/sparse/linalg/tests/test_matfuncs.py,sha256=gPpXsIUZg97wL_fzHodNMyswgZ0h9nqxTqxFu8_3bL0,21885
+scipy/sparse/linalg/tests/test_norm.py,sha256=8waDQ-csiw4jTIQPz8qlseqgosvjY9OHfAU7lJ8yLxo,6163
+scipy/sparse/linalg/tests/test_onenormest.py,sha256=EYUVD6i7RGiMi_bclm1_4YkLZSAma5CHqRH9YeDvtwM,9227
+scipy/sparse/linalg/tests/test_propack.py,sha256=Tvcx6MliY6i_Px0KlKfGwjFCElH5y2Arekm7WVAhKqI,5539
+scipy/sparse/linalg/tests/test_pydata_sparse.py,sha256=fqGKTw7gnPyHQ47mlWjL5wDEPZ2i8gbzpZvwPHHc5OQ,6213
+scipy/sparse/linalg/tests/test_special_sparse_arrays.py,sha256=2Z7r1LPx7QTekuXNTLcspGOdJ9riRwioGIpxzIa0Kh4,12854
+scipy/sparse/sparsetools.py,sha256=0d2MTFPJIvMWcTfWTSKIzP7AiVyFGS76plzgzWSXGuQ,2168
+scipy/sparse/spfuncs.py,sha256=zcwv-EvwXW-_7kjRJqNm-ZoKbDcxlU4xOuvl3iBWao0,582
+scipy/sparse/sputils.py,sha256=coz-V4p4Vg2eT1yc3sZF6_7FXKvj2ZuP7QKhPF4UEb0,973
+scipy/sparse/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/sparse/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/sparse/tests/__pycache__/test_array_api.cpython-310.pyc,,
+scipy/sparse/tests/__pycache__/test_base.cpython-310.pyc,,
+scipy/sparse/tests/__pycache__/test_common1d.cpython-310.pyc,,
+scipy/sparse/tests/__pycache__/test_construct.cpython-310.pyc,,
+scipy/sparse/tests/__pycache__/test_coo.cpython-310.pyc,,
+scipy/sparse/tests/__pycache__/test_csc.cpython-310.pyc,,
+scipy/sparse/tests/__pycache__/test_csr.cpython-310.pyc,,
+scipy/sparse/tests/__pycache__/test_deprecations.cpython-310.pyc,,
+scipy/sparse/tests/__pycache__/test_dok.cpython-310.pyc,,
+scipy/sparse/tests/__pycache__/test_extract.cpython-310.pyc,,
+scipy/sparse/tests/__pycache__/test_matrix_io.cpython-310.pyc,,
+scipy/sparse/tests/__pycache__/test_minmax1d.cpython-310.pyc,,
+scipy/sparse/tests/__pycache__/test_sparsetools.cpython-310.pyc,,
+scipy/sparse/tests/__pycache__/test_spfuncs.cpython-310.pyc,,
+scipy/sparse/tests/__pycache__/test_sputils.cpython-310.pyc,,
+scipy/sparse/tests/data/csc_py2.npz,sha256=usJ_Gj6x_dEC2uObfdYc6D6C8JY4jjROFChQcZhNAfo,846
+scipy/sparse/tests/data/csc_py3.npz,sha256=axuEMVxwd0F-cgUS0IalpiF8KHW4GNJ3BK6bcjfGnf4,851
+scipy/sparse/tests/test_array_api.py,sha256=OWXlJJzLgz9LdbLyJ8PrOaAdDRR8-xJs067jY37AwqI,14465
+scipy/sparse/tests/test_base.py,sha256=ns97Qb0d96Bkts3VnayHqYg8K9t7qYQBvmvq6UP2vpM,190463
+scipy/sparse/tests/test_common1d.py,sha256=uMbeYmB-FcSE2gQ8tC4CvptRalUDOPNy3amXTDQ34EQ,15613
+scipy/sparse/tests/test_construct.py,sha256=6J4zV_rbj-eO7rLiR4kF_3nxf1sf82lzxOzHFif91iM,33414
+scipy/sparse/tests/test_coo.py,sha256=opa1NGLbCzMDMIbuxS1nn7kFhFx1cu1WLQTJg8SZe04,8477
+scipy/sparse/tests/test_csc.py,sha256=rB2cBXznxPdQbMZpdQyQitUdCdEeO6bWt7tQ_LBGGDw,2958
+scipy/sparse/tests/test_csr.py,sha256=efYU3H8Mm3GIB0ZRxXQCZixFo2OB56AR016k-bz33tY,6488
+scipy/sparse/tests/test_deprecations.py,sha256=g4bw2bVauWSGt4e0yvDJ1MMkqDtp97kTl77EXwyDsIs,645
+scipy/sparse/tests/test_dok.py,sha256=iGzGJVnfC-aLW7Ra2GXJv8COW8V-bBc2nphTTcXcDZU,5761
+scipy/sparse/tests/test_extract.py,sha256=4qUPrtCv9H7xd-c9Xs51seQCiIlK45n-9ZEVTDuPiv8,1685
+scipy/sparse/tests/test_matrix_io.py,sha256=sLyFQeZ8QpiSoTM1A735j-LK4K0MV-L7VnWtNaBJhw4,3305
+scipy/sparse/tests/test_minmax1d.py,sha256=HNR0aaPGesVzenx_iXNKTs9bMoGomckk7aeUscjnGx0,2375
+scipy/sparse/tests/test_sparsetools.py,sha256=zKeUESux895mYLdhhW_uM5V1c-djdEKnZ-xURx5fNrw,10543
+scipy/sparse/tests/test_spfuncs.py,sha256=ECs34sgYYhTBWe4hIkx357obH2lLsnJWkh7TfacjThw,3258
+scipy/sparse/tests/test_sputils.py,sha256=h8YJ7QKigGy49OPf_X8KZBF3ZmB5RN3BjghNeMGg3rI,7286
+scipy/spatial/__init__.py,sha256=SOzwiLe2DZ3ymTbCiSaYRG81hJfeqSFy5PcccZ3Cwn0,3697
+scipy/spatial/__pycache__/__init__.cpython-310.pyc,,
+scipy/spatial/__pycache__/_geometric_slerp.cpython-310.pyc,,
+scipy/spatial/__pycache__/_kdtree.cpython-310.pyc,,
+scipy/spatial/__pycache__/_plotutils.cpython-310.pyc,,
+scipy/spatial/__pycache__/_procrustes.cpython-310.pyc,,
+scipy/spatial/__pycache__/_spherical_voronoi.cpython-310.pyc,,
+scipy/spatial/__pycache__/ckdtree.cpython-310.pyc,,
+scipy/spatial/__pycache__/distance.cpython-310.pyc,,
+scipy/spatial/__pycache__/kdtree.cpython-310.pyc,,
+scipy/spatial/__pycache__/qhull.cpython-310.pyc,,
+scipy/spatial/_ckdtree.cpython-310-x86_64-linux-gnu.so,sha256=EDFGE2PDdyqaMgMGkzQbESCT_5ieq249BCE9aHPCNdk,1027824
+scipy/spatial/_ckdtree.pyi,sha256=rt73FClv4b7Ua0TcIj4gLWWfiNrETMlCFnyqTXzeAQM,5892
+scipy/spatial/_distance_pybind.cpython-310-x86_64-linux-gnu.so,sha256=OVLqkiGCWJfBu2EupE192IeoHgdh2IIy-vr1Y9eWW18,641232
+scipy/spatial/_distance_wrap.cpython-310-x86_64-linux-gnu.so,sha256=oB_9nLLv1_nIJWpVmTOtfKC9wwFJHopahlk-IEz96Ok,113256
+scipy/spatial/_geometric_slerp.py,sha256=WdTteqZuTzrW-ZMXTKehWTplaOJrtqQimAIWWAaW5vM,7981
+scipy/spatial/_hausdorff.cpython-310-x86_64-linux-gnu.so,sha256=M_21smS0-G27W9YwYHvZx2mu-x50yCyHVXO6bvmWtF4,250088
+scipy/spatial/_kdtree.py,sha256=9k5hOuUrM7vnVTUp4_IKCJAjaKekCB378inhmYgeBQQ,33443
+scipy/spatial/_plotutils.py,sha256=hESt827uWjj14yGCsRCLrpa_oMUMwGJZ0DNRNDPGTfo,7259
+scipy/spatial/_procrustes.py,sha256=oj1TnlLsBxlLVXvn7zG5nymeHxQkRMSDzgjsLZGg-9A,4429
+scipy/spatial/_qhull.cpython-310-x86_64-linux-gnu.so,sha256=0qF76iZb6kg6ppU9G_dfTyg9XKPJNUfvzchCQsrVS6o,1163696
+scipy/spatial/_qhull.pyi,sha256=dmvze3QcaoA_Be6H8zswajVatOPwtJFIFxoZFE9qR-A,5969
+scipy/spatial/_spherical_voronoi.py,sha256=x3TrK6tTkKwfSSSWcdkBOZ9i042t1Hn21oom4aES15U,13539
+scipy/spatial/_voronoi.cpython-310-x86_64-linux-gnu.so,sha256=FMRMxVzZ-dLVhUHOfaBHSoCoCP2TmyHLM7FFigsxbHE,241008
+scipy/spatial/_voronoi.pyi,sha256=aAOiF4fvHz18hmuSjieKkRItssD443p2_w1ggXOIs1g,126
+scipy/spatial/ckdtree.py,sha256=uvC-phcjhzmGLLcE_tKHPn6zrTTjGwVSren0M4jSPng,645
+scipy/spatial/distance.py,sha256=QVH_K3qK3MvElGaoMimK3VNyFmwnuGdq0MvoRumsKRw,91483
+scipy/spatial/distance.pyi,sha256=f9eGCqRUYrQt7gI37JnARDn1FkIVsKRlinx2onMshZQ,5273
+scipy/spatial/kdtree.py,sha256=Wlqqnd9uwGZ1t7UoL4uIzUhSYo247jaOpokehDGj66o,655
+scipy/spatial/qhull.py,sha256=aFE-KscuINt6QIhFC2dqhwFCYu3HSBkVXDH5exHH71s,622
+scipy/spatial/qhull_src/COPYING.txt,sha256=NNsMDE-TGGHXIFVcnNei4ijRKQuimvDy7oDEG7IDivs,1635
+scipy/spatial/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/spatial/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/spatial/tests/__pycache__/test__plotutils.cpython-310.pyc,,
+scipy/spatial/tests/__pycache__/test__procrustes.cpython-310.pyc,,
+scipy/spatial/tests/__pycache__/test_distance.cpython-310.pyc,,
+scipy/spatial/tests/__pycache__/test_hausdorff.cpython-310.pyc,,
+scipy/spatial/tests/__pycache__/test_kdtree.cpython-310.pyc,,
+scipy/spatial/tests/__pycache__/test_qhull.cpython-310.pyc,,
+scipy/spatial/tests/__pycache__/test_slerp.cpython-310.pyc,,
+scipy/spatial/tests/__pycache__/test_spherical_voronoi.cpython-310.pyc,,
+scipy/spatial/tests/data/cdist-X1.txt,sha256=ULnYAgX2_AwOVF-VE7XfnW5S0pzhx7UAoocxSnXMaWs,5750
+scipy/spatial/tests/data/cdist-X2.txt,sha256=_IJVjXsp3pvd8NNPNTLmVbHOrzl_RiEXz7cb86NfvZ4,11500
+scipy/spatial/tests/data/degenerate_pointset.npz,sha256=BIq8Hd2SS_LU0fIWAVVS7ZQx-emVRvvzgnaO2lh4gXU,22548
+scipy/spatial/tests/data/iris.txt,sha256=k19QSfkqhMmByqNMzwWDmM6wf5dt6whdGyfAyUO3AW0,15000
+scipy/spatial/tests/data/pdist-boolean-inp.txt,sha256=5Z9SMsXrtmzeUwJlVmGkrPDC_Km7nVpZIbBl7p3Hdc0,50000
+scipy/spatial/tests/data/pdist-chebyshev-ml-iris.txt,sha256=Yerj1wqIzcdyULlha-q02WBNGyS2Q5o2wAr0XVEkzis,178801
+scipy/spatial/tests/data/pdist-chebyshev-ml.txt,sha256=NEd2b-DONqUMV9f8gJ2yod17C_5fXGHHZ38PeFsXkyw,3041
+scipy/spatial/tests/data/pdist-cityblock-ml-iris.txt,sha256=UCWZJeMkMajbpjeG0FW60b0q-4r1geAyguNY6Chx5bM,178801
+scipy/spatial/tests/data/pdist-cityblock-ml.txt,sha256=8Iq7cF8oMJjpqd6qsDt_mKPQK0T8Ldot2P8C5rgbGIU,3041
+scipy/spatial/tests/data/pdist-correlation-ml-iris.txt,sha256=l2kEAu0Pm3OsFJsQtHf9Qdy5jnnoOu1v3MooBISnjP0,178801
+scipy/spatial/tests/data/pdist-correlation-ml.txt,sha256=S4GY3z-rf_BGuHmsnColMvR8KwYDyE9lqEbYT_a3Qag,3041
+scipy/spatial/tests/data/pdist-cosine-ml-iris.txt,sha256=hQzzoZrmw9OXAbqkxC8eTFXtJZrbFzMgcWMLbJlOv7U,178801
+scipy/spatial/tests/data/pdist-cosine-ml.txt,sha256=P92Tm6Ie8xg4jGSP7k7bmFRAP5MfxtVR_KacS73a6PI,3041
+scipy/spatial/tests/data/pdist-double-inp.txt,sha256=0Sx5yL8D8pyYDXTIBZAoTiSsRpG_eJz8uD2ttVrklhU,50000
+scipy/spatial/tests/data/pdist-euclidean-ml-iris.txt,sha256=3-UwBM7WZa4aCgmW_ZAdRSq8KYMq2gnkIUqU73Z0OLI,178801
+scipy/spatial/tests/data/pdist-euclidean-ml.txt,sha256=rkQA2-_d7uByKmw003lFXbXNDjHrUGBplZ8nB_TU5pk,3041
+scipy/spatial/tests/data/pdist-hamming-ml.txt,sha256=IAYroplsdz6n7PZ-vIMIJ4FjG9jC1OSxc3-oVJdSFDM,3041
+scipy/spatial/tests/data/pdist-jaccard-ml.txt,sha256=Zb42SoVEnlTj_N_ndnym3_d4RNZWeHm290hTtpp_zO8,3041
+scipy/spatial/tests/data/pdist-jensenshannon-ml-iris.txt,sha256=L7STTmlRX-z-YvksmiAxEe1UoTmDnQ_lnAjZH53Szp0,172738
+scipy/spatial/tests/data/pdist-jensenshannon-ml.txt,sha256=-sZUikGMWskONojs6fJIMX8VEWpviYYg4u1vipY6Bak,2818
+scipy/spatial/tests/data/pdist-minkowski-3.2-ml-iris.txt,sha256=N5L5CxRT5yf_vq6pFjorJ09Sr-RcnrAlH-_F3kEsyUU,178801
+scipy/spatial/tests/data/pdist-minkowski-3.2-ml.txt,sha256=DRgzqxRtvQVzFnpFAjNC9TDNgRtk2ZRkWPyAaeOx3q4,3041
+scipy/spatial/tests/data/pdist-minkowski-5.8-ml-iris.txt,sha256=jz7SGKU8GuJWASH2u428QL9c-G_-8nZvOFSOUlMdCyA,178801
+scipy/spatial/tests/data/pdist-seuclidean-ml-iris.txt,sha256=37H01o6GibccR_hKIwwbWxGX0Tuxnb-4Qc6rmDxwwUI,178801
+scipy/spatial/tests/data/pdist-seuclidean-ml.txt,sha256=YmcI7LZ6i-Wg1wjAkLVX7fmxzCj621Pc5itO3PvCm_k,3041
+scipy/spatial/tests/data/pdist-spearman-ml.txt,sha256=IrtJmDQliv4lDZ_UUjkZNso3EZyu7pMACxMB-rvHUj0,3041
+scipy/spatial/tests/data/random-bool-data.txt,sha256=MHAQdE4hPVzgu-csVVbm1DNJ80dP7XthJ1kb2In8ImM,6000
+scipy/spatial/tests/data/random-double-data.txt,sha256=GA8hYrHsTBeS864GJf0X6JRTvGlbpM8P8sJairmfnBU,75000
+scipy/spatial/tests/data/random-int-data.txt,sha256=xTUbCgoT4X8nll3kXu7S9lv-eJzZtwewwm5lFepxkdQ,10266
+scipy/spatial/tests/data/random-uint-data.txt,sha256=8IPpXhwglxzinL5PcK-PEqleZRlNKdx3zCVMoDklyrY,8711
+scipy/spatial/tests/data/selfdual-4d-polytope.txt,sha256=rkVhIL1mupGuqDrw1a5QFaODzZkdoaLMbGI_DbLLTzM,480
+scipy/spatial/tests/test__plotutils.py,sha256=fASbg0i7iLiJIEj5vIkiDuTq3wU0z3mKJY019kzKrFk,3814
+scipy/spatial/tests/test__procrustes.py,sha256=wmmnUHRdw_oID0YLi404IEWPH6vEGhvHXSeGPY_idHo,4974
+scipy/spatial/tests/test_distance.py,sha256=m0lxDXuZWREXE-k_yMHUddKqnmbRKo-g-VoVEE2Xez0,84153
+scipy/spatial/tests/test_hausdorff.py,sha256=n-Qm2gVF0zc11tDSCnXBznt5Mp0E1ekTtzfWXjqG54M,7114
+scipy/spatial/tests/test_kdtree.py,sha256=ZlrKMS1JEdkbwFE8WtEMPI3W5H8ldfPjz1D23fcrsKM,49270
+scipy/spatial/tests/test_qhull.py,sha256=v_GB-IN6UdcNdsOQtQUYDnHKNyGAq_4wYkFicEe4-hQ,43989
+scipy/spatial/tests/test_slerp.py,sha256=hYH-2ROq0iswTsli4c-yBLZfACvQL0QVCKrPWTeBNls,16396
+scipy/spatial/tests/test_spherical_voronoi.py,sha256=Ydof8dYsSoYfII5lVDJ82iVynrruwuBdg0_oESw8YoY,14492
+scipy/spatial/transform/__init__.py,sha256=vkvtowJUcu-FrMMXjEiyfnG94Cqwl000z5Nwx2F8OX0,700
+scipy/spatial/transform/__pycache__/__init__.cpython-310.pyc,,
+scipy/spatial/transform/__pycache__/_rotation_groups.cpython-310.pyc,,
+scipy/spatial/transform/__pycache__/_rotation_spline.cpython-310.pyc,,
+scipy/spatial/transform/__pycache__/rotation.cpython-310.pyc,,
+scipy/spatial/transform/_rotation.cpython-310-x86_64-linux-gnu.so,sha256=yGDxsus_6GKKrlwjSWVsXT-LM8MxDZRTcfX61AKOfYk,987072
+scipy/spatial/transform/_rotation.pyi,sha256=SI2NWoIjma0P-DaicaLVeRtafg8_SUvJeXOry2bVa5A,3080
+scipy/spatial/transform/_rotation_groups.py,sha256=XS-9K6xYnnwWywMMYMVznBYc1-0DPhADHQp_FIT3_f8,4422
+scipy/spatial/transform/_rotation_spline.py,sha256=M2i8qbPQwQ49D3mNtqll31gsCMqfqBJe8vOxMPRlD5M,14083
+scipy/spatial/transform/rotation.py,sha256=eVnQRbOorImPet4qbF0W95z_ptTNR80LSLRT2jBZAc8,612
+scipy/spatial/transform/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/spatial/transform/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/spatial/transform/tests/__pycache__/test_rotation.cpython-310.pyc,,
+scipy/spatial/transform/tests/__pycache__/test_rotation_groups.cpython-310.pyc,,
+scipy/spatial/transform/tests/__pycache__/test_rotation_spline.cpython-310.pyc,,
+scipy/spatial/transform/tests/test_rotation.py,sha256=TEyEEVsT4Qd-14wxSxF1mcUO4smcK6gZgu-GXGGfXqw,61014
+scipy/spatial/transform/tests/test_rotation_groups.py,sha256=V6DiLWvJsrdklhS-GlzcA9qEy0cTQpwaNR-7vkhBt1M,5560
+scipy/spatial/transform/tests/test_rotation_spline.py,sha256=g3prW5afu_yJxevIz2LMdRFYLfe8zq-3b6TMGw06Ads,5105
+scipy/special.pxd,sha256=l9Y21wnx5fZLvrxCeCMUWQvBI5gHx7LBhimDWptxke8,42
+scipy/special/__init__.py,sha256=8RBpMhRlS6fAXj1PH0Rj6KkfdTC4E2skg3vZrZ2Q0cs,31975
+scipy/special/__pycache__/__init__.cpython-310.pyc,,
+scipy/special/__pycache__/_add_newdocs.cpython-310.pyc,,
+scipy/special/__pycache__/_basic.cpython-310.pyc,,
+scipy/special/__pycache__/_ellip_harm.cpython-310.pyc,,
+scipy/special/__pycache__/_lambertw.cpython-310.pyc,,
+scipy/special/__pycache__/_logsumexp.cpython-310.pyc,,
+scipy/special/__pycache__/_mptestutils.cpython-310.pyc,,
+scipy/special/__pycache__/_orthogonal.cpython-310.pyc,,
+scipy/special/__pycache__/_sf_error.cpython-310.pyc,,
+scipy/special/__pycache__/_spfun_stats.cpython-310.pyc,,
+scipy/special/__pycache__/_spherical_bessel.cpython-310.pyc,,
+scipy/special/__pycache__/_support_alternative_backends.cpython-310.pyc,,
+scipy/special/__pycache__/_testutils.cpython-310.pyc,,
+scipy/special/__pycache__/add_newdocs.cpython-310.pyc,,
+scipy/special/__pycache__/basic.cpython-310.pyc,,
+scipy/special/__pycache__/orthogonal.cpython-310.pyc,,
+scipy/special/__pycache__/sf_error.cpython-310.pyc,,
+scipy/special/__pycache__/specfun.cpython-310.pyc,,
+scipy/special/__pycache__/spfun_stats.cpython-310.pyc,,
+scipy/special/_add_newdocs.py,sha256=cWyckQIFsSlIkK6swKC0OcWx0ZKlLtlC4D-bLVx-6h4,398483
+scipy/special/_basic.py,sha256=CKWvRFOjr4EhKlzbUf6S0xqolq6yZNC0FgfwupXmxIY,103790
+scipy/special/_cdflib.cpython-310-x86_64-linux-gnu.so,sha256=1L-npBimaXutX3FF_gXvaDaqPtjqgMfqvXbZyFlgc-E,187520
+scipy/special/_comb.cpython-310-x86_64-linux-gnu.so,sha256=NAq1jPghJ33K5HTGHQaFRef2kD1eA5cOP57hXpAgvdM,63456
+scipy/special/_ellip_harm.py,sha256=YHHFZXMtzdJxyjZXKsy3ocIsV-eg6ne3Up79BuFl9P8,5382
+scipy/special/_ellip_harm_2.cpython-310-x86_64-linux-gnu.so,sha256=Yg4gDMzAzxYplmCKBME9ZJtaY3khUhmdORc6DiTIeSk,138121
+scipy/special/_lambertw.py,sha256=-oSEnHFQWZiUZXMamxPWjfntWq5tt0rzHmI13DxGHBY,3962
+scipy/special/_logsumexp.py,sha256=2MyHR5PWo83qt5RrEnXWRCcWS55gy2s5UWDu30LUvaQ,9027
+scipy/special/_mptestutils.py,sha256=Yl_tYnFW1j2DbH6I-2MBNjjqt4WiDO-phVWyNj1Hpfw,14441
+scipy/special/_orthogonal.py,sha256=jcOgiGPDzhAsxeEmoYhTSDHZ_uSE5TNiG1yTvAliuXI,74558
+scipy/special/_orthogonal.pyi,sha256=XATMiU9ri9e39B5YANXPyQkMqWtfu5rDIP4NA7WSQTU,8304
+scipy/special/_precompute/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/special/_precompute/__pycache__/__init__.cpython-310.pyc,,
+scipy/special/_precompute/__pycache__/cosine_cdf.cpython-310.pyc,,
+scipy/special/_precompute/__pycache__/expn_asy.cpython-310.pyc,,
+scipy/special/_precompute/__pycache__/gammainc_asy.cpython-310.pyc,,
+scipy/special/_precompute/__pycache__/gammainc_data.cpython-310.pyc,,
+scipy/special/_precompute/__pycache__/lambertw.cpython-310.pyc,,
+scipy/special/_precompute/__pycache__/loggamma.cpython-310.pyc,,
+scipy/special/_precompute/__pycache__/struve_convergence.cpython-310.pyc,,
+scipy/special/_precompute/__pycache__/utils.cpython-310.pyc,,
+scipy/special/_precompute/__pycache__/wright_bessel.cpython-310.pyc,,
+scipy/special/_precompute/__pycache__/wright_bessel_data.cpython-310.pyc,,
+scipy/special/_precompute/__pycache__/wrightomega.cpython-310.pyc,,
+scipy/special/_precompute/__pycache__/zetac.cpython-310.pyc,,
+scipy/special/_precompute/cosine_cdf.py,sha256=ZGSeDDpLRsapyx2GbIrqqYR98fvaEQrLn7IE-fuodhE,354
+scipy/special/_precompute/expn_asy.py,sha256=JAz0hY1gBJu3Q_dvscQrSJdgKuwpjqFZVwz-sOQQ21w,1265
+scipy/special/_precompute/gammainc_asy.py,sha256=P5OFRcPkkpjGQeYCaMZ8SFSUmZG_CjrEHv8OLwgcGFc,2502
+scipy/special/_precompute/gammainc_data.py,sha256=Y5taFAdCE3W14bavUACTA3XoCxyh7_Z2NHcs-DKS75E,4077
+scipy/special/_precompute/lambertw.py,sha256=7f4F3ivouVNZwuvVX8TAi2lPB7LirPS8IfN5lEw9zI0,1961
+scipy/special/_precompute/loggamma.py,sha256=iq7ZBrUmk8pXYZwO_wINI4u8ENsLbL9VUShGjGO0Pt0,1094
+scipy/special/_precompute/struve_convergence.py,sha256=z7R0Q5_Ye-EqLI9g-yARdl_j5FooofXMRXPLVrIFJQQ,3624
+scipy/special/_precompute/utils.py,sha256=JXJuI07Jlm4bDHJFVtj0jHq05p-V1ofeXZB16Y05kzI,887
+scipy/special/_precompute/wright_bessel.py,sha256=7z2W3spGANZO31r_xauMA6hIQ0eseRlXx-zJW6du5tU,12868
+scipy/special/_precompute/wright_bessel_data.py,sha256=f1id2Gk5TPyUmSt-Evhoq2_hfRgLUU7Qu_mELKtaXGg,5647
+scipy/special/_precompute/wrightomega.py,sha256=YpmLwtGJ4qazMDY0RXjhnQiuRAISI-Pr9MwKc7pZlhc,955
+scipy/special/_precompute/zetac.py,sha256=LmhJP7JFg7XktHvfm-DgzuiWZFtVdpvYzzLOB1ePG1Q,591
+scipy/special/_sf_error.py,sha256=q_Rbfkws1ttgTQKYLt6zFTdY6DFX2HajJe_lXiNWC0c,375
+scipy/special/_specfun.cpython-310-x86_64-linux-gnu.so,sha256=mTQWpR9jY-Fi9mWZxGtYurMHKXpepRk7xit6hdcMd2I,301592
+scipy/special/_spfun_stats.py,sha256=IjK325nhaTa7koQyvlVaeCo01TN9QWRpK6mDzkuuAq0,3779
+scipy/special/_spherical_bessel.py,sha256=XbbMLs_0qsmbuM7hIb0v6LPn5QrKLwhwAQYl5PtZYjc,10420
+scipy/special/_support_alternative_backends.py,sha256=SYomM7-qPmsMO_0UYzfpVAAdaU9Y9gPb6F6g0xBOnOo,2294
+scipy/special/_test_internal.cpython-310-x86_64-linux-gnu.so,sha256=5VlK0NlIAUTjGPoeBcSe8LNZ9CxHI7_udhCjxNZzhM0,289544
+scipy/special/_test_internal.pyi,sha256=BI0xSfTmREV92CPzaHbBo6LikARpqb9hubAQgTT0W6w,338
+scipy/special/_testutils.py,sha256=pnEE50AZrNe2FJ92fM1rsEcTY7lR-zYBE2paEPhI-wk,12027
+scipy/special/_ufuncs.cpython-310-x86_64-linux-gnu.so,sha256=LcurBfEhiyqJLoEYUYU5SBgt4gAiBGwd-QjVsQy_s_g,1572553
+scipy/special/_ufuncs.pyi,sha256=Bop_e3jGG-wWIrCehOwR7Aa_qEuk-TfWi0C2Phkknmc,8937
+scipy/special/_ufuncs.pyx,sha256=yM5T3uRffyZS1vCfdBke-Kpdd9Y8GE0a0Ozpifl-EDw,890803
+scipy/special/_ufuncs_cxx.cpython-310-x86_64-linux-gnu.so,sha256=i_UufGMbl4CilLlhfuk35wEIKfVAMFDJa3NiVVNMfnI,654984
+scipy/special/_ufuncs_cxx.pxd,sha256=xBBTzhemAneLScqm5Tf3Ufz64gfrMVoeKfE5-EpZmXM,1951
+scipy/special/_ufuncs_cxx.pyx,sha256=uwWM8H7h3Os4NvGdN6fE8OmWi5rN_rZZlnBN15eTvIU,10940
+scipy/special/_ufuncs_cxx_defs.h,sha256=Qi71Kwn1-fg0klmk8fBuGq0x7-DoolwkoJzaH4gyc34,2972
+scipy/special/_ufuncs_defs.h,sha256=Yhew1gtfnDeBLn6aQr0ysVmJwehm2R_4PqxlJAFAl7E,9216
+scipy/special/add_newdocs.py,sha256=np1hD4g1B2jNT4SOMq-6PUkTsGMBEucT5IuL3kcflCg,469
+scipy/special/basic.py,sha256=LRU8rIxXx42O4eVZv21nFwswAu7JFtQ42_4xT5BwYpE,1582
+scipy/special/cython_special.cpython-310-x86_64-linux-gnu.so,sha256=G_YMrpbFbCOckbykBxkjvwnPxP9tqAuog6KMM8tWi_0,3161976
+scipy/special/cython_special.pxd,sha256=OzvZ0di3svc0wvTDEkufTwHCDiDU-F1GygJvsy_Kq0o,16349
+scipy/special/cython_special.pyi,sha256=BQVUCzV8lCylnmLCtnN0Yz_ttlqyzcLc-BZx2KPXPzM,58
+scipy/special/cython_special.pyx,sha256=E7lNHH4Jq07mM3keMhgxLmXn6i-qoTh421Ur1OSy2SY,142731
+scipy/special/orthogonal.py,sha256=2uWRTD_Wg83YzaMwYY8BAdyGVy4Z3iEc7ne5rLpdudo,1830
+scipy/special/sf_error.py,sha256=wOZqzX7iipkH39hOHqBlkmretJRbYy-K7PsnZPyaJFU,573
+scipy/special/specfun.py,sha256=bChigh8GnoirH0wQ8j_D_AY77Pl0Pd8ZqGNgjIMAZ84,826
+scipy/special/special/binom.h,sha256=Nbs4PzhKl-3bSs9AaslHgYYkQy3rHtb8ZiTXqqicW80,2359
+scipy/special/special/cephes/beta.h,sha256=V9TjdBG6gRBVykHA3fNL0fQZAdnIWxd2RbEkZ5bQkNA,7012
+scipy/special/special/cephes/const.h,sha256=ITr0sKUAP4CcYicPmmk65M9XFVupRgfF3FiqOewlbAI,2599
+scipy/special/special/cephes/gamma.h,sha256=AsGJQL5c7V9gahXe3B5_dFIfOsEK2KWqK4X8ECY3EHU,10337
+scipy/special/special/cephes/polevl.h,sha256=ClCCS13O-ePqXSxvmsPZNZR_RoyZQW7xMQo0ePSQmDU,4025
+scipy/special/special/cephes/psi.h,sha256=O9ZDjk-CbhsTpbg9jfQI5VxnxJYu9h5KfGUlf2mISxQ,6323
+scipy/special/special/cephes/trig.h,sha256=NvkMCTA1TpscUcqSQ1EIlbs7FYST2SyUdXvG2_EvANE,1304
+scipy/special/special/cephes/zeta.h,sha256=IvdUT0PdHreDUsPpjqiY4Uhvz0kq6tyegbY2CwU2u4w,4386
+scipy/special/special/config.h,sha256=aMf_pNKWE1iAgJNSnaCKqdPNuKK3Zq9uuck8h6f8Ggs,4315
+scipy/special/special/digamma.h,sha256=TG6_ayajnm-RQByvYF1ohZ93TxwDdnJwaAWoiRGDCRU,7303
+scipy/special/special/error.h,sha256=_sd-2bgRyCtPMb4wLD57i8GmfuYOINeP_o40iRRwvgE,1191
+scipy/special/special/evalpoly.h,sha256=E_GM-Idr-dF5WfeRdvhiYCioNtKRZ10kTBMON8wWm08,1131
+scipy/special/special/lambertw.h,sha256=E59hB9vFOQ3cr_jMrbt9xmwJTkXxTY4FGIFBJh-DSms,5205
+scipy/special/special/loggamma.h,sha256=eQFXyU7sOsRySn7GWV2DypOSfrwfEngSgZ3gTFKuC8k,6000
+scipy/special/special/trig.h,sha256=fLojwOOecF_eRJU5H86THXbZq1dK1hjVG98cLzN4WSg,3116
+scipy/special/special/zlog1.h,sha256=uojL5H_Oe7CipENnvenHNjUkDcXXK0qe6ynocDwSYuQ,977
+scipy/special/spfun_stats.py,sha256=fYFGN-9Q3X9zdm9KTyW6t2oixuaZzQwd_h0eyVvfGBk,545
+scipy/special/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/special/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_basic.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_bdtr.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_boxcox.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_cdflib.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_cdft_asymptotic.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_cosine_distr.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_cython_special.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_data.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_dd.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_digamma.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_ellip_harm.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_erfinv.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_exponential_integrals.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_faddeeva.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_gamma.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_gammainc.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_hyp2f1.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_hypergeometric.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_kolmogorov.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_lambertw.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_log_softmax.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_loggamma.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_logit.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_logsumexp.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_mpmath.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_nan_inputs.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_ndtr.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_ndtri_exp.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_orthogonal.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_orthogonal_eval.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_owens_t.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_pcf.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_pdtr.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_powm1.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_precompute_expn_asy.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_precompute_gammainc.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_precompute_utils.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_round.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_sf_error.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_sici.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_specfun.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_spence.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_spfun_stats.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_sph_harm.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_spherical_bessel.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_support_alternative_backends.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_trig.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_wright_bessel.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_wrightomega.cpython-310.pyc,,
+scipy/special/tests/__pycache__/test_zeta.cpython-310.pyc,,
+scipy/special/tests/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/special/tests/data/__pycache__/__init__.cpython-310.pyc,,
+scipy/special/tests/data/boost.npz,sha256=1z7Lu1FlRSI0K6BHCmJjqWhOYXwrg3RWX-OnlZP0sjE,1270643
+scipy/special/tests/data/gsl.npz,sha256=rKtwAgjLswHuUesfUSyxwn57TnUz_FpfXNXF1qoZfdg,51433
+scipy/special/tests/data/local.npz,sha256=ECuHbCfsTS-AQdWrL7bf78gUcCEzUWD1FUVeU-Bocf8,203438
+scipy/special/tests/test_basic.py,sha256=0F-3SOrg8xzCcl9NT8QOuXltThFVRHlaJfwNnxD1O64,171573
+scipy/special/tests/test_bdtr.py,sha256=QwGyt0tnutuou25mS0u2LjRgDTYI6ohM2cbZ-He6Os4,3231
+scipy/special/tests/test_boxcox.py,sha256=gUrGF7Ql1adxiPl_YxpsGunDfg-B_WpqI9Zghzool7o,2672
+scipy/special/tests/test_cdflib.py,sha256=zWmnQvdBdSbrlHg_kzoYBs5wfsVXiDuVH1N_2B5Ro48,17441
+scipy/special/tests/test_cdft_asymptotic.py,sha256=DBVVLaduZUHSWlKJ5aBXmxgdNm_YjLvWgyiTTcQq04c,1441
+scipy/special/tests/test_cosine_distr.py,sha256=zL7aWLisIEy1oNKjcynqncgsCxcPKvPb9Odr-J5Xa1M,2690
+scipy/special/tests/test_cython_special.py,sha256=3uVOa0p0OdaqxBWeyewQuedpnQtxDJB5kYolf1vRjoA,18838
+scipy/special/tests/test_data.py,sha256=iXTMMdNj-jCaXSVbhw3KTQrzLSk5wNQEdRBEDZ_2Cug,30269
+scipy/special/tests/test_dd.py,sha256=GROHQEkzIAW6KXkj8J3nPcRDAONcf1nCoArcfx30_5s,1974
+scipy/special/tests/test_digamma.py,sha256=Bm7Hh_aETx6MTN3Wu7Sijy4rYGR_1haNGsi3xfzrAKM,1382
+scipy/special/tests/test_ellip_harm.py,sha256=51KiCpQjqmf2uLZEsty-Vmr0FhoABtvMUz4218WR_S0,9640
+scipy/special/tests/test_erfinv.py,sha256=fzdEHd6MxfSyzQDO93qndXukG2jWj-XNY2X4BJRIdBI,3059
+scipy/special/tests/test_exponential_integrals.py,sha256=hlzNhZEXjo5ioPteG0P85qXuMmVD-WVc67e049tvY8Q,3687
+scipy/special/tests/test_faddeeva.py,sha256=YLY3Ylp4u_8zxTGxOb5kxNfXXEW0ld_GP2ceOR2ev_Y,2568
+scipy/special/tests/test_gamma.py,sha256=hb-ZlA2ZNz6gUGvVtMBgXFl_w30HPmthuUEAmNcz0sw,258
+scipy/special/tests/test_gammainc.py,sha256=Avv52EDQ7M8kUpiVU1BVsW_Gj5HDCzAOojLtoFojKbw,3815
+scipy/special/tests/test_hyp2f1.py,sha256=knYs5n6I8DwQEfbEj-CtXin9xPepe71Doqx1vQ3FYb0,78549
+scipy/special/tests/test_hypergeometric.py,sha256=LqbHLHkdsw8RnVeClpulG6rHRykqZsAyP43AUsKSiQI,5596
+scipy/special/tests/test_kolmogorov.py,sha256=0UoQN7q_De8Mx1NEUzhl9KGLNT8fdq6QoX11_vNS3e4,19410
+scipy/special/tests/test_lambertw.py,sha256=vd5G_70CQz3N_U15mcyE0-2KZ_8QYLKmrJ4ZL-RwFXY,4560
+scipy/special/tests/test_log_softmax.py,sha256=JdiC5C1Fm16rNdQHVWRu-FGMVOv24DPWRnguDDd1zEY,3415
+scipy/special/tests/test_loggamma.py,sha256=x6kuJf-bEnn5ECdkDSgvk3An_A-9UxVsZpqa49IwAq8,1992
+scipy/special/tests/test_logit.py,sha256=PvIgcK33vQjcvHE3_3fVarKTjZ0t35-ksZnhvoqKQrA,5540
+scipy/special/tests/test_logsumexp.py,sha256=Y4hPV6_KotWabV-v2OYVzz_tweKRlHXPCRVFqFk_0fY,6545
+scipy/special/tests/test_mpmath.py,sha256=h0rtQEkOubS2J_2DPq55pVn7dQmrDsiF6kemEWPSwNk,72665
+scipy/special/tests/test_nan_inputs.py,sha256=8aIQJ2Xz1O4Lr7cJz9KDjFj5SEVjccu3j8auelQ3lj8,1831
+scipy/special/tests/test_ndtr.py,sha256=-UMxTIi4CaaLoJ5-SGW9THChPIM3e1_fTY0L877ioNA,2680
+scipy/special/tests/test_ndtri_exp.py,sha256=13eabgdbfcL37RReiUH7g9amT9XMsTLOfwxFJXR_2Ww,3708
+scipy/special/tests/test_orthogonal.py,sha256=lPVOwR_LSrShHfCkhTrRMc2yJj0q3d6f54cW3-cwsVY,31538
+scipy/special/tests/test_orthogonal_eval.py,sha256=iT9QWDaz-V0J77mavxktZ-2oBdJ8y2JifOqiO-wGxk8,9491
+scipy/special/tests/test_owens_t.py,sha256=zRbiKje7KrYJ25f1ZuIBfiFSyNtK_bnkIW7dRETIqME,1792
+scipy/special/tests/test_pcf.py,sha256=RNjEWZGFS99DOGZkkPJ8HNqLULko8UkX0nEWFYX26NE,664
+scipy/special/tests/test_pdtr.py,sha256=VmupC2ezUR3p5tgZx0rqXEHAtzsikBW2YgaIxuGwO5A,1284
+scipy/special/tests/test_powm1.py,sha256=9hZeiQVKqV63J5oguYXv_vqolpnJX2XRO1JN0ouLWAM,2276
+scipy/special/tests/test_precompute_expn_asy.py,sha256=bCQikPkWbxVUeimvo79ToVPgwaudzxGC7Av-hPBgIU4,583
+scipy/special/tests/test_precompute_gammainc.py,sha256=6XSz0LTbFRT-k0SlnPhYtpzrlxKHaL_CZbPyDhhfT5E,4459
+scipy/special/tests/test_precompute_utils.py,sha256=MOvdbLbzjN5Z1JQQgtIyjwjuIMPX4s2bTc_kxaX67wc,1165
+scipy/special/tests/test_round.py,sha256=oZdjvm0Fxhv6o09IFOi8UUuLb3msbq00UdD8P_2Jwaw,421
+scipy/special/tests/test_sf_error.py,sha256=iXZ3bCSQ3oa5_PvrJSfpZme4Ymix5drIcE1Ji2Kfwqo,3902
+scipy/special/tests/test_sici.py,sha256=w4anBf8fiq2fmkwMSz3MX0uy35NLXVqfuW3Fwt2Nqek,1227
+scipy/special/tests/test_specfun.py,sha256=4nKU8JoGF8s4hHo0m_mUZpScU4ZkYKVhVLTBcjxVouc,1196
+scipy/special/tests/test_spence.py,sha256=fChPw7xncNCTPMUGb0C8BC-lDKHWoEXSz8Rb4Wv8vNo,1099
+scipy/special/tests/test_spfun_stats.py,sha256=mKJZ2-kLmVK3ZqX3UlDi9Mx4bRQZ9YoXQW2fxrW2kZs,1997
+scipy/special/tests/test_sph_harm.py,sha256=ySUesSgZBb4RN-QES2L6G6k3QGOCdGLt86fjJ-6EYiQ,1106
+scipy/special/tests/test_spherical_bessel.py,sha256=80H9ub9vzX4QomYZAQk-3IkCI8fNgO-dompHI3QtBVg,14311
+scipy/special/tests/test_support_alternative_backends.py,sha256=PHpXGaxGDvJeZS6mcGTxTHHDf1b2HnWh_dX1i0oLKpU,2650
+scipy/special/tests/test_trig.py,sha256=ZlzoL1qKvw2ZCbIYTNYm6QkeKqYUSeE7kUghELXZwzU,2332
+scipy/special/tests/test_wright_bessel.py,sha256=v1yLL6Ki01VuKPj5nfL-9_FaACvwdIlDsarKsm-z9EQ,4155
+scipy/special/tests/test_wrightomega.py,sha256=BW8TS_CuDjR7exA4l6ADnKhXwgFWUYaN1UIopMBJUZY,3560
+scipy/special/tests/test_zeta.py,sha256=IoBUdssBRj7noPjW-xs9xGFFihZ7wvQpPJidgMOFCOs,1367
+scipy/stats/__init__.py,sha256=k9cOA7sGZ_GO0_AbE9ecVlg-zsq2vbM6HBjKh4CjHjM,18163
+scipy/stats/__pycache__/__init__.cpython-310.pyc,,
+scipy/stats/__pycache__/_axis_nan_policy.cpython-310.pyc,,
+scipy/stats/__pycache__/_binned_statistic.cpython-310.pyc,,
+scipy/stats/__pycache__/_binomtest.cpython-310.pyc,,
+scipy/stats/__pycache__/_bws_test.cpython-310.pyc,,
+scipy/stats/__pycache__/_censored_data.cpython-310.pyc,,
+scipy/stats/__pycache__/_common.cpython-310.pyc,,
+scipy/stats/__pycache__/_constants.cpython-310.pyc,,
+scipy/stats/__pycache__/_continuous_distns.cpython-310.pyc,,
+scipy/stats/__pycache__/_covariance.cpython-310.pyc,,
+scipy/stats/__pycache__/_crosstab.cpython-310.pyc,,
+scipy/stats/__pycache__/_discrete_distns.cpython-310.pyc,,
+scipy/stats/__pycache__/_distn_infrastructure.cpython-310.pyc,,
+scipy/stats/__pycache__/_distr_params.cpython-310.pyc,,
+scipy/stats/__pycache__/_entropy.cpython-310.pyc,,
+scipy/stats/__pycache__/_fit.cpython-310.pyc,,
+scipy/stats/__pycache__/_generate_pyx.cpython-310.pyc,,
+scipy/stats/__pycache__/_hypotests.cpython-310.pyc,,
+scipy/stats/__pycache__/_kde.cpython-310.pyc,,
+scipy/stats/__pycache__/_ksstats.cpython-310.pyc,,
+scipy/stats/__pycache__/_mannwhitneyu.cpython-310.pyc,,
+scipy/stats/__pycache__/_morestats.cpython-310.pyc,,
+scipy/stats/__pycache__/_mstats_basic.cpython-310.pyc,,
+scipy/stats/__pycache__/_mstats_extras.cpython-310.pyc,,
+scipy/stats/__pycache__/_multicomp.cpython-310.pyc,,
+scipy/stats/__pycache__/_multivariate.cpython-310.pyc,,
+scipy/stats/__pycache__/_odds_ratio.cpython-310.pyc,,
+scipy/stats/__pycache__/_page_trend_test.cpython-310.pyc,,
+scipy/stats/__pycache__/_qmc.cpython-310.pyc,,
+scipy/stats/__pycache__/_qmvnt.cpython-310.pyc,,
+scipy/stats/__pycache__/_relative_risk.cpython-310.pyc,,
+scipy/stats/__pycache__/_resampling.cpython-310.pyc,,
+scipy/stats/__pycache__/_result_classes.cpython-310.pyc,,
+scipy/stats/__pycache__/_rvs_sampling.cpython-310.pyc,,
+scipy/stats/__pycache__/_sampling.cpython-310.pyc,,
+scipy/stats/__pycache__/_sensitivity_analysis.cpython-310.pyc,,
+scipy/stats/__pycache__/_stats_mstats_common.cpython-310.pyc,,
+scipy/stats/__pycache__/_stats_py.cpython-310.pyc,,
+scipy/stats/__pycache__/_survival.cpython-310.pyc,,
+scipy/stats/__pycache__/_tukeylambda_stats.cpython-310.pyc,,
+scipy/stats/__pycache__/_variation.cpython-310.pyc,,
+scipy/stats/__pycache__/_warnings_errors.cpython-310.pyc,,
+scipy/stats/__pycache__/_wilcoxon.cpython-310.pyc,,
+scipy/stats/__pycache__/biasedurn.cpython-310.pyc,,
+scipy/stats/__pycache__/contingency.cpython-310.pyc,,
+scipy/stats/__pycache__/distributions.cpython-310.pyc,,
+scipy/stats/__pycache__/kde.cpython-310.pyc,,
+scipy/stats/__pycache__/morestats.cpython-310.pyc,,
+scipy/stats/__pycache__/mstats.cpython-310.pyc,,
+scipy/stats/__pycache__/mstats_basic.cpython-310.pyc,,
+scipy/stats/__pycache__/mstats_extras.cpython-310.pyc,,
+scipy/stats/__pycache__/mvn.cpython-310.pyc,,
+scipy/stats/__pycache__/qmc.cpython-310.pyc,,
+scipy/stats/__pycache__/sampling.cpython-310.pyc,,
+scipy/stats/__pycache__/stats.cpython-310.pyc,,
+scipy/stats/_ansari_swilk_statistics.cpython-310-x86_64-linux-gnu.so,sha256=_qaK1oLgr9v_wkyB8LZEl35zR4a6WJsMXpKAzwC1lJU,277968
+scipy/stats/_axis_nan_policy.py,sha256=NnZZH10vl4E8UNNosfmMWh-lv8Xr_4LWeuuwQhJw1qI,29107
+scipy/stats/_biasedurn.cpython-310-x86_64-linux-gnu.so,sha256=Pnje7AjKCEaghydHuepyNQGPEY7jnWL0agzckcjxrxw,359512
+scipy/stats/_biasedurn.pxd,sha256=bQC6xG4RH1E5h2jCKXRMADfgGctiO5TgNlJegKrR7DY,1046
+scipy/stats/_binned_statistic.py,sha256=JYbpISuP2vn7U0FD7W5CWffC2dbMwAVeBLIlKJyxy8Q,32712
+scipy/stats/_binomtest.py,sha256=aW6p-vRkv3pSB8_0nTfT3kNAhV8Ip44A39EEPyl9Wlc,13118
+scipy/stats/_boost/__init__.py,sha256=e1_a5N-BBpz7qb0VeLQ7FOEURW9OfQ3tV42_fMDVkOU,1759
+scipy/stats/_boost/__pycache__/__init__.cpython-310.pyc,,
+scipy/stats/_boost/beta_ufunc.cpython-310-x86_64-linux-gnu.so,sha256=EKYn1JRW_eTSrQXegBZ0Xp7VWE1yUNhTXoscFT1JRso,204728
+scipy/stats/_boost/binom_ufunc.cpython-310-x86_64-linux-gnu.so,sha256=iJxMfF0bHb11DZxFjoU5_2vUdjvQwhG1Mz7sQfrQfFc,176008
+scipy/stats/_boost/hypergeom_ufunc.cpython-310-x86_64-linux-gnu.so,sha256=xVuzmm0MQF8xsfLaRevOqobV9mFkN5C6OIp_1anhh9U,120848
+scipy/stats/_boost/invgauss_ufunc.cpython-310-x86_64-linux-gnu.so,sha256=yFrVVDJg_-QyLuDscN7WGoO1mlDga75evYoelsY4kuQ,171176
+scipy/stats/_boost/nbinom_ufunc.cpython-310-x86_64-linux-gnu.so,sha256=fuwzokUvIwKP5RBGnKzIwsygJ-_da2yRp2BW84F5be8,180336
+scipy/stats/_boost/ncf_ufunc.cpython-310-x86_64-linux-gnu.so,sha256=naUKrQb-OrawOtL7EnyGBqjaqoMrfEghb6oVdOBAHuA,174120
+scipy/stats/_boost/nct_ufunc.cpython-310-x86_64-linux-gnu.so,sha256=dqECFMhlCux7V0jvyMmgmrtzaYRVjWK8Gycdveh0BOo,223872
+scipy/stats/_boost/ncx2_ufunc.cpython-310-x86_64-linux-gnu.so,sha256=Kdu_108i98ry18M_Bvh-SquZhd0mrYvOy8RqNG4J4pE,174968
+scipy/stats/_boost/skewnorm_ufunc.cpython-310-x86_64-linux-gnu.so,sha256=LPhGuDWa6YF1bgUZas4-JlP4Yc4mZWSI497-4ntc9Bs,109096
+scipy/stats/_bws_test.py,sha256=XQMGiLMPKFN3b6O4nD5tkZdcI8D8vggSx8B7XLJ5EGs,7062
+scipy/stats/_censored_data.py,sha256=Ts7GSYYti2z-8yoOJTedj6aCLnGhugLlDRdxZc4rPxs,18306
+scipy/stats/_common.py,sha256=4RqXT04Knp1CoOJuSBV6Uy_XmcmtVr0bImAbSk_VHlQ,172
+scipy/stats/_constants.py,sha256=_afhD206qrU0xVct9aXqc_ly_RFDbDdr0gul9Nz6LCg,962
+scipy/stats/_continuous_distns.py,sha256=sKcoHSKqUAskV8xkIDx26U04wWzZxDZlkA5HFNjauPQ,386328
+scipy/stats/_covariance.py,sha256=vu5OY1tuC5asr3FnwukQKwwJKUDP-Rlp0Kbe1mT36qM,22527
+scipy/stats/_crosstab.py,sha256=f4Sqooh-gPyTjLMHRbmhkVaOT-nhrOZ2NJ-gfPjvyuY,7355
+scipy/stats/_discrete_distns.py,sha256=7Hm_bUNUBM8cgjepOOWLE3se17Jtg8e07W1jL1seBHo,59346
+scipy/stats/_distn_infrastructure.py,sha256=3QkGaXLtQF-AF4KhHamPCJSJQVXekOQmkX2tNpWUTv4,148306
+scipy/stats/_distr_params.py,sha256=odGVYiGgrvM6UFujQZd9K0u6ojIIgHlURtsD7x7kAxU,8732
+scipy/stats/_entropy.py,sha256=b0wlhLQRWEIDZrOTMFfRwx4aPE6HqnJ6HTtBGoGXrpM,15232
+scipy/stats/_fit.py,sha256=_Abj6CcENqRz0z4O27Zp1q002JrXzdnKCo2KL7RjvUg,59771
+scipy/stats/_generate_pyx.py,sha256=gHEsVa0zFLC5CSEpsalRLxA0R6DP1ghV9VPV1_ZxDh8,829
+scipy/stats/_hypotests.py,sha256=-3f22z3TZNK7W_Cu-xmf2vy_gALLXYW3paYw48sNzcI,78852
+scipy/stats/_kde.py,sha256=8eZxz9JkZXUphFb6-ibzvT2fUpMY615kU4KmwRYMu4I,25138
+scipy/stats/_ksstats.py,sha256=Svh0qUd7GI1qmMNRIlv8_AfH0Rf7SmVn9mQ2gQdjd3k,20116
+scipy/stats/_levy_stable/__init__.py,sha256=n6IgB_ZpXpe05d3399bs31shsCZVepUOIrrW7pt149g,45541
+scipy/stats/_levy_stable/__pycache__/__init__.cpython-310.pyc,,
+scipy/stats/_levy_stable/levyst.cpython-310-x86_64-linux-gnu.so,sha256=AhrBC3lJHLulZ34FL8coumDDtKGT4nMHsA2imYy8YLA,66512
+scipy/stats/_mannwhitneyu.py,sha256=GojWBxRMWgQEGGSJjona90xX18AYiKcSPjJy9rvqtV0,20522
+scipy/stats/_morestats.py,sha256=RwInwxrEuX7q4GORyyVN6AVnXPVLCaoO2t-RZS3dK_k,186567
+scipy/stats/_mstats_basic.py,sha256=2mJYZK1eNIgRcptmSjZgKsRr0DKtNCAbxLEQiwuvRWA,119363
+scipy/stats/_mstats_extras.py,sha256=TeBf3hF0OtcnDk3pTW6iutrzW0H0T7dXx923gHib2pY,16370
+scipy/stats/_multicomp.py,sha256=ae_nYfCQVLduyPb5sRTCcV0MpcymnV4H8SM35u3E8NY,17282
+scipy/stats/_multivariate.py,sha256=ZPrMbYAus8PUyWDWu87ZWf7fdhQUQrqsX8okqlnQmFY,237847
+scipy/stats/_mvn.cpython-310-x86_64-linux-gnu.so,sha256=5Blqrk4HPmojSUIAaTA8byccxam5LulYu_OV8C1gfW4,84952
+scipy/stats/_odds_ratio.py,sha256=S_zkibLVH7K8Qj6IO6sTkXtq-lGsp8sj_wIXitgu7Es,17858
+scipy/stats/_page_trend_test.py,sha256=OvisWd3E6CF7rdFRGv46HWOfJlyHalMITt5iJPzE8LI,18987
+scipy/stats/_qmc.py,sha256=ZwXM8sAjx8NfkHXQOC6uEdvIydj-vSfHVks73njFGnY,99365
+scipy/stats/_qmc_cy.cpython-310-x86_64-linux-gnu.so,sha256=KnU9jGK3JJX0Jie06f2IRZ36iMXHL5hkETDJx-8Yles,286880
+scipy/stats/_qmc_cy.pyi,sha256=xOpTSlaG_1YDZhkJjQQtukbcgOTAR9FpcRMkU5g9mXc,1134
+scipy/stats/_qmvnt.py,sha256=Mss1xkmWwM3o4Y_Mw78JI-eB4pZBeig47oAVpBcrMMc,18767
+scipy/stats/_rcont/__init__.py,sha256=dUzWdRuJNAxnGYVFjDqUB8DMYti3by1WziKEfBDOlB4,84
+scipy/stats/_rcont/__pycache__/__init__.cpython-310.pyc,,
+scipy/stats/_rcont/rcont.cpython-310-x86_64-linux-gnu.so,sha256=82H3m0rDu7M0sfoqoJxPSTGMnXKkAdrnxpxuSjhYV7g,299408
+scipy/stats/_relative_risk.py,sha256=5zeYBMshYwtomiLTkaXc1nmWYD0FsaQNjf0iuDadtSc,9571
+scipy/stats/_resampling.py,sha256=4PzopnEwUUZVMkPZlcBl4fddOu1HCZolna8iOmPenXc,81473
+scipy/stats/_result_classes.py,sha256=_ghuGdpFsCMuEmnfHg1AeorR-fASc77ACXYWEmQzXjI,1085
+scipy/stats/_rvs_sampling.py,sha256=Hz5U8lTHrVPZtGg-OeAKzSA5HW9M51OwH8AU4j2xXVM,2233
+scipy/stats/_sampling.py,sha256=YJ1mG2tkXW4Em-virElY-cNzMXn8lHbOxNxujqDsPY0,46408
+scipy/stats/_sensitivity_analysis.py,sha256=qu5mNpZZhggy0mywqB8jsqcZZagzsH0mICG4FIz7bhM,24745
+scipy/stats/_sobol.cpython-310-x86_64-linux-gnu.so,sha256=lD6iGaUNOL4TzPVLWM1MC019odo5DiOj6j3nVz0AXrA,403816
+scipy/stats/_sobol.pyi,sha256=TAywylI75AF9th9QZY8TYfHvIQ1cyM5QZi7eBOAkrbg,971
+scipy/stats/_sobol_direction_numbers.npz,sha256=SFmTEUfULORluGBcsnf5V9mLg50DGU_fBleTV5BtGTs,589334
+scipy/stats/_stats.cpython-310-x86_64-linux-gnu.so,sha256=QzRr7fyOgXBEmeRgMl-9NCBfiXZg8SllcXtv31TYf_8,766320
+scipy/stats/_stats.pxd,sha256=US2p3SKahv_OPhZClWl_h3cZe7UncGZoQJeixoeFOPg,708
+scipy/stats/_stats_mstats_common.py,sha256=ken8kD9hSgUOhmN6biu0d9QNaumzMB5uLb04ZQeib0Y,18593
+scipy/stats/_stats_py.py,sha256=7Ny49fBYXJkDUB4q55MuTm1z4ZPjbZTjZvcbtUtIqnQ,423593
+scipy/stats/_stats_pythran.cpython-310-x86_64-linux-gnu.so,sha256=SJVnF2IAScl7diLBUJkDJ3vBcs3HHH5S1L7cNRDzh1Y,158904
+scipy/stats/_survival.py,sha256=a6pNTOpNnkq3XFoGuid1cJrsObuzpgI7psUzP0PU2j0,26005
+scipy/stats/_tukeylambda_stats.py,sha256=eodvo09rCVfcYa1Uh6BKHKvXyY8K5Zg2uGQX1phQ6Ew,6871
+scipy/stats/_unuran/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/stats/_unuran/__pycache__/__init__.cpython-310.pyc,,
+scipy/stats/_unuran/unuran_wrapper.cpython-310-x86_64-linux-gnu.so,sha256=-ysKtSG4zPKLCTCJlIraToMgjxPhIc4NIxTGqS4wDiw,1589832
+scipy/stats/_unuran/unuran_wrapper.pyi,sha256=RGAWLNAHrkAtaS-EjIkcTIr7sag9b0Lx_3i7s_keBfk,5551
+scipy/stats/_variation.py,sha256=oHqUpfaL49IxpLmgac1te5Av5MXuScP9XrxRzywJR6I,4375
+scipy/stats/_warnings_errors.py,sha256=MpucxNFYEDytXh7vrZCMqTkRfuXTvvMpQ2W_Ak2OnPk,1196
+scipy/stats/_wilcoxon.py,sha256=wkgJyjir4LkHSeJXWKn1akskHxnNB9_ZGKEZ-8CqfH4,7936
+scipy/stats/biasedurn.py,sha256=kSspd2wFUf85L3FgTYA04jg7oq9ROtqppSMMoPfPm7E,529
+scipy/stats/contingency.py,sha256=8Imh2sKSk_il8o55LaQTC0HMODNnjC4aAv4RW6W0zCk,16275
+scipy/stats/distributions.py,sha256=9Kt2fyTohorJcf6a7M9DYH8Nu4jEU66nKP01cRhKmuE,859
+scipy/stats/kde.py,sha256=_Bawa8xgGYr6hM1c7AM1eKFSZMuV124sA_NIKUqG7Ho,720
+scipy/stats/morestats.py,sha256=q2zUyJucrLoBeADOzPjI8ZeOXvuAzg_wGowBG4EdmMU,1391
+scipy/stats/mstats.py,sha256=aRbrykjrvl-qOBkmGjlFMH4rbWYSqBBQHReanSAomFg,2466
+scipy/stats/mstats_basic.py,sha256=y0qYsc9UjIN6FLUTDGRZSteuDvLsvyDYbru25xfWCKQ,1888
+scipy/stats/mstats_extras.py,sha256=aORMhUJUmlI23msX7BA-GwTH3TeUZg1qRA9IE5X5WWM,785
+scipy/stats/mvn.py,sha256=1vEs5P-H69S2KnQjUiAvA5E3VxyiAOutYPr2npkQ2LE,565
+scipy/stats/qmc.py,sha256=qN3l4emoGfQKZMOAnFgoQaKh2bJGaBzgCGwW1Ba9mU4,11663
+scipy/stats/sampling.py,sha256=Tyd68aXwZV51Fwr5pl41WapJ05OG3XWWcYlsQeg6LgA,1683
+scipy/stats/stats.py,sha256=YPMYFQOjf3NFWt1kkXTZNMe62TpHaaBDa7CjIvQkw24,2140
+scipy/stats/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+scipy/stats/tests/__pycache__/__init__.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/common_tests.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_axis_nan_policy.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_binned_statistic.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_boost_ufuncs.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_censored_data.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_contingency.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_continuous_basic.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_continuous_fit_censored.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_crosstab.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_discrete_basic.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_discrete_distns.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_distributions.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_entropy.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_fast_gen_inversion.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_fit.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_hypotests.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_kdeoth.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_morestats.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_mstats_basic.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_mstats_extras.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_multicomp.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_multivariate.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_odds_ratio.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_qmc.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_rank.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_relative_risk.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_resampling.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_sampling.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_sensitivity_analysis.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_stats.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_survival.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_tukeylambda_stats.cpython-310.pyc,,
+scipy/stats/tests/__pycache__/test_variation.cpython-310.pyc,,
+scipy/stats/tests/common_tests.py,sha256=buhvK6hFtUkMIu1iKuiqXwbg_IGeVJ0e4Ml66xuzFXg,12288
+scipy/stats/tests/data/__pycache__/_mvt.cpython-310.pyc,,
+scipy/stats/tests/data/__pycache__/fisher_exact_results_from_r.cpython-310.pyc,,
+scipy/stats/tests/data/_mvt.py,sha256=OvFCmMqI74DWIgo32UV55dP1nzvFvYBSyYcmKJes9pI,6905
+scipy/stats/tests/data/fisher_exact_results_from_r.py,sha256=BKxPAi4h3IOebcZYGxCbutYuAX0tlb40P0DEkfEi918,27349
+scipy/stats/tests/data/jf_skew_t_gamlss_pdf_data.npy,sha256=JU0t7kpNVHuTMcYCQ8b8_K_9JsixBNCNT2BFp2RbO7o,4064
+scipy/stats/tests/data/levy_stable/stable-Z1-cdf-sample-data.npy,sha256=zxjB8tZaIyvyxxISgt8xvyqL6Cevr8TtgQ7TdFfuiYo,183728
+scipy/stats/tests/data/levy_stable/stable-Z1-pdf-sample-data.npy,sha256=_umVErq0zMZWm0e5JOSwNOHNurViT6_H4SBki9X3oSg,183688
+scipy/stats/tests/data/levy_stable/stable-loc-scale-sample-data.npy,sha256=88cZ7dVDH7nnuey20Z48p6kJUpi9GfImaFsPykDwwHM,9328
+scipy/stats/tests/data/nist_anova/AtmWtAg.dat,sha256=Qdd0i7H4cNhAABfFOZPuplhi_9SCquFpO-hNkyRcMD8,3063
+scipy/stats/tests/data/nist_anova/SiRstv.dat,sha256=x9wJ2g1qnzf4DK_w9F_WiOiDMDEg4td2z6uU77G07xM,1947
+scipy/stats/tests/data/nist_anova/SmLs01.dat,sha256=KdnJedRthF7XLA-w7XkIPIMTgzu89yBAMmZA2H4uQOQ,6055
+scipy/stats/tests/data/nist_anova/SmLs02.dat,sha256=nCPyxRk1dAoSPWiC7kG4dLaXs2GL3-KRXRt2NwgXoIA,46561
+scipy/stats/tests/data/nist_anova/SmLs03.dat,sha256=6yPHiQSk0KI4oURQOk99t-uEm-IZN-8eIPHb_y0mQ1U,451566
+scipy/stats/tests/data/nist_anova/SmLs04.dat,sha256=fI-HpgJF9cdGdBinclhVzOcWCCc5ZJZuXalUwirV-lc,6815
+scipy/stats/tests/data/nist_anova/SmLs05.dat,sha256=iJTaAWUFn7DPLTd9bQh_EMKEK1DPG0fnN8xk7BQlPRE,53799
+scipy/stats/tests/data/nist_anova/SmLs06.dat,sha256=riOkYT-LRgmJhPpCK32x7xYnD38gwnh_Eo1X8OK3eN8,523605
+scipy/stats/tests/data/nist_anova/SmLs07.dat,sha256=QtSS11d-vkVvqaIEeJ6oNwyET1CKoyQqjlfBl2sTOJA,7381
+scipy/stats/tests/data/nist_anova/SmLs08.dat,sha256=qrxQQ0I6gnhrefygKwT48x-bz-8laD8Vpn7c81nITRg,59228
+scipy/stats/tests/data/nist_anova/SmLs09.dat,sha256=qmELOQyNlH7CWOMt8PQ0Z_yxgg9Hxc4lqZOuHZxxWuc,577633
+scipy/stats/tests/data/nist_linregress/Norris.dat,sha256=zD_RTRxfqJHVZTAAyddzLDDbhCzKSfwFGr3hwZ1nq30,2591
+scipy/stats/tests/data/rel_breitwigner_pdf_sample_data_ROOT.npy,sha256=7vTccC3YxuMcGMdOH4EoTD6coqtQKC3jnJrTC3u4520,38624
+scipy/stats/tests/data/studentized_range_mpmath_ref.json,sha256=icZGNBodwmJNzOyEki9MreI2lS6nQJNWfnVJiHRNRNM,29239
+scipy/stats/tests/test_axis_nan_policy.py,sha256=pNw12PLiF58FVRUPvFvE-DbNGokYS8AH-QFcyJO-lV0,51478
+scipy/stats/tests/test_binned_statistic.py,sha256=WE5KdJq4zJxZ1LuYp8lv-RMcTEyjuSkjvFHWsGMujkM,18814
+scipy/stats/tests/test_boost_ufuncs.py,sha256=B9lwHkVasspQA78Rz3vtLQESnPRC7Z6R9druZeebs9Q,1825
+scipy/stats/tests/test_censored_data.py,sha256=pAQfSHhmcetcxoS1ZgIHVm1pEbapW7az7I-y_8phb5w,6935
+scipy/stats/tests/test_contingency.py,sha256=fMeGnTldQjLa5CSaaQ6qH90JXzrUivthVD-9DafgQm0,7706
+scipy/stats/tests/test_continuous_basic.py,sha256=-XYuKdMujql8lSh3Xq-vX0UGV32RI0-S0722lmepnkg,41793
+scipy/stats/tests/test_continuous_fit_censored.py,sha256=7hu1sSo9hhh0g9pmPMmjj2BI2rkxvA1h20XdMYZeyog,24188
+scipy/stats/tests/test_crosstab.py,sha256=tvCoZGfVasNIhYxLQIe3dcdMm34s2ykxxPmCRTIOFc0,3882
+scipy/stats/tests/test_discrete_basic.py,sha256=6wVF_k93w1I2ZMtb2kaJ2LK0rygVKoiPRNm87Oue1gE,19924
+scipy/stats/tests/test_discrete_distns.py,sha256=tdrO5avvjTRHi9z1uXIxmqGIZKO8hCCGwgY0cLrnLkI,22684
+scipy/stats/tests/test_distributions.py,sha256=_0zETqAJu1LQi4hqfmlCuR-7L-IMDTCzD860V7kcFII,384266
+scipy/stats/tests/test_entropy.py,sha256=92tO5uF3bpqUoU0gpmn89fInuKjVTatXPf5hwh9Kbns,11281
+scipy/stats/tests/test_fast_gen_inversion.py,sha256=2FV7tIuHWfjLGO4xMDi4j5poA1zBwEs-tpkwSVDaLrs,15889
+scipy/stats/tests/test_fit.py,sha256=GqCiCnEivEGOkloerHmKClzwAzQa-bpvf6-nWVP0Qys,45662
+scipy/stats/tests/test_hypotests.py,sha256=e8FUHEowBTmeixb1g9yTpvs5mZofJeRQJmlxVaqHS1o,80302
+scipy/stats/tests/test_kdeoth.py,sha256=cCEieP06bjuIrS-V5P7q6T7st0z5zG1AR9KyEywvWew,20470
+scipy/stats/tests/test_morestats.py,sha256=leIrk4vutRvjFxgREgs7zVcPDnI96QOh1BNn_nYKNiE,127621
+scipy/stats/tests/test_mstats_basic.py,sha256=4dvTBP06G8tEbqZwimB9y0HxHGdyor_x21AbUHeqn6o,86407
+scipy/stats/tests/test_mstats_extras.py,sha256=CCexzT1lksTG_WvGvHn6-CuWd_ZXoFviNGnBZd_hE7Y,7297
+scipy/stats/tests/test_multicomp.py,sha256=xLlLP54cWsLAbSsfodoTkuJa9FJM1qKnlSrDGE-jRZ0,17826
+scipy/stats/tests/test_multivariate.py,sha256=naPnWGp6fXMS4ALDnqDd4p2oWmTEqYbczxzTQi5494E,153313
+scipy/stats/tests/test_odds_ratio.py,sha256=RIsmgnmUUH3DvynDRZUaS6llCbXm2oWIfPa48IJJ-gI,6705
+scipy/stats/tests/test_qmc.py,sha256=MsZ_hgjfxSXpqLlkKrk8x1FJy8ImmZwF2cVrcc1uiKM,54645
+scipy/stats/tests/test_rank.py,sha256=uxJXitafsPrfI3yrdVOT1Hiz3abzy5vCRafSnpn_KfU,11721
+scipy/stats/tests/test_relative_risk.py,sha256=jzOGNQ2y9_YfFnXiGAiRDrgahy66qQkw6ZkHgygCJMA,3646
+scipy/stats/tests/test_resampling.py,sha256=X8uKrXUDZbKETZrPmv5cmHilyfIzyfwj5OPPm5beUyw,71766
+scipy/stats/tests/test_sampling.py,sha256=EOtDuGLi87801MG0rkDsJ6n7PfIO8f44n4xjdt0vxY4,54513
+scipy/stats/tests/test_sensitivity_analysis.py,sha256=mMifx96zCAx1OOM0Er3ugd_S2I6bih9GF1pir6djNyQ,10134
+scipy/stats/tests/test_stats.py,sha256=yNC3SPq7IPFJWZLJxBAZS4z3n_mn8VzVAL8VV1yug8M,360179
+scipy/stats/tests/test_survival.py,sha256=ky3R88sMfKUkqTs6wXUTjOjK1BzCWpxS16crycohUps,22265
+scipy/stats/tests/test_tukeylambda_stats.py,sha256=6WUBNVoTseVjfrHfWXtU11gTgmRcdnwAPLQOI0y_5U8,3231
+scipy/stats/tests/test_variation.py,sha256=Xnsn0fk4lqtk-ji1VhXxTdDAg9fHv02Q6Uv82-Xx6v4,6292
+scipy/version.py,sha256=JZiJ_CrvfTTDHoKWU40OrVAfmZynLFk9T5-Wy9SNHuo,264
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy-1.13.0.dist-info/WHEEL b/env-llmeval/lib/python3.10/site-packages/scipy-1.13.0.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..4e4c38ae320920b8f083b87f408214cdecd350d2
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy-1.13.0.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: meson
+Root-Is-Purelib: false
+Tag: cp310-cp310-manylinux_2_17_x86_64
+Tag: cp310-cp310-manylinux2014_x86_64
+
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_C.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/torch/_C.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..ba1b6403f09ffb72b9ebb6ee18afbf52ae73e83b
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_C.cpython-310-x86_64-linux-gnu.so differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_VF.py b/env-llmeval/lib/python3.10/site-packages/torch/_VF.py
new file mode 100644
index 0000000000000000000000000000000000000000..c6b63c511959616aeb787f4303015241057201de
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_VF.py
@@ -0,0 +1,30 @@
+"""
+This makes the functions in torch._C._VariableFunctions available as
+ torch._VF.
+without mypy being able to find them.
+
+A subset of those functions are mapped to ATen functions in
+torch/jit/_builtins.py
+
+See https://github.com/pytorch/pytorch/issues/21478 for the reason for
+introducing torch._VF
+
+"""
+import sys
+import types
+
+import torch
+
+
+class VFModule(types.ModuleType):
+ vf: types.ModuleType
+
+ def __init__(self, name):
+ super().__init__(name)
+ self.vf = torch._C._VariableFunctions
+
+ def __getattr__(self, attr):
+ return getattr(self.vf, attr)
+
+
+sys.modules[__name__] = VFModule(__name__)
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_VF.pyi b/env-llmeval/lib/python3.10/site-packages/torch/_VF.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..79c00ba48bf5507aa11384e0897456c9ee5e99ff
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_VF.pyi
@@ -0,0 +1,2027 @@
+# @generated from torch/_C/_VariableFunctions.pyi.in
+# mypy: disable-error-code="type-arg"
+
+import builtins
+from typing import (
+ Any,
+ Callable,
+ ContextManager,
+ Iterator,
+ List,
+ Literal,
+ NamedTuple,
+ Optional,
+ overload,
+ Sequence,
+ Tuple,
+ TypeVar,
+ Union,
+)
+
+import torch
+from torch import contiguous_format, Generator, inf, memory_format, strided, SymInt, Tensor
+from torch.types import (
+ _bool,
+ _complex,
+ _device,
+ _dtype,
+ _float,
+ _int,
+ _layout,
+ _qscheme,
+ _size,
+ Device,
+ Number,
+)
+
+from torch._prims_common import DeviceLikeType
+
+@overload
+def __and__(input: Tensor, other: Tensor) -> Tensor: ...
+@overload
+def __and__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ...
+@overload
+def __lshift__(input: Tensor, other: Tensor) -> Tensor: ...
+@overload
+def __lshift__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ...
+@overload
+def __or__(input: Tensor, other: Tensor) -> Tensor: ...
+@overload
+def __or__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ...
+@overload
+def __rshift__(input: Tensor, other: Tensor) -> Tensor: ...
+@overload
+def __rshift__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ...
+@overload
+def __xor__(input: Tensor, other: Tensor) -> Tensor: ...
+@overload
+def __xor__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ...
+def _adaptive_avg_pool2d(input: Tensor, output_size: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]]) -> Tensor: ...
+def _adaptive_avg_pool3d(input: Tensor, output_size: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]]) -> Tensor: ...
+def _add_batch_dim(input: Tensor, batch_dim: _int, level: _int) -> Tensor: ...
+@overload
+def _add_relu(input: Tensor, other: Tensor, *, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def _add_relu(input: Tensor, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor: ...
+@overload
+def _add_relu_(input: Tensor, other: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor: ...
+@overload
+def _add_relu_(input: Tensor, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor: ...
+def _addmm_activation(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, use_gelu: _bool = False, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def _aminmax(input: Tensor) -> Tuple[Tensor, Tensor]: ...
+@overload
+def _aminmax(input: Tensor, dim: _int, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: ...
+def _amp_foreach_non_finite_check_and_unscale_(self: Union[Tuple[Tensor, ...], List[Tensor]], found_inf: Tensor, inv_scale: Tensor) -> None: ...
+def _amp_update_scale_(input: Tensor, growth_tracker: Tensor, found_inf: Tensor, scale_growth_factor: _float, scale_backoff_factor: _float, growth_interval: _int) -> Tensor: ...
+@overload
+def _assert_async(input: Tensor) -> None: ...
+@overload
+def _assert_async(input: Tensor, assert_msg: str) -> None: ...
+def _assert_tensor_metadata(a: Tensor, size: Optional[Sequence[Union[_int, SymInt]]] = None, stride: Optional[Sequence[Union[_int, SymInt]]] = None, dtype: Optional[_dtype] = None) -> None: ...
+def _batch_norm_impl_index(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, momentum: _float, eps: _float, cudnn_enabled: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor, _int]: ...
+def _cast_Byte(input: Tensor, non_blocking: _bool = False) -> Tensor: ...
+def _cast_Char(input: Tensor, non_blocking: _bool = False) -> Tensor: ...
+def _cast_Double(input: Tensor, non_blocking: _bool = False) -> Tensor: ...
+def _cast_Float(input: Tensor, non_blocking: _bool = False) -> Tensor: ...
+def _cast_Half(input: Tensor, non_blocking: _bool = False) -> Tensor: ...
+def _cast_Int(input: Tensor, non_blocking: _bool = False) -> Tensor: ...
+def _cast_Long(input: Tensor, non_blocking: _bool = False) -> Tensor: ...
+def _cast_Short(input: Tensor, non_blocking: _bool = False) -> Tensor: ...
+def _choose_qparams_per_tensor(input: Tensor, reduce_range: _bool = False) -> Tuple[_float, _int]: ...
+def _coalesce(input: Tensor) -> Tensor: ...
+def _compute_linear_combination(input: Tensor, coefficients: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def _conj(input: Tensor) -> Tensor: ...
+def _conj_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def _conj_physical(input: Tensor) -> Tensor: ...
+def _convert_indices_from_coo_to_csr(input: Tensor, size: _int, *, out_int32: _bool = False, out: Optional[Tensor] = None) -> Tensor: ...
+def _convert_indices_from_csr_to_coo(crow_indices: Tensor, col_indices: Tensor, *, out_int32: _bool = False, transpose: _bool = False, out: Optional[Tensor] = None) -> Tensor: ...
+def _convert_weight_to_int4pack(input: Tensor, innerKTiles: _int) -> Tensor: ...
+@overload
+def _convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], transposed: _bool, output_padding: _size, groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool, cudnn_enabled: _bool) -> Tensor: ...
+@overload
+def _convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], transposed: _bool, output_padding: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool, cudnn_enabled: _bool, allow_tf32: _bool) -> Tensor: ...
+def _convolution_mode(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: str, dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
+def _copy_from(input: Tensor, dst: Tensor, non_blocking: _bool = False) -> Tensor: ...
+def _copy_from_and_resize(input: Tensor, dst: Tensor) -> Tensor: ...
+def _cslt_compress(input: Tensor) -> Tensor: ...
+def _cslt_sparse_mm(compressed_A: Tensor, dense_B: Tensor, bias: Optional[Tensor] = None, alpha: Optional[Tensor] = None, out_dtype: Optional[_dtype] = None, transpose_result: _bool = False) -> Tensor: ...
+@overload
+def _ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int = 0, zero_infinity: _bool = False) -> Tuple[Tensor, Tensor]: ...
+@overload
+def _ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int = 0, zero_infinity: _bool = False) -> Tuple[Tensor, Tensor]: ...
+@overload
+def _cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int, deterministic: _bool, zero_infinity: _bool) -> Tuple[Tensor, Tensor]: ...
+@overload
+def _cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int, deterministic: _bool, zero_infinity: _bool) -> Tuple[Tensor, Tensor]: ...
+def _cudnn_init_dropout_state(dropout: _float, train: _bool, dropout_seed: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+def _cudnn_rnn(input: Tensor, weight: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, weight_buf: Optional[Tensor], hx: Tensor, cx: Optional[Tensor], mode: _int, hidden_size: Union[_int, SymInt], proj_size: Union[_int, SymInt], num_layers: _int, batch_first: _bool, dropout: _float, train: _bool, bidirectional: _bool, batch_sizes: Sequence[Union[_int, SymInt]], dropout_state: Optional[Tensor]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: ...
+def _cudnn_rnn_flatten_weight(weight_arr: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, input_size: Union[_int, SymInt], mode: _int, hidden_size: Union[_int, SymInt], proj_size: Union[_int, SymInt], num_layers: _int, batch_first: _bool, bidirectional: _bool) -> Tensor: ...
+def _cufft_clear_plan_cache(device_index: _int) -> None: ...
+def _cufft_get_plan_cache_max_size(device_index: _int) -> _int: ...
+def _cufft_get_plan_cache_size(device_index: _int) -> _int: ...
+def _cufft_set_plan_cache_max_size(device_index: _int, max_size: _int) -> None: ...
+def _cummax_helper(input: Tensor, values: Tensor, indices: Tensor, dim: _int) -> None: ...
+def _cummin_helper(input: Tensor, values: Tensor, indices: Tensor, dim: _int) -> None: ...
+def _debug_has_internal_overlap(input: Tensor) -> _int: ...
+def _dim_arange(like: Tensor, dim: _int) -> Tensor: ...
+def _dirichlet_grad(x: Tensor, alpha: Tensor, total: Tensor) -> Tensor: ...
+def _disable_functionalization(): ...
+@overload
+def _efficientzerotensor(size: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def _efficientzerotensor(*size: _int, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+def _embedding_bag(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool = False, mode: _int = 0, sparse: _bool = False, per_sample_weights: Optional[Tensor] = None, include_last_offset: _bool = False, padding_idx: _int = -1) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
+def _embedding_bag_forward_only(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool = False, mode: _int = 0, sparse: _bool = False, per_sample_weights: Optional[Tensor] = None, include_last_offset: _bool = False, padding_idx: _int = -1) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
+@overload
+def _empty_affine_quantized(size: Sequence[Union[_int, SymInt]], *, scale: _float = 1, zero_point: _int = 0, memory_format: Optional[memory_format] = contiguous_format, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def _empty_affine_quantized(*size: _int, scale: _float = 1, zero_point: _int = 0, memory_format: Optional[memory_format] = contiguous_format, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def _empty_per_channel_affine_quantized(size: Sequence[Union[_int, SymInt]], *, scales: Tensor, zero_points: Tensor, axis: _int, memory_format: Optional[memory_format] = contiguous_format, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def _empty_per_channel_affine_quantized(*size: _int, scales: Tensor, zero_points: Tensor, axis: _int, memory_format: Optional[memory_format] = contiguous_format, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+def _enable_functionalization(*, reapply_views: _bool = False): ...
+def _euclidean_dist(x1: Tensor, x2: Tensor) -> Tensor: ...
+def _fake_quantize_learnable_per_channel_affine(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int, quant_min: _int, quant_max: _int, grad_factor: _float = 1.0) -> Tensor: ...
+def _fake_quantize_learnable_per_tensor_affine(input: Tensor, scale: Tensor, zero_point: Tensor, quant_min: _int, quant_max: _int, grad_factor: _float = 1.0) -> Tensor: ...
+def _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(input: Tensor, scale: Tensor, zero_point: Tensor, fake_quant_enabled: Tensor, quant_min: _int, quant_max: _int) -> torch.return_types._fake_quantize_per_tensor_affine_cachemask_tensor_qparams: ...
+def _fft_c2c(input: Tensor, dim: Sequence[Union[_int, SymInt]], normalization: _int, forward: _bool, *, out: Optional[Tensor] = None) -> Tensor: ...
+def _fft_c2r(input: Tensor, dim: _size, normalization: _int, last_dim_size: Union[_int, SymInt], *, out: Optional[Tensor] = None) -> Tensor: ...
+def _fft_r2c(input: Tensor, dim: _size, normalization: _int, onesided: _bool, *, out: Optional[Tensor] = None) -> Tensor: ...
+def _fill_mem_eff_dropout_mask_(input: Tensor, dropout_p: _float, seed: _int, offset: _int) -> Tensor: ...
+def _foobar(input: Tensor, arg1: _bool = True, arg2: _bool = True, *, arg3: _bool = True) -> Tensor: ...
+def _foreach_abs(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+def _foreach_abs_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+def _foreach_acos(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+def _foreach_acos_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+@overload
+def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> List[Tensor]: ...
+@overload
+def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Union[Number, _complex] = 1) -> List[Tensor]: ...
+@overload
+def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor, *, alpha: Union[Number, _complex] = 1) -> List[Tensor]: ...
+@overload
+def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> List[Tensor]: ...
+@overload
+def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
+@overload
+def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Union[Number, _complex] = 1) -> None: ...
+@overload
+def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor, *, alpha: Union[Number, _complex] = 1) -> None: ...
+@overload
+def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ...
+@overload
+def _foreach_addcdiv(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> List[Tensor]: ...
+@overload
+def _foreach_addcdiv(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> List[Tensor]: ...
+@overload
+def _foreach_addcdiv(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Union[Number, _complex] = 1) -> List[Tensor]: ...
+@overload
+def _foreach_addcdiv_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
+@overload
+def _foreach_addcdiv_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> None: ...
+@overload
+def _foreach_addcdiv_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Union[Number, _complex] = 1) -> None: ...
+@overload
+def _foreach_addcmul(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> List[Tensor]: ...
+@overload
+def _foreach_addcmul(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> List[Tensor]: ...
+@overload
+def _foreach_addcmul(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Union[Number, _complex] = 1) -> List[Tensor]: ...
+@overload
+def _foreach_addcmul_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
+@overload
+def _foreach_addcmul_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> None: ...
+@overload
+def _foreach_addcmul_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Union[Number, _complex] = 1) -> None: ...
+def _foreach_asin(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+def _foreach_asin_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+def _foreach_atan(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+def _foreach_atan_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+def _foreach_ceil(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+def _foreach_ceil_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+@overload
+def _foreach_clamp_max(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> List[Tensor]: ...
+@overload
+def _foreach_clamp_max(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> List[Tensor]: ...
+@overload
+def _foreach_clamp_max(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+@overload
+def _foreach_clamp_max_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
+@overload
+def _foreach_clamp_max_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ...
+@overload
+def _foreach_clamp_max_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+@overload
+def _foreach_clamp_min(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> List[Tensor]: ...
+@overload
+def _foreach_clamp_min(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> List[Tensor]: ...
+@overload
+def _foreach_clamp_min(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+@overload
+def _foreach_clamp_min_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
+@overload
+def _foreach_clamp_min_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ...
+@overload
+def _foreach_clamp_min_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+def _foreach_copy_(self: Union[Tuple[Tensor, ...], List[Tensor]], src: Union[Tuple[Tensor, ...], List[Tensor]], non_blocking: _bool = False) -> None: ...
+def _foreach_cos(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+def _foreach_cos_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+def _foreach_cosh(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+def _foreach_cosh_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+@overload
+def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> List[Tensor]: ...
+@overload
+def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor) -> List[Tensor]: ...
+@overload
+def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> List[Tensor]: ...
+@overload
+def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+@overload
+def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
+@overload
+def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor) -> None: ...
+@overload
+def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ...
+@overload
+def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+def _foreach_erf(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+def _foreach_erf_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+def _foreach_erfc(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+def _foreach_erfc_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+def _foreach_exp(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+def _foreach_exp_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+def _foreach_expm1(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+def _foreach_expm1_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+def _foreach_floor(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+def _foreach_floor_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+def _foreach_frac(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+def _foreach_frac_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+@overload
+def _foreach_lerp(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weight: Union[Number, _complex]) -> List[Tensor]: ...
+@overload
+def _foreach_lerp(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weights: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+@overload
+def _foreach_lerp_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weight: Union[Number, _complex]) -> None: ...
+@overload
+def _foreach_lerp_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weights: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+def _foreach_lgamma(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+def _foreach_lgamma_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+def _foreach_log(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+def _foreach_log10(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+def _foreach_log10_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+def _foreach_log1p(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+def _foreach_log1p_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+def _foreach_log2(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+def _foreach_log2_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+def _foreach_log_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+@overload
+def _foreach_maximum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> List[Tensor]: ...
+@overload
+def _foreach_maximum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> List[Tensor]: ...
+@overload
+def _foreach_maximum(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+@overload
+def _foreach_maximum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
+@overload
+def _foreach_maximum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ...
+@overload
+def _foreach_maximum_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+@overload
+def _foreach_minimum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> List[Tensor]: ...
+@overload
+def _foreach_minimum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> List[Tensor]: ...
+@overload
+def _foreach_minimum(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+@overload
+def _foreach_minimum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
+@overload
+def _foreach_minimum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ...
+@overload
+def _foreach_minimum_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+@overload
+def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> List[Tensor]: ...
+@overload
+def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor) -> List[Tensor]: ...
+@overload
+def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> List[Tensor]: ...
+@overload
+def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+@overload
+def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
+@overload
+def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor) -> None: ...
+@overload
+def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ...
+@overload
+def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+def _foreach_neg(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+def _foreach_neg_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+def _foreach_norm(self: Union[Tuple[Tensor, ...], List[Tensor]], ord: Union[Number, _complex] = 2) -> List[Tensor]: ...
+@overload
+def _foreach_pow(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Sequence[Union[Number, _complex]]) -> List[Tensor]: ...
+@overload
+def _foreach_pow(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Union[Number, _complex]) -> List[Tensor]: ...
+@overload
+def _foreach_pow(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+@overload
+def _foreach_pow(self: Union[Number, _complex], exponent: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+@overload
+def _foreach_pow_(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Sequence[Union[Number, _complex]]) -> None: ...
+@overload
+def _foreach_pow_(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Union[Number, _complex]) -> None: ...
+@overload
+def _foreach_pow_(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+def _foreach_reciprocal(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+def _foreach_reciprocal_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+def _foreach_round(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+def _foreach_round_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+def _foreach_sigmoid(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+def _foreach_sigmoid_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+def _foreach_sign(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+def _foreach_sign_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+def _foreach_sin(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+def _foreach_sin_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+def _foreach_sinh(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+def _foreach_sinh_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+def _foreach_sqrt(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+def _foreach_sqrt_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+@overload
+def _foreach_sub(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> List[Tensor]: ...
+@overload
+def _foreach_sub(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Union[Number, _complex] = 1) -> List[Tensor]: ...
+@overload
+def _foreach_sub(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> List[Tensor]: ...
+@overload
+def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
+@overload
+def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Union[Number, _complex] = 1) -> None: ...
+@overload
+def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ...
+def _foreach_tan(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+def _foreach_tan_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+def _foreach_tanh(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+def _foreach_tanh_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+def _foreach_trunc(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+def _foreach_trunc_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+def _foreach_zero_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
+def _from_functional_tensor(t: Tensor) -> Tensor: ...
+def _functional_assert_async(input: Tensor, assert_msg: str, dep_token: Tensor) -> Tensor: ...
+def _functional_sym_constrain_range(size: Union[Number, _complex], min: Optional[_int], max: Optional[_int], dep_token: Tensor) -> Tensor: ...
+def _functional_sym_constrain_range_for_size(size: Union[Number, _complex], min: Optional[_int], max: Optional[_int], dep_token: Tensor) -> Tensor: ...
+def _functionalize_are_all_mutations_hidden_from_autograd(t: Tensor) -> _bool: ...
+def _functionalize_are_all_mutations_under_no_grad_or_inference_mode(t: Tensor) -> _bool: ...
+def _functionalize_commit_update(t: Tensor) -> None: ...
+def _functionalize_mark_mutation_hidden_from_autograd(t: Tensor) -> None: ...
+def _functionalize_replace(self_: Tensor, other: Tensor) -> None: ...
+def _functionalize_sync(t: Tensor) -> None: ...
+@overload
+def _fused_adam_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: Tensor, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ...
+@overload
+def _fused_adam_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: _float, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ...
+@overload
+def _fused_adamw_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: Tensor, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ...
+@overload
+def _fused_adamw_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: _float, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ...
+def _fused_dropout(input: Tensor, p: _float, generator: Optional[Generator] = None) -> Tuple[Tensor, Tensor]: ...
+def _fused_moving_avg_obs_fq_helper(input: Tensor, observer_on: Tensor, fake_quant_on: Tensor, running_min: Tensor, running_max: Tensor, scale: Tensor, zero_point: Tensor, averaging_const: _float, quant_min: _int, quant_max: _int, ch_axis: _int, per_row_fake_quant: _bool = False, symmetric_quant: _bool = False) -> torch.return_types._fused_moving_avg_obs_fq_helper: ...
+def _fused_sdp_choice(query: Tensor, key: Tensor, value: Tensor, attn_mask: Optional[Tensor] = None, dropout_p: _float = 0.0, is_causal: _bool = False, *, scale: Optional[_float] = None) -> _int: ...
+def _fw_primal_copy(input: Tensor, level: _int, *, out: Optional[Tensor] = None) -> Tensor: ...
+def _grid_sampler_2d_cpu_fallback(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ...
+def _has_compatible_shallow_copy_type(input: Tensor, from_: Tensor) -> _bool: ...
+def _histogramdd_bin_edges(input: Tensor, bins: _size, *, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> List[Tensor]: ...
+def _histogramdd_from_bin_cts(input: Tensor, bins: _size, *, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> Tensor: ...
+def _histogramdd_from_bin_tensors(input: Tensor, bins: Union[Tuple[Tensor, ...], List[Tensor]], *, weight: Optional[Tensor] = None, density: _bool = False) -> Tensor: ...
+def _index_put_impl_(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False, unsafe: _bool = False) -> Tensor: ...
+def _indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def _int_mm(input: Tensor, mat2: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def _is_all_true(input: Tensor) -> Tensor: ...
+def _is_any_true(input: Tensor) -> Tensor: ...
+def _is_functional_tensor(t: Tensor) -> _bool: ...
+def _is_zerotensor(input: Tensor) -> _bool: ...
+def _linalg_check_errors(info: Tensor, api_name: str, *, is_matrix: _bool) -> None: ...
+def _linalg_det(A: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_det: ...
+def _linalg_eigh(A: Tensor, UPLO: str = "L", compute_v: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_eigh: ...
+def _linalg_slogdet(A: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_slogdet: ...
+def _linalg_solve_ex(A: Tensor, B: Tensor, *, left: _bool = True, check_errors: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_solve_ex: ...
+def _linalg_svd(A: Tensor, full_matrices: _bool = False, compute_uv: _bool = True, *, driver: Optional[str] = None, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_svd: ...
+def _log_softmax(input: Tensor, dim: _int, half_to_float: _bool, *, out: Optional[Tensor] = None) -> Tensor: ...
+def _log_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input_dtype: _dtype, *, out: Optional[Tensor] = None) -> Tensor: ...
+def _logcumsumexp(input: Tensor, dim: _int, *, out: Optional[Tensor] = None) -> Tensor: ...
+def _lstm_mps(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]: ...
+def _lu_with_info(input: Tensor, pivot: _bool = True, check_errors: _bool = True) -> torch.return_types._lu_with_info: ...
+def _make_dep_token(*, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+def _make_dual(primal: Tensor, tangent: Tensor, level: _int) -> Tensor: ...
+def _make_dual_copy(primal: Tensor, tangent: Tensor, level: _int, *, out: Optional[Tensor] = None) -> Tensor: ...
+def _make_per_channel_quantized_tensor(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int) -> Tensor: ...
+def _make_per_tensor_quantized_tensor(input: Tensor, scale: _float, zero_point: _int) -> Tensor: ...
+def _masked_scale(input: Tensor, mask: Tensor, scale: _float) -> Tensor: ...
+def _masked_softmax(input: Tensor, mask: Tensor, dim: Optional[_int] = None, mask_type: Optional[_int] = None) -> Tensor: ...
+def _mixed_dtypes_linear(input: Tensor, weight: Tensor, scale: Tensor, *, bias: Optional[Tensor] = None, activation: Optional[str] = None) -> Tensor: ...
+def _mkldnn_reshape(input: Tensor, shape: _size) -> Tensor: ...
+def _mkldnn_transpose(input: Tensor, dim0: _int, dim1: _int) -> Tensor: ...
+def _mkldnn_transpose_(input: Tensor, dim0: _int, dim1: _int) -> Tensor: ...
+def _mps_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
+def _mps_convolution_transpose(input: Tensor, weight: Tensor, padding: Sequence[Union[_int, SymInt]], output_padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
+@overload
+def _native_batch_norm_legit(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Tensor, running_var: Tensor, training: _bool, momentum: _float, eps: _float, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> Tuple[Tensor, Tensor, Tensor]: ...
+@overload
+def _native_batch_norm_legit(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], training: _bool, momentum: _float, eps: _float, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> Tuple[Tensor, Tensor, Tensor]: ...
+def _native_batch_norm_legit_no_training(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Tensor, running_var: Tensor, momentum: _float, eps: _float) -> Tuple[Tensor, Tensor, Tensor]: ...
+def _native_multi_head_attention(query: Tensor, key: Tensor, value: Tensor, embed_dim: _int, num_head: _int, qkv_weight: Tensor, qkv_bias: Tensor, proj_weight: Tensor, proj_bias: Tensor, mask: Optional[Tensor] = None, need_weights: _bool = True, average_attn_weights: _bool = True, mask_type: Optional[_int] = None) -> Tuple[Tensor, Tensor]: ...
+def _neg_view(input: Tensor) -> Tensor: ...
+def _neg_view_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def _nested_from_padded(padded: Tensor, cpu_nested_shape_example: Tensor, fuse_transform_0213: _bool = False) -> Tensor: ...
+def _nested_from_padded_and_nested_example(padded: Tensor, nt_example: Tensor) -> Tensor: ...
+def _nested_tensor_from_mask(t: Tensor, mask: Tensor, mask_check: _bool = True) -> Tensor: ...
+def _nested_tensor_from_mask_left_aligned(t: Tensor, mask: Tensor) -> _bool: ...
+def _nested_tensor_from_tensor_list(list: Union[Tuple[Tensor, ...], List[Tensor]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = None) -> Tensor: ...
+def _nested_tensor_softmax_with_shape(input: Tensor, query: Tensor) -> Tensor: ...
+def _nested_view_from_buffer(input: Tensor, nested_size: Tensor, nested_strides: Tensor, offsets: Tensor) -> Tensor: ...
+def _nested_view_from_buffer_copy(input: Tensor, nested_size: Tensor, nested_strides: Tensor, offsets: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def _nnpack_available() -> _bool: ...
+def _nnpack_spatial_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]], stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1) -> Tensor: ...
+def _pack_padded_sequence(input: Tensor, lengths: Tensor, batch_first: _bool) -> Tuple[Tensor, Tensor]: ...
+def _pad_packed_sequence(data: Tensor, batch_sizes: Tensor, batch_first: _bool, padding_value: Union[Number, _complex], total_length: _int) -> Tuple[Tensor, Tensor]: ...
+def _pin_memory(input: Tensor, device: Optional[Optional[DeviceLikeType]] = None) -> Tensor: ...
+def _prelu_kernel(input: Tensor, weight: Tensor) -> Tensor: ...
+def _propagate_xla_data(input: Tensor, output: Tensor) -> None: ...
+def _remove_batch_dim(input: Tensor, level: _int, batch_size: _int, out_dim: _int) -> Tensor: ...
+def _reshape_alias_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None) -> Tensor: ...
+def _reshape_from_tensor(input: Tensor, shape: Tensor) -> Tensor: ...
+def _resize_output_(input: Tensor, size: Sequence[Union[_int, SymInt]], device: Optional[DeviceLikeType]) -> Tensor: ...
+def _rowwise_prune(weight: Tensor, mask: Tensor, compressed_indices_dtype: _dtype) -> Tuple[Tensor, Tensor]: ...
+def _sample_dirichlet(input: Tensor, generator: Optional[Generator] = None) -> Tensor: ...
+def _saturate_weight_to_fp16(weight: Tensor) -> Tensor: ...
+def _scaled_dot_product_attention_math(query: Tensor, key: Tensor, value: Tensor, attn_mask: Optional[Tensor] = None, dropout_p: _float = 0.0, is_causal: _bool = False, dropout_mask: Optional[Tensor] = None, *, scale: Optional[_float] = None) -> Tuple[Tensor, Tensor]: ...
+def _scaled_dot_product_efficient_attention(query: Tensor, key: Tensor, value: Tensor, attn_bias: Optional[Tensor], compute_log_sumexp: _bool, dropout_p: _float = 0.0, is_causal: _bool = False, *, scale: Optional[_float] = None) -> torch.return_types._scaled_dot_product_efficient_attention: ...
+def _scaled_dot_product_flash_attention(query: Tensor, key: Tensor, value: Tensor, dropout_p: _float = 0.0, is_causal: _bool = False, return_debug_mask: _bool = False, *, scale: Optional[_float] = None) -> torch.return_types._scaled_dot_product_flash_attention: ...
+def _scaled_mm(input: Tensor, mat2: Tensor, *, bias: Optional[Tensor] = None, out_dtype: Optional[_dtype] = None, scale_a: Optional[Tensor] = None, scale_b: Optional[Tensor] = None, scale_result: Optional[Tensor] = None, use_fast_accum: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> Tuple[Tensor, Tensor]: ...
+def _shape_as_tensor(input: Tensor) -> Tensor: ...
+def _sobol_engine_draw(quasi: Tensor, n: _int, sobolstate: Tensor, dimension: _int, num_generated: _int, dtype: Optional[_dtype]) -> Tuple[Tensor, Tensor]: ...
+def _sobol_engine_ff_(input: Tensor, n: _int, sobolstate: Tensor, dimension: _int, num_generated: _int) -> Tensor: ...
+def _sobol_engine_initialize_state_(input: Tensor, dimension: _int) -> Tensor: ...
+def _sobol_engine_scramble_(input: Tensor, ltm: Tensor, dimension: _int) -> Tensor: ...
+def _softmax(input: Tensor, dim: _int, half_to_float: _bool, *, out: Optional[Tensor] = None) -> Tensor: ...
+def _softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input_dtype: _dtype, *, grad_input: Optional[Tensor] = None) -> Tensor: ...
+def _sparse_broadcast_to(input: Tensor, size: _size) -> Tensor: ...
+def _sparse_broadcast_to_copy(input: Tensor, size: _size, *, out: Optional[Tensor] = None) -> Tensor: ...
+def _sparse_csr_prod(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor: ...
+def _sparse_csr_sum(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor: ...
+def _sparse_log_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input: Tensor) -> Tensor: ...
+def _sparse_semi_structured_linear(input: Tensor, weight: Tensor, meta: Tensor, *, bias: Optional[Tensor] = None, activation: Optional[str] = None) -> Tensor: ...
+def _sparse_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input: Tensor) -> Tensor: ...
+def _sparse_sparse_matmul(input: Tensor, other: Tensor) -> Tensor: ...
+@overload
+def _sparse_sum(input: Tensor) -> Tensor: ...
+@overload
+def _sparse_sum(input: Tensor, *, dtype: _dtype) -> Tensor: ...
+@overload
+def _sparse_sum(input: Tensor, dim: Union[_int, _size]) -> Tensor: ...
+@overload
+def _sparse_sum(input: Tensor, dim: Union[_int, _size], *, dtype: _dtype) -> Tensor: ...
+def _stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: ...
+def _standard_gamma(input: Tensor, generator: Optional[Generator] = None) -> Tensor: ...
+def _standard_gamma_grad(input: Tensor, output: Tensor) -> Tensor: ...
+def _sync(t: Tensor) -> None: ...
+@overload
+def _test_autograd_multiple_dispatch(input: Tensor) -> Tensor: ...
+@overload
+def _test_autograd_multiple_dispatch(input: Tensor, b: _bool) -> Tensor: ...
+def _test_autograd_multiple_dispatch_view(input: Tensor) -> Tensor: ...
+def _test_autograd_multiple_dispatch_view_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def _test_check_tensor(input: Tensor) -> Tensor: ...
+def _test_functorch_fallback(input: Tensor, other: Tensor) -> Tensor: ...
+def _test_serialization_subcmul(input: Tensor, other: Tensor, alpha: Union[Number, _complex] = 1) -> Tensor: ...
+def _to_cpu(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+def _to_functional_tensor(t: Tensor) -> Tensor: ...
+def _to_sparse_semi_structured(dense: Tensor) -> Tuple[Tensor, Tensor]: ...
+def _transform_bias_rescale_qkv(qkv: Tensor, qkv_bias: Tensor, num_heads: _int) -> Tuple[Tensor, Tensor, Tensor]: ...
+def _transformer_encoder_layer_fwd(src: Tensor, embed_dim: _int, num_heads: _int, qkv_weight: Tensor, qkv_bias: Tensor, proj_weight: Tensor, proj_bias: Tensor, use_gelu: _bool, norm_first: _bool, eps: _float, norm_weight_1: Tensor, norm_bias_1: Tensor, norm_weight_2: Tensor, norm_bias_2: Tensor, ffn_weight_1: Tensor, ffn_bias_1: Tensor, ffn_weight_2: Tensor, ffn_bias_2: Tensor, mask: Optional[Tensor] = None, mask_type: Optional[_int] = None) -> Tensor: ...
+def _trilinear(i1: Tensor, i2: Tensor, i3: Tensor, expand1: _size, expand2: _size, expand3: _size, sumdim: _size, unroll_dim: _int = 1) -> Tensor: ...
+def _triton_multi_head_attention(query: Tensor, key: Tensor, value: Tensor, embed_dim: _int, num_head: _int, qkv_weight: Tensor, qkv_bias: Tensor, proj_weight: Tensor, proj_bias: Tensor, mask: Optional[Tensor] = None) -> Tensor: ...
+def _triton_scaled_dot_attention(q: Tensor, k: Tensor, v: Tensor, dropout_p: _float = 0.0) -> Tensor: ...
+def _unique(input: Tensor, sorted: _bool = True, return_inverse: _bool = False) -> Tuple[Tensor, Tensor]: ...
+def _unique2(input: Tensor, sorted: _bool = True, return_inverse: _bool = False, return_counts: _bool = False) -> Tuple[Tensor, Tensor, Tensor]: ...
+def _unpack_dual(dual: Tensor, level: _int) -> torch.return_types._unpack_dual: ...
+def _unsafe_index(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]]) -> Tensor: ...
+def _unsafe_index_put(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False) -> Tensor: ...
+@overload
+def _use_cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int) -> _bool: ...
+@overload
+def _use_cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int) -> _bool: ...
+def _use_cudnn_rnn_flatten_weight() -> _bool: ...
+def _validate_compressed_sparse_indices(is_crow: _bool, compressed_idx: Tensor, plain_idx: Tensor, cdim: _int, dim: _int, nnz: _int) -> None: ...
+def _validate_sparse_bsc_tensor_args(ccol_indices: Tensor, row_indices: Tensor, values: Tensor, size: _size) -> None: ...
+def _validate_sparse_bsr_tensor_args(crow_indices: Tensor, col_indices: Tensor, values: Tensor, size: _size) -> None: ...
+def _validate_sparse_compressed_tensor_args(compressed_indices: Tensor, plain_indices: Tensor, values: Tensor, size: _size, layout: _layout) -> None: ...
+def _validate_sparse_coo_tensor_args(indices: Tensor, values: Tensor, size: _size, is_coalesced: Optional[_bool] = None) -> None: ...
+def _validate_sparse_csc_tensor_args(ccol_indices: Tensor, row_indices: Tensor, values: Tensor, size: _size) -> None: ...
+def _validate_sparse_csr_tensor_args(crow_indices: Tensor, col_indices: Tensor, values: Tensor, size: _size) -> None: ...
+def _values_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def _weight_int4pack_mm(input: Tensor, mat2: Tensor, qGroupSize: _int, qScaleAndZeros: Tensor) -> Tensor: ...
+def _weight_norm(v: Tensor, g: Tensor, dim: _int = 0) -> Tensor: ...
+def _weight_norm_interface(v: Tensor, g: Tensor, dim: _int = 0) -> Tuple[Tensor, Tensor]: ...
+def abs(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def abs_(input: Tensor) -> Tensor: ...
+def absolute(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def acos(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def acos_(input: Tensor) -> Tensor: ...
+def acosh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def acosh_(input: Tensor) -> Tensor: ...
+def adaptive_avg_pool1d(input: Tensor, output_size: Union[_int, _size]) -> Tensor: ...
+def adaptive_max_pool1d(input: Tensor, output_size: Union[_int, _size]) -> Tuple[Tensor, Tensor]: ...
+@overload
+def add(input: Union[Tensor, Number], other: Union[Tensor, Number], *, alpha: Optional[Number] = 1, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def add(self: Tensor, alpha: Union[Number, _complex], other: Tensor) -> Tensor: ...
+@overload
+def add(self: Tensor, alpha: Union[Number, _complex], other: Tensor, *, out: Tensor) -> Tensor: ...
+@overload
+def addbmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], batch1: Tensor, batch2: Tensor) -> Tensor: ...
+@overload
+def addbmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor: ...
+@overload
+def addbmm(input: Tensor, batch1: Tensor, batch2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def addbmm(beta: Union[Number, _complex], self: Tensor, batch1: Tensor, batch2: Tensor) -> Tensor: ...
+@overload
+def addbmm(beta: Union[Number, _complex], self: Tensor, batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor: ...
+@overload
+def addcdiv(self: Tensor, value: Union[Number, _complex], tensor1: Tensor, tensor2: Tensor) -> Tensor: ...
+@overload
+def addcdiv(self: Tensor, value: Union[Number, _complex], tensor1: Tensor, tensor2: Tensor, *, out: Tensor) -> Tensor: ...
+@overload
+def addcdiv(input: Tensor, tensor1: Tensor, tensor2: Tensor, *, value: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def addcmul(self: Tensor, value: Union[Number, _complex], tensor1: Tensor, tensor2: Tensor) -> Tensor: ...
+@overload
+def addcmul(self: Tensor, value: Union[Number, _complex], tensor1: Tensor, tensor2: Tensor, *, out: Tensor) -> Tensor: ...
+@overload
+def addcmul(input: Tensor, tensor1: Tensor, tensor2: Tensor, *, value: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def addmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat1: Tensor, mat2: Tensor) -> Tensor: ...
+@overload
+def addmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat1: Tensor, mat2: Tensor, *, out: Tensor) -> Tensor: ...
+@overload
+def addmm(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def addmm(beta: Union[Number, _complex], self: Tensor, mat1: Tensor, mat2: Tensor) -> Tensor: ...
+@overload
+def addmm(beta: Union[Number, _complex], self: Tensor, mat1: Tensor, mat2: Tensor, *, out: Tensor) -> Tensor: ...
+@overload
+def addmv(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat: Tensor, vec: Tensor) -> Tensor: ...
+@overload
+def addmv(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat: Tensor, vec: Tensor, *, out: Tensor) -> Tensor: ...
+@overload
+def addmv(input: Tensor, mat: Tensor, vec: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def addmv(beta: Union[Number, _complex], self: Tensor, mat: Tensor, vec: Tensor) -> Tensor: ...
+@overload
+def addmv(beta: Union[Number, _complex], self: Tensor, mat: Tensor, vec: Tensor, *, out: Tensor) -> Tensor: ...
+@overload
+def addmv_(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat: Tensor, vec: Tensor) -> Tensor: ...
+@overload
+def addmv_(input: Tensor, mat: Tensor, vec: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor: ...
+@overload
+def addmv_(beta: Union[Number, _complex], self: Tensor, mat: Tensor, vec: Tensor) -> Tensor: ...
+@overload
+def addr(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], vec1: Tensor, vec2: Tensor) -> Tensor: ...
+@overload
+def addr(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], vec1: Tensor, vec2: Tensor, *, out: Tensor) -> Tensor: ...
+@overload
+def addr(input: Tensor, vec1: Tensor, vec2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def addr(beta: Union[Number, _complex], self: Tensor, vec1: Tensor, vec2: Tensor) -> Tensor: ...
+@overload
+def addr(beta: Union[Number, _complex], self: Tensor, vec1: Tensor, vec2: Tensor, *, out: Tensor) -> Tensor: ...
+def adjoint(input: Tensor) -> Tensor: ...
+def affine_grid_generator(theta: Tensor, size: Sequence[Union[_int, SymInt]], align_corners: _bool) -> Tensor: ...
+def alias_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def all(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def all(input: Tensor, dim: Optional[_size] = None, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def all(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def all(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ...
+def allclose(input: Tensor, other: Tensor, rtol: _float = 1e-05, atol: _float = 1e-08, equal_nan: _bool = False) -> _bool: ...
+def alpha_dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ...
+def alpha_dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ...
+def amax(input: Tensor, dim: Union[_int, _size] = (), keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ...
+def amin(input: Tensor, dim: Union[_int, _size] = (), keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ...
+def aminmax(input: Tensor, *, dim: Optional[_int] = None, keepdim: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.aminmax: ...
+def angle(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def any(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def any(input: Tensor, dim: Optional[_size] = None, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def any(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def any(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def arange(start: Number, end: Number, step: Number, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: ...
+@overload
+def arange(start: Number, end: Number, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: ...
+@overload
+def arange(end: Number, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: ...
+@overload
+def arange(end: Union[Number, _complex], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def arange(start: Union[Number, _complex], end: Union[Number, _complex], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def arange(start: Union[Number, _complex], end: Union[Number, _complex], step: Union[Number, _complex] = 1, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+def arccos(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def arccos_(input: Tensor) -> Tensor: ...
+def arccosh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def arccosh_(input: Tensor) -> Tensor: ...
+def arcsin(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def arcsin_(input: Tensor) -> Tensor: ...
+def arcsinh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def arcsinh_(input: Tensor) -> Tensor: ...
+def arctan(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def arctan2(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def arctan_(input: Tensor) -> Tensor: ...
+def arctanh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def arctanh_(input: Tensor) -> Tensor: ...
+def argmax(input: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ...
+def argmin(input: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def argsort(input: Tensor, *, stable: _bool, dim: _int = -1, descending: _bool = False) -> Tensor: ...
+@overload
+def argsort(input: Tensor, dim: _int = -1, descending: _bool = False) -> Tensor: ...
+@overload
+def argsort(input: Tensor, dim: Union[str, ellipsis, None], descending: _bool = False) -> Tensor: ...
+def argwhere(input: Tensor) -> Tensor: ...
+def as_strided(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None) -> Tensor: ...
+def as_strided_(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None) -> Tensor: ...
+def as_strided_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None, *, out: Optional[Tensor] = None) -> Tensor: ...
+def as_strided_scatter(input: Tensor, src: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None) -> Tensor: ...
+def as_tensor(data: Any, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None) -> Tensor: ...
+def asarray(obj: Any, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, copy: Optional[_bool] = None, requires_grad: _bool = False) -> Tensor: ...
+def asin(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def asin_(input: Tensor) -> Tensor: ...
+def asinh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def asinh_(input: Tensor) -> Tensor: ...
+def atan(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def atan2(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def atan_(input: Tensor) -> Tensor: ...
+def atanh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def atanh_(input: Tensor) -> Tensor: ...
+def avg_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, ceil_mode: _bool = False, count_include_pad: _bool = True) -> Tensor: ...
+@overload
+def baddbmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], batch1: Tensor, batch2: Tensor) -> Tensor: ...
+@overload
+def baddbmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor: ...
+@overload
+def baddbmm(input: Tensor, batch1: Tensor, batch2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def baddbmm(beta: Union[Number, _complex], self: Tensor, batch1: Tensor, batch2: Tensor) -> Tensor: ...
+@overload
+def baddbmm(beta: Union[Number, _complex], self: Tensor, batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor: ...
+@overload
+def bartlett_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def bartlett_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+def batch_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, momentum: _float, eps: _float, cudnn_enabled: _bool) -> Tensor: ...
+def batch_norm_backward_elemt(grad_out: Tensor, input: Tensor, mean: Tensor, invstd: Tensor, weight: Optional[Tensor], sum_dy: Tensor, sum_dy_xmu: Tensor, count: Tensor) -> Tensor: ...
+def batch_norm_backward_reduce(grad_out: Tensor, input: Tensor, mean: Tensor, invstd: Tensor, weight: Optional[Tensor], input_g: _bool, weight_g: _bool, bias_g: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
+def batch_norm_elemt(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], mean: Tensor, invstd: Tensor, eps: _float, *, out: Optional[Tensor] = None) -> Tensor: ...
+def batch_norm_gather_stats(input: Tensor, mean: Tensor, invstd: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor], momentum: _float, eps: _float, count: _int) -> Tuple[Tensor, Tensor]: ...
+def batch_norm_gather_stats_with_counts(input: Tensor, mean: Tensor, invstd: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor], momentum: _float, eps: _float, counts: Tensor) -> Tuple[Tensor, Tensor]: ...
+def batch_norm_stats(input: Tensor, eps: _float) -> Tuple[Tensor, Tensor]: ...
+def batch_norm_update_stats(input: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor], momentum: _float) -> Tuple[Tensor, Tensor]: ...
+@overload
+def bernoulli(input: Tensor, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def bernoulli(input: Tensor, p: _float, *, generator: Optional[Generator] = None) -> Tensor: ...
+def bilinear(input1: Tensor, input2: Tensor, weight: Tensor, bias: Optional[Tensor] = None) -> Tensor: ...
+def binary_cross_entropy_with_logits(input: Tensor, target: Tensor, weight: Optional[Tensor] = None, pos_weight: Optional[Tensor] = None, reduction: _int = 1) -> Tensor: ...
+def bincount(input: Tensor, weights: Optional[Tensor] = None, minlength: _int = 0) -> Tensor: ...
+def binomial(count: Tensor, prob: Tensor, generator: Optional[Generator] = None) -> Tensor: ...
+@overload
+def bitwise_and(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def bitwise_and(self: Union[Number, _complex], other: Tensor) -> Tensor: ...
+@overload
+def bitwise_and(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def bitwise_left_shift(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def bitwise_left_shift(self: Union[Number, _complex], other: Tensor) -> Tensor: ...
+@overload
+def bitwise_left_shift(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
+def bitwise_not(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def bitwise_or(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def bitwise_or(self: Union[Number, _complex], other: Tensor) -> Tensor: ...
+@overload
+def bitwise_or(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def bitwise_right_shift(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def bitwise_right_shift(self: Union[Number, _complex], other: Tensor) -> Tensor: ...
+@overload
+def bitwise_right_shift(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def bitwise_xor(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def bitwise_xor(self: Union[Number, _complex], other: Tensor) -> Tensor: ...
+@overload
+def bitwise_xor(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def blackman_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def blackman_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+def bmm(input: Tensor, mat2: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def broadcast_to(input: Tensor, size: Sequence[Union[_int, SymInt]]) -> Tensor: ...
+@overload
+def bucketize(input: Tensor, boundaries: Tensor, *, out_int32: _bool = False, right: _bool = False, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def bucketize(self: Union[Number, _complex], boundaries: Tensor, *, out_int32: _bool = False, right: _bool = False) -> Tensor: ...
+def can_cast(from_: _dtype, to: _dtype) -> _bool: ...
+@overload
+def cat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def cat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: Union[str, ellipsis, None], *, out: Optional[Tensor] = None) -> Tensor: ...
+def ccol_indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def ceil(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def ceil_(input: Tensor) -> Tensor: ...
+def celu(input: Tensor, alpha: Union[Number, _complex] = 1.0) -> Tensor: ...
+def celu_(input: Tensor, alpha: Union[Number, _complex] = 1.0) -> Tensor: ...
+def channel_shuffle(input: Tensor, groups: Union[_int, SymInt]) -> Tensor: ...
+def cholesky(input: Tensor, upper: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ...
+def cholesky_inverse(input: Tensor, upper: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ...
+def cholesky_solve(input: Tensor, input2: Tensor, upper: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ...
+def choose_qparams_optimized(input: Tensor, numel: _int, n_bins: _int, ratio: _float, bit_width: _int) -> Tuple[Tensor, Tensor]: ...
+def chunk(input: Tensor, chunks: _int, dim: _int = 0) -> List[Tensor]: ...
+@overload
+def clamp(input: Tensor, min: Optional[Tensor] = None, max: Optional[Tensor] = None, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def clamp(input: Tensor, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def clamp_(input: Tensor, min: Optional[Tensor] = None, max: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def clamp_(input: Tensor, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None) -> Tensor: ...
+@overload
+def clamp_max(input: Tensor, max: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def clamp_max(input: Tensor, max: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def clamp_max_(input: Tensor, max: Tensor) -> Tensor: ...
+@overload
+def clamp_max_(input: Tensor, max: Union[Number, _complex]) -> Tensor: ...
+@overload
+def clamp_min(input: Tensor, min: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def clamp_min(input: Tensor, min: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def clamp_min_(input: Tensor, min: Tensor) -> Tensor: ...
+@overload
+def clamp_min_(input: Tensor, min: Union[Number, _complex]) -> Tensor: ...
+@overload
+def clip(input: Tensor, min: Optional[Tensor] = None, max: Optional[Tensor] = None, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def clip(input: Tensor, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def clip_(input: Tensor, min: Optional[Tensor] = None, max: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def clip_(input: Tensor, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None) -> Tensor: ...
+def clone(input: Tensor, *, memory_format: Optional[memory_format] = None) -> Tensor: ...
+def col_indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def column_stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor: ...
+def combinations(input: Tensor, r: _int = 2, with_replacement: _bool = False) -> Tensor: ...
+def complex(real: Tensor, imag: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def concat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def concat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: Union[str, ellipsis, None], *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def concatenate(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def concatenate(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: Union[str, ellipsis, None], *, out: Optional[Tensor] = None) -> Tensor: ...
+def conj(input: Tensor) -> Tensor: ...
+def conj_physical(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def conj_physical_(input: Tensor) -> Tensor: ...
+def constant_pad_nd(input: Tensor, pad: Sequence[Union[_int, SymInt]], value: Union[Number, _complex] = 0) -> Tensor: ...
+@overload
+def conv1d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ...
+@overload
+def conv1d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: str = "valid", dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ...
+@overload
+def conv2d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ...
+@overload
+def conv2d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: str = "valid", dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ...
+@overload
+def conv3d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ...
+@overload
+def conv3d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: str = "valid", dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ...
+def conv_tbc(input: Tensor, weight: Tensor, bias: Tensor, pad: _int = 0) -> Tensor: ...
+def conv_transpose1d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, output_padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, groups: Union[_int, SymInt] = 1, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1) -> Tensor: ...
+def conv_transpose2d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, output_padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, groups: Union[_int, SymInt] = 1, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1) -> Tensor: ...
+def conv_transpose3d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, output_padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, groups: Union[_int, SymInt] = 1, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1) -> Tensor: ...
+def convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], transposed: _bool, output_padding: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
+@overload
+def copysign(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def copysign(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
+def corrcoef(input: Tensor) -> Tensor: ...
+def cos(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def cos_(input: Tensor) -> Tensor: ...
+def cosh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def cosh_(input: Tensor) -> Tensor: ...
+def cosine_embedding_loss(input1: Tensor, input2: Tensor, target: Tensor, margin: _float = 0.0, reduction: _int = 1) -> Tensor: ...
+def cosine_similarity(x1: Tensor, x2: Tensor, dim: _int = 1, eps: _float = 1e-08) -> Tensor: ...
+@overload
+def count_nonzero(input: Tensor, dim: Optional[_int] = None) -> Tensor: ...
+@overload
+def count_nonzero(input: Tensor, dim: _size) -> Tensor: ...
+def cov(input: Tensor, *, correction: _int = 1, fweights: Optional[Tensor] = None, aweights: Optional[Tensor] = None) -> Tensor: ...
+def cross(input: Tensor, other: Tensor, dim: Optional[_int] = None, *, out: Optional[Tensor] = None) -> Tensor: ...
+def crow_indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int = 0, reduction: _int = 1, zero_infinity: _bool = False) -> Tensor: ...
+@overload
+def ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int = 0, reduction: _int = 1, zero_infinity: _bool = False) -> Tensor: ...
+def cudnn_affine_grid_generator(theta: Tensor, N: _int, C: _int, H: _int, W: _int) -> Tensor: ...
+def cudnn_batch_norm(input: Tensor, weight: Tensor, bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, exponential_average_factor: _float, epsilon: _float) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
+def cudnn_convolution(input: Tensor, weight: Tensor, padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool, allow_tf32: _bool) -> Tensor: ...
+def cudnn_convolution_add_relu(input: Tensor, weight: Tensor, z: Tensor, alpha: Optional[Union[Number, _complex]], bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
+def cudnn_convolution_relu(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
+def cudnn_convolution_transpose(input: Tensor, weight: Tensor, padding: Sequence[Union[_int, SymInt]], output_padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool, allow_tf32: _bool) -> Tensor: ...
+def cudnn_grid_sampler(input: Tensor, grid: Tensor) -> Tensor: ...
+def cudnn_is_acceptable(input: Tensor) -> _bool: ...
+@overload
+def cummax(input: Tensor, dim: _int, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.cummax: ...
+@overload
+def cummax(input: Tensor, dim: Union[str, ellipsis, None], *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.cummax: ...
+@overload
+def cummin(input: Tensor, dim: _int, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.cummin: ...
+@overload
+def cummin(input: Tensor, dim: Union[str, ellipsis, None], *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.cummin: ...
+@overload
+def cumprod(input: Tensor, dim: _int, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def cumprod(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def cumsum(input: Tensor, dim: _int, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def cumsum(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def cumulative_trapezoid(y: Tensor, x: Tensor, *, dim: _int = -1) -> Tensor: ...
+@overload
+def cumulative_trapezoid(y: Tensor, *, dx: Union[Number, _complex] = 1, dim: _int = -1) -> Tensor: ...
+def deg2rad(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def deg2rad_(input: Tensor) -> Tensor: ...
+@overload
+def dequantize(input: Tensor) -> Tensor: ...
+@overload
+def dequantize(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
+def det(input: Tensor) -> Tensor: ...
+def detach(input: Tensor) -> Tensor: ...
+def detach_(input: Tensor) -> Tensor: ...
+def detach_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def diag(input: Tensor, diagonal: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: ...
+def diag_embed(input: Tensor, offset: _int = 0, dim1: _int = -2, dim2: _int = -1) -> Tensor: ...
+def diagflat(input: Tensor, offset: _int = 0) -> Tensor: ...
+@overload
+def diagonal(input: Tensor, offset: _int = 0, dim1: _int = 0, dim2: _int = 1) -> Tensor: ...
+@overload
+def diagonal(input: Tensor, *, outdim: Union[str, ellipsis, None], dim1: Union[str, ellipsis, None], dim2: Union[str, ellipsis, None], offset: _int = 0) -> Tensor: ...
+def diagonal_copy(input: Tensor, offset: _int = 0, dim1: _int = 0, dim2: _int = 1, *, out: Optional[Tensor] = None) -> Tensor: ...
+def diagonal_scatter(input: Tensor, src: Tensor, offset: _int = 0, dim1: _int = 0, dim2: _int = 1) -> Tensor: ...
+def diff(input: Tensor, n: _int = 1, dim: _int = -1, prepend: Optional[Tensor] = None, append: Optional[Tensor] = None, *, out: Optional[Tensor] = None) -> Tensor: ...
+def digamma(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def dist(input: Tensor, other: Tensor, p: Union[Number, _complex] = 2) -> Tensor: ...
+def div(input: Union[Tensor, Number], other: Union[Tensor, Number], *, rounding_mode: Optional[str] = None, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def divide(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def divide(input: Tensor, other: Tensor, *, rounding_mode: Optional[str], out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def divide(input: Tensor, other: Union[Number, _complex], *, rounding_mode: Optional[str]) -> Tensor: ...
+@overload
+def divide(input: Tensor, other: Union[Number, _complex]) -> Tensor: ...
+def dot(input: Tensor, tensor: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ...
+def dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ...
+def dsmm(input: Tensor, mat2: Tensor) -> Tensor: ...
+@overload
+def dsplit(input: Tensor, sections: _int) -> List[Tensor]: ...
+@overload
+def dsplit(input: Tensor, indices: _size) -> List[Tensor]: ...
+def dstack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor: ...
+def embedding(weight: Tensor, indices: Tensor, padding_idx: Union[_int, SymInt] = -1, scale_grad_by_freq: _bool = False, sparse: _bool = False) -> Tensor: ...
+@overload
+def embedding_bag(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool, mode: _int, sparse: _bool, per_sample_weights: Optional[Tensor], include_last_offset: _bool, padding_idx: Optional[_int]) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
+@overload
+def embedding_bag(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool = False, mode: _int = 0, sparse: _bool = False, per_sample_weights: Optional[Tensor] = None, include_last_offset: _bool = False) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
+def embedding_renorm_(input: Tensor, indices: Tensor, max_norm: _float, norm_type: _float) -> Tensor: ...
+@overload
+def empty(size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format] = None, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def empty(*size: _int, memory_format: Optional[memory_format] = None, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def empty(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def empty(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+def empty_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+def empty_permuted(size: Sequence[Union[_int, SymInt]], physical_layout: _size, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+def empty_quantized(size: _size, qtensor: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+def empty_strided(size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def eq(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def eq(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
+def equal(input: Tensor, other: Tensor) -> _bool: ...
+def erf(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def erf_(input: Tensor) -> Tensor: ...
+def erfc(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def erfc_(input: Tensor) -> Tensor: ...
+def erfinv(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def exp(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def exp2(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def exp2_(input: Tensor) -> Tensor: ...
+def exp_(input: Tensor) -> Tensor: ...
+def expand_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], *, implicit: _bool = False, out: Optional[Tensor] = None) -> Tensor: ...
+def expm1(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def expm1_(input: Tensor) -> Tensor: ...
+@overload
+def eye(n: Union[_int, SymInt], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def eye(n: Union[_int, SymInt], m: Union[_int, SymInt], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+def fake_quantize_per_channel_affine(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int, quant_min: _int, quant_max: _int) -> Tensor: ...
+@overload
+def fake_quantize_per_tensor_affine(input: Tensor, scale: _float, zero_point: _int, quant_min: _int, quant_max: _int) -> Tensor: ...
+@overload
+def fake_quantize_per_tensor_affine(input: Tensor, scale: Tensor, zero_point: Tensor, quant_min: _int, quant_max: _int) -> Tensor: ...
+def fbgemm_linear_fp16_weight(input: Tensor, packed_weight: Tensor, bias: Tensor) -> Tensor: ...
+def fbgemm_linear_fp16_weight_fp32_activation(input: Tensor, packed_weight: Tensor, bias: Tensor) -> Tensor: ...
+def fbgemm_linear_int8_weight(input: Tensor, weight: Tensor, packed: Tensor, col_offsets: Tensor, weight_scale: Union[Number, _complex], weight_zero_point: Union[Number, _complex], bias: Tensor) -> Tensor: ...
+def fbgemm_linear_int8_weight_fp32_activation(input: Tensor, weight: Tensor, packed: Tensor, col_offsets: Tensor, weight_scale: Union[Number, _complex], weight_zero_point: Union[Number, _complex], bias: Tensor) -> Tensor: ...
+def fbgemm_linear_quantize_weight(input: Tensor) -> Tuple[Tensor, Tensor, _float, _int]: ...
+def fbgemm_pack_gemm_matrix_fp16(input: Tensor) -> Tensor: ...
+@overload
+def fbgemm_pack_quantized_matrix(input: Tensor) -> Tensor: ...
+@overload
+def fbgemm_pack_quantized_matrix(input: Tensor, K: _int, N: _int) -> Tensor: ...
+def feature_alpha_dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ...
+def feature_alpha_dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ...
+def feature_dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ...
+def feature_dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ...
+@overload
+def fill(input: Tensor, value: Tensor) -> Tensor: ...
+@overload
+def fill(input: Tensor, value: Union[Number, _complex]) -> Tensor: ...
+@overload
+def fill_(input: Tensor, value: Tensor) -> Tensor: ...
+@overload
+def fill_(input: Tensor, value: Union[Number, _complex]) -> Tensor: ...
+def fix(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def fix_(input: Tensor) -> Tensor: ...
+@overload
+def flatten(input: Tensor, start_dim: _int = 0, end_dim: _int = -1) -> Tensor: ...
+@overload
+def flatten(input: Tensor, start_dim: _int, end_dim: _int, out_dim: Union[str, ellipsis, None]) -> Tensor: ...
+@overload
+def flatten(input: Tensor, start_dim: Union[str, ellipsis, None], end_dim: Union[str, ellipsis, None], out_dim: Union[str, ellipsis, None]) -> Tensor: ...
+@overload
+def flatten(input: Tensor, dims: Sequence[Union[str, ellipsis, None]], out_dim: Union[str, ellipsis, None]) -> Tensor: ...
+def flip(input: Tensor, dims: _size) -> Tensor: ...
+def fliplr(input: Tensor) -> Tensor: ...
+def flipud(input: Tensor) -> Tensor: ...
+@overload
+def float_power(input: Tensor, exponent: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def float_power(self: Union[Number, _complex], exponent: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def float_power(input: Tensor, exponent: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
+def floor(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def floor_(input: Tensor) -> Tensor: ...
+def floor_divide(input: Union[Tensor, Number], other: Union[Tensor, Number], *, out: Optional[Tensor] = None) -> Tensor: ...
+def fmax(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def fmin(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def fmod(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def fmod(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
+def frac(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def frac_(input: Tensor) -> Tensor: ...
+def frexp(input: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.frexp: ...
+def frobenius_norm(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ...
+def from_file(filename: str, shared: Optional[_bool] = None, size: Optional[_int] = 0, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+def from_numpy(ndarray) -> Tensor: ...
+def frombuffer(buffer: Any, *, dtype: _dtype, count: int = -1, offset: int = 0, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False) -> Tensor: ...
+@overload
+def full(size: _size, fill_value: Union[Number, _complex], *, out: Optional[Tensor] = None, layout: _layout = strided, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: ...
+@overload
+def full(size: _size, fill_value: Union[Number, _complex], *, names: List[Union[str, None]], layout: _layout = strided, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: ...
+@overload
+def full(size: Sequence[Union[_int, SymInt]], fill_value: Union[Number, _complex], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def full(size: _size, fill_value: Union[Number, _complex], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+def full_like(input: Tensor, fill_value: Union[Number, _complex], *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+def fused_moving_avg_obs_fake_quant(input: Tensor, observer_on: Tensor, fake_quant_on: Tensor, running_min: Tensor, running_max: Tensor, scale: Tensor, zero_point: Tensor, averaging_const: _float, quant_min: _int, quant_max: _int, ch_axis: _int, per_row_fake_quant: _bool = False, symmetric_quant: _bool = False) -> Tensor: ...
+@overload
+def gather(input: Tensor, dim: _int, index: Tensor, *, sparse_grad: _bool = False, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def gather(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, *, sparse_grad: _bool = False, out: Optional[Tensor] = None) -> Tensor: ...
+def gcd(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def gcd_(input: Tensor, other: Tensor) -> Tensor: ...
+@overload
+def ge(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def ge(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
+def geqrf(input: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.geqrf: ...
+def ger(input: Tensor, vec2: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def get_default_dtype() -> _dtype: ...
+def get_num_interop_threads() -> _int: ...
+def get_num_threads() -> _int: ...
+@overload
+def gradient(input: Tensor, *, spacing: Optional[Union[Number, _complex]] = None, dim: Optional[_int] = None, edge_order: _int = 1) -> List[Tensor]: ...
+@overload
+def gradient(input: Tensor, *, spacing: Sequence[Union[Number, _complex]], dim: Optional[_int] = None, edge_order: _int = 1) -> List[Tensor]: ...
+@overload
+def gradient(input: Tensor, *, spacing: Sequence[Union[Number, _complex]], dim: _size, edge_order: _int = 1) -> List[Tensor]: ...
+@overload
+def gradient(input: Tensor, *, spacing: Union[Tuple[Tensor, ...], List[Tensor]], dim: Optional[_int] = None, edge_order: _int = 1) -> List[Tensor]: ...
+@overload
+def gradient(input: Tensor, *, spacing: Union[Number, _complex], dim: _size, edge_order: _int = 1) -> List[Tensor]: ...
+@overload
+def gradient(input: Tensor, *, spacing: Union[Tuple[Tensor, ...], List[Tensor]], dim: _size, edge_order: _int = 1) -> List[Tensor]: ...
+@overload
+def gradient(input: Tensor, *, dim: _size, edge_order: _int = 1) -> List[Tensor]: ...
+@overload
+def greater(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def greater(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def greater_equal(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def greater_equal(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
+def grid_sampler(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ...
+def grid_sampler_2d(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ...
+def grid_sampler_3d(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ...
+def group_norm(input: Tensor, num_groups: _int, weight: Optional[Tensor] = None, bias: Optional[Tensor] = None, eps: _float = 1e-05, cudnn_enabled: _bool = True) -> Tensor: ...
+@overload
+def gru(data: Tensor, batch_sizes: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor]: ...
+@overload
+def gru(input: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor]: ...
+def gru_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor] = None, b_hh: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def gt(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def gt(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def hamming_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def hamming_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def hamming_window(window_length: _int, periodic: _bool, alpha: _float, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def hamming_window(window_length: _int, periodic: _bool, alpha: _float, beta: _float, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def hann_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def hann_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+def hardshrink(input: Tensor, lambd: Union[Number, _complex] = 0.5, *, out: Optional[Tensor] = None) -> Tensor: ...
+def heaviside(input: Tensor, values: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def hinge_embedding_loss(input: Tensor, target: Tensor, margin: _float = 1.0, reduction: _int = 1) -> Tensor: ...
+def histc(input: Tensor, bins: _int = 100, min: Union[Number, _complex] = 0, max: Union[Number, _complex] = 0, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def histogram(input: Tensor, bins: Tensor, *, weight: Optional[Tensor] = None, density: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.histogram: ...
+@overload
+def histogram(input: Tensor, bins: _int = 100, *, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.histogram: ...
+@overload
+def histogramdd(input: Tensor, bins: _int, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> torch.return_types.histogramdd: ...
+@overload
+def histogramdd(input: Tensor, bins: _size, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> torch.return_types.histogramdd: ...
+@overload
+def histogramdd(input: Tensor, bins: Union[Tuple[Tensor, ...], List[Tensor]], range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> torch.return_types.histogramdd: ...
+def hsmm(input: Tensor, mat2: Tensor) -> Tensor: ...
+@overload
+def hsplit(input: Tensor, sections: _int) -> List[Tensor]: ...
+@overload
+def hsplit(input: Tensor, indices: _size) -> List[Tensor]: ...
+def hspmm(mat1: Tensor, mat2: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def hstack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor: ...
+def hypot(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def i0(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def i0_(input: Tensor) -> Tensor: ...
+def igamma(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def igammac(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def imag(input: Tensor) -> Tensor: ...
+@overload
+def index_add(input: Tensor, dim: _int, index: Tensor, source: Tensor, *, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def index_add(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor: ...
+@overload
+def index_copy(input: Tensor, dim: _int, index: Tensor, source: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def index_copy(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor) -> Tensor: ...
+@overload
+def index_fill(input: Tensor, dim: _int, index: Tensor, value: Tensor) -> Tensor: ...
+@overload
+def index_fill(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, value: Tensor) -> Tensor: ...
+@overload
+def index_fill(input: Tensor, dim: _int, index: Tensor, value: Union[Number, _complex]) -> Tensor: ...
+@overload
+def index_fill(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, value: Union[Number, _complex]) -> Tensor: ...
+def index_put(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False) -> Tensor: ...
+def index_put_(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False) -> Tensor: ...
+def index_reduce(input: Tensor, dim: _int, index: Tensor, source: Tensor, reduce: str, *, include_self: _bool = True, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def index_select(input: Tensor, dim: _int, index: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def index_select(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def init_num_threads() -> None: ...
+def inner(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def instance_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], use_input_stats: _bool, momentum: _float, eps: _float, cudnn_enabled: _bool) -> Tensor: ...
+def int_repr(input: Tensor) -> Tensor: ...
+def inverse(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def is_complex(input: Tensor) -> _bool: ...
+def is_conj(input: Tensor) -> _bool: ...
+def is_distributed(input: Tensor) -> _bool: ...
+def is_floating_point(input: Tensor) -> _bool: ...
+def is_grad_enabled() -> _bool: ...
+def is_inference(input: Tensor) -> _bool: ...
+def is_inference_mode_enabled() -> _bool: ...
+def is_neg(input: Tensor) -> _bool: ...
+def is_nonzero(input: Tensor) -> _bool: ...
+def is_same_size(input: Tensor, other: Tensor) -> _bool: ...
+def is_signed(input: Tensor) -> _bool: ...
+def is_vulkan_available() -> _bool: ...
+def isclose(input: Tensor, other: Tensor, rtol: _float = 1e-05, atol: _float = 1e-08, equal_nan: _bool = False) -> Tensor: ...
+def isfinite(input: Tensor) -> Tensor: ...
+@overload
+def isin(elements: Tensor, test_elements: Tensor, *, assume_unique: _bool = False, invert: _bool = False, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def isin(element: Union[Number, _complex], test_elements: Tensor, *, assume_unique: _bool = False, invert: _bool = False, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def isin(elements: Tensor, test_element: Union[Number, _complex], *, assume_unique: _bool = False, invert: _bool = False, out: Optional[Tensor] = None) -> Tensor: ...
+def isinf(input: Tensor) -> Tensor: ...
+def isnan(input: Tensor) -> Tensor: ...
+def isneginf(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def isposinf(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def isreal(input: Tensor) -> Tensor: ...
+def istft(input: Tensor, n_fft: _int, hop_length: Optional[_int] = None, win_length: Optional[_int] = None, window: Optional[Tensor] = None, center: _bool = True, normalized: _bool = False, onesided: Optional[_bool] = None, length: Optional[_int] = None, return_complex: _bool = False) -> Tensor: ...
+@overload
+def kaiser_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def kaiser_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def kaiser_window(window_length: _int, periodic: _bool, beta: _float, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+def kl_div(input: Tensor, target: Tensor, reduction: _int = 1, *, log_target: _bool = False) -> Tensor: ...
+def kron(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def kthvalue(input: Tensor, k: _int, dim: _int = -1, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.kthvalue: ...
+@overload
+def kthvalue(input: Tensor, k: _int, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.kthvalue: ...
+def layer_norm(input: Tensor, normalized_shape: Sequence[Union[_int, SymInt]], weight: Optional[Tensor] = None, bias: Optional[Tensor] = None, eps: _float = 1e-05, cudnn_enable: _bool = True) -> Tensor: ...
+def lcm(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def lcm_(input: Tensor, other: Tensor) -> Tensor: ...
+def ldexp(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def ldexp_(input: Tensor, other: Tensor) -> Tensor: ...
+@overload
+def le(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def le(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def lerp(input: Tensor, end: Tensor, weight: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def lerp(input: Tensor, end: Tensor, weight: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def less(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def less(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def less_equal(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def less_equal(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
+def lgamma(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def linspace(start: Number, end: Number, steps: Optional[_int] = None, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: ...
+@overload
+def linspace(start: Tensor, end: Tensor, steps: _int, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def linspace(start: Union[Number, _complex], end: Tensor, steps: _int, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def linspace(start: Tensor, end: Union[Number, _complex], steps: _int, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def linspace(start: Union[Number, _complex], end: Union[Number, _complex], steps: _int, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+def log(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def log10(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def log10_(input: Tensor) -> Tensor: ...
+def log1p(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def log1p_(input: Tensor) -> Tensor: ...
+def log2(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def log2_(input: Tensor) -> Tensor: ...
+def log_(input: Tensor) -> Tensor: ...
+@overload
+def log_softmax(input: Tensor, dim: _int, dtype: Optional[_dtype] = None, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def log_softmax(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor: ...
+def logaddexp(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def logaddexp2(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def logcumsumexp(input: Tensor, dim: _int, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def logcumsumexp(input: Tensor, dim: Union[str, ellipsis, None], *, out: Optional[Tensor] = None) -> Tensor: ...
+def logdet(input: Tensor) -> Tensor: ...
+def logical_and(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def logical_not(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def logical_or(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def logical_xor(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def logit(input: Tensor, eps: Optional[_float] = None, *, out: Optional[Tensor] = None) -> Tensor: ...
+def logit_(input: Tensor, eps: Optional[_float] = None) -> Tensor: ...
+@overload
+def logspace(start: Number, end: Number, steps: Optional[_int] = None, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: ...
+@overload
+def logspace(start: Tensor, end: Tensor, steps: _int, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def logspace(start: Union[Number, _complex], end: Tensor, steps: _int, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def logspace(start: Tensor, end: Union[Number, _complex], steps: _int, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def logspace(start: Union[Number, _complex], end: Union[Number, _complex], steps: _int, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def logsumexp(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def logsumexp(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def lstm(data: Tensor, batch_sizes: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor, Tensor]: ...
+@overload
+def lstm(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor, Tensor]: ...
+def lstm_cell(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor] = None, b_hh: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: ...
+@overload
+def lt(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def lt(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
+def lu_solve(input: Tensor, LU_data: Tensor, LU_pivots: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def lu_unpack(LU_data: Tensor, LU_pivots: Tensor, unpack_data: _bool = True, unpack_pivots: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.lu_unpack: ...
+def margin_ranking_loss(input1: Tensor, input2: Tensor, target: Tensor, margin: _float = 0.0, reduction: _int = 1) -> Tensor: ...
+@overload
+def masked_fill(input: Tensor, mask: Tensor, value: Tensor) -> Tensor: ...
+@overload
+def masked_fill(input: Tensor, mask: Tensor, value: Union[Number, _complex]) -> Tensor: ...
+def masked_scatter(input: Tensor, mask: Tensor, source: Tensor) -> Tensor: ...
+def masked_select(input: Tensor, mask: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def matmul(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def matrix_exp(input: Tensor) -> Tensor: ...
+def matrix_power(input: Tensor, n: _int, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def max(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def max(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def max(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.max: ...
+@overload
+def max(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.max: ...
+def max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ...
+def max_pool1d_with_indices(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tuple[Tensor, Tensor]: ...
+def max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ...
+def max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ...
+def maximum(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def mean(input: Tensor, *, dtype: Optional[_dtype] = None) -> Tensor: ...
+@overload
+def mean(input: Tensor, dim: Optional[Union[_int, _size]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def median(input: Tensor) -> Tensor: ...
+@overload
+def median(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.median: ...
+@overload
+def median(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.median: ...
+@overload
+def min(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def min(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def min(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.min: ...
+@overload
+def min(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.min: ...
+def minimum(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def miopen_batch_norm(input: Tensor, weight: Tensor, bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, exponential_average_factor: _float, epsilon: _float) -> Tuple[Tensor, Tensor, Tensor]: ...
+def miopen_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool) -> Tensor: ...
+def miopen_convolution_add_relu(input: Tensor, weight: Tensor, z: Tensor, alpha: Optional[Union[Number, _complex]], bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
+def miopen_convolution_relu(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
+def miopen_convolution_transpose(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], output_padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool) -> Tensor: ...
+def miopen_depthwise_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool) -> Tensor: ...
+def miopen_rnn(input: Tensor, weight: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, hx: Tensor, cx: Optional[Tensor], mode: _int, hidden_size: _int, num_layers: _int, batch_first: _bool, dropout: _float, train: _bool, bidirectional: _bool, batch_sizes: _size, dropout_state: Optional[Tensor]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: ...
+def mkldnn_adaptive_avg_pool2d(input: Tensor, output_size: Union[_int, _size], *, out: Optional[Tensor] = None) -> Tensor: ...
+def mkldnn_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
+def mkldnn_linear_backward_weights(grad_output: Tensor, input: Tensor, weight: Tensor, bias_defined: _bool) -> Tuple[Tensor, Tensor]: ...
+def mkldnn_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ...
+def mkldnn_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ...
+def mkldnn_rnn_layer(input: Tensor, weight0: Tensor, weight1: Tensor, weight2: Tensor, weight3: Tensor, hx_: Tensor, cx_: Tensor, reverse: _bool, batch_sizes: _size, mode: _int, hidden_size: _int, num_layers: _int, has_biases: _bool, bidirectional: _bool, batch_first: _bool, train: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
+def mm(input: Tensor, mat2: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def mode(input: Tensor, dim: _int = -1, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.mode: ...
+@overload
+def mode(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.mode: ...
+@overload
+def moveaxis(input: Tensor, source: _int, destination: _int) -> Tensor: ...
+@overload
+def moveaxis(input: Tensor, source: _size, destination: _size) -> Tensor: ...
+@overload
+def movedim(input: Tensor, source: _int, destination: _int) -> Tensor: ...
+@overload
+def movedim(input: Tensor, source: _size, destination: _size) -> Tensor: ...
+def msort(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def mul(input: Union[Tensor, Number], other: Union[Tensor, Number], *, out: Optional[Tensor] = None) -> Tensor: ...
+def multinomial(input: Tensor, num_samples: _int, replacement: _bool = False, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def multiply(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def multiply(input: Tensor, other: Union[Number, _complex]) -> Tensor: ...
+def mv(input: Tensor, vec: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def mvlgamma(input: Tensor, p: _int, *, out: Optional[Tensor] = None) -> Tensor: ...
+def nan_to_num(input: Tensor, nan: Optional[_float] = None, posinf: Optional[_float] = None, neginf: Optional[_float] = None, *, out: Optional[Tensor] = None) -> Tensor: ...
+def nan_to_num_(input: Tensor, nan: Optional[_float] = None, posinf: Optional[_float] = None, neginf: Optional[_float] = None) -> Tensor: ...
+def nanmean(input: Tensor, dim: Optional[Union[_int, _size]] = None, keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def nanmedian(input: Tensor) -> Tensor: ...
+@overload
+def nanmedian(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.nanmedian: ...
+@overload
+def nanmedian(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.nanmedian: ...
+@overload
+def nanquantile(input: Tensor, q: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear", out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def nanquantile(input: Tensor, q: _float, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear", out: Optional[Tensor] = None) -> Tensor: ...
+def nansum(input: Tensor, dim: Optional[Union[_int, _size]] = None, keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def narrow(input: Tensor, dim: _int, start: Tensor, length: Union[_int, SymInt]) -> Tensor: ...
+@overload
+def narrow(input: Tensor, dim: _int, start: Union[_int, SymInt], length: Union[_int, SymInt]) -> Tensor: ...
+def narrow_copy(input: Tensor, dim: _int, start: Union[_int, SymInt], length: Union[_int, SymInt], *, out: Optional[Tensor] = None) -> Tensor: ...
+def native_batch_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, momentum: _float, eps: _float, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> Tuple[Tensor, Tensor, Tensor]: ...
+def native_channel_shuffle(input: Tensor, groups: Union[_int, SymInt]) -> Tensor: ...
+def native_dropout(input: Tensor, p: _float, train: Optional[_bool]) -> Tuple[Tensor, Tensor]: ...
+def native_group_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], N: Union[_int, SymInt], C: Union[_int, SymInt], HxW: Union[_int, SymInt], group: _int, eps: _float) -> Tuple[Tensor, Tensor, Tensor]: ...
+def native_layer_norm(input: Tensor, normalized_shape: Sequence[Union[_int, SymInt]], weight: Optional[Tensor], bias: Optional[Tensor], eps: _float) -> Tuple[Tensor, Tensor, Tensor]: ...
+@overload
+def native_norm(input: Tensor, p: Optional[Union[Number, _complex]], dim: Union[_int, _size], keepdim: _bool, dtype: Optional[_dtype]) -> Tensor: ...
+@overload
+def native_norm(input: Tensor, p: Union[Number, _complex] = 2) -> Tensor: ...
+@overload
+def ne(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def ne(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
+def neg(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def neg_(input: Tensor) -> Tensor: ...
+def negative(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def negative_(input: Tensor) -> Tensor: ...
+def nextafter(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def nonzero(input: Tensor, *, as_tuple: Literal[False] = False, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def nonzero(input: Tensor, *, as_tuple: Literal[True]) -> Tuple[Tensor, ...]: ...
+def nonzero_static(input: Tensor, *, size: _int, fill_value: _int = -1, out: Optional[Tensor] = None) -> Tensor: ...
+def norm_except_dim(v: Tensor, pow: _int = 2, dim: _int = 0) -> Tensor: ...
+@overload
+def normal(mean: Tensor, std: Tensor, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def normal(mean: Tensor, std: _float = 1, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def normal(mean: _float, std: Tensor, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def normal(mean: _float, std: _float, size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator] = None, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def not_equal(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def not_equal(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def nuclear_norm(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def nuclear_norm(input: Tensor, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ...
+def numel(self: Tensor) -> _int: ...
+@overload
+def ones(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def ones(*size: _int, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def ones(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def ones(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+def ones_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+def orgqr(input: Tensor, input2: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def ormqr(input: Tensor, input2: Tensor, input3: Tensor, left: _bool = True, transpose: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ...
+def outer(input: Tensor, vec2: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def pairwise_distance(x1: Tensor, x2: Tensor, p: _float = 2, eps: _float = 1e-06, keepdim: _bool = False) -> Tensor: ...
+def pdist(input: Tensor, p: _float = 2) -> Tensor: ...
+def permute(input: Tensor, dims: _size) -> Tensor: ...
+def permute_copy(input: Tensor, dims: _size, *, out: Optional[Tensor] = None) -> Tensor: ...
+def pinverse(input: Tensor, rcond: _float = 1e-15) -> Tensor: ...
+def pixel_shuffle(input: Tensor, upscale_factor: _int) -> Tensor: ...
+def pixel_unshuffle(input: Tensor, downscale_factor: _int) -> Tensor: ...
+def poisson(input: Tensor, generator: Optional[Generator] = None) -> Tensor: ...
+def poisson_nll_loss(input: Tensor, target: Tensor, log_input: _bool, full: _bool, eps: _float, reduction: _int) -> Tensor: ...
+def polar(abs: Tensor, angle: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def polygamma(n: _int, input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def positive(input: Tensor) -> Tensor: ...
+@overload
+def pow(input: Tensor, exponent: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def pow(self: Union[Number, _complex], exponent: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def pow(input: Tensor, exponent: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
+def prelu(input: Tensor, weight: Tensor) -> Tensor: ...
+@overload
+def prod(input: Tensor, *, dtype: Optional[_dtype] = None) -> Tensor: ...
+@overload
+def prod(input: Tensor, dim: _int, keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def prod(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: ...
+def promote_types(type1: _dtype, type2: _dtype) -> _dtype: ...
+def put(input: Tensor, index: Tensor, source: Tensor, accumulate: _bool = False) -> Tensor: ...
+def q_per_channel_axis(input: Tensor) -> _int: ...
+def q_per_channel_scales(input: Tensor) -> Tensor: ...
+def q_per_channel_zero_points(input: Tensor) -> Tensor: ...
+def q_scale(input: Tensor) -> _float: ...
+def q_zero_point(input: Tensor) -> _int: ...
+def qr(input: Tensor, some: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.qr: ...
+@overload
+def quantile(input: Tensor, q: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear", out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def quantile(input: Tensor, q: _float, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear", out: Optional[Tensor] = None) -> Tensor: ...
+def quantize_per_channel(input: Tensor, scales: Tensor, zero_points: Tensor, axis: _int, dtype: _dtype) -> Tensor: ...
+@overload
+def quantize_per_tensor(input: Tensor, scale: Tensor, zero_point: Tensor, dtype: _dtype) -> Tensor: ...
+@overload
+def quantize_per_tensor(input: Tensor, scale: _float, zero_point: _int, dtype: _dtype) -> Tensor: ...
+@overload
+def quantize_per_tensor(tensors: Union[Tuple[Tensor, ...], List[Tensor]], scales: Tensor, zero_points: Tensor, dtype: _dtype) -> List[Tensor]: ...
+def quantize_per_tensor_dynamic(input: Tensor, dtype: _dtype, reduce_range: _bool) -> Tensor: ...
+def quantized_batch_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], mean: Tensor, var: Tensor, eps: _float, output_scale: _float, output_zero_point: _int) -> Tensor: ...
+def quantized_gru_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Union[Number, _complex], scale_hh: Union[Number, _complex], zero_point_ih: Union[Number, _complex], zero_point_hh: Union[Number, _complex]) -> Tensor: ...
+def quantized_lstm_cell(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Union[Number, _complex], scale_hh: Union[Number, _complex], zero_point_ih: Union[Number, _complex], zero_point_hh: Union[Number, _complex]) -> Tuple[Tensor, Tensor]: ...
+def quantized_max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ...
+def quantized_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ...
+def quantized_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ...
+def quantized_rnn_relu_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Union[Number, _complex], scale_hh: Union[Number, _complex], zero_point_ih: Union[Number, _complex], zero_point_hh: Union[Number, _complex]) -> Tensor: ...
+def quantized_rnn_tanh_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Union[Number, _complex], scale_hh: Union[Number, _complex], zero_point_ih: Union[Number, _complex], zero_point_hh: Union[Number, _complex]) -> Tensor: ...
+def rad2deg(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def rad2deg_(input: Tensor) -> Tensor: ...
+@overload
+def rand(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def rand(*size: _int, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def rand(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def rand(*size: _int, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def rand(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def rand(*size: _int, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def rand(size: Sequence[Union[_int, SymInt]], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def rand(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+def rand_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def randint(low: _int, high: _int, size: _size, *, generator: Optional[Generator] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: ...
+@overload
+def randint(high: _int, size: _size, *, generator: Optional[Generator] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: ...
+@overload
+def randint(high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def randint(high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def randint(low: Union[_int, SymInt], high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def randint(low: Union[_int, SymInt], high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def randint_like(input: Tensor, high: Union[_int, SymInt], *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def randint_like(input: Tensor, low: Union[_int, SymInt], high: Union[_int, SymInt], *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def randn(*size: _int, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def randn(*size: _int, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def randn(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def randn(*size: _int, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def randn(size: Sequence[Union[_int, SymInt]], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def randn(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+def randn_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def randperm(n: Union[_int, SymInt], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def randperm(n: Union[_int, SymInt], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+def range(start: Number, end: Number, step: Number = 1, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: ...
+def ravel(input: Tensor) -> Tensor: ...
+def real(input: Tensor) -> Tensor: ...
+def reciprocal(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def reciprocal_(input: Tensor) -> Tensor: ...
+def relu(input: Tensor) -> Tensor: ...
+def relu_(input: Tensor) -> Tensor: ...
+@overload
+def remainder(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def remainder(self: Union[Number, _complex], other: Tensor) -> Tensor: ...
+@overload
+def remainder(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
+def renorm(input: Tensor, p: Union[Number, _complex], dim: _int, maxnorm: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def repeat_interleave(input: Tensor, repeats: Tensor, dim: Optional[_int] = None, *, output_size: Optional[Union[_int, SymInt]] = None) -> Tensor: ...
+@overload
+def repeat_interleave(repeats: Tensor, *, output_size: Optional[Union[_int, SymInt]] = None) -> Tensor: ...
+@overload
+def repeat_interleave(input: Tensor, repeats: Union[_int, SymInt], dim: Optional[_int] = None, *, output_size: Optional[Union[_int, SymInt]] = None) -> Tensor: ...
+def reshape(input: Tensor, shape: Sequence[Union[_int, SymInt]]) -> Tensor: ...
+def resize_as_(input: Tensor, the_template: Tensor, *, memory_format: Optional[memory_format] = None) -> Tensor: ...
+def resize_as_sparse_(input: Tensor, the_template: Tensor) -> Tensor: ...
+def resolve_conj(input: Tensor) -> Tensor: ...
+def resolve_neg(input: Tensor) -> Tensor: ...
+@overload
+def result_type(tensor: Tensor, other: Tensor) -> _dtype: ...
+@overload
+def result_type(scalar: Union[Number, _complex], tensor: Tensor) -> _dtype: ...
+@overload
+def result_type(tensor: Tensor, other: Union[Number, _complex]) -> _dtype: ...
+@overload
+def result_type(scalar1: Union[Number, _complex], scalar2: Union[Number, _complex]) -> _dtype: ...
+@overload
+def rnn_relu(data: Tensor, batch_sizes: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor]: ...
+@overload
+def rnn_relu(input: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor]: ...
+def rnn_relu_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor] = None, b_hh: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def rnn_tanh(data: Tensor, batch_sizes: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor]: ...
+@overload
+def rnn_tanh(input: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor]: ...
+def rnn_tanh_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor] = None, b_hh: Optional[Tensor] = None) -> Tensor: ...
+def roll(input: Tensor, shifts: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]], dims: Union[_int, _size] = ()) -> Tensor: ...
+def rot90(input: Tensor, k: _int = 1, dims: _size = (0,1)) -> Tensor: ...
+@overload
+def round(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def round(input: Tensor, *, decimals: _int, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def round_(input: Tensor) -> Tensor: ...
+@overload
+def round_(input: Tensor, *, decimals: _int) -> Tensor: ...
+def row_indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def row_stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor: ...
+def rrelu(input: Tensor, lower: Union[Number, _complex] = 0.125, upper: Union[Number, _complex] = 0.3333333333333333, training: _bool = False, generator: Optional[Generator] = None) -> Tensor: ...
+def rrelu_(input: Tensor, lower: Union[Number, _complex] = 0.125, upper: Union[Number, _complex] = 0.3333333333333333, training: _bool = False, generator: Optional[Generator] = None) -> Tensor: ...
+def rsqrt(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def rsqrt_(input: Tensor) -> Tensor: ...
+@overload
+def rsub(input: Tensor, other: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor: ...
+@overload
+def rsub(input: Tensor, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor: ...
+def saddmm(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Number = 1, alpha: Number = 1, out: Optional[Tensor] = None) -> Tensor: ...
+def scalar_tensor(s: Union[Number, _complex], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def scatter(input: Tensor, dim: _int, index: Tensor, src: Tensor, *, reduce: str, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def scatter(input: Tensor, dim: _int, index: Tensor, src: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def scatter(input: Tensor, dim: _int, index: Tensor, value: Union[Number, _complex], *, reduce: str, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def scatter(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, src: Tensor) -> Tensor: ...
+@overload
+def scatter(input: Tensor, dim: _int, index: Tensor, value: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def scatter(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, value: Union[Number, _complex]) -> Tensor: ...
+@overload
+def scatter_add(input: Tensor, dim: _int, index: Tensor, src: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def scatter_add(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, src: Tensor) -> Tensor: ...
+def scatter_reduce(input: Tensor, dim: _int, index: Tensor, src: Tensor, reduce: str, *, include_self: _bool = True, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def searchsorted(sorted_sequence: Tensor, input: Tensor, *, out_int32: _bool = False, right: _bool = False, side: Optional[str] = None, sorter: Optional[Tensor] = None, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def searchsorted(sorted_sequence: Tensor, self: Union[Number, _complex], *, out_int32: _bool = False, right: _bool = False, side: Optional[str] = None, sorter: Optional[Tensor] = None, out: Optional[Tensor] = None) -> Tensor: ...
+def segment_reduce(data: Tensor, reduce: str, *, lengths: Optional[Tensor] = None, indices: Optional[Tensor] = None, offsets: Optional[Tensor] = None, axis: _int = 0, unsafe: _bool = False, initial: Optional[Union[Number, _complex]] = None) -> Tensor: ...
+@overload
+def select(input: Tensor, dim: _int, index: Union[_int, SymInt]) -> Tensor: ...
+@overload
+def select(input: Tensor, dim: Union[str, ellipsis, None], index: _int) -> Tensor: ...
+def select_copy(input: Tensor, dim: _int, index: Union[_int, SymInt], *, out: Optional[Tensor] = None) -> Tensor: ...
+def select_scatter(input: Tensor, src: Tensor, dim: _int, index: Union[_int, SymInt]) -> Tensor: ...
+def selu(input: Tensor) -> Tensor: ...
+def selu_(input: Tensor) -> Tensor: ...
+def set_flush_denormal(mode: _bool) -> _bool: ...
+def set_num_interop_threads(num: _int) -> None: ...
+def set_num_threads(num: _int) -> None: ...
+def sgn(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def sigmoid(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def sigmoid_(input: Tensor) -> Tensor: ...
+def sign(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def signbit(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def sin(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def sin_(input: Tensor) -> Tensor: ...
+def sinc(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def sinc_(input: Tensor) -> Tensor: ...
+def sinh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def sinh_(input: Tensor) -> Tensor: ...
+def slice_copy(input: Tensor, dim: _int = 0, start: Optional[Union[_int, SymInt]] = None, end: Optional[Union[_int, SymInt]] = None, step: Union[_int, SymInt] = 1, *, out: Optional[Tensor] = None) -> Tensor: ...
+def slice_scatter(input: Tensor, src: Tensor, dim: _int = 0, start: Optional[Union[_int, SymInt]] = None, end: Optional[Union[_int, SymInt]] = None, step: Union[_int, SymInt] = 1) -> Tensor: ...
+def slogdet(input: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.slogdet: ...
+def smm(input: Tensor, mat2: Tensor) -> Tensor: ...
+@overload
+def softmax(input: Tensor, dim: _int, dtype: Optional[_dtype] = None, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def softmax(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor: ...
+@overload
+def sort(input: Tensor, *, stable: Optional[_bool], dim: _int = -1, descending: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.sort: ...
+@overload
+def sort(input: Tensor, dim: _int = -1, descending: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.sort: ...
+@overload
+def sort(input: Tensor, *, stable: Optional[_bool], dim: Union[str, ellipsis, None], descending: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.sort: ...
+@overload
+def sort(input: Tensor, dim: Union[str, ellipsis, None], descending: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.sort: ...
+def sparse_bsc_tensor(ccol_indices: Union[Tensor, List], row_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor: ...
+def sparse_bsr_tensor(crow_indices: Union[Tensor, List], col_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor: ...
+def sparse_compressed_tensor(compressed_indices: Union[Tensor, List], plain_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor: ...
+def sparse_coo_tensor(indices: Tensor, values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None, is_coalesced: Optional[_bool] = None) -> Tensor: ...
+def sparse_csc_tensor(ccol_indices: Union[Tensor, List], row_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor: ...
+def sparse_csr_tensor(crow_indices: Union[Tensor, List], col_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor: ...
+def split_copy(input: Tensor, split_size: Union[_int, SymInt], dim: _int = 0, *, out: Union[Tuple[Tensor, ...], List[Tensor], None] = None) -> None: ...
+def split_with_sizes(input: Tensor, split_sizes: Sequence[Union[_int, SymInt]], dim: _int = 0) -> List[Tensor]: ...
+def split_with_sizes_copy(input: Tensor, split_sizes: Sequence[Union[_int, SymInt]], dim: _int = 0, *, out: Union[Tuple[Tensor, ...], List[Tensor], None] = None) -> None: ...
+def spmm(input: Tensor, mat2: Tensor) -> Tensor: ...
+def sqrt(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def sqrt_(input: Tensor) -> Tensor: ...
+def square(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def square_(input: Tensor) -> Tensor: ...
+@overload
+def squeeze(input: Tensor) -> Tensor: ...
+@overload
+def squeeze(input: Tensor, dim: _int) -> Tensor: ...
+@overload
+def squeeze(input: Tensor, dim: _size) -> Tensor: ...
+@overload
+def squeeze(input: Tensor, dim: Union[str, ellipsis, None]) -> Tensor: ...
+@overload
+def squeeze_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def squeeze_copy(input: Tensor, dim: _int, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def squeeze_copy(input: Tensor, dim: _size, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def sspaddmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat1: Tensor, mat2: Tensor) -> Tensor: ...
+@overload
+def sspaddmm(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def sspaddmm(beta: Union[Number, _complex], self: Tensor, mat1: Tensor, mat2: Tensor) -> Tensor: ...
+def stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def std(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def std(input: Tensor, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def std(input: Tensor, unbiased: _bool = True) -> Tensor: ...
+@overload
+def std(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def std(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def std_mean(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: ...
+@overload
+def std_mean(input: Tensor, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: ...
+@overload
+def std_mean(input: Tensor, unbiased: _bool = True) -> Tuple[Tensor, Tensor]: ...
+@overload
+def std_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: ...
+@overload
+def std_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: ...
+@overload
+def sub(input: Union[Tensor, Number], other: Union[Tensor, Number], *, alpha: Optional[Number] = 1, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def sub(self: Tensor, alpha: Union[Number, _complex], other: Tensor) -> Tensor: ...
+@overload
+def sub(self: Tensor, alpha: Union[Number, _complex], other: Tensor, *, out: Tensor) -> Tensor: ...
+@overload
+def subtract(input: Tensor, other: Tensor, *, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def subtract(input: Tensor, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor: ...
+@overload
+def sum(input: Tensor, *, dtype: Optional[_dtype] = None) -> Tensor: ...
+@overload
+def sum(input: Tensor, dim: Optional[Union[_int, _size]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def sum(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: ...
+def svd(input: Tensor, some: _bool = True, compute_uv: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.svd: ...
+def swapaxes(input: Tensor, axis0: _int, axis1: _int) -> Tensor: ...
+def swapdims(input: Tensor, dim0: _int, dim1: _int) -> Tensor: ...
+def sym_constrain_range(size: Union[Number, _complex], *, min: Optional[_int] = None, max: Optional[_int] = None) -> None: ...
+def sym_constrain_range_for_size(size: Union[Number, _complex], *, min: Optional[_int] = None, max: Optional[_int] = None) -> None: ...
+def t(input: Tensor) -> Tensor: ...
+def t_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def take(input: Tensor, index: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def take_along_dim(input: Tensor, indices: Tensor, dim: Optional[_int] = None, *, out: Optional[Tensor] = None) -> Tensor: ...
+def tan(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def tan_(input: Tensor) -> Tensor: ...
+def tanh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def tanh_(input: Tensor) -> Tensor: ...
+def tensor(data: Any, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: ...
+@overload
+def tensor_split(input: Tensor, tensor_indices_or_sections: Tensor, dim: _int = 0) -> List[Tensor]: ...
+@overload
+def tensor_split(input: Tensor, sections: Union[_int, SymInt], dim: _int = 0) -> List[Tensor]: ...
+@overload
+def tensor_split(input: Tensor, indices: Sequence[Union[_int, SymInt]], dim: _int = 0) -> List[Tensor]: ...
+def threshold(input: Tensor, threshold: Union[Number, _complex], value: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
+def threshold_(input: Tensor, threshold: Union[Number, _complex], value: Union[Number, _complex]) -> Tensor: ...
+def tile(input: Tensor, dims: Sequence[Union[_int, SymInt]]) -> Tensor: ...
+def topk(input: Tensor, k: Union[_int, SymInt], dim: _int = -1, largest: _bool = True, sorted: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.topk: ...
+def trace(input: Tensor) -> Tensor: ...
+@overload
+def transpose(input: Tensor, dim0: _int, dim1: _int) -> Tensor: ...
+@overload
+def transpose(input: Tensor, dim0: Union[str, ellipsis, None], dim1: Union[str, ellipsis, None]) -> Tensor: ...
+def transpose_copy(input: Tensor, dim0: _int, dim1: _int, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def trapezoid(y: Tensor, x: Tensor, *, dim: _int = -1) -> Tensor: ...
+@overload
+def trapezoid(y: Tensor, *, dx: Union[Number, _complex] = 1, dim: _int = -1) -> Tensor: ...
+@overload
+def trapz(y: Tensor, *, dx: _float = 1, dim: _int = -1) -> Tensor: ...
+@overload
+def trapz(y: Tensor, x: Tensor, *, dim: _int = -1) -> Tensor: ...
+def triangular_solve(input: Tensor, A: Tensor, upper: _bool = True, transpose: _bool = False, unitriangular: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.triangular_solve: ...
+def tril(input: Tensor, diagonal: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: ...
+def tril_indices(row: _int, col: _int, offset: _int = 0, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+def triplet_margin_loss(anchor: Tensor, positive: Tensor, negative: Tensor, margin: _float = 1.0, p: _float = 2, eps: _float = 1e-06, swap: _bool = False, reduction: _int = 1) -> Tensor: ...
+def triu(input: Tensor, diagonal: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: ...
+def triu_indices(row: _int, col: _int, offset: _int = 0, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+def true_divide(input: Union[Tensor, Number], other: Union[Tensor, Number], *, out: Optional[Tensor] = None) -> Tensor: ...
+def trunc(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def trunc_(input: Tensor) -> Tensor: ...
+@overload
+def unbind(input: Tensor, dim: _int = 0) -> List[Tensor]: ...
+@overload
+def unbind(input: Tensor, dim: Union[str, ellipsis, None]) -> List[Tensor]: ...
+def unbind_copy(input: Tensor, dim: _int = 0, *, out: Union[Tuple[Tensor, ...], List[Tensor], None] = None) -> None: ...
+@overload
+def unflatten(input: Tensor, dim: Union[str, ellipsis, None], sizes: Sequence[Union[_int, SymInt]], names: Sequence[Union[str, ellipsis, None]]) -> Tensor: ...
+@overload
+def unflatten(input: Tensor, dim: _int, sizes: Sequence[Union[_int, SymInt]]) -> Tensor: ...
+def unfold_copy(input: Tensor, dimension: _int, size: _int, step: _int, *, out: Optional[Tensor] = None) -> Tensor: ...
+def unique_dim(input: Tensor, dim: _int, sorted: _bool = True, return_inverse: _bool = False, return_counts: _bool = False) -> Tuple[Tensor, Tensor, Tensor]: ...
+def unsafe_chunk(input: Tensor, chunks: _int, dim: _int = 0) -> List[Tensor]: ...
+def unsafe_split(input: Tensor, split_size: Union[_int, SymInt], dim: _int = 0) -> List[Tensor]: ...
+def unsafe_split_with_sizes(input: Tensor, split_sizes: Sequence[Union[_int, SymInt]], dim: _int = 0) -> List[Tensor]: ...
+def unsqueeze(input: Tensor, dim: _int) -> Tensor: ...
+def unsqueeze_copy(input: Tensor, dim: _int, *, out: Optional[Tensor] = None) -> Tensor: ...
+def values_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def vander(x: Tensor, N: Optional[_int] = None, increasing: _bool = False) -> Tensor: ...
+@overload
+def var(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def var(input: Tensor, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def var(input: Tensor, unbiased: _bool = True) -> Tensor: ...
+@overload
+def var(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def var(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def var_mean(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: ...
+@overload
+def var_mean(input: Tensor, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: ...
+@overload
+def var_mean(input: Tensor, unbiased: _bool = True) -> Tuple[Tensor, Tensor]: ...
+@overload
+def var_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: ...
+@overload
+def var_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: ...
+def vdot(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def view_as_complex(input: Tensor) -> Tensor: ...
+def view_as_complex_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+def view_as_real(input: Tensor) -> Tensor: ...
+def view_as_real_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def view_copy(input: Tensor, dtype: _dtype, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def view_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def vsplit(input: Tensor, sections: _int) -> List[Tensor]: ...
+@overload
+def vsplit(input: Tensor, indices: _size) -> List[Tensor]: ...
+def vstack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def where(condition: Tensor) -> List[Tensor]: ...
+@overload
+def where(condition: Tensor, input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def where(condition: Tensor, self: Union[Number, _complex], other: Tensor) -> Tensor: ...
+@overload
+def where(condition: Tensor, input: Tensor, other: Union[Number, _complex]) -> Tensor: ...
+@overload
+def where(condition: Tensor, self: Union[Number, _complex], other: Union[Number, _complex]) -> Tensor: ...
+@overload
+def xlogy(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def xlogy(self: Union[Number, _complex], other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def xlogy(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
+@overload
+def xlogy_(input: Tensor, other: Tensor) -> Tensor: ...
+@overload
+def xlogy_(input: Tensor, other: Union[Number, _complex]) -> Tensor: ...
+def zero_(input: Tensor) -> Tensor: ...
+@overload
+def zeros(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def zeros(*size: _int, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def zeros(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+@overload
+def zeros(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+def zeros_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
+
+__all__ = ['__and__', '__lshift__', '__or__', '__rshift__', '__xor__', '_adaptive_avg_pool2d',
+ '_adaptive_avg_pool3d', '_add_batch_dim', '_add_relu', '_add_relu_', '_addmm_activation',
+ '_aminmax', '_amp_foreach_non_finite_check_and_unscale_', '_amp_update_scale_', '_assert_async',
+ '_assert_tensor_metadata', '_batch_norm_impl_index', '_cast_Byte', '_cast_Char', '_cast_Double',
+ '_cast_Float', '_cast_Half', '_cast_Int', '_cast_Long', '_cast_Short',
+ '_choose_qparams_per_tensor', '_coalesce', '_compute_linear_combination', '_conj', '_conj_copy',
+ '_conj_physical', '_convert_indices_from_coo_to_csr', '_convert_indices_from_csr_to_coo',
+ '_convert_weight_to_int4pack', '_convolution', '_convolution_mode', '_copy_from',
+ '_copy_from_and_resize', '_cslt_compress', '_cslt_sparse_mm', '_ctc_loss', '_cudnn_ctc_loss',
+ '_cudnn_init_dropout_state', '_cudnn_rnn', '_cudnn_rnn_flatten_weight', '_cufft_clear_plan_cache',
+ '_cufft_get_plan_cache_max_size', '_cufft_get_plan_cache_size', '_cufft_set_plan_cache_max_size',
+ '_cummax_helper', '_cummin_helper', '_debug_has_internal_overlap', '_dim_arange',
+ '_dirichlet_grad', '_disable_functionalization', '_efficientzerotensor', '_embedding_bag',
+ '_embedding_bag_forward_only', '_empty_affine_quantized', '_empty_per_channel_affine_quantized',
+ '_enable_functionalization', '_euclidean_dist', '_fake_quantize_learnable_per_channel_affine',
+ '_fake_quantize_learnable_per_tensor_affine',
+ '_fake_quantize_per_tensor_affine_cachemask_tensor_qparams',
+ '_fake_quantize_per_tensor_affine_cachemask_tensor_qparams', '_fft_c2c', '_fft_c2r', '_fft_r2c',
+ '_fill_mem_eff_dropout_mask_', '_foobar', '_foreach_abs', '_foreach_abs_', '_foreach_acos',
+ '_foreach_acos_', '_foreach_add', '_foreach_add_', '_foreach_addcdiv', '_foreach_addcdiv_',
+ '_foreach_addcmul', '_foreach_addcmul_', '_foreach_asin', '_foreach_asin_', '_foreach_atan',
+ '_foreach_atan_', '_foreach_ceil', '_foreach_ceil_', '_foreach_clamp_max', '_foreach_clamp_max_',
+ '_foreach_clamp_min', '_foreach_clamp_min_', '_foreach_copy_', '_foreach_cos', '_foreach_cos_',
+ '_foreach_cosh', '_foreach_cosh_', '_foreach_div', '_foreach_div_', '_foreach_erf',
+ '_foreach_erf_', '_foreach_erfc', '_foreach_erfc_', '_foreach_exp', '_foreach_exp_',
+ '_foreach_expm1', '_foreach_expm1_', '_foreach_floor', '_foreach_floor_', '_foreach_frac',
+ '_foreach_frac_', '_foreach_lerp', '_foreach_lerp_', '_foreach_lgamma', '_foreach_lgamma_',
+ '_foreach_log', '_foreach_log10', '_foreach_log10_', '_foreach_log1p', '_foreach_log1p_',
+ '_foreach_log2', '_foreach_log2_', '_foreach_log_', '_foreach_maximum', '_foreach_maximum_',
+ '_foreach_minimum', '_foreach_minimum_', '_foreach_mul', '_foreach_mul_', '_foreach_neg',
+ '_foreach_neg_', '_foreach_norm', '_foreach_pow', '_foreach_pow_', '_foreach_reciprocal',
+ '_foreach_reciprocal_', '_foreach_round', '_foreach_round_', '_foreach_sigmoid',
+ '_foreach_sigmoid_', '_foreach_sign', '_foreach_sign_', '_foreach_sin', '_foreach_sin_',
+ '_foreach_sinh', '_foreach_sinh_', '_foreach_sqrt', '_foreach_sqrt_', '_foreach_sub',
+ '_foreach_sub_', '_foreach_tan', '_foreach_tan_', '_foreach_tanh', '_foreach_tanh_',
+ '_foreach_trunc', '_foreach_trunc_', '_foreach_zero_', '_from_functional_tensor',
+ '_functional_assert_async', '_functional_sym_constrain_range',
+ '_functional_sym_constrain_range_for_size',
+ '_functionalize_are_all_mutations_hidden_from_autograd',
+ '_functionalize_are_all_mutations_under_no_grad_or_inference_mode', '_functionalize_commit_update',
+ '_functionalize_mark_mutation_hidden_from_autograd', '_functionalize_replace',
+ '_functionalize_sync', '_fused_adam_', '_fused_adamw_', '_fused_dropout',
+ '_fused_moving_avg_obs_fq_helper', '_fused_moving_avg_obs_fq_helper', '_fused_sdp_choice',
+ '_fw_primal_copy', '_grid_sampler_2d_cpu_fallback', '_has_compatible_shallow_copy_type',
+ '_histogramdd_bin_edges', '_histogramdd_from_bin_cts', '_histogramdd_from_bin_tensors',
+ '_index_put_impl_', '_indices_copy', '_int_mm', '_is_all_true', '_is_any_true',
+ '_is_functional_tensor', '_is_zerotensor', '_linalg_check_errors', '_linalg_det', '_linalg_det',
+ '_linalg_eigh', '_linalg_eigh', '_linalg_slogdet', '_linalg_slogdet', '_linalg_solve_ex',
+ '_linalg_solve_ex', '_linalg_svd', '_linalg_svd', '_log_softmax', '_log_softmax_backward_data',
+ '_logcumsumexp', '_lstm_mps', '_lu_with_info', '_lu_with_info', '_make_dep_token', '_make_dual',
+ '_make_dual_copy', '_make_per_channel_quantized_tensor', '_make_per_tensor_quantized_tensor',
+ '_masked_scale', '_masked_softmax', '_mixed_dtypes_linear', '_mkldnn_reshape', '_mkldnn_transpose',
+ '_mkldnn_transpose_', '_mps_convolution', '_mps_convolution_transpose', '_native_batch_norm_legit',
+ '_native_batch_norm_legit_no_training', '_native_multi_head_attention', '_neg_view',
+ '_neg_view_copy', '_nested_from_padded', '_nested_from_padded_and_nested_example',
+ '_nested_tensor_from_mask', '_nested_tensor_from_mask_left_aligned',
+ '_nested_tensor_from_tensor_list', '_nested_tensor_softmax_with_shape', '_nested_view_from_buffer',
+ '_nested_view_from_buffer_copy', '_nnpack_available', '_nnpack_spatial_convolution',
+ '_pack_padded_sequence', '_pad_packed_sequence', '_pin_memory', '_prelu_kernel',
+ '_propagate_xla_data', '_remove_batch_dim', '_reshape_alias_copy', '_reshape_from_tensor',
+ '_resize_output_', '_rowwise_prune', '_sample_dirichlet', '_saturate_weight_to_fp16',
+ '_scaled_dot_product_attention_math', '_scaled_dot_product_efficient_attention',
+ '_scaled_dot_product_efficient_attention', '_scaled_dot_product_flash_attention',
+ '_scaled_dot_product_flash_attention', '_scaled_mm', '_shape_as_tensor', '_sobol_engine_draw',
+ '_sobol_engine_ff_', '_sobol_engine_initialize_state_', '_sobol_engine_scramble_', '_softmax',
+ '_softmax_backward_data', '_sparse_broadcast_to', '_sparse_broadcast_to_copy', '_sparse_csr_prod',
+ '_sparse_csr_sum', '_sparse_log_softmax_backward_data', '_sparse_semi_structured_linear',
+ '_sparse_softmax_backward_data', '_sparse_sparse_matmul', '_sparse_sum', '_stack',
+ '_standard_gamma', '_standard_gamma_grad', '_sync', '_test_autograd_multiple_dispatch',
+ '_test_autograd_multiple_dispatch_view', '_test_autograd_multiple_dispatch_view_copy',
+ '_test_check_tensor', '_test_functorch_fallback', '_test_serialization_subcmul', '_to_cpu',
+ '_to_functional_tensor', '_to_sparse_semi_structured', '_transform_bias_rescale_qkv',
+ '_transformer_encoder_layer_fwd', '_trilinear', '_triton_multi_head_attention',
+ '_triton_scaled_dot_attention', '_unique', '_unique2', '_unpack_dual', '_unpack_dual',
+ '_unsafe_index', '_unsafe_index_put', '_use_cudnn_ctc_loss', '_use_cudnn_rnn_flatten_weight',
+ '_validate_compressed_sparse_indices', '_validate_sparse_bsc_tensor_args',
+ '_validate_sparse_bsr_tensor_args', '_validate_sparse_compressed_tensor_args',
+ '_validate_sparse_coo_tensor_args', '_validate_sparse_csc_tensor_args',
+ '_validate_sparse_csr_tensor_args', '_values_copy', '_weight_int4pack_mm', '_weight_norm',
+ '_weight_norm_interface', 'abs', 'abs_', 'absolute', 'acos', 'acos_', 'acosh', 'acosh_',
+ 'adaptive_avg_pool1d', 'adaptive_max_pool1d', 'add', 'addbmm', 'addcdiv', 'addcmul', 'addmm',
+ 'addmv', 'addmv_', 'addr', 'adjoint', 'affine_grid_generator', 'alias_copy', 'all', 'allclose',
+ 'alpha_dropout', 'alpha_dropout_', 'amax', 'amin', 'aminmax', 'aminmax', 'angle', 'any', 'arange',
+ 'arccos', 'arccos_', 'arccosh', 'arccosh_', 'arcsin', 'arcsin_', 'arcsinh', 'arcsinh_', 'arctan',
+ 'arctan2', 'arctan_', 'arctanh', 'arctanh_', 'argmax', 'argmin', 'argsort', 'argwhere',
+ 'as_strided', 'as_strided_', 'as_strided_copy', 'as_strided_scatter', 'as_tensor', 'asarray',
+ 'asin', 'asin_', 'asinh', 'asinh_', 'atan', 'atan2', 'atan_', 'atanh', 'atanh_', 'avg_pool1d',
+ 'baddbmm', 'bartlett_window', 'batch_norm', 'batch_norm_backward_elemt',
+ 'batch_norm_backward_reduce', 'batch_norm_elemt', 'batch_norm_gather_stats',
+ 'batch_norm_gather_stats_with_counts', 'batch_norm_stats', 'batch_norm_update_stats', 'bernoulli',
+ 'bilinear', 'binary_cross_entropy_with_logits', 'bincount', 'binomial', 'bitwise_and',
+ 'bitwise_left_shift', 'bitwise_not', 'bitwise_or', 'bitwise_right_shift', 'bitwise_xor',
+ 'blackman_window', 'bmm', 'broadcast_to', 'bucketize', 'can_cast', 'cat', 'ccol_indices_copy',
+ 'ceil', 'ceil_', 'celu', 'celu_', 'channel_shuffle', 'cholesky', 'cholesky_inverse',
+ 'cholesky_solve', 'choose_qparams_optimized', 'chunk', 'clamp', 'clamp_', 'clamp_max',
+ 'clamp_max_', 'clamp_min', 'clamp_min_', 'clip', 'clip_', 'clone', 'col_indices_copy',
+ 'column_stack', 'combinations', 'complex', 'concat', 'concatenate', 'conj', 'conj_physical',
+ 'conj_physical_', 'constant_pad_nd', 'conv1d', 'conv2d', 'conv3d', 'conv_tbc', 'conv_transpose1d',
+ 'conv_transpose2d', 'conv_transpose3d', 'convolution', 'copysign', 'corrcoef', 'cos', 'cos_',
+ 'cosh', 'cosh_', 'cosine_embedding_loss', 'cosine_similarity', 'count_nonzero', 'cov', 'cross',
+ 'crow_indices_copy', 'ctc_loss', 'cudnn_affine_grid_generator', 'cudnn_batch_norm',
+ 'cudnn_convolution', 'cudnn_convolution_add_relu', 'cudnn_convolution_relu',
+ 'cudnn_convolution_transpose', 'cudnn_grid_sampler', 'cudnn_is_acceptable', 'cummax', 'cummax',
+ 'cummin', 'cummin', 'cumprod', 'cumsum', 'cumulative_trapezoid', 'deg2rad', 'deg2rad_',
+ 'dequantize', 'det', 'detach', 'detach_', 'detach_copy', 'diag', 'diag_embed', 'diagflat',
+ 'diagonal', 'diagonal_copy', 'diagonal_scatter', 'diff', 'digamma', 'dist', 'div', 'divide', 'dot',
+ 'dropout', 'dropout_', 'dsmm', 'dsplit', 'dstack', 'embedding', 'embedding_bag',
+ 'embedding_renorm_', 'empty', 'empty_like', 'empty_permuted', 'empty_quantized', 'empty_strided',
+ 'eq', 'equal', 'erf', 'erf_', 'erfc', 'erfc_', 'erfinv', 'exp', 'exp2', 'exp2_', 'exp_',
+ 'expand_copy', 'expm1', 'expm1_', 'eye', 'fake_quantize_per_channel_affine',
+ 'fake_quantize_per_tensor_affine', 'fbgemm_linear_fp16_weight',
+ 'fbgemm_linear_fp16_weight_fp32_activation', 'fbgemm_linear_int8_weight',
+ 'fbgemm_linear_int8_weight_fp32_activation', 'fbgemm_linear_quantize_weight',
+ 'fbgemm_pack_gemm_matrix_fp16', 'fbgemm_pack_quantized_matrix', 'feature_alpha_dropout',
+ 'feature_alpha_dropout_', 'feature_dropout', 'feature_dropout_', 'fill', 'fill_', 'fix', 'fix_',
+ 'flatten', 'flip', 'fliplr', 'flipud', 'float_power', 'floor', 'floor_', 'floor_divide', 'fmax',
+ 'fmin', 'fmod', 'frac', 'frac_', 'frexp', 'frexp', 'frobenius_norm', 'from_file', 'from_numpy',
+ 'frombuffer', 'full', 'full_like', 'fused_moving_avg_obs_fake_quant', 'gather', 'gcd', 'gcd_',
+ 'ge', 'geqrf', 'geqrf', 'ger', 'get_default_dtype', 'get_num_interop_threads', 'get_num_threads',
+ 'gradient', 'greater', 'greater_equal', 'grid_sampler', 'grid_sampler_2d', 'grid_sampler_3d',
+ 'group_norm', 'gru', 'gru_cell', 'gt', 'hamming_window', 'hann_window', 'hardshrink', 'heaviside',
+ 'hinge_embedding_loss', 'histc', 'histogram', 'histogram', 'histogramdd', 'histogramdd', 'hsmm',
+ 'hsplit', 'hspmm', 'hstack', 'hypot', 'i0', 'i0_', 'igamma', 'igammac', 'imag', 'index_add',
+ 'index_copy', 'index_fill', 'index_put', 'index_put_', 'index_reduce', 'index_select',
+ 'indices_copy', 'init_num_threads', 'inner', 'instance_norm', 'int_repr', 'inverse', 'is_complex',
+ 'is_conj', 'is_distributed', 'is_floating_point', 'is_grad_enabled', 'is_inference',
+ 'is_inference_mode_enabled', 'is_neg', 'is_nonzero', 'is_same_size', 'is_signed',
+ 'is_vulkan_available', 'isclose', 'isfinite', 'isin', 'isinf', 'isnan', 'isneginf', 'isposinf',
+ 'isreal', 'istft', 'kaiser_window', 'kl_div', 'kron', 'kthvalue', 'kthvalue', 'layer_norm', 'lcm',
+ 'lcm_', 'ldexp', 'ldexp_', 'le', 'lerp', 'less', 'less_equal', 'lgamma', 'linspace', 'log',
+ 'log10', 'log10_', 'log1p', 'log1p_', 'log2', 'log2_', 'log_', 'log_softmax', 'logaddexp',
+ 'logaddexp2', 'logcumsumexp', 'logdet', 'logical_and', 'logical_not', 'logical_or', 'logical_xor',
+ 'logit', 'logit_', 'logspace', 'logsumexp', 'lstm', 'lstm_cell', 'lt', 'lu_solve', 'lu_unpack',
+ 'lu_unpack', 'margin_ranking_loss', 'masked_fill', 'masked_scatter', 'masked_select', 'matmul',
+ 'matrix_exp', 'matrix_power', 'max', 'max', 'max_pool1d', 'max_pool1d_with_indices', 'max_pool2d',
+ 'max_pool3d', 'maximum', 'mean', 'median', 'median', 'min', 'min', 'minimum', 'miopen_batch_norm',
+ 'miopen_convolution', 'miopen_convolution_add_relu', 'miopen_convolution_relu',
+ 'miopen_convolution_transpose', 'miopen_depthwise_convolution', 'miopen_rnn',
+ 'mkldnn_adaptive_avg_pool2d', 'mkldnn_convolution', 'mkldnn_linear_backward_weights',
+ 'mkldnn_max_pool2d', 'mkldnn_max_pool3d', 'mkldnn_rnn_layer', 'mm', 'mode', 'mode', 'moveaxis',
+ 'movedim', 'msort', 'mul', 'multinomial', 'multiply', 'mv', 'mvlgamma', 'nan_to_num',
+ 'nan_to_num_', 'nanmean', 'nanmedian', 'nanmedian', 'nanquantile', 'nansum', 'narrow',
+ 'narrow_copy', 'native_batch_norm', 'native_channel_shuffle', 'native_dropout',
+ 'native_group_norm', 'native_layer_norm', 'native_norm', 'ne', 'neg', 'neg_', 'negative',
+ 'negative_', 'nextafter', 'nonzero', 'nonzero_static', 'norm_except_dim', 'normal', 'not_equal',
+ 'nuclear_norm', 'numel', 'ones', 'ones_like', 'orgqr', 'ormqr', 'outer', 'pairwise_distance',
+ 'pdist', 'permute', 'permute_copy', 'pinverse', 'pixel_shuffle', 'pixel_unshuffle', 'poisson',
+ 'poisson_nll_loss', 'polar', 'polygamma', 'positive', 'pow', 'prelu', 'prod', 'promote_types',
+ 'put', 'q_per_channel_axis', 'q_per_channel_scales', 'q_per_channel_zero_points', 'q_scale',
+ 'q_zero_point', 'qr', 'qr', 'quantile', 'quantize_per_channel', 'quantize_per_tensor',
+ 'quantize_per_tensor_dynamic', 'quantized_batch_norm', 'quantized_gru_cell', 'quantized_lstm_cell',
+ 'quantized_max_pool1d', 'quantized_max_pool2d', 'quantized_max_pool3d', 'quantized_rnn_relu_cell',
+ 'quantized_rnn_tanh_cell', 'rad2deg', 'rad2deg_', 'rand', 'rand_like', 'randint', 'randint_like',
+ 'randn', 'randn_like', 'randperm', 'range', 'ravel', 'real', 'reciprocal', 'reciprocal_', 'relu',
+ 'relu_', 'remainder', 'renorm', 'repeat_interleave', 'reshape', 'resize_as_', 'resize_as_sparse_',
+ 'resolve_conj', 'resolve_neg', 'result_type', 'rnn_relu', 'rnn_relu_cell', 'rnn_tanh',
+ 'rnn_tanh_cell', 'roll', 'rot90', 'round', 'round_', 'row_indices_copy', 'row_stack', 'rrelu',
+ 'rrelu_', 'rsqrt', 'rsqrt_', 'rsub', 'saddmm', 'scalar_tensor', 'scatter', 'scatter_add',
+ 'scatter_reduce', 'searchsorted', 'segment_reduce', 'select', 'select_copy', 'select_scatter',
+ 'selu', 'selu_', 'set_flush_denormal', 'set_num_interop_threads', 'set_num_threads', 'sgn',
+ 'sigmoid', 'sigmoid_', 'sign', 'signbit', 'sin', 'sin_', 'sinc', 'sinc_', 'sinh', 'sinh_',
+ 'slice_copy', 'slice_scatter', 'slogdet', 'slogdet', 'smm', 'softmax', 'sort', 'sort',
+ 'sparse_bsc_tensor', 'sparse_bsr_tensor', 'sparse_compressed_tensor', 'sparse_coo_tensor',
+ 'sparse_csc_tensor', 'sparse_csr_tensor', 'split_copy', 'split_with_sizes',
+ 'split_with_sizes_copy', 'spmm', 'sqrt', 'sqrt_', 'square', 'square_', 'squeeze', 'squeeze_copy',
+ 'sspaddmm', 'stack', 'std', 'std_mean', 'sub', 'subtract', 'sum', 'svd', 'svd', 'swapaxes',
+ 'swapdims', 'sym_constrain_range', 'sym_constrain_range_for_size', 't', 't_copy', 'take',
+ 'take_along_dim', 'tan', 'tan_', 'tanh', 'tanh_', 'tensor', 'tensor_split', 'threshold',
+ 'threshold_', 'tile', 'topk', 'topk', 'trace', 'transpose', 'transpose_copy', 'trapezoid', 'trapz',
+ 'triangular_solve', 'triangular_solve', 'tril', 'tril_indices', 'triplet_margin_loss', 'triu',
+ 'triu_indices', 'true_divide', 'trunc', 'trunc_', 'unbind', 'unbind_copy', 'unflatten',
+ 'unfold_copy', 'unique_dim', 'unsafe_chunk', 'unsafe_split', 'unsafe_split_with_sizes',
+ 'unsqueeze', 'unsqueeze_copy', 'values_copy', 'vander', 'var', 'var_mean', 'vdot',
+ 'view_as_complex', 'view_as_complex_copy', 'view_as_real', 'view_as_real_copy', 'view_copy',
+ 'vsplit', 'vstack', 'where', 'xlogy', 'xlogy_', 'zero_', 'zeros', 'zeros_like']
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/__config__.py b/env-llmeval/lib/python3.10/site-packages/torch/__config__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f7e3e209654a8846ddc42d31220101340043c276
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/__config__.py
@@ -0,0 +1,22 @@
+import torch
+
+
+def show():
+    """
+    Return a human-readable string describing the compile-time
+    configuration of PyTorch (delegates to ``torch._C._show_config``).
+    """
+    return torch._C._show_config()
+
+
+# TODO: In principle, we could provide more structured version/config
+# information here. For now only CXX_FLAGS is exposed, as Timer
+# uses them.
+def _cxx_flags():
+    """Return the CXX_FLAGS used when building PyTorch (via ``torch._C._cxx_flags``)."""
+    return torch._C._cxx_flags()
+
+
+def parallel_info():
+    r"""Return a detailed string describing parallelization settings (via ``torch._C._parallel_info``)."""
+    return torch._C._parallel_info()
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/__future__.py b/env-llmeval/lib/python3.10/site-packages/torch/__future__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ac8406e8f8ea3150eed5fb08843e2c72305c950
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/__future__.py
@@ -0,0 +1,21 @@
+"""
+This global flag controls whether to assign new tensors to the parameters
+instead of changing the existing parameters in-place when converting an `nn.Module`
+using the following methods:
+1. `module.cuda()` / `.cpu()` (for moving `module` between devices)
+2. `module.float()` / `.double()` / `.half()` (for converting `module` to a different dtype)
+3. `module.to()` / `.type()` (for changing `module`'s device or dtype)
+4. `module._apply(fn)` (for generic functions applied to `module`)
+
+Default: False
+"""
# Backing store for the flag documented in this module's docstring.
_overwrite_module_params_on_conversion = False


def set_overwrite_module_params_on_conversion(value):
    """Set the global flag that makes ``nn.Module`` conversion methods
    (``.cuda()``, ``.float()``, ``.to()``, ``._apply()``) assign new
    parameter tensors instead of mutating existing ones in place."""
    global _overwrite_module_params_on_conversion
    _overwrite_module_params_on_conversion = value


def get_overwrite_module_params_on_conversion():
    """Return the current value of the overwrite-on-conversion flag."""
    return _overwrite_module_params_on_conversion
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f52060e97133cfb19326b694dc2fa174d06576e0
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/__init__.py
@@ -0,0 +1,1995 @@
+
+r"""
+The torch package contains data structures for multi-dimensional
+tensors and defines mathematical operations over these tensors.
+Additionally, it provides many utilities for efficient serialization of
+Tensors and arbitrary types, and other useful utilities.
+
+It has a CUDA counterpart, that enables you to run your tensor computations
+on an NVIDIA GPU with compute capability >= 3.0.
+"""
+
+import math
+import os
+import sys
+import platform
+import textwrap
+import ctypes
+import inspect
+
+# multipy/deploy is setting this import before importing torch, this is the most
+# reliable way we have to detect if we're running within deploy.
+# https://github.com/pytorch/multipy/blob/d60f34ad38c371e441fe7ffdb77a3c3dda5a5d19/multipy/runtime/interpreter/interpreter_impl.cpp#L134-L137
+def _running_with_deploy():
+ return sys.modules.get("torch._meta_registrations", None) is object
+
+from ._utils import _import_dotted_name, classproperty
+from ._utils import _functionalize_sync as _sync
+from ._utils_internal import get_file_path, prepare_multiprocessing_environment, \
+ USE_RTLD_GLOBAL_WITH_LIBTORCH, USE_GLOBAL_DEPS
+
# TODO(torch_deploy) figure out how to freeze version.py in fbcode build
if _running_with_deploy():
    # version.py is not frozen into the deploy interpreter, so fall back to
    # a fixed placeholder version string.
    __version__ = "torch-deploy-1.8"
else:
    from .torch_version import __version__ as __version__
+
+from typing import Any, Callable, Dict, Optional, Set, Tuple, Type, TYPE_CHECKING, Union, List
+import builtins
+
# Names considered public for `from torch import *`; extended further below
# with the public names re-exported from torch._C.
__all__ = [
    'typename', 'is_tensor', 'is_storage',
    'set_default_tensor_type', 'set_default_device',
    'set_rng_state', 'get_rng_state', 'manual_seed', 'initial_seed', 'seed',
    'save', 'load', 'set_printoptions', 'chunk', 'split', 'stack', 'matmul',
    'no_grad', 'enable_grad', 'rand', 'randn', 'inference_mode',
    'DoubleStorage', 'FloatStorage', 'LongStorage', 'IntStorage',
    'ShortStorage', 'CharStorage', 'ByteStorage', 'BoolStorage',
    'TypedStorage', 'UntypedStorage',
    'DoubleTensor', 'FloatTensor', 'LongTensor', 'IntTensor',
    'ShortTensor', 'CharTensor', 'ByteTensor', 'BoolTensor', 'Tensor',
    'lobpcg', 'use_deterministic_algorithms',
    'are_deterministic_algorithms_enabled',
    'is_deterministic_algorithms_warn_only_enabled',
    'set_deterministic_debug_mode', 'get_deterministic_debug_mode',
    'set_float32_matmul_precision', 'get_float32_matmul_precision',
    'set_warn_always', 'is_warn_always_enabled', 'SymInt', 'SymFloat',
    'SymBool', 'sym_not', 'unravel_index',
    'sym_int', 'sym_float', 'sym_max', 'sym_min', 'sym_ite', 'compile', 'vmap',
    'sym_sqrt',
    'export', 'autocast', 'cond',
]
+
+################################################################################
+# Load the extension module
+################################################################################
+
if sys.platform == 'win32':
    # Candidate directories that may hold the DLLs torch's extension modules
    # depend on (conda's Library/bin, torch's own lib/, CUDA, NVTX).
    pfiles_path = os.getenv('ProgramFiles', 'C:\\Program Files')
    py_dll_path = os.path.join(sys.exec_prefix, 'Library', 'bin')
    th_dll_path = os.path.join(os.path.dirname(__file__), 'lib')

    # When users create a virtualenv that inherits the base environment,
    # we will need to add the corresponding library directory into
    # DLL search directories. Otherwise, it will rely on `PATH` which
    # is dependent on user settings.
    if sys.exec_prefix != sys.base_exec_prefix:
        base_py_dll_path = os.path.join(sys.base_exec_prefix, 'Library', 'bin')
    else:
        base_py_dll_path = ''

    dll_paths = list(filter(os.path.exists, [th_dll_path, py_dll_path, base_py_dll_path]))

    # Fall back to the NVTOOLSEXT_PATH install (or its default location) only
    # when none of the known directories already provides nvToolsExt64_1.dll.
    if all(not os.path.exists(os.path.join(p, 'nvToolsExt64_1.dll')) for p in dll_paths):
        nvtoolsext_dll_path = os.path.join(
            os.getenv('NVTOOLSEXT_PATH', os.path.join(pfiles_path, 'NVIDIA Corporation', 'NvToolsExt')), 'bin', 'x64')
    else:
        nvtoolsext_dll_path = ''

    from .version import cuda as cuda_version
    import glob
    # Likewise for the CUDA runtime: consult CUDA_PATH_V<major>_<minor> (or
    # the default toolkit location) only when cudart is not found already.
    if cuda_version and all(not glob.glob(os.path.join(p, 'cudart64*.dll')) for p in dll_paths):
        cuda_version_1 = cuda_version.replace('.', '_')
        cuda_path_var = 'CUDA_PATH_V' + cuda_version_1
        default_path = os.path.join(pfiles_path, 'NVIDIA GPU Computing Toolkit', 'CUDA', 'v' + cuda_version)
        cuda_path = os.path.join(os.getenv(cuda_path_var, default_path), 'bin')
    else:
        cuda_path = ''

    dll_paths.extend(filter(os.path.exists, [nvtoolsext_dll_path, cuda_path]))

    kernel32 = ctypes.WinDLL('kernel32.dll', use_last_error=True)
    with_load_library_flags = hasattr(kernel32, 'AddDllDirectory')
    # 0x0001 == SEM_FAILCRITICALERRORS: suppress the OS error dialog while we
    # probe-load DLLs below; the previous mode is restored at the end.
    prev_error_mode = kernel32.SetErrorMode(0x0001)

    kernel32.LoadLibraryW.restype = ctypes.c_void_p
    if with_load_library_flags:
        kernel32.LoadLibraryExW.restype = ctypes.c_void_p

    for dll_path in dll_paths:
        os.add_dll_directory(dll_path)

    try:
        # Probe for the MSVC runtime; a missing redistributable is the most
        # common cause of torch DLL load failures on Windows.
        ctypes.CDLL('vcruntime140.dll')
        ctypes.CDLL('msvcp140.dll')
        ctypes.CDLL('vcruntime140_1.dll')
    except OSError:
        print('''Microsoft Visual C++ Redistributable is not installed, this may lead to the DLL load failure.
                 It can be downloaded at https://aka.ms/vs/16/release/vc_redist.x64.exe''')

    dlls = glob.glob(os.path.join(th_dll_path, '*.dll'))
    path_patched = False
    for dll in dlls:
        is_loaded = False
        if with_load_library_flags:
            # 0x00001100 == LOAD_LIBRARY_SEARCH_DEFAULT_DIRS |
            # LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR
            res = kernel32.LoadLibraryExW(dll, None, 0x00001100)
            last_error = ctypes.get_last_error()
            # Error 126 ("module not found") may only mean the altered search
            # path is needed, so it falls through to the retry below.
            if res is None and last_error != 126:
                err = ctypes.WinError(last_error)
                err.strerror += f' Error loading "{dll}" or one of its dependencies.'
                raise err
            elif res is not None:
                is_loaded = True
        if not is_loaded:
            # Retry via PATH lookup, after prepending our directories once.
            if not path_patched:
                os.environ['PATH'] = ';'.join(dll_paths + [os.environ['PATH']])
                path_patched = True
            res = kernel32.LoadLibraryW(dll)
            if res is None:
                err = ctypes.WinError(ctypes.get_last_error())
                err.strerror += f' Error loading "{dll}" or one of its dependencies.'
                raise err

    kernel32.SetErrorMode(prev_error_mode)
+
+
+def _preload_cuda_deps(lib_folder, lib_name):
+ """Preloads cuda deps if they could not be found otherwise."""
+ # Should only be called on Linux if default path resolution have failed
+ assert platform.system() == 'Linux', 'Should only be called on Linux'
+ import glob
+ lib_path = None
+ for path in sys.path:
+ nvidia_path = os.path.join(path, 'nvidia')
+ if not os.path.exists(nvidia_path):
+ continue
+ candidate_lib_paths = glob.glob(os.path.join(nvidia_path, lib_folder, 'lib', lib_name))
+ if candidate_lib_paths and not lib_path:
+ lib_path = candidate_lib_paths[0]
+ if lib_path:
+ break
+ if not lib_path:
+ raise ValueError(f"{lib_name} not found in the system path {sys.path}")
+ ctypes.CDLL(lib_path)
+
+
# See Note [Global dependencies]
def _load_global_deps() -> None:
    """Load ``libtorch_global_deps`` with RTLD_GLOBAL so shared native
    dependencies resolve into one global namespace; when that fails because
    CUDA libraries come from pip ``nvidia-*`` wheels, preload them first and
    retry."""
    if _running_with_deploy() or platform.system() == 'Windows':
        return

    suffix = '.dylib' if platform.system() == 'Darwin' else '.so'
    lib_name = 'libtorch_global_deps' + suffix
    here = os.path.abspath(__file__)
    lib_path = os.path.join(os.path.dirname(here), 'lib', lib_name)

    try:
        ctypes.CDLL(lib_path, mode=ctypes.RTLD_GLOBAL)
    except OSError as err:
        # Can only happen for a wheel with CUDA libs as PyPI deps:
        # PyTorch itself is not purelib, but the nvidia-*-cu12 wheels are.
        cuda_libs: Dict[str, str] = {
            'cublas': 'libcublas.so.*[0-9]',
            'cudnn': 'libcudnn.so.*[0-9]',
            'cuda_nvrtc': 'libnvrtc.so.*[0-9]',
            'cuda_runtime': 'libcudart.so.*[0-9]',
            'cuda_cupti': 'libcupti.so.*[0-9]',
            'cufft': 'libcufft.so.*[0-9]',
            'curand': 'libcurand.so.*[0-9]',
            'cusolver': 'libcusolver.so.*[0-9]',
            'cusparse': 'libcusparse.so.*[0-9]',
            'nccl': 'libnccl.so.*[0-9]',
            'nvtx': 'libnvToolsExt.so.*[0-9]',
        }
        # Re-raise unless the failure message names one of the known CUDA libs.
        if not any(pattern.split('.')[0] in err.args[0] for pattern in cuda_libs.values()):
            raise err
        for lib_folder, lib_pattern in cuda_libs.items():
            _preload_cuda_deps(lib_folder, lib_pattern)
        ctypes.CDLL(lib_path, mode=ctypes.RTLD_GLOBAL)
+
+
if (USE_RTLD_GLOBAL_WITH_LIBTORCH or os.getenv('TORCH_USE_RTLD_GLOBAL')) and \
        (_running_with_deploy() or platform.system() != 'Windows'):
    # Do it the hard way. You might want to load libtorch with RTLD_GLOBAL in a
    # few circumstances:
    #
    # 1. You're in a build environment (e.g., fbcode) where
    #    libtorch_global_deps is not available, but you still need
    #    to get mkl to link in with RTLD_GLOBAL or it will just
    #    not work.
    #
    # 2. You're trying to run PyTorch under UBSAN and you need
    #    to ensure that only one copy of libtorch is loaded, so
    #    vptr checks work properly
    #
    # If you're using this setting, you must verify that all the libraries
    # you load consistently use the same libstdc++, or you may have
    # mysterious segfaults.
    #
    # Temporarily widen the dlopen flags so torch._C's dependencies resolve
    # into the global symbol namespace, then restore the previous flags.
    old_flags = sys.getdlopenflags()
    sys.setdlopenflags(os.RTLD_GLOBAL | os.RTLD_LAZY)
    from torch._C import *  # noqa: F403
    sys.setdlopenflags(old_flags)
    del old_flags

else:
    # Easy way. You want this most of the time, because it will prevent
    # C++ symbols from libtorch clobbering C++ symbols from other
    # libraries, leading to mysterious segfaults.
    #
    # If building in an environment where libtorch_global_deps isn't available
    # like parts of fbsource, but where RTLD_GLOBAL causes segfaults, you will
    # want USE_RTLD_GLOBAL_WITH_LIBTORCH = False and USE_GLOBAL_DEPS = False
    #
    # See Note [Global dependencies]
    if USE_GLOBAL_DEPS:
        _load_global_deps()
    from torch._C import *  # noqa: F403
+
# Appease the type checker: ordinarily the `_C` binding is inserted into this
# module's namespace by the torch._C module initialization code in C, which
# static analyzers cannot see.
if TYPE_CHECKING:
    from . import _C as _C
+
class SymInt:
    """
    Like an int (including magic methods), but redirects all operations on the
    wrapped node. This is used in particular to symbolically record operations
    in the symbolic shape workflow.
    """

    def __init__(self, node):
        # This field MUST be named node; C++ binding code assumes that this
        # class has a field named node that stores SymNode
        self.node = node

    def __bool__(self):
        # Mirrors int truthiness (nonzero is truthy); the comparison goes
        # through the runtime-installed magic methods below.
        return builtins.bool(self != 0)

    def __int__(self):
        # Forces the symbolic value to a concrete int.
        return self.node.int_()

    def __index__(self):
        # Lets a SymInt be used anywhere Python requires an index (slicing etc.).
        return self.node.int_()

    # Magic methods installed by torch.fx.experimental.sym_node
    # (the raising bodies below are placeholders that are replaced with real
    # implementations at import time; they exist so the signatures are visible).

    def __eq__(self, other: object) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __lt__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __gt__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __le__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __ge__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __sym_max__(self, other):
        raise AssertionError("type stub not overridden")

    def __sym_min__(self, other):
        raise AssertionError("type stub not overridden")

    def __sym_float__(self):
        raise AssertionError("type stub not overridden")

    def __neg__(self):
        raise AssertionError("type stub not overridden")

    def __repr__(self):
        return str(self.node)

    def __hash__(self) -> builtins.int:
        # Only singleton SymInts hash (via their underlying singleton value).
        ret = self.node.singleton_int()
        if ret is not None:
            return hash(ret)
        else:
            # We could support constant SymInts as well, but not doing it for now
            raise TypeError("unhashable type: non-singleton SymInt")
+
class SymFloat:
    """
    Like a float (including magic methods), but redirects all operations on the
    wrapped node. This is used in particular to symbolically record operations
    in the symbolic shape workflow.
    """

    def __init__(self, node):
        # This field MUST be named node; C++ binding code assumes that this
        # class has a field named node that stores SymNode
        self.node = node

    def __bool__(self):
        # Forces the symbolic value to a concrete bool.
        return self.node.bool_()

    # Magic methods installed by torch.fx.experimental.sym_node
    # (the raising bodies below are placeholders replaced at import time).

    def __eq__(self, other: object) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __lt__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __gt__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __le__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __ge__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __sym_max__(self, other):
        raise AssertionError("type stub not overridden")

    def __sym_min__(self, other):
        raise AssertionError("type stub not overridden")

    def __sym_int__(self):
        raise AssertionError("type stub not overridden")

    def __repr__(self):
        return self.node.str()
+
class SymBool:
    """
    Like a bool (including magic methods), but redirects all operations on the
    wrapped node. This is used in particular to symbolically record operations
    in the symbolic shape workflow.

    Unlike regular bools, regular boolean operators will force extra guards instead
    of symbolically evaluate. Use the bitwise operators instead to handle this.
    """

    def __init__(self, node):
        # This field MUST be named node; C++ binding code assumes that this
        # class has a field named node that stores SymNode
        self.node = node

    def __bool__(self):
        # Forces the symbolic value to a concrete bool.
        return self.node.bool_()

    def __int__(self):
        # Matches bool-as-int semantics (True -> 1, False -> 0).
        return builtins.int(self.node.bool_())

    # Magic methods installed by torch.fx.experimental.sym_node
    # (raising placeholders, replaced at import time).
    def __and__(self, other) -> "SymBool":
        raise AssertionError("type stub not overridden")

    def __or__(self, other) -> "SymBool":
        raise AssertionError("type stub not overridden")

    # We very carefully define __sym_not__, and not a number of other
    # plausible alternatives:
    #
    # - We do not override __not__ because this is not a real magic
    #   method; you cannot override the meaning of the not builtin in
    #   Python. We use the name 'sym_not' to clarify that in user code you
    #   cannot use the builtin not or operator.not_ or operator.__not__ and
    #   hit this magic method; you must use our custom sym_not operator.
    #
    # - We do not override the __invert__ method because SymBool is
    #   meant to be usable in situations where bool is expected. However,
    #   bitwise negation ~a does the wrong thing with booleans (because
    #   bool is a subclass of int, so ~1 = -2 which is not falseish.)
    #   This would be a giant footgun, so we get around it by defining
    #   our own operator. Note that bitwise and/or do the right thing,
    #   so we reuse the conventional operators there for readability.
    #
    def __sym_not__(self) -> "SymBool":
        raise AssertionError("type stub not overridden")

    def __sym_ite__(self, then_val, else_val):
        raise AssertionError("type stub not overridden")

    def __eq__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __repr__(self):
        return str(self.node)

    def __hash__(self):
        # Only constant SymBools hash; data-dependent ones are unhashable.
        if self.node.is_constant():
            return hash(self.node.bool_())
        else:
            raise TypeError("unhashable type: SymBool")
+
def sym_not(a):
    r""" SymInt-aware utility for logical negation.

    Args:
        a (SymBool or bool): Object to negate
    """
    from .overrides import has_torch_function_unary, handle_torch_function

    if has_torch_function_unary(a):
        return handle_torch_function(sym_not, (a,), a)
    if hasattr(a, '__sym_not__'):
        return a.__sym_not__()
    # Defer the sympy dependency: a sympy.Basic instance can only reach this
    # point if sympy has already been imported, so consult sys.modules rather
    # than paying an `import sympy` (and requiring sympy) on every call with
    # a plain bool.
    sympy = sys.modules.get('sympy')
    if sympy is not None and isinstance(a, sympy.Basic):
        return ~a  # type: ignore[operator]
    return not a
+
def sym_float(a):
    r"""SymInt-aware replacement for ``float(...)``.

    Args:
        a (SymInt, SymFloat, or object): Object to cast
    """
    from .overrides import has_torch_function_unary, handle_torch_function

    if has_torch_function_unary(a):
        return handle_torch_function(sym_float, (a,), a)
    if isinstance(a, SymFloat):
        # Already symbolic-float: nothing to do.
        return a
    if hasattr(a, '__sym_float__'):
        return a.__sym_float__()
    return py_float(a)  # type: ignore[operator]
+
+
def sym_int(a):
    r"""SymInt-aware replacement for ``int(...)``.

    Args:
        a (SymInt, SymFloat, or object): Object to cast
    """
    from .overrides import has_torch_function_unary, handle_torch_function

    if has_torch_function_unary(a):
        return handle_torch_function(sym_int, (a,), a)
    if isinstance(a, SymInt):
        # Already symbolic-int: nothing to do.
        return a
    if isinstance(a, SymFloat):
        # Truncate toward zero, matching builtins.int, while staying symbolic.
        return math.floor(a) if a >= 0 else math.ceil(a)  # type: ignore[arg-type, call-overload]
    return py_int(a)  # type: ignore[operator]
+
def sym_max(a, b):
    """SymInt-aware drop-in for the two-argument ``max()``."""
    from .overrides import has_torch_function, handle_torch_function

    if has_torch_function((a, b)):
        return handle_torch_function(sym_max, (a, b), a, b)
    if isinstance(a, (SymInt, SymFloat)):
        return a.__sym_max__(b)
    if isinstance(b, (SymInt, SymFloat)):
        # NB: If you actually care about preserving output type exactly
        # if you do something like max(0, 0.0), it is NOT sound to treat
        # min/max as commutative
        return b.__sym_max__(a)
    return builtins.max(a, b)  # type: ignore[operator]
+
def sym_min(a, b):
    """ SymInt-aware utility for min()."""
    # (Docstring previously said "max()" — copy-paste error from sym_max.)
    from .overrides import has_torch_function, handle_torch_function

    if has_torch_function((a, b)):
        return handle_torch_function(sym_min, (a, b), a, b)
    if isinstance(a, (SymInt, SymFloat)):
        return a.__sym_min__(b)
    elif isinstance(b, (SymInt, SymFloat)):
        # See the non-commutativity note in sym_max: dispatch to whichever
        # operand is symbolic.
        return b.__sym_min__(a)
    return builtins.min(a, b)  # type: ignore[operator]
+
# Drop in replacement for math.sqrt
def sym_sqrt(a):
    """Square root that dispatches to ``__sym_sqrt__`` for symbolic inputs
    and falls back to :func:`math.sqrt` otherwise."""
    from .overrides import has_torch_function_unary, handle_torch_function

    if has_torch_function_unary(a):
        return handle_torch_function(sym_sqrt, (a,), a)
    sym_hook = getattr(a, "__sym_sqrt__", None)
    if sym_hook is not None:
        return sym_hook()
    return math.sqrt(a)
+
def sym_ite(b, t, f):
    """SymBool-aware if-then-else: ``t`` when ``b`` is true, else ``f``.
    Both branches must share the same type."""
    from .overrides import has_torch_function, handle_torch_function

    if has_torch_function((b, t, f)):
        return handle_torch_function(sym_ite, (b, t, f), b, t, f)
    assert isinstance(b, (SymBool, builtins.bool)) and type(t) == type(f)
    if isinstance(b, SymBool):
        # Symbolic condition: record the select instead of branching.
        return b.__sym_ite__(t, f)
    return t if b else f
+
# Check to see if we can load C extensions, and if not provide some guidance
# on what the problem might be.
try:
    # _initExtension is chosen (arbitrarily) as a sentinel.
    from torch._C import _initExtension
except ImportError:
    import torch._C as _C_for_compiled_check

    # The __file__ check only works for Python 3.7 and above.
    # A None __file__ means `torch/_C` was picked up as a namespace package
    # (the source directory) instead of the compiled extension module.
    if _C_for_compiled_check.__file__ is None:
        raise ImportError(textwrap.dedent('''
            Failed to load PyTorch C extensions:
                It appears that PyTorch has loaded the `torch/_C` folder
                of the PyTorch repository rather than the C extensions which
                are expected in the `torch._C` namespace. This can occur when
                using the `install` workflow. e.g.
                    $ python setup.py install && python -c "import torch"

                This error can generally be solved using the `develop` workflow
                    $ python setup.py develop && python -c "import torch"  # This should succeed
                or by running Python from a different directory.
            ''').strip()) from None
    raise  # If __file__ is not None the cause is unknown, so just re-raise.
+
# Re-export the public names from torch._C and fix up their __module__ so
# they present as `torch.X` (affects repr, docs and pickling).
for name in dir(_C):
    if name[0] != '_' and not name.endswith('Base'):
        __all__.append(name)
        obj = getattr(_C, name)
        if (isinstance(obj, Callable) or inspect.isclass(obj)):  # type: ignore[arg-type]
            if (obj.__module__ != 'torch'):
                # TODO: fix their module from C++ side
                if name not in ['DisableTorchFunctionSubclass', 'DisableTorchFunction', 'Generator']:
                    obj.__module__ = 'torch'
    elif name == 'TensorBase':
        # issue 109438 / pr 109940. Prevent TensorBase from being copied into torch.
        delattr(sys.modules[__name__], name)
+
if not TYPE_CHECKING:
    # issue 38137 and python issue 43367. Submodules of a C extension are
    # non-standard, and attributes of those submodules cannot be pickled since
    # pickle expect to be able to import them as "from _C.sub import attr"
    # which fails with "_C is not a package
    for attr in dir(_C):
        candidate = getattr(_C, attr)
        if type(candidate) is type(_C):
            # submodule: register it under its dotted name so that pickle's
            # import machinery can find it
            if f'torch._C.{attr}' not in sys.modules:
                sys.modules[f'torch._C.{attr}'] = candidate
+
+
+################################################################################
+# Define basic utilities
+################################################################################
+
+
def typename(o):
    """Return a human-readable type string for ``o``.

    Tensors report their full type string (e.g. ``'torch.LongTensor'``);
    everything else reports ``module.qualname``, with the module prefix
    omitted for builtins.
    """
    if isinstance(o, torch.Tensor):
        return o.type()

    prefix = ''
    if hasattr(o, '__module__') and o.__module__ is not None \
            and o.__module__ not in ('builtins', '__builtin__'):
        prefix = o.__module__ + '.'

    if hasattr(o, '__qualname__'):
        name = o.__qualname__
    elif hasattr(o, '__name__'):
        name = o.__name__
    else:
        # Plain instances carry neither; fall back to their class's name.
        name = o.__class__.__name__

    return prefix + name
+
+
def is_tensor(obj):
    r"""Returns True if `obj` is a PyTorch tensor.

    This is simply ``isinstance(obj, Tensor)`` under the hood; prefer the
    ``isinstance`` check directly — it is more explicit and plays better
    with type checkers such as mypy.

    Args:
        obj (Object): Object to test
    Example::

        >>> x = torch.tensor([1, 2, 3])
        >>> torch.is_tensor(x)
        True

    """
    return isinstance(obj, torch.Tensor)
+
+
def is_storage(obj):
    r"""Returns True if `obj` is a PyTorch storage object.

    Note this is an exact-type membership test against the registered
    storage classes, not an ``isinstance`` check.

    Args:
        obj (Object): Object to test
    """
    obj_type = type(obj)
    return obj_type in _storage_classes
+
+
# Currently-installed DeviceContext from set_default_device (None = factory
# default, i.e. cpu).
_GLOBAL_DEVICE_CONTEXT = None

def set_default_device(device):
    """Sets the default ``torch.Tensor`` to be allocated on ``device``. This
    does not affect factory function calls which are called with an explicit
    ``device`` argument. Factory calls will be performed as if they
    were passed ``device`` as an argument.

    To only temporarily change the default device instead of setting it
    globally, use ``with torch.device(device):`` instead.

    The default device is initially ``cpu``. If you set the default tensor
    device to another device (e.g., ``cuda``) without a device index, tensors
    will be allocated on whatever the current device for the device type,
    even after :func:`torch.cuda.set_device` is called.

    .. warning::

        This function imposes a slight performance cost on every Python
        call to the torch API (not just factory functions). If this
        is causing problems for you, please comment on
        https://github.com/pytorch/pytorch/issues/92701

    .. note::

        This doesn't affect functions that create tensors that share the same memory as the input, like:
        :func:`torch.from_numpy` and :func:`torch.frombuffer`

    Args:
        device (device or string): the device to set as default

    Example::

        >>> # xdoctest: +SKIP("requires cuda, changes global state")
        >>> torch.tensor([1.2, 3]).device
        device(type='cpu')
        >>> torch.set_default_device('cuda') # current device is 0
        >>> torch.tensor([1.2, 3]).device
        device(type='cuda', index=0)
        >>> torch.set_default_device('cuda:1')
        >>> torch.tensor([1.2, 3]).device
        device(type='cuda', index=1)

    """
    global _GLOBAL_DEVICE_CONTEXT
    # Tear down any previously-installed context before swapping it out.
    if _GLOBAL_DEVICE_CONTEXT is not None:
        _GLOBAL_DEVICE_CONTEXT.__exit__(None, None, None)
    if device is None:
        _GLOBAL_DEVICE_CONTEXT = None
        return
    from torch.utils._device import DeviceContext
    _GLOBAL_DEVICE_CONTEXT = DeviceContext(device)
    # Entered here and deliberately left open until replaced or reset above.
    _GLOBAL_DEVICE_CONTEXT.__enter__()
+
+
def set_default_tensor_type(t):
    r"""
    .. warning::

        This function is deprecated as of PyTorch 2.1, please use :func:`torch.set_default_dtype()` and
        :func:`torch.set_default_device()` as alternatives.

    Sets the default ``torch.Tensor`` type to floating point tensor type
    ``t``. This type will also be used as default floating point type for
    type inference in :func:`torch.tensor`.

    The default floating point tensor type is initially ``torch.FloatTensor``.

    Args:
        t (type or string): the floating point tensor type or its name

    Example::

        >>> # xdoctest: +SKIP("Other tests may have changed the default type. Can we reset it?")
        >>> torch.tensor([1.2, 3]).dtype # initial default for floating point is torch.float32
        torch.float32
        >>> torch.set_default_tensor_type(torch.DoubleTensor)
        >>> torch.tensor([1.2, 3]).dtype # a new floating point tensor
        torch.float64

    """
    # A string argument names the type (e.g. 'torch.DoubleTensor'); resolve
    # it to the actual class before handing off to the C++ side.
    resolved = _import_dotted_name(t) if isinstance(t, str) else t
    _C._set_default_tensor_type(resolved)
+
+
def set_default_dtype(d):
    r"""

    Sets the default floating point dtype to :attr:`d`. Supports torch.float32
    and torch.float64 as inputs. Other dtypes may be accepted without complaint
    but are not supported and are unlikely to work as expected.

    When PyTorch is initialized its default floating point dtype is torch.float32,
    and the intent of set_default_dtype(torch.float64) is to facilitate NumPy-like
    type inference. The default floating point dtype is used to:

    1. Implicitly determine the default complex dtype. When the default floating point
       type is float32 the default complex dtype is complex64, and when the default
       floating point type is float64 the default complex type is complex128.
    2. Infer the dtype for tensors constructed using Python floats or complex Python
       numbers. See examples below.
    3. Determine the result of type promotion between bool and integer tensors and
       Python floats and complex Python numbers.

    Args:
        d (:class:`torch.dtype`): the floating point dtype to make the default.
                                  Either torch.float32 or torch.float64.

    Example:
        >>> # xdoctest: +SKIP("Other tests may have changed the default type. Can we reset it?")
        >>> # initial default for floating point is torch.float32
        >>> # Python floats are interpreted as float32
        >>> torch.tensor([1.2, 3]).dtype
        torch.float32
        >>> # initial default for floating point is torch.complex64
        >>> # Complex Python numbers are interpreted as complex64
        >>> torch.tensor([1.2, 3j]).dtype
        torch.complex64

        >>> torch.set_default_dtype(torch.float64)

        >>> # Python floats are now interpreted as float64
        >>> torch.tensor([1.2, 3]).dtype # a new floating point tensor
        torch.float64
        >>> # Complex Python numbers are now interpreted as complex128
        >>> torch.tensor([1.2, 3j]).dtype # a new complex tensor
        torch.complex128

    """
    # Delegates to ATen's global default-dtype state; no validation happens
    # here (unsupported dtypes are accepted but unsupported, per docstring).
    _C._set_default_dtype(d)
+
+def use_deterministic_algorithms(mode: builtins.bool, *, warn_only: builtins.bool = False) -> None:
+ r""" Sets whether PyTorch operations must use "deterministic"
+ algorithms. That is, algorithms which, given the same input, and when
+ run on the same software and hardware, always produce the same output.
+ When enabled, operations will use deterministic algorithms when available,
+ and if only nondeterministic algorithms are available they will throw a
+ :class:`RuntimeError` when called.
+
+ .. note:: This setting alone is not always enough to make an application
+ reproducible. Refer to :ref:`reproducibility` for more information.
+
+ .. note:: :func:`torch.set_deterministic_debug_mode` offers an alternative
+ interface for this feature.
+
+ The following normally-nondeterministic operations will act
+ deterministically when ``mode=True``:
+
+ * :class:`torch.nn.Conv1d` when called on CUDA tensor
+ * :class:`torch.nn.Conv2d` when called on CUDA tensor
+ * :class:`torch.nn.Conv3d` when called on CUDA tensor
+ * :class:`torch.nn.ConvTranspose1d` when called on CUDA tensor
+ * :class:`torch.nn.ConvTranspose2d` when called on CUDA tensor
+ * :class:`torch.nn.ConvTranspose3d` when called on CUDA tensor
+ * :class:`torch.nn.ReplicationPad2d` when attempting to differentiate a CUDA tensor
+ * :func:`torch.bmm` when called on sparse-dense CUDA tensors
+ * :func:`torch.Tensor.__getitem__` when attempting to differentiate a CPU tensor
+ and the index is a list of tensors
+ * :func:`torch.Tensor.index_put` with ``accumulate=False``
+ * :func:`torch.Tensor.index_put` with ``accumulate=True`` when called on a CPU
+ tensor
+ * :func:`torch.Tensor.put_` with ``accumulate=True`` when called on a CPU
+ tensor
+ * :func:`torch.Tensor.scatter_add_` when called on a CUDA tensor
+ * :func:`torch.gather` when called on a CUDA tensor that requires grad
+ * :func:`torch.index_add` when called on CUDA tensor
+ * :func:`torch.index_select` when attempting to differentiate a CUDA tensor
+ * :func:`torch.repeat_interleave` when attempting to differentiate a CUDA tensor
+ * :func:`torch.Tensor.index_copy` when called on a CPU or CUDA tensor
+ * :func:`torch.Tensor.scatter` when `src` type is Tensor and called on CUDA tensor
+ * :func:`torch.Tensor.scatter_reduce` when ``reduce='sum'`` or ``reduce='mean'`` and called on CUDA tensor
+
+ The following normally-nondeterministic operations will throw a
+ :class:`RuntimeError` when ``mode=True``:
+
+ * :class:`torch.nn.AvgPool3d` when attempting to differentiate a CUDA tensor
+ * :class:`torch.nn.AdaptiveAvgPool2d` when attempting to differentiate a CUDA tensor
+ * :class:`torch.nn.AdaptiveAvgPool3d` when attempting to differentiate a CUDA tensor
+ * :class:`torch.nn.MaxPool3d` when attempting to differentiate a CUDA tensor
+ * :class:`torch.nn.AdaptiveMaxPool2d` when attempting to differentiate a CUDA tensor
+ * :class:`torch.nn.FractionalMaxPool2d` when attempting to differentiate a CUDA tensor
+ * :class:`torch.nn.FractionalMaxPool3d` when attempting to differentiate a CUDA tensor
+ * :class:`torch.nn.MaxUnpool1d`
+ * :class:`torch.nn.MaxUnpool2d`
+ * :class:`torch.nn.MaxUnpool3d`
+ * :func:`torch.nn.functional.interpolate` when attempting to differentiate a CUDA tensor
+ and one of the following modes is used:
+
+ - ``linear``
+ - ``bilinear``
+ - ``bicubic``
+ - ``trilinear``
+
+ * :class:`torch.nn.ReflectionPad1d` when attempting to differentiate a CUDA tensor
+ * :class:`torch.nn.ReflectionPad2d` when attempting to differentiate a CUDA tensor
+ * :class:`torch.nn.ReflectionPad3d` when attempting to differentiate a CUDA tensor
+ * :class:`torch.nn.ReplicationPad1d` when attempting to differentiate a CUDA tensor
+ * :class:`torch.nn.ReplicationPad3d` when attempting to differentiate a CUDA tensor
+ * :class:`torch.nn.NLLLoss` when called on a CUDA tensor
+ * :class:`torch.nn.CTCLoss` when attempting to differentiate a CUDA tensor
+ * :class:`torch.nn.EmbeddingBag` when attempting to differentiate a CUDA tensor when
+ ``mode='max'``
+ * :func:`torch.Tensor.put_` when ``accumulate=False``
+ * :func:`torch.Tensor.put_` when ``accumulate=True`` and called on a CUDA tensor
+ * :func:`torch.histc` when called on a CUDA tensor
+ * :func:`torch.bincount` when called on a CUDA tensor and ``weights``
+ tensor is given
+ * :func:`torch.kthvalue` with called on a CUDA tensor
+ * :func:`torch.median` with indices output when called on a CUDA tensor
+ * :func:`torch.nn.functional.grid_sample` when attempting to differentiate a CUDA tensor
+ * :func:`torch.cumsum` when called on a CUDA tensor when dtype is floating point or complex
+ * :func:`torch.Tensor.scatter_reduce` when ``reduce='prod'`` and called on CUDA tensor
+ * :func:`torch.Tensor.resize_` when called with a quantized tensor
+
+ In addition, several operations fill uninitialized memory when this setting
+ is turned on and when
+ :attr:`torch.utils.deterministic.fill_uninitialized_memory` is turned on.
+ See the documentation for that attribute for more information.
+
+ A handful of CUDA operations are nondeterministic if the CUDA version is
+ 10.2 or greater, unless the environment variable ``CUBLAS_WORKSPACE_CONFIG=:4096:8``
+ or ``CUBLAS_WORKSPACE_CONFIG=:16:8`` is set. See the CUDA documentation for more
 details: `<https://docs.nvidia.com/cuda/cublas/index.html#results-reproducibility>`_
+ If one of these environment variable configurations is not set, a :class:`RuntimeError`
+ will be raised from these operations when called with CUDA tensors:
+
+ * :func:`torch.mm`
+ * :func:`torch.mv`
+ * :func:`torch.bmm`
+
+ Note that deterministic operations tend to have worse performance than
+ nondeterministic operations.
+
+ .. note::
+
+ This flag does not detect or prevent nondeterministic behavior caused
+ by calling an inplace operation on a tensor with an internal memory
+ overlap or by giving such a tensor as the :attr:`out` argument for an
+ operation. In these cases, multiple writes of different data may target
+ a single memory location, and the order of writes is not guaranteed.
+
+ Args:
+ mode (:class:`bool`): If True, makes potentially nondeterministic
+ operations switch to a deterministic algorithm or throw a runtime
+ error. If False, allows nondeterministic operations.
+
+ Keyword args:
+ warn_only (:class:`bool`, optional): If True, operations that do not
+ have a deterministic implementation will throw a warning instead of
+ an error. Default: ``False``
+
+ Example::
+
+ >>> # xdoctest: +SKIP
+ >>> torch.use_deterministic_algorithms(True)
+
+ # Forward mode nondeterministic error
+ >>> torch.randn(10, device='cuda').kthvalue(1)
+ ...
+ RuntimeError: kthvalue CUDA does not have a deterministic implementation...
+
+ # Backward mode nondeterministic error
+ >>> torch.nn.AvgPool3d(1)(torch.randn(3, 4, 5, 6, requires_grad=True).cuda()).sum().backward()
+ ...
+ RuntimeError: avg_pool3d_backward_cuda does not have a deterministic implementation...
+ """
+ _C._set_deterministic_algorithms(mode, warn_only=warn_only)
+
+def are_deterministic_algorithms_enabled() -> builtins.bool:
+ r"""Returns True if the global deterministic flag is turned on. Refer to
+ :func:`torch.use_deterministic_algorithms` documentation for more details.
+ """
+ return _C._get_deterministic_algorithms()
+
+def is_deterministic_algorithms_warn_only_enabled() -> builtins.bool:
+ r"""Returns True if the global deterministic flag is set to warn only.
+ Refer to :func:`torch.use_deterministic_algorithms` documentation for more
+ details.
+ """
+ return _C._get_deterministic_algorithms_warn_only()
+
+def set_deterministic_debug_mode(debug_mode: Union[builtins.int, str]) -> None:
+ r"""Sets the debug mode for deterministic operations.
+
+ .. note:: This is an alternative interface for
+ :func:`torch.use_deterministic_algorithms`. Refer to that function's
+ documentation for details about affected operations.
+
+ Args:
+ debug_mode(str or int): If "default" or 0, don't error or warn on
+ nondeterministic operations. If "warn" or 1, warn on
+ nondeterministic operations. If "error" or 2, error on
+ nondeterministic operations.
+ """
+
+ # NOTE: builtins.int is used here because int in this scope resolves
+ # to torch.int
+ if not isinstance(debug_mode, (builtins.int, str)):
+ raise TypeError(f'debug_mode must be str or int, but got {type(debug_mode)}')
+
+ if isinstance(debug_mode, str):
+ if debug_mode == 'default':
+ debug_mode = 0
+ elif debug_mode == 'warn':
+ debug_mode = 1
+ elif debug_mode == 'error':
+ debug_mode = 2
+ else:
+ raise RuntimeError(
+ 'invalid value of debug_mode, expected one of `default`, '
+ f'`warn`, `error`, but got {debug_mode}')
+
+ if debug_mode == 0:
+ _C._set_deterministic_algorithms(False)
+ elif debug_mode == 1:
+ _C._set_deterministic_algorithms(True, warn_only=True)
+ elif debug_mode == 2:
+ _C._set_deterministic_algorithms(True)
+ else:
+ raise RuntimeError(
+ 'invalid value of debug_mode, expected 0, 1, or 2, '
+ f'but got {debug_mode}')
+
+def get_deterministic_debug_mode() -> builtins.int:
+ r"""Returns the current value of the debug mode for deterministic
+ operations. Refer to :func:`torch.set_deterministic_debug_mode`
+ documentation for more details.
+ """
+
+ if _C._get_deterministic_algorithms():
+ if _C._get_deterministic_algorithms_warn_only():
+ return 1
+ else:
+ return 2
+ else:
+ return 0
+
+def get_float32_matmul_precision() -> builtins.str:
+ r"""Returns the current value of float32 matrix multiplication precision. Refer to
+ :func:`torch.set_float32_matmul_precision` documentation for more details.
+ """
+ return _C._get_float32_matmul_precision()
+
+def set_float32_matmul_precision(precision: str) -> None:
+ r"""Sets the internal precision of float32 matrix multiplications.
+
+ Running float32 matrix multiplications in lower precision may significantly increase
+ performance, and in some programs the loss of precision has a negligible impact.
+
+ Supports three settings:
+
+ * "highest", float32 matrix multiplications use the float32 datatype (24 mantissa
+ bits) for internal computations.
+ * "high", float32 matrix multiplications either use the TensorFloat32 datatype (10
+ mantissa bits) or treat each float32 number as the sum of two bfloat16 numbers
+ (approximately 16 mantissa bits), if the appropriate fast matrix multiplication
+ algorithms are available. Otherwise float32 matrix multiplications are computed
+ as if the precision is "highest". See below for more information on the bfloat16
+ approach.
+ * "medium", float32 matrix multiplications use the bfloat16 datatype (8 mantissa
+ bits) for internal computations, if a fast matrix multiplication algorithm
+ using that datatype internally is available. Otherwise float32
+ matrix multiplications are computed as if the precision is "high".
+
+ When using "high" precision, float32 multiplications may use a bfloat16-based algorithm
+ that is more complicated than simply truncating to some smaller number mantissa bits
+ (e.g. 10 for TensorFloat32, 8 for bfloat16). Refer to [Henry2019]_ for a complete
+ description of this algorithm. To briefly explain here, the first step is to realize
+ that we can perfectly encode a single float32 number as the sum of three bfloat16
+ numbers (because float32 has 24 mantissa bits while bfloat16 has 8, and both have the
+ same number of exponent bits). This means that the product of two float32 numbers can
+ be exactly given by the sum of nine products of bfloat16 numbers. We can then trade
+ accuracy for speed by dropping some of these products. The "high" precision algorithm
+ specifically keeps only the three most significant products, which conveniently excludes
+ all of the products involving the last 8 mantissa bits of either input. This means that
+ we can represent our inputs as the sum of two bfloat16 numbers rather than three.
+ Because bfloat16 fused-multiply-add (FMA) instructions are typically >10x faster than
+ float32 ones, it's faster to do three multiplications and 2 additions with bfloat16
+ precision than it is to do a single multiplication with float32 precision.
+
+ .. [Henry2019] http://arxiv.org/abs/1904.06376
+
+ .. note::
+
+ This does not change the output dtype of float32 matrix multiplications,
+ it controls how the internal computation of the matrix multiplication is performed.
+
+ .. note::
+
+ This does not change the precision of convolution operations. Other flags,
+ like `torch.backends.cudnn.allow_tf32`, may control the precision of convolution
+ operations.
+
+ .. note::
+
+ This flag currently only affects one native device type: CUDA.
+ If "high" or "medium" are set then the TensorFloat32 datatype will be used
+ when computing float32 matrix multiplications, equivalent to setting
+ `torch.backends.cuda.matmul.allow_tf32 = True`. When "highest" (the default)
+ is set then the float32 datatype is used for internal computations, equivalent
+ to setting `torch.backends.cuda.matmul.allow_tf32 = False`.
+
+ Args:
+ precision(str): can be set to "highest" (default), "high", or "medium" (see above).
+
+ """
+ _C._set_float32_matmul_precision(precision)
+
+def set_warn_always(b: builtins.bool) -> None:
+ r"""When this flag is False (default) then some PyTorch warnings may only
+ appear once per process. This helps avoid excessive warning information.
+ Setting it to True causes these warnings to always appear, which may be
+ helpful when debugging.
+
+ Args:
+ b (:class:`bool`): If True, force warnings to always be emitted
+ If False, set to the default behaviour
+ """
+ _C._set_warnAlways(b)
+
+def is_warn_always_enabled() -> builtins.bool:
+ r"""Returns True if the global warn_always flag is turned on. Refer to
+ :func:`torch.set_warn_always` documentation for more details.
+ """
+ return _C._get_warnAlways()
+
+################################################################################
+# Define error checking functions
+################################################################################
+
+# These error checking functions must be kept consistent with their C++
+# equivalents. Their C++ equivalents are mentioned where applicable.
+
+def _check_with(error_type, cond: Union[builtins.bool, SymBool], message: Callable[[], str]): # noqa: F811
+ if not isinstance(cond, (builtins.bool, torch.SymBool)):
+ raise TypeError(f'cond must be a bool, but got {type(cond)}')
+
+ from torch.fx.experimental.symbolic_shapes import expect_true
+ if expect_true(cond):
+ return
+
+ # error_type must be a subclass of Exception and not subclass of Warning
+ assert issubclass(error_type, Exception) and not issubclass(error_type, Warning)
+
+ if message is None:
+ message_evaluated = (
+ 'Expected cond to be True, but got False. (Could this error '
+ 'message be improved? If so, please report an enhancement request '
+ 'to PyTorch.)')
+
+ else:
+ if not callable(message):
+ raise TypeError('message must be a callable')
+
+ message_evaluated = str(message())
+
+ raise error_type(message_evaluated)
+
+def _check(cond, message=None): # noqa: F811
+ r"""Throws error containing an optional message if the specified condition
+ is False.
+
+ Error type: ``RuntimeError``
+
+ C++ equivalent: ``TORCH_CHECK``
+
+ Args:
+ cond (:class:`bool`): If False, throw error
+
+ message (Callable, optional): Callable that returns either a string or
+ an object that has a ``__str__()`` method to be used as the error
+ message. Default: ``None``
+ """
+ _check_with(RuntimeError, cond, message)
+
+def _check_is_size(i, message=None):
+ """Checks that a given integer is a valid size (i.e., is non-negative).
+ You should use this over _check(i >= 0) because we can use the semantic
+ information (that i is a size) to make some further inferences in case
+ i is an unbacked SymInt.
+
+ NB: Do NOT use this in contexts where a -1 size would be valid (indicating
+ to infer the size from context, or if you should wrap-around or truncate).
+ Only use this if the only valid value is an honest to goodness size.
+ """
+ # This is responsible for the expect_true
+ _check(i >= 0, message)
+ from torch.fx.experimental.symbolic_shapes import _advise_is_size
+ _advise_is_size(i)
+
+def _check_index(cond, message=None): # noqa: F811
+ r"""Throws error containing an optional message if the specified condition
+ is False.
+
+ Error type: ``IndexError``
+
+ C++ equivalent: ``TORCH_CHECK_INDEX``
+
+ Args:
+ cond (:class:`bool`): If False, throw error
+
+ message (Callable, optional): Callable that returns either a string or
+ an object that has a ``__str__()`` method to be used as the error
+ message. Default: ``None``
+ """
+ _check_with(IndexError, cond, message)
+
+def _check_value(cond, message=None): # noqa: F811
+ r"""Throws error containing an optional message if the specified condition
+ is False.
+
+ Error type: ``ValueError``
+
+ C++ equivalent: ``TORCH_CHECK_VALUE``
+
+ Args:
+ cond (:class:`bool`): If False, throw error
+
+ message (Callable, optional): Callable that returns either a string or
+ an object that has a ``__str__()`` method to be used as the error
+ message. Default: ``None``
+ """
+ _check_with(ValueError, cond, message)
+
+def _check_type(cond, message=None): # noqa: F811
+ r"""Throws error containing an optional message if the specified condition
+ is False.
+
+ Error type: ``TypeError``
+
+ C++ equivalent: ``TORCH_CHECK_TYPE``
+
+ Args:
+ cond (:class:`bool`): If False, throw error
+
+ message (Callable, optional): Callable that returns either a string or
+ an object that has a ``__str__()`` method to be used as the error
+ message. Default: ``None``
+ """
+ _check_with(TypeError, cond, message)
+
+def _check_not_implemented(cond, message=None): # noqa: F811
+ r"""Throws error containing an optional message if the specified condition
+ is False.
+
+ Error type: ``NotImplementedError``
+
+ C++ equivalent: ``TORCH_CHECK_NOT_IMPLEMENTED``
+
+ Args:
+ cond (:class:`bool`): If False, throw error
+
+ message (Callable, optional): Callable that returns either a string or
+ an object that has a ``__str__()`` method to be used as the error
+ message. Default: ``None``
+ """
+ _check_with(NotImplementedError, cond, message)
+
+def _check_tensor_all_with(error_type, cond, message=None): # noqa: F811
+ if not torch.is_tensor(cond):
+ raise TypeError(f'cond must be a tensor, but got {type(cond)}')
+
+ if not cond.dtype == torch.bool:
+ raise TypeError(
+ f'cond tensor must have dtype torch.bool, but got {cond.dtype}')
+
+ _check_with(error_type, cond._is_all_true().item(), message)
+
+# C++ equivalent: `TORCH_CHECK_TENSOR_ALL`
+def _check_tensor_all(cond, message=None): # noqa: F811
+ r"""Throws error containing an optional message if the specified condition
+ is False.
+
+ Error type: ``RuntimeError``
+
+ C++ equivalent: ``TORCH_CHECK_TENSOR_ALL``
+
+ Args:
+ cond (:class:`torch.Tensor`): Tensor of dtype ``torch.bool``. If any
+ element is ``False``, throw error
+
+ message (Callable, optional): Callable that returns either a string or
+ an object that has a ``__str__()`` method to be used as the error
+ message. Default: ``None``
+ """
+ _check_tensor_all_with(RuntimeError, cond, message)
+
################################################################################
# Define numeric constants
################################################################################

# For Python Array API (https://data-apis.org/array-api/latest/API_specification/constants.html) and
# NumPy consistency (https://numpy.org/devdocs/reference/constants.html)
from math import e , nan , inf , pi
# Extend __all__ so the constants are part of torch's documented public API.
__all__.extend(['e', 'pi', 'nan', 'inf'])
+
+################################################################################
+# Define Storage and Tensor classes
+################################################################################
+
+from ._tensor import Tensor
+from .storage import _StorageBase, TypedStorage, _LegacyStorage, UntypedStorage, _warn_typed_storage_removal
+
+# NOTE: New Storage classes should never be added. When adding a new
+# dtype, use torch.storage.TypedStorage directly.
+
class ByteStorage(_LegacyStorage):
    """Legacy typed storage with element dtype ``torch.uint8``; deprecated
    in favor of ``torch.storage.TypedStorage``."""
    @classproperty
    def dtype(self):
        # Warn about the TypedStorage deprecation before reporting the dtype.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.uint8
+
class DoubleStorage(_LegacyStorage):
    """Legacy typed storage with element dtype ``torch.double``; deprecated
    in favor of ``torch.storage.TypedStorage``."""
    @classproperty
    def dtype(self):
        # Warn about the TypedStorage deprecation before reporting the dtype.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.double
+
class FloatStorage(_LegacyStorage):
    """Legacy typed storage with element dtype ``torch.float``; deprecated
    in favor of ``torch.storage.TypedStorage``."""
    @classproperty
    def dtype(self):
        # Warn about the TypedStorage deprecation before reporting the dtype.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.float
+
class HalfStorage(_LegacyStorage):
    """Legacy typed storage with element dtype ``torch.half``; deprecated
    in favor of ``torch.storage.TypedStorage``."""
    @classproperty
    def dtype(self):
        # Warn about the TypedStorage deprecation before reporting the dtype.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.half
+
class LongStorage(_LegacyStorage):
    """Legacy typed storage with element dtype ``torch.long``; deprecated
    in favor of ``torch.storage.TypedStorage``."""
    @classproperty
    def dtype(self):
        # Warn about the TypedStorage deprecation before reporting the dtype.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.long
+
class IntStorage(_LegacyStorage):
    """Legacy typed storage with element dtype ``torch.int``; deprecated
    in favor of ``torch.storage.TypedStorage``."""
    @classproperty
    def dtype(self):
        # Warn about the TypedStorage deprecation before reporting the dtype.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.int
+
class ShortStorage(_LegacyStorage):
    """Legacy typed storage with element dtype ``torch.short``; deprecated
    in favor of ``torch.storage.TypedStorage``."""
    @classproperty
    def dtype(self):
        # Warn about the TypedStorage deprecation before reporting the dtype.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.short
+
class CharStorage(_LegacyStorage):
    """Legacy typed storage with element dtype ``torch.int8``; deprecated
    in favor of ``torch.storage.TypedStorage``."""
    @classproperty
    def dtype(self):
        # Warn about the TypedStorage deprecation before reporting the dtype.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.int8
+
class BoolStorage(_LegacyStorage):
    """Legacy typed storage with element dtype ``torch.bool``; deprecated
    in favor of ``torch.storage.TypedStorage``."""
    @classproperty
    def dtype(self):
        # Warn about the TypedStorage deprecation before reporting the dtype.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.bool
+
class BFloat16Storage(_LegacyStorage):
    """Legacy typed storage with element dtype ``torch.bfloat16``; deprecated
    in favor of ``torch.storage.TypedStorage``."""
    @classproperty
    def dtype(self):
        # Warn about the TypedStorage deprecation before reporting the dtype.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.bfloat16
+
class ComplexDoubleStorage(_LegacyStorage):
    """Legacy typed storage with element dtype ``torch.cdouble``; deprecated
    in favor of ``torch.storage.TypedStorage``."""
    @classproperty
    def dtype(self):
        # Warn about the TypedStorage deprecation before reporting the dtype.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.cdouble
+
class ComplexFloatStorage(_LegacyStorage):
    """Legacy typed storage with element dtype ``torch.cfloat``; deprecated
    in favor of ``torch.storage.TypedStorage``."""
    @classproperty
    def dtype(self):
        # Warn about the TypedStorage deprecation before reporting the dtype.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.cfloat
+
class QUInt8Storage(_LegacyStorage):
    """Legacy typed storage with element dtype ``torch.quint8``; deprecated
    in favor of ``torch.storage.TypedStorage``."""
    @classproperty
    def dtype(self):
        # Warn about the TypedStorage deprecation before reporting the dtype.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.quint8
+
class QInt8Storage(_LegacyStorage):
    """Legacy typed storage with element dtype ``torch.qint8``; deprecated
    in favor of ``torch.storage.TypedStorage``."""
    @classproperty
    def dtype(self):
        # Warn about the TypedStorage deprecation before reporting the dtype.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.qint8
+
class QInt32Storage(_LegacyStorage):
    """Legacy typed storage with element dtype ``torch.qint32``; deprecated
    in favor of ``torch.storage.TypedStorage``."""
    @classproperty
    def dtype(self):
        # Warn about the TypedStorage deprecation before reporting the dtype.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.qint32
+
class QUInt4x2Storage(_LegacyStorage):
    """Legacy typed storage with element dtype ``torch.quint4x2``; deprecated
    in favor of ``torch.storage.TypedStorage``."""
    @classproperty
    def dtype(self):
        # Warn about the TypedStorage deprecation before reporting the dtype.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.quint4x2
+
class QUInt2x4Storage(_LegacyStorage):
    """Legacy typed storage with element dtype ``torch.quint2x4``; deprecated
    in favor of ``torch.storage.TypedStorage``."""
    @classproperty
    def dtype(self):
        # Warn about the TypedStorage deprecation before reporting the dtype.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        return torch.quint2x4
+
# All storage classes exported on the torch namespace (legacy typed classes
# plus UntypedStorage/TypedStorage); passed to _C._init_names() further below.
_storage_classes = {
    UntypedStorage, DoubleStorage, FloatStorage, LongStorage, IntStorage,
    ShortStorage, CharStorage, ByteStorage, HalfStorage, BoolStorage,
    QUInt8Storage, QInt8Storage, QInt32Storage, BFloat16Storage,
    ComplexFloatStorage, ComplexDoubleStorage, QUInt4x2Storage, QUInt2x4Storage,
    TypedStorage
}

# The _tensor_classes set is initialized by the call to _C._initialize_tensor_type_bindings()
_tensor_classes: Set[Type] = set()
+
+# If you edit these imports, please update torch/__init__.py.in as well
+from .random import set_rng_state, get_rng_state, manual_seed, initial_seed, seed
+from .serialization import save, load
+from ._tensor_str import set_printoptions
+
+################################################################################
+# Initialize extension
+################################################################################
+
+def manager_path():
+ if _running_with_deploy() or platform.system() == 'Windows':
+ return b""
+ path = get_file_path('torch', 'bin', 'torch_shm_manager')
+ prepare_multiprocessing_environment(get_file_path('torch'))
+ if not os.path.exists(path):
+ raise RuntimeError("Unable to find torch_shm_manager at " + path)
+ return path.encode('utf-8')
+
from torch.amp import autocast

# Initializing the extension shadows the built-in python float / int classes;
# store them for later use by SymInt / SymFloat.
py_float = float
py_int = int

# Shared memory manager needs to know the exact location of manager executable
_C._initExtension(manager_path())
del manager_path  # one-shot helper; keep the torch namespace clean
+
# Appease the type checker: it can't deal with direct setting of globals().
# Note that we will see "too many" functions when reexporting this way; there
# is not a good way to fix this problem. Perhaps, try to redesign VariableFunctions
# so that this import is good enough
# (The runtime re-export happens in the dir() loop below; this branch only
# informs static analysis.)
if TYPE_CHECKING:
    # Some type signatures pulled in from _VariableFunctions here clash with
    # signatures already imported. For now these clashes are ignored; see
    # PR #43339 for details.
    from torch._C._VariableFunctions import * # type: ignore[assignment, misc] # noqa: F403
    # Fixup segment_reduce visibility
    _segment_reduce = segment_reduce
    del segment_reduce

# Ops not to be exposed in `torch` namespace,
# mostly helper ops.
PRIVATE_OPS = (
    'unique_dim',
)
+
# Re-export every ATen binding from _C._VariableFunctions into the torch
# namespace, recording the public (non-underscore) names in __all__.
for name in dir(_C._VariableFunctions):
    if name.startswith('__') or name in PRIVATE_OPS:
        continue
    obj = getattr(_C._VariableFunctions, name)
    obj.__module__ = 'torch'
    # Hide some APIs that should not be public
    if name == "segment_reduce":
        # TODO: Once the undocumented FC window is passed, remove the line below
        globals()[name] = obj
        name = "_" + name  # also export under the private name going forward
    globals()[name] = obj
    if not name.startswith("_"):
        __all__.append(name)
+
+
+
+################################################################################
+# Import TorchDynamo's lazy APIs to avoid circular dependenices
+################################################################################
+
+# needs to be before from .functional import * to avoid circular dependencies
+from ._compile import _disable_dynamo
+
+################################################################################
+# Import interface functions defined in Python
+################################################################################
+
+# needs to be after the above ATen bindings so we can overwrite from Python side
+from .functional import * # noqa: F403
+
+
################################################################################
# Remove unnecessary members
################################################################################

# These base classes were only needed to define the legacy storage classes
# above; drop them so they are not exposed on the torch namespace.
del _StorageBase
del _LegacyStorage
+
+################################################################################
+# Define _assert
+################################################################################
+
+# needs to be before the submodule imports to avoid circular dependencies
+def _assert(condition, message):
+ r"""A wrapper around Python's assert which is symbolically traceable.
+ """
+ from .overrides import has_torch_function, handle_torch_function
+
+ if type(condition) is not torch.Tensor and has_torch_function((condition,)):
+ return handle_torch_function(_assert, (condition,), condition, message)
+ assert condition, message
+
+################################################################################
+# Import most common subpackages
+################################################################################
+
+# Use the redundant form so that type checkers know that these are a part of
+# the public API. The "regular" import lines are there solely for the runtime
+# side effect of adding to the imported module's members for other users.
+from torch import cuda as cuda
+from torch import cpu as cpu
+from torch import mps as mps
+from torch import autograd as autograd
+from torch.autograd import (
+ no_grad as no_grad,
+ enable_grad as enable_grad,
+ set_grad_enabled as set_grad_enabled,
+ inference_mode as inference_mode,
+)
+from torch import fft as fft
+from torch import futures as futures
+from torch import _awaits as _awaits
+from torch import nested as nested
+from torch import nn as nn
+from torch.signal import windows as windows
+from torch import optim as optim
+import torch.optim._multi_tensor
+from torch import multiprocessing as multiprocessing
+from torch import sparse as sparse
+from torch import special as special
+import torch.utils.backcompat
+from torch import jit as jit
+from torch import linalg as linalg
+from torch import hub as hub
+from torch import random as random
+from torch import distributions as distributions
+from torch import testing as testing
+from torch import backends as backends
+import torch.utils.data
+from torch import __config__ as __config__
+from torch import __future__ as __future__
+from torch import profiler as profiler
+
+# Quantized, sparse, AO, etc. should be last to get imported, as nothing
+# is expected to depend on them.
+from torch import ao as ao
+# nn.quant* depends on ao -- so should be after those.
+import torch.nn.quantizable
+import torch.nn.quantized
+import torch.nn.qat
+import torch.nn.intrinsic
+
# Make the storage class names known to the C++ extension.
_C._init_names(list(torch._storage_classes))

# attach docstrings to torch and tensor functions
from . import _torch_docs, _tensor_docs, _storage_docs
del _torch_docs, _tensor_docs, _storage_docs  # only needed for their import side effects
+
+
+def compiled_with_cxx11_abi() -> builtins.bool:
+ r"""Returns whether PyTorch was built with _GLIBCXX_USE_CXX11_ABI=1"""
+ return _C._GLIBCXX_USE_CXX11_ABI
+
+
+# Import the ops "namespace"
+from torch._ops import ops
+from torch._classes import classes
+import torch._library
+
+# quantization depends on torch.fx
+# Import quantization
+from torch import quantization as quantization
+
+# Import the quasi random sampler
+from torch import quasirandom as quasirandom
+
# If you are seeing this, it means that this call site was not checked if
# the memory format could be preserved, and it was switched to old default
# behaviour of contiguous
legacy_contiguous_format = contiguous_format

# Register fork handler to initialize OpenMP in child processes (see gh-28389)
from torch.multiprocessing._atfork import register_after_fork
register_after_fork(torch.get_num_threads)
del register_after_fork  # registration done; drop the helper from the namespace
+
+# Import tools that require fully imported torch (for applying
+# torch.jit.script as a decorator, for instance):
+from ._lobpcg import lobpcg as lobpcg
+
# These were previously defined in native_functions.yaml and appeared on the
# `torch` namespace, but we moved them to c10 dispatch to facilitate custom
# class usage. We add these lines here to preserve backward compatibility.
# NOTE(review): these resolve through torch.ops at import time — confirm the
# aten ops are registered before this point.
quantized_lstm = torch.ops.aten.quantized_lstm
quantized_gru = torch.ops.aten.quantized_gru
+
+from torch.utils.dlpack import from_dlpack, to_dlpack
+
+# Import experimental masked operations support. See
+# [RFC-0016](https://github.com/pytorch/rfcs/pull/27) for more
+# information.
+from . import masked
+
+# Import removed ops with error message about removal
+from ._linalg_utils import ( # type: ignore[misc]
+ matrix_rank,
+ eig,
+ solve,
+ lstsq,
+)
+from ._linalg_utils import _symeig as symeig # type: ignore[misc]
+
+class _TorchCompileInductorWrapper:
+ compiler_name = "inductor"
+
+ def __init__(self, mode, options, dynamic):
+ self.config: Dict[str, Any] = dict()
+ self.dynamic = dynamic
+ self.apply_mode(mode)
+ self.apply_options(options)
+
+ if self.config.get("triton.cudagraphs", False):
+ os.environ["DISABLE_CUPTI_LAZY_REINIT"] = "1"
+ # FIXME: CUDA Graph does not work well with CUPTI teardown.
+ # 1) crashes on 1st lazy CUPTI re-init after teardown (CUDA 11)
+ # 2) crashes on 2nd non-lazy CUPTI re-init after teardown (CUDA 12)
+ # Workaround: turn off CUPTI teardown when using CUDA Graphs.
+ os.environ["TEARDOWN_CUPTI"] = "0"
+
+ def __eq__(self, other):
+ return (isinstance(other, _TorchCompileInductorWrapper) and
+ self.config == other.config and
+ self.dynamic == other.dynamic)
+
+ def apply_mode(self, mode: Optional[str]):
+ if mode is None or mode == "default":
+ pass
+ elif mode in ("reduce-overhead", "max-autotune", "max-autotune-no-cudagraphs"):
+ from torch._inductor import list_mode_options
+ self.apply_options(list_mode_options(mode, self.dynamic))
+ else:
+ raise RuntimeError(
+ f"Unrecognized mode={mode}, should be one of: default, reduce-overhead, max-autotune, max-autotune-no-cudagraphs"
+ )
+
+ def apply_options(self, options: Optional[Dict[str, Any]]):
+ if not options:
+ return
+
+ from torch._inductor import config
+ current_config: Dict[str, Any] = config.shallow_copy_dict()
+
+ for key, val in options.items():
+ attr_name = key.replace("-", "_")
+ if attr_name not in current_config:
+ raise RuntimeError(
+ f"Unexpected optimization option {key}, known options are {list(current_config.keys())}"
+ )
+ if type(val) is not type(current_config[attr_name]):
+ val_type_str = type(val).__name__
+ expected_type_str = type(current_config[attr_name]).__name__
+ raise RuntimeError(
+ f"Unexpected type of attr {key}, got {val_type_str} should be {expected_type_str}"
+ )
+ self.config[attr_name] = val
+
+ def __call__(self, model_, inputs_):
+ from torch._inductor.compile_fx import compile_fx
+
+ return compile_fx(model_, inputs_, config_patches=self.config)
+
+ def get_compiler_config(self):
+ from torch._inductor.compile_fx import get_patched_config_dict
+ return get_patched_config_dict(config_patches=self.config)
+
+ def reset(self):
+ from torch._inductor import config
+ if "triton.cudagraphs" in self.config or config.triton.cudagraphs:
+ if self.config.get("triton.cudagraphs", True):
+ from torch._inductor.cudagraph_trees import reset_cudagraph_trees
+ reset_cudagraph_trees()
+
+class _TorchCompileWrapper:
+ def __init__(self, backend, mode, options, dynamic):
+ from torch._dynamo.backends.registry import lookup_backend
+
+ if isinstance(backend, str):
+ self.compiler_name = backend
+ elif hasattr(backend, "__name__"):
+ self.compiler_name = backend.__name__
+ else:
+ self.compiler_name = str(backend)
+ self.dynamic = dynamic
+ self.compiler_fn = lookup_backend(backend)
+ self.kwargs = {}
+ # only pass the args if they non-empty
+ if mode and mode != "default":
+ self.kwargs["mode"] = mode
+ if options:
+ self.kwargs["options"] = options
+
+ def __eq__(self, other):
+ return (isinstance(other, _TorchCompileWrapper) and
+ self.compiler_fn == other.compiler_fn and
+ self.kwargs == other.kwargs and
+ self.dynamic == other.dynamic)
+
+ def __call__(self, model_, inputs_):
+ return self.compiler_fn(model_, inputs_, **self.kwargs)
+
+
+def compile(model: Optional[Callable] = None, *,
+            fullgraph: builtins.bool = False,
+            dynamic: Optional[builtins.bool] = None,
+            backend: Union[str, Callable] = "inductor",
+            mode: Union[str, None] = None,
+            options: Optional[Dict[str, Union[str, builtins.int, builtins.bool]]] = None,
+            disable: builtins.bool = False) -> Callable:
+    """
+    Optimizes given model/function using TorchDynamo and specified backend.
+
+    Concretely, for every frame executed within the compiled region, we will attempt
+    to compile it and cache the compiled result on the code object for future
+    use.  A single frame may be compiled multiple times if previous compiled
+    results are not applicable for subsequent calls (this is called a "guard
+    failure"), you can use TORCH_LOGS=guards to debug these situations.
+    Multiple compiled results can be associated with a frame up to
+    ``torch._dynamo.config.cache_size_limit``, which defaults to 64; at which
+    point we will fall back to eager.  Note that compile caches are per
+    *code object*, not frame; if you dynamically create multiple copies of a
+    function, they will all share the same code cache.
+
+    Args:
+        model (Callable): Module/function to optimize
+        fullgraph (bool): If False (default), torch.compile attempts to discover compileable regions
+         in the function that it will optimize. If True, then we require that the entire function be
+         capturable into a single graph. If this is not possible (that is, if there are graph breaks),
+         then this will raise an error.
+        dynamic (bool or None): Use dynamic shape tracing.  When this is True, we will up-front attempt
+         to generate a kernel that is as dynamic as possible to avoid recompilations when
+         sizes change.  This may not always work as some operations/optimizations will
+         force specialization; use TORCH_LOGS=dynamic to debug overspecialization.
+         When this is False, we will NEVER generate dynamic kernels, we will always specialize.
+         By default (None), we automatically detect if dynamism has occurred and compile a more
+         dynamic kernel upon recompile.
+        backend (str or Callable): backend to be used
+
+         - "inductor" is the default backend, which is a good balance between performance and overhead
+
+         - Non experimental in-tree backends can be seen with `torch._dynamo.list_backends()`
+
+         - Experimental or debug in-tree backends can be seen with `torch._dynamo.list_backends(None)`
+
+         - To register an out-of-tree custom backend: https://pytorch.org/docs/main/compile/custom-backends.html
+        mode (str): Can be either "default", "reduce-overhead", "max-autotune" or "max-autotune-no-cudagraphs"
+
+         - "default" is the default mode, which is a good balance between performance and overhead
+
+         - "reduce-overhead" is a mode that reduces the overhead of python with CUDA graphs,
+           useful for small batches.  Reduction of overhead can come at the cost of more memory
+           usage, as we will cache the workspace memory required for the invocation so that we
+           do not have to reallocate it on subsequent runs.  Reduction of overhead is not guaranteed
+           to work; today, we only reduce overhead for CUDA only graphs which do not mutate inputs.
+           There are other circumstances where CUDA graphs are not applicable; use TORCH_LOG=perf_hints
+           to debug.
+
+         - "max-autotune" is a mode that leverages Triton based matrix multiplications and convolutions
+           It enables CUDA graphs by default.
+
+         - "max-autotune-no-cudagraphs" is a mode similar to "max-autotune" but without CUDA graphs
+
+         - To see the exact configs that each mode sets you can call `torch._inductor.list_mode_options()`
+
+        options (dict): A dictionary of options to pass to the backend. Some notable ones to try out are
+
+         - `epilogue_fusion` which fuses pointwise ops into templates. Requires `max_autotune` to also be set
+
+         - `max_autotune` which will profile to pick the best matmul configuration
+
+         - `fallback_random` which is useful when debugging accuracy issues
+
+         - `shape_padding` which pads matrix shapes to better align loads on GPUs especially for tensor cores
+
+         - `triton.cudagraphs` which will reduce the overhead of python with CUDA graphs
+
+         - `trace.enabled` which is the most useful debugging flag to turn on
+
+         - `trace.graph_diagram` which will show you a picture of your graph after fusion
+
+         - For inductor you can see the full list of configs that it supports by calling `torch._inductor.list_options()`
+        disable (bool): Turn torch.compile() into a no-op for testing
+
+    Example::
+
+        @torch.compile(options={"triton.cudagraphs": True}, fullgraph=True)
+        def foo(x):
+            return torch.sin(x) + torch.cos(x)
+
+    """
+    _C._log_api_usage_once("torch.compile")
+    # Temporary until we get proper support for python 3.12
+    if sys.version_info >= (3, 12):
+        raise RuntimeError("Dynamo is not supported on Python 3.12+")
+
+    # Decorator mode: torch.compile(fullgraph=..., ...) was called without a
+    # model, so return a decorator that re-enters compile() with the captured
+    # keyword arguments once it receives the function to optimize.
+    if model is None:
+        def fn(model: Callable):
+            if model is None:
+                raise RuntimeError("Model can't be None")
+            return compile(model,
+                           fullgraph=fullgraph,
+                           dynamic=dynamic,
+                           backend=backend,
+                           mode=mode,
+                           options=options,
+                           disable=disable)
+        return fn
+
+    # `mode` and `options` are mutually exclusive ways to configure the backend.
+    if mode is not None and options is not None:
+        raise RuntimeError("Either mode or options can be specified, but both can't be specified at the same time.")
+    if mode is None and options is None:
+        mode = "default"
+    # Wrap the backend so that mode/options/dynamic are applied uniformly;
+    # the built-in inductor backend gets its own dedicated wrapper.
+    if backend == "inductor":
+        backend = _TorchCompileInductorWrapper(mode, options, dynamic)
+    else:
+        backend = _TorchCompileWrapper(backend, mode, options, dynamic)
+
+    return torch._dynamo.optimize(backend=backend, nopython=fullgraph, dynamic=dynamic, disable=disable)(model)
+
+
+from torch import export as export
+
+from torch._higher_order_ops import cond
+
+def _register_device_module(device_type, module):
+    r"""Register an external runtime module of the specific :attr:`device_type`
+    supported by torch.
+
+    After the :attr:`module` is registered correctly, the user can refer
+    the external runtime module as part of torch with attribute torch.xxx.
+
+    Args:
+        device_type (str or torch.device): device type the module implements;
+            normalized through ``torch.device(device_type).type``.
+        module: the runtime module object to expose as ``torch.<device_type>``.
+
+    Raises:
+        RuntimeError: if a module for ``device_type`` is already registered.
+    """
+    # Make sure the device_type represent a supported device type for torch.
+    device_type = torch.device(device_type).type
+    m = sys.modules[__name__]
+    if hasattr(m, device_type):
+        raise RuntimeError(f"The runtime module of '{device_type}' has already "
+                           f"been registered with '{getattr(m, device_type)}'")
+    # Expose it both as an attribute (torch.xxx) and as an importable
+    # submodule (import torch.xxx) by inserting into sys.modules.
+    setattr(m, device_type, module)
+    torch_module_name = '.'.join([__name__, device_type])
+    sys.modules[torch_module_name] = module
+
+# expose return_types
+from . import return_types
+from . import library
+if not TYPE_CHECKING:
+    # Meta registrations are only needed at runtime; skip under type checkers.
+    from . import _meta_registrations
+
+# Enable CUDA Sanitizer (opt-in via environment variable)
+if 'TORCH_CUDA_SANITIZER' in os.environ:
+    import torch.cuda._sanitizer as csan
+
+    csan.enable_cuda_sanitizer()
+
+# Populate magic methods on SymInt and SymFloat (import has side effects)
+import torch.fx.experimental.sym_node
+
+from torch import func as func
+from torch.func import vmap
+
+
+# The function _sparse_coo_tensor_unsafe is removed from PyTorch
+# Python API (v. 1.13), here we temporarily provide its replacement
+# with a deprecation warning.
+# TODO: remove the function for PyTorch v 1.15.
+def _sparse_coo_tensor_unsafe(*args, **kwargs):
+    """Deprecated shim: forward to sparse_coo_tensor with invariant checks off."""
+    import warnings
+    warnings.warn('torch._sparse_coo_tensor_unsafe is deprecated, '
+                  'use torch.sparse_coo_tensor(..., check_invariants=False) instead.')
+    # "unsafe" historically meant invariants were not validated; preserve that.
+    kwargs['check_invariants'] = False
+    return torch.sparse_coo_tensor(*args, **kwargs)
+
+# Register MPS specific decomps
+torch.backends.mps._init()
+
+if not _running_with_deploy():
+    from torch import compiler as compiler
+
+    class _TritonLibrary:
+        # Shared torch.library handle owning ops in the "triton" namespace.
+        lib = torch.library.Library("triton", "DEF")
+        # Cache of (op name, dispatch key) -> implementation so repeated
+        # registrations return the existing impl instead of redefining the op.
+        ops_table: Dict[Tuple[str, str], Callable] = {}
+
+        @classmethod
+        def registerOp(cls, op_key, full_schema, op_impl, dispatch_key):
+            # Define the schema and register the impl only on first sight of
+            # this (op, dispatch key) pair; subsequent calls are idempotent
+            # and simply return the cached implementation.
+            if (op_key, dispatch_key) not in cls.ops_table:
+                cls.lib.define(full_schema)
+                cls.lib.impl("triton::" + op_key, op_impl, dispatch_key)
+                cls.ops_table[(op_key, dispatch_key)] = op_impl
+
+            return cls.ops_table[(op_key, dispatch_key)]
+
+
+# Deprecated attributes
+# Maps legacy module-level attribute names to the replacement callables;
+# module __getattr__ (defined below in this file) warns on access and
+# returns the result of calling the replacement.
+_deprecated_attrs = {
+    "has_mps": torch.backends.mps.is_built,
+    "has_cuda": torch.backends.cuda.is_built,
+    "has_cudnn": torch.backends.cudnn.is_available,
+    "has_mkldnn": torch.backends.mkldnn.is_available,
+}
+
+if TYPE_CHECKING:
+    # Import the following modules during type checking to enable code intelligence features,
+    # such as auto-completion in tools like pylance, even when these modules are not explicitly
+    # imported in user code.
+    from torch import _dynamo as _dynamo
+    from torch import _inductor as _inductor
+    from torch import onnx as onnx
+
+else:
+    # Submodules that are expensive to import; loaded lazily on first
+    # attribute access via the module-level __getattr__ below (PEP 562).
+    _lazy_modules = {
+        "_dynamo",
+        "_inductor",
+        "_export",
+        # ONNX must be imported after _dynamo, _ops, _subclasses, fx, func and jit
+        "onnx",
+    }
+
+    def __getattr__(name):
+        # Deprecated attrs: warn and forward to the replacement callable.
+        replacement = _deprecated_attrs.get(name)
+        if replacement is not None:
+            import warnings
+            warnings.warn(f"'{name}' is deprecated, please use '{replacement.__module__}.{replacement.__name__}()'", stacklevel=2)
+            return replacement()
+
+        # Lazy modules: import on demand the first time they are requested.
+        if name in _lazy_modules:
+            import importlib
+            return importlib.import_module(f".{name}", __name__)
+
+        raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
+
+
+def _constrain_as_value(symbol, min: Optional[builtins.int] = None, max: Optional[builtins.int] = None):
+    """
+    Add min/max constraint on the intermediate symbol at tracing time. If called in eager mode,
+    it will still check if the input value is within the specified range.
+
+    Args:
+        symbol: the (symbolic) integer to constrain.
+        min: inclusive lower bound, or None for unbounded below.
+        max: inclusive upper bound, or None for unbounded above.
+    """
+    torch.sym_constrain_range(symbol, min=min, max=max)
+
+
+def _constrain_as_size(symbol, min: Optional[builtins.int] = None, max: Optional[builtins.int] = None):
+    """
+    This indicates that a given int is size-like, and can be used in any context where a size is expected.
+    You will typically use this when reading out integers from Tensors, e.g., max.item() or lengths.tolist()
+    which then need to be used as tensor constructors. Providing these assertions to PyTorch can help resolve
+    GuardOnDataDependentSymNode errors upon export, since we cannot guard on unbacked SymInts.
+
+    This function has unusual semantics which distinguish it from constrain_as_value.
+    Specifically, at compile-time, we will unsoundly assume that the resulting int is always >= 2.
+    As a result, the max value you pass in should always be greater than 2.
+    This makes it easier to use the unbacked int in size contexts, as we will often attempt to guard on a size being zero/one
+    (e.g., when computing the contiguity of a tensor, or testing if broadcasting can occur),
+    which will not work on unbacked SymInts. Assuming that the int is >= 2 allows us to
+    report False to these tests. Although this is technically unsound,
+    in practice we observe that if your program works for all sizes >= 2,
+    it probably works for zero and one too. The reason we specifically assume size is >= 2 is because
+    a lot of PyTorch code is specialized for 0 and 1 which could result in not general graphs.
+    At runtime, we only assert that the user provided min/max values are respected.
+
+    To demonstrate in a scenario, suppose you do
+    ```
+    # Case 1
+    # This will assume symbol is between [2, inf) at compile time, but [0, inf) at runtime
+    constrain_as_size(symbol, min=0)
+
+    # Case 2
+    # This will assume symbol is between [2, N] at compile time, but [0, N] at runtime
+    constrain_as_size(symbol, min=0, max=N)
+
+    # Case 3
+    # This is not a valid case as max is <= 2
+    constrain_as_size(symbol, min=0, max=1)
+
+    # Case 4
+    # This will assume symbol is between [2, inf) at compile time, AND [2, inf) at runtime
+    constrain_as_size(symbol, min=2)
+
+    # Case 5
+    # This will assume symbol is between [2, inf) at compile time, but [1, inf) at runtime
+    constrain_as_size(symbol, min=1)
+    ```
+    """
+    torch.sym_constrain_range_for_size(symbol, min=min, max=max)
+
+
+# Initialize the torch logging subsystem; kept as the final import-time
+# side effect of this module.
+from . import _logging
+_logging._init_logs()
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_appdirs.py b/env-llmeval/lib/python3.10/site-packages/torch/_appdirs.py
new file mode 100644
index 0000000000000000000000000000000000000000..46d4c599f2a672272eaf87a438206c9d8c612dda
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_appdirs.py
@@ -0,0 +1,666 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# Copyright (c) 2005-2010 ActiveState Software Inc.
+# Copyright (c) 2013 Eddy Petrișor
+
+# flake8: noqa
+
+"""
+This file is directly from
+https://github.com/ActiveState/appdirs/blob/3fe6a83776843a46f20c2e5587afcffe05e03b39/appdirs.py
+
+The license of https://github.com/ActiveState/appdirs copied below:
+
+
+# This is the MIT license
+
+Copyright (c) 2010 ActiveState Software Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+"""
+
+"""Utilities for determining application-specific dirs.
+
+See for details and usage.
+"""
+# Dev Notes:
+# - MSDN on where to store app data files:
+# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
+# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
+# - XDG spec for Un*x: https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
+
+__version__ = "1.4.4"
+# Version as a tuple of ints, e.g. (1, 4, 4), for programmatic comparison.
+__version_info__ = tuple(int(segment) for segment in __version__.split("."))
+
+
+import os
+import sys
+
+# Python 3 has no separate `unicode` type; alias it so the pywin32 helper
+# below can keep its original Python-2-era spelling.
+unicode = str
+
+if sys.platform.startswith("java"):
+    # Jython: sys.platform is "java...", so derive the real OS from java_ver.
+    import platform
+
+    os_name = platform.java_ver()[3][0]
+    if os_name.startswith("Windows"):  # "Windows XP", "Windows 7", etc.
+        system = "win32"
+    elif os_name.startswith("Mac"):  # "Mac OS X", etc.
+        system = "darwin"
+    else:  # "Linux", "SunOS", "FreeBSD", etc.
+        # Setting this to "linux2" is not ideal, but only Windows or Mac
+        # are actually checked for and the rest of the module expects
+        # *sys.platform* style strings.
+        system = "linux2"
+else:
+    system = sys.platform
+
+
+def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
+    r"""Return full path to the user-specific data dir for this application.
+
+    "appname" is the name of application.
+        If None, just the system directory is returned.
+    "appauthor" (only used on Windows) is the name of the
+        appauthor or distributing body for this application. Typically
+        it is the owning company name. This falls back to appname. You may
+        pass False to disable it.
+    "version" is an optional version path element to append to the
+        path. You might want to use this if you want multiple versions
+        of your app to be able to run independently. If used, this
+        would typically be "<major>.<minor>".
+        Only applied when appname is present.
+    "roaming" (boolean, default False) can be set True to use the Windows
+        roaming appdata directory. That means that for users on a Windows
+        network setup for roaming profiles, this user data will be
+        sync'd on login. See
+        <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
+        for a discussion of issues.
+
+    Typical user data directories are:
+        Mac OS X:               ~/Library/Application Support/<AppName>
+        Unix:                   ~/.local/share/<AppName>    # or in $XDG_DATA_HOME, if defined
+        Win XP (not roaming):   C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
+        Win XP (roaming):       C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
+        Win 7  (not roaming):   C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
+        Win 7  (roaming):       C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
+
+    For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
+    That means, by default "~/.local/share/<AppName>".
+    """
+    if system == "win32":
+        if appauthor is None:
+            appauthor = appname
+        # roaming selects CSIDL_APPDATA (synced profile) vs CSIDL_LOCAL_APPDATA.
+        const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
+        path = os.path.normpath(_get_win_folder(const))
+        if appname:
+            if appauthor is not False:
+                path = os.path.join(path, appauthor, appname)
+            else:
+                path = os.path.join(path, appname)
+    elif system == "darwin":
+        path = os.path.expanduser("~/Library/Application Support/")
+        if appname:
+            path = os.path.join(path, appname)
+    else:
+        # XDG spec: $XDG_DATA_HOME with ~/.local/share as the fallback.
+        path = os.getenv("XDG_DATA_HOME", os.path.expanduser("~/.local/share"))
+        if appname:
+            path = os.path.join(path, appname)
+    # version is only appended when an appname was given.
+    if appname and version:
+        path = os.path.join(path, version)
+    return path
+
+
+def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
+    r"""Return full path to the user-shared data dir for this application.
+
+    "appname" is the name of application.
+        If None, just the system directory is returned.
+    "appauthor" (only used on Windows) is the name of the
+        appauthor or distributing body for this application. Typically
+        it is the owning company name. This falls back to appname. You may
+        pass False to disable it.
+    "version" is an optional version path element to append to the
+        path. You might want to use this if you want multiple versions
+        of your app to be able to run independently. If used, this
+        would typically be "<major>.<minor>".
+        Only applied when appname is present.
+    "multipath" is an optional parameter only applicable to *nix
+        which indicates that the entire list of data dirs should be
+        returned. By default, the first item from XDG_DATA_DIRS is
+        returned, or '/usr/local/share/<AppName>',
+        if XDG_DATA_DIRS is not set
+
+    Typical site data directories are:
+        Mac OS X:   /Library/Application Support/<AppName>
+        Unix:       /usr/local/share/<AppName> or /usr/share/<AppName>
+        Win XP:     C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
+        Vista:      (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
+        Win 7:      C:\ProgramData\<AppAuthor>\<AppName>   # Hidden, but writeable on Win 7.
+
+    For Unix, this is using the $XDG_DATA_DIRS[0] default.
+
+    WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
+    """
+    if system == "win32":
+        if appauthor is None:
+            appauthor = appname
+        path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
+        if appname:
+            if appauthor is not False:
+                path = os.path.join(path, appauthor, appname)
+            else:
+                path = os.path.join(path, appname)
+    elif system == "darwin":
+        path = os.path.expanduser("/Library/Application Support")
+        if appname:
+            path = os.path.join(path, appname)
+    else:
+        # XDG default for $XDG_DATA_DIRS
+        # only first, if multipath is False
+        path = os.getenv(
+            "XDG_DATA_DIRS", os.pathsep.join(["/usr/local/share", "/usr/share"])
+        )
+        pathlist = [
+            os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)
+        ]
+        if appname:
+            if version:
+                # On *nix the version is folded into appname here, so the
+                # early return below never drops it.
+                appname = os.path.join(appname, version)
+            pathlist = [os.sep.join([x, appname]) for x in pathlist]
+
+        if multipath:
+            path = os.pathsep.join(pathlist)
+        else:
+            path = pathlist[0]
+        # NOTE: the *nix branch returns here; the trailing version append
+        # below only runs for the win32/darwin branches.
+        return path
+
+    if appname and version:
+        path = os.path.join(path, version)
+    return path
+
+
+def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
+    r"""Return full path to the user-specific config dir for this application.
+
+    "appname" is the name of application.
+        If None, just the system directory is returned.
+    "appauthor" (only used on Windows) is the name of the
+        appauthor or distributing body for this application. Typically
+        it is the owning company name. This falls back to appname. You may
+        pass False to disable it.
+    "version" is an optional version path element to append to the
+        path. You might want to use this if you want multiple versions
+        of your app to be able to run independently. If used, this
+        would typically be "<major>.<minor>".
+        Only applied when appname is present.
+    "roaming" (boolean, default False) can be set True to use the Windows
+        roaming appdata directory. That means that for users on a Windows
+        network setup for roaming profiles, this user data will be
+        sync'd on login. See
+        <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
+        for a discussion of issues.
+
+    Typical user config directories are:
+        Mac OS X:               ~/Library/Preferences/<AppName>
+        Unix:                   ~/.config/<AppName>     # or in $XDG_CONFIG_HOME, if defined
+        Win *:                  same as user_data_dir
+
+    For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
+    That means, by default "~/.config/<AppName>".
+    """
+    if system == "win32":
+        # Windows has no separate config location; reuse the data dir.
+        path = user_data_dir(appname, appauthor, None, roaming)
+    elif system == "darwin":
+        path = os.path.expanduser("~/Library/Preferences/")
+        if appname:
+            path = os.path.join(path, appname)
+    else:
+        # XDG spec: $XDG_CONFIG_HOME with ~/.config as the fallback.
+        path = os.getenv("XDG_CONFIG_HOME", os.path.expanduser("~/.config"))
+        if appname:
+            path = os.path.join(path, appname)
+    if appname and version:
+        path = os.path.join(path, version)
+    return path
+
+
+def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
+    r"""Return full path to the user-shared config dir for this application.
+
+    "appname" is the name of application.
+        If None, just the system directory is returned.
+    "appauthor" (only used on Windows) is the name of the
+        appauthor or distributing body for this application. Typically
+        it is the owning company name. This falls back to appname. You may
+        pass False to disable it.
+    "version" is an optional version path element to append to the
+        path. You might want to use this if you want multiple versions
+        of your app to be able to run independently. If used, this
+        would typically be "<major>.<minor>".
+        Only applied when appname is present.
+    "multipath" is an optional parameter only applicable to *nix
+        which indicates that the entire list of config dirs should be
+        returned. By default, the first item from XDG_CONFIG_DIRS is
+        returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
+
+    Typical site config directories are:
+        Mac OS X:   same as site_data_dir
+        Unix:       /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
+                    $XDG_CONFIG_DIRS
+        Win *:      same as site_data_dir
+        Vista:      (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
+
+    For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
+
+    WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
+    """
+    if system == "win32":
+        path = site_data_dir(appname, appauthor)
+        if appname and version:
+            path = os.path.join(path, version)
+    elif system == "darwin":
+        path = os.path.expanduser("/Library/Preferences")
+        if appname:
+            path = os.path.join(path, appname)
+    else:
+        # XDG default for $XDG_CONFIG_DIRS
+        # only first, if multipath is False
+        path = os.getenv("XDG_CONFIG_DIRS", "/etc/xdg")
+        pathlist = [
+            os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)
+        ]
+        if appname:
+            if version:
+                # Fold version into appname so every candidate dir gets it.
+                appname = os.path.join(appname, version)
+            pathlist = [os.sep.join([x, appname]) for x in pathlist]
+
+        if multipath:
+            path = os.pathsep.join(pathlist)
+        else:
+            path = pathlist[0]
+    return path
+
+
+def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
+    r"""Return full path to the user-specific cache dir for this application.
+
+    "appname" is the name of application.
+        If None, just the system directory is returned.
+    "appauthor" (only used on Windows) is the name of the
+        appauthor or distributing body for this application. Typically
+        it is the owning company name. This falls back to appname. You may
+        pass False to disable it.
+    "version" is an optional version path element to append to the
+        path. You might want to use this if you want multiple versions
+        of your app to be able to run independently. If used, this
+        would typically be "<major>.<minor>".
+        Only applied when appname is present.
+    "opinion" (boolean) can be False to disable the appending of
+        "Cache" to the base app data dir for Windows. See
+        discussion below.
+
+    Typical user cache directories are:
+        Mac OS X:   ~/Library/Caches/<AppName>
+        Unix:       ~/.cache/<AppName> (XDG default)
+        Win XP:     C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
+        Vista:      C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
+
+    On Windows the only suggestion in the MSDN docs is that local settings go in
+    the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
+    app data dir (the default returned by `user_data_dir` above). Apps typically
+    put cache data somewhere *under* the given dir here. Some examples:
+        ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
+        ...\Acme\SuperApp\Cache\1.0
+    OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
+    This can be disabled with the `opinion=False` option.
+    """
+    if system == "win32":
+        if appauthor is None:
+            appauthor = appname
+        path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
+        if appname:
+            if appauthor is not False:
+                path = os.path.join(path, appauthor, appname)
+            else:
+                path = os.path.join(path, appname)
+            # Opinionated "Cache" suffix distinguishes cache from data files.
+            if opinion:
+                path = os.path.join(path, "Cache")
+    elif system == "darwin":
+        path = os.path.expanduser("~/Library/Caches")
+        if appname:
+            path = os.path.join(path, appname)
+    else:
+        # XDG spec: $XDG_CACHE_HOME with ~/.cache as the fallback.
+        path = os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
+        if appname:
+            path = os.path.join(path, appname)
+    if appname and version:
+        path = os.path.join(path, version)
+    return path
+
+
+def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
+    r"""Return full path to the user-specific state dir for this application.
+
+    "appname" is the name of application.
+        If None, just the system directory is returned.
+    "appauthor" (only used on Windows) is the name of the
+        appauthor or distributing body for this application. Typically
+        it is the owning company name. This falls back to appname. You may
+        pass False to disable it.
+    "version" is an optional version path element to append to the
+        path. You might want to use this if you want multiple versions
+        of your app to be able to run independently. If used, this
+        would typically be "<major>.<minor>".
+        Only applied when appname is present.
+    "roaming" (boolean, default False) can be set True to use the Windows
+        roaming appdata directory. That means that for users on a Windows
+        network setup for roaming profiles, this user data will be
+        sync'd on login. See
+        <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
+        for a discussion of issues.
+
+    Typical user state directories are:
+        Mac OS X:  same as user_data_dir
+        Unix:      ~/.local/state/<AppName>   # or in $XDG_STATE_HOME, if defined
+        Win *:     same as user_data_dir
+
+    For Unix, we follow this Debian proposal
+    to extend the XDG spec and support $XDG_STATE_HOME.
+
+    That means, by default "~/.local/state/<AppName>".
+    """
+    if system in ["win32", "darwin"]:
+        # Neither platform distinguishes state from data; reuse the data dir.
+        path = user_data_dir(appname, appauthor, None, roaming)
+    else:
+        # XDG proposal: $XDG_STATE_HOME with ~/.local/state as the fallback.
+        path = os.getenv("XDG_STATE_HOME", os.path.expanduser("~/.local/state"))
+        if appname:
+            path = os.path.join(path, appname)
+    if appname and version:
+        path = os.path.join(path, version)
+    return path
+
+
+def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
+    r"""Return full path to the user-specific log dir for this application.
+
+    "appname" is the name of application.
+        If None, just the system directory is returned.
+    "appauthor" (only used on Windows) is the name of the
+        appauthor or distributing body for this application. Typically
+        it is the owning company name. This falls back to appname. You may
+        pass False to disable it.
+    "version" is an optional version path element to append to the
+        path. You might want to use this if you want multiple versions
+        of your app to be able to run independently. If used, this
+        would typically be "<major>.<minor>".
+        Only applied when appname is present.
+    "opinion" (boolean) can be False to disable the appending of
+        "Logs" to the base app data dir for Windows, and "log" to the
+        base cache dir for Unix. See discussion below.
+
+    Typical user log directories are:
+        Mac OS X:   ~/Library/Logs/<AppName>
+        Unix:       ~/.cache/<AppName>/log  # or under $XDG_CACHE_HOME if defined
+        Win XP:     C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
+        Vista:      C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
+
+    On Windows the only suggestion in the MSDN docs is that local settings
+    go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
+    examples of what some windows apps use for a logs dir.)
+
+    OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
+    value for Windows and appends "log" to the user cache dir for Unix.
+    This can be disabled with the `opinion=False` option.
+    """
+    if system == "darwin":
+        path = os.path.join(os.path.expanduser("~/Library/Logs"), appname)
+    elif system == "win32":
+        path = user_data_dir(appname, appauthor, version)
+        # version was already applied by user_data_dir; suppress the trailing
+        # append below by clearing it.
+        version = False
+        if opinion:
+            path = os.path.join(path, "Logs")
+    else:
+        path = user_cache_dir(appname, appauthor, version)
+        # Same trick as above: user_cache_dir already consumed version.
+        version = False
+        if opinion:
+            path = os.path.join(path, "log")
+    if appname and version:
+        path = os.path.join(path, version)
+    return path
+
+
+class AppDirs(object):
+    """Convenience wrapper for getting application dirs.
+
+    Stores appname/appauthor/version/roaming/multipath once and exposes each
+    of the module-level *_dir functions as a read-only property.
+    """
+
+    def __init__(
+        self, appname=None, appauthor=None, version=None, roaming=False, multipath=False
+    ):
+        self.appname = appname
+        self.appauthor = appauthor
+        self.version = version
+        self.roaming = roaming      # used by the user_* (roaming-aware) dirs
+        self.multipath = multipath  # used by the site_* (multi-dir) lookups
+
+    @property
+    def user_data_dir(self):
+        return user_data_dir(
+            self.appname, self.appauthor, version=self.version, roaming=self.roaming
+        )
+
+    @property
+    def site_data_dir(self):
+        return site_data_dir(
+            self.appname, self.appauthor, version=self.version, multipath=self.multipath
+        )
+
+    @property
+    def user_config_dir(self):
+        return user_config_dir(
+            self.appname, self.appauthor, version=self.version, roaming=self.roaming
+        )
+
+    @property
+    def site_config_dir(self):
+        return site_config_dir(
+            self.appname, self.appauthor, version=self.version, multipath=self.multipath
+        )
+
+    @property
+    def user_cache_dir(self):
+        return user_cache_dir(self.appname, self.appauthor, version=self.version)
+
+    @property
+    def user_state_dir(self):
+        return user_state_dir(self.appname, self.appauthor, version=self.version)
+
+    @property
+    def user_log_dir(self):
+        return user_log_dir(self.appname, self.appauthor, version=self.version)
+
+
+# ---- internal support stuff
+
+
+def _get_win_folder_from_registry(csidl_name):
+    """This is a fallback technique at best. I'm not sure if using the
+    registry for this guarantees us the correct answer for all CSIDL_*
+    names.
+    """
+    import winreg as _winreg
+
+    # Translate the CSIDL constant name to the value name used under the
+    # "Shell Folders" registry key; raises KeyError for unsupported names.
+    shell_folder_name = {
+        "CSIDL_APPDATA": "AppData",
+        "CSIDL_COMMON_APPDATA": "Common AppData",
+        "CSIDL_LOCAL_APPDATA": "Local AppData",
+    }[csidl_name]
+
+    key = _winreg.OpenKey(
+        _winreg.HKEY_CURRENT_USER,
+        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders",
+    )
+    dir, type = _winreg.QueryValueEx(key, shell_folder_name)
+    return dir
+
+
+def _get_win_folder_with_pywin32(csidl_name):
+    """Resolve a CSIDL_* folder using the pywin32 shell bindings."""
+    from win32com.shell import shell, shellcon
+
+    dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
+    # Try to make this a unicode path because SHGetFolderPath does
+    # not return unicode strings when there is unicode data in the
+    # path.
+    try:
+        dir = unicode(dir)
+
+        # Downgrade to short path name if it has highbit chars.
+        has_high_char = False
+        for c in dir:
+            if ord(c) > 255:
+                has_high_char = True
+                break
+        if has_high_char:
+            try:
+                import win32api
+
+                dir = win32api.GetShortPathName(dir)
+            except ImportError:
+                # win32api unavailable: keep the long (unicode) path.
+                pass
+    except UnicodeError:
+        # Conversion failed: fall back to whatever SHGetFolderPath returned.
+        pass
+    return dir
+
+
+def _get_win_folder_with_ctypes(csidl_name):
+    """Resolve a CSIDL_* folder via ctypes and SHGetFolderPathW."""
+    import ctypes
+
+    # Numeric CSIDL constants; raises KeyError for unsupported names.
+    csidl_const = {
+        "CSIDL_APPDATA": 26,
+        "CSIDL_COMMON_APPDATA": 35,
+        "CSIDL_LOCAL_APPDATA": 28,
+    }[csidl_name]
+
+    buf = ctypes.create_unicode_buffer(1024)
+    ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
+
+    # Downgrade to short path name if it has highbit chars.
+    has_high_char = False
+    for c in buf:
+        if ord(c) > 255:
+            has_high_char = True
+            break
+    if has_high_char:
+        buf2 = ctypes.create_unicode_buffer(1024)
+        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
+            buf = buf2
+
+    return buf.value
+
+
+def _get_win_folder_with_jna(csidl_name):
+    """Resolve a CSIDL_* folder on Jython via the JNA win32 bindings."""
+    import array
+
+    from com.sun import jna
+    from com.sun.jna.platform import win32
+
+    # MAX_PATH is in characters; double for the 2-byte wide-char buffer.
+    buf_size = win32.WinDef.MAX_PATH * 2
+    buf = array.zeros("c", buf_size)
+    shell = win32.Shell32.INSTANCE
+    shell.SHGetFolderPath(
+        None,
+        getattr(win32.ShlObj, csidl_name),
+        None,
+        win32.ShlObj.SHGFP_TYPE_CURRENT,
+        buf,
+    )
+    # Strip trailing NULs left in the fixed-size buffer.
+    dir = jna.Native.toString(buf.tostring()).rstrip("\0")
+
+    # Downgrade to short path name if it has highbit chars.
+    has_high_char = False
+    for c in dir:
+        if ord(c) > 255:
+            has_high_char = True
+            break
+    if has_high_char:
+        buf = array.zeros("c", buf_size)
+        kernel = win32.Kernel32.INSTANCE
+        if kernel.GetShortPathName(dir, buf, buf_size):
+            dir = jna.Native.toString(buf.tostring()).rstrip("\0")
+
+    return dir
+
+
+if system == "win32":
+    # Pick the best available implementation at import time, preferring
+    # pywin32, then ctypes, then JNA (Jython), then the registry fallback.
+    try:
+        import win32com.shell
+
+        _get_win_folder = _get_win_folder_with_pywin32
+    except ImportError:
+        try:
+            from ctypes import windll
+
+            _get_win_folder = _get_win_folder_with_ctypes
+        except ImportError:
+            try:
+                import com.sun.jna
+
+                _get_win_folder = _get_win_folder_with_jna
+            except ImportError:
+                _get_win_folder = _get_win_folder_from_registry
+
+
+# ---- self test code
+
+if __name__ == "__main__":
+    # Self test: print every directory property for a sample app under the
+    # four supported argument combinations.
+    appname = "MyApp"
+    appauthor = "MyCompany"
+
+    props = (
+        "user_data_dir",
+        "user_config_dir",
+        "user_cache_dir",
+        "user_state_dir",
+        "user_log_dir",
+        "site_data_dir",
+        "site_config_dir",
+    )
+
+    print(f"-- app dirs {__version__} --")
+
+    print("-- app dirs (with optional 'version')")
+    dirs = AppDirs(appname, appauthor, version="1.0")
+    for prop in props:
+        print(f"{prop}: {getattr(dirs, prop)}")
+
+    print("\n-- app dirs (without optional 'version')")
+    dirs = AppDirs(appname, appauthor)
+    for prop in props:
+        print(f"{prop}: {getattr(dirs, prop)}")
+
+    print("\n-- app dirs (without optional 'appauthor')")
+    dirs = AppDirs(appname)
+    for prop in props:
+        print(f"{prop}: {getattr(dirs, prop)}")
+
+    print("\n-- app dirs (with disabled 'appauthor')")
+    dirs = AppDirs(appname, appauthor=False)
+    for prop in props:
+        print(f"{prop}: {getattr(dirs, prop)}")
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_awaits/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/_awaits/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7a0065c7dfab67492606091b63928fd4f6059d8
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_awaits/__init__.py
@@ -0,0 +1,54 @@
+from __future__ import annotations
+
+from typing import cast, Callable, Generic, Type, TypeVar
+
+import torch
+
+__all__ = ['Await']
+
+W = TypeVar("W")
+
+# Combined metaclass so _Await can inherit from both the C++-bound
+# torch._C._Await and typing.Generic without a metaclass conflict.
+class _PyAwaitMeta(type(torch._C._Await), type(Generic)):  # type: ignore[misc, no-redef]
+    pass
+
+class _Await(torch._C._Await, Generic[W], metaclass=_PyAwaitMeta):
+    r"""
+    Wrapper around a ``torch._C.Await`` which encapsulates delayed execution
+    of a callable. All manipulations happen with functions ``torch.jit._awaitable``,
+    ``torch.jit._awaitable_wait``, ``torch.jit._awaitable_nowait``.
+
+    Torch scriptable manipulations:
+    ``torch.jit._awaitable(func, *args)``
+    Creates ``Await[W]`` object, where W is return type of func.
+
+    Returns:
+    ``torch.jit._awaitable_wait(Await[W])``
+    Returns the result of the function, specified at ``_awaitable``, with specified arguments.
+
+    Returns:
+        The result of type ``W`` of the function call. The result is owned by ``Await[W]``
+        and returned on all following ``_awaitable_wait`` calls.
+
+
+    ``torch.jit._awaitable_nowait(W)``
+    Returns:
+        Trivial ``Await[W]`` with specified result.
+
+
+    Only in eager mode:
+    ``fn() -> Callable[Tuple[Any], W]``
+    Returns:
+        Specified at ``_awaitable`` python function ``func``.
+
+    ``args() -> Tuple[Any]``
+    Returns:
+        Specified at ``_awaitable`` python args.
+
+    ``is_nowait() -> _bool``
+    Returns:
+        ``True`` if this object was created via ``_awaitable_nowait`` call (trivial `Await[W]`).
+
+    In eager mode ``Await[W]`` can be used as ``W`` i.e. attributes of W can be called on ``Await[W]``,
+    ``_awaitable_wait()`` call will be transparently added.
+    """
+    # No Python-side behavior: everything is inherited from the C++ binding;
+    # this subclass only adds the Generic[W] type parameter for annotations.
+    pass
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_awaits/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_awaits/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..011d9c372484844174c7d42c8ce1d3c16b750a95
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_awaits/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_classes.py b/env-llmeval/lib/python3.10/site-packages/torch/_classes.py
new file mode 100644
index 0000000000000000000000000000000000000000..870073fea6eaf852f7886e559311a6aa50354455
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_classes.py
@@ -0,0 +1,55 @@
+import types
+
+import torch._C
+
+
+class _ClassNamespace(types.ModuleType):
+    """Lazy module-like proxy for one custom-class namespace (``torch.classes.<name>``)."""
+
+    def __init__(self, name):
+        super().__init__("torch.classes" + name)
+        self.name = name
+
+    def __getattr__(self, attr):
+        # Resolve the wrapped class lazily through the C++ custom-class registry.
+        proxy = torch._C._get_custom_class_python_wrapper(self.name, attr)
+        if proxy is None:
+            raise RuntimeError(f"Class {self.name}.{attr} not registered!")
+        return proxy
+
+
+class _Classes(types.ModuleType):
+    """The ``torch.classes`` pseudo-module; hands out per-namespace proxies on demand."""
+
+    __file__ = "_classes.py"
+
+    def __init__(self):
+        super().__init__("torch.classes")
+
+    def __getattr__(self, name):
+        # Create the namespace proxy on first access and cache it via setattr,
+        # so __getattr__ is not hit again for the same name.
+        namespace = _ClassNamespace(name)
+        setattr(self, name, namespace)
+        return namespace
+
+    @property
+    def loaded_libraries(self):
+        # Shares the same library set as torch.ops.
+        return torch.ops.loaded_libraries
+
+    def load_library(self, path):
+        """
+        Loads a shared library from the given path into the current process.
+
+        The library being loaded may run global initialization code to register
+        custom classes with the PyTorch JIT runtime. This allows dynamically
+        loading custom classes. For this, you should compile your class
+        and the static registration code into a shared library object, and then
+        call ``torch.classes.load_library('path/to/libcustom.so')`` to load the
+        shared object.
+
+        After the library is loaded, it is added to the
+        ``torch.classes.loaded_libraries`` attribute, a set that may be inspected
+        for the paths of all libraries loaded using this function.
+
+        Args:
+            path (str): A path to a shared library to load.
+        """
+        # Delegates to torch.ops, which owns the actual dlopen bookkeeping.
+        torch.ops.load_library(path)
+
+
+# The singleton "torch.classes" namespace object through which registered
+# custom classes are accessed.
+classes = _Classes()
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_compile.py b/env-llmeval/lib/python3.10/site-packages/torch/_compile.py
new file mode 100644
index 0000000000000000000000000000000000000000..354d64e9ff9fddc9a1dc321241ce8bea7955b58a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_compile.py
@@ -0,0 +1,30 @@
+"""
+APIs related to torch.compile which lazily import torch._dynamo to avoid
+circular dependencies.
+"""
+import functools
+
+
+def _disable_dynamo(fn=None, recursive=True):
+    """
+    This API should be only used inside torch, external users should still use
+    torch._dynamo.disable. The main goal of this API is to avoid circular
+    imports issues that is common while using _dynamo.disable inside torch
+    itself.
+
+    This API avoids it by lazily importing torch._dynamo from the import time to
+    the invocation of the decorated function.
+    """
+    if fn is not None:
+
+        @functools.wraps(fn)
+        def inner(*args, **kwargs):
+            # Import deferred to call time to break the import cycle.
+            import torch._dynamo
+
+            return torch._dynamo.disable(fn, recursive)(*args, **kwargs)
+
+        return inner
+    else:
+        # decorator usage like @_disable_dynamo(recursive=False). The resulting
+        # object expects the original decorated function as the arg.
+        return functools.partial(_disable_dynamo, recursive=recursive)
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_custom_ops.py b/env-llmeval/lib/python3.10/site-packages/torch/_custom_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe396da3fb90e621542fb98b6a48d1eebc7df138
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_custom_ops.py
@@ -0,0 +1,322 @@
+import inspect
+
+from torch._custom_op.impl import (
+ _custom_op_with_schema,
+ _find_custom_op,
+ infer_schema,
+ parse_qualname,
+ validate_namespace,
+)
+from torch.library import get_ctx
+
+__all__ = [
+ "custom_op",
+ "impl",
+ "impl_abstract",
+ "get_ctx",
+ "impl_save_for_backward",
+ "impl_backward",
+]
+
+
+def custom_op(qualname, func_or_schema=None):
+    r"""Register a new custom operator
+
+    In PyTorch, defining an op (short for "operator") is a two step-process:
+    - we need to define the op (by providing an operator name and schema)
+    - we need to implement behavior for how the operator interacts with
+      various PyTorch subsystems, like CPU/CUDA Tensors, Autograd, etc.
+
+    This entrypoint defines the custom operator (the first step)
+    you must then perform the second step by calling various
+    ``impl_*`` APIs.
+
+    This API may be used as a decorator (see examples).
+
+    For a detailed guide on custom ops, please see
+    https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk
+
+    Arguments:
+        qualname (str): Should be a string that looks like
+            "namespace::operator_name". Operators in PyTorch need a namespace to
+            avoid name collisions; a given operator may only be created once.
+            If you are writing a Python library, we recommend the namespace to
+            be the name of your top-level module.
+        func_or_schema (Union[Callable, str]): Each PyTorch operator needs a
+            schema that tells PyTorch the types of the inputs/outputs.
+            If this is a Callable, we will automatically infer the schema from
+            the type annotations on the function (see examples). Otherwise,
+            if you don't want to use type annotations, you may provide us the
+            schema string.
+
+    Example::
+        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
+        >>> import torch
+        >>> import numpy as np
+        >>> from torch import Tensor
+        >>>
+        >>> # Step 1: define the custom op.
+        >>> # We need to provide the API a "prototype function"
+        >>> # (a function that returns NotImplementedError), from which
+        >>> # we will infer the types of the inputs and outputs.
+        >>> @torch._custom_ops.custom_op("mylibrary::numpy_sin")
+        >>> def numpy_sin(x: Tensor) -> Tensor:
+        >>>     raise NotImplementedError()
+        >>>
+        >>> # The custom op is now accessible via the torch.ops module:
+        >>> torch.ops.mylibrary.numpy_sin
+        >>>
+        >>> # Step 2: Register an implementation for various PyTorch subsystems
+        >>>
+        >>> # Register an implementation for CPU tensors
+        >>> @torch._custom_ops.impl("mylibrary::numpy_sin", device_types="cpu")
+        >>> def numpy_sin_impl_cpu(x):
+        >>>     return torch.from_numpy(np.sin(x.numpy()))
+        >>>
+        >>> # Register an implementation for CUDA tensors
+        >>> @torch._custom_ops.impl("mylibrary::numpy_sin", device_types="cuda")
+        >>> def numpy_sin_impl_cuda(x):
+        >>>     return torch.from_numpy(np.sin(x.cpu().numpy())).to(x.device)
+        >>>
+        >>> x = torch.randn(3)
+        >>> torch.ops.mylibrary.numpy_sin(x) # calls numpy_sin_impl_cpu
+        >>>
+        >>> x_cuda = x.cuda()
+        >>> torch.ops.mylibrary.numpy_sin(x) # calls numpy_sin_impl_cuda
+
+    """
+    ns, name = parse_qualname(qualname)
+    validate_namespace(ns)
+
+    def inner(func):
+        # The prototype must be a plain Python function whose __name__
+        # matches the operator name in `qualname`.
+        if not inspect.isfunction(func):
+            raise ValueError(
+                f"custom_op(...)(func): Expected `func` to be a Python "
+                f"function, got: {type(func)}"
+            )
+
+        if func.__name__ != name:
+            raise ValueError(
+                f"custom_op(qualname='{qualname}', ...)(func): expected `func` "
+                f"to have name '{name}' but got '{func.__name__}'. "
+                f"Please either change the name of `func` or the qualname that "
+                f"is passed to `custom_op`"
+            )
+
+        schema = infer_schema(func)
+        _custom_op_with_schema(qualname, schema)
+        return func
+
+    if func_or_schema is None:
+        return inner
+    if isinstance(func_or_schema, str):
+        # NOTE(review): this branch returns None -- registration happens only
+        # as a side effect when a schema string is passed. Presumably callers
+        # rely on torch.ops resolution afterwards; confirm before changing.
+        _custom_op_with_schema(qualname, func_or_schema)
+    else:
+        return inner(func_or_schema)
+
+
+def impl(qualname, *, device_types=("cpu", "cuda"), func=None):
+    r"""Register an implementation for a device type for this custom op.
+
+    If the op is passed multiple Tensor inputs with different device
+    types, it will dispatch to the registered implementation for the highest
+    priority device type among those present.
+    The supported device types, in order of priority, are {'cuda', 'cpu'}.
+
+    This API may be used as a decorator (see examples).
+
+    For a detailed guide on custom ops, please see
+    https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk
+
+    Arguments:
+        device_types (str or Iterable[str]): the device type(s) to register the function for.
+
+    Example::
+        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
+        >>> import torch
+        >>> import numpy as np
+        >>> from torch import Tensor
+        >>>
+        >>> # Step 1: define the custom op.
+        >>> # We need to provide the API a "prototype function"
+        >>> # (a function that returns NotImplementedError), from which
+        >>> # we will infer the types of the inputs and outputs.
+        >>> @torch._custom_ops.custom_op("mylibrary::numpy_cos")
+        >>> def numpy_cos(x: Tensor) -> Tensor:
+        >>>     raise NotImplementedError()
+        >>>
+        >>> # The custom op is now accessible via the torch.ops module:
+        >>> torch.ops.mylibrary.numpy_cos
+        >>>
+        >>> # Step 2: Register an implementation for various PyTorch subsystems
+        >>>
+        >>> # Register an implementation for CPU tensors
+        >>> @torch._custom_ops.impl("mylibrary::numpy_cos", device_types="cpu")
+        >>> def numpy_cos_impl_cpu(x):
+        >>>     return torch.from_numpy(np.cos(x.numpy()))
+        >>>
+        >>> # Register an implementation for CUDA tensors
+        >>> @torch._custom_ops.impl("mylibrary::numpy_cos", device_types="cuda")
+        >>> def numpy_cos_impl_cuda(x):
+        >>>     return torch.from_numpy(np.cos(x.cpu().numpy())).to(x.device)
+        >>>
+        >>> x = torch.randn(3)
+        >>> torch.ops.mylibrary.numpy_cos(x) # calls numpy_cos_impl_cpu
+        >>>
+        >>> x_cuda = x.cuda()
+        >>> torch.ops.mylibrary.numpy_cos(x) # calls numpy_cos_impl_cuda
+
+    """
+
+    def inner(func):
+        # Look up the op (including ones defined via the raw torch.library
+        # API) and register `func` for the requested device types.
+        custom_op = _find_custom_op(qualname, also_check_torch_library=True)
+        custom_op.impl(device_types, _stacklevel=3)(func)
+        return func
+
+    # Support both decorator usage (func=None) and direct-call usage.
+    if func is None:
+        return inner
+    return inner(func)
+
+
+def impl_abstract(qualname, *, func=None):
+    r"""Register an abstract implementation for this operator.
+
+    An "abstract implementation" specifies the behavior of this operator on
+    Tensors that carry no data. Given some input Tensors with certain properties
+    (sizes/strides/storage_offset/device), it specifies what the properties of
+    the output Tensors are.
+
+    The abstract implementation has the same signature as the operator.
+    It is run for both FakeTensors and meta tensors. To write an abstract
+    implementation, assume that all Tensor inputs to the operator are
+    regular CPU/CUDA/Meta tensors, but they do not have storage, and
+    you are trying to return regular CPU/CUDA/Meta tensor(s) as output.
+    The abstract implementation must consist of only PyTorch operations
+    (and may not directly access the storage or data of any input or
+    intermediate Tensors).
+
+    This API may be used as a decorator (see examples).
+
+    For a detailed guide on custom ops, please see
+    https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk
+
+    Examples::
+        >>> import numpy as np
+        >>> from torch import Tensor
+        >>>
+        >>> # Example 1: an operator without data-dependent output shape
+        >>> @torch._custom_ops.custom_op("mylibrary::custom_linear")
+        >>> def custom_linear(x: Tensor, weight: Tensor, bias: Tensor) -> Tensor:
+        >>>     raise NotImplementedError()
+        >>>
+        >>> @torch._custom_ops.impl_abstract("mylibrary::custom_linear")
+        >>> def custom_linear_abstract(x, weight):
+        >>>     assert x.dim() == 2
+        >>>     assert weight.dim() == 2
+        >>>     assert bias.dim() == 1
+        >>>     assert x.shape[1] == weight.shape[1]
+        >>>     assert weight.shape[0] == bias.shape[0]
+        >>>     assert x.device == weight.device
+        >>>
+        >>>     return (x @ weight.t()) + bias
+        >>>
+        >>> # Example 2: an operator with data-dependent output shape
+        >>> @torch._custom_ops.custom_op('mylibrary::custom_nonzero')
+        >>> def custom_nonzero(x: Tensor) -> Tensor:
+        >>>     ...
+        >>>
+        >>> @torch._custom_ops.impl_abstract("mylibrary::custom_nonzero")
+        >>> def custom_nonzero_abstract(x):
+        >>>     # Number of nonzero-elements is data-dependent.
+        >>>     # Since we cannot peek at the data in an abstract impl,
+        >>>     # we use the ctx object to construct a new symint that
+        >>>     # represents the data-dependent size.
+        >>>     ctx = torch._custom_ops.get_ctx()
+        >>>     nnz = ctx.create_unbacked_symint()
+        >>>     shape = [x.dim(), nnz]
+        >>>     result = x.new_empty(shape, dtype=torch.long)
+        >>>     return result
+        >>>
+        >>> @torch._custom_ops.impl("mylibrary::custom_nonzero")
+        >>> def custom_nonzero_impl(x):
+        >>>     x_np = to_numpy(x)
+        >>>     res = np.stack(np.nonzero(x_np), axis=1)
+        >>>     # unbacked symbolic ints in PyTorch must be >= 2, so we
+        >>>     # constrain the range to at least 2
+        >>>     if res.shape[0] <= 1:
+        >>>         raise RuntimeError("not supported")
+        >>>     return torch.tensor(res, device=x.device)
+
+    """
+    # Deferred import to avoid a circular dependency; this function is a thin
+    # forwarding wrapper over the public torch.library API.
+    import torch.library
+
+    return torch.library.impl_abstract(qualname, func, _stacklevel=2)
+
+
+def impl_save_for_backward(qualname, *, func=None):
+    r"""Register a function that tells us what to save for backward.
+
+    Please see :func:`impl_backward` for more details.
+    """
+
+    def inner(func):
+        custom_op = _find_custom_op(qualname, also_check_torch_library=True)
+        custom_op.impl_save_for_backward(_stacklevel=3)(func)
+        return func
+
+    # Support both decorator usage (func=None) and direct-call usage.
+    if func is None:
+        return inner
+    return inner(func)
+
+
+def impl_backward(qualname, output_differentiability=None, *, func=None):
+    r"""Registers a backward formula for an operator.
+
+    In order for an operator to work with autograd, you need to register
+    a backward formula. There are two pieces to this:
+    1. You must give us a function to specify what to save for backward.
+       Call this the "save for backward" function.
+    2. You must give us a function that computes gradients. Call this the
+       "backward" function.
+
+    Use `impl_save_for_backward` to define a "save for backward" function
+    that specifies what gets saved for backward. The function should accept
+    two arguments ``(inputs, output)`` and return the quantities to be saved
+    for backward.
+
+    During runtime, when you call the operator in a forwards pass, PyTorch
+    will invoke the "save for backward" function with the inputs and output
+    of the operator.
+
+    Use `impl_backward` to define the "backward" function. The backward
+    function must accept ``(ctx, saved, *grads)``:
+    - ``ctx`` is a context object where we may provide information
+    - ``saved`` is exactly what gets returned from the "save for backward"
+      function
+    - ``grads`` is one or more gradients. The number of gradients matches
+      the number of outputs of the operator.
+
+    The backward function must return a dict that maps the name of
+    an input to the operator to its corresponding gradient. All inputs that
+    were declared to be Tensors in the operator definition must be accounted
+    for in the dict. The gradient may be a Tensor or None.
+
+    For a detailed guide on custom ops, please see
+    https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk
+
+    """
+
+    def inner(func):
+        custom_op = _find_custom_op(qualname, also_check_torch_library=True)
+        custom_op.impl_backward(output_differentiability, _stacklevel=3)(func)
+        return func
+
+    # Support both decorator usage (func=None) and direct-call usage.
+    if func is None:
+        return inner
+    return inner(func)
+
+
+def _destroy(qualname):
+    """De-registers a custom op. For testing purposes only"""
+    # Note: unlike the registration helpers above, this does not consult the
+    # raw torch.library registrations (no also_check_torch_library flag).
+    custom_op = _find_custom_op(qualname)
+    custom_op._destroy()
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_deploy.py b/env-llmeval/lib/python3.10/site-packages/torch/_deploy.py
new file mode 100644
index 0000000000000000000000000000000000000000..30c022eac8793b1efba6d7bd8862469c19794e1b
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_deploy.py
@@ -0,0 +1,105 @@
+import io
+
+import torch
+from torch.package import Importer, OrderedImporter, PackageImporter, sys_importer
+from torch.package._package_pickler import create_pickler
+from torch.package._package_unpickler import PackageUnpickler
+from torch.serialization import _maybe_decode_ascii
+
+
+def _save_storages(importer, obj):
+    """Pickle ``obj`` for torch::deploy, externalizing tensor storages.
+
+    Returns a 4-tuple of (pickle bytes, externalized storages, their dtypes,
+    and the importer's zip_reader if a PackageImporter was supplied).
+    """
+    serialized_storages = []
+    serialized_dtypes = []
+
+    # Only a PackageImporter is honored; anything else falls back to sys_importer.
+    importer = importer if isinstance(importer, torch.package.PackageImporter) else None
+    importers: Importer
+    if importer is not None:
+        importers = OrderedImporter(importer, sys_importer)
+    else:
+        importers = sys_importer
+
+    def persistent_id(obj):
+        # Externalize storages: record them in the side lists and emit a
+        # ("storage", index) token into the pickle stream instead.
+        if torch.is_storage(obj) or isinstance(obj, torch.storage.TypedStorage):
+            if isinstance(obj, torch.storage.TypedStorage):
+                # TODO: Once we decide to break serialization FC, we can
+                # remove this case
+                storage = obj._untyped_storage
+                dtype = obj.dtype
+            else:
+                storage = obj
+                dtype = torch.uint8
+
+            # NOTE(review): appends the original `obj`, not the unwrapped
+            # `storage` local -- presumably deliberate so the TypedStorage
+            # wrapper survives the round trip; confirm against _load_storages.
+            serialized_storages.append(obj)
+            serialized_dtypes.append(dtype)
+            return ("storage", len(serialized_storages) - 1)
+
+        # Objects that opt into deploy-aware reduction are cached by id so the
+        # same object is only reduced once.
+        if hasattr(obj, "__reduce_deploy__"):
+            if _serialized_reduces.get(id(obj)) is None:
+                _serialized_reduces[id(obj)] = (
+                    "reduce_deploy",
+                    id(obj),
+                    *obj.__reduce_deploy__(importers),
+                )
+            return _serialized_reduces[id(obj)]
+
+        return None
+
+    # Write the pickle data for `obj`
+    data_buf = io.BytesIO()
+    pickler = create_pickler(data_buf, importers)
+    pickler.persistent_id = persistent_id
+    pickler.dump(obj)
+    data_value = data_buf.getvalue()
+    return (
+        data_value,
+        serialized_storages,
+        serialized_dtypes,
+        importer.zip_reader if importer else None,
+    )
+
+
+def _load_storages(id, zip_reader, obj_bytes, serialized_storages, serialized_dtypes):
+    """Inverse of :func:`_save_storages`: unpickle ``obj_bytes``, re-attaching
+    the externalized storages, and cache the result in ``_deploy_objects[id]``."""
+
+    def persistent_load(saved_id):
+        # Mirror of persistent_id in _save_storages: tokens are tuples whose
+        # first element names the kind.
+        assert isinstance(saved_id, tuple)
+        typename = _maybe_decode_ascii(saved_id[0])
+        data = saved_id[1:]
+
+        if typename == "storage":
+            # TODO: Once we decide to break serialization FC, we can
+            # stop wrapping with TypedStorage
+            storage = serialized_storages[data[0]]
+            dtype = serialized_dtypes[data[0]]
+            return torch.storage.TypedStorage(
+                wrap_storage=storage.untyped(), dtype=dtype
+            )
+
+        if typename == "reduce_deploy":
+            # Rebuild at most once per reduce_id; subsequent loads reuse the cache.
+            reduce_id, func, args = data
+            if reduce_id not in _loaded_reduces:
+                _loaded_reduces[reduce_id] = func(_raw_packages[zip_reader], *args)
+            return _loaded_reduces[reduce_id]
+
+        return None
+
+    importer: Importer
+    if zip_reader is not None:
+        importer = OrderedImporter(_get_package(zip_reader), sys_importer)
+    else:
+        importer = sys_importer
+
+    unpickler = PackageUnpickler(importer, io.BytesIO(obj_bytes))
+    unpickler.persistent_load = persistent_load  # type: ignore[assignment]
+    result = _deploy_objects[id] = unpickler.load()
+    return result
+
+
+def _get_package(zip_reader):
+    """Return the (cached) PackageImporter for ``zip_reader``, creating it on first use."""
+    if zip_reader not in _raw_packages:
+        _raw_packages[zip_reader] = PackageImporter(zip_reader)
+    return _raw_packages[zip_reader]
+
+
+# Process-wide caches shared by the save/load paths above.
+_raw_packages: dict = {}  # zip_reader -> PackageImporter
+_deploy_objects: dict = {}  # id -> last object loaded under that id
+_serialized_reduces: dict = {}  # id(obj) -> ("reduce_deploy", ...) token
+_loaded_reduces: dict = {}  # reduce_id -> rebuilt object
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_guards.py b/env-llmeval/lib/python3.10/site-packages/torch/_guards.py
new file mode 100644
index 0000000000000000000000000000000000000000..69912b15313d8b1e5e2bb359b5393b22b590ecc3
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_guards.py
@@ -0,0 +1,833 @@
+from __future__ import annotations
+
+import contextlib
+
+import dataclasses
+import enum
+import functools
+import logging
+import threading
+import traceback
+import unittest.mock
+import weakref
+from abc import ABC, abstractmethod
+from contextlib import contextmanager
+from typing import (
+ Any,
+ Callable,
+ Dict,
+ Generic,
+ List,
+ NamedTuple,
+ Optional,
+ Set,
+ Tuple,
+ TYPE_CHECKING,
+ TypeVar,
+)
+
+import torch
+from torch.utils import _pytree as pytree
+from torch.utils._traceback import CapturedTraceback
+from torch.utils.weak import WeakTensorKeyDictionary
+
+log = logging.getLogger(__name__)
+
+
+if TYPE_CHECKING:
+ # Import the following modules during type checking to enable code intelligence features,
+ # such as auto-completion in tools like pylance, even when these modules are not explicitly
+ # imported in user code.
+
+ import sympy
+
+
+"""
+torch._guards is the definitional source of truth for general purpose guard structures.
+
+An important thing to keep in mind here is the preservation of layering. There should be no dynamo notions,
+and no guard installation notions here.
+"""
+
+
+class CompileId(NamedTuple):
+    """Identifies one compilation of one frame; rendered as "frame/compile"."""
+
+    frame_id: int
+    # This id is per-frame, and counts how many times we've compiled this
+    # frame. This could have been a global id but having this be per-frame
+    # gives you a better intuitive sense for how many recompiles have occurred
+    # so far.
+    frame_compile_id: int
+    # TODO: consider also tracking the recompilation count
+
+    def __str__(self):
+        return f"{self.frame_id}/{self.frame_compile_id}"
+
+
+class TraceId(NamedTuple):
+    """A CompileId plus the analysis-restart attempt; attempt 0 renders as the bare CompileId."""
+
+    compile_id: CompileId
+    # This starts off as 0, and every time we restart analysis it goes
+    # up by one
+    attempt: int
+
+    def __str__(self):
+        if self.attempt == 0:
+            return str(self.compile_id)
+        else:
+            return f"{self.compile_id}_{self.attempt}"
+
+
+class GuardSource(enum.Enum):
+    """Where a guarded value originates (local/global scope, nn/FSDP module, etc.)."""
+
+    LOCAL = 0
+    GLOBAL = 1
+    LOCAL_NN_MODULE = 2
+    GLOBAL_NN_MODULE = 3
+    CONSTANT = 4
+    RANDOM_VALUE = 5
+    SHAPE_ENV = 6
+    LOCAL_FSDP_MODULE = 7
+    GLOBAL_FSDP_MODULE = 8
+
+    def is_fsdp_module(self) -> bool:
+        return self in (GuardSource.GLOBAL_FSDP_MODULE, GuardSource.LOCAL_FSDP_MODULE)
+
+    def is_nn_module(self) -> bool:
+        # FSDP modules count as nn modules for guard purposes.
+        return (
+            self
+            in (
+                GuardSource.GLOBAL_NN_MODULE,
+                GuardSource.LOCAL_NN_MODULE,
+            )
+            or self.is_fsdp_module()
+        )
+
+    def is_local(self):
+        return self in (
+            GuardSource.LOCAL,
+            GuardSource.LOCAL_NN_MODULE,
+            GuardSource.LOCAL_FSDP_MODULE,
+        )
+
+
+"""
+Base class for a "GuardBuilder" role.
+
+The GuardBuilderBase role is to represent a scope within which to build a guard. The name is a little
+confusing, as its not a builder, but for the sake of avoiding a lot of renames and keeping the original reference
+to torchdynamo's GuardBuilder.
+
+Note: create_fn is invoked with a GuardBuilderBase and a Guard. A GuardBuilder is chosen based
+on GuardSource's select function.
+
+There is value in keeping this GuardBuilderBase empty to keep layering clean.
+"""
+
+
+class GuardBuilderBase:
+    # Intentionally empty: this is only a scope marker passed to a guard's
+    # create_fn; keeping it empty preserves the layering described above.
+    pass
+
+
+class ShapeGuard(NamedTuple):
+    # A symbolic-shape guard: the sympy expression to check, plus the
+    # traceback captured where it was created (for diagnostics).
+    expr: sympy.Expr
+    stack: CapturedTraceback
+
+
+@dataclasses.dataclass
+class Guard:
+ # originating_source is the source that called the make_guard method to
+ # construct this guard object. The property name specifies what exactly it
+ # is the guard is guarding on. The meaning of the name is dependent on the
+ # create_fn; you must look at the use-site inside create_fn to know what
+ # name means.
+ #
+ # That being said, although you might think this is just a "name", name is
+ # usually an arbitrary Python expression that will be evaluated with all
+ # globals (and locals, if you create a LOCAL guard) to extract the Python
+ # object that we want to perform guard tests on. This evaluation
+ # typically happens in GuardBuilder.eval. In these cases, name is
+ # typically produced by originating_source.name() (not to be confused with
+ # GuardSource - the property source).
+ #
+ # Occasionally, name is not a valid Python expression; sometimes
+ # it is meaningless. Example create_fns that are like this include
+ # GRAD_MODE and SHAPE_ENV.
+ originating_source: Source
+ create_fn: Callable[[GuardBuilderBase, Guard], None]
+
+ # Export only. These values are written to at time of guard check_fn creation.
+ guard_types: Optional[List[str]] = None
+ code_list: Optional[List[str]] = None
+ obj_weakref: Optional[object] = None
+ guarded_class_weakref: Optional[type] = None
+
+ stack = None
+ user_stack = None
+ _hash = None
+
+ def __hash__(self):
+ if self._hash is None:
+ self._hash = hash((self.name, self.source, id(self.create_fn)))
+ return self._hash
+
+ def sort_key(self):
+ return (
+ self.source.value if self.source else -1,
+ len(self.name),
+ self.name,
+ self.inner_create_fn().__code__.co_firstlineno,
+ )
+
+ def __lt__(self, other):
+ return self.sort_key() < other.sort_key()
+
+ def inner_create_fn(self):
+ if isinstance(self.create_fn, functools.partial):
+ return self.create_fn.func
+ else:
+ return self.create_fn
+
+ @property
+ def name(self) -> str:
+ return self.originating_source.name()
+
+ @property
+ def source(self) -> GuardSource:
+ return self.originating_source.guard_source()
+
+ @staticmethod
+ def weakref_to_str(obj_weakref):
+ """
+ This is a workaround of a Python weakref bug.
+
+ `obj_weakref` is instance returned by `weakref.ref`,
+ `str(obj_weakref)` is buggy if the original obj overrides __getattr__, e.g:
+
+ class MyConfig(dict):
+ def __getattr__(self, x):
+ return self[x]
+
+ obj = MyConfig(offset=5)
+ obj_weakref = weakref.ref(obj)
+ str(obj_weakref) # raise error: KeyError: '__name__'
+ """
+ if isinstance(obj_weakref, weakref.ReferenceType):
+ obj = obj_weakref()
+ if obj is not None:
+ return f""
+ else:
+ return f""
+ else:
+ return str(obj_weakref)
+
+ def __repr__(self):
+ s = f"""
+ {self.source.name.lower() if self.source else ""} {repr(self.name)} {self.inner_create_fn().__name__}
+ {{
+ 'guard_types': {self.guard_types},
+ 'code': {self.code_list},
+ 'obj_weakref': {self.weakref_to_str(self.obj_weakref)}
+ 'guarded_class': {self.guarded_class_weakref}
+ }}
+ """
+ return s
+
+ def __str__(self):
+ output = f"Name: {repr(self.name)}\n"
+ source = self.source.name.lower() if self.source else ""
+ output += f" Source: {source}\n"
+ output += f" Create Function: {self.inner_create_fn().__name__}\n"
+ output += f" Guard Types: {self.guard_types}\n"
+ output += f" Code List: {self.code_list}\n"
+ output += f" Object Weakref: {self.weakref_to_str(self.obj_weakref)}\n"
+ output += f" Guarded Class Weakref: {self.guarded_class_weakref}\n"
+ return output
+
+ def create(self, builder: GuardBuilderBase):
+ try:
+ return self.create_fn(builder, self)
+ except Exception:
+ log.error("Error while creating guard:\n%s", str(self).rstrip())
+ if self.stack:
+ log.error("Created at:\n%s", "".join(self.stack.format()[-4:]).rstrip())
+ raise
+
+ def is_nn_module(self):
+ return self.source.is_nn_module()
+
+ def is_fsdp_module(self):
+ return self.source.is_fsdp_module()
+
+ def is_local(self):
+ return self.source.is_local()
+
+ def set_export_info(self, guard_type, guarded_class, code_list, obj_weakref):
+ if not self.guard_types:
+ self.guard_types = list()
+
+ self.guard_types.append(guard_type)
+
+ assert self.guarded_class_weakref in (
+ guarded_class,
+ None,
+ ), "Guarded class id must be identical, or None"
+ self.guarded_class_weakref = guarded_class
+
+ if not self.code_list:
+ self.code_list = code_list
+ else:
+ self.code_list.extend(code_list)
+
+ assert self.obj_weakref in (
+ obj_weakref,
+ None,
+ ), "Guarded object must be identical, or None"
+ self.obj_weakref = obj_weakref
+
+
+T = TypeVar("T")
+
+"""
+Parent structure for guard env expressions.
+A GuardEnvExpr can have any subtype.
+Note: All subtypes must be handled exhaustively in
+torch._dynamo.guards._parse_guard_env_guards to avoid a RuntimeError.
+"""
+
+
+@dataclasses.dataclass
+class GuardEnvExpr:
+    # Intentionally empty parent type for guard-environment expressions; see
+    # the module comment above about exhaustive handling of its subtypes.
+    pass
+
+
+"""
+A class representing a pair of duplicate inputs.
+input_pos_a and input_pos_b are input positions we have deduped.
+"""
+
+
+@dataclasses.dataclass
+class DuplicateInputs(GuardEnvExpr):
+    # The two input sources that were deduplicated; they must be distinct.
+    input_source_a: Source
+    input_source_b: Source
+
+    def __post_init__(self):
+        assert self.input_source_a != self.input_source_b
+
+
+"""
+Checkpointable is an interface for driving state snapshotting, left purposely vague for now.
+
+copy_graphstate() -> T, a somewhat legacy name, is expected to emit a snapshot of any type that
+can also be taken in at restore_graphstate(T) calls.
+
+When to snapshot, is, at the moment, an implementation detail of upstream callers. Checkpointable
+does not provide any garuantees around consistency, idempotency, or safety of calling its APIs, yet.
+
+In the future, it will have a closer coupling to a generic Checkpoint management system.
+"""
+
+
+class Checkpointable(ABC, Generic[T]):
+    """Interface for state snapshotting; see the module comment above for caveats."""
+
+    @abstractmethod
+    def copy_graphstate(self) -> T:
+        ...
+
+    @abstractmethod
+    def restore_graphstate(self, state: T):
+        ...
+
+
+"""
+The GuardCheckpointState - it is the T of Checkpointable[T] for GuardsContext
+"""
+
+
+class GuardsCheckpointState:
+    # NOTE(review): class-level mutable default, immediately shadowed by the
+    # instance attribute in __init__ -- presumably only kept for annotation
+    # purposes; confirm before relying on the class attribute.
+    dynamo_guards: Set[Guard] = set()
+
+    def __init__(self, dynamo_guards):
+        self.dynamo_guards = dynamo_guards
+
+    """
+    Produces a delta against another GuardsCheckpointState.
+
+    Returns None if no delta is found, otherwise, return a set() of mismatched
+    Guard type objects.
+    """
+
+    def diff(self, other):
+        # One-sided difference: guards present here but absent in `other`.
+        r = self.dynamo_guards.difference(other.dynamo_guards)
+        if len(r) == 0:
+            return None
+        return r
+
+    def __eq__(self, other):
+        return self.diff(other) is None
+
+
+class ModuleContextCheckpointState:
+    # Class-level default, shadowed per-instance in __init__.
+    nn_modules: Dict[str, torch.nn.Module] = {}
+
+    def __init__(self, nn_modules):
+        self.nn_modules = nn_modules
+
+    """
+    Produces a delta against another ModuleContextCheckpointState.
+
+    Returns None if no delta is found, otherwise, return a set() of mismatched
+    module key names.
+    """
+
+    def diff(self, other):
+        # One-sided key difference: module names present here but not in `other`.
+        r = set(self.nn_modules.keys()).difference(set(other.nn_modules.keys()))
+        if len(r) == 0:
+            return None
+        return r
+
+    def __eq__(self, other):
+        return self.diff(other) is None
+
+
+class ModuleContext(Checkpointable[ModuleContextCheckpointState]):
+    """Checkpointable registry of nn modules seen during tracing, keyed by name."""
+
+    def __init__(self):
+        self.nn_modules: Dict[str, Any] = {}
+
+    def copy_graphstate(self):
+        # Shallow copy: the snapshot shares module objects but not the dict.
+        return ModuleContextCheckpointState(dict(self.nn_modules))
+
+    def restore_graphstate(self, state):
+        assert isinstance(state, ModuleContextCheckpointState)
+        self.nn_modules = state.nn_modules
+
+
+class GlobalContextCheckpointState:
+    # Class-level default, shadowed per-instance in __init__.
+    global_state: Dict[str, Tuple[Callable, ...]] = {}
+
+    def __init__(self, global_states):
+        self.global_state = global_states
+
+    """
+    Produces a delta against another GlobalContextCheckpointState.
+
+    Returns None if no delta is found, otherwise, return a set() of mismatched
+    global key names.
+    """
+
+    def diff(self, other):
+        # One-sided key difference: state keys present here but not in `other`.
+        r = set(self.global_state.keys()).difference(set(other.global_state.keys()))
+        if len(r) == 0:
+            return None
+        return r
+
+    def __eq__(self, other):
+        return self.diff(other) is None
+
+
+class GlobalContext(Checkpointable[GlobalContextCheckpointState]):
+    """
+    This keeps track of the global torch state during tracing of a function.
+    For example, torch.is_grad_enabled.
+    """
+
+    # Closed set of state keys; restore_graphstate asserts all of them are present.
+    _supported_global_states = {
+        "grad_enabled",
+        "torch_function_enabled",
+        "autocast_enabled",
+        "autocast_cpu_enabled",
+        "autocast_gpu_dtype",
+        "autocast_cpu_dtype",
+        "autocast_cache_enabled",
+    }
+
+    def __init__(self):
+        self.global_state: Dict[str, Tuple[Callable, ...]] = {}
+
+    def copy_graphstate(self):
+        return GlobalContextCheckpointState(dict(self.global_state))
+
+    def restore_graphstate(self, state):
+        assert isinstance(state, GlobalContextCheckpointState)
+        self.global_state = state.global_state
+        assert (
+            len(self.global_state) == len(self._supported_global_states)
+            and set(self.global_state.keys()) == self._supported_global_states
+        ), "Global state mismatch"
+        # Each value is a (setter, args) pair; re-apply it to the process.
+        for func, args in self.global_state.values():
+            func(args)
+
+
"""
A GuardsContext is a checkpointable representation of all the guards in the current tracing
context. Its lifecycle is bound 1:1 to the tracing context, and it should never be instantiated
directly outside of it. For passing around internal state representations of this object,
prefer to extract them with copy_graphstate to produce a GuardsCheckpointState.
"""
+
+
+# Like a Set[Guard] but will record the user stack on all guards at the
+# time they were installed at their destination
# Like a Set[Guard] but will record the user stack on all guards at the
# time they were installed at their destination
class GuardsSet:
    def __init__(self, inner=None):
        # Backing set; shared (not copied) when one is passed in.
        self.inner = set() if inner is None else inner

    def __iter__(self):
        return iter(self.inner)

    def __len__(self):
        return len(self.inner)

    # Subtraction along with bool is typically used to determine the delta of
    # added guards between checkpoints for higher order ops
    def __sub__(self, other):
        return GuardsSet(self.inner - other.inner)

    def __bool__(self):
        return bool(self.inner)

    def add(self, guard: Guard, *, skip=0):
        """Insert `guard`, stamping compiler/user stacks on first install.

        `skip` drops that many extra frames from the captured traceback.
        """
        if guard in self.inner:
            return
        if guard.stack is None:
            guard.stack = CapturedTraceback.extract(skip=1 + skip)
        if guard.user_stack is None:
            guard.user_stack = TracingContext.extract_stack()
        self.inner.add(guard)

    def update(self, *others: Set[Guard]):
        """Add every guard from each supplied collection."""
        for group in others:
            for g in group:
                self.add(g, skip=1)
+
+
class GuardsContext(Checkpointable[GuardsCheckpointState]):
    """Accumulates dynamo and aot_autograd guards for the current trace."""

    def __init__(self):
        self.dynamo_guards: GuardsSet = GuardsSet()
        self.aotautograd_guards: List[GuardEnvExpr] = []

    def copy_graphstate(self):
        """Snapshot the dynamo guards (copied) into a checkpoint state."""
        return GuardsCheckpointState(set(self.dynamo_guards.inner))

    def restore_graphstate(self, state):
        """Adopt the guards held by `state`."""
        # NB: "steals" the passed in state
        assert isinstance(state, GuardsCheckpointState)
        self.dynamo_guards = GuardsSet(state.dynamo_guards)
+
+
# Thread-local storage holding the active TracingContext / CompileContext so
# concurrent compilations on different threads do not interfere.
_TLS = threading.local()
+
+"""
+TracingContext is the source of truth for all currently accumulated information
+needed to trace. Its lifecycle is kept 1:1 when using TorchDynamo, but other systems
+are open to managing their own TracingContext with that in mind.
+
+The purpose of TracingContext is not to be a dumping ground, or god object, but rather to avoid
+having to plumb complex subsystems across multiple verticals.
+
+Ex: A common example is guard accumulation between dynamo, shape_env, aot_autograd, and inductor.
+Accessing the current tracing context via
+TracingContext.get() allows users to accumulate their own guards for processing, without needing to know how
+to plumb objects back up to where frame interpretation happened.
+
+Note that you can end up with multiple TracingContext for a single compilation
+of a frame, as we reset the TracingContext whenever we restart analysis.
+CompileContext is a more overarching context that encompasses multiple restarts.
+"""
+
+
class CompileContext:
    """Per-compilation context tracking the CompileId and restart attempt."""

    @staticmethod
    def get() -> CompileContext:
        """Return the installed CompileContext; it must be non-None."""
        assert _TLS.compile_context is not None
        return _TLS.compile_context

    @staticmethod
    def try_get() -> Optional[CompileContext]:
        """Return the installed CompileContext, or None if none is set."""
        return getattr(_TLS, "compile_context", None)

    def __init__(self, compile_id):
        assert compile_id is None or isinstance(compile_id, CompileId)
        self.compile_id: Optional[CompileId] = compile_id
        # Number of times analysis has been (re)started for this compilation.
        self.attempt = 0

    @staticmethod
    def current_compile_id():
        """CompileId of the active compilation, or None when not compiling."""
        ctx = CompileContext.try_get()
        return None if ctx is None else ctx.compile_id

    @staticmethod
    def current_trace_id():
        """TraceId (compile id + attempt) of the active trace, or None."""
        ctx = CompileContext.try_get()
        if ctx is None or ctx.compile_id is None:
            return None
        return TraceId(ctx.compile_id, ctx.attempt)
+
+
class TracingContext:
    """
    Holds all state accumulated while tracing a frame: guards, module and
    global contexts, the fake mode, and the current frame/line position.
    Installed thread-locally via `tracing()`; accessed with `get()`/`try_get()`.
    """

    @staticmethod
    def try_get() -> Optional[TracingContext]:
        """Return the currently installed TracingContext, or None.

        Invocations outside of `with tracing()` are valid but return None.
        """
        return getattr(_TLS, "tracing_context", None)

    @staticmethod
    def get() -> TracingContext:
        """Return the installed TracingContext; raise if none is installed."""
        if ctx := TracingContext.try_get():
            return ctx
        raise RuntimeError(
            "TracingContext.get() must be called within an ongoing trace."
        )

    def __init__(self, fake_mode):
        self.guards_context = GuardsContext()
        self.module_context = ModuleContext()
        self.global_context = GlobalContext()
        # Fake tensor mode used for this trace (may be None).
        self.fake_mode = fake_mode
        # Stack of FrameSummary objects, one per in-progress function call.
        self.frame_summary_stack = []
        # This is morally part of frame_summary_stack, but it is kept separate
        # for clarity. As we process a frame, this variable gets updated
        # to keep track of what line we are in the function. We make a
        # function call, this gets cleared and the frame location is pushed
        # to frame_summary_stack (prepping this variable for the inner frame's
        # progress)
        self.loc_in_frame = None
        # this is only set after aot_autograd
        self.fw_metadata = None
        self.params_flat = None
        # this is for extended return calling convention from backend
        # compiler to aot_autograd
        # Per output, what the compiler specified stride of the output is,
        # or None if no stride is known. This is always the HINT, it
        # is never a SymInt (it would be better if it was a SymInt, but
        # I can't conveniently get this from Inductor atm. Also, be
        # careful not to accidentally induce guards on the SymInt if
        # you ever do change this in aot_autograd.py; you should check
        # on permutations preferentially.)
        self.output_strides: Optional[List[Optional[List[int]]]] = None
        # When this is True, whenever we encounter an int in Dynamo tracing,
        # we will (1) force unspec it and (2) force it as a size-like unbacked
        # integer. This is currently used when processing certain lists of
        # ints that are known to be size-like and may have 0/1 entries that we
        # must not specialize on.
        self.force_unspec_int_unbacked_size_like = False
        # See note [Tensor Fakification and Symbol Caching]
        self.tensor_to_context = WeakTensorKeyDictionary()

    @staticmethod
    @contextmanager
    def patch(**kwargs):
        """Temporarily set attributes on the current TracingContext, restoring
        the prior values on exit. Raises AttributeError for unknown names."""
        prior = {}
        ctx = TracingContext.get()

        for key in kwargs.keys():
            # AttributeError on invalid entry (getattr with no default)
            prior[key] = getattr(ctx, key)
        for key, val in kwargs.items():
            setattr(ctx, key, val)
        try:
            yield
        finally:
            for key, val in prior.items():
                setattr(ctx, key, val)

    @staticmethod
    def extract_stack():
        """Return the current user frame stack as a traceback.StackSummary.

        Outside of tracing this returns an empty StackSummary.
        """
        self = TracingContext.try_get()
        if self is None:
            return traceback.StackSummary()
        stack = list(self.frame_summary_stack)
        if self.loc_in_frame is not None:
            stack.append(self.loc_in_frame)
        return traceback.StackSummary.from_list(stack)

    # Call this when you want to call into some code that isn't necessarily
    # associated with the current frame state
    @staticmethod
    @contextlib.contextmanager
    def clear_frame():
        """Temporarily blank out the frame stack and current location."""
        tc = TracingContext.get()
        with unittest.mock.patch.object(
            tc, "frame_summary_stack", []
        ), unittest.mock.patch.object(tc, "loc_in_frame", None):
            try:
                yield
            except Exception as e:
                # Prevent real_stack from getting attached
                #
                # The invariant is that if an Exception as real_stack, we've
                # appropriately attached a user stack and we no longer need to
                # attach anything. Because we cannot conveniently interpose
                # when an exception is thrown, we instead interpose everywhere
                # we set what the user stack is set (using the context
                # manager). However, our compiler stack does "tail calls"
                # (when it calls into user compiler), at which point the
                # parent exception frames would incorrectly attach an
                # incorrect frame.
                #
                # However, if, somehow, someone raised an exception with this
                # scope that had a stack (for example, because they are
                # restoring the user stack state appropriately as they process
                # node by node), we should respect it. Thus, we cannot
                # unconditionally set None.
                if not hasattr(e, "real_stack"):
                    e.real_stack = None  # type: ignore[attr-defined]
                raise

    @staticmethod
    @contextlib.contextmanager
    def current_frame(frame_summary):
        """Push `frame_summary` for the scope of the block and attach the
        current user stack to any exception that escapes without one."""
        # frame_summary can be None to solely take advantage of real_stack
        # attachment to thrown exceptions
        tc = TracingContext.get()
        if frame_summary is not None:
            tc.frame_summary_stack.append(frame_summary)
        old = tc.loc_in_frame
        tc.loc_in_frame = None
        try:
            yield
        except Exception as e:
            if not hasattr(e, "real_stack"):
                e.real_stack = tc.extract_stack()  # type: ignore[attr-defined]
            raise
        finally:
            if frame_summary is not None:
                tc.frame_summary_stack.pop()
            tc.loc_in_frame = old

    @staticmethod
    @contextlib.contextmanager
    def report_output_strides():
        """Yield a fresh list for the backend to record output strides into
        (restored afterwards), or None when no TracingContext is installed."""
        tc = TracingContext.try_get()
        if tc is None:
            yield None
            return
        old_output_strides = tc.output_strides
        tc.output_strides = []
        try:
            yield tc.output_strides
        finally:
            tc.output_strides = old_output_strides

    @staticmethod
    def set_current_loc(filename, lineno, frame_name):
        """Record the line currently being processed in the active frame."""
        TracingContext.get().loc_in_frame = traceback.FrameSummary(
            filename, lineno, frame_name
        )
+
+
@contextmanager
def compile_context(context: CompileContext):
    """Install `context` as the thread-local CompileContext for this block."""
    prev = getattr(_TLS, "compile_context", None)
    _TLS.compile_context = context
    try:
        yield context
    finally:
        _TLS.compile_context = prev
+
+
@contextmanager
def tracing(context: Optional[TracingContext]):
    """
    This function installs the passed in tracing context as a dynamic scoped
    global variable.

    Calls to TracingContext.get() while not under a `with tracing()` context
    will return None.
    """
    prev = getattr(_TLS, "tracing_context", None)
    _TLS.tracing_context = context
    try:
        yield context
    except Exception as e:
        # Attach the user stack to escaping exceptions that lack one.
        if not hasattr(e, "real_stack") and context is not None:
            e.real_stack = context.extract_stack()  # type: ignore[attr-defined]
        raise
    finally:
        # Let the shape env clean up after itself (if one exists).
        fake_mode = context.fake_mode if context is not None else None
        if fake_mode is not None and fake_mode.shape_env is not None:
            fake_mode.shape_env.cleanup()
        _TLS.tracing_context = prev
+
+
# Subclasses can be found in torch/_dynamo/source.py
# TODO(voz): Consider a toplevel torch/_source.py
@dataclasses.dataclass(frozen=True)
class Source:
    """Abstract description of where a traced value originated."""

    def reconstruct(self, codegen):
        """Overridden by subclasses to rebuild the value via `codegen`."""
        raise NotImplementedError()

    def guard_source(self) -> GuardSource:
        """Overridden by subclasses to report their GuardSource category."""
        raise NotImplementedError()

    def name(self) -> str:
        """Overridden by subclasses to give a printable name."""
        raise NotImplementedError()

    def make_guard(self, fn) -> Guard:
        """Create a Guard on this source; constants cannot be guarded."""
        if self.guard_source() is GuardSource.CONSTANT:
            raise NotImplementedError()
        return Guard(self, fn)

    def is_nn_module(self) -> bool:
        return self.guard_source().is_nn_module()
+
+
# Subclasses can be found in torch/_dynamo/source.py
@dataclasses.dataclass(frozen=True)
class ChainedSource(Source):
    # The source this one derives from (e.g. the object an attribute/index
    # access is performed on).
    base: Source
+
+
def detect_fake_mode(inputs: Any = None):
    """
    Attempts to "detect" what the current fake mode is. If there is one ambiently
    available from TracingContext, we preferentially use that. Otherwise, we
    heuristically detect the fake mode via the following sources, in order of
    priority:

    - Currently active fake mode on stack
    - Fake mode associated with passed in tensors (inputs does not
      have to be flattened)
    """
    from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode

    candidates = []

    # Highest priority: the mode stored on the ambient tracing context.
    context = TracingContext.try_get()
    if context is not None and context.fake_mode is not None:
        candidates.append((context.fake_mode, "tracing context", 0))

    from torch.utils._python_dispatch import _get_current_dispatch_mode_stack

    # Next: any FakeTensorMode active on the dispatch mode stack (innermost first).
    for idx, mode in enumerate(reversed(_get_current_dispatch_mode_stack())):
        if isinstance(mode, FakeTensorMode):
            candidates.append((mode, "active fake mode", idx))

    # Last: modes attached to any FakeTensor among the (flattened) inputs.
    for idx, leaf in enumerate(pytree.tree_leaves(inputs)):
        if isinstance(leaf, FakeTensor):
            candidates.append((leaf.fake_mode, "fake tensor input", idx))

    if not candidates:
        return None

    # All detected modes must be the same object; report allocation sites if not.
    fake_mode, desc1, i1 = candidates[0]
    for m, desc2, i2 in candidates[1:]:
        assert fake_mode is m, (
            f"fake mode ({fake_mode}) from {desc1} {i1} doesn't match mode ({m}) from {desc2} {i2}\n\n"
            f"fake mode from {desc1} {i1} allocated at:\n{fake_mode.stack}\n"
            f"fake mode from {desc2} {i2} allocated at:\n{m.stack}"
        )
    return fake_mode
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_jit_internal.py b/env-llmeval/lib/python3.10/site-packages/torch/_jit_internal.py
new file mode 100644
index 0000000000000000000000000000000000000000..be1b86f5c860179fc9301ea27a17093bf1f5a9ae
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_jit_internal.py
@@ -0,0 +1,1510 @@
+"""
+The weak_script annotation needs to be here instead of inside torch/jit/ so it
+can be used in other places in torch/ (namely torch.nn) without running into
+circular dependency problems
+"""
+
+import ast
+import builtins
+import collections
+import contextlib
+import enum
+import inspect
+import io
+import pickle
+import sys
+import threading
+import types
+import typing
+import warnings
+import weakref
+from textwrap import dedent
+from typing import ( # noqa: F401
+ Any,
+ Callable,
+ Dict,
+ Final,
+ ForwardRef,
+ Generic,
+ get_args, # new in 3.8
+ get_origin, # new in 3.8
+ List,
+ Optional,
+ Tuple,
+ Type,
+ TypeVar,
+ Union,
+)
+
+import torch
+
+# This is needed. `torch._jit_internal` is imported before `torch.distributed.__init__`.
+# Explicitly ask to import `torch.distributed.__init__` first.
+# Otherwise, "AttributeError: module 'torch' has no attribute 'distributed'" is raised.
+import torch.distributed.rpc
+import torch.package._mangling as package_mangling
+from torch._awaits import _Await
+from torch._C import _Await as CAwait, Future as CFuture
+from torch._sources import fake_range, get_source_lines_and_file, parse_def
+from torch.futures import Future
+
# Feature flags for version-dependent behavior below and elsewhere in the file.
IS_PY39_PLUS: Final[bool] = sys.version_info >= (3, 9)
IS_PY310_PLUS: Final[bool] = sys.version_info >= (3, 10)

# On 3.10+, `X | Y` unions (PEP 604) are instances of types.UnionType.
BuiltinUnionType: Union[Type, Tuple[Type, ...]]
if sys.version_info >= (3, 10):
    # NOTE: IS_PY310_PLUS doesn't work with mypy.
    # cf. https://mypy.readthedocs.io/en/stable/common_issues.html#python-version-and-system-platform-checks
    BuiltinUnionType = types.UnionType
else:
    BuiltinUnionType = ()  # trick: this makes isinstance short circuit.

# Concrete lock type, usable in isinstance checks.
LockType: Type
try:
    import _thread

    LockType = _thread.LockType
except ImportError:
    # NOTE(review): `_dummy_thread` was removed in Python 3.9, so this branch
    # presumably only fires on very old interpreters — confirm it is still needed.
    import _dummy_thread

    LockType = _dummy_thread.LockType

# Wrapper functions that can call either of 2 functions depending on a boolean
# argument
boolean_dispatched: "weakref.WeakKeyDictionary[Callable, Dict[str, Callable]]" = (
    weakref.WeakKeyDictionary()
)  # noqa: T484


# Prefix for the synthetic filenames used when compiling generated code.
FAKE_FILENAME_PREFIX = "__torch_jit_dataclass"
+
+
class SourceLoader:
    """In-memory cache mapping functions to their source text."""

    def __init__(self):
        # fn -> source string, populated via cache().
        self.content = {}

    def cache(self, fn, source):
        """Remember `source` as the source code of `fn`."""
        self.content[fn] = source

    def get_source(self, fn):
        """Return the cached source for `fn`, or None when absent."""
        return self.content.get(fn)
+
+
# Process-wide source cache, consulted before falling back to inspect.getsource.
loader = SourceLoader()
+
+
def createResolutionCallbackFromEnv(lookup_base):
    """
    Creates a resolution callback that will look up qualified names in an
    environment, starting with `lookup_base` for the base of any qualified
    names, then proceeding down the lookup chain with the resolved object.

    You should not use this directly, it should only be used from the other
    createResolutionCallbackFrom* functions.
    """

    def lookup_in_module(qualified_name, module):
        # Walk a dotted name one attribute at a time.
        head, dot, tail = qualified_name.partition(".")
        value = getattr(module, head)
        return lookup_in_module(tail, value) if dot else value

    def parse_nested_expr(expr, module) -> Tuple[Any, int]:
        # Scan the leading name, stopping at any structural character.
        i = 0
        while i < len(expr) and expr[i] not in (",", "[", "]"):
            i += 1

        # Special case logic for the empty Tuple as a subscript (used
        # in the type annotation `Tuple[()]`)
        if expr[:i] == "()":
            return (), i

        base = lookup_in_module(expr[:i].strip(), module)
        assert base is not None, f"Unresolvable type {expr[:i]}"
        if i == len(expr) or expr[i] != "[":
            return base, i

        # Recursively parse the comma-separated subscript arguments.
        assert expr[i] == "["
        parts = []
        while expr[i] != "]":
            i += 1
            part, consumed = parse_nested_expr(expr[i:], module)
            parts.append(part)
            i += consumed
        element = tuple(parts) if len(parts) > 1 else parts[0]
        return base[element], i + 1

    def parse_expr(expr, module):
        try:
            value, len_parsed = parse_nested_expr(expr, module)
            assert len_parsed == len(
                expr
            ), "whole expression was not parsed, falling back to c++ parser"
            return value
        except Exception:
            # The python resolver fails in several cases in known unit tests,
            # and is intended to fall back gracefully to the c++ resolver in
            # general. For example, python 2 style annotations which are
            # frequent in our unit tests often fail with types e.g. int not
            # resolvable from the calling frame.
            return None

    return lambda expr: parse_expr(expr, lookup_base)
+
+
def createResolutionCallbackFromFrame(frames_up: int = 0):
    """
    Creates a function which, given a string variable name,
    returns the value of the variable in the scope of the caller of
    the function which called createResolutionCallbackFromFrame (by default).

    This is used to enable access in-scope Python variables inside
    TorchScript fragments.

    frames_up is the number of additional frames to walk up on the stack:
    0 (the default) resolves names in the frame of the caller of
    createResolutionCallbackFromFrame, 1 in that caller's caller, and so on.

    For example, the following program prints 2::

        def bar():
            cb = createResolutionCallbackFromFrame(1)
            print(cb("foo"))

        def baz():
            foo = 2
            bar()

        baz()
    """
    # Walk up past our own frame plus `frames_up` extra frames.
    frame = inspect.currentframe()
    for _ in range(frames_up + 1):
        assert frame is not None
        frame = frame.f_back

    assert frame is not None
    f_locals = frame.f_locals
    f_globals = frame.f_globals

    class env:
        def __getattr__(self, key):
            # Resolution order mirrors Python's own: locals, globals, builtins.
            if key in f_locals:
                return f_locals[key]
            if key in f_globals:
                return f_globals[key]
            if key in dir(builtins):
                return getattr(builtins, key)
            # Implicitly returns None for unresolvable names.

    return createResolutionCallbackFromEnv(env())
+
+
def get_closure(fn):
    """
    Get a dictionary of closed over variables from a function
    """
    # Start from the function's globals, then overlay its closure cells
    # (free variable names pair positionally with the closure cells).
    captures = dict(fn.__globals__)
    cells = fn.__closure__ or ()
    captures.update(
        zip(fn.__code__.co_freevars, (cell.cell_contents for cell in cells))
    )
    return captures
+
+
+# [local resolution in python]
+# Depending on where a variable is defined, and where it is used, we may
+# or may not be able to recover its value when recursively compiling a
+# script function. Remember in the general case, a module or function is
+# first defined and then later scripted. This means we do not have a
+# chance to capture the active frames when the function is defined. Hence any
+# name resolution has to happen later on the created closure. The way
+# python captures type annotations restricts what we can recover. The
+# follow example illustrates the different cases:
+#
+# class MyGlobalClass:
+# ...
+# def my_local_scope():
+# @torch.jit.script
+# class MyClass:
+# ...
+# @torch.jit.script
+# class MyClassUsedAsVar:
+# ...
+# def eg(x: MyClass, y: MyGlobalClass):
+# a_local_capture : Foo
+# return MyClassUsedAsVar(x)
+#
+# MyGlobalClass is defined in the __globals__ dictionary of function
+# 'eg', so it is always recoverable. my_local_scope introduces a new local
+# variable scope in the function. Classes defined here are only visible as
+# local variables. For the case of MyClassUsedAsVar, it is captured
+# because it is used as a variable inside the body of the function, and we
+# can resolve it using the captures returned from `get_closure`. However,
+# the type annotations are not captured by the closure. In Python
# 3.0--3.9, the _value_ of MyClass and MyGlobalClass will be available as
# annotations on `eg`, but starting in Python 4.0, they will be represented as
+# strings and no longer present. Furthermore, since the body of `eg` does
+# not reference those names, they do not appear in the list of closed over
+# variables. In Python 2.x, type annotations are in comments, leading to a
+# similar situation where their definitions are not available. We anticipate
+# that most users will not run into this issue because their modules and
+# functions will be defined at a global scope like MyGlobalClass. In cases
+# where they are not, it is possible to work around issues by declaring the
+# values global in the function.
+# In Python 3.9 declaring class as global will make it invisible to
+# `inspect.getsource`, see https://bugs.python.org/issue42666 .
# This could be worked around by manually adding it to the `globals()` dictionary.
+
+
def createResolutionCallbackFromClosure(fn):
    """
    Create a resolutionCallback by introspecting the function instead of
    looking up the stack for the enclosing scope
    """
    closure = get_closure(fn)

    class closure_lookup:
        # This is a class since `closure` is a dict and it's easier in
        # `env_helper` if everything just works with `getattr` calls
        def __getattr__(self, key):
            if key in closure:
                return closure[key]
            # Fall back to typing, then builtins, mirroring name resolution
            # for annotation-level names.
            for namespace in (typing, builtins):
                if hasattr(namespace, key):
                    return getattr(namespace, key)
            return None

    return createResolutionCallbackFromEnv(closure_lookup())
+
+
def can_compile_class(cls) -> bool:
    """Return True when every routine on `cls` has a Python code object."""
    # If any of the functions on a type don't have a code object, this type can't
    # be compiled and is probably a builtin / bound from C
    if is_ignored_fn(cls):
        return False

    # Ignore the following list of built-in classes.
    if issubclass(cls, (torch.nn.Module, tuple, list, Exception)):
        return False

    routines = [
        getattr(cls, name)
        for name in cls.__dict__
        if inspect.isroutine(getattr(cls, name, None))
    ]
    return all(hasattr(fn, "__code__") for fn in routines)
+
+
def get_callable_argument_names(fn) -> List[str]:
    """
    Gets names of all POSITIONAL_OR_KEYWORD arguments for callable `fn`.
    Arguments of any other kind (*args, keyword-only, **kwargs) are skipped,
    not reported. (The previous docstring claimed an empty list was returned
    when other argument kinds were present; the code has never done that —
    it simply omits them.)

    This is used by `torch.jit.trace` to assign meaningful argument names to
    traced functions and modules.

    Args:
        fn: A callable.
    Returns:
        Argument names: List[str]
    """
    # inspect.signature may fail, give up in that case.
    try:
        callable_signature = inspect.signature(fn)
    except Exception:
        return []

    # All other parameter kinds (VAR_POSITIONAL, KEYWORD_ONLY, VAR_KEYWORD)
    # do not map to individual values with a keyword as name, so skip them.
    return [
        name
        for name, param in callable_signature.parameters.items()
        if param.kind == param.POSITIONAL_OR_KEYWORD
    ]
+
+
def get_annotation_str(annotation):
    """
    Convert an AST node containing a type annotation to the string present in the source
    that represents the same annotation.
    """
    if isinstance(annotation, ast.Name):
        return annotation.id
    if isinstance(annotation, ast.Attribute):
        return f"{get_annotation_str(annotation.value)}.{annotation.attr}"
    if isinstance(annotation, ast.Subscript):
        # In Python3.9+ subscript indicies are not wrapped in ast.Index
        subscript_slice = annotation.slice if IS_PY39_PLUS else annotation.slice.value  # type: ignore[attr-defined]
        return f"{get_annotation_str(annotation.value)}[{get_annotation_str(subscript_slice)}]"
    if isinstance(annotation, ast.Tuple):
        return ",".join(get_annotation_str(elt) for elt in annotation.elts)
    if isinstance(annotation, (ast.Constant, ast.NameConstant)):
        return f"{annotation.value}"

    # If an AST node is not handled here, it's probably handled in ScriptTypeParser.
    return None
+
+
def get_type_hint_captures(fn):
    """
    Get a dictionary containing type resolution mappings necessary to resolve types
    for the literal annotations on 'fn'. These are not considered to be closed-over by fn
    and must be obtained separately (e.g. using this function).

    Args:
        fn: A callable.
    Returns:
        A Dict[str, Any] containing a mapping from the literal annotations used on
        fn to the Python objects they refer to.
    """
    # Source text is required to recover the literal annotation strings
    # (inspect.signature only yields the resolved objects). Prefer the loader
    # cache; fall back to live source inspection.
    src = loader.get_source(fn)
    if src is None:
        src = inspect.getsource(fn)

    # Map parameter name -> annotated type, skipping unannotated parameters and
    # string annotations. String annotations typically refer to the class
    # currently being compiled; including them would recurse infinitely, and
    # ScriptTypeParser handles that case anyway.
    signature = inspect.signature(fn)
    name_to_type = {
        name: parameter.annotation
        for name, parameter in signature.parameters.items()
        if parameter.annotation is not inspect.Parameter.empty
        and not isinstance(parameter.annotation, str)
    }

    # Parse the source to recover annotations exactly as written, which covers
    # aliases (e.g. device_t = torch.device, then d: device_t).
    # frontend.py cannot be used here because it includes _jit_internal.
    parsed = ast.parse(dedent(src))
    if len(parsed.body) != 1 or not isinstance(parsed.body[0], ast.FunctionDef):
        raise RuntimeError(f"Expected {fn} to be a function")
    fn_def = parsed.body[0]

    # Build annotation-string -> type. An argument may be absent from
    # name_to_type when its annotation is itself a string; leave those to
    # ScriptTypeParser.
    annotation_to_type = {}
    for arg in fn_def.args.args:
        annotation_str = (
            get_annotation_str(arg.annotation) if arg.annotation else None
        )
        # Unannotated, or not convertible by get_annotation_str: skip.
        if annotation_str is None:
            continue
        if arg.arg in name_to_type:
            annotation_to_type[annotation_str] = name_to_type[arg.arg]

    # Apply the same constraints to the return annotation: the literal must be
    # stringifiable and the actual annotation must be a non-string type.
    literal_return_annotation = get_annotation_str(fn_def.returns)
    return_annotation = signature.return_annotation
    if (
        literal_return_annotation is not None
        and return_annotation is not inspect.Parameter.empty
        and not isinstance(return_annotation, str)
    ):
        annotation_to_type[literal_return_annotation] = return_annotation

    return annotation_to_type
+
+
def createResolutionCallbackForClassMethods(cls):
    """
    This looks at all the methods defined in a class and pulls their closed-over
    variables into a dictionary and uses that to resolve variables.
    """
    # cls is a type here, so `ismethod` is false since the methods on the type
    # aren't bound to anything, so Python treats them as regular functions
    fns = []
    for name in cls.__dict__:
        candidate = getattr(cls, name, None)
        if not inspect.isroutine(candidate):
            continue
        # Skip built-ins, as they do not have global scope nor type hints
        # Needed to support `enum.Enum` derived classes in Python-3.11
        # That adds `_new_member_` property which is an alias to `__new__`
        if inspect.isbuiltin(candidate) or not hasattr(candidate, "__globals__"):
            continue
        fns.append(candidate)

    captures = {}
    for fn in fns:
        captures.update(get_closure(fn))
        captures.update(get_type_hint_captures(fn))

    def lookup_in_class(key):
        return captures[key] if key in captures else getattr(builtins, key, None)

    return lookup_in_class
+
+
def boolean_dispatch(
    arg_name, arg_index, default, if_true, if_false, module_name, func_name
):
    """
    Dispatches to either of 2 script functions based on a boolean argument.
    In TorchScript, the boolean argument must be constant so that the correct
    function to use can be determined at compile time.
    """

    def fn(*args, **kwargs):
        # Resolve the flag from kwargs first, then positionally, else default.
        if arg_name in kwargs:
            dispatch_flag = kwargs[arg_name]
        elif arg_index < len(args):
            dispatch_flag = args[arg_index]
        else:
            dispatch_flag = default
        target = if_true if dispatch_flag else if_false
        return target(*args, **kwargs)

    # The dispatcher adopts whichever docstring exists; having both is an error.
    true_doc, false_doc = if_true.__doc__, if_false.__doc__
    if true_doc is None and false_doc is not None:
        doc = false_doc
        if_true.__doc__ = doc
    elif false_doc is None and true_doc is not None:
        doc = true_doc
        if_false.__doc__ = doc
    elif true_doc is None and false_doc is None:
        # neither function has a docstring
        doc = None
    else:
        raise RuntimeError("only one function can have a docstring")
    fn.__doc__ = doc

    if module_name is not None:
        fn.__module__ = module_name
    if func_name is not None:
        fn.__name__ = func_name

    boolean_dispatched[fn] = {
        "if_true": if_true,
        "if_false": if_false,
        "index": arg_index,
        "default": default,
        "arg_name": arg_name,
    }
    return fn
+
+
class FunctionModifiers:
    """
    Used to denote the behavior of a function in TorchScript. See export() and
    ignore() for details.
    """

    # NOTE(review): the string values look user-facing (self-describing
    # diagnostics); confirm before changing any of them.
    UNUSED = "unused (ignored and replaced with raising of an exception)"
    IGNORE = "ignore (leave as a call to Python, cannot be torch.jit.save'd)"
    EXPORT = "export (compile this function even if nothing calls it)"
    DEFAULT = "default (compile if called from a exported function / forward)"
    COPY_TO_SCRIPT_WRAPPER = (
        "if this method is not scripted, copy the python method onto the scripted model"
    )
    _DROP = "_drop (function is fully ignored, declaration can be unscriptable)"
+
+
def export(fn):
    """
    This decorator indicates that a method on an ``nn.Module`` is used as an entry point into a
    :class:`ScriptModule` and should be compiled.

    ``forward`` implicitly is assumed to be an entry point, so it does not need this decorator.
    Functions and methods called from ``forward`` are compiled as they are seen
    by the compiler, so they do not need this decorator either.

    Example (using ``@torch.jit.export`` on a method):

    .. testcode::

        import torch
        import torch.nn as nn

        class MyModule(nn.Module):
            def implicitly_compiled_method(self, x):
                return x + 99

            # `forward` is implicitly decorated with `@torch.jit.export`,
            # so adding it here would have no effect
            def forward(self, x):
                return x + 10

            @torch.jit.export
            def another_forward(self, x):
                # When the compiler sees this call, it will compile
                # `implicitly_compiled_method`
                return self.implicitly_compiled_method(x)

            def unused_method(self, x):
                return x - 20

        # `m` will contain compiled methods:
        #     `forward`
        #     `another_forward`
        #     `implicitly_compiled_method`
        # `unused_method` will not be compiled since it was not called from
        # any compiled methods and wasn't decorated with `@torch.jit.export`
        m = torch.jit.script(MyModule())
    """
    # Tag the function in place; the scripting compiler reads this attribute
    # to decide that the method must be compiled. The function is returned
    # unchanged otherwise.
    fn._torchscript_modifier = FunctionModifiers.EXPORT
    return fn
+
+
def unused(fn):
    """
    This decorator indicates to the compiler that a function or method should
    be ignored and replaced with the raising of an exception. This allows you
    to leave code in your model that is not yet TorchScript compatible and still
    export your model.

    Example (using ``@torch.jit.unused`` on a method)::

        import torch
        import torch.nn as nn

        class MyModule(nn.Module):
            def __init__(self, use_memory_efficient):
                super().__init__()
                self.use_memory_efficient = use_memory_efficient

            @torch.jit.unused
            def memory_efficient(self, x):
                import pdb
                pdb.set_trace()
                return x + 10

            def forward(self, x):
                # Use not-yet-scriptable memory efficient mode
                if self.use_memory_efficient:
                    return self.memory_efficient(x)
                else:
                    return x + 10

        m = torch.jit.script(MyModule(use_memory_efficient=False))
        m.save("m.pt")

        m = torch.jit.script(MyModule(use_memory_efficient=True))
        # exception raised
        m(torch.rand(100))
    """
    if isinstance(fn, property):
        # For a property, mark the getter and (if present) the setter.
        prop = fn
        prop.fget._torchscript_modifier = FunctionModifiers.UNUSED
        if prop.fset:
            prop.fset._torchscript_modifier = FunctionModifiers.UNUSED
        return prop

    fn._torchscript_modifier = FunctionModifiers.UNUSED
    return fn
+
+
+# No op context manager from python side
+class _IgnoreContextManager(contextlib.AbstractContextManager):
+ def __init__(self, **kwargs):
+ pass
+
+ def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
+ pass
+
+
def ignore(drop=False, **kwargs):
    """
    This decorator indicates to the compiler that a function or method should
    be ignored and left as a Python function. This allows you to leave code in
    your model that is not yet TorchScript compatible. If called from TorchScript,
    ignored functions will dispatch the call to the Python interpreter. Models with ignored
    functions cannot be exported; use :func:`@torch.jit.unused ` instead.

    Example (using ``@torch.jit.ignore`` on a method)::

        import torch
        import torch.nn as nn

        class MyModule(nn.Module):
            @torch.jit.ignore
            def debugger(self, x):
                import pdb
                pdb.set_trace()

            def forward(self, x):
                x += 10
                # The compiler would normally try to compile `debugger`,
                # but since it is `@ignore`d, it will be left as a call
                # to Python
                self.debugger(x)
                return x

        m = torch.jit.script(MyModule())

        # Error! The call `debugger` cannot be saved since it calls into Python
        m.save("m.pt")

    Example (using ``@torch.jit.ignore(drop=True)`` on a method)::

        import torch
        import torch.nn as nn

        class MyModule(nn.Module):
            @torch.jit.ignore(drop=True)
            def training_method(self, x):
                import pdb
                pdb.set_trace()

            def forward(self, x):
                if self.training:
                    self.training_method(x)
                return x

        m = torch.jit.script(MyModule())

        # This is OK since `training_method` is not saved, the call is replaced
        # with a `raise`.
        m.save("m.pt")
    """

    if callable(drop):
        # Used without any args, so `drop` is actually the decorated function:
        # @torch.jit.ignore
        # def fn(...):
        fn = drop
        fn._torchscript_modifier = FunctionModifiers.IGNORE
        return fn

    if not isinstance(drop, bool):
        raise RuntimeError(
            "Argument to @torch.jit.ignore must be a bool or "
            f"a function but got {drop}"
        )

    # for backwards compat
    drop_on_export = kwargs.pop("drop_on_export", None)
    if drop_on_export:
        # Fixed: the messages previously ended with a stray "{}" left over
        # from a removed str.format call.
        warnings.warn(
            "ignore(drop_on_export=True) has been deprecated. TorchScript will now drop the function "
            "call on compilation. Use torch.jit.unused now.",
            category=FutureWarning,
        )

        drop = drop_on_export
    elif drop:
        warnings.warn(
            "ignore(True) has been deprecated. TorchScript will now drop the function "
            "call on compilation. Use torch.jit.unused now.",
            category=FutureWarning,
        )

    def decorator(fn):
        # drop=True means "replace calls with a raise" (UNUSED);
        # drop=False means "leave as a Python call" (IGNORE).
        if drop:
            fn._torchscript_modifier = FunctionModifiers.UNUSED
        else:
            fn._torchscript_modifier = FunctionModifiers.IGNORE
        return fn

    return decorator
+
+
def _drop(fn):
    """Mark ``fn`` as fully dropped (its declaration may be unscriptable)."""
    setattr(fn, "_torchscript_modifier", FunctionModifiers._DROP)
    return fn
+
+
def _copy_to_script_wrapper(fn):
    """Mark ``fn`` so the Python method is copied onto the scripted model if unscripted."""
    setattr(fn, "_torchscript_modifier", FunctionModifiers.COPY_TO_SCRIPT_WRAPPER)
    return fn
+
+
def module_has_exports(mod):
    """Return True if any callable attribute of ``mod`` is marked @export."""
    for attr_name in dir(mod):
        # hasattr guard: dir() can list names whose access raises.
        if not hasattr(mod, attr_name):
            continue
        candidate = getattr(mod, attr_name)
        if not callable(candidate):
            continue
        if get_torchscript_modifier(candidate) is FunctionModifiers.EXPORT:
            return True
    return False
+
+
# WARNING: should_drop is currently being used by our JIT code coverage plug-in to mark JIT'd code as covered. If you
# rename this function, please update references in tools/coverage_plugins_package/src/coverage_plugins/jit_plugin.py to
# allow JIT'd code to still be covered.
def should_drop(fn) -> bool:
    """Return True if calls to ``fn`` should be replaced with a raise."""
    modifier = get_torchscript_modifier(fn)
    if modifier is None:
        return False
    return modifier in (FunctionModifiers.UNUSED, FunctionModifiers._DROP)
+
+
def is_ignored_fn(fn) -> bool:
    """Return True if ``fn`` is marked UNUSED, IGNORE, or _DROP."""
    modifier = get_torchscript_modifier(fn)
    return modifier in (
        FunctionModifiers.UNUSED,
        FunctionModifiers.IGNORE,
        FunctionModifiers._DROP,
    )
+
+
def _is_drop_fn(fn) -> bool:
    """Return True if ``fn`` is marked with the _DROP modifier."""
    return get_torchscript_modifier(fn) is FunctionModifiers._DROP
+
+
def is_static_fn(cls, fn) -> bool:
    """Return True if attribute ``fn`` of ``cls`` is declared as a staticmethod."""
    # getattr_static avoids triggering descriptors, so the raw staticmethod
    # wrapper is visible.
    static_attr = inspect.getattr_static(cls, fn, default=None)
    return isinstance(static_attr, staticmethod)
+
+
def get_static_fn(cls, fn):
    """Return the plain function wrapped by staticmethod ``fn`` on ``cls``."""
    return inspect.getattr_static(cls, fn).__func__
+
+
def get_torchscript_modifier(fn):
    """Return the TorchScript modifier on ``fn`` (DEFAULT if unmarked), or None if not callable."""
    if not callable(fn):
        return None
    # Bound methods carry the marker on the underlying function.
    target = getattr(fn, "__func__", fn)
    return getattr(target, "_torchscript_modifier", FunctionModifiers.DEFAULT)
+
+
def copy_torchscript_modifier(orig, new) -> None:
    """Copy ``orig``'s TorchScript modifier onto ``new`` (no-op if orig has none)."""
    modifier = get_torchscript_modifier(orig)
    if modifier is not None:
        new._torchscript_modifier = modifier
+
+
+# overloading registration
+# overloads get registered in this file, and compiled in torch/jit/__init__.py
+# so that they can be imported in nn/functional.py without an import cycle
+
+# qualified_name => list[overload_functions]
# Registry populated by @torch.jit._overload: maps a function's qualified
# name to all of its overload declarations, in registration order.
_overloaded_fns: Dict[str, List[Callable]] = {}  # noqa: T484


# Usage template appended to error messages raised for malformed or
# unimplemented overloads (see _check_overload_body and
# get_overload_no_implementation_error_message).
_OVERLOAD_EXAMPLE = """
Example usage of overload function:
@torch.jit._overload
def my_function(x: type0) -> type0: # decl 1
    pass

@torch.jit._overload
def my_function(x: type1) -> type1: # decl 2
    pass

def my_function(x): # implementation
    if isinstance(x, type0):
        return x
    elif isinstance(x, type1):
        return x
"""
+
+
def get_overload_no_implementation_error_message(kind, obj):
    """Build the error shown when overload declarations for ``obj`` exist but
    no implementation was registered after them.
    """
    sourcelines, file_lineno, filename = get_source_lines_and_file(obj)
    return (
        f'Implementation for the {kind} "{_qualified_name(obj)}" is missing. Please make '
        f"sure a definition is provided and defined after all overload declarations.\n"
        # Fixed: interpolate the actual filename; it was previously hard-coded
        # and the unpacked `filename` variable was left unused.
        f'File "{filename}", line {file_lineno}:\n'
        + "".join(sourcelines)
        + "\n"
        + _OVERLOAD_EXAMPLE
    )
+
+
def _check_overload_body(func):
    """Verify that an @_overload declaration's body is just ``pass`` or ``...``."""
    try:
        parsed_def = parse_def(func)
    except OSError:
        # Source may be unavailable; this is only a best-effort sanity check,
        # so warn instead of failing.
        warnings.warn(
            f"Unable to retrieve source for @torch.jit._overload function: {func}."
        )
        return

    statements = parsed_def.ast.body[0].body

    def _is_trivial(stmt):
        # Only `pass` or a bare `...` expression are allowed.
        if isinstance(stmt, ast.Pass):
            return True
        return isinstance(stmt, ast.Expr) and isinstance(stmt.value, ast.Ellipsis)

    if len(statements) != 1 or not _is_trivial(statements[0]):
        msg = (
            "Only `pass` statement or `...` can be the body of overload declaration:\n"
        )
        msg += "\n".join(parsed_def.source.split("\n")[:3])
        msg += " <- Expecting `pass` or `...` here!\n" + _OVERLOAD_EXAMPLE
        raise RuntimeError(msg)
+
+
def _overload(func):
    """Register ``func`` as one overload declaration under its qualified name."""
    _check_overload_body(func)
    qual_name = _qualified_name(func)
    global _overloaded_fns
    _overloaded_fns.setdefault(qual_name, []).append(func)
    return func
+
+
def _get_fn_overloads(qual_name):
    """Return the registered overload list for ``qual_name``, or None."""
    return _overloaded_fns.get(qual_name)
+
+
def _clear_fn_overloads(qual_name) -> None:
    """Drop all recorded overloads for ``qual_name`` (KeyError if none exist)."""
    del _overloaded_fns[qual_name]
+
+
def get_class_name_lineno(method) -> Tuple[str, int]:
    # Walk a fixed number of frames up the call stack to reach the class body
    # currently being executed; the count below assumes this is called from
    # _overload_method, so do not change either call depth independently.
    current_frame = inspect.currentframe()

    # one for the get_class_name call, one for _overload_method call
    for i in range(2):
        assert (
            current_frame is not None
        )  # assert current frame is not an Optional[FrameType]
        current_frame = current_frame.f_back

    assert current_frame is not None  # same here
    # A frame executing a class body has co_name equal to the class's name and
    # co_firstlineno at the `class` statement.
    class_name = current_frame.f_code.co_name
    line_no = current_frame.f_code.co_firstlineno
    return class_name, line_no
+
+
# At the point the decorator is applied to class methods the method
# has no reference to its owning class. _qualified_name would not include
# the class it is defined in, so any methods with the same name in the same file
# would have the same _qualified_name, even if they were defined in different
# classes. This problem only exists in python 2.
# We get around this problem by looking at the stack frame and identifying
# the class name, and throwing an error whenever overloads are used
# when modules of the same name are in the same file

# qualified_name => class name => list[overload_functions]
_overloaded_methods: Dict[str, Dict[str, List[Callable]]] = {}  # noqa: T484


# (qualified_name, class name) => class_fileno
# Used by _overload_method to detect two same-named classes in one module.
_overloaded_method_class_fileno = {}
+
+
def _overload_method(func):
    """Register ``func`` as an overload declaration for a method.

    Overloads are grouped by the function's qualified name and then by the
    enclosing class's name, which is read off the call stack (see the module
    comment above about same-named classes in one file).
    """
    _check_overload_body(func)
    qual_name = _qualified_name(func)
    global _overloaded_methods
    class_name_map = _overloaded_methods.get(qual_name, None)
    if class_name_map is None:
        class_name_map = {}
        _overloaded_methods[qual_name] = class_name_map

    class_name, line_no = get_class_name_lineno(func)
    method_overloads = class_name_map.get(class_name, None)
    if method_overloads is None:
        method_overloads = []
        class_name_map[class_name] = method_overloads
        # Remember where this class starts so a same-named class defined at a
        # different line can be rejected below.
        _overloaded_method_class_fileno[(qual_name, class_name)] = line_no
    else:
        existing_lineno = _overloaded_method_class_fileno[(qual_name, class_name)]
        if existing_lineno != line_no:
            raise RuntimeError(
                "Cannot currently overload the same method name in two different"
                " classes with the same name in the same module"
            )

    method_overloads.append(func)
    return func
+
+
def _get_overloaded_methods(method, mod_class):
    """Return overloads registered for ``method`` on ``mod_class``, or None."""
    # TODO: __name__ not set for submodules in recursive script
    if not hasattr(method, "__name__"):
        return None
    class_name_map = _overloaded_methods.get(_qualified_name(method))
    if class_name_map is None:
        return None
    overloads = class_name_map.get(mod_class.__name__)
    if overloads is None:
        return None

    # Reject overloads when the class was re-declared elsewhere in the file:
    # the method's source must fall within the class's source span.
    method_line_no = get_source_lines_and_file(method)[1]
    class_lines, class_start, _ = get_source_lines_and_file(mod_class)
    class_end = class_start + len(class_lines)
    if not (class_start <= method_line_no <= class_end):
        raise Exception(
            "Overloads are not useable when a module is redeclared within the same file: "
            + str(method)
        )
    return overloads
+
+
def is_tuple(ann) -> bool:
    """Return True if ``ann`` is a subscripted Tuple/tuple annotation."""
    if ann is Tuple:
        raise_error_container_parameter_missing("Tuple")

    # For some reason Python 3.7 violates the Type[A, B].__origin__ == Type rule
    if not hasattr(ann, "__module__"):
        return False

    origin = get_origin(ann)
    if IS_PY39_PLUS and ann.__module__ == "builtins" and origin is tuple:
        return True
    return ann.__module__ == "typing" and (origin is Tuple or origin is tuple)
+
+
def is_list(ann) -> bool:
    """Return True if ``ann`` is a subscripted List/list annotation."""
    if ann is List:
        raise_error_container_parameter_missing("List")

    if not hasattr(ann, "__module__"):
        return False

    origin = get_origin(ann)
    if IS_PY39_PLUS and ann.__module__ == "builtins" and origin is list:
        return True
    return ann.__module__ == "typing" and (origin is List or origin is list)
+
+
def is_dict(ann) -> bool:
    """Return True if ``ann`` is a subscripted Dict/dict annotation."""
    if ann is Dict:
        raise_error_container_parameter_missing("Dict")

    if not hasattr(ann, "__module__"):
        return False

    origin = get_origin(ann)
    if IS_PY39_PLUS and ann.__module__ == "builtins" and origin is dict:
        return True
    return ann.__module__ == "typing" and (origin is Dict or origin is dict)
+
+
def is_union(ann):
    """Return True if ``ann`` is a Union (typing.Union or PEP 604 ``X | Y``)."""
    if ann is Union:
        raise_error_container_parameter_missing("Union")

    if isinstance(ann, BuiltinUnionType):
        return True
    return (
        hasattr(ann, "__module__")
        and ann.__module__ == "typing"
        and get_origin(ann) is Union
    )
+
+
def is_optional(ann):
    """Return True if ``ann`` is Optional[T] or an equivalent two-member Union with None."""
    if ann is Optional:
        raise_error_container_parameter_missing("Optional")

    def _spelled_as_optional(a):
        return (
            hasattr(a, "__module__")
            and a.__module__ == "typing"
            and get_origin(a) is Optional
        )

    def _union_with_none(a):
        union_args = get_args(a)
        return len(union_args) == 2 and (None in union_args or type(None) in union_args)

    return _spelled_as_optional(ann) or (is_union(ann) and _union_with_none(ann))
+
+
def is_future(ann) -> bool:
    """Return True if ``ann`` is a subscripted Future annotation; raise if bare."""
    if ann is Future:
        raise RuntimeError(
            "Attempted to use Future without a "
            "contained type. Please add a contained type, e.g. "
            "Future[int]"
        )
    return get_origin(ann) is Future
+
+
def is_await(ann) -> bool:
    """Return True if ``ann`` is the _Await type or a subscripted _Await annotation."""
    return ann is _Await or get_origin(ann) is _Await
+
+
# is_rref is only defined when the RPC module is available; is_rref_instance
# always exists, falling back to a constant False otherwise.
if torch.distributed.rpc.is_available():
    from torch._C._distributed_rpc import PyRRef
    from torch.distributed.rpc import RRef

    def is_rref(ann) -> bool:
        # Reject a bare, unparameterized RRef annotation.
        if ann is RRef:
            raise RuntimeError(
                "Attempted to use RRef without a "
                "contained type. Please add a contained type, e.g. "
                "RRef[int]"
            )
        return get_origin(ann) is RRef

    def is_rref_instance(obj) -> bool:
        # PyRRef is the C++ binding type backing RRef instances.
        return isinstance(obj, PyRRef)

else:

    def is_rref_instance(obj) -> bool:
        # If the RPC module doesn't exist then RRefs don't exist either.
        return False
+
+
def is_final(ann) -> bool:
    """Return True if ``ann`` is a typing/typing_extensions Final annotation."""
    if ann.__module__ not in {"typing", "typing_extensions"}:
        return False
    return get_origin(ann) is Final or isinstance(ann, type(Final))
+
+
# allows BroadcastingList instance to be subscriptable
class BroadcastingListCls:
    """Placeholder whose instances accept subscripting and always yield None."""

    def __getitem__(self, types):
        # Only the subscript *syntax* matters; the value is discarded.
        return None
+
+
# mypy doesn't support parameters on types, so we have to explicitly type each
# list size
BroadcastingList1 = BroadcastingListCls()
# BroadcastingList2 .. BroadcastingList6 all alias the same singleton instance.
for i in range(2, 7):
    globals()[f"BroadcastingList{i}"] = BroadcastingList1
+
+
def is_scripting() -> bool:
    r"""
    Function that returns True when in compilation and False otherwise. This
    is useful especially with the @unused decorator to leave code in your
    model that is not yet TorchScript compatible.
    .. testcode::

        import torch

        @torch.jit.unused
        def unsupported_linear_op(x):
            return x

        def linear(x):
            if torch.jit.is_scripting():
                return torch.linear(x)
            else:
                return unsupported_linear_op(x)
    """
    # The eager (Python) implementation always reports False.
    return False
+
+
+# Retrieves a fully-qualified name (module hierarchy + classname) for a given obj.
+def _qualified_name(obj, mangle_name=True) -> str:
+ # This special case allows us to override the qualified name on a type.
+ # It's currently used in conjunction with tracing, where we create a
+ # fake module to filter only supported attributes. However, since this
+ # new type is defined as a local class, we need a mechanism to override
+ # its qualname so it appears correctly in the TorchScript system. This,
+ # we set '_jit_override_qualname' with the original traced module's
+ # qualified name, which is picked up here
+ if hasattr(obj, "_jit_override_qualname"):
+ return obj._jit_override_qualname
+ # short-circuit in cases where the object already has a known qualified name
+ if isinstance(obj, torch._C.ScriptFunction):
+ return obj.qualified_name
+
+ if getattr(obj, "__name__", None):
+ name = obj.__name__
+ # Enum classes do not have `__name__` attr, instead they have `name`.
+ elif isinstance(obj, enum.Enum):
+ name = obj.name
+ else:
+ raise RuntimeError("Could not get name of python class object")
+
+ if name == "":
+ name = "_lambda" # make name a valid identifier
+
+ module_name = obj.__module__
+
+ # If the module is actually a torchbind module, then we should short circuit
+ if module_name == "torch._classes":
+ return obj.qualified_name
+
+ # The Python docs are very clear that `__module__` can be None, but I can't
+ # figure out when it actually would be.
+ if module_name is None:
+ raise RuntimeError(
+ f"Could not get qualified name for class '{name}': "
+ "__module__ can't be None."
+ )
+
+ # if getattr(sys.modules[module_name], name) is not obj:
+ # raise RuntimeError(f"Could not get qualified name for class '{name}': "
+ # f"the attr {name} on module {module_name} is not the class")
+
+ # torch.package and TorchScript have separate mangling schemes to avoid
+ # name collisions from multiple packages. To avoid them interfering with
+ # each other, normalize the package manging here.
+ if package_mangling.is_mangled(module_name):
+ module_name = module_name.replace("<", "_")
+ module_name = module_name.replace(">", "_")
+
+ # The PythonExceptionValue C++ class in torch/csrc/jit/python/python_sugared_value.h
+ # does not need mangle the python class name.
+ if mangle_name:
+ # __main__ is a builtin module, so rewrite it to "__torch__".
+ if module_name == "__main__":
+ module_name = "__torch__"
+ else:
+ # Everything else gets a "__torch__" prefix to avoid name collisions
+ # with the names of user values.
+ module_name = "__torch__." + module_name
+
+ if "." in name:
+ raise RuntimeError(
+ f"Could not get qualified name for class '{name}': "
+ f"'{name}' is not a valid identifier"
+ )
+
+ return module_name + "." + name
+
+
def _try_get_dispatched_fn(fn):
    """Return boolean-dispatch metadata for ``fn``, or None if absent/not callable."""
    if callable(fn):
        return boolean_dispatched.get(fn)
    return None
+
+
def _get_named_tuple_properties(
    obj, loc: Optional[torch._C._jit_tree_views.SourceRange] = None, rcb=None
):
    """Collect (name, fields, field types, defaults) for a NamedTuple class ``obj``.

    ``loc`` is only used for error reporting; ``rcb`` is a resolution callback
    used to turn string/ForwardRef annotations into real types (see the long
    note inside).
    """
    if loc is None:
        loc = fake_range()

    assert issubclass(obj, tuple) and hasattr(obj, "_fields")
    if hasattr(obj, "_field_defaults"):
        # Defaults in declared field order, only for fields that have one.
        defaults = [
            obj._field_defaults[field]
            for field in obj._fields
            if field in obj._field_defaults
        ]
    else:
        defaults = []
    # In 3.10 recommended way to get annotations is to call `inspect.get_annotations` function
    # Also, annotations from base class are not inherited so they need to be queried explicitly
    if sys.version_info[:2] < (3, 10):
        obj_annotations = getattr(obj, "__annotations__", {})
    else:
        obj_annotations = inspect.get_annotations(obj)
        if len(obj_annotations) == 0 and hasattr(obj, "__base__"):
            obj_annotations = inspect.get_annotations(obj.__base__)

    annotations = []
    for field in obj._fields:
        if field in obj_annotations:
            field_type = obj_annotations[field]
            # [Note: ForwardRef annotations in NamedTuple attributes]
            # Unlike normal types (whose annotation strings are parsed in C++
            # and resolved there via the resolution callback), a NamedTuple's
            # sub-type annotations are read here as Python objects. When a user
            # writes a string annotation (x: 'int') or uses PEP 563, the value
            # appears as ForwardRef('int') and must be resolved with the local
            # context of the definition site. rcb() provides exactly that
            # lookup, which is why it is plumbed all the way down to here.
            if isinstance(field_type, ForwardRef) and rcb is not None:
                rcb_type = rcb(field_type.__forward_arg__)
                # rcb returns None if it can't find anything.
                if rcb_type is None:
                    raise ValueError(
                        f"Unknown type annotation: '{field_type}' in NamedTuple {obj.__name__}."
                        f" Likely due to partial support for ForwardRef parameters in NamedTuples, see #95858."
                        f" Issue occurred at {loc.highlight()}"
                    )
                field_type = rcb_type
            the_type = torch.jit.annotations.ann_to_type(field_type, loc, rcb)
            annotations.append(the_type)
        else:
            # Unannotated fields fall back to an inferred Tensor type.
            annotations.append(torch._C.TensorType.getInferred())
    # NOTE(review): this returns type(obj).__name__ (the metaclass's name),
    # not obj.__name__ — confirm that is intended.
    return type(obj).__name__, obj._fields, annotations, defaults
+
+
+def _create_named_tuple(
+ t, unqual_name: str, field_names: List[str], defaults: Tuple[Any, ...]
+):
+ TupleType = collections.namedtuple(unqual_name, field_names, defaults=defaults) # type: ignore[call-arg, no-redef, misc]
+ return TupleType(*t)
+
+
+@contextlib.contextmanager
+def _disable_emit_hooks():
+ hooks = torch._C._jit_get_emit_hooks()
+ torch._C._jit_set_emit_hooks(None, None)
+ try:
+ yield
+ finally:
+ torch._C._jit_set_emit_hooks(hooks[0], hooks[1])
+
+
def _disable_emit_hooks_decorator(_DecoratorContextManager) -> None:  # noqa: F811
    # NOTE(review): these nested functions are defined but never attached to
    # anything, and the function returns None — as written this appears to be
    # a no-op; presumably the bodies were meant to be installed on
    # _DecoratorContextManager. Verify before relying on it.
    def __enter__(self) -> None:
        self.hooks = torch._C._jit_get_emit_hooks()
        torch._C._jit_set_emit_hooks(None, None)

    def __exit__(self, *args) -> None:
        torch._C._jit_set_emit_hooks(self.hooks[0], self.hooks[1])
+
+
+def _is_exception(obj) -> bool:
+ if not inspect.isclass(obj):
+ return False
+ return issubclass(obj, Exception)
+
+
def raise_error_container_parameter_missing(target_type) -> None:
    """Raise a RuntimeError saying the container annotation needs parameters."""
    if target_type == "Dict":
        # Dict needs two parameters, so the wording differs.
        message = (
            "Attempted to use Dict without "
            "contained types. Please add contained type, e.g. "
            "Dict[int, int]"
        )
    else:
        message = (
            f"Attempted to use {target_type} without a "
            "contained type. Please add a contained type, e.g. "
            f"{target_type}[int]"
        )
    raise RuntimeError(message)
+
+
def check_args_exist(target_type) -> None:
    """Raise if ``target_type`` is an unparameterized List/Tuple/Dict/Optional."""
    if target_type is List or target_type is list:
        raise_error_container_parameter_missing("List")
    if target_type is Tuple or target_type is tuple:
        raise_error_container_parameter_missing("Tuple")
    if target_type is Dict or target_type is dict:
        raise_error_container_parameter_missing("Dict")
    if target_type is None or target_type is Optional:
        raise_error_container_parameter_missing("Optional")
+
+
def check_empty_containers(obj) -> None:
    """Warn that eager ``torch.jit.isinstance`` loses inner types of empty containers."""
    # `in` compares with ==, so this matches any empty list/dict/tuple.
    if obj in ([], {}, ()):
        warnings.warn(
            "The inner type of a container is lost when "
            "calling torch.jit.isinstance in eager mode. For "
            "example, List[int] would become list and "
            "therefore falsely return True for List[float] or"
            " List[str]."
        )
+
+
# supports List/Dict/Tuple and Optional types
# TODO support future
def container_checker(obj, target_type) -> bool:
    """Recursively check ``obj`` against a parameterized typing container
    annotation (List/Dict/Tuple/Union/Optional), element by element.
    """
    origin_type = get_origin(target_type)
    check_args_exist(target_type)
    if origin_type is None:
        return False
    elif origin_type is list or origin_type is List:
        check_empty_containers(obj)
        if not isinstance(obj, list):
            return False
        arg_type = get_args(target_type)[0]
        arg_origin = get_origin(arg_type)
        for el in obj:
            # check if nested container, ex: List[List[str]]
            if arg_origin:  # processes nested container, ex: List[List[str]]
                if not container_checker(el, arg_type):
                    return False
            elif not isinstance(el, arg_type):
                return False
        return True
    elif origin_type is Dict or origin_type is dict:
        check_empty_containers(obj)
        if not isinstance(obj, dict):
            return False
        key_type = get_args(target_type)[0]
        val_type = get_args(target_type)[1]
        for key, val in obj.items():
            # check if keys are of right type
            if not isinstance(key, key_type):
                return False
            # values may themselves be parameterized containers
            val_origin = get_origin(val_type)
            if val_origin:
                if not container_checker(val, val_type):
                    return False
            elif not isinstance(val, val_type):
                return False
        return True
    elif origin_type is Tuple or origin_type is tuple:
        check_empty_containers(obj)
        if not isinstance(obj, tuple):
            return False
        arg_types = get_args(target_type)
        # tuples are fixed-arity: lengths must match exactly
        if len(obj) != len(arg_types):
            return False
        for el, el_type in zip(obj, arg_types):
            el_origin = get_origin(el_type)
            if el_origin:
                if not container_checker(el, el_type):
                    return False
            elif not isinstance(el, el_type):
                return False
        return True
    elif origin_type is Union or issubclass(
        origin_type, BuiltinUnionType
    ):  # also handles Optional
        if obj is None:  # check before recursion because None is always fine
            return True
        inner_types = get_args(target_type)
        for t in inner_types:
            t_origin = get_origin(t)
            if t_origin:
                return container_checker(obj, t)
            elif isinstance(obj, t):
                return True
    return False
+
+
def _isinstance(obj, target_type) -> bool:
    """Eager-mode core of ``torch.jit.isinstance`` supporting typing containers."""
    if isinstance(target_type, collections.abc.Container):
        if not isinstance(target_type, tuple):
            raise RuntimeError(
                "The second argument to "
                "`torch.jit.isinstance` must be a type "
                "or a tuple of types"
            )
        return any(_isinstance(obj, candidate) for candidate in target_type)

    if get_origin(target_type):
        return container_checker(obj, target_type)

    # Check to handle non-typed optional origin returns as none instead
    # of as optional in 3.7-3.8
    check_args_exist(target_type)

    # handle non-containers
    return isinstance(obj, target_type)
+
+
class _TensorExtractor(pickle.Pickler):
    """Pickler that records every tensor it encounters instead of serializing it."""

    def __init__(self, *args, tensors: List[torch.Tensor], **kwargs):
        super().__init__(*args, **kwargs)
        # Tensors are appended here in traversal order.
        self.tensors = tensors

    def persistent_id(self, obj):
        if isinstance(obj, torch.Tensor):
            self.tensors.append(obj)
            return ""
        # Since we just want to extract tensors, we don't mind if an object is
        # unpicklable if it doesn't contain tensors, as we can just ignore/skip
        # it. To play it safe, we only do so for common objects that we're sure
        # don't contain tensors. Feel free to add new types here. Note also that
        # even if a type isn't listed here this won't block users, since they
        # can just add a __getstate__ or __reduce__ method to their class.
        # (Futures and RRefs don't technically contain a value, they just offer
        # the means to access a value.)
        if isinstance(obj, (LockType, CFuture, CAwait, torch.cuda.Event, threading.Thread)):
            return ""
        if is_rref_instance(obj):
            return ""
        return None
+
+
def _extract_tensors(obj):
    r"""
    This function is exclusively called from C++.
    See ``torch/csrc/jit/python/python_ivalue.h``.

    It extracts the tensors contained in the given object, through pickling.
    """
    collected: List[torch.Tensor] = []
    _TensorExtractor(io.BytesIO(), protocol=-1, tensors=collected).dump(obj)
    return collected
+
+
# In Python-3.11+ typed enums (i.e. IntEnum for example) retain number of base class methods in subclass
# that were previously dropped. To preserve the behavior, explicitly drop them there
# NOTE(review): sys.version_info > (3, 10) is also True on 3.10 patch
# releases (e.g. (3, 10, 1) > (3, 10)); confirm whether >= (3, 11) was meant.
if sys.version_info > (3, 10):
    _drop(enum.Enum.__new__)
    _drop(enum.Enum.__format__)
    _drop(enum.Enum.__repr__)
    _drop(enum.Enum.__str__)
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_linalg_utils.py b/env-llmeval/lib/python3.10/site-packages/torch/_linalg_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9d5cde41f6006abe94d71e9ff9509ebae6c3085
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_linalg_utils.py
@@ -0,0 +1,164 @@
+"""Various linear algebra utility methods for internal use.
+
+"""
+
+from typing import Optional, Tuple
+
+import torch
+from torch import Tensor
+
+
def is_sparse(A):
    """Check if tensor A is a sparse (COO) tensor; raise TypeError otherwise."""
    if not isinstance(A, torch.Tensor):
        error_str = "expected Tensor"
        # Scripted code cannot format arbitrary types into the message.
        if not torch.jit.is_scripting():
            error_str += f" but got {type(A)}"
        raise TypeError(error_str)
    return A.layout == torch.sparse_coo
+
+
def get_floating_dtype(A):
    """Return the floating point dtype of tensor A.

    float16/32/64 pass through; everything else (e.g. integer dtypes) maps
    to float32.
    """
    if A.dtype in (torch.float16, torch.float32, torch.float64):
        return A.dtype
    return torch.float32
+
+
def matmul(A: Optional[Tensor], B: Tensor) -> Tensor:
    """Multiply two matrices.

    If A is None, return B. A can be sparse or dense; B is always dense.
    """
    if A is None:
        return B
    return torch.sparse.mm(A, B) if is_sparse(A) else torch.matmul(A, B)
+
+
def conjugate(A):
    """Return conjugate of tensor A.

    .. note:: If A's dtype is not complex, A is returned.
    """
    return A.conj() if A.is_complex() else A
+
+
def transpose(A):
    """Return transpose of a matrix or batches of matrices (swap last two dims)."""
    last_dim = A.ndim - 1
    return A.transpose(last_dim, last_dim - 1)
+
+
def transjugate(A):
    """Return the conjugate transpose of a matrix or batches of matrices."""
    return conjugate(transpose(A))
+
+
def bform(X: Tensor, A: Optional[Tensor], Y: Tensor) -> Tensor:
    """Return the bilinear form :math:`X^T A Y` (or :math:`X^T Y` if A is None)."""
    return matmul(transpose(X), matmul(A, Y))
+
+
def qform(A: Optional[Tensor], S: Tensor):
    """Return the quadratic form :math:`S^T A S`."""
    return bform(S, A, S)
+
+
def basis(A):
    """Return an orthogonal basis of A's columns (the Q factor of its QR)."""
    Q, _ = torch.linalg.qr(A)
    return Q
+
+
def symeig(A: Tensor, largest: Optional[bool] = False) -> Tuple[Tensor, Tensor]:
    """Return eigenpairs of A, ascending by default, descending if ``largest``."""
    E, Z = torch.linalg.eigh(A, UPLO="U")
    # eigh returns eigenvalues in ascending order; flip both outputs for
    # `largest` (None is treated the same as False).
    if largest:
        E = torch.flip(E, dims=(-1,))
        Z = torch.flip(Z, dims=(-1,))
    return E, Z
+
+
# These functions were deprecated and removed
# This nice error message can be removed in version 1.13+
def matrix_rank(input, tol=None, symmetric=False, *, out=None) -> Tensor:
    """Removed; kept only to raise a pointer to torch.linalg.matrix_rank."""
    message = (
        "This function was deprecated since version 1.9 and is now removed.\n"
        "Please use the `torch.linalg.matrix_rank` function instead. "
        "The parameter 'symmetric' was renamed in `torch.linalg.matrix_rank()` to 'hermitian'."
    )
    raise RuntimeError(message)
+
+
def solve(input: Tensor, A: Tensor, *, out=None) -> Tuple[Tensor, Tensor]:
    """Removed; kept only to raise a pointer to torch.linalg.solve."""
    message = (
        "This function was deprecated since version 1.9 and is now removed. "
        "`torch.solve` is deprecated in favor of `torch.linalg.solve`. "
        "`torch.linalg.solve` has its arguments reversed and does not return the LU factorization.\n\n"
        "To get the LU factorization see `torch.lu`, which can be used with `torch.lu_solve` or `torch.lu_unpack`.\n"
        "X = torch.solve(B, A).solution "
        "should be replaced with:\n"
        "X = torch.linalg.solve(A, B)"
    )
    raise RuntimeError(message)
+
+
def lstsq(input: Tensor, A: Tensor, *, out=None) -> Tuple[Tensor, Tensor]:
    """Removed; kept only to raise a pointer to torch.linalg.lstsq."""
    message = (
        "This function was deprecated since version 1.9 and is now removed. "
        "`torch.lstsq` is deprecated in favor of `torch.linalg.lstsq`.\n"
        "`torch.linalg.lstsq` has reversed arguments and does not return the QR decomposition in "
        "the returned tuple (although it returns other information about the problem).\n\n"
        "To get the QR decomposition consider using `torch.linalg.qr`.\n\n"
        "The returned solution in `torch.lstsq` stored the residuals of the solution in the "
        "last m - n columns of the returned value whenever m > n. In torch.linalg.lstsq, "
        "the residuals are in the field 'residuals' of the returned named tuple.\n\n"
        "The unpacking of the solution, as in\n"
        "X, _ = torch.lstsq(B, A).solution[:A.size(1)]\n"
        "should be replaced with:\n"
        "X = torch.linalg.lstsq(A, B).solution"
    )
    raise RuntimeError(message)
+
+
+def _symeig(
+ input, eigenvectors=False, upper=True, *, out=None
+) -> Tuple[Tensor, Tensor]:
+ raise RuntimeError(
+ "This function was deprecated since version 1.9 and is now removed. "
+ "The default behavior has changed from using the upper triangular portion of the matrix by default "
+ "to using the lower triangular portion.\n\n"
+ "L, _ = torch.symeig(A, upper=upper) "
+ "should be replaced with:\n"
+ "L = torch.linalg.eigvalsh(A, UPLO='U' if upper else 'L')\n\n"
+ "and\n\n"
+ "L, V = torch.symeig(A, eigenvectors=True) "
+ "should be replaced with:\n"
+ "L, V = torch.linalg.eigh(A, UPLO='U' if upper else 'L')"
+ )
+
+
def eig(
    self: Tensor, eigenvectors: bool = False, *, e=None, v=None
) -> Tuple[Tensor, Tensor]:
    """Removed; kept only to raise a pointer to torch.linalg.eig/eigvals."""
    message = (
        "This function was deprecated since version 1.9 and is now removed. "
        "`torch.linalg.eig` returns complex tensors of dtype `cfloat` or `cdouble` rather than real tensors "
        "mimicking complex tensors.\n\n"
        "L, _ = torch.eig(A) "
        "should be replaced with:\n"
        "L_complex = torch.linalg.eigvals(A)\n\n"
        "and\n\n"
        "L, V = torch.eig(A, eigenvectors=True) "
        "should be replaced with:\n"
        "L_complex, V_complex = torch.linalg.eig(A)"
    )
    raise RuntimeError(message)
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_lobpcg.py b/env-llmeval/lib/python3.10/site-packages/torch/_lobpcg.py
new file mode 100644
index 0000000000000000000000000000000000000000..a5ed5cf8fcfd263e1ac512103cf336783715df0b
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_lobpcg.py
@@ -0,0 +1,1167 @@
+"""Locally Optimal Block Preconditioned Conjugate Gradient methods.
+"""
+# Author: Pearu Peterson
+# Created: February 2020
+
+from typing import Dict, Optional, Tuple
+
+import torch
+from torch import Tensor
+from . import _linalg_utils as _utils
+from .overrides import handle_torch_function, has_torch_function
+
+
+__all__ = ["lobpcg"]
+
+
+def _symeig_backward_complete_eigenspace(D_grad, U_grad, A, D, U):
+ # compute F, such that F_ij = (d_j - d_i)^{-1} for i != j, F_ii = 0
+ F = D.unsqueeze(-2) - D.unsqueeze(-1)
+ F.diagonal(dim1=-2, dim2=-1).fill_(float("inf"))
+ F.pow_(-1)
+
+ # A.grad = U (D.grad + (U^T U.grad * F)) U^T
+ Ut = U.mT.contiguous()
+ res = torch.matmul(
+ U, torch.matmul(torch.diag_embed(D_grad) + torch.matmul(Ut, U_grad) * F, Ut)
+ )
+
+ return res
+
+
def _polynomial_coefficients_given_roots(roots):
    """
    Given the `roots` of a polynomial, find the polynomial's coefficients.

    If roots = (r_1, ..., r_n), then the method returns
    coefficients (a_0, a_1, ..., a_n (== 1)) so that
    p(x) = (x - r_1) * ... * (x - r_n)
         = x^n + a_{n-1} * x^{n-1} + ... a_1 * x_1 + a_0

    Note: for better performance requires writing a low-level kernel
    """
    poly_order = roots.shape[-1]
    poly_coeffs_shape = list(roots.shape)
    # we assume p(x) = x^n + a_{n-1} * x^{n-1} + ... + a_1 * x + a_0,
    # so poly_coeffs = {a_0, ..., a_n, a_{n+1}(== 1)},
    # but we insert one extra coefficient to enable better vectorization below
    poly_coeffs_shape[-1] += 2
    poly_coeffs = roots.new_zeros(poly_coeffs_shape)
    poly_coeffs[..., 0] = 1
    poly_coeffs[..., -1] = 1

    # perform the Horner's rule
    for i in range(1, poly_order + 1):
        # note that it is computationally hard to compute backward for this method,
        # because then given the coefficients it would require finding the roots and/or
        # calculating the sensitivity based on the Vieta's theorem.
        # So the code below tries to circumvent the explicit root finding by series
        # of operations on memory copies imitating the Horner's method.
        # The memory copies are required to construct nodes in the computational graph
        # by exploiting the explicit (not in-place, separate node for each step)
        # recursion of the Horner's method.
        # Needs more memory, O(... * k^2), but with only O(... * k^2) complexity.
        # NOTE(review): the clone below is what keeps each Horner step a separate
        # autograd node when gradients are required; do not fold it into an
        # in-place update.
        poly_coeffs_new = poly_coeffs.clone() if roots.requires_grad else poly_coeffs
        out = poly_coeffs_new.narrow(-1, poly_order - i, i + 1)
        out -= roots.narrow(-1, i - 1, 1) * poly_coeffs.narrow(
            -1, poly_order - i + 1, i + 1
        )
        poly_coeffs = poly_coeffs_new

    # drop the extra leading helper slot; return (a_0, ..., a_{n-1}, 1)
    return poly_coeffs.narrow(-1, 1, poly_order + 1)
+
+
+def _polynomial_value(poly, x, zero_power, transition):
+ """
+ A generic method for computing poly(x) using the Horner's rule.
+
+ Args:
+ poly (Tensor): the (possibly batched) 1D Tensor representing
+ polynomial coefficients such that
+ poly[..., i] = (a_{i_0}, ..., a{i_n} (==1)), and
+ poly(x) = poly[..., 0] * zero_power + ... + poly[..., n] * x^n
+
+ x (Tensor): the value (possible batched) to evalate the polynomial `poly` at.
+
+ zero_power (Tensor): the representation of `x^0`. It is application-specific.
+
+ transition (Callable): the function that accepts some intermediate result `int_val`,
+ the `x` and a specific polynomial coefficient
+ `poly[..., k]` for some iteration `k`.
+ It basically performs one iteration of the Horner's rule
+ defined as `x * int_val + poly[..., k] * zero_power`.
+ Note that `zero_power` is not a parameter,
+ because the step `+ poly[..., k] * zero_power` depends on `x`,
+ whether it is a vector, a matrix, or something else, so this
+ functionality is delegated to the user.
+ """
+
+ res = zero_power.clone()
+ for k in range(poly.size(-1) - 2, -1, -1):
+ res = transition(res, x, poly[..., k])
+ return res
+
+
def _matrix_polynomial_value(poly, x, zero_power=None):
    """
    Evaluates `poly(x)` for the (batched) matrix input `x`.
    Check out `_polynomial_value` function for more details.
    """

    def horner_step(acc, x, coeff):
        # One matrix-aware Horner iteration: x @ acc + coeff * I
        # (the identity contribution is added along the diagonal in place).
        nxt = x.matmul(acc)
        nxt.diagonal(dim1=-2, dim2=-1).add_(coeff.unsqueeze(-1))
        return nxt

    if zero_power is None:
        # x^0 == identity, broadcast over any leading batch dimensions.
        batch_ones = [1] * len(list(x.shape[:-2]))
        zero_power = torch.eye(
            x.size(-1), x.size(-1), dtype=x.dtype, device=x.device
        ).view(*batch_ones, x.size(-1), x.size(-1))

    return _polynomial_value(poly, x, zero_power, horner_step)
+
+
def _vector_polynomial_value(poly, x, zero_power=None):
    """
    Evaluates `poly(x)` for the (batched) vector input `x`.
    Check out `_polynomial_value` function for more details.
    """

    def horner_step(acc, x, coeff):
        # One elementwise Horner iteration: x * acc + coeff.
        return torch.addcmul(coeff.unsqueeze(-1), x, acc)

    if zero_power is None:
        # x^0 == 1, broadcast to the shape of x.
        zero_power = x.new_ones(1).expand(x.shape)

    return _polynomial_value(poly, x, zero_power, horner_step)
+
+
def _symeig_backward_partial_eigenspace(D_grad, U_grad, A, D, U, largest):
    """Backward of a symmetric eigendecomposition when U spans only part of the
    eigenspace; solves a Sylvester equation for the span(U)-orthogonal part."""
    # compute a projection operator onto an orthogonal subspace spanned by the
    # columns of U defined as (I - UU^T)
    Ut = U.mT.contiguous()
    proj_U_ortho = -U.matmul(Ut)
    proj_U_ortho.diagonal(dim1=-2, dim2=-1).add_(1)

    # compute U_ortho, a basis for the orthogonal complement to the span(U),
    # by projecting a random [..., m, m - k] matrix onto the subspace spanned
    # by the columns of U.
    #
    # fix generator for determinism
    # NOTE(review): the generator is created but never seeded here, so it uses
    # a non-deterministic default seed — confirm whether seeding was intended.
    gen = torch.Generator(A.device)

    # orthogonal complement to the span(U)
    U_ortho = proj_U_ortho.matmul(
        torch.randn(
            (*A.shape[:-1], A.size(-1) - D.size(-1)),
            dtype=A.dtype,
            device=A.device,
            generator=gen,
        )
    )
    U_ortho_t = U_ortho.mT.contiguous()

    # compute the coefficients of the characteristic polynomial of the tensor D.
    # Note that D is diagonal, so the diagonal elements are exactly the roots
    # of the characteristic polynomial.
    chr_poly_D = _polynomial_coefficients_given_roots(D)

    # the code below finds the explicit solution to the Sylvester equation
    # U_ortho^T A U_ortho dX - dX D = -U_ortho^T A U
    # and incorporates it into the whole gradient stored in the `res` variable.
    #
    # Equivalent to the following naive implementation:
    # res = A.new_zeros(A.shape)
    # p_res = A.new_zeros(*A.shape[:-1], D.size(-1))
    # for k in range(1, chr_poly_D.size(-1)):
    #     p_res.zero_()
    #     for i in range(0, k):
    #         p_res += (A.matrix_power(k - 1 - i) @ U_grad) * D.pow(i).unsqueeze(-2)
    #     res -= chr_poly_D[k] * (U_ortho @ poly_D_at_A.inverse() @ U_ortho_t @ p_res @ U.t())
    #
    # Note that dX is a differential, so the gradient contribution comes from the backward sensitivity
    # Tr(f(U_grad, D_grad, A, U, D)^T dX) = Tr(g(U_grad, A, U, D)^T dA) for some functions f and g,
    # and we need to compute g(U_grad, A, U, D)
    #
    # The naive implementation is based on the paper
    # Hu, Qingxi, and Daizhan Cheng.
    # "The polynomial solution to the Sylvester matrix equation."
    # Applied mathematics letters 19.9 (2006): 859-864.
    #
    # We can modify the computation of `p_res` from above in a more efficient way
    # p_res =   U_grad * (chr_poly_D[1] * D.pow(0) + ... + chr_poly_D[k] * D.pow(k)).unsqueeze(-2)
    #       + A U_grad * (chr_poly_D[2] * D.pow(0) + ... + chr_poly_D[k] * D.pow(k - 1)).unsqueeze(-2)
    #       + ...
    #       + A.matrix_power(k - 1) U_grad * chr_poly_D[k]
    # Note that this saves us from redundant matrix products with A (elimination of matrix_power)
    U_grad_projected = U_grad
    series_acc = U_grad_projected.new_zeros(U_grad_projected.shape)
    for k in range(1, chr_poly_D.size(-1)):
        poly_D = _vector_polynomial_value(chr_poly_D[..., k:], D)
        series_acc += U_grad_projected * poly_D.unsqueeze(-2)
        U_grad_projected = A.matmul(U_grad_projected)

    # compute chr_poly_D(A) which essentially is:
    #
    # chr_poly_D_at_A = A.new_zeros(A.shape)
    # for k in range(chr_poly_D.size(-1)):
    #     chr_poly_D_at_A += chr_poly_D[k] * A.matrix_power(k)
    #
    # Note, however, for better performance we use the Horner's rule
    chr_poly_D_at_A = _matrix_polynomial_value(chr_poly_D, A)

    # compute the action of `chr_poly_D_at_A` restricted to U_ortho_t
    chr_poly_D_at_A_to_U_ortho = torch.matmul(
        U_ortho_t, torch.matmul(chr_poly_D_at_A, U_ortho)
    )
    # we need to invert 'chr_poly_D_at_A_to_U_ortho`, for that we compute its
    # Cholesky decomposition and then use `torch.cholesky_solve` for better stability.
    # Cholesky decomposition requires the input to be positive-definite.
    # Note that `chr_poly_D_at_A_to_U_ortho` is positive-definite if
    # 1. `largest` == False, or
    # 2. `largest` == True and `k` is even
    # under the assumption that `A` has distinct eigenvalues.
    #
    # check if `chr_poly_D_at_A_to_U_ortho` is positive-definite or negative-definite
    # NOTE(review): `k` below is the loop variable leaked from the series loop
    # above, i.e. its final value chr_poly_D.size(-1) - 1 (the polynomial
    # order) — confirm this reliance on the leaked index is intended.
    chr_poly_D_at_A_to_U_ortho_sign = -1 if (largest and (k % 2 == 1)) else +1
    chr_poly_D_at_A_to_U_ortho_L = torch.linalg.cholesky(
        chr_poly_D_at_A_to_U_ortho_sign * chr_poly_D_at_A_to_U_ortho
    )

    # compute the gradient part in span(U)
    res = _symeig_backward_complete_eigenspace(D_grad, U_grad, A, D, U)

    # incorporate the Sylvester equation solution into the full gradient
    # it resides in span(U_ortho)
    res -= U_ortho.matmul(
        chr_poly_D_at_A_to_U_ortho_sign
        * torch.cholesky_solve(
            U_ortho_t.matmul(series_acc), chr_poly_D_at_A_to_U_ortho_L
        )
    ).matmul(Ut)

    return res
+
+
def _symeig_backward(D_grad, U_grad, A, D, U, largest):
    """Dispatch the symeig backward to the complete- or partial-eigenspace path."""
    # A square U means its columns form a complete eigenspace.
    is_complete = U.size(-1) == U.size(-2)
    if is_complete:
        return _symeig_backward_complete_eigenspace(D_grad, U_grad, A, D, U)
    return _symeig_backward_partial_eigenspace(D_grad, U_grad, A, D, U, largest)
+
+
class LOBPCGAutogradFunction(torch.autograd.Function):
    """Autograd wrapper around `_lobpcg`: forward runs the solver, backward
    computes the symeig gradient (only supported for dense, real A with B == None)."""

    @staticmethod
    def forward(  # type: ignore[override]
        ctx,
        A: Tensor,
        k: Optional[int] = None,
        B: Optional[Tensor] = None,
        X: Optional[Tensor] = None,
        n: Optional[int] = None,
        iK: Optional[Tensor] = None,
        niter: Optional[int] = None,
        tol: Optional[float] = None,
        largest: Optional[bool] = None,
        method: Optional[str] = None,
        tracker: None = None,
        ortho_iparams: Optional[Dict[str, int]] = None,
        ortho_fparams: Optional[Dict[str, float]] = None,
        ortho_bparams: Optional[Dict[str, bool]] = None,
    ) -> Tuple[Tensor, Tensor]:
        # makes sure that input is contiguous for efficiency.
        # Note: autograd does not support dense gradients for sparse input yet.
        A = A.contiguous() if (not A.is_sparse) else A
        if B is not None:
            B = B.contiguous() if (not B.is_sparse) else B

        D, U = _lobpcg(
            A,
            k,
            B,
            X,
            n,
            iK,
            niter,
            tol,
            largest,
            method,
            tracker,
            ortho_iparams,
            ortho_fparams,
            ortho_bparams,
        )

        # stash inputs/outputs and the `largest` flag for backward
        ctx.save_for_backward(A, B, D, U)
        ctx.largest = largest

        return D, U

    @staticmethod
    def backward(ctx, D_grad, U_grad):
        A_grad = B_grad = None
        # one gradient slot per forward() input (14 parameters after ctx)
        grads = [None] * 14

        A, B, D, U = ctx.saved_tensors
        largest = ctx.largest

        # lobpcg.backward has some limitations. Checks for unsupported input
        if A.is_sparse or (B is not None and B.is_sparse and ctx.needs_input_grad[2]):
            raise ValueError(
                "lobpcg.backward does not support sparse input yet."
                "Note that lobpcg.forward does though."
            )
        if (
            A.dtype in (torch.complex64, torch.complex128)
            or B is not None
            and B.dtype in (torch.complex64, torch.complex128)
        ):
            raise ValueError(
                "lobpcg.backward does not support complex input yet."
                "Note that lobpcg.forward does though."
            )
        if B is not None:
            raise ValueError(
                "lobpcg.backward does not support backward with B != I yet."
            )

        if largest is None:
            largest = True

        # symeig backward
        if B is None:
            A_grad = _symeig_backward(D_grad, U_grad, A, D, U, largest)

        # A has index 0
        grads[0] = A_grad
        # B has index 2
        grads[2] = B_grad
        return tuple(grads)
+
+
def lobpcg(
    A: Tensor,
    k: Optional[int] = None,
    B: Optional[Tensor] = None,
    X: Optional[Tensor] = None,
    n: Optional[int] = None,
    iK: Optional[Tensor] = None,
    niter: Optional[int] = None,
    tol: Optional[float] = None,
    largest: Optional[bool] = None,
    method: Optional[str] = None,
    tracker: None = None,
    ortho_iparams: Optional[Dict[str, int]] = None,
    ortho_fparams: Optional[Dict[str, float]] = None,
    ortho_bparams: Optional[Dict[str, bool]] = None,
) -> Tuple[Tensor, Tensor]:
    """Find the k largest (or smallest) eigenvalues and the corresponding
    eigenvectors of a symmetric positive definite generalized
    eigenvalue problem using matrix-free LOBPCG methods.

    This function is a front-end to the following LOBPCG algorithms
    selectable via `method` argument:

      `method="basic"` - the LOBPCG method introduced by Andrew
      Knyazev, see [Knyazev2001]. A less robust method, may fail when
      Cholesky is applied to singular input.

      `method="ortho"` - the LOBPCG method with orthogonal basis
      selection [StathopoulosEtal2002]. A robust method.

    Supported inputs are dense, sparse, and batches of dense matrices.

    .. note:: In general, the basic method spends least time per
      iteration. However, the robust methods converge much faster and
      are more stable. So, the usage of the basic method is generally
      not recommended but there exist cases where the usage of the
      basic method may be preferred.

    .. warning:: The backward method does not support sparse and complex inputs.
      It works only when `B` is not provided (i.e. `B == None`).
      We are actively working on extensions, and the details of
      the algorithms are going to be published promptly.

    .. warning:: While it is assumed that `A` is symmetric, `A.grad` is not.
      To make sure that `A.grad` is symmetric, so that `A - t * A.grad` is symmetric
      in first-order optimization routines, prior to running `lobpcg`
      we do the following symmetrization map: `A -> (A + A.t()) / 2`.
      The map is performed only when the `A` requires gradients.

    Args:

      A (Tensor): the input tensor of size :math:`(*, m, m)`

      B (Tensor, optional): the input tensor of size :math:`(*, m,
                  m)`. When not specified, `B` is interpreted as
                  identity matrix.

      X (tensor, optional): the input tensor of size :math:`(*, m, n)`
                  where `k <= n <= m`. When specified, it is used as
                  initial approximation of eigenvectors. X must be a
                  dense tensor.

      iK (tensor, optional): the input tensor of size :math:`(*, m,
                  m)`. When specified, it will be used as preconditioner.

      k (integer, optional): the number of requested
                  eigenpairs. Default is the number of :math:`X`
                  columns (when specified) or `1`.

      n (integer, optional): if :math:`X` is not specified then `n`
                  specifies the size of the generated random
                  approximation of eigenvectors. Default value for `n`
                  is `k`. If :math:`X` is specified, the value of `n`
                  (when specified) must be the number of :math:`X`
                  columns.

      tol (float, optional): residual tolerance for stopping
                 criterion. Default is `feps ** 0.5` where `feps` is
                 smallest non-zero floating-point number of the given
                 input tensor `A` data type.

      largest (bool, optional): when True, solve the eigenproblem for
                 the largest eigenvalues. Otherwise, solve the
                 eigenproblem for smallest eigenvalues. Default is
                 `True`.

      method (str, optional): select LOBPCG method. See the
                 description of the function above. Default is
                 "ortho".

      niter (int, optional): maximum number of iterations. When
                 reached, the iteration process is hard-stopped and
                 the current approximation of eigenpairs is returned.
                 For infinite iteration but until convergence criteria
                 is met, use `-1`.

      tracker (callable, optional) : a function for tracing the
                 iteration process. When specified, it is called at
                 each iteration step with LOBPCG instance as an
                 argument. The LOBPCG instance holds the full state of
                 the iteration process in the following attributes:

                   `iparams`, `fparams`, `bparams` - dictionaries of
                   integer, float, and boolean valued input
                   parameters, respectively

                   `ivars`, `fvars`, `bvars`, `tvars` - dictionaries
                   of integer, float, boolean, and Tensor valued
                   iteration variables, respectively.

                   `A`, `B`, `iK` - input Tensor arguments.

                   `E`, `X`, `S`, `R` - iteration Tensor variables.

                 For instance:

                   `ivars["istep"]` - the current iteration step
                   `X` - the current approximation of eigenvectors
                   `E` - the current approximation of eigenvalues
                   `R` - the current residual
                   `ivars["converged_count"]` - the current number of converged eigenpairs
                   `tvars["rerr"]` - the current state of convergence criteria

                 Note that when `tracker` stores Tensor objects from
                 the LOBPCG instance, it must make copies of these.

                 If `tracker` sets `bvars["force_stop"] = True`, the
                 iteration process will be hard-stopped.

      ortho_iparams, ortho_fparams, ortho_bparams (dict, optional):
                 various parameters to LOBPCG algorithm when using
                 `method="ortho"`.

    Returns:

      E (Tensor): tensor of eigenvalues of size :math:`(*, k)`

      X (Tensor): tensor of eigenvectors of size :math:`(*, m, k)`

    References:

      [Knyazev2001] Andrew V. Knyazev. (2001) Toward the Optimal
      Preconditioned Eigensolver: Locally Optimal Block Preconditioned
      Conjugate Gradient Method. SIAM J. Sci. Comput., 23(2),
      517-541. (25 pages)
      https://epubs.siam.org/doi/abs/10.1137/S1064827500366124

      [StathopoulosEtal2002] Andreas Stathopoulos and Kesheng
      Wu. (2002) A Block Orthogonalization Procedure with Constant
      Synchronization Requirements. SIAM J. Sci. Comput., 23(6),
      2165-2182. (18 pages)
      https://epubs.siam.org/doi/10.1137/S1064827500370883

      [DuerschEtal2018] Jed A. Duersch, Meiyue Shao, Chao Yang, Ming
      Gu. (2018) A Robust and Efficient Implementation of LOBPCG.
      SIAM J. Sci. Comput., 40(5), C655-C676. (22 pages)
      https://epubs.siam.org/doi/abs/10.1137/17M1129830

    """

    # __torch_function__ dispatch for tensor-like arguments
    if not torch.jit.is_scripting():
        tensor_ops = (A, B, X, iK)
        if not set(map(type, tensor_ops)).issubset(
            (torch.Tensor, type(None))
        ) and has_torch_function(tensor_ops):
            return handle_torch_function(
                lobpcg,
                tensor_ops,
                A,
                k=k,
                B=B,
                X=X,
                n=n,
                iK=iK,
                niter=niter,
                tol=tol,
                largest=largest,
                method=method,
                tracker=tracker,
                ortho_iparams=ortho_iparams,
                ortho_fparams=ortho_fparams,
                ortho_bparams=ortho_bparams,
            )

    # NOTE(review): this second guard uses torch._jit_internal.is_scripting()
    # while the one above uses torch.jit.is_scripting() — presumably
    # equivalent; confirm against the torch version targeted here.
    if not torch._jit_internal.is_scripting():
        if A.requires_grad or (B is not None and B.requires_grad):
            # While it is expected that `A` is symmetric,
            # the `A_grad` might be not. Therefore we perform the trick below,
            # so that `A_grad` becomes symmetric.
            # The symmetrization is important for first-order optimization methods,
            # so that (A - alpha * A_grad) is still a symmetric matrix.
            # Same holds for `B`.
            A_sym = (A + A.mT) / 2
            B_sym = (B + B.mT) / 2 if (B is not None) else None

            return LOBPCGAutogradFunction.apply(
                A_sym,
                k,
                B_sym,
                X,
                n,
                iK,
                niter,
                tol,
                largest,
                method,
                tracker,
                ortho_iparams,
                ortho_fparams,
                ortho_bparams,
            )
    else:
        if A.requires_grad or (B is not None and B.requires_grad):
            raise RuntimeError(
                "Script and require grads is not supported atm."
                "If you just want to do the forward, use .detach()"
                "on A and B before calling into lobpcg"
            )

    # no gradients required: call the solver directly
    return _lobpcg(
        A,
        k,
        B,
        X,
        n,
        iK,
        niter,
        tol,
        largest,
        method,
        tracker,
        ortho_iparams,
        ortho_fparams,
        ortho_bparams,
    )
+
+
def _lobpcg(
    A: Tensor,
    k: Optional[int] = None,
    B: Optional[Tensor] = None,
    X: Optional[Tensor] = None,
    n: Optional[int] = None,
    iK: Optional[Tensor] = None,
    niter: Optional[int] = None,
    tol: Optional[float] = None,
    largest: Optional[bool] = None,
    method: Optional[str] = None,
    tracker: None = None,
    ortho_iparams: Optional[Dict[str, int]] = None,
    ortho_fparams: Optional[Dict[str, float]] = None,
    ortho_bparams: Optional[Dict[str, bool]] = None,
) -> Tuple[Tensor, Tensor]:
    """Validate arguments, fill in defaults, and run the LOBPCG worker,
    iterating over the batch dimensions of `A` (if any) one problem at a time."""
    # A must be square:
    assert A.shape[-2] == A.shape[-1], A.shape
    if B is not None:
        # A and B must have the same shapes:
        assert A.shape == B.shape, (A.shape, B.shape)

    dtype = _utils.get_floating_dtype(A)
    device = A.device
    if tol is None:
        # default tolerance: sqrt of the machine epsilon for the dtype
        feps = {torch.float32: 1.2e-07, torch.float64: 2.23e-16}[dtype]
        tol = feps**0.5

    m = A.shape[-1]
    k = (1 if X is None else X.shape[-1]) if k is None else k
    n = (k if n is None else n) if X is None else X.shape[-1]

    if m < 3 * n:
        raise ValueError(
            f"LPBPCG algorithm is not applicable when the number of A rows (={m})"
            f" is smaller than 3 x the number of requested eigenpairs (={n})"
        )

    method = "ortho" if method is None else method

    iparams = {
        "m": m,
        "n": n,
        "k": k,
        "niter": 1000 if niter is None else niter,
    }

    fparams = {
        "tol": tol,
    }

    bparams = {"largest": True if largest is None else largest}

    if method == "ortho":
        # user-supplied dicts override the "ortho" defaults below
        if ortho_iparams is not None:
            iparams.update(ortho_iparams)
        if ortho_fparams is not None:
            fparams.update(ortho_fparams)
        if ortho_bparams is not None:
            bparams.update(ortho_bparams)
        iparams["ortho_i_max"] = iparams.get("ortho_i_max", 3)
        iparams["ortho_j_max"] = iparams.get("ortho_j_max", 3)
        fparams["ortho_tol"] = fparams.get("ortho_tol", tol)
        fparams["ortho_tol_drop"] = fparams.get("ortho_tol_drop", tol)
        fparams["ortho_tol_replace"] = fparams.get("ortho_tol_replace", tol)
        bparams["ortho_use_drop"] = bparams.get("ortho_use_drop", False)

    # NOTE(review): LOBPCG_call_tracker / LOBPCG_call_tracker_orig are defined
    # elsewhere in this module; the patching below enables the Python tracker
    # hook only outside of TorchScript.
    if not torch.jit.is_scripting():
        LOBPCG.call_tracker = LOBPCG_call_tracker  # type: ignore[assignment]

    if len(A.shape) > 2:
        # batched input: flatten leading dims and solve each problem in a loop
        N = int(torch.prod(torch.tensor(A.shape[:-2])))
        bA = A.reshape((N,) + A.shape[-2:])
        bB = B.reshape((N,) + A.shape[-2:]) if B is not None else None
        bX = X.reshape((N,) + X.shape[-2:]) if X is not None else None
        bE = torch.empty((N, k), dtype=dtype, device=device)
        bXret = torch.empty((N, m, k), dtype=dtype, device=device)

        for i in range(N):
            A_ = bA[i]
            B_ = bB[i] if bB is not None else None
            X_ = (
                torch.randn((m, n), dtype=dtype, device=device) if bX is None else bX[i]
            )
            assert len(X_.shape) == 2 and X_.shape == (m, n), (X_.shape, (m, n))
            iparams["batch_index"] = i
            worker = LOBPCG(A_, B_, X_, iK, iparams, fparams, bparams, method, tracker)
            worker.run()
            bE[i] = worker.E[:k]
            bXret[i] = worker.X[:, :k]

        if not torch.jit.is_scripting():
            LOBPCG.call_tracker = LOBPCG_call_tracker_orig  # type: ignore[assignment]

        return bE.reshape(A.shape[:-2] + (k,)), bXret.reshape(A.shape[:-2] + (m, k))

    # single (unbatched) problem
    X = torch.randn((m, n), dtype=dtype, device=device) if X is None else X
    assert len(X.shape) == 2 and X.shape == (m, n), (X.shape, (m, n))

    worker = LOBPCG(A, B, X, iK, iparams, fparams, bparams, method, tracker)

    worker.run()

    if not torch.jit.is_scripting():
        LOBPCG.call_tracker = LOBPCG_call_tracker_orig  # type: ignore[assignment]

    return worker.E[:k], worker.X[:, :k]
+
+
+class LOBPCG:
+ """Worker class of LOBPCG methods."""
+
+ def __init__(
+ self,
+ A: Optional[Tensor],
+ B: Optional[Tensor],
+ X: Tensor,
+ iK: Optional[Tensor],
+ iparams: Dict[str, int],
+ fparams: Dict[str, float],
+ bparams: Dict[str, bool],
+ method: str,
+ tracker: None,
+ ) -> None:
+ # constant parameters
+ self.A = A
+ self.B = B
+ self.iK = iK
+ self.iparams = iparams
+ self.fparams = fparams
+ self.bparams = bparams
+ self.method = method
+ self.tracker = tracker
+ m = iparams["m"]
+ n = iparams["n"]
+
+ # variable parameters
+ self.X = X
+ self.E = torch.zeros((n,), dtype=X.dtype, device=X.device)
+ self.R = torch.zeros((m, n), dtype=X.dtype, device=X.device)
+ self.S = torch.zeros((m, 3 * n), dtype=X.dtype, device=X.device)
+ self.tvars: Dict[str, Tensor] = {}
+ self.ivars: Dict[str, int] = {"istep": 0}
+ self.fvars: Dict[str, float] = {"_": 0.0}
+ self.bvars: Dict[str, bool] = {"_": False}
+
+ def __str__(self):
+ lines = ["LOPBCG:"]
+ lines += [f" iparams={self.iparams}"]
+ lines += [f" fparams={self.fparams}"]
+ lines += [f" bparams={self.bparams}"]
+ lines += [f" ivars={self.ivars}"]
+ lines += [f" fvars={self.fvars}"]
+ lines += [f" bvars={self.bvars}"]
+ lines += [f" tvars={self.tvars}"]
+ lines += [f" A={self.A}"]
+ lines += [f" B={self.B}"]
+ lines += [f" iK={self.iK}"]
+ lines += [f" X={self.X}"]
+ lines += [f" E={self.E}"]
+ r = ""
+ for line in lines:
+ r += line + "\n"
+ return r
+
    def update(self):
        """Set and update iteration variables."""
        if self.ivars["istep"] == 0:
            # first step: record the norms used by the convergence criterion
            # and initialize the iteration counters
            X_norm = float(torch.norm(self.X))
            iX_norm = X_norm**-1
            A_norm = float(torch.norm(_utils.matmul(self.A, self.X))) * iX_norm
            B_norm = float(torch.norm(_utils.matmul(self.B, self.X))) * iX_norm
            self.fvars["X_norm"] = X_norm
            self.fvars["A_norm"] = A_norm
            self.fvars["B_norm"] = B_norm
            self.ivars["iterations_left"] = self.iparams["niter"]
            self.ivars["converged_count"] = 0
            self.ivars["converged_end"] = 0

        # delegate the actual subspace update to the selected method
        if self.method == "ortho":
            self._update_ortho()
        else:
            self._update_basic()

        self.ivars["iterations_left"] = self.ivars["iterations_left"] - 1
        self.ivars["istep"] = self.ivars["istep"] + 1
+
+ def update_residual(self):
+ """Update residual R from A, B, X, E."""
+ mm = _utils.matmul
+ self.R = mm(self.A, self.X) - mm(self.B, self.X) * self.E
+
+ def update_converged_count(self):
+ """Determine the number of converged eigenpairs using backward stable
+ convergence criterion, see discussion in Sec 4.3 of [DuerschEtal2018].
+
+ Users may redefine this method for custom convergence criteria.
+ """
+ # (...) -> int
+ prev_count = self.ivars["converged_count"]
+ tol = self.fparams["tol"]
+ A_norm = self.fvars["A_norm"]
+ B_norm = self.fvars["B_norm"]
+ E, X, R = self.E, self.X, self.R
+ rerr = (
+ torch.norm(R, 2, (0,))
+ * (torch.norm(X, 2, (0,)) * (A_norm + E[: X.shape[-1]] * B_norm)) ** -1
+ )
+ converged = rerr < tol
+ count = 0
+ for b in converged:
+ if not b:
+ # ignore convergence of following pairs to ensure
+ # strict ordering of eigenpairs
+ break
+ count += 1
+ assert (
+ count >= prev_count
+ ), f"the number of converged eigenpairs (was {prev_count}, got {count}) cannot decrease"
+ self.ivars["converged_count"] = count
+ self.tvars["rerr"] = rerr
+ return count
+
+ def stop_iteration(self):
+ """Return True to stop iterations.
+
+ Note that tracker (if defined) can force-stop iterations by
+ setting ``worker.bvars['force_stop'] = True``.
+ """
+ return (
+ self.bvars.get("force_stop", False)
+ or self.ivars["iterations_left"] == 0
+ or self.ivars["converged_count"] >= self.iparams["k"]
+ )
+
+ def run(self):
+ """Run LOBPCG iterations.
+
+ Use this method as a template for implementing LOBPCG
+ iteration scheme with custom tracker that is compatible with
+ TorchScript.
+ """
+ self.update()
+
+ if not torch.jit.is_scripting() and self.tracker is not None:
+ self.call_tracker()
+
+ while not self.stop_iteration():
+ self.update()
+
+ if not torch.jit.is_scripting() and self.tracker is not None:
+ self.call_tracker()
+
+ @torch.jit.unused
+ def call_tracker(self):
+ """Interface for tracking iteration process in Python mode.
+
+ Tracking the iteration process is disabled in TorchScript
+ mode. In fact, one should specify tracker=None when JIT
+ compiling functions using lobpcg.
+ """
+ # do nothing when in TorchScript mode
+ pass
+
+ # Internal methods
+
    def _update_basic(self):
        """
        Update or initialize iteration variables when `method == "basic"`.
        """
        mm = torch.matmul
        ns = self.ivars["converged_end"]
        nc = self.ivars["converged_count"]
        n = self.iparams["n"]
        largest = self.bparams["largest"]

        if self.ivars["istep"] == 0:
            # first step: Rayleigh-Ritz on the initial X only
            Ri = self._get_rayleigh_ritz_transform(self.X)
            M = _utils.qform(_utils.qform(self.A, self.X), Ri)
            E, Z = _utils.symeig(M, largest)
            self.X[:] = mm(self.X, mm(Ri, Z))
            self.E[:] = E
            # `np` is the local count of P (search-direction) columns,
            # zero on the first step; it is not the numpy module
            np = 0
            self.update_residual()
            nc = self.update_converged_count()
            self.S[..., :n] = self.X

            # preconditioned residuals extend the search subspace
            W = _utils.matmul(self.iK, self.R)
            self.ivars["converged_end"] = ns = n + np + W.shape[-1]
            self.S[:, n + np : ns] = W
        else:
            # subsequent steps: Rayleigh-Ritz on the not-yet-converged
            # part of the search subspace S
            S_ = self.S[:, nc:ns]
            Ri = self._get_rayleigh_ritz_transform(S_)
            M = _utils.qform(_utils.qform(self.A, S_), Ri)
            E_, Z = _utils.symeig(M, largest)
            self.X[:, nc:] = mm(S_, mm(Ri, Z[:, : n - nc]))
            self.E[nc:] = E_[: n - nc]
            P = mm(S_, mm(Ri, Z[:, n : 2 * n - nc]))
            np = P.shape[-1]

            self.update_residual()
            nc = self.update_converged_count()
            self.S[..., :n] = self.X
            self.S[:, n : n + np] = P
            W = _utils.matmul(self.iK, self.R[:, nc:])

            self.ivars["converged_end"] = ns = n + np + W.shape[-1]
            self.S[:, n + np : ns] = W
+
    def _update_ortho(self):
        """
        Update or initialize iteration variables when `method == "ortho"`.
        """
        mm = torch.matmul
        ns = self.ivars["converged_end"]
        nc = self.ivars["converged_count"]
        n = self.iparams["n"]
        largest = self.bparams["largest"]

        if self.ivars["istep"] == 0:
            # first step: Rayleigh-Ritz on the initial X only
            Ri = self._get_rayleigh_ritz_transform(self.X)
            M = _utils.qform(_utils.qform(self.A, self.X), Ri)
            E, Z = _utils.symeig(M, largest)
            self.X = mm(self.X, mm(Ri, Z))
            self.update_residual()
            # `np` is the local count of P (search-direction) columns,
            # zero on the first step; it is not the numpy module
            np = 0
            nc = self.update_converged_count()
            self.S[:, :n] = self.X
            # B-orthonormalized residuals extend the search subspace
            W = self._get_ortho(self.R, self.X)
            ns = self.ivars["converged_end"] = n + np + W.shape[-1]
            self.S[:, n + np : ns] = W

        else:
            S_ = self.S[:, nc:ns]
            # Rayleigh-Ritz procedure
            E_, Z = _utils.symeig(_utils.qform(self.A, S_), largest)

            # Update E, X, P
            self.X[:, nc:] = mm(S_, Z[:, : n - nc])
            self.E[nc:] = E_[: n - nc]
            P = mm(
                S_,
                mm(
                    Z[:, n - nc :],
                    _utils.basis(_utils.transpose(Z[: n - nc, n - nc :])),
                ),
            )
            np = P.shape[-1]

            # check convergence
            self.update_residual()
            nc = self.update_converged_count()

            # update S
            self.S[:, :n] = self.X
            self.S[:, n : n + np] = P
            W = self._get_ortho(self.R[:, nc:], self.S[:, : n + np])
            ns = self.ivars["converged_end"] = n + np + W.shape[-1]
            self.S[:, n + np : ns] = W
+
    def _get_rayleigh_ritz_transform(self, S):
        """Return a transformation matrix that is used in Rayleigh-Ritz
        procedure for reducing a general eigenvalue problem :math:`(S^TAS)
        C = (S^TBS) C E` to a standard eigenvalue problem :math: `(Ri^T
        S^TAS Ri) Z = Z E` where `C = Ri Z`.

        .. note:: In the original Rayleigh-Ritz procedure in
          [DuerschEtal2018], the problem is formulated as follows::

            SAS = S^T A S
            SBS = S^T B S
            D = () ** -1/2
            R^T R = Cholesky(D SBS D)
            Ri = D R^-1
            solve symeig problem Ri^T SAS Ri Z = Theta Z
            C = Ri Z

          To reduce the number of matrix products (denoted by empty
          space between matrices), here we introduce element-wise
          products (denoted by symbol `*`) so that the Rayleigh-Ritz
          procedure becomes::

            SAS = S^T A S
            SBS = S^T B S
            d = () ** -1/2  # this is 1-d column vector
            dd = d d^T      # this is 2-d matrix
            R^T R = Cholesky(dd * SBS)
            Ri = R^-1 * d   # broadcasting
            solve symeig problem Ri^T SAS Ri Z = Theta Z
            C = Ri Z

          where `dd` is 2-d matrix that replaces matrix products `D M
          D` with one element-wise product `M * dd`; and `d` replaces
          matrix product `D M` with element-wise product `M *
          d`. Also, creating the diagonal matrix `D` is avoided.

        Args:
          S (Tensor): the matrix basis for the search subspace, size is
                      :math:`(m, n)`.

        Returns:
          Ri (tensor): upper-triangular transformation matrix of size
                       :math:`(n, n)`.

        """
        B = self.B
        # NOTE(review): `mm` is assigned but unused in this method — possibly
        # kept for subclass overrides; confirm before removing.
        mm = torch.matmul
        SBS = _utils.qform(B, S)
        # d_row = diag(SBS) ** -1/2, applied by broadcasting instead of
        # building the diagonal matrix D from the recipe above
        d_row = SBS.diagonal(0, -2, -1) ** -0.5
        d_col = d_row.reshape(d_row.shape[0], 1)
        # TODO use torch.linalg.cholesky_solve once it is implemented
        R = torch.linalg.cholesky((SBS * d_row) * d_col, upper=True)
        return torch.linalg.solve_triangular(
            R, d_row.diag_embed(), upper=True, left=False
        )
+
    def _get_svqb(
        self, U: Tensor, drop: bool, tau: float  # Tensor  # bool  # float
    ) -> Tensor:
        """Return B-orthonormal U.

        .. note:: When `drop` is `False` then `svqb` is based on the
                  Algorithm 4 from [DuerschPhD2015] that is a slight
                  modification of the corresponding algorithm
                  introduced in [StathopolousWu2002].

        Args:

          U (Tensor) : initial approximation, size is (m, n)
          drop (bool) : when True, drop columns that
                     contribution to the `span([U])` is small.
          tau (float) : positive tolerance

        Returns:

          U (Tensor) : B-orthonormal columns (:math:`U^T B U = I`), size
                       is (m, n1), where `n1 = n` if `drop` is `False,
                       otherwise `n1 <= n`.

        """
        if torch.numel(U) == 0:
            return U
        UBU = _utils.qform(self.B, U)
        d = UBU.diagonal(0, -2, -1)

        # Detect and drop exact zero columns from U. While the test
        # `abs(d) == 0` is unlikely to be True for random data, it is
        # possible to construct input data to lobpcg where it will be
        # True leading to a failure (notice the `d ** -0.5` operation
        # in the original algorithm). To prevent the failure, we drop
        # the exact zero columns here and then continue with the
        # original algorithm below.
        nz = torch.where(abs(d) != 0.0)
        assert len(nz) == 1, nz
        if len(nz[0]) < len(d):
            U = U[:, nz[0]]
            if torch.numel(U) == 0:
                return U
            # recompute the B-quadratic form on the surviving columns
            UBU = _utils.qform(self.B, U)
            d = UBU.diagonal(0, -2, -1)
            nz = torch.where(abs(d) != 0.0)
            assert len(nz[0]) == len(d)

        # The original algorithm 4 from [DuerschPhD2015].
        d_col = (d**-0.5).reshape(d.shape[0], 1)
        DUBUD = (UBU * d_col) * _utils.transpose(d_col)
        E, Z = _utils.symeig(DUBUD)
        t = tau * abs(E).max()
        if drop:
            # discard eigenpairs below the tolerance t
            keep = torch.where(E > t)
            assert len(keep) == 1, keep
            E = E[keep[0]]
            Z = Z[:, keep[0]]
            d_col = d_col[keep[0]]
        else:
            # clamp small eigenvalues to t instead of dropping them
            E[(torch.where(E < t))[0]] = t

        return torch.matmul(U * _utils.transpose(d_col), Z * E**-0.5)
+
    def _get_ortho(self, U, V):
        """Return B-orthonormal U with columns are B-orthogonal to V.

        .. note:: When `bparams["ortho_use_drop"] == False` then
                  `_get_ortho` is based on the Algorithm 3 from
                  [DuerschPhD2015] that is a slight modification of
                  the corresponding algorithm introduced in
                  [StathopolousWu2002]. Otherwise, the method
                  implements Algorithm 6 from [DuerschPhD2015]

        .. note:: If all U columns are B-collinear to V then the
                  returned tensor U will be empty.

        Args:

          U (Tensor) : initial approximation, size is (m, n)
          V (Tensor) : B-orthogonal external basis, size is (m, k)

        Returns:

          U (Tensor) : B-orthonormal columns (:math:`U^T B U = I`)
                       such that :math:`V^T B U=0`, size is (m, n1),
                       where `n1 = n` if `drop` is `False, otherwise
                       `n1 <= n`.
        """
        mm = torch.matmul
        mm_B = _utils.matmul
        m = self.iparams["m"]
        tau_ortho = self.fparams["ortho_tol"]
        tau_drop = self.fparams["ortho_tol_drop"]
        tau_replace = self.fparams["ortho_tol_replace"]
        i_max = self.iparams["ortho_i_max"]
        j_max = self.iparams["ortho_j_max"]
        # when use_drop==True, enable dropping U columns that have
        # small contribution to the `span([U, V])`.
        use_drop = self.bparams["ortho_use_drop"]

        # clean up variables from the previous call
        for vkey in list(self.fvars.keys()):
            if vkey.startswith("ortho_") and vkey.endswith("_rerr"):
                self.fvars.pop(vkey)
        self.ivars.pop("ortho_i", 0)
        self.ivars.pop("ortho_j", 0)

        BV_norm = torch.norm(mm_B(self.B, V))
        BU = mm_B(self.B, U)
        VBU = mm(_utils.transpose(V), BU)
        i = j = 0
        stats = ""
        # Outer loop: project V out of U; inner loop: B-orthonormalize U.
        for i in range(i_max):
            U = U - mm(V, VBU)
            drop = False
            tau_svqb = tau_drop
            for j in range(j_max):
                if use_drop:
                    U = self._get_svqb(U, drop, tau_svqb)
                    # Only the first svqb pass uses the drop tolerance.
                    drop = True
                    tau_svqb = tau_replace
                else:
                    U = self._get_svqb(U, False, tau_replace)
                if torch.numel(U) == 0:
                    # all initial U columns are B-collinear to V
                    self.ivars["ortho_i"] = i
                    self.ivars["ortho_j"] = j
                    return U
                BU = mm_B(self.B, U)
                UBU = mm(_utils.transpose(U), BU)
                U_norm = torch.norm(U)
                BU_norm = torch.norm(BU)
                # Residual of B-orthonormality: U^T B U - I.
                R = UBU - torch.eye(UBU.shape[-1], device=UBU.device, dtype=UBU.dtype)
                R_norm = torch.norm(R)
                # https://github.com/pytorch/pytorch/issues/33810 workaround:
                rerr = float(R_norm) * float(BU_norm * U_norm) ** -1
                vkey = f"ortho_UBUmI_rerr[{i}, {j}]"
                self.fvars[vkey] = rerr
                if rerr < tau_ortho:
                    break
            # Check B-orthogonality of U against the external basis V.
            VBU = mm(_utils.transpose(V), BU)
            VBU_norm = torch.norm(VBU)
            U_norm = torch.norm(U)
            rerr = float(VBU_norm) * float(BV_norm * U_norm) ** -1
            vkey = f"ortho_VBU_rerr[{i}]"
            self.fvars[vkey] = rerr
            if rerr < tau_ortho:
                break
            if m < U.shape[-1] + V.shape[-1]:
                # TorchScript needs the class var to be assigned to a local to
                # do optional type refinement
                B = self.B
                assert B is not None
                raise ValueError(
                    "Overdetermined shape of U:"
                    f" #B-cols(={B.shape[-1]}) >= #U-cols(={U.shape[-1]}) + #V-cols(={V.shape[-1]}) must hold"
                )
        # Record the iteration counts for diagnostics.
        self.ivars["ortho_i"] = i
        self.ivars["ortho_j"] = j
        return U
+
+
+# Calling tracker is separated from LOBPCG definitions because
+# TorchScript does not support user-defined callback arguments:
+LOBPCG_call_tracker_orig = LOBPCG.call_tracker
+
+
def LOBPCG_call_tracker(self):
    # Monkey-patch target for LOBPCG.call_tracker: forwards the worker
    # instance to the user-supplied tracker callback. TorchScript does
    # not support user-defined callback arguments, hence the indirection.
    self.tracker(self)
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_lowrank.py b/env-llmeval/lib/python3.10/site-packages/torch/_lowrank.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe5a1f3da71d0f5be7c48a4b7cc31fad85f4147e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_lowrank.py
@@ -0,0 +1,298 @@
+"""Implement various linear algebra algorithms for low rank matrices.
+"""
+
+__all__ = ["svd_lowrank", "pca_lowrank"]
+
+from typing import Optional, Tuple
+
+import torch
+from torch import Tensor
+from . import _linalg_utils as _utils
+from .overrides import handle_torch_function, has_torch_function
+
+
def get_approximate_basis(
    A: Tensor, q: int, niter: Optional[int] = 2, M: Optional[Tensor] = None
) -> Tensor:
    """Return tensor :math:`Q` with :math:`q` orthonormal columns such
    that :math:`Q Q^H A` approximates :math:`A`. If :math:`M` is
    specified, then :math:`Q` is such that :math:`Q Q^H (A - M)`
    approximates :math:`A - M`.

    .. note:: The implementation is based on the Algorithm 4.4 from
              Halko et al, 2009.

    .. note:: For an adequate approximation of a k-rank matrix
              :math:`A`, where k is not known in advance but could be
              estimated, the number of :math:`Q` columns, q, can be
              chosen according to the following criteria: in general,
              :math:`k <= q <= min(2*k, m, n)`. For large low-rank
              matrices, take :math:`q = k + 5..10`. If k is
              relatively small compared to :math:`min(m, n)`, choosing
              :math:`q = k + 0..2` may be sufficient.

    .. note:: To obtain repeatable results, reset the seed for the
              pseudorandom number generator

    Args:
        A (Tensor): the input tensor of size :math:`(*, m, n)`

        q (int): the dimension of subspace spanned by :math:`Q`
                 columns.

        niter (int, optional): the number of subspace iterations to
                               conduct; ``niter`` must be a
                               nonnegative integer. In most cases, the
                               default value 2 is more than enough.

        M (Tensor, optional): the input tensor's mean of size
                              :math:`(*, 1, n)`.

    References:
        - Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding
          structure with randomness: probabilistic algorithms for
          constructing approximate matrix decompositions,
          arXiv:0909.4061 [math.NA; math.PR], 2009 (available at
          `arXiv <https://arxiv.org/abs/0909.4061>`_).
    """

    niter = 2 if niter is None else niter
    # Only the column count of A is needed to shape the random test matrix.
    n = A.shape[-1]
    dtype = _utils.get_floating_dtype(A)
    matmul = _utils.matmul

    # Random test matrix: A R (approximately) spans the range of A.
    R = torch.randn(n, q, dtype=dtype, device=A.device)

    # The following code could be made faster using torch.geqrf + torch.ormqr
    # but geqrf is not differentiable
    A_H = _utils.transjugate(A)
    if M is None:
        # Subspace (power) iteration, re-orthonormalizing with QR at every
        # step for numerical stability (Halko et al., Algorithm 4.4).
        Q = torch.linalg.qr(matmul(A, R)).Q
        for i in range(niter):
            Q = torch.linalg.qr(matmul(A_H, Q)).Q
            Q = torch.linalg.qr(matmul(A, Q)).Q
    else:
        # Same iteration applied implicitly to A - M, never materializing
        # the (possibly dense) difference.
        M_H = _utils.transjugate(M)
        Q = torch.linalg.qr(matmul(A, R) - matmul(M, R)).Q
        for i in range(niter):
            Q = torch.linalg.qr(matmul(A_H, Q) - matmul(M_H, Q)).Q
            Q = torch.linalg.qr(matmul(A, Q) - matmul(M, Q)).Q

    return Q
+
+
def svd_lowrank(
    A: Tensor,
    q: Optional[int] = 6,
    niter: Optional[int] = 2,
    M: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor, Tensor]:
    r"""Return a low-rank singular value decomposition ``(U, S, V)`` of a
    matrix, batches of matrices, or a sparse matrix :math:`A` such that
    :math:`A \approx U diag(S) V^T`. If :math:`M` is given, the SVD is
    computed for :math:`A - M` instead.

    .. note:: Based on Algorithm 5.1 from Halko et al, 2009
              (arXiv:0909.4061). The input is assumed to be low-rank;
              for dense full-rank matrices prefer
              :func:`torch.linalg.svd`. Reset the pseudorandom seed to
              obtain repeatable results.

    Args:
        A (Tensor): the input tensor of size :math:`(*, m, n)`
        q (int, optional): a slightly overestimated rank of A.
        niter (int, optional): number of subspace iterations; must be a
            nonnegative integer (default 2).
        M (Tensor, optional): the input tensor's mean of size
            :math:`(*, 1, n)`.
    """
    if not torch.jit.is_scripting():
        operands = (A, M)
        # Only dispatch through __torch_function__ when a non-plain
        # tensor type is involved.
        all_plain = set(map(type, operands)).issubset((torch.Tensor, type(None)))
        if not all_plain and has_torch_function(operands):
            return handle_torch_function(
                svd_lowrank, operands, A, q=q, niter=niter, M=M
            )
    return _svd_lowrank(A, q=q, niter=niter, M=M)
+
+
def _svd_lowrank(
    A: Tensor,
    q: Optional[int] = 6,
    niter: Optional[int] = 2,
    M: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor, Tensor]:
    """Dispatch-free worker for :func:`svd_lowrank` (TorchScript-friendly)."""
    q = 6 if q is None else q
    m, n = A.shape[-2:]
    matmul = _utils.matmul
    if M is None:
        M_t = None
    else:
        M_t = _utils.transpose(M)
    A_t = _utils.transpose(A)

    # Algorithm 5.1 in Halko et al 2009, slightly modified to reduce
    # the number conjugate and transpose operations
    if m < n or n > q:
        # computing the SVD approximation of a transpose in
        # order to keep B shape minimal (the m < n case) or the V
        # shape small (the n > q case)
        Q = get_approximate_basis(A_t, q, niter=niter, M=M_t)
        Q_c = _utils.conjugate(Q)
        if M is None:
            B_t = matmul(A, Q_c)
        else:
            B_t = matmul(A, Q_c) - matmul(M, Q_c)
        # B_t is the small (m, q) projection; sanity-check its shape.
        assert B_t.shape[-2] == m, (B_t.shape, m)
        assert B_t.shape[-1] == q, (B_t.shape, q)
        assert B_t.shape[-1] <= B_t.shape[-2], B_t.shape
        U, S, Vh = torch.linalg.svd(B_t, full_matrices=False)
        V = Vh.mH
        # Lift V back to the original column space via the basis Q.
        V = Q.matmul(V)
    else:
        Q = get_approximate_basis(A, q, niter=niter, M=M)
        Q_c = _utils.conjugate(Q)
        if M is None:
            B = matmul(A_t, Q_c)
        else:
            B = matmul(A_t, Q_c) - matmul(M_t, Q_c)
        B_t = _utils.transpose(B)
        assert B_t.shape[-2] == q, (B_t.shape, q)
        assert B_t.shape[-1] == n, (B_t.shape, n)
        assert B_t.shape[-1] <= B_t.shape[-2], B_t.shape
        U, S, Vh = torch.linalg.svd(B_t, full_matrices=False)
        V = Vh.mH
        # Lift U back to the original row space via the basis Q.
        U = Q.matmul(U)

    return U, S, V
+
+
def pca_lowrank(
    A: Tensor, q: Optional[int] = None, center: bool = True, niter: int = 2
) -> Tuple[Tensor, Tensor, Tensor]:
    r"""Performs linear Principal Component Analysis (PCA) on a low-rank
    matrix, batches of such matrices, or sparse matrix.

    Returns ``(U, S, V)``, a nearly optimal rank-``q`` approximation of
    the SVD of the (optionally centered) input, so that
    :math:`A \approx U diag(S) V^T`.

    Relation to PCA: the columns of :math:`V` are the principal
    directions, :math:`S ** 2 / (m - 1)` are the eigenvalues of
    :math:`A^T A / (m - 1)` (the covariance when ``center=True``), and
    ``matmul(A, V[:, :k])`` projects the data onto the first ``k``
    principal components.

    Output sizes: :math:`U` is ``m x q``, :math:`S` is a ``q``-vector,
    :math:`V` is ``n x q``.

    .. note:: To obtain repeatable results, reset the seed for the
              pseudorandom number generator.

    Args:
        A (Tensor): the input tensor of size :math:`(*, m, n)`
        q (int, optional): a slightly overestimated rank of :math:`A`;
            by default ``q = min(6, m, n)``.
        center (bool, optional): if True, center the input tensor,
            otherwise assume it is already centered.
        niter (int, optional): number of subspace iterations; must be a
            nonnegative integer (default 2).

    References:
        - Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding
          structure with randomness, arXiv:0909.4061, 2009.
    """

    if not torch.jit.is_scripting():
        if type(A) is not torch.Tensor and has_torch_function((A,)):
            return handle_torch_function(
                pca_lowrank, (A,), A, q=q, center=center, niter=niter
            )

    (m, n) = A.shape[-2:]

    # Validate q and niter before any work is done.
    if q is None:
        q = min(6, m, n)
    elif not (q >= 0 and q <= min(m, n)):
        raise ValueError(
            f"q(={q}) must be non-negative integer and not greater than min(m, n)={min(m, n)}"
        )
    if not (niter >= 0):
        raise ValueError(f"niter(={niter}) must be non-negative integer")

    dtype = _utils.get_floating_dtype(A)

    if not center:
        return _svd_lowrank(A, q, niter=niter, M=None)

    if not _utils.is_sparse(A):
        # Dense path: subtract the per-column means directly.
        col_means = A.mean(dim=(-2,), keepdim=True)
        return _svd_lowrank(A - col_means, q, niter=niter, M=None)

    # Sparse path: express the centering term M = ones(m, 1) @ c^T
    # implicitly so that the sparse input is never densified.
    if len(A.shape) != 2:
        raise ValueError("pca_lowrank input is expected to be 2-dimensional tensor")
    col_means = torch.sparse.sum(A, dim=(-2,)) / m
    # Rebuild the 1-d sparse mean vector as an (n, 1) sparse matrix.
    mean_cols = col_means.indices()[0]
    coo_indices = torch.zeros(
        2,
        len(mean_cols),
        dtype=mean_cols.dtype,
        device=mean_cols.device,
    )
    coo_indices[0] = mean_cols
    means_t = torch.sparse_coo_tensor(
        coo_indices, col_means.values(), (n, 1), dtype=dtype, device=A.device
    )
    ones_row = torch.ones(A.shape[:-2] + (1, m), dtype=dtype, device=A.device)
    M = _utils.transpose(torch.sparse.mm(means_t, ones_row))
    return _svd_lowrank(A, q, niter=niter, M=M)
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_meta_registrations.py b/env-llmeval/lib/python3.10/site-packages/torch/_meta_registrations.py
new file mode 100644
index 0000000000000000000000000000000000000000..23e0f8ed21387f8236008aeda82436d6e167ddcd
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_meta_registrations.py
@@ -0,0 +1,6242 @@
+import math
+from enum import Enum
+from functools import partial
+from typing import List, Optional, Sequence, Tuple, Union
+
+import torch
+import torch._prims_common as utils
+from torch import SymBool, SymFloat, Tensor
+from torch._decomp import (
+ _add_op_to_registry,
+ _convert_out_params,
+ global_decomposition_table,
+ meta_table,
+)
+from torch._ops import OpOverload
+from torch._prims import _prim_elementwise_meta, ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND
+from torch._prims_common import (
+ corresponding_complex_dtype,
+ corresponding_real_dtype,
+ elementwise_dtypes,
+ ELEMENTWISE_TYPE_PROMOTION_KIND,
+ IntLike,
+ make_contiguous_strides_for,
+ TensorLike,
+)
+
+from torch._prims_common.wrappers import (
+ _maybe_convert_to_dtype,
+ _maybe_resize_out,
+ _resize_output_check,
+ _safe_copy_out,
+ out_wrapper,
+)
+from torch._refs import _broadcast_shapes, _maybe_broadcast
+from torch.utils import _pytree as pytree
+
+
+aten = torch.ops.aten
+
+_meta_lib_dont_use_me_use_register_meta = torch.library.Library("aten", "IMPL", "Meta")
+
+
def register_meta(op):
    """Decorator factory: register the decorated function as the meta
    implementation for ``op`` — a single op overload or a pytree (e.g.
    list) of overloads — in the global ``meta_table``."""

    def wrapper(fn):
        # Normalize out= parameter handling before registration.
        fn = _convert_out_params(fn)

        def register(op):
            _add_op_to_registry(meta_table, op, fn)

        # `op` may be a nested structure of overloads; register each leaf.
        pytree.tree_map_(register, op)
        return fn

    return wrapper
+
+
def elementwise_meta(
    *args,
    type_promotion: ELEMENTWISE_TYPE_PROMOTION_KIND,
):
    """Generic meta function for elementwise ops: promote dtypes,
    broadcast the arguments, then defer to the prim elementwise meta."""
    # Perform type promotion, as this is expected from prim_metafunction
    _, result_dtype = utils.elementwise_dtypes(
        *args,
        type_promotion_kind=type_promotion,
    )
    args = [_maybe_convert_to_dtype(x, result_dtype) for x in args]

    # Broadcast
    args = _maybe_broadcast(*args)

    # Perform prim checks
    return _prim_elementwise_meta(
        *args, type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT
    )
+
+
def toRealValueType(dtype):
    """Map a complex dtype to its real counterpart; any other dtype is
    returned unchanged."""
    if dtype == torch.complex32:
        return torch.half
    if dtype == torch.cfloat:
        return torch.float
    if dtype == torch.cdouble:
        return torch.double
    return dtype
+
+
+def check_inplace_broadcast(self_shape, *args_shape):
+ broadcasted_shape = tuple(_broadcast_shapes(self_shape, *args_shape))
+ torch._check(
+ broadcasted_shape == self_shape,
+ lambda: f"output with shape {self_shape} doesn't match the broadcast shape {broadcasted_shape}",
+ )
+
+
@register_meta([aten.linspace, aten.logspace])
@out_wrapper()
def meta_linspace_logspace(
    start,
    end,
    steps,
    base=None,  # only meaningful for logspace; unused in the meta kernel
    dtype=None,
    device=None,
    layout=torch.strided,
    pin_memory=False,
    requires_grad=False,
):
    """Shared meta kernel for ``linspace``/``logspace``: validates the
    arguments, infers the output dtype, and allocates a 1-d meta tensor
    of length ``steps``."""
    # Tensor-valued endpoints must be 0-dimensional.
    if isinstance(start, torch.Tensor):
        torch._check(
            start.dim() == 0,
            lambda: "linspace only supports 0-dimensional start and end tensors",
        )
    if isinstance(end, torch.Tensor):
        torch._check(
            end.dim() == 0,
            lambda: "linspace only supports 0-dimensional start and end tensors",
        )

    # Complex endpoints force a complex output dtype.
    if any(isinstance(arg, complex) for arg in (start, end, steps)):
        default_complex_dtype = utils.corresponding_complex_dtype(
            torch.get_default_dtype()
        )
        if dtype is None:
            dtype = default_complex_dtype
        else:
            torch._check(
                utils.is_complex_dtype(dtype),
                lambda: f"linspace(): inferred dtype {default_complex_dtype} can't be safely cast to passed dtype {dtype}",
            )
    else:
        dtype = dtype or torch.get_default_dtype()
    assert isinstance(dtype, torch.dtype)

    # steps does not participate in the computation of the dtype
    torch._check_type(
        isinstance(steps, IntLike),
        lambda: f"received an invalid combination of arguments - got \
({type(start).__name__}, {type(end).__name__}, {type(steps).__name__})",
    )
    assert isinstance(steps, IntLike)  # for mypy
    torch._check(steps >= 0, lambda: "number of steps must be non-negative")

    return torch.empty(
        (steps,),  # type: ignore[arg-type]
        dtype=dtype,
        layout=layout,
        device="meta",
        pin_memory=pin_memory,
        requires_grad=requires_grad,
    )
+
+
@register_meta([aten.take.default, aten.take.out])
@out_wrapper()
def meta_take(self, index):
    """Meta kernel for ``aten.take``: the output has ``index``'s shape
    and ``self``'s dtype; no data is moved."""
    # Mirror eager mode's requirement that the index tensor is long.
    torch._check(
        index.dtype == torch.long,
        lambda: f"take(): Expected a long tensor for index, but got {index.dtype}",
    )
    # Taking anything from an empty tensor is an error unless the index
    # is empty as well.
    taking_from_empty = self.numel() == 0 and index.numel() != 0
    torch._check_index(
        not taking_from_empty,
        lambda: "take(): tried to take from an empty tensor",
    )
    return self.new_empty(index.shape)
+
+
@register_meta([aten.linalg_cross.default, aten.linalg_cross.out])
@out_wrapper()
def linalg_cross(self, other, *, dim=-1):
    """Meta kernel for ``linalg.cross``: validates matching ranks and the
    length-3 requirement along ``dim``, then returns an empty tensor with
    the broadcast shape."""
    torch._check(
        self.ndim == other.ndim,
        lambda: "linalg.cross: inputs must have the same number of dimensions.",
    )
    torch._check(
        self.size(dim) == 3 and other.size(dim) == 3,
        lambda: (
            f"linalg.cross: inputs dimension {dim} must have length 3. "
            f"Got {self.size(dim)} and {other.size(dim)}"
        ),
    )
    broadcast_shape = _broadcast_shapes(self.shape, other.shape)
    return self.new_empty(broadcast_shape)
+
+
@register_meta(aten.linalg_matrix_exp)
@out_wrapper()
def linalg_matrix_exp(self):
    """Meta kernel for ``linalg.matrix_exp``: square floating/complex
    input; output has the same shape and dtype, contiguous layout."""
    squareCheckInputs(self, "linalg.matrix_exp")
    checkFloatingOrComplex(self, "linalg.matrix_exp")
    return torch.empty_like(self, memory_format=torch.contiguous_format)
+
+
@register_meta(
    [aten.cummax.default, aten.cummax.out, aten.cummin.default, aten.cummin.out]
)
@out_wrapper("values", "indices")
def cummaxmin(self, dim):
    """Shared meta kernel for ``cummax``/``cummin``: returns (values,
    indices) with the input's shape; indices are int64."""
    values = torch.empty(self.shape, device=self.device, dtype=self.dtype)
    indices = torch.empty(self.shape, device=self.device, dtype=torch.int64)
    if self.numel() != 0 and self.ndim != 0:
        # Checks that dim is within bounds
        maybe_wrap_dim(dim, self.ndim)
    return values, indices
+
+
@register_meta([aten.logcumsumexp.default, aten.logcumsumexp.out])
@out_wrapper()
def logcumsumexp(self, dim):
    """Meta kernel for ``logcumsumexp``: same shape/dtype as input,
    contiguous output."""
    # Checks that dim is within bounds
    maybe_wrap_dim(dim, self.ndim)
    return torch.empty_like(self).contiguous()
+
+
+# Stride-related code from _exec_fft in aten/src/ATen/native/cuda/SpectralOps.cpp
# Stride-related code from _exec_fft in aten/src/ATen/native/cuda/SpectralOps.cpp
def _exec_fft(out, self, out_sizes, dim, forward):
    """Compute the output strides an FFT kernel would produce and return
    ``out`` restrided accordingly (``out_sizes`` is the target shape,
    ``dim`` the transformed dimensions). Port of ATen's ``_exec_fft``."""
    ndim = self.ndim
    signal_ndim = len(dim)
    batch_dims = ndim - signal_ndim

    # Permute dimensions so batch dimensions come first, and in stride order
    dim_permute = list(range(ndim))

    is_transformed_dim = [False for _ in range(ndim)]
    for d in dim:
        is_transformed_dim[d] = True

    # std::partition
    left, right = [], []
    for d in dim_permute:
        if not is_transformed_dim[d]:
            left.append(d)
        else:
            right.append(d)
    dim_permute = left + right
    batch_end = len(left)

    # Sort the batch dims by decreasing input stride (C++ uses std::sort).
    self_strides = self.stride()
    tmp = dim_permute[:batch_end]
    tmp.sort(key=lambda x: self_strides[x], reverse=True)
    dim_permute = tmp + dim_permute[batch_end:]
    input = self.permute(dim_permute)

    # Collapse batch dimensions into a single dimension
    batched_sizes = [-1] + list(input.shape[batch_dims:])
    input = input.reshape(batched_sizes)

    batch_size = input.size(0)
    batched_sizes[0] = batch_size
    batched_out_sizes = batched_sizes
    # Replace the signal dims with the requested output sizes.
    for i in range(len(dim)):
        batched_out_sizes[i + 1] = out_sizes[dim[i]]
    out = out.reshape(batched_out_sizes)

    # Reshaping to original batch shape and inverting the dimension permutation
    out_strides = [0 for _ in range(ndim)]
    batch_numel = 1
    # Walk the batch dims from innermost to outermost, accumulating strides.
    i = batch_dims - 1
    while i >= 0:
        out_strides[dim_permute[i]] = batch_numel * out.stride(0)
        batch_numel *= out_sizes[dim_permute[i]]
        i -= 1
    for i in range(batch_dims, ndim):
        out_strides[dim_permute[i]] = out.stride(1 + (i - batch_dims))
    return out.as_strided(out_sizes, out_strides, out.storage_offset())
+
+
# See _fft_c2c_cufft in aten/src/ATen/native/cuda/SpectralOps.cpp
# and _fft_c2c_mkl in aten/src/ATen/native/mkl/SpectralOps.cpp
@register_meta([aten._fft_c2c.default, aten._fft_c2c.out])
@out_wrapper()
def meta_fft_c2c(self, dim, normalization, forward):
    """Meta kernel for complex-to-complex FFT: output has the input's
    shape and dtype, with strides mimicking the native kernels."""
    assert self.dtype.is_complex

    out_sizes = self.shape
    output = self.new_empty(out_sizes)

    if not dim:
        return output

    # Process transformed dims in decreasing input-stride order, as the
    # native cuFFT/MKL paths do, so the derived strides match.
    sorted_dims = dim[:]
    self_strides = self.stride()
    sorted_dims.sort(key=lambda x: self_strides[x], reverse=True)
    output = _exec_fft(output, self, out_sizes, sorted_dims, forward)

    return output
+
+
@register_meta([aten._fft_r2c.default, aten._fft_r2c.out])
@out_wrapper()
def meta_fft_r2c(self, dim, normalization, onesided):
    """Meta kernel for real-to-complex FFT: real input, complex output.
    With ``onesided``, the last transformed dim keeps only the
    non-redundant half (Hermitian symmetry)."""
    assert self.dtype.is_floating_point
    shape = list(self.size())

    if onesided:
        last_dim = dim[-1]
        shape[last_dim] = shape[last_dim] // 2 + 1

    complex_dtype = utils.corresponding_complex_dtype(self.dtype)
    return self.new_empty(shape, dtype=complex_dtype)
+
+
@register_meta(aten.randperm.generator_out)
def meta_randperm(n, *, generator=None, out):
    """Meta kernel for ``randperm`` with out=: resize ``out`` to ``(n,)``
    if needed; values are irrelevant on the meta device."""
    return _maybe_resize_out(out, torch.Size([n]))
+
+
@register_meta(aten.randperm.default)
def meta_randperm_default(
    n, *, dtype=torch.long, layout=None, device=None, pin_memory=None
):
    """Meta kernel for ``randperm``: shape/dtype only, no RNG work."""
    return torch.empty(
        n, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory
    )
+
+
@register_meta(aten.randint.default)
def meta_randint(
    high, size, *, dtype=torch.long, layout=None, device=None, pin_memory=None
):
    """Meta kernel for ``randint(high, size)``: allocation only; ``high``
    does not affect the output metadata."""
    return torch.empty(
        size, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory
    )
+
+
@register_meta(aten.randint.low)
def meta_randint_low(
    low,
    high,
    size,
    *,
    dtype=torch.long,
    layout=None,
    device=None,
    pin_memory=None,
):
    """Meta kernel for ``randint(low, high, size)``: allocation only; the
    bounds do not affect the output metadata."""
    return torch.empty(
        size, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory
    )
+
+
@register_meta(aten.rand.default)
def meta_rand_default(size, *, dtype=None, layout=None, device=None, pin_memory=None):
    """Meta kernel for ``rand``: allocation only, no RNG work."""
    return torch.empty(
        size, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory
    )
+
+
@register_meta([aten._fft_c2r.default, aten._fft_c2r.out])
@out_wrapper()
def meta_fft_c2r(self, dim, normalization, lastdim):
    """Meta kernel for complex-to-real FFT: output is real-valued and the
    last transformed dimension has size ``lastdim``."""
    assert self.dtype.is_complex
    shape = list(self.size())
    shape[dim[-1]] = lastdim
    return self.new_empty(shape, dtype=toRealValueType(self.dtype))
+
+
@register_meta(aten.copy_.default)
def meta_copy_(self, src, non_blocking=False):
    """Meta kernel for ``Tensor.copy_``: run the overlap/broadcast checks
    without moving any data, then return ``self`` unchanged."""
    # This code simulates the original decomp from inductor,
    # which runs most of the meta checks that we care about.
    # In theory, we should make this more robust by carefully
    # auditing our C++ copy_() kernel and copying the checks here.

    if torch._debug_has_internal_overlap(self) == 1:  # 1 == MemOverlap::Yes
        raise RuntimeError(
            "more than one element of the written-to tensor refers to a single memory location"
        )

    if isinstance(src, Tensor):
        intermediate = src.to(self, non_blocking)
        if self.size() != intermediate.size():
            # Evaluated only for its broadcast/expand checks; the result
            # is discarded.
            aten.expand_copy.default(intermediate, self.size())
    return self
+
+
def inferUnsqueezeGeometry(tensor, dim):
    """Compute the (sizes, strides) of ``tensor`` unsqueezed at ``dim``,
    mirroring ATen's ``inferUnsqueezeGeometry``."""
    sizes = list(tensor.size())
    strides = list(tensor.stride())
    if dim >= tensor.dim():
        # Appending a trailing size-1 dim: its stride is conventionally 1.
        new_dim_stride = 1
    else:
        new_dim_stride = sizes[dim] * strides[dim]
    sizes.insert(dim, 1)
    strides.insert(dim, new_dim_stride)
    return sizes, strides
+
+
@register_meta(aten.unsqueeze_.default)
def meta_unsqueeze_(self, dim):
    """Meta kernel for in-place ``unsqueeze_``: restride ``self`` with a
    size-1 dimension inserted at ``dim``."""
    dim = maybe_wrap_dim(dim, self.dim() + 1)
    g_sizes, g_strides = inferUnsqueezeGeometry(self, dim)
    self.as_strided_(g_sizes, g_strides)
    return self
+
+
@register_meta(aten._sparse_semi_structured_linear)
def meta_sparse_structured_linear(
    input: Tensor,
    weight: Tensor,
    _meta: Tensor,
    bias: Optional[Tensor] = None,
    _activation_opt: Optional[str] = None,
):
    """Meta kernel for semi-structured (2:4) sparse linear: output keeps
    ``input``'s shape except the last dim, which becomes ``weight``'s
    row count; int8 inputs produce int32 outputs."""
    output_sizes = list(input.shape)
    if bias is not None:
        assert weight.size(0) == bias.size(0), "output size mismatch"
    # weight is stored 2:4-compressed, hence half the input feature dim.
    assert weight.size(1) == input.size(-1) / 2
    output_sizes[-1] = weight.size(0)

    # see: https://github.com/pytorch/pytorch/pull/114477#issuecomment-1830121375
    # We assume that we have already squashed the inputs into a 2-D tensor
    # Then, as the output is transposed, we need to propagate the transposed
    # stride information to the output tensor
    assert len(input.shape) == 2, "we can only handle the squashed input case"
    transposed_strides = (1, input.size(0))

    output = input.new_empty(
        output_sizes,
        dtype=input.dtype if input.dtype != torch.int8 else torch.int32,
    ).as_strided(output_sizes, transposed_strides)

    return output
+
+
@register_meta(aten._cslt_sparse_mm)
def meta__cslt_sparse_mm(
    compressed_A: torch.Tensor,
    dense_B: torch.Tensor,
    bias: Optional[Tensor] = None,
    alpha: Optional[Tensor] = None,
    out_dtype: Optional[torch.dtype] = None,
    transpose_result: bool = False,
):
    """Meta kernel for cuSPARSELt matmul: recover ``m`` from the size of
    the compressed operand and return an (m, n) — or (n, m) when
    ``transpose_result`` — empty result."""
    assert dense_B.dtype in {
        torch.float16,
        torch.bfloat16,
        torch.int8,
    }, "_cslt_sparse_mm only supports fp16, bf16, and int8"
    assert compressed_A.dtype == dense_B.dtype, "inputs must have the same dtype"
    assert len(dense_B.shape) == 2, "_cslt_sparse_mm only supports 2d inputs"

    is_int8_input_type = compressed_A.dtype == torch.int8
    # 2:4 compression ratio (incl. metadata), expressed in 1/16ths of the
    # dense element count: 10/16 for int8, 9/16 for 16-bit dtypes.
    compression_factor = 10 if is_int8_input_type else 9
    k = dense_B.size(0)
    n = dense_B.size(1)
    # Invert the compressed storage size to get the dense row count m.
    m = (compressed_A.numel() * 16) // (compression_factor * k)
    if bias is not None:
        assert m == bias.size(0)

    if out_dtype is not None:
        assert (
            is_int8_input_type and out_dtype == torch.float16
        ), "out_dtype is only supported for i8i8->fp16 matmul"
    output_shape = (n, m) if transpose_result else (m, n)
    result = dense_B.new_empty(output_shape, dtype=out_dtype)
    return result
+
+
@register_meta(aten.index_reduce.default)
def meta_index_reduce(
    self: Tensor,
    dim: int,
    index: Tensor,
    source: torch.Tensor,
    reduce: str,
    *,
    include_self: bool = True,
) -> Tensor:
    """Meta kernel for ``index_reduce``: output has ``self``'s shape and
    dtype, contiguous layout."""
    return torch.empty_like(self, memory_format=torch.contiguous_format)
+
+
@register_meta(aten.index_reduce_.default)
def meta_index_reduce_(
    self: Tensor,
    dim: int,
    index: Tensor,
    source: torch.Tensor,
    reduce: str,
    *,
    include_self: bool = True,
) -> Tensor:
    """Meta kernel for in-place ``index_reduce_``: metadata is unchanged,
    so just return ``self``."""
    return self
+
+
# Implementations below are taken from https://github.com/albanD/subclass_zoo/blob/main/python_meta_tensor.py
@out_wrapper()
@register_meta(aten.index_select.default)
def meta_index_select(self, dim, index):
    """Meta kernel for ``index_select``: result keeps ``self``'s shape
    except along ``dim``, which becomes ``index.numel()``."""
    result_size = list(self.size())
    if self.dim() > 0:
        result_size[dim] = index.numel()
    return self.new_empty(result_size)
+
+
@register_meta(aten.segment_reduce.default)
def meta_segment_reduce(
    data: Tensor,
    reduce: str,
    *,
    lengths: Optional[Tensor] = None,
    indices: Optional[Tensor] = None,
    offsets: Optional[Tensor] = None,
    axis: int = 0,
    unsafe: bool = False,
    initial=None,
) -> Tensor:
    """Meta kernel for ``segment_reduce``: the output replaces ``data``'s
    leading dims (up to ``axis``) with the segment-count shape derived
    from ``lengths`` or ``offsets``."""
    if indices is not None:
        raise NotImplementedError(
            "segment_reduce(): indices based reduction is not supported yet."
        )

    def segment_reduce_lengths_tensor(lengths_shape):
        # One output row per segment, keeping data's trailing dims.
        return torch.empty(
            lengths_shape + data.shape[axis + 1 :],
            dtype=data.dtype,
            device="meta",
            memory_format=torch.contiguous_format,
        )

    if lengths is not None:
        return segment_reduce_lengths_tensor(lengths.shape)
    # FIXME should probably check that lengths and offset aren't both set, but
    # the ATen implementation neglects this too
    if offsets is not None:
        # lengths == torch.diff(offsets)
        lengths_shape = offsets.shape[:-1] + (offsets.shape[-1] - 1,)
        return segment_reduce_lengths_tensor(lengths_shape)
    raise RuntimeError("segment_reduce(): Either lengths or offsets must be defined.")
+
+
@register_meta([aten.max.default, aten.max.unary_out])
@out_wrapper()
def meta_max(self):
    """Meta kernel for full-reduction ``max``: 0-dim result."""
    return self.new_empty(())
+
+
@register_meta(aten.max.dim)
def meta_max_dim(self, dim, keepdim=False):
    """Meta kernel for ``max.dim``: returns (values, int64 indices) with
    the reduced shape."""
    dim = utils.reduction_dims(self.shape, (dim,))
    output_shape = _compute_reduction_shape(self, dim, keepdim)
    return (
        self.new_empty(output_shape),
        self.new_empty(output_shape, dtype=torch.long),
    )
+
+
@register_meta([aten.min.default, aten.min.unary_out])
@out_wrapper()
def meta_min(self):
    """Meta kernel for full-reduction ``min``: 0-dim result."""
    return self.new_empty(())
+
+
@register_meta(aten.min.dim)
def meta_min_dim(self, dim, keepdim=False):
    """Meta kernel for ``min.dim``: returns (values, int64 indices) with
    the reduced shape."""
    dim = utils.reduction_dims(self.shape, (dim,))
    output_shape = _compute_reduction_shape(self, dim, keepdim)
    return (
        self.new_empty(output_shape),
        self.new_empty(output_shape, dtype=torch.long),
    )
+
+
@register_meta(aten.angle.default)
def meta_angle(self):
    """Meta kernel for ``angle``: a complex input yields the matching
    real dtype; otherwise integer inputs promote to float."""
    if not self.is_complex():
        _, result_dtype = elementwise_dtypes(
            self,
            type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
        )
    else:
        result_dtype = corresponding_real_dtype(self.dtype)
    return torch.empty_like(self, dtype=result_dtype)
+
+
@register_meta(aten.angle.out)
def meta_angle_out(self, out):
    """Meta kernel for ``angle.out``: resize ``out`` and copy the meta
    result of ``angle`` into it."""
    torch._resize_output_(out, self.size(), self.device)
    return out.copy_(torch.angle(self))
+
+
@register_meta(aten._assert_async.default)
def assert_async(val):
    """Meta kernel for ``_assert_async``: no-op (nothing to check on meta)."""
    return
+
+
@register_meta(aten._assert_async.msg)
def assert_async_meta(val, assert_msg):
    """Meta kernel for ``_assert_async.msg``: no-op (nothing to check on meta)."""
    return
+
+
@register_meta(aten._make_dep_token.default)
def make_dep_token(
    *,
    dtype=None,
    layout=None,
    device=None,
    pin_memory=None,
    memory_format=None,
):
    """Meta kernel for ``_make_dep_token``: a scalar meta tensor used only
    as a data-dependency token (all kwargs are ignored here)."""
    return torch.empty([], device="meta")
+
+
@register_meta(aten.sym_constrain_range.default)
def sym_constrain_range(size, min=None, max=None):
    """Meta kernel for ``sym_constrain_range``: record a value-range
    constraint for a symbolic integer.

    Raises:
        ValueError: if ``size`` is a SymFloat or SymBool, which cannot be
            range-constrained.
    """
    # Avoid importing sympy at a module level
    from torch.fx.experimental.symbolic_shapes import constrain_range

    if isinstance(size, (SymFloat, SymBool)):
        # Fixed message typo: "Symbool" -> "SymBool".
        raise ValueError("Constraining SymFloat or SymBool is nyi")
    constrain_range(size, min=min, max=max)
+
+
@register_meta(aten._functional_sym_constrain_range.default)
def functional_sym_constrain_range(size, min=None, max=None, dep_token=None):
    """Functionalized variant: apply the constraint and thread the
    dependency token through."""
    aten.sym_constrain_range(size, min=min, max=max)
    return dep_token
+
+
@register_meta(aten.sym_constrain_range_for_size.default)
def sym_constrain_range_for_size(size, min=None, max=None):
    """Constrain a symbolic size to [min, max]; SymFloat/SymBool rejected."""
    # Avoid importing sympy at a module level
    from torch.fx.experimental.symbolic_shapes import _constrain_range_for_size

    if isinstance(size, (SymFloat, SymBool)):
        raise ValueError("Constraining SymFloat or Symbool is nyi")
    _constrain_range_for_size(size, min=min, max=max)
+
+
@register_meta(aten._functional_sym_constrain_range_for_size.default)
def functional_sym_constrain_range_for_size(size, min, max, dep_token):
    """Functionalized variant: constrain the size, return the dep token."""
    aten.sym_constrain_range_for_size(size, min=min, max=max)
    return dep_token
+
+
@register_meta(aten._functional_assert_async.msg)
def functional_assert_async_meta(val, assert_msg, dep_token):
    """Meta for functional assert: nothing to check, pass the token through."""
    return dep_token
+
+
+# From aten/src/ATen/native/LinearAlgebraUtils.h
+def squareCheckInputs(self: Tensor, f_name: str):
+ assert (
+ self.dim() >= 2
+ ), f"{f_name}: The input tensor must have at least 2 dimensions."
+ assert self.size(-1) == self.size(
+ -2
+ ), f"{f_name}: A must be batches of square matrices, but they are {self.size(-2)} by {self.size(-1)} matrices"
+
+
+# Validates input shapes and devices
+# for linear solve methods (solve, cholesky_solve, lu_solve, triangular_solve)
+# From aten/src/ATen/native/LinearAlgebraUtils.h
def linearSolveCheckInputs(
    self: Tensor,
    A: Tensor,
    name: str,
):
    """Validate that b (`self`) and A form a legal pair for a linear solve.

    Checks, in order: same device, same dtype, A is (batched) square, and
    A's column count matches b's row count. The first failing condition
    raises via torch._check with the matching message below.
    """
    # b and A must live on the same device.
    torch._check(
        self.device == A.device,
        lambda: (
            f"Expected b and A to be on the same device, but found b on "
            f"{self.device} and A on {A.device} instead."
        ),
    )

    # ... and share a dtype.
    torch._check(
        self.dtype == A.dtype,
        lambda: (
            f"Expected b and A to have the same dtype, but found b of type "
            f"{self.dtype} and A of type {A.dtype} instead."
        ),
    )

    # A must be square in its last two dims.
    torch._check(
        A.size(-1) == A.size(-2),
        lambda: (
            f"A must be batches of square matrices, "
            f"but they are {A.size(-2)} by {A.size(-1)} matrices"
        ),
    )

    # A (n x n) must be conformable with b (n x k).
    torch._check(
        A.size(-1) == self.size(-2),
        lambda: (
            f"Incompatible matrix sizes for {name}: each A "
            f"matrix is {A.size(-1)} by {A.size(-1)}"
            f" but each b matrix is {self.size(-2)} by {self.size(-1)}"
        ),
    )
+
+
+# From aten/src/ATen/native/LinearAlgebraUtils.h
+def checkFloatingOrComplex(
+ t: Tensor, f_name: str, allow_low_precision_dtypes: bool = True
+):
+ dtype = t.dtype
+ torch._check(
+ t.is_floating_point() or t.is_complex(),
+ lambda: f"{f_name}: Expected a floating point or complex tensor as input. Got {dtype}",
+ )
+ if not allow_low_precision_dtypes:
+ torch._check(
+ dtype in (torch.float, torch.double, torch.cfloat, torch.cdouble),
+ lambda: f"{f_name}: Low precision dtypes not supported. Got {dtype}",
+ )
+
+
+# From aten/src/ATen/native/LinearAlgebraUtils.h
+def checkIsMatrix(A: Tensor, f_name: str, arg_name: str = "A"):
+ torch._check(
+ A.dim() >= 2,
+ lambda: f"{f_name}: The input tensor {arg_name} must have at least 2 dimensions.",
+ )
+
+
def checkInputsSolver(
    A: Tensor,
    B: Tensor,
    left: bool,
    f_name: str,
):
    """Shape checks shared by solver metas: A square, B a matrix, and the
    contraction dims compatible for AX = B (left) or XA = B (right)."""
    squareCheckInputs(A, f_name)
    checkIsMatrix(B, f_name)
    if left:
        dims_compatible = A.size(-2) == B.size(-2)
    else:
        dims_compatible = A.size(-1) == B.size(-1)
    torch._check(
        dims_compatible,
        lambda: (
            f"{f_name}: Incompatible shapes of A and B for the equation "
            f"{'AX = B' if left else 'XA = B'}"
            f" ({A.size(-2)}x{A.size(-1)} and {B.size(-2)}x{B.size(-1)})"
        ),
    )
+
+
+def checkSameDevice(
+ fn_name: str, result: Tensor, input: Tensor, result_name: str = "result"
+):
+ torch._check(
+ result.device == input.device,
+ lambda: (
+ f"{fn_name}: Expected {result_name} and input tensors to be on the same device, but got "
+ f"{result_name} on {result.device} and input on {input.device}"
+ ),
+ )
+
+
+def checkUplo(UPLO: str):
+ UPLO_uppercase = UPLO.upper()
+ torch._check(
+ len(UPLO) == 1 and (UPLO_uppercase == "U" or UPLO_uppercase == "L"),
+ lambda: f"Expected UPLO argument to be 'L' or 'U', but got {UPLO}",
+ )
+
+
@register_meta([aten._linalg_eigh.default, aten._linalg_eigh.eigenvalues])
@out_wrapper("eigenvalues", "eigenvectors")
def meta__linalg_eigh(
    A: Tensor,
    UPLO: str = "L",
    compute_v: bool = True,
):
    """Meta kernel for _linalg_eigh: real eigenvalues with A's batch shape;
    eigenvectors (when requested) shaped like A with column-major strides."""
    squareCheckInputs(A, "linalg.eigh")
    checkUplo(UPLO)

    dims = list(A.shape)
    if not compute_v:
        vecs = A.new_empty([0])
    else:
        vecs = A.new_empty(dims)
        vecs.as_strided_(dims, make_contiguous_strides_for(dims, row_major=False))

    # Eigenvalues drop the last matrix dimension and are always real.
    dims.pop()
    vals = A.new_empty(dims, dtype=toRealValueType(A.dtype))

    return vals, vecs
+
+
def cloneBatchedColumnMajor(src: Tensor) -> Tensor:
    """Return a copy of `src` with the same shape but column-major (Fortran)
    layout in the last two dimensions."""
    transposed_copy = src.mT.clone(memory_format=torch.contiguous_format)
    return transposed_copy.transpose(-2, -1)
+
+
@register_meta(aten._cholesky_solve_helper)
@out_wrapper()
def _cholesky_solve_helper(self: Tensor, A: Tensor, upper: bool) -> Tensor:
    """Meta kernel: the solution has b's shape with column-major layout."""
    return cloneBatchedColumnMajor(self)
+
+
@register_meta(aten.cholesky_solve)
@out_wrapper()
def cholesky_solve(self: Tensor, A: Tensor, upper: bool = False) -> Tensor:
    """Meta kernel for cholesky_solve: validates that both b (`self`) and A
    are at least 2-D, broadcasts their batch dimensions against each other,
    and delegates output metadata to _cholesky_solve_helper."""
    torch._check(
        self.ndim >= 2,
        lambda: f"b should have at least 2 dimensions, but has {self.ndim} dimensions instead",
    )
    torch._check(
        A.ndim >= 2,
        lambda: f"u should have at least 2 dimensions, but has {A.ndim} dimensions instead",
    )
    self_broadcasted, A_broadcasted = _linalg_broadcast_batch_dims_name(
        self, A, "cholesky_solve"
    )
    return _cholesky_solve_helper(self_broadcasted, A_broadcasted, upper)
+
+
@register_meta(aten.cholesky)
@out_wrapper()
def cholesky(self: Tensor, upper: bool = False) -> Tensor:
    """Meta kernel for cholesky: empty inputs short-circuit; otherwise the
    input must be batched square and the output is column-major."""
    if self.numel() != 0:
        squareCheckInputs(self, "cholesky")
        return cloneBatchedColumnMajor(self)
    return torch.empty_like(self, memory_format=torch.legacy_contiguous_format)
+
+
@register_meta(aten.cholesky_inverse)
@out_wrapper()
def cholesky_inverse(self: Tensor, upper: bool = False) -> Tensor:
    """Meta kernel for cholesky_inverse: square input, column-major output."""
    squareCheckInputs(self, "cholesky_inverse")
    return cloneBatchedColumnMajor(self)
+
+
+# From aten/src/ATen/native/BatchLinearAlgebra.cpp
@register_meta(aten.linalg_cholesky_ex.default)
def linalg_cholesky_ex(A: Tensor, upper: bool = False, check_errors: bool = False):
    """Meta kernel for linalg.cholesky_ex: L is column-major with A's shape,
    infos is int32 over A's batch dimensions."""
    squareCheckInputs(A, "linalg.cholesky")
    checkFloatingOrComplex(A, "linalg.cholesky")

    full_shape = A.shape
    batch_shape = full_shape[: len(full_shape) - 2]

    # Factor L: same shape as A, column-major strides.
    L = A.new_empty(full_shape)
    L.as_strided_(full_shape, make_contiguous_strides_for(full_shape, False))

    # Per-matrix status codes.
    infos = A.new_empty(batch_shape, dtype=torch.int32)
    return L, infos
+
+
@register_meta(
    [aten.linalg_householder_product.default, aten.linalg_householder_product.out]
)
@out_wrapper()
def linalg_householder_product(input: Tensor, tau: Tensor) -> Tensor:
    """Meta kernel for linalg.householder_product.

    Validates, in order: input rank, tall-or-square input, tau's last dim not
    exceeding input's, tau having exactly one fewer dim than input, matching
    batch dims (for batched inputs), matching dtype, and matching device.
    Returns column-major output metadata shaped like `input`.
    """
    torch._check(
        input.ndim >= 2,
        lambda: "torch.linalg.householder_product: input must have at least 2 dimensions.",
    )
    torch._check(
        input.size(-2) >= input.size(-1),
        lambda: "torch.linalg.householder_product: input.shape[-2] must be greater than or equal to input.shape[-1]",
    )
    torch._check(
        input.size(-1) >= tau.size(-1),
        lambda: "torch.linalg.householder_product: input.shape[-1] must be greater than or equal to tau.shape[-1]",
    )

    torch._check(
        input.ndim - tau.ndim == 1,
        lambda: (
            f"torch.linalg.householder_product: Expected tau to have one dimension less than input, "
            f"but got tau.ndim equal to {tau.ndim} and input.ndim is equal to {input.ndim}"
        ),
    )
    # Batched case: tau's batch dims must agree with input's.
    if input.ndim > 2:
        expected_batch_tau_shape = input.shape[:-2]
        actual_batch_tau_shape = tau.shape[:-1]
        torch._check(
            actual_batch_tau_shape == expected_batch_tau_shape,
            lambda: (
                f"torch.linalg.householder_product: Expected batch dimensions of tau to be "
                f"equal to input.shape[:-2], but got {actual_batch_tau_shape}"
            ),
        )

    torch._check(
        tau.dtype == input.dtype,
        lambda: (
            f"torch.linalg.householder_product: tau dtype {tau.dtype}"
            f" does not match input dtype {input.dtype}"
        ),
    )
    checkSameDevice("torch.linalg.householder_product", tau, input, "tau")

    # Output is column-major, matching the LAPACK convention.
    return torch.empty_strided(
        size=input.shape,
        stride=make_contiguous_strides_for(input.shape, row_major=False),
        dtype=input.dtype,
        device=input.device,
    )
+
+
+# From aten/src/ATen/native/BatchLinearAlgebra.cpp
@register_meta(aten.linalg_inv_ex.default)
def linalg_inv_ex_meta(A: Tensor, check_errors: bool = False):
    """Meta kernel for linalg.inv_ex: column-major inverse with A's shape,
    plus an int32 infos tensor over the batch dimensions."""
    squareCheckInputs(A, "linalg.inv_ex")
    checkFloatingOrComplex(A, "linalg.inv_ex", allow_low_precision_dtypes=False)

    inverse = A.new_empty(A.shape)
    inverse.as_strided_(A.shape, make_contiguous_strides_for(A.shape, row_major=False))

    infos = A.new_empty(A.shape[:-2], dtype=torch.int32)
    return inverse, infos
+
+
@register_meta([aten.linalg_ldl_factor_ex.default, aten.linalg_ldl_factor_ex.out])
@out_wrapper("LD", "pivots", "info")
def linalg_ldl_factor_ex_meta(
    self: Tensor,
    *,
    hermitian: bool = False,
    check_errors: bool = False,
) -> Tuple[Tensor, Tensor, Tensor]:
    """Meta kernel for linalg.ldl_factor_ex: LD is column-major with self's
    shape; pivots drops the last dim; info drops the last two."""
    squareCheckInputs(self, "torch.linalg.ldl_factor_ex")
    checkFloatingOrComplex(self, "torch.linalg.ldl_factor_ex")
    shape = self.shape
    LD = torch.empty_strided(
        size=shape,
        stride=make_contiguous_strides_for(shape, row_major=False),
        dtype=self.dtype,
        device=self.device,
    )
    pivots = self.new_empty(shape[:-1], dtype=torch.int)
    info = self.new_empty(shape[:-2], dtype=torch.int)
    return LD, pivots, info
+
+
@register_meta([aten.linalg_ldl_solve.default, aten.linalg_ldl_solve.out])
@out_wrapper()
def linalg_ldl_solve_meta(
    LD: Tensor, pivots: Tensor, B: Tensor, *, hermitian: bool = False
) -> Tensor:
    """Meta kernel for linalg.ldl_solve.

    Validates LD (square, floating/complex), the LD/B solve pair, B's rank,
    the pivots shape/dtype, and the LD/B dtype match; then broadcasts B's
    batch dims against LD's and returns column-major output metadata.
    """
    squareCheckInputs(LD, "torch.linalg.ldl_solve")
    checkFloatingOrComplex(LD, "torch.linalg.ldl_solve")
    linearSolveCheckInputs(B, LD, "torch.linalg.ldl_solve")
    torch._check(
        B.ndim >= 2,
        lambda: (
            f"torch.linalg.ldl_solve: Expected B to have at least 2 dimensions, "
            f"but it has {B.ndim} dimensions instead"
        ),
    )
    # pivots must cover exactly one entry per row of each LD matrix.
    expected_pivots_shape = LD.shape[:-1]
    torch._check(
        expected_pivots_shape == pivots.shape,
        lambda: (
            f"torch.linalg.ldl_solve: Expected LD.shape[:-1] and pivots.shape to be the same, "
            f"but got pivots with shape {pivots.shape} instead"
        ),
    )
    torch._check(
        utils.is_integer_dtype(pivots.dtype),
        lambda: f"torch.linalg.ldl_solve: Expected pivots to be integers. Got {pivots.dtype}",
    )
    torch._check(
        LD.dtype == B.dtype,
        lambda: f"torch.linalg.ldl_solve: LD dtype {LD.dtype} does not match b dtype {B.dtype}",
    )
    # Result takes B's broadcast shape with column-major strides.
    B_broadcast_size, _ = _linalg_broadcast_batch_dims(B, LD)
    return torch.empty_strided(
        size=B_broadcast_size,
        stride=make_contiguous_strides_for(B_broadcast_size, row_major=False),
        dtype=B.dtype,
        device=B.device,
    )
+
+
@register_meta([aten.linalg_lu.default, aten.linalg_lu.out])
@out_wrapper("P", "L", "U")
def linalg_lu_meta(A: Tensor, *, pivot: bool = True) -> Tuple[Tensor, Tensor, Tensor]:
    """Meta kernel for linalg.lu on (*, m, n): P is (*, m, m) (empty when
    pivot=False), L is (*, m, k), U is (*, k, n), with k = min(m, n)."""
    torch._check(
        A.ndim >= 2,
        lambda: f"linalg.lu: Expected tensor with 2 or more dimensions. Got size: {A.shape} instead",
    )

    out_sizes = list(A.shape)
    m, n = out_sizes[-2], out_sizes[-1]
    k = min(m, n)

    # P: (*, m, m) permutation matrix, only when pivoting.
    out_sizes[-1] = m
    P = A.new_empty(out_sizes) if pivot else A.new_empty([0])

    # L: (*, m, k)
    out_sizes[-1] = k
    L = A.new_empty(out_sizes)

    # U: (*, k, n)
    out_sizes[-2] = k
    out_sizes[-1] = n
    U = A.new_empty(out_sizes)
    return P, L, U
+
+
@register_meta([aten.linalg_lu_factor_ex.default, aten.linalg_lu_factor_ex.out])
@out_wrapper("LU", "pivots", "info")
def linalg_lu_factor_ex_meta(
    A: Tensor, *, pivot: bool = True, check_errors: bool = False
) -> Tuple[Tensor, Tensor, Tensor]:
    """Meta kernel for linalg.lu_factor_ex on (*, m, n): LU is column-major
    with A's shape, pivots is (*, min(m, n)), info is (*,); both torch.int."""
    torch._check(
        A.ndim >= 2,
        lambda: f"torch.lu_factor: Expected tensor with 2 or more dimensions. Got size: {A.shape} instead",
    )

    dims = list(A.shape)
    m, n = dims[-2], dims[-1]

    LU = torch.empty_strided(
        size=dims,
        stride=make_contiguous_strides_for(dims, row_major=False),
        dtype=A.dtype,
        device=A.device,
    )

    # pivots: batch shape + [min(m, n)]
    dims.pop()
    dims[-1] = min(m, n)
    pivots = A.new_empty(dims, dtype=torch.int)

    # info: batch shape only
    dims.pop()
    info = A.new_empty(dims, dtype=torch.int)

    return LU, pivots, info
+
+
@register_meta([aten.linalg_lu_solve.default, aten.linalg_lu_solve.out])
@out_wrapper()
def linalg_lu_solve_meta(
    LU: Tensor,
    pivots: Tensor,
    B: Tensor,
    *,
    left: bool = True,
    adjoint: bool = False,
) -> Tensor:
    """Meta kernel for linalg.lu_solve.

    Validates dtypes (LU floating/complex, LU == B, int32 pivots), matrix
    shapes (LU square, LU/B solve-compatible, one pivot per column), and
    batch shapes, then returns output metadata with B's broadcast shape.
    The result is column-major for left=True and row-major otherwise.
    """
    # dtype
    checkFloatingOrComplex(LU, "torch.linalg.lu_solve")
    torch._check(
        LU.dtype == B.dtype,
        lambda: (
            f"linalg.lu_solve: Expected LU and B to have the same dtype, "
            f"but found LU of type {LU.dtype} and B of type {B.dtype} instead"
        ),
    )
    torch._check(
        pivots.dtype == torch.int,
        lambda: "linalg.lu_solve: pivots should be a Tensor of scalar type torch.int32",
    )

    # matrix shapes
    squareCheckInputs(LU, "torch.linalg.lu_solve")
    checkInputsSolver(LU, B, left, "linalg.lu_solve")
    torch._check(
        LU.size(-1) == pivots.size(-1),
        lambda: "linalg.lu_solve: Number of pivots per batch should be same as the dimension of the matrix",
    )

    # batches
    torch._check(
        LU.shape[:-1] == pivots.shape,
        lambda: (
            f"linalg.lu_solve: Expected LU.shape[:-1] and pivots.shape to be the same, "
            f"but got pivots with shape {pivots.shape} instead"
        ),
    )

    B_broadcast_size, _ = _linalg_broadcast_batch_dims(B, LU)

    result = torch.empty_strided(
        size=B_broadcast_size,
        stride=make_contiguous_strides_for(B_broadcast_size, row_major=not left),
        dtype=B.dtype,
        device=B.device,
    )

    # NOTE(review): for the right-solve path the eager kernel appears to work
    # on the conjugated result; mirror the conj bit here for non-empty
    # complex outputs — confirm against the C++ implementation.
    if result.numel() != 0 and not left:
        if result.is_complex():
            result = result.conj()

    return result
+
+
@register_meta(aten.lu_unpack)
@out_wrapper("P", "L", "U")
def lu_unpack_meta(
    LU: Tensor,
    pivots: Tensor,
    unpack_data: bool = True,
    unpack_pivots: bool = True,
) -> Tuple[Tensor, Tensor, Tensor]:
    """Meta kernel for lu_unpack on (*, m, n) LU data: P is (*, m, m), L is
    (*, m, k), U is (*, k, n) with k = min(m, n); skipped outputs are empty."""
    torch._check(
        LU.ndim >= 2,
        lambda: f"torch.lu_unpack: Expected tensor with 2 or more dimensions. Got size: {LU.shape} instead",
    )
    if unpack_pivots:
        torch._check(
            pivots.dtype == torch.int32,
            lambda: (
                "torch.lu_unpack: LU_pivots is expected to be a contiguous tensor of torch.int32 dtype.\n"
                "Note: this function is intended to be used with the output produced by torch.linalg.lu_factor"
            ),
        )

    dims = list(LU.shape)
    m, n = dims[-2], dims[-1]
    k = min(m, n)

    # P: (*, m, m) only when pivots are unpacked.
    dims[-1] = m
    P = LU.new_empty(dims) if unpack_pivots else LU.new_empty([0])

    if not unpack_data:
        L = LU.new_empty([0])
        U = LU.new_empty([0])
    else:
        dims[-1] = k
        L = LU.new_empty(dims)
        dims[-2] = k
        dims[-1] = n
        U = LU.new_empty(dims)
    return P, L, U
+
+
+# parse the "mode" param in linalg_qr: return a tuple of bools (compute_q, reduced)
+def _parse_qr_mode(mode: str) -> Tuple[bool, bool]:
+ if mode == "reduced":
+ compute_q = True
+ reduced = True
+ elif mode == "complete":
+ compute_q = True
+ reduced = False
+ elif mode == "r":
+ compute_q = False
+ reduced = True # this is actually irrelevant in this mode
+ else:
+ torch._check(
+ False,
+ lambda: (
+ f"qr received unrecognized mode '{mode}' "
+ f"but expected one of 'reduced' (default), 'r', or 'complete'"
+ ),
+ )
+ return compute_q, reduced
+
+
@register_meta([aten.linalg_qr.default, aten.linalg_qr.out])
@out_wrapper("Q", "R")
def linalg_qr_meta(
    A: Tensor,
    mode: str = "reduced",
) -> Tuple[Tensor, Tensor]:
    """Meta kernel for linalg.qr on (*, m, n): Q is (*, m, k) (reduced) or
    (*, m, m) (complete), R is (*, k, n); both column-major."""
    checkIsMatrix(A, "linalg.qr")
    checkFloatingOrComplex(A, "linalg.qr")

    compute_q, reduced_mode = _parse_qr_mode(mode)

    m, n = A.shape[-2], A.shape[-1]
    k = min(m, n)

    if not compute_q:
        Q = A.new_empty([0])
    else:
        Q_shape = list(A.shape)
        Q_shape[-1] = k if reduced_mode else m
        Q = A.new_empty(Q_shape)
        Q.as_strided_(Q_shape, make_contiguous_strides_for(Q_shape, row_major=False))

    R_shape = list(A.shape)
    # R keeps full rows only in 'complete' mode with Q computed.
    R_shape[-2] = k if reduced_mode or not compute_q else m
    R = A.new_empty(R_shape)
    R.as_strided_(R_shape, make_contiguous_strides_for(R_shape, row_major=False))
    return Q, R
+
+
@register_meta([aten._linalg_slogdet.default, aten._linalg_slogdet.sign])
@out_wrapper("sign", "logabsdet", "LU", "pivots")
def _linalg_slogdet(A: Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
    """Meta kernel for _linalg_slogdet: sign/logabsdet over batch dims, plus
    the column-major LU factor and int32 pivots as side outputs."""
    squareCheckInputs(A, "linalg.slogdet")
    checkFloatingOrComplex(A, "linalg.slogdet", False)
    full_shape = A.shape
    batch_shape = full_shape[:-2]
    sign = A.new_empty(batch_shape)
    # logabsdet is always real, even for complex A.
    logabsdet = A.new_empty(batch_shape, dtype=toRealValueType(A.dtype))
    LU = torch.empty_strided(
        size=full_shape,
        stride=make_contiguous_strides_for(full_shape, False),
        dtype=A.dtype,
        device=A.device,
    )
    pivots = A.new_empty(full_shape[:-1], dtype=torch.int32)
    return sign, logabsdet, LU, pivots
+
+
+# From aten/src/ATen/native/BatchLinearAlgebra.cpp
+# NOTE: matching defaults in aten/src/ATen/native/native_functions.yaml
@register_meta(aten._linalg_svd.default)
def _linalg_svd_meta(
    A: Tensor,
    full_matrices: bool = False,
    compute_uv: bool = True,
    driver: Optional[str] = None,
):
    """Meta kernel for _linalg_svd on (*, m, n).

    When compute_uv is set, U is (*, m, m|k) column-major and V is
    (*, n|k, n) (stride layout depends on the device, see below); otherwise
    both are empty. S is always real with shape (*, k), k = min(m, n).
    """
    checkIsMatrix(A, "linalg.svd")
    checkFloatingOrComplex(A, "linalg.svd")

    batch_dims = list(A.shape[:-2])
    m = A.shape[-2]
    n = A.shape[-1]
    k = min(m, n)

    if compute_uv:
        U_shape = batch_dims + [m, m if full_matrices else k]
        U = A.new_empty(U_shape)
        U.as_strided_(U_shape, make_contiguous_strides_for(U_shape, row_major=False))

        V_shape = batch_dims + [n if full_matrices else k, n]
        V = A.new_empty(V_shape)
        # NB: This checks for CUDA since there is no way to check for cuSolver.
        # Also, this might not work correctly on CPU when fake_device is not
        # available as device_hint just defaults to CUDA in that case. See
        # _linalg_svd meta in core.
        is_cuda = device_hint(A) == "cuda"
        V.as_strided_(V_shape, make_contiguous_strides_for(V_shape, row_major=is_cuda))
    else:
        # doesn't matter
        U = A.new_empty([0])
        V = A.new_empty([0])

    # S is always real, even when A is complex.
    S = A.new_empty(batch_dims + [k], dtype=toRealValueType(A.dtype))
    return U, S, V
+
+
def _linalg_broadcast_batch_dims(
    arg1: Tensor, arg2: Tensor
) -> Tuple[List[int], List[int]]:
    """Broadcast the batch (leading) dims of two batched matrices.

    Each returned size list is the common broadcast batch shape followed by
    that argument's own trailing two matrix dims.
    """
    broadcast_batch = _broadcast_shapes(arg1.shape[:-2], arg2.shape[:-2])

    size1 = list(broadcast_batch) + [arg1.size(-2), arg1.size(-1)]
    size2 = list(broadcast_batch) + [arg2.size(-2), arg2.size(-1)]
    return size1, size2
+
+
def _linalg_broadcast_batch_dims_name(
    arg1: Tensor, arg2: Tensor, name: Optional[str]
) -> Tuple[Tensor, Tensor]:
    """Like _linalg_broadcast_batch_dims but returns expanded tensors; when
    `name` is given the pair is first validated via linearSolveCheckInputs."""
    # If there's no name we assume we don't want to check the errors
    if name:
        linearSolveCheckInputs(arg1, arg2, name)

    size1, size2 = _linalg_broadcast_batch_dims(arg1, arg2)

    expanded1 = arg1.expand(size1) if size1 != arg1.shape else arg1
    expanded2 = arg2.expand(size2) if size2 != arg2.shape else arg2
    return expanded1, expanded2
+
+
def linalg_solve_is_vector_rhs(input: Tensor, other: Tensor) -> bool:
    """Return True when `other` should be treated as a (batched) vector RHS
    for a solve against `input`, per ATen's broadcasting rule."""
    if other.ndim == 1:
        return True
    batched_rhs_shape = input.shape[:-1]
    return input.ndim - 1 == other.ndim and other.shape == batched_rhs_shape
+
+
@register_meta(aten._linalg_solve_ex)
def _linalg_solve_ex(
    A: Tensor,
    B: Tensor,
    *,
    left: bool = True,
    check_errors: bool = False,
    result: Optional[Tensor] = None,
    LU: Optional[Tensor] = None,
    pivots: Optional[Tensor] = None,
    info: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
    """Meta kernel for _linalg_solve_ex.

    Validates A/B, broadcasts B against A's batch dims, and produces meta
    tensors for (result, LU, pivots, info). When all four out tensors are
    supplied they are resized/re-strided in place and the computed metadata
    is copied into them. (Removed an unused local `ndim`.)
    """
    checkFloatingOrComplex(A, "linalg.solve")
    torch._check(
        A.dtype == B.dtype,
        lambda: (
            f"linalg.solve: Expected A and B to have the same dtype, but found A of type "
            f"{A.dtype} and B of type {B.dtype} instead"
        ),
    )
    # A vector RHS is temporarily unsqueezed to a matrix for the shape checks.
    vector_case = linalg_solve_is_vector_rhs(A, B)
    B_ = B.unsqueeze(-1) if vector_case else B
    checkInputsSolver(A, B_, left, "linalg.solve")
    B_broad_shape, _ = _linalg_broadcast_batch_dims(B_, A)
    torch._check(
        left or not vector_case,
        lambda: (
            "linalg.solve: Vector broadcasting of the left hand side is not supported for left=False. "
            "In this case linalg.solve is equivalent to B / A.squeeze(-1)"
        ),
    )
    # Solution: column-major for left=True, row-major otherwise.
    result_shape = B_broad_shape[:-1] if vector_case else B_broad_shape
    result_ = torch.empty_strided(
        size=result_shape,
        stride=make_contiguous_strides_for(result_shape, not left),
        dtype=B.dtype,
        device=B.device,
    )
    shape = A.shape
    LU_ = torch.empty_strided(
        size=shape,
        stride=make_contiguous_strides_for(shape, False),
        dtype=A.dtype,
        device=A.device,
    )
    pivots_ = A.new_empty(shape[:-1], dtype=torch.int32)
    info_ = A.new_empty(shape[:-2], dtype=torch.int32)
    out = (result, LU, pivots, info)
    res = (result_, LU_, pivots_, info_)
    if all(x is not None for x in out):
        for r, o in zip(res, out):
            # resize and copy operations are done in-place
            _maybe_resize_out(o, r.shape)  # type: ignore[arg-type]
            # strides are not copied in out_wrapper
            o.as_strided_(r.shape, r.stride())  # type: ignore[union-attr]
            _safe_copy_out(copy_from=r, copy_to=o, exact_dtype=False)  # type: ignore[arg-type]
    return res
+
+
@register_meta([aten.linalg_solve_triangular.default, aten.linalg_solve_triangular.out])
def linalg_solve_triangular_meta(
    A: Tensor,
    B: Tensor,
    *,
    upper: bool,
    left: bool = True,
    unitriangular: bool = False,
    out: Optional[Tensor] = None,
) -> Tensor:
    """Meta kernel for linalg.solve_triangular.

    Validates the A/B solve pair, broadcasts their batch dims, and resizes
    `out` to B's broadcast shape. When A's conjugate transpose is already
    contiguous the kernel can reuse A's layout and a plain resize suffices;
    otherwise `out` is resized through a transpose so it ends up F-contiguous
    (mirroring the eager kernel's resize_output-with-F-contig path).
    """
    if out is None:
        out = A.new_empty([0])
    assert isinstance(out, TensorLike)
    checkInputsSolver(A, B, left, "linalg.solve_triangular")
    B_, A_ = _linalg_broadcast_batch_dims_name(B, A, None)
    avoid_copy_A = A_.transpose(-2, -1).is_contiguous() and A_.is_conj()
    if avoid_copy_A:
        out = _maybe_resize_out(out, B_.shape)
    else:
        # reimplementation of resize_output with result F-contig
        if _resize_output_check(out, B_.shape):
            out.resize_(B_.transpose(-2, -1).shape)
            out.transpose_(-2, -1)
    return out  # type: ignore[return-value]
+
+
@register_meta(aten.triangular_solve)
@out_wrapper("solution", "cloned_coefficient")
def triangular_solve_meta(
    self: Tensor,
    A: Tensor,
    upper: bool = True,
    transpose: bool = False,
    unitriangular: bool = False,
) -> Tuple[Tensor, Tensor]:
    """Meta kernel for (deprecated) torch.triangular_solve.

    Validates b (`self`) and A ranks and the solve pair, then returns
    (solution, cloned_coefficient). For strided A both outputs are
    column-major with the broadcast shapes; for sparse CSR/BSR A the
    solution mirrors b and the coefficient clone is empty.
    """
    torch._check(
        self.ndim >= 2,
        lambda: (
            f"torch.triangular_solve: Expected b to have at least 2 dimensions, "
            f"but it has {self.ndim} dimensions instead"
        ),
    )
    torch._check(
        A.ndim >= 2,
        lambda: (
            f"torch.triangular_solve: Expected A to have at least 2 dimensions, "
            f"but it has {A.ndim} dimensions instead"
        ),
    )

    linearSolveCheckInputs(self, A, "triangular_solve")

    if A.layout == torch.strided:
        self_broadcast_size, A_broadcast_size = _linalg_broadcast_batch_dims(self, A)
        solution = torch.empty_strided(
            size=self_broadcast_size,
            stride=make_contiguous_strides_for(self_broadcast_size, row_major=False),
            dtype=self.dtype,
            device=self.device,
        )
        cloned_coefficient = torch.empty_strided(
            size=A_broadcast_size,
            stride=make_contiguous_strides_for(A_broadcast_size, row_major=False),
            dtype=A.dtype,
            device=A.device,
        )
    elif A.layout == torch.sparse_csr or A.layout == torch.sparse_bsr:
        solution = torch.empty_like(self)
        cloned_coefficient = self.new_empty([0])
    else:
        torch._check(False, lambda: "triangular_solve: Got an unexpected layout.")
    return solution, cloned_coefficient
+
+
+# From aten/src/ATen/native/LinearAlgebra.cpp
@register_meta(aten._linalg_det.default)
def _linalg_det_meta(A):
    """Meta kernel for _linalg_det: det over batch dims, plus the
    column-major LU factor and int32 pivots as side outputs."""
    squareCheckInputs(A, "linalg.det")
    checkFloatingOrComplex(A, "linalg.det")

    full_shape = A.shape
    det = A.new_empty(full_shape[:-2])

    LU = A.new_empty(full_shape)
    LU.as_strided_(full_shape, make_contiguous_strides_for(full_shape, row_major=False))

    pivots = A.new_empty(full_shape[:-1], dtype=torch.int32)
    return det, LU, pivots
+
+
@register_meta(aten.ormqr)
@out_wrapper()
def ormqr(
    input: Tensor,
    tau: Tensor,
    other: Tensor,
    left: bool = True,
    transpose: bool = False,
) -> Tensor:
    """Meta kernel for torch.ormqr.

    Validates, in order: input/other ranks, the shape relations between
    `other`, `tau`, and `input` (which side of `other` is multiplied depends
    on `left`), matching batch dims, matching dtypes, and matching devices.
    Returns column-major output metadata shaped like `other`.
    """
    torch._check(
        input.ndim >= 2, lambda: "torch.ormqr: input must have at least 2 dimensions."
    )
    torch._check(
        other.ndim >= 2, lambda: "torch.ormqr: other must have at least 2 dimensions."
    )

    # The multiplied dimension of `other` depends on which side Q acts from.
    left_size_condition = -2 if left else -1
    torch._check(
        other.shape[left_size_condition] >= tau.shape[-1],
        lambda: f"torch.ormqr: other.shape[{left_size_condition}] must be greater than or equal to tau.shape[-1]",
    )
    torch._check(
        other.shape[left_size_condition] == input.shape[-2],
        lambda: f"torch.ormqr: other.shape[{left_size_condition}] must be equal to input.shape[-2]",
    )

    torch._check(
        tau.shape[-1] <= input.shape[-1],
        lambda: "torch.ormqr: tau.shape[-1] must be less than or equal to input.shape[-1]",
    )

    torch._check(
        input.ndim - tau.ndim == 1,
        lambda: (
            f"torch.ormqr: Expected tau to have one dimension less than input, "
            f"but got tau.ndim equal to {tau.ndim} and input.ndim is equal to {input.ndim}"
        ),
    )
    torch._check(
        input.ndim == other.ndim,
        lambda: (
            f"torch.ormqr: Expected other to have the same number of dimensions as input, "
            f"but got other.ndim equal to {other.ndim} and input.ndim is equal to {input.ndim}"
        ),
    )

    # Batched case: tau and other must share input's batch dims.
    if input.ndim > 2:
        expected_batch_shape = input.shape[:-2]
        actual_batch_tau_shape = tau.shape[:-1]
        torch._check(
            actual_batch_tau_shape == expected_batch_shape,
            lambda: (
                f"torch.ormqr: Expected batch dimensions of tau to be "
                f"equal to input.shape[:-2], but got {actual_batch_tau_shape}"
            ),
        )

        actual_batch_other_shape = other.shape[:-2]
        torch._check(
            actual_batch_other_shape == expected_batch_shape,
            lambda: (
                f"torch.ormqr: Expected batch dimensions of other to be "
                f"equal to input.shape[:-2], but got {actual_batch_other_shape}"
            ),
        )

    torch._check(
        tau.dtype == input.dtype,
        lambda: (
            f"torch.ormqr: Expected input and tau to have the same dtype, "
            f"but input has dtype {input.dtype} and tau has dtype {tau.dtype}"
        ),
    )
    torch._check(
        other.dtype == input.dtype,
        lambda: (
            f"torch.ormqr: Expected input and other to have the same dtype, "
            f"but input has dtype {input.dtype} and other has dtype {other.dtype}"
        ),
    )

    checkSameDevice("torch.ormqr", tau, input, "tau")
    checkSameDevice("torch.ormqr", other, input, "other")

    # Output: other's shape with column-major (LAPACK) strides.
    return torch.empty_strided(
        size=other.shape,
        stride=make_contiguous_strides_for(other.shape, row_major=False),
        dtype=other.dtype,
        device=other.device,
    )
+
+
+def _padding_check_valid_input(input, padding, *, dim):
+ torch._check(
+ len(padding) == 2 * dim,
+ lambda: f"padding size is expected to be {2 * dim}, but got: {len(padding)}",
+ )
+
+ input_dim = input.ndim
+
+ is_batch_mode = input_dim == (dim + 2)
+
+ valid_batch_mode = is_batch_mode
+ valid_non_batch_mode = not is_batch_mode
+
+ if is_batch_mode:
+ # allow batch size of 0-dim.
+ for d in range(1, input_dim):
+ valid_batch_mode = valid_batch_mode and input.size(d) != 0
+ else:
+ for d in range(0, input_dim):
+ valid_non_batch_mode = valid_non_batch_mode and input.size(d) != 0
+
+ # allow empty batch size but not other dimensions.
+ torch._check(
+ valid_batch_mode or valid_non_batch_mode,
+ lambda: (
+ f"Expected {dim + 1}D or {dim + 2}D (batch mode) tensor with possibly 0 batch size "
+ f"and other non-zero dimensions for input, but got: {input.shape}"
+ ),
+ )
+
+
def _pad1d_common(input, padding, *, is_reflection):
    """Shared meta for 1-D reflection/replication padding: validates the
    input and returns an empty output of the padded width."""
    plane_dim = 0
    width_dim = 1
    nbatch = 1

    if input.ndim == 3:
        nbatch = input.size(0)
        width_dim += 1
        plane_dim += 1

    _padding_check_valid_input(input, padding, dim=1)

    pad_l, pad_r = padding

    nplane = input.size(plane_dim)
    input_w = input.size(width_dim)
    output_w = input_w + pad_l + pad_r

    if is_reflection:
        # Reflection padding cannot exceed input extent - 1.
        torch._check(
            pad_l < input_w and pad_r < input_w,
            lambda: (
                f"Argument #4: Padding size should be less than the corresponding input dimension, "
                f"but got: padding ({pad_l}, {pad_r}) at dimension {width_dim} of input {input.shape}"
            ),
        )

    torch._check(
        output_w >= 1,
        lambda: f"input (W: {input_w}) is too small. Calculated output W: {output_w}",
    )

    if input.ndim == 2:
        return input.new_empty((nplane, output_w))
    return input.new_empty((nbatch, nplane, output_w))
+
+
@register_meta(aten.reflection_pad1d)
@out_wrapper()
def meta_reflection_pad1d(input, padding):
    """Meta kernel for reflection_pad1d; see _pad1d_common."""
    return _pad1d_common(input, padding, is_reflection=True)
+
+
@register_meta(aten.replication_pad1d)
@out_wrapper()
def meta_replication_pad1d(input, padding):
    """Meta kernel for replication_pad1d; see _pad1d_common."""
    return _pad1d_common(input, padding, is_reflection=False)
+
+
+def _pad1d_backward_common(grad_output, input, padding, *, is_reflection):
+ dim_w = 1
+ if not is_reflection:
+ torch._check(len(padding) == 2, lambda: "padding size is expected to be 2")
+
+ if input.ndim == 3:
+ dim_w += 1
+
+ pad_l, pad_r = padding
+
+ input_w = input.size(dim_w)
+ output_w = input_w + pad_l + pad_r
+
+ if is_reflection:
+ torch._check(
+ pad_l < input_w and pad_r < input_w,
+ lambda: (
+ f"Argument #4: Padding size should be less than the corresponding input dimension, "
+ f"but got: padding ({pad_l}, {pad_r}) at dimension {dim_w} of input {input.shape}"
+ ),
+ )
+
+ torch._check(
+ output_w == grad_output.size(dim_w),
+ lambda: f"grad_output width unexpected. Expected: {output_w}, Got: {grad_output.size(dim_w)}",
+ )
+
+ return input.new_empty(input.shape)
+
+
@register_meta(aten.reflection_pad1d_backward)
@out_wrapper("grad_input")
def meta_reflection_pad1d_backward(grad_output, input, padding):
    """Meta kernel for reflection_pad1d_backward; see _pad1d_backward_common."""
    return _pad1d_backward_common(grad_output, input, padding, is_reflection=True)
+
+
@register_meta(aten.replication_pad1d_backward)
@out_wrapper("grad_input")
def meta_replication_pad1d_backward(grad_output, input, padding):
    """Meta kernel for replication_pad1d_backward; see _pad1d_backward_common."""
    return _pad1d_backward_common(grad_output, input, padding, is_reflection=False)
+
+
def _pad2d_common(input, padding, *, is_reflection):
    """Shared meta for 2-D reflection/replication padding.

    Validates the padding list and input shape, additionally bounds the
    padding by the input extent for reflection, and returns an empty output
    of the padded (H, W) size — 3-D for unbatched input, 4-D for batched.
    """
    dim_w = 2
    dim_h = 1
    dim_slices = 0
    nbatch = 1

    _padding_check_valid_input(input, padding, dim=2)

    # Batched input shifts every spatial dim index by one.
    ndim = input.ndim
    if ndim == 4:
        nbatch = input.size(0)
        dim_w += 1
        dim_h += 1
        dim_slices += 1

    pad_l, pad_r, pad_t, pad_b = padding

    nplane = input.size(dim_slices)
    input_h = input.size(dim_h)
    input_w = input.size(dim_w)
    output_h = input_h + pad_t + pad_b
    output_w = input_w + pad_l + pad_r

    if is_reflection:
        # Reflection padding cannot exceed input extent - 1 in each dim.
        torch._check(
            pad_l < input_w and pad_r < input_w,
            lambda: (
                f"Argument #4: Padding size should be less than the corresponding input dimension, "
                f"but got: padding ({pad_l}, {pad_r}) at dimension {dim_w} of input {input.shape}"
            ),
        )
        torch._check(
            pad_t < input_h and pad_b < input_h,
            lambda: (
                f"Argument #6: Padding size should be less than the corresponding input dimension, "
                f"but got: padding ({pad_t}, {pad_b}) at dimension {dim_h} of input {input.shape}"
            ),
        )

    torch._check(
        output_w >= 1 or output_h >= 1,
        lambda: (
            f"input (H: {input_h} W: {input_w}) is too small. "
            f"Calculated output H: {output_h} W: {output_w}"
        ),
    )

    if input.ndim == 3:
        return input.new_empty((nplane, output_h, output_w))
    else:
        return input.new_empty((nbatch, nplane, output_h, output_w))
+
+
@register_meta(aten.reflection_pad2d)
@out_wrapper()
def meta_reflection_pad2d(input, padding):
    """Meta kernel for reflection_pad2d; see _pad2d_common."""
    return _pad2d_common(input, padding, is_reflection=True)
+
+
@register_meta(aten.replication_pad2d)
@out_wrapper()
def meta_replication_pad2d(input, padding):
    """Meta kernel for replication_pad2d; see _pad2d_common."""
    return _pad2d_common(input, padding, is_reflection=False)
+
+
@register_meta(
    [
        aten.reflection_pad2d_backward.default,
        aten.reflection_pad2d_backward.grad_input,
        aten.replication_pad2d_backward.default,
        aten.replication_pad2d_backward.grad_input,
    ]
)
@out_wrapper("grad_input")
def meta_pad2d_backward(grad_output, self, padding):
    """Meta for 2-D pad backward: grad_output must have the padded H/W of
    `self`; the gradient is an empty tensor shaped like `self`.

    (Removed the dead locals `nbatch`, `dim_plane`, and `nplane`, which were
    computed but never used.)
    """
    batched = self.dim() == 4
    dim_w = 3 if batched else 2
    dim_h = 2 if batched else 1

    pad_l, pad_r, pad_t, pad_b = padding

    self_shape = self.shape
    input_h = self_shape[dim_h]
    input_w = self_shape[dim_w]
    output_h = input_h + pad_t + pad_b
    output_w = input_w + pad_l + pad_r

    torch._check(
        output_w == grad_output.size(dim_w),
        lambda: f"grad_output width unexpected. Expected: {output_w}, Got: {grad_output.size(dim_w)}",
    )
    torch._check(
        output_h == grad_output.size(dim_h),
        lambda: f"grad_output height unexpected. Expected: {output_h}, Got: {grad_output.size(dim_h)}",
    )
    return self.new_empty(self.shape)
+
+
def _pad3d_common(input, padding, *, is_reflection):
    """Shared meta shape logic for reflection/replication pad3d.

    ``padding`` is (left, right, top, bottom, front, back). Returns an
    empty tensor with the padded spatial sizes; reflection mode also
    checks each padding against the matching input dimension.
    """
    # Dimension indices for the unbatched (4-D) case; shifted by one below
    # when a leading batch dimension is present.
    dim_w = 3
    dim_h = 2
    dim_d = 1
    dim_plane = 0

    _padding_check_valid_input(input, padding, dim=3)

    batch_mode = input.ndim == 5
    if batch_mode:
        nbatch = input.size(0)
        dim_w += 1
        dim_h += 1
        dim_d += 1
        dim_plane += 1

    pad_l, pad_r, pad_t, pad_b, pad_f, pad_bk = padding

    nplane = input.size(dim_plane)
    input_d = input.size(dim_d)
    input_h = input.size(dim_h)
    input_w = input.size(dim_w)
    # Padded forward-output sizes.
    output_d = input_d + pad_f + pad_bk
    output_h = input_h + pad_t + pad_b
    output_w = input_w + pad_l + pad_r

    if is_reflection:
        # Reflection padding must be strictly smaller than the input size
        # in every padded dimension (replication has no such limit).
        torch._check(
            pad_l < input_w and pad_r < input_w,
            lambda: (
                f"Argument #4: Padding size should be less than the corresponding input dimension, "
                f"but got: padding ({pad_l}, {pad_r}) at dimension {dim_w} of input {input.shape}"
            ),
        )
        torch._check(
            pad_t < input_h and pad_b < input_h,
            lambda: (
                f"Argument #6: Padding size should be less than the corresponding input dimension, "
                f"but got: padding ({pad_t}, {pad_b}) at dimension {dim_h} of input {input.shape}"
            ),
        )
        torch._check(
            pad_f < input_d and pad_bk < input_d,
            lambda: (
                f"Argument #8: Padding size should be less than the corresponding input dimension, "
                f"but got: padding ({pad_f}, {pad_bk}) at dimension {dim_d} of input {input.shape}"
            ),
        )

    # NOTE(review): this uses `or`, so only ONE output dim needs to be >= 1
    # for the check to pass — presumably mirrors the ATen kernel; confirm.
    torch._check(
        output_w >= 1 or output_h >= 1 or output_d >= 1,
        lambda: (
            f"input (D: {input_d} H: {input_h} W: {input_w}) is too small. "
            f"Calculated output D: {output_d} H: {output_h} W: {output_w}"
        ),
    )

    if batch_mode:
        return input.new_empty((nbatch, nplane, output_d, output_h, output_w))
    else:
        return input.new_empty((nplane, output_d, output_h, output_w))
+
+
@register_meta(aten.reflection_pad3d)
@out_wrapper()
def meta_reflection_pad3d(input, padding):
    """Meta kernel for ``aten.reflection_pad3d``: shape-only computation
    delegated to the shared 3-d padding helper in reflection mode."""
    return _pad3d_common(input, padding, is_reflection=True)
+
+
@register_meta(aten.replication_pad3d)
@out_wrapper()
def meta_replication_pad3d(input, padding):
    """Meta kernel for ``aten.replication_pad3d``: shape-only computation
    delegated to the shared 3-d padding helper in replication mode."""
    return _pad3d_common(input, padding, is_reflection=False)
+
+
@register_meta(
    [
        aten.reflection_pad3d_backward.default,
        aten.reflection_pad3d_backward.grad_input,
        aten.replication_pad3d_backward.default,
        aten.replication_pad3d_backward.grad_input,
    ]
)
@out_wrapper("grad_input")
def meta_pad3d_backward(grad_output, input, padding):
    """Meta kernel for reflection/replication pad3d backward.

    Checks ``grad_output``'s spatial sizes against the padded forward
    output implied by ``input`` and ``padding``, then returns an empty
    gradient shaped like ``input``.
    """
    torch._check(len(padding) == 6, lambda: "padding size is expected to be 6")
    # NOTE(review): these are bare asserts (stripped under -O), unlike the
    # torch._check above — intentional? They guard internal invariants.
    assert input.ndim > 3
    assert grad_output.ndim == input.ndim

    # Spatial dimension indices for a 4-D input; shifted for 5-D (batched).
    dim_w = 3
    dim_h = 2
    dim_d = 1

    if input.ndim == 5:
        dim_w += 1
        dim_h += 1
        dim_d += 1

    pad_l, pad_r, pad_t, pad_b, pad_f, pad_bk = padding

    input_d = input.size(dim_d)
    input_h = input.size(dim_h)
    input_w = input.size(dim_w)
    # Expected forward-output sizes after padding.
    output_d = input_d + pad_f + pad_bk
    output_h = input_h + pad_t + pad_b
    output_w = input_w + pad_l + pad_r

    torch._check(
        output_w == grad_output.size(dim_w),
        lambda: f"grad_output width unexpected. Expected: {output_w}, Got: {grad_output.size(dim_w)}",
    )
    torch._check(
        output_h == grad_output.size(dim_h),
        lambda: f"grad_output height unexpected. Expected: {output_h}, Got: {grad_output.size(dim_h)}",
    )
    torch._check(
        output_d == grad_output.size(dim_d),
        lambda: f"grad_output depth unexpected. Expected: {output_d}, Got: {grad_output.size(dim_d)}",
    )

    return input.new_empty(input.shape)
+
+
@register_meta(aten._pdist_forward)
@out_wrapper()
def meta__pdist_forward(self: Tensor, p: float = 2) -> Tensor:
    """Meta kernel for ``aten._pdist_forward``: an empty 1-D tensor holding
    the n*(n-1)/2 pairwise distances (length 0 when fewer than two rows)."""
    torch._check(
        self.is_contiguous(), lambda: "_pdist_forward requires contiguous input"
    )
    n = self.size(0)
    # Number of unordered row pairs; zero when there are 0 or 1 rows.
    num_pairs = n * (n - 1) // 2 if n > 1 else 0
    return self.new_empty((num_pairs,)).to(  # type: ignore[call-overload]
        memory_format=torch.legacy_contiguous_format
    )
+
+
@register_meta(aten._pdist_backward)
@out_wrapper()
def meta__pdist_backward(grad: Tensor, self: Tensor, p: float, pdist: Tensor) -> Tensor:
    """Meta kernel for ``aten._pdist_backward``: gradient shaped like self."""
    # Both the original input and the forward result must be contiguous.
    for tensor, label in ((self, "self"), (pdist, "pdist")):
        torch._check(
            tensor.is_contiguous(),
            lambda label=label: f"_pdist_backward requires {label} to be contiguous",
        )
    return torch.empty_like(self, memory_format=torch.legacy_contiguous_format)
+
+
@register_meta([aten.baddbmm.default, aten.baddbmm.out])
@out_wrapper()
def meta_baddbmm(self, batch1, batch2, *, beta=1, alpha=1):
    """Meta kernel for baddbmm: validates batch-matmul shapes and returns
    an empty (b, n, p) tensor (``self`` broadcast to that shape)."""
    # Validate ranks BEFORE indexing sizes: previously batch1.size(1) /
    # batch2.size(2) ran first, so a non-3D argument raised an opaque
    # dimension IndexError instead of the intended message below.
    torch._check(batch1.dim() == 3, lambda: "batch1 must be a 3D tensor")
    torch._check(batch2.dim() == 3, lambda: "batch2 must be a 3D tensor")
    dim1 = batch1.size(0)
    dim2 = batch1.size(1)
    dim3 = batch2.size(2)
    # Broadcast the bias to the result shape (errors if not broadcastable).
    self = self.expand((dim1, dim2, dim3))
    torch._check(
        self.dtype == batch1.dtype == batch2.dtype,
        lambda: f"Input dtypes must be the same, got: input: {self.dtype}, batch1: {batch1.dtype}, batch2: {batch2.dtype}",
    )
    batch1_sizes = batch1.shape
    batch2_sizes = batch2.shape
    bs = batch1_sizes[0]
    contraction_size = batch1_sizes[2]
    torch._check(
        batch2_sizes[0] == bs and batch2_sizes[1] == contraction_size,
        lambda: (
            f"Expected size for first two dimensions of batch2 tensor to be: "
            f"[{bs}, {contraction_size}] but got: [{batch2_sizes[0]}, {batch2_sizes[1]}]."
        ),
    )
    return self.new_empty(self.size())
+
+
@register_meta([aten.bernoulli.default, aten.bernoulli.out])
@out_wrapper()
def meta_bernoulli(self, *, generator=None):
    """Meta for bernoulli: an empty, contiguous tensor shaped like self."""
    # https://github.com/pytorch/pytorch/issues/88612
    out = torch.empty_like(self)
    return out.contiguous()
+
+
@register_meta(aten.bernoulli_.float)
def meta_bernoulli_(self, p=0.5, generator=None):
    """Meta for in-place bernoulli_: returns self unchanged (shape-only)."""
    return self
+
+
@register_meta(aten.bernoulli.p)
def meta_bernoulli_p(self, p=0.5, generator=None):
    """Meta for bernoulli.p: an empty, contiguous tensor shaped like self."""
    # https://github.com/pytorch/pytorch/issues/88612
    out = torch.empty_like(self)
    return out.contiguous()
+
+
@register_meta(aten._fused_moving_avg_obs_fq_helper.default)
def meta__fused_moving_avg_obs_fq_helper(
    self,
    observer_on,
    fake_quant_on,
    running_min,
    running_max,
    scale,
    zero_point,
    averaging_const,
    quant_min,
    quant_max,
    ch_axis,
    per_row_fake_quant=False,
    symmetric_quant=False,
):
    """Meta for _fused_moving_avg_obs_fq_helper: returns an (output, mask)
    pair shaped like self; the mask is boolean."""
    torch._check(
        ch_axis < self.dim(),
        lambda: "Error in fused_moving_avg_obs_fake_quant_cpu: ch_axis must be < self.dim()",
    )
    output = torch.empty_like(self)
    mask = torch.empty_like(self, dtype=torch.bool)
    return output, mask
+
+
@register_meta(aten.mm)
@out_wrapper()
def meta_mm(a, b):
    """Meta for mm: (n, k) @ (k, m) -> empty (n, m) tensor."""
    torch._check(a.dim() == 2, lambda: "a must be 2D")
    torch._check(b.dim() == 2, lambda: "b must be 2D")
    n, k_a = a.shape
    k_b, m = b.shape
    torch._check(
        k_a == k_b,
        lambda: f"a and b must have same reduction dim, but got [{n}, {k_a}] X [{k_b}, {m}].",
    )
    return a.new_empty(n, m)
+
+
def _compute_reduction_shape(self, dims, keepdim):
    """Shape after reducing ``self`` over ``dims``; with keepdim the reduced
    dimensions are kept as size 1, otherwise they are dropped."""
    if keepdim:
        return tuple(
            1 if d in dims else self.shape[d] for d in range(self.ndim)
        )
    return utils.compute_reduction_output_shape(self.shape, dims)
+
+
# FakeTensors carry a "fake device" even though meta kernels see their
# device as meta. Use that device when available so meta kernels whose
# behavior diverges per device are more accurate under FakeTensor.
def device_hint(tensor) -> "str":
    """Return the device type a meta kernel should assume for ``tensor``."""
    if isinstance(tensor, torch._subclasses.FakeTensor):
        return tensor.fake_device.type
    return "cuda"  # default to cuda
+
+
def calc_conv_nd_return_shape(
    input_tensor: torch.Tensor,
    weight: torch.Tensor,
    stride: Union[List[int], int],
    padding: Union[List[int], int],
    dilation: Union[List[int], int],
    is_transposed: bool,
    groups: int,
    output_padding: Optional[Union[List[int], int]] = None,
):
    """Return the output shape (as a list of ints) of an N-d convolution.

    Args:
        input_tensor: (batch, in_channels, *spatial) input.
        weight: (out_channels, in_channels // groups, *kernel) for a regular
            convolution; (in_channels, out_channels // groups, *kernel) for a
            transposed one.
        stride, padding, dilation: per-dimension values; a scalar or a
            length-1 sequence is broadcast to every spatial dimension.
        is_transposed: whether this is a transposed convolution.
        groups: number of convolution groups.
        output_padding: extra size added to each output dimension of a
            transposed convolution; broadcast like ``stride``. ``None`` (or
            an empty sequence) means zero. Ignored when not transposed.

    Raises:
        RuntimeError: if input channels do not match weight/groups for a
            regular convolution.
    """

    def _formula(ln: int, p: int, d: int, k: int, s: int) -> int:
        # Regular convolution output length for one dimension.
        # See: https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html
        return (ln + 2 * p - d * (k - 1) - 1) // s + 1

    def _formula_transposed(ln: int, p: int, d: int, k: int, s: int, op: int) -> int:
        # Transposed convolution output length for one dimension.
        # See: https://pytorch.org/docs/stable/generated/torch.nn.ConvTranspose2d.html
        return (ln - 1) * s - 2 * p + d * (k - 1) + op + 1

    def _per_dim(val, ndims):
        # Broadcast a scalar or singleton sequence to one value per dim.
        if isinstance(val, IntLike):
            return [val] * ndims
        if len(val) == 1:
            return [val[0]] * ndims
        return list(val)

    kernel_size = weight.shape[2:]
    dims = input_tensor.shape[2:]
    if is_transposed:
        out_channels = groups * weight.shape[1]
    else:
        out_channels = weight.shape[0]
        if weight.shape[1] * groups != input_tensor.shape[1]:
            raise RuntimeError("Invalid channel dimensions")

    stride = _per_dim(stride, len(dims))
    padding = _per_dim(padding, len(dims))
    dilation = _per_dim(dilation, len(dims))

    ret_shape = [input_tensor.shape[0], out_channels]
    if is_transposed:
        # BUG FIX: the formula used to be selected by the *truthiness* of
        # output_padding, so a transposed conv with output_padding=0 (or an
        # empty list) silently used the non-transposed formula. Select by
        # is_transposed and treat a missing output_padding as zeros.
        if output_padding is None or (
            not isinstance(output_padding, IntLike) and len(output_padding) == 0
        ):
            out_pad = [0] * len(dims)
        else:
            out_pad = _per_dim(output_padding, len(dims))
        for i in range(len(dims)):
            ret_shape.append(
                _formula_transposed(
                    dims[i],
                    padding[i],
                    dilation[i],
                    kernel_size[i],
                    stride[i],
                    out_pad[i],
                )
            )
    else:
        for i in range(len(dims)):
            ret_shape.append(
                _formula(dims[i], padding[i], dilation[i], kernel_size[i], stride[i])
            )

    return ret_shape
+
+
def is_channels_last(ten):
    """True if ``ten``'s suggested memory format is channels-last (NHWC)."""
    suggested = torch._prims_common.suggest_memory_format(ten)
    return suggested == torch.channels_last
+
+
@register_meta(aten.convolution.default)
def meta_conv(
    input_tensor: torch.Tensor,
    weight: torch.Tensor,
    bias: torch.Tensor,
    stride: List[int],
    padding: List[int],
    dilation: List[int],
    is_transposed: bool,
    output_padding: List[int],
    groups: int,
):
    """Meta kernel for aten.convolution: computes only the output shape and
    a device-appropriate memory format (no actual convolution)."""

    def pick_memory_format():
        # On (fake-)CUDA either operand being channels-last makes the output
        # channels-last; on other devices only the input's format counts.
        if device_hint(input_tensor) == "cuda":
            if is_channels_last(input_tensor) or is_channels_last(weight):
                return torch.channels_last
        else:
            if is_channels_last(input_tensor):
                return torch.channels_last
        if input_tensor.is_contiguous(memory_format=torch.contiguous_format):
            return torch.contiguous_format
        elif input_tensor.is_contiguous(memory_format=torch.preserve_format):
            return torch.preserve_format
        # NOTE(review): implicitly returns None when no case matches —
        # confirm .to(memory_format=None) below is intended for that path.

    shape_out = calc_conv_nd_return_shape(
        input_tensor,
        weight,
        stride,
        padding,
        dilation,
        is_transposed,
        groups,
        # output_padding only applies to transposed convolutions.
        output_padding if is_transposed else None,
    )

    out = input_tensor.new_empty(shape_out)
    out = out.to(memory_format=pick_memory_format())  # type: ignore[call-overload]
    return out
+
+
if torch._C._has_mkldnn:
    # Meta registrations for fused mkldnn/onednn/quantized ops; only
    # installed when the torch build has mkldnn support.
    _meta_lib_dont_use_me_use_register_meta_for_mkldnn = torch.library.Library(
        "mkldnn", "IMPL", "Meta"
    )

    @register_meta(torch.ops.mkldnn._convolution_pointwise.default)
    def meta_mkldnn_convolution_default(
        input_tensor,
        weight,
        bias,
        padding,
        stride,
        dilation,
        groups,
        attr,
        scalars,
        algorithm,
    ):
        # Shape-only meta for the fused mkldnn convolution; the output is
        # always produced in channels-last format here.
        shape_out = calc_conv_nd_return_shape(
            input_tensor, weight, stride, padding, dilation, False, groups, []
        )
        out = input_tensor.new_empty(shape_out)
        out_memory_format = torch.channels_last
        out = out.to(memory_format=out_memory_format)  # type: ignore[call-overload]
        return out

    @register_meta(torch.ops.mkldnn._linear_pointwise.default)
    def meta_linear_pointwise_default(
        input_tensor, weight, bias, attr, scalars, algorithm
    ):
        # Fused linear: last dim becomes weight's out_features (shape[0]).
        return input_tensor.new_empty((*input_tensor.shape[:-1], weight.shape[0]))

    if torch._C.has_mkl:
        _meta_lib_dont_use_me_use_register_meta_for_mkl = torch.library.Library(
            "mkl", "IMPL", "Meta"
        )

        @register_meta(torch.ops.mkl._mkl_linear)
        def meta_mkl_linear(
            input_tensor,
            packed_weight,
            orig_weight,
            bias,
            batch_size,
        ):
            # Output features come from the original (unpacked) weight.
            return input_tensor.new_empty(
                (*input_tensor.shape[:-1], orig_weight.shape[0])
            )

    _meta_lib_dont_use_me_use_register_meta_for_onednn = torch.library.Library(
        "onednn", "IMPL", "Meta"
    )

    @register_meta(torch.ops.onednn.qconv2d_pointwise.default)
    def meta_qconv2d_pointwise(
        x,
        x_scale,
        x_zp,
        w,  # prepacked_weight
        w_scale,
        w_zp,
        bias,
        stride,
        padding,
        dilation,
        groups,
        output_scale,
        output_zero_point,
        output_dtype,
        attr,
        scalars,
        algorithm,
    ):
        # Quantized conv: same shape math as a regular conv; output uses the
        # requested float dtype and channels-last layout.
        shape_out = calc_conv_nd_return_shape(
            x,
            w,
            stride,
            padding,
            dilation,
            False,
            groups,
            None,
        )
        assert output_dtype in [torch.float32, torch.bfloat16]
        out = x.new_empty(shape_out, dtype=output_dtype)
        out = out.to(memory_format=torch.channels_last)
        return out

    @register_meta(torch.ops.onednn.qlinear_pointwise.default)
    def meta_qlinear_pointwise(
        x,
        x_scale,
        x_zp,
        w,
        w_scale,
        w_zp,
        bias,
        output_scale,
        output_zero_point,
        output_dtype,
        post_op_name,
        post_op_args,
        post_op_algorithm,
    ):
        output_shape = list(x.shape)
        # The weight has been transposed during the qlinear weight prepack process.
        output_shape[-1] = w.shape[1]
        assert output_dtype in [torch.float32, torch.bfloat16]
        out = x.new_empty(output_shape, dtype=output_dtype)
        return out

    _meta_lib_dont_use_me_use_register_meta_for_quantized = torch.library.Library(
        "quantized", "IMPL", "Meta"
    )

    @register_meta(torch.ops.quantized.max_pool2d)
    def meta_quantized_max_pool2d(
        input,
        kernel_size,
        stride=(),
        padding=(0,),
        dilation=(1,),
        ceil_mode=False,
    ):
        # Reuse the shared max_pool2d geometry checks; the quantized output
        # is always channels-last.
        (
            nInputPlane,
            outputHeight,
            outputWidth,
        ) = max_pool2d_checks_and_compute_shape(
            input, kernel_size, stride, padding, dilation, ceil_mode
        )
        nbatch = input.size(-4) if input.dim() == 4 else 1
        memory_format = torch.channels_last
        if input.dim() == 3:
            size = [nInputPlane, outputHeight, outputWidth]
        else:
            size = [nbatch, nInputPlane, outputHeight, outputWidth]
        return torch.empty(
            size,
            dtype=input.dtype,
            device=input.device,
            memory_format=memory_format,
        )
+
+
+# from check_dim_size() in aten/src/ATen/TensorUtils.cpp.
+def check_dim_size(tensor, dim, dim_size, size):
+ torch._check(
+ tensor.dim() == dim and tensor.shape[dim_size] == size,
+ lambda: f"Expected a tensor of dimension {dim} and tensor.size[{dim_size}] == {size}, "
+ + f"but got : dimension {tensor.dim()} and tensor.size[{dim_size}] = {tensor.shape[dim_size]}",
+ )
+
+
@register_meta(aten.avg_pool2d.default)
def meta_avg_pool2d(
    input,
    kernel_size,
    stride=(),
    padding=(0,),
    ceil_mode=False,
    count_include_pad=True,
    divisor_override=None,
):
    """Meta kernel for avg_pool2d: validates the pooling geometry and
    returns an empty output with the pooled (H, W) sizes."""

    def unpack(name, val):
        # Broadcast a 1- or 2-element size argument to an (H, W) pair.
        torch._check(
            len(val) in [1, 2],
            lambda: f"avg_pool2d: {name} must either be a single int, or a tuple of two ints",
        )
        H = val[0]
        W = H if len(val) == 1 else val[1]
        return H, W

    kH, kW = unpack("kernel_size", kernel_size)
    torch._check(
        len(stride) in [0, 1, 2],
        lambda: "avg_pool2d: stride must either be omitted, a single int, or a tuple of two ints",
    )
    # An omitted stride defaults to the kernel size.
    if len(stride) == 0:
        dH, dW = kH, kW
    elif len(stride) == 1:
        dH, dW = stride[0], stride[0]
    else:
        dH, dW = unpack("stride", stride)

    padH, padW = unpack("padding", padding)

    torch._check(
        divisor_override is None or divisor_override != 0,
        lambda: "divisor must be not zero",
    )

    nbatch = input.size(-4) if input.dim() == 4 else 1
    nInputPlane = input.size(-3)
    inputHeight = input.size(-2)
    inputWidth = input.size(-1)

    outputHeight = pooling_output_shape(inputHeight, kH, padH, dH, 1, ceil_mode)
    outputWidth = pooling_output_shape(inputWidth, kW, padW, dW, 1, ceil_mode)

    # Output keeps the input's suggested memory format (e.g. channels-last).
    memory_format = utils.suggest_memory_format(input)
    pool2d_shape_check(
        input,
        kH,
        kW,
        dH,
        dW,
        padH,
        padW,
        1,  # presumably dilation H/W (avg pooling has none) — confirm
        1,
        nInputPlane,
        inputHeight,
        inputWidth,
        outputHeight,
        outputWidth,
        memory_format,
    )

    if input.dim() == 3:
        size = [nInputPlane, outputHeight, outputWidth]
    else:
        size = [nbatch, nInputPlane, outputHeight, outputWidth]
    return torch.empty(
        size,
        dtype=input.dtype,
        device=input.device,
        memory_format=memory_format,
    )
+
+
# from avg_pool2d_backward_shape_check() in aten/src/ATen/native/Pool.h.
def avg_pool2d_backward_shape_check(
    input,
    gradOutput,
    nbatch,
    kH,
    kW,
    dH,
    dW,
    padH,
    padW,
    nInputPlane,
    inputHeight,
    inputWidth,
    outputHeight,
    outputWidth,
    mem_format,
):
    """Validate avg_pool2d backward geometry: re-run the forward shape
    check, then verify gradOutput's plane/height/width sizes."""
    pool2d_shape_check(
        input,
        kH,
        kW,
        dH,
        dW,
        padH,
        padW,
        1,  # presumably dilation H/W (avg pooling has none) — confirm
        1,
        nInputPlane,
        inputHeight,
        inputWidth,
        outputHeight,
        outputWidth,
        mem_format,
    )

    ndim = input.dim()
    nOutputPlane = nInputPlane  # pooling preserves the channel count

    check_dim_size(gradOutput, ndim, ndim - 3, nOutputPlane)
    check_dim_size(gradOutput, ndim, ndim - 2, outputHeight)
    check_dim_size(gradOutput, ndim, ndim - 1, outputWidth)
+
+
# Don't override the C++ registration.
@register_meta(aten.avg_pool2d_backward.default)
def meta_avg_pool2d_backward(
    gradOutput_,
    input,
    kernel_size,
    stride,
    padding,
    ceil_mode,
    count_include_pad,
    divisor_override,
):
    """Meta kernel for avg_pool2d backward: validates the pooling geometry
    and gradOutput sizes, then returns an empty grad shaped like input."""
    # From aten/src/ATen/native/AveragePool2d.cpp structured kernel meta func.
    torch._check(
        len(kernel_size) == 1 or len(kernel_size) == 2,
        lambda: "avg_pool2d: kernel_size must either be a single int, or a tuple of two ints",
    )
    kH = kernel_size[0]
    kW = kH if len(kernel_size) == 1 else kernel_size[1]
    torch._check(
        len(stride) == 0 or len(stride) == 1 or len(stride) == 2,
        lambda: "avg_pool2d: stride must either be omitted, a single int, or a tuple of two ints",
    )
    # An omitted stride defaults to the kernel size.
    dH = kH if len(stride) == 0 else stride[0]
    dW = kW if len(stride) == 0 else dH if len(stride) == 1 else stride[1]
    torch._check(
        len(padding) == 1 or len(padding) == 2,
        lambda: "avg_pool2d: padding must either be a single int, or a tuple of two ints",
    )
    padH = padding[0]
    padW = padH if len(padding) == 1 else padding[1]

    torch._check(
        divisor_override is None or divisor_override != 0,
        lambda: "divisor must be not zero",
    )

    input_size = input.shape
    nbatch = input_size[-4] if input.dim() == 4 else 1
    nInputPlane = input_size[-3]
    inputHeight = input_size[-2]
    inputWidth = input_size[-1]

    outputHeight = pooling_output_shape(inputHeight, kH, padH, dH, 1, ceil_mode)
    outputWidth = pooling_output_shape(inputWidth, kW, padW, dW, 1, ceil_mode)

    # Gradient keeps the input's suggested memory format.
    mem_format = utils.suggest_memory_format(input)

    avg_pool2d_backward_shape_check(
        input,
        gradOutput_,
        nbatch,
        kH,
        kW,
        dH,
        dW,
        padH,
        padW,
        nInputPlane,
        inputHeight,
        inputWidth,
        outputHeight,
        outputWidth,
        mem_format,
    )

    return torch.empty(
        input_size,
        dtype=input.dtype,
        device=input.device,
        memory_format=mem_format,
    )
+
+
@register_meta(aten.avg_pool3d)
@out_wrapper()
def meta_avg_pool3d(
    input,
    kernel_size,
    stride=(),
    padding=(0,),
    ceil_mode=False,
    count_include_pad=True,
    divisor_override=None,
):
    """Meta kernel for avg_pool3d: validates the 3-D pooling geometry and
    returns an empty output with the pooled (T, H, W) sizes."""
    torch._check(
        len(kernel_size) in (1, 3),
        lambda: "avg_pool3d: kernel_size must be a single int, or a tuple of three ints",
    )
    # A single value is broadcast to all three spatial dims.
    kT = kernel_size[0]
    kH = kT if len(kernel_size) == 1 else kernel_size[1]
    kW = kT if len(kernel_size) == 1 else kernel_size[2]

    torch._check(
        not stride or len(stride) in (1, 3),
        lambda: "avg_pool3d: stride must be omitted, a single int, or a tuple of three ints",
    )
    # An omitted stride defaults to the kernel size.
    dT = kT if not stride else stride[0]
    dH = kH if not stride else (dT if len(stride) == 1 else stride[1])
    dW = kW if not stride else (dT if len(stride) == 1 else stride[2])

    torch._check(
        len(padding) in (1, 3),
        lambda: "avg_pool3d: padding must be a single int, or a tuple of three ints",
    )
    padT = padding[0]
    padH = padT if len(padding) == 1 else padding[1]
    padW = padT if len(padding) == 1 else padding[2]

    torch._check(
        input.ndim in (4, 5),
        lambda: "non-empty 4D or 5D (batch mode) tensor expected for input",
    )

    torch._check(
        not divisor_override or divisor_override != 0,
        lambda: "divisor must be not zero",
    )

    # NOTE(review): for a 4-D input, size(0) is the channel dim; nbatch is
    # only meaningful (and only used) in the 5-D branch below.
    nbatch = input.size(0)
    nslices = input.size(-4)
    itime = input.size(-3)
    iheight = input.size(-2)
    iwidth = input.size(-1)

    otime = pooling_output_shape(itime, kT, padT, dT, 1, ceil_mode)
    oheight = pooling_output_shape(iheight, kH, padH, dH, 1, ceil_mode)
    owidth = pooling_output_shape(iwidth, kW, padW, dW, 1, ceil_mode)

    pool3d_shape_check(
        input,
        nslices,
        kT,
        kH,
        kW,
        dT,
        dH,
        dW,
        padT,
        padH,
        padW,
        1,  # presumably dilation T/H/W (avg pooling has none) — confirm
        1,
        1,
        itime,
        iheight,
        iwidth,
        otime,
        oheight,
        owidth,
        "avg_pool3d()",
        check_input_size=True,
    )

    if input.ndim == 4:
        return input.new_empty((nslices, otime, oheight, owidth))
    else:
        return input.new_empty((nbatch, nslices, otime, oheight, owidth))
+
+
@register_meta(aten.avg_pool3d_backward)
@out_wrapper("grad_input")
def meta_avg_pool3d_backward(
    grad_output,
    input,
    kernel_size,
    stride,
    padding,
    ceil_mode,
    count_include_pad,
    divisor_override,
):
    """Meta kernel for avg_pool3d backward: validates the 3-D pooling
    geometry and grad_output sizes, then returns an empty grad shaped
    like input."""
    torch._check(
        len(kernel_size) in (1, 3),
        lambda: "avg_pool3d: kernel_size must be a single int, or a tuple of three ints",
    )
    # A single value is broadcast to all three spatial dims.
    kT = kernel_size[0]
    kH = kT if len(kernel_size) == 1 else kernel_size[1]
    kW = kT if len(kernel_size) == 1 else kernel_size[2]

    torch._check(
        not stride or len(stride) in (1, 3),
        lambda: "avg_pool3d: stride must be omitted, a single int, or a tuple of three ints",
    )
    # An omitted stride defaults to the kernel size.
    dT = kT if not stride else stride[0]
    dH = kH if not stride else (dT if len(stride) == 1 else stride[1])
    dW = kW if not stride else (dT if len(stride) == 1 else stride[2])

    torch._check(
        len(padding) in (1, 3),
        lambda: "avg_pool3d: padding must be a single int, or a tuple of three ints",
    )
    padT = padding[0]
    padH = padT if len(padding) == 1 else padding[1]
    padW = padT if len(padding) == 1 else padding[2]

    torch._check(
        input.ndim in (4, 5),
        lambda: "non-empty 4D or 5D (batch mode) tensor expected for input",
    )

    torch._check(
        not divisor_override or divisor_override != 0,
        lambda: "divisor must be not zero",
    )

    nslices = input.size(-4)
    itime = input.size(-3)
    iheight = input.size(-2)
    iwidth = input.size(-1)

    # Forward output sizes implied by the geometry; used only to validate
    # grad_output's shape below.
    otime_for_shape_check = pooling_output_shape(itime, kT, padT, dT, 1, ceil_mode)
    oheight_for_shape_check = pooling_output_shape(iheight, kH, padH, dH, 1, ceil_mode)
    owidth_for_shape_check = pooling_output_shape(iwidth, kW, padW, dW, 1, ceil_mode)

    avg_pool3d_backward_shape_check(
        input,
        grad_output,
        nslices,
        kT,
        kH,
        kW,
        dT,
        dH,
        dW,
        padT,
        padH,
        padW,
        itime,
        iheight,
        iwidth,
        otime_for_shape_check,
        oheight_for_shape_check,
        owidth_for_shape_check,
        "avg_pool3d_backward()",
    )

    return input.new_empty(input.shape)
+
+
@register_meta(aten._adaptive_avg_pool2d.default)
def meta_adaptive_avg_pool2d(self, output_size):
    """Meta for adaptive_avg_pool2d: the last two dims become output_size;
    the input's suggested memory format (e.g. channels-last) is kept."""
    torch._check(
        self.ndim in (3, 4),
        lambda: f"Expected 3D or 4D tensor, but got {self.shape}",
    )
    return torch.empty(
        self.shape[:-2] + tuple(output_size),
        dtype=self.dtype,
        device=self.device,
        # channel-last input should have channel-last output
        memory_format=utils.suggest_memory_format(self),
    )
+
+
@register_meta(aten._adaptive_avg_pool3d.default)
def meta_adaptive_avg_pool3d(self, output_size):
    """Meta for adaptive_avg_pool3d: the last three dims become output_size."""
    torch._check(
        self.ndim in (4, 5),
        lambda: f"Expected 4D or 5D tensor, but got {self.shape}",
    )
    pooled_shape = self.shape[:-3] + tuple(output_size)
    return self.new_empty(pooled_shape)
+
+
@register_meta(aten._adaptive_avg_pool2d_backward.default)
def meta__adaptive_avg_pool2d_backward(grad_out, self):
    """Meta for adaptive_avg_pool2d backward: empty grad shaped like self,
    channels-last when self suggests channels-last."""
    # Use the shared empty-dimension check (same as the 3d backward).
    # The previous inline loop built its message with a backslash
    # continuation inside the f-string, baking stray indentation
    # whitespace into the error text.
    _adaptive_pool_empty_output_check(grad_out, "adaptive_avg_pool2d_backward")
    ndim = grad_out.ndim
    torch._check(
        ndim == 3 or ndim == 4,
        lambda: f"adaptive_avg_pool2d_backward(): Expected 3D or 4D tensor, but got {self.shape}",
    )
    torch._check(
        self.dtype == grad_out.dtype,
        lambda: f"expected dtype {self.dtype} for `grad_output` but got dtype {grad_out.dtype}",
    )
    memory_format = torch.contiguous_format
    if is_channels_last(self):
        memory_format = torch.channels_last
    return self.new_empty(self.shape).to(memory_format=memory_format)
+
+
@register_meta(aten._adaptive_avg_pool3d_backward)
@out_wrapper("grad_input")
def meta__adaptive_avg_pool3d_backward(grad_output, self):
    """Meta for adaptive_avg_pool3d backward: legacy-contiguous empty grad
    shaped like self."""
    _adaptive_pool_empty_output_check(grad_output, "adaptive_avg_pool3d_backward")
    return torch.empty_like(self, memory_format=torch.legacy_contiguous_format)
+
+
def _adaptive_pool_empty_output_check(grad_output: Tensor, arg_name: str):
    """Check that every non-batch dimension of ``grad_output`` is non-empty;
    ``arg_name`` names the calling op in the error message."""
    for dim in range(1, grad_output.ndim):
        torch._check(
            grad_output.size(dim) > 0,
            lambda dim=dim: (
                f"{arg_name}(): Expected grad_output to have non-zero size for non-batch dimensions, "
                f"but grad_output has sizes {grad_output.shape} with dimension {dim} being empty"
            ),
        )
+
+
@register_meta(aten.adaptive_max_pool2d)
@out_wrapper("out", "indices")
def meta_adaptive_max_pool2d(input, output_size):
    """Meta for adaptive_max_pool2d: returns (values, int64 indices), both
    with the pooled shape; batched outputs keep the suggested format."""
    ndim = input.ndim
    torch._check(
        ndim in (3, 4),
        lambda: f"adaptive_max_pool2d(): Expected 3D or 4D tensor, but got: {input.shape}",
    )
    for dim in range(1, ndim):
        torch._check(
            input.size(dim) > 0,
            lambda dim=dim: (
                f"adaptive_max_pool2d(): Expected input to have non-zero size for non-batch dimensions, "
                f"but input has sizes {input.shape} with dimension {dim} being empty"
            ),
        )

    torch._check(
        len(output_size) == 2,
        lambda: "adaptive_max_pool2d(): internal error: output_size.size() must be 2",
    )

    osizeH, osizeW = output_size

    if ndim == 3:
        channels = input.size(0)
        pooled = (channels, osizeH, osizeW)
        values = input.new_empty(pooled)
        indices = input.new_empty(pooled, dtype=torch.int64)
        return values, indices

    batch = input.size(0)
    channels = input.size(1)
    pooled = (batch, channels, osizeH, osizeW)
    memory_format = utils.suggest_memory_format(input)
    values = input.new_empty(pooled).to(memory_format=memory_format)
    indices = input.new_empty(pooled, dtype=torch.int64).to(
        memory_format=memory_format
    )
    return values, indices
+
+
@register_meta(aten.adaptive_max_pool2d_backward)
@out_wrapper("grad_input")
def meta_adaptive_max_pool2d_backward(grad_output, input, indices):
    """Meta for adaptive_max_pool2d backward: empty grad shaped/formatted
    like input."""
    torch._check(
        grad_output.ndim in (3, 4),
        lambda: f"adaptive_max_pooling2d_backward(): Expected 3D or 4D grad_output, but got: {grad_output.shape}",
    )
    _adaptive_pool_empty_output_check(grad_output, "adaptive_max_pool2d_backward")
    torch._check(
        input.dtype == grad_output.dtype,
        lambda: f"expected dtype {input.dtype} for `grad_output` but got dtype {grad_output.dtype}",
    )
    fmt = utils.suggest_memory_format(input)
    return input.new_empty(input.shape).to(memory_format=fmt)
+
+
@register_meta(aten.adaptive_max_pool3d)
@out_wrapper("out", "indices")
def meta_adaptive_max_pool3d(input, output_size):
    """Meta for adaptive_max_pool3d: returns (values, int64 indices) with
    the pooled (T, H, W) shape."""
    ndim = input.ndim
    torch._check(
        ndim in (4, 5),
        lambda: f"adaptive_max_pool3d(): Expected 4D or 5D tensor, but got: {input.shape}",
    )
    for dim in range(1, ndim):
        torch._check(
            input.size(dim) > 0,
            lambda dim=dim: (
                f"adaptive_max_pool3d(): Expected input to have non-zero size for non-batch dimensions, "
                f"but input has sizes {input.shape} with dimension {dim} being empty"
            ),
        )

    torch._check(
        len(output_size) == 3,
        lambda: "adaptive_max_pool3d(): internal error: output_size.size() must be 3",
    )

    osizeT, osizeH, osizeW = output_size
    if ndim == 4:
        pooled = (input.size(0), osizeT, osizeH, osizeW)
    else:
        pooled = (input.size(0), input.size(1), osizeT, osizeH, osizeW)

    values = input.new_empty(pooled)
    indices = input.new_empty(pooled, dtype=torch.int64)
    return values, indices
+
+
@register_meta(aten.adaptive_max_pool3d_backward)
@out_wrapper("grad_input")
def meta_adaptive_max_pool3d_backward(grad_output, input, indices):
    """Meta for adaptive_max_pool3d backward: empty grad shaped like input."""
    _adaptive_pool_empty_output_check(grad_output, "adaptive_max_pool3d_backward")
    return input.new_empty(input.shape)
+
+
@register_meta(aten.repeat_interleave.Tensor)
def meta_repeat_interleave_Tensor(repeats, output_size=None):
    """Meta for repeat_interleave.Tensor: the result length cannot be
    derived from a meta tensor, so an explicit output_size is required."""
    if output_size is not None:
        return repeats.new_empty(output_size)
    raise RuntimeError("cannot repeat_interleave a meta tensor without output_size")
+
+
@register_meta([aten.complex.default, aten.complex.out])
@out_wrapper()
def meta_complex(real, imag):
    """Meta for complex(real, imag): broadcast shape, complex dtype."""
    assert real.dtype.is_floating_point
    assert imag.dtype.is_floating_point
    broadcast = _broadcast_shapes(real.shape, imag.shape)
    complex_dtype = corresponding_complex_dtype(real.dtype)
    return real.new_empty(broadcast, dtype=complex_dtype)
+
+
@register_meta([aten.nonzero_static.default, aten.nonzero_static.out])
@out_wrapper()
def nonzero_static(self, *, size: int, fill_value: int = -1):
    """Meta for nonzero_static: always a fixed (size, ndim) int64 result."""
    out_shape = (size, self.dim())
    return self.new_empty(out_shape, dtype=torch.long)
+
+
@register_meta([aten.index.Tensor, aten._unsafe_index.Tensor])
def meta_index_Tensor(self, indices):
    """Meta kernel for advanced indexing (aten::index): computes the
    result shape from the (possibly mixed null / tensor) index list.

    Mirrors ATen's checkIndexTensorTypes/expandTensors + AdvancedIndex
    shape logic: byte/bool masks are expanded to long index tensors, the
    index tensors are broadcast together, and the broadcast shape replaces
    the indexed dimensions (moved to the front when the non-null indices
    are not adjacent).
    """
    torch._check(bool(indices), lambda: "at least one index must be provided")
    # aten::index is the internal advanced indexing implementation
    # checkIndexTensorTypes and expandTensors
    result: List[Optional[Tensor]] = []
    for i, index in enumerate(indices):
        if index is not None:
            torch._check(
                index.dtype in [torch.long, torch.int, torch.int8, torch.bool],
                lambda: "tensors used as indices must be long, int, byte or bool tensors",
            )
            if index.dtype in [torch.int8, torch.bool]:
                # A byte/bool mask expands into one long index tensor per
                # mask dimension (its nonzero coordinates).
                nonzero = index.nonzero()
                k = len(result)
                torch._check_index(
                    k + index.ndim <= self.ndim,
                    lambda: f"too many indices for tensor of dimension {self.ndim}",
                )
                for j in range(index.ndim):
                    torch._check_index(
                        index.shape[j] == self.shape[k + j],
                        lambda: f"The shape of the mask {index.shape} at index {i} "
                        f"does not match the shape of the indexed tensor {self.shape} at index {k + j}",
                    )
                    result.append(nonzero.select(1, j))
            else:
                result.append(index)
        else:
            result.append(index)
    indices = result
    torch._check(
        len(indices) <= self.ndim,
        lambda: f"too many indices for tensor of dimension {self.ndim} (got {len(indices)})",
    )
    # expand_outplace
    import torch._refs as refs  # avoid import cycle in mypy

    indices = list(refs._maybe_broadcast(*indices))
    # add missing null tensors
    while len(indices) < self.ndim:
        indices.append(None)

    # hasContiguousSubspace
    # true if all non-null tensors are adjacent
    # See:
    # https://numpy.org/doc/stable/user/basics.indexing.html#combining-advanced-and-basic-indexing
    # https://stackoverflow.com/questions/53841497/why-does-numpy-mixed-basic-advanced-indexing-depend-on-slice-adjacency
    # State machine: 0 = before the non-null run, 1 = inside it, 2 = after
    # it; a second non-null run breaks out (subspace not contiguous).
    state = 0
    has_contiguous_subspace = False
    for index in indices:
        if state == 0:
            if index is not None:
                state = 1
        elif state == 1:
            if index is None:
                state = 2
        else:
            if index is not None:
                break
    else:
        # for/else: no break means a single contiguous run (or none).
        has_contiguous_subspace = True

    # transposeToFront
    # This is the logic that causes the newly inserted dimensions to show up
    # at the beginning of the tensor, if they're not contiguous
    if not has_contiguous_subspace:
        dims = []
        transposed_indices = []
        for i, index in enumerate(indices):
            if index is not None:
                dims.append(i)
                transposed_indices.append(index)
        for i, index in enumerate(indices):
            if index is None:
                dims.append(i)
                transposed_indices.append(index)
        self = self.permute(dims)
        indices = transposed_indices

    # AdvancedIndex::AdvancedIndex
    # Now we can assume the indices have contiguous subspace
    # This is simplified from AdvancedIndex which goes to more effort
    # to put the input and indices in a form so that TensorIterator can
    # take them. If we write a ref for this, probably that logic should
    # get implemented
    before_shape: List[int] = []
    after_shape: List[int] = []
    replacement_shape: List[int] = []
    for dim, index in enumerate(indices):
        if index is None:
            if replacement_shape:
                after_shape.append(self.shape[dim])
            else:
                before_shape.append(self.shape[dim])
        else:
            replacement_shape = list(index.shape)
    return self.new_empty(before_shape + replacement_shape + after_shape)
+
+
@register_meta([aten.convolution_backward.default])
def meta_convolution_backward(
    grad_output_,
    input_,
    weight_,
    bias_sizes_opt,
    stride,
    padding,
    dilation,
    transposed,
    output_padding,
    groups,
    output_mask,
):
    """Meta kernel for convolution_backward: allocate gradient placeholders.

    High level logic taken from slow_conv3d_backward_cpu, which should be
    representative of all convolution_backward impls.  Each gradient is only
    materialized (as an empty tensor shaped like the corresponding input)
    when its ``output_mask`` entry is set; otherwise ``None`` is returned.
    """
    grad_input = grad_output_.new_empty(input_.size()) if output_mask[0] else None
    grad_weight = grad_output_.new_empty(weight_.size()) if output_mask[1] else None
    grad_bias = grad_output_.new_empty(bias_sizes_opt) if output_mask[2] else None

    return (grad_input, grad_weight, grad_bias)
+
+
@register_meta([aten.addbmm.default, aten.addbmm.out])
@out_wrapper()
def meta_addbmm(self, batch1, batch2, *, beta=1, alpha=1):
    """Meta kernel for addbmm: validate batch shapes, return result placeholder."""
    out_rows = batch1.size(1)
    out_cols = batch2.size(2)
    self = self.expand((out_rows, out_cols))
    torch._check(batch1.dim() == 3, lambda: "batch1 must be a 3D tensor")
    torch._check(batch2.dim() == 3, lambda: "batch2 must be a 3D tensor")
    torch._check(
        batch1.size(0) == batch2.size(0),
        lambda: f"batch1 and batch2 must have same number of batches, got {batch1.size(0)} and {batch2.size(0)}",
    )
    torch._check(
        batch1.size(2) == batch2.size(1),
        lambda: (
            f"Incompatible matrix sizes for bmm ({batch1.size(1)}x{batch1.size(2)} "
            f"and {batch2.size(1)}x{batch2.size(2)})"
        ),
    )
    torch._check(
        self.size(0) == out_rows and self.size(1) == out_cols,
        lambda: "self tensor does not match matmul output shape",
    )
    return self.new_empty(self.size())
+
+
def register_meta_foreach(ops):
    """Decorator factory registering a meta fn for a pytree of foreach ops.

    For every op in ``ops`` the matching scalar op (the foreach op name with
    the ``_foreach_`` prefix stripped) is looked up on ``aten`` and bound to
    the wrapped function via the ``_scalar_op`` keyword.
    """

    def wrapper(fn):
        def register(op):
            base_name = str(op).split(".")[1].replace("_foreach_", "")
            bound_fn = partial(fn, _scalar_op=getattr(aten, base_name))
            _add_op_to_registry(meta_table, op, bound_fn)

        pytree.tree_map_(register, ops)
        return fn

    return wrapper
+
+
@register_meta_foreach(
    [
        aten._foreach_abs,
        aten._foreach_acos,
        aten._foreach_asin,
        aten._foreach_atan,
        aten._foreach_ceil,
        aten._foreach_cos,
        aten._foreach_cosh,
        aten._foreach_erf,
        aten._foreach_erfc,
        aten._foreach_exp,
        aten._foreach_expm1,
        aten._foreach_frac,
        aten._foreach_floor,
        aten._foreach_lgamma,
        aten._foreach_log,
        aten._foreach_log10,
        aten._foreach_log1p,
        aten._foreach_log2,
        aten._foreach_neg,
        aten._foreach_reciprocal,
        aten._foreach_round,
        aten._foreach_sigmoid,
        aten._foreach_sign,
        aten._foreach_sin,
        aten._foreach_sinh,
        aten._foreach_sqrt,
        aten._foreach_tan,
        aten._foreach_tanh,
        aten._foreach_trunc,
        aten._foreach_zero,
        aten._foreach_add,
        aten._foreach_sub,
        aten._foreach_mul,
        aten._foreach_div,
        aten._foreach_clamp_min,
        aten._foreach_clamp_max,
        aten._foreach_lerp,
    ],
)
def _meta_foreach_out_of_place(*args, _scalar_op=None, **kwargs):
    """Generic meta for out-of-place foreach ops.

    Validates the leading tensor lists (and any trailing scalar tensors),
    then applies ``_scalar_op`` elementwise across the lists, returning one
    result per tensor in the first list.
    """
    torch._check(
        isinstance(args[0], list),
        lambda: (f"The first argument must be List[Tensor], but got {type(args[0])}."),
    )

    nelem = len(args[0])
    torch._check(
        nelem > 0,
        lambda: ("Tensor list must have at least one tensor."),
    )

    # Count how many leading arguments are tensor lists; they must all match
    # the first list's length.  Scalar (0-dim) tensors are validated but do
    # not count as lists; the first non-list, non-tensor arg ends the scan.
    nlists = 1
    for iarg, arg in enumerate(args[1:]):
        if isinstance(arg, list):
            nlists += 1
            torch._check(
                len(arg) == nelem,
                lambda: (
                    f"self and argument-{iarg+2} must match in length, "
                    f"but got {nelem} and {len(arg)}."
                ),
            )
        elif isinstance(arg, Tensor):
            torch._check(
                arg.dim() == 0 and arg.numel() == 1,
                lambda: (
                    "scalar tensor expected to be 0 dim but it has "
                    f"{arg.dim()} dimensions and {arg.numel()} elements."
                ),
            )
        else:
            break

    trailing = args[nlists:]
    return [
        _scalar_op(*(args[i][elem] for i in range(nlists)), *trailing, **kwargs)
        for elem in range(nelem)
    ]
+
+
@register_meta_foreach(
    [
        aten._foreach_abs_,
        aten._foreach_acos_,
        aten._foreach_asin_,
        aten._foreach_atan_,
        aten._foreach_ceil_,
        aten._foreach_cos_,
        aten._foreach_cosh_,
        aten._foreach_erf_,
        aten._foreach_erfc_,
        aten._foreach_exp_,
        aten._foreach_expm1_,
        aten._foreach_frac_,
        aten._foreach_floor_,
        aten._foreach_lgamma_,
        aten._foreach_log_,
        aten._foreach_log10_,
        aten._foreach_log1p_,
        aten._foreach_log2_,
        aten._foreach_neg_,
        aten._foreach_reciprocal_,
        aten._foreach_round_,
        aten._foreach_sigmoid_,
        aten._foreach_sign_,
        aten._foreach_sin_,
        aten._foreach_sinh_,
        aten._foreach_sqrt_,
        aten._foreach_tan_,
        aten._foreach_tanh_,
        aten._foreach_trunc_,
        aten._foreach_zero_,
        aten._foreach_add_,
        aten._foreach_sub_,
        aten._foreach_mul_,
        aten._foreach_div_,
        aten._foreach_clamp_min_,
        aten._foreach_clamp_max_,
        aten._foreach_lerp_,
        aten._foreach_copy_,
    ]
)
def _meta_foreach_inplace(*args, _scalar_op=None, **kwargs):
    """Generic meta for in-place foreach ops.

    Runs the same validation/shape propagation as the out-of-place variant,
    then discards the allocated results: in-place ops return nothing.
    """
    _meta_foreach_out_of_place(*args, _scalar_op=_scalar_op, **kwargs)
    return None
+
+
@register_meta([aten._foreach_pow.ScalarAndTensor])
def meta__foreach_pow_scalar_and_tensor(self, exponent):
    # Only foreach_pow has a ScalarAndTensor method and needs special
    # handling because it does not work with _meta_foreach_out_of_place.
    torch._check(
        isinstance(exponent, List),
        lambda: f"exponent must be a tensor list but got {type(exponent)}",
    )
    outputs = []
    for exp_tensor in exponent:
        outputs.append(torch.empty_like(exp_tensor))
    return outputs
+
+
+def _check_foreach_binop_tensor_lists(self, other):
+ torch._check(
+ isinstance(self, List) and isinstance(other, List),
+ lambda: (
+ "The first two arguments of must be List[Tensor], "
+ f"but got {type(self)} and {type(other)}."
+ ),
+ )
+ torch._check(
+ len(self) > 0 and len(self) == len(other),
+ lambda: (
+ "self and other must be non-empty and match in length, "
+ f"but got {len(self)} and {len(other)}."
+ ),
+ )
+
+
@register_meta(
    [
        aten._foreach_maximum,
        aten._foreach_minimum,
    ]
)
def meta__foreach_binop_scalar(*args):
    # aten.maximum(Tensor, Scalar) does not exist.
    # For meta purposes only shapes/dtypes matter, so clamp_min stands in
    # for both maximum and minimum.
    return _meta_foreach_out_of_place(*args, _scalar_op=aten.clamp_min)
+
+
@register_meta(
    [
        aten._foreach_maximum_,
        aten._foreach_minimum_,
    ]
)
def meta__foreach_binop__scalar(*args):
    # aten.maximum(Tensor, Scalar) does not exist
    # In-place variant: validation only (via clamp_min_), nothing returned.
    _meta_foreach_inplace(*args, _scalar_op=aten.clamp_min_)
    return
+
+
@register_meta(
    [
        aten._foreach_addcdiv.Scalar,
        aten._foreach_addcmul.Scalar,
    ]
)
def meta__foreach_addcop_scalar(self, tensor1, tensor2, scalar=1):
    # forach_addcdiv and addcdiv have different signatures and
    # cannot use _meta_foreach_out_of_place.
    torch._check(
        all(isinstance(l, List) for l in [self, tensor1, tensor2]),
        lambda: (
            "All arguments must be List[Tensor], "
            f"but got {type(self)}, {type(tensor1)}, and {type(tensor2)}"
        ),
    )
    torch._check(len(self) > 0, lambda: "input tensor list must not be empty.")
    torch._check(
        len(self) == len(tensor1) and len(self) == len(tensor2),
        lambda: "All input tensor lists must have the same length",
    )

    outputs = []
    for src in self:
        outputs.append(torch.empty_like(src))
    return outputs
+
+
@register_meta([aten._foreach_addcdiv_.Tensor, aten._foreach_addcmul_.Tensor])
def meta__foreach_addcop_tensor(self, tensor1, tensor2, scalars):
    """Meta for in-place addcdiv_/addcmul_ (Tensor-scalars overload).

    Validation only — the in-place op mutates ``self``, so nothing is
    returned or allocated here.
    """
    args_are_valid = all(
        isinstance(l, List) for l in [self, tensor1, tensor2]
    ) and isinstance(scalars, torch.Tensor)
    torch._check(
        args_are_valid,
        lambda: (
            "_foreach_addc*_ op expects arguments of type: List[Tensor], List[Tensor], List[Tensor], tensor, "
            f"but got: {type(self)}, {type(tensor1)}, {type(tensor2)}, and {type(scalars)}"
        ),
    )
    torch._check(len(self) > 0, lambda: "input tensor list must not be empty.")
    torch._check(
        len(self) == len(tensor1) and len(self) == len(tensor2),
        lambda: "All input tensor lists must have the same length",
    )
+
+
@register_meta(
    [
        aten._foreach_addcdiv_.Scalar,
        aten._foreach_addcmul_.Scalar,
    ]
)
def meta__foreach_addcop__scalar(self, tensor1, tensor2, scalar=1):
    """Meta for in-place addcdiv_/addcmul_ (Scalar overload): checks only."""
    all_lists = all(isinstance(l, List) for l in [self, tensor1, tensor2])
    torch._check(
        all_lists,
        lambda: (
            "All arguments of _foreach_addc*_ must be List[Tensor], "
            f"but got {type(self)}, {type(tensor1)}, and {type(tensor2)}"
        ),
    )
    torch._check(len(self) > 0, lambda: "input tensor list must not be empty.")
    torch._check(
        len(self) == len(tensor1) and len(self) == len(tensor2),
        lambda: "All input tensor lists must have the same length",
    )
+
+
@register_meta([aten._fused_adam_.default])
def meta__fused_adam_(
    self,
    grads,
    exp_avgs,
    exp_avg_sqs,
    max_exp_avg_sqs,
    state_steps,
    *,
    lr,
    beta1,
    beta2,
    weight_decay,
    eps,
    amsgrad,
    maximize,
    grad_scale=None,
    found_inf=None,
):
    """Meta kernel for the in-place fused Adam step.

    Only validates that every positional state argument is a tensor list;
    the in-place op mutates its inputs, so nothing is allocated or returned.
    """
    for l in [self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps]:
        torch._check(
            isinstance(l, List),
            # Fixed message: it previously said "exponent must be a tensor
            # list", copy-pasted from the foreach_pow meta.
            lambda: f"_fused_adam_ expects its state args to be tensor lists, but got {type(l)}",
        )
+
+
@register_meta([aten._fused_adam.default])
def meta__fused_adam(
    self,
    grads,
    exp_avgs,
    exp_avg_sqs,
    max_exp_avg_sqs,
    state_steps,
    *,
    lr,
    beta1,
    beta2,
    weight_decay,
    eps,
    amsgrad,
    maximize,
    grad_scale=None,
    found_inf=None,
):
    """Meta kernel for the functional fused Adam step.

    Validates that every positional state argument is a tensor list, then
    returns like-shaped placeholders for params, grads and moment buffers.
    """
    for l in [self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps]:
        torch._check(
            isinstance(l, List),
            # Fixed message: it previously said "exponent must be a tensor
            # list", copy-pasted from the foreach_pow meta.
            lambda: f"_fused_adam expects its state args to be tensor lists, but got {type(l)}",
        )

    def empty_like_list(tensor_list):
        # One fresh placeholder per input tensor, preserving metadata.
        return [torch.empty_like(t) for t in tensor_list]

    return (
        empty_like_list(self),
        empty_like_list(grads),
        empty_like_list(exp_avgs),
        empty_like_list(exp_avg_sqs),
        empty_like_list(max_exp_avg_sqs),
    )
+
+
@register_meta([aten._int_mm])
@out_wrapper()
def meta__int_mm(a, b):
    """Meta for _int_mm: int8 x int8 matrix multiply with int32 result."""
    torch._check(a.dim() == 2, lambda: "a must be a 2D tensor")
    torch._check(b.dim() == 2, lambda: "b must be a 2D tensor")
    torch._check(
        a.dtype is torch.int8,
        lambda: f"expected self to be int8, got {a.dtype}",
    )
    torch._check(
        b.dtype is torch.int8,
        lambda: f"expected mat2 to be int8, got {b.dtype}",
    )
    torch._check(
        a.size(1) == b.size(0),
        lambda: (
            f"Incompatible matrix sizes for _int_mm ({a.size(0)}x{a.size(1)} "
            f"and {b.size(0)}x{b.size(1)})"
        ),
    )
    out_shape = (a.size(0), b.size(1))
    return a.new_empty(out_shape, dtype=torch.int32)
+
+
@register_meta([aten._convert_weight_to_int4pack])
def meta__convert_weight_to_int4pack(w, inner_k_tiles):
    """Meta for int4 weight packing: (n, k) int32 -> tiled int32 layout."""
    torch._check(w.dim() == 2, lambda: "w must be a 2D tensor")
    torch._check(
        w.dtype is torch.int32,
        lambda: f"expected w to be int32, got {w.dtype}",
    )
    n, k = w.size(0), w.size(1)
    packed_shape = (
        n // 8,
        k // (inner_k_tiles * 16),
        32,
        inner_k_tiles // 2,
    )
    return w.new_empty(packed_shape, dtype=torch.int32)
+
+
@register_meta([aten._weight_int4pack_mm])
def meta__weight_int4pack_mm(x, w, q_group_size, q_scale_and_zeros):
    """Meta for int4-packed matmul: bf16 activations x packed int32 weight."""
    torch._check(x.dim() == 2, lambda: "x must be a 2D tensor")
    torch._check(w.dim() == 4, lambda: "w must be a 4D tensor")
    torch._check(
        x.dtype is torch.bfloat16,
        lambda: f"expected x to be bf16, got {x.dtype}",
    )
    torch._check(
        w.dtype is torch.int32,
        lambda: f"expected w to be int32, got {w.dtype}",
    )
    # Output columns recover 8 packed values per leading tile of w.
    out_cols = w.size(0) * 8
    return x.new_empty(x.size(0), out_cols, dtype=x.dtype)
+
+
@register_meta(aten._cdist_forward.default)
def meta_cdist_forward(x1, x2, p, compute_mode):
    """Meta for _cdist_forward: pairwise distances between rows of x1/x2.

    Output shape is broadcast(batch(x1), batch(x2)) + [r1, r2], where
    r1/r2 are the second-to-last dims of x1/x2.
    """
    torch._check(
        x1.dim() >= 2,
        lambda: f"cdist only supports at least 2D tensors, X1 got: {x1.dim()}D",
    )
    torch._check(
        x2.dim() >= 2,
        lambda: f"cdist only supports at least 2D tensors, X2 got: {x2.dim()}D",
    )
    torch._check(
        x1.size(-1) == x2.size(-1),
        lambda: f"X1 and X2 must have the same number of columns. X1: {x1.size(-1)} X2: {x2.size(-1)}",
    )
    # BUG FIX: the next two messages were plain strings, so "{x1.dtype}" /
    # "{x2.dtype}" were printed literally instead of the offending dtype.
    torch._check(
        utils.is_float_dtype(x1.dtype),
        lambda: f"cdist only supports floating-point dtypes, X1 got: {x1.dtype}",
    )
    torch._check(
        utils.is_float_dtype(x2.dtype),
        lambda: f"cdist only supports floating-point dtypes, X2 got: {x2.dtype}",
    )
    torch._check(p >= 0, lambda: "cdist only supports non-negative p values")
    torch._check(
        compute_mode in (None, 1, 2),
        lambda: f"possible modes: None, 1, 2, but was: {compute_mode}",
    )
    r1 = x1.size(-2)
    r2 = x2.size(-2)
    batch_tensor1 = x1.shape[:-2]
    batch_tensor2 = x2.shape[:-2]
    output_shape = list(torch.broadcast_shapes(batch_tensor1, batch_tensor2))
    output_shape.extend([r1, r2])
    return x1.new_empty(output_shape)
+
+
@register_meta(aten._cdist_backward)
@out_wrapper()
def meta_cdist_backward(grad, x1, x2, p, cdist):
    """Meta for _cdist_backward: gradient shaped like x1 (possibly expanded)."""
    c1 = x1.shape[-1]
    r1 = x1.shape[-2]
    r2 = x2.shape[-2]
    expand_batch_portion = list(
        torch.broadcast_shapes(x1.shape[:-2], x2.shape[:-2])
    )
    tensor1_expand_size = [*expand_batch_portion, r1, c1]
    batch_product = math.prod(expand_batch_portion)
    # Degenerate extents: gradient is simply zeros in x1's original shape.
    if r1 == 0 or r2 == 0 or c1 == 0 or batch_product == 0:
        return torch.zeros_like(x1)
    if tensor1_expand_size != list(x1.shape):
        x1 = x1.expand(tensor1_expand_size)
    return torch.empty_like(x1, memory_format=torch.contiguous_format)
+
+
+# NB: This meta function accepts non-meta arguments! When this behavior
+# was originally introduced this was accidental, but it is now load bearing
+# as people are using this so that they can conveniently test code involving
+# embeddings (feeding CPU tensor inputs with meta device EmbeddingBag module)
@register_meta(aten._embedding_bag.default)
def meta_embedding_bag(
    weight,
    indices,
    offsets,
    scale_grad_by_freq=False,
    mode=0,
    sparse=False,
    per_sample_weights=None,
    include_last_offset=False,
    padding_idx=-1,
):
    """Meta kernel for aten._embedding_bag.

    Returns (output, offset2bag, bag_size, max_indices) placeholders whose
    shapes mirror the eager implementations' bookkeeping tensors.  Note the
    comment above the decorator: this function may legitimately receive
    non-meta tensor arguments.
    """
    torch._check(
        indices.dtype in (torch.long, torch.int),
        lambda: f"expected indices to be long or int, got {indices.dtype}",
    )
    torch._check(
        offsets.dtype in (torch.long, torch.int),
        lambda: f"expected offsets to be long or int, got {offsets.dtype}",
    )
    torch._check(
        utils.is_float_dtype(weight.dtype),
        lambda: f"expected weight to be floating point type, got {weight.dtype}",
    )

    num_bags = offsets.size(0)
    if include_last_offset:
        torch._check(
            num_bags >= 1,
            lambda: "include_last_offset: numBags should be at least 1",
        )
        # The final offset is an end marker, not the start of a new bag.
        num_bags -= 1

    output = weight.new_empty(num_bags, weight.size(1))
    MODE_SUM, MODE_MEAN, MODE_MAX = range(3)

    if per_sample_weights is not None:
        torch._check(
            mode == MODE_SUM,
            lambda: "embedding_bag: per_sample_weights only supported with mode='sum'",
        )
        torch._check(
            per_sample_weights.dtype == weight.dtype,
            lambda: f"expected weight ({weight.dtype}) and per_sample_weights ({per_sample_weights.dtype}) to have same dtype",
        )
        torch._check(
            per_sample_weights.ndim == 1,
            lambda: f"expected per_sample_weights to be 1D tensor, got {per_sample_weights.ndim}D",
        )
        torch._check(
            per_sample_weights.numel() == indices.numel(),
            lambda: (
                f"expected per_sample_weights.numel() ({per_sample_weights.numel()} "
                f"to be the same as indices.numel() ({indices.numel()})"
            ),
        )

    # These helpers mirror the CPU fast-path dispatch in EmbeddingBag.cpp;
    # on the fast path offset2bag is not materialized.
    def is_fast_path_index_select_scale(src, scale, output, padding_idx):
        return (
            is_fast_path_index_select(src, output, padding_idx) and scale.stride(0) == 1
        )

    def is_fast_path_index_select(src, output, padding_idx):
        return (
            (src.dtype == torch.float or src.dtype == torch.half)
            and src.stride(1) == 1
            and output.stride(1) == 1
            and padding_idx < 0
        )

    def is_fast_path(src, scale, output, padding_idx):
        if scale is not None:
            return is_fast_path_index_select_scale(src, scale, output, padding_idx)
        else:
            return is_fast_path_index_select(src, output, padding_idx)

    if device_hint(offsets) != "cpu":
        # Non-CPU bookkeeping: offset2bag and bag_size are always allocated.
        offset2bag = indices.new_empty(indices.size(0))
        bag_size = indices.new_empty(offsets.size())
        if mode == MODE_MAX:
            max_indices = indices.new_empty(num_bags, weight.size(1))
        else:
            max_indices = indices.new_empty(0)
    else:
        fast_path_sum = is_fast_path(weight, per_sample_weights, output, padding_idx)
        if mode in (MODE_MEAN, MODE_MAX) or not fast_path_sum:
            offset2bag = offsets.new_empty(indices.size(0))
        else:
            offset2bag = offsets.new_empty(0)
        bag_size = offsets.new_empty(num_bags)
        # This part of the logic comes from make_max_indices_out in EmbeddingBag.cpp
        numBags = offsets.shape[0]
        if mode == MODE_MAX:
            if include_last_offset:
                torch._check(
                    numBags >= 1,
                    lambda: "include_last_offset: numBags should be at least 1",
                )
                numBags -= 1
            max_indices = offsets.new_empty(numBags, weight.shape[1])
        else:
            max_indices = offsets.new_empty(bag_size.size())
    return output, offset2bag, bag_size, max_indices
+
+
@register_meta(aten._embedding_bag_forward_only.default)
def meta_embedding_bag_forward_only(weight, indices, offsets, *args):
    """Meta for _embedding_bag_forward_only.

    Identical to meta_embedding_bag except that on CPU the bag_size
    bookkeeping tensor always has offsets' full shape.
    """
    output, offset2bag, bag_size, max_indices = meta_embedding_bag(
        weight, indices, offsets, *args
    )
    if device_hint(offsets) == "cpu":
        bag_size = offsets.new_empty(offsets.size())
    return output, offset2bag, bag_size, max_indices
+
+
+def _get_reduction_dtype(input, dtype, promote_int_to_long=True):
+ # if specified, dtype takes precedence
+ if dtype:
+ return dtype
+
+ if input.dtype.is_floating_point or input.dtype.is_complex:
+ return input.dtype
+ elif promote_int_to_long:
+ return torch.long
+
+ return input.dtype
+
+
@register_meta([aten.nansum.default, aten.nansum.out])
@out_wrapper()
def meta_nansum(input, dims=None, keepdim=False, *, dtype=None):
    """Meta for nansum: standard reduction shape with int->long promotion."""
    out_dtype = _get_reduction_dtype(input, dtype, promote_int_to_long=True)
    reduce_dims = utils.reduction_dims(input.shape, dims)
    out_shape = _compute_reduction_shape(input, reduce_dims, keepdim)
    return input.new_empty(out_shape, dtype=out_dtype)
+
+
@register_meta([aten.median.default, aten.nanmedian.default])
def meta_median(input):
    """Meta for full-tensor median/nanmedian: reduces over every dimension."""
    all_dims = tuple(range(input.dim()))
    out_shape = utils.compute_reduction_output_shape(input.shape, all_dims)
    return input.new_empty(out_shape)
+
+
@register_meta(
    [
        aten.median.dim,
        aten.median.dim_values,
        aten.nanmedian.dim,
        aten.nanmedian.dim_values,
        aten.mode.default,
        aten.mode.values,
    ]
)
@out_wrapper("values", "indices")
def meta_median_mode_dim(input, dim=-1, keepdim=False):
    """Meta for median/nanmedian/mode along a dim: (values, int64 indices)."""
    if device_hint(input) == "cuda":
        utils.alert_not_deterministic("median CUDA with indices output")
    reduce_dims = utils.reduction_dims(input.shape, (dim,))
    out_shape = _compute_reduction_shape(input, reduce_dims, keepdim)
    values = input.new_empty(out_shape)
    indices = input.new_empty(out_shape, dtype=torch.long)
    return values, indices
+
+
@register_meta(aten.logical_not_.default)
def meta_logical_not_(self):
    # In-place op: metadata is unchanged, so the input is its own meta result.
    return self
+
+
@register_meta(aten.repeat.default)
def meta_repeat(self, repeats):
    """Meta for Tensor.repeat: output dims are padded input dims * repeats."""
    torch._check(
        len(repeats) >= self.dim(),
        lambda: "Number of dimensions of repeat dims can not be smaller than number of dimensions of tensor",
    )
    # Left-pad the source shape with 1s so it lines up with `repeats`; the
    # new leading dimensions come purely from the repeat counts.
    pad = len(repeats) - self.dim()
    padded_size = (1,) * pad + tuple(self.shape)
    target_size = [dim_size * rep for dim_size, rep in zip(padded_size, repeats)]
    return self.new_empty(target_size)
+
+
@register_meta(aten.zero_.default)
def meta_zero_(self):
    # In-place zero: metadata is unchanged, return the input itself.
    return self
+
+
@register_meta(
    [
        aten.mul_.Scalar,
        aten.div_.Scalar,
        aten.mul_.Tensor,
        aten.div_.Tensor,
        aten.logical_and_.default,
        aten.logical_or_.default,
        aten.logical_xor_.default,
    ],
)
def meta_binop_inplace(self, other):
    """Meta for in-place binary ops: `other` must broadcast into `self`."""
    if not isinstance(other, torch.Tensor):
        # Scalar operand: nothing to validate; result aliases self.
        return self
    check_inplace_broadcast(self.shape, other.shape)
    return self
+
+
@register_meta(
    [
        aten.add_.Scalar,
        aten.sub_.Scalar,
        aten.add_.Tensor,
        aten.sub_.Tensor,
    ],
)
def meta_binop_inplace_alpha(self, other, alpha=1):
    """Meta for in-place add_/sub_ (with alpha): broadcast check only."""
    if not isinstance(other, torch.Tensor):
        # Scalar operand: nothing to validate; result aliases self.
        return self
    check_inplace_broadcast(self.shape, other.shape)
    return self
+
+
@register_meta([aten.round.default, aten.round.decimals])
def meta_round(self, **kwargs):
    # Rounding is a plain elementwise op under default type promotion;
    # the `decimals` kwarg does not affect output metadata.
    return elementwise_meta(
        self, type_promotion=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT
    )
+
+
def shift_dtype_check(fn_name, self, val):
    """Validate operand dtypes for bitwise shift ops.

    ``self`` must be an integral tensor; ``val`` must be either an integral
    tensor or a plain int-like scalar.
    """
    torch._check(
        utils.is_integer_dtype(self.dtype),
        lambda: f"{fn_name}: Expected input tensor to have an integral dtype. Got {self.dtype}",
    )
    if not isinstance(val, torch.Tensor):
        torch._check(
            isinstance(val, IntLike),
            lambda: f"{fn_name}: Expected shift value to be an int. Got {val}",
        )
    else:
        torch._check(
            utils.is_integer_dtype(val.dtype),
            lambda: f"{fn_name}: Expected shift value to have an integral dtype. Got {val.dtype}",
        )
+
+
@register_meta([aten.__rshift__.Tensor, aten.__rshift__.Scalar])
def meta_rshifts(self, other):
    """Meta for __rshift__: integral operands, default elementwise promotion."""
    shift_dtype_check("rshift", self, other)
    result = elementwise_meta(
        self, other, type_promotion=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT
    )
    return result
+
+
@register_meta([aten.__lshift__.Tensor, aten.__lshift__.Scalar])
def meta_lshifts(self, other):
    """Meta for __lshift__: integral operands, default elementwise promotion."""
    shift_dtype_check("lshift", self, other)
    result = elementwise_meta(
        self, other, type_promotion=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT
    )
    return result
+
+
@register_meta(aten.zero.default)
def meta_zero(self):
    """Meta for out-of-place zero: fresh tensor with self's metadata."""
    out = self.new_empty(self.shape)
    return out
+
+
@register_meta([aten.fill_.Tensor, aten.fill_.Scalar])
def meta_fill_(self, val):
    # In-place fill: metadata is unchanged regardless of the fill value.
    return self
+
+
@register_meta([aten.fill.Tensor, aten.fill.Scalar])
def meta_fill(self, val):
    """Meta for out-of-place fill: result mirrors self; value is irrelevant."""
    out = torch.empty_like(self)
    return out
+
+
@register_meta(aten.relu_.default)
def meta_relu_(self):
    # In-place relu: metadata is unchanged, return the input itself.
    return self
+
+
@register_meta([aten.index_put.default, aten._unsafe_index_put.default])
def meta_index_put(self, indices, values, accumulate=False):
    # Out-of-place index_put: result always carries self's metadata.
    return torch.empty_like(self)
+
+
@register_meta(aten.masked_fill_.Scalar)
def meta_masked_fill_(self, mask, value):
    # mask must broadcast into self for an in-place fill; value is a scalar.
    check_inplace_broadcast(self.shape, mask.shape)
    return self
+
+
@register_meta(aten.masked_scatter_)
def meta_masked_scatter_(self, mask, source):
    """Meta for in-place masked_scatter_: mask/dtype validation only."""
    torch._check(
        mask.dtype in (torch.bool, torch.uint8), lambda: "Mask must be bool or uint8"
    )
    torch._check(
        self.dtype == source.dtype,
        # BUG FIX: this message was not an f-string, so the dtype
        # placeholders were printed literally.
        lambda: "masked_scatter: expected self and source to have same "
        f"dtypes but got {self.dtype} and {source.dtype}",
    )
    return self
+
+
@register_meta(aten.masked_scatter)
@out_wrapper()
def meta_masked_scatter(self, mask, source):
    """Meta for out-of-place masked_scatter.

    Broadcasts self/mask, then reuses the in-place meta's validation on a
    fresh contiguous output.
    """
    self, mask = _maybe_broadcast(self, mask)
    out = torch.empty_like(self, memory_format=torch.contiguous_format)
    return meta_masked_scatter_(out, mask, source)
+
+
@register_meta(aten.masked_scatter_backward)
def meta_masked_scatter_backward(self, mask, sizes):
    # Gradient w.r.t. `source` uses the caller-provided sizes.
    return self.new_empty(sizes)
+
+
@register_meta(aten.index_put_.default)
def meta_index_put_(self, indices, values, accumulate=False):
    # In-place index_put_: metadata is unchanged, return the input itself.
    return self
+
+
@register_meta(aten.alias.default)
def meta_alias(self):
    # view(self.shape) yields a fresh alias with identical metadata.
    return self.view(self.shape)
+
+
+def common_meta_baddbmm_bmm(batch1, batch2, is_bmm, self_baddbmm=None):
+ torch._check(batch1.dim() == 3, lambda: "batch1 must be a 3D tensor")
+ torch._check(batch2.dim() == 3, lambda: "batch2 must be a 3D tensor")
+
+ batch1_sizes = batch1.size()
+ batch2_sizes = batch2.size()
+
+ bs = batch1_sizes[0]
+ contraction_size = batch1_sizes[2]
+ res_rows = batch1_sizes[1]
+ res_cols = batch2_sizes[2]
+ output_size = (bs, res_rows, res_cols)
+
+ torch._check(
+ batch2_sizes[0] == bs and batch2_sizes[1] == contraction_size,
+ lambda: f"Expected size for first two dimensions of batch2 tensor to be: [{bs}"
+ f", {contraction_size}] but got: [{batch2_sizes[0]}, {batch2_sizes[1]}].",
+ )
+
+ # TODO: handle out
+
+ output = batch2.new_empty(output_size)
+
+ if not is_bmm and self_baddbmm is not None:
+ torch._check(self_baddbmm.dim() == 3, lambda: "self must be a 3D tensor")
+ torch._check(
+ self_baddbmm.size() == output_size,
+ lambda: f"Expected an input tensor shape with shape {output_size} but got shape: {self_baddbmm.size()}",
+ )
+
+ return output
+
+
@register_meta(aten.bmm.default)
def meta_bmm(self, mat2):
    # bmm is baddbmm without the self-addend (is_bmm=True).
    return common_meta_baddbmm_bmm(self, mat2, True)
+
+
def div_rtn(x, y):
    """Division rounding toward negative infinity (port of ATen's div_rtn).

    Takes the quotient and steps it down by one whenever there is a nonzero
    remainder whose sign disagrees with the divisor's.
    """
    quot, rem = x // y, x % y
    # WARNING: explicit bool conversion here is necessary;
    # would be fixed by SymBool
    if rem != 0 and (bool(rem < 0) != bool(y < 0)):
        quot -= 1
    return quot
+
+
def pooling_output_shape_pad_lr(
    inputSize, kernelSize, pad_l, pad_r, stride, dilation, ceil_mode
):
    """Pooled output length for one spatial dim with explicit l/r padding.

    Mirrors ATen's pooling_output_shape_pad_lr: round-toward-negative
    division of the padded, dilated span, plus the standard ceil_mode
    correction.
    """
    span = inputSize + pad_l + pad_r - dilation * (kernelSize - 1) - 1
    if ceil_mode:
        span += stride - 1
    outputSize = div_rtn(span, stride) + 1
    # In ceil_mode the last window must start inside the input or left
    # padding, never wholly inside the right padding.
    if ceil_mode and (outputSize - 1) * stride >= inputSize + pad_l:
        outputSize -= 1
    return outputSize
+
+
def pooling_output_shape(inputSize, kernelSize, pad, stride, dilation, ceil_mode):
    """Symmetric-padding wrapper: validate stride/pad, then delegate to
    pooling_output_shape_pad_lr with pad_l == pad_r == pad."""
    torch._check(stride != 0, lambda: "stride should not be zero")
    torch._check(pad >= 0, lambda: f"pad must be non-negative, but got pad: {pad}")
    torch._check(
        pad <= kernelSize // 2,
        lambda: f"pad should be at most half of kernel size, but got pad={pad} and kernel_size={kernelSize}",
    )
    return pooling_output_shape_pad_lr(
        inputSize, kernelSize, pad, pad, stride, dilation, ceil_mode
    )
+
+
+def pool2d_shape_check(
+ input,
+ kH,
+ kW,
+ dH,
+ dW,
+ padH,
+ padW,
+ dilationH,
+ dilationW,
+ nInputPlane,
+ inputHeight,
+ inputWidth,
+ outputHeight,
+ outputWidth,
+ memory_format,
+):
+ ndim = input.dim()
+ nOutputPlane = nInputPlane
+
+ torch._check(
+ kW > 0 and kH > 0,
+ lambda: "kernel size should be greater than zero, but got kH: {kH}, kW: {kW}",
+ )
+ torch._check(
+ dW > 0 and dH > 0,
+ lambda: "stride should be greater than zero, but got dH: {dH}, dW: {dW}",
+ )
+ torch._check(
+ dilationH > 0 and dilationW > 0,
+ lambda: "dilation should be greater than zero, but got dilationH: {dilationH}, dilationW: {dilationW}",
+ )
+
+ valid_dims = input.size(1) != 0 and input.size(2) != 0
+
+ if memory_format == torch.channels_last:
+ torch._check(
+ ndim == 4 and valid_dims and input.size(3) != 0,
+ lambda: "Expected 4D (batch mode) tensor expected for input with channels_last layout"
+ " with optional 0 dim batch size for input, but got: {input.size()}",
+ )
+ else:
+ torch._check(
+ (ndim == 3 and input.size(0) != 0 and valid_dims)
+ or (ndim == 4 and valid_dims and input.size(3) != 0),
+ lambda: f"Expected 3D or 4D (batch mode) tensor with optional 0 dim batch size for input, but got: {input.size()}",
+ )
+
+ torch._check(
+ kW // 2 >= padW and kH // 2 >= padH,
+ lambda: "pad should be smaller than or equal to half of kernel size, but got "
+ f"padW = {padW}, padH = {padH}, kW = {kW}, kH = {kH}",
+ )
+
+ torch._check(
+ outputWidth >= 1 and outputHeight >= 1,
+ lambda: f"Given input size: ({nInputPlane}x{inputHeight}x{inputWidth}). "
+ f"Calculated output size: ({nOutputPlane}x{outputHeight}x{outputWidth}). "
+ "Output size is too small",
+ )
+
+
def pool3d_shape_check(
    input: Tensor,
    nslices: int,
    kT: int,
    kH: int,
    kW: int,
    dT: int,
    dH: int,
    dW: int,
    pT: int,
    pH: int,
    pW: int,
    dilationT: int,
    dilationH: int,
    dilationW: int,
    itime: int,
    iheight: int,
    iwidth: int,
    otime: int,
    oheight: int,
    owidth: int,
    fn_name: str,
    check_input_size: bool = False,
):
    """Validate 3D pooling arguments and the computed output extent.

    Raises (via torch._check) on non-positive kernel/stride/dilation, a
    non-4D/5D input, zero-length non-batch dimensions, padding larger than
    half the kernel, or an empty computed output.  ``check_input_size``
    additionally requires the input to cover the kernel (AveragePool3d).
    """
    ndim = input.ndim

    torch._check(
        kT > 0 and kW > 0 and kH > 0,
        lambda: (
            f"kernel size should be greater than zero, but got "
            f"kT: {kT}, kH: {kH}, kW: {kW}"
        ),
    )
    torch._check(
        dT > 0 and dW > 0 and dH > 0,
        lambda: (
            f"stride should be greater than zero, but got "
            f"dT: {dT}, dH: {dH}, dW: {dW}"
        ),
    )
    torch._check(
        dilationT > 0 and dilationW > 0 and dilationH > 0,
        lambda: (
            f"dilation should be greater than zero, but got "
            f"dilationT: {dilationT}, dilationH: {dilationH}, dilationW: {dilationW}"
        ),
    )

    torch._check(
        ndim in (4, 5),
        lambda: f"{fn_name}: Expected 4D or 5D tensor for input, but got: {input.shape}",
    )

    for i in range(ndim):
        if ndim == 5 and i == 0:
            # size of batch-dim can be 0.
            continue
        torch._check(
            input.size(i) > 0,
            lambda: (
                f"{fn_name}: Expected input's non-batch dimensions to have positive length,"
                f" but input has a shape of {input.shape}"
                f" and non-batch dimension {input.size(i)} has length zero!"
            ),
        )

    if check_input_size:  # AveragePool3d
        torch._check(
            itime >= kT and iheight >= kH and iwidth >= kW,
            lambda: (
                f"input image (T: {itime} H: {iheight} W: {iwidth}) smaller than "
                f"kernel size (kT: {kT} kH: {kH} kW: {kW})"
            ),
        )

    torch._check(
        kT / 2 >= pT and kW / 2 >= pW and kH / 2 >= pH,
        lambda: (
            f"pad should be smaller than or equal to half of kernel size, but got "
            f"kT: {kT} kW: {kW} kH: {kH} padT: {pT} padW: {pW} padH: {pH}"
        ),
    )

    torch._check(
        otime >= 1 and owidth >= 1 and oheight >= 1,
        lambda: (
            f"Given input size: ({nslices}x{itime}x{iheight}x{iwidth}). "
            f"Calculated output size: ({nslices}x{otime}x{oheight}x{owidth}). "
            f"Output size is too small"
        ),
    )
+
+
def max_pool3d_backward_shape_check(
    input,
    grad_output,
    indices,
    nslices,
    kT,
    kH,
    kW,
    dT,
    dH,
    dW,
    pT,
    pH,
    pW,
    dilationT,
    dilationH,
    dilationW,
    itime,
    iheight,
    iwidth,
    otime,
    oheight,
    owidth,
    fn_name,
):
    """Validate max_pool3d backward arguments.

    Re-runs the forward pool3d shape checks, then verifies that
    ``grad_output`` and ``indices`` match the expected output extents
    (channels/time/height/width) in their trailing four dimensions.
    """
    ndim = input.ndim

    pool3d_shape_check(
        input,
        nslices,
        kT,
        kH,
        kW,
        dT,
        dH,
        dW,
        pT,
        pH,
        pW,
        dilationT,
        dilationH,
        dilationW,
        itime,
        iheight,
        iwidth,
        otime,
        oheight,
        owidth,
        fn_name,
    )

    check_dim_size(grad_output, ndim, ndim - 4, nslices)
    check_dim_size(grad_output, ndim, ndim - 3, otime)
    check_dim_size(grad_output, ndim, ndim - 2, oheight)
    check_dim_size(grad_output, ndim, ndim - 1, owidth)

    check_dim_size(indices, ndim, ndim - 4, nslices)
    check_dim_size(indices, ndim, ndim - 3, otime)
    check_dim_size(indices, ndim, ndim - 2, oheight)
    check_dim_size(indices, ndim, ndim - 1, owidth)
+
+
def avg_pool3d_backward_shape_check(
    input: Tensor,
    grad_output: Tensor,
    nslices: int,
    kT: int,
    kH: int,
    kW: int,
    dT: int,
    dH: int,
    dW: int,
    pT: int,
    pH: int,
    pW: int,
    itime: int,
    iheight: int,
    iwidth: int,
    otime: int,
    oheight: int,
    owidth: int,
    fn_name: str,
):
    """Validate avg_pool3d backward arguments.

    Re-runs the forward pool3d shape checks (with unit dilation, since
    average pooling has none, and with the AveragePool3d input-size check
    enabled), then verifies grad_output's trailing four dimensions.
    """
    ndim = input.ndim

    pool3d_shape_check(
        input,
        nslices,
        kT,
        kH,
        kW,
        dT,
        dH,
        dW,
        pT,
        pH,
        pW,
        1,
        1,
        1,
        itime,
        iheight,
        iwidth,
        otime,
        oheight,
        owidth,
        fn_name,
        True,
    )

    check_dim_size(grad_output, ndim, ndim - 4, nslices)
    check_dim_size(grad_output, ndim, ndim - 3, otime)
    check_dim_size(grad_output, ndim, ndim - 2, oheight)
    check_dim_size(grad_output, ndim, ndim - 1, owidth)
+
+
def max_pool2d_checks_and_compute_shape(
    input, kernel_size, stride, padding, dilation, ceil_mode
):
    """Validate max_pool2d arguments and compute the output extents.

    Returns (nInputPlane, outputHeight, outputWidth) after normalizing the
    int-or-pair hyperparameters, checking the input's memory format / rank,
    and running the shared pool2d shape checks.
    """
    # Reference: aten/src/ATen/native/DilatedMaxPool2d.cpp
    def unpack(name, val):
        # Hyperparameters may be a single int or an (H, W) pair.
        torch._check(
            len(val) in [1, 2],
            lambda: f"max_pool2d: {name} must either be a single int, or a tuple of two ints",
        )
        H = val[0]
        W = H if len(val) == 1 else val[1]
        return H, W

    kH, kW = unpack("kernel_size", kernel_size)

    torch._check(
        len(stride) in [0, 1, 2],
        lambda: "max_pool2d: stride must either be omitted, a single int, or a tuple of two ints",
    )
    if len(stride) == 0:
        # Omitted stride defaults to the kernel size.
        dH, dW = kH, kW
    else:
        dH, dW = unpack("stride", stride)

    padH, padW = unpack("padding", padding)
    dilationH, dilationW = unpack("dilation", dilation)
    nInputPlane = input.size(-3)
    inputHeight = input.size(-2)
    inputWidth = input.size(-1)

    memory_format = utils.suggest_memory_format(input)
    if memory_format == torch.channels_last:
        torch._check(
            input.dim() == 4,
            lambda: "non-empty 4D (batch mode) tensor expected for input with channels_last layout",
        )
    elif memory_format == torch.contiguous_format:
        torch._check(
            input.dim() in [3, 4],
            lambda: "non-empty 3D or 4D (batch mode) tensor expected for input",
        )
    else:
        torch._check(
            False,
            lambda: "Unsupport memory format. Supports only ChannelsLast, Contiguous",
        )

    outputHeight = pooling_output_shape(inputHeight, kH, padH, dH, dilationH, ceil_mode)
    outputWidth = pooling_output_shape(inputWidth, kW, padW, dW, dilationW, ceil_mode)

    pool2d_shape_check(
        input,
        kH,
        kW,
        dH,
        dW,
        padH,
        padW,
        dilationH,
        dilationW,
        nInputPlane,
        inputHeight,
        inputWidth,
        outputHeight,
        outputWidth,
        memory_format,
    )

    return nInputPlane, outputHeight, outputWidth
+
+
@register_meta(aten.max_pool2d_with_indices_backward.default)
def meta_max_pool2d_with_indices_backward(
    grad_output,
    self,
    kernel_size,
    stride,
    padding,
    dilation,
    ceil_mode,
    indices,
):
    """Meta kernel for max_pool2d_with_indices backward.

    Validates that grad_output/indices match the forward output shape and
    returns an uninitialized tensor shaped like ``self`` (the forward input),
    preserving its suggested memory format.
    """
    (
        nInputPlane,
        outputHeight,
        outputWidth,
    ) = max_pool2d_checks_and_compute_shape(
        self, kernel_size, stride, padding, dilation, ceil_mode
    )

    torch._check(
        self.dtype == grad_output.dtype,
        lambda: f"Expected dtype {self.dtype} for `gradOutput` but got dtype {grad_output.dtype}",
    )

    nOutputPlane = nInputPlane
    ndim = self.ndim

    # Both grad_output and indices must have the forward output's sizes in
    # their last three dims.
    def _check_dim_size(t):
        check_dim_size(t, ndim, ndim - 3, nOutputPlane)
        check_dim_size(t, ndim, ndim - 2, outputHeight)
        check_dim_size(t, ndim, ndim - 1, outputWidth)

    _check_dim_size(grad_output)
    _check_dim_size(indices)

    memory_format = utils.suggest_memory_format(self)
    return torch.empty(
        self.shape,
        dtype=self.dtype,
        device=self.device,
        memory_format=memory_format,
    )
+
+
@register_meta(aten.max_pool2d_with_indices.default)
def meta_max_pool2d_with_indices(
    input, kernel_size, stride=(), padding=(0,), dilation=(1,), ceil_mode=False
):
    """Meta kernel for max_pool2d_with_indices.

    Returns (output, indices): two uninitialized tensors of the pooled shape,
    with indices in int64, both in the input's suggested memory format.
    """
    (
        nInputPlane,
        outputHeight,
        outputWidth,
    ) = max_pool2d_checks_and_compute_shape(
        input, kernel_size, stride, padding, dilation, ceil_mode
    )

    nbatch = input.size(-4) if input.dim() == 4 else 1
    memory_format = utils.suggest_memory_format(input)
    # 3D input → unbatched output; 4D input → batched output.
    if input.dim() == 3:
        size = [nInputPlane, outputHeight, outputWidth]
    else:
        size = [nbatch, nInputPlane, outputHeight, outputWidth]
    return (
        torch.empty(
            size,
            dtype=input.dtype,
            device=input.device,
            memory_format=memory_format,
        ),
        torch.empty(
            size,
            dtype=torch.int64,
            device=input.device,
            memory_format=memory_format,
        ),
    )
+
+
@register_meta(aten.max_unpool2d)
@out_wrapper()
def meta_max_unpool2d(self_, indices, output_size):
    """Meta kernel for max_unpool2d.

    Validates indices dtype/shape and input rank, then returns an
    uninitialized ([N,] C, oheight, owidth) tensor on the input's device.
    """
    utils.alert_not_deterministic("max_unpooling2d_forward_out")

    torch._check(
        indices.dtype == torch.int64,
        lambda: f"elements in indices should be type int64 but got: {indices.dtype}",
    )
    torch._check(
        len(output_size) == 2,
        lambda: (
            f"There should be exactly two elements (height, width) in output_size, "
            f"but got {len(output_size)} elements."
        ),
    )

    oheight, owidth = output_size

    torch._check(
        self_.ndim in (3, 4),
        lambda: (
            f"Input to max_unpooling2d should be a 3d or 4d Tensor, "
            f"but got a tensor with {self_.ndim} dimensions."
        ),
    )
    torch._check(
        self_.shape == indices.shape,
        lambda: (
            f"Expected shape of indices to be same as that of the input tensor ({self_.shape}) "
            f"but got indices tensor with shape: {indices.shape}"
        ),
    )

    # All non-batch dimensions must be non-empty.
    for i in range(1, self_.ndim):
        torch._check(
            self_.size(i) > 0,
            lambda: (
                f"max_unpooling2d(): "
                f"Expected input to have non-zero size for non-batch dimensions, "
                f"but got {self_.shape} with dimension {i} being empty."
            ),
        )

    self = self_.contiguous()

    # 3D input → unbatched output; 4D input → batched output.
    if self_.ndim == 3:
        nchannels = self.size(0)
        result = self.new_empty((nchannels, oheight, owidth))
    else:
        nbatch = self.size(0)
        nchannels = self.size(1)
        result = self.new_empty((nbatch, nchannels, oheight, owidth))

    return result
+
+
+def _max_unpooling3d_shape_check(input, indices, output_size, stride, padding, fn_name):
+ torch._check(
+ indices.dtype == torch.int64, lambda: "elements in indices should be type int64"
+ )
+ torch._check(
+ input.ndim in (4, 5),
+ lambda: f"Input to max_unpooling3d should be a 4d or 5d Tensor, but got a tensor with {input.ndim} dimensions.",
+ )
+ torch._check(
+ len(output_size) == 3,
+ lambda: (
+ f"There should be exactly three elements (depth, height, width) in output_size, "
+ f"but got {len(output_size)} elements."
+ ),
+ )
+ torch._check(
+ len(stride) == 3,
+ lambda: f"There should be exactly three elements (depth, height, width) in stride, but got: {len(stride)} elements.",
+ )
+ torch._check(
+ len(padding) == 3,
+ lambda: f"There should be exactly three elements (depth, height, width) in padding, but got: {len(padding)} elements.",
+ )
+ torch._check(
+ input.shape == indices.shape,
+ lambda: (
+ f"Expected shape of indices to be same as that of the input tensor ({input.shape}) "
+ f"but got indices tensor with shape: {indices.shape}"
+ ),
+ )
+
+ for i in range(1, input.ndim):
+ torch._check(
+ input.size(i) > 0,
+ lambda: (
+ f"{fn_name}: "
+ f"Expected input to have non-zero size for non-batch dimensions, "
+ f"but got {input.shape} with dimension {i} being empty."
+ ),
+ )
+
+ torch._check(
+ stride[0] > 0 and stride[1] > 0 and stride[2] > 0,
+ lambda: f"strides should be greater than zero, but got stride: {stride}",
+ )
+
+
@register_meta(aten.max_unpool3d)
@out_wrapper()
def meta_max_unpool3d(self_, indices, output_size, stride, padding):
    """Meta kernel for max_unpool3d.

    Validates arguments via _max_unpooling3d_shape_check and returns an
    uninitialized ([N,] C, odepth, oheight, owidth) tensor.
    """
    utils.alert_not_deterministic("max_unpooling3d_forward_out")

    _max_unpooling3d_shape_check(
        self_, indices, output_size, stride, padding, "max_unpooling3d()"
    )

    self = self_.contiguous()

    odepth, oheight, owidth = output_size

    # 4D input → unbatched output; 5D input → batched output.
    if self_.ndim == 4:
        nchannels = self.size(0)
        result = self.new_empty((nchannels, odepth, oheight, owidth))
    else:
        nbatch = self.size(0)
        nchannels = self.size(1)
        result = self.new_empty((nbatch, nchannels, odepth, oheight, owidth))

    return result
+
+
@register_meta(aten.max_pool3d_with_indices)
@out_wrapper("out", "indices")
def meta_max_pool3d_with_indices(
    input,
    kernel_size,
    stride=(),
    padding=(0,),
    dilation=(1,),
    ceil_mode=False,
):
    """Meta kernel for max_pool3d_with_indices.

    Unpacks the (possibly single-element) kernel/stride/padding/dilation
    tuples, validates the pooling geometry, and returns (out, indices) with
    the pooled shape; indices is int64. channels_last_3d is propagated when
    the input uses that layout.
    """
    # Each geometry argument may be a single int (applied to T/H/W) or a
    # 3-tuple (T, H, W).
    torch._check(
        len(kernel_size) in (1, 3),
        lambda: "max_pool3d: kernel_size must either be a single int, or a tuple of three ints",
    )
    kT = kernel_size[0]
    kH = kT if len(kernel_size) == 1 else kernel_size[1]
    kW = kT if len(kernel_size) == 1 else kernel_size[2]

    # An omitted stride defaults to the kernel size.
    torch._check(
        not stride or len(stride) in (1, 3),
        lambda: "max_pool3d: stride must either be omitted, a single int, or a tuple of three ints",
    )
    dT = kT if not stride else stride[0]
    dH = kH if not stride else (dT if len(stride) == 1 else stride[1])
    dW = kW if not stride else (dT if len(stride) == 1 else stride[2])

    torch._check(
        len(padding) in (1, 3),
        lambda: "max_pool3d: padding must either be a single int, or a tuple of three ints",
    )
    pT = padding[0]
    pH = pT if len(padding) == 1 else padding[1]
    pW = pT if len(padding) == 1 else padding[2]

    torch._check(
        len(dilation) in (1, 3),
        lambda: "max_pool3d: dilation must be either a single int, or a tuple of three ints",
    )
    dilationT = dilation[0]
    dilationH = dilationT if len(dilation) == 1 else dilation[1]
    dilationW = dilationT if len(dilation) == 1 else dilation[2]

    torch._check(
        input.ndim in (4, 5),
        lambda: "non-empty 4D or 5D (batch mode) tensor expected for input",
    )

    nbatch = input.size(-5) if input.ndim == 5 else 1
    nslices = input.size(-4)
    itime = input.size(-3)
    iheight = input.size(-2)
    iwidth = input.size(-1)

    otime = pooling_output_shape(itime, kT, pT, dT, dilationT, ceil_mode)
    oheight = pooling_output_shape(iheight, kH, pH, dH, dilationH, ceil_mode)
    owidth = pooling_output_shape(iwidth, kW, pW, dW, dilationW, ceil_mode)

    pool3d_shape_check(
        input,
        nslices,
        kT,
        kH,
        kW,
        dT,
        dH,
        dW,
        pT,
        pH,
        pW,
        dilationT,
        dilationH,
        dilationW,
        itime,
        iheight,
        iwidth,
        otime,
        oheight,
        owidth,
        "max_pool3d_with_indices()",
    )

    # Decide whether outputs should be channels_last_3d: directly for 5D
    # inputs, or by probing a batched view of a 4D input.
    channels_last = (
        input.ndim == 5 and utils.suggest_memory_format(input) == torch.channels_last_3d
    )
    if input.ndim == 4:
        input_channels_last_check = input.unsqueeze(0)
        channels_last = (
            not input_channels_last_check.is_contiguous()
        ) and input_channels_last_check.is_contiguous(
            memory_format=torch.channels_last_3d
        )
        out_shape = (nslices, otime, oheight, owidth)
    else:
        out_shape = (nbatch, nslices, otime, oheight, owidth)  # type: ignore[assignment]

    out = input.new_empty(out_shape)
    indices = input.new_empty(out_shape, dtype=torch.int64)

    if channels_last:
        out = out.to(memory_format=torch.channels_last_3d)
        indices = indices.to(memory_format=torch.channels_last_3d)

    return out, indices
+
+
@register_meta(aten.max_pool3d_with_indices_backward)
@out_wrapper("grad_input")
def meta_max_pool3d_with_indices_backward(
    grad_output,
    input,
    kernel_size,
    stride,
    padding,
    dilation,
    ceil_mode,
    indices,
):
    """Meta kernel for max_pool3d_with_indices backward.

    Re-derives the pooling geometry from the arguments, validates it against
    grad_output/indices, and returns an uninitialized grad_input shaped like
    ``input`` (channels_last_3d preserved when applicable).
    """
    # Same argument unpacking as the forward meta kernel: each geometry
    # argument is a single int or a (T, H, W) 3-tuple.
    torch._check(
        len(kernel_size) in (1, 3),
        lambda: "max_pool3d: kernel_size must either be a single int, or a tuple of three ints",
    )
    kT = kernel_size[0]
    kH = kT if len(kernel_size) == 1 else kernel_size[1]
    kW = kT if len(kernel_size) == 1 else kernel_size[2]

    torch._check(
        not stride or len(stride) in (1, 3),
        lambda: "max_pool3d: stride must either be omitted, a single int, or a tuple of three ints",
    )
    dT = kT if not stride else stride[0]
    dH = kH if not stride else (dT if len(stride) == 1 else stride[1])
    dW = kW if not stride else (dT if len(stride) == 1 else stride[2])

    torch._check(
        len(padding) in (1, 3),
        lambda: "max_pool3d: padding must either be a single int, or a tuple of three ints",
    )
    pT = padding[0]
    pH = pT if len(padding) == 1 else padding[1]
    pW = pT if len(padding) == 1 else padding[2]

    torch._check(
        len(dilation) in (1, 3),
        lambda: "max_pool3d: dilation must be either a single int, or a tuple of three ints",
    )
    dilationT = dilation[0]
    dilationH = dilationT if len(dilation) == 1 else dilation[1]
    dilationW = dilationT if len(dilation) == 1 else dilation[2]

    torch._check(
        input.ndim in (4, 5),
        lambda: "non-empty 4D or 5D (batch mode) tensor expected for input",
    )

    nslices = input.size(-4)
    itime = input.size(-3)
    iheight = input.size(-2)
    iwidth = input.size(-1)

    # Output sizes are taken from grad_output rather than recomputed.
    otime = grad_output.size(-3)
    oheight = grad_output.size(-2)
    owidth = grad_output.size(-1)

    max_pool3d_backward_shape_check(
        input,
        grad_output,
        indices,
        nslices,
        kT,
        kH,
        kW,
        dT,
        dH,
        dW,
        pT,
        pH,
        pW,
        dilationT,
        dilationH,
        dilationW,
        itime,
        iheight,
        iwidth,
        otime,
        oheight,
        owidth,
        "max_pool3d_with_indices_backward()",
    )

    # Same channels_last_3d detection as the forward meta kernel.
    channels_last = (
        input.ndim == 5 and utils.suggest_memory_format(input) == torch.channels_last_3d
    )
    if input.ndim == 4:
        input_channels_last_check = input.unsqueeze(0)
        channels_last = (
            not input_channels_last_check.is_contiguous()
        ) and input_channels_last_check.is_contiguous(
            memory_format=torch.channels_last_3d
        )

    grad_input = input.new_empty(input.shape)

    if channels_last:
        grad_input = grad_input.to(memory_format=torch.channels_last_3d)

    return grad_input
+
+
+def check_grid_sampler_common(input: Tensor, grid: Tensor):
+ torch._check(
+ input.device == grid.device,
+ lambda: (
+ f"grid_sampler(): expected input and grid to be on same device, but input "
+ f"is on {input.device} and grid is on {grid.device}"
+ ),
+ )
+ torch._check(
+ input.layout == torch.strided and grid.layout == torch.strided,
+ lambda: (
+ f"grid_sampler(): expected input and grid to have torch.strided layout, but "
+ f"input has {input.layout} and grid has {grid.layout}"
+ ),
+ )
+ torch._check(
+ input.shape[0] == grid.shape[0],
+ lambda: (
+ f"grid_sampler(): expected grid and input to have same batch size, but got "
+ f"input with sizes {input.shape} and grid with sizes {grid.shape}"
+ ),
+ )
+ torch._check(
+ grid.shape[-1] == input.ndim - 2,
+ lambda: (
+ f"grid_sampler(): expected grid to have size {input.ndim - 2} in last "
+ f"dimension, but got grid with sizes {grid.shape}"
+ ),
+ )
+
+ for i in range(2, input.ndim):
+ torch._check(
+ input.shape[i] > 0,
+ lambda: (
+ f"grid_sampler(): expected input to have non-empty spatial dimensions, "
+ f"but input has sizes {input.shape} with dimension {i} being empty"
+ ),
+ )
+
+
class GridSamplerInterpolation(Enum):
    # Integer values match the `interpolation_mode` codes accepted by the
    # aten grid_sampler ops (compared against `.value` in the checks below).
    BILINEAR = 0
    NEAREST = 1
    BICUBIC = 2
+
+
def check_grid_sampler_3d(input: Tensor, grid: Tensor, interpolation_mode: int):
    """Checks specific to the volumetric (5-D) grid_sampler variant.

    Requires a 5-D input whose rank matches the grid's, and rejects bicubic
    interpolation, which is only implemented for 4-D inputs.
    """
    torch._check(
        input.ndim == 5 and input.ndim == grid.ndim,
        lambda: f"grid_sampler(): expected 5D input and grid with same number of dimensions, but got input with sizes {input.shape} and grid with sizes {grid.shape}",
    )
    wants_bicubic = interpolation_mode == GridSamplerInterpolation.BICUBIC.value
    torch._check(
        not (input.ndim == 5 and wants_bicubic),
        lambda: "grid_sampler(): bicubic interpolation only supports 4D input",
    )
+
+
@register_meta(aten.grid_sampler_2d_backward.default)
def grid_sampler_2d_backward_meta(
    grad_output,
    input,
    grid,
    interpolation_mode,
    padding_mode,
    align_corners,
    output_mask,
):
    """Meta kernel for grid_sampler_2d backward.

    Returns (grad_input, grad_grid); grad_input is None when output_mask[0]
    says the input gradient is not required.
    """
    grad_input = (
        torch.zeros_like(input, memory_format=torch.contiguous_format)
        if output_mask[0]
        else None
    )
    grad_grid = torch.empty_like(grid, memory_format=torch.contiguous_format)
    return (grad_input, grad_grid)
+
+
@register_meta(aten.grid_sampler_3d)
@out_wrapper()
def grid_sampler_3d(
    input,
    grid,
    interpolation_mode,
    padding_mode,
    align_corners,
):
    """Meta kernel for grid_sampler_3d.

    Validates the inputs and returns an uninitialized tensor of shape
    (N, C, D_out, H_out, W_out), where the output spatial sizes come from
    the grid's dims 1..3.
    """
    check_grid_sampler_common(input, grid)
    check_grid_sampler_3d(input, grid, interpolation_mode)
    batch, channels = input.shape[0], input.shape[1]
    out_d, out_h, out_w = grid.shape[1], grid.shape[2], grid.shape[3]
    return input.new_empty((batch, channels, out_d, out_h, out_w))
+
+
@register_meta(aten.grid_sampler_3d_backward)
@out_wrapper("grad_input", "grad_grid")
def grid_sampler_3d_backward(
    grad_output,
    input,
    grid,
    interpolation_mode,
    padding_mode,
    align_corners,
    output_mask,
):
    """Meta kernel for grid_sampler_3d backward.

    Returns (grad_input, grad_grid); grad_input is None when output_mask[0]
    indicates the input gradient is not needed. Note this uses
    legacy_contiguous_format, unlike the 2d backward above.
    """
    check_grid_sampler_common(input, grid)
    check_grid_sampler_3d(input, grid, interpolation_mode)
    input_requires_grad = output_mask[0]
    if input_requires_grad:
        grad_input = torch.zeros_like(
            input, memory_format=torch.legacy_contiguous_format
        )
    else:
        grad_input = None
    grad_grid = torch.empty_like(grid, memory_format=torch.legacy_contiguous_format)
    return grad_input, grad_grid
+
+
@register_meta([aten.full.default])
def full(size, fill_value, *args, **kwargs):
    """Meta kernel for torch.full.

    When no dtype is supplied, infer it from ``fill_value`` (bool/int/float
    promotion is handled by ``utils.get_dtype``). The fill value itself is
    irrelevant on the meta device — only shape/dtype/device matter.
    """
    # Use an explicit `is None` test rather than truthiness: a falsy-but-valid
    # dtype value must not be silently replaced by the inferred one.
    dtype = kwargs.get("dtype", None)
    if dtype is None:
        dtype = utils.get_dtype(fill_value)
    kwargs["dtype"] = dtype
    return torch.empty(size, *args, **kwargs)
+
+
# zeros_like is special cased to work for sparse
@register_meta(aten.zeros_like.default)
def zeros_like(
    self,
    dtype=None,
    layout=None,
    device=None,
    pin_memory=None,
    memory_format=None,
):
    """Meta kernel for zeros_like.

    Sparse-COO requests get a dedicated path that builds an empty sparse
    tensor and sizes it to match ``self``; everything else defers to
    empty_like followed by fill_(0).
    """
    if layout == torch.sparse_coo:
        torch._check(
            memory_format is None,
            lambda: "memory format option is only supported by strided tensors",
        )

        res = torch.empty(
            0,
            dtype=self.dtype if dtype is None else dtype,
            layout=layout,
            device=self.device if device is None else device,
            pin_memory=pin_memory,
        )

        # Preserve the sparse/dense split of a sparse input; a dense input
        # becomes fully sparse (dense_dim = 0).
        if self.is_sparse:
            res.sparse_resize_and_clear_(
                self.size(), self.sparse_dim(), self.dense_dim()
            )
        else:
            res.sparse_resize_and_clear_(self.size(), self.dim(), 0)

        res._coalesced_(True)
        return res
    res = aten.empty_like.default(
        self,
        dtype=dtype,
        layout=layout,
        device=device,
        pin_memory=pin_memory,
        memory_format=memory_format,
    )
    # device can be not "meta"
    res.fill_(0)
    return res
+
+
@register_meta(aten.select.int)
def meta_select(self, dim, index):
    """Meta kernel for select: returns a view of ``self`` with ``dim``
    removed, offset by ``index`` strides — implemented via as_strided so the
    result aliases the input like the eager op does.
    """
    ndim = self.dim()
    torch._check_index(
        ndim != 0,
        lambda: "select() cannot be applied to a 0-dim tensor.",
    )

    # Wrap negative dim/index into range before computing the view geometry.
    dim = dim if dim >= 0 else dim + ndim
    size = self.size(dim)

    torch._check_index(
        not (-index > size or index >= size),
        lambda: f"select(): index {index} out of range for tensor of size "
        f"{self.size()} at dimension {dim}",
    )

    index = index if index >= 0 else index + size

    new_size = list(self.size())
    new_stride = list(self.stride())

    # Selecting drops the dimension and advances the storage offset.
    new_storage_offset = self.storage_offset() + index * new_stride[dim]
    del new_size[dim]
    del new_stride[dim]

    return self.as_strided(new_size, new_stride, new_storage_offset)
+
+
@register_meta(aten.select_scatter.default)
def meta_select_scatter(self, src, dim, index):
    # select_scatter's output has the same shape and strides as `self`.
    return utils.clone_preserve_strides(self)
+
+
@register_meta(aten.slice_scatter.default)
def meta_slice_scatter(self, src, dim=0, start=None, end=None, step=1):
    # slice_scatter's output has the same shape and strides as `self`.
    return utils.clone_preserve_strides(self)
+
+
# TODO: Deduplicate this with canonicalize_dim
def maybe_wrap_dim(dim: int, dim_post_expr: int, wrap_scalar: bool = True):
    """Wrap a possibly-negative dimension index into [0, dim_post_expr).

    A 0-d tensor (dim_post_expr <= 0) is treated as having one dimension,
    but only when the caller allows it via ``wrap_scalar``. Asserts on an
    out-of-range ``dim``.
    """
    if dim_post_expr <= 0:
        assert wrap_scalar
        dim_post_expr = 1
    lo = -dim_post_expr
    hi = dim_post_expr - 1
    assert lo <= dim <= hi, f"dim {dim} out of bounds ({lo}, {hi})"
    return dim + dim_post_expr if dim < 0 else dim
+
+
def ensure_nonempty_size(t, dim):
    """Size of ``t`` along ``dim``, with 0-d tensors reported as size 1."""
    if t.dim() == 0:
        return 1
    return t.shape[dim]
+
+
# From aten/src/ATen/native/ScatterGatherChecks.h
def gather_shape_check(self, dim, index):
    """Shape validation for gather: ``index`` must have the same rank as
    ``self`` (0-d counted as rank 1) and be no larger than ``self`` in every
    dimension except ``dim``.
    """
    self_dims = max(self.dim(), 1)
    index_dims = max(index.dim(), 1)
    torch._check(
        self_dims == index_dims,
        lambda: "Index tensor must have the same number of dimensions as input tensor",
    )
    for i in range(self_dims):
        if i != dim:
            torch._check(
                ensure_nonempty_size(index, i) <= ensure_nonempty_size(self, i),
                lambda: f"Size does not match at dimension {i} expected index {index.shape}"
                + f" to be smaller than self {self.shape} apart from dimension {dim}",
            )
+
+
@register_meta(aten.gather.default)
def meta_gather(self, dim, index, sparse_grad=False):
    """Meta kernel for gather: output has exactly ``index``'s shape and
    ``self``'s dtype. Shape/dtype checks are skipped for an empty index,
    matching the eager kernel's behavior.
    """
    wrapped_dim = maybe_wrap_dim(dim, self.dim())
    is_index_empty = index.numel() == 0
    if not is_index_empty:
        torch._check(
            index.dtype == torch.long,
            lambda: f"gather(): Expected dtype int64 for index, but got {index.dtype}",
        )
        gather_shape_check(self, wrapped_dim, index)
    return self.new_empty(index.shape)
+
+
+# From aten/src/ATen/native/TensorAdvancedIndexing.cpp
+def get_operator_enum(reduce_, use_new_options=False):
+ if use_new_options:
+ if reduce_ == "sum":
+ return "REDUCE_ADD"
+ elif reduce_ == "prod":
+ return "REDUCE_MULTIPLY"
+ elif reduce_ == "mean":
+ return "REDUCE_MEAN"
+ elif reduce_ == "amax":
+ return "REDUCE_MAXIMUM"
+ elif reduce_ == "amin":
+ return "REDUCE_MINIMUM"
+ torch._check(
+ False,
+ lambda: "reduce argument must be either sum, prod, mean, amax or amin.",
+ )
+ return
+ else:
+ if reduce_ == "add":
+ return "REDUCE_ADD"
+ elif reduce_ == "multiply":
+ return "REDUCE_MULTIPLY"
+ torch._check(False, lambda: "reduce argument must be either add or multiply.")
+ return
+
+
+# From aten/src/ATen/native/ScatterGatherChecks.h
+def scatter_gather_dtype_check(method_name, self, index, src_opt=None):
+ if index.numel() != 0:
+ torch._check(
+ index.dtype == torch.long,
+ lambda: f"{method_name}(): Expected dtype int64 for index",
+ )
+
+ if src_opt is not None:
+ torch._check(
+ self.dtype == src_opt.dtype,
+ lambda: f"{method_name}(): Expected self.dtype to be equal to src.dtype",
+ )
+
+
def ensure_nonempty_dim(dim):
    """Clamp a rank to at least 1, so 0-d tensors count as one-dimensional."""
    return dim if dim >= 1 else 1
+
+
# From aten/src/ATen/native/ScatterGatherChecks.h
def scatter_shape_check(self, dim, index, src_opt=None):
    """Shape validation for scatter: ``index`` must match ``self``'s rank and
    be no larger than ``self`` (and ``src`` when given) in every dimension
    other than ``dim``. Skipped entirely for an empty index.
    """
    if index.numel() == 0:
        return
    torch._check(
        ensure_nonempty_dim(self.dim()) == ensure_nonempty_dim(index.dim()),
        lambda: "Index tensor must have the same number of dimensions as self tensor",
    )

    is_wrong_shape = False
    self_dims = ensure_nonempty_dim(self.dim())

    # Check: index.size(d) <= self.size(d) for all d != dim
    for d in range(self_dims):
        index_d_size = ensure_nonempty_size(index, d)
        if d == dim:
            continue
        if index_d_size > ensure_nonempty_size(self, d):
            is_wrong_shape = True
            break

    # Check: index.size(d) <= src.size(d) for all d if src is Tensor
    if not is_wrong_shape and src_opt is not None:
        for d in range(self_dims):
            index_d_size = ensure_nonempty_size(index, d)
            if index_d_size > ensure_nonempty_size(src_opt, d):
                is_wrong_shape = True
                break

    # Error messages differ depending on whether a src tensor participates.
    if src_opt is not None:
        torch._check(
            ensure_nonempty_dim(self.dim()) == ensure_nonempty_dim(index.dim()),
            lambda: "Index tensor must have the same number of dimensions as self tensor",
        )
        torch._check(
            not is_wrong_shape,
            lambda: f"Expected index {index.shape} to be smaller than self {self.shape}"
            + f" apart from dimension {dim} and to be smaller than src {src_opt.shape}",
        )
    else:
        torch._check(
            not is_wrong_shape,
            lambda: f"Expected index {index.shape} to be smaller than self {self.shape}"
            + f" apart from dimension {dim}",
        )
+
+
# From aten/src/ATen/native/TensorAdvancedIndexing.cpp
def scatter_meta_impl(self, dim, index, src=None, reduce_=None, use_new_options=False):
    """Shared validation for the scatter family: wraps ``dim``, checks
    dtypes and shapes, and — when a reduce mode is supplied — verifies it is
    a recognized operator name. Returns nothing; callers allocate the output.
    """
    wrapped_dim = maybe_wrap_dim(dim, self.dim())
    scatter_gather_dtype_check("scatter", self, index, src)
    scatter_shape_check(self, wrapped_dim, index, src)
    if reduce_ is not None:
        # Check if we have a valid reduce operator.
        get_operator_enum(reduce_, use_new_options)
+
+
@register_meta(aten.scatter_add.default)
def meta_scatter_add(self, dim, index, src):
    # "add" is validated against the legacy reduce names inside
    # scatter_meta_impl; the output is a fresh tensor shaped like self.
    scatter_meta_impl(self, dim, index, src, "add")
    return self.new_empty(self.shape)
+
+
@register_meta(aten.scatter_add_)
def meta_scatter_add_(self, dim, index, src):
    # In-place variant: same checks as scatter_add, returns self.
    scatter_meta_impl(self, dim, index, src, "add")
    return self
+
+
@register_meta(
    [
        aten.scatter.src,
        aten.scatter.value,
        aten.scatter.reduce,
        aten.scatter.value_reduce,
    ]
)
@out_wrapper()
def meta_scatter(self, dim, index, src_or_value, reduce=None):
    """Meta kernel covering all four scatter overloads; the src/value
    distinction only matters for the dtype/shape checks (a scalar value
    skips them). Output is a fresh tensor shaped like self.
    """
    src = src_or_value if isinstance(src_or_value, torch.Tensor) else None
    scatter_meta_impl(self, dim, index, src, reduce)
    return self.new_empty(self.shape)
+
+
@register_meta(
    [
        aten.scatter_.src,
        aten.scatter_.value,
        aten.scatter_.reduce,
        aten.scatter_.value_reduce,
    ]
)
def meta_scatter_(self, dim, index, src_or_value, reduce=None):
    """In-place counterpart of meta_scatter: same checks, returns self."""
    src = src_or_value if isinstance(src_or_value, torch.Tensor) else None
    scatter_meta_impl(self, dim, index, src, reduce)
    return self
+
+
@register_meta(
    [
        aten._scaled_dot_product_flash_attention,
    ]
)
def meta__scaled_dot_product_flash(
    query: Tensor,
    key: Tensor,
    value: Tensor,
    dropout_p: float = 0.0,
    is_causal: bool = False,
    return_debug_mask: bool = False,
    scale: Optional[float] = None,
):
    """Meta kernel for _scaled_dot_product_flash_attention.

    q/k/v are indexed as (batch, heads, seqlen, head_dim) — inferred from the
    size() calls below; confirm against the op schema. Returns the op's full
    output tuple (attention, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k,
    philox_seed, philox_offset, debug_mask) with only metadata populated.
    """
    batch_size = query.size(0)
    num_heads = query.size(1)
    max_seqlen_batch_q = query.size(2)
    head_dim = query.size(3)

    max_seqlen_batch_k = key.size(2)

    if device_hint(query) == "cpu":
        # Outputs are allocated in (B, S, H, ...) memory order and viewed
        # back to (B, H, S, ...) — presumably mirroring the CPU kernel's
        # physical layout; confirm against the ATen implementation.
        attention = torch.empty(
            (batch_size, max_seqlen_batch_q, num_heads, head_dim),
            dtype=query.dtype,
            device=query.device,
        ).transpose(1, 2)
        logsumexp = torch.empty(
            (
                batch_size,
                max_seqlen_batch_q,
                num_heads,
            ),
            dtype=torch.float,
            device=query.device,
        ).transpose(1, 2)
        return (
            attention,
            logsumexp,
            torch.empty((), dtype=torch.int32, device="meta"),
            torch.empty((), dtype=torch.int32, device="meta"),
            0,
            0,
            torch.empty((), dtype=torch.long, device="meta"),
            torch.empty((), dtype=torch.long, device="meta"),
            torch.empty((), dtype=query.dtype, device=query.device),
        )

    # Cuda Path
    query_t = query.transpose(1, 2)
    attention = torch.empty_like(query_t).transpose(1, 2)
    logsumexp = torch.empty(
        (batch_size, num_heads, max_seqlen_batch_q),
        dtype=torch.float,
        device=query.device,
    )

    if return_debug_mask:
        # Debug mask's key length is padded — presumably to the flash
        # kernel's block size; confirm against the CUDA implementation.
        blocksize_c = 128 if head_dim > 64 else 256
        max_seqlen_k = math.ceil(max_seqlen_batch_q / blocksize_c)
        if max_seqlen_batch_k <= 128:
            max_seqlen_k = 128
        elif max_seqlen_batch_k <= 256:
            max_seqlen_k = 256
        debug_mask = torch.empty(
            (batch_size, num_heads, max_seqlen_batch_q, max_seqlen_k),
            dtype=query.dtype,
            device=query.device,
        )
    else:
        debug_mask = torch.empty(0, dtype=query.dtype, device=query.device)

    # Note [Seed and Offset]: device for seed and offset below depends on whether we are
    # capturing or not, but at the time of tracing we don't know if we
    # are going to use cudagraphs or not, so we return meta tensors here
    # it's possible we'll need to have some special handling in inductor for sdpa

    return (
        attention,
        logsumexp,
        None,
        None,
        max_seqlen_batch_q,
        max_seqlen_batch_k,
        torch.empty((), dtype=torch.long, device="meta"),
        torch.empty((), dtype=torch.long, device="meta"),
        debug_mask,
    )
+
+
@register_meta(
    [
        aten._scaled_dot_product_flash_attention_backward,
    ]
)
def meta__scaled_dot_product_flash_backward(
    grad_out: Tensor,
    query: Tensor,
    key: Tensor,
    value: Tensor,
    out: Tensor,
    logsumexp: Tensor,
    cum_seq_q: Tensor,
    cum_seq_k: Tensor,
    max_q: int,
    max_k: int,
    dropout_p: float,
    is_causal: bool,
    philox_seed: Tensor,
    philox_offset: Tensor,
    scale: Optional[float] = None,
):
    """Meta kernel for the flash-attention backward op.

    Returns (grad_q, grad_k, grad_v) shaped like query/key/value. On non-CPU
    devices the grads are allocated contiguous in the transposed
    (B, S, H, D) view, matching what the flash kernels produce; on CPU they
    are allocated via torch.empty_permuted with the same physical order.
    """
    if device_hint(query) != "cpu":
        grad_q = torch.empty_like(query.transpose(1, 2)).transpose(1, 2)
        grad_k = torch.empty_like(key.transpose(1, 2)).transpose(1, 2)
        grad_v = torch.empty_like(value.transpose(1, 2)).transpose(1, 2)
        return grad_q, grad_k, grad_v

    # CPU path. (The non-CPU case returned above, so the former
    # `query.size(2) if device_hint(query) == "cpu" else max_q` ternaries
    # always took the CPU branch — use the CPU sizes directly.)
    batch_size = query.size(0)
    num_heads = query.size(1)
    head_dim = query.size(3)
    len_q = query.size(2)
    len_k = key.size(2)

    grad_q = torch.empty_permuted(
        (batch_size, num_heads, len_q, head_dim),
        (0, 2, 1, 3),
        dtype=query.dtype,
        device=query.device,
    )
    grad_k = torch.empty_permuted(
        (batch_size, num_heads, len_k, head_dim),
        (0, 2, 1, 3),
        dtype=key.dtype,
        device=key.device,
    )
    grad_v = torch.empty_permuted(
        (batch_size, num_heads, len_k, head_dim),
        (0, 2, 1, 3),
        dtype=value.dtype,
        device=value.device,
    )

    return grad_q, grad_k, grad_v
+
+
@register_meta(
    [
        aten._scaled_dot_product_efficient_attention,
    ]
)
def meta__scaled_dot_product_efficient(
    query: Tensor,
    key: Tensor,
    value: Tensor,
    attn_bias: Optional[Tensor],
    compute_log_sumexp: bool,
    dropout_p=0.0,
    is_causal: bool = False,
    scale: Optional[float] = None,
):
    """Meta kernel for _scaled_dot_product_efficient_attention.

    Returns (output, logsumexp, philox_seed, philox_offset). The output is
    allocated in (B, M, H, Kv) memory order and viewed back to (B, H, M, Kv).
    (Removed dead code from the original: an unused `key` transpose and the
    unused locals `N = key.size(1)` / `K = query.size(-1)`.)
    """
    # Work in the (B, M, H, D) view, as the efficient-attention kernel does.
    query = query.transpose(1, 2)
    value = value.transpose(1, 2)

    B = query.size(0)
    M = query.size(1)
    num_heads = query.size(-2)
    Kv = value.size(-1)

    res = torch.empty(B, M, num_heads, Kv, dtype=query.dtype, device=query.device)

    # logsumexp's M dimension is rounded up to a multiple of 32 — presumably
    # a kernel alignment requirement; empty when not requested.
    logsumexp_dim = math.ceil(M / 32) * 32 if compute_log_sumexp else 0
    logsum_exp = torch.empty(
        (B, num_heads, logsumexp_dim),
        dtype=torch.float,
        device=query.device,
    )

    res = res.transpose(1, 2)

    # See Note [Seed and Offset]:
    seed = torch.empty((), dtype=torch.long, device="meta")
    offset = torch.empty((), dtype=torch.long, device="meta")

    return res, logsum_exp, seed, offset
+
+
@register_meta(
    [
        aten._scaled_dot_product_efficient_attention_backward,
    ]
)
def meta__scaled_dot_product_efficient_backward(
    grad_out: Tensor,
    query: Tensor,
    key: Tensor,
    value: Tensor,
    attn_bias: Optional[Tensor],
    out: Tensor,
    logsumexp: Tensor,
    philox_seed: Tensor,
    philox_offset: Tensor,
    dropout_p: float,
    grad_input_mask: List[bool],
    is_causal: bool = False,
    scale: Optional[float] = None,
):
    """Meta kernel for the efficient-attention backward op.

    Returns (grad_q, grad_k, grad_v, grad_bias); grad_bias is None unless a
    bias was given and grad_input_mask[3] requests its gradient.
    """
    batch_size = query.size(0)
    num_heads = query.size(1)
    max_q = query.size(2)
    head_dim = query.size(3)
    head_dim_v = value.size(3)

    max_k = key.size(2)

    # Grads are physically laid out (B, S, H, D) but viewed as (B, H, S, D),
    # hence empty_permuted with the (0, 2, 1, 3) physical order.
    grad_q = torch.empty_permuted(
        (batch_size, num_heads, max_q, head_dim),
        (0, 2, 1, 3),
        dtype=query.dtype,
        device=query.device,
    )
    grad_k = torch.empty_permuted(
        (batch_size, num_heads, max_k, head_dim),
        (0, 2, 1, 3),
        dtype=key.dtype,
        device=key.device,
    )
    grad_v = torch.empty_permuted(
        (batch_size, num_heads, max_k, head_dim_v),
        (0, 2, 1, 3),
        dtype=value.dtype,
        device=value.device,
    )
    grad_bias = None
    if attn_bias is not None and grad_input_mask[3]:
        # Allocate with the last dim padded up to a multiple of 16, then
        # slice back — keeps the padded layout while exposing the true size.
        lastDim = attn_bias.size(-1)
        lastDimAligned = lastDim if lastDim % 16 == 0 else lastDim + 16 - lastDim % 16
        new_sizes = list(attn_bias.size())
        new_sizes[-1] = lastDimAligned
        grad_bias = torch.empty(
            new_sizes, dtype=attn_bias.dtype, device=attn_bias.device
        )
        grad_bias = grad_bias[..., :lastDim]

    return grad_q, grad_k, grad_v, grad_bias
+
+
@register_meta(
    [
        aten._flash_attention_forward,
    ]
)
def meta__flash_attention_forward(
    query: Tensor,
    key: Tensor,
    value: Tensor,
    cum_seq_q: Optional[Tensor],
    cum_seq_k: Optional[Tensor],
    max_q: int,
    max_k: int,
    dropout_p: float,
    is_causal: bool,
    return_debug_mask: bool,
    scale: Optional[float] = None,
):
    """Meta kernel for the low-level _flash_attention_forward op.

    Note the layout differs from the SDPA-level op above: here q/k/v are
    indexed as (batch, seqlen, heads, head_dim) — inferred from the size()
    calls below; confirm against the op schema. Returns
    (attention, logsumexp, philox_seed, philox_offset, debug_mask).
    """
    batch_size = query.size(0)
    max_seqlen_batch_q = query.size(1)
    num_heads = query.size(2)
    head_dim = query.size(3)

    max_seqlen_batch_k = key.size(1)

    # Cuda Path
    attention = torch.empty_like(query)
    logsumexp = torch.empty(
        (batch_size, num_heads, max_seqlen_batch_q),
        dtype=torch.float,
        device=query.device,
    )

    if return_debug_mask:
        # Same debug-mask padding as meta__scaled_dot_product_flash.
        blocksize_c = 128 if head_dim > 64 else 256
        max_seqlen_k = math.ceil(max_seqlen_batch_q / blocksize_c)
        if max_seqlen_batch_k <= 128:
            max_seqlen_k = 128
        elif max_seqlen_batch_k <= 256:
            max_seqlen_k = 256
        debug_mask = torch.empty(
            (batch_size, num_heads, max_seqlen_batch_q, max_seqlen_k),
            dtype=query.dtype,
            device=query.device,
        )
    else:
        debug_mask = torch.empty(0, dtype=query.dtype, device=query.device)

    # See Note [Seed and Offset]:
    return (
        attention,
        logsumexp,
        torch.empty((), dtype=torch.long, device="meta"),
        torch.empty((), dtype=torch.long, device="meta"),
        debug_mask,
    )
+
+
@register_meta(
    [
        aten._flash_attention_backward,
    ]
)
def meta__flash_attention_backward(
    grad_out: Tensor,
    query: Tensor,
    key: Tensor,
    value: Tensor,
    out: Tensor,
    logsumexp: Tensor,
    cum_seq_q: Tensor,
    cum_seq_k: Tensor,
    max_q: int,
    max_k: int,
    dropout_p: float,
    is_causal: bool,
    philox_seed: Tensor,
    philox_offset: Tensor,
    scale: Optional[float] = None,
):
    """Meta kernel for _flash_attention_backward: the three gradients mirror
    query/key/value exactly (shape, dtype, device, layout).
    """
    return tuple(torch.empty_like(t) for t in (query, key, value))
+
+
@register_meta(
    [
        aten._efficient_attention_forward,
    ]
)
def meta__efficient_attention_forward(
    query: Tensor,
    key: Tensor,
    value: Tensor,
    bias: Optional[Tensor],
    cu_seqlens_q: Optional[Tensor],
    cu_seqlens_k: Optional[Tensor],
    max_seqlen_q: Optional[int],
    dropout_p: float,
    custom_mask_type: int,
    compute_log_sumexp: bool = False,
    scale: Optional[float] = None,
    causal_diagonal: Optional[Tensor] = None,
    seqlen_k: Optional[Tensor] = None,
):
    """Meta kernel for the low-level _efficient_attention_forward op.

    q/k/v are indexed as (B, seqlen, heads, head_dim) — inferred from the
    size() calls below; confirm against the op schema. Returns
    (output, logsumexp, philox_seed, philox_offset, max_seqlen_q,
    max_seqlen_k); the trailing M and N are the q/k sequence lengths.
    """
    B = query.size(0)
    M = query.size(1)
    N = key.size(1)
    num_heads = query.size(-2)
    K = query.size(-1)
    Kv = value.size(-1)

    res = torch.empty(B, M, num_heads, Kv, dtype=query.dtype, device=query.device)

    # logsumexp's M dimension is rounded up to a multiple of 32 — presumably
    # a kernel alignment requirement; empty when not requested.
    logsumexp_dim = math.ceil(M / 32) * 32 if compute_log_sumexp else 0
    logsum_exp = torch.empty(
        (B, num_heads, logsumexp_dim),
        dtype=torch.float,
        device=query.device,
    )

    # See Note [Seed and Offset]:
    seed = torch.empty((), dtype=torch.long, device="meta")
    offset = torch.empty((), dtype=torch.long, device="meta")

    return res, logsum_exp, seed, offset, M, N
+
+
@register_meta(
    [
        aten._efficient_attention_backward,
    ]
)
def meta__efficient_attention_backward(
    grad_out: Tensor,
    query: Tensor,
    key: Tensor,
    value: Tensor,
    bias: Optional[Tensor],
    cu_seqlens_q: Optional[Tensor],
    cu_seqlens_k: Optional[Tensor],
    max_seqlen_q: int,
    max_seqlen_k: int,
    logsumexp: Tensor,
    dropout_p: float,
    philox_seed: Tensor,
    philox_offset: Tensor,
    custom_mask_type: int,
    bias_requires_grad: bool,
    scale: Optional[float] = None,
    num_splits_key: Optional[int] = None,
):
    """Meta kernel for _efficient_attention_backward.

    Returns (grad_query, grad_key, grad_value, grad_bias); q/k/v grads
    mirror their inputs. Note grad_bias is a 0-d placeholder (not None) when
    no bias was supplied.
    """
    grad_query = torch.empty_like(query)
    grad_key = torch.empty_like(key)
    grad_value = torch.empty_like(value)

    if bias is not None:
        # Pad the last dim up to a multiple of 16, then slice back to the
        # true size — preserves the padded physical layout.
        lastDim = bias.size(-1)
        lastDimAligned = lastDim if lastDim % 16 == 0 else lastDim + 16 - lastDim % 16
        new_sizes = list(bias.size())
        new_sizes[-1] = lastDimAligned
        grad_bias = torch.empty(new_sizes, dtype=bias.dtype, device=bias.device)
        grad_bias = grad_bias[..., :lastDim]
    else:
        grad_bias = torch.empty((), device=query.device)

    return grad_query, grad_key, grad_value, grad_bias
+
+
@register_meta([aten._scaled_mm.default])
def meta_scaled_mm(
    self: torch.Tensor,
    mat2: torch.Tensor,
    bias: Optional[torch.Tensor] = None,
    out_dtype: Optional[torch.dtype] = None,
    scale_a: Optional[torch.Tensor] = None,
    scale_b: Optional[torch.Tensor] = None,
    scale_result: Optional[torch.Tensor] = None,
    use_fast_accum: bool = False,
):
    """Meta kernel for aten._scaled_mm (fp8 matmul).

    Validates layout (row-major self, col-major mat2), 16-byte alignment of
    the shared and mat2 dimensions, and fp8 dtypes, then returns the (M, N)
    result buffer plus a 0-d float32 amax tensor.
    """

    def is_row_major(stride):
        return stride[0] > stride[1] and stride[1] == 1

    def is_col_major(shape, stride):
        return stride[0] == 1 and stride[1] == shape[0]

    def is_fp8_type(dtype):
        return dtype in (torch.float8_e4m3fn, torch.float8_e5m2)

    torch._check(
        self.dim() == 2 and mat2.dim() == 2,
        lambda: f"Inputs must be 2D but got self.dim()={self.dim()} and mat2.dim()={mat2.dim()}",
    )
    torch._check(
        is_row_major(self.stride()),
        lambda: "self must be row_major",
    )
    torch._check(
        is_col_major(mat2.shape, mat2.stride()),
        lambda: "mat2 must be col_major",
    )
    # Fixed error message: it previously said "self.size(0)" although the
    # check (correctly) inspects self.size(1), the shared K dimension.
    torch._check(
        self.size(1) % 16 == 0,
        lambda: f"Expected self.size(1) to be divisible by 16, but got self.size(1)={self.size(1)}",
    )
    # Fixed typo "divisble" -> "divisible".
    torch._check(
        mat2.size(0) % 16 == 0 and mat2.size(1) % 16 == 0,
        lambda: f"Expected both dimensions of mat2 to be divisible by 16 but got {mat2.shape}",
    )
    torch._check(
        is_fp8_type(self.dtype) and is_fp8_type(mat2.dtype),
        lambda: f"Expected both inputs to be fp8 types but got self.dtype={self.dtype} and mat2.dtype={mat2.dtype}",
    )
    _out_dtype = out_dtype if out_dtype is not None else self.dtype
    return torch.empty(
        self.size(0), mat2.size(1), dtype=_out_dtype, device=self.device
    ), torch.empty((), dtype=torch.float32, device=self.device)
+
+
@register_meta([aten.scatter_reduce.two, aten.scatter_reduce.two_out])
@out_wrapper()
def meta_scatter_reduce_two(self, dim, index, src, reduce, include_self=True):
    """Meta kernel for scatter_reduce.two: the output is shaped like `self`."""
    # Shared argument validation with the other scatter variants.
    scatter_meta_impl(self, dim, index, src, reduce, use_new_options=True)
    out = self.new_empty(self.shape)
    return out
+
+
@register_meta(aten.scatter_reduce_.two)
def meta_scatter_reduce__two(self, dim, index, src, reduce, include_self=True):
    """Meta kernel for in-place scatter_reduce_: validate args, return `self`."""
    scatter_meta_impl(self, dim, index, src, reduce, use_new_options=True)
    # In-place variant: the input tensor itself is the result.
    return self
+
+
@register_meta([aten.multinomial.default, aten.multinomial.out])
@out_wrapper()
def meta_multinomial(input, num_samples, replacement=False, *, generator=None):
    """Meta kernel for multinomial.

    Returns int64 indices: shape (num_samples,) for a 1-D distribution and
    (batch, num_samples) for a 2-D batch of distributions.
    """
    # Fixed typo in the error message: "probabilty" -> "probability".
    torch._check(
        0 < input.dim() <= 2,
        lambda: f"The probability distributions dimensions must be 1 or 2, but got {input.dim()}",
    )
    if input.dim() == 1:
        return torch.empty(num_samples, dtype=torch.long, device=input.device)
    return torch.empty(
        input.size(0), num_samples, dtype=torch.long, device=input.device
    )
+
+
def multiply_integers(vs):
    """Return the product of the integers in `vs` (1 for an empty iterable).

    Uses math.prod instead of a hand-rolled accumulation loop; semantics are
    identical for the integer sequences this file passes in.
    """
    return math.prod(vs)
+
+
+def upsample_common_check(input_size, output_size, num_spatial_dims):
+ torch._check(
+ len(output_size) == num_spatial_dims,
+ lambda: f"It is expected output_size equals to {num_spatial_dims}, but got size {len(output_size)}",
+ )
+ expected_input_dims = num_spatial_dims + 2 # N, C, ...
+ torch._check(
+ len(input_size) == expected_input_dims,
+ lambda: f"It is expected input_size equals to {expected_input_dims}, but got size {len(input_size)}",
+ )
+
+ torch._check(
+ all(s > 0 for s in input_size[2:]) and all(s > 0 for s in output_size),
+ lambda: f"Input and output sizes should be greater than 0, but got "
+ f"input size {input_size} and output size {output_size}",
+ )
+
+ nbatch, channels = input_size[:2]
+ return (nbatch, channels, *output_size)
+
+
@register_meta(
    [aten.upsample_nearest1d.default, aten._upsample_nearest_exact1d.default]
)
def upsample_nearest1d(input, output_size, scales=None):
    """Meta kernel for 1-D nearest(-exact) upsampling."""
    torch._check(
        input.numel() != 0 or multiply_integers(input.size()[1:]),
        lambda: f"Non-empty 3D data tensor expected but got a tensor with sizes {input.size()}",
    )
    full_size = upsample_common_check(input.size(), output_size, num_spatial_dims=1)
    out = input.new_empty(full_size)
    # Preserve the memory layout that the input suggests.
    return out.to(memory_format=utils.suggest_memory_format(input))
+
+
@register_meta(
    [aten.upsample_nearest2d.default, aten._upsample_nearest_exact2d.default]
)
def upsample_nearest2d(input, output_size, scales_h=None, scales_w=None):
    """Meta kernel for 2-D nearest(-exact) upsampling."""
    torch._check(
        input.numel() != 0 or multiply_integers(input.size()[1:]),
        lambda: f"Non-empty 4D data tensor expected but got a tensor with sizes {input.size()}",
    )
    full_output_size = upsample_common_check(
        input.size(), output_size, num_spatial_dims=2
    )

    # Start from the layout the input suggests, but drop back to contiguous
    # on CUDA with few channels, following
    # "heuristic: only use channels_last path when it's faster than the contiguous path"
    memory_format = utils.suggest_memory_format(input)
    n_channels = input.shape[1]
    if input.device.type == "cuda" and n_channels < 4:
        memory_format = torch.contiguous_format

    return input.new_empty(full_output_size).contiguous(memory_format=memory_format)
+
+
@register_meta(
    [
        aten.upsample_nearest2d_backward.default,
        aten._upsample_nearest_exact2d_backward.default,
    ]
)
def upsample_nearest2d_backward(
    grad_output: Tensor,
    output_size: Sequence[Union[int, torch.SymInt]],
    input_size: Sequence[Union[int, torch.SymInt]],
    scales_h: Optional[float] = None,
    scales_w: Optional[float] = None,
):
    """Meta kernel for 2-D nearest(-exact) upsample backward.

    Verifies grad_output matches the forward output shape, then returns an
    empty gradient shaped like the forward input.
    """
    full_output_size = upsample_common_check(
        input_size, output_size, num_spatial_dims=2
    )
    torch._check(
        grad_output.ndim == 4,
        lambda: f"Expected grad_output to be a tensor of dimension 4 but got: dimension {grad_output.ndim}",
    )
    # Check every dim of grad_output against the expected forward output.
    for i in range(4):
        torch._check(
            grad_output.size(i) == full_output_size[i],
            lambda: (
                f"Expected grad_output to have the same shape as output;"
                f" output.size({i}) = {full_output_size[i]}"
                f" but got grad_output.size({i}) = {grad_output.size(i)}"
            ),
        )

    grad_input = grad_output.new_empty(input_size)
    return grad_input.to(
        memory_format=utils.suggest_memory_format(grad_output)
    )  # type: ignore[call-overload]
+
+
@register_meta(
    [aten.upsample_nearest3d.default, aten._upsample_nearest_exact3d.default]
)
def upsample_nearest3d(input, output_size, scales_d=None, scales_h=None, scales_w=None):
    """Meta kernel for 3-D nearest(-exact) upsampling."""
    torch._check(
        input.numel() != 0 or multiply_integers(input.size()[1:]),
        lambda: f"Non-empty 5D data tensor expected but got a tensor with sizes {input.size()}",
    )
    full_size = upsample_common_check(input.size(), output_size, num_spatial_dims=3)
    out = input.new_empty(full_size)
    # Preserve the memory layout that the input suggests.
    return out.to(memory_format=utils.suggest_memory_format(input))
+
+
@register_meta(
    [
        aten.sort.default,
        aten.sort.stable,
        aten.sort.values,
        aten.sort.values_stable,
    ]
)
def meta_sort(self, stable=None, dim=-1, descending=False, values=None, indices=None):
    """Meta kernel for all sort overloads.

    Returns a (values, indices) pair shaped like `self` (indices are int64).
    When the out= variant supplies `values`/`indices`, those tensors are
    resized/re-strided in place and returned instead.
    """
    v, i = torch.empty_like(self), torch.empty_like(self, dtype=torch.int64)
    if values is not None and indices is not None:
        assert isinstance(values, TensorLike)
        assert isinstance(indices, TensorLike)
        # Makes sure values and indices have the same strides. For cases where
        # these have different shapes, like (5, 10, 5) and (0) in msort.
        out_shape = v.shape
        out_stride = v.stride()
        # Resize first, then force both outputs onto identical strides; the
        # order matters because as_strided_ requires the storage to be big
        # enough already.
        values = _maybe_resize_out(values, out_shape)
        indices = _maybe_resize_out(indices, out_shape)
        values.as_strided_(out_shape, out_stride)
        indices.as_strided_(out_shape, out_stride)
        # _safe_copy_out also validates dtype/device compatibility of the
        # user-supplied out tensors.
        _safe_copy_out(copy_from=v, copy_to=values)  # type: ignore[arg-type]
        _safe_copy_out(copy_from=i, copy_to=indices)  # type: ignore[arg-type]
        return values, indices
    return v, i
+
+
@register_meta(aten.argsort.stable)
def meta_argsort(self, *, stable, dim=-1, descending=False):
    """Meta kernel for stable argsort: the indices half of meta_sort."""
    _values, indices = meta_sort(self, stable=stable, dim=dim, descending=descending)
    return indices
+
+
+def rnn_cell_checkSizes(
+ input_gates, hidden_gates, input_bias, hidden_bias, factor, prev_hidden
+):
+ torch._check(input_gates.ndim == 2, lambda: f"{input_gates.ndim} != 2")
+ torch._check(
+ input_gates.shape == hidden_gates.shape,
+ lambda: f"{input_gates.shape} != {hidden_gates.shape}",
+ )
+ gates_size = input_gates.size(1)
+ if input_bias is not None:
+ torch._check(input_bias.ndim == 1, lambda: f"{input_bias.ndim} != 1")
+ torch._check(
+ input_bias.numel() == gates_size,
+ lambda: f"{input_bias.numel()} != {gates_size}",
+ )
+ torch._check(
+ input_bias.shape == hidden_bias.shape,
+ lambda: f"{input_bias.shape} != {hidden_bias.shape}",
+ )
+ torch._check(prev_hidden.ndim == 2, lambda: f"{prev_hidden.ndim} != 2")
+ expected_prev_hidden_numel = input_gates.size(0) * gates_size // factor
+ torch._check(
+ prev_hidden.numel() == expected_prev_hidden_numel,
+ lambda: f"{prev_hidden.numel()} != {input_gates.size(0)} * {gates_size} // {factor} (aka {expected_prev_hidden_numel})",
+ )
+ torch._check(
+ all(
+ x.device == input_gates.device
+ for x in [hidden_gates, input_bias, hidden_bias, prev_hidden]
+ ),
+ lambda: "expected all inputs to be same device",
+ )
+
+
@register_meta(aten._thnn_fused_lstm_cell.default)
def _thnn_fused_lstm_cell_meta(
    input_gates, hidden_gates, cx, input_bias=None, hidden_bias=None
):
    """Meta kernel for fused LSTM cell forward: returns (hy, cy, workspace)."""
    # Factor 4: an LSTM has four gates per hidden unit.
    rnn_cell_checkSizes(input_gates, hidden_gates, input_bias, hidden_bias, 4, cx)
    workspace = torch.empty_like(input_gates, memory_format=torch.contiguous_format)
    # Hidden and cell outputs mirror the incoming cell state.
    hy = torch.empty_like(cx, memory_format=torch.contiguous_format)
    cy = torch.empty_like(cx, memory_format=torch.contiguous_format)
    return (hy, cy, workspace)
+
+
@register_meta(aten._cudnn_rnn.default)
def _cudnn_rnn(
    input,
    weight,
    weight_stride0,
    weight_buf,
    hx,
    cx,
    mode,
    hidden_size,
    proj_size,
    num_layers,
    batch_first,
    dropout,
    train,
    bidirectional,
    batch_sizes,
    dropout_state,
):
    """Meta kernel for aten._cudnn_rnn.

    Mirrors the cuDNN RNN output shapes for both packed and padded inputs
    and returns (output, hy, cy, reserve, weight_buf); nothing is computed
    on the meta device.
    """
    packed = len(batch_sizes) != 0
    if packed:
        seq_length = len(batch_sizes)
        mini_batch = batch_sizes[0]
        batch_sizes_sum = input.shape[0]
    else:
        # Padded layout: (batch, seq, *) when batch_first else (seq, batch, *).
        seq_length = input.shape[1] if batch_first else input.shape[0]
        mini_batch = input.shape[0] if batch_first else input.shape[1]
        batch_sizes_sum = -1  # unused for padded input

    num_directions = 2 if bidirectional else 1
    # With projections enabled, the per-step output width is proj_size.
    out_size = proj_size if proj_size != 0 else hidden_size
    if packed:
        out_shape = [batch_sizes_sum, out_size * num_directions]
    elif batch_first:
        out_shape = [mini_batch, seq_length, out_size * num_directions]
    else:
        out_shape = [seq_length, mini_batch, out_size * num_directions]
    output = input.new_empty(out_shape)

    if cx is None:
        cy = torch.empty(0, device=input.device)
    else:
        cy = cx.new_empty([num_layers * num_directions, mini_batch, hidden_size])

    hy = hx.new_empty([num_layers * num_directions, mini_batch, out_size])

    # TODO: Query cudnnGetRNNTrainingReserveSize (expose to python); until
    # then the reserve buffer is empty regardless of `train`.
    reserve = input.new_empty(0, dtype=torch.uint8)

    return output, hy, cy, reserve, weight_buf
+
+
@register_meta(aten.mkldnn_rnn_layer.default)
def mkldnn_rnn_layer(
    input,
    w0,
    w1,
    w2,
    w3,
    hx_,
    cx_,
    reverse,
    batch_sizes,
    mode,
    hidden_size,
    num_layers,
    has_biases,
    bidirectional,
    batch_first,
    train,
):
    """Meta kernel for a single oneDNN RNN layer.

    Returns (output, hy, cy, workspace); hy/cy are empty 0-element tensors
    when the corresponding state input is None.
    """
    # Padded layout: (batch, seq, *) when batch_first else (seq, batch, *).
    if batch_first:
        mini_batch, seq_length = input.shape[0], input.shape[1]
    else:
        seq_length, mini_batch = input.shape[0], input.shape[1]
    out_channels = hidden_size
    if batch_first:
        out_shape = [mini_batch, seq_length, out_channels]
    else:
        out_shape = [seq_length, mini_batch, out_channels]
    output = input.new_empty(out_shape)
    hy = torch.empty(0, device=input.device) if hx_ is None else hx_.new_empty(hx_.shape)
    cy = torch.empty(0, device=input.device) if cx_ is None else cx_.new_empty(cx_.shape)
    # Opaque byte workspace; empty on the meta device.
    workspace = torch.empty(0, device=input.device, dtype=torch.uint8)
    return output, hy, cy, workspace
+
+
+def zero_numel_check_dims(self, dim, fn_name):
+ if self.ndim == 0:
+ torch._check_index(
+ dim == 0 or dim == -1,
+ lambda: f"{fn_name}: Expected reduction dim -1 or 0 for scalar but got {dim}",
+ )
+ else:
+ torch._check_index(
+ self.size(dim) != 0,
+ lambda: f"{fn_name}: Expected reduction dim {dim} to have non-zero size.",
+ )
+
+
# From aten/src/ATen/native/ReduceOps.cpp
def check_argmax_argmin(name, self, dim):
    """Validate the reduction dim for argmax/argmin.

    With an explicit dim, the (wrapped) dim must be reducible; with dim=None
    the whole tensor is reduced, so it must be non-empty.
    """
    if dim is None:
        torch._check(
            self.numel() != 0,
            lambda: f"{name}: Expected reduction dim to be specified for input.numel() == 0.",
        )
        return
    zero_numel_check_dims(self, maybe_wrap_dim(dim, self.dim()), name)
+
+
@register_meta([aten.argmax.default, aten.argmin.default])
def argmax_argmin_meta(self, dim=None, keepdim=False):
    """Meta kernel shared by argmax and argmin: int64 indices, reduced shape."""
    # NOTE(review): the error-message prefix is "argmax" even when this runs
    # for argmin, since both overloads share this implementation.
    check_argmax_argmin("argmax", self, dim)
    reduce_dims = utils.reduction_dims(self.shape, (dim,) if dim is not None else None)
    out_shape = _compute_reduction_shape(self, reduce_dims, keepdim)
    return self.new_empty(out_shape, dtype=torch.int64)
+
+
@register_meta(aten.scalar_tensor.default)
def scalar_tensor(s, dtype=None, layout=None, device=None, pin_memory=None):
    """Meta kernel for scalar_tensor: a 0-d empty tensor (the value `s` is
    never materialized on the meta device)."""
    shape = ()
    return torch.empty(
        shape, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory
    )
+
+
@register_meta(aten.topk.default)
def topk_meta(self, k, dim=-1, largest=True, sorted=True):
    """Meta kernel for topk: (values, int64 indices) with dim shrunk to k."""
    # From aten/src/ATen/native/Sorting.cpp
    dim = maybe_wrap_dim(dim, self.dim(), wrap_scalar=True)
    # Scalars are treated as a slice of length 1.
    slice_size = self.size(dim) if self.dim() > 0 else 1
    torch._check(
        k >= 0 and k <= slice_size,
        lambda: "selected index k out of range",
    )
    torch._check(k >= 0 and k <= slice_size, lambda: "k not in range for dimension")

    out_shape = list(self.shape)
    if out_shape:
        out_shape[dim] = k
    return self.new_empty(out_shape), self.new_empty(out_shape, dtype=torch.int64)
+
+
+legacy_contiguous_memory_format = torch.contiguous_format
+
+
+# From aten/src/ATen/native/cuda/RNN.cu
+def checkLSTMBackwardSizes(grad_hy, grad_cy, cx, cy, workspace):
+ defined_grad = grad_hy if grad_hy is not None else grad_cy
+ torch._check(defined_grad.dim() == 2, lambda: "")
+ exp_size = defined_grad.size()
+ if grad_hy is not None:
+ torch._check(grad_hy.size() == exp_size, lambda: "")
+ if grad_cy is not None:
+ torch._check(grad_cy.size() == exp_size, lambda: "")
+ torch._check(cx.size() == exp_size, lambda: "")
+ torch._check(cy.size() == exp_size, lambda: "")
+ torch._check(workspace.dim() == 2, lambda: "")
+ torch._check(workspace.numel() == exp_size[0] * exp_size[1] * 4, lambda: "")
+
+
# From aten/src/ATen/native/cuda/RNN.cu
@register_meta(aten._thnn_fused_lstm_cell_backward_impl.default)
def _thnn_fused_lstm_cell_backward_impl(grad_hy, grad_cy, cx, cy, workspace, has_bias):
    """Meta kernel for fused LSTM-cell backward: (grad_gates, grad_cx, grad_bias)."""
    # Nothing to differentiate when no incoming gradients were supplied.
    if grad_hy is None and grad_cy is None:
        return None, None, None
    checkLSTMBackwardSizes(grad_hy, grad_cy, cx, cy, workspace)
    gate_grads = torch.empty_like(
        workspace, memory_format=legacy_contiguous_memory_format
    )
    cell_grad = torch.empty_like(cx, memory_format=legacy_contiguous_memory_format)
    # The bias gradient reduces the gate gradients over the batch dimension.
    bias_grad = gate_grads.sum(0, keepdim=False) if has_bias else None
    return gate_grads, cell_grad, bias_grad
+
+
# From aten/src/ATen/native/mps/operations/Linear.mm
@register_meta(aten.linear_backward.default)
def linear_backward(input_, grad_output_, weight_, output_mask):
    """Meta kernel for linear_backward.

    output_mask selects (grad_input, grad_weight, grad_bias); unrequested
    gradients are returned as None.
    """
    grad_input = grad_output_.new_empty(input_.size()) if output_mask[0] else None
    grad_weight = None
    grad_bias = None
    if output_mask[1] or output_mask[2]:
        # grad_weight is (out_features, in_features); grad_bias is (out_features,).
        out_features = grad_output_.size(-1)
        grad_weight = grad_output_.new_empty((out_features, input_.size(-1)))
        grad_bias = grad_output_.new_empty(out_features)
    return (grad_input, grad_weight, grad_bias)
+
+
@register_meta(aten.pixel_shuffle.default)
def meta_pixel_shuffle(self, upscale_factor):
    """Meta kernel for pixel_shuffle: (..., C*r*r, H, W) -> (..., C, H*r, W*r)."""
    assert (
        len(self.shape) > 2 and self.shape[-3] % (upscale_factor * upscale_factor) == 0
    ), f"Invalid input shape for pixel_shuffle: {self.shape} with upscale_factor = {upscale_factor}"

    def _channels_last(t):
        return torch._prims_common.suggest_memory_format(t) == torch.channels_last

    def _memory_format():
        # NOTE(review): falls through (returns None) when no branch matches;
        # Tensor.to then receives memory_format=None — presumably intentional,
        # but worth confirming.
        if _channels_last(self):
            if device_hint(self) == "cuda":
                return torch.contiguous_format
            return torch.channels_last
        if self.is_contiguous(memory_format=torch.contiguous_format):
            return torch.contiguous_format
        if self.is_contiguous(memory_format=torch.preserve_format):
            return torch.preserve_format

    squared_factor = upscale_factor * upscale_factor
    out_channels = self.shape[-3] // squared_factor
    out_h = self.shape[-2] * upscale_factor
    out_w = self.shape[-1] * upscale_factor

    result = self.new_empty((*self.shape[:-3], out_channels, out_h, out_w))
    return result.to(memory_format=_memory_format())  # type: ignore[call-overload]
+
+
@register_meta(aten.mkldnn_rnn_layer_backward.default)
def mkldnn_rnn_layer_backward(
    input,
    weight0,
    weight1,
    weight2,
    weight3,
    hx_,
    cx_tmp,
    output,
    hy_,
    cy_,
    grad_output_r_opt,
    grad_hy_r_opt,
    grad_cy_r_opt,
    reverse,
    mode,
    hidden_size,
    num_layers,
    has_biases,
    train,
    bidirectional,
    batch_sizes,
    batch_first,
    workspace,
):
    """Meta kernel for oneDNN RNN layer backward.

    Each gradient mirrors the tensor it differentiates. The bias-gradient
    buffer (shaped like weight2) is returned twice — once per bias term.
    """
    bias_grad = weight2.new_empty(weight2.shape)
    return (
        input.new_empty(input.shape),
        weight0.new_empty(weight0.shape),
        weight1.new_empty(weight1.shape),
        bias_grad,
        bias_grad,
        hx_.new_empty(hx_.shape),
        cx_tmp.new_empty(cx_tmp.shape),
    )
+
+
@register_meta([aten.bucketize.Tensor, aten.bucketize.Tensor_out])
@out_wrapper()
def meta_bucketize(self, boundaries, *, out_int32=False, right=False):
    """Meta kernel for bucketize: contiguous index tensor shaped like `self`."""
    index_dtype = torch.int32 if out_int32 else torch.int64
    return torch.empty_like(self, dtype=index_dtype).contiguous()
+
+
@register_meta(aten._upsample_bilinear2d_aa.default)
def meta_upsample_bilinear2d_aa(
    input, output_size, align_corners, scales_h=None, scales_w=None
):
    """Meta kernel for anti-aliased bilinear 2-D upsampling."""
    full_output_size = upsample_common_check(
        input.size(), output_size, num_spatial_dims=2
    )
    torch._check(
        input.numel() != 0 or all(size > 0 for size in input.size()[1:]),
        lambda: f"Non-empty 4D data tensor expected but got a tensor with sizes {input.size()}",
    )
    out = input.new_empty(full_output_size)
    # Keep the layout the input suggests (e.g. channels-last).
    return out.to(memory_format=utils.suggest_memory_format(input))
+
+
# From aten/src/ATen/native/cuda/AmpKernels.cu
@register_meta(aten._amp_foreach_non_finite_check_and_unscale_.default)
def _amp_foreach_non_finite_check_and_unscale_(self, found_inf, inv_scale):
    """Meta kernel for AMP grad unscaling: only validates the scalar tensors.

    The trailing-underscore op presumably mutates `self` in place in eager
    mode; on the meta device there is nothing to write, so only the argument
    checks remain. Check order is significant for which error fires first.
    """
    torch._check(
        found_inf.numel() == 1, lambda: "found_inf must be a 1-element tensor."
    )
    torch._check(
        inv_scale.numel() == 1, lambda: "inv_scale must be a 1-element tensor."
    )
    torch._check(
        found_inf.dtype.is_floating_point, lambda: "found_inf must be a float tensor."
    )
    torch._check(
        inv_scale.dtype.is_floating_point, lambda: "inv_scale must be a float tensor."
    )
+
+
# From aten/src/ATen/native/UnaryOps.cpp
@register_meta([aten.nan_to_num.default, aten.nan_to_num.out])
@out_wrapper()
def nan_to_num(self, nan=None, posinf=None, neginf=None):
    """Meta kernel for nan_to_num: output shaped like the input (the
    replacement values are irrelevant on the meta device)."""
    return self.new_empty(list(self.size()))
+
+
@register_meta(torch.ops.aten.transpose_)
def transpose_(self, dim0, dim1):
    """In-place transpose on the meta device: swaps the two dims' sizes and
    strides via as_strided_ and returns `self`."""
    assert self.layout not in {
        torch.sparse_csr,
        torch.sparse_csc,
        torch.sparse_bsr,
        torch.sparse_bsc,
    }, f"torch.transpose_: in-place transposition is not supported for {self.layout} layout"

    rank = self.ndim

    dim0 = maybe_wrap_dim(dim0, rank)
    dim1 = maybe_wrap_dim(dim1, rank)

    # Swapping a dim with itself is a no-op.
    if dim0 == dim1:
        return self

    new_size = list(self.size())
    new_stride = list(self.stride())
    new_size[dim0], new_size[dim1] = new_size[dim1], new_size[dim0]
    new_stride[dim0], new_stride[dim1] = new_stride[dim1], new_stride[dim0]

    self.as_strided_(new_size, new_stride)
    return self
+
+
@register_meta(torch.ops.aten.t_)
def t_(self):
    """In-place matrix transpose (meta): validate rank, delegate to transpose_."""
    ndims = self.ndim

    if not self.is_sparse:
        assert (
            self.dim() <= 2
        ), f"t_ expects a tensor with <= 2 dimensions, but self is {ndims}D"
    else:
        sparse_dim = self.sparse_dim()
        dense_dim = self.dense_dim()
        assert (
            sparse_dim <= 2 and dense_dim == 0
        ), f"t_ expects a tensor with <= 2 sparse and 0 dense dimensions, but got {sparse_dim} sparse and {dense_dim} dense dimensions"  # noqa: B950

    # 0-D/1-D tensors swap dim 0 with itself (a no-op in transpose_).
    other_dim = 1 if ndims >= 2 else 0
    return transpose_(self, 0, other_dim)
+
+
@register_meta(aten.searchsorted)
@out_wrapper()
def meta_searchsorted(
    sorted_sequence, self, *, out_int32=False, right=False, side=None, sorter=None
):
    """Meta kernel for searchsorted: index tensor shaped like `self`, or a
    0-d result for a scalar query."""
    index_dtype = torch.int32 if out_int32 else torch.int64
    if not isinstance(self, torch.Tensor):
        # Scalar query: 0-d result on the sequence's device.
        return torch.empty((), dtype=index_dtype, device=sorted_sequence.device)
    return torch.empty_like(self, dtype=index_dtype).contiguous()
+
+
@register_meta(aten.polygamma)
@out_wrapper()
def meta_polygamma(n: int, self: Tensor) -> Tensor:
    """Meta kernel for polygamma(n, x); integral inputs promote to floating point."""
    torch._check(n >= 0, lambda: "polygamma(n, x) does not support negative n.")
    # elementwise_dtypes returns (computation_dtype, result_dtype).
    promoted_dtype = elementwise_dtypes(
        self, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT
    )[1]
    return torch.empty_like(self, dtype=promoted_dtype)
+
+
def _create_unary_float_meta_func(func):
    """Register (and return) a one-argument meta kernel for `func` that
    promotes integral inputs to floating point."""

    @register_meta(func)
    @out_wrapper()
    def _unary_meta(x):
        return elementwise_meta(
            x, type_promotion=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT
        )

    return _unary_meta
+
+
def _create_binary_float_meta_func(func):
    """Register (and return) a two-argument meta kernel for `func` that
    promotes integral inputs to floating point."""

    @register_meta(func)
    @out_wrapper()
    def _binary_meta(x, y):
        return elementwise_meta(
            x, y, type_promotion=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT
        )

    return _binary_meta
+
+
# One-argument special functions: register meta kernels with INT_TO_FLOAT
# type promotion (each call registers the kernel as a side effect).
_create_unary_float_meta_func(aten.special_airy_ai)
_create_unary_float_meta_func(aten.special_bessel_y0)
_create_unary_float_meta_func(aten.special_bessel_y1)
_create_unary_float_meta_func(aten.special_modified_bessel_i0)
_create_unary_float_meta_func(aten.special_modified_bessel_i1)
_create_unary_float_meta_func(aten.special_modified_bessel_k0)
_create_unary_float_meta_func(aten.special_modified_bessel_k1)
_create_unary_float_meta_func(aten.special_scaled_modified_bessel_k0)
_create_unary_float_meta_func(aten.special_scaled_modified_bessel_k1)


# Two-argument special functions (orthogonal polynomials): same INT_TO_FLOAT
# promotion applied across both operands.
_create_binary_float_meta_func(aten.special_chebyshev_polynomial_t)
_create_binary_float_meta_func(aten.special_chebyshev_polynomial_u)
_create_binary_float_meta_func(aten.special_hermite_polynomial_h)
_create_binary_float_meta_func(aten.special_hermite_polynomial_he)
_create_binary_float_meta_func(aten.special_laguerre_polynomial_l)
+
+
+# We must also trigger meta registrations from PrimTorch ref
+# decompositions
+import torch._refs
+import torch._refs.nn.functional
+import torch._refs.special
+
+
def activate_meta():
    """Register the collected decompositions as Meta-dispatch-key kernels.

    Collapses global_decomposition_table into one op->fn mapping (most
    specific table wins), then registers each eligible OpOverload's function
    as its meta implementation, skipping categories where a Python meta
    kernel would be wrong or recursive.
    """
    activate_meta_table = {}

    # For a given op, we pick the most specific decomp function from
    # global_decomp_table in the precedence order of meta > post_autograd > pre_autograd.
    # Renamed the loop variable: the original shadowed the builtin `type`.
    for table_kind in ["meta", "post_autograd", "pre_autograd"]:
        # Iterate items() to avoid the double dict lookup registry[op].
        for op_overload, decomp_fn in global_decomposition_table[table_kind].items():
            # setdefault keeps the entry from the earlier (more specific) table.
            activate_meta_table.setdefault(op_overload, decomp_fn)

    for op_overload, fn in activate_meta_table.items():
        # Don't register meta for HigherOrderOp's decomp.
        # We can reconsider this in the future, but in general,
        # the way you do a meta for a HigherOrderOp is different from
        # OpOverload.
        if isinstance(op_overload, torch._ops.HigherOrderOperator):
            continue
        assert isinstance(op_overload, OpOverload)

        op_overload.py_impl(torch._C.DispatchKey.Meta)(fn)

        if torch._C._dispatch_has_kernel_for_dispatch_key(
            op_overload.name(), "CompositeImplicitAutograd"
        ):
            # Internally, we shouldn't be registering meta kernels for any operators that
            # have CompositeImplicitAutograd kernels.
            # Instead, we should be letting those decompositions run, and writing meta kernels
            # only for the base operators.
            if op_overload in global_decomposition_table["meta"]:
                raise RuntimeError(
                    f"{op_overload} is a CompositeImplicitAutograd op, we shouldn't "
                    "register meta function for it. Instead, we should let the decomposition run and write "
                    "meta kernels for the base operators."
                )
            # Decompositions from other tables are simply skipped here.
        elif op_overload.is_view:
            # Attempting to register a python meta kernel for a view operator.
            # We shouldn't do this, because the output will report as not having aliased storages.
            # All view ops have meta kernels in C++ today, so we should use those instead.
            pass
        elif op_overload.name() in {
            "aten::empty_strided",  # causing infinite recursion, test_meta.py
            "aten::clone",  # causing infinite recursion
            "aten::_to_copy",  # causing infinite recursion, test_serialization.py -k test_tensor_subclass_getstate_overwrite  # noqa: B950
            "aten::copy_",  # Exception not raised, test_torch.py -k test_storage_meta_errors_cpu_int64  # noqa: B950
            "aten::constant_pad_nd",  # requires_grad mismatch, test_ops.py -k test_fake_crossref_backward_amp_istft_cuda_float32  # noqa: B950
            "aten::rot90",  # requires_grad mismatch! test_ops.py -k test_fake_crossref_backward_amp_rot90_cuda_float32  # noqa: B950
            "aten::as_strided_scatter",  # requires_grad mismatch, test_ops.py -k test_fake_crossref_backward_no_amp_as_strided_scatter_cuda_float32  # noqa: B950
        }:
            pass
        else:
            # Route the registration to the library matching the op's namespace.
            op_name = op_overload.name()
            if "mkldnn::" in op_name:
                _meta_lib_dont_use_me_use_register_meta_for_mkldnn.impl(op_overload, fn)
            elif "mkl::" in op_name:
                _meta_lib_dont_use_me_use_register_meta_for_mkl.impl(op_overload, fn)
            elif "onednn::" in op_name:
                _meta_lib_dont_use_me_use_register_meta_for_onednn.impl(op_overload, fn)
            elif "quantized::" in op_name:
                _meta_lib_dont_use_me_use_register_meta_for_quantized.impl(
                    op_overload, fn
                )
            else:
                _meta_lib_dont_use_me_use_register_meta.impl(op_overload, fn)


activate_meta()
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_ops.py b/env-llmeval/lib/python3.10/site-packages/torch/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..c78893bcbf7cd43a45c1f4ab82140cf03b2c43cd
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_ops.py
@@ -0,0 +1,938 @@
+import contextlib
+import ctypes
+import importlib
+import inspect
+import sys
+import types
+from typing import Any, Callable, Dict, List, Type, Union
+
+import torch._C
+import torch.utils._pytree as pytree
+from torch import _utils_internal
+from torch._functorch.pyfunctorch import dispatch_functorch
+
# Query `hasattr` only once.
# True when the interpreter exposes dlopen-flag control (POSIX platforms);
# probed once at import time so dl_open_guard() need not re-check per call.
_SET_GLOBAL_FLAGS = hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags")
+
+
@contextlib.contextmanager
def dl_open_guard():
    """
    Context manager that ORs RTLD_GLOBAL into the dynamic linker flags while a
    shared library of custom operators is opened, restoring the previous flags
    on exit.
    """
    if not _SET_GLOBAL_FLAGS:
        # Platform without dlopen-flag control (e.g. Windows): nothing to do.
        yield
        return
    saved_flags = sys.getdlopenflags()
    sys.setdlopenflags(saved_flags | ctypes.RTLD_GLOBAL)
    try:
        yield
    finally:
        sys.setdlopenflags(saved_flags)
+
+
class OperatorBase:
    """
    Base class for OpOverload (which represents C++ ATen operators) and HigherOrderOperator
    (which represents Python-only operators that are unrepresentable in TorchScript).

    Holds the three Python-side registration tables (per-dispatch-key kernels,
    per-TorchDispatchMode handlers, functorch transform rules) and the dispatch
    cache shared by both subclasses.
    """

    def __init__(self):
        # The dispatch cache precomputes a mapping of dispatch key that the
        # dispatcher wants to dispatch to, to an actual implementation of the
        # dispatch key. Confusingly, the actual implementation could *also* be a
        # dispatch key, but in this case, this refers to the C++ kernel that
        # was registered to some dispatch key. Aliases are permitted in the
        # latter but not the former; for example, you might lookup the
        # entry for AutogradCPU, and this maps you to the Autograd key for
        # the generic autograd kernel that works for all devices. Since this
        # is the Python dispatcher, you can also put an arbitrary Python
        # callable to call instead. This handler gets precisely the
        # args/kwargs that the operator was __call__'ed with.
        # NB: This name is hard-coded in torch/csrc/autograd/python_variable.cpp
        # for use with OpOverload; cache lookup is done entirely from C++
        # for speed.
        # TODO: The cache is NOT currently used by HigherOrderOperator, but it should!
        self._dispatch_cache: Dict[
            torch._C.DispatchKey, Union[torch._C.DispatchKey, Callable[..., Any]]
        ] = {}

        # This table allows you to override the behavior of a particular
        # dispatch key to call a custom Python function, rather than the
        # ordinary C++ configured behavior. This is the raison d'etre of
        # Python dispatcher: to let you program the dispatcher from Python
        # in case you need something unusual, and don't want to clobber
        # the existing registrations using the Python operator registration
        # API.
        self.py_kernels: Dict[torch._C.DispatchKey, Callable[..., Any]] = {}

        from torch.utils._python_dispatch import TorchDispatchMode

        # This table allows you to override the behavior of a particular
        # operator for a particular TorchDispatchMode. In practice,
        # we are using this mostly for ProxyTensorMode. Modes can be
        # thought of as an open world extension of dispatch keys, so it
        # makes sense that you should be able to register them, the same
        # way you can register dispatch keys.
        self.python_key_mode_table: Dict[
            Type[TorchDispatchMode], Callable[..., Any]
        ] = {}

        # This table allows you to override the behavior of functorch
        # transformations. NB: this currently only does something for
        # HigherOrderOperator
        self.functorch_table = {}

    def __call__(self, *args, **kwargs):
        # Subclasses define how an invocation is actually dispatched.
        raise NotImplementedError()

    def has_kernel_for_dispatch_key(self, k):
        """Return True if a Python kernel is registered for dispatch key `k`."""
        return k in self.py_kernels

    def has_kernel_for_any_dispatch_key(self, ks):
        """Return True if any non-alias key with a Python kernel is in keyset `ks`."""
        for k in self.py_kernels:
            if not torch._C._dispatch_is_alias_key(k) and ks.has(k):
                return True
        return False

    def py_impl(self, k):
        """
        Decorator factory: register the decorated function as the Python
        implementation for `k`, which may be a TorchDispatchMode subclass, a
        functorch TransformType, or a DispatchKey (other than Python).
        Returns the function unchanged; duplicate registration raises/asserts.
        """

        def inner(fn):
            if inspect.isclass(k) and issubclass(
                k, torch.utils._python_dispatch.TorchDispatchMode
            ):
                assert k not in self.python_key_mode_table
                # TODO(voz): Should we replace setting torch._C.DispatchKey.Python entirely with setting mode keys?
                self.python_key_mode_table[k] = fn
                self._dispatch_cache.clear()
                return fn

            if isinstance(k, torch._C._functorch.TransformType):
                assert k not in self.functorch_table
                self.functorch_table[k] = fn
                return fn

            assert isinstance(k, torch._C.DispatchKey)
            assert (
                k != torch._C.DispatchKey.Python
            ), "Please register a mode for the torch._C.DispatchKey.Python key instead."

            if k in self.py_kernels:
                raise RuntimeError(
                    f"Trying to override a python impl for {k} on operator {self.name()}"
                )
            self.py_kernels[k] = fn
            self._dispatch_cache.clear()
            return fn

        return inner

    # Registers an implementation to all **3** variants of functionalization that we have:
    # - DispatchKey.Functionalize
    # - functorch.TransformType.Functionalize
    # - FunctionalTensorMode
    # Example:
    #   @py_functionalize_impl
    #   def functionalize_rule(ctx, inner_f, *args):
    #       args_unwrapped = ctx.unwrap_tensors(args)
    #       with ctx.redispatch_to_next():
    #           out = ctx.functionalize(inner_f)(*args_unwrapped)
    #           return ctx.wrap_tensors(out)
    def py_functionalize_impl(self, fn):
        """Register `fn` under all three functionalization entry points at once."""
        from torch._subclasses.functional_tensor import (
            CppFunctionalizeAPI as _CppFunctionalizeAPI,
            FunctorchFunctionalizeAPI as _FunctorchFunctionalizeAPI,
            PythonFunctionalizeAPI as _PythonFunctionalizeAPI,
        )

        # Construct our three flavors of functionalization,
        # each of which have slightly different wrap/unwrap/redispatch policies
        def functionalize_dk_fn(*args, **kwargs):
            return fn(_CppFunctionalizeAPI(), *args, **kwargs)

        def functionalize_dispatch_mode_fn(mode, *args, **kwargs):
            # Mode is unused (there's a global FunctionalTensorMode that we can access)
            return fn(_PythonFunctionalizeAPI(), *args, **kwargs)

        def functionalize_functorch_fn(interpreter, *args, **kwargs):
            return fn(_FunctorchFunctionalizeAPI(interpreter), *args, **kwargs)

        self.py_impl(torch._C.DispatchKey.Functionalize)(functionalize_dk_fn)
        self.py_impl(torch._subclasses.functional_tensor.FunctionalTensorMode)(
            functionalize_dispatch_mode_fn
        )
        self.py_impl(torch._C._functorch.TransformType.Functionalize)(
            functionalize_functorch_fn
        )

        return fn

    def name(self):
        # Subclasses supply the operator's qualified name.
        raise NotImplementedError()
+
+
# Short module-level aliases for the torch._C dispatch helpers used repeatedly
# by resolve_key() below.
is_included_in_alias = torch._C._dispatch_is_included_in_alias

DispatchKey = torch._C.DispatchKey
+
+
# Equivalent to computeDispatchTableEntryWithDebug
def resolve_key(op: OperatorBase, k: DispatchKey):  # type: ignore[valid-type]
    """
    Given an operator and the dispatch key `k` the dispatcher selected, return
    the key (possibly an alias key such as CompositeExplicitAutograd) whose
    kernel should actually run, mirroring the C++ dispatch-table computation.

    Raises NotImplementedError when no kernel or backend fallback applies, and
    RuntimeError for the ambiguous AutogradOther case.
    """
    # 1. (Direct) operator registration
    if op.has_kernel_for_dispatch_key(k):
        return k
    # 2.1 Use CompositeExplicitAutogradNonFunctional kernel if available
    cand = DispatchKey.CompositeExplicitAutogradNonFunctional
    if (
        k == DispatchKey.Undefined or is_included_in_alias(k, cand)
    ) and op.has_kernel_for_dispatch_key(cand):
        return cand
    # 2.2 Use CompositeExplicitAutograd kernel if available
    cand = DispatchKey.CompositeExplicitAutograd
    if (
        k == DispatchKey.Undefined or is_included_in_alias(k, cand)
    ) and op.has_kernel_for_dispatch_key(cand):
        return cand
    # A backend kernel (for the backend extracted from autograd key `k`, or a
    # CompositeExplicitAutograd kernel) blocks the implicit-autograd branches below.
    has_backend_kernel = op.has_kernel_for_any_dispatch_key(
        torch._C._dispatch_get_backend_keyset_from_autograd(k)
    ) or op.has_kernel_for_dispatch_key(DispatchKey.CompositeExplicitAutograd)
    # 2.3. Use CompositeImplicitAutograd kernel if available
    cand = DispatchKey.CompositeImplicitAutogradNestedTensor
    if (
        (k != DispatchKey.Undefined and is_included_in_alias(k, cand))
        and op.has_kernel_for_dispatch_key(cand)
        and not has_backend_kernel
    ):
        return cand
    cand = DispatchKey.CompositeImplicitAutograd
    if (
        k == DispatchKey.Undefined or is_included_in_alias(k, cand)
    ) and op.has_kernel_for_dispatch_key(cand):
        if k == DispatchKey.AutogradOther and op.has_kernel_for_any_dispatch_key(
            torch._C._dispatch_autogradother_backends
        ):
            raise RuntimeError("ambiguous autogradother kernel")
        elif not has_backend_kernel:
            return cand
    # 2.4. For autograd backend keys, use kernel from DispatchKey::Autograd if available
    cand = DispatchKey.Autograd
    if is_included_in_alias(k, cand) and op.has_kernel_for_dispatch_key(cand):
        return cand
    # 2.5 Use kernel from DispatchKey::FuncTorchBatchedDecomposition if available
    cand = DispatchKey.FuncTorchBatchedDecomposition
    if is_included_in_alias(k, cand) and op.has_kernel_for_dispatch_key(cand):
        return cand
    # Backend fallback
    if torch._C._dispatch_has_backend_fallback(k):
        # The dispatch key itself will implicitly route to backend fallback.
        # This is probably not great for the pure Python implementation.
        return k
    raise NotImplementedError(f"could not find kernel for {op} at dispatch key {k}")
+
+
# Global registry of every HigherOrderOperator, keyed by name; consumed by the
# torch.ops.higher_order namespace (_PyOpNamespace) for attribute lookup.
_higher_order_ops = {}

# Dispatch keys that every HigherOrderOperator treats as fallthrough by
# default; removed from non_fallthrough_keys in HigherOrderOperator.__init__.
_HIGHER_ORDER_OP_DEFAULT_FALLTHROUGH_DISPATCH_KEYS = [
    DispatchKey.PythonDispatcher,  # type: ignore[attr-defined]
    DispatchKey.PythonTLSSnapshot,  # type: ignore[attr-defined]
    DispatchKey.ADInplaceOrView,
    DispatchKey.BackendSelect,
    DispatchKey.AutocastCPU,  # type: ignore[attr-defined]
    DispatchKey.AutocastCUDA,  # type: ignore[attr-defined]
]
+
+
class HigherOrderOperator(OperatorBase):
    """
    A Python-only operator (e.g. control-flow ops) dispatched entirely from
    Python; it appears as torch.ops.higher_order.{name}.
    """

    # The HigherOrderOperator will appear as torch.ops.higher_order.{name}
    #
    # If you're creating a new HigherOrderOperator, please do not change the
    # default. Adding operators to the global torch.ops namespace is a bad
    # practice due to name collisions.
    def __init__(self, name):
        super().__init__()
        self._name = name

        # Make _OpNamespace not scream, this whole name based association needs a good hard look
        self.__name__ = name
        # Register globally so torch.ops.higher_order.<name> resolves to us.
        _higher_order_ops[name] = self
        self._ns = "higher_order"

        # For a normal HigherOrderOperator instance, we will change its __module__ from torch._ops to
        # torch._ops.higher_order.
        # For an instance of subclass of HigherOrderOperator (e.g. customized higher order op),
        # the __module__ attribute will be kept unchanged.
        if self.__class__ is HigherOrderOperator:
            self_name_space = "." + self.namespace if self.namespace else ""
            self.__module__ = self.__module__ + self_name_space
        # Start with every key non-fallthrough, then carve out the defaults.
        self.non_fallthrough_keys = torch._C._dispatch_keyset_full()

        for dispatch_key in _HIGHER_ORDER_OP_DEFAULT_FALLTHROUGH_DISPATCH_KEYS:
            self.fallthrough(dispatch_key)

    def py_impl(self, k):
        """Register an impl; registering a DispatchKey also un-fallthroughs it."""
        if isinstance(k, torch._C.DispatchKey) and not self.non_fallthrough_keys.has(k):
            self.non_fallthrough_keys = self.non_fallthrough_keys.add(k)
        return super().py_impl(k)

    @property
    def namespace(self):
        # Always "higher_order"; set in __init__.
        return self._ns

    def fallthrough(self, dispatch_key):
        """Mark `dispatch_key` as fallthrough (excluded from key extraction)."""
        self.non_fallthrough_keys = self.non_fallthrough_keys.remove(dispatch_key)

    def dispatch(self, dispatch_key, *args, **kwargs):
        """Run the kernel registered for `dispatch_key` (pure-Python dispatcher)."""
        from torch.utils._python_dispatch import _get_current_dispatch_mode

        if dispatch_key in self._dispatch_cache:
            kernel = self._dispatch_cache[dispatch_key]
            assert not isinstance(kernel, torch._C.DispatchKey)
            return kernel(*args, **kwargs)

        if dispatch_key == torch._C.DispatchKey.FuncTorchDynamicLayerFrontMode:
            return dispatch_functorch(self, args, kwargs)

        if dispatch_key == torch._C.DispatchKey.Python:
            # The place to handle ProxyTorchDispatchMode, FakeTensorMode, etc
            from torch.utils._python_dispatch import _pop_mode_temporarily

            curr_mode = _get_current_dispatch_mode()
            assert (
                curr_mode is not None
            ), "Illegal invocation of dispatch on torch._C.DispatchKey.Python without a mode."
            assert (
                type(curr_mode) in self.python_key_mode_table
            ), f"Current active mode {curr_mode} not registered"
            handler = self.python_key_mode_table[type(curr_mode)]
            with _pop_mode_temporarily() as mode:
                return handler(mode, *args, **kwargs)

        functionality_key = torch._C._to_functionality_key(dispatch_key)  # type: ignore[attr-defined]
        if functionality_key in mode_stack_per_key():
            # The place to handle DispatchKey.PreDispatch
            curr_stack = mode_stack_per_key()[functionality_key]
            # The check for Python in the exclude set is so we properly respect `with no_dispatch()`
            # calls inside of a mode.
            if len(
                curr_stack
            ) > 0 and not torch._C._dispatch_tls_is_dispatch_key_excluded(
                DispatchKey.Python
            ):
                curr_mode = curr_stack[-1]
                pre_dispatch_modes = mode_stack_per_key().get(
                    DispatchKey.PreDispatch, []  # type: ignore[attr-defined]
                )
                handler = self.python_key_mode_table[type(curr_mode)]
                if len(pre_dispatch_modes) > 0:
                    with temporarily_pop_mode(pre_dispatch_modes) as mode:
                        return handler(mode, *args, **kwargs)

        final_key = resolve_key(self, dispatch_key)

        # This can currently fail due to backend fallbacks.  You just have to
        # register them by hand for HigherOrderOperator.
        if final_key not in self.py_kernels:
            raise NotImplementedError(
                f"could not find kernel for HigherOrderOperator {self._name} "
                f"at dispatch key {final_key} (resolved from {dispatch_key})"
            )
        self._dispatch_cache[dispatch_key] = self.py_kernels[final_key]
        kernel = self.py_kernels[final_key]
        # It's illegal to register DispatchKey to py_kernels, since there's no
        # C++ kernel to call into
        assert not isinstance(kernel, torch._C.DispatchKey)
        return kernel(*args, **kwargs)

    def __call__(self, *args, **kwargs):
        # Dynamo already traces the body of a HigherOrderOp beforehand, so
        # there is no need for it to trace into this dispatch machinery.
        import torch._dynamo
        from torch._dynamo import disable

        @disable
        def wrapper():
            flat_args = _to_flat_tuple(args, kwargs)
            if torch.overrides.has_torch_function(flat_args):
                return torch.overrides.handle_torch_function(
                    self, flat_args, *args, **kwargs
                )

            dispatch_key_set = _compute_keyset(args, kwargs, self.non_fallthrough_keys)
            return self.dispatch(
                dispatch_key_set.highestPriorityTypeId(), *args, **kwargs
            )

        return wrapper()

    def __str__(self):
        return f"{self.name()}"

    def name(self):
        return self._name
+
+
+def _to_flat_tuple(args, kwargs):
+ return pytree.arg_tree_leaves(*args, **kwargs)
+
+
+def _compute_keyset(args, kwargs, non_fallthrough_keys):
+ tensors = _get_tensors(args, kwargs)
+ return key_extractor(tensors, non_fallthrough_keys)
+
+
+def _get_tensors(args, kwargs):
+ flat_all = _to_flat_tuple(args, kwargs)
+ tensor_args = [t for t in flat_all if isinstance(t, torch.Tensor)]
+ return tuple(tensor_args)
+
+
+# Note - this should maintain identical impl to the C++ dispatcher key extraction logic
+# at ATen/core/dispatch/DispatchKeyExtractor.h
+def key_extractor(tensors, key_mask):
+ key_set = torch._C._dispatch_tls_local_include_set()
+ for tensor in tensors:
+ key_set = key_set | torch._C._dispatch_keys(tensor)
+ key_set = key_set - torch._C._dispatch_tls_local_exclude_set()
+ key_set = key_set & key_mask
+ return key_set
+
+
# Note [Per Dispatch Key Modes]
# In ordinary eager mode, we have a Python dispatch key that we attach
# a mode stack to.
# However - when the PyDispatcher is enabled, we extend this functionality
# such that every (functionality) dispatch key is allowed to have
# its own mode stack.
# This is controlled by passing a `torch._C.DispatchKey` into
# the mode constructor.
# Maps functionality dispatch key -> stack of modes registered for that key;
# mutated via push_mode_for_key / pop_mode_for_key below.
_mode_stack_per_key: Dict[torch._C.DispatchKey, List] = {}
+
+
# Per-dispatch-key mode variant.
# Temporarily pops the top of a given mode stack.
@contextlib.contextmanager
def temporarily_pop_mode(mode_stack):
    """Yield the stack's top mode with it popped; push it back on exit."""
    assert len(mode_stack) > 0
    popped = mode_stack.pop()
    try:
        yield popped
    finally:
        mode_stack.append(popped)
+
+
+def mode_stack_per_key():
+ global _mode_stack_per_key
+ return _mode_stack_per_key
+
+
+# Per-dispatch-key mode variant of push_mode().
+def push_mode_for_key(key, mode):
+ assert isinstance(key, torch._C.DispatchKey)
+ assert isinstance(mode, torch.utils._python_dispatch.TorchDispatchMode)
+ if key not in mode_stack_per_key():
+ mode_stack_per_key()[key] = []
+ mode_stack_per_key()[key].append(mode)
+
+
+# Per-dispatch-key mode variant of pop_mode().
+def pop_mode_for_key(key):
+ assert isinstance(key, torch._C.DispatchKey)
+ assert key in mode_stack_per_key()
+ curr_mode_stack = mode_stack_per_key()[key]
+ assert len(curr_mode_stack) > 0
+ return curr_mode_stack.pop()
+
+
# Set of OpOverloads whose dispatch result currently sits in a _dispatch_cache;
# tracked so caches can be invalidated in bulk.
cached_ops = set()


def add_cached_op(op_overload):
    """Record that `op_overload` has a cached dispatch entry."""
    # Mutating via .add() needs no `global` declaration.
    cached_ops.add(op_overload)


def reset_cached_ops():
    """Forget every recorded cached op."""
    cached_ops.clear()


def get_cached_ops():
    """Return the (shared, mutable) set of ops with cached dispatch entries."""
    return cached_ops
+
+
+# Each OpOverload object contains a pointer to a specific operator overload and a pointer to the parent `OpOverloadPacket` object.
+# You can obtain an OpOverload object through attribute query on OpOverloadPacket.
+class OpOverload(OperatorBase):
+ def __init__(self, overloadpacket, op, op_dk, schema, tags):
+ super().__init__()
+ self._op = op
+ self._op_dk = op_dk
+ self._schema = schema
+ self._overloadpacket = overloadpacket
+ self._tags = tags
+ self._overloadname = (
+ "default" if schema.overload_name == "" else schema.overload_name
+ )
+ self._name = self._schema.name
+ if schema.overload_name:
+ self._name += "." + schema.overload_name
+ self.__name__ = f"{self._schema.name.split('::')[1]}.{self._overloadname}"
+ self.__module__ = overloadpacket.__module__
+ op.__module__ = overloadpacket.__module__
+ self.__qualname__ = self._name
+ self.__annotations__ = {}
+
+ # If the OpOverload was constructed from a Library.def in Python.
+ self._defined_in_python = self.__qualname__ in torch.library._defs
+
+ # Logic replicated from aten/src/ATen/native/MathBitsFallback.h
+ is_write = None
+ for a in self._schema.arguments:
+ if a.alias_info is None:
+ continue
+ if is_write is None:
+ is_write = a.alias_info.is_write
+ else:
+ # We will conservatively call mixed mutable/non-mutable
+ # aliased inputs as NOT a view
+ is_write = a.alias_info.is_write or is_write
+ self.is_view = is_write is not None and not is_write
+
+ # it's a no-op since OpOverload object is immutable and must be unique for a given op overload.
+ def __deepcopy__(self, memo=None):
+ return self
+
+ def __repr__(self):
+ return "".format(
+ *self._schema.name.split("::"), self._overloadname
+ )
+
+ def __call__(self, *args, **kwargs):
+ return self._op(*args, **(kwargs or {}))
+
+ def __hash__(self):
+ return hash(self._op)
+
+ # `my_namespace.my_op_name.overload_name`
+ def __str__(self):
+ return "{}.{}.{}".format(*self._schema.name.split("::"), self._overloadname)
+
+ def has_kernel_for_dispatch_key(self, k):
+ return super().has_kernel_for_dispatch_key(
+ k
+ ) or torch._C._dispatch_has_kernel_for_dispatch_key(self.name(), k)
+
+ def has_kernel_for_any_dispatch_key(self, ks):
+ return torch._C._dispatch_has_kernel_for_any_dispatch_key(
+ self.name(), ks
+ ) or super().has_kernel_for_any_dispatch_key(ks)
+
+ @property
+ def namespace(self):
+ return self._schema.name.split("::")[0]
+
+ def decompose(self, *args, **kwargs):
+ dk = torch._C.DispatchKey.CompositeImplicitAutograd
+ if dk in self.py_kernels:
+ # NB: This branch is not too necessary anymore, because we can
+ # apply Python CompositeImplicitAutograd *before* tracing
+ # using Python dispatcher (also taking advantage of the autograd
+ # formula). But it's included for completeness
+ return self.py_kernels[dk](*args, **kwargs)
+ elif torch._C._dispatch_has_kernel_for_dispatch_key(self.name(), dk):
+ return self._op_dk(dk, *args, **kwargs)
+ else:
+ return NotImplemented
+
+ # Remove a dispatch key from the dispatch cache. This will force it to get
+ # recomputed the next time. Does nothing
+ # WARNING: if you register a dispatch key to py_kernels of an OpOverload,
+ # calling _del_dispatch on that key is NOT sufficient to apply your change,
+ # because a single registration may affect MULTIPLE dispatch keys (e.g.,
+ # registering Autograd affects AutogradCPU). del_dispatch is to be used
+ # only if you are specifically modifying how get_dispatch handles a
+ # particular input 'key'.
+ def _uncache_dispatch(self, key):
+ self._dispatch_cache.pop(key, None)
+
+ # This implements the pre-computation logic for the Python dispatcher.
+ def _get_dispatch(self, key):
+ # This is only called upon a cache miss
+ assert key not in self._dispatch_cache, f"{self} {key}"
+
+ if key == torch._C.DispatchKey.Python:
+ if not self.python_key_mode_table:
+ self._dispatch_cache[key] = key
+ add_cached_op(self)
+ return key
+
+ def handler(*args, **kwargs):
+ from torch.utils._python_dispatch import _get_current_dispatch_mode
+
+ # TODO: We also need to handle tensor subclasses here
+ # TODO(voz): We should walk all the nodes here / turn it into a list, topmode is ok for now.
+ curr_mode = type(_get_current_dispatch_mode())
+ assert (
+ curr_mode is not None
+ ), "Illegal invocation of dispatch on torch._C.DispatchKey.Python without a mode."
+ if curr_mode not in self.python_key_mode_table:
+ # TODO: This path is slow, should generally encourage this
+ # case to not happen
+ return self._op_dk(key, *args, **kwargs)
+ # TODO(voz): The idea behind this is that we do not yet support dispatch by key + mode, only key.
+ return self.python_key_mode_table[curr_mode](*args, **kwargs)
+
+ self._dispatch_cache[key] = handler
+ add_cached_op(self)
+ return handler
+
+ cache_result = True
+ functionality_key = torch._C._to_functionality_key(key) # type: ignore[attr-defined]
+ if functionality_key in mode_stack_per_key():
+ curr_stack = mode_stack_per_key()[functionality_key]
+ # The check for Python in the exclude set is so we properly respect `with no_dispatch()`
+ # calls inside of a mode.
+ if len(
+ curr_stack
+ ) > 0 and not torch._C._dispatch_tls_is_dispatch_key_excluded(
+ DispatchKey.Python
+ ):
+
+ def handler(*args, **kwargs):
+ # This logic is meant to be a python parallel of handle_torch_function_no_python_arg_parser.
+ with temporarily_pop_mode(curr_stack) as curr_mode:
+ assert hasattr(curr_mode, "__torch_dispatch__")
+ overload_types = []
+ args_flattened = pytree.arg_tree_leaves(*args, **kwargs)
+ for a in args_flattened:
+ # TODO: need to double check the semantics of the "types" argument to torch_dispatch.
+ # It's generated in PyInterpreter.cpp, but seems to be generated in two places,
+ # where in one case we only include tensors with the python key, and in another
+ # we include **all** tensors.
+ if isinstance(a, torch.Tensor) and torch._C._dispatch_keys(
+ a
+ ).has(torch._C.DispatchKey.Python):
+ overload_types.append(type(a))
+ # TODO: check that I got these args correct (in C++, we pass in "0000"??)
+ return curr_mode.__torch_dispatch__(
+ self, overload_types, args, kwargs
+ )
+
+ # Note [Not Caching Per-Dispatch-Key Mode Handlers]
+ # Note that we're not caching this handler. There isn't really a point, since the slow bit
+ # is the handler itself (in python).
+ # Also, not caching means that we don't have to reset the cache when any existing
+ # modes go out of scope (which in of itself takes time to loop through all operators).
+ return handler
+ else:
+ # See Note [Not Caching Per-Dispatch-Key Mode Handlers]
+ cache_result = False
+
+ final_key = resolve_key(self, key)
+
+ # TODO: We could potentially have lots of debugging wrappers against
+ # dispatch keys; design some general registration mechanism instead of
+ # having if statement for each of them
+ if key == torch._C.DispatchKey.Functionalize:
+ import torch._dispatch.python as pydispatch
+
+ if pydispatch.CROSSREF_FUNCTIONALIZE:
+ handler = pydispatch.make_crossref_functionalize(self, final_key)
+ if cache_result:
+ self._dispatch_cache[key] = handler
+ add_cached_op(self)
+ return handler
+
+ # print(self, key, final_key)
+ r = self.py_kernels.get(final_key, final_key)
+ if cache_result:
+ self._dispatch_cache[key] = r
+ add_cached_op(self)
+ return r
+
+ def name(self):
+ return self._name
+
+ @property
+ def overloadpacket(self):
+ return self._overloadpacket
+
+ @property
+ def op(self):
+ return self._op
+
+ @property
+ def tags(self):
+ return self._tags
+
+ # TODO: add more methods to expose information about input and output arguments
+
+
+# The OpOverloadPacket class contains a pointer to a base unresolved operator that doesn't correspond to a specific operator overload.
+# You can obtain an OpOverload object through attribute query on it.
class OpOverloadPacket:
    """
    An unresolved operator (e.g. ``aten::add``) that may have several
    overloads; attribute access resolves a name to a cached OpOverload.
    """

    def __init__(self, qualified_op_name, op_name, op, overload_names):
        # These attributes are accessible on the object through the properties
        # defined below but are immutable
        self._qualified_op_name = qualified_op_name
        self.__name__ = op_name
        self._op = op
        self._overload_names = overload_names
        # Overload names resolved so far, in resolution order (see __iter__).
        self._dir = []

    # it's a no-op since OpOverloadPacket object is immutable and must be unique for a given op.
    def __deepcopy__(self, memo=None):
        return self

    def __repr__(self):
        # BUGFIX: the template here was previously the empty string "" (the
        # "<OpOverloadPacket(...)>" literal had been stripped), so repr()
        # always returned "" while silently discarding its format arguments.
        return "<OpOverloadPacket(op='{}.{}')>".format(
            *self._qualified_op_name.split("::")
        )

    def __hash__(self):
        return hash(self._op)

    def __str__(self):
        return "{}.{}".format(*self._qualified_op_name.split("::"))

    @property
    def op(self):
        return self._op

    def __getattr__(self, key):
        """Resolve overload name `key` (e.g. "default", "Tensor") to an OpOverload."""
        # It is not a valid op_name when __file__ is passed in
        if key == "__file__":
            return "torch.ops"

        # ensure that query for dunder attributes that does not exist on
        # opoverloadpacket but instead exists on the self._op object does not unnecessarily call
        # `_get_operation_overload` (which is an expensive operation).
        # This is done to prevent any potential slowdown. This list can be extended
        # if there exists other attributes like `__name__` that only exist on self._op and not on the
        # opoverloadpacket.
        # This is ok since we are guaranteed that an overload name for an aten op can't start with '__'
        try:
            if key.startswith("__"):
                return getattr(self._op, key)
        except AttributeError:
            # for consistency because it seems weird to
            # throw an attribute error with a message containing
            # an object name different from the one the attribute
            # query was performed on.
            raise AttributeError(
                f"'{str(self)}' can't have an overload name beginning with '__' and the "
                f"underlying op {str(self._op)} has no attribute {key} either."
            ) from None

        try:
            # This is ok since we are guaranteed that an overload name for an aten op can't be 'default'
            use_key = "" if key == "default" else key
            # TODO: disallow access to overloads registered by JIT
            op_, op_dk_, tags = torch._C._get_operation_overload(
                self._qualified_op_name, use_key
            )
            schema = torch._C._get_schema(self._qualified_op_name, use_key)
            overload = OpOverload(self, op_, op_dk_, schema, tags)
            # cache the overload object
            setattr(self, key, overload)
            self._dir.append(key)
            return overload
        except RuntimeError:
            raise AttributeError(
                f"The underlying op of '{str(self)}' has no overload name '{key}'"
            ) from None

    def __iter__(self):
        return iter(self._dir)

    def __call__(self, *args, **kwargs):
        # overloading __call__ to ensure torch.ops.foo.bar()
        # is still callable from JIT
        # We save the function ptr as the `op` attribute on
        # OpOverloadPacket to access it here.
        return self._op(*args, **(kwargs or {}))

    # TODO: use this to make a __dir__
    def overloads(self):
        """Return the available overload names, rendering "" as "default"."""
        return [n if n else "default" for n in self._overload_names]
+
+
+# Resolution of torch.fn is different from torch.ops.aten.fn
+# torch.fn uses the Python argparser, matches with the
+# appropriate schema, and calls into the unboxed version of the method
+# torch.ops.aten.fn resolution is done via the mechanism defined in JIT.
+# JIT creates a stack of all the overloads and then tries to match the
+# correct one at runtime and always calls into the boxed version of the method
+# Autograd codegen creates VariableType, TracerType,
+# inplace or view type and python bindings.
+# Aten codegen generates tensor methods for the tensor class.
+
+# _OpNamespace is a subclass of ModuleType because the torch script
+# allows attribute lookups on modules only. Since we want torch.ops.foo.bar()
+# to work from script, we need to ensure ops and foo are modules
+
+
+class _OpNamespace(types.ModuleType):
+ """
+ An op namespace to dynamically bind Operators into Python.
+
+ Say a user has created a custom Operator called "my_namespace::my_op". To
+ call this op, the user will write torch.ops.my_namespace.my_op(...).
+ At startup, this operation will not yet be bound into Python. Instead, the
+ following sequence of magic tricks will occur:
+ 1. `torch.ops.my_namespace` will invoke the `__getattr__` magic method
+ on the `torch.ops` object, which will create a new `_OpNamespace`
+ object called `my_namespace` and set it as an attribute on the `ops`
+ object.
+ 2. `torch.ops.my_namespace.my_op` will then invoke `__getattr__` on
+ the `my_namespace` object, which will retrieve the operation via
+ `torch.get_operation`, a function bound from C++, and then in a similar
+ fashion bind this new object onto the `my_namespace` object.
+ 3. `torch.ops.my_namespace.my_op(...)` then calls this new operation
+ and subsequent accesses will incur no further lookup (the namespace and
+ operation will already exist).
+ """
+
+ def __init__(self, name):
+ super().__init__("torch.ops." + name)
+ self.name = name
+ self._dir = []
+
+ def __iter__(self):
+ return iter(self._dir)
+
+ def __getattr__(self, op_name):
+ # It is not a valid op_name when __file__ is passed in
+ if op_name == "__file__":
+ return "torch.ops"
+ elif op_name in ["__origin__", "__self__"]:
+ raise AttributeError(
+ f"Invalid attribute '{op_name}' for '_OpNamespace' '{self.name}'"
+ )
+
+ # Get the op `my_namespace::my_op` if available. This will also check
+ # for overloads and raise an exception if there are more than one.
+ namespace_name = self.name
+ qualified_op_name = f"{namespace_name}::{op_name}"
+ try:
+ op, overload_names = torch._C._jit_get_operation(qualified_op_name)
+ if op is None:
+ raise AttributeError(
+ f"'_OpNamespace' '{self.name}' object has no attribute '{op_name}'"
+ )
+ except RuntimeError as e:
+ # Turn this into AttributeError so getattr(obj, key, default)
+ # works (this is called by TorchScript with __origin__)
+ raise AttributeError(
+ f"'_OpNamespace' '{self.name}' object has no attribute '{op_name}'"
+ ) from e
+
+ # let the script frontend know that op is identical to the builtin op
+ # with qualified_op_name
+ torch.jit._builtins._register_builtin(op, qualified_op_name)
+ op.__module__ = self.__module__ + "." + namespace_name
+ opoverloadpacket = OpOverloadPacket(
+ qualified_op_name, op_name, op, overload_names
+ )
+ opoverloadpacket.__module__ = self.__module__ + "." + namespace_name
+ # cache the opoverloadpacket to ensure that each op corresponds to
+ # a unique OpOverloadPacket object
+ setattr(self, op_name, opoverloadpacket)
+ self._dir.append(op_name)
+ return opoverloadpacket
+
+
+class _PyOpNamespace(_OpNamespace):
+ def __init__(self, name, ops):
+ super().__init__(name)
+ self._ops = ops
+
+ def __getattr__(self, name):
+ # Following _OpNamespace.__getattr__, we cache the op on the _PyOpNamespace object.
+ op = self._ops.get(name, None)
+ if op is None:
+ raise AttributeError(
+ f"'_PyOpNamespace' '{self.name}' object has no attribute '{name}'"
+ )
+ setattr(self, name, op)
+ return op
+
+
+class _Ops(types.ModuleType):
+ __file__ = "_ops.py"
+
+ def __init__(self):
+ super().__init__("torch.ops")
+ self.loaded_libraries = set()
+ self._higher_order_op_namespace = _PyOpNamespace(
+ "torch.ops.higher_order", _higher_order_ops
+ )
+ self._dir = []
+
+ def __getattr__(self, name):
+ # Check if the name is a HigherOrderOperator
+ if name == "higher_order":
+ return self._higher_order_op_namespace
+
+ # Here we are creating `torch.ops.my_namespace`
+ namespace = _OpNamespace(name)
+ setattr(self, name, namespace)
+ self._dir.append(name)
+ return namespace
+
+ def __iter__(self):
+ return iter(self._dir)
+
+ def import_module(self, module):
+ """
+ Imports a Python module that has torch.library registrations.
+
+ Generally, to extend PyTorch with custom operators, a user will
+ create a Python module whose import triggers registration of
+ the custom operators via a torch.ops.load_library call or a call
+ to one or more torch.library.* APIs.
+
+ It is unexpected for Python modules to have side effects, so some
+ linters and formatters will complain. Use this API to import Python
+ modules that contain these torch.library side effects.
+
+ Args:
+ module (str): The name of the Python module to import
+
+ """
+ importlib.import_module(module)
+
+ def load_library(self, path):
+ """
+ Loads a shared library from the given path into the current process.
+
+ The library being loaded may run global initialization code to register
+ custom operators with the PyTorch JIT runtime. This allows dynamically
+ loading custom operators. For this, you should compile your operator
+ and the static registration code into a shared library object, and then
+ call ``torch.ops.load_library('path/to/libcustom.so')`` to load the
+ shared object.
+
+ After the library is loaded, it is added to the
+ ``torch.ops.loaded_libraries`` attribute, a set that may be inspected
+ for the paths of all libraries loaded using this function.
+
+ Args:
+ path (str): A path to a shared library to load.
+ """
+ if torch._running_with_deploy():
+ return
+
+ path = _utils_internal.resolve_library_path(path)
+ with dl_open_guard():
+ # Import the shared library into the process, thus running its
+ # static (global) initialization code in order to register custom
+ # operators with the JIT.
+ ctypes.CDLL(path)
+ self.loaded_libraries.add(path)
+
+
+# The ops "namespace"
+ops = _Ops()
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_sources.py b/env-llmeval/lib/python3.10/site-packages/torch/_sources.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f56bd8ef2473aa9c35ad6232448c9d5d44b8056
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_sources.py
@@ -0,0 +1,137 @@
+import ast
+import functools
+import inspect
+from textwrap import dedent
+from typing import Any, List, NamedTuple, Optional, Tuple
+
+from torch._C import ErrorReport
+from torch._C._jit_tree_views import SourceRangeFactory
+
+
def get_source_lines_and_file(
    obj: Any,
    error_msg: Optional[str] = None,
) -> Tuple[List[str], int, Optional[str]]:
    """
    Wrapper around :func:`inspect.getsourcelines` and :func:`inspect.getsourcefile`.

    Args:
        obj: object (function, class, ...) whose source is wanted.
        error_msg: extra text appended to the failure message, if any.

    Returns:
        A tuple ``(sourcelines, file_lineno, filename)``; ``filename`` is
        ``None`` when the source file cannot be determined.

    Raises:
        OSError: if the source of ``obj`` is unavailable.
    """
    source_file = None  # remains None if getsourcefile itself fails
    try:
        source_file = inspect.getsourcefile(obj)
        lines, starting_lineno = inspect.getsourcelines(obj)
    except OSError as exc:
        message = (
            f"Can't get source for {obj}. TorchScript requires source access in "
            "order to carry out compilation, make sure original .py files are "
            "available."
        )
        if error_msg:
            message = message + "\n" + error_msg
        raise OSError(message) from exc

    return lines, starting_lineno, source_file
+
+
def normalize_source_lines(sourcelines: List[str]) -> List[str]:
    r"""
    Re-align the lines around a function definition to its ``def`` line.

    Finds the indentation level of the line containing ``def`` and pushes every
    other line to at least that level. This tolerates comments and continued
    string literals written at a shallower indentation than the function body.

    Args:
        sourcelines: function source code, separated into lines by the
            '\n' character
    Returns:
        A list of source lines that have been correctly aligned
    """
    # Locate the ``def`` line. Lambdas have none; in that case the input is
    # returned untouched (compilation of lambdas is rejected later, in
    # ``parse_def()``, but we might want to handle this case in the future).
    def_index = None
    for line_number, line in enumerate(sourcelines):
        if line.lstrip().startswith("def"):
            def_index = line_number
            break
    if def_index is None:
        return sourcelines

    def_line = sourcelines[def_index]
    # Everything before the "def" keyword on that line is its indentation.
    indent = def_line.split("def")[0]

    def realign(line: str) -> str:
        # Drop the function's indentation if the line carries it, then
        # re-apply it, so shallower lines are pushed up to the def's level.
        body = line[len(indent):] if line.startswith(indent) else line
        return indent + body

    realigned = [realign(line) for line in sourcelines[:def_index]]
    realigned.append(def_line)
    realigned.extend(realign(line) for line in sourcelines[def_index + 1:])
    return realigned
+
+
# Thin wrapper around SourceRangeFactory to store extra metadata
# about the function-to-be-compiled.
class SourceContext(SourceRangeFactory):
    def __init__(
        self,
        source,
        filename,
        file_lineno,
        leading_whitespace_len,
        uses_true_division=True,
        funcname=None,
    ):
        super().__init__(source, filename, file_lineno, leading_whitespace_len)
        # NOTE(review): presumably consumed by the TorchScript frontend when
        # lowering division (Py3 `/` is true division) — not visible here.
        self.uses_true_division = uses_true_division
        self.filename = filename
        self.funcname = funcname
+
+
@functools.lru_cache(maxsize=None)
def make_source_context(*args):
    # Memoized constructor: identical (source, filename, lineno, ...) argument
    # tuples share one SourceContext instance across repeated compilations.
    return SourceContext(*args)
+
+
def fake_range():
    # Placeholder source range (offsets 0-1 of an empty source) for IR that
    # has no real Python source location.
    return SourceContext("", None, 0, 0).make_raw_range(0, 1)
+
+
class ParsedDef(NamedTuple):
    """Everything produced by parsing a function for compilation."""

    ast: ast.Module  # parsed (dedented) source
    ctx: SourceContext  # source-range factory for error reporting
    source: str  # normalized source text the AST was parsed from
    filename: Optional[str]  # None if the source file could not be determined
    file_lineno: int  # line at which the function starts within its file
+
+
def parse_def(fn):
    """
    Parse a function into a :class:`ParsedDef`.

    Retrieves and normalizes ``fn``'s source, parses it into an AST, and
    builds the :class:`SourceContext` used for error reporting.

    Args:
        fn: the function object to parse.
    Returns:
        A :class:`ParsedDef` describing ``fn``.
    Raises:
        RuntimeError: if the source does not contain exactly one top-level
            function definition (e.g. for lambdas).
        OSError: if the source of ``fn`` is unavailable.
    """
    sourcelines, file_lineno, filename = get_source_lines_and_file(
        fn, ErrorReport.call_stack()
    )
    sourcelines = normalize_source_lines(sourcelines)
    source = "".join(sourcelines)
    dedent_src = dedent(source)
    py_ast = ast.parse(dedent_src)
    if len(py_ast.body) != 1 or not isinstance(py_ast.body[0], ast.FunctionDef):
        # Report the real location: the message previously hard-coded
        # "(unknown)" where the filename we already hold belongs.
        raise RuntimeError(
            f"Expected a single top-level function: {filename}:{file_lineno}"
        )
    # How much leading indentation dedent() stripped from the first line, so
    # reported column numbers line up with the original file.
    leading_whitespace_len = len(source.split("\n", 1)[0]) - len(
        dedent_src.split("\n", 1)[0]
    )
    ctx = make_source_context(
        source, filename, file_lineno, leading_whitespace_len, True, fn.__name__
    )
    return ParsedDef(py_ast, ctx, source, filename, file_lineno)
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_storage_docs.py b/env-llmeval/lib/python3.10/site-packages/torch/_storage_docs.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d6df58d2b6b98e21f022e39dc1d140157fa492e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_storage_docs.py
@@ -0,0 +1,43 @@
+"""Adds docstrings to Storage functions"""
+
+import torch._C
+from torch._C import _add_docstr as add_docstr
+
+
# Names of the torch._C storage classes that receive docstrings below.
storage_classes = [
    "StorageBase",
]
+
+
def add_docstr_all(method, docstr):
    """Attach ``docstr`` to ``method`` on every class in ``storage_classes``.

    Classes that do not define ``method`` are skipped silently, so one call
    can target the union of methods across the listed storage classes.
    """
    for cls_name in storage_classes:
        cls = getattr(torch._C, cls_name)
        try:
            add_docstr(getattr(cls, method), docstr)
        except AttributeError:
            # Deliberate best-effort: not every class has every method.
            pass
+
+
# Install the docstring for Storage.from_file. The reST link below needs an
# explicit URL target and the class is spelled ``UntypedStorage``.
add_docstr_all(
    "from_file",
    """
from_file(filename, shared=False, size=0) -> Storage

Creates a CPU storage backed by a memory-mapped file.

If ``shared`` is ``True``, then memory is shared between all processes.
All changes are written to the file. If ``shared`` is ``False``, then the changes on
the storage do not affect the file.

``size`` is the number of elements in the storage. If ``shared`` is ``False``,
then the file must contain at least ``size * sizeof(Type)`` bytes
(``Type`` is the type of storage, in the case of an ``UntypedStorage`` the file must contain at
least ``size`` bytes). If ``shared`` is ``True`` the file will be created if needed.

Args:
    filename (str): file name to map
    shared (bool): whether to share memory (whether ``MAP_SHARED`` or ``MAP_PRIVATE`` is passed to the
        underlying `mmap(2) call <https://man7.org/linux/man-pages/man2/mmap.2.html>`_)
    size (int): number of elements in the storage
""",
)
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_streambase.py b/env-llmeval/lib/python3.10/site-packages/torch/_streambase.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d4737563ddb66259f5a365b193a45d4b9945ef6
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_streambase.py
@@ -0,0 +1,45 @@
+from abc import ABC, abstractmethod
+
+
class _StreamBase(ABC):
    r"""Base stream class abstraction for multi-backend Streams to inherit from."""

    @abstractmethod
    def wait_event(self, event):
        # Make all future work submitted to this stream wait for ``event``.
        raise NotImplementedError()

    @abstractmethod
    def wait_stream(self, stream):
        # Make all future work on this stream wait for ``stream``'s queued work.
        raise NotImplementedError()

    @abstractmethod
    def record_event(self, event=None):
        # Record an event on this stream (creating one if ``event`` is None).
        raise NotImplementedError()

    @abstractmethod
    def query(self):
        # Non-blocking check whether all queued work has completed.
        raise NotImplementedError()

    @abstractmethod
    def synchronize(self):
        # Block until all queued work on this stream has completed.
        raise NotImplementedError()

    @abstractmethod
    def __eq__(self, stream):
        raise NotImplementedError()
+
+
class _EventBase(ABC):
    r"""Base Event class abstraction for multi-backend Events to inherit from."""

    @abstractmethod
    def wait(self, stream=None):
        # Make ``stream`` (or the current stream) wait for this event.
        raise NotImplementedError()

    @abstractmethod
    def query(self):
        # Non-blocking check whether the event has been recorded/completed.
        raise NotImplementedError()

    @abstractmethod
    def synchronize(self):
        # Block the calling thread until the event completes.
        raise NotImplementedError()
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_tensor.py b/env-llmeval/lib/python3.10/site-packages/torch/_tensor.py
new file mode 100644
index 0000000000000000000000000000000000000000..3aa0cee639d9f4efab1ba365128f7120607435ef
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_tensor.py
@@ -0,0 +1,1518 @@
+import copyreg
+import enum
+import functools
+import warnings
+from collections import OrderedDict
+from copy import deepcopy
+from numbers import Number
+from typing import Any, Dict, Optional, Tuple, Union
+
+import torch
+import torch._C as _C
+import torch.utils.hooks as hooks
+from torch._namedtensor_internals import (
+ check_serializing_named_tensor,
+ is_ellipsis,
+ resolve_ellipsis,
+ single_ellipsis_index,
+ unzip_namedshape,
+ update_names,
+)
+from torch.overrides import (
+ get_default_nowrap_functions,
+ handle_torch_function,
+ has_torch_function,
+ has_torch_function_unary,
+ has_torch_function_variadic,
+)
+from torch.utils.dlpack import DLDeviceType
+
+
def _handle_torch_function_and_wrap_type_error_to_not_implemented(f):
    """Decorator for binary-op style functions: dispatch through
    ``__torch_function__`` when any argument requires it, and translate
    ``TypeError`` into ``NotImplemented`` so Python can try the reflected
    operation on the other operand.
    """
    assigned = functools.WRAPPER_ASSIGNMENTS

    @functools.wraps(f, assigned=assigned)
    def wrapped(*args, **kwargs):
        try:
            # See https://github.com/pytorch/pytorch/issues/75462
            if has_torch_function(args):
                return handle_torch_function(wrapped, args, *args, **kwargs)
            return f(*args, **kwargs)
        except TypeError:
            return NotImplemented

    return wrapped
+
+
# Should not be used, this is kept only for BC of loading old serialized Tensor subclasses
def _rebuild_from_type(func, type, args, dict):
    # NOTE: parameter names shadow builtins but are part of the pickled
    # protocol for old checkpoints, so they cannot be renamed.
    if type is Tensor:
        return func(*args)

    # Rebuild as plain Tensor, then re-wrap as the subclass and restore state.
    ret = func(*args).as_subclass(type)
    ret.__dict__ = dict
    return ret
+
+
def _rebuild_from_type_v2(func, new_type, args, state):
    """Pickle helper: rebuild a tensor (or subclass) and restore its Python state."""
    ret = func(*args)
    if type(ret) is not new_type:
        ret = ret.as_subclass(new_type)
    # Tensor does define __setstate__ even though it doesn't define
    # __getstate__. So only use __setstate__ if it is NOT the one defined
    # on Tensor
    if (
        getattr(ret.__class__, "__setstate__", Tensor.__setstate__)
        is not Tensor.__setstate__
    ):
        ret.__setstate__(state)
    else:
        ret = torch._utils._set_obj_state(ret, state)
    return ret
+
+
+# NB: If you subclass Tensor, and want to share the subclassed class
+# across processes, you must also update torch/multiprocessing/reductions.py
+# to define a ForkingPickler serialization mode for the class.
+#
+# NB: If you add a new method to Tensor, you must update
+# torch/_C/__init__.pyi.in to add a type annotation for your method;
+# otherwise, it will not show up in autocomplete.
+class Tensor(torch._C.TensorBase):
    def __deepcopy__(self, memo):
        """Implement :func:`copy.deepcopy` for tensors (leaf tensors only)."""
        if has_torch_function_unary(self):
            return handle_torch_function(Tensor.__deepcopy__, (self,), self, memo)
        if not self.is_leaf:
            raise RuntimeError(
                "Only Tensors created explicitly by the user "
                "(graph leaves) support the deepcopy protocol at the moment. "
                "If you were attempting to deepcopy a module, this may be because "
                "of a torch.nn.utils.weight_norm usage, "
                "see https://github.com/pytorch/pytorch/pull/103001"
            )
        if id(self) in memo:
            return memo[id(self)]
        with torch.no_grad():
            # TODO: skipping storage copy is wrong for meta, as meta
            # does accurate alias tracking; however, the code below
            # doesn't work because of
            # https://github.com/pytorch/pytorch/issues/47442
            # Update the test in test_serialization if you remove 'meta' from here
            if (
                self.is_sparse
                or self.device.type
                in ["lazy", "xla", "mtia", "mps", "ort", "meta", "ipu"]
                or (
                    not torch._C._has_storage(self)
                    and self.device.type == torch._C._get_privateuse1_backend_name()
                )
                or (type(self) is not Tensor and self.data_ptr() == 0)
            ):
                # Backends without accessible storage (and wrapper subclasses)
                # are copied via clone() instead of copying storage directly.
                new_tensor = self.clone()
                if type(new_tensor) is not type(self):
                    raise RuntimeError(
                        "The default implementation of __deepcopy__() for wrapper subclasses "
                        "only works for subclass types that implement clone() and for which "
                        "cloning returns another instance of the same subclass. You should either "
                        "properly implement clone() for your subclass or override __deepcopy__() "
                        "if it is intended behavior for clone() to return an instance of a "
                        "different type."
                    )
            else:
                # Deep-copy the storage first; memo keeps views that shared
                # storage in the original sharing storage in the copy.
                new_storage = self._typed_storage()._deepcopy(memo)
                if self.is_quantized:
                    # quantizer_params can be different type based on torch attribute
                    quantizer_params: Union[
                        Tuple[torch.qscheme, float, int],
                        Tuple[torch.qscheme, Tensor, Tensor, int],
                    ]
                    if self.qscheme() == torch.per_tensor_affine:
                        quantizer_params = (
                            self.qscheme(),
                            self.q_scale(),
                            self.q_zero_point(),
                        )
                    elif self.qscheme() in (
                        torch.per_channel_affine,
                        torch.per_channel_affine_float_qparams,
                    ):
                        quantizer_params = (
                            self.qscheme(),
                            self.q_per_channel_scales(),
                            self.q_per_channel_zero_points(),
                            self.q_per_channel_axis(),
                        )
                    else:
                        raise RuntimeError(
                            f"Unsupported qscheme {self.qscheme()} in deepcopy"
                        )
                    # TODO: Once we decide to break serialization FC, no longer
                    # need to wrap with TypedStorage
                    new_tensor = torch._utils._rebuild_qtensor(
                        torch.storage.TypedStorage(
                            wrap_storage=new_storage._untyped_storage,
                            dtype=self.dtype,
                            _internal=True,
                        ),
                        self.storage_offset(),
                        self.size(),
                        self.stride(),
                        quantizer_params,
                        self.requires_grad,
                        self._backward_hooks,
                    )
                    if type(new_tensor) is not type(self):
                        raise RuntimeError(
                            "The default implementation of __deepcopy__() for quantized tensors "
                            "expects the tensor returned by torch._utils._rebuild_qtensor() to "
                            "match the type of the instance being copied. If you encounter this, "
                            "please open an issue on PyTorch's GitHub."
                        )
                else:
                    new_tensor = self.new_empty([])
                    if type(new_tensor) is not type(self):
                        raise RuntimeError(
                            "The default implementation of __deepcopy__() for non-wrapper subclasses "
                            "only works for subclass types that implement new_empty() and for which "
                            "that function returns another instance of the same subclass. You should "
                            "either properly implement new_empty() for your subclass or override "
                            "__deepcopy__() if it is intended behavior for new_empty() to return "
                            "an instance of a different type."
                        )
                    new_tensor.set_(
                        new_storage, self.storage_offset(), self.size(), self.stride()
                    )
                if self.is_conj():
                    new_tensor = new_tensor.conj_physical()
                if self.is_neg():
                    new_tensor = new_tensor.neg()
            if self.requires_grad:
                new_tensor.requires_grad_()
            if self.grad is not None:
                new_tensor.grad = self.grad.__deepcopy__(memo)

        if type(self) is not Tensor:
            if type(new_tensor) is not type(self):
                raise RuntimeError(
                    "Type of deepcopy result does not match the type of the source tensor. "
                    "If you encounter this, please open an issue on PyTorch's GitHub."
                )

            # Plain Tensors don't have slots
            slots_to_save = copyreg._slotnames(self.__class__)  # type: ignore[attr-defined]
            for slot in slots_to_save:
                if hasattr(self, slot):
                    setattr(new_tensor, slot, deepcopy(getattr(self, slot), memo))

        new_tensor.__dict__ = deepcopy(self.__dict__, memo)

        memo[id(self)] = new_tensor
        return new_tensor
+
    def __reduce_ex__(self, proto):
        """Pickle protocol entry point; delegates to ``_reduce_ex_internal``."""
        state = torch._utils._get_obj_state(self)
        if type(self) is Tensor and not state:
            # Fast path for regular tensor without Python state.
            return self._reduce_ex_internal(proto)
        if has_torch_function_unary(self):
            return handle_torch_function(Tensor.__reduce_ex__, (self,), self, proto)
        # Subclasses / tensors with Python state are rebuilt in two steps:
        # reconstruct the data, then restore type and state via the v2 helper.
        func, args = self._reduce_ex_internal(proto)
        return (_rebuild_from_type_v2, (func, type(self), args, state))
+
    def storage(self):
        r"""
        storage() -> torch.TypedStorage

        Returns the underlying :class:`TypedStorage`.

        .. warning::

            :class:`TypedStorage` is deprecated. It will be removed in the future, and
            :class:`UntypedStorage` will be the only storage class. To access the
            :class:`UntypedStorage` directly, use :attr:`Tensor.untyped_storage()`.
        """
        if has_torch_function_unary(self):
            return handle_torch_function(Tensor.storage, (self,), self)

        # Public accessor warns about the TypedStorage deprecation; internal
        # callers use _typed_storage() below to avoid the warning.
        torch.storage._warn_typed_storage_removal(stacklevel=2)
        return self._typed_storage()
+
    # For internal use only, to avoid raising deprecation warning
    def _typed_storage(self):
        # Wrap the untyped storage with this tensor's dtype; _internal=True
        # suppresses the TypedStorage deprecation warning.
        untyped_storage = self.untyped_storage()
        return torch.TypedStorage(
            wrap_storage=untyped_storage, dtype=self.dtype, _internal=True
        )
+
    def _reduce_ex_internal(self, proto):
        """Core of ``__reduce_ex__``: choose a rebuild function and its args
        based on the tensor's backend, layout, and dtype."""
        check_serializing_named_tensor(self)
        # See Note [Don't serialize hooks]
        torch.utils.hooks.warn_if_has_hooks(self)
        backward_hooks: Dict[Any, Any] = OrderedDict()
        # Note: Numpy array is chosen to be the rebuild component for XLA, MTIA, ORT Tensors.
        # We considered a few options:
        # 1. CPU tensor can't be used here.
        #    Otherwise in torch.load CPU storage is reconstructed with randomly
        #    initialized data, moved onto backend device, and then storage is updated
        #    to the serialized content. This works perfectly for CPU/CUDA but not these backends;
        #    their tensors are disconnected with storage so they don't get the update.
        # 2. Python list is not a good fit due to performance reason.
        #    `tolist()` converts every single element in the tensor into python objects
        #    and serialize them one by one.
        if self.device.type in ["xla", "mtia", "ort"] or (
            not torch._C._has_storage(self)
            and self.device.type == torch._C._get_privateuse1_backend_name()
        ):
            # Convert BFloat16 tensors to Float32 before conversion to numpy, as numpy doesn't
            # support BFloat16. The rebuild tensor from numpy takes in the original self.dtype,
            # this would reconstruct the BFloat16 tensor from numpy.
            numpy_tensor = (
                self.cpu().numpy()
                if self.dtype != torch.bfloat16
                else self.cpu().to(torch.float32).numpy()
            )
            return (
                torch._utils._rebuild_device_tensor_from_numpy,
                (numpy_tensor, self.dtype, str(self.device), self.requires_grad),
            )
        if self.device.type == "meta":
            # NB: This implementation BREAKS storage sharing. Current
            # hypothesis is that no one cares for meta tensors.
            arg_meta = (
                self.dtype,
                tuple(self.size()),
                self.stride(),
                self.requires_grad,
            )
            return (torch._utils._rebuild_meta_tensor_no_storage, arg_meta)
        if self.is_quantized:
            # quantizer_params can be different type based on torch attribute
            quantizer_params: Union[
                Tuple[torch.qscheme, float, int], Tuple[Any, Tensor, Tensor, int]
            ]
            if self.qscheme() == torch.per_tensor_affine:
                quantizer_params = (
                    torch.per_tensor_affine,
                    self.q_scale(),
                    self.q_zero_point(),
                )
            elif self.qscheme() in (
                torch.per_channel_affine,
                torch.per_channel_affine_float_qparams,
            ):
                # convert scales and zero points to tuple to avoid recursive calls
                # when/if we get multi-axis quantized tensors in the future, the shape
                # is recoverable from the main tensor shape
                quantizer_params = (
                    torch.per_channel_affine,
                    self.q_per_channel_scales(),
                    self.q_per_channel_zero_points(),
                    self.q_per_channel_axis(),
                )
            else:
                raise RuntimeError(
                    f"Serialization is not supported for tensors of type {self.qscheme()}"
                )
            # TODO: Once we decide to break serialization FC, no longer
            # need to wrap with TypedStorage
            args_qtensor = (
                torch.storage.TypedStorage(
                    wrap_storage=self._typed_storage()._untyped_storage,
                    dtype=self.dtype,
                    _internal=True,
                ),
                self.storage_offset(),
                tuple(self.size()),
                self.stride(),
                quantizer_params,
                self.requires_grad,
                backward_hooks,
            )
            return (torch._utils._rebuild_qtensor, args_qtensor)
        elif self.is_sparse:
            if self.layout == torch.sparse_coo:
                args_sparse = (
                    self.layout,
                    (self._indices(), self._values(), self.size(), self.is_coalesced()),
                )
            else:
                raise NotImplementedError(
                    f"sparse tensor __reduce_ex__ for layout `{self.layout}`"
                )
            return (torch._utils._rebuild_sparse_tensor, args_sparse)
        elif self.layout in {
            torch.sparse_csr,
            torch.sparse_csc,
            torch.sparse_bsr,
            torch.sparse_bsc,
        }:
            # Compressed sparse formats: row-compressed (csr/bsr) expose
            # crow/col indices, column-compressed (csc/bsc) expose ccol/row.
            if self.layout in {torch.sparse_csr, torch.sparse_bsr}:
                compressed_indices, plain_indices = (
                    self.crow_indices(),
                    self.col_indices(),
                )
            else:
                compressed_indices, plain_indices = (
                    self.ccol_indices(),
                    self.row_indices(),
                )
            args_sparse_compressed = (
                self.layout,
                (
                    compressed_indices,
                    plain_indices,
                    self.values(),
                    self.size(),
                ),
            )
            return (torch._utils._rebuild_sparse_tensor, args_sparse_compressed)
        elif self.is_nested:
            args_nested = (
                # NB: values() currently returns the storage as a buffer in an unsafe way.
                # Ideally, we'd use a private API for this instead. TODO: Switch to this if
                # we ever get around to adding it.
                self.values(),
                self._nested_tensor_size(),
                self._nested_tensor_strides(),
                self._nested_tensor_storage_offsets(),
            )
            return (torch._utils._rebuild_nested_tensor, args_nested)
        elif (
            self.data_ptr() == 0
            and type(self) is not torch.Tensor
            and type(self).__torch_dispatch__ is not torch.Tensor.__torch_dispatch__
        ):
            # Wrapper subclass with no real data: serialize only the metadata.
            arg_wrapper_subclass = (
                type(self),
                self.dtype,
                tuple(self.size()),
                self.stride(),
                self.storage_offset(),
                self.layout,
                self.device,
                self.requires_grad,
            )
            return (torch._utils._rebuild_wrapper_subclass, arg_wrapper_subclass)
        else:
            # Dtypes with no numpy/TypedStorage equivalent go through the v3
            # rebuild path with untyped storage; everything else keeps the
            # legacy TypedStorage wrapping for forward compatibility.
            v3_dtypes = [
                torch.float8_e5m2,
                torch.float8_e4m3fn,
                torch.bits8,
                torch.bits16,
                torch.bits1x8,
                torch.bits2x4,
                torch.bits4x2,
            ]
            if self.dtype in v3_dtypes:
                rebuild_func = torch._utils._rebuild_tensor_v3
                storage = self.untyped_storage()
            else:
                # TODO: Once we decide to break serialization FC, no longer
                # need to wrap with TypedStorage
                rebuild_func = torch._utils._rebuild_tensor_v2  # type: ignore[assignment]
                storage = torch.storage.TypedStorage(
                    wrap_storage=self._typed_storage()._untyped_storage,
                    dtype=self.dtype,
                    _internal=True,
                )  # type: ignore[assignment]
            args = (
                storage,
                self.storage_offset(),
                tuple(self.size()),
                self.stride(),
                self.requires_grad,
                backward_hooks,
            )  # previously was self._backward_hooks

            if isinstance(storage, torch.storage.UntypedStorage):
                args = args + (self.dtype,)  # type: ignore[assignment]

            metadata = torch._utils.get_tensor_metadata(self)
            if metadata:
                args = args + (metadata,)  # type: ignore[assignment]

            return (rebuild_func, args)
+
    def __setstate__(self, state):
        """Restore pickled Python-side state onto this (leaf) tensor."""
        if has_torch_function_unary(self):
            return handle_torch_function(Tensor.__setstate__, (self,), self, state)
        # Warning: this method is NOT called when you torch.load() a tensor;
        # that is managed by _rebuild_tensor_v2
        if not self.is_leaf:
            raise RuntimeError("__setstate__ can be only called on leaf Tensors")
        if len(state) == 4:
            # legacy serialization of Tensor
            self.set_(*state)
            return
        elif len(state) == 5:
            # legacy serialization of Variable
            self.data = state[0]
            # Re-pack into the 3-tuple form handled below.
            state = (state[3], state[4], state[2])
        # The setting of _backward_hooks is expected to be a no-op.
        # See Note [Don't serialize hooks]
        self.requires_grad, _, self._backward_hooks = state
+
    def __repr__(self, *, tensor_contents=None):
        """String representation; ``tensor_contents`` overrides the data display."""
        if has_torch_function_unary(self):
            return handle_torch_function(
                Tensor.__repr__, (self,), self, tensor_contents=tensor_contents
            )
        # All strings are unicode in Python 3.
        return torch._tensor_str._str(self, tensor_contents=tensor_contents)
+
    def backward(
        self, gradient=None, retain_graph=None, create_graph=False, inputs=None
    ):
        r"""Computes the gradient of current tensor wrt graph leaves.

        The graph is differentiated using the chain rule. If the tensor is
        non-scalar (i.e. its data has more than one element) and requires
        gradient, the function additionally requires specifying ``gradient``.
        It should be a tensor of matching type and location, that contains
        the gradient of the differentiated function w.r.t. ``self``.

        This function accumulates gradients in the leaves - you might need to zero
        ``.grad`` attributes or set them to ``None`` before calling it.
        See :ref:`Default gradient layouts`
        for details on the memory layout of accumulated gradients.

        .. note::

            If you run any forward ops, create ``gradient``, and/or call ``backward``
            in a user-specified CUDA stream context, see
            :ref:`Stream semantics of backward passes`.

        .. note::

            When ``inputs`` are provided and a given input is not a leaf,
            the current implementation will call its grad_fn (though it is not strictly needed to get this gradients).
            It is an implementation detail on which the user should not rely.
            See https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for more details.

        Args:
            gradient (Tensor or None): Gradient w.r.t. the
                tensor. If it is a tensor, it will be automatically converted
                to a Tensor that does not require grad unless ``create_graph`` is True.
                None values can be specified for scalar Tensors or ones that
                don't require grad. If a None value would be acceptable then
                this argument is optional.
            retain_graph (bool, optional): If ``False``, the graph used to compute
                the grads will be freed. Note that in nearly all cases setting
                this option to True is not needed and often can be worked around
                in a much more efficient way. Defaults to the value of
                ``create_graph``.
            create_graph (bool, optional): If ``True``, graph of the derivative will
                be constructed, allowing to compute higher order derivative
                products. Defaults to ``False``.
            inputs (sequence of Tensor): Inputs w.r.t. which the gradient will be
                accumulated into ``.grad``. All other Tensors will be ignored. If not
                provided, the gradient is accumulated into all the leaf Tensors that were
                used to compute the :attr:`tensors`.
        """
        if has_torch_function_unary(self):
            return handle_torch_function(
                Tensor.backward,
                (self,),
                self,
                gradient=gradient,
                retain_graph=retain_graph,
                create_graph=create_graph,
                inputs=inputs,
            )
        # Thin wrapper: the actual engine entry point lives in torch.autograd.
        torch.autograd.backward(
            self, gradient, retain_graph, create_graph, inputs=inputs
        )
+
    def register_hook(self, hook):
        r"""Registers a backward hook.

        The hook will be called every time a gradient with respect to the
        Tensor is computed. The hook should have the following signature::

            hook(grad) -> Tensor or None


        The hook should not modify its argument, but it can optionally return
        a new gradient which will be used in place of :attr:`grad`.

        This function returns a handle with a method ``handle.remove()``
        that removes the hook from the module.

        .. note::
            See :ref:`backward-hooks-execution` for more information on how when this hook
            is executed, and how its execution is ordered relative to other hooks.

        Example::

            >>> v = torch.tensor([0., 0., 0.], requires_grad=True)
            >>> h = v.register_hook(lambda grad: grad * 2)  # double the gradient
            >>> v.backward(torch.tensor([1., 2., 3.]))
            >>> v.grad

             2
             4
             6
            [torch.FloatTensor of size (3,)]

            >>> h.remove()  # removes the hook
        """
        if has_torch_function_unary(self):
            return handle_torch_function(Tensor.register_hook, (self,), self, hook)
        if not self.requires_grad:
            raise RuntimeError(
                "cannot register a hook on a tensor that doesn't require gradient"
            )
        if self._backward_hooks is None:
            # Lazily create the hook dict; for non-leaf tensors also attach it
            # to the producing grad_fn so the engine invokes the hooks.
            self._backward_hooks = OrderedDict()
            if self.grad_fn is not None:
                self.grad_fn._register_hook_dict(self)
        handle = hooks.RemovableHandle(self._backward_hooks)
        self._backward_hooks[handle.id] = hook
        return handle
+
    def register_post_accumulate_grad_hook(self, hook):
        r"""Registers a backward hook that runs after grad accumulation.

        The hook will be called after all gradients for a tensor have been accumulated,
        meaning that the .grad field has been updated on that tensor. The post
        accumulate grad hook is ONLY applicable for leaf tensors (tensors without a
        .grad_fn field). Registering this hook on a non-leaf tensor will error!

        The hook should have the following signature::

            hook(param: Tensor) -> None

        Note that, unlike other autograd hooks, this hook operates on the tensor
        that requires grad and not the grad itself. The hook can in-place modify
        and access its Tensor argument, including its .grad field.

        This function returns a handle with a method ``handle.remove()``
        that removes the hook from the module.

        .. note::
            See :ref:`backward-hooks-execution` for more information on how when this hook
            is executed, and how its execution is ordered relative to other hooks. Since
            this hook runs during the backward pass, it will run in no_grad mode (unless
            create_graph is True). You can use torch.enable_grad() to re-enable autograd
            within the hook if you need it.

        Example::

            >>> v = torch.tensor([0., 0., 0.], requires_grad=True)
            >>> lr = 0.01
            >>> # simulate a simple SGD update
            >>> h = v.register_post_accumulate_grad_hook(lambda p: p.add_(p.grad, alpha=-lr))
            >>> v.backward(torch.tensor([1., 2., 3.]))
            >>> v
            tensor([-0.0100, -0.0200, -0.0300], requires_grad=True)

            >>> h.remove()  # removes the hook
        """
        if has_torch_function_unary(self):
            return handle_torch_function(
                Tensor.register_post_accumulate_grad_hook, (self,), self, hook
            )
        if not self.requires_grad:
            raise RuntimeError(
                "cannot register a hook on a tensor that doesn't require gradient"
            )
        if self.grad_fn is not None:
            raise RuntimeError(
                "post accumulate grad hooks cannot be registered on non-leaf tensors"
            )
        if self._post_accumulate_grad_hooks is None:
            # Lazily create the hook dict on first registration.
            self._post_accumulate_grad_hooks: Dict[Any, Any] = OrderedDict()
        handle = hooks.RemovableHandle(self._post_accumulate_grad_hooks)
        self._post_accumulate_grad_hooks[handle.id] = hook
        return handle
+
    def reinforce(self, reward):
        """Removed API; always raises with migration instructions."""

        # trim() strips each line's indentation so the message reads cleanly
        # regardless of how the literal below is indented in source.
        def trim(str):
            return "\n".join([line.strip() for line in str.split("\n")])

        raise RuntimeError(
            trim(
                r"""reinforce() was removed.
            Use torch.distributions instead.
            See https://pytorch.org/docs/master/distributions.html

            Instead of:

            probs = policy_network(state)
            action = probs.multinomial()
            next_state, reward = env.step(action)
            action.reinforce(reward)
            action.backward()

            Use:

            probs = policy_network(state)
            # NOTE: categorical is equivalent to what used to be called multinomial
            m = torch.distributions.Categorical(probs)
            action = m.sample()
            next_state, reward = env.step(action)
            loss = -m.log_prob(action) * reward
            loss.backward()
        """
            )
        )
+
    # detach/detach_ are native (C++) methods; here we only attach their
    # Python docstrings via _add_docstr.
    detach = _C._add_docstr(
        _C.TensorBase.detach,
        r"""
    Returns a new Tensor, detached from the current graph.

    The result will never require gradient.

    This method also affects forward mode AD gradients and the result will never
    have forward mode AD gradients.

    .. note::

      Returned Tensor shares the same storage with the original one.
      In-place modifications on either of them will be seen, and may trigger
      errors in correctness checks.
      IMPORTANT NOTE: Previously, in-place size / stride / storage changes
      (such as `resize_` / `resize_as_` / `set_` / `transpose_`) to the returned tensor
      also update the original tensor. Now, these in-place changes will not update the
      original tensor anymore, and will instead trigger an error.
      For sparse tensors:
      In-place indices / values changes (such as `zero_` / `copy_` / `add_`) to the
      returned tensor will not update the original tensor anymore, and will instead
      trigger an error.
    """,
    )

    detach_ = _C._add_docstr(
        _C.TensorBase.detach_,
        r"""
    Detaches the Tensor from the graph that created it, making it a leaf.
    Views cannot be detached in-place.

    This method also affects forward mode AD gradients and the result will never
    have forward mode AD gradients.
    """,
    )
+
    def is_shared(self):
        r"""Checks if tensor is in shared memory.

        This is always ``True`` for CUDA tensors.
        """
        if has_torch_function_unary(self):
            return handle_torch_function(Tensor.is_shared, (self,), self)
        # Delegates to the storage: sharing is a storage-level property.
        return self._typed_storage()._is_shared()
+
    def share_memory_(self):
        r"""Moves the underlying storage to shared memory.

        This is a no-op if the underlying storage is already in shared memory
        and for CUDA tensors. Tensors in shared memory cannot be resized.

        See :meth:`torch.UntypedStorage.share_memory_` for more details.
        """
        if has_torch_function_unary(self):
            return handle_torch_function(Tensor.share_memory_, (self,), self)
        self._typed_storage()._share_memory_()
        # In-place op: return self for chaining, per the trailing-underscore convention.
        return self
+
+ def __reversed__(self):
+ r"""Reverses the tensor along dimension 0."""
+ if has_torch_function_unary(self):
+ return handle_torch_function(Tensor.__reversed__, (self,), self)
+ if self.dim() == 0:
+ return self
+ else:
+ return self.flip(0)
+
    def norm(
        self,
        p: Optional[Union[float, str]] = "fro",
        dim=None,
        keepdim=False,
        dtype=None,
    ):
        r"""See :func:`torch.norm`"""
        if has_torch_function_unary(self):
            return handle_torch_function(
                Tensor.norm, (self,), self, p=p, dim=dim, keepdim=keepdim, dtype=dtype
            )
        # Thin delegate; torch.norm handles the p="fro"/numeric dispatch.
        return torch.norm(self, p, dim, keepdim, dtype=dtype)
+
    def solve(self, other):
        # Delegates to torch._linalg_utils.solve; semantics (or a removal
        # error) are defined there — not visible from this file.
        from ._linalg_utils import solve

        return solve(self, other)
+
    def lstsq(self, other):
        # Delegates to torch._linalg_utils.lstsq; see that module for behavior.
        from ._linalg_utils import lstsq

        return lstsq(self, other)
+
    def eig(self, eigenvectors=False):
        # Legacy entry point; behavior is defined by torch._linalg_utils.eig.
        from ._linalg_utils import eig

        return eig(self, eigenvectors=eigenvectors)
+
    def symeig(self, eigenvectors=False):
        # Legacy entry point; delegates to the private _symeig helper.
        from ._linalg_utils import _symeig

        return _symeig(self, eigenvectors=eigenvectors)
+
+ def lu(self, pivot=True, get_infos=False):
+ r"""See :func:`torch.lu`"""
+ # If get_infos is True, then we don't need to check for errors and vice versa
+ if has_torch_function_unary(self):
+ return handle_torch_function(
+ Tensor.lu, (self,), self, pivot=pivot, get_infos=get_infos
+ )
+
+ LU, pivots, infos = torch._lu_with_info(
+ self, pivot=pivot, check_errors=(not get_infos)
+ )
+ if get_infos:
+ return LU, pivots, infos
+ else:
+ return LU, pivots
+
    def stft(
        self,
        n_fft: int,
        hop_length: Optional[int] = None,
        win_length: Optional[int] = None,
        window: "Optional[Tensor]" = None,
        center: bool = True,
        pad_mode: str = "reflect",
        normalized: bool = False,
        onesided: Optional[bool] = None,
        return_complex: Optional[bool] = None,
    ):
        r"""See :func:`torch.stft`

        .. warning::
          This function changed signature at version 0.4.1. Calling with
          the previous signature may cause error or return incorrect result.
        """
        if has_torch_function_unary(self):
            return handle_torch_function(
                Tensor.stft,
                (self,),
                self,
                n_fft,
                hop_length=hop_length,
                win_length=win_length,
                window=window,
                center=center,
                pad_mode=pad_mode,
                normalized=normalized,
                onesided=onesided,
                return_complex=return_complex,
            )
        # Positional order below must match torch.stft exactly;
        # return_complex stays keyword-only.
        return torch.stft(
            self,
            n_fft,
            hop_length,
            win_length,
            window,
            center,
            pad_mode,
            normalized,
            onesided,
            return_complex=return_complex,
        )
+
    def istft(
        self,
        n_fft: int,
        hop_length: Optional[int] = None,
        win_length: Optional[int] = None,
        window: "Optional[Tensor]" = None,
        center: bool = True,
        normalized: bool = False,
        onesided: Optional[bool] = None,
        length: Optional[int] = None,
        return_complex: bool = False,
    ):
        r"""See :func:`torch.istft`"""
        if has_torch_function_unary(self):
            return handle_torch_function(
                Tensor.istft,
                (self,),
                self,
                n_fft,
                hop_length=hop_length,
                win_length=win_length,
                window=window,
                center=center,
                normalized=normalized,
                onesided=onesided,
                length=length,
                return_complex=return_complex,
            )
        # Positional order below must match torch.istft exactly;
        # return_complex stays keyword-only.
        return torch.istft(
            self,
            n_fft,
            hop_length,
            win_length,
            window,
            center,
            normalized,
            onesided,
            length,
            return_complex=return_complex,
        )
+
    def resize(self, *sizes):
        # Deprecated out-of-place resize, kept for backward compatibility;
        # delegates to the legacy autograd Function.
        if has_torch_function_unary(self):
            return handle_torch_function(Tensor.resize, (self,), self, *sizes)
        warnings.warn("non-inplace resize is deprecated")
        from torch.autograd._functions import Resize

        return Resize.apply(self, sizes)
+
    def resize_as(self, tensor):
        # Deprecated out-of-place resize-to-match, kept for backward
        # compatibility; delegates to the legacy autograd Function.
        if has_torch_function_variadic(self, tensor):
            return handle_torch_function(Tensor.resize_as, (self, tensor), self, tensor)
        warnings.warn("non-inplace resize_as is deprecated")
        from torch.autograd._functions import Resize

        return Resize.apply(self, tensor.size())
+
+ def split(self, split_size, dim=0):
+ r"""See :func:`torch.split`"""
+ if has_torch_function_unary(self):
+ return handle_torch_function(
+ Tensor.split, (self,), self, split_size, dim=dim
+ )
+ if isinstance(split_size, Tensor):
+ try:
+ split_size = int(split_size)
+ except ValueError:
+ pass
+
+ if isinstance(split_size, (int, torch.SymInt)):
+ return torch._VF.split(self, split_size, dim) # type: ignore[attr-defined]
+ else:
+ return torch._VF.split_with_sizes(self, split_size, dim)
+
    def unique(self, sorted=True, return_inverse=False, return_counts=False, dim=None):
        r"""Returns the unique elements of the input tensor.

        See :func:`torch.unique`
        """
        if has_torch_function_unary(self):
            return handle_torch_function(
                Tensor.unique,
                (self,),
                self,
                sorted=sorted,
                return_inverse=return_inverse,
                return_counts=return_counts,
                dim=dim,
            )
        # Forward everything by keyword so torch.unique remains the single
        # source of truth for defaults and dispatch.
        return torch.unique(
            self,
            sorted=sorted,
            return_inverse=return_inverse,
            return_counts=return_counts,
            dim=dim,
        )
+
    def unique_consecutive(self, return_inverse=False, return_counts=False, dim=None):
        r"""Eliminates all but the first element from every consecutive group of equivalent elements.

        See :func:`torch.unique_consecutive`
        """
        if has_torch_function_unary(self):
            return handle_torch_function(
                Tensor.unique_consecutive,
                (self,),
                self,
                return_inverse=return_inverse,
                return_counts=return_counts,
                dim=dim,
            )
        # Forward by keyword; torch.unique_consecutive owns the defaults.
        return torch.unique_consecutive(
            self, return_inverse=return_inverse, return_counts=return_counts, dim=dim
        )
+
    @_handle_torch_function_and_wrap_type_error_to_not_implemented
    def __rsub__(self, other):
        # Reflected subtraction (other - self) via the fused rsub kernel.
        return _C._VariableFunctions.rsub(self, other)
+
    @_handle_torch_function_and_wrap_type_error_to_not_implemented
    def __rdiv__(self, other):
        # Reflected division (other / self), computed as (1 / self) * other.
        return self.reciprocal() * other

    # True division reuses the reflected implementation above; the in-place
    # variant maps onto the C-implemented __idiv__.
    __rtruediv__ = __rdiv__
    __itruediv__ = _C.TensorBase.__idiv__
+
    # Wrap the C pow kernels so unsupported operand types surface as
    # NotImplemented, enabling Python's reflected-operator fallback.
    __pow__ = _handle_torch_function_and_wrap_type_error_to_not_implemented(
        _C.TensorBase.pow
    )
    __ipow__ = _handle_torch_function_and_wrap_type_error_to_not_implemented(
        _C.TensorBase.pow_
    )
+
    @_handle_torch_function_and_wrap_type_error_to_not_implemented
    def __rmod__(self, other):
        # Reflected modulo (other % self).
        return torch.remainder(other, self)
+
+ def __format__(self, format_spec):
+ if has_torch_function_unary(self):
+ return handle_torch_function(Tensor.__format__, (self,), self, format_spec)
+ if self.dim() == 0 and not self.is_meta and type(self) is Tensor:
+ return self.item().__format__(format_spec)
+ return object.__format__(self, format_spec)
+
    @_handle_torch_function_and_wrap_type_error_to_not_implemented
    def __rpow__(self, other):
        # Reflected power (other ** self).
        return torch.pow(other, self)
+
    @_handle_torch_function_and_wrap_type_error_to_not_implemented
    def __floordiv__(self, other):
        # Floor division (self // other).
        return torch.floor_divide(self, other)
+
    @_handle_torch_function_and_wrap_type_error_to_not_implemented
    def __rfloordiv__(self, other):
        # Reflected floor division (other // self).
        return torch.floor_divide(other, self)
+
    @_handle_torch_function_and_wrap_type_error_to_not_implemented
    def __rlshift__(self, other):
        # Reflected left shift (other << self).
        return torch.bitwise_left_shift(other, self)
+
    @_handle_torch_function_and_wrap_type_error_to_not_implemented
    def __rrshift__(self, other):
        # Reflected right shift (other >> self).
        return torch.bitwise_right_shift(other, self)
+
    @_handle_torch_function_and_wrap_type_error_to_not_implemented
    def __rmatmul__(self, other):
        # Reflected matrix multiply (other @ self).
        return torch.matmul(other, self)
+
    # Unary operators delegate directly to the C implementations.
    __pos__ = _C.TensorBase.positive
    __neg__ = _C.TensorBase.neg
    __abs__ = _C.TensorBase.abs
+
+ def __len__(self):
+ if has_torch_function_unary(self):
+ return handle_torch_function(Tensor.__len__, (self,), self)
+ if self.dim() == 0:
+ raise TypeError("len() of a 0-d tensor")
+ if torch._C._get_tracing_state():
+ warnings.warn(
+ "Using len to get tensor shape might cause the trace to be incorrect. "
+ "Recommended usage would be tensor.shape[0]. "
+ "Passing a tensor of different shape might lead to errors or silently give "
+ "incorrect results.",
+ category=torch.jit.TracerWarning,
+ stacklevel=2,
+ )
+ return self.shape[0]
+
    def __iter__(self):
        """Iterate over slices along dim 0, lazily, via :meth:`unbind`."""
        # NB: we use 'imap' and not 'map' here, so that in Python 2 we get a
        # generator and don't eagerly perform all the indexes. This could
        # save us work, and also helps keep trace ordering deterministic
        # (e.g., if you zip(*hiddens), the eager map will force all the
        # indexes of hiddens[0] before hiddens[1], while the generator
        # map will interleave them.)
        # NB: We have intentionally skipped __torch_function__ dispatch here.
        # See gh-54457
        if self.dim() == 0:
            raise TypeError("iteration over a 0-d tensor")
        if torch._C._get_tracing_state():
            warnings.warn(
                "Iterating over a tensor might cause the trace to be incorrect. "
                "Passing a tensor of different shape won't change the number of "
                "iterations executed (and might lead to errors or silently give "
                "incorrect results).",
                category=torch.jit.TracerWarning,
                stacklevel=2,
            )
        return iter(self.unbind(0))
+
    def __hash__(self):
        # Do NOT handle __torch_function__ here as user's default
        # implementation that handle most functions will most likely do it wrong.
        # It can be easily overridden by defining this method on the user
        # subclass if needed.
        # Hash by object identity, matching default object semantics.
        return id(self)
+
+ def __dir__(self):
+ if has_torch_function_unary(self):
+ return handle_torch_function(Tensor.__dir__, (self,), self)
+ tensor_methods = dir(self.__class__)
+ tensor_methods.remove("volatile") # deprecated
+ attrs = list(self.__dict__.keys())
+ keys = tensor_methods + attrs
+
+ # property only available dense, cuda tensors
+ if (not self.is_cuda) or self.is_sparse:
+ keys.remove("__cuda_array_interface__")
+
+ return sorted(keys)
+
+ # Numpy array interface, to support `numpy.asarray(tensor) -> ndarray`
+ __array_priority__ = 1000 # prefer Tensor ops over numpy ones
+
+ def __array__(self, dtype=None):
+ if has_torch_function_unary(self):
+ return handle_torch_function(Tensor.__array__, (self,), self, dtype=dtype)
+ if dtype is None:
+ return self.numpy()
+ else:
+ return self.numpy().astype(dtype, copy=False)
+
+ # Wrap Numpy array again in a suitable tensor when done, to support e.g.
+ # `numpy.sin(tensor) -> tensor` or `numpy.greater(tensor, 0) -> ByteTensor`
+ def __array_wrap__(self, array):
+ if has_torch_function_unary(self):
+ return handle_torch_function(
+ Tensor.__array_wrap__, (self,), self, array=array
+ )
+ if array.dtype == bool:
+ # Workaround, torch has no built-in bool tensor
+ array = array.astype("uint8")
+ return torch.from_numpy(array)
+
+ def __contains__(self, element):
+ r"""Check if `element` is present in tensor
+
+ Args:
+ element (Tensor or scalar): element to be checked
+ for presence in current tensor"
+ """
+ if has_torch_function_unary(self):
+ return handle_torch_function(Tensor.__contains__, (self,), self, element)
+ if isinstance(
+ element, (torch.Tensor, Number, torch.SymInt, torch.SymFloat, torch.SymBool)
+ ):
+ # type hint doesn't understand the __contains__ result array
+ return (element == self).any().item() # type: ignore[union-attr]
+
+ raise RuntimeError(
+ f"Tensor.__contains__ only supports Tensor or scalar, but you passed in a {type(element)}."
+ )
+
+ @property
+ def __cuda_array_interface__(self):
+ """Array view description for cuda tensors.
+
+ See:
+ https://numba.pydata.org/numba-doc/latest/cuda/cuda_array_interface.html
+ """
+ if has_torch_function_unary(self):
+ # TODO mypy doesn't support @property, see: https://github.com/python/mypy/issues/6185
+ return handle_torch_function(Tensor.__cuda_array_interface__.__get__, (self,), self) # type: ignore[attr-defined]
+
+ # raise AttributeError for unsupported tensors, so that
+ # hasattr(cpu_tensor, "__cuda_array_interface__") is False.
+ if not self.is_cuda:
+ raise AttributeError(
+ "Can't get __cuda_array_interface__ on non-CUDA tensor type: %s "
+ "If CUDA data is required use tensor.cuda() to copy tensor to device memory."
+ % self.type()
+ )
+
+ if self.is_sparse:
+ raise AttributeError(
+ "Can't get __cuda_array_interface__ on sparse type: %s "
+ "Use Tensor.to_dense() to convert to a dense tensor first."
+ % self.type()
+ )
+
+ # RuntimeError, matching tensor.__array__() behavior.
+ if self.requires_grad:
+ raise RuntimeError(
+ "Can't get __cuda_array_interface__ on Variable that requires grad. "
+ "If gradients aren't required, use var.detach() to get Variable that doesn't require grad."
+ )
+
+ # CUDA devices are little-endian and tensors are stored in native byte
+ # order. 1-byte entries are endian-agnostic.
+ typestr = {
+ torch.complex64: " 0 else 0
+ data = (data_ptr, False) # read-only is false
+
+ return dict(typestr=typestr, shape=shape, strides=strides, data=data, version=2)
+
+ def storage_type(self):
+ r"""storage_type() -> type
+
+ Returns the type of the underlying storage.
+
+ """
+ if has_torch_function_unary(self):
+ return handle_torch_function(Tensor.storage_type, (self,), self)
+
+ torch.storage._warn_typed_storage_removal()
+
+ return self._typed_storage()._get_legacy_storage_class()
+
    def refine_names(self, *names):
        r"""Refines the dimension names of :attr:`self` according to :attr:`names`.

        Refining is a special case of renaming that "lifts" unnamed dimensions.
        A ``None`` dim can be refined to have any name; a named dim can only be
        refined to have the same name.

        Because named tensors can coexist with unnamed tensors, refining names
        gives a nice way to write named-tensor-aware code that works with both
        named and unnamed tensors.

        :attr:`names` may contain up to one Ellipsis (``...``).
        The Ellipsis is expanded greedily; it is expanded in-place to fill
        :attr:`names` to the same length as ``self.dim()`` using names from the
        corresponding indices of ``self.names``.

        Python 2 does not support Ellipsis but one may use a string literal
        instead (``'...'``).

        Args:
            names (iterable of str): The desired names of the output tensor. May
                contain up to one Ellipsis.

        Examples::

            >>> imgs = torch.randn(32, 3, 128, 128)
            >>> named_imgs = imgs.refine_names('N', 'C', 'H', 'W')
            >>> named_imgs.names
            ('N', 'C', 'H', 'W')

            >>> tensor = torch.randn(2, 3, 5, 7, 11)
            >>> tensor = tensor.refine_names('A', ..., 'B', 'C')
            >>> tensor.names
            ('A', None, None, 'B', 'C')

        .. warning::
            The named tensor API is experimental and subject to change.

        """
        if has_torch_function_unary(self):
            return handle_torch_function(Tensor.refine_names, (self,), self, *names)
        # Expand a single '...' against self.names before delegating to C++.
        names = resolve_ellipsis(names, self.names, "refine_names")
        return super().refine_names(names)
+
    def align_to(self, *names):
        r"""Permutes the dimensions of the :attr:`self` tensor to match the order
        specified in :attr:`names`, adding size-one dims for any new names.

        All of the dims of :attr:`self` must be named in order to use this method.
        The resulting tensor is a view on the original tensor.

        All dimension names of :attr:`self` must be present in :attr:`names`.
        :attr:`names` may contain additional names that are not in ``self.names``;
        the output tensor has a size-one dimension for each of those new names.

        :attr:`names` may contain up to one Ellipsis (``...``).
        The Ellipsis is expanded to be equal to all dimension names of :attr:`self`
        that are not mentioned in :attr:`names`, in the order that they appear
        in :attr:`self`.

        Python 2 does not support Ellipsis but one may use a string literal
        instead (``'...'``).

        Args:
            names (iterable of str): The desired dimension ordering of the
                output tensor. May contain up to one Ellipsis that is expanded
                to all unmentioned dim names of :attr:`self`.

        Examples::

            >>> tensor = torch.randn(2, 2, 2, 2, 2, 2)
            >>> named_tensor = tensor.refine_names('A', 'B', 'C', 'D', 'E', 'F')

            # Move the F and E dims to the front while keeping the rest in order
            >>> named_tensor.align_to('F', 'E', ...)

        .. warning::
            The named tensor API is experimental and subject to change.

        """
        if has_torch_function_unary(self):
            return handle_torch_function(Tensor.align_to, (self,), self, *names)
        ellipsis_idx = single_ellipsis_index(names, "align_to")
        if ellipsis_idx is None:
            return super().align_to(names)
        # With an Ellipsis present, pass the explicit names plus the position
        # of the expansion point so C++ can splice in the unmentioned dims.
        return super().align_to(
            [name for name in names if not is_ellipsis(name)], ellipsis_idx
        )
+
    def unflatten(self, dim, sizes):
        r"""
        unflatten(dim, sizes) -> Tensor

        See :func:`torch.unflatten`.

        """
        if has_torch_function_unary(self):
            return handle_torch_function(Tensor.unflatten, (self,), self, dim, sizes)

        if not sizes:
            raise RuntimeError("unflatten: sizes must be non-empty")

        names = None
        # An OrderedDict or a sequence of (name, size) pairs selects the
        # named-tensor overload; plain sizes take the unnamed path.
        if isinstance(sizes, OrderedDict) or (
            isinstance(sizes, (tuple, list)) and isinstance(sizes[0], (tuple, list))
        ):
            names, sizes = unzip_namedshape(sizes)
            return super().unflatten(dim, sizes, names)
        else:
            return super().unflatten(dim, sizes)
+
    def rename_(self, *names, **rename_map):
        """In-place version of :meth:`~Tensor.rename`."""

        if has_torch_function_unary(self):
            return handle_torch_function(
                Tensor.rename_, (self,), self, *names, **rename_map
            )

        # Note [rename_ / rename API]
        # The Python API for these is different from the C++ API. In Python:
        # 1) tensor.rename(*names) takes a vararglist of names
        # 2) tensor.rename(**rename_map) takes a map of names to rename.
        # C++ is static, making it difficult to implement similar behavior.
        return update_names(self, names, rename_map, inplace=True)
+
    def rename(self, *names, **rename_map):
        """Renames dimension names of :attr:`self`.

        There are two main usages:

        ``self.rename(**rename_map)`` returns a view on tensor that has dims
        renamed as specified in the mapping :attr:`rename_map`.

        ``self.rename(*names)`` returns a view on tensor, renaming all
        dimensions positionally using :attr:`names`.
        Use ``self.rename(None)`` to drop names on a tensor.

        One cannot specify both positional args :attr:`names` and keyword args
        :attr:`rename_map`.

        Examples::

            >>> imgs = torch.rand(2, 3, 5, 7, names=('N', 'C', 'H', 'W'))
            >>> renamed_imgs = imgs.rename(N='batch', C='channels')
            >>> renamed_imgs.names
            ('batch', 'channels', 'H', 'W')

            >>> renamed_imgs = imgs.rename(None)
            >>> renamed_imgs.names
            (None, None, None, None)

            >>> renamed_imgs = imgs.rename('batch', 'channel', 'height', 'width')
            >>> renamed_imgs.names
            ('batch', 'channel', 'height', 'width')

        .. warning::
            The named tensor API is experimental and subject to change.

        """
        if has_torch_function_unary(self):
            return handle_torch_function(
                Tensor.rename, (self,), self, *names, **rename_map
            )

        # See Note [rename_ / rename API]
        # update_names validates that only one of names / rename_map is used.
        return update_names(self, names, rename_map, inplace=False)
+
+ def to_sparse_coo(self):
+ """Convert a tensor to :ref:`coordinate format `.
+
+ Examples::
+
+ >>> dense = torch.randn(5, 5)
+ >>> sparse = dense.to_sparse_coo()
+ >>> sparse._nnz()
+ 25
+
+ """
+ return self.to_sparse()
+
+ def dim_order(self):
+ """
+
+ dim_order() -> tuple
+
+ Returns a tuple of int describing the dim order or physical layout of :attr:`self`.
+
+ Args:
+ None
+
+ Dim order represents how dimensions are laid out in memory,
+ starting from the outermost to the innermost dimension.
+
+ Example::
+ >>> torch.empty((2, 3, 5, 7)).dim_order()
+ (0, 1, 2, 3)
+ >>> torch.empty((2, 3, 5, 7), memory_format=torch.channels_last).dim_order()
+ (0, 2, 3, 1)
+
+ .. warning::
+ The dim_order tensor API is experimental and subject to change.
+
+ """
+ if has_torch_function_unary(self):
+ return handle_torch_function(Tensor.dim_order, (self,), self)
+
+ import torch._prims_common as utils
+
+ return tuple(utils.compute_elementwise_output_logical_to_physical_perm(self))
+
    def _update_names(self, names, inplace):
        # Shared backend for rename / rename_; picks the in-place or
        # out-of-place C++ overload.
        if has_torch_function_unary(self):
            return handle_torch_function(
                Tensor._update_names, (self,), self, names, inplace
            )

        # See Note [rename_ / rename API]
        if inplace:
            return super().rename_(names)
        else:
            return super().rename(names)
+
    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        """
        This __torch_function__ implementation wraps subclasses such that
        methods called on subclasses return a subclass instance instead of
        a ``torch.Tensor`` instance.

        One corollary to this is that you need coverage for torch.Tensor
        methods if implementing __torch_function__ for subclasses.

        We recommend always calling ``super().__torch_function__`` as the base
        case when doing the above.

        While not mandatory, we recommend making `__torch_function__` a classmethod.
        """
        if kwargs is None:
            kwargs = {}

        # Defer to another handler if any participating type is not an
        # ancestor of cls.
        if not all(issubclass(cls, t) for t in types):
            return NotImplemented

        # Run the op with subclass dispatch disabled, then re-wrap results
        # as cls (except for functions whose results must not be wrapped).
        with _C.DisableTorchFunctionSubclass():
            ret = func(*args, **kwargs)
            if func in get_default_nowrap_functions():
                return ret
            else:
                return _convert(ret, cls)

    # Subclasses get torch-dispatch disabled by default; override to opt in.
    __torch_dispatch__ = _C._disabled_torch_dispatch_impl
+
    def __dlpack__(self, stream=None):
        """
        Creates a DLpack `capsule https://data-apis.org/array-api/latest/design_topics/data_interchange.html#data-interchange`_
        of the current tensor to be exported to other libraries.

        This function will be called from the `from_dlpack` method
        of the library that will consume the capsule. `from_dlpack` passes the current
        stream to this method as part of the specification.

        Args:
            stream (integer or None): An optional Python integer representing a
                pointer to a CUDA stream. The current stream is synchronized with
                this stream before the capsule is created, and since the capsule
                shares its storage with the tensor this make it safe to access from
                both streams. If None or -1 is passed then no synchronization is performed.
                If 1 (on CUDA) or 0 (on ROCM) then the default stream is used for
                synchronization.
        """
        if has_torch_function_unary(self):
            return handle_torch_function(Tensor.__dlpack__, (self,), self, stream)

        # DLPack capsules can't capture all of PyTorch's semantics,
        # so we prohibit exporting tensors that would lose their properties like
        # requires_grad and having the conjugate bit set.
        if self.requires_grad:
            raise RuntimeError(
                "Can't export tensors that require gradient, use tensor.detach()"
            )
        if self.is_conj():
            raise RuntimeError("Can't export tensors with the conjugate bit set")
        if self.layout != torch.strided:
            raise RuntimeError(
                "Can't export tensors with layout other than torch.strided"
            )

        if stream is not None and type(stream) is not int:
            # Stream pointers in CUDA/ROCm are uniquely numbered and can
            # be retrieved from their integer value.
            raise TypeError("stream must be ``int`` or ``none``")
        elif stream is not None and stream != -1:
            if self.device.type == "cuda":
                # NB: This logic handles the special case values for default
                # streams and must be kept in sync with from_dlpack in
                # torch/utils/dlpack.py
                if stream == 1 and torch.version.hip is None:
                    stream = torch.cuda.default_stream()
                elif stream == 0 and torch.version.hip is not None:
                    stream = torch.cuda.default_stream()
                else:
                    stream = torch.cuda.ExternalStream(stream)
                # Only synchronize on different streams
                sync_stream = torch.cuda.current_stream()
                if stream != sync_stream:
                    # Make the consumer stream wait until work queued on the
                    # producer (current) stream has completed.
                    event = torch.cuda.Event()
                    event.record(sync_stream)
                    stream.wait_event(event)
        return torch.to_dlpack(self)
+
+ def __dlpack_device__(self) -> Tuple[enum.IntEnum, int]:
+ if has_torch_function_unary(self):
+ return handle_torch_function(Tensor.__dlpack_device__, (self,), self)
+ device = self.device
+ idx = device.index if device.index is not None else 0
+ torch_device_type = device.type
+ if torch_device_type == "cuda" and torch.version.hip is not None:
+ device_type = DLDeviceType.kDLROCM
+ elif torch_device_type == "cpu" and self.is_pinned():
+ device_type = DLDeviceType.kDLCPUPinned
+ elif torch_device_type == "cuda":
+ device_type = DLDeviceType.kDLGPU
+ elif torch_device_type == "cpu":
+ device_type = DLDeviceType.kDLCPU
+ elif self.device.type == "xpu":
+ device_type = DLDeviceType.kDLOneAPI
+ else:
+ raise ValueError(f"Unknown device type {torch_device_type} for Dlpack")
+ return (device_type, idx)
+
+ __module__ = "torch"
+
+
+def _convert(ret, cls):
+ if cls is Tensor:
+ return ret
+
+ if isinstance(ret, Tensor) and not isinstance(ret, cls):
+ ret = ret.as_subclass(cls)
+
+ if isinstance(ret, (tuple, list)):
+ # Also handles things like namedtuples
+ ret = type(ret)(_convert(r, cls) for r in ret)
+
+ return ret
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_tensor_docs.py b/env-llmeval/lib/python3.10/site-packages/torch/_tensor_docs.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb15749d40463d9f284b2a175282766c0f9a4bdf
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_tensor_docs.py
@@ -0,0 +1,6948 @@
+"""Adds docstrings to Tensor functions"""
+
+import torch._C
+from torch._C import _add_docstr as add_docstr
+from ._torch_docs import parse_kwargs, reproducibility_notes
+
+
def add_docstr_all(method, docstr):
    """Attach ``docstr`` to the ``torch._C.TensorBase`` method named ``method``."""
    target = getattr(torch._C.TensorBase, method)
    add_docstr(target, docstr)
+
+
# Shared "Keyword args" fragments interpolated into the docstrings below via
# str.format; parse_kwargs splits the block into one entry per argument.
common_args = parse_kwargs(
    """
    memory_format (:class:`torch.memory_format`, optional): the desired memory format of
        returned Tensor. Default: ``torch.preserve_format``.
"""
)
+
# Keyword-argument descriptions shared by all of the Tensor.new_* docstrings.
new_common_args = parse_kwargs(
    """
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.
    dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
        Default: if None, same :class:`torch.dtype` as this tensor.
    device (:class:`torch.device`, optional): the desired device of returned tensor.
        Default: if None, same :class:`torch.device` as this tensor.
    requires_grad (bool, optional): If autograd should record operations on the
        returned tensor. Default: ``False``.
    pin_memory (bool, optional): If set, returned tensor would be allocated in
        the pinned memory. Works only for CPU tensors. Default: ``False``.
    layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
        Default: ``torch.strided``.
"""
)
+
# --- Docstrings for the Tensor.new_* factory methods ------------------------
# Each entry is a plain signature line plus an r-string body; the shared
# keyword descriptions are interpolated from new_common_args via str.format.
add_docstr_all(
    "new_tensor",
    """
new_tensor(data, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
pin_memory=False) -> Tensor
"""
    + r"""

Returns a new Tensor with :attr:`data` as the tensor data.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

.. warning::

    :func:`new_tensor` always copies :attr:`data`. If you have a Tensor
    ``data`` and want to avoid a copy, use :func:`torch.Tensor.requires_grad_`
    or :func:`torch.Tensor.detach`.
    If you have a numpy array and want to avoid a copy, use
    :func:`torch.from_numpy`.

.. warning::

    When data is a tensor `x`, :func:`new_tensor()` reads out 'the data' from whatever it is passed,
    and constructs a leaf variable. Therefore ``tensor.new_tensor(x)`` is equivalent to ``x.clone().detach()``
    and ``tensor.new_tensor(x, requires_grad=True)`` is equivalent to ``x.clone().detach().requires_grad_(True)``.
    The equivalents using ``clone()`` and ``detach()`` are recommended.

Args:
    data (array_like): The returned Tensor copies :attr:`data`.

Keyword args:
    {dtype}
    {device}
    {requires_grad}
    {layout}
    {pin_memory}

Example::

    >>> tensor = torch.ones((2,), dtype=torch.int8)
    >>> data = [[0, 1], [2, 3]]
    >>> tensor.new_tensor(data)
    tensor([[ 0,  1],
            [ 2,  3]], dtype=torch.int8)

""".format(
        **new_common_args
    ),
)

add_docstr_all(
    "new_full",
    """
new_full(size, fill_value, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
pin_memory=False) -> Tensor
"""
    + r"""

Returns a Tensor of size :attr:`size` filled with :attr:`fill_value`.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

Args:
    fill_value (scalar): the number to fill the output tensor with.

Keyword args:
    {dtype}
    {device}
    {requires_grad}
    {layout}
    {pin_memory}

Example::

    >>> tensor = torch.ones((2,), dtype=torch.float64)
    >>> tensor.new_full((3, 4), 3.141592)
    tensor([[ 3.1416,  3.1416,  3.1416,  3.1416],
            [ 3.1416,  3.1416,  3.1416,  3.1416],
            [ 3.1416,  3.1416,  3.1416,  3.1416]], dtype=torch.float64)

""".format(
        **new_common_args
    ),
)

add_docstr_all(
    "new_empty",
    """
new_empty(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
pin_memory=False) -> Tensor
"""
    + r"""

Returns a Tensor of size :attr:`size` filled with uninitialized data.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

Args:
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.

Keyword args:
    {dtype}
    {device}
    {requires_grad}
    {layout}
    {pin_memory}

Example::

    >>> tensor = torch.ones(())
    >>> tensor.new_empty((2, 3))
    tensor([[ 5.8182e-18,  4.5765e-41, -1.0545e+30],
            [ 3.0949e-41,  4.4842e-44,  0.0000e+00]])

""".format(
        **new_common_args
    ),
)

add_docstr_all(
    "new_empty_strided",
    """
new_empty_strided(size, stride, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
pin_memory=False) -> Tensor
"""
    + r"""

Returns a Tensor of size :attr:`size` and strides :attr:`stride` filled with
uninitialized data. By default, the returned Tensor has the same
:class:`torch.dtype` and :class:`torch.device` as this tensor.

Args:
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.

Keyword args:
    {dtype}
    {device}
    {requires_grad}
    {layout}
    {pin_memory}

Example::

    >>> tensor = torch.ones(())
    >>> tensor.new_empty_strided((2, 3), (3, 1))
    tensor([[ 5.8182e-18,  4.5765e-41, -1.0545e+30],
            [ 3.0949e-41,  4.4842e-44,  0.0000e+00]])

""".format(
        **new_common_args
    ),
)

add_docstr_all(
    "new_ones",
    """
new_ones(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
pin_memory=False) -> Tensor
"""
    + r"""

Returns a Tensor of size :attr:`size` filled with ``1``.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

Args:
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.

Keyword args:
    {dtype}
    {device}
    {requires_grad}
    {layout}
    {pin_memory}

Example::

    >>> tensor = torch.tensor((), dtype=torch.int32)
    >>> tensor.new_ones((2, 3))
    tensor([[ 1,  1,  1],
            [ 1,  1,  1]], dtype=torch.int32)

""".format(
        **new_common_args
    ),
)

add_docstr_all(
    "new_zeros",
    """
new_zeros(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
pin_memory=False) -> Tensor
"""
    + r"""

Returns a Tensor of size :attr:`size` filled with ``0``.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

Args:
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.

Keyword args:
    {dtype}
    {device}
    {requires_grad}
    {layout}
    {pin_memory}

Example::

    >>> tensor = torch.tensor((), dtype=torch.float64)
    >>> tensor.new_zeros((2, 3))
    tensor([[ 0.,  0.,  0.],
            [ 0.,  0.,  0.]], dtype=torch.float64)

""".format(
        **new_common_args
    ),
)
+
# --- Docstrings for abs/absolute/acos/arccos/acosh and their in-place forms -
add_docstr_all(
    "abs",
    r"""
abs() -> Tensor

See :func:`torch.abs`
""",
)

add_docstr_all(
    "abs_",
    r"""
abs_() -> Tensor

In-place version of :meth:`~Tensor.abs`
""",
)

add_docstr_all(
    "absolute",
    r"""
absolute() -> Tensor

Alias for :func:`abs`
""",
)

add_docstr_all(
    "absolute_",
    r"""
absolute_() -> Tensor

In-place version of :meth:`~Tensor.absolute`
Alias for :func:`abs_`
""",
)

add_docstr_all(
    "acos",
    r"""
acos() -> Tensor

See :func:`torch.acos`
""",
)

add_docstr_all(
    "acos_",
    r"""
acos_() -> Tensor

In-place version of :meth:`~Tensor.acos`
""",
)

add_docstr_all(
    "arccos",
    r"""
arccos() -> Tensor

See :func:`torch.arccos`
""",
)

add_docstr_all(
    "arccos_",
    r"""
arccos_() -> Tensor

In-place version of :meth:`~Tensor.arccos`
""",
)

add_docstr_all(
    "acosh",
    r"""
acosh() -> Tensor

See :func:`torch.acosh`
""",
)

add_docstr_all(
    "acosh_",
    r"""
acosh_() -> Tensor

In-place version of :meth:`~Tensor.acosh`
""",
)
+
# NOTE(review): these two entries previously advertised themselves with the
# wrong signature lines ("acosh()" / "acosh_()"); they now match the
# methods they document.
add_docstr_all(
    "arccosh",
    r"""
arccosh() -> Tensor

See :func:`torch.arccosh`
""",
)

add_docstr_all(
    "arccosh_",
    r"""
arccosh_() -> Tensor

In-place version of :meth:`~Tensor.arccosh`
""",
)
+
# --- Docstrings for add/addbmm/addcdiv/addcmul/addmm/addmv/addr families ----
add_docstr_all(
    "add",
    r"""
add(other, *, alpha=1) -> Tensor

Add a scalar or tensor to :attr:`self` tensor. If both :attr:`alpha`
and :attr:`other` are specified, each element of :attr:`other` is scaled by
:attr:`alpha` before being used.

When :attr:`other` is a tensor, the shape of :attr:`other` must be
:ref:`broadcastable ` with the shape of the underlying
tensor

See :func:`torch.add`
""",
)

add_docstr_all(
    "add_",
    r"""
add_(other, *, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.add`
""",
)

add_docstr_all(
    "addbmm",
    r"""
addbmm(batch1, batch2, *, beta=1, alpha=1) -> Tensor

See :func:`torch.addbmm`
""",
)

add_docstr_all(
    "addbmm_",
    r"""
addbmm_(batch1, batch2, *, beta=1, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.addbmm`
""",
)

add_docstr_all(
    "addcdiv",
    r"""
addcdiv(tensor1, tensor2, *, value=1) -> Tensor

See :func:`torch.addcdiv`
""",
)

add_docstr_all(
    "addcdiv_",
    r"""
addcdiv_(tensor1, tensor2, *, value=1) -> Tensor

In-place version of :meth:`~Tensor.addcdiv`
""",
)

add_docstr_all(
    "addcmul",
    r"""
addcmul(tensor1, tensor2, *, value=1) -> Tensor

See :func:`torch.addcmul`
""",
)

add_docstr_all(
    "addcmul_",
    r"""
addcmul_(tensor1, tensor2, *, value=1) -> Tensor

In-place version of :meth:`~Tensor.addcmul`
""",
)

add_docstr_all(
    "addmm",
    r"""
addmm(mat1, mat2, *, beta=1, alpha=1) -> Tensor

See :func:`torch.addmm`
""",
)

add_docstr_all(
    "addmm_",
    r"""
addmm_(mat1, mat2, *, beta=1, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.addmm`
""",
)

add_docstr_all(
    "addmv",
    r"""
addmv(mat, vec, *, beta=1, alpha=1) -> Tensor

See :func:`torch.addmv`
""",
)

add_docstr_all(
    "addmv_",
    r"""
addmv_(mat, vec, *, beta=1, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.addmv`
""",
)

add_docstr_all(
    "sspaddmm",
    r"""
sspaddmm(mat1, mat2, *, beta=1, alpha=1) -> Tensor

See :func:`torch.sspaddmm`
""",
)

add_docstr_all(
    "smm",
    r"""
smm(mat) -> Tensor

See :func:`torch.smm`
""",
)

add_docstr_all(
    "addr",
    r"""
addr(vec1, vec2, *, beta=1, alpha=1) -> Tensor

See :func:`torch.addr`
""",
)
+
+add_docstr_all(
+ "addr_",
+ r"""
+addr_(vec1, vec2, *, beta=1, alpha=1) -> Tensor
+
+In-place version of :meth:`~Tensor.addr`
+""",
+)
+
+add_docstr_all(
+ "align_as",
+ r"""
+align_as(other) -> Tensor
+
+Permutes the dimensions of the :attr:`self` tensor to match the dimension order
+in the :attr:`other` tensor, adding size-one dims for any new names.
+
+This operation is useful for explicit broadcasting by names (see examples).
+
+All of the dims of :attr:`self` must be named in order to use this method.
+The resulting tensor is a view on the original tensor.
+
+All dimension names of :attr:`self` must be present in ``other.names``.
+:attr:`other` may contain named dimensions that are not in ``self.names``;
+the output tensor has a size-one dimension for each of those new names.
+
+To align a tensor to a specific order, use :meth:`~Tensor.align_to`.
+
+Examples::
+
+ # Example 1: Applying a mask
+ >>> mask = torch.randint(2, [127, 128], dtype=torch.bool).refine_names('W', 'H')
+ >>> imgs = torch.randn(32, 128, 127, 3, names=('N', 'H', 'W', 'C'))
+ >>> imgs.masked_fill_(mask.align_as(imgs), 0)
+
+
+ # Example 2: Applying a per-channel-scale
+ >>> def scale_channels(input, scale):
+ >>> scale = scale.refine_names('C')
+ >>> return input * scale.align_as(input)
+
+ >>> num_channels = 3
+ >>> scale = torch.randn(num_channels, names=('C',))
+ >>> imgs = torch.rand(32, 128, 128, num_channels, names=('N', 'H', 'W', 'C'))
+ >>> more_imgs = torch.rand(32, num_channels, 128, 128, names=('N', 'C', 'H', 'W'))
+ >>> videos = torch.randn(3, num_channels, 128, 128, 128, names=('N', 'C', 'H', 'W', 'D'))
+
+ # scale_channels is agnostic to the dimension order of the input
+ >>> scale_channels(imgs, scale)
+ >>> scale_channels(more_imgs, scale)
+ >>> scale_channels(videos, scale)
+
+.. warning::
+ The named tensor API is experimental and subject to change.
+
+""",
+)
+
+add_docstr_all(
+ "all",
+ r"""
+all(dim=None, keepdim=False) -> Tensor
+
+See :func:`torch.all`
+""",
+)
+
+add_docstr_all(
+ "allclose",
+ r"""
+allclose(other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor
+
+See :func:`torch.allclose`
+""",
+)
+
+add_docstr_all(
+ "angle",
+ r"""
+angle() -> Tensor
+
+See :func:`torch.angle`
+""",
+)
+
+add_docstr_all(
+ "any",
+ r"""
+any(dim=None, keepdim=False) -> Tensor
+
+See :func:`torch.any`
+""",
+)
+
+add_docstr_all(
+ "apply_",
+ r"""
+apply_(callable) -> Tensor
+
+Applies the function :attr:`callable` to each element in the tensor, replacing
+each element with the value returned by :attr:`callable`.
+
+.. note::
+
+ This function only works with CPU tensors and should not be used in code
+ sections that require high performance.
+""",
+)
+
+add_docstr_all(
+ "asin",
+ r"""
+asin() -> Tensor
+
+See :func:`torch.asin`
+""",
+)
+
+add_docstr_all(
+ "asin_",
+ r"""
+asin_() -> Tensor
+
+In-place version of :meth:`~Tensor.asin`
+""",
+)
+
+add_docstr_all(
+ "arcsin",
+ r"""
+arcsin() -> Tensor
+
+See :func:`torch.arcsin`
+""",
+)
+
+add_docstr_all(
+ "arcsin_",
+ r"""
+arcsin_() -> Tensor
+
+In-place version of :meth:`~Tensor.arcsin`
+""",
+)
+
+add_docstr_all(
+ "asinh",
+ r"""
+asinh() -> Tensor
+
+See :func:`torch.asinh`
+""",
+)
+
+add_docstr_all(
+ "asinh_",
+ r"""
+asinh_() -> Tensor
+
+In-place version of :meth:`~Tensor.asinh`
+""",
+)
+
+add_docstr_all(
+ "arcsinh",
+ r"""
+arcsinh() -> Tensor
+
+See :func:`torch.arcsinh`
+""",
+)
+
+add_docstr_all(
+ "arcsinh_",
+ r"""
+arcsinh_() -> Tensor
+
+In-place version of :meth:`~Tensor.arcsinh`
+""",
+)
+
+add_docstr_all(
+ "as_strided",
+ r"""
+as_strided(size, stride, storage_offset=None) -> Tensor
+
+See :func:`torch.as_strided`
+""",
+)
+
+add_docstr_all(
+ "as_strided_",
+ r"""
+as_strided_(size, stride, storage_offset=None) -> Tensor
+
+In-place version of :meth:`~Tensor.as_strided`
+""",
+)
+
+add_docstr_all(
+ "atan",
+ r"""
+atan() -> Tensor
+
+See :func:`torch.atan`
+""",
+)
+
+add_docstr_all(
+ "atan_",
+ r"""
+atan_() -> Tensor
+
+In-place version of :meth:`~Tensor.atan`
+""",
+)
+
+add_docstr_all(
+ "arctan",
+ r"""
+arctan() -> Tensor
+
+See :func:`torch.arctan`
+""",
+)
+
+add_docstr_all(
+ "arctan_",
+ r"""
+arctan_() -> Tensor
+
+In-place version of :meth:`~Tensor.arctan`
+""",
+)
+
+add_docstr_all(
+ "atan2",
+ r"""
+atan2(other) -> Tensor
+
+See :func:`torch.atan2`
+""",
+)
+
+add_docstr_all(
+ "atan2_",
+ r"""
+atan2_(other) -> Tensor
+
+In-place version of :meth:`~Tensor.atan2`
+""",
+)
+
+add_docstr_all(
+ "arctan2",
+ r"""
+arctan2(other) -> Tensor
+
+See :func:`torch.arctan2`
+""",
+)
+
+add_docstr_all(
+ "arctan2_",
+ r"""
+atan2_(other) -> Tensor
+
+In-place version of :meth:`~Tensor.arctan2`
+""",
+)
+
+add_docstr_all(
+ "atanh",
+ r"""
+atanh() -> Tensor
+
+See :func:`torch.atanh`
+""",
+)
+
+add_docstr_all(
+ "atanh_",
+ r"""
+atanh_(other) -> Tensor
+
+In-place version of :meth:`~Tensor.atanh`
+""",
+)
+
+add_docstr_all(
+ "arctanh",
+ r"""
+arctanh() -> Tensor
+
+See :func:`torch.arctanh`
+""",
+)
+
+add_docstr_all(
+ "arctanh_",
+ r"""
+arctanh_(other) -> Tensor
+
+In-place version of :meth:`~Tensor.arctanh`
+""",
+)
+
+add_docstr_all(
+ "baddbmm",
+ r"""
+baddbmm(batch1, batch2, *, beta=1, alpha=1) -> Tensor
+
+See :func:`torch.baddbmm`
+""",
+)
+
+add_docstr_all(
+ "baddbmm_",
+ r"""
+baddbmm_(batch1, batch2, *, beta=1, alpha=1) -> Tensor
+
+In-place version of :meth:`~Tensor.baddbmm`
+""",
+)
+
+add_docstr_all(
+ "bernoulli",
+ r"""
+bernoulli(*, generator=None) -> Tensor
+
+Returns a result tensor where each :math:`\texttt{result[i]}` is independently
+sampled from :math:`\text{Bernoulli}(\texttt{self[i]})`. :attr:`self` must have
+floating point ``dtype``, and the result will have the same ``dtype``.
+
+See :func:`torch.bernoulli`
+""",
+)
+
+add_docstr_all(
+ "bernoulli_",
+ r"""
+bernoulli_(p=0.5, *, generator=None) -> Tensor
+
+Fills each location of :attr:`self` with an independent sample from
+:math:`\text{Bernoulli}(\texttt{p})`. :attr:`self` can have integral
+``dtype``.
+
+:attr:`p` should either be a scalar or tensor containing probabilities to be
+used for drawing the binary random number.
+
+If it is a tensor, the :math:`\text{i}^{th}` element of :attr:`self` tensor
+will be set to a value sampled from
+:math:`\text{Bernoulli}(\texttt{p\_tensor[i]})`. In this case `p` must have
+floating point ``dtype``.
+
+See also :meth:`~Tensor.bernoulli` and :func:`torch.bernoulli`
+""",
+)
+
+add_docstr_all(
+ "bincount",
+ r"""
+bincount(weights=None, minlength=0) -> Tensor
+
+See :func:`torch.bincount`
+""",
+)
+
+add_docstr_all(
+ "bitwise_not",
+ r"""
+bitwise_not() -> Tensor
+
+See :func:`torch.bitwise_not`
+""",
+)
+
+add_docstr_all(
+ "bitwise_not_",
+ r"""
+bitwise_not_() -> Tensor
+
+In-place version of :meth:`~Tensor.bitwise_not`
+""",
+)
+
+add_docstr_all(
+ "bitwise_and",
+ r"""
+bitwise_and() -> Tensor
+
+See :func:`torch.bitwise_and`
+""",
+)
+
+add_docstr_all(
+ "bitwise_and_",
+ r"""
+bitwise_and_() -> Tensor
+
+In-place version of :meth:`~Tensor.bitwise_and`
+""",
+)
+
+add_docstr_all(
+ "bitwise_or",
+ r"""
+bitwise_or() -> Tensor
+
+See :func:`torch.bitwise_or`
+""",
+)
+
+add_docstr_all(
+ "bitwise_or_",
+ r"""
+bitwise_or_() -> Tensor
+
+In-place version of :meth:`~Tensor.bitwise_or`
+""",
+)
+
+add_docstr_all(
+ "bitwise_xor",
+ r"""
+bitwise_xor() -> Tensor
+
+See :func:`torch.bitwise_xor`
+""",
+)
+
+add_docstr_all(
+ "bitwise_xor_",
+ r"""
+bitwise_xor_() -> Tensor
+
+In-place version of :meth:`~Tensor.bitwise_xor`
+""",
+)
+
+add_docstr_all(
+ "bitwise_left_shift",
+ r"""
+bitwise_left_shift(other) -> Tensor
+
+See :func:`torch.bitwise_left_shift`
+""",
+)
+
+add_docstr_all(
+ "bitwise_left_shift_",
+ r"""
+bitwise_left_shift_(other) -> Tensor
+
+In-place version of :meth:`~Tensor.bitwise_left_shift`
+""",
+)
+
+add_docstr_all(
+ "bitwise_right_shift",
+ r"""
+bitwise_right_shift(other) -> Tensor
+
+See :func:`torch.bitwise_right_shift`
+""",
+)
+
+add_docstr_all(
+ "bitwise_right_shift_",
+ r"""
+bitwise_right_shift_(other) -> Tensor
+
+In-place version of :meth:`~Tensor.bitwise_right_shift`
+""",
+)
+
+add_docstr_all(
+ "broadcast_to",
+ r"""
+broadcast_to(shape) -> Tensor
+
+See :func:`torch.broadcast_to`.
+""",
+)
+
+add_docstr_all(
+ "logical_and",
+ r"""
+logical_and() -> Tensor
+
+See :func:`torch.logical_and`
+""",
+)
+
+add_docstr_all(
+ "logical_and_",
+ r"""
+logical_and_() -> Tensor
+
+In-place version of :meth:`~Tensor.logical_and`
+""",
+)
+
+add_docstr_all(
+ "logical_not",
+ r"""
+logical_not() -> Tensor
+
+See :func:`torch.logical_not`
+""",
+)
+
+add_docstr_all(
+ "logical_not_",
+ r"""
+logical_not_() -> Tensor
+
+In-place version of :meth:`~Tensor.logical_not`
+""",
+)
+
+add_docstr_all(
+ "logical_or",
+ r"""
+logical_or() -> Tensor
+
+See :func:`torch.logical_or`
+""",
+)
+
+add_docstr_all(
+ "logical_or_",
+ r"""
+logical_or_() -> Tensor
+
+In-place version of :meth:`~Tensor.logical_or`
+""",
+)
+
+add_docstr_all(
+ "logical_xor",
+ r"""
+logical_xor() -> Tensor
+
+See :func:`torch.logical_xor`
+""",
+)
+
+add_docstr_all(
+ "logical_xor_",
+ r"""
+logical_xor_() -> Tensor
+
+In-place version of :meth:`~Tensor.logical_xor`
+""",
+)
+
+add_docstr_all(
+ "bmm",
+ r"""
+bmm(batch2) -> Tensor
+
+See :func:`torch.bmm`
+""",
+)
+
+add_docstr_all(
+ "cauchy_",
+ r"""
+cauchy_(median=0, sigma=1, *, generator=None) -> Tensor
+
+Fills the tensor with numbers drawn from the Cauchy distribution:
+
+.. math::
+
+ f(x) = \dfrac{1}{\pi} \dfrac{\sigma}{(x - \text{median})^2 + \sigma^2}
+
+.. note::
+ Sigma (:math:`\sigma`) is used to denote the scale parameter in Cauchy distribution.
+""",
+)
+
+add_docstr_all(
+ "ceil",
+ r"""
+ceil() -> Tensor
+
+See :func:`torch.ceil`
+""",
+)
+
+add_docstr_all(
+ "ceil_",
+ r"""
+ceil_() -> Tensor
+
+In-place version of :meth:`~Tensor.ceil`
+""",
+)
+
+add_docstr_all(
+ "cholesky",
+ r"""
+cholesky(upper=False) -> Tensor
+
+See :func:`torch.cholesky`
+""",
+)
+
+add_docstr_all(
+ "cholesky_solve",
+ r"""
+cholesky_solve(input2, upper=False) -> Tensor
+
+See :func:`torch.cholesky_solve`
+""",
+)
+
+add_docstr_all(
+ "cholesky_inverse",
+ r"""
+cholesky_inverse(upper=False) -> Tensor
+
+See :func:`torch.cholesky_inverse`
+""",
+)
+
+add_docstr_all(
+ "clamp",
+ r"""
+clamp(min=None, max=None) -> Tensor
+
+See :func:`torch.clamp`
+""",
+)
+
+add_docstr_all(
+ "clamp_",
+ r"""
+clamp_(min=None, max=None) -> Tensor
+
+In-place version of :meth:`~Tensor.clamp`
+""",
+)
+
+add_docstr_all(
+ "clip",
+ r"""
+clip(min=None, max=None) -> Tensor
+
+Alias for :meth:`~Tensor.clamp`.
+""",
+)
+
+add_docstr_all(
+ "clip_",
+ r"""
+clip_(min=None, max=None) -> Tensor
+
+Alias for :meth:`~Tensor.clamp_`.
+""",
+)
+
+add_docstr_all(
+ "clone",
+ r"""
+clone(*, memory_format=torch.preserve_format) -> Tensor
+
+See :func:`torch.clone`
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr_all(
+ "coalesce",
+ r"""
+coalesce() -> Tensor
+
+Returns a coalesced copy of :attr:`self` if :attr:`self` is an
+:ref:`uncoalesced tensor `.
+
+Returns :attr:`self` if :attr:`self` is a coalesced tensor.
+
+.. warning::
+ Throws an error if :attr:`self` is not a sparse COO tensor.
+""",
+)
+
+add_docstr_all(
+ "contiguous",
+ r"""
+contiguous(memory_format=torch.contiguous_format) -> Tensor
+
+Returns a contiguous in memory tensor containing the same data as :attr:`self` tensor. If
+:attr:`self` tensor is already in the specified memory format, this function returns the
+:attr:`self` tensor.
+
+Args:
+ memory_format (:class:`torch.memory_format`, optional): the desired memory format of
+ returned Tensor. Default: ``torch.contiguous_format``.
+""",
+)
+
+add_docstr_all(
+ "copy_",
+ r"""
+copy_(src, non_blocking=False) -> Tensor
+
+Copies the elements from :attr:`src` into :attr:`self` tensor and returns
+:attr:`self`.
+
+The :attr:`src` tensor must be :ref:`broadcastable `
+with the :attr:`self` tensor. It may be of a different data type or reside on a
+different device.
+
+Args:
+ src (Tensor): the source tensor to copy from
+ non_blocking (bool): if ``True`` and this copy is between CPU and GPU,
+ the copy may occur asynchronously with respect to the host. For other
+ cases, this argument has no effect.
+""",
+)
+
+add_docstr_all(
+ "conj",
+ r"""
+conj() -> Tensor
+
+See :func:`torch.conj`
+""",
+)
+
+add_docstr_all(
+ "conj_physical",
+ r"""
+conj_physical() -> Tensor
+
+See :func:`torch.conj_physical`
+""",
+)
+
+add_docstr_all(
+ "conj_physical_",
+ r"""
+conj_physical_() -> Tensor
+
+In-place version of :meth:`~Tensor.conj_physical`
+""",
+)
+
+add_docstr_all(
+ "resolve_conj",
+ r"""
+resolve_conj() -> Tensor
+
+See :func:`torch.resolve_conj`
+""",
+)
+
+add_docstr_all(
+ "resolve_neg",
+ r"""
+resolve_neg() -> Tensor
+
+See :func:`torch.resolve_neg`
+""",
+)
+
+add_docstr_all(
+ "copysign",
+ r"""
+copysign(other) -> Tensor
+
+See :func:`torch.copysign`
+""",
+)
+
+add_docstr_all(
+ "copysign_",
+ r"""
+copysign_(other) -> Tensor
+
+In-place version of :meth:`~Tensor.copysign`
+""",
+)
+
+add_docstr_all(
+ "cos",
+ r"""
+cos() -> Tensor
+
+See :func:`torch.cos`
+""",
+)
+
+add_docstr_all(
+ "cos_",
+ r"""
+cos_() -> Tensor
+
+In-place version of :meth:`~Tensor.cos`
+""",
+)
+
+add_docstr_all(
+ "cosh",
+ r"""
+cosh() -> Tensor
+
+See :func:`torch.cosh`
+""",
+)
+
+add_docstr_all(
+ "cosh_",
+ r"""
+cosh_() -> Tensor
+
+In-place version of :meth:`~Tensor.cosh`
+""",
+)
+
+add_docstr_all(
+ "cpu",
+ r"""
+cpu(memory_format=torch.preserve_format) -> Tensor
+
+Returns a copy of this object in CPU memory.
+
+If this object is already in CPU memory and on the correct device,
+then no copy is performed and the original object is returned.
+
+Args:
+ {memory_format}
+
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr_all(
+ "count_nonzero",
+ r"""
+count_nonzero(dim=None) -> Tensor
+
+See :func:`torch.count_nonzero`
+""",
+)
+
+add_docstr_all(
+ "cov",
+ r"""
+cov(*, correction=1, fweights=None, aweights=None) -> Tensor
+
+See :func:`torch.cov`
+""",
+)
+
+add_docstr_all(
+ "corrcoef",
+ r"""
+corrcoef() -> Tensor
+
+See :func:`torch.corrcoef`
+""",
+)
+
+add_docstr_all(
+ "cross",
+ r"""
+cross(other, dim=None) -> Tensor
+
+See :func:`torch.cross`
+""",
+)
+
+add_docstr_all(
+ "cuda",
+ r"""
+cuda(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor
+
+Returns a copy of this object in CUDA memory.
+
+If this object is already in CUDA memory and on the correct device,
+then no copy is performed and the original object is returned.
+
+Args:
+ device (:class:`torch.device`): The destination GPU device.
+ Defaults to the current CUDA device.
+ non_blocking (bool): If ``True`` and the source is in pinned memory,
+ the copy will be asynchronous with respect to the host.
+ Otherwise, the argument has no effect. Default: ``False``.
+ {memory_format}
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr_all(
+ "ipu",
+ r"""
+ipu(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor
+
+Returns a copy of this object in IPU memory.
+
+If this object is already in IPU memory and on the correct device,
+then no copy is performed and the original object is returned.
+
+Args:
+ device (:class:`torch.device`): The destination IPU device.
+ Defaults to the current IPU device.
+ non_blocking (bool): If ``True`` and the source is in pinned memory,
+ the copy will be asynchronous with respect to the host.
+ Otherwise, the argument has no effect. Default: ``False``.
+ {memory_format}
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr_all(
+ "xpu",
+ r"""
+xpu(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor
+
+Returns a copy of this object in XPU memory.
+
+If this object is already in XPU memory and on the correct device,
+then no copy is performed and the original object is returned.
+
+Args:
+ device (:class:`torch.device`): The destination XPU device.
+ Defaults to the current XPU device.
+ non_blocking (bool): If ``True`` and the source is in pinned memory,
+ the copy will be asynchronous with respect to the host.
+ Otherwise, the argument has no effect. Default: ``False``.
+ {memory_format}
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr_all(
+ "logcumsumexp",
+ r"""
+logcumsumexp(dim) -> Tensor
+
+See :func:`torch.logcumsumexp`
+""",
+)
+
+add_docstr_all(
+ "cummax",
+ r"""
+cummax(dim) -> (Tensor, Tensor)
+
+See :func:`torch.cummax`
+""",
+)
+
+add_docstr_all(
+ "cummin",
+ r"""
+cummin(dim) -> (Tensor, Tensor)
+
+See :func:`torch.cummin`
+""",
+)
+
+add_docstr_all(
+ "cumprod",
+ r"""
+cumprod(dim, dtype=None) -> Tensor
+
+See :func:`torch.cumprod`
+""",
+)
+
+add_docstr_all(
+ "cumprod_",
+ r"""
+cumprod_(dim, dtype=None) -> Tensor
+
+In-place version of :meth:`~Tensor.cumprod`
+""",
+)
+
+add_docstr_all(
+ "cumsum",
+ r"""
+cumsum(dim, dtype=None) -> Tensor
+
+See :func:`torch.cumsum`
+""",
+)
+
+add_docstr_all(
+ "cumsum_",
+ r"""
+cumsum_(dim, dtype=None) -> Tensor
+
+In-place version of :meth:`~Tensor.cumsum`
+""",
+)
+
+add_docstr_all(
+ "data_ptr",
+ r"""
+data_ptr() -> int
+
+Returns the address of the first element of :attr:`self` tensor.
+""",
+)
+
+add_docstr_all(
+ "dequantize",
+ r"""
+dequantize() -> Tensor
+
+Given a quantized Tensor, dequantize it and return the dequantized float Tensor.
+""",
+)
+
+add_docstr_all(
+ "dense_dim",
+ r"""
+dense_dim() -> int
+
+Return the number of dense dimensions in a :ref:`sparse tensor ` :attr:`self`.
+
+.. note::
+ Returns ``len(self.shape)`` if :attr:`self` is not a sparse tensor.
+
+See also :meth:`Tensor.sparse_dim` and :ref:`hybrid tensors `.
+""",
+)
+
+add_docstr_all(
+ "diag",
+ r"""
+diag(diagonal=0) -> Tensor
+
+See :func:`torch.diag`
+""",
+)
+
+add_docstr_all(
+ "diag_embed",
+ r"""
+diag_embed(offset=0, dim1=-2, dim2=-1) -> Tensor
+
+See :func:`torch.diag_embed`
+""",
+)
+
+add_docstr_all(
+ "diagflat",
+ r"""
+diagflat(offset=0) -> Tensor
+
+See :func:`torch.diagflat`
+""",
+)
+
+add_docstr_all(
+ "diagonal",
+ r"""
+diagonal(offset=0, dim1=0, dim2=1) -> Tensor
+
+See :func:`torch.diagonal`
+""",
+)
+
+add_docstr_all(
+ "diagonal_scatter",
+ r"""
+diagonal_scatter(src, offset=0, dim1=0, dim2=1) -> Tensor
+
+See :func:`torch.diagonal_scatter`
+""",
+)
+
+add_docstr_all(
+ "as_strided_scatter",
+ r"""
+as_strided_scatter(src, size, stride, storage_offset=None) -> Tensor
+
+See :func:`torch.as_strided_scatter`
+""",
+)
+
+add_docstr_all(
+ "fill_diagonal_",
+ r"""
+fill_diagonal_(fill_value, wrap=False) -> Tensor
+
+Fill the main diagonal of a tensor that has at least 2-dimensions.
+When dims>2, all dimensions of input must be of equal length.
+This function modifies the input tensor in-place, and returns the input tensor.
+
+Arguments:
+ fill_value (Scalar): the fill value
+ wrap (bool): the diagonal 'wrapped' after N columns for tall matrices.
+
+Example::
+
+ >>> a = torch.zeros(3, 3)
+ >>> a.fill_diagonal_(5)
+ tensor([[5., 0., 0.],
+ [0., 5., 0.],
+ [0., 0., 5.]])
+ >>> b = torch.zeros(7, 3)
+ >>> b.fill_diagonal_(5)
+ tensor([[5., 0., 0.],
+ [0., 5., 0.],
+ [0., 0., 5.],
+ [0., 0., 0.],
+ [0., 0., 0.],
+ [0., 0., 0.],
+ [0., 0., 0.]])
+ >>> c = torch.zeros(7, 3)
+ >>> c.fill_diagonal_(5, wrap=True)
+ tensor([[5., 0., 0.],
+ [0., 5., 0.],
+ [0., 0., 5.],
+ [0., 0., 0.],
+ [5., 0., 0.],
+ [0., 5., 0.],
+ [0., 0., 5.]])
+
+""",
+)
+
+add_docstr_all(
+ "floor_divide",
+ r"""
+floor_divide(value) -> Tensor
+
+See :func:`torch.floor_divide`
+""",
+)
+
+add_docstr_all(
+ "floor_divide_",
+ r"""
+floor_divide_(value) -> Tensor
+
+In-place version of :meth:`~Tensor.floor_divide`
+""",
+)
+
+add_docstr_all(
+ "diff",
+ r"""
+diff(n=1, dim=-1, prepend=None, append=None) -> Tensor
+
+See :func:`torch.diff`
+""",
+)
+
+add_docstr_all(
+ "digamma",
+ r"""
+digamma() -> Tensor
+
+See :func:`torch.digamma`
+""",
+)
+
+add_docstr_all(
+ "digamma_",
+ r"""
+digamma_() -> Tensor
+
+In-place version of :meth:`~Tensor.digamma`
+""",
+)
+
+add_docstr_all(
+ "dim",
+ r"""
+dim() -> int
+
+Returns the number of dimensions of :attr:`self` tensor.
+""",
+)
+
+add_docstr_all(
+ "dist",
+ r"""
+dist(other, p=2) -> Tensor
+
+See :func:`torch.dist`
+""",
+)
+
+add_docstr_all(
+ "div",
+ r"""
+div(value, *, rounding_mode=None) -> Tensor
+
+See :func:`torch.div`
+""",
+)
+
+add_docstr_all(
+ "div_",
+ r"""
+div_(value, *, rounding_mode=None) -> Tensor
+
+In-place version of :meth:`~Tensor.div`
+""",
+)
+
+add_docstr_all(
+ "divide",
+ r"""
+divide(value, *, rounding_mode=None) -> Tensor
+
+See :func:`torch.divide`
+""",
+)
+
+add_docstr_all(
+ "divide_",
+ r"""
+divide_(value, *, rounding_mode=None) -> Tensor
+
+In-place version of :meth:`~Tensor.divide`
+""",
+)
+
+add_docstr_all(
+ "dot",
+ r"""
+dot(other) -> Tensor
+
+See :func:`torch.dot`
+""",
+)
+
+add_docstr_all(
+ "element_size",
+ r"""
+element_size() -> int
+
+Returns the size in bytes of an individual element.
+
+Example::
+
+ >>> torch.tensor([]).element_size()
+ 4
+ >>> torch.tensor([], dtype=torch.uint8).element_size()
+ 1
+
+""",
+)
+
+add_docstr_all(
+ "eq",
+ r"""
+eq(other) -> Tensor
+
+See :func:`torch.eq`
+""",
+)
+
+add_docstr_all(
+ "eq_",
+ r"""
+eq_(other) -> Tensor
+
+In-place version of :meth:`~Tensor.eq`
+""",
+)
+
+add_docstr_all(
+ "equal",
+ r"""
+equal(other) -> bool
+
+See :func:`torch.equal`
+""",
+)
+
+add_docstr_all(
+ "erf",
+ r"""
+erf() -> Tensor
+
+See :func:`torch.erf`
+""",
+)
+
+add_docstr_all(
+ "erf_",
+ r"""
+erf_() -> Tensor
+
+In-place version of :meth:`~Tensor.erf`
+""",
+)
+
+add_docstr_all(
+ "erfc",
+ r"""
+erfc() -> Tensor
+
+See :func:`torch.erfc`
+""",
+)
+
+add_docstr_all(
+ "erfc_",
+ r"""
+erfc_() -> Tensor
+
+In-place version of :meth:`~Tensor.erfc`
+""",
+)
+
+add_docstr_all(
+ "erfinv",
+ r"""
+erfinv() -> Tensor
+
+See :func:`torch.erfinv`
+""",
+)
+
+add_docstr_all(
+ "erfinv_",
+ r"""
+erfinv_() -> Tensor
+
+In-place version of :meth:`~Tensor.erfinv`
+""",
+)
+
+add_docstr_all(
+ "exp",
+ r"""
+exp() -> Tensor
+
+See :func:`torch.exp`
+""",
+)
+
+add_docstr_all(
+ "exp_",
+ r"""
+exp_() -> Tensor
+
+In-place version of :meth:`~Tensor.exp`
+""",
+)
+
+add_docstr_all(
+ "exp2",
+ r"""
+exp2() -> Tensor
+
+See :func:`torch.exp2`
+""",
+)
+
+add_docstr_all(
+ "exp2_",
+ r"""
+exp2_() -> Tensor
+
+In-place version of :meth:`~Tensor.exp2`
+""",
+)
+
+add_docstr_all(
+ "expm1",
+ r"""
+expm1() -> Tensor
+
+See :func:`torch.expm1`
+""",
+)
+
+add_docstr_all(
+ "expm1_",
+ r"""
+expm1_() -> Tensor
+
+In-place version of :meth:`~Tensor.expm1`
+""",
+)
+
+add_docstr_all(
+ "exponential_",
+ r"""
+exponential_(lambd=1, *, generator=None) -> Tensor
+
+Fills :attr:`self` tensor with elements drawn from the PDF (probability density function):
+
+.. math::
+
+ f(x) = \lambda e^{-\lambda x}, x > 0
+
+.. note::
+ In probability theory, exponential distribution is supported on interval [0, :math:`\inf`) (i.e., :math:`x >= 0`)
+ implying that zero can be sampled from the exponential distribution.
+ However, :func:`torch.Tensor.exponential_` does not sample zero,
+ which means that its actual support is the interval (0, :math:`\inf`).
+
+ Note that :func:`torch.distributions.exponential.Exponential` is supported on the interval [0, :math:`\inf`) and can sample zero.
+""",
+)
+
+add_docstr_all(
+ "fill_",
+ r"""
+fill_(value) -> Tensor
+
+Fills :attr:`self` tensor with the specified value.
+""",
+)
+
+add_docstr_all(
+ "floor",
+ r"""
+floor() -> Tensor
+
+See :func:`torch.floor`
+""",
+)
+
+add_docstr_all(
+ "flip",
+ r"""
+flip(dims) -> Tensor
+
+See :func:`torch.flip`
+""",
+)
+
+add_docstr_all(
+ "fliplr",
+ r"""
+fliplr() -> Tensor
+
+See :func:`torch.fliplr`
+""",
+)
+
+add_docstr_all(
+ "flipud",
+ r"""
+flipud() -> Tensor
+
+See :func:`torch.flipud`
+""",
+)
+
+add_docstr_all(
+ "roll",
+ r"""
+roll(shifts, dims) -> Tensor
+
+See :func:`torch.roll`
+""",
+)
+
+add_docstr_all(
+ "floor_",
+ r"""
+floor_() -> Tensor
+
+In-place version of :meth:`~Tensor.floor`
+""",
+)
+
+add_docstr_all(
+ "fmod",
+ r"""
+fmod(divisor) -> Tensor
+
+See :func:`torch.fmod`
+""",
+)
+
+add_docstr_all(
+ "fmod_",
+ r"""
+fmod_(divisor) -> Tensor
+
+In-place version of :meth:`~Tensor.fmod`
+""",
+)
+
+add_docstr_all(
+ "frac",
+ r"""
+frac() -> Tensor
+
+See :func:`torch.frac`
+""",
+)
+
+add_docstr_all(
+ "frac_",
+ r"""
+frac_() -> Tensor
+
+In-place version of :meth:`~Tensor.frac`
+""",
+)
+
+add_docstr_all(
+ "frexp",
+ r"""
+frexp(input) -> (Tensor mantissa, Tensor exponent)
+
+See :func:`torch.frexp`
+""",
+)
+
+add_docstr_all(
+ "flatten",
+ r"""
+flatten(start_dim=0, end_dim=-1) -> Tensor
+
+See :func:`torch.flatten`
+""",
+)
+
+add_docstr_all(
+ "gather",
+ r"""
+gather(dim, index) -> Tensor
+
+See :func:`torch.gather`
+""",
+)
+
+add_docstr_all(
+ "gcd",
+ r"""
+gcd(other) -> Tensor
+
+See :func:`torch.gcd`
+""",
+)
+
+add_docstr_all(
+ "gcd_",
+ r"""
+gcd_(other) -> Tensor
+
+In-place version of :meth:`~Tensor.gcd`
+""",
+)
+
+add_docstr_all(
+ "ge",
+ r"""
+ge(other) -> Tensor
+
+See :func:`torch.ge`.
+""",
+)
+
+add_docstr_all(
+ "ge_",
+ r"""
+ge_(other) -> Tensor
+
+In-place version of :meth:`~Tensor.ge`.
+""",
+)
+
+add_docstr_all(
+ "greater_equal",
+ r"""
+greater_equal(other) -> Tensor
+
+See :func:`torch.greater_equal`.
+""",
+)
+
+add_docstr_all(
+ "greater_equal_",
+ r"""
+greater_equal_(other) -> Tensor
+
+In-place version of :meth:`~Tensor.greater_equal`.
+""",
+)
+
+add_docstr_all(
+ "geometric_",
+ r"""
+geometric_(p, *, generator=None) -> Tensor
+
+Fills :attr:`self` tensor with elements drawn from the geometric distribution:
+
+.. math::
+
+ P(X=k) = (1 - p)^{k - 1} p, k = 1, 2, ...
+
+.. note::
+ :func:`torch.Tensor.geometric_` `k`-th trial is the first success hence draws samples in :math:`\{1, 2, \ldots\}`, whereas
+ :func:`torch.distributions.geometric.Geometric` :math:`(k+1)`-th trial is the first success
+ hence draws samples in :math:`\{0, 1, \ldots\}`.
+""",
+)
+
+add_docstr_all(
+ "geqrf",
+ r"""
+geqrf() -> (Tensor, Tensor)
+
+See :func:`torch.geqrf`
+""",
+)
+
+add_docstr_all(
+ "ger",
+ r"""
+ger(vec2) -> Tensor
+
+See :func:`torch.ger`
+""",
+)
+
+add_docstr_all(
+ "inner",
+ r"""
+inner(other) -> Tensor
+
+See :func:`torch.inner`.
+""",
+)
+
+add_docstr_all(
+ "outer",
+ r"""
+outer(vec2) -> Tensor
+
+See :func:`torch.outer`.
+""",
+)
+
+add_docstr_all(
+ "hypot",
+ r"""
+hypot(other) -> Tensor
+
+See :func:`torch.hypot`
+""",
+)
+
+add_docstr_all(
+ "hypot_",
+ r"""
+hypot_(other) -> Tensor
+
+In-place version of :meth:`~Tensor.hypot`
+""",
+)
+
+add_docstr_all(
+ "i0",
+ r"""
+i0() -> Tensor
+
+See :func:`torch.i0`
+""",
+)
+
+add_docstr_all(
+ "i0_",
+ r"""
+i0_() -> Tensor
+
+In-place version of :meth:`~Tensor.i0`
+""",
+)
+
+add_docstr_all(
+ "igamma",
+ r"""
+igamma(other) -> Tensor
+
+See :func:`torch.igamma`
+""",
+)
+
+add_docstr_all(
+ "igamma_",
+ r"""
+igamma_(other) -> Tensor
+
+In-place version of :meth:`~Tensor.igamma`
+""",
+)
+
+add_docstr_all(
+ "igammac",
+ r"""
+igammac(other) -> Tensor
+See :func:`torch.igammac`
+""",
+)
+
+add_docstr_all(
+ "igammac_",
+ r"""
+igammac_(other) -> Tensor
+In-place version of :meth:`~Tensor.igammac`
+""",
+)
+
+add_docstr_all(
+ "indices",
+ r"""
+indices() -> Tensor
+
+Return the indices tensor of a :ref:`sparse COO tensor `.
+
+.. warning::
+ Throws an error if :attr:`self` is not a sparse COO tensor.
+
+See also :meth:`Tensor.values`.
+
+.. note::
+ This method can only be called on a coalesced sparse tensor. See
+ :meth:`Tensor.coalesce` for details.
+""",
+)
+
+add_docstr_all(
+ "get_device",
+ r"""
+get_device() -> Device ordinal (Integer)
+
+For CUDA tensors, this function returns the device ordinal of the GPU on which the tensor resides.
+For CPU tensors, this function returns `-1`.
+
+Example::
+
+ >>> x = torch.randn(3, 4, 5, device='cuda:0')
+ >>> x.get_device()
+ 0
+ >>> x.cpu().get_device()
+ -1
+""",
+)
+
+add_docstr_all(
+ "values",
+ r"""
+values() -> Tensor
+
+Return the values tensor of a :ref:`sparse COO tensor `.
+
+.. warning::
+ Throws an error if :attr:`self` is not a sparse COO tensor.
+
+See also :meth:`Tensor.indices`.
+
+.. note::
+ This method can only be called on a coalesced sparse tensor. See
+ :meth:`Tensor.coalesce` for details.
+""",
+)
+
+add_docstr_all(
+ "gt",
+ r"""
+gt(other) -> Tensor
+
+See :func:`torch.gt`.
+""",
+)
+
+add_docstr_all(
+ "gt_",
+ r"""
+gt_(other) -> Tensor
+
+In-place version of :meth:`~Tensor.gt`.
+""",
+)
+
+add_docstr_all(
+ "greater",
+ r"""
+greater(other) -> Tensor
+
+See :func:`torch.greater`.
+""",
+)
+
+add_docstr_all(
+ "greater_",
+ r"""
+greater_(other) -> Tensor
+
+In-place version of :meth:`~Tensor.greater`.
+""",
+)
+
+add_docstr_all(
+ "has_names",
+ r"""
+Is ``True`` if any of this tensor's dimensions are named. Otherwise, is ``False``.
+""",
+)
+
+add_docstr_all(
+ "hardshrink",
+ r"""
+hardshrink(lambd=0.5) -> Tensor
+
+See :func:`torch.nn.functional.hardshrink`
+""",
+)
+
+add_docstr_all(
+ "heaviside",
+ r"""
+heaviside(values) -> Tensor
+
+See :func:`torch.heaviside`
+""",
+)
+
+add_docstr_all(
+ "heaviside_",
+ r"""
+heaviside_(values) -> Tensor
+
+In-place version of :meth:`~Tensor.heaviside`
+""",
+)
+
+add_docstr_all(
+ "histc",
+ r"""
+histc(bins=100, min=0, max=0) -> Tensor
+
+See :func:`torch.histc`
+""",
+)
+
+add_docstr_all(
+ "histogram",
+ r"""
+histogram(input, bins, *, range=None, weight=None, density=False) -> (Tensor, Tensor)
+
+See :func:`torch.histogram`
+""",
+)
+
+add_docstr_all(
+ "index_add_",
+ r"""
+index_add_(dim, index, source, *, alpha=1) -> Tensor
+
+Accumulate the elements of :attr:`alpha` times ``source`` into the :attr:`self`
+tensor by adding to the indices in the order given in :attr:`index`. For example,
+if ``dim == 0``, ``index[i] == j``, and ``alpha=-1``, then the ``i``\ th row of
+``source`` is subtracted from the ``j``\ th row of :attr:`self`.
+
+The :attr:`dim`\ th dimension of ``source`` must have the same size as the
+length of :attr:`index` (which must be a vector), and all other dimensions must
+match :attr:`self`, or an error will be raised.
+
+For a 3-D tensor the output is given as::
+
+ self[index[i], :, :] += alpha * src[i, :, :] # if dim == 0
+ self[:, index[i], :] += alpha * src[:, i, :] # if dim == 1
+ self[:, :, index[i]] += alpha * src[:, :, i] # if dim == 2
+
+Note:
+ {forward_reproducibility_note}
+
+Args:
+ dim (int): dimension along which to index
+ index (Tensor): indices of ``source`` to select from,
+ should have dtype either `torch.int64` or `torch.int32`
+ source (Tensor): the tensor containing values to add
+
+Keyword args:
+ alpha (Number): the scalar multiplier for ``source``
+
+Example::
+
+ >>> x = torch.ones(5, 3)
+ >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
+ >>> index = torch.tensor([0, 4, 2])
+ >>> x.index_add_(0, index, t)
+ tensor([[ 2., 3., 4.],
+ [ 1., 1., 1.],
+ [ 8., 9., 10.],
+ [ 1., 1., 1.],
+ [ 5., 6., 7.]])
+ >>> x.index_add_(0, index, t, alpha=-1)
+ tensor([[ 1., 1., 1.],
+ [ 1., 1., 1.],
+ [ 1., 1., 1.],
+ [ 1., 1., 1.],
+ [ 1., 1., 1.]])
+""".format(
+ **reproducibility_notes
+ ),
+)
+
+add_docstr_all(
+ "index_copy_",
+ r"""
+index_copy_(dim, index, tensor) -> Tensor
+
+Copies the elements of :attr:`tensor` into the :attr:`self` tensor by selecting
+the indices in the order given in :attr:`index`. For example, if ``dim == 0``
+and ``index[i] == j``, then the ``i``\ th row of :attr:`tensor` is copied to the
+``j``\ th row of :attr:`self`.
+
+The :attr:`dim`\ th dimension of :attr:`tensor` must have the same size as the
+length of :attr:`index` (which must be a vector), and all other dimensions must
+match :attr:`self`, or an error will be raised.
+
+.. note::
+ If :attr:`index` contains duplicate entries, multiple elements from
+ :attr:`tensor` will be copied to the same index of :attr:`self`. The result
+ is nondeterministic since it depends on which copy occurs last.
+
+Args:
+ dim (int): dimension along which to index
+ index (LongTensor): indices of :attr:`tensor` to select from
+ tensor (Tensor): the tensor containing values to copy
+
+Example::
+
+ >>> x = torch.zeros(5, 3)
+ >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
+ >>> index = torch.tensor([0, 4, 2])
+ >>> x.index_copy_(0, index, t)
+ tensor([[ 1., 2., 3.],
+ [ 0., 0., 0.],
+ [ 7., 8., 9.],
+ [ 0., 0., 0.],
+ [ 4., 5., 6.]])
+""",
+)
+
add_docstr_all(
    "index_fill_",
    r"""
index_fill_(dim, index, value) -> Tensor

Fills the elements of the :attr:`self` tensor with value :attr:`value` by
selecting the indices in the order given in :attr:`index`.

Args:
    dim (int): dimension along which to index
    index (LongTensor): indices of :attr:`self` tensor to fill in
    value (float): the value to fill with

Example::

    >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
    >>> index = torch.tensor([0, 2])
    >>> x.index_fill_(1, index, -1)
    tensor([[-1.,  2., -1.],
            [-1.,  5., -1.],
            [-1.,  8., -1.]])
""",
)
+
+add_docstr_all(
+ "index_put_",
+ r"""
+index_put_(indices, values, accumulate=False) -> Tensor
+
+Puts values from the tensor :attr:`values` into the tensor :attr:`self` using
+the indices specified in :attr:`indices` (which is a tuple of Tensors). The
+expression ``tensor.index_put_(indices, values)`` is equivalent to
+``tensor[indices] = values``. Returns :attr:`self`.
+
+If :attr:`accumulate` is ``True``, the elements in :attr:`values` are added to
+:attr:`self`. If accumulate is ``False``, the behavior is undefined if indices
+contain duplicate elements.
+
+Args:
+ indices (tuple of LongTensor): tensors used to index into `self`.
+ values (Tensor): tensor of same dtype as `self`.
+ accumulate (bool): whether to accumulate into self
+""",
+)
+
add_docstr_all(
    "index_put",
    r"""
index_put(indices, values, accumulate=False) -> Tensor

Out-of-place version of :meth:`~Tensor.index_put_`.
""",
)
+
add_docstr_all(
    "index_reduce_",
    r"""
index_reduce_(dim, index, source, reduce, *, include_self=True) -> Tensor

Accumulate the elements of ``source`` into the :attr:`self`
tensor by accumulating to the indices in the order given in :attr:`index`
using the reduction given by the ``reduce`` argument. For example, if ``dim == 0``,
``index[i] == j``, ``reduce == prod`` and ``include_self == True`` then the ``i``\ th
row of ``source`` is multiplied by the ``j``\ th row of :attr:`self`. If
:obj:`include_self=True`, the values in the :attr:`self` tensor are included
in the reduction, otherwise, rows in the :attr:`self` tensor that are accumulated
to are treated as if they were filled with the reduction identities.

The :attr:`dim`\ th dimension of ``source`` must have the same size as the
length of :attr:`index` (which must be a vector), and all other dimensions must
match :attr:`self`, or an error will be raised.

For a 3-D tensor with :obj:`reduce="prod"` and :obj:`include_self=True` the
output is given as::

    self[index[i], :, :] *= src[i, :, :]  # if dim == 0
    self[:, index[i], :] *= src[:, i, :]  # if dim == 1
    self[:, :, index[i]] *= src[:, :, i]  # if dim == 2

Note:
    {forward_reproducibility_note}

.. note::

    This function only supports floating point tensors.

.. warning::

    This function is in beta and may change in the near future.

Args:
    dim (int): dimension along which to index
    index (Tensor): indices of ``source`` to select from,
        should have dtype either `torch.int64` or `torch.int32`
    source (FloatTensor): the tensor containing values to accumulate
    reduce (str): the reduction operation to apply
        (:obj:`"prod"`, :obj:`"mean"`, :obj:`"amax"`, :obj:`"amin"`)

Keyword args:
    include_self (bool): whether the elements from the ``self`` tensor are
        included in the reduction

Example::

    >>> x = torch.empty(5, 3).fill_(2)
    >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], dtype=torch.float)
    >>> index = torch.tensor([0, 4, 2, 0])
    >>> x.index_reduce_(0, index, t, 'prod')
    tensor([[20., 44., 72.],
            [ 2.,  2.,  2.],
            [14., 16., 18.],
            [ 2.,  2.,  2.],
            [ 8., 10., 12.]])
    >>> x = torch.empty(5, 3).fill_(2)
    >>> x.index_reduce_(0, index, t, 'prod', include_self=False)
    tensor([[10., 22., 36.],
            [ 2.,  2.,  2.],
            [ 7.,  8.,  9.],
            [ 2.,  2.,  2.],
            [ 4.,  5.,  6.]])
""".format(
        **reproducibility_notes
    ),
)
+
+add_docstr_all(
+ "index_select",
+ r"""
+index_select(dim, index) -> Tensor
+
+See :func:`torch.index_select`
+""",
+)
+
add_docstr_all(
    "sparse_mask",
    r"""
sparse_mask(mask) -> Tensor

Returns a new :ref:`sparse tensor <sparse-docs>` with values from a
strided tensor :attr:`self` filtered by the indices of the sparse
tensor :attr:`mask`. The values of :attr:`mask` sparse tensor are
ignored. :attr:`self` and :attr:`mask` tensors must have the same
shape.

.. note::

  The returned sparse tensor might contain duplicate values if :attr:`mask`
  is not coalesced. It is therefore advisable to pass ``mask.coalesce()``
  if such behavior is not desired.

.. note::

  The returned sparse tensor has the same indices as the sparse tensor
  :attr:`mask`, even when the corresponding values in :attr:`self` are
  zeros.

Args:
    mask (Tensor): a sparse tensor whose indices are used as a filter

Example::

    >>> nse = 5
    >>> dims = (5, 5, 2, 2)
    >>> I = torch.cat([torch.randint(0, dims[0], size=(nse,)),
    ...                torch.randint(0, dims[1], size=(nse,))], 0).reshape(2, nse)
    >>> V = torch.randn(nse, dims[2], dims[3])
    >>> S = torch.sparse_coo_tensor(I, V, dims).coalesce()
    >>> D = torch.randn(dims)
    >>> D.sparse_mask(S)
    tensor(indices=tensor([[0, 0, 0, 2],
                           [0, 1, 4, 3]]),
           values=tensor([[[ 1.6550,  0.2397],
                           [-0.1611, -0.0779]],

                          [[ 0.2326, -1.0558],
                           [ 1.4711,  1.9678]],

                          [[-0.5138, -0.0411],
                           [ 1.9417,  0.5158]],

                          [[ 0.0793,  0.0036],
                           [-0.2569, -0.1055]]]),
           size=(5, 5, 2, 2), nnz=4, layout=torch.sparse_coo)
""",
)
+
+add_docstr_all(
+ "inverse",
+ r"""
+inverse() -> Tensor
+
+See :func:`torch.inverse`
+""",
+)
+
+add_docstr_all(
+ "isnan",
+ r"""
+isnan() -> Tensor
+
+See :func:`torch.isnan`
+""",
+)
+
+add_docstr_all(
+ "isinf",
+ r"""
+isinf() -> Tensor
+
+See :func:`torch.isinf`
+""",
+)
+
+add_docstr_all(
+ "isposinf",
+ r"""
+isposinf() -> Tensor
+
+See :func:`torch.isposinf`
+""",
+)
+
+add_docstr_all(
+ "isneginf",
+ r"""
+isneginf() -> Tensor
+
+See :func:`torch.isneginf`
+""",
+)
+
+add_docstr_all(
+ "isfinite",
+ r"""
+isfinite() -> Tensor
+
+See :func:`torch.isfinite`
+""",
+)
+
+add_docstr_all(
+ "isclose",
+ r"""
+isclose(other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor
+
+See :func:`torch.isclose`
+""",
+)
+
+add_docstr_all(
+ "isreal",
+ r"""
+isreal() -> Tensor
+
+See :func:`torch.isreal`
+""",
+)
+
add_docstr_all(
    "is_coalesced",
    r"""
is_coalesced() -> bool

Returns ``True`` if :attr:`self` is a :ref:`sparse COO tensor
<sparse-coo-docs>` that is coalesced, ``False`` otherwise.

.. warning::
  Throws an error if :attr:`self` is not a sparse COO tensor.

See :meth:`coalesce` and :ref:`uncoalesced tensors <sparse-uncoalesced-coo-docs>`.
""",
)
+
+add_docstr_all(
+ "is_contiguous",
+ r"""
+is_contiguous(memory_format=torch.contiguous_format) -> bool
+
+Returns True if :attr:`self` tensor is contiguous in memory in the order specified
+by memory format.
+
+Args:
+ memory_format (:class:`torch.memory_format`, optional): Specifies memory allocation
+ order. Default: ``torch.contiguous_format``.
+""",
+)
+
+add_docstr_all(
+ "is_pinned",
+ r"""
+Returns true if this tensor resides in pinned memory.
+""",
+)
+
+add_docstr_all(
+ "is_floating_point",
+ r"""
+is_floating_point() -> bool
+
+Returns True if the data type of :attr:`self` is a floating point data type.
+""",
+)
+
+add_docstr_all(
+ "is_complex",
+ r"""
+is_complex() -> bool
+
+Returns True if the data type of :attr:`self` is a complex data type.
+""",
+)
+
+add_docstr_all(
+ "is_inference",
+ r"""
+is_inference() -> bool
+
+See :func:`torch.is_inference`
+""",
+)
+
+add_docstr_all(
+ "is_conj",
+ r"""
+is_conj() -> bool
+
+Returns True if the conjugate bit of :attr:`self` is set to true.
+""",
+)
+
+add_docstr_all(
+ "is_neg",
+ r"""
+is_neg() -> bool
+
+Returns True if the negative bit of :attr:`self` is set to true.
+""",
+)
+
+add_docstr_all(
+ "is_signed",
+ r"""
+is_signed() -> bool
+
+Returns True if the data type of :attr:`self` is a signed data type.
+""",
+)
+
+add_docstr_all(
+ "is_set_to",
+ r"""
+is_set_to(tensor) -> bool
+
+Returns True if both tensors are pointing to the exact same memory (same
+storage, offset, size and stride).
+""",
+)
+
+add_docstr_all(
+ "item",
+ r"""
+item() -> number
+
+Returns the value of this tensor as a standard Python number. This only works
+for tensors with one element. For other cases, see :meth:`~Tensor.tolist`.
+
+This operation is not differentiable.
+
+Example::
+
+ >>> x = torch.tensor([1.0])
+ >>> x.item()
+ 1.0
+
+""",
+)
+
+add_docstr_all(
+ "kron",
+ r"""
+kron(other) -> Tensor
+
+See :func:`torch.kron`
+""",
+)
+
+add_docstr_all(
+ "kthvalue",
+ r"""
+kthvalue(k, dim=None, keepdim=False) -> (Tensor, LongTensor)
+
+See :func:`torch.kthvalue`
+""",
+)
+
+add_docstr_all(
+ "ldexp",
+ r"""
+ldexp(other) -> Tensor
+
+See :func:`torch.ldexp`
+""",
+)
+
+add_docstr_all(
+ "ldexp_",
+ r"""
+ldexp_(other) -> Tensor
+
+In-place version of :meth:`~Tensor.ldexp`
+""",
+)
+
+add_docstr_all(
+ "lcm",
+ r"""
+lcm(other) -> Tensor
+
+See :func:`torch.lcm`
+""",
+)
+
+add_docstr_all(
+ "lcm_",
+ r"""
+lcm_(other) -> Tensor
+
+In-place version of :meth:`~Tensor.lcm`
+""",
+)
+
+add_docstr_all(
+ "le",
+ r"""
+le(other) -> Tensor
+
+See :func:`torch.le`.
+""",
+)
+
+add_docstr_all(
+ "le_",
+ r"""
+le_(other) -> Tensor
+
+In-place version of :meth:`~Tensor.le`.
+""",
+)
+
+add_docstr_all(
+ "less_equal",
+ r"""
+less_equal(other) -> Tensor
+
+See :func:`torch.less_equal`.
+""",
+)
+
+add_docstr_all(
+ "less_equal_",
+ r"""
+less_equal_(other) -> Tensor
+
+In-place version of :meth:`~Tensor.less_equal`.
+""",
+)
+
+add_docstr_all(
+ "lerp",
+ r"""
+lerp(end, weight) -> Tensor
+
+See :func:`torch.lerp`
+""",
+)
+
+add_docstr_all(
+ "lerp_",
+ r"""
+lerp_(end, weight) -> Tensor
+
+In-place version of :meth:`~Tensor.lerp`
+""",
+)
+
+add_docstr_all(
+ "lgamma",
+ r"""
+lgamma() -> Tensor
+
+See :func:`torch.lgamma`
+""",
+)
+
+add_docstr_all(
+ "lgamma_",
+ r"""
+lgamma_() -> Tensor
+
+In-place version of :meth:`~Tensor.lgamma`
+""",
+)
+
+add_docstr_all(
+ "log",
+ r"""
+log() -> Tensor
+
+See :func:`torch.log`
+""",
+)
+
+add_docstr_all(
+ "log_",
+ r"""
+log_() -> Tensor
+
+In-place version of :meth:`~Tensor.log`
+""",
+)
+
+add_docstr_all(
+ "log10",
+ r"""
+log10() -> Tensor
+
+See :func:`torch.log10`
+""",
+)
+
+add_docstr_all(
+ "log10_",
+ r"""
+log10_() -> Tensor
+
+In-place version of :meth:`~Tensor.log10`
+""",
+)
+
+add_docstr_all(
+ "log1p",
+ r"""
+log1p() -> Tensor
+
+See :func:`torch.log1p`
+""",
+)
+
+add_docstr_all(
+ "log1p_",
+ r"""
+log1p_() -> Tensor
+
+In-place version of :meth:`~Tensor.log1p`
+""",
+)
+
+add_docstr_all(
+ "log2",
+ r"""
+log2() -> Tensor
+
+See :func:`torch.log2`
+""",
+)
+
+add_docstr_all(
+ "log2_",
+ r"""
+log2_() -> Tensor
+
+In-place version of :meth:`~Tensor.log2`
+""",
+)
+
+add_docstr_all(
+ "logaddexp",
+ r"""
+logaddexp(other) -> Tensor
+
+See :func:`torch.logaddexp`
+""",
+)
+
+add_docstr_all(
+ "logaddexp2",
+ r"""
+logaddexp2(other) -> Tensor
+
+See :func:`torch.logaddexp2`
+""",
+)
+
add_docstr_all(
    "log_normal_",
    r"""
log_normal_(mean=1, std=2, *, generator=None)

Fills :attr:`self` tensor with numbers sampled from the log-normal distribution
parameterized by the given mean :math:`\mu` and standard deviation
:math:`\sigma`. Note that :attr:`mean` and :attr:`std` are the mean and
standard deviation of the underlying normal distribution, and not of the
returned distribution:

.. math::

    f(x) = \dfrac{1}{x \sigma \sqrt{2\pi}}\ e^{-\frac{(\ln x - \mu)^2}{2\sigma^2}}
""",
)
+
+add_docstr_all(
+ "logsumexp",
+ r"""
+logsumexp(dim, keepdim=False) -> Tensor
+
+See :func:`torch.logsumexp`
+""",
+)
+
+add_docstr_all(
+ "lt",
+ r"""
+lt(other) -> Tensor
+
+See :func:`torch.lt`.
+""",
+)
+
+add_docstr_all(
+ "lt_",
+ r"""
+lt_(other) -> Tensor
+
+In-place version of :meth:`~Tensor.lt`.
+""",
+)
+
add_docstr_all(
    "less",
    r"""
less(other) -> Tensor

See :func:`torch.less`.
""",
)
+
+add_docstr_all(
+ "less_",
+ r"""
+less_(other) -> Tensor
+
+In-place version of :meth:`~Tensor.less`.
+""",
+)
+
+add_docstr_all(
+ "lu_solve",
+ r"""
+lu_solve(LU_data, LU_pivots) -> Tensor
+
+See :func:`torch.lu_solve`
+""",
+)
+
add_docstr_all(
    "map_",
    r"""
map_(tensor, callable)

Applies :attr:`callable` for each element in :attr:`self` tensor and the given
:attr:`tensor` and stores the results in :attr:`self` tensor. :attr:`self` tensor and
the given :attr:`tensor` must be :ref:`broadcastable <broadcasting-semantics>`.

The :attr:`callable` should have the signature::

    def callable(a, b) -> number
""",
)
+
add_docstr_all(
    "masked_scatter_",
    r"""
masked_scatter_(mask, source)

Copies elements from :attr:`source` into :attr:`self` tensor at positions where
the :attr:`mask` is True. Elements from :attr:`source` are copied into :attr:`self`
starting at position 0 of :attr:`source` and continuing in order one-by-one for each
occurrence of :attr:`mask` being True.
The shape of :attr:`mask` must be :ref:`broadcastable <broadcasting-semantics>`
with the shape of the underlying tensor. The :attr:`source` should have at least
as many elements as the number of ones in :attr:`mask`.

Args:
    mask (BoolTensor): the boolean mask
    source (Tensor): the tensor to copy from

.. note::

    The :attr:`mask` operates on the :attr:`self` tensor, not on the given
    :attr:`source` tensor.

Example:

    >>> self = torch.tensor([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])
    >>> mask = torch.tensor([[0, 0, 0, 1, 1], [1, 1, 0, 1, 1]])
    >>> source = torch.tensor([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
    >>> self.masked_scatter_(mask, source)
    tensor([[0, 0, 0, 0, 1],
            [2, 3, 0, 4, 5]])

""",
)
+
add_docstr_all(
    "masked_fill_",
    r"""
masked_fill_(mask, value)

Fills elements of :attr:`self` tensor with :attr:`value` where :attr:`mask` is
True. The shape of :attr:`mask` must be
:ref:`broadcastable <broadcasting-semantics>` with the shape of the underlying
tensor.

Args:
    mask (BoolTensor): the boolean mask
    value (float): the value to fill in with
""",
)
+
+add_docstr_all(
+ "masked_select",
+ r"""
+masked_select(mask) -> Tensor
+
+See :func:`torch.masked_select`
+""",
+)
+
+add_docstr_all(
+ "matrix_power",
+ r"""
+matrix_power(n) -> Tensor
+
+.. note:: :meth:`~Tensor.matrix_power` is deprecated, use :func:`torch.linalg.matrix_power` instead.
+
+Alias for :func:`torch.linalg.matrix_power`
+""",
+)
+
+add_docstr_all(
+ "matrix_exp",
+ r"""
+matrix_exp() -> Tensor
+
+See :func:`torch.matrix_exp`
+""",
+)
+
+add_docstr_all(
+ "max",
+ r"""
+max(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)
+
+See :func:`torch.max`
+""",
+)
+
+add_docstr_all(
+ "amax",
+ r"""
+amax(dim=None, keepdim=False) -> Tensor
+
+See :func:`torch.amax`
+""",
+)
+
+add_docstr_all(
+ "maximum",
+ r"""
+maximum(other) -> Tensor
+
+See :func:`torch.maximum`
+""",
+)
+
+add_docstr_all(
+ "fmax",
+ r"""
+fmax(other) -> Tensor
+
+See :func:`torch.fmax`
+""",
+)
+
+add_docstr_all(
+ "argmax",
+ r"""
+argmax(dim=None, keepdim=False) -> LongTensor
+
+See :func:`torch.argmax`
+""",
+)
+
+add_docstr_all(
+ "argwhere",
+ r"""
+argwhere() -> Tensor
+
+See :func:`torch.argwhere`
+""",
+)
+
+add_docstr_all(
+ "mean",
+ r"""
+mean(dim=None, keepdim=False, *, dtype=None) -> Tensor
+
+See :func:`torch.mean`
+""",
+)
+
+add_docstr_all(
+ "nanmean",
+ r"""
+nanmean(dim=None, keepdim=False, *, dtype=None) -> Tensor
+
+See :func:`torch.nanmean`
+""",
+)
+
+add_docstr_all(
+ "median",
+ r"""
+median(dim=None, keepdim=False) -> (Tensor, LongTensor)
+
+See :func:`torch.median`
+""",
+)
+
+add_docstr_all(
+ "nanmedian",
+ r"""
+nanmedian(dim=None, keepdim=False) -> (Tensor, LongTensor)
+
+See :func:`torch.nanmedian`
+""",
+)
+
+add_docstr_all(
+ "min",
+ r"""
+min(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)
+
+See :func:`torch.min`
+""",
+)
+
+add_docstr_all(
+ "amin",
+ r"""
+amin(dim=None, keepdim=False) -> Tensor
+
+See :func:`torch.amin`
+""",
+)
+
+add_docstr_all(
+ "minimum",
+ r"""
+minimum(other) -> Tensor
+
+See :func:`torch.minimum`
+""",
+)
+
+add_docstr_all(
+ "aminmax",
+ r"""
+aminmax(*, dim=None, keepdim=False) -> (Tensor min, Tensor max)
+
+See :func:`torch.aminmax`
+""",
+)
+
+add_docstr_all(
+ "fmin",
+ r"""
+fmin(other) -> Tensor
+
+See :func:`torch.fmin`
+""",
+)
+
+add_docstr_all(
+ "argmin",
+ r"""
+argmin(dim=None, keepdim=False) -> LongTensor
+
+See :func:`torch.argmin`
+""",
+)
+
+add_docstr_all(
+ "mm",
+ r"""
+mm(mat2) -> Tensor
+
+See :func:`torch.mm`
+""",
+)
+
+add_docstr_all(
+ "mode",
+ r"""
+mode(dim=None, keepdim=False) -> (Tensor, LongTensor)
+
+See :func:`torch.mode`
+""",
+)
+
+add_docstr_all(
+ "movedim",
+ r"""
+movedim(source, destination) -> Tensor
+
+See :func:`torch.movedim`
+""",
+)
+
+add_docstr_all(
+ "moveaxis",
+ r"""
+moveaxis(source, destination) -> Tensor
+
+See :func:`torch.moveaxis`
+""",
+)
+
+add_docstr_all(
+ "mul",
+ r"""
+mul(value) -> Tensor
+
+See :func:`torch.mul`.
+""",
+)
+
+add_docstr_all(
+ "mul_",
+ r"""
+mul_(value) -> Tensor
+
+In-place version of :meth:`~Tensor.mul`.
+""",
+)
+
+add_docstr_all(
+ "multiply",
+ r"""
+multiply(value) -> Tensor
+
+See :func:`torch.multiply`.
+""",
+)
+
+add_docstr_all(
+ "multiply_",
+ r"""
+multiply_(value) -> Tensor
+
+In-place version of :meth:`~Tensor.multiply`.
+""",
+)
+
+add_docstr_all(
+ "multinomial",
+ r"""
+multinomial(num_samples, replacement=False, *, generator=None) -> Tensor
+
+See :func:`torch.multinomial`
+""",
+)
+
+add_docstr_all(
+ "mv",
+ r"""
+mv(vec) -> Tensor
+
+See :func:`torch.mv`
+""",
+)
+
+add_docstr_all(
+ "mvlgamma",
+ r"""
+mvlgamma(p) -> Tensor
+
+See :func:`torch.mvlgamma`
+""",
+)
+
+add_docstr_all(
+ "mvlgamma_",
+ r"""
+mvlgamma_(p) -> Tensor
+
+In-place version of :meth:`~Tensor.mvlgamma`
+""",
+)
+
+add_docstr_all(
+ "narrow",
+ r"""
+narrow(dimension, start, length) -> Tensor
+
+See :func:`torch.narrow`.
+""",
+)
+
+add_docstr_all(
+ "narrow_copy",
+ r"""
+narrow_copy(dimension, start, length) -> Tensor
+
+See :func:`torch.narrow_copy`.
+""",
+)
+
+add_docstr_all(
+ "ndimension",
+ r"""
+ndimension() -> int
+
+Alias for :meth:`~Tensor.dim()`
+""",
+)
+
+add_docstr_all(
+ "nan_to_num",
+ r"""
+nan_to_num(nan=0.0, posinf=None, neginf=None) -> Tensor
+
+See :func:`torch.nan_to_num`.
+""",
+)
+
+add_docstr_all(
+ "nan_to_num_",
+ r"""
+nan_to_num_(nan=0.0, posinf=None, neginf=None) -> Tensor
+
+In-place version of :meth:`~Tensor.nan_to_num`.
+""",
+)
+
+add_docstr_all(
+ "ne",
+ r"""
+ne(other) -> Tensor
+
+See :func:`torch.ne`.
+""",
+)
+
+add_docstr_all(
+ "ne_",
+ r"""
+ne_(other) -> Tensor
+
+In-place version of :meth:`~Tensor.ne`.
+""",
+)
+
+add_docstr_all(
+ "not_equal",
+ r"""
+not_equal(other) -> Tensor
+
+See :func:`torch.not_equal`.
+""",
+)
+
+add_docstr_all(
+ "not_equal_",
+ r"""
+not_equal_(other) -> Tensor
+
+In-place version of :meth:`~Tensor.not_equal`.
+""",
+)
+
+add_docstr_all(
+ "neg",
+ r"""
+neg() -> Tensor
+
+See :func:`torch.neg`
+""",
+)
+
+add_docstr_all(
+ "negative",
+ r"""
+negative() -> Tensor
+
+See :func:`torch.negative`
+""",
+)
+
+add_docstr_all(
+ "neg_",
+ r"""
+neg_() -> Tensor
+
+In-place version of :meth:`~Tensor.neg`
+""",
+)
+
+add_docstr_all(
+ "negative_",
+ r"""
+negative_() -> Tensor
+
+In-place version of :meth:`~Tensor.negative`
+""",
+)
+
+add_docstr_all(
+ "nelement",
+ r"""
+nelement() -> int
+
+Alias for :meth:`~Tensor.numel`
+""",
+)
+
add_docstr_all(
    "nextafter",
    r"""
nextafter(other) -> Tensor

See :func:`torch.nextafter`
""",
)
+
add_docstr_all(
    "nextafter_",
    r"""
nextafter_(other) -> Tensor

In-place version of :meth:`~Tensor.nextafter`
""",
)
+
+add_docstr_all(
+ "nonzero",
+ r"""
+nonzero() -> LongTensor
+
+See :func:`torch.nonzero`
+""",
+)
+
+add_docstr_all(
+ "nonzero_static",
+ r"""
+nonzero_static(input, *, size, fill_value=-1) -> Tensor
+
+Returns a 2-D tensor where each row is the index for a non-zero value.
+The returned Tensor has the same `torch.dtype` as `torch.nonzero()`.
+
+Args:
+ input (Tensor): the input tensor to count non-zero elements.
+
+Keyword args:
+ size (int): the size of non-zero elements expected to be included in the out
+ tensor. Pad the out tensor with `fill_value` if the `size` is larger
+ than total number of non-zero elements, truncate out tensor if `size`
+ is smaller. The size must be a non-negative integer.
+ fill_value (int): the value to fill the output tensor with when `size` is larger
+ than the total number of non-zero elements. Default is `-1` to represent
+ invalid index.
+
+Example:
+
+ # Example 1: Padding
+ >>> input_tensor = torch.tensor([[1, 0], [3, 2]])
+ >>> static_size = 4
+ >>> t = torch.nonzero_static(input_tensor, size = static_size)
+ tensor([[ 0, 0],
+ [ 1, 0],
+ [ 1, 1],
+ [ -1, -1]], dtype=torch.int64)
+
+ # Example 2: Truncating
+ >>> input_tensor = torch.tensor([[1, 0], [3, 2]])
+ >>> static_size = 2
+ >>> t = torch.nonzero_static(input_tensor, size = static_size)
+ tensor([[ 0, 0],
+ [ 1, 0]], dtype=torch.int64)
+
+ # Example 3: 0 size
+ >>> input_tensor = torch.tensor([10])
+ >>> static_size = 0
+ >>> t = torch.nonzero_static(input_tensor, size = static_size)
+ tensor([], size=(0, 1), dtype=torch.int64)
+
+ # Example 4: 0 rank input
+ >>> input_tensor = torch.tensor(10)
+ >>> static_size = 2
+ >>> t = torch.nonzero_static(input_tensor, size = static_size)
+ tensor([], size=(2, 0), dtype=torch.int64)
+""",
+)
+
+add_docstr_all(
+ "norm",
+ r"""
+norm(p=2, dim=None, keepdim=False) -> Tensor
+
+See :func:`torch.norm`
+""",
+)
+
add_docstr_all(
    "normal_",
    r"""
normal_(mean=0, std=1, *, generator=None) -> Tensor

Fills :attr:`self` tensor with elements sampled from the normal distribution
parameterized by :attr:`mean` and :attr:`std`.
""",
)
+
+add_docstr_all(
+ "numel",
+ r"""
+numel() -> int
+
+See :func:`torch.numel`
+""",
+)
+
+add_docstr_all(
+ "numpy",
+ r"""
+numpy(*, force=False) -> numpy.ndarray
+
+Returns the tensor as a NumPy :class:`ndarray`.
+
+If :attr:`force` is ``False`` (the default), the conversion
+is performed only if the tensor is on the CPU, does not require grad,
+does not have its conjugate bit set, and is a dtype and layout that
+NumPy supports. The returned ndarray and the tensor will share their
+storage, so changes to the tensor will be reflected in the ndarray
+and vice versa.
+
+If :attr:`force` is ``True`` this is equivalent to
+calling ``t.detach().cpu().resolve_conj().resolve_neg().numpy()``.
+If the tensor isn't on the CPU or the conjugate or negative bit is set,
+the tensor won't share its storage with the returned ndarray.
+Setting :attr:`force` to ``True`` can be a useful shorthand.
+
+Args:
+ force (bool): if ``True``, the ndarray may be a copy of the tensor
+ instead of always sharing memory, defaults to ``False``.
+""",
+)
+
+add_docstr_all(
+ "orgqr",
+ r"""
+orgqr(input2) -> Tensor
+
+See :func:`torch.orgqr`
+""",
+)
+
+add_docstr_all(
+ "ormqr",
+ r"""
+ormqr(input2, input3, left=True, transpose=False) -> Tensor
+
+See :func:`torch.ormqr`
+""",
+)
+
+add_docstr_all(
+ "permute",
+ r"""
+permute(*dims) -> Tensor
+
+See :func:`torch.permute`
+""",
+)
+
+add_docstr_all(
+ "polygamma",
+ r"""
+polygamma(n) -> Tensor
+
+See :func:`torch.polygamma`
+""",
+)
+
+add_docstr_all(
+ "polygamma_",
+ r"""
+polygamma_(n) -> Tensor
+
+In-place version of :meth:`~Tensor.polygamma`
+""",
+)
+
+add_docstr_all(
+ "positive",
+ r"""
+positive() -> Tensor
+
+See :func:`torch.positive`
+""",
+)
+
+add_docstr_all(
+ "pow",
+ r"""
+pow(exponent) -> Tensor
+
+See :func:`torch.pow`
+""",
+)
+
+add_docstr_all(
+ "pow_",
+ r"""
+pow_(exponent) -> Tensor
+
+In-place version of :meth:`~Tensor.pow`
+""",
+)
+
+add_docstr_all(
+ "float_power",
+ r"""
+float_power(exponent) -> Tensor
+
+See :func:`torch.float_power`
+""",
+)
+
+add_docstr_all(
+ "float_power_",
+ r"""
+float_power_(exponent) -> Tensor
+
+In-place version of :meth:`~Tensor.float_power`
+""",
+)
+
+add_docstr_all(
+ "prod",
+ r"""
+prod(dim=None, keepdim=False, dtype=None) -> Tensor
+
+See :func:`torch.prod`
+""",
+)
+
+add_docstr_all(
+ "put_",
+ r"""
+put_(index, source, accumulate=False) -> Tensor
+
+Copies the elements from :attr:`source` into the positions specified by
+:attr:`index`. For the purpose of indexing, the :attr:`self` tensor is treated as if
+it were a 1-D tensor.
+
+:attr:`index` and :attr:`source` need to have the same number of elements, but not necessarily
+the same shape.
+
+If :attr:`accumulate` is ``True``, the elements in :attr:`source` are added to
+:attr:`self`. If accumulate is ``False``, the behavior is undefined if :attr:`index`
+contain duplicate elements.
+
+Args:
+ index (LongTensor): the indices into self
+ source (Tensor): the tensor containing values to copy from
+ accumulate (bool): whether to accumulate into self
+
+Example::
+
+ >>> src = torch.tensor([[4, 3, 5],
+ ... [6, 7, 8]])
+ >>> src.put_(torch.tensor([1, 3]), torch.tensor([9, 10]))
+ tensor([[ 4, 9, 5],
+ [ 10, 7, 8]])
+""",
+)
+
+add_docstr_all(
+ "put",
+ r"""
+put(input, index, source, accumulate=False) -> Tensor
+
+Out-of-place version of :meth:`torch.Tensor.put_`.
+`input` corresponds to `self` in :meth:`torch.Tensor.put_`.
+""",
+)
+
+add_docstr_all(
+ "qr",
+ r"""
+qr(some=True) -> (Tensor, Tensor)
+
+See :func:`torch.qr`
+""",
+)
+
+add_docstr_all(
+ "qscheme",
+ r"""
+qscheme() -> torch.qscheme
+
+Returns the quantization scheme of a given QTensor.
+""",
+)
+
+add_docstr_all(
+ "quantile",
+ r"""
+quantile(q, dim=None, keepdim=False, *, interpolation='linear') -> Tensor
+
+See :func:`torch.quantile`
+""",
+)
+
+add_docstr_all(
+ "nanquantile",
+ r"""
+nanquantile(q, dim=None, keepdim=False, *, interpolation='linear') -> Tensor
+
+See :func:`torch.nanquantile`
+""",
+)
+
+add_docstr_all(
+ "q_scale",
+ r"""
+q_scale() -> float
+
+Given a Tensor quantized by linear(affine) quantization,
+returns the scale of the underlying quantizer().
+""",
+)
+
+add_docstr_all(
+ "q_zero_point",
+ r"""
+q_zero_point() -> int
+
+Given a Tensor quantized by linear(affine) quantization,
+returns the zero_point of the underlying quantizer().
+""",
+)
+
+add_docstr_all(
+ "q_per_channel_scales",
+ r"""
+q_per_channel_scales() -> Tensor
+
+Given a Tensor quantized by linear (affine) per-channel quantization,
+returns a Tensor of scales of the underlying quantizer. It has the number of
+elements that matches the corresponding dimensions (from q_per_channel_axis) of
+the tensor.
+""",
+)
+
+add_docstr_all(
+ "q_per_channel_zero_points",
+ r"""
+q_per_channel_zero_points() -> Tensor
+
+Given a Tensor quantized by linear (affine) per-channel quantization,
+returns a tensor of zero_points of the underlying quantizer. It has the number of
+elements that matches the corresponding dimensions (from q_per_channel_axis) of
+the tensor.
+""",
+)
+
+add_docstr_all(
+ "q_per_channel_axis",
+ r"""
+q_per_channel_axis() -> int
+
+Given a Tensor quantized by linear (affine) per-channel quantization,
+returns the index of dimension on which per-channel quantization is applied.
+""",
+)
+
+add_docstr_all(
+ "random_",
+ r"""
+random_(from=0, to=None, *, generator=None) -> Tensor
+
+Fills :attr:`self` tensor with numbers sampled from the discrete uniform
+distribution over ``[from, to - 1]``. If not specified, the values are usually
+only bounded by :attr:`self` tensor's data type. However, for floating point
+types, if unspecified, range will be ``[0, 2^mantissa]`` to ensure that every
+value is representable. For example, `torch.tensor(1, dtype=torch.double).random_()`
+will be uniform in ``[0, 2^53]``.
+""",
+)
+
+add_docstr_all(
+ "rad2deg",
+ r"""
+rad2deg() -> Tensor
+
+See :func:`torch.rad2deg`
+""",
+)
+
+add_docstr_all(
+ "rad2deg_",
+ r"""
+rad2deg_() -> Tensor
+
+In-place version of :meth:`~Tensor.rad2deg`
+""",
+)
+
+add_docstr_all(
+ "deg2rad",
+ r"""
+deg2rad() -> Tensor
+
+See :func:`torch.deg2rad`
+""",
+)
+
+add_docstr_all(
+ "deg2rad_",
+ r"""
+deg2rad_() -> Tensor
+
+In-place version of :meth:`~Tensor.deg2rad`
+""",
+)
+
add_docstr_all(
    "ravel",
    r"""
ravel() -> Tensor

See :func:`torch.ravel`
""",
)
+
+add_docstr_all(
+ "reciprocal",
+ r"""
+reciprocal() -> Tensor
+
+See :func:`torch.reciprocal`
+""",
+)
+
+add_docstr_all(
+ "reciprocal_",
+ r"""
+reciprocal_() -> Tensor
+
+In-place version of :meth:`~Tensor.reciprocal`
+""",
+)
+
+add_docstr_all(
+ "record_stream",
+ r"""
+record_stream(stream)
+
+Marks the tensor as having been used by this stream. When the tensor
+is deallocated, ensure the tensor memory is not reused for another tensor
+until all work queued on :attr:`stream` at the time of deallocation is
+complete.
+
+.. note::
+
+ The caching allocator is aware of only the stream where a tensor was
+ allocated. Due to the awareness, it already correctly manages the life
+ cycle of tensors on only one stream. But if a tensor is used on a stream
+ different from the stream of origin, the allocator might reuse the memory
+ unexpectedly. Calling this method lets the allocator know which streams
+ have used the tensor.
+
+.. warning::
+
+ This method is most suitable for use cases where you are providing a
+ function that created a tensor on a side stream, and want users to be able
+ to make use of the tensor without having to think carefully about stream
+ safety when making use of them. These safety guarantees come at some
+ performance and predictability cost (analogous to the tradeoff between GC
+ and manual memory management), so if you are in a situation where
+ you manage the full lifetime of your tensors, you may consider instead
+ manually managing CUDA events so that calling this method is not necessary.
+ In particular, when you call this method, on later allocations the
+ allocator will poll the recorded stream to see if all operations have
+ completed yet; you can potentially race with side stream computation and
+ non-deterministically reuse or fail to reuse memory for an allocation.
+
+ You can safely use tensors allocated on side streams without
+ :meth:`~Tensor.record_stream`; you must manually ensure that
+ any non-creation stream uses of a tensor are synced back to the creation
+ stream before you deallocate the tensor. As the CUDA caching allocator
+ guarantees that the memory will only be reused with the same creation stream,
+ this is sufficient to ensure that writes to future reallocations of the
+ memory will be delayed until non-creation stream uses are done.
+ (Counterintuitively, you may observe that on the CPU side we have already
+ reallocated the tensor, even though CUDA kernels on the old tensor are
+ still in progress. This is fine, because CUDA operations on the new
+ tensor will appropriately wait for the old operations to complete, as they
+ are all on the same stream.)
+
+ Concretely, this looks like this::
+
+ with torch.cuda.stream(s0):
+ x = torch.zeros(N)
+
+ s1.wait_stream(s0)
+ with torch.cuda.stream(s1):
+ y = some_comm_op(x)
+
+ ... some compute on s0 ...
+
+ # synchronize creation stream s0 to side stream s1
+ # before deallocating x
+ s0.wait_stream(s1)
+ del x
+
+ Note that some discretion is required when deciding when to perform
+ ``s0.wait_stream(s1)``. In particular, if we were to wait immediately
+ after ``some_comm_op``, there wouldn't be any point in having the side
+ stream; it would be equivalent to have run ``some_comm_op`` on ``s0``.
+ Instead, the synchronization must be placed at some appropriate, later
+ point in time where you expect the side stream ``s1`` to have finished
+ work. This location is typically identified via profiling, e.g., using
+ Chrome traces produced by
+ :meth:`torch.autograd.profiler.profile.export_chrome_trace`. If you
+ place the wait too early, work on s0 will block until ``s1`` has finished,
+ preventing further overlapping of communication and computation. If you
+ place the wait too late, you will use more memory than is strictly
+ necessary (as you are keeping ``x`` live for longer.) For a concrete
+ example of how this guidance can be applied in practice, see this post:
+ `FSDP and CUDACachingAllocator
+ <https://dev-discuss.pytorch.org/t/fsdp-cudacachingallocator-an-outsider-newb-perspective/1486>`_.
+""",
+)
+
+add_docstr_all(
+ "remainder",
+ r"""
+remainder(divisor) -> Tensor
+
+See :func:`torch.remainder`
+""",
+)
+
+add_docstr_all(
+ "remainder_",
+ r"""
+remainder_(divisor) -> Tensor
+
+In-place version of :meth:`~Tensor.remainder`
+""",
+)
+
+add_docstr_all(
+ "renorm",
+ r"""
+renorm(p, dim, maxnorm) -> Tensor
+
+See :func:`torch.renorm`
+""",
+)
+
+add_docstr_all(
+ "renorm_",
+ r"""
+renorm_(p, dim, maxnorm) -> Tensor
+
+In-place version of :meth:`~Tensor.renorm`
+""",
+)
+
+add_docstr_all(
+ "repeat",
+ r"""
+repeat(*sizes) -> Tensor
+
+Repeats this tensor along the specified dimensions.
+
+Unlike :meth:`~Tensor.expand`, this function copies the tensor's data.
+
+.. warning::
+
+ :meth:`~Tensor.repeat` behaves differently from
+ `numpy.repeat <https://numpy.org/doc/stable/reference/generated/numpy.repeat.html>`_,
+ but is more similar to
+ `numpy.tile <https://numpy.org/doc/stable/reference/generated/numpy.tile.html>`_.
+ For the operator similar to `numpy.repeat`, see :func:`torch.repeat_interleave`.
+
+Args:
+ sizes (torch.Size or int...): The number of times to repeat this tensor along each
+ dimension
+
+Example::
+
+ >>> x = torch.tensor([1, 2, 3])
+ >>> x.repeat(4, 2)
+ tensor([[ 1, 2, 3, 1, 2, 3],
+ [ 1, 2, 3, 1, 2, 3],
+ [ 1, 2, 3, 1, 2, 3],
+ [ 1, 2, 3, 1, 2, 3]])
+ >>> x.repeat(4, 2, 1).size()
+ torch.Size([4, 2, 3])
+""",
+)
+
+add_docstr_all(
+ "repeat_interleave",
+ r"""
+repeat_interleave(repeats, dim=None, *, output_size=None) -> Tensor
+
+See :func:`torch.repeat_interleave`.
+""",
+)
+
+add_docstr_all(
+ "requires_grad_",
+ r"""
+requires_grad_(requires_grad=True) -> Tensor
+
+Change if autograd should record operations on this tensor: sets this tensor's
+:attr:`requires_grad` attribute in-place. Returns this tensor.
+
+:func:`requires_grad_`'s main use case is to tell autograd to begin recording
+operations on a Tensor ``tensor``. If ``tensor`` has ``requires_grad=False``
+(because it was obtained through a DataLoader, or required preprocessing or
+initialization), ``tensor.requires_grad_()`` makes it so that autograd will
+begin to record operations on ``tensor``.
+
+Args:
+ requires_grad (bool): If autograd should record operations on this tensor.
+ Default: ``True``.
+
+Example::
+
+ >>> # Let's say we want to preprocess some saved weights and use
+ >>> # the result as new weights.
+ >>> saved_weights = [0.1, 0.2, 0.3, 0.25]
+ >>> loaded_weights = torch.tensor(saved_weights)
+ >>> weights = preprocess(loaded_weights) # some function
+ >>> weights
+ tensor([-0.5503, 0.4926, -2.1158, -0.8303])
+
+ >>> # Now, start to record operations done to weights
+ >>> weights.requires_grad_()
+ >>> out = weights.pow(2).sum()
+ >>> out.backward()
+ >>> weights.grad
+ tensor([-1.1007, 0.9853, -4.2316, -1.6606])
+
+""",
+)
+
+add_docstr_all(
+ "reshape",
+ r"""
+reshape(*shape) -> Tensor
+
+Returns a tensor with the same data and number of elements as :attr:`self`
+but with the specified shape. This method returns a view if :attr:`shape` is
+compatible with the current shape. See :meth:`torch.Tensor.view` on when it is
+possible to return a view.
+
+See :func:`torch.reshape`
+
+Args:
+ shape (tuple of ints or int...): the desired shape
+
+""",
+)
+
+add_docstr_all(
+ "reshape_as",
+ r"""
+reshape_as(other) -> Tensor
+
+Returns this tensor as the same shape as :attr:`other`.
+``self.reshape_as(other)`` is equivalent to ``self.reshape(other.sizes())``.
+This method returns a view if ``other.sizes()`` is compatible with the current
+shape. See :meth:`torch.Tensor.view` on when it is possible to return a view.
+
+Please see :meth:`reshape` for more information about ``reshape``.
+
+Args:
+ other (:class:`torch.Tensor`): The result tensor has the same shape
+ as :attr:`other`.
+""",
+)
+
+add_docstr_all(
+ "resize_",
+ r"""
+resize_(*sizes, memory_format=torch.contiguous_format) -> Tensor
+
+Resizes :attr:`self` tensor to the specified size. If the number of elements is
+larger than the current storage size, then the underlying storage is resized
+to fit the new number of elements. If the number of elements is smaller, the
+underlying storage is not changed. Existing elements are preserved but any new
+memory is uninitialized.
+
+.. warning::
+
+ This is a low-level method. The storage is reinterpreted as C-contiguous,
+ ignoring the current strides (unless the target size equals the current
+ size, in which case the tensor is left unchanged). For most purposes, you
+ will instead want to use :meth:`~Tensor.view()`, which checks for
+ contiguity, or :meth:`~Tensor.reshape()`, which copies data if needed. To
+ change the size in-place with custom strides, see :meth:`~Tensor.set_()`.
+
+.. note::
+
+ If :func:`torch.use_deterministic_algorithms()` and
+ :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
+ ``True``, new elements are initialized to prevent nondeterministic behavior
+ from using the result as an input to an operation. Floating point and
+ complex values are set to NaN, and integer values are set to the maximum
+ value.
+
+Args:
+ sizes (torch.Size or int...): the desired size
+ memory_format (:class:`torch.memory_format`, optional): the desired memory format of
+ Tensor. Default: ``torch.contiguous_format``. Note that memory format of
+ :attr:`self` is going to be unaffected if ``self.size()`` matches ``sizes``.
+
+Example::
+
+ >>> x = torch.tensor([[1, 2], [3, 4], [5, 6]])
+ >>> x.resize_(2, 2)
+ tensor([[ 1, 2],
+ [ 3, 4]])
+""",
+)
+
+add_docstr_all(
+ "resize_as_",
+ r"""
+resize_as_(tensor, memory_format=torch.contiguous_format) -> Tensor
+
+Resizes the :attr:`self` tensor to be the same size as the specified
+:attr:`tensor`. This is equivalent to ``self.resize_(tensor.size())``.
+
+Args:
+ memory_format (:class:`torch.memory_format`, optional): the desired memory format of
+ Tensor. Default: ``torch.contiguous_format``. Note that memory format of
+ :attr:`self` is going to be unaffected if ``self.size()`` matches ``tensor.size()``.
+
+""",
+)
+
+add_docstr_all(
+ "rot90",
+ r"""
+rot90(k, dims) -> Tensor
+
+See :func:`torch.rot90`
+""",
+)
+
+add_docstr_all(
+ "round",
+ r"""
+round(decimals=0) -> Tensor
+
+See :func:`torch.round`
+""",
+)
+
+add_docstr_all(
+ "round_",
+ r"""
+round_(decimals=0) -> Tensor
+
+In-place version of :meth:`~Tensor.round`
+""",
+)
+
+add_docstr_all(
+ "rsqrt",
+ r"""
+rsqrt() -> Tensor
+
+See :func:`torch.rsqrt`
+""",
+)
+
+add_docstr_all(
+ "rsqrt_",
+ r"""
+rsqrt_() -> Tensor
+
+In-place version of :meth:`~Tensor.rsqrt`
+""",
+)
+
+add_docstr_all(
+ "scatter_",
+ r"""
+scatter_(dim, index, src, reduce=None) -> Tensor
+
+Writes all values from the tensor :attr:`src` into :attr:`self` at the indices
+specified in the :attr:`index` tensor. For each value in :attr:`src`, its output
+index is specified by its index in :attr:`src` for ``dimension != dim`` and by
+the corresponding value in :attr:`index` for ``dimension = dim``.
+
+For a 3-D tensor, :attr:`self` is updated as::
+
+ self[index[i][j][k]][j][k] = src[i][j][k] # if dim == 0
+ self[i][index[i][j][k]][k] = src[i][j][k] # if dim == 1
+ self[i][j][index[i][j][k]] = src[i][j][k] # if dim == 2
+
+This is the reverse operation of the manner described in :meth:`~Tensor.gather`.
+
+:attr:`self`, :attr:`index` and :attr:`src` (if it is a Tensor) should all have
+the same number of dimensions. It is also required that
+``index.size(d) <= src.size(d)`` for all dimensions ``d``, and that
+``index.size(d) <= self.size(d)`` for all dimensions ``d != dim``.
+Note that ``index`` and ``src`` do not broadcast.
+
+Moreover, as for :meth:`~Tensor.gather`, the values of :attr:`index` must be
+between ``0`` and ``self.size(dim) - 1`` inclusive.
+
+.. warning::
+
+ When indices are not unique, the behavior is non-deterministic (one of the
+ values from ``src`` will be picked arbitrarily) and the gradient will be
+ incorrect (it will be propagated to all locations in the source that
+ correspond to the same index)!
+
+.. note::
+
+ The backward pass is implemented only for ``src.shape == index.shape``.
+
+Additionally accepts an optional :attr:`reduce` argument that allows
+specification of an optional reduction operation, which is applied to all
+values in the tensor :attr:`src` into :attr:`self` at the indices
+specified in the :attr:`index`. For each value in :attr:`src`, the reduction
+operation is applied to an index in :attr:`self` which is specified by
+its index in :attr:`src` for ``dimension != dim`` and by the corresponding
+value in :attr:`index` for ``dimension = dim``.
+
+Given a 3-D tensor and reduction using the multiplication operation, :attr:`self`
+is updated as::
+
+ self[index[i][j][k]][j][k] *= src[i][j][k] # if dim == 0
+ self[i][index[i][j][k]][k] *= src[i][j][k] # if dim == 1
+ self[i][j][index[i][j][k]] *= src[i][j][k] # if dim == 2
+
+Reducing with the addition operation is the same as using
+:meth:`~torch.Tensor.scatter_add_`.
+
+.. warning::
+ The reduce argument with Tensor ``src`` is deprecated and will be removed in
+ a future PyTorch release. Please use :meth:`~torch.Tensor.scatter_reduce_`
+ instead for more reduction options.
+
+Args:
+ dim (int): the axis along which to index
+ index (LongTensor): the indices of elements to scatter, can be either empty
+ or of the same dimensionality as ``src``. When empty, the operation
+ returns ``self`` unchanged.
+ src (Tensor or float): the source element(s) to scatter.
+ reduce (str, optional): reduction operation to apply, can be either
+ ``'add'`` or ``'multiply'``.
+
+Example::
+
+ >>> src = torch.arange(1, 11).reshape((2, 5))
+ >>> src
+ tensor([[ 1, 2, 3, 4, 5],
+ [ 6, 7, 8, 9, 10]])
+ >>> index = torch.tensor([[0, 1, 2, 0]])
+ >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(0, index, src)
+ tensor([[1, 0, 0, 4, 0],
+ [0, 2, 0, 0, 0],
+ [0, 0, 3, 0, 0]])
+ >>> index = torch.tensor([[0, 1, 2], [0, 1, 4]])
+ >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(1, index, src)
+ tensor([[1, 2, 3, 0, 0],
+ [6, 7, 0, 0, 8],
+ [0, 0, 0, 0, 0]])
+
+ >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]),
+ ... 1.23, reduce='multiply')
+ tensor([[2.0000, 2.0000, 2.4600, 2.0000],
+ [2.0000, 2.0000, 2.0000, 2.4600]])
+ >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]),
+ ... 1.23, reduce='add')
+ tensor([[2.0000, 2.0000, 3.2300, 2.0000],
+ [2.0000, 2.0000, 2.0000, 3.2300]])
+
+""",
+)
+
+add_docstr_all(
+ "scatter_add_",
+ r"""
+scatter_add_(dim, index, src) -> Tensor
+
+Adds all values from the tensor :attr:`src` into :attr:`self` at the indices
+specified in the :attr:`index` tensor in a similar fashion as
+:meth:`~torch.Tensor.scatter_`. For each value in :attr:`src`, it is added to
+an index in :attr:`self` which is specified by its index in :attr:`src`
+for ``dimension != dim`` and by the corresponding value in :attr:`index` for
+``dimension = dim``.
+
+For a 3-D tensor, :attr:`self` is updated as::
+
+ self[index[i][j][k]][j][k] += src[i][j][k] # if dim == 0
+ self[i][index[i][j][k]][k] += src[i][j][k] # if dim == 1
+ self[i][j][index[i][j][k]] += src[i][j][k] # if dim == 2
+
+:attr:`self`, :attr:`index` and :attr:`src` should have same number of
+dimensions. It is also required that ``index.size(d) <= src.size(d)`` for all
+dimensions ``d``, and that ``index.size(d) <= self.size(d)`` for all dimensions
+``d != dim``. Note that ``index`` and ``src`` do not broadcast.
+
+Note:
+ {forward_reproducibility_note}
+
+.. note::
+
+ The backward pass is implemented only for ``src.shape == index.shape``.
+
+Args:
+ dim (int): the axis along which to index
+ index (LongTensor): the indices of elements to scatter and add, can be
+ either empty or of the same dimensionality as ``src``. When empty, the
+ operation returns ``self`` unchanged.
+ src (Tensor): the source elements to scatter and add
+
+Example::
+
+ >>> src = torch.ones((2, 5))
+ >>> index = torch.tensor([[0, 1, 2, 0, 0]])
+ >>> torch.zeros(3, 5, dtype=src.dtype).scatter_add_(0, index, src)
+ tensor([[1., 0., 0., 1., 1.],
+ [0., 1., 0., 0., 0.],
+ [0., 0., 1., 0., 0.]])
+ >>> index = torch.tensor([[0, 1, 2, 0, 0], [0, 1, 2, 2, 2]])
+ >>> torch.zeros(3, 5, dtype=src.dtype).scatter_add_(0, index, src)
+ tensor([[2., 0., 0., 1., 1.],
+ [0., 2., 0., 0., 0.],
+ [0., 0., 2., 1., 1.]])
+
+""".format(
+ **reproducibility_notes
+ ),
+)
+
+add_docstr_all(
+ "scatter_reduce_",
+ r"""
+scatter_reduce_(dim, index, src, reduce, *, include_self=True) -> Tensor
+
+Reduces all values from the :attr:`src` tensor to the indices specified in
+the :attr:`index` tensor in the :attr:`self` tensor using the applied reduction
+defined via the :attr:`reduce` argument (:obj:`"sum"`, :obj:`"prod"`, :obj:`"mean"`,
+:obj:`"amax"`, :obj:`"amin"`). For each value in :attr:`src`, it is reduced to an
+index in :attr:`self` which is specified by its index in :attr:`src` for
+``dimension != dim`` and by the corresponding value in :attr:`index` for
+``dimension = dim``. If :obj:`include_self="True"`, the values in the :attr:`self`
+tensor are included in the reduction.
+
+:attr:`self`, :attr:`index` and :attr:`src` should all have
+the same number of dimensions. It is also required that
+``index.size(d) <= src.size(d)`` for all dimensions ``d``, and that
+``index.size(d) <= self.size(d)`` for all dimensions ``d != dim``.
+Note that ``index`` and ``src`` do not broadcast.
+
+For a 3-D tensor with :obj:`reduce="sum"` and :obj:`include_self=True` the
+output is given as::
+
+ self[index[i][j][k]][j][k] += src[i][j][k] # if dim == 0
+ self[i][index[i][j][k]][k] += src[i][j][k] # if dim == 1
+ self[i][j][index[i][j][k]] += src[i][j][k] # if dim == 2
+
+Note:
+ {forward_reproducibility_note}
+
+.. note::
+
+ The backward pass is implemented only for ``src.shape == index.shape``.
+
+.. warning::
+
+ This function is in beta and may change in the near future.
+
+Args:
+ dim (int): the axis along which to index
+ index (LongTensor): the indices of elements to scatter and reduce.
+ src (Tensor): the source elements to scatter and reduce
+ reduce (str): the reduction operation to apply for non-unique indices
+ (:obj:`"sum"`, :obj:`"prod"`, :obj:`"mean"`, :obj:`"amax"`, :obj:`"amin"`)
+ include_self (bool): whether elements from the :attr:`self` tensor are
+ included in the reduction
+
+Example::
+
+ >>> src = torch.tensor([1., 2., 3., 4., 5., 6.])
+ >>> index = torch.tensor([0, 1, 0, 1, 2, 1])
+ >>> input = torch.tensor([1., 2., 3., 4.])
+ >>> input.scatter_reduce(0, index, src, reduce="sum")
+ tensor([5., 14., 8., 4.])
+ >>> input.scatter_reduce(0, index, src, reduce="sum", include_self=False)
+ tensor([4., 12., 5., 4.])
+ >>> input2 = torch.tensor([5., 4., 3., 2.])
+ >>> input2.scatter_reduce(0, index, src, reduce="amax")
+ tensor([5., 6., 5., 2.])
+ >>> input2.scatter_reduce(0, index, src, reduce="amax", include_self=False)
+ tensor([3., 6., 5., 2.])
+
+
+""".format(
+ **reproducibility_notes
+ ),
+)
+
+add_docstr_all(
+ "select",
+ r"""
+select(dim, index) -> Tensor
+
+See :func:`torch.select`
+""",
+)
+
+add_docstr_all(
+ "select_scatter",
+ r"""
+select_scatter(src, dim, index) -> Tensor
+
+See :func:`torch.select_scatter`
+""",
+)
+
+add_docstr_all(
+ "slice_scatter",
+ r"""
+slice_scatter(src, dim=0, start=None, end=None, step=1) -> Tensor
+
+See :func:`torch.slice_scatter`
+""",
+)
+
+add_docstr_all(
+ "set_",
+ r"""
+set_(source=None, storage_offset=0, size=None, stride=None) -> Tensor
+
+Sets the underlying storage, size, and strides. If :attr:`source` is a tensor,
+:attr:`self` tensor will share the same storage and have the same size and
+strides as :attr:`source`. Changes to elements in one tensor will be reflected
+in the other.
+
+If :attr:`source` is a :class:`~torch.Storage`, the method sets the underlying
+storage, offset, size, and stride.
+
+Args:
+ source (Tensor or Storage): the tensor or storage to use
+ storage_offset (int, optional): the offset in the storage
+ size (torch.Size, optional): the desired size. Defaults to the size of the source.
+ stride (tuple, optional): the desired stride. Defaults to C-contiguous strides.
+""",
+)
+
+add_docstr_all(
+ "sigmoid",
+ r"""
+sigmoid() -> Tensor
+
+See :func:`torch.sigmoid`
+""",
+)
+
+add_docstr_all(
+ "sigmoid_",
+ r"""
+sigmoid_() -> Tensor
+
+In-place version of :meth:`~Tensor.sigmoid`
+""",
+)
+
+add_docstr_all(
+ "logit",
+ r"""
+logit() -> Tensor
+
+See :func:`torch.logit`
+""",
+)
+
+add_docstr_all(
+ "logit_",
+ r"""
+logit_() -> Tensor
+
+In-place version of :meth:`~Tensor.logit`
+""",
+)
+
+add_docstr_all(
+ "sign",
+ r"""
+sign() -> Tensor
+
+See :func:`torch.sign`
+""",
+)
+
+add_docstr_all(
+ "sign_",
+ r"""
+sign_() -> Tensor
+
+In-place version of :meth:`~Tensor.sign`
+""",
+)
+
+add_docstr_all(
+ "signbit",
+ r"""
+signbit() -> Tensor
+
+See :func:`torch.signbit`
+""",
+)
+
+add_docstr_all(
+ "sgn",
+ r"""
+sgn() -> Tensor
+
+See :func:`torch.sgn`
+""",
+)
+
+add_docstr_all(
+ "sgn_",
+ r"""
+sgn_() -> Tensor
+
+In-place version of :meth:`~Tensor.sgn`
+""",
+)
+
+add_docstr_all(
+ "sin",
+ r"""
+sin() -> Tensor
+
+See :func:`torch.sin`
+""",
+)
+
+add_docstr_all(
+ "sin_",
+ r"""
+sin_() -> Tensor
+
+In-place version of :meth:`~Tensor.sin`
+""",
+)
+
+add_docstr_all(
+ "sinc",
+ r"""
+sinc() -> Tensor
+
+See :func:`torch.sinc`
+""",
+)
+
+add_docstr_all(
+ "sinc_",
+ r"""
+sinc_() -> Tensor
+
+In-place version of :meth:`~Tensor.sinc`
+""",
+)
+
+add_docstr_all(
+ "sinh",
+ r"""
+sinh() -> Tensor
+
+See :func:`torch.sinh`
+""",
+)
+
+add_docstr_all(
+ "sinh_",
+ r"""
+sinh_() -> Tensor
+
+In-place version of :meth:`~Tensor.sinh`
+""",
+)
+
+add_docstr_all(
+ "size",
+ r"""
+size(dim=None) -> torch.Size or int
+
+Returns the size of the :attr:`self` tensor. If ``dim`` is not specified,
+the returned value is a :class:`torch.Size`, a subclass of :class:`tuple`.
+If ``dim`` is specified, returns an int holding the size of that dimension.
+
+Args:
+ dim (int, optional): The dimension for which to retrieve the size.
+
+Example::
+
+ >>> t = torch.empty(3, 4, 5)
+ >>> t.size()
+ torch.Size([3, 4, 5])
+ >>> t.size(dim=1)
+ 4
+
+""",
+)
+
+add_docstr_all(
+ "shape",
+ r"""
+shape() -> torch.Size
+
+Returns the size of the :attr:`self` tensor. Alias for :attr:`size`.
+
+See also :meth:`Tensor.size`.
+
+Example::
+
+ >>> t = torch.empty(3, 4, 5)
+ >>> t.size()
+ torch.Size([3, 4, 5])
+ >>> t.shape
+ torch.Size([3, 4, 5])
+
+""",
+)
+
+add_docstr_all(
+ "sort",
+ r"""
+sort(dim=-1, descending=False) -> (Tensor, LongTensor)
+
+See :func:`torch.sort`
+""",
+)
+
+add_docstr_all(
+ "msort",
+ r"""
+msort() -> Tensor
+
+See :func:`torch.msort`
+""",
+)
+
+add_docstr_all(
+ "argsort",
+ r"""
+argsort(dim=-1, descending=False) -> LongTensor
+
+See :func:`torch.argsort`
+""",
+)
+
+add_docstr_all(
+ "sparse_dim",
+ r"""
+sparse_dim() -> int
+
+Return the number of sparse dimensions in a :ref:`sparse tensor <sparse-docs>` :attr:`self`.
+
+.. note::
+ Returns ``0`` if :attr:`self` is not a sparse tensor.
+
+See also :meth:`Tensor.dense_dim` and :ref:`hybrid tensors <sparse-hybrid-coo-docs>`.
+""",
+)
+
+add_docstr_all(
+ "sparse_resize_",
+ r"""
+sparse_resize_(size, sparse_dim, dense_dim) -> Tensor
+
+Resizes :attr:`self` :ref:`sparse tensor <sparse-docs>` to the desired
+size and the number of sparse and dense dimensions.
+
+.. note::
+ If the number of specified elements in :attr:`self` is zero, then
+ :attr:`size`, :attr:`sparse_dim`, and :attr:`dense_dim` can be any
+ size and positive integers such that ``len(size) == sparse_dim +
+ dense_dim``.
+
+ If :attr:`self` specifies one or more elements, however, then each
+ dimension in :attr:`size` must not be smaller than the corresponding
+ dimension of :attr:`self`, :attr:`sparse_dim` must equal the number
+ of sparse dimensions in :attr:`self`, and :attr:`dense_dim` must
+ equal the number of dense dimensions in :attr:`self`.
+
+.. warning::
+ Throws an error if :attr:`self` is not a sparse tensor.
+
+Args:
+ size (torch.Size): the desired size. If :attr:`self` is non-empty
+ sparse tensor, the desired size cannot be smaller than the
+ original size.
+ sparse_dim (int): the number of sparse dimensions
+ dense_dim (int): the number of dense dimensions
+""",
+)
+
+add_docstr_all(
+ "sparse_resize_and_clear_",
+ r"""
+sparse_resize_and_clear_(size, sparse_dim, dense_dim) -> Tensor
+
+Removes all specified elements from a :ref:`sparse tensor
+<sparse-docs>` :attr:`self` and resizes :attr:`self` to the desired
+size and the number of sparse and dense dimensions.
+
+.. warning::
+ Throws an error if :attr:`self` is not a sparse tensor.
+
+Args:
+ size (torch.Size): the desired size.
+ sparse_dim (int): the number of sparse dimensions
+ dense_dim (int): the number of dense dimensions
+""",
+)
+
+add_docstr_all(
+ "sqrt",
+ r"""
+sqrt() -> Tensor
+
+See :func:`torch.sqrt`
+""",
+)
+
+add_docstr_all(
+ "sqrt_",
+ r"""
+sqrt_() -> Tensor
+
+In-place version of :meth:`~Tensor.sqrt`
+""",
+)
+
+add_docstr_all(
+ "square",
+ r"""
+square() -> Tensor
+
+See :func:`torch.square`
+""",
+)
+
+add_docstr_all(
+ "square_",
+ r"""
+square_() -> Tensor
+
+In-place version of :meth:`~Tensor.square`
+""",
+)
+
+add_docstr_all(
+ "squeeze",
+ r"""
+squeeze(dim=None) -> Tensor
+
+See :func:`torch.squeeze`
+""",
+)
+
+add_docstr_all(
+ "squeeze_",
+ r"""
+squeeze_(dim=None) -> Tensor
+
+In-place version of :meth:`~Tensor.squeeze`
+""",
+)
+
+add_docstr_all(
+ "std",
+ r"""
+std(dim=None, *, correction=1, keepdim=False) -> Tensor
+
+See :func:`torch.std`
+""",
+)
+
+add_docstr_all(
+ "storage_offset",
+ r"""
+storage_offset() -> int
+
+Returns :attr:`self` tensor's offset in the underlying storage in terms of
+number of storage elements (not bytes).
+
+Example::
+
+ >>> x = torch.tensor([1, 2, 3, 4, 5])
+ >>> x.storage_offset()
+ 0
+ >>> x[3:].storage_offset()
+ 3
+
+""",
+)
+
+add_docstr_all(
+ "untyped_storage",
+ r"""
+untyped_storage() -> torch.UntypedStorage
+
+Returns the underlying :class:`UntypedStorage`.
+""",
+)
+
+add_docstr_all(
+ "stride",
+ r"""
+stride(dim) -> tuple or int
+
+Returns the stride of :attr:`self` tensor.
+
+Stride is the jump necessary to go from one element to the next one in the
+specified dimension :attr:`dim`. A tuple of all strides is returned when no
+argument is passed in. Otherwise, an integer value is returned as the stride in
+the particular dimension :attr:`dim`.
+
+Args:
+ dim (int, optional): the desired dimension in which stride is required
+
+Example::
+
+ >>> x = torch.tensor([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
+ >>> x.stride()
+ (5, 1)
+ >>> x.stride(0)
+ 5
+ >>> x.stride(-1)
+ 1
+
+""",
+)
+
+add_docstr_all(
+ "sub",
+ r"""
+sub(other, *, alpha=1) -> Tensor
+
+See :func:`torch.sub`.
+""",
+)
+
+add_docstr_all(
+ "sub_",
+ r"""
+sub_(other, *, alpha=1) -> Tensor
+
+In-place version of :meth:`~Tensor.sub`
+""",
+)
+
+add_docstr_all(
+ "subtract",
+ r"""
+subtract(other, *, alpha=1) -> Tensor
+
+See :func:`torch.subtract`.
+""",
+)
+
+add_docstr_all(
+ "subtract_",
+ r"""
+subtract_(other, *, alpha=1) -> Tensor
+
+In-place version of :meth:`~Tensor.subtract`.
+""",
+)
+
+add_docstr_all(
+ "sum",
+ r"""
+sum(dim=None, keepdim=False, dtype=None) -> Tensor
+
+See :func:`torch.sum`
+""",
+)
+
+add_docstr_all(
+ "nansum",
+ r"""
+nansum(dim=None, keepdim=False, dtype=None) -> Tensor
+
+See :func:`torch.nansum`
+""",
+)
+
+add_docstr_all(
+ "svd",
+ r"""
+svd(some=True, compute_uv=True) -> (Tensor, Tensor, Tensor)
+
+See :func:`torch.svd`
+""",
+)
+
+add_docstr_all(
+ "swapdims",
+ r"""
+swapdims(dim0, dim1) -> Tensor
+
+See :func:`torch.swapdims`
+""",
+)
+
+add_docstr_all(
+ "swapdims_",
+ r"""
+swapdims_(dim0, dim1) -> Tensor
+
+In-place version of :meth:`~Tensor.swapdims`
+""",
+)
+
+add_docstr_all(
+ "swapaxes",
+ r"""
+swapaxes(axis0, axis1) -> Tensor
+
+See :func:`torch.swapaxes`
+""",
+)
+
+add_docstr_all(
+ "swapaxes_",
+ r"""
+swapaxes_(axis0, axis1) -> Tensor
+
+In-place version of :meth:`~Tensor.swapaxes`
+""",
+)
+
+add_docstr_all(
+ "t",
+ r"""
+t() -> Tensor
+
+See :func:`torch.t`
+""",
+)
+
+add_docstr_all(
+ "t_",
+ r"""
+t_() -> Tensor
+
+In-place version of :meth:`~Tensor.t`
+""",
+)
+
+add_docstr_all(
+ "tile",
+ r"""
+tile(dims) -> Tensor
+
+See :func:`torch.tile`
+""",
+)
+
+add_docstr_all(
+ "to",
+ r"""
+to(*args, **kwargs) -> Tensor
+
+Performs Tensor dtype and/or device conversion. A :class:`torch.dtype` and :class:`torch.device` are
+inferred from the arguments of ``self.to(*args, **kwargs)``.
+
+.. note::
+
+ If the ``self`` Tensor already
+ has the correct :class:`torch.dtype` and :class:`torch.device`, then ``self`` is returned.
+ Otherwise, the returned tensor is a copy of ``self`` with the desired
+ :class:`torch.dtype` and :class:`torch.device`.
+
+Here are the ways to call ``to``:
+
+.. method:: to(dtype, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor
+ :noindex:
+
+ Returns a Tensor with the specified :attr:`dtype`
+
+ Args:
+ {memory_format}
+
+.. method:: to(device=None, dtype=None, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor
+ :noindex:
+
+ Returns a Tensor with the specified :attr:`device` and (optional)
+ :attr:`dtype`. If :attr:`dtype` is ``None`` it is inferred to be ``self.dtype``.
+ When :attr:`non_blocking`, tries to convert asynchronously with respect to
+ the host if possible, e.g., converting a CPU Tensor with pinned memory to a
+ CUDA Tensor.
+ When :attr:`copy` is set, a new Tensor is created even when the Tensor
+ already matches the desired conversion.
+
+ Args:
+ {memory_format}
+
+.. method:: to(other, non_blocking=False, copy=False) -> Tensor
+ :noindex:
+
+ Returns a Tensor with same :class:`torch.dtype` and :class:`torch.device` as
+ the Tensor :attr:`other`. When :attr:`non_blocking`, tries to convert
+ asynchronously with respect to the host if possible, e.g., converting a CPU
+ Tensor with pinned memory to a CUDA Tensor.
+ When :attr:`copy` is set, a new Tensor is created even when the Tensor
+ already matches the desired conversion.
+
+Example::
+
+ >>> tensor = torch.randn(2, 2) # Initially dtype=float32, device=cpu
+ >>> tensor.to(torch.float64)
+ tensor([[-0.5044, 0.0005],
+ [ 0.3310, -0.0584]], dtype=torch.float64)
+
+ >>> cuda0 = torch.device('cuda:0')
+ >>> tensor.to(cuda0)
+ tensor([[-0.5044, 0.0005],
+ [ 0.3310, -0.0584]], device='cuda:0')
+
+ >>> tensor.to(cuda0, dtype=torch.float64)
+ tensor([[-0.5044, 0.0005],
+ [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')
+
+ >>> other = torch.randn((), dtype=torch.float64, device=cuda0)
+ >>> tensor.to(other, non_blocking=True)
+ tensor([[-0.5044, 0.0005],
+ [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr_all(
+ "byte",
+ r"""
+byte(memory_format=torch.preserve_format) -> Tensor
+
+``self.byte()`` is equivalent to ``self.to(torch.uint8)``. See :func:`to`.
+
+Args:
+ {memory_format}
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr_all(
+ "bool",
+ r"""
+bool(memory_format=torch.preserve_format) -> Tensor
+
+``self.bool()`` is equivalent to ``self.to(torch.bool)``. See :func:`to`.
+
+Args:
+ {memory_format}
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr_all(
+ "char",
+ r"""
+char(memory_format=torch.preserve_format) -> Tensor
+
+``self.char()`` is equivalent to ``self.to(torch.int8)``. See :func:`to`.
+
+Args:
+ {memory_format}
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr_all(
+ "bfloat16",
+ r"""
+bfloat16(memory_format=torch.preserve_format) -> Tensor
+``self.bfloat16()`` is equivalent to ``self.to(torch.bfloat16)``. See :func:`to`.
+
+Args:
+ {memory_format}
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr_all(
+ "double",
+ r"""
+double(memory_format=torch.preserve_format) -> Tensor
+
+``self.double()`` is equivalent to ``self.to(torch.float64)``. See :func:`to`.
+
+Args:
+ {memory_format}
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr_all(
+ "float",
+ r"""
+float(memory_format=torch.preserve_format) -> Tensor
+
+``self.float()`` is equivalent to ``self.to(torch.float32)``. See :func:`to`.
+
+Args:
+ {memory_format}
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr_all(
+ "cdouble",
+ r"""
+cdouble(memory_format=torch.preserve_format) -> Tensor
+
+``self.cdouble()`` is equivalent to ``self.to(torch.complex128)``. See :func:`to`.
+
+Args:
+ {memory_format}
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr_all(
+ "cfloat",
+ r"""
+cfloat(memory_format=torch.preserve_format) -> Tensor
+
+``self.cfloat()`` is equivalent to ``self.to(torch.complex64)``. See :func:`to`.
+
+Args:
+ {memory_format}
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr_all(
+ "chalf",
+ r"""
+chalf(memory_format=torch.preserve_format) -> Tensor
+
+``self.chalf()`` is equivalent to ``self.to(torch.complex32)``. See :func:`to`.
+
+Args:
+ {memory_format}
+ """.format(
+ **common_args
+ ),
+)
+
+add_docstr_all(
+ "half",
+ r"""
+half(memory_format=torch.preserve_format) -> Tensor
+
+``self.half()`` is equivalent to ``self.to(torch.float16)``. See :func:`to`.
+
+Args:
+ {memory_format}
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr_all(
+ "int",
+ r"""
+int(memory_format=torch.preserve_format) -> Tensor
+
+``self.int()`` is equivalent to ``self.to(torch.int32)``. See :func:`to`.
+
+Args:
+ {memory_format}
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr_all(
+ "int_repr",
+ r"""
+int_repr() -> Tensor
+
+Given a quantized Tensor,
+``self.int_repr()`` returns a CPU Tensor with uint8_t as data type that stores the
+underlying uint8_t values of the given Tensor.
+""",
+)
+
+
+add_docstr_all(
+ "long",
+ r"""
+long(memory_format=torch.preserve_format) -> Tensor
+
+``self.long()`` is equivalent to ``self.to(torch.int64)``. See :func:`to`.
+
+Args:
+ {memory_format}
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr_all(
+ "short",
+ r"""
+short(memory_format=torch.preserve_format) -> Tensor
+
+``self.short()`` is equivalent to ``self.to(torch.int16)``. See :func:`to`.
+
+Args:
+ {memory_format}
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr_all(
+ "take",
+ r"""
+take(indices) -> Tensor
+
+See :func:`torch.take`
+""",
+)
+
+add_docstr_all(
+ "take_along_dim",
+ r"""
+take_along_dim(indices, dim) -> Tensor
+
+See :func:`torch.take_along_dim`
+""",
+)
+
+add_docstr_all(
+ "tan",
+ r"""
+tan() -> Tensor
+
+See :func:`torch.tan`
+""",
+)
+
+add_docstr_all(
+ "tan_",
+ r"""
+tan_() -> Tensor
+
+In-place version of :meth:`~Tensor.tan`
+""",
+)
+
+add_docstr_all(
+ "tanh",
+ r"""
+tanh() -> Tensor
+
+See :func:`torch.tanh`
+""",
+)
+
+add_docstr_all(
+ "softmax",
+ r"""
+softmax(dim) -> Tensor
+
+Alias for :func:`torch.nn.functional.softmax`.
+""",
+)
+
+add_docstr_all(
+ "tanh_",
+ r"""
+tanh_() -> Tensor
+
+In-place version of :meth:`~Tensor.tanh`
+""",
+)
+
+add_docstr_all(
+ "tolist",
+ r"""
+tolist() -> list or number
+
+Returns the tensor as a (nested) list. For scalars, a standard
+Python number is returned, just like with :meth:`~Tensor.item`.
+Tensors are automatically moved to the CPU first if necessary.
+
+This operation is not differentiable.
+
+Examples::
+
+ >>> a = torch.randn(2, 2)
+ >>> a.tolist()
+ [[0.012766935862600803, 0.5415473580360413],
+ [-0.08909505605697632, 0.7729271650314331]]
+ >>> a[0,0].tolist()
+ 0.012766935862600803
+""",
+)
+
+add_docstr_all(
+ "topk",
+ r"""
+topk(k, dim=None, largest=True, sorted=True) -> (Tensor, LongTensor)
+
+See :func:`torch.topk`
+""",
+)
+
+add_docstr_all(
+ "to_dense",
+ r"""
+to_dense(dtype=None, *, masked_grad=True) -> Tensor
+
+Creates a strided copy of :attr:`self` if :attr:`self` is not a strided tensor, otherwise returns :attr:`self`.
+
+Keyword args:
+ {dtype}
+ masked_grad (bool, optional): If set to ``True`` (default) and
+ :attr:`self` has a sparse layout then the backward of
+ :meth:`to_dense` returns ``grad.sparse_mask(self)``.
+
+Example::
+
+ >>> s = torch.sparse_coo_tensor(
+ ... torch.tensor([[1, 1],
+ ... [0, 2]]),
+ ... torch.tensor([9, 10]),
+ ... size=(3, 3))
+ >>> s.to_dense()
+ tensor([[ 0, 0, 0],
+ [ 9, 0, 10],
+ [ 0, 0, 0]])
+""",
+)
+
+add_docstr_all(
+ "to_sparse",
+ r"""
+to_sparse(sparseDims) -> Tensor
+
+Returns a sparse copy of the tensor. PyTorch supports sparse tensors in
+:ref:`coordinate format <sparse-coo-docs>`.
+
+Args:
+ sparseDims (int, optional): the number of sparse dimensions to include in the new sparse tensor
+
+Example::
+
+ >>> d = torch.tensor([[0, 0, 0], [9, 0, 10], [0, 0, 0]])
+ >>> d
+ tensor([[ 0, 0, 0],
+ [ 9, 0, 10],
+ [ 0, 0, 0]])
+ >>> d.to_sparse()
+ tensor(indices=tensor([[1, 1],
+ [0, 2]]),
+ values=tensor([ 9, 10]),
+ size=(3, 3), nnz=2, layout=torch.sparse_coo)
+ >>> d.to_sparse(1)
+ tensor(indices=tensor([[1]]),
+ values=tensor([[ 9, 0, 10]]),
+ size=(3, 3), nnz=1, layout=torch.sparse_coo)
+
+.. method:: to_sparse(*, layout=None, blocksize=None, dense_dim=None) -> Tensor
+ :noindex:
+
+Returns a sparse tensor with the specified layout and blocksize. If
+the :attr:`self` is strided, the number of dense dimensions could be
+specified, and a hybrid sparse tensor will be created, with
+`dense_dim` dense dimensions and `self.dim() - 2 - dense_dim` batch
+dimension.
+
+.. note:: If the :attr:`self` layout and blocksize parameters match
+ with the specified layout and blocksize, return
+ :attr:`self`. Otherwise, return a sparse tensor copy of
+ :attr:`self`.
+
+Args:
+
+ layout (:class:`torch.layout`, optional): The desired sparse
+ layout. One of ``torch.sparse_coo``, ``torch.sparse_csr``,
+ ``torch.sparse_csc``, ``torch.sparse_bsr``, or
+ ``torch.sparse_bsc``. Default: if ``None``,
+ ``torch.sparse_coo``.
+
+ blocksize (list, tuple, :class:`torch.Size`, optional): Block size
+ of the resulting BSR or BSC tensor. For other layouts,
+ specifying the block size that is not ``None`` will result in a
+ RuntimeError exception. A block size must be a tuple of length
+ two such that its items evenly divide the two sparse dimensions.
+
+ dense_dim (int, optional): Number of dense dimensions of the
+ resulting CSR, CSC, BSR or BSC tensor. This argument should be
+ used only if :attr:`self` is a strided tensor, and must be a
+ value between 0 and dimension of :attr:`self` tensor minus two.
+
+Example::
+
+ >>> x = torch.tensor([[1, 0], [0, 0], [2, 3]])
+ >>> x.to_sparse(layout=torch.sparse_coo)
+ tensor(indices=tensor([[0, 2, 2],
+ [0, 0, 1]]),
+ values=tensor([1, 2, 3]),
+ size=(3, 2), nnz=3, layout=torch.sparse_coo)
+ >>> x.to_sparse(layout=torch.sparse_bsr, blocksize=(1, 2))
+ tensor(crow_indices=tensor([0, 1, 1, 2]),
+ col_indices=tensor([0, 0]),
+ values=tensor([[[1, 0]],
+ [[2, 3]]]), size=(3, 2), nnz=2, layout=torch.sparse_bsr)
+ >>> x.to_sparse(layout=torch.sparse_bsr, blocksize=(2, 1))
+ RuntimeError: Tensor size(-2) 3 needs to be divisible by blocksize[0] 2
+ >>> x.to_sparse(layout=torch.sparse_csr, blocksize=(3, 1))
+ RuntimeError: to_sparse for Strided to SparseCsr conversion does not use specified blocksize
+
+ >>> x = torch.tensor([[[1], [0]], [[0], [0]], [[2], [3]]])
+ >>> x.to_sparse(layout=torch.sparse_csr, dense_dim=1)
+ tensor(crow_indices=tensor([0, 1, 1, 3]),
+ col_indices=tensor([0, 0, 1]),
+ values=tensor([[1],
+ [2],
+ [3]]), size=(3, 2, 1), nnz=3, layout=torch.sparse_csr)
+
+""",
+)
+
+add_docstr_all(
+ "to_sparse_csr",
+ r"""
+to_sparse_csr(dense_dim=None) -> Tensor
+
+Convert a tensor to compressed row storage format (CSR). Except for
+strided tensors, only works with 2D tensors. If the :attr:`self` is
+strided, then the number of dense dimensions could be specified, and a
+hybrid CSR tensor will be created, with `dense_dim` dense dimensions
+and `self.dim() - 2 - dense_dim` batch dimension.
+
+Args:
+
+ dense_dim (int, optional): Number of dense dimensions of the
+ resulting CSR tensor. This argument should be used only if
+ :attr:`self` is a strided tensor, and must be a value between 0
+ and dimension of :attr:`self` tensor minus two.
+
+Example::
+
+ >>> dense = torch.randn(5, 5)
+ >>> sparse = dense.to_sparse_csr()
+ >>> sparse._nnz()
+ 25
+
+ >>> dense = torch.zeros(3, 3, 1, 1)
+ >>> dense[0, 0] = dense[1, 2] = dense[2, 1] = 1
+ >>> dense.to_sparse_csr(dense_dim=2)
+ tensor(crow_indices=tensor([0, 1, 2, 3]),
+ col_indices=tensor([0, 2, 1]),
+ values=tensor([[[1.]],
+
+ [[1.]],
+
+ [[1.]]]), size=(3, 3, 1, 1), nnz=3,
+ layout=torch.sparse_csr)
+
+""",
+)
+
+add_docstr_all(
+ "to_sparse_csc",
+ r"""
+to_sparse_csc(dense_dim=None) -> Tensor
+
+Convert a tensor to compressed column storage (CSC) format. Except
+for strided tensors, only works with 2D tensors. If the :attr:`self`
+is strided, then the number of dense dimensions could be specified,
+and a hybrid CSC tensor will be created, with `dense_dim` dense
+dimensions and `self.dim() - 2 - dense_dim` batch dimension.
+
+Args:
+
+ dense_dim (int, optional): Number of dense dimensions of the
+ resulting CSC tensor. This argument should be used only if
+ :attr:`self` is a strided tensor, and must be a value between 0
+ and dimension of :attr:`self` tensor minus two.
+
+Example::
+
+ >>> dense = torch.randn(5, 5)
+ >>> sparse = dense.to_sparse_csc()
+ >>> sparse._nnz()
+ 25
+
+ >>> dense = torch.zeros(3, 3, 1, 1)
+ >>> dense[0, 0] = dense[1, 2] = dense[2, 1] = 1
+ >>> dense.to_sparse_csc(dense_dim=2)
+ tensor(ccol_indices=tensor([0, 1, 2, 3]),
+ row_indices=tensor([0, 2, 1]),
+ values=tensor([[[1.]],
+
+ [[1.]],
+
+ [[1.]]]), size=(3, 3, 1, 1), nnz=3,
+ layout=torch.sparse_csc)
+
+""",
+)
+
+add_docstr_all(
+ "to_sparse_bsr",
+ r"""
+to_sparse_bsr(blocksize, dense_dim=None) -> Tensor
+
+Convert a tensor to a block sparse row (BSR) storage format of given
+blocksize. If the :attr:`self` is strided, then the number of dense
+dimensions could be specified, and a hybrid BSR tensor will be
+created, with `dense_dim` dense dimensions and `self.dim() - 2 -
+dense_dim` batch dimension.
+
+Args:
+
+ blocksize (list, tuple, :class:`torch.Size`, optional): Block size
+ of the resulting BSR tensor. A block size must be a tuple of
+ length two such that its items evenly divide the two sparse
+ dimensions.
+
+ dense_dim (int, optional): Number of dense dimensions of the
+ resulting BSR tensor. This argument should be used only if
+ :attr:`self` is a strided tensor, and must be a value between 0
+ and dimension of :attr:`self` tensor minus two.
+
+Example::
+
+ >>> dense = torch.randn(10, 10)
+ >>> sparse = dense.to_sparse_csr()
+ >>> sparse_bsr = sparse.to_sparse_bsr((5, 5))
+ >>> sparse_bsr.col_indices()
+ tensor([0, 1, 0, 1])
+
+ >>> dense = torch.zeros(4, 3, 1)
+ >>> dense[0:2, 0] = dense[0:2, 2] = dense[2:4, 1] = 1
+ >>> dense.to_sparse_bsr((2, 1), 1)
+ tensor(crow_indices=tensor([0, 2, 3]),
+ col_indices=tensor([0, 2, 1]),
+ values=tensor([[[[1.]],
+
+ [[1.]]],
+
+
+ [[[1.]],
+
+ [[1.]]],
+
+
+ [[[1.]],
+
+ [[1.]]]]), size=(4, 3, 1), nnz=3,
+ layout=torch.sparse_bsr)
+
+""",
+)
+
+add_docstr_all(
+ "to_sparse_bsc",
+ r"""
+to_sparse_bsc(blocksize, dense_dim=None) -> Tensor
+
+Convert a tensor to a block sparse column (BSC) storage format of
+given blocksize. If the :attr:`self` is strided, then the number of
+dense dimensions could be specified, and a hybrid BSC tensor will be
+created, with `dense_dim` dense dimensions and `self.dim() - 2 -
+dense_dim` batch dimension.
+
+Args:
+
+ blocksize (list, tuple, :class:`torch.Size`, optional): Block size
+ of the resulting BSC tensor. A block size must be a tuple of
+ length two such that its items evenly divide the two sparse
+ dimensions.
+
+ dense_dim (int, optional): Number of dense dimensions of the
+ resulting BSC tensor. This argument should be used only if
+ :attr:`self` is a strided tensor, and must be a value between 0
+ and dimension of :attr:`self` tensor minus two.
+
+Example::
+
+ >>> dense = torch.randn(10, 10)
+ >>> sparse = dense.to_sparse_csr()
+ >>> sparse_bsc = sparse.to_sparse_bsc((5, 5))
+ >>> sparse_bsc.row_indices()
+ tensor([0, 1, 0, 1])
+
+ >>> dense = torch.zeros(4, 3, 1)
+ >>> dense[0:2, 0] = dense[0:2, 2] = dense[2:4, 1] = 1
+ >>> dense.to_sparse_bsc((2, 1), 1)
+ tensor(ccol_indices=tensor([0, 1, 2, 3]),
+ row_indices=tensor([0, 1, 0]),
+ values=tensor([[[[1.]],
+
+ [[1.]]],
+
+
+ [[[1.]],
+
+ [[1.]]],
+
+
+ [[[1.]],
+
+ [[1.]]]]), size=(4, 3, 1), nnz=3,
+ layout=torch.sparse_bsc)
+
+""",
+)
+
+add_docstr_all(
+ "to_mkldnn",
+ r"""
+to_mkldnn() -> Tensor
+Returns a copy of the tensor in ``torch.mkldnn`` layout.
+
+""",
+)
+
+add_docstr_all(
+ "trace",
+ r"""
+trace() -> Tensor
+
+See :func:`torch.trace`
+""",
+)
+
+add_docstr_all(
+ "transpose",
+ r"""
+transpose(dim0, dim1) -> Tensor
+
+See :func:`torch.transpose`
+""",
+)
+
+add_docstr_all(
+ "transpose_",
+ r"""
+transpose_(dim0, dim1) -> Tensor
+
+In-place version of :meth:`~Tensor.transpose`
+""",
+)
+
+add_docstr_all(
+ "triangular_solve",
+ r"""
+triangular_solve(A, upper=True, transpose=False, unitriangular=False) -> (Tensor, Tensor)
+
+See :func:`torch.triangular_solve`
+""",
+)
+
+add_docstr_all(
+ "tril",
+ r"""
+tril(diagonal=0) -> Tensor
+
+See :func:`torch.tril`
+""",
+)
+
+add_docstr_all(
+ "tril_",
+ r"""
+tril_(diagonal=0) -> Tensor
+
+In-place version of :meth:`~Tensor.tril`
+""",
+)
+
+add_docstr_all(
+ "triu",
+ r"""
+triu(diagonal=0) -> Tensor
+
+See :func:`torch.triu`
+""",
+)
+
+add_docstr_all(
+ "triu_",
+ r"""
+triu_(diagonal=0) -> Tensor
+
+In-place version of :meth:`~Tensor.triu`
+""",
+)
+
+add_docstr_all(
+ "true_divide",
+ r"""
+true_divide(value) -> Tensor
+
+See :func:`torch.true_divide`
+""",
+)
+
+add_docstr_all(
+ "true_divide_",
+ r"""
+true_divide_(value) -> Tensor
+
+In-place version of :meth:`~Tensor.true_divide`
+""",
+)
+
+add_docstr_all(
+ "trunc",
+ r"""
+trunc() -> Tensor
+
+See :func:`torch.trunc`
+""",
+)
+
+add_docstr_all(
+ "fix",
+ r"""
+fix() -> Tensor
+
+See :func:`torch.fix`.
+""",
+)
+
+add_docstr_all(
+ "trunc_",
+ r"""
+trunc_() -> Tensor
+
+In-place version of :meth:`~Tensor.trunc`
+""",
+)
+
+add_docstr_all(
+ "fix_",
+ r"""
+fix_() -> Tensor
+
+In-place version of :meth:`~Tensor.fix`
+""",
+)
+
+add_docstr_all(
+ "type",
+ r"""
+type(dtype=None, non_blocking=False, **kwargs) -> str or Tensor
+Returns the type if `dtype` is not provided, else casts this object to
+the specified type.
+
+If this is already of the correct type, no copy is performed and the
+original object is returned.
+
+Args:
+ dtype (dtype or string): The desired type
+ non_blocking (bool): If ``True``, and the source is in pinned memory
+ and destination is on the GPU or vice versa, the copy is performed
+ asynchronously with respect to the host. Otherwise, the argument
+ has no effect.
+ **kwargs: For compatibility, may contain the key ``async`` in place of
+ the ``non_blocking`` argument. The ``async`` arg is deprecated.
+""",
+)
+
+add_docstr_all(
+ "type_as",
+ r"""
+type_as(tensor) -> Tensor
+
+Returns this tensor cast to the type of the given tensor.
+
+This is a no-op if the tensor is already of the correct type. This is
+equivalent to ``self.type(tensor.type())``
+
+Args:
+ tensor (Tensor): the tensor which has the desired type
+""",
+)
+
+add_docstr_all(
+ "unfold",
+ r"""
+unfold(dimension, size, step) -> Tensor
+
+Returns a view of the original tensor which contains all slices of size :attr:`size` from
+:attr:`self` tensor in the dimension :attr:`dimension`.
+
+Step between two slices is given by :attr:`step`.
+
+If `sizedim` is the size of dimension :attr:`dimension` for :attr:`self`, the size of
+dimension :attr:`dimension` in the returned tensor will be
+`(sizedim - size) / step + 1`.
+
+An additional dimension of size :attr:`size` is appended in the returned tensor.
+
+Args:
+ dimension (int): dimension in which unfolding happens
+ size (int): the size of each slice that is unfolded
+ step (int): the step between each slice
+
+Example::
+
+ >>> x = torch.arange(1., 8)
+ >>> x
+ tensor([ 1., 2., 3., 4., 5., 6., 7.])
+ >>> x.unfold(0, 2, 1)
+ tensor([[ 1., 2.],
+ [ 2., 3.],
+ [ 3., 4.],
+ [ 4., 5.],
+ [ 5., 6.],
+ [ 6., 7.]])
+ >>> x.unfold(0, 2, 2)
+ tensor([[ 1., 2.],
+ [ 3., 4.],
+ [ 5., 6.]])
+""",
+)
+
+add_docstr_all(
+ "uniform_",
+ r"""
+uniform_(from=0, to=1, *, generator=None) -> Tensor
+
+Fills :attr:`self` tensor with numbers sampled from the continuous uniform
+distribution:
+
+.. math::
+ f(x) = \dfrac{1}{\text{to} - \text{from}}
+""",
+)
+
+add_docstr_all(
+ "unsqueeze",
+ r"""
+unsqueeze(dim) -> Tensor
+
+See :func:`torch.unsqueeze`
+""",
+)
+
+add_docstr_all(
+ "unsqueeze_",
+ r"""
+unsqueeze_(dim) -> Tensor
+
+In-place version of :meth:`~Tensor.unsqueeze`
+""",
+)
+
+add_docstr_all(
+ "var",
+ r"""
+var(dim=None, *, correction=1, keepdim=False) -> Tensor
+
+See :func:`torch.var`
+""",
+)
+
+add_docstr_all(
+ "vdot",
+ r"""
+vdot(other) -> Tensor
+
+See :func:`torch.vdot`
+""",
+)
+
+add_docstr_all(
+ "view",
+ r"""
+view(*shape) -> Tensor
+
+Returns a new tensor with the same data as the :attr:`self` tensor but of a
+different :attr:`shape`.
+
+The returned tensor shares the same data and must have the same number
+of elements, but may have a different size. For a tensor to be viewed, the new
+view size must be compatible with its original size and stride, i.e., each new
+view dimension must either be a subspace of an original dimension, or only span
+across original dimensions :math:`d, d+1, \dots, d+k` that satisfy the following
+contiguity-like condition that :math:`\forall i = d, \dots, d+k-1`,
+
+.. math::
+
+ \text{stride}[i] = \text{stride}[i+1] \times \text{size}[i+1]
+
+Otherwise, it will not be possible to view :attr:`self` tensor as :attr:`shape`
+without copying it (e.g., via :meth:`contiguous`). When it is unclear whether a
+:meth:`view` can be performed, it is advisable to use :meth:`reshape`, which
+returns a view if the shapes are compatible, and copies (equivalent to calling
+:meth:`contiguous`) otherwise.
+
+Args:
+ shape (torch.Size or int...): the desired size
+
+Example::
+
+ >>> x = torch.randn(4, 4)
+ >>> x.size()
+ torch.Size([4, 4])
+ >>> y = x.view(16)
+ >>> y.size()
+ torch.Size([16])
+ >>> z = x.view(-1, 8) # the size -1 is inferred from other dimensions
+ >>> z.size()
+ torch.Size([2, 8])
+
+ >>> a = torch.randn(1, 2, 3, 4)
+ >>> a.size()
+ torch.Size([1, 2, 3, 4])
+ >>> b = a.transpose(1, 2) # Swaps 2nd and 3rd dimension
+ >>> b.size()
+ torch.Size([1, 3, 2, 4])
+ >>> c = a.view(1, 3, 2, 4) # Does not change tensor layout in memory
+ >>> c.size()
+ torch.Size([1, 3, 2, 4])
+ >>> torch.equal(b, c)
+ False
+
+
+.. method:: view(dtype) -> Tensor
+ :noindex:
+
+Returns a new tensor with the same data as the :attr:`self` tensor but of a
+different :attr:`dtype`.
+
+If the element size of :attr:`dtype` is different than that of ``self.dtype``,
+then the size of the last dimension of the output will be scaled
+proportionally. For instance, if :attr:`dtype` element size is twice that of
+``self.dtype``, then each pair of elements in the last dimension of
+:attr:`self` will be combined, and the size of the last dimension of the output
+will be half that of :attr:`self`. If :attr:`dtype` element size is half that
+of ``self.dtype``, then each element in the last dimension of :attr:`self` will
+be split in two, and the size of the last dimension of the output will be
+double that of :attr:`self`. For this to be possible, the following conditions
+must be true:
+
+ * ``self.dim()`` must be greater than 0.
+ * ``self.stride(-1)`` must be 1.
+
+Additionally, if the element size of :attr:`dtype` is greater than that of
+``self.dtype``, the following conditions must be true as well:
+
+ * ``self.size(-1)`` must be divisible by the ratio between the element
+ sizes of the dtypes.
+ * ``self.storage_offset()`` must be divisible by the ratio between the
+ element sizes of the dtypes.
+ * The strides of all dimensions, except the last dimension, must be
+ divisible by the ratio between the element sizes of the dtypes.
+
+If any of the above conditions are not met, an error is thrown.
+
+.. warning::
+
+ This overload is not supported by TorchScript, and using it in a Torchscript
+ program will cause undefined behavior.
+
+
+Args:
+ dtype (:class:`torch.dtype`): the desired dtype
+
+Example::
+
+ >>> x = torch.randn(4, 4)
+ >>> x
+ tensor([[ 0.9482, -0.0310, 1.4999, -0.5316],
+ [-0.1520, 0.7472, 0.5617, -0.8649],
+ [-2.4724, -0.0334, -0.2976, -0.8499],
+ [-0.2109, 1.9913, -0.9607, -0.6123]])
+ >>> x.dtype
+ torch.float32
+
+ >>> y = x.view(torch.int32)
+ >>> y
+ tensor([[ 1064483442, -1124191867, 1069546515, -1089989247],
+ [-1105482831, 1061112040, 1057999968, -1084397505],
+ [-1071760287, -1123489973, -1097310419, -1084649136],
+ [-1101533110, 1073668768, -1082790149, -1088634448]],
+ dtype=torch.int32)
+ >>> y[0, 0] = 1000000000
+ >>> x
+ tensor([[ 0.0047, -0.0310, 1.4999, -0.5316],
+ [-0.1520, 0.7472, 0.5617, -0.8649],
+ [-2.4724, -0.0334, -0.2976, -0.8499],
+ [-0.2109, 1.9913, -0.9607, -0.6123]])
+
+ >>> x.view(torch.cfloat)
+ tensor([[ 0.0047-0.0310j, 1.4999-0.5316j],
+ [-0.1520+0.7472j, 0.5617-0.8649j],
+ [-2.4724-0.0334j, -0.2976-0.8499j],
+ [-0.2109+1.9913j, -0.9607-0.6123j]])
+ >>> x.view(torch.cfloat).size()
+ torch.Size([4, 2])
+
+ >>> x.view(torch.uint8)
+ tensor([[ 0, 202, 154, 59, 182, 243, 253, 188, 185, 252, 191, 63, 240, 22,
+ 8, 191],
+ [227, 165, 27, 190, 128, 72, 63, 63, 146, 203, 15, 63, 22, 106,
+ 93, 191],
+ [205, 59, 30, 192, 112, 206, 8, 189, 7, 95, 152, 190, 12, 147,
+ 89, 191],
+ [ 43, 246, 87, 190, 235, 226, 254, 63, 111, 240, 117, 191, 177, 191,
+ 28, 191]], dtype=torch.uint8)
+ >>> x.view(torch.uint8).size()
+ torch.Size([4, 16])
+""",
+)
+
+add_docstr_all(
+ "view_as",
+ r"""
+view_as(other) -> Tensor
+
+View this tensor as the same size as :attr:`other`.
+``self.view_as(other)`` is equivalent to ``self.view(other.size())``.
+
+Please see :meth:`~Tensor.view` for more information about ``view``.
+
+Args:
+ other (:class:`torch.Tensor`): The result tensor has the same size
+ as :attr:`other`.
+""",
+)
+
+add_docstr_all(
+ "expand",
+ r"""
+expand(*sizes) -> Tensor
+
+Returns a new view of the :attr:`self` tensor with singleton dimensions expanded
+to a larger size.
+
+Passing -1 as the size for a dimension means not changing the size of
+that dimension.
+
+Tensor can be also expanded to a larger number of dimensions, and the
+new ones will be appended at the front. For the new dimensions, the
+size cannot be set to -1.
+
+Expanding a tensor does not allocate new memory, but only creates a
+new view on the existing tensor where a dimension of size one is
+expanded to a larger size by setting the ``stride`` to 0. Any dimension
+of size 1 can be expanded to an arbitrary value without allocating new
+memory.
+
+Args:
+ *sizes (torch.Size or int...): the desired expanded size
+
+.. warning::
+
+ More than one element of an expanded tensor may refer to a single
+ memory location. As a result, in-place operations (especially ones that
+ are vectorized) may result in incorrect behavior. If you need to write
+ to the tensors, please clone them first.
+
+Example::
+
+ >>> x = torch.tensor([[1], [2], [3]])
+ >>> x.size()
+ torch.Size([3, 1])
+ >>> x.expand(3, 4)
+ tensor([[ 1, 1, 1, 1],
+ [ 2, 2, 2, 2],
+ [ 3, 3, 3, 3]])
+ >>> x.expand(-1, 4) # -1 means not changing the size of that dimension
+ tensor([[ 1, 1, 1, 1],
+ [ 2, 2, 2, 2],
+ [ 3, 3, 3, 3]])
+""",
+)
+
+add_docstr_all(
+ "expand_as",
+ r"""
+expand_as(other) -> Tensor
+
+Expand this tensor to the same size as :attr:`other`.
+``self.expand_as(other)`` is equivalent to ``self.expand(other.size())``.
+
+Please see :meth:`~Tensor.expand` for more information about ``expand``.
+
+Args:
+ other (:class:`torch.Tensor`): The result tensor has the same size
+ as :attr:`other`.
+""",
+)
+
+add_docstr_all(
+ "sum_to_size",
+ r"""
+sum_to_size(*size) -> Tensor
+
+Sum ``this`` tensor to :attr:`size`.
+:attr:`size` must be broadcastable to ``this`` tensor size.
+
+Args:
+ size (int...): a sequence of integers defining the shape of the output tensor.
+""",
+)
+
+
+add_docstr_all(
+ "zero_",
+ r"""
+zero_() -> Tensor
+
+Fills :attr:`self` tensor with zeros.
+""",
+)
+
+add_docstr_all(
+ "matmul",
+ r"""
+matmul(tensor2) -> Tensor
+
+See :func:`torch.matmul`
+""",
+)
+
+add_docstr_all(
+ "chunk",
+ r"""
+chunk(chunks, dim=0) -> List of Tensors
+
+See :func:`torch.chunk`
+""",
+)
+
+add_docstr_all(
+ "unsafe_chunk",
+ r"""
+unsafe_chunk(chunks, dim=0) -> List of Tensors
+
+See :func:`torch.unsafe_chunk`
+""",
+)
+
+add_docstr_all(
+ "unsafe_split",
+ r"""
+unsafe_split(split_size, dim=0) -> List of Tensors
+
+See :func:`torch.unsafe_split`
+""",
+)
+
+add_docstr_all(
+ "tensor_split",
+ r"""
+tensor_split(indices_or_sections, dim=0) -> List of Tensors
+
+See :func:`torch.tensor_split`
+""",
+)
+
+add_docstr_all(
+ "hsplit",
+ r"""
+hsplit(split_size_or_sections) -> List of Tensors
+
+See :func:`torch.hsplit`
+""",
+)
+
+add_docstr_all(
+ "vsplit",
+ r"""
+vsplit(split_size_or_sections) -> List of Tensors
+
+See :func:`torch.vsplit`
+""",
+)
+
+add_docstr_all(
+ "dsplit",
+ r"""
+dsplit(split_size_or_sections) -> List of Tensors
+
+See :func:`torch.dsplit`
+""",
+)
+
+add_docstr_all(
+ "stft",
+ r"""
+stft(n_fft, hop_length=None, win_length=None, window=None, center=True, pad_mode='reflect', normalized=False, onesided=None, return_complex=None) -> Tensor
+
+See :func:`torch.stft`
+""",
+)
+
+add_docstr_all(
+ "istft",
+ r"""
+istft(n_fft, hop_length=None, win_length=None, window=None,
+ center=True, normalized=False, onesided=True, length=None) -> Tensor
+
+See :func:`torch.istft`
+""",
+)
+
+add_docstr_all(
+ "det",
+ r"""
+det() -> Tensor
+
+See :func:`torch.det`
+""",
+)
+
+add_docstr_all(
+ "where",
+ r"""
+where(condition, y) -> Tensor
+
+``self.where(condition, y)`` is equivalent to ``torch.where(condition, self, y)``.
+See :func:`torch.where`
+""",
+)
+
+add_docstr_all(
+ "logdet",
+ r"""
+logdet() -> Tensor
+
+See :func:`torch.logdet`
+""",
+)
+
+add_docstr_all(
+ "slogdet",
+ r"""
+slogdet() -> (Tensor, Tensor)
+
+See :func:`torch.slogdet`
+""",
+)
+
+add_docstr_all(
+ "unbind",
+ r"""
+unbind(dim=0) -> seq
+
+See :func:`torch.unbind`
+""",
+)
+
+add_docstr_all(
+ "pin_memory",
+ r"""
+pin_memory() -> Tensor
+
+Copies the tensor to pinned memory, if it's not already pinned.
+""",
+)
+
+add_docstr_all(
+ "pinverse",
+ r"""
+pinverse() -> Tensor
+
+See :func:`torch.pinverse`
+""",
+)
+
+add_docstr_all(
+ "index_add",
+ r"""
+index_add(dim, index, source, *, alpha=1) -> Tensor
+
+Out-of-place version of :meth:`torch.Tensor.index_add_`.
+""",
+)
+
+add_docstr_all(
+ "index_copy",
+ r"""
+index_copy(dim, index, tensor2) -> Tensor
+
+Out-of-place version of :meth:`torch.Tensor.index_copy_`.
+""",
+)
+
+add_docstr_all(
+ "index_fill",
+ r"""
+index_fill(dim, index, value) -> Tensor
+
+Out-of-place version of :meth:`torch.Tensor.index_fill_`.
+""",
+)
+
+add_docstr_all(
+ "scatter",
+ r"""
+scatter(dim, index, src) -> Tensor
+
+Out-of-place version of :meth:`torch.Tensor.scatter_`
+""",
+)
+
+add_docstr_all(
+ "scatter_add",
+ r"""
+scatter_add(dim, index, src) -> Tensor
+
+Out-of-place version of :meth:`torch.Tensor.scatter_add_`
+""",
+)
+
+add_docstr_all(
+ "scatter_reduce",
+ r"""
+scatter_reduce(dim, index, src, reduce, *, include_self=True) -> Tensor
+
+Out-of-place version of :meth:`torch.Tensor.scatter_reduce_`
+""",
+)
+
+add_docstr_all(
+ "masked_scatter",
+ r"""
+masked_scatter(mask, tensor) -> Tensor
+
+Out-of-place version of :meth:`torch.Tensor.masked_scatter_`
+
+.. note::
+
+    The inputs :attr:`self` and :attr:`mask`
+    :ref:`broadcast <broadcasting-semantics>`.
+
+Example:
+
+ >>> self = torch.tensor([0, 0, 0, 0, 0])
+ >>> mask = torch.tensor([[0, 0, 0, 1, 1], [1, 1, 0, 1, 1]])
+ >>> source = torch.tensor([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
+ >>> self.masked_scatter(mask, source)
+ tensor([[0, 0, 0, 0, 1],
+ [2, 3, 0, 4, 5]])
+
+""",
+)
+
+add_docstr_all(
+ "xlogy",
+ r"""
+xlogy(other) -> Tensor
+
+See :func:`torch.xlogy`
+""",
+)
+
+add_docstr_all(
+ "xlogy_",
+ r"""
+xlogy_(other) -> Tensor
+
+In-place version of :meth:`~Tensor.xlogy`
+""",
+)
+
+add_docstr_all(
+ "masked_fill",
+ r"""
+masked_fill(mask, value) -> Tensor
+
+Out-of-place version of :meth:`torch.Tensor.masked_fill_`
+""",
+)
+
+add_docstr_all(
+ "grad",
+ r"""
+This attribute is ``None`` by default and becomes a Tensor the first time a call to
+:func:`backward` computes gradients for ``self``.
+The attribute will then contain the gradients computed and future calls to
+:func:`backward` will accumulate (add) gradients into it.
+""",
+)
+
+add_docstr_all(
+ "retain_grad",
+ r"""
+retain_grad() -> None
+
+Enables this Tensor to have its :attr:`grad` populated during
+:func:`backward`. This is a no-op for leaf tensors.
+""",
+)
+
+add_docstr_all(
+ "retains_grad",
+ r"""
+Is ``True`` if this Tensor is non-leaf and its :attr:`grad` is enabled to be
+populated during :func:`backward`, ``False`` otherwise.
+""",
+)
+
+add_docstr_all(
+ "requires_grad",
+ r"""
+Is ``True`` if gradients need to be computed for this Tensor, ``False`` otherwise.
+
+.. note::
+
+    The fact that gradients need to be computed for a Tensor does not mean that the :attr:`grad`
+ attribute will be populated, see :attr:`is_leaf` for more details.
+
+""",
+)
+
+add_docstr_all(
+ "is_leaf",
+ r"""
+All Tensors that have :attr:`requires_grad` which is ``False`` will be leaf Tensors by convention.
+
+For Tensors that have :attr:`requires_grad` which is ``True``, they will be leaf Tensors if they were
+created by the user. This means that they are not the result of an operation and so
+:attr:`grad_fn` is None.
+
+Only leaf Tensors will have their :attr:`grad` populated during a call to :func:`backward`.
+To get :attr:`grad` populated for non-leaf Tensors, you can use :func:`retain_grad`.
+
+Example::
+
+ >>> a = torch.rand(10, requires_grad=True)
+ >>> a.is_leaf
+ True
+ >>> b = torch.rand(10, requires_grad=True).cuda()
+ >>> b.is_leaf
+ False
+ # b was created by the operation that cast a cpu Tensor into a cuda Tensor
+ >>> c = torch.rand(10, requires_grad=True) + 2
+ >>> c.is_leaf
+ False
+ # c was created by the addition operation
+ >>> d = torch.rand(10).cuda()
+ >>> d.is_leaf
+ True
+ # d does not require gradients and so has no operation creating it (that is tracked by the autograd engine)
+ >>> e = torch.rand(10).cuda().requires_grad_()
+ >>> e.is_leaf
+ True
+ # e requires gradients and has no operations creating it
+ >>> f = torch.rand(10, requires_grad=True, device="cuda")
+ >>> f.is_leaf
+ True
+ # f requires grad, has no operation creating it
+
+
+""",
+)
+
+add_docstr_all(
+ "names",
+ r"""
+Stores names for each of this tensor's dimensions.
+
+``names[idx]`` corresponds to the name of tensor dimension ``idx``.
+Names are either a string if the dimension is named or ``None`` if the
+dimension is unnamed.
+
+Dimension names may contain characters or underscore. Furthermore, a dimension
+name must be a valid Python variable name (i.e., does not start with underscore).
+
+Tensors may not have two named dimensions with the same name.
+
+.. warning::
+ The named tensor API is experimental and subject to change.
+
+""",
+)
+
+add_docstr_all(
+ "is_cuda",
+ r"""
+Is ``True`` if the Tensor is stored on the GPU, ``False`` otherwise.
+""",
+)
+
+add_docstr_all(
+ "is_cpu",
+ r"""
+Is ``True`` if the Tensor is stored on the CPU, ``False`` otherwise.
+""",
+)
+
+add_docstr_all(
+ "is_xla",
+ r"""
+Is ``True`` if the Tensor is stored on an XLA device, ``False`` otherwise.
+""",
+)
+
+add_docstr_all(
+ "is_ipu",
+ r"""
+Is ``True`` if the Tensor is stored on the IPU, ``False`` otherwise.
+""",
+)
+
+add_docstr_all(
+ "is_xpu",
+ r"""
+Is ``True`` if the Tensor is stored on the XPU, ``False`` otherwise.
+""",
+)
+
+add_docstr_all(
+ "is_quantized",
+ r"""
+Is ``True`` if the Tensor is quantized, ``False`` otherwise.
+""",
+)
+
+add_docstr_all(
+ "is_meta",
+ r"""
+Is ``True`` if the Tensor is a meta tensor, ``False`` otherwise. Meta tensors
+are like normal tensors, but they carry no data.
+""",
+)
+
+add_docstr_all(
+ "is_mps",
+ r"""
+Is ``True`` if the Tensor is stored on the MPS device, ``False`` otherwise.
+""",
+)
+
+add_docstr_all(
+ "is_sparse",
+ r"""
+Is ``True`` if the Tensor uses sparse COO storage layout, ``False`` otherwise.
+""",
+)
+
+add_docstr_all(
+ "is_sparse_csr",
+ r"""
+Is ``True`` if the Tensor uses sparse CSR storage layout, ``False`` otherwise.
+""",
+)
+
+add_docstr_all(
+ "device",
+ r"""
+Is the :class:`torch.device` where this Tensor is.
+""",
+)
+
+add_docstr_all(
+ "ndim",
+ r"""
+Alias for :meth:`~Tensor.dim()`
+""",
+)
+
+add_docstr_all(
+ "itemsize",
+ r"""
+Alias for :meth:`~Tensor.element_size()`
+""",
+)
+
+add_docstr_all(
+ "nbytes",
+ r"""
+Returns the number of bytes consumed by the "view" of elements of the Tensor
+if the Tensor does not use sparse storage layout.
+Defined to be :meth:`~Tensor.numel()` * :meth:`~Tensor.element_size()`
+""",
+)
+
+add_docstr_all(
+ "T",
+ r"""
+Returns a view of this tensor with its dimensions reversed.
+
+If ``n`` is the number of dimensions in ``x``,
+``x.T`` is equivalent to ``x.permute(n-1, n-2, ..., 0)``.
+
+.. warning::
+ The use of :func:`Tensor.T` on tensors of dimension other than 2 to reverse their shape
+ is deprecated and it will throw an error in a future release. Consider :attr:`~.Tensor.mT`
+ to transpose batches of matrices or `x.permute(*torch.arange(x.ndim - 1, -1, -1))` to reverse
+ the dimensions of a tensor.
+""",
+)
+
+add_docstr_all(
+ "H",
+ r"""
+Returns a view of a matrix (2-D tensor) conjugated and transposed.
+
+``x.H`` is equivalent to ``x.transpose(0, 1).conj()`` for complex matrices and
+``x.transpose(0, 1)`` for real matrices.
+
+.. seealso::
+
+ :attr:`~.Tensor.mH`: An attribute that also works on batches of matrices.
+""",
+)
+
+add_docstr_all(
+ "mT",
+ r"""
+Returns a view of this tensor with the last two dimensions transposed.
+
+``x.mT`` is equivalent to ``x.transpose(-2, -1)``.
+""",
+)
+
+add_docstr_all(
+ "mH",
+ r"""
+Accessing this property is equivalent to calling :func:`adjoint`.
+""",
+)
+
+add_docstr_all(
+ "adjoint",
+ r"""
+adjoint() -> Tensor
+
+Alias for :func:`adjoint`
+""",
+)
+
+add_docstr_all(
+ "real",
+ r"""
+Returns a new tensor containing real values of the :attr:`self` tensor for a complex-valued input tensor.
+The returned tensor and :attr:`self` share the same underlying storage.
+
+Returns :attr:`self` if :attr:`self` is a real-valued tensor.
+
+Example::
+ >>> x=torch.randn(4, dtype=torch.cfloat)
+ >>> x
+ tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
+ >>> x.real
+ tensor([ 0.3100, -0.5445, -1.6492, -0.0638])
+
+""",
+)
+
+add_docstr_all(
+ "imag",
+ r"""
+Returns a new tensor containing imaginary values of the :attr:`self` tensor.
+The returned tensor and :attr:`self` share the same underlying storage.
+
+.. warning::
+ :func:`imag` is only supported for tensors with complex dtypes.
+
+Example::
+ >>> x=torch.randn(4, dtype=torch.cfloat)
+ >>> x
+ tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
+ >>> x.imag
+ tensor([ 0.3553, -0.7896, -0.0633, -0.8119])
+
+""",
+)
+
+add_docstr_all(
+ "as_subclass",
+ r"""
+as_subclass(cls) -> Tensor
+
+Makes a ``cls`` instance with the same data pointer as ``self``. Changes
+in the output mirror changes in ``self``, and the output stays attached
+to the autograd graph. ``cls`` must be a subclass of ``Tensor``.
+""",
+)
+
+add_docstr_all(
+ "crow_indices",
+ r"""
+crow_indices() -> IntTensor
+
+Returns the tensor containing the compressed row indices of the :attr:`self`
+tensor when :attr:`self` is a sparse CSR tensor of layout ``sparse_csr``.
+The ``crow_indices`` tensor is strictly of shape (:attr:`self`.size(0) + 1)
+and of type ``int32`` or ``int64``. When using MKL routines such as sparse
+matrix multiplication, it is necessary to use ``int32`` indexing in order
+to avoid downcasting and potentially losing information.
+
+Example::
+ >>> csr = torch.eye(5,5).to_sparse_csr()
+ >>> csr.crow_indices()
+ tensor([0, 1, 2, 3, 4, 5], dtype=torch.int32)
+
+""",
+)
+
+add_docstr_all(
+ "col_indices",
+ r"""
+col_indices() -> IntTensor
+
+Returns the tensor containing the column indices of the :attr:`self`
+tensor when :attr:`self` is a sparse CSR tensor of layout ``sparse_csr``.
+The ``col_indices`` tensor is strictly of shape (:attr:`self`.nnz())
+and of type ``int32`` or ``int64``. When using MKL routines such as sparse
+matrix multiplication, it is necessary to use ``int32`` indexing in order
+to avoid downcasting and potentially losing information.
+
+Example::
+ >>> csr = torch.eye(5,5).to_sparse_csr()
+ >>> csr.col_indices()
+ tensor([0, 1, 2, 3, 4], dtype=torch.int32)
+
+""",
+)
+
+add_docstr_all(
+ "to_padded_tensor",
+ r"""
+to_padded_tensor(padding, output_size=None) -> Tensor
+See :func:`to_padded_tensor`
+""",
+)
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_tensor_str.py b/env-llmeval/lib/python3.10/site-packages/torch/_tensor_str.py
new file mode 100644
index 0000000000000000000000000000000000000000..1293a0fd61aec91368e36c733c3687a2361366fb
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_tensor_str.py
@@ -0,0 +1,677 @@
+import contextlib
+import dataclasses
+import math
+import textwrap
+from typing import Any, Dict, Optional
+
+import torch
+from torch import inf
+
+
@dataclasses.dataclass
class __PrinterOptions:
    """Global display options consulted by the formatting helpers below."""

    precision: int = 4  # digits after the decimal point for floats
    threshold: float = 1000  # element count above which output is summarized
    edgeitems: int = 3  # items shown at each edge of a summarized dimension
    linewidth: int = 80  # soft wrap width for a printed line
    sci_mode: Optional[bool] = None  # force sci notation on/off; None = auto


# Single mutable instance shared by set_printoptions()/get_printoptions()
# and every formatter in this module.
PRINT_OPTS = __PrinterOptions()
+
+
# We could use **kwargs, but explicit parameters give better docs
def set_printoptions(
    precision=None,
    threshold=None,
    edgeitems=None,
    linewidth=None,
    profile=None,
    sci_mode=None,
):
    r"""Set options for printing. Items shamelessly taken from NumPy

    Args:
        precision: Number of digits of precision for floating point output
            (default = 4).
        threshold: Total number of array elements which trigger summarization
            rather than full `repr` (default = 1000).
        edgeitems: Number of array items in summary at beginning and end of
            each dimension (default = 3).
        linewidth: The number of characters per line for the purpose of
            inserting line breaks (default = 80). Thresholded matrices will
            ignore this parameter.
        profile: Sane defaults for pretty printing. Can override with any of
            the above options. (any one of `default`, `short`, `full`)
        sci_mode: Enable (True) or disable (False) scientific notation. If
            None (default) is specified, the value is defined by
            `torch._tensor_str._Formatter`. This value is automatically chosen
            by the framework.

    Example::

        >>> # Limit the precision of elements
        >>> torch.set_printoptions(precision=2)
        >>> torch.tensor([1.12345])
        tensor([1.12])
        >>> # Limit the number of elements shown
        >>> torch.set_printoptions(threshold=5)
        >>> torch.arange(10)
        tensor([0, 1, 2, ..., 7, 8, 9])
        >>> # Restore defaults
        >>> torch.set_printoptions(profile='default')
        >>> torch.tensor([1.12345])
        tensor([1.1235])
        >>> torch.arange(10)
        tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """
    # Preset bundles: (precision, threshold, edgeitems, linewidth).
    # Applied first so that explicit keyword arguments can override them.
    presets = {
        "default": (4, 1000, 3, 80),
        "short": (2, 1000, 2, 80),
        "full": (4, inf, 3, 80),
    }
    if profile in ("default", "short", "full"):
        chosen = presets[profile]
        PRINT_OPTS.precision = chosen[0]
        PRINT_OPTS.threshold = chosen[1]
        PRINT_OPTS.edgeitems = chosen[2]
        PRINT_OPTS.linewidth = chosen[3]

    for field_name, value in (
        ("precision", precision),
        ("threshold", threshold),
        ("edgeitems", edgeitems),
        ("linewidth", linewidth),
    ):
        if value is not None:
            setattr(PRINT_OPTS, field_name, value)
    # Deliberately unconditional (matches historical behavior): omitting
    # sci_mode resets any previously forced mode back to automatic.
    PRINT_OPTS.sci_mode = sci_mode
+
+
def get_printoptions() -> Dict[str, Any]:
    r"""Return the currently active print options as a dictionary that can be
    passed back to :func:`set_printoptions` via ``**kwargs``."""
    snapshot = dataclasses.asdict(PRINT_OPTS)
    return snapshot
+
+
@contextlib.contextmanager
def printoptions(**kwargs):
    r"""Context manager that applies :func:`set_printoptions` arguments for the
    duration of the ``with`` block and restores the previous options on exit
    (even if the body raises)."""
    saved = get_printoptions()
    set_printoptions(**kwargs)
    try:
        yield
    finally:
        set_printoptions(**saved)
+
+
def tensor_totype(t):
    """Cast ``t`` to the widest float dtype usable on its device: float32 on
    MPS (which lacks float64 support — assumption based on the branch here),
    float64 everywhere else."""
    if t.is_mps:
        return t.to(dtype=torch.float)
    return t.to(dtype=torch.double)
+
+
class _Formatter:
    """Decides, from a sample tensor, how each element should be rendered.

    ``__init__`` computes three things:
      * ``int_mode``  - every finite float value is integral, so print "3."
      * ``sci_mode``  - the value range forces scientific notation
      * ``max_width`` - width of the widest rendered element, for alignment
    """

    def __init__(self, tensor):
        self.floating_dtype = tensor.dtype.is_floating_point
        self.int_mode = True
        self.sci_mode = False
        self.max_width = 1

        with torch.no_grad():
            tensor_view = tensor.reshape(-1)

        if not self.floating_dtype:
            # Non-floating dtypes (ints, bools): width is just the longest str().
            for value in tensor_view:
                value_str = f"{value}"
                self.max_width = max(self.max_width, len(value_str))

        else:
            # Only finite, nonzero values inform the format choice.
            nonzero_finite_vals = torch.masked_select(
                tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0)
            )

            if nonzero_finite_vals.numel() == 0:
                # no valid number, do nothing
                return

            # Convert to double for easy calculation. HalfTensor overflows with 1e8, and there's no div() on CPU.
            nonzero_finite_abs = tensor_totype(nonzero_finite_vals.abs())
            nonzero_finite_min = tensor_totype(nonzero_finite_abs.min())
            nonzero_finite_max = tensor_totype(nonzero_finite_abs.max())

            # Any fractional value disables int_mode.
            for value in nonzero_finite_vals:
                if value != torch.ceil(value):
                    self.int_mode = False
                    break

            if self.int_mode:
                # in int_mode for floats, all numbers are integers, and we append a decimal to nonfinites
                # to indicate that the tensor is of floating type. add 1 to the len to account for this.
                if (
                    nonzero_finite_max / nonzero_finite_min > 1000.0
                    or nonzero_finite_max > 1.0e8
                ):
                    self.sci_mode = True
                    for value in nonzero_finite_vals:
                        value_str = f"{{:.{PRINT_OPTS.precision}e}}".format(value)
                        self.max_width = max(self.max_width, len(value_str))
                else:
                    for value in nonzero_finite_vals:
                        value_str = f"{value:.0f}"
                        self.max_width = max(self.max_width, len(value_str) + 1)
            else:
                # Check if scientific representation should be used.
                if (
                    nonzero_finite_max / nonzero_finite_min > 1000.0
                    or nonzero_finite_max > 1.0e8
                    or nonzero_finite_min < 1.0e-4
                ):
                    self.sci_mode = True
                    for value in nonzero_finite_vals:
                        value_str = f"{{:.{PRINT_OPTS.precision}e}}".format(value)
                        self.max_width = max(self.max_width, len(value_str))
                else:
                    for value in nonzero_finite_vals:
                        value_str = f"{{:.{PRINT_OPTS.precision}f}}".format(value)
                        self.max_width = max(self.max_width, len(value_str))

        # A user-forced sci_mode overrides the inferred one (width is kept).
        if PRINT_OPTS.sci_mode is not None:
            self.sci_mode = PRINT_OPTS.sci_mode

    def width(self):
        # Character width every formatted element is padded to.
        return self.max_width

    def format(self, value):
        """Render one scalar, right-aligned to ``max_width``."""
        if self.floating_dtype:
            if self.sci_mode:
                ret = f"{{:{self.max_width}.{PRINT_OPTS.precision}e}}".format(value)
            elif self.int_mode:
                ret = f"{value:.0f}"
                # Trailing "." marks the value as floating point (e.g. "3.");
                # inf/nan already read as floats, so they get no dot.
                if not (math.isinf(value) or math.isnan(value)):
                    ret += "."
            else:
                ret = f"{{:.{PRINT_OPTS.precision}f}}".format(value)
        else:
            ret = f"{value}"
        return (self.max_width - len(ret)) * " " + ret
+
+
+def _scalar_str(self, formatter1, formatter2=None):
+ if formatter2 is not None:
+ real_str = _scalar_str(self.real, formatter1)
+ imag_str = (_scalar_str(self.imag, formatter2) + "j").lstrip()
+ # handles negative numbers, +0.0, -0.0
+ if imag_str[0] == "+" or imag_str[0] == "-":
+ return real_str + imag_str
+ else:
+ return real_str + "+" + imag_str
+ else:
+ return formatter1.format(self.item())
+
+
def _vector_str(self, indent, summarize, formatter1, formatter2=None):
    """Render a 1-D tensor as ``[a, b, ...]``, wrapping lines at
    ``PRINT_OPTS.linewidth`` and eliding the middle when ``summarize``."""
    # length includes spaces and comma between elements
    element_length = formatter1.width() + 2
    if formatter2 is not None:
        # width for imag_formatter + an extra j for complex
        element_length += formatter2.width() + 1

    elements_per_line = max(
        1, int(math.floor((PRINT_OPTS.linewidth - indent) / (element_length)))
    )

    def _val_formatter(val, formatter1=formatter1, formatter2=formatter2):
        # Formatters are bound as defaults so the closure is self-contained.
        if formatter2 is not None:
            real_str = formatter1.format(val.real)
            imag_str = (formatter2.format(val.imag) + "j").lstrip()
            # handles negative numbers, +0.0, -0.0
            if imag_str[0] == "+" or imag_str[0] == "-":
                return real_str + imag_str
            else:
                return real_str + "+" + imag_str
        else:
            return formatter1.format(val)

    if summarize and not PRINT_OPTS.edgeitems:
        # edgeitems == 0: summarization leaves nothing but the ellipsis.
        data = ["..."]
    elif summarize and self.size(0) > 2 * PRINT_OPTS.edgeitems:
        data = (
            [_val_formatter(val) for val in self[: PRINT_OPTS.edgeitems].tolist()]
            + [" ..."]
            + [_val_formatter(val) for val in self[-PRINT_OPTS.edgeitems :].tolist()]
        )
    else:
        data = [_val_formatter(val) for val in self.tolist()]

    # Chunk into rows, then join rows with a newline aligned under "[".
    data_lines = [
        data[i : i + elements_per_line] for i in range(0, len(data), elements_per_line)
    ]
    lines = [", ".join(line) for line in data_lines]
    return "[" + ("," + "\n" + " " * (indent + 1)).join(lines) + "]"
+
+
# formatter2 is only used for printing complex tensors.
# For complex tensors, formatter1 and formatter2 are the formatters for tensor.real
# and tensor.imag respectively
def _tensor_str_with_formatter(self, indent, summarize, formatter1, formatter2=None):
    """Recursively render ``self`` as nested ``[...]`` lists using the
    pre-computed formatter(s)."""
    dim = self.dim()

    if dim == 0:
        return _scalar_str(self, formatter1, formatter2)

    if dim == 1:
        return _vector_str(self, indent, summarize, formatter1, formatter2)

    if summarize and self.size(0) > 2 * PRINT_OPTS.edgeitems:
        # Render only the leading/trailing edgeitems slices around "...".
        slices = (
            [
                _tensor_str_with_formatter(
                    self[i], indent + 1, summarize, formatter1, formatter2
                )
                for i in range(0, PRINT_OPTS.edgeitems)
            ]
            + ["..."]
            + [
                _tensor_str_with_formatter(
                    self[i], indent + 1, summarize, formatter1, formatter2
                )
                for i in range(len(self) - PRINT_OPTS.edgeitems, len(self))
            ]
        )
    else:
        slices = [
            _tensor_str_with_formatter(
                self[i], indent + 1, summarize, formatter1, formatter2
            )
            for i in range(0, self.size(0))
        ]

    # dim-1 newlines visually separate slices of higher-dimensional tensors.
    tensor_str = ("," + "\n" * (dim - 1) + " " * (indent + 1)).join(slices)
    return "[" + tensor_str + "]"
+
+
def _tensor_str(self, indent):
    """Render the data portion of ``self`` (no ``tensor(`` prefix, no
    suffixes).

    Normalizes the tensor first — names dropped, zero/negative/conjugate
    bits materialized, low-precision floats upcast — then delegates to
    ``_tensor_str_with_formatter`` with per-component ``_Formatter``(s).
    """
    if self.numel() == 0:
        return "[]"

    if self.has_names():
        # There are two main codepaths (possibly more) that tensor printing goes through:
        # - tensor data can fit comfortably on screen
        # - tensor data needs to be summarized
        # Some of the codepaths don't fully support named tensors, so we send in
        # an unnamed tensor to the formatting code as a workaround.
        self = self.rename(None)

    summarize = self.numel() > PRINT_OPTS.threshold

    if self._is_zerotensor():
        self = self.clone()

    # handle the negative bit
    if self.is_neg():
        self = self.resolve_neg()

    # Low-precision float dtypes are printed via a float32 copy.
    if self.dtype in [
        torch.float16,
        torch.bfloat16,
        torch.float8_e5m2,
        torch.float8_e5m2fnuz,
        torch.float8_e4m3fn,
        torch.float8_e4m3fnuz,
    ]:
        self = self.float()

    if self.dtype is torch.complex32:
        self = self.cfloat()

    if self.dtype.is_complex:
        # handle the conjugate bit
        self = self.resolve_conj()
        # Real and imaginary parts get independent formatters since their
        # widths and sci/int decisions can differ.
        real_formatter = _Formatter(
            get_summarized_data(self.real) if summarize else self.real
        )
        imag_formatter = _Formatter(
            get_summarized_data(self.imag) if summarize else self.imag
        )
        return _tensor_str_with_formatter(
            self, indent, summarize, real_formatter, imag_formatter
        )
    else:
        formatter = _Formatter(get_summarized_data(self) if summarize else self)
        return _tensor_str_with_formatter(self, indent, summarize, formatter)
+
+
def _add_suffixes(tensor_str, suffixes, indent, force_newline):
    """Append ``suffixes`` (e.g. ``dtype=...``, ``device=...``) after the
    rendered data and close the surrounding paren, wrapping any suffix that
    would push the current line past ``PRINT_OPTS.linewidth``."""
    pieces = [tensor_str]
    last_line_len = len(tensor_str) - tensor_str.rfind("\n") + 1
    for suffix in suffixes:
        if force_newline or last_line_len + len(suffix) + 2 > PRINT_OPTS.linewidth:
            pieces.append(",\n" + " " * indent + suffix)
            last_line_len = indent + len(suffix)
            # Only the first suffix is forced onto its own line.
            force_newline = False
        else:
            pieces.append(", " + suffix)
            last_line_len += len(suffix) + 2
    pieces.append(")")
    return "".join(pieces)
+
+
def get_summarized_data(self):
    """Return a reduced copy of ``self`` containing only the edge items a
    summarized print would show, recursing per dimension."""
    dim = self.dim()
    if dim == 0:
        return self
    if dim == 1:
        if self.size(0) > 2 * PRINT_OPTS.edgeitems:
            return torch.cat(
                (self[: PRINT_OPTS.edgeitems], self[-PRINT_OPTS.edgeitems :])
            )
        else:
            return self
    if not PRINT_OPTS.edgeitems:
        # edgeitems == 0: nothing is displayed, so an empty tensor suffices.
        return self.new_empty([0] * self.dim())
    elif self.size(0) > 2 * PRINT_OPTS.edgeitems:
        start = [self[i] for i in range(0, PRINT_OPTS.edgeitems)]
        end = [self[i] for i in range(len(self) - PRINT_OPTS.edgeitems, len(self))]
        return torch.stack([get_summarized_data(x) for x in (start + end)])
    else:
        return torch.stack([get_summarized_data(x) for x in self])
+
+
def _str_intern(inp, *, tensor_contents=None):
    """Build the full repr string (prefix + data + suffixes) for ``inp``.

    Dispatches on the tensor's layout/subsystem: strided, sparse COO,
    sparse compressed (CSR/CSC/BSR/BSC), quantized, nested, functional,
    functorch-wrapped, and meta/fake tensors all take different branches.
    When ``tensor_contents`` is given it replaces the rendered data
    verbatim and only the suffixes are computed.
    """
    if torch._C._functorch.is_functorch_wrapped_tensor(inp):
        return _functorch_wrapper_str_intern(inp, tensor_contents=tensor_contents)
    is_plain_tensor = type(inp) is torch.Tensor or type(inp) is torch.nn.Parameter
    if inp.is_nested:
        prefix = "nested_tensor("
    elif is_plain_tensor:
        prefix = "tensor("
    else:
        # Subclasses print under their own class name.
        prefix = f"{type(inp).__name__}("
    indent = len(prefix)
    suffixes = []
    custom_contents_provided = tensor_contents is not None
    if custom_contents_provided:
        tensor_str = tensor_contents

    # This is used to extract the primal value and thus disable the forward AD
    # within this function.
    # TODO(albanD) This needs to be updated when more than one level is supported
    self, tangent = torch.autograd.forward_ad.unpack_dual(inp)

    # Note [Print tensor device]:
    # A general logic here is we only print device when it doesn't match
    # the device specified in default tensor type.
    # Currently torch.set_default_tensor_type() only supports CPU/CUDA, thus
    # torch._C._get_default_device() only returns either cpu or cuda.
    # In other cases, we don't have a way to set them as default yet,
    # and we should always print out device for them.
    if (
        self.device.type != torch._C._get_default_device()
        or (
            self.device.type == "cuda"
            and torch.cuda.current_device() != self.device.index
        )
        or (self.device.type == "mps")
    ):
        suffixes.append("device='" + str(self.device) + "'")

    # Tensor printing performs tensor operations like slice, indexing, etc to make it in a
    # representable format. These operations on ipu/xla/lazy/mtia tensor results in compilations. Hence,
    # to avoid compilations, copying the tensor to cpu before printing.
    if self.device.type in ["xla", "lazy", "ipu", "mtia"]:
        self = self.to("cpu")

    # TODO: add an API to map real -> complex dtypes
    _default_complex_dtype = (
        torch.cdouble if torch.get_default_dtype() == torch.double else torch.cfloat
    )
    # dtypes in this set are omitted from the suffixes (they are inferable).
    has_default_dtype = self.dtype in (
        torch.get_default_dtype(),
        _default_complex_dtype,
        torch.int64,
        torch.bool,
    )
    if self.is_sparse:
        # Sparse COO: print indices and values as two stacked sub-tensors.
        suffixes.append("size=" + str(tuple(self.shape)))
        from torch._subclasses.fake_tensor import FakeTensor

        if not self.is_meta and not isinstance(self, FakeTensor):
            suffixes.append("nnz=" + str(self._nnz()))
        if not has_default_dtype:
            suffixes.append("dtype=" + str(self.dtype))
        if not custom_contents_provided:
            indices_prefix = "indices=tensor("
            indices = self._indices().detach()
            indices_str = _tensor_str(indices, indent + len(indices_prefix))
            if indices.numel() == 0:
                indices_str += ", size=" + str(tuple(indices.shape))
            values_prefix = "values=tensor("
            values = self._values().detach()
            values_str = _tensor_str(values, indent + len(values_prefix))
            if values.numel() == 0:
                values_str += ", size=" + str(tuple(values.shape))
            tensor_str = (
                indices_prefix
                + indices_str
                + "),\n"
                + " " * indent
                + values_prefix
                + values_str
                + ")"
            )
    elif self.layout in {
        torch.sparse_csr,
        torch.sparse_csc,
        torch.sparse_bsr,
        torch.sparse_bsc,
    }:
        # Sparse compressed layouts: compressed + plain indices + values.
        suffixes.append("size=" + str(tuple(self.shape)))
        suffixes.append("nnz=" + str(self._nnz()))
        if not has_default_dtype:
            suffixes.append("dtype=" + str(self.dtype))
        if not custom_contents_provided:
            compressed_indices_method, plain_indices_method = {
                torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
                torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
                torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
                torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
            }[self.layout]
            if self.layout in {torch.sparse_csr, torch.sparse_bsr}:
                cdimname, pdimname = "row", "column"
            else:
                cdimname, pdimname = "column", "row"
            # e.g. "crow_indices=tensor(" / "ccol_indices=tensor("
            compressed_indices_prefix = f"c{cdimname[:3]}_indices=tensor("
            compressed_indices = compressed_indices_method(self).detach()
            compressed_indices_str = _tensor_str(
                compressed_indices, indent + len(compressed_indices_prefix)
            )
            if compressed_indices.numel() == 0:
                compressed_indices_str += ", size=" + str(
                    tuple(compressed_indices.shape)
                )
            plain_indices_prefix = f"{pdimname[:3]}_indices=tensor("
            plain_indices = plain_indices_method(self).detach()
            plain_indices_str = _tensor_str(
                plain_indices, indent + len(plain_indices_prefix)
            )
            if plain_indices.numel() == 0:
                plain_indices_str += ", size=" + str(tuple(plain_indices.shape))
            values_prefix = "values=tensor("
            values = self.values().detach()
            values_str = _tensor_str(values, indent + len(values_prefix))
            if values.numel() == 0:
                values_str += ", size=" + str(tuple(values.shape))
            tensor_str = (
                compressed_indices_prefix
                + compressed_indices_str
                + "),\n"
                + " " * indent
                + plain_indices_prefix
                + plain_indices_str
                + "),\n"
                + " " * indent
                + values_prefix
                + values_str
                + ")"
            )
    elif self.is_quantized:
        suffixes.append("size=" + str(tuple(self.shape)))
        if not has_default_dtype:
            suffixes.append("dtype=" + str(self.dtype))
        suffixes.append("quantization_scheme=" + str(self.qscheme()))
        if (
            self.qscheme() == torch.per_tensor_affine
            or self.qscheme() == torch.per_tensor_symmetric
        ):
            suffixes.append("scale=" + str(self.q_scale()))
            suffixes.append("zero_point=" + str(self.q_zero_point()))
        elif (
            self.qscheme() == torch.per_channel_affine
            or self.qscheme() == torch.per_channel_symmetric
            or self.qscheme() == torch.per_channel_affine_float_qparams
        ):
            suffixes.append("scale=" + str(self.q_per_channel_scales()))
            suffixes.append("zero_point=" + str(self.q_per_channel_zero_points()))
            suffixes.append("axis=" + str(self.q_per_channel_axis()))
        if not custom_contents_provided:
            # The data shown is the dequantized values.
            tensor_str = _tensor_str(self.dequantize(), indent)
    elif self.is_nested:
        if not custom_contents_provided:

            def indented_str(s, indent):
                return "\n".join(f"  {line}" for line in s.split("\n"))

            strs = ",\n".join(
                indented_str(str(t), indent + 1)
                for t in torch.ops.aten.unbind.int(self, 0)
            )
            tensor_str = f"[\n{strs}\n]"
    elif torch._is_functional_tensor(self):
        prefix = "_to_functional_tensor("
        tensor_str = repr(torch._from_functional_tensor(self))
    else:
        # Circular import problem, so we import it here
        from torch._subclasses.fake_tensor import FakeTensor

        if self.is_meta or isinstance(self, FakeTensor):
            suffixes.append("size=" + str(tuple(self.shape)))
            if self.dtype != torch.get_default_dtype():
                suffixes.append("dtype=" + str(self.dtype))
            # TODO: This implies that ellipses is valid syntax for allocating
            # a meta tensor or FakeTensor, which it could be, but it isn't right now
            if not custom_contents_provided:
                tensor_str = "..."
        else:
            if self.numel() == 0 and not self.is_sparse:
                # Explicitly print the shape if it is not (0,), to match NumPy behavior
                if self.dim() != 1:
                    suffixes.append("size=" + str(tuple(self.shape)))

                # In an empty tensor, there are no elements to infer if the dtype
                # should be int64, so it must be shown explicitly.
                if self.dtype != torch.get_default_dtype():
                    suffixes.append("dtype=" + str(self.dtype))
                if not custom_contents_provided:
                    tensor_str = "[]"
            else:
                if not PRINT_OPTS.edgeitems:
                    suffixes.append("size=" + str(tuple(self.shape)))

                if not has_default_dtype:
                    suffixes.append("dtype=" + str(self.dtype))

                if not custom_contents_provided:
                    if self.layout != torch.strided:
                        tensor_str = _tensor_str(self.to_dense(), indent)
                    else:
                        tensor_str = _tensor_str(self, indent)

    if self.layout != torch.strided:
        suffixes.append("layout=" + str(self.layout))

    # Use inp here to get the original grad_fn and not the one generated by the forward grad
    # unpacking.
    grad_fn_name = None
    try:
        grad_fn = inp.grad_fn
    except RuntimeError:
        # Accessing the grad_fn calls rebasing logic which would cause an error
        # if that tensor is a view created in no-grad mode modified in-place in
        # no-grad mode. See: https://github.com/pytorch/pytorch/issues/99968
        grad_fn_name = "Invalid"

    if grad_fn_name is None and grad_fn is not None:
        grad_fn_name = type(grad_fn).__name__
        if grad_fn_name == "CppFunction":
            grad_fn_name = grad_fn.name().rsplit("::", 1)[-1]

    if grad_fn_name is not None:
        suffixes.append(f"grad_fn=<{grad_fn_name}>")
    elif inp.requires_grad:
        suffixes.append("requires_grad=True")

    if self.has_names():
        suffixes.append(f"names={self.names}")

    if tangent is not None:
        suffixes.append(f"tangent={tangent}")

    string_repr = _add_suffixes(
        prefix + tensor_str, suffixes, indent, force_newline=self.is_sparse
    )

    # Check if this instance is flagged as a parameter and change the repr accordingly.
    # Unfortunately, this function has to be aware of this detail.
    # NB: This is currently skipped for plain tensor parameters to maintain BC. In the future,
    # this should be done for those as well to produce a valid repr.
    if isinstance(self, torch.nn.Parameter) and not is_plain_tensor:
        string_repr = f"Parameter({string_repr})"

    return string_repr
+
+
def _functorch_wrapper_str_intern(tensor, *, tensor_contents=None):
    """Render a functorch-wrapped tensor (Batched / GradTracking /
    Functional) by unwrapping one level and embedding the inner value's
    repr."""
    level = torch._C._functorch.maybe_get_level(tensor)
    assert level != -1

    if torch._C._functorch.is_functionaltensor(tensor):
        # Since we're unwrapping the FunctionalTensorWrapper, we need to make sure
        # that it's up to date first
        torch._sync(tensor)

    value = torch._C._functorch.get_unwrapped(tensor)
    value_repr = repr(value)

    indented_value_repr = textwrap.indent(value_repr, " " * 4)
    if torch._C._functorch.is_batchedtensor(tensor):
        bdim = torch._C._functorch.maybe_get_bdim(tensor)
        assert bdim != -1
        return (
            f"BatchedTensor(lvl={level}, bdim={bdim}, value=\n"
            f"{indented_value_repr}\n"
            f")"
        )
    if torch._C._functorch.is_gradtrackingtensor(tensor):
        return (
            f"GradTrackingTensor(lvl={level}, value=\n" f"{indented_value_repr}\n" f")"
        )
    if torch._C._functorch.is_functionaltensor(tensor):
        return f"FunctionalTensor(lvl={level}, value=\\\n{value_repr})"

    raise ValueError("We don't know how to print this, please file us an issue")
+
+
def _str(self, *, tensor_contents=None):
    """Entry point for ``Tensor.__str__``/``__repr__``.

    Formatting runs under no-grad with dispatch modes disabled so printing
    never records autograd history or re-enters custom dispatch.
    """
    with torch.no_grad(), torch.utils._python_dispatch._disable_current_modes():
        # Intentionally unused name: holding the guard object keeps functorch
        # dispatch disabled for the duration of this call.
        guard = torch._C._DisableFuncTorch()
        return _str_intern(self, tensor_contents=tensor_contents)
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_torch_docs.py b/env-llmeval/lib/python3.10/site-packages/torch/_torch_docs.py
new file mode 100644
index 0000000000000000000000000000000000000000..045d3c14ed4b93f442ee436b861558f14d40660a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_torch_docs.py
@@ -0,0 +1,14149 @@
+"""Adds docstrings to functions defined in the torch._C"""
+
+import re
+
+import torch._C
+from torch._C import _add_docstr as add_docstr
+
+
def parse_kwargs(desc):
    """Maps a description of args to a dictionary of {argname: description}.
    Input:
        ('    weight (Tensor): a weight tensor\n' +
         '        Some optional description')
    Output: {
        'weight': \
        'weight (Tensor): a weight tensor\n    Some optional description'
    }
    """
    # An argument begins on a newline indented by exactly four spaces;
    # continuation lines are indented deeper, so they stay attached.
    splitter = re.compile(r"\n\s{4}(?!\s)")
    sections = (chunk.strip() for chunk in splitter.split(desc))
    return {chunk.split(" ")[0]: chunk for chunk in sections if chunk}
+
+
def merge_dicts(*dicts):
    """Merge the given dicts left-to-right; later dicts win on duplicate
    keys."""
    merged = {}
    for d in dicts:
        merged.update(d)
    return merged
+
+
# Shared docstring fragments; each add_docstr call below interpolates these
# via str.format(**...).
common_args = parse_kwargs(
    """
    input (Tensor): the input tensor.
    generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
    out (Tensor, optional): the output tensor.
    memory_format (:class:`torch.memory_format`, optional): the desired memory format of
        returned tensor. Default: ``torch.preserve_format``.
"""
)

reduceops_common_args = merge_dicts(
    common_args,
    parse_kwargs(
        """
    dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
        If specified, the input tensor is casted to :attr:`dtype` before the operation
        is performed. This is useful for preventing data type overflows. Default: None.
    keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
"""
    ),
)

multi_dim_common = merge_dicts(
    reduceops_common_args,
    parse_kwargs(
        """
    dim (int or tuple of ints): the dimension or dimensions to reduce.
"""
    ),
    {
        "keepdim_details": """
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
"""
    },
    {
        "opt_dim": """
    dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
        If ``None``, all dimensions are reduced.
"""
    },
)

single_dim_common = merge_dicts(
    reduceops_common_args,
    parse_kwargs(
        """
    dim (int): the dimension to reduce.
"""
    ),
    {
        "keepdim_details": """If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
the output tensor having 1 fewer dimension than :attr:`input`."""
    },
)

factory_common_args = merge_dicts(
    common_args,
    parse_kwargs(
        """
    dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
        Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
    layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
        Default: ``torch.strided``.
    device (:class:`torch.device`, optional): the desired device of returned tensor.
        Default: if ``None``, uses the current device for the default tensor type
        (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
        for CPU tensor types and the current CUDA device for CUDA tensor types.
    requires_grad (bool, optional): If autograd should record operations on the
        returned tensor. Default: ``False``.
    pin_memory (bool, optional): If set, returned tensor would be allocated in
        the pinned memory. Works only for CPU tensors. Default: ``False``.
    memory_format (:class:`torch.memory_format`, optional): the desired memory format of
        returned Tensor. Default: ``torch.contiguous_format``.
    check_invariants (bool, optional): If sparse tensor invariants are checked.
        Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`,
        initially False.
"""
    ),
    {
        "sparse_factory_device_note": """\
.. note::

   If the ``device`` argument is not specified the device of the given
   :attr:`values` and indices tensor(s) must match. If, however, the
   argument is specified the input Tensors will be converted to the
   given device and in turn determine the device of the constructed
   sparse tensor."""
    },
)

factory_like_common_args = parse_kwargs(
    """
    input (Tensor): the size of :attr:`input` will determine size of the output tensor.
    layout (:class:`torch.layout`, optional): the desired layout of returned tensor.
        Default: if ``None``, defaults to the layout of :attr:`input`.
    dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
        Default: if ``None``, defaults to the dtype of :attr:`input`.
    device (:class:`torch.device`, optional): the desired device of returned tensor.
        Default: if ``None``, defaults to the device of :attr:`input`.
    requires_grad (bool, optional): If autograd should record operations on the
        returned tensor. Default: ``False``.
    pin_memory (bool, optional): If set, returned tensor would be allocated in
        the pinned memory. Works only for CPU tensors. Default: ``False``.
    memory_format (:class:`torch.memory_format`, optional): the desired memory format of
        returned Tensor. Default: ``torch.preserve_format``.
"""
)

factory_data_common_args = parse_kwargs(
    """
    data (array_like): Initial data for the tensor. Can be a list, tuple,
        NumPy ``ndarray``, scalar, and other types.
    dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
        Default: if ``None``, infers data type from :attr:`data`.
    device (:class:`torch.device`, optional): the desired device of returned tensor.
        Default: if ``None``, uses the current device for the default tensor type
        (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
        for CPU tensor types and the current CUDA device for CUDA tensor types.
    requires_grad (bool, optional): If autograd should record operations on the
        returned tensor. Default: ``False``.
    pin_memory (bool, optional): If set, returned tensor would be allocated in
        the pinned memory. Works only for CPU tensors. Default: ``False``.
"""
)

# NOTE(review): the Sphinx cross-reference targets below (``<tf32_on_ampere>``
# and ``<fp16_on_mi200>``) were missing, leaving dangling ``:ref:`` roles;
# restored to match the published PyTorch "Numerical accuracy" docs.
tf32_notes = {
    "tf32_note": """This operator supports :ref:`TensorFloat32<tf32_on_ampere>`."""
}

rocm_fp16_notes = {
    "rocm_fp16_note": """On certain ROCm devices, when using float16 inputs this module will use \
:ref:`different precision<fp16_on_mi200>` for backward."""
}

reproducibility_notes = {
    "forward_reproducibility_note": """This operation may behave nondeterministically when given tensors on \
a CUDA device. See :doc:`/notes/randomness` for more information.""",
    "backward_reproducibility_note": """This operation may produce nondeterministic gradients when given tensors on \
a CUDA device. See :doc:`/notes/randomness` for more information.""",
    "cudnn_reproducibility_note": """In some circumstances when given tensors on a CUDA device \
and using CuDNN, this operator may select a nondeterministic algorithm to increase performance. If this is \
undesirable, you can try to make the operation deterministic (potentially at \
a performance cost) by setting ``torch.backends.cudnn.deterministic = True``. \
See :doc:`/notes/randomness` for more information.""",
}

sparse_support_notes = {
    "sparse_beta_warning": """
.. warning::
    Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported,
    or may not have autograd support. If you notice missing functionality please
    open a feature request.""",
}
+
# torch.abs and its NumPy-style alias.
add_docstr(
    torch.abs,
    r"""
abs(input, *, out=None) -> Tensor

Computes the absolute value of each element in :attr:`input`.

.. math::
    \text{out}_{i} = |\text{input}_{i}|
"""
    + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> torch.abs(torch.tensor([-1, -2, 3]))
    tensor([ 1, 2, 3])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.absolute,
    r"""
absolute(input, *, out=None) -> Tensor

Alias for :func:`torch.abs`
""",
)

# torch.acos and its alias.
add_docstr(
    torch.acos,
    r"""
acos(input, *, out=None) -> Tensor

Computes the inverse cosine of each element in :attr:`input`.

.. math::
    \text{out}_{i} = \cos^{-1}(\text{input}_{i})
"""
    + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4)
    >>> a
    tensor([ 0.3348, -0.5889, 0.2005, -0.1584])
    >>> torch.acos(a)
    tensor([ 1.2294, 2.2004, 1.3690, 1.7298])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.arccos,
    r"""
arccos(input, *, out=None) -> Tensor

Alias for :func:`torch.acos`.
""",
)

# torch.acosh and its alias.
add_docstr(
    torch.acosh,
    r"""
acosh(input, *, out=None) -> Tensor

Returns a new tensor with the inverse hyperbolic cosine of the elements of :attr:`input`.

.. math::
    \text{out}_{i} = \cosh^{-1}(\text{input}_{i})

Note:
    The domain of the inverse hyperbolic cosine is `[1, inf)` and values outside this range
    will be mapped to ``NaN``, except for `+ INF` for which the output is mapped to `+ INF`.
"""
    + r"""
Args:
    {input}

Keyword arguments:
    {out}

Example::

    >>> a = torch.randn(4).uniform_(1, 2)
    >>> a
    tensor([ 1.3192, 1.9915, 1.9674, 1.7151 ])
    >>> torch.acosh(a)
    tensor([ 0.7791, 1.3120, 1.2979, 1.1341 ])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.arccosh,
    r"""
arccosh(input, *, out=None) -> Tensor

Alias for :func:`torch.acosh`.
""",
)

# Out-of-place index_add; the full description lives on the in-place
# Tensor method it mirrors.
add_docstr(
    torch.index_add,
    r"""
index_add(input, dim, index, source, *, alpha=1, out=None) -> Tensor

See :meth:`~Tensor.index_add_` for function description.
""",
)
+
# NOTE(review): this docstring previously pointed readers at
# ``Tensor.index_add_``; the out-of-place ``torch.index_copy`` is documented
# by the in-place ``Tensor.index_copy_`` method.
add_docstr(
    torch.index_copy,
    r"""
index_copy(input, dim, index, source, *, out=None) -> Tensor

See :meth:`~Tensor.index_copy_` for function description.
""",
)
+
# Out-of-place index_reduce; full description lives on Tensor.index_reduce_.
add_docstr(
    torch.index_reduce,
    r"""
index_reduce(input, dim, index, source, reduce, *, include_self=True, out=None) -> Tensor

See :meth:`~Tensor.index_reduce_` for function description.
""",
)
+
# torch.add: element-wise addition with an optional scaling factor for `other`.
# Fixed: the Sphinx :ref: roles had their angle-bracket link targets stripped
# (rendering as dead references); restored <broadcasting-semantics> and
# <type-promotion-doc>.
add_docstr(
    torch.add,
    r"""
add(input, other, *, alpha=1, out=None) -> Tensor

Adds :attr:`other`, scaled by :attr:`alpha`, to :attr:`input`.

.. math::
    \text{{out}}_i = \text{{input}}_i + \text{{alpha}} \times \text{{other}}_i
"""
    + r"""

Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.

Args:
    {input}
    other (Tensor or Number): the tensor or number to add to :attr:`input`.

Keyword arguments:
    alpha (Number): the multiplier for :attr:`other`.
    {out}

Examples::

    >>> a = torch.randn(4)
    >>> a
    tensor([ 0.0202, 1.0985, 1.3506, -0.6056])
    >>> torch.add(a, 20)
    tensor([ 20.0202, 21.0985, 21.3506, 19.3944])

    >>> b = torch.randn(4)
    >>> b
    tensor([-0.9732, -0.3497, 0.6245, 0.4022])
    >>> c = torch.randn(4, 1)
    >>> c
    tensor([[ 0.3743],
            [-1.7724],
            [-0.5811],
            [-0.8017]])
    >>> torch.add(b, c, alpha=10)
    tensor([[ 2.7695, 3.3930, 4.3672, 4.1450],
            [-18.6971, -18.0736, -17.0994, -17.3216],
            [ -6.7845, -6.1610, -5.1868, -5.4090],
            [ -8.9902, -8.3667, -7.3925, -7.6147]])
""".format(
        **common_args
    ),
)
+
# torch.addbmm: batched matmul reduced (summed) over the batch dimension,
# plus a scaled `input` term.
# Fixed: restored the stripped <broadcasting-semantics> target in the
# :ref:`broadcastable` cross-reference.
add_docstr(
    torch.addbmm,
    r"""
addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor

Performs a batch matrix-matrix product of matrices stored
in :attr:`batch1` and :attr:`batch2`,
with a reduced add step (all matrix multiplications get accumulated
along the first dimension).
:attr:`input` is added to the final result.

:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the
same number of matrices.

If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
:math:`(b \times m \times p)` tensor, :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
and :attr:`out` will be a :math:`(n \times p)` tensor.

.. math::
    out = \beta\ \text{input} + \alpha\ (\sum_{i=0}^{b-1} \text{batch1}_i \mathbin{@} \text{batch2}_i)

If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
"""
    + r"""
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and :attr:`alpha`
must be real numbers, otherwise they should be integers.

{tf32_note}

{rocm_fp16_note}

Args:
    batch1 (Tensor): the first batch of matrices to be multiplied
    batch2 (Tensor): the second batch of matrices to be multiplied

Keyword args:
    beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
    input (Tensor): matrix to be added
    alpha (Number, optional): multiplier for `batch1 @ batch2` (:math:`\alpha`)
    {out}

Example::

    >>> M = torch.randn(3, 5)
    >>> batch1 = torch.randn(10, 3, 4)
    >>> batch2 = torch.randn(10, 4, 5)
    >>> torch.addbmm(M, batch1, batch2)
    tensor([[ 6.6311, 0.0503, 6.9768, -12.0362, -2.1653],
            [ -4.8185, -1.4255, -6.6760, 8.9453, 2.5743],
            [ -3.8202, 4.3691, 1.0943, -1.1109, 5.4730]])
""".format(
        **common_args, **tf32_notes, **rocm_fp16_notes
    ),
)
+
# torch.addcdiv: input + value * (tensor1 / tensor2), element-wise.
# Fixed: restored the stripped <broadcasting-semantics> target in the
# :ref:`broadcastable` cross-reference.
add_docstr(
    torch.addcdiv,
    r"""
addcdiv(input, tensor1, tensor2, *, value=1, out=None) -> Tensor

Performs the element-wise division of :attr:`tensor1` by :attr:`tensor2`,
multiplies the result by the scalar :attr:`value` and adds it to :attr:`input`.

.. warning::
    Integer division with addcdiv is no longer supported, and in a future
    release addcdiv will perform a true division of tensor1 and tensor2.
    The historic addcdiv behavior can be implemented as
    (input + value * torch.trunc(tensor1 / tensor2)).to(input.dtype)
    for integer inputs and as (input + value * tensor1 / tensor2) for float inputs.
    The future addcdiv behavior is just the latter implementation:
    (input + value * tensor1 / tensor2), for all dtypes.

.. math::
    \text{out}_i = \text{input}_i + \text{value} \times \frac{\text{tensor1}_i}{\text{tensor2}_i}
"""
    + r"""

The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be
:ref:`broadcastable <broadcasting-semantics>`.

For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be
a real number, otherwise an integer.

Args:
    input (Tensor): the tensor to be added
    tensor1 (Tensor): the numerator tensor
    tensor2 (Tensor): the denominator tensor

Keyword args:
    value (Number, optional): multiplier for :math:`\text{{tensor1}} / \text{{tensor2}}`
    {out}

Example::

    >>> t = torch.randn(1, 3)
    >>> t1 = torch.randn(3, 1)
    >>> t2 = torch.randn(1, 3)
    >>> torch.addcdiv(t, t1, t2, value=0.1)
    tensor([[-0.2312, -3.6496, 0.1312],
            [-1.0428, 3.4292, -0.1030],
            [-0.5369, -0.9829, 0.0430]])
""".format(
        **common_args
    ),
)
+
# torch.addcmul: input + value * tensor1 * tensor2, element-wise.
# Fixed: restored the stripped <broadcasting-semantics> ref target, and
# corrected ":attr:`tensor`" to ":attr:`input`" — the function has no
# parameter named `tensor`.
add_docstr(
    torch.addcmul,
    r"""
addcmul(input, tensor1, tensor2, *, value=1, out=None) -> Tensor

Performs the element-wise multiplication of :attr:`tensor1`
by :attr:`tensor2`, multiplies the result by the scalar :attr:`value`
and adds it to :attr:`input`.

.. math::
    \text{out}_i = \text{input}_i + \text{value} \times \text{tensor1}_i \times \text{tensor2}_i
"""
    + r"""
The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be
:ref:`broadcastable <broadcasting-semantics>`.

For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be
a real number, otherwise an integer.

Args:
    input (Tensor): the tensor to be added
    tensor1 (Tensor): the tensor to be multiplied
    tensor2 (Tensor): the tensor to be multiplied

Keyword args:
    value (Number, optional): multiplier for :math:`tensor1 .* tensor2`
    {out}

Example::

    >>> t = torch.randn(1, 3)
    >>> t1 = torch.randn(3, 1)
    >>> t2 = torch.randn(1, 3)
    >>> torch.addcmul(t, t1, t2, value=0.1)
    tensor([[-0.8635, -0.6391, 1.6174],
            [-0.7617, -0.5879, 1.7388],
            [-0.8353, -0.6249, 1.6511]])
""".format(
        **common_args
    ),
)
+
# torch.addmm: beta * input + alpha * (mat1 @ mat2).
# Fixed: restored the stripped targets in the :ref:`broadcastable` and
# :ref:`sparse layouts` cross-references.
add_docstr(
    torch.addmm,
    r"""
addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor

Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`.
The matrix :attr:`input` is added to the final result.

If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
:math:`(m \times p)` tensor, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
and :attr:`out` will be a :math:`(n \times p)` tensor.

:attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between
:attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively.

.. math::
    \text{out} = \beta\ \text{input} + \alpha\ (\text{mat1}_i \mathbin{@} \text{mat2}_i)

If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
"""
    + r"""
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.

This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`. If
:attr:`input` is sparse the result will have the same layout and if :attr:`out`
is provided it must have the same layout as :attr:`input`.

{sparse_beta_warning}

{tf32_note}

{rocm_fp16_note}

Args:
    input (Tensor): matrix to be added
    mat1 (Tensor): the first matrix to be matrix multiplied
    mat2 (Tensor): the second matrix to be matrix multiplied

Keyword args:
    beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
    alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
    {out}

Example::

    >>> M = torch.randn(2, 3)
    >>> mat1 = torch.randn(2, 3)
    >>> mat2 = torch.randn(3, 3)
    >>> torch.addmm(M, mat1, mat2)
    tensor([[-4.8716, 1.4671, -1.3746],
            [ 0.7573, -3.9555, -2.8681]])
""".format(
        **common_args, **tf32_notes, **rocm_fp16_notes, **sparse_support_notes
    ),
)
+
# torch.adjoint: view returning the conjugate transpose of the last two dims.
add_docstr(
    torch.adjoint,
    r"""
adjoint(Tensor) -> Tensor
Returns a view of the tensor conjugated and with the last two dimensions transposed.

``x.adjoint()`` is equivalent to ``x.transpose(-2, -1).conj()`` for complex tensors and
to ``x.transpose(-2, -1)`` for real tensors.

Example::
    >>> x = torch.arange(4, dtype=torch.float)
    >>> A = torch.complex(x, x).reshape(2, 2)
    >>> A
    tensor([[0.+0.j, 1.+1.j],
            [2.+2.j, 3.+3.j]])
    >>> A.adjoint()
    tensor([[0.-0.j, 2.-2.j],
            [1.-1.j, 3.-3.j]])
    >>> (A.adjoint() == A.mH).all()
    tensor(True)
""",
)

# torch.sspaddmm: sparse @ dense + sparse; the sparse counterpart of addmm.
add_docstr(
    torch.sspaddmm,
    r"""
sspaddmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor

Matrix multiplies a sparse tensor :attr:`mat1` with a dense tensor
:attr:`mat2`, then adds the sparse tensor :attr:`input` to the result.

Note: This function is equivalent to :func:`torch.addmm`, except
:attr:`input` and :attr:`mat1` are sparse.

Args:
    input (Tensor): a sparse matrix to be added
    mat1 (Tensor): a sparse matrix to be matrix multiplied
    mat2 (Tensor): a dense matrix to be matrix multiplied

Keyword args:
    beta (Number, optional): multiplier for :attr:`mat` (:math:`\beta`)
    alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
    {out}
""".format(
        **common_args
    ),
)

# torch.smm: sparse-matrix x dense-matrix product.
add_docstr(
    torch.smm,
    r"""
smm(input, mat) -> Tensor

Performs a matrix multiplication of the sparse matrix :attr:`input`
with the dense matrix :attr:`mat`.

Args:
    input (Tensor): a sparse matrix to be matrix multiplied
    mat (Tensor): a dense matrix to be matrix multiplied
""",
)
+
# torch.addmv: beta * input + alpha * (mat @ vec).
# Fixed: restored the stripped <broadcasting-semantics> target in the
# :ref:`broadcastable` cross-reference.
add_docstr(
    torch.addmv,
    r"""
addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor

Performs a matrix-vector product of the matrix :attr:`mat` and
the vector :attr:`vec`.
The vector :attr:`input` is added to the final result.

If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
size `m`, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a 1-D tensor of size `n` and
:attr:`out` will be 1-D tensor of size `n`.

:attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between
:attr:`mat` and :attr:`vec` and the added tensor :attr:`input` respectively.

.. math::
    \text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec})

If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
"""
    + r"""
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.

Args:
    input (Tensor): vector to be added
    mat (Tensor): matrix to be matrix multiplied
    vec (Tensor): vector to be matrix multiplied

Keyword args:
    beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
    alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`)
    {out}

Example::

    >>> M = torch.randn(2)
    >>> mat = torch.randn(2, 3)
    >>> vec = torch.randn(3)
    >>> torch.addmv(M, mat, vec)
    tensor([-0.3768, -5.5565])
""".format(
        **common_args
    ),
)
+
# torch.addr: beta * input + alpha * outer(vec1, vec2).
# Fixed: restored the stripped <broadcasting-semantics> target in the
# :ref:`broadcastable` cross-reference.
add_docstr(
    torch.addr,
    r"""
addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor

Performs the outer-product of vectors :attr:`vec1` and :attr:`vec2`
and adds it to the matrix :attr:`input`.

Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the
outer product between :attr:`vec1` and :attr:`vec2` and the added matrix
:attr:`input` respectively.

.. math::
    \text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2})

If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
"""
    + r"""
If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector
of size `m`, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a matrix of size
:math:`(n \times m)` and :attr:`out` will be a matrix of size
:math:`(n \times m)`.

Args:
    input (Tensor): matrix to be added
    vec1 (Tensor): the first vector of the outer product
    vec2 (Tensor): the second vector of the outer product

Keyword args:
    beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
    alpha (Number, optional): multiplier for :math:`\text{{vec1}} \otimes \text{{vec2}}` (:math:`\alpha`)
    {out}

Example::

    >>> vec1 = torch.arange(1., 4.)
    >>> vec2 = torch.arange(1., 3.)
    >>> M = torch.zeros(3, 2)
    >>> torch.addr(M, vec1, vec2)
    tensor([[ 1., 2.],
            [ 2., 4.],
            [ 3., 6.]])
""".format(
        **common_args
    ),
)
+
# torch.allclose: tolerance-based tensor comparison (returns a Python bool).
# Fixed: the external hyperlink to numpy.allclose lost its URL target when
# angle-bracket content was stripped; restored it.
add_docstr(
    torch.allclose,
    r"""
allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) -> bool

This function checks if :attr:`input` and :attr:`other` satisfy the condition:

.. math::
    \lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert
"""
    + r"""
elementwise, for all elements of :attr:`input` and :attr:`other`. The behaviour of this function is analogous to
`numpy.allclose <https://numpy.org/doc/stable/reference/generated/numpy.allclose.html>`_

Args:
    input (Tensor): first tensor to compare
    other (Tensor): second tensor to compare
    atol (float, optional): absolute tolerance. Default: 1e-08
    rtol (float, optional): relative tolerance. Default: 1e-05
    equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. Default: ``False``

Example::

    >>> torch.allclose(torch.tensor([10000., 1e-07]), torch.tensor([10000.1, 1e-08]))
    False
    >>> torch.allclose(torch.tensor([10000., 1e-08]), torch.tensor([10000.1, 1e-09]))
    True
    >>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')]))
    False
    >>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')]), equal_nan=True)
    True
""",
)
+
# torch.all: reduction — True iff every element is truthy; the second
# ".. function::" directive documents the dim-wise overload.
add_docstr(
    torch.all,
    r"""
all(input) -> Tensor

Tests if all elements in :attr:`input` evaluate to `True`.

.. note:: This function matches the behaviour of NumPy in returning
          output of dtype `bool` for all supported dtypes except `uint8`.
          For `uint8` the dtype of output is `uint8` itself.

Example::

    >>> a = torch.rand(1, 2).bool()
    >>> a
    tensor([[False, True]], dtype=torch.bool)
    >>> torch.all(a)
    tensor(False, dtype=torch.bool)
    >>> a = torch.arange(0, 3)
    >>> a
    tensor([0, 1, 2])
    >>> torch.all(a)
    tensor(False)

.. function:: all(input, dim, keepdim=False, *, out=None) -> Tensor
   :noindex:

For each row of :attr:`input` in the given dimension :attr:`dim`,
returns `True` if all elements in the row evaluate to `True` and `False` otherwise.

{keepdim_details}

Args:
    {input}
    {dim}
    {keepdim}

Keyword args:
    {out}

Example::

    >>> a = torch.rand(4, 2).bool()
    >>> a
    tensor([[True, True],
            [True, False],
            [True, True],
            [True, True]], dtype=torch.bool)
    >>> torch.all(a, dim=1)
    tensor([ True, False, True, True], dtype=torch.bool)
    >>> torch.all(a, dim=0)
    tensor([ True, False], dtype=torch.bool)
""".format(
        **multi_dim_common
    ),
)

# torch.any: reduction — True iff at least one element is truthy; second
# directive documents the dim-wise overload.
add_docstr(
    torch.any,
    r"""
any(input) -> Tensor

Tests if any element in :attr:`input` evaluates to `True`.

.. note:: This function matches the behaviour of NumPy in returning
          output of dtype `bool` for all supported dtypes except `uint8`.
          For `uint8` the dtype of output is `uint8` itself.

Example::

    >>> a = torch.rand(1, 2).bool()
    >>> a
    tensor([[False, True]], dtype=torch.bool)
    >>> torch.any(a)
    tensor(True, dtype=torch.bool)
    >>> a = torch.arange(0, 3)
    >>> a
    tensor([0, 1, 2])
    >>> torch.any(a)
    tensor(True)

.. function:: any(input, dim, keepdim=False, *, out=None) -> Tensor
   :noindex:

For each row of :attr:`input` in the given dimension :attr:`dim`,
returns `True` if any element in the row evaluate to `True` and `False` otherwise.

{keepdim_details}

Args:
    {input}
    {dim}
    {keepdim}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4, 2) < 0
    >>> a
    tensor([[ True, True],
            [False, True],
            [ True, True],
            [False, False]])
    >>> torch.any(a, 1)
    tensor([ True, True, True, False])
    >>> torch.any(a, 0)
    tensor([True, True])
""".format(
        **multi_dim_common
    ),
)

# torch.angle: element-wise complex argument (phase) in radians.
add_docstr(
    torch.angle,
    r"""
angle(input, *, out=None) -> Tensor

Computes the element-wise angle (in radians) of the given :attr:`input` tensor.

.. math::
    \text{out}_{i} = angle(\text{input}_{i})
"""
    + r"""
Args:
    {input}

Keyword args:
    {out}

.. note:: Starting in PyTorch 1.8, angle returns pi for negative real numbers,
          zero for non-negative real numbers, and propagates NaNs. Previously
          the function would return zero for all real numbers and not propagate
          floating-point NaNs.

Example::

    >>> torch.angle(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))*180/3.14159
    tensor([ 135., 135, -45])
""".format(
        **common_args
    ),
)
+
# torch.as_strided: raw view construction from explicit size/stride/offset;
# the warning exists because out-of-bounds or overlapping views are unchecked
# beyond the storage-bounds runtime error.
add_docstr(
    torch.as_strided,
    r"""
as_strided(input, size, stride, storage_offset=None) -> Tensor

Create a view of an existing `torch.Tensor` :attr:`input` with specified
:attr:`size`, :attr:`stride` and :attr:`storage_offset`.

.. warning::
    Prefer using other view functions, like :meth:`torch.Tensor.expand`,
    to setting a view's strides manually with `as_strided`, as this
    function's behavior depends on the implementation of a tensor's storage.
    The constructed view of the storage must only refer to elements within
    the storage or a runtime error will be thrown, and if the view is
    "overlapped" (with multiple indices referring to the same element in
    memory) its behavior is undefined.

Args:
    {input}
    size (tuple or ints): the shape of the output tensor
    stride (tuple or ints): the stride of the output tensor
    storage_offset (int, optional): the offset in the underlying storage of the output tensor.
        If ``None``, the storage_offset of the output tensor will match the input tensor.

Example::

    >>> x = torch.randn(3, 3)
    >>> x
    tensor([[ 0.9039, 0.6291, 1.0795],
            [ 0.1586, 2.1939, -0.4900],
            [-0.1909, -0.7503, 1.9355]])
    >>> t = torch.as_strided(x, (2, 2), (1, 2))
    >>> t
    tensor([[0.9039, 1.0795],
            [0.6291, 0.1586]])
    >>> t = torch.as_strided(x, (2, 2), (1, 2), 1)
    tensor([[0.6291, 0.1586],
            [1.0795, 2.1939]])
""".format(
        **common_args
    ),
)
+
# torch.as_tensor: conversion that shares memory and autograd history when
# possible (contrast with torch.tensor, which always copies).
add_docstr(
    torch.as_tensor,
    r"""
as_tensor(data, dtype=None, device=None) -> Tensor

Converts :attr:`data` into a tensor, sharing data and preserving autograd
history if possible.

If :attr:`data` is already a tensor with the requested dtype and device
then :attr:`data` itself is returned, but if :attr:`data` is a
tensor with a different dtype or device then it's copied as if using
`data.to(dtype=dtype, device=device)`.

If :attr:`data` is a NumPy array (an ndarray) with the same dtype and device then a
tensor is constructed using :func:`torch.from_numpy`.

.. seealso::

    :func:`torch.tensor` never shares its data and creates a new "leaf tensor" (see :doc:`/notes/autograd`).


Args:
    {data}
    {dtype}
    device (:class:`torch.device`, optional): the device of the constructed tensor. If None and data is a tensor
        then the device of data is used. If None and data is not a tensor then
        the result tensor is constructed on the current device.


Example::

    >>> a = numpy.array([1, 2, 3])
    >>> t = torch.as_tensor(a)
    >>> t
    tensor([ 1, 2, 3])
    >>> t[0] = -1
    >>> a
    array([-1, 2, 3])

    >>> a = numpy.array([1, 2, 3])
    >>> t = torch.as_tensor(a, device=torch.device('cuda'))
    >>> t
    tensor([ 1, 2, 3])
    >>> t[0] = -1
    >>> a
    array([1, 2, 3])
""".format(
        **factory_data_common_args
    ),
)
+
# torch.asin: element-wise arcsine; inputs outside [-1, 1] yield NaN
# (visible in the example output).
add_docstr(
    torch.asin,
    r"""
asin(input, *, out=None) -> Tensor

Returns a new tensor with the arcsine of the elements of :attr:`input`.

.. math::
    \text{out}_{i} = \sin^{-1}(\text{input}_{i})
"""
    + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4)
    >>> a
    tensor([-0.5962, 1.4985, -0.4396, 1.4525])
    >>> torch.asin(a)
    tensor([-0.6387, nan, -0.4552, nan])
""".format(
        **common_args
    ),
)

# torch.arcsin: NumPy-style alias for torch.asin.
add_docstr(
    torch.arcsin,
    r"""
arcsin(input, *, out=None) -> Tensor

Alias for :func:`torch.asin`.
""",
)

# torch.asinh: element-wise inverse hyperbolic sine (defined on all reals).
add_docstr(
    torch.asinh,
    r"""
asinh(input, *, out=None) -> Tensor

Returns a new tensor with the inverse hyperbolic sine of the elements of :attr:`input`.

.. math::
    \text{out}_{i} = \sinh^{-1}(\text{input}_{i})
"""
    + r"""
Args:
    {input}

Keyword arguments:
    {out}

Example::

    >>> a = torch.randn(4)
    >>> a
    tensor([ 0.1606, -1.4267, -1.0899, -1.0250 ])
    >>> torch.asinh(a)
    tensor([ 0.1599, -1.1534, -0.9435, -0.8990 ])
""".format(
        **common_args
    ),
)

# torch.arcsinh: NumPy-style alias for torch.asinh.
add_docstr(
    torch.arcsinh,
    r"""
arcsinh(input, *, out=None) -> Tensor

Alias for :func:`torch.asinh`.
""",
)

# torch.atan: element-wise arctangent (single-argument; see atan2 for the
# quadrant-aware two-argument form).
add_docstr(
    torch.atan,
    r"""
atan(input, *, out=None) -> Tensor

Returns a new tensor with the arctangent of the elements of :attr:`input`.

.. math::
    \text{out}_{i} = \tan^{-1}(\text{input}_{i})
"""
    + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4)
    >>> a
    tensor([ 0.2341, 0.2539, -0.6256, -0.6448])
    >>> torch.atan(a)
    tensor([ 0.2299, 0.2487, -0.5591, -0.5727])
""".format(
        **common_args
    ),
)

# torch.arctan: NumPy-style alias for torch.atan.
add_docstr(
    torch.arctan,
    r"""
arctan(input, *, out=None) -> Tensor

Alias for :func:`torch.atan`.
""",
)
+
# torch.atan2: quadrant-aware element-wise arctangent of input/other.
# Fixed: restored the stripped <broadcasting-semantics> target in the
# :ref:`broadcastable` cross-reference.
add_docstr(
    torch.atan2,
    r"""
atan2(input, other, *, out=None) -> Tensor

Element-wise arctangent of :math:`\text{{input}}_{{i}} / \text{{other}}_{{i}}`
with consideration of the quadrant. Returns a new tensor with the signed angles
in radians between vector :math:`(\text{{other}}_{{i}}, \text{{input}}_{{i}})`
and vector :math:`(1, 0)`. (Note that :math:`\text{{other}}_{{i}}`, the second
parameter, is the x-coordinate, while :math:`\text{{input}}_{{i}}`, the first
parameter, is the y-coordinate.)

The shapes of ``input`` and ``other`` must be
:ref:`broadcastable <broadcasting-semantics>`.

Args:
    input (Tensor): the first input tensor
    other (Tensor): the second input tensor

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4)
    >>> a
    tensor([ 0.9041, 0.0196, -0.3108, -2.4423])
    >>> torch.atan2(a, torch.randn(4))
    tensor([ 0.9833, 0.0811, -1.9743, -1.4151])
""".format(
        **common_args
    ),
)
+
# torch.arctan2: NumPy-style alias for torch.atan2.
add_docstr(
    torch.arctan2,
    r"""
arctan2(input, other, *, out=None) -> Tensor
Alias for :func:`torch.atan2`.
""",
)

# torch.atanh: element-wise inverse hyperbolic tangent (domain (-1, 1)).
add_docstr(
    torch.atanh,
    r"""
atanh(input, *, out=None) -> Tensor

Returns a new tensor with the inverse hyperbolic tangent of the elements of :attr:`input`.

Note:
    The domain of the inverse hyperbolic tangent is `(-1, 1)` and values outside this range
    will be mapped to ``NaN``, except for the values `1` and `-1` for which the output is
    mapped to `+/-INF` respectively.

.. math::
    \text{out}_{i} = \tanh^{-1}(\text{input}_{i})
"""
    + r"""
Args:
    {input}

Keyword arguments:
    {out}

Example::

    >>> a = torch.randn(4).uniform_(-1, 1)
    >>> a
    tensor([ -0.9385, 0.2968, -0.8591, -0.1871 ])
    >>> torch.atanh(a)
    tensor([ -1.7253, 0.3060, -1.2899, -0.1893 ])
""".format(
        **common_args
    ),
)

# torch.arctanh: NumPy-style alias for torch.atanh.
add_docstr(
    torch.arctanh,
    r"""
arctanh(input, *, out=None) -> Tensor

Alias for :func:`torch.atanh`.
""",
)
+
# torch.asarray: the most general conversion entry point (tensor / ndarray /
# DLPack / buffer / scalar / sequence), with explicit copy/device/grad control.
# Fixed: two example output lines lost their angle-bracket grad_fn names
# ("grad_fn=)"); restored "grad_fn=<AddBackward0>".
add_docstr(
    torch.asarray,
    r"""
asarray(obj, *, dtype=None, device=None, copy=None, requires_grad=False) -> Tensor

Converts :attr:`obj` to a tensor.

:attr:`obj` can be one of:

1. a tensor
2. a NumPy array or a NumPy scalar
3. a DLPack capsule
4. an object that implements Python's buffer protocol
5. a scalar
6. a sequence of scalars

When :attr:`obj` is a tensor, NumPy array, or DLPack capsule the returned tensor will,
by default, not require a gradient, have the same datatype as :attr:`obj`, be on the
same device, and share memory with it. These properties can be controlled with the
:attr:`dtype`, :attr:`device`, :attr:`copy`, and :attr:`requires_grad` keyword arguments.
If the returned tensor is of a different datatype, on a different device, or a copy is
requested then it will not share its memory with :attr:`obj`. If :attr:`requires_grad`
is ``True`` then the returned tensor will require a gradient, and if :attr:`obj` is
also a tensor with an autograd history then the returned tensor will have the same history.

When :attr:`obj` is not a tensor, NumPy array, or DLPack capsule but implements Python's
buffer protocol then the buffer is interpreted as an array of bytes grouped according to
the size of the datatype passed to the :attr:`dtype` keyword argument. (If no datatype is
passed then the default floating point datatype is used, instead.) The returned tensor
will have the specified datatype (or default floating point datatype if none is specified)
and, by default, be on the CPU device and share memory with the buffer.

When :attr:`obj` is a NumPy scalar, the returned tensor will be a 0-dimensional tensor on
the CPU and that doesn't share its memory (i.e. ``copy=True``). By default datatype will
be the PyTorch datatype corresponding to the NumPy's scalar's datatype.

When :attr:`obj` is none of the above but a scalar, or a sequence of scalars then the
returned tensor will, by default, infer its datatype from the scalar values, be on the
current default device, and not share its memory.

.. seealso::

    :func:`torch.tensor` creates a tensor that always copies the data from the input object.
    :func:`torch.from_numpy` creates a tensor that always shares memory from NumPy arrays.
    :func:`torch.frombuffer` creates a tensor that always shares memory from objects that
    implement the buffer protocol.
    :func:`torch.from_dlpack` creates a tensor that always shares memory from
    DLPack capsules.

Args:
    obj (object): a tensor, NumPy array, DLPack Capsule, object that implements Python's
        buffer protocol, scalar, or sequence of scalars.

Keyword args:
    dtype (:class:`torch.dtype`, optional): the datatype of the returned tensor.
        Default: ``None``, which causes the datatype of the returned tensor to be
        inferred from :attr:`obj`.
    copy (bool, optional): controls whether the returned tensor shares memory with :attr:`obj`.
        Default: ``None``, which causes the returned tensor to share memory with :attr:`obj`
        whenever possible. If ``True`` then the returned tensor does not share its memory.
        If ``False`` then the returned tensor shares its memory with :attr:`obj` and an
        error is thrown if it cannot.
    device (:class:`torch.device`, optional): the device of the returned tensor.
        Default: ``None``, which causes the device of :attr:`obj` to be used. Or, if
        :attr:`obj` is a Python sequence, the current default device will be used.
    requires_grad (bool, optional): whether the returned tensor requires grad.
        Default: ``False``, which causes the returned tensor not to require a gradient.
        If ``True``, then the returned tensor will require a gradient, and if :attr:`obj`
        is also a tensor with an autograd history then the returned tensor will have
        the same history.

Example::

    >>> a = torch.tensor([1, 2, 3])
    >>> # Shares memory with tensor 'a'
    >>> b = torch.asarray(a)
    >>> a.data_ptr() == b.data_ptr()
    True
    >>> # Forces memory copy
    >>> c = torch.asarray(a, copy=True)
    >>> a.data_ptr() == c.data_ptr()
    False

    >>> a = torch.tensor([1., 2., 3.], requires_grad=True)
    >>> b = a + 2
    >>> b
    tensor([3., 4., 5.], grad_fn=<AddBackward0>)
    >>> # Shares memory with tensor 'b', with no grad
    >>> c = torch.asarray(b)
    >>> c
    tensor([3., 4., 5.])
    >>> # Shares memory with tensor 'b', retaining autograd history
    >>> d = torch.asarray(b, requires_grad=True)
    >>> d
    tensor([3., 4., 5.], grad_fn=<AddBackward0>)

    >>> array = numpy.array([1, 2, 3])
    >>> # Shares memory with array 'array'
    >>> t1 = torch.asarray(array)
    >>> array.__array_interface__['data'][0] == t1.data_ptr()
    True
    >>> # Copies memory due to dtype mismatch
    >>> t2 = torch.asarray(array, dtype=torch.float32)
    >>> array.__array_interface__['data'][0] == t2.data_ptr()
    False

    >>> scalar = numpy.float64(0.5)
    >>> torch.asarray(scalar)
    tensor(0.5000, dtype=torch.float64)
""",
)
+
# torch.baddbmm: batched beta * input + alpha * (batch1 @ batch2) — unlike
# addbmm, no reduction over the batch dimension.
# Fixed: restored the stripped <broadcasting-semantics> target in the
# :ref:`broadcastable` cross-reference.
add_docstr(
    torch.baddbmm,
    r"""
baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor

Performs a batch matrix-matrix product of matrices in :attr:`batch1`
and :attr:`batch2`.
:attr:`input` is added to the final result.

:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same
number of matrices.

If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
:math:`(b \times m \times p)` tensor, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a
:math:`(b \times n \times p)` tensor and :attr:`out` will be a
:math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the
same as the scaling factors used in :meth:`torch.addbmm`.

.. math::
    \text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i)

If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
"""
    + r"""
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.

{tf32_note}

{rocm_fp16_note}

Args:
    input (Tensor): the tensor to be added
    batch1 (Tensor): the first batch of matrices to be multiplied
    batch2 (Tensor): the second batch of matrices to be multiplied

Keyword args:
    beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
    alpha (Number, optional): multiplier for :math:`\text{{batch1}} \mathbin{{@}} \text{{batch2}}` (:math:`\alpha`)
    {out}

Example::

    >>> M = torch.randn(10, 3, 5)
    >>> batch1 = torch.randn(10, 3, 4)
    >>> batch2 = torch.randn(10, 4, 5)
    >>> torch.baddbmm(M, batch1, batch2).size()
    torch.Size([10, 3, 5])
""".format(
        **common_args, **tf32_notes, **rocm_fp16_notes
    ),
)
+
# torch.bernoulli: per-element Bernoulli draws where `input` holds the
# probabilities p_i in [0, 1].
add_docstr(
    torch.bernoulli,
    r"""
bernoulli(input, *, generator=None, out=None) -> Tensor

Draws binary random numbers (0 or 1) from a Bernoulli distribution.

The :attr:`input` tensor should be a tensor containing probabilities
to be used for drawing the binary random number.
Hence, all values in :attr:`input` have to be in the range:
:math:`0 \leq \text{input}_i \leq 1`.

The :math:`\text{i}^{th}` element of the output tensor will draw a
value :math:`1` according to the :math:`\text{i}^{th}` probability value given
in :attr:`input`.

.. math::
    \text{out}_{i} \sim \mathrm{Bernoulli}(p = \text{input}_{i})
"""
    + r"""
The returned :attr:`out` tensor only has values 0 or 1 and is of the same
shape as :attr:`input`.

:attr:`out` can have integral ``dtype``, but :attr:`input` must have floating
point ``dtype``.

Args:
    input (Tensor): the input tensor of probability values for the Bernoulli distribution

Keyword args:
    {generator}
    {out}

Example::

    >>> a = torch.empty(3, 3).uniform_(0, 1)  # generate a uniform random matrix with range [0, 1]
    >>> a
    tensor([[ 0.1737, 0.0950, 0.3609],
            [ 0.7148, 0.0289, 0.2676],
            [ 0.9456, 0.8937, 0.7202]])
    >>> torch.bernoulli(a)
    tensor([[ 1., 0., 0.],
            [ 0., 0., 0.],
            [ 1., 1., 1.]])

    >>> a = torch.ones(3, 3)  # probability of drawing "1" is 1
    >>> torch.bernoulli(a)
    tensor([[ 1., 1., 1.],
            [ 1., 1., 1.],
            [ 1., 1., 1.]])
    >>> a = torch.zeros(3, 3)  # probability of drawing "1" is 0
    >>> torch.bernoulli(a)
    tensor([[ 0., 0., 0.],
            [ 0., 0., 0.],
            [ 0., 0., 0.]])
""".format(
        **common_args
    ),
)
+
# torch.bincount: histogram of non-negative integer values, optionally
# weighted.
# Fixed: the example's printed tuple was missing its closing parenthesis
# after the weights tensor.
add_docstr(
    torch.bincount,
    r"""
bincount(input, weights=None, minlength=0) -> Tensor

Count the frequency of each value in an array of non-negative ints.

The number of bins (size 1) is one larger than the largest value in
:attr:`input` unless :attr:`input` is empty, in which case the result is a
tensor of size 0. If :attr:`minlength` is specified, the number of bins is at least
:attr:`minlength` and if :attr:`input` is empty, then the result is tensor of size
:attr:`minlength` filled with zeros. If ``n`` is the value at position ``i``,
``out[n] += weights[i]`` if :attr:`weights` is specified else
``out[n] += 1``.

Note:
    {backward_reproducibility_note}

Arguments:
    input (Tensor): 1-d int tensor
    weights (Tensor): optional, weight for each value in the input tensor.
        Should be of same size as input tensor.
    minlength (int): optional, minimum number of bins. Should be non-negative.

Returns:
    output (Tensor): a tensor of shape ``Size([max(input) + 1])`` if
    :attr:`input` is non-empty, else ``Size(0)``

Example::

    >>> input = torch.randint(0, 8, (5,), dtype=torch.int64)
    >>> weights = torch.linspace(0, 1, steps=5)
    >>> input, weights
    (tensor([4, 3, 6, 3, 4]),
     tensor([ 0.0000, 0.2500, 0.5000, 0.7500, 1.0000]))

    >>> torch.bincount(input)
    tensor([0, 0, 0, 2, 2, 0, 1])

    >>> input.bincount(weights)
    tensor([0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 0.0000, 0.5000])
""".format(
        **reproducibility_notes
    ),
)
+
+add_docstr(
+ torch.bitwise_not,
+ r"""
+bitwise_not(input, *, out=None) -> Tensor
+
+Computes the bitwise NOT of the given input tensor. The input tensor must be of
+integral or Boolean types. For bool tensors, it computes the logical NOT.
+
+Args:
+ {input}
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> torch.bitwise_not(torch.tensor([-1, -2, 3], dtype=torch.int8))
+ tensor([ 0, 1, -4], dtype=torch.int8)
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.bmm,
+ r"""
+bmm(input, mat2, *, out=None) -> Tensor
+
+Performs a batch matrix-matrix product of matrices stored in :attr:`input`
+and :attr:`mat2`.
+
+:attr:`input` and :attr:`mat2` must be 3-D tensors each containing
+the same number of matrices.
+
+If :attr:`input` is a :math:`(b \times n \times m)` tensor, :attr:`mat2` is a
+:math:`(b \times m \times p)` tensor, :attr:`out` will be a
+:math:`(b \times n \times p)` tensor.
+
+.. math::
+ \text{out}_i = \text{input}_i \mathbin{@} \text{mat2}_i
+"""
+ + r"""
+{tf32_note}
+
+{rocm_fp16_note}
+
+.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
+ For broadcasting matrix products, see :func:`torch.matmul`.
+
+Args:
+ input (Tensor): the first batch of matrices to be multiplied
+ mat2 (Tensor): the second batch of matrices to be multiplied
+
+Keyword Args:
+ {out}
+
+Example::
+
+ >>> input = torch.randn(10, 3, 4)
+ >>> mat2 = torch.randn(10, 4, 5)
+ >>> res = torch.bmm(input, mat2)
+ >>> res.size()
+ torch.Size([10, 3, 5])
+""".format(
+ **common_args, **tf32_notes, **rocm_fp16_notes
+ ),
+)
+
+add_docstr(
+ torch.bitwise_and,
+ r"""
+bitwise_and(input, other, *, out=None) -> Tensor
+
+Computes the bitwise AND of :attr:`input` and :attr:`other`. The input tensor must be of
+integral or Boolean types. For bool tensors, it computes the logical AND.
+
+Args:
+ input: the first input tensor
+ other: the second input tensor
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> torch.bitwise_and(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
+ tensor([1, 0, 3], dtype=torch.int8)
+ >>> torch.bitwise_and(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
+ tensor([ False, True, False])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.bitwise_or,
+ r"""
+bitwise_or(input, other, *, out=None) -> Tensor
+
+Computes the bitwise OR of :attr:`input` and :attr:`other`. The input tensor must be of
+integral or Boolean types. For bool tensors, it computes the logical OR.
+
+Args:
+ input: the first input tensor
+ other: the second input tensor
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> torch.bitwise_or(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
+ tensor([-1, -2, 3], dtype=torch.int8)
+ >>> torch.bitwise_or(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
+ tensor([ True, True, False])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.bitwise_xor,
+ r"""
+bitwise_xor(input, other, *, out=None) -> Tensor
+
+Computes the bitwise XOR of :attr:`input` and :attr:`other`. The input tensor must be of
+integral or Boolean types. For bool tensors, it computes the logical XOR.
+
+Args:
+ input: the first input tensor
+ other: the second input tensor
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> torch.bitwise_xor(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
+ tensor([-2, -2, 0], dtype=torch.int8)
+ >>> torch.bitwise_xor(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
+ tensor([ True, False, False])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.bitwise_left_shift,
+ r"""
+bitwise_left_shift(input, other, *, out=None) -> Tensor
+
+Computes the left arithmetic shift of :attr:`input` by :attr:`other` bits.
+The input tensor must be of integral type. This operator supports
+:ref:`broadcasting to a common shape <broadcasting-semantics>` and
+:ref:`type promotion <type-promotion-doc>`.
+
+The operation applied is:
+
+.. math::
+ \text{{out}}_i = \text{{input}}_i << \text{{other}}_i
+
+Args:
+ input (Tensor or Scalar): the first input tensor
+ other (Tensor or Scalar): the second input tensor
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> torch.bitwise_left_shift(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
+ tensor([-2, -2, 24], dtype=torch.int8)
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.bitwise_right_shift,
+ r"""
+bitwise_right_shift(input, other, *, out=None) -> Tensor
+
+Computes the right arithmetic shift of :attr:`input` by :attr:`other` bits.
+The input tensor must be of integral type. This operator supports
+:ref:`broadcasting to a common shape <broadcasting-semantics>` and
+:ref:`type promotion <type-promotion-doc>`.
+In any case, if the value of the right operand is negative or is greater
+or equal to the number of bits in the promoted left operand, the behavior is undefined.
+
+The operation applied is:
+
+.. math::
+ \text{{out}}_i = \text{{input}}_i >> \text{{other}}_i
+
+Args:
+ input (Tensor or Scalar): the first input tensor
+ other (Tensor or Scalar): the second input tensor
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> torch.bitwise_right_shift(torch.tensor([-2, -7, 31], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
+ tensor([-1, -7, 3], dtype=torch.int8)
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.broadcast_to,
+ r"""
+broadcast_to(input, shape) -> Tensor
+
+Broadcasts :attr:`input` to the shape :attr:`shape`.
+Equivalent to calling ``input.expand(shape)``. See :meth:`~Tensor.expand` for details.
+
+Args:
+ {input}
+ shape (list, tuple, or :class:`torch.Size`): the new shape.
+
+Example::
+
+ >>> x = torch.tensor([1, 2, 3])
+ >>> torch.broadcast_to(x, (3, 3))
+ tensor([[1, 2, 3],
+ [1, 2, 3],
+ [1, 2, 3]])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.stack,
+ r"""
+stack(tensors, dim=0, *, out=None) -> Tensor
+
+Concatenates a sequence of tensors along a new dimension.
+
+All tensors need to be of the same size.
+
+.. seealso::
+
+ :func:`torch.cat` concatenates the given sequence along an existing dimension.
+
+Arguments:
+ tensors (sequence of Tensors): sequence of tensors to concatenate
+ dim (int): dimension to insert. Has to be between 0 and the number
+ of dimensions of concatenated tensors (inclusive)
+
+Keyword args:
+ {out}
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.hstack,
+ r"""
+hstack(tensors, *, out=None) -> Tensor
+
+Stack tensors in sequence horizontally (column wise).
+
+This is equivalent to concatenation along the first axis for 1-D tensors, and along the second axis for all other tensors.
+
+Args:
+ tensors (sequence of Tensors): sequence of tensors to concatenate
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.tensor([1, 2, 3])
+ >>> b = torch.tensor([4, 5, 6])
+ >>> torch.hstack((a,b))
+ tensor([1, 2, 3, 4, 5, 6])
+ >>> a = torch.tensor([[1],[2],[3]])
+ >>> b = torch.tensor([[4],[5],[6]])
+ >>> torch.hstack((a,b))
+ tensor([[1, 4],
+ [2, 5],
+ [3, 6]])
+
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.vstack,
+ r"""
+vstack(tensors, *, out=None) -> Tensor
+
+Stack tensors in sequence vertically (row wise).
+
+This is equivalent to concatenation along the first axis after all 1-D tensors have been reshaped by :func:`torch.atleast_2d`.
+
+Args:
+ tensors (sequence of Tensors): sequence of tensors to concatenate
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.tensor([1, 2, 3])
+ >>> b = torch.tensor([4, 5, 6])
+ >>> torch.vstack((a,b))
+ tensor([[1, 2, 3],
+ [4, 5, 6]])
+ >>> a = torch.tensor([[1],[2],[3]])
+ >>> b = torch.tensor([[4],[5],[6]])
+ >>> torch.vstack((a,b))
+ tensor([[1],
+ [2],
+ [3],
+ [4],
+ [5],
+ [6]])
+
+
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.dstack,
+ r"""
+dstack(tensors, *, out=None) -> Tensor
+
+Stack tensors in sequence depthwise (along third axis).
+
+This is equivalent to concatenation along the third axis after 1-D and 2-D tensors have been reshaped by :func:`torch.atleast_3d`.
+
+Args:
+ tensors (sequence of Tensors): sequence of tensors to concatenate
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.tensor([1, 2, 3])
+ >>> b = torch.tensor([4, 5, 6])
+ >>> torch.dstack((a,b))
+ tensor([[[1, 4],
+ [2, 5],
+ [3, 6]]])
+ >>> a = torch.tensor([[1],[2],[3]])
+ >>> b = torch.tensor([[4],[5],[6]])
+ >>> torch.dstack((a,b))
+ tensor([[[1, 4]],
+ [[2, 5]],
+ [[3, 6]]])
+
+
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.tensor_split,
+ r"""
+tensor_split(input, indices_or_sections, dim=0) -> List of Tensors
+
+Splits a tensor into multiple sub-tensors, all of which are views of :attr:`input`,
+along dimension :attr:`dim` according to the indices or number of sections specified
+by :attr:`indices_or_sections`. This function is based on NumPy's
+:func:`numpy.array_split`.
+
+Args:
+ input (Tensor): the tensor to split
+ indices_or_sections (Tensor, int or list or tuple of ints):
+ If :attr:`indices_or_sections` is an integer ``n`` or a zero dimensional long tensor
+ with value ``n``, :attr:`input` is split into ``n`` sections along dimension :attr:`dim`.
+ If :attr:`input` is divisible by ``n`` along dimension :attr:`dim`, each
+ section will be of equal size, :code:`input.size(dim) / n`. If :attr:`input`
+ is not divisible by ``n``, the sizes of the first :code:`int(input.size(dim) % n)`
+ sections will have size :code:`int(input.size(dim) / n) + 1`, and the rest will
+ have size :code:`int(input.size(dim) / n)`.
+
+ If :attr:`indices_or_sections` is a list or tuple of ints, or a one-dimensional long
+ tensor, then :attr:`input` is split along dimension :attr:`dim` at each of the indices
+ in the list, tuple or tensor. For instance, :code:`indices_or_sections=[2, 3]` and :code:`dim=0`
+ would result in the tensors :code:`input[:2]`, :code:`input[2:3]`, and :code:`input[3:]`.
+
+ If :attr:`indices_or_sections` is a tensor, it must be a zero-dimensional or one-dimensional
+ long tensor on the CPU.
+
+ dim (int, optional): dimension along which to split the tensor. Default: ``0``
+
+Example::
+
+ >>> x = torch.arange(8)
+ >>> torch.tensor_split(x, 3)
+ (tensor([0, 1, 2]), tensor([3, 4, 5]), tensor([6, 7]))
+
+ >>> x = torch.arange(7)
+ >>> torch.tensor_split(x, 3)
+ (tensor([0, 1, 2]), tensor([3, 4]), tensor([5, 6]))
+ >>> torch.tensor_split(x, (1, 6))
+ (tensor([0]), tensor([1, 2, 3, 4, 5]), tensor([6]))
+
+ >>> x = torch.arange(14).reshape(2, 7)
+ >>> x
+ tensor([[ 0, 1, 2, 3, 4, 5, 6],
+ [ 7, 8, 9, 10, 11, 12, 13]])
+ >>> torch.tensor_split(x, 3, dim=1)
+ (tensor([[0, 1, 2],
+ [7, 8, 9]]),
+ tensor([[ 3, 4],
+ [10, 11]]),
+ tensor([[ 5, 6],
+ [12, 13]]))
+ >>> torch.tensor_split(x, (1, 6), dim=1)
+ (tensor([[0],
+ [7]]),
+ tensor([[ 1, 2, 3, 4, 5],
+ [ 8, 9, 10, 11, 12]]),
+ tensor([[ 6],
+ [13]]))
+""",
+)
+
+add_docstr(
+ torch.chunk,
+ r"""
+chunk(input, chunks, dim=0) -> List of Tensors
+
+Attempts to split a tensor into the specified number of chunks. Each chunk is a view of
+the input tensor.
+
+
+.. note::
+
+ This function may return fewer than the specified number of chunks!
+
+.. seealso::
+
+ :func:`torch.tensor_split` a function that always returns exactly the specified number of chunks
+
+If the tensor size along the given dimension :attr:`dim` is divisible by :attr:`chunks`,
+all returned chunks will be the same size.
+If the tensor size along the given dimension :attr:`dim` is not divisible by :attr:`chunks`,
+all returned chunks will be the same size, except the last one.
+If such division is not possible, this function may return fewer
+than the specified number of chunks.
+
+Arguments:
+ input (Tensor): the tensor to split
+ chunks (int): number of chunks to return
+ dim (int): dimension along which to split the tensor
+
+Example:
+ >>> torch.arange(11).chunk(6)
+ (tensor([0, 1]),
+ tensor([2, 3]),
+ tensor([4, 5]),
+ tensor([6, 7]),
+ tensor([8, 9]),
+ tensor([10]))
+ >>> torch.arange(12).chunk(6)
+ (tensor([0, 1]),
+ tensor([2, 3]),
+ tensor([4, 5]),
+ tensor([6, 7]),
+ tensor([8, 9]),
+ tensor([10, 11]))
+ >>> torch.arange(13).chunk(6)
+ (tensor([0, 1, 2]),
+ tensor([3, 4, 5]),
+ tensor([6, 7, 8]),
+ tensor([ 9, 10, 11]),
+ tensor([12]))
+""",
+)
+
+add_docstr(
+ torch.unsafe_chunk,
+ r"""
+unsafe_chunk(input, chunks, dim=0) -> List of Tensors
+
+Works like :func:`torch.chunk` but without enforcing the autograd restrictions
+on inplace modification of the outputs.
+
+.. warning::
+ This function is safe to use as long as only the input, or only the outputs
+ are modified inplace after calling this function. It is user's
+ responsibility to ensure that is the case. If both the input and one or more
+ of the outputs are modified inplace, gradients computed by autograd will be
+ silently incorrect.
+""",
+)
+
+add_docstr(
+ torch.unsafe_split,
+ r"""
+unsafe_split(tensor, split_size_or_sections, dim=0) -> List of Tensors
+
+Works like :func:`torch.split` but without enforcing the autograd restrictions
+on inplace modification of the outputs.
+
+.. warning::
+ This function is safe to use as long as only the input, or only the outputs
+ are modified inplace after calling this function. It is user's
+ responsibility to ensure that is the case. If both the input and one or more
+ of the outputs are modified inplace, gradients computed by autograd will be
+ silently incorrect.
+""",
+)
+
+add_docstr(
+ torch.hsplit,
+ r"""
+hsplit(input, indices_or_sections) -> List of Tensors
+
+Splits :attr:`input`, a tensor with one or more dimensions, into multiple tensors
+horizontally according to :attr:`indices_or_sections`. Each split is a view of
+:attr:`input`.
+
+If :attr:`input` is one dimensional this is equivalent to calling
+torch.tensor_split(input, indices_or_sections, dim=0) (the split dimension is
+zero), and if :attr:`input` has two or more dimensions it's equivalent to calling
+torch.tensor_split(input, indices_or_sections, dim=1) (the split dimension is 1),
+except that if :attr:`indices_or_sections` is an integer it must evenly divide
+the split dimension or a runtime error will be thrown.
+
+This function is based on NumPy's :func:`numpy.hsplit`.
+
+Args:
+ input (Tensor): tensor to split.
+ indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.
+
+Example::
+ >>> t = torch.arange(16.0).reshape(4,4)
+ >>> t
+ tensor([[ 0., 1., 2., 3.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.],
+ [12., 13., 14., 15.]])
+ >>> torch.hsplit(t, 2)
+ (tensor([[ 0., 1.],
+ [ 4., 5.],
+ [ 8., 9.],
+ [12., 13.]]),
+ tensor([[ 2., 3.],
+ [ 6., 7.],
+ [10., 11.],
+ [14., 15.]]))
+ >>> torch.hsplit(t, [3, 6])
+ (tensor([[ 0., 1., 2.],
+ [ 4., 5., 6.],
+ [ 8., 9., 10.],
+ [12., 13., 14.]]),
+ tensor([[ 3.],
+ [ 7.],
+ [11.],
+ [15.]]),
+ tensor([], size=(4, 0)))
+
+""",
+)
+
+add_docstr(
+ torch.vsplit,
+ r"""
+vsplit(input, indices_or_sections) -> List of Tensors
+
+Splits :attr:`input`, a tensor with two or more dimensions, into multiple tensors
+vertically according to :attr:`indices_or_sections`. Each split is a view of
+:attr:`input`.
+
+This is equivalent to calling torch.tensor_split(input, indices_or_sections, dim=0)
+(the split dimension is 0), except that if :attr:`indices_or_sections` is an integer
+it must evenly divide the split dimension or a runtime error will be thrown.
+
+This function is based on NumPy's :func:`numpy.vsplit`.
+
+Args:
+ input (Tensor): tensor to split.
+ indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.
+
+Example::
+ >>> t = torch.arange(16.0).reshape(4,4)
+ >>> t
+ tensor([[ 0., 1., 2., 3.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.],
+ [12., 13., 14., 15.]])
+ >>> torch.vsplit(t, 2)
+ (tensor([[0., 1., 2., 3.],
+ [4., 5., 6., 7.]]),
+ tensor([[ 8., 9., 10., 11.],
+ [12., 13., 14., 15.]]))
+ >>> torch.vsplit(t, [3, 6])
+ (tensor([[ 0., 1., 2., 3.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.]]),
+ tensor([[12., 13., 14., 15.]]),
+ tensor([], size=(0, 4)))
+
+""",
+)
+
+add_docstr(
+ torch.dsplit,
+ r"""
+dsplit(input, indices_or_sections) -> List of Tensors
+
+Splits :attr:`input`, a tensor with three or more dimensions, into multiple tensors
+depthwise according to :attr:`indices_or_sections`. Each split is a view of
+:attr:`input`.
+
+This is equivalent to calling torch.tensor_split(input, indices_or_sections, dim=2)
+(the split dimension is 2), except that if :attr:`indices_or_sections` is an integer
+it must evenly divide the split dimension or a runtime error will be thrown.
+
+This function is based on NumPy's :func:`numpy.dsplit`.
+
+Args:
+ input (Tensor): tensor to split.
+ indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.
+
+Example::
+ >>> t = torch.arange(16.0).reshape(2, 2, 4)
+ >>> t
+ tensor([[[ 0., 1., 2., 3.],
+ [ 4., 5., 6., 7.]],
+ [[ 8., 9., 10., 11.],
+ [12., 13., 14., 15.]]])
+ >>> torch.dsplit(t, 2)
+ (tensor([[[ 0., 1.],
+ [ 4., 5.]],
+ [[ 8., 9.],
+ [12., 13.]]]),
+ tensor([[[ 2., 3.],
+ [ 6., 7.]],
+ [[10., 11.],
+ [14., 15.]]]))
+
+ >>> torch.dsplit(t, [3, 6])
+ (tensor([[[ 0., 1., 2.],
+ [ 4., 5., 6.]],
+ [[ 8., 9., 10.],
+ [12., 13., 14.]]]),
+ tensor([[[ 3.],
+ [ 7.]],
+ [[11.],
+ [15.]]]),
+ tensor([], size=(2, 2, 0)))
+
+""",
+)
+
+add_docstr(
+ torch.can_cast,
+ r"""
+can_cast(from, to) -> bool
+
+Determines if a type conversion is allowed under PyTorch casting rules
+described in the type promotion :ref:`documentation <type-promotion-doc>`.
+
+Args:
+ from (dtype): The original :class:`torch.dtype`.
+ to (dtype): The target :class:`torch.dtype`.
+
+Example::
+
+ >>> torch.can_cast(torch.double, torch.float)
+ True
+ >>> torch.can_cast(torch.float, torch.int)
+ False
+""",
+)
+
+add_docstr(
+ torch.corrcoef,
+ r"""
+corrcoef(input) -> Tensor
+
+Estimates the Pearson product-moment correlation coefficient matrix of the variables given by the :attr:`input` matrix,
+where rows are the variables and columns are the observations.
+
+.. note::
+
+ The correlation coefficient matrix R is computed using the covariance matrix C as given by
+ :math:`R_{ij} = \frac{ C_{ij} } { \sqrt{ C_{ii} * C_{jj} } }`
+
+.. note::
+
+ Due to floating point rounding, the resulting array may not be Hermitian and its diagonal elements may not be 1.
+ The real and imaginary values are clipped to the interval [-1, 1] in an attempt to improve this situation.
+
+Args:
+ input (Tensor): A 2D matrix containing multiple variables and observations, or a
+ Scalar or 1D vector representing a single variable.
+
+Returns:
+ (Tensor) The correlation coefficient matrix of the variables.
+
+.. seealso::
+
+ :func:`torch.cov` covariance matrix.
+
+Example::
+
+ >>> x = torch.tensor([[0, 1, 2], [2, 1, 0]])
+ >>> torch.corrcoef(x)
+ tensor([[ 1., -1.],
+ [-1., 1.]])
+ >>> x = torch.randn(2, 4)
+ >>> x
+ tensor([[-0.2678, -0.0908, -0.3766, 0.2780],
+ [-0.5812, 0.1535, 0.2387, 0.2350]])
+ >>> torch.corrcoef(x)
+ tensor([[1.0000, 0.3582],
+ [0.3582, 1.0000]])
+ >>> torch.corrcoef(x[0])
+ tensor(1.)
+""",
+)
+
+add_docstr(
+ torch.cov,
+ r"""
+cov(input, *, correction=1, fweights=None, aweights=None) -> Tensor
+
+Estimates the covariance matrix of the variables given by the :attr:`input` matrix, where rows are
+the variables and columns are the observations.
+
+A covariance matrix is a square matrix giving the covariance of each pair of variables. The diagonal contains
+the variance of each variable (covariance of a variable with itself). By definition, if :attr:`input` represents
+a single variable (Scalar or 1D) then its variance is returned.
+
+The sample covariance of the variables :math:`x` and :math:`y` is given by:
+
+.. math::
+ \text{cov}(x,y) = \frac{\sum^{N}_{i = 1}(x_{i} - \bar{x})(y_{i} - \bar{y})}{\max(0,~N~-~\delta N)}
+
+where :math:`\bar{x}` and :math:`\bar{y}` are the simple means of the :math:`x` and :math:`y` respectively, and
+:math:`\delta N` is the :attr:`correction`.
+
+If :attr:`fweights` and/or :attr:`aweights` are provided, the weighted covariance
+is calculated, which is given by:
+
+.. math::
+ \text{cov}_w(x,y) = \frac{\sum^{N}_{i = 1}w_i(x_{i} - \mu_x^*)(y_{i} - \mu_y^*)}
+ {\max(0,~\sum^{N}_{i = 1}w_i~-~\frac{\sum^{N}_{i = 1}w_ia_i}{\sum^{N}_{i = 1}w_i}~\delta N)}
+
+where :math:`w` denotes :attr:`fweights` or :attr:`aweights` (``f`` and ``a`` for brevity) based on whichever is
+provided, or :math:`w = f \times a` if both are provided, and
+:math:`\mu_x^* = \frac{\sum^{N}_{i = 1}w_ix_{i} }{\sum^{N}_{i = 1}w_i}` is the weighted mean of the variable. If not
+provided, ``f`` and/or ``a`` can be seen as a :math:`\mathbb{1}` vector of appropriate size.
+
+Args:
+ input (Tensor): A 2D matrix containing multiple variables and observations, or a
+ Scalar or 1D vector representing a single variable.
+
+Keyword Args:
+ correction (int, optional): difference between the sample size and sample degrees of freedom.
+ Defaults to Bessel's correction, ``correction = 1`` which returns the unbiased estimate,
+ even if both :attr:`fweights` and :attr:`aweights` are specified. ``correction = 0``
+ will return the simple average. Defaults to ``1``.
+ fweights (tensor, optional): A Scalar or 1D tensor of observation vector frequencies representing the number of
+ times each observation should be repeated. Its numel must equal the number of columns of :attr:`input`.
+ Must have integral dtype. Ignored if ``None``. Defaults to ``None``.
+ aweights (tensor, optional): A Scalar or 1D array of observation vector weights.
+ These relative weights are typically large for observations considered “important” and smaller for
+ observations considered less “important”. Its numel must equal the number of columns of :attr:`input`.
+ Must have floating point dtype. Ignored if ``None``. Defaults to ``None``.
+
+Returns:
+ (Tensor) The covariance matrix of the variables.
+
+.. seealso::
+
+ :func:`torch.corrcoef` normalized covariance matrix.
+
+Example::
+ >>> x = torch.tensor([[0, 2], [1, 1], [2, 0]]).T
+ >>> x
+ tensor([[0, 1, 2],
+ [2, 1, 0]])
+ >>> torch.cov(x)
+ tensor([[ 1., -1.],
+ [-1., 1.]])
+ >>> torch.cov(x, correction=0)
+ tensor([[ 0.6667, -0.6667],
+ [-0.6667, 0.6667]])
+ >>> fw = torch.randint(1, 10, (3,))
+ >>> fw
+ tensor([1, 6, 9])
+ >>> aw = torch.rand(3)
+ >>> aw
+ tensor([0.4282, 0.0255, 0.4144])
+ >>> torch.cov(x, fweights=fw, aweights=aw)
+ tensor([[ 0.4169, -0.4169],
+ [-0.4169, 0.4169]])
+""",
+)
+
+add_docstr(
+ torch.cat,
+ r"""
+cat(tensors, dim=0, *, out=None) -> Tensor
+
+Concatenates the given sequence of :attr:`seq` tensors in the given dimension.
+All tensors must either have the same shape (except in the concatenating
+dimension) or be empty.
+
+:func:`torch.cat` can be seen as an inverse operation for :func:`torch.split`
+and :func:`torch.chunk`.
+
+:func:`torch.cat` can be best understood via examples.
+
+.. seealso::
+
+ :func:`torch.stack` concatenates the given sequence along a new dimension.
+
+Args:
+ tensors (sequence of Tensors): any python sequence of tensors of the same type.
+ Non-empty tensors provided must have the same shape, except in the
+ cat dimension.
+ dim (int, optional): the dimension over which the tensors are concatenated
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> x = torch.randn(2, 3)
+ >>> x
+ tensor([[ 0.6580, -1.0969, -0.4614],
+ [-0.1034, -0.5790, 0.1497]])
+ >>> torch.cat((x, x, x), 0)
+ tensor([[ 0.6580, -1.0969, -0.4614],
+ [-0.1034, -0.5790, 0.1497],
+ [ 0.6580, -1.0969, -0.4614],
+ [-0.1034, -0.5790, 0.1497],
+ [ 0.6580, -1.0969, -0.4614],
+ [-0.1034, -0.5790, 0.1497]])
+ >>> torch.cat((x, x, x), 1)
+ tensor([[ 0.6580, -1.0969, -0.4614, 0.6580, -1.0969, -0.4614, 0.6580,
+ -1.0969, -0.4614],
+ [-0.1034, -0.5790, 0.1497, -0.1034, -0.5790, 0.1497, -0.1034,
+ -0.5790, 0.1497]])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.concat,
+ r"""
+concat(tensors, dim=0, *, out=None) -> Tensor
+
+Alias of :func:`torch.cat`.
+""",
+)
+
+add_docstr(
+ torch.concatenate,
+ r"""
+concatenate(tensors, axis=0, out=None) -> Tensor
+
+Alias of :func:`torch.cat`.
+""",
+)
+
+add_docstr(
+ torch.ceil,
+ r"""
+ceil(input, *, out=None) -> Tensor
+
+Returns a new tensor with the ceil of the elements of :attr:`input`,
+the smallest integer greater than or equal to each element.
+
+For integer inputs, follows the array-api convention of returning a
+copy of the input tensor.
+
+.. math::
+ \text{out}_{i} = \left\lceil \text{input}_{i} \right\rceil
+"""
+ + r"""
+Args:
+ {input}
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.randn(4)
+ >>> a
+ tensor([-0.6341, -1.4208, -1.0900, 0.5826])
+ >>> torch.ceil(a)
+ tensor([-0., -1., -1., 1.])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.real,
+ r"""
+real(input) -> Tensor
+
+Returns a new tensor containing real values of the :attr:`self` tensor.
+The returned tensor and :attr:`self` share the same underlying storage.
+
+Args:
+ {input}
+
+Example::
+
+ >>> x=torch.randn(4, dtype=torch.cfloat)
+ >>> x
+ tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
+ >>> x.real
+ tensor([ 0.3100, -0.5445, -1.6492, -0.0638])
+
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.imag,
+ r"""
+imag(input) -> Tensor
+
+Returns a new tensor containing imaginary values of the :attr:`self` tensor.
+The returned tensor and :attr:`self` share the same underlying storage.
+
+.. warning::
+ :func:`imag` is only supported for tensors with complex dtypes.
+
+Args:
+ {input}
+
+Example::
+
+ >>> x=torch.randn(4, dtype=torch.cfloat)
+ >>> x
+ tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
+ >>> x.imag
+ tensor([ 0.3553, -0.7896, -0.0633, -0.8119])
+
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.view_as_real,
+ r"""
+view_as_real(input) -> Tensor
+
+Returns a view of :attr:`input` as a real tensor. For an input complex tensor of
+:attr:`size` :math:`m1, m2, \dots, mi`, this function returns a new
+real tensor of size :math:`m1, m2, \dots, mi, 2`, where the last dimension of size 2
+represents the real and imaginary components of complex numbers.
+
+.. warning::
+ :func:`view_as_real` is only supported for tensors with ``complex dtypes``.
+
+Args:
+ {input}
+
+Example::
+
+ >>> x=torch.randn(4, dtype=torch.cfloat)
+ >>> x
+ tensor([(0.4737-0.3839j), (-0.2098-0.6699j), (0.3470-0.9451j), (-0.5174-1.3136j)])
+ >>> torch.view_as_real(x)
+ tensor([[ 0.4737, -0.3839],
+ [-0.2098, -0.6699],
+ [ 0.3470, -0.9451],
+ [-0.5174, -1.3136]])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.view_as_complex,
+ r"""
+view_as_complex(input) -> Tensor
+
+Returns a view of :attr:`input` as a complex tensor. For an input complex
+tensor of :attr:`size` :math:`m1, m2, \dots, mi, 2`, this function returns a
+new complex tensor of :attr:`size` :math:`m1, m2, \dots, mi` where the last
+dimension of the input tensor is expected to represent the real and imaginary
+components of complex numbers.
+
+.. warning::
+ :func:`view_as_complex` is only supported for tensors with
+ :class:`torch.dtype` ``torch.float64`` and ``torch.float32``. The input is
+ expected to have the last dimension of :attr:`size` 2. In addition, the
+ tensor must have a `stride` of 1 for its last dimension. The strides of all
+ other dimensions must be even numbers.
+
+Args:
+ {input}
+
+Example::
+
+ >>> x=torch.randn(4, 2)
+ >>> x
+ tensor([[ 1.6116, -0.5772],
+ [-1.4606, -0.9120],
+ [ 0.0786, -1.7497],
+ [-0.6561, -1.6623]])
+ >>> torch.view_as_complex(x)
+ tensor([(1.6116-0.5772j), (-1.4606-0.9120j), (0.0786-1.7497j), (-0.6561-1.6623j)])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.reciprocal,
+ r"""
+reciprocal(input, *, out=None) -> Tensor
+
+Returns a new tensor with the reciprocal of the elements of :attr:`input`
+
+.. math::
+ \text{out}_{i} = \frac{1}{\text{input}_{i}}
+
+.. note::
+ Unlike NumPy's reciprocal, torch.reciprocal supports integral inputs. Integral
+ inputs to reciprocal are automatically :ref:`promoted <type-promotion-doc>` to
+ the default scalar type.
+"""
+ + r"""
+Args:
+ {input}
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.randn(4)
+ >>> a
+ tensor([-0.4595, -2.1219, -1.4314, 0.7298])
+ >>> torch.reciprocal(a)
+ tensor([-2.1763, -0.4713, -0.6986, 1.3702])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.cholesky,
+ r"""
+cholesky(input, upper=False, *, out=None) -> Tensor
+
+Computes the Cholesky decomposition of a symmetric positive-definite
+matrix :math:`A` or for batches of symmetric positive-definite matrices.
+
+If :attr:`upper` is ``True``, the returned matrix ``U`` is upper-triangular, and
+the decomposition has the form:
+
+.. math::
+
+ A = U^TU
+
+If :attr:`upper` is ``False``, the returned matrix ``L`` is lower-triangular, and
+the decomposition has the form:
+
+.. math::
+
+ A = LL^T
+
+If :attr:`upper` is ``True``, and :math:`A` is a batch of symmetric positive-definite
+matrices, then the returned tensor will be composed of upper-triangular Cholesky factors
+of each of the individual matrices. Similarly, when :attr:`upper` is ``False``, the returned
+tensor will be composed of lower-triangular Cholesky factors of each of the individual
+matrices.
+
+.. warning::
+
+ :func:`torch.cholesky` is deprecated in favor of :func:`torch.linalg.cholesky`
+ and will be removed in a future PyTorch release.
+
+ ``L = torch.cholesky(A)`` should be replaced with
+
+ .. code:: python
+
+ L = torch.linalg.cholesky(A)
+
+ ``U = torch.cholesky(A, upper=True)`` should be replaced with
+
+ .. code:: python
+
+ U = torch.linalg.cholesky(A).mH
+
+ This transform will produce equivalent results for all valid (symmetric positive definite) inputs.
+
+Args:
+ input (Tensor): the input tensor :math:`A` of size :math:`(*, n, n)` where `*` is zero or more
+ batch dimensions consisting of symmetric positive-definite matrices.
+ upper (bool, optional): flag that indicates whether to return a
+ upper or lower triangular matrix. Default: ``False``
+
+Keyword args:
+ out (Tensor, optional): the output matrix
+
+Example::
+
+ >>> a = torch.randn(3, 3)
+ >>> a = a @ a.mT + 1e-3 # make symmetric positive-definite
+ >>> l = torch.cholesky(a)
+ >>> a
+ tensor([[ 2.4112, -0.7486, 1.4551],
+ [-0.7486, 1.3544, 0.1294],
+ [ 1.4551, 0.1294, 1.6724]])
+ >>> l
+ tensor([[ 1.5528, 0.0000, 0.0000],
+ [-0.4821, 1.0592, 0.0000],
+ [ 0.9371, 0.5487, 0.7023]])
+ >>> l @ l.mT
+ tensor([[ 2.4112, -0.7486, 1.4551],
+ [-0.7486, 1.3544, 0.1294],
+ [ 1.4551, 0.1294, 1.6724]])
+ >>> a = torch.randn(3, 2, 2) # Example for batched input
+ >>> a = a @ a.mT + 1e-03 # make symmetric positive-definite
+ >>> l = torch.cholesky(a)
+ >>> z = l @ l.mT
+ >>> torch.dist(z, a)
+ tensor(2.3842e-07)
+""",
+)
+
+# Docstring for torch.cholesky_solve: solve AX = B given Cholesky factor L of A.
+add_docstr(
+ torch.cholesky_solve,
+ r"""
+cholesky_solve(B, L, upper=False, *, out=None) -> Tensor
+
+Computes the solution of a system of linear equations with complex Hermitian
+or real symmetric positive-definite lhs given its Cholesky decomposition.
+
+Let :math:`A` be a complex Hermitian or real symmetric positive-definite matrix,
+and :math:`L` its Cholesky decomposition such that:
+
+.. math::
+
+ A = LL^{\text{H}}
+
+where :math:`L^{\text{H}}` is the conjugate transpose when :math:`L` is complex,
+and the transpose when :math:`L` is real-valued.
+
+Returns the solution :math:`X` of the following linear system:
+
+.. math::
+
+ AX = B
+
+Supports inputs of float, double, cfloat and cdouble dtypes.
+Also supports batches of matrices, and if :math:`A` or :math:`B` is a batch of matrices
+then the output has the same batch dimensions.
+
+Args:
+ B (Tensor): right-hand side tensor of shape `(*, n, k)`
+ where :math:`*` is zero or more batch dimensions
+ L (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
+ consisting of lower or upper triangular Cholesky decompositions of
+ symmetric or Hermitian positive-definite matrices.
+ upper (bool, optional): flag that indicates whether :math:`L` is lower triangular
+ or upper triangular. Default: ``False``.
+
+Keyword args:
+ out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
+
+Example::
+
+ >>> A = torch.randn(3, 3)
+ >>> A = A @ A.T + torch.eye(3) * 1e-3 # Creates a symmetric positive-definite matrix
+ >>> L = torch.linalg.cholesky(A) # Extract Cholesky decomposition
+ >>> B = torch.randn(3, 2)
+ >>> torch.cholesky_solve(B, L)
+ tensor([[ -8.1625, 19.6097],
+ [ -5.8398, 14.2387],
+ [ -4.3771, 10.4173]])
+ >>> A.inverse() @ B
+ tensor([[ -8.1626, 19.6097],
+ [ -5.8398, 14.2387],
+ [ -4.3771, 10.4173]])
+
+ >>> A = torch.randn(3, 2, 2, dtype=torch.complex64)
+ >>> A = A @ A.mH + torch.eye(2) * 1e-3 # Batch of Hermitian positive-definite matrices
+ >>> L = torch.linalg.cholesky(A)
+ >>> B = torch.randn(2, 1, dtype=torch.complex64)
+ >>> X = torch.cholesky_solve(B, L)
+ >>> torch.dist(X, A.inverse() @ B)
+ tensor(1.6881e-5)
+""",
+)
+
+# Docstring for torch.cholesky_inverse: invert A given its Cholesky factor L.
+add_docstr(
+ torch.cholesky_inverse,
+ r"""
+cholesky_inverse(L, upper=False, *, out=None) -> Tensor
+
+Computes the inverse of a complex Hermitian or real symmetric
+positive-definite matrix given its Cholesky decomposition.
+
+Let :math:`A` be a complex Hermitian or real symmetric positive-definite matrix,
+and :math:`L` its Cholesky decomposition such that:
+
+.. math::
+
+ A = LL^{\text{H}}
+
+where :math:`L^{\text{H}}` is the conjugate transpose when :math:`L` is complex,
+and the transpose when :math:`L` is real-valued.
+
+Computes the inverse matrix :math:`A^{-1}`.
+
+Supports input of float, double, cfloat and cdouble dtypes.
+Also supports batches of matrices, and if :math:`A` is a batch of matrices
+then the output has the same batch dimensions.
+
+Args:
+ L (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
+ consisting of lower or upper triangular Cholesky decompositions of
+ symmetric or Hermitian positive-definite matrices.
+ upper (bool, optional): flag that indicates whether :math:`L` is lower triangular
+ or upper triangular. Default: ``False``
+
+Keyword args:
+ out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
+
+Example::
+
+ >>> A = torch.randn(3, 3)
+ >>> A = A @ A.T + torch.eye(3) * 1e-3 # Creates a symmetric positive-definite matrix
+ >>> L = torch.linalg.cholesky(A) # Extract Cholesky decomposition
+ >>> torch.cholesky_inverse(L)
+ tensor([[ 1.9314, 1.2251, -0.0889],
+ [ 1.2251, 2.4439, 0.2122],
+ [-0.0889, 0.2122, 0.1412]])
+ >>> A.inverse()
+ tensor([[ 1.9314, 1.2251, -0.0889],
+ [ 1.2251, 2.4439, 0.2122],
+ [-0.0889, 0.2122, 0.1412]])
+
+ >>> A = torch.randn(3, 2, 2, dtype=torch.complex64)
+ >>> A = A @ A.mH + torch.eye(2) * 1e-3 # Batch of Hermitian positive-definite matrices
+ >>> L = torch.linalg.cholesky(A)
+ >>> torch.dist(torch.inverse(A), torch.cholesky_inverse(L))
+ tensor(5.6358e-7)
+""",
+)
+
+# Docstring for torch.clone: differentiable copy of a tensor.
+add_docstr(
+ torch.clone,
+ r"""
+clone(input, *, memory_format=torch.preserve_format) -> Tensor
+
+Returns a copy of :attr:`input`.
+
+.. note::
+
+ This function is differentiable, so gradients will flow back from the
+ result of this operation to :attr:`input`. To create a tensor without an
+ autograd relationship to :attr:`input` see :meth:`~Tensor.detach`.
+
+Args:
+ {input}
+
+Keyword args:
+ {memory_format}
+""".format(
+ **common_args
+ ),
+)
+
+# Docstring for torch.clamp. Fix: the note's cross-reference had lost its
+# angle-bracket link target; restore ``<torch.clamp>`` so the :func: role renders.
+add_docstr(
+ torch.clamp,
+ r"""
+clamp(input, min=None, max=None, *, out=None) -> Tensor
+
+Clamps all elements in :attr:`input` into the range `[` :attr:`min`, :attr:`max` `]`.
+Letting min_value and max_value be :attr:`min` and :attr:`max`, respectively, this returns:
+
+.. math::
+ y_i = \min(\max(x_i, \text{min\_value}_i), \text{max\_value}_i)
+
+If :attr:`min` is ``None``, there is no lower bound.
+Or, if :attr:`max` is ``None`` there is no upper bound.
+"""
+ + r"""
+
+.. note::
+ If :attr:`min` is greater than :attr:`max` :func:`torch.clamp(..., min, max) <torch.clamp>`
+ sets all elements in :attr:`input` to the value of :attr:`max`.
+
+Args:
+ {input}
+ min (Number or Tensor, optional): lower-bound of the range to be clamped to
+ max (Number or Tensor, optional): upper-bound of the range to be clamped to
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.randn(4)
+ >>> a
+ tensor([-1.7120, 0.1734, -0.0478, -0.0922])
+ >>> torch.clamp(a, min=-0.5, max=0.5)
+ tensor([-0.5000, 0.1734, -0.0478, -0.0922])
+
+ >>> min = torch.linspace(-1, 1, steps=4)
+ >>> torch.clamp(a, min=min)
+ tensor([-1.0000, 0.1734, 0.3333, 1.0000])
+
+""".format(
+ **common_args
+ ),
+)
+
+# Docstring for torch.clip (alias of torch.clamp).
+add_docstr(
+ torch.clip,
+ r"""
+clip(input, min=None, max=None, *, out=None) -> Tensor
+
+Alias for :func:`torch.clamp`.
+""",
+)
+
+# Docstring for torch.column_stack: hstack with 0/1-D tensors reshaped to columns.
+add_docstr(
+ torch.column_stack,
+ r"""
+column_stack(tensors, *, out=None) -> Tensor
+
+Creates a new tensor by horizontally stacking the tensors in :attr:`tensors`.
+
+Equivalent to ``torch.hstack(tensors)``, except each zero or one dimensional tensor ``t``
+in :attr:`tensors` is first reshaped into a ``(t.numel(), 1)`` column before being stacked horizontally.
+
+Args:
+ tensors (sequence of Tensors): sequence of tensors to concatenate
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.tensor([1, 2, 3])
+ >>> b = torch.tensor([4, 5, 6])
+ >>> torch.column_stack((a, b))
+ tensor([[1, 4],
+ [2, 5],
+ [3, 6]])
+ >>> a = torch.arange(5)
+ >>> b = torch.arange(10).reshape(5, 2)
+ >>> torch.column_stack((a, b, b))
+ tensor([[0, 0, 1, 0, 1],
+ [1, 2, 3, 2, 3],
+ [2, 4, 5, 4, 5],
+ [3, 6, 7, 6, 7],
+ [4, 8, 9, 8, 9]])
+
+""".format(
+ **common_args
+ ),
+)
+
+# Docstring for torch.complex: build a complex tensor from real and imaginary parts.
+add_docstr(
+ torch.complex,
+ r"""
+complex(real, imag, *, out=None) -> Tensor
+
+Constructs a complex tensor with its real part equal to :attr:`real` and its
+imaginary part equal to :attr:`imag`.
+
+Args:
+ real (Tensor): The real part of the complex tensor. Must be half, float or double.
+ imag (Tensor): The imaginary part of the complex tensor. Must be same dtype
+ as :attr:`real`.
+
+Keyword args:
+ out (Tensor): If the inputs are ``torch.float32``, must be
+ ``torch.complex64``. If the inputs are ``torch.float64``, must be
+ ``torch.complex128``.
+
+Example::
+
+ >>> real = torch.tensor([1, 2], dtype=torch.float32)
+ >>> imag = torch.tensor([3, 4], dtype=torch.float32)
+ >>> z = torch.complex(real, imag)
+ >>> z
+ tensor([(1.+3.j), (2.+4.j)])
+ >>> z.dtype
+ torch.complex64
+
+""",
+)
+
+# Docstring for torch.polar. Fix: the ``std::polar`` external hyperlink had lost
+# its URL target; restore the cppreference address so the link renders.
+add_docstr(
+ torch.polar,
+ r"""
+polar(abs, angle, *, out=None) -> Tensor
+
+Constructs a complex tensor whose elements are Cartesian coordinates
+corresponding to the polar coordinates with absolute value :attr:`abs` and angle
+:attr:`angle`.
+
+.. math::
+ \text{out} = \text{abs} \cdot \cos(\text{angle}) + \text{abs} \cdot \sin(\text{angle}) \cdot j
+
+.. note::
+ `torch.polar` is similar to
+ `std::polar <https://en.cppreference.com/w/cpp/numeric/complex/polar>`_
+ and does not compute the polar decomposition
+ of a complex tensor like Python's `cmath.polar` and SciPy's `linalg.polar` do.
+ The behavior of this function is undefined if `abs` is negative or NaN, or if `angle` is
+ infinite.
+
+"""
+ + r"""
+Args:
+ abs (Tensor): The absolute value the complex tensor. Must be float or double.
+ angle (Tensor): The angle of the complex tensor. Must be same dtype as
+ :attr:`abs`.
+
+Keyword args:
+ out (Tensor): If the inputs are ``torch.float32``, must be
+ ``torch.complex64``. If the inputs are ``torch.float64``, must be
+ ``torch.complex128``.
+
+Example::
+
+ >>> import numpy as np
+ >>> abs = torch.tensor([1, 2], dtype=torch.float64)
+ >>> angle = torch.tensor([np.pi / 2, 5 * np.pi / 4], dtype=torch.float64)
+ >>> z = torch.polar(abs, angle)
+ >>> z
+ tensor([(0.0000+1.0000j), (-1.4142-1.4142j)], dtype=torch.complex128)
+""",
+)
+
+# Docstring for torch.conj_physical: eager (materialized) elementwise conjugate.
+add_docstr(
+ torch.conj_physical,
+ r"""
+conj_physical(input, *, out=None) -> Tensor
+
+Computes the element-wise conjugate of the given :attr:`input` tensor.
+If :attr:`input` has a non-complex dtype, this function just returns :attr:`input`.
+
+.. note::
+ This performs the conjugate operation regardless of the fact conjugate bit is set or not.
+
+.. warning:: In the future, :func:`torch.conj_physical` may return a non-writeable view for an :attr:`input` of
+ non-complex dtype. It's recommended that programs not modify the tensor returned by :func:`torch.conj_physical`
+ when :attr:`input` is of non-complex dtype to be compatible with this change.
+
+.. math::
+ \text{out}_{i} = conj(\text{input}_{i})
+"""
+ + r"""
+Args:
+ {input}
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> torch.conj_physical(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))
+ tensor([-1 - 1j, -2 - 2j, 3 + 3j])
+""".format(
+ **common_args
+ ),
+)
+
+# Docstring for torch.conj (lazy conjugation via the conjugate bit).
+# Fix: the warning referred to :func:`torch.conj_physical` — a copy-paste from
+# that function's docstring — but this warning is about torch.conj's own return.
+add_docstr(
+ torch.conj,
+ r"""
+conj(input) -> Tensor
+
+Returns a view of :attr:`input` with a flipped conjugate bit. If :attr:`input` has a non-complex dtype,
+this function just returns :attr:`input`.
+
+.. note::
+ :func:`torch.conj` performs a lazy conjugation, but the actual conjugated tensor can be materialized
+ at any time using :func:`torch.resolve_conj`.
+
+.. warning:: In the future, :func:`torch.conj` may return a non-writeable view for an :attr:`input` of
+ non-complex dtype. It's recommended that programs not modify the tensor returned by :func:`torch.conj`
+ when :attr:`input` is of non-complex dtype to be compatible with this change.
+
+Args:
+ {input}
+
+Example::
+
+ >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])
+ >>> x.is_conj()
+ False
+ >>> y = torch.conj(x)
+ >>> y.is_conj()
+ True
+""".format(
+ **common_args
+ ),
+)
+
+# Docstring for torch.resolve_conj: materialize a lazily-conjugated tensor.
+add_docstr(
+ torch.resolve_conj,
+ r"""
+resolve_conj(input) -> Tensor
+
+Returns a new tensor with materialized conjugation if :attr:`input`'s conjugate bit is set to `True`,
+else returns :attr:`input`. The output tensor will always have its conjugate bit set to `False`.
+
+Args:
+ {input}
+
+Example::
+
+ >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])
+ >>> y = x.conj()
+ >>> y.is_conj()
+ True
+ >>> z = y.resolve_conj()
+ >>> z
+ tensor([-1 - 1j, -2 - 2j, 3 + 3j])
+ >>> z.is_conj()
+ False
+""".format(
+ **common_args
+ ),
+)
+
+# Docstring for torch.resolve_neg: materialize a lazily-negated tensor.
+add_docstr(
+ torch.resolve_neg,
+ r"""
+resolve_neg(input) -> Tensor
+
+Returns a new tensor with materialized negation if :attr:`input`'s negative bit is set to `True`,
+else returns :attr:`input`. The output tensor will always have its negative bit set to `False`.
+
+Args:
+ {input}
+
+Example::
+
+ >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])
+ >>> y = x.conj()
+ >>> z = y.imag
+ >>> z.is_neg()
+ True
+ >>> out = z.resolve_neg()
+ >>> out
+ tensor([-1., -2., 3.])
+ >>> out.is_neg()
+ False
+""".format(
+ **common_args
+ ),
+)
+
+# Docstring for torch.copysign. Fixes: (1) the example assigned ``b`` but never
+# echoed it, so the shown tensor output had no producing statement — add ``>>> b``;
+# (2) restore the stripped ``<broadcasting-semantics>`` :ref: target.
+add_docstr(
+ torch.copysign,
+ r"""
+copysign(input, other, *, out=None) -> Tensor
+
+Create a new floating-point tensor with the magnitude of :attr:`input` and the sign of :attr:`other`, elementwise.
+
+.. math::
+ \text{out}_{i} = \begin{cases}
+ -|\text{input}_{i}| & \text{if } \text{other}_{i} \leq -0.0 \\
+ |\text{input}_{i}| & \text{if } \text{other}_{i} \geq 0.0 \\
+ \end{cases}
+"""
+ + r"""
+
+Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
+and integer and float inputs.
+
+Args:
+ input (Tensor): magnitudes.
+ other (Tensor or Number): contains value(s) whose signbit(s) are
+ applied to the magnitudes in :attr:`input`.
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.randn(5)
+ >>> a
+ tensor([-1.2557, -0.0026, -0.5387, 0.4740, -0.9244])
+ >>> torch.copysign(a, 1)
+ tensor([1.2557, 0.0026, 0.5387, 0.4740, 0.9244])
+ >>> a = torch.randn(4, 4)
+ >>> a
+ tensor([[ 0.7079, 0.2778, -1.0249, 0.5719],
+ [-0.0059, -0.2600, -0.4475, -1.3948],
+ [ 0.3667, -0.9567, -2.5757, -0.1751],
+ [ 0.2046, -0.0742, 0.2998, -0.1054]])
+ >>> b = torch.randn(4)
+ >>> b
+ tensor([ 0.2373, 0.3120, 0.3190, -1.1128])
+ >>> torch.copysign(a, b)
+ tensor([[ 0.7079, 0.2778, 1.0249, -0.5719],
+ [ 0.0059, 0.2600, 0.4475, -1.3948],
+ [ 0.3667, 0.9567, 2.5757, -0.1751],
+ [ 0.2046, 0.0742, 0.2998, -0.1054]])
+ >>> a = torch.tensor([1.])
+ >>> b = torch.tensor([-0.])
+ >>> torch.copysign(a, b)
+ tensor([-1.])
+
+.. note::
+ copysign handles signed zeros. If the other argument has a negative zero (-0),
+ the corresponding output value will be negative.
+
+""".format(
+ **common_args
+ ),
+)
+
+# Docstring for torch.cos: elementwise cosine.
+add_docstr(
+ torch.cos,
+ r"""
+cos(input, *, out=None) -> Tensor
+
+Returns a new tensor with the cosine of the elements of :attr:`input`.
+
+.. math::
+ \text{out}_{i} = \cos(\text{input}_{i})
+"""
+ + r"""
+Args:
+ {input}
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.randn(4)
+ >>> a
+ tensor([ 1.4309, 1.2706, -0.8562, 0.9796])
+ >>> torch.cos(a)
+ tensor([ 0.1395, 0.2957, 0.6553, 0.5574])
+""".format(
+ **common_args
+ ),
+)
+
+# Docstring for torch.cosh. Fix: the Sleef note's external hyperlink had lost
+# its URL target; restore it so the link renders instead of a bare "here".
+add_docstr(
+ torch.cosh,
+ r"""
+cosh(input, *, out=None) -> Tensor
+
+Returns a new tensor with the hyperbolic cosine of the elements of
+:attr:`input`.
+
+.. math::
+ \text{out}_{i} = \cosh(\text{input}_{i})
+"""
+ + r"""
+Args:
+ {input}
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.randn(4)
+ >>> a
+ tensor([ 0.1632, 1.1835, -0.6979, -0.7325])
+ >>> torch.cosh(a)
+ tensor([ 1.0133, 1.7860, 1.2536, 1.2805])
+
+.. note::
+ When :attr:`input` is on the CPU, the implementation of torch.cosh may use
+ the Sleef library, which rounds very large results to infinity or negative
+ infinity. See `here <https://sleef.org/purec.xhtml>`_ for details.
+""".format(
+ **common_args
+ ),
+)
+
+# Docstring for torch.cross: vector cross product along a dimension of size 3.
+add_docstr(
+ torch.cross,
+ r"""
+cross(input, other, dim=None, *, out=None) -> Tensor
+
+
+Returns the cross product of vectors in dimension :attr:`dim` of :attr:`input`
+and :attr:`other`.
+
+Supports input of float, double, cfloat and cdouble dtypes. Also supports batches
+of vectors, for which it computes the product along the dimension :attr:`dim`.
+In this case, the output has the same batch dimensions as the inputs.
+
+.. warning::
+ If :attr:`dim` is not given, it defaults to the first dimension found
+ with the size 3. Note that this might be unexpected.
+
+ This behavior is deprecated and will be changed to match that of :func:`torch.linalg.cross`
+ in a future release.
+
+.. seealso::
+ :func:`torch.linalg.cross` which has dim=-1 as default.
+
+
+Args:
+ {input}
+ other (Tensor): the second input tensor
+ dim (int, optional): the dimension to take the cross-product in.
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.randn(4, 3)
+ >>> a
+ tensor([[-0.3956, 1.1455, 1.6895],
+ [-0.5849, 1.3672, 0.3599],
+ [-1.1626, 0.7180, -0.0521],
+ [-0.1339, 0.9902, -2.0225]])
+ >>> b = torch.randn(4, 3)
+ >>> b
+ tensor([[-0.0257, -1.4725, -1.2251],
+ [-1.1479, -0.7005, -1.9757],
+ [-1.3904, 0.3726, -1.1836],
+ [-0.9688, -0.7153, 0.2159]])
+ >>> torch.cross(a, b, dim=1)
+ tensor([[ 1.0844, -0.5281, 0.6120],
+ [-2.4490, -1.5687, 1.9792],
+ [-0.8304, -1.3037, 0.5650],
+ [-1.2329, 1.9883, 1.0551]])
+ >>> torch.cross(a, b)
+ tensor([[ 1.0844, -0.5281, 0.6120],
+ [-2.4490, -1.5687, 1.9792],
+ [-0.8304, -1.3037, 0.5650],
+ [-1.2329, 1.9883, 1.0551]])
+""".format(
+ **common_args
+ ),
+)
+
+# Docstring for torch.logcumsumexp. Fix: the example output ended with a doubled
+# closing paren ``]))`` — a ``tensor(...)`` repr closes with a single ``])``.
+add_docstr(
+ torch.logcumsumexp,
+ r"""
+logcumsumexp(input, dim, *, out=None) -> Tensor
+Returns the logarithm of the cumulative summation of the exponentiation of
+elements of :attr:`input` in the dimension :attr:`dim`.
+
+For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is
+
+ .. math::
+ \text{{logcumsumexp}}(x)_{{ij}} = \log \sum\limits_{{j=0}}^{{i}} \exp(x_{{ij}})
+
+Args:
+ {input}
+ dim (int): the dimension to do the operation over
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.randn(10)
+ >>> torch.logcumsumexp(a, dim=0)
+ tensor([-0.42296738, -0.04462666, 0.86278635, 0.94622083, 1.05277811,
+ 1.39202815, 1.83525007, 1.84492621, 2.06084887, 2.06844475])
+""".format(
+ **reduceops_common_args
+ ),
+)
+
+# Docstring for torch.cummax: cumulative maximum with argmax indices.
+add_docstr(
+ torch.cummax,
+ r"""
+cummax(input, dim, *, out=None) -> (Tensor, LongTensor)
+Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative maximum of
+elements of :attr:`input` in the dimension :attr:`dim`. And ``indices`` is the index
+location of each maximum value found in the dimension :attr:`dim`.
+
+.. math::
+ y_i = max(x_1, x_2, x_3, \dots, x_i)
+
+Args:
+ {input}
+ dim (int): the dimension to do the operation over
+
+Keyword args:
+ out (tuple, optional): the result tuple of two output tensors (values, indices)
+
+Example::
+
+ >>> a = torch.randn(10)
+ >>> a
+ tensor([-0.3449, -1.5447, 0.0685, -1.5104, -1.1706, 0.2259, 1.4696, -1.3284,
+ 1.9946, -0.8209])
+ >>> torch.cummax(a, dim=0)
+ torch.return_types.cummax(
+ values=tensor([-0.3449, -0.3449, 0.0685, 0.0685, 0.0685, 0.2259, 1.4696, 1.4696,
+ 1.9946, 1.9946]),
+ indices=tensor([0, 0, 2, 2, 2, 5, 6, 6, 8, 8]))
+""".format(
+ **reduceops_common_args
+ ),
+)
+
+# Docstring for torch.cummin. Fix: the prose said "each maximum value" — a
+# copy-paste from cummax; cummin's indices locate each *minimum* value.
+add_docstr(
+ torch.cummin,
+ r"""
+cummin(input, dim, *, out=None) -> (Tensor, LongTensor)
+Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative minimum of
+elements of :attr:`input` in the dimension :attr:`dim`. And ``indices`` is the index
+location of each minimum value found in the dimension :attr:`dim`.
+
+.. math::
+ y_i = min(x_1, x_2, x_3, \dots, x_i)
+
+Args:
+ {input}
+ dim (int): the dimension to do the operation over
+
+Keyword args:
+ out (tuple, optional): the result tuple of two output tensors (values, indices)
+
+Example::
+
+ >>> a = torch.randn(10)
+ >>> a
+ tensor([-0.2284, -0.6628, 0.0975, 0.2680, -1.3298, -0.4220, -0.3885, 1.1762,
+ 0.9165, 1.6684])
+ >>> torch.cummin(a, dim=0)
+ torch.return_types.cummin(
+ values=tensor([-0.2284, -0.6628, -0.6628, -0.6628, -1.3298, -1.3298, -1.3298, -1.3298,
+ -1.3298, -1.3298]),
+ indices=tensor([0, 1, 1, 1, 4, 4, 4, 4, 4, 4]))
+""".format(
+ **reduceops_common_args
+ ),
+)
+
+# Docstring for torch.cumprod: cumulative product along a dimension.
+add_docstr(
+ torch.cumprod,
+ r"""
+cumprod(input, dim, *, dtype=None, out=None) -> Tensor
+
+Returns the cumulative product of elements of :attr:`input` in the dimension
+:attr:`dim`.
+
+For example, if :attr:`input` is a vector of size N, the result will also be
+a vector of size N, with elements.
+
+.. math::
+ y_i = x_1 \times x_2\times x_3\times \dots \times x_i
+
+Args:
+ {input}
+ dim (int): the dimension to do the operation over
+
+Keyword args:
+ {dtype}
+ {out}
+
+Example::
+
+ >>> a = torch.randn(10)
+ >>> a
+ tensor([ 0.6001, 0.2069, -0.1919, 0.9792, 0.6727, 1.0062, 0.4126,
+ -0.2129, -0.4206, 0.1968])
+ >>> torch.cumprod(a, dim=0)
+ tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0158, -0.0065,
+ 0.0014, -0.0006, -0.0001])
+
+ >>> a[5] = 0.0
+ >>> torch.cumprod(a, dim=0)
+ tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0000, -0.0000,
+ 0.0000, -0.0000, -0.0000])
+""".format(
+ **reduceops_common_args
+ ),
+)
+
+# Docstring for torch.cumsum: cumulative sum along a dimension.
+add_docstr(
+ torch.cumsum,
+ r"""
+cumsum(input, dim, *, dtype=None, out=None) -> Tensor
+
+Returns the cumulative sum of elements of :attr:`input` in the dimension
+:attr:`dim`.
+
+For example, if :attr:`input` is a vector of size N, the result will also be
+a vector of size N, with elements.
+
+.. math::
+ y_i = x_1 + x_2 + x_3 + \dots + x_i
+
+Args:
+ {input}
+ dim (int): the dimension to do the operation over
+
+Keyword args:
+ {dtype}
+ {out}
+
+Example::
+
+ >>> a = torch.randn(10)
+ >>> a
+ tensor([-0.8286, -0.4890, 0.5155, 0.8443, 0.1865, -0.1752, -2.0595,
+ 0.1850, -1.1571, -0.4243])
+ >>> torch.cumsum(a, dim=0)
+ tensor([-0.8286, -1.3175, -0.8020, 0.0423, 0.2289, 0.0537, -2.0058,
+ -1.8209, -2.9780, -3.4022])
+""".format(
+ **reduceops_common_args
+ ),
+)
+
+# Docstring for torch.count_nonzero: count non-zero entries, optionally per-dim.
+add_docstr(
+ torch.count_nonzero,
+ r"""
+count_nonzero(input, dim=None) -> Tensor
+
+Counts the number of non-zero values in the tensor :attr:`input` along the given :attr:`dim`.
+If no dim is specified then all non-zeros in the tensor are counted.
+
+Args:
+ {input}
+ dim (int or tuple of ints, optional): Dim or tuple of dims along which to count non-zeros.
+
+Example::
+
+ >>> x = torch.zeros(3,3)
+ >>> x[torch.randn(3,3) > 0.5] = 1
+ >>> x
+ tensor([[0., 1., 1.],
+ [0., 0., 0.],
+ [0., 0., 1.]])
+ >>> torch.count_nonzero(x)
+ tensor(3)
+ >>> torch.count_nonzero(x, dim=0)
+ tensor([0, 1, 2])
+""".format(
+ **reduceops_common_args
+ ),
+)
+
+# Docstring for torch.dequantize: quantized tensor(s) -> fp32 tensor(s).
+add_docstr(
+ torch.dequantize,
+ r"""
+dequantize(tensor) -> Tensor
+
+Returns an fp32 Tensor by dequantizing a quantized Tensor
+
+Args:
+ tensor (Tensor): A quantized Tensor
+
+.. function:: dequantize(tensors) -> sequence of Tensors
+ :noindex:
+
+Given a list of quantized Tensors, dequantize them and return a list of fp32 Tensors
+
+Args:
+ tensors (sequence of Tensors): A list of quantized Tensors
+""",
+)
+
+# Docstring for torch.diag: vector <-> diagonal-matrix conversion.
+add_docstr(
+ torch.diag,
+ r"""
+diag(input, diagonal=0, *, out=None) -> Tensor
+
+- If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor
+ with the elements of :attr:`input` as the diagonal.
+- If :attr:`input` is a matrix (2-D tensor), then returns a 1-D tensor with
+ the diagonal elements of :attr:`input`.
+
+The argument :attr:`diagonal` controls which diagonal to consider:
+
+- If :attr:`diagonal` = 0, it is the main diagonal.
+- If :attr:`diagonal` > 0, it is above the main diagonal.
+- If :attr:`diagonal` < 0, it is below the main diagonal.
+
+Args:
+ {input}
+ diagonal (int, optional): the diagonal to consider
+
+Keyword args:
+ {out}
+
+.. seealso::
+
+ :func:`torch.diagonal` always returns the diagonal of its input.
+
+ :func:`torch.diagflat` always constructs a tensor with diagonal elements
+ specified by the input.
+
+Examples:
+
+Get the square matrix where the input vector is the diagonal::
+
+ >>> a = torch.randn(3)
+ >>> a
+ tensor([ 0.5950,-0.0872, 2.3298])
+ >>> torch.diag(a)
+ tensor([[ 0.5950, 0.0000, 0.0000],
+ [ 0.0000,-0.0872, 0.0000],
+ [ 0.0000, 0.0000, 2.3298]])
+ >>> torch.diag(a, 1)
+ tensor([[ 0.0000, 0.5950, 0.0000, 0.0000],
+ [ 0.0000, 0.0000,-0.0872, 0.0000],
+ [ 0.0000, 0.0000, 0.0000, 2.3298],
+ [ 0.0000, 0.0000, 0.0000, 0.0000]])
+
+Get the k-th diagonal of a given matrix::
+
+ >>> a = torch.randn(3, 3)
+ >>> a
+ tensor([[-0.4264, 0.0255,-0.1064],
+ [ 0.8795,-0.2429, 0.1374],
+ [ 0.1029,-0.6482,-1.6300]])
+ >>> torch.diag(a, 0)
+ tensor([-0.4264,-0.2429,-1.6300])
+ >>> torch.diag(a, 1)
+ tensor([ 0.0255, 0.1374])
+""".format(
+ **common_args
+ ),
+)
+
+# Docstring for torch.diag_embed: fill diagonals of 2D planes from input.
+add_docstr(
+ torch.diag_embed,
+ r"""
+diag_embed(input, offset=0, dim1=-2, dim2=-1) -> Tensor
+
+Creates a tensor whose diagonals of certain 2D planes (specified by
+:attr:`dim1` and :attr:`dim2`) are filled by :attr:`input`.
+To facilitate creating batched diagonal matrices, the 2D planes formed by
+the last two dimensions of the returned tensor are chosen by default.
+
+The argument :attr:`offset` controls which diagonal to consider:
+
+- If :attr:`offset` = 0, it is the main diagonal.
+- If :attr:`offset` > 0, it is above the main diagonal.
+- If :attr:`offset` < 0, it is below the main diagonal.
+
+The size of the new matrix will be calculated to make the specified diagonal
+of the size of the last input dimension.
+Note that for :attr:`offset` other than :math:`0`, the order of :attr:`dim1`
+and :attr:`dim2` matters. Exchanging them is equivalent to changing the
+sign of :attr:`offset`.
+
+Applying :meth:`torch.diagonal` to the output of this function with
+the same arguments yields a matrix identical to input. However,
+:meth:`torch.diagonal` has different default dimensions, so those
+need to be explicitly specified.
+
+Args:
+ {input} Must be at least 1-dimensional.
+ offset (int, optional): which diagonal to consider. Default: 0
+ (main diagonal).
+ dim1 (int, optional): first dimension with respect to which to
+ take diagonal. Default: -2.
+ dim2 (int, optional): second dimension with respect to which to
+ take diagonal. Default: -1.
+
+Example::
+
+ >>> a = torch.randn(2, 3)
+ >>> torch.diag_embed(a)
+ tensor([[[ 1.5410, 0.0000, 0.0000],
+ [ 0.0000, -0.2934, 0.0000],
+ [ 0.0000, 0.0000, -2.1788]],
+
+ [[ 0.5684, 0.0000, 0.0000],
+ [ 0.0000, -1.0845, 0.0000],
+ [ 0.0000, 0.0000, -1.3986]]])
+
+ >>> torch.diag_embed(a, offset=1, dim1=0, dim2=2)
+ tensor([[[ 0.0000, 1.5410, 0.0000, 0.0000],
+ [ 0.0000, 0.5684, 0.0000, 0.0000]],
+
+ [[ 0.0000, 0.0000, -0.2934, 0.0000],
+ [ 0.0000, 0.0000, -1.0845, 0.0000]],
+
+ [[ 0.0000, 0.0000, 0.0000, -2.1788],
+ [ 0.0000, 0.0000, 0.0000, -1.3986]],
+
+ [[ 0.0000, 0.0000, 0.0000, 0.0000],
+ [ 0.0000, 0.0000, 0.0000, 0.0000]]])
+""".format(
+ **common_args
+ ),
+)
+
+
+# Docstring for torch.diagflat: flatten input, then build a diagonal matrix.
+add_docstr(
+ torch.diagflat,
+ r"""
+diagflat(input, offset=0) -> Tensor
+
+- If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor
+ with the elements of :attr:`input` as the diagonal.
+- If :attr:`input` is a tensor with more than one dimension, then returns a
+ 2-D tensor with diagonal elements equal to a flattened :attr:`input`.
+
+The argument :attr:`offset` controls which diagonal to consider:
+
+- If :attr:`offset` = 0, it is the main diagonal.
+- If :attr:`offset` > 0, it is above the main diagonal.
+- If :attr:`offset` < 0, it is below the main diagonal.
+
+Args:
+ {input}
+ offset (int, optional): the diagonal to consider. Default: 0 (main
+ diagonal).
+
+Examples::
+
+ >>> a = torch.randn(3)
+ >>> a
+ tensor([-0.2956, -0.9068, 0.1695])
+ >>> torch.diagflat(a)
+ tensor([[-0.2956, 0.0000, 0.0000],
+ [ 0.0000, -0.9068, 0.0000],
+ [ 0.0000, 0.0000, 0.1695]])
+ >>> torch.diagflat(a, 1)
+ tensor([[ 0.0000, -0.2956, 0.0000, 0.0000],
+ [ 0.0000, 0.0000, -0.9068, 0.0000],
+ [ 0.0000, 0.0000, 0.0000, 0.1695],
+ [ 0.0000, 0.0000, 0.0000, 0.0000]])
+
+ >>> a = torch.randn(2, 2)
+ >>> a
+ tensor([[ 0.2094, -0.3018],
+ [-0.1516, 1.9342]])
+ >>> torch.diagflat(a)
+ tensor([[ 0.2094, 0.0000, 0.0000, 0.0000],
+ [ 0.0000, -0.3018, 0.0000, 0.0000],
+ [ 0.0000, 0.0000, -0.1516, 0.0000],
+ [ 0.0000, 0.0000, 0.0000, 1.9342]])
+""".format(
+ **common_args
+ ),
+)
+
+# Docstring for torch.diagonal: view of a diagonal w.r.t. dim1/dim2.
+add_docstr(
+ torch.diagonal,
+ r"""
+diagonal(input, offset=0, dim1=0, dim2=1) -> Tensor
+
+Returns a partial view of :attr:`input` with the its diagonal elements
+with respect to :attr:`dim1` and :attr:`dim2` appended as a dimension
+at the end of the shape.
+
+The argument :attr:`offset` controls which diagonal to consider:
+
+- If :attr:`offset` = 0, it is the main diagonal.
+- If :attr:`offset` > 0, it is above the main diagonal.
+- If :attr:`offset` < 0, it is below the main diagonal.
+
+Applying :meth:`torch.diag_embed` to the output of this function with
+the same arguments yields a diagonal matrix with the diagonal entries
+of the input. However, :meth:`torch.diag_embed` has different default
+dimensions, so those need to be explicitly specified.
+
+Args:
+ {input} Must be at least 2-dimensional.
+ offset (int, optional): which diagonal to consider. Default: 0
+ (main diagonal).
+ dim1 (int, optional): first dimension with respect to which to
+ take diagonal. Default: 0.
+ dim2 (int, optional): second dimension with respect to which to
+ take diagonal. Default: 1.
+
+.. note:: To take a batch diagonal, pass in dim1=-2, dim2=-1.
+
+Examples::
+
+ >>> a = torch.randn(3, 3)
+ >>> a
+ tensor([[-1.0854, 1.1431, -0.1752],
+ [ 0.8536, -0.0905, 0.0360],
+ [ 0.6927, -0.3735, -0.4945]])
+
+
+ >>> torch.diagonal(a, 0)
+ tensor([-1.0854, -0.0905, -0.4945])
+
+
+ >>> torch.diagonal(a, 1)
+ tensor([ 1.1431, 0.0360])
+
+
+ >>> x = torch.randn(2, 5, 4, 2)
+ >>> torch.diagonal(x, offset=-1, dim1=1, dim2=2)
+ tensor([[[-1.2631, 0.3755, -1.5977, -1.8172],
+ [-1.1065, 1.0401, -0.2235, -0.7938]],
+
+ [[-1.7325, -0.3081, 0.6166, 0.2335],
+ [ 1.0500, 0.7336, -0.3836, -1.1015]]])
+""".format(
+ **common_args
+ ),
+)
+
+# Docstring for torch.diagonal_scatter: write src onto a diagonal (fresh storage).
+add_docstr(
+ torch.diagonal_scatter,
+ r"""
+diagonal_scatter(input, src, offset=0, dim1=0, dim2=1) -> Tensor
+
+Embeds the values of the :attr:`src` tensor into :attr:`input` along
+the diagonal elements of :attr:`input`, with respect to :attr:`dim1`
+and :attr:`dim2`.
+
+This function returns a tensor with fresh storage; it does not
+return a view.
+
+The argument :attr:`offset` controls which diagonal to consider:
+
+- If :attr:`offset` = 0, it is the main diagonal.
+- If :attr:`offset` > 0, it is above the main diagonal.
+- If :attr:`offset` < 0, it is below the main diagonal.
+
+Args:
+ {input} Must be at least 2-dimensional.
+ src (Tensor): the tensor to embed into :attr:`input`.
+ offset (int, optional): which diagonal to consider. Default: 0
+ (main diagonal).
+ dim1 (int, optional): first dimension with respect to which to
+ take diagonal. Default: 0.
+ dim2 (int, optional): second dimension with respect to which to
+ take diagonal. Default: 1.
+
+.. note::
+
+ :attr:`src` must be of the proper size in order to be embedded
+ into :attr:`input`. Specifically, it should have the same shape as
+ ``torch.diagonal(input, offset, dim1, dim2)``
+
+Examples::
+
+ >>> a = torch.zeros(3, 3)
+ >>> a
+ tensor([[0., 0., 0.],
+ [0., 0., 0.],
+ [0., 0., 0.]])
+
+ >>> torch.diagonal_scatter(a, torch.ones(3), 0)
+ tensor([[1., 0., 0.],
+ [0., 1., 0.],
+ [0., 0., 1.]])
+
+ >>> torch.diagonal_scatter(a, torch.ones(2), 1)
+ tensor([[0., 1., 0.],
+ [0., 0., 1.],
+ [0., 0., 0.]])
+""".format(
+ **common_args
+ ),
+)
+
+# Docstring for torch.as_strided_scatter. Fix: ``src`` appears in the signature,
+# the note, and the example, but was missing from the Args list — document it,
+# matching the phrasing used by diagonal_scatter above.
+add_docstr(
+ torch.as_strided_scatter,
+ r"""
+as_strided_scatter(input, src, size, stride, storage_offset=None) -> Tensor
+
+Embeds the values of the :attr:`src` tensor into :attr:`input` along
+the elements corresponding to the result of calling
+input.as_strided(size, stride, storage_offset).
+
+This function returns a tensor with fresh storage; it does not
+return a view.
+
+Args:
+ {input}
+ src (Tensor): the tensor to embed into :attr:`input`.
+ size (tuple or ints): the shape of the output tensor
+ stride (tuple or ints): the stride of the output tensor
+ storage_offset (int, optional): the offset in the underlying storage of the output tensor
+
+.. note::
+
+ :attr:`src` must be of the proper size in order to be embedded
+ into :attr:`input`. Specifically, it should have the same shape as
+ `torch.as_strided(input, size, stride, storage_offset)`
+
+Example::
+
+ >>> a = torch.arange(4).reshape(2, 2) + 1
+ >>> a
+ tensor([[1, 2],
+ [3, 4]])
+ >>> b = torch.zeros(3, 3)
+ >>> b
+ tensor([[0., 0., 0.],
+ [0., 0., 0.],
+ [0., 0., 0.]])
+ >>> torch.as_strided_scatter(b, a, (2, 2), (1, 2))
+ tensor([[1., 3., 2.],
+ [4., 0., 0.],
+ [0., 0., 0.]])
+
+""".format(
+ **common_args
+ ),
+)
+
+# Docstring for torch.diff. Fix: the signature line omitted ``*, out=None``
+# although the Keyword args section below documents {out}; torch.diff accepts
+# an ``out=`` keyword, so align the signature with the documented keywords.
+add_docstr(
+ torch.diff,
+ r"""
+diff(input, n=1, dim=-1, prepend=None, append=None, *, out=None) -> Tensor
+
+Computes the n-th forward difference along the given dimension.
+
+The first-order differences are given by `out[i] = input[i + 1] - input[i]`. Higher-order
+differences are calculated by using :func:`torch.diff` recursively.
+
+Args:
+ input (Tensor): the tensor to compute the differences on
+ n (int, optional): the number of times to recursively compute the difference
+ dim (int, optional): the dimension to compute the difference along.
+ Default is the last dimension.
+ prepend, append (Tensor, optional): values to prepend or append to
+ :attr:`input` along :attr:`dim` before computing the difference.
+ Their dimensions must be equivalent to that of input, and their shapes
+ must match input's shape except on :attr:`dim`.
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.tensor([1, 3, 2])
+ >>> torch.diff(a)
+ tensor([ 2, -1])
+ >>> b = torch.tensor([4, 5])
+ >>> torch.diff(a, append=b)
+ tensor([ 2, -1, 2, 1])
+ >>> c = torch.tensor([[1, 2, 3], [3, 4, 5]])
+ >>> torch.diff(c, dim=0)
+ tensor([[2, 2, 2]])
+ >>> torch.diff(c, dim=1)
+ tensor([[1, 1],
+ [1, 1]])
+""".format(
+ **common_args
+ ),
+)
+
+# Docstring for torch.digamma (alias of torch.special.digamma).
+add_docstr(
+ torch.digamma,
+ r"""
+digamma(input, *, out=None) -> Tensor
+
+Alias for :func:`torch.special.digamma`.
+""",
+)
+
+add_docstr(
+ torch.dist,
+ r"""
+dist(input, other, p=2) -> Tensor
+
+Returns the p-norm of (:attr:`input` - :attr:`other`)
+
+The shapes of :attr:`input` and :attr:`other` must be
+:ref:`broadcastable <broadcasting-semantics>`.
+
+Args:
+ {input}
+ other (Tensor): the Right-hand-side input tensor
+ p (float, optional): the norm to be computed
+
+Example::
+
+ >>> x = torch.randn(4)
+ >>> x
+ tensor([-1.5393, -0.8675, 0.5916, 1.6321])
+ >>> y = torch.randn(4)
+ >>> y
+ tensor([ 0.0967, -1.0511, 0.6295, 0.8360])
+ >>> torch.dist(x, y, 3.5)
+ tensor(1.6727)
+ >>> torch.dist(x, y, 3)
+ tensor(1.6973)
+ >>> torch.dist(x, y, 0)
+ tensor(4.)
+ >>> torch.dist(x, y, 1)
+ tensor(2.6537)
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.div,
+ r"""
+div(input, other, *, rounding_mode=None, out=None) -> Tensor
+
+Divides each element of the input ``input`` by the corresponding element of
+:attr:`other`.
+
+.. math::
+ \text{{out}}_i = \frac{{\text{{input}}_i}}{{\text{{other}}_i}}
+
+.. note::
+ By default, this performs a "true" division like Python 3.
+ See the :attr:`rounding_mode` argument for floor division.
+
+Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
+:ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
+Always promotes integer types to the default scalar type.
+
+Args:
+ input (Tensor): the dividend
+ other (Tensor or Number): the divisor
+
+Keyword args:
+ rounding_mode (str, optional): Type of rounding applied to the result:
+
+ * None - default behavior. Performs no rounding and, if both :attr:`input` and
+ :attr:`other` are integer types, promotes the inputs to the default scalar type.
+ Equivalent to true division in Python (the ``/`` operator) and NumPy's ``np.true_divide``.
+ * ``"trunc"`` - rounds the results of the division towards zero.
+ Equivalent to C-style integer division.
+ * ``"floor"`` - rounds the results of the division down.
+ Equivalent to floor division in Python (the ``//`` operator) and NumPy's ``np.floor_divide``.
+
+ {out}
+
+Examples::
+
+ >>> x = torch.tensor([ 0.3810, 1.2774, -0.2972, -0.3719, 0.4637])
+ >>> torch.div(x, 0.5)
+ tensor([ 0.7620, 2.5548, -0.5944, -0.7438, 0.9274])
+
+ >>> a = torch.tensor([[-0.3711, -1.9353, -0.4605, -0.2917],
+ ... [ 0.1815, -1.0111, 0.9805, -1.5923],
+ ... [ 0.1062, 1.4581, 0.7759, -1.2344],
+ ... [-0.1830, -0.0313, 1.1908, -1.4757]])
+ >>> b = torch.tensor([ 0.8032, 0.2930, -0.8113, -0.2308])
+ >>> torch.div(a, b)
+ tensor([[-0.4620, -6.6051, 0.5676, 1.2639],
+ [ 0.2260, -3.4509, -1.2086, 6.8990],
+ [ 0.1322, 4.9764, -0.9564, 5.3484],
+ [-0.2278, -0.1068, -1.4678, 6.3938]])
+
+ >>> torch.div(a, b, rounding_mode='trunc')
+ tensor([[-0., -6., 0., 1.],
+ [ 0., -3., -1., 6.],
+ [ 0., 4., -0., 5.],
+ [-0., -0., -1., 6.]])
+
+ >>> torch.div(a, b, rounding_mode='floor')
+ tensor([[-1., -7., 0., 1.],
+ [ 0., -4., -2., 6.],
+ [ 0., 4., -1., 5.],
+ [-1., -1., -2., 6.]])
+
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.divide,
+ r"""
+divide(input, other, *, rounding_mode=None, out=None) -> Tensor
+
+Alias for :func:`torch.div`.
+""",
+)
+
+add_docstr(
+ torch.dot,
+ r"""
+dot(input, other, *, out=None) -> Tensor
+
+Computes the dot product of two 1D tensors.
+
+.. note::
+
+ Unlike NumPy's dot, torch.dot intentionally only supports computing the dot product
+ of two 1D tensors with the same number of elements.
+
+Args:
+ input (Tensor): first tensor in the dot product, must be 1D.
+ other (Tensor): second tensor in the dot product, must be 1D.
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> torch.dot(torch.tensor([2, 3]), torch.tensor([2, 1]))
+ tensor(7)
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.vdot,
+ r"""
+vdot(input, other, *, out=None) -> Tensor
+
+Computes the dot product of two 1D vectors along a dimension.
+
+In symbols, this function computes
+
+.. math::
+
+ \sum_{i=1}^n \overline{x_i}y_i.
+
+where :math:`\overline{x_i}` denotes the conjugate for complex
+vectors, and it is the identity for real vectors.
+
+.. note::
+
+ Unlike NumPy's vdot, torch.vdot intentionally only supports computing the dot product
+ of two 1D tensors with the same number of elements.
+
+.. seealso::
+
+ :func:`torch.linalg.vecdot` computes the dot product of two batches of vectors along a dimension.
+
+Args:
+ input (Tensor): first tensor in the dot product, must be 1D. Its conjugate is used if it's complex.
+ other (Tensor): second tensor in the dot product, must be 1D.
+
+Keyword args:
+"""
+ + rf"""
+.. note:: {common_args["out"]}
+"""
+ + r"""
+
+Example::
+
+ >>> torch.vdot(torch.tensor([2, 3]), torch.tensor([2, 1]))
+ tensor(7)
+ >>> a = torch.tensor((1 +2j, 3 - 1j))
+ >>> b = torch.tensor((2 +1j, 4 - 0j))
+ >>> torch.vdot(a, b)
+ tensor([16.+1.j])
+ >>> torch.vdot(b, a)
+ tensor([16.-1.j])
+""",
+)
+
+add_docstr(
+ torch.eq,
+ r"""
+eq(input, other, *, out=None) -> Tensor
+
+Computes element-wise equality
+
+The second argument can be a number or a tensor whose shape is
+:ref:`broadcastable <broadcasting-semantics>` with the first argument.
+
+Args:
+ input (Tensor): the tensor to compare
+ other (Tensor or float): the tensor or value to compare
+
+Keyword args:
+ {out}
+
+Returns:
+ A boolean tensor that is True where :attr:`input` is equal to :attr:`other` and False elsewhere
+
+Example::
+
+ >>> torch.eq(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
+ tensor([[ True, False],
+ [False, True]])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.equal,
+ r"""
+equal(input, other) -> bool
+
+``True`` if two tensors have the same size and elements, ``False`` otherwise.
+
+Example::
+
+ >>> torch.equal(torch.tensor([1, 2]), torch.tensor([1, 2]))
+ True
+""",
+)
+
+add_docstr(
+ torch.erf,
+ r"""
+erf(input, *, out=None) -> Tensor
+
+Alias for :func:`torch.special.erf`.
+""",
+)
+
+add_docstr(
+ torch.erfc,
+ r"""
+erfc(input, *, out=None) -> Tensor
+
+Alias for :func:`torch.special.erfc`.
+""",
+)
+
+add_docstr(
+ torch.erfinv,
+ r"""
+erfinv(input, *, out=None) -> Tensor
+
+Alias for :func:`torch.special.erfinv`.
+""",
+)
+
+add_docstr(
+ torch.exp,
+ r"""
+exp(input, *, out=None) -> Tensor
+
+Returns a new tensor with the exponential of the elements
+of the input tensor :attr:`input`.
+
+.. math::
+ y_{i} = e^{x_{i}}
+"""
+ + r"""
+Args:
+ {input}
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> torch.exp(torch.tensor([0, math.log(2.)]))
+ tensor([ 1., 2.])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.exp2,
+ r"""
+exp2(input, *, out=None) -> Tensor
+
+Alias for :func:`torch.special.exp2`.
+""",
+)
+
+add_docstr(
+ torch.expm1,
+ r"""
+expm1(input, *, out=None) -> Tensor
+
+Alias for :func:`torch.special.expm1`.
+""",
+)
+
+add_docstr(
+ torch.eye,
+ r"""
+eye(n, m=None, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
+
+Returns a 2-D tensor with ones on the diagonal and zeros elsewhere.
+
+Args:
+ n (int): the number of rows
+ m (int, optional): the number of columns with default being :attr:`n`
+
+Keyword arguments:
+ {out}
+ {dtype}
+ {layout}
+ {device}
+ {requires_grad}
+
+Returns:
+ Tensor: A 2-D tensor with ones on the diagonal and zeros elsewhere
+
+Example::
+
+ >>> torch.eye(3)
+ tensor([[ 1., 0., 0.],
+ [ 0., 1., 0.],
+ [ 0., 0., 1.]])
+""".format(
+ **factory_common_args
+ ),
+)
+
+add_docstr(
+ torch.floor,
+ r"""
+floor(input, *, out=None) -> Tensor
+
+Returns a new tensor with the floor of the elements of :attr:`input`,
+the largest integer less than or equal to each element.
+
+For integer inputs, follows the array-api convention of returning a
+copy of the input tensor.
+
+.. math::
+ \text{out}_{i} = \left\lfloor \text{input}_{i} \right\rfloor
+"""
+ + r"""
+Args:
+ {input}
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.randn(4)
+ >>> a
+ tensor([-0.8166, 1.5308, -0.2530, -0.2091])
+ >>> torch.floor(a)
+ tensor([-1., 1., -1., -1.])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.floor_divide,
+ r"""
+floor_divide(input, other, *, out=None) -> Tensor
+
+.. note::
+
+ Before PyTorch 1.13 :func:`torch.floor_divide` incorrectly performed
+ truncation division. To restore the previous behavior use
+ :func:`torch.div` with ``rounding_mode='trunc'``.
+
+Computes :attr:`input` divided by :attr:`other`, elementwise, and floors
+the result.
+
+.. math::
+ \text{{out}}_i = \text{floor} \left( \frac{{\text{{input}}_i}}{{\text{{other}}_i}} \right)
+
+"""
+ + r"""
+
+Supports broadcasting to a common shape, type promotion, and integer and float inputs.
+
+Args:
+ input (Tensor or Number): the dividend
+ other (Tensor or Number): the divisor
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.tensor([4.0, 3.0])
+ >>> b = torch.tensor([2.0, 2.0])
+ >>> torch.floor_divide(a, b)
+ tensor([2.0, 1.0])
+ >>> torch.floor_divide(a, 1.4)
+ tensor([2.0, 2.0])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.fmod,
+ r"""
+fmod(input, other, *, out=None) -> Tensor
+
+Applies C++'s `std::fmod <https://en.cppreference.com/w/cpp/numeric/math/fmod>`_ entrywise.
+The result has the same sign as the dividend :attr:`input` and its absolute value
+is less than that of :attr:`other`.
+
+This function may be defined in terms of :func:`torch.div` as
+
+.. code:: python
+
+ torch.fmod(a, b) == a - a.div(b, rounding_mode="trunc") * b
+
+Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
+:ref:`type promotion <type-promotion-doc>`, and integer and float inputs.
+
+.. note::
+
+ When the divisor is zero, returns ``NaN`` for floating point dtypes
+ on both CPU and GPU; raises ``RuntimeError`` for integer division by
+ zero on CPU; Integer division by zero on GPU may return any value.
+
+.. note::
+
+ Complex inputs are not supported. In some cases, it is not mathematically
+ possible to satisfy the definition of a modulo operation with complex numbers.
+
+.. seealso::
+
+ :func:`torch.remainder` which implements Python's modulus operator.
+ This one is defined using division rounding down the result.
+
+Args:
+ input (Tensor): the dividend
+ other (Tensor or Scalar): the divisor
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> torch.fmod(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
+ tensor([-1., -0., -1., 1., 0., 1.])
+ >>> torch.fmod(torch.tensor([1, 2, 3, 4, 5]), -1.5)
+ tensor([1.0000, 0.5000, 0.0000, 1.0000, 0.5000])
+
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.frac,
+ r"""
+frac(input, *, out=None) -> Tensor
+
+Computes the fractional portion of each element in :attr:`input`.
+
+.. math::
+ \text{out}_{i} = \text{input}_{i} - \left\lfloor |\text{input}_{i}| \right\rfloor * \operatorname{sgn}(\text{input}_{i})
+
+Example::
+
+ >>> torch.frac(torch.tensor([1, 2.5, -3.2]))
+ tensor([ 0.0000, 0.5000, -0.2000])
+""",
+)
+
+add_docstr(
+ torch.frexp,
+ r"""
+frexp(input, *, out=None) -> (Tensor mantissa, Tensor exponent)
+
+Decomposes :attr:`input` into mantissa and exponent tensors
+such that :math:`\text{input} = \text{mantissa} \times 2^{\text{exponent}}`.
+
+The range of mantissa is the open interval (-1, 1).
+
+Supports float inputs.
+
+Args:
+ input (Tensor): the input tensor
+
+
+Keyword args:
+ out (tuple, optional): the output tensors
+
+Example::
+
+ >>> x = torch.arange(9.)
+ >>> mantissa, exponent = torch.frexp(x)
+ >>> mantissa
+ tensor([0.0000, 0.5000, 0.5000, 0.7500, 0.5000, 0.6250, 0.7500, 0.8750, 0.5000])
+ >>> exponent
+ tensor([0, 1, 2, 2, 3, 3, 3, 3, 4], dtype=torch.int32)
+ >>> torch.ldexp(mantissa, exponent)
+ tensor([0., 1., 2., 3., 4., 5., 6., 7., 8.])
+""",
+)
+
+add_docstr(
+ torch.from_numpy,
+ r"""
+from_numpy(ndarray) -> Tensor
+
+Creates a :class:`Tensor` from a :class:`numpy.ndarray`.
+
+The returned tensor and :attr:`ndarray` share the same memory. Modifications to
+the tensor will be reflected in the :attr:`ndarray` and vice versa. The returned
+tensor is not resizable.
+
+It currently accepts :attr:`ndarray` with dtypes of ``numpy.float64``,
+``numpy.float32``, ``numpy.float16``, ``numpy.complex64``, ``numpy.complex128``,
+``numpy.int64``, ``numpy.int32``, ``numpy.int16``, ``numpy.int8``, ``numpy.uint8``,
+and ``bool``.
+
+.. warning::
+ Writing to a tensor created from a read-only NumPy array is not supported and will result in undefined behavior.
+
+Example::
+
+ >>> a = numpy.array([1, 2, 3])
+ >>> t = torch.from_numpy(a)
+ >>> t
+ tensor([ 1, 2, 3])
+ >>> t[0] = -1
+ >>> a
+ array([-1, 2, 3])
+""",
+)
+
+add_docstr(
+ torch.frombuffer,
+ r"""
+frombuffer(buffer, *, dtype, count=-1, offset=0, requires_grad=False) -> Tensor
+
+Creates a 1-dimensional :class:`Tensor` from an object that implements
+the Python buffer protocol.
+
+Skips the first :attr:`offset` bytes in the buffer, and interprets the rest of
+the raw bytes as a 1-dimensional tensor of type :attr:`dtype` with :attr:`count`
+elements.
+
+Note that either of the following must be true:
+
+1. :attr:`count` is a positive non-zero number, and the total number of bytes
+in the buffer is more than :attr:`offset` plus :attr:`count` times the size
+(in bytes) of :attr:`dtype`.
+
+2. :attr:`count` is negative, and the length (number of bytes) of the buffer
+subtracted by the :attr:`offset` is a multiple of the size (in bytes) of
+:attr:`dtype`.
+
+The returned tensor and buffer share the same memory. Modifications to
+the tensor will be reflected in the buffer and vice versa. The returned
+tensor is not resizable.
+
+.. note::
+ This function increments the reference count for the object that
+ owns the shared memory. Therefore, such memory will not be deallocated
+ before the returned tensor goes out of scope.
+
+.. warning::
+ This function's behavior is undefined when passed an object implementing
+ the buffer protocol whose data is not on the CPU. Doing so is likely to
+ cause a segmentation fault.
+
+.. warning::
+ This function does not try to infer the :attr:`dtype` (hence, it is not
+ optional). Passing a different :attr:`dtype` than its source may result
+ in unexpected behavior.
+
+Args:
+ buffer (object): a Python object that exposes the buffer interface.
+
+Keyword args:
+ dtype (:class:`torch.dtype`): the desired data type of returned tensor.
+ count (int, optional): the number of desired elements to be read.
+ If negative, all the elements (until the end of the buffer) will be
+ read. Default: -1.
+ offset (int, optional): the number of bytes to skip at the start of
+ the buffer. Default: 0.
+ {requires_grad}
+
+Example::
+
+ >>> import array
+ >>> a = array.array('i', [1, 2, 3])
+ >>> t = torch.frombuffer(a, dtype=torch.int32)
+ >>> t
+ tensor([ 1, 2, 3])
+ >>> t[0] = -1
+ >>> a
+ array([-1, 2, 3])
+
+ >>> # Interprets the signed char bytes as 32-bit integers.
+ >>> # Each 4 signed char elements will be interpreted as
+ >>> # 1 signed 32-bit integer.
+ >>> import array
+ >>> a = array.array('b', [-1, 0, 0, 0])
+ >>> torch.frombuffer(a, dtype=torch.int32)
+ tensor([255], dtype=torch.int32)
+""".format(
+ **factory_common_args
+ ),
+)
+
+add_docstr(
+ torch.from_file,
+ r"""
+from_file(filename, shared=None, size=0, *, dtype=None, layout=None, device=None, pin_memory=False)
+
+Creates a CPU tensor with a storage backed by a memory-mapped file.
+
+If ``shared`` is True, then memory is shared between processes. All changes are written to the file.
+If ``shared`` is False, then changes to the tensor do not affect the file.
+
+``size`` is the number of elements in the Tensor. If ``shared`` is ``False``, then the file must contain
+at least ``size * sizeof(dtype)`` bytes. If ``shared`` is ``True`` the file will be created if needed.
+
+.. note::
+ Only CPU tensors can be mapped to files.
+
+.. note::
+ For now, tensors with storages backed by a memory-mapped file cannot be created in pinned memory.
+
+
+Args:
+ filename (str): file name to map
+ shared (bool): whether to share memory (whether ``MAP_SHARED`` or ``MAP_PRIVATE`` is passed to the
+        underlying `mmap(2) call <https://man7.org/linux/man-pages/man2/mmap.2.html>`_)
+ size (int): number of elements in the tensor
+
+Keyword args:
+ {dtype}
+ {layout}
+ {device}
+ {pin_memory}
+
+Example::
+ >>> t = torch.randn(2, 5, dtype=torch.float64)
+ >>> t.numpy().tofile('storage.pt')
+ >>> t_mapped = torch.from_file('storage.pt', shared=False, size=10, dtype=torch.float64)
+ """.format(
+ **factory_common_args
+ ),
+)
+
+add_docstr(
+ torch.flatten,
+ r"""
+flatten(input, start_dim=0, end_dim=-1) -> Tensor
+
+Flattens :attr:`input` by reshaping it into a one-dimensional tensor. If :attr:`start_dim` or :attr:`end_dim`
+are passed, only dimensions starting with :attr:`start_dim` and ending with :attr:`end_dim` are flattened.
+The order of elements in :attr:`input` is unchanged.
+
+Unlike NumPy's flatten, which always copies input's data, this function may return the original object, a view,
+or copy. If no dimensions are flattened, then the original object :attr:`input` is returned. Otherwise, if input can
+be viewed as the flattened shape, then that view is returned. Finally, only if the input cannot be viewed as the
+flattened shape is input's data copied. See :meth:`torch.Tensor.view` for details on when a view will be returned.
+
+.. note::
+ Flattening a zero-dimensional tensor will return a one-dimensional view.
+
+Args:
+ {input}
+ start_dim (int): the first dim to flatten
+ end_dim (int): the last dim to flatten
+
+Example::
+
+ >>> t = torch.tensor([[[1, 2],
+ ... [3, 4]],
+ ... [[5, 6],
+ ... [7, 8]]])
+ >>> torch.flatten(t)
+ tensor([1, 2, 3, 4, 5, 6, 7, 8])
+ >>> torch.flatten(t, start_dim=1)
+ tensor([[1, 2, 3, 4],
+ [5, 6, 7, 8]])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.unflatten,
+ r"""
+unflatten(input, dim, sizes) -> Tensor
+
+Expands a dimension of the input tensor over multiple dimensions.
+
+.. seealso::
+
+ :func:`torch.flatten` the inverse of this function. It coalesces several dimensions into one.
+
+Args:
+ {input}
+ dim (int): Dimension to be unflattened, specified as an index into
+ ``input.shape``.
+ sizes (Tuple[int]): New shape of the unflattened dimension.
+ One of its elements can be `-1` in which case the corresponding output
+ dimension is inferred. Otherwise, the product of ``sizes`` *must*
+ equal ``input.shape[dim]``.
+
+Returns:
+ A View of input with the specified dimension unflattened.
+
+Examples::
+ >>> torch.unflatten(torch.randn(3, 4, 1), 1, (2, 2)).shape
+ torch.Size([3, 2, 2, 1])
+ >>> torch.unflatten(torch.randn(3, 4, 1), 1, (-1, 2)).shape
+ torch.Size([3, 2, 2, 1])
+ >>> torch.unflatten(torch.randn(5, 12, 3), -2, (2, 2, 3, 1, 1)).shape
+ torch.Size([5, 2, 2, 3, 1, 1, 3])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.gather,
+ r"""
+gather(input, dim, index, *, sparse_grad=False, out=None) -> Tensor
+
+Gathers values along an axis specified by `dim`.
+
+For a 3-D tensor the output is specified by::
+
+ out[i][j][k] = input[index[i][j][k]][j][k] # if dim == 0
+ out[i][j][k] = input[i][index[i][j][k]][k] # if dim == 1
+ out[i][j][k] = input[i][j][index[i][j][k]] # if dim == 2
+
+:attr:`input` and :attr:`index` must have the same number of dimensions.
+It is also required that ``index.size(d) <= input.size(d)`` for all
+dimensions ``d != dim``. :attr:`out` will have the same shape as :attr:`index`.
+Note that ``input`` and ``index`` do not broadcast against each other.
+
+Args:
+ input (Tensor): the source tensor
+ dim (int): the axis along which to index
+ index (LongTensor): the indices of elements to gather
+
+Keyword arguments:
+ sparse_grad (bool, optional): If ``True``, gradient w.r.t. :attr:`input` will be a sparse tensor.
+ out (Tensor, optional): the destination tensor
+
+Example::
+
+ >>> t = torch.tensor([[1, 2], [3, 4]])
+ >>> torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]]))
+ tensor([[ 1, 1],
+ [ 4, 3]])
+""",
+)
+
+
+add_docstr(
+ torch.gcd,
+ r"""
+gcd(input, other, *, out=None) -> Tensor
+
+Computes the element-wise greatest common divisor (GCD) of :attr:`input` and :attr:`other`.
+
+Both :attr:`input` and :attr:`other` must have integer types.
+
+.. note::
+ This defines :math:`gcd(0, 0) = 0`.
+
+Args:
+ {input}
+ other (Tensor): the second input tensor
+
+Keyword arguments:
+ {out}
+
+Example::
+
+ >>> a = torch.tensor([5, 10, 15])
+ >>> b = torch.tensor([3, 4, 5])
+ >>> torch.gcd(a, b)
+ tensor([1, 2, 5])
+ >>> c = torch.tensor([3])
+ >>> torch.gcd(a, c)
+ tensor([1, 1, 3])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.ge,
+ r"""
+ge(input, other, *, out=None) -> Tensor
+
+Computes :math:`\text{input} \geq \text{other}` element-wise.
+"""
+ + r"""
+
+The second argument can be a number or a tensor whose shape is
+:ref:`broadcastable <broadcasting-semantics>` with the first argument.
+
+Args:
+ input (Tensor): the tensor to compare
+ other (Tensor or float): the tensor or value to compare
+
+Keyword args:
+ {out}
+
+Returns:
+ A boolean tensor that is True where :attr:`input` is greater than or equal to :attr:`other` and False elsewhere
+
+Example::
+
+ >>> torch.ge(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
+ tensor([[True, True], [False, True]])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.greater_equal,
+ r"""
+greater_equal(input, other, *, out=None) -> Tensor
+
+Alias for :func:`torch.ge`.
+""",
+)
+
+add_docstr(
+ torch.gradient,
+ r"""
+gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors
+
+Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in
+one or more dimensions using the `second-order accurate central differences method
+<https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/>`_ and
+either first or second order estimates at the boundaries.
+
+The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not
+specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates
+to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional
+:attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and
+:math:`g(1, 2, 3)\ == input[1, 2, 3]`.
+
+When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates.
+This is detailed in the "Keyword Arguments" section below.
+
+The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is
+accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be
+improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative
+is estimated using `Taylor’s theorem with remainder <https://en.wikipedia.org/wiki/Taylor%27s_theorem>`_.
+Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring
+it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using:
+
+.. math::
+ \begin{aligned}
+ f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\
+ f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x, x-h_l) \\
+ \end{aligned}
+
+Using the fact that :math:`f \in C^3` and solving the linear system, we derive:
+
+.. math::
+ f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l)
+ + ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} }
+
+.. note::
+ We estimate the gradient of functions in complex domain
+ :math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way.
+
+The value of each partial derivative at the boundary points is computed differently. See edge_order below.
+
+Args:
+ input (``Tensor``): the tensor that represents the values of the function
+
+Keyword args:
+ spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify
+ how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then
+ the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the
+ indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding
+ indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9).
+ Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for
+ the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then
+ the coordinates are (t0[1], t1[2], t2[3])
+
+ dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default
+ the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of
+        the :attr:`spacing` argument must correspond with the specified dims.
+
+ edge_order (``int``, optional): 1 or 2, for `first-order
+        <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/>`_ or
+        `second-order <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/>`_
+ estimation of the boundary ("edge") values, respectively.
+
+Examples::
+
+ >>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 2, 4]
+ >>> coordinates = (torch.tensor([-2., -1., 1., 4.]),)
+ >>> values = torch.tensor([4., 1., 1., 16.], )
+ >>> torch.gradient(values, spacing = coordinates)
+ (tensor([-3., -2., 2., 5.]),)
+
+ >>> # Estimates the gradient of the R^2 -> R function whose samples are
+ >>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost
+ >>> # dimension and [0, 1, 2, 3] for the innermost dimension, and function estimates
+ >>> # partial derivative for both dimensions.
+ >>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]])
+ >>> torch.gradient(t)
+ (tensor([[ 9., 18., 36., 72.],
+ [ 9., 18., 36., 72.]]),
+ tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
+ [10.0000, 15.0000, 30.0000, 40.0000]]))
+
+ >>> # A scalar value for spacing modifies the relationship between tensor indices
+ >>> # and input coordinates by multiplying the indices to find the
+ >>> # coordinates. For example, below the indices of the innermost
+ >>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of
+ >>> # the outermost dimension 0, 1 translate to coordinates of [0, 2].
+ >>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1])
+ (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
+ [ 4.5000, 9.0000, 18.0000, 36.0000]]),
+ tensor([[ 0.5000, 0.7500, 1.5000, 2.0000],
+ [ 5.0000, 7.5000, 15.0000, 20.0000]]))
+ >>> # doubling the spacing between samples halves the estimated partial gradients.
+
+ >>>
+ >>> # Estimates only the partial derivative for dimension 1
+ >>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.)
+ (tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
+ [10.0000, 15.0000, 30.0000, 40.0000]]),)
+
+ >>> # When spacing is a list of scalars, the relationship between the tensor
+ >>> # indices and input coordinates changes based on dimension.
+ >>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate
+ >>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension
+ >>> # 0, 1 translate to coordinates of [0, 2].
+ >>> torch.gradient(t, spacing = [3., 2.])
+ (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
+ [ 4.5000, 9.0000, 18.0000, 36.0000]]),
+ tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
+ [ 3.3333, 5.0000, 10.0000, 13.3333]]))
+
+ >>> # The following example is a replication of the previous one with explicit
+ >>> # coordinates.
+ >>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9]))
+ >>> torch.gradient(t, spacing = coords)
+ (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
+ [ 4.5000, 9.0000, 18.0000, 36.0000]]),
+ tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
+ [ 3.3333, 5.0000, 10.0000, 13.3333]]))
+
+""",
+)
+
+add_docstr(
+ torch.geqrf,
+ r"""
+geqrf(input, *, out=None) -> (Tensor, Tensor)
+
+This is a low-level function for calling LAPACK's geqrf directly. This function
+returns a namedtuple (a, tau) as defined in `LAPACK documentation for geqrf`_ .
+
+Computes a QR decomposition of :attr:`input`.
+Both `Q` and `R` matrices are stored in the same output tensor `a`.
+The elements of `R` are stored on and above the diagonal.
+Elementary reflectors (or Householder vectors) implicitly defining matrix `Q`
+are stored below the diagonal.
+The results of this function can be used together with :func:`torch.linalg.householder_product`
+to obtain the `Q` matrix or
+with :func:`torch.ormqr`, which uses an implicit representation of the `Q` matrix,
+for an efficient matrix-matrix multiplication.
+
+See `LAPACK documentation for geqrf`_ for further details.
+
+.. note::
+ See also :func:`torch.linalg.qr`, which computes Q and R matrices, and :func:`torch.linalg.lstsq`
+ with the ``driver="gels"`` option for a function that can solve matrix equations using a QR decomposition.
+
+Args:
+ input (Tensor): the input matrix
+
+Keyword args:
+ out (tuple, optional): the output tuple of (Tensor, Tensor). Ignored if `None`. Default: `None`.
+
+.. _LAPACK documentation for geqrf:
+ http://www.netlib.org/lapack/explore-html/df/dc5/group__variants_g_ecomputational_ga3766ea903391b5cf9008132f7440ec7b.html
+
+""",
+)
+
+add_docstr(
+ torch.inner,
+ r"""
+inner(input, other, *, out=None) -> Tensor
+
+Computes the dot product for 1D tensors. For higher dimensions, sums the product
+of elements from :attr:`input` and :attr:`other` along their last dimension.
+
+.. note::
+
+ If either :attr:`input` or :attr:`other` is a scalar, the result is equivalent
+ to `torch.mul(input, other)`.
+
+ If both :attr:`input` and :attr:`other` are non-scalars, the size of their last
+ dimension must match and the result is equivalent to `torch.tensordot(input,
+ other, dims=([-1], [-1]))`
+
+Args:
+ input (Tensor): First input tensor
+ other (Tensor): Second input tensor
+
+Keyword args:
+ out (Tensor, optional): Optional output tensor to write result into. The output
+ shape is `input.shape[:-1] + other.shape[:-1]`.
+
+Example::
+
+ # Dot product
+ >>> torch.inner(torch.tensor([1, 2, 3]), torch.tensor([0, 2, 1]))
+ tensor(7)
+
+ # Multidimensional input tensors
+ >>> a = torch.randn(2, 3)
+ >>> a
+ tensor([[0.8173, 1.0874, 1.1784],
+ [0.3279, 0.1234, 2.7894]])
+ >>> b = torch.randn(2, 4, 3)
+ >>> b
+ tensor([[[-0.4682, -0.7159, 0.1506],
+ [ 0.4034, -0.3657, 1.0387],
+ [ 0.9892, -0.6684, 0.1774],
+ [ 0.9482, 1.3261, 0.3917]],
+
+ [[ 0.4537, 0.7493, 1.1724],
+ [ 0.2291, 0.5749, -0.2267],
+ [-0.7920, 0.3607, -0.3701],
+ [ 1.3666, -0.5850, -1.7242]]])
+ >>> torch.inner(a, b)
+ tensor([[[-0.9837, 1.1560, 0.2907, 2.6785],
+ [ 2.5671, 0.5452, -0.6912, -1.5509]],
+
+ [[ 0.1782, 2.9843, 0.7366, 1.5672],
+ [ 3.5115, -0.4864, -1.2476, -4.4337]]])
+
+ # Scalar input
+ >>> torch.inner(a, torch.tensor(2))
+ tensor([[1.6347, 2.1748, 2.3567],
+ [0.6558, 0.2469, 5.5787]])
+""",
+)
+
+add_docstr(
+ torch.outer,
+ r"""
+outer(input, vec2, *, out=None) -> Tensor
+
+Outer product of :attr:`input` and :attr:`vec2`.
+If :attr:`input` is a vector of size :math:`n` and :attr:`vec2` is a vector of
+size :math:`m`, then :attr:`out` must be a matrix of size :math:`(n \times m)`.
+
+.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
+
+Args:
+ input (Tensor): 1-D input vector
+ vec2 (Tensor): 1-D input vector
+
+Keyword args:
+ out (Tensor, optional): optional output matrix
+
+Example::
+
+ >>> v1 = torch.arange(1., 5.)
+ >>> v2 = torch.arange(1., 4.)
+ >>> torch.outer(v1, v2)
+ tensor([[ 1., 2., 3.],
+ [ 2., 4., 6.],
+ [ 3., 6., 9.],
+ [ 4., 8., 12.]])
+""",
+)
+
+add_docstr(
+ torch.ger,
+ r"""
+ger(input, vec2, *, out=None) -> Tensor
+
+Alias of :func:`torch.outer`.
+
+.. warning::
+ This function is deprecated and will be removed in a future PyTorch release.
+ Use :func:`torch.outer` instead.
+""",
+)
+
+add_docstr(
+ torch.get_default_dtype,
+ r"""
+get_default_dtype() -> torch.dtype
+
+Get the current default floating point :class:`torch.dtype`.
+
+Example::
+
+ >>> torch.get_default_dtype() # initial default for floating point is torch.float32
+ torch.float32
+ >>> torch.set_default_dtype(torch.float64)
+ >>> torch.get_default_dtype() # default is now changed to torch.float64
+ torch.float64
+
+""",
+)
+
+add_docstr(
+ torch.get_num_threads,
+ r"""
+get_num_threads() -> int
+
+Returns the number of threads used for parallelizing CPU operations
+""",
+)
+
+add_docstr(
+ torch.get_num_interop_threads,
+ r"""
+get_num_interop_threads() -> int
+
+Returns the number of threads used for inter-op parallelism on CPU
+(e.g. in JIT interpreter)
+""",
+)
+
+add_docstr(
+ torch.gt,
+ r"""
+gt(input, other, *, out=None) -> Tensor
+
+Computes :math:`\text{input} > \text{other}` element-wise.
+"""
+ + r"""
+
+The second argument can be a number or a tensor whose shape is
+:ref:`broadcastable <broadcasting-semantics>` with the first argument.
+
+Args:
+ input (Tensor): the tensor to compare
+ other (Tensor or float): the tensor or value to compare
+
+Keyword args:
+ {out}
+
+Returns:
+ A boolean tensor that is True where :attr:`input` is greater than :attr:`other` and False elsewhere
+
+Example::
+
+ >>> torch.gt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
+ tensor([[False, True], [False, False]])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.greater,
+ r"""
+greater(input, other, *, out=None) -> Tensor
+
+Alias for :func:`torch.gt`.
+""",
+)
+
+add_docstr(
+ torch.histc,
+ r"""
+histc(input, bins=100, min=0, max=0, *, out=None) -> Tensor
+
+Computes the histogram of a tensor.
+
+The elements are sorted into equal width bins between :attr:`min` and
+:attr:`max`. If :attr:`min` and :attr:`max` are both zero, the minimum and
+maximum values of the data are used.
+
+Elements lower than min and higher than max and ``NaN`` elements are ignored.
+
+Args:
+ {input}
+ bins (int): number of histogram bins
+ min (Scalar): lower end of the range (inclusive)
+ max (Scalar): upper end of the range (inclusive)
+
+Keyword args:
+ {out}
+
+Returns:
+ Tensor: Histogram represented as a tensor
+
+Example::
+
+ >>> torch.histc(torch.tensor([1., 2, 1]), bins=4, min=0, max=3)
+ tensor([ 0., 2., 1., 0.])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.histogram,
+ r"""
+histogram(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor)
+
+Computes a histogram of the values in a tensor.
+
+:attr:`bins` can be an integer or a 1D tensor.
+
+If :attr:`bins` is an int, it specifies the number of equal-width bins.
+By default, the lower and upper range of the bins is determined by the
+minimum and maximum elements of the input tensor. The :attr:`range`
+argument can be provided to specify a range for the bins.
+
+If :attr:`bins` is a 1D tensor, it specifies the sequence of bin edges
+including the rightmost edge. It should contain at least 2 elements
+and its elements should be increasing.
+
+Args:
+ {input}
+ bins: int or 1D Tensor. If int, defines the number of equal-width bins. If tensor,
+ defines the sequence of bin edges including the rightmost edge.
+
+Keyword args:
+ range (tuple of float): Defines the range of the bins.
+ weight (Tensor): If provided, weight should have the same shape as input. Each value in
+ input contributes its associated weight towards its bin's result.
+ density (bool): If False, the result will contain the count (or total weight) in each bin.
+ If True, the result is the value of the probability density function over the bins,
+ normalized such that the integral over the range of the bins is 1.
+ {out} (tuple, optional): The result tuple of two output tensors (hist, bin_edges).
+
+Returns:
+ hist (Tensor): 1D Tensor containing the values of the histogram.
+ bin_edges(Tensor): 1D Tensor containing the edges of the histogram bins.
+
+Example::
+
+ >>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.]))
+ (tensor([ 0., 5., 2., 0.]), tensor([0., 0.75, 1.5, 2.25, 3.]))
+ >>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.]), density=True)
+ (tensor([ 0., 0.9524, 0.3810, 0.]), tensor([0., 0.75, 1.5, 2.25, 3.]))
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.histogramdd,
+ r"""
+histogramdd(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor[])
+
+Computes a multi-dimensional histogram of the values in a tensor.
+
+Interprets the elements of an input tensor whose innermost dimension has size N
+as a collection of N-dimensional points. Maps each of the points into a set of
+N-dimensional bins and returns the number of points (or total weight) in each bin.
+
+:attr:`input` must be a tensor with at least 2 dimensions.
+If input has shape (M, N), each of its M rows defines a point in N-dimensional space.
+If input has three or more dimensions, all but the last dimension are flattened.
+
+Each dimension is independently associated with its own strictly increasing sequence
+of bin edges. Bin edges may be specified explicitly by passing a sequence of 1D
+tensors. Alternatively, bin edges may be constructed automatically by passing a
+sequence of integers specifying the number of equal-width bins in each dimension.
+
+For each N-dimensional point in input:
+ - Each of its coordinates is binned independently among the bin edges
+ corresponding to its dimension
+ - Binning results are combined to identify the N-dimensional bin (if any)
+ into which the point falls
+ - If the point falls into a bin, the bin's count (or total weight) is incremented
+ - Points which do not fall into any bin do not contribute to the output
+
+:attr:`bins` can be a sequence of N 1D tensors, a sequence of N ints, or a single int.
+
+If :attr:`bins` is a sequence of N 1D tensors, it explicitly specifies the N sequences
+of bin edges. Each 1D tensor should contain a strictly increasing sequence with at
+least one element. A sequence of K bin edges defines K-1 bins, explicitly specifying
+the left and right edges of all bins. Every bin is exclusive of its left edge. Only
+the rightmost bin is inclusive of its right edge.
+
+If :attr:`bins` is a sequence of N ints, it specifies the number of equal-width bins
+in each dimension. By default, the leftmost and rightmost bin edges in each dimension
+are determined by the minimum and maximum elements of the input tensor in the
+corresponding dimension. The :attr:`range` argument can be provided to manually
+specify the leftmost and rightmost bin edges in each dimension.
+
+If :attr:`bins` is an int, it specifies the number of equal-width bins for all dimensions.
+
+.. note::
+ See also :func:`torch.histogram`, which specifically computes 1D histograms.
+ While :func:`torch.histogramdd` infers the dimensionality of its bins and
+ binned values from the shape of :attr:`input`, :func:`torch.histogram`
+ accepts and flattens :attr:`input` of any shape.
+
+Args:
+ {input}
+ bins: Tensor[], int[], or int.
+ If Tensor[], defines the sequences of bin edges.
+ If int[], defines the number of equal-width bins in each dimension.
+ If int, defines the number of equal-width bins for all dimensions.
+Keyword args:
+ range (sequence of float): Defines the leftmost and rightmost bin edges
+ in each dimension.
+ weight (Tensor): By default, each value in the input has weight 1. If a weight
+ tensor is passed, each N-dimensional coordinate in input
+ contributes its associated weight towards its bin's result.
+ The weight tensor should have the same shape as the :attr:`input`
+ tensor excluding its innermost dimension N.
+ density (bool): If False (default), the result will contain the count (or total weight)
+ in each bin. If True, each count (weight) is divided by the total count
+ (total weight), then divided by the volume of its associated bin.
+Returns:
+ hist (Tensor): N-dimensional Tensor containing the values of the histogram.
+ bin_edges(Tensor[]): sequence of N 1D Tensors containing the bin edges.
+
+Example::
+ >>> torch.histogramdd(torch.tensor([[0., 1.], [1., 0.], [2., 0.], [2., 2.]]), bins=[3, 3],
+ ... weight=torch.tensor([1., 2., 4., 8.]))
+ torch.return_types.histogramdd(
+ hist=tensor([[0., 1., 0.],
+ [2., 0., 0.],
+ [4., 0., 8.]]),
+ bin_edges=(tensor([0.0000, 0.6667, 1.3333, 2.0000]),
+ tensor([0.0000, 0.6667, 1.3333, 2.0000])))
+
+ >>> torch.histogramdd(torch.tensor([[0., 0.], [1., 1.], [2., 2.]]), bins=[2, 2],
+ ... range=[0., 1., 0., 1.], density=True)
+ torch.return_types.histogramdd(
+ hist=tensor([[2., 0.],
+ [0., 2.]]),
+ bin_edges=(tensor([0.0000, 0.5000, 1.0000]),
+ tensor([0.0000, 0.5000, 1.0000])))
+
+""".format(
+ **common_args
+ ),
+)
+# TODO: Fix via https://github.com/pytorch/pytorch/issues/75798
+torch.histogramdd.__module__ = "torch"
+
+add_docstr(
+ torch.hypot,
+ r"""
+hypot(input, other, *, out=None) -> Tensor
+
+Given the legs of a right triangle, return its hypotenuse.
+
+.. math::
+ \text{out}_{i} = \sqrt{\text{input}_{i}^{2} + \text{other}_{i}^{2}}
+
+The shapes of ``input`` and ``other`` must be
+:ref:`broadcastable <broadcasting-semantics>`.
+"""
+ + r"""
+Args:
+ input (Tensor): the first input tensor
+ other (Tensor): the second input tensor
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.hypot(torch.tensor([4.0]), torch.tensor([3.0, 4.0, 5.0]))
+ tensor([5.0000, 5.6569, 6.4031])
+
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.i0,
+ r"""
+i0(input, *, out=None) -> Tensor
+
+Alias for :func:`torch.special.i0`.
+""",
+)
+
+add_docstr(
+ torch.igamma,
+ r"""
+igamma(input, other, *, out=None) -> Tensor
+
+Alias for :func:`torch.special.gammainc`.
+""",
+)
+
+add_docstr(
+ torch.igammac,
+ r"""
+igammac(input, other, *, out=None) -> Tensor
+
+Alias for :func:`torch.special.gammaincc`.
+""",
+)
+
+add_docstr(
+ torch.index_select,
+ r"""
+index_select(input, dim, index, *, out=None) -> Tensor
+
+Returns a new tensor which indexes the :attr:`input` tensor along dimension
+:attr:`dim` using the entries in :attr:`index` which is a `LongTensor`.
+
+The returned tensor has the same number of dimensions as the original tensor
+(:attr:`input`). The :attr:`dim`\ th dimension has the same size as the length
+of :attr:`index`; other dimensions have the same size as in the original tensor.
+
+.. note:: The returned tensor does **not** use the same storage as the original
+ tensor. If :attr:`out` has a different shape than expected, we
+ silently change it to the correct shape, reallocating the underlying
+ storage if necessary.
+
+Args:
+ {input}
+ dim (int): the dimension in which we index
+ index (IntTensor or LongTensor): the 1-D tensor containing the indices to index
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> x = torch.randn(3, 4)
+ >>> x
+ tensor([[ 0.1427, 0.0231, -0.5414, -1.0009],
+ [-0.4664, 0.2647, -0.1228, -1.1068],
+ [-1.1734, -0.6571, 0.7230, -0.6004]])
+ >>> indices = torch.tensor([0, 2])
+ >>> torch.index_select(x, 0, indices)
+ tensor([[ 0.1427, 0.0231, -0.5414, -1.0009],
+ [-1.1734, -0.6571, 0.7230, -0.6004]])
+ >>> torch.index_select(x, 1, indices)
+ tensor([[ 0.1427, -0.5414],
+ [-0.4664, -0.1228],
+ [-1.1734, 0.7230]])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.inverse,
+ r"""
+inverse(input, *, out=None) -> Tensor
+
+Alias for :func:`torch.linalg.inv`
+""",
+)
+
+add_docstr(
+ torch.isin,
+ r"""
+isin(elements, test_elements, *, assume_unique=False, invert=False) -> Tensor
+
+Tests if each element of :attr:`elements` is in :attr:`test_elements`. Returns
+a boolean tensor of the same shape as :attr:`elements` that is True for elements
+in :attr:`test_elements` and False otherwise.
+
+.. note::
+ One of :attr:`elements` or :attr:`test_elements` can be a scalar, but not both.
+
+Args:
+ elements (Tensor or Scalar): Input elements
+ test_elements (Tensor or Scalar): Values against which to test for each input element
+ assume_unique (bool, optional): If True, assumes both :attr:`elements` and
+ :attr:`test_elements` contain unique elements, which can speed up the
+ calculation. Default: False
+ invert (bool, optional): If True, inverts the boolean return tensor, resulting in True
+ values for elements *not* in :attr:`test_elements`. Default: False
+
+Returns:
+ A boolean tensor of the same shape as :attr:`elements` that is True for elements in
+ :attr:`test_elements` and False otherwise
+
+Example:
+ >>> torch.isin(torch.tensor([[1, 2], [3, 4]]), torch.tensor([2, 3]))
+ tensor([[False, True],
+ [ True, False]])
+""",
+)
+
+add_docstr(
+ torch.isinf,
+ r"""
+isinf(input) -> Tensor
+
+Tests if each element of :attr:`input` is infinite
+(positive or negative infinity) or not.
+
+.. note::
+ Complex values are infinite when their real or imaginary part is
+ infinite.
+
+Args:
+ {input}
+
+Returns:
+ A boolean tensor that is True where :attr:`input` is infinite and False elsewhere
+
+Example::
+
+ >>> torch.isinf(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]))
+ tensor([False, True, False, True, False])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.isposinf,
+ r"""
+isposinf(input, *, out=None) -> Tensor
+Tests if each element of :attr:`input` is positive infinity or not.
+
+Args:
+ {input}
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.tensor([-float('inf'), float('inf'), 1.2])
+ >>> torch.isposinf(a)
+ tensor([False, True, False])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.isneginf,
+ r"""
+isneginf(input, *, out=None) -> Tensor
+Tests if each element of :attr:`input` is negative infinity or not.
+
+Args:
+ {input}
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.tensor([-float('inf'), float('inf'), 1.2])
+ >>> torch.isneginf(a)
+ tensor([ True, False, False])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.isclose,
+ r"""
+isclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor
+
+Returns a new tensor with boolean elements representing if each element of
+:attr:`input` is "close" to the corresponding element of :attr:`other`.
+Closeness is defined as:
+
+.. math::
+ \lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert
+"""
+ + r"""
+
+where :attr:`input` and :attr:`other` are finite. Where :attr:`input`
+and/or :attr:`other` are nonfinite they are close if and only if
+they are equal, with NaNs being considered equal to each other when
+:attr:`equal_nan` is True.
+
+Args:
+ input (Tensor): first tensor to compare
+ other (Tensor): second tensor to compare
+ atol (float, optional): absolute tolerance. Default: 1e-08
+ rtol (float, optional): relative tolerance. Default: 1e-05
+ equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. Default: ``False``
+
+Examples::
+
+ >>> torch.isclose(torch.tensor((1., 2, 3)), torch.tensor((1 + 1e-10, 3, 4)))
+ tensor([ True, False, False])
+ >>> torch.isclose(torch.tensor((float('inf'), 4)), torch.tensor((float('inf'), 6)), rtol=.5)
+ tensor([True, True])
+""",
+)
+
+add_docstr(
+ torch.isfinite,
+ r"""
+isfinite(input) -> Tensor
+
+Returns a new tensor with boolean elements representing if each element is `finite` or not.
+
+Real values are finite when they are not NaN, negative infinity, or infinity.
+Complex values are finite when both their real and imaginary parts are finite.
+
+Args:
+ {input}
+
+Returns:
+ A boolean tensor that is True where :attr:`input` is finite and False elsewhere
+
+Example::
+
+ >>> torch.isfinite(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]))
+ tensor([True, False, True, False, False])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.isnan,
+ r"""
+isnan(input) -> Tensor
+
+Returns a new tensor with boolean elements representing if each element of :attr:`input`
+is NaN or not. Complex values are considered NaN when either their real
+and/or imaginary part is NaN.
+
+Arguments:
+ {input}
+
+Returns:
+ A boolean tensor that is True where :attr:`input` is NaN and False elsewhere
+
+Example::
+
+ >>> torch.isnan(torch.tensor([1, float('nan'), 2]))
+ tensor([False, True, False])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.isreal,
+ r"""
+isreal(input) -> Tensor
+
+Returns a new tensor with boolean elements representing if each element of :attr:`input` is real-valued or not.
+All real-valued types are considered real. Complex values are considered real when their imaginary part is 0.
+
+Arguments:
+ {input}
+
+Returns:
+ A boolean tensor that is True where :attr:`input` is real and False elsewhere
+
+Example::
+
+ >>> torch.isreal(torch.tensor([1, 1+1j, 2+0j]))
+ tensor([True, False, True])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.is_floating_point,
+ r"""
+is_floating_point(input) -> (bool)
+
+Returns True if the data type of :attr:`input` is a floating point data type i.e.,
+one of ``torch.float64``, ``torch.float32``, ``torch.float16``, and ``torch.bfloat16``.
+
+Args:
+ {input}
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.is_complex,
+ r"""
+is_complex(input) -> (bool)
+
+Returns True if the data type of :attr:`input` is a complex data type i.e.,
+one of ``torch.complex64``, and ``torch.complex128``.
+
+Args:
+ {input}
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.is_grad_enabled,
+ r"""
+is_grad_enabled() -> (bool)
+
+Returns True if grad mode is currently enabled.
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.is_inference_mode_enabled,
+ r"""
+is_inference_mode_enabled() -> (bool)
+
+Returns True if inference mode is currently enabled.
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.is_inference,
+ r"""
+is_inference(input) -> (bool)
+
+Returns True if :attr:`input` is an inference tensor.
+
+A non-view tensor is an inference tensor if and only if it was
+allocated during inference mode. A view tensor is an inference
+tensor if and only if the tensor it is a view of is an inference tensor.
+
+For details on inference mode please see
+`Inference Mode <https://pytorch.org/cppdocs/notes/inference_mode.html>`_.
+
+Args:
+ {input}
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.is_conj,
+ r"""
+is_conj(input) -> (bool)
+
+Returns True if the :attr:`input` is a conjugated tensor, i.e. its conjugate bit is set to `True`.
+
+Args:
+ {input}
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.is_nonzero,
+ r"""
+is_nonzero(input) -> (bool)
+
+Returns True if the :attr:`input` is a single element tensor which is not equal to zero
+after type conversions.
+i.e. not equal to ``torch.tensor([0.])`` or ``torch.tensor([0])`` or
+``torch.tensor([False])``.
+Throws a ``RuntimeError`` if ``torch.numel() != 1`` (even in case
+of sparse tensors).
+
+Args:
+ {input}
+
+Examples::
+
+ >>> torch.is_nonzero(torch.tensor([0.]))
+ False
+ >>> torch.is_nonzero(torch.tensor([1.5]))
+ True
+ >>> torch.is_nonzero(torch.tensor([False]))
+ False
+ >>> torch.is_nonzero(torch.tensor([3]))
+ True
+ >>> torch.is_nonzero(torch.tensor([1, 3, 5]))
+ Traceback (most recent call last):
+ ...
+ RuntimeError: bool value of Tensor with more than one value is ambiguous
+ >>> torch.is_nonzero(torch.tensor([]))
+ Traceback (most recent call last):
+ ...
+ RuntimeError: bool value of Tensor with no values is ambiguous
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.kron,
+ r"""
+kron(input, other, *, out=None) -> Tensor
+
+Computes the Kronecker product, denoted by :math:`\otimes`, of :attr:`input` and :attr:`other`.
+
+If :attr:`input` is a :math:`(a_0 \times a_1 \times \dots \times a_n)` tensor and :attr:`other` is a
+:math:`(b_0 \times b_1 \times \dots \times b_n)` tensor, the result will be a
+:math:`(a_0*b_0 \times a_1*b_1 \times \dots \times a_n*b_n)` tensor with the following entries:
+
+.. math::
+ (\text{input} \otimes \text{other})_{k_0, k_1, \dots, k_n} =
+ \text{input}_{i_0, i_1, \dots, i_n} * \text{other}_{j_0, j_1, \dots, j_n},
+
+where :math:`k_t = i_t * b_t + j_t` for :math:`0 \leq t \leq n`.
+If one tensor has fewer dimensions than the other it is unsqueezed until it has the same number of dimensions.
+
+Supports real-valued and complex-valued inputs.
+
+.. note::
+ This function generalizes the typical definition of the Kronecker product for two matrices to two tensors,
+ as described above. When :attr:`input` is a :math:`(m \times n)` matrix and :attr:`other` is a
+ :math:`(p \times q)` matrix, the result will be a :math:`(p*m \times q*n)` block matrix:
+
+ .. math::
+ \mathbf{A} \otimes \mathbf{B}=\begin{bmatrix}
+ a_{11} \mathbf{B} & \cdots & a_{1 n} \mathbf{B} \\
+ \vdots & \ddots & \vdots \\
+ a_{m 1} \mathbf{B} & \cdots & a_{m n} \mathbf{B} \end{bmatrix}
+
+ where :attr:`input` is :math:`\mathbf{A}` and :attr:`other` is :math:`\mathbf{B}`.
+
+Arguments:
+ input (Tensor)
+ other (Tensor)
+
+Keyword args:
+ out (Tensor, optional): The output tensor. Ignored if ``None``. Default: ``None``
+
+Examples::
+
+ >>> mat1 = torch.eye(2)
+ >>> mat2 = torch.ones(2, 2)
+ >>> torch.kron(mat1, mat2)
+ tensor([[1., 1., 0., 0.],
+ [1., 1., 0., 0.],
+ [0., 0., 1., 1.],
+ [0., 0., 1., 1.]])
+
+ >>> mat1 = torch.eye(2)
+ >>> mat2 = torch.arange(1, 5).reshape(2, 2)
+ >>> torch.kron(mat1, mat2)
+ tensor([[1., 2., 0., 0.],
+ [3., 4., 0., 0.],
+ [0., 0., 1., 2.],
+ [0., 0., 3., 4.]])
+""",
+)
+
+add_docstr(
+ torch.kthvalue,
+ r"""
+kthvalue(input, k, dim=None, keepdim=False, *, out=None) -> (Tensor, LongTensor)
+
+Returns a namedtuple ``(values, indices)`` where ``values`` is the :attr:`k` th
+smallest element of each row of the :attr:`input` tensor in the given dimension
+:attr:`dim`. And ``indices`` is the index location of each element found.
+
+If :attr:`dim` is not given, the last dimension of the `input` is chosen.
+
+If :attr:`keepdim` is ``True``, both the :attr:`values` and :attr:`indices` tensors
+are the same size as :attr:`input`, except in the dimension :attr:`dim` where
+they are of size 1. Otherwise, :attr:`dim` is squeezed
+(see :func:`torch.squeeze`), resulting in both the :attr:`values` and
+:attr:`indices` tensors having 1 fewer dimension than the :attr:`input` tensor.
+
+.. note::
+ When :attr:`input` is a CUDA tensor and there are multiple valid
+ :attr:`k` th values, this function may nondeterministically return
+ :attr:`indices` for any of them.
+
+Args:
+ {input}
+ k (int): k for the k-th smallest element
+ dim (int, optional): the dimension to find the kth value along
+ {keepdim}
+
+Keyword args:
+ out (tuple, optional): the output tuple of (Tensor, LongTensor)
+ can be optionally given to be used as output buffers
+
+Example::
+
+ >>> x = torch.arange(1., 6.)
+ >>> x
+ tensor([ 1., 2., 3., 4., 5.])
+ >>> torch.kthvalue(x, 4)
+ torch.return_types.kthvalue(values=tensor(4.), indices=tensor(3))
+
+ >>> x=torch.arange(1.,7.).resize_(2,3)
+ >>> x
+ tensor([[ 1., 2., 3.],
+ [ 4., 5., 6.]])
+ >>> torch.kthvalue(x, 2, 0, True)
+ torch.return_types.kthvalue(values=tensor([[4., 5., 6.]]), indices=tensor([[1, 1, 1]]))
+""".format(
+ **single_dim_common
+ ),
+)
+
+add_docstr(
+ torch.lcm,
+ r"""
+lcm(input, other, *, out=None) -> Tensor
+
+Computes the element-wise least common multiple (LCM) of :attr:`input` and :attr:`other`.
+
+Both :attr:`input` and :attr:`other` must have integer types.
+
+.. note::
+ This defines :math:`lcm(0, 0) = 0` and :math:`lcm(0, a) = 0`.
+
+Args:
+ {input}
+ other (Tensor): the second input tensor
+
+Keyword arguments:
+ {out}
+
+Example::
+
+ >>> a = torch.tensor([5, 10, 15])
+ >>> b = torch.tensor([3, 4, 5])
+ >>> torch.lcm(a, b)
+ tensor([15, 20, 15])
+ >>> c = torch.tensor([3])
+ >>> torch.lcm(a, c)
+ tensor([15, 30, 15])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.ldexp,
+ r"""
+ldexp(input, other, *, out=None) -> Tensor
+
+Multiplies :attr:`input` by 2 ** :attr:`other`.
+
+.. math::
+ \text{{out}}_i = \text{{input}}_i * 2^\text{{other}}_i
+"""
+ + r"""
+
+Typically this function is used to construct floating point numbers by multiplying
+mantissas in :attr:`input` with integral powers of two created from the exponents
+in :attr:`other`.
+
+Args:
+ {input}
+ other (Tensor): a tensor of exponents, typically integers.
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> torch.ldexp(torch.tensor([1.]), torch.tensor([1]))
+ tensor([2.])
+ >>> torch.ldexp(torch.tensor([1.0]), torch.tensor([1, 2, 3, 4]))
+ tensor([ 2., 4., 8., 16.])
+
+
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.le,
+ r"""
+le(input, other, *, out=None) -> Tensor
+
+Computes :math:`\text{input} \leq \text{other}` element-wise.
+"""
+ + r"""
+
+The second argument can be a number or a tensor whose shape is
+:ref:`broadcastable <broadcasting-semantics>` with the first argument.
+
+Args:
+ input (Tensor): the tensor to compare
+ other (Tensor or Scalar): the tensor or value to compare
+
+Keyword args:
+ {out}
+
+Returns:
+ A boolean tensor that is True where :attr:`input` is less than or equal to
+ :attr:`other` and False elsewhere
+
+Example::
+
+ >>> torch.le(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
+ tensor([[True, False], [True, True]])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.less_equal,
+ r"""
+less_equal(input, other, *, out=None) -> Tensor
+
+Alias for :func:`torch.le`.
+""",
+)
+
+add_docstr(
+ torch.lerp,
+ r"""
+lerp(input, end, weight, *, out=None)
+
+Does a linear interpolation of two tensors :attr:`start` (given by :attr:`input`) and :attr:`end` based
+on a scalar or tensor :attr:`weight` and returns the resulting :attr:`out` tensor.
+
+.. math::
+ \text{out}_i = \text{start}_i + \text{weight}_i \times (\text{end}_i - \text{start}_i)
+"""
+ + r"""
+The shapes of :attr:`start` and :attr:`end` must be
+:ref:`broadcastable <broadcasting-semantics>`. If :attr:`weight` is a tensor, then
+the shapes of :attr:`weight`, :attr:`start`, and :attr:`end` must be :ref:`broadcastable <broadcasting-semantics>`.
+
+Args:
+ input (Tensor): the tensor with the starting points
+ end (Tensor): the tensor with the ending points
+ weight (float or tensor): the weight for the interpolation formula
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> start = torch.arange(1., 5.)
+ >>> end = torch.empty(4).fill_(10)
+ >>> start
+ tensor([ 1., 2., 3., 4.])
+ >>> end
+ tensor([ 10., 10., 10., 10.])
+ >>> torch.lerp(start, end, 0.5)
+ tensor([ 5.5000, 6.0000, 6.5000, 7.0000])
+ >>> torch.lerp(start, end, torch.full_like(start, 0.5))
+ tensor([ 5.5000, 6.0000, 6.5000, 7.0000])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.lgamma,
+ r"""
+lgamma(input, *, out=None) -> Tensor
+
+Computes the natural logarithm of the absolute value of the gamma function on :attr:`input`.
+
+.. math::
+ \text{out}_{i} = \ln |\Gamma(\text{input}_{i})|
+"""
+ + """
+Args:
+ {input}
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.arange(0.5, 2, 0.5)
+ >>> torch.lgamma(a)
+ tensor([ 0.5724, 0.0000, -0.1208])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.linspace,
+ r"""
+linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
+
+Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
+spaced from :attr:`start` to :attr:`end`, inclusive. That is, the value are:
+
+.. math::
+ (\text{start},
+ \text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1},
+ \ldots,
+ \text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1},
+ \text{end})
+"""
+ + """
+
+From PyTorch 1.11 linspace requires the steps argument. Use steps=100 to restore the previous behavior.
+
+Args:
+ start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
+ end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
+ steps (int): size of the constructed tensor
+
+Keyword arguments:
+ {out}
+ dtype (torch.dtype, optional): the data type to perform the computation in.
+ Default: if None, uses the global default dtype (see torch.get_default_dtype())
+ when both :attr:`start` and :attr:`end` are real,
+ and corresponding complex dtype when either is complex.
+ {layout}
+ {device}
+ {requires_grad}
+
+
+Example::
+
+ >>> torch.linspace(3, 10, steps=5)
+ tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000])
+ >>> torch.linspace(-10, 10, steps=5)
+ tensor([-10., -5., 0., 5., 10.])
+ >>> torch.linspace(start=-10, end=10, steps=5)
+ tensor([-10., -5., 0., 5., 10.])
+ >>> torch.linspace(start=-10, end=10, steps=1)
+ tensor([-10.])
+""".format(
+ **factory_common_args
+ ),
+)
+
+add_docstr(
+ torch.log,
+ r"""
+log(input, *, out=None) -> Tensor
+
+Returns a new tensor with the natural logarithm of the elements
+of :attr:`input`.
+
+.. math::
+ y_{i} = \log_{e} (x_{i})
+"""
+ + r"""
+
+Args:
+ {input}
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.rand(5) * 5
+ >>> a
+ tensor([4.7767, 4.3234, 1.2156, 0.2411, 4.5739])
+ >>> torch.log(a)
+ tensor([ 1.5637, 1.4640, 0.1952, -1.4226, 1.5204])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.log10,
+ r"""
+log10(input, *, out=None) -> Tensor
+
+Returns a new tensor with the logarithm to the base 10 of the elements
+of :attr:`input`.
+
+.. math::
+ y_{i} = \log_{10} (x_{i})
+"""
+ + r"""
+
+Args:
+ {input}
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.rand(5)
+ >>> a
+ tensor([ 0.5224, 0.9354, 0.7257, 0.1301, 0.2251])
+
+
+ >>> torch.log10(a)
+ tensor([-0.2820, -0.0290, -0.1392, -0.8857, -0.6476])
+
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.log1p,
+ r"""
+log1p(input, *, out=None) -> Tensor
+
+Returns a new tensor with the natural logarithm of (1 + :attr:`input`).
+
+.. math::
+ y_i = \log_{e} (x_i + 1)
+"""
+ + r"""
+.. note:: This function is more accurate than :func:`torch.log` for small
+ values of :attr:`input`
+
+Args:
+ {input}
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.randn(5)
+ >>> a
+ tensor([-1.0090, -0.9923, 1.0249, -0.5372, 0.2492])
+ >>> torch.log1p(a)
+ tensor([ nan, -4.8653, 0.7055, -0.7705, 0.2225])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.log2,
+ r"""
+log2(input, *, out=None) -> Tensor
+
+Returns a new tensor with the logarithm to the base 2 of the elements
+of :attr:`input`.
+
+.. math::
+ y_{i} = \log_{2} (x_{i})
+"""
+ + r"""
+
+Args:
+ {input}
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.rand(5)
+ >>> a
+ tensor([ 0.8419, 0.8003, 0.9971, 0.5287, 0.0490])
+
+
+ >>> torch.log2(a)
+ tensor([-0.2483, -0.3213, -0.0042, -0.9196, -4.3504])
+
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.logaddexp,
+ r"""
+logaddexp(input, other, *, out=None) -> Tensor
+
+Logarithm of the sum of exponentiations of the inputs.
+
+Calculates pointwise :math:`\log\left(e^x + e^y\right)`. This function is useful
+in statistics where the calculated probabilities of events may be so small as to
+exceed the range of normal floating point numbers. In such cases the logarithm
+of the calculated probability is stored. This function allows adding
+probabilities stored in such a fashion.
+
+This op should be disambiguated with :func:`torch.logsumexp` which performs a
+reduction on a single tensor.
+
+Args:
+ {input}
+ other (Tensor): the second input tensor
+
+Keyword arguments:
+ {out}
+
+Example::
+
+ >>> torch.logaddexp(torch.tensor([-1.0]), torch.tensor([-1.0, -2, -3]))
+ tensor([-0.3069, -0.6867, -0.8731])
+ >>> torch.logaddexp(torch.tensor([-100.0, -200, -300]), torch.tensor([-1.0, -2, -3]))
+ tensor([-1., -2., -3.])
+ >>> torch.logaddexp(torch.tensor([1.0, 2000, 30000]), torch.tensor([-1.0, -2, -3]))
+ tensor([1.1269e+00, 2.0000e+03, 3.0000e+04])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.logaddexp2,
+ r"""
+logaddexp2(input, other, *, out=None) -> Tensor
+
+Logarithm of the sum of exponentiations of the inputs in base-2.
+
+Calculates pointwise :math:`\log_2\left(2^x + 2^y\right)`. See
+:func:`torch.logaddexp` for more details.
+
+Args:
+ {input}
+ other (Tensor): the second input tensor
+
+Keyword arguments:
+ {out}
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.xlogy,
+ r"""
+xlogy(input, other, *, out=None) -> Tensor
+
+Alias for :func:`torch.special.xlogy`.
+""",
+)
+
+add_docstr(
+ torch.logical_and,
+ r"""
+logical_and(input, other, *, out=None) -> Tensor
+
+Computes the element-wise logical AND of the given input tensors. Zeros are treated as ``False`` and nonzeros are
+treated as ``True``.
+
+Args:
+ {input}
+ other (Tensor): the tensor to compute AND with
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> torch.logical_and(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
+ tensor([ True, False, False])
+ >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
+ >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
+ >>> torch.logical_and(a, b)
+ tensor([False, False, True, False])
+ >>> torch.logical_and(a.double(), b.double())
+ tensor([False, False, True, False])
+ >>> torch.logical_and(a.double(), b)
+ tensor([False, False, True, False])
+ >>> torch.logical_and(a, b, out=torch.empty(4, dtype=torch.bool))
+ tensor([False, False, True, False])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.logical_not,
+ r"""
+logical_not(input, *, out=None) -> Tensor
+
+Computes the element-wise logical NOT of the given input tensor. If not specified, the output tensor will have the bool
+dtype. If the input tensor is not a bool tensor, zeros are treated as ``False`` and non-zeros are treated as ``True``.
+
+Args:
+ {input}
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> torch.logical_not(torch.tensor([True, False]))
+ tensor([False, True])
+ >>> torch.logical_not(torch.tensor([0, 1, -10], dtype=torch.int8))
+ tensor([ True, False, False])
+ >>> torch.logical_not(torch.tensor([0., 1.5, -10.], dtype=torch.double))
+ tensor([ True, False, False])
+ >>> torch.logical_not(torch.tensor([0., 1., -10.], dtype=torch.double), out=torch.empty(3, dtype=torch.int16))
+ tensor([1, 0, 0], dtype=torch.int16)
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.logical_or,
+ r"""
+logical_or(input, other, *, out=None) -> Tensor
+
+Computes the element-wise logical OR of the given input tensors. Zeros are treated as ``False`` and nonzeros are
+treated as ``True``.
+
+Args:
+ {input}
+ other (Tensor): the tensor to compute OR with
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> torch.logical_or(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
+ tensor([ True, False, True])
+ >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
+ >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
+ >>> torch.logical_or(a, b)
+ tensor([ True, True, True, False])
+ >>> torch.logical_or(a.double(), b.double())
+ tensor([ True, True, True, False])
+ >>> torch.logical_or(a.double(), b)
+ tensor([ True, True, True, False])
+ >>> torch.logical_or(a, b, out=torch.empty(4, dtype=torch.bool))
+ tensor([ True, True, True, False])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.logical_xor,
+ r"""
+logical_xor(input, other, *, out=None) -> Tensor
+
+Computes the element-wise logical XOR of the given input tensors. Zeros are treated as ``False`` and nonzeros are
+treated as ``True``.
+
+Args:
+ {input}
+ other (Tensor): the tensor to compute XOR with
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> torch.logical_xor(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
+ tensor([False, False, True])
+ >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
+ >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
+ >>> torch.logical_xor(a, b)
+ tensor([ True, True, False, False])
+ >>> torch.logical_xor(a.double(), b.double())
+ tensor([ True, True, False, False])
+ >>> torch.logical_xor(a.double(), b)
+ tensor([ True, True, False, False])
+ >>> torch.logical_xor(a, b, out=torch.empty(4, dtype=torch.bool))
+ tensor([ True, True, False, False])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.logspace,
+ """
+logspace(start, end, steps, base=10.0, *, \
+ out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
+"""
+ + r"""
+
+Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
+spaced from :math:`{{\text{{base}}}}^{{\text{{start}}}}` to
+:math:`{{\text{{base}}}}^{{\text{{end}}}}`, inclusive, on a logarithmic scale
+with base :attr:`base`. That is, the values are:
+
+.. math::
+ (\text{base}^{\text{start}},
+ \text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
+ \ldots,
+ \text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
+ \text{base}^{\text{end}})
+"""
+ + """
+
+
+From PyTorch 1.11 logspace requires the steps argument. Use steps=100 to restore the previous behavior.
+
+Args:
+ start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
+ end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
+ steps (int): size of the constructed tensor
+ base (float, optional): base of the logarithm function. Default: ``10.0``.
+
+Keyword arguments:
+ {out}
+ dtype (torch.dtype, optional): the data type to perform the computation in.
+ Default: if None, uses the global default dtype (see torch.get_default_dtype())
+ when both :attr:`start` and :attr:`end` are real,
+ and corresponding complex dtype when either is complex.
+ {layout}
+ {device}
+ {requires_grad}
+
+Example::
+
+ >>> torch.logspace(start=-10, end=10, steps=5)
+ tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10])
+ >>> torch.logspace(start=0.1, end=1.0, steps=5)
+ tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000])
+ >>> torch.logspace(start=0.1, end=1.0, steps=1)
+ tensor([1.2589])
+ >>> torch.logspace(start=2, end=2, steps=1, base=2)
+ tensor([4.0])
+""".format(
+ **factory_common_args
+ ),
+)
+
+add_docstr(
+ torch.logsumexp,
+ r"""
+logsumexp(input, dim, keepdim=False, *, out=None)
+
+Returns the log of summed exponentials of each row of the :attr:`input`
+tensor in the given dimension :attr:`dim`. The computation is numerically
+stabilized.
+
+For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is
+
+ .. math::
+ \text{{logsumexp}}(x)_{{i}} = \log \sum_j \exp(x_{{ij}})
+
+{keepdim_details}
+
+Args:
+ {input}
+ {opt_dim}
+ {keepdim}
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.randn(3, 3)
+ >>> torch.logsumexp(a, 1)
+ tensor([1.4907, 1.0593, 1.5696])
+ >>> torch.dist(torch.logsumexp(a, 1), torch.log(torch.sum(torch.exp(a), 1)))
+ tensor(1.6859e-07)
+""".format(
+ **multi_dim_common
+ ),
+)
+
+add_docstr(
+ torch.lt,
+ r"""
+lt(input, other, *, out=None) -> Tensor
+
+Computes :math:`\text{input} < \text{other}` element-wise.
+"""
+ + r"""
+
+The second argument can be a number or a tensor whose shape is
+:ref:`broadcastable <broadcasting-semantics>` with the first argument.
+
+Args:
+ input (Tensor): the tensor to compare
+ other (Tensor or float): the tensor or value to compare
+
+Keyword args:
+ {out}
+
+Returns:
+ A boolean tensor that is True where :attr:`input` is less than :attr:`other` and False elsewhere
+
+Example::
+
+ >>> torch.lt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
+ tensor([[False, False], [True, False]])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.lu_unpack,
+ r"""
+lu_unpack(LU_data, LU_pivots, unpack_data=True, unpack_pivots=True, *, out=None) -> (Tensor, Tensor, Tensor)
+
+Unpacks the LU decomposition returned by :func:`~linalg.lu_factor` into the `P, L, U` matrices.
+
+.. seealso::
+
+ :func:`~linalg.lu` returns the matrices from the LU decomposition. Its gradient formula is more efficient
+ than that of doing :func:`~linalg.lu_factor` followed by :func:`~linalg.lu_unpack`.
+
+Args:
+ LU_data (Tensor): the packed LU factorization data
+ LU_pivots (Tensor): the packed LU factorization pivots
+ unpack_data (bool): flag indicating if the data should be unpacked.
+ If ``False``, then the returned ``L`` and ``U`` are empty tensors.
+ Default: ``True``
+ unpack_pivots (bool): flag indicating if the pivots should be unpacked into a permutation matrix ``P``.
+ If ``False``, then the returned ``P`` is an empty tensor.
+ Default: ``True``
+
+Keyword args:
+ out (tuple, optional): output tuple of three tensors. Ignored if `None`.
+
+Returns:
+ A namedtuple ``(P, L, U)``
+
+Examples::
+
+ >>> A = torch.randn(2, 3, 3)
+ >>> LU, pivots = torch.linalg.lu_factor(A)
+ >>> P, L, U = torch.lu_unpack(LU, pivots)
+ >>> # We can recover A from the factorization
+ >>> A_ = P @ L @ U
+ >>> torch.allclose(A, A_)
+ True
+
+ >>> # LU factorization of a rectangular matrix:
+ >>> A = torch.randn(2, 3, 2)
+ >>> LU, pivots = torch.linalg.lu_factor(A)
+ >>> P, L, U = torch.lu_unpack(LU, pivots)
+ >>> # P, L, U are the same as returned by linalg.lu
+ >>> P_, L_, U_ = torch.linalg.lu(A)
+ >>> torch.allclose(P, P_) and torch.allclose(L, L_) and torch.allclose(U, U_)
+ True
+
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.less,
+ r"""
+less(input, other, *, out=None) -> Tensor
+
+Alias for :func:`torch.lt`.
+""",
+)
+
+add_docstr(
+ torch.lu_solve,
+ r"""
+lu_solve(b, LU_data, LU_pivots, *, out=None) -> Tensor
+
+Returns the LU solve of the linear system :math:`Ax = b` using the partially pivoted
+LU factorization of A from :func:`~linalg.lu_factor`.
+
+This function supports ``float``, ``double``, ``cfloat`` and ``cdouble`` dtypes for :attr:`input`.
+
+.. warning::
+
+ :func:`torch.lu_solve` is deprecated in favor of :func:`torch.linalg.lu_solve`.
+ :func:`torch.lu_solve` will be removed in a future PyTorch release.
+ ``X = torch.lu_solve(B, LU, pivots)`` should be replaced with
+
+ .. code:: python
+
+ X = linalg.lu_solve(LU, pivots, B)
+
+Arguments:
+ b (Tensor): the RHS tensor of size :math:`(*, m, k)`, where :math:`*`
+ is zero or more batch dimensions.
+ LU_data (Tensor): the pivoted LU factorization of A from :meth:`~linalg.lu_factor` of size :math:`(*, m, m)`,
+ where :math:`*` is zero or more batch dimensions.
+ LU_pivots (IntTensor): the pivots of the LU factorization from :meth:`~linalg.lu_factor` of size :math:`(*, m)`,
+ where :math:`*` is zero or more batch dimensions.
+ The batch dimensions of :attr:`LU_pivots` must be equal to the batch dimensions of
+ :attr:`LU_data`.
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> A = torch.randn(2, 3, 3)
+ >>> b = torch.randn(2, 3, 1)
+ >>> LU, pivots = torch.linalg.lu_factor(A)
+ >>> x = torch.lu_solve(b, LU, pivots)
+ >>> torch.dist(A @ x, b)
+ tensor(1.00000e-07 *
+ 2.8312)
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.masked_select,
+ r"""
+masked_select(input, mask, *, out=None) -> Tensor
+
+Returns a new 1-D tensor which indexes the :attr:`input` tensor according to
+the boolean mask :attr:`mask` which is a `BoolTensor`.
+
+The shapes of the :attr:`mask` tensor and the :attr:`input` tensor don't need
+to match, but they must be :ref:`broadcastable <broadcasting-semantics>`.
+
+.. note:: The returned tensor does **not** use the same storage
+ as the original tensor
+
+Args:
+ {input}
+ mask (BoolTensor): the tensor containing the binary mask to index with
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> x = torch.randn(3, 4)
+ >>> x
+ tensor([[ 0.3552, -2.3825, -0.8297, 0.3477],
+ [-1.2035, 1.2252, 0.5002, 0.6248],
+ [ 0.1307, -2.0608, 0.1244, 2.0139]])
+ >>> mask = x.ge(0.5)
+ >>> mask
+ tensor([[False, False, False, False],
+ [False, True, True, True],
+ [False, False, False, True]])
+ >>> torch.masked_select(x, mask)
+ tensor([ 1.2252, 0.5002, 0.6248, 2.0139])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.matrix_power,
+ r"""
+matrix_power(input, n, *, out=None) -> Tensor
+
+Alias for :func:`torch.linalg.matrix_power`
+""",
+)
+
+add_docstr(
+ torch.matrix_exp,
+ r"""
+matrix_exp(A) -> Tensor
+
+Alias for :func:`torch.linalg.matrix_exp`.
+""",
+)
+
+add_docstr(
+ torch.max,
+ r"""
+max(input) -> Tensor
+
+Returns the maximum value of all elements in the ``input`` tensor.
+
+.. warning::
+ This function produces deterministic (sub)gradients unlike ``max(dim=0)``
+
+Args:
+ {input}
+
+Example::
+
+ >>> a = torch.randn(1, 3)
+ >>> a
+ tensor([[ 0.6763, 0.7445, -2.2369]])
+ >>> torch.max(a)
+ tensor(0.7445)
+
+.. function:: max(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
+ :noindex:
+
+Returns a namedtuple ``(values, indices)`` where ``values`` is the maximum
+value of each row of the :attr:`input` tensor in the given dimension
+:attr:`dim`. And ``indices`` is the index location of each maximum value found
+(argmax).
+
+If ``keepdim`` is ``True``, the output tensors are of the same size
+as ``input`` except in the dimension ``dim`` where they are of size 1.
+Otherwise, ``dim`` is squeezed (see :func:`torch.squeeze`), resulting
+in the output tensors having 1 fewer dimension than ``input``.
+
+.. note:: If there are multiple maximal values in a reduced row then
+ the indices of the first maximal value are returned.
+
+Args:
+ {input}
+ {dim}
+ {keepdim} Default: ``False``.
+
+Keyword args:
+ out (tuple, optional): the result tuple of two output tensors (max, max_indices)
+
+Example::
+
+ >>> a = torch.randn(4, 4)
+ >>> a
+ tensor([[-1.2360, -0.2942, -0.1222, 0.8475],
+ [ 1.1949, -1.1127, -2.2379, -0.6702],
+ [ 1.5717, -0.9207, 0.1297, -1.8768],
+ [-0.6172, 1.0036, -0.6060, -0.2432]])
+ >>> torch.max(a, 1)
+ torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1]))
+
+.. function:: max(input, other, *, out=None) -> Tensor
+ :noindex:
+
+See :func:`torch.maximum`.
+
+""".format(
+ **single_dim_common
+ ),
+)
+
+add_docstr(
+ torch.maximum,
+ r"""
+maximum(input, other, *, out=None) -> Tensor
+
+Computes the element-wise maximum of :attr:`input` and :attr:`other`.
+
+.. note::
+ If one of the elements being compared is a NaN, then that element is returned.
+ :func:`maximum` is not supported for tensors with complex dtypes.
+
+Args:
+ {input}
+ other (Tensor): the second input tensor
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.tensor((1, 2, -1))
+ >>> b = torch.tensor((3, 0, 4))
+ >>> torch.maximum(a, b)
+ tensor([3, 2, 4])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.fmax,
+ r"""
+fmax(input, other, *, out=None) -> Tensor
+
+Computes the element-wise maximum of :attr:`input` and :attr:`other`.
+
+This is like :func:`torch.maximum` except it handles NaNs differently:
+if exactly one of the two elements being compared is a NaN then the non-NaN element is taken as the maximum.
+Only if both elements are NaN is NaN propagated.
+
+This function is a wrapper around C++'s ``std::fmax`` and is similar to NumPy's ``fmax`` function.
+
+Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
+:ref:`type promotion <type-promotion-doc>`, and integer and floating-point inputs.
+
+Args:
+ {input}
+ other (Tensor): the second input tensor
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.tensor([9.7, float('nan'), 3.1, float('nan')])
+ >>> b = torch.tensor([-2.2, 0.5, float('nan'), float('nan')])
+ >>> torch.fmax(a, b)
+ tensor([9.7000, 0.5000, 3.1000, nan])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.amax,
+ r"""
+amax(input, dim, keepdim=False, *, out=None) -> Tensor
+
+Returns the maximum value of each slice of the :attr:`input` tensor in the given
+dimension(s) :attr:`dim`.
+
+.. note::
+ The difference between ``max``/``min`` and ``amax``/``amin`` is:
+ - ``amax``/``amin`` supports reducing on multiple dimensions,
+ - ``amax``/``amin`` does not return indices,
+ - ``amax``/``amin`` evenly distributes gradient between equal values,
+ while ``max(dim)``/``min(dim)`` propagates gradient only to a single
+ index in the source tensor.
+
+{keepdim_details}
+
+Args:
+ {input}
+ {dim}
+ {keepdim}
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.randn(4, 4)
+ >>> a
+ tensor([[ 0.8177, 1.4878, -0.2491, 0.9130],
+ [-0.7158, 1.1775, 2.0992, 0.4817],
+ [-0.0053, 0.0164, -1.3738, -0.0507],
+ [ 1.9700, 1.1106, -1.0318, -1.0816]])
+ >>> torch.amax(a, 1)
+ tensor([1.4878, 2.0992, 0.0164, 1.9700])
+""".format(
+ **multi_dim_common
+ ),
+)
+
+add_docstr(
+ torch.argmax,
+ r"""
+argmax(input) -> LongTensor
+
+Returns the indices of the maximum value of all elements in the :attr:`input` tensor.
+
+This is the second value returned by :meth:`torch.max`. See its
+documentation for the exact semantics of this method.
+
+.. note:: If there are multiple maximal values then the indices of the first maximal value are returned.
+
+Args:
+ {input}
+
+Example::
+
+ >>> a = torch.randn(4, 4)
+ >>> a
+ tensor([[ 1.3398, 0.2663, -0.2686, 0.2450],
+ [-0.7401, -0.8805, -0.3402, -1.1936],
+ [ 0.4907, -1.3948, -1.0691, -0.3132],
+ [-1.6092, 0.5419, -0.2993, 0.3195]])
+ >>> torch.argmax(a)
+ tensor(0)
+
+.. function:: argmax(input, dim, keepdim=False) -> LongTensor
+ :noindex:
+
+Returns the indices of the maximum values of a tensor across a dimension.
+
+This is the second value returned by :meth:`torch.max`. See its
+documentation for the exact semantics of this method.
+
+Args:
+ {input}
+ {dim} If ``None``, the argmax of the flattened input is returned.
+ {keepdim}
+
+Example::
+
+ >>> a = torch.randn(4, 4)
+ >>> a
+ tensor([[ 1.3398, 0.2663, -0.2686, 0.2450],
+ [-0.7401, -0.8805, -0.3402, -1.1936],
+ [ 0.4907, -1.3948, -1.0691, -0.3132],
+ [-1.6092, 0.5419, -0.2993, 0.3195]])
+ >>> torch.argmax(a, dim=1)
+ tensor([ 0, 2, 0, 1])
+""".format(
+ **single_dim_common
+ ),
+)
+
+add_docstr(
+ torch.argwhere,
+ r"""
+argwhere(input) -> Tensor
+
+Returns a tensor containing the indices of all non-zero elements of
+:attr:`input`. Each row in the result contains the indices of a non-zero
+element in :attr:`input`. The result is sorted lexicographically, with
+the last index changing the fastest (C-style).
+
+If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor
+:attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of
+non-zero elements in the :attr:`input` tensor.
+
+.. note::
+ This function is similar to NumPy's `argwhere`.
+
+ When :attr:`input` is on CUDA, this function causes host-device synchronization.
+
+Args:
+ {input}
+
+Example::
+
+ >>> t = torch.tensor([1, 0, 1])
+ >>> torch.argwhere(t)
+ tensor([[0],
+ [2]])
+ >>> t = torch.tensor([[1, 0, 1], [0, 1, 1]])
+ >>> torch.argwhere(t)
+ tensor([[0, 0],
+ [0, 2],
+ [1, 1],
+ [1, 2]])
+""",
+)
+
+add_docstr(
+ torch.mean,
+ r"""
+mean(input, *, dtype=None) -> Tensor
+
+Returns the mean value of all elements in the :attr:`input` tensor.
+
+Args:
+ {input}
+
+Keyword args:
+ {dtype}
+
+Example::
+
+ >>> a = torch.randn(1, 3)
+ >>> a
+ tensor([[ 0.2294, -0.5481, 1.3288]])
+ >>> torch.mean(a)
+ tensor(0.3367)
+
+.. function:: mean(input, dim, keepdim=False, *, dtype=None, out=None) -> Tensor
+ :noindex:
+
+Returns the mean value of each row of the :attr:`input` tensor in the given
+dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
+reduce over all of them.
+
+{keepdim_details}
+
+Args:
+ {input}
+ {dim}
+ {keepdim}
+
+Keyword args:
+ {dtype}
+ {out}
+
+.. seealso::
+
+ :func:`torch.nanmean` computes the mean value of `non-NaN` elements.
+
+Example::
+
+ >>> a = torch.randn(4, 4)
+ >>> a
+ tensor([[-0.3841, 0.6320, 0.4254, -0.7384],
+ [-0.9644, 1.0131, -0.6549, -1.4279],
+ [-0.2951, -1.3350, -0.7694, 0.5600],
+ [ 1.0842, -0.9580, 0.3623, 0.2343]])
+ >>> torch.mean(a, 1)
+ tensor([-0.0163, -0.5085, -0.4599, 0.1807])
+ >>> torch.mean(a, 1, True)
+ tensor([[-0.0163],
+ [-0.5085],
+ [-0.4599],
+ [ 0.1807]])
+""".format(
+ **multi_dim_common
+ ),
+)
+
+add_docstr(
+ torch.nanmean,
+ r"""
+nanmean(input, dim=None, keepdim=False, *, dtype=None, out=None) -> Tensor
+
+Computes the mean of all `non-NaN` elements along the specified dimensions.
+
+This function is identical to :func:`torch.mean` when there are no `NaN` values
+in the :attr:`input` tensor. In the presence of `NaN`, :func:`torch.mean` will
+propagate the `NaN` to the output whereas :func:`torch.nanmean` will ignore the
+`NaN` values (`torch.nanmean(a)` is equivalent to `torch.mean(a[~a.isnan()])`).
+
+{keepdim_details}
+
+Args:
+ {input}
+ {opt_dim}
+ {keepdim}
+
+Keyword args:
+ {dtype}
+ {out}
+
+.. seealso::
+
+ :func:`torch.mean` computes the mean value, propagating `NaN`.
+
+Example::
+
+ >>> x = torch.tensor([[torch.nan, 1, 2], [1, 2, 3]])
+ >>> x.mean()
+ tensor(nan)
+ >>> x.nanmean()
+ tensor(1.8000)
+ >>> x.mean(dim=0)
+ tensor([ nan, 1.5000, 2.5000])
+ >>> x.nanmean(dim=0)
+ tensor([1.0000, 1.5000, 2.5000])
+
+ # If all elements in the reduced dimensions are NaN then the result is NaN
+ >>> torch.tensor([torch.nan]).nanmean()
+ tensor(nan)
+""".format(
+ **multi_dim_common
+ ),
+)
+
+add_docstr(
+ torch.median,
+ r"""
+median(input) -> Tensor
+
+Returns the median of the values in :attr:`input`.
+
+.. note::
+ The median is not unique for :attr:`input` tensors with an even number
+ of elements. In this case the lower of the two medians is returned. To
+ compute the mean of both medians, use :func:`torch.quantile` with ``q=0.5`` instead.
+
+.. warning::
+ This function produces deterministic (sub)gradients unlike ``median(dim=0)``
+
+Args:
+ {input}
+
+Example::
+
+ >>> a = torch.randn(1, 3)
+ >>> a
+ tensor([[ 1.5219, -1.5212, 0.2202]])
+ >>> torch.median(a)
+ tensor(0.2202)
+
+.. function:: median(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
+ :noindex:
+
+Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
+in the dimension :attr:`dim`, and ``indices`` contains the index of the median values found in the dimension :attr:`dim`.
+
+By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.
+
+If :attr:`keepdim` is ``True``, the output tensors are of the same size
+as :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
+Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
+the outputs tensor having 1 fewer dimension than :attr:`input`.
+
+.. note::
+ The median is not unique for :attr:`input` tensors with an even number
+ of elements in the dimension :attr:`dim`. In this case the lower of the
+ two medians is returned. To compute the mean of both medians in
+ :attr:`input`, use :func:`torch.quantile` with ``q=0.5`` instead.
+
+.. warning::
+ ``indices`` does not necessarily contain the first occurrence of each
+ median value found, unless it is unique.
+ The exact implementation details are device-specific.
+ Do not expect the same result when run on CPU and GPU in general.
+ For the same reason do not expect the gradients to be deterministic.
+
+Args:
+ {input}
+ {dim}
+ {keepdim}
+
+Keyword args:
+ out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second
+ tensor, which must have dtype long, with their indices in the dimension
+ :attr:`dim` of :attr:`input`.
+
+Example::
+
+ >>> a = torch.randn(4, 5)
+ >>> a
+ tensor([[ 0.2505, -0.3982, -0.9948, 0.3518, -1.3131],
+ [ 0.3180, -0.6993, 1.0436, 0.0438, 0.2270],
+ [-0.2751, 0.7303, 0.2192, 0.3321, 0.2488],
+ [ 1.0778, -1.9510, 0.7048, 0.4742, -0.7125]])
+ >>> torch.median(a, 1)
+ torch.return_types.median(values=tensor([-0.3982, 0.2270, 0.2488, 0.4742]), indices=tensor([1, 4, 4, 3]))
+""".format(
+ **single_dim_common
+ ),
+)
+
+add_docstr(
+ torch.nanmedian,
+ r"""
+nanmedian(input) -> Tensor
+
+Returns the median of the values in :attr:`input`, ignoring ``NaN`` values.
+
+This function is identical to :func:`torch.median` when there are no ``NaN`` values in :attr:`input`.
+When :attr:`input` has one or more ``NaN`` values, :func:`torch.median` will always return ``NaN``,
+while this function will return the median of the non-``NaN`` elements in :attr:`input`.
+If all the elements in :attr:`input` are ``NaN`` it will also return ``NaN``.
+
+Args:
+ {input}
+
+Example::
+
+ >>> a = torch.tensor([1, float('nan'), 3, 2])
+ >>> a.median()
+ tensor(nan)
+ >>> a.nanmedian()
+ tensor(2.)
+
+.. function:: nanmedian(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
+ :noindex:
+
+Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
+in the dimension :attr:`dim`, ignoring ``NaN`` values, and ``indices`` contains the index of the median values
+found in the dimension :attr:`dim`.
+
+This function is identical to :func:`torch.median` when there are no ``NaN`` values in a reduced row. When a reduced row has
+one or more ``NaN`` values, :func:`torch.median` will always reduce it to ``NaN``, while this function will reduce it to the
+median of the non-``NaN`` elements. If all the elements in a reduced row are ``NaN`` then it will be reduced to ``NaN``, too.
+
+Args:
+ {input}
+ {dim}
+ {keepdim}
+
+Keyword args:
+ out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second
+ tensor, which must have dtype long, with their indices in the dimension
+ :attr:`dim` of :attr:`input`.
+
+Example::
+
+ >>> a = torch.tensor([[2, 3, 1], [float('nan'), 1, float('nan')]])
+ >>> a
+ tensor([[2., 3., 1.],
+ [nan, 1., nan]])
+ >>> a.median(0)
+ torch.return_types.median(values=tensor([nan, 1., nan]), indices=tensor([1, 1, 1]))
+ >>> a.nanmedian(0)
+ torch.return_types.nanmedian(values=tensor([2., 1., 1.]), indices=tensor([0, 1, 0]))
+""".format(
+ **single_dim_common
+ ),
+)
+
+add_docstr(
+ torch.quantile,
+ r"""
+quantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor
+
+Computes the q-th quantiles of each row of the :attr:`input` tensor along the dimension :attr:`dim`.
+
+To compute the quantile, we map q in [0, 1] to the range of indices [0, n] to find the location
+of the quantile in the sorted input. If the quantile lies between two data points ``a < b`` with
+indices ``i`` and ``j`` in the sorted order, result is computed according to the given
+:attr:`interpolation` method as follows:
+
+- ``linear``: ``a + (b - a) * fraction``, where ``fraction`` is the fractional part of the computed quantile index.
+- ``lower``: ``a``.
+- ``higher``: ``b``.
+- ``nearest``: ``a`` or ``b``, whichever's index is closer to the computed quantile index (rounding down for .5 fractions).
+- ``midpoint``: ``(a + b) / 2``.
+
+If :attr:`q` is a 1D tensor, the first dimension of the output represents the quantiles and has size
+equal to the size of :attr:`q`, the remaining dimensions are what remains from the reduction.
+
+.. note::
+ By default :attr:`dim` is ``None`` resulting in the :attr:`input` tensor being flattened before computation.
+
+Args:
+ {input}
+ q (float or Tensor): a scalar or 1D tensor of values in the range [0, 1].
+ {dim}
+ {keepdim}
+
+Keyword arguments:
+ interpolation (str): interpolation method to use when the desired quantile lies between two data points.
+ Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``.
+ Default is ``linear``.
+ {out}
+
+Example::
+
+ >>> a = torch.randn(2, 3)
+ >>> a
+ tensor([[ 0.0795, -1.2117, 0.9765],
+ [ 1.1707, 0.6706, 0.4884]])
+ >>> q = torch.tensor([0.25, 0.5, 0.75])
+ >>> torch.quantile(a, q, dim=1, keepdim=True)
+ tensor([[[-0.5661],
+ [ 0.5795]],
+
+ [[ 0.0795],
+ [ 0.6706]],
+
+ [[ 0.5280],
+ [ 0.9206]]])
+ >>> torch.quantile(a, q, dim=1, keepdim=True).shape
+ torch.Size([3, 2, 1])
+ >>> a = torch.arange(4.)
+ >>> a
+ tensor([0., 1., 2., 3.])
+ >>> torch.quantile(a, 0.6, interpolation='linear')
+ tensor(1.8000)
+ >>> torch.quantile(a, 0.6, interpolation='lower')
+ tensor(1.)
+ >>> torch.quantile(a, 0.6, interpolation='higher')
+ tensor(2.)
+ >>> torch.quantile(a, 0.6, interpolation='midpoint')
+ tensor(1.5000)
+ >>> torch.quantile(a, 0.6, interpolation='nearest')
+ tensor(2.)
+ >>> torch.quantile(a, 0.4, interpolation='nearest')
+ tensor(1.)
+""".format(
+ **single_dim_common
+ ),
+)
+
+add_docstr(
+ torch.nanquantile,
+ r"""
+nanquantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor
+
+This is a variant of :func:`torch.quantile` that "ignores" ``NaN`` values,
+computing the quantiles :attr:`q` as if ``NaN`` values in :attr:`input` did
+not exist. If all values in a reduced row are ``NaN`` then the quantiles for
+that reduction will be ``NaN``. See the documentation for :func:`torch.quantile`.
+
+Args:
+ {input}
+ q (float or Tensor): a scalar or 1D tensor of quantile values in the range [0, 1]
+ {dim}
+ {keepdim}
+
+Keyword arguments:
+ interpolation (str): interpolation method to use when the desired quantile lies between two data points.
+ Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``.
+ Default is ``linear``.
+ {out}
+
+Example::
+
+ >>> t = torch.tensor([float('nan'), 1, 2])
+ >>> t.quantile(0.5)
+ tensor(nan)
+ >>> t.nanquantile(0.5)
+ tensor(1.5000)
+ >>> t = torch.tensor([[float('nan'), float('nan')], [1, 2]])
+ >>> t
+ tensor([[nan, nan],
+ [1., 2.]])
+ >>> t.nanquantile(0.5, dim=0)
+ tensor([1., 2.])
+ >>> t.nanquantile(0.5, dim=1)
+ tensor([ nan, 1.5000])
+""".format(
+ **single_dim_common
+ ),
+)
+
+add_docstr(
+ torch.min,
+ r"""
+min(input) -> Tensor
+
+Returns the minimum value of all elements in the :attr:`input` tensor.
+
+.. warning::
+ This function produces deterministic (sub)gradients unlike ``min(dim=0)``
+
+Args:
+ {input}
+
+Example::
+
+ >>> a = torch.randn(1, 3)
+ >>> a
+ tensor([[ 0.6750, 1.0857, 1.7197]])
+ >>> torch.min(a)
+ tensor(0.6750)
+
+.. function:: min(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
+ :noindex:
+
+Returns a namedtuple ``(values, indices)`` where ``values`` is the minimum
+value of each row of the :attr:`input` tensor in the given dimension
+:attr:`dim`. And ``indices`` is the index location of each minimum value found
+(argmin).
+
+If :attr:`keepdim` is ``True``, the output tensors are of the same size as
+:attr:`input` except in the dimension :attr:`dim` where they are of size 1.
+Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
+the output tensors having 1 fewer dimension than :attr:`input`.
+
+.. note:: If there are multiple minimal values in a reduced row then
+ the indices of the first minimal value are returned.
+
+Args:
+ {input}
+ {dim}
+ {keepdim}
+
+Keyword args:
+ out (tuple, optional): the tuple of two output tensors (min, min_indices)
+
+Example::
+
+ >>> a = torch.randn(4, 4)
+ >>> a
+ tensor([[-0.6248, 1.1334, -1.1899, -0.2803],
+ [-1.4644, -0.2635, -0.3651, 0.6134],
+ [ 0.2457, 0.0384, 1.0128, 0.7015],
+ [-0.1153, 2.9849, 2.1458, 0.5788]])
+ >>> torch.min(a, 1)
+ torch.return_types.min(values=tensor([-1.1899, -1.4644, 0.0384, -0.1153]), indices=tensor([2, 0, 1, 0]))
+
+.. function:: min(input, other, *, out=None) -> Tensor
+ :noindex:
+
+See :func:`torch.minimum`.
+""".format(
+ **single_dim_common
+ ),
+)
+
+add_docstr(
+ torch.minimum,
+ r"""
+minimum(input, other, *, out=None) -> Tensor
+
+Computes the element-wise minimum of :attr:`input` and :attr:`other`.
+
+.. note::
+ If one of the elements being compared is a NaN, then that element is returned.
+ :func:`minimum` is not supported for tensors with complex dtypes.
+
+Args:
+ {input}
+ other (Tensor): the second input tensor
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.tensor((1, 2, -1))
+ >>> b = torch.tensor((3, 0, 4))
+ >>> torch.minimum(a, b)
+ tensor([1, 0, -1])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.fmin,
+ r"""
+fmin(input, other, *, out=None) -> Tensor
+
+Computes the element-wise minimum of :attr:`input` and :attr:`other`.
+
+This is like :func:`torch.minimum` except it handles NaNs differently:
+if exactly one of the two elements being compared is a NaN then the non-NaN element is taken as the minimum.
+Only if both elements are NaN is NaN propagated.
+
+This function is a wrapper around C++'s ``std::fmin`` and is similar to NumPy's ``fmin`` function.
+
+Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
+:ref:`type promotion <type-promotion-doc>`, and integer and floating-point inputs.
+
+Args:
+ {input}
+ other (Tensor): the second input tensor
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.tensor([2.2, float('nan'), 2.1, float('nan')])
+ >>> b = torch.tensor([-9.3, 0.1, float('nan'), float('nan')])
+ >>> torch.fmin(a, b)
+ tensor([-9.3000, 0.1000, 2.1000, nan])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.amin,
+ r"""
+amin(input, dim, keepdim=False, *, out=None) -> Tensor
+
+Returns the minimum value of each slice of the :attr:`input` tensor in the given
+dimension(s) :attr:`dim`.
+
+.. note::
+ The difference between ``max``/``min`` and ``amax``/``amin`` is:
+ - ``amax``/``amin`` supports reducing on multiple dimensions,
+ - ``amax``/``amin`` does not return indices,
+ - ``amax``/``amin`` evenly distributes gradient between equal values,
+ while ``max(dim)``/``min(dim)`` propagates gradient only to a single
+ index in the source tensor.
+
+{keepdim_details}
+
+Args:
+ {input}
+ {dim}
+ {keepdim}
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.randn(4, 4)
+ >>> a
+ tensor([[ 0.6451, -0.4866, 0.2987, -1.3312],
+ [-0.5744, 1.2980, 1.8397, -0.2713],
+ [ 0.9128, 0.9214, -1.7268, -0.2995],
+ [ 0.9023, 0.4853, 0.9075, -1.6165]])
+ >>> torch.amin(a, 1)
+ tensor([-1.3312, -0.5744, -1.7268, -1.6165])
+""".format(
+ **multi_dim_common
+ ),
+)
+
+add_docstr(
+ torch.aminmax,
+ r"""
+aminmax(input, *, dim=None, keepdim=False, out=None) -> (Tensor min, Tensor max)
+
+Computes the minimum and maximum values of the :attr:`input` tensor.
+
+Args:
+ input (Tensor):
+ The input tensor
+
+Keyword Args:
+ dim (Optional[int]):
+ The dimension along which to compute the values. If `None`,
+ computes the values over the entire :attr:`input` tensor.
+ Default is `None`.
+ keepdim (bool):
+ If `True`, the reduced dimensions will be kept in the output
+ tensor as dimensions with size 1 for broadcasting, otherwise
+ they will be removed, as if calling (:func:`torch.squeeze`).
+ Default is `False`.
+ out (Optional[Tuple[Tensor, Tensor]]):
+ Optional tensors on which to write the result. Must have the same
+ shape and dtype as the expected output.
+ Default is `None`.
+
+Returns:
+ A named tuple `(min, max)` containing the minimum and maximum values.
+
+Raises:
+ RuntimeError
+ If any of the dimensions to compute the values over has size 0.
+
+.. note::
+ NaN values are propagated to the output if at least one value is NaN.
+
+.. seealso::
+ :func:`torch.amin` computes just the minimum value
+ :func:`torch.amax` computes just the maximum value
+
+Example::
+
+ >>> torch.aminmax(torch.tensor([1, -3, 5]))
+ torch.return_types.aminmax(
+ min=tensor(-3),
+ max=tensor(5))
+
+ >>> # aminmax propagates NaNs
+ >>> torch.aminmax(torch.tensor([1, -3, 5, torch.nan]))
+ torch.return_types.aminmax(
+ min=tensor(nan),
+ max=tensor(nan))
+
+ >>> t = torch.arange(10).view(2, 5)
+ >>> t
+ tensor([[0, 1, 2, 3, 4],
+ [5, 6, 7, 8, 9]])
+ >>> t.aminmax(dim=0, keepdim=True)
+ torch.return_types.aminmax(
+ min=tensor([[0, 1, 2, 3, 4]]),
+ max=tensor([[5, 6, 7, 8, 9]]))
+""",
+)
+
+add_docstr(
+ torch.argmin,
+ r"""
+argmin(input, dim=None, keepdim=False) -> LongTensor
+
+Returns the indices of the minimum value(s) of the flattened tensor or along a dimension
+
+This is the second value returned by :meth:`torch.min`. See its
+documentation for the exact semantics of this method.
+
+.. note:: If there are multiple minimal values then the indices of the first minimal value are returned.
+
+Args:
+ {input}
+ {dim} If ``None``, the argmin of the flattened input is returned.
+ {keepdim}
+
+Example::
+
+ >>> a = torch.randn(4, 4)
+ >>> a
+ tensor([[ 0.1139, 0.2254, -0.1381, 0.3687],
+ [ 1.0100, -1.1975, -0.0102, -0.4732],
+ [-0.9240, 0.1207, -0.7506, -1.0213],
+ [ 1.7809, -1.2960, 0.9384, 0.1438]])
+ >>> torch.argmin(a)
+ tensor(13)
+ >>> torch.argmin(a, dim=1)
+ tensor([ 2, 1, 3, 1])
+ >>> torch.argmin(a, dim=1, keepdim=True)
+ tensor([[2],
+ [1],
+ [3],
+ [1]])
+""".format(
+ **single_dim_common
+ ),
+)
+
+add_docstr(
+ torch.mm,
+ r"""
+mm(input, mat2, *, out=None) -> Tensor
+
+Performs a matrix multiplication of the matrices :attr:`input` and :attr:`mat2`.
+
+If :attr:`input` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
+:math:`(m \times p)` tensor, :attr:`out` will be a :math:`(n \times p)` tensor.
+
+.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
+ For broadcasting matrix products, see :func:`torch.matmul`.
+
+Supports strided and sparse 2-D tensors as inputs, autograd with
+respect to strided inputs.
+
+This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`.
+If :attr:`out` is provided its layout will be used. Otherwise, the result
+layout will be deduced from that of :attr:`input`.
+
+{sparse_beta_warning}
+
+{tf32_note}
+
+{rocm_fp16_note}
+
+Args:
+ input (Tensor): the first matrix to be matrix multiplied
+ mat2 (Tensor): the second matrix to be matrix multiplied
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> mat1 = torch.randn(2, 3)
+ >>> mat2 = torch.randn(3, 3)
+ >>> torch.mm(mat1, mat2)
+ tensor([[ 0.4851, 0.5037, -0.3633],
+ [-0.0760, -3.6705, 2.4784]])
+""".format(
+ **common_args, **tf32_notes, **rocm_fp16_notes, **sparse_support_notes
+ ),
+)
+
+add_docstr(
+ torch.hspmm,
+ r"""
+hspmm(mat1, mat2, *, out=None) -> Tensor
+
+Performs a matrix multiplication of a :ref:`sparse COO matrix
+<sparse-coo-docs>` :attr:`mat1` and a strided matrix :attr:`mat2`. The
+result is a (1 + 1)-dimensional :ref:`hybrid COO matrix
+<sparse-hybrid-coo-docs>`.
+
+Args:
+ mat1 (Tensor): the first sparse matrix to be matrix multiplied
+ mat2 (Tensor): the second strided matrix to be matrix multiplied
+
+Keyword args:
+ {out}
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.matmul,
+ r"""
+matmul(input, other, *, out=None) -> Tensor
+
+Matrix product of two tensors.
+
+The behavior depends on the dimensionality of the tensors as follows:
+
+- If both tensors are 1-dimensional, the dot product (scalar) is returned.
+- If both arguments are 2-dimensional, the matrix-matrix product is returned.
+- If the first argument is 1-dimensional and the second argument is 2-dimensional,
+ a 1 is prepended to its dimension for the purpose of the matrix multiply.
+ After the matrix multiply, the prepended dimension is removed.
+- If the first argument is 2-dimensional and the second argument is 1-dimensional,
+ the matrix-vector product is returned.
+- If both arguments are at least 1-dimensional and at least one argument is
+ N-dimensional (where N > 2), then a batched matrix multiply is returned. If the first
+ argument is 1-dimensional, a 1 is prepended to its dimension for the purpose of the
+ batched matrix multiply and removed after. If the second argument is 1-dimensional, a
+ 1 is appended to its dimension for the purpose of the batched matrix multiple and removed after.
+ The non-matrix (i.e. batch) dimensions are :ref:`broadcasted <broadcasting-semantics>` (and thus
+ must be broadcastable). For example, if :attr:`input` is a
+ :math:`(j \times 1 \times n \times n)` tensor and :attr:`other` is a :math:`(k \times n \times n)`
+ tensor, :attr:`out` will be a :math:`(j \times k \times n \times n)` tensor.
+
+ Note that the broadcasting logic only looks at the batch dimensions when determining if the inputs
+ are broadcastable, and not the matrix dimensions. For example, if :attr:`input` is a
+ :math:`(j \times 1 \times n \times m)` tensor and :attr:`other` is a :math:`(k \times m \times p)`
+ tensor, these inputs are valid for broadcasting even though the final two dimensions (i.e. the
+ matrix dimensions) are different. :attr:`out` will be a :math:`(j \times k \times n \times p)` tensor.
+
+This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`. In particular the
+matrix-matrix (both arguments 2-dimensional) supports sparse arguments with the same restrictions
+as :func:`torch.mm`
+
+{sparse_beta_warning}
+
+{tf32_note}
+
+{rocm_fp16_note}
+
+.. note::
+
+ The 1-dimensional dot product version of this function does not support an :attr:`out` parameter.
+
+Arguments:
+ input (Tensor): the first tensor to be multiplied
+ other (Tensor): the second tensor to be multiplied
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> # vector x vector
+ >>> tensor1 = torch.randn(3)
+ >>> tensor2 = torch.randn(3)
+ >>> torch.matmul(tensor1, tensor2).size()
+ torch.Size([])
+ >>> # matrix x vector
+ >>> tensor1 = torch.randn(3, 4)
+ >>> tensor2 = torch.randn(4)
+ >>> torch.matmul(tensor1, tensor2).size()
+ torch.Size([3])
+ >>> # batched matrix x broadcasted vector
+ >>> tensor1 = torch.randn(10, 3, 4)
+ >>> tensor2 = torch.randn(4)
+ >>> torch.matmul(tensor1, tensor2).size()
+ torch.Size([10, 3])
+ >>> # batched matrix x batched matrix
+ >>> tensor1 = torch.randn(10, 3, 4)
+ >>> tensor2 = torch.randn(10, 4, 5)
+ >>> torch.matmul(tensor1, tensor2).size()
+ torch.Size([10, 3, 5])
+ >>> # batched matrix x broadcasted matrix
+ >>> tensor1 = torch.randn(10, 3, 4)
+ >>> tensor2 = torch.randn(4, 5)
+ >>> torch.matmul(tensor1, tensor2).size()
+ torch.Size([10, 3, 5])
+
+""".format(
+ **common_args, **tf32_notes, **rocm_fp16_notes, **sparse_support_notes
+ ),
+)
+
+add_docstr(
+ torch.mode,
+ r"""
+mode(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
+
+Returns a namedtuple ``(values, indices)`` where ``values`` is the mode
+value of each row of the :attr:`input` tensor in the given dimension
+:attr:`dim`, i.e. a value which appears most often
+in that row, and ``indices`` is the index location of each mode value found.
+
+By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.
+
+If :attr:`keepdim` is ``True``, the output tensors are of the same size as
+:attr:`input` except in the dimension :attr:`dim` where they are of size 1.
+Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting
+in the output tensors having 1 fewer dimension than :attr:`input`.
+
+.. note:: This function is not defined for ``torch.cuda.Tensor`` yet.
+
+Args:
+ {input}
+ {dim}
+ {keepdim}
+
+Keyword args:
+ out (tuple, optional): the result tuple of two output tensors (values, indices)
+
+Example::
+
+ >>> a = torch.randint(10, (5,))
+ >>> a
+ tensor([6, 5, 1, 0, 2])
+ >>> b = a + (torch.randn(50, 1) * 5).long()
+ >>> torch.mode(b, 0)
+ torch.return_types.mode(values=tensor([6, 5, 1, 0, 2]), indices=tensor([2, 2, 2, 2, 2]))
+""".format(
+ **single_dim_common
+ ),
+)
+
+add_docstr(
+ torch.mul,
+ r"""
+mul(input, other, *, out=None) -> Tensor
+
+Multiplies :attr:`input` by :attr:`other`.
+
+
+.. math::
+ \text{out}_i = \text{input}_i \times \text{other}_i
+"""
+ + r"""
+
+Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
+:ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
+
+Args:
+ {input}
+ other (Tensor or Number): the tensor or number to multiply input by.
+
+Keyword args:
+ {out}
+
+Examples::
+
+ >>> a = torch.randn(3)
+ >>> a
+ tensor([ 0.2015, -0.4255, 2.6087])
+ >>> torch.mul(a, 100)
+ tensor([ 20.1494, -42.5491, 260.8663])
+
+ >>> b = torch.randn(4, 1)
+ >>> b
+ tensor([[ 1.1207],
+ [-0.3137],
+ [ 0.0700],
+ [ 0.8378]])
+ >>> c = torch.randn(1, 4)
+ >>> c
+ tensor([[ 0.5146, 0.1216, -0.5244, 2.2382]])
+ >>> torch.mul(b, c)
+ tensor([[ 0.5767, 0.1363, -0.5877, 2.5083],
+ [-0.1614, -0.0382, 0.1645, -0.7021],
+ [ 0.0360, 0.0085, -0.0367, 0.1567],
+ [ 0.4312, 0.1019, -0.4394, 1.8753]])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.multiply,
+ r"""
+multiply(input, other, *, out=None)
+
+Alias for :func:`torch.mul`.
+""",
+)
+
+add_docstr(
+ torch.multinomial,
+ r"""
+multinomial(input, num_samples, replacement=False, *, generator=None, out=None) -> LongTensor
+
+Returns a tensor where each row contains :attr:`num_samples` indices sampled
+from the multinomial (a stricter definition would be multivariate,
+refer to torch.distributions.multinomial.Multinomial for more details)
+probability distribution located in the corresponding row
+of tensor :attr:`input`.
+
+.. note::
+ The rows of :attr:`input` do not need to sum to one (in which case we use
+ the values as weights), but must be non-negative, finite and have
+ a non-zero sum.
+
+Indices are ordered from left to right according to when each was sampled
+(first samples are placed in first column).
+
+If :attr:`input` is a vector, :attr:`out` is a vector of size :attr:`num_samples`.
+
+If :attr:`input` is a matrix with `m` rows, :attr:`out` is a matrix of shape
+:math:`(m \times \text{{num\_samples}})`.
+
+If replacement is ``True``, samples are drawn with replacement.
+
+If not, they are drawn without replacement, which means that when a
+sample index is drawn for a row, it cannot be drawn again for that row.
+
+.. note::
+ When drawn without replacement, :attr:`num_samples` must be lower than
+ number of non-zero elements in :attr:`input` (or the min number of non-zero
+ elements in each row of :attr:`input` if it is a matrix).
+
+Args:
+ input (Tensor): the input tensor containing probabilities
+ num_samples (int): number of samples to draw
+ replacement (bool, optional): whether to draw with replacement or not
+
+Keyword args:
+ {generator}
+ {out}
+
+Example::
+
+ >>> weights = torch.tensor([0, 10, 3, 0], dtype=torch.float) # create a tensor of weights
+ >>> torch.multinomial(weights, 2)
+ tensor([1, 2])
+ >>> torch.multinomial(weights, 4) # ERROR!
+ RuntimeError: invalid argument 2: invalid multinomial distribution (with replacement=False,
+ not enough non-negative category to sample) at ../aten/src/TH/generic/THTensorRandom.cpp:320
+ >>> torch.multinomial(weights, 4, replacement=True)
+ tensor([ 2, 1, 1, 1])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.mv,
+ r"""
+mv(input, vec, *, out=None) -> Tensor
+
+Performs a matrix-vector product of the matrix :attr:`input` and the vector
+:attr:`vec`.
+
+If :attr:`input` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
+size :math:`m`, :attr:`out` will be 1-D of size :math:`n`.
+
+.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
+
+Args:
+ input (Tensor): matrix to be multiplied
+ vec (Tensor): vector to be multiplied
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> mat = torch.randn(2, 3)
+ >>> vec = torch.randn(3)
+ >>> torch.mv(mat, vec)
+ tensor([ 1.0404, -0.6361])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.mvlgamma,
+ r"""
+mvlgamma(input, p, *, out=None) -> Tensor
+
+Alias for :func:`torch.special.multigammaln`.
+""",
+)
+
+add_docstr(
+ torch.movedim,
+ r"""
+movedim(input, source, destination) -> Tensor
+
+Moves the dimension(s) of :attr:`input` at the position(s) in :attr:`source`
+to the position(s) in :attr:`destination`.
+
+Other dimensions of :attr:`input` that are not explicitly moved remain in
+their original order and appear at the positions not specified in :attr:`destination`.
+
+Args:
+ {input}
+ source (int or tuple of ints): Original positions of the dims to move. These must be unique.
+ destination (int or tuple of ints): Destination positions for each of the original dims. These must also be unique.
+
+Examples::
+
+ >>> t = torch.randn(3,2,1)
+ >>> t
+ tensor([[[-0.3362],
+ [-0.8437]],
+
+ [[-0.9627],
+ [ 0.1727]],
+
+ [[ 0.5173],
+ [-0.1398]]])
+ >>> torch.movedim(t, 1, 0).shape
+ torch.Size([2, 3, 1])
+ >>> torch.movedim(t, 1, 0)
+ tensor([[[-0.3362],
+ [-0.9627],
+ [ 0.5173]],
+
+ [[-0.8437],
+ [ 0.1727],
+ [-0.1398]]])
+ >>> torch.movedim(t, (1, 2), (0, 1)).shape
+ torch.Size([2, 1, 3])
+ >>> torch.movedim(t, (1, 2), (0, 1))
+ tensor([[[-0.3362, -0.9627, 0.5173]],
+
+ [[-0.8437, 0.1727, -0.1398]]])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.moveaxis,
+ r"""
+moveaxis(input, source, destination) -> Tensor
+
+Alias for :func:`torch.movedim`.
+
+This function is equivalent to NumPy's moveaxis function.
+
+Examples::
+
+ >>> t = torch.randn(3,2,1)
+ >>> t
+ tensor([[[-0.3362],
+ [-0.8437]],
+
+ [[-0.9627],
+ [ 0.1727]],
+
+ [[ 0.5173],
+ [-0.1398]]])
+ >>> torch.moveaxis(t, 1, 0).shape
+ torch.Size([2, 3, 1])
+ >>> torch.moveaxis(t, 1, 0)
+ tensor([[[-0.3362],
+ [-0.9627],
+ [ 0.5173]],
+
+ [[-0.8437],
+ [ 0.1727],
+ [-0.1398]]])
+ >>> torch.moveaxis(t, (1, 2), (0, 1)).shape
+ torch.Size([2, 1, 3])
+ >>> torch.moveaxis(t, (1, 2), (0, 1))
+ tensor([[[-0.3362, -0.9627, 0.5173]],
+
+ [[-0.8437, 0.1727, -0.1398]]])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.swapdims,
+ r"""
+swapdims(input, dim0, dim1) -> Tensor
+
+Alias for :func:`torch.transpose`.
+
+This function is equivalent to NumPy's swapaxes function.
+
+Examples::
+
+ >>> x = torch.tensor([[[0,1],[2,3]],[[4,5],[6,7]]])
+ >>> x
+ tensor([[[0, 1],
+ [2, 3]],
+
+ [[4, 5],
+ [6, 7]]])
+ >>> torch.swapdims(x, 0, 1)
+ tensor([[[0, 1],
+ [4, 5]],
+
+ [[2, 3],
+ [6, 7]]])
+ >>> torch.swapdims(x, 0, 2)
+ tensor([[[0, 4],
+ [2, 6]],
+
+ [[1, 5],
+ [3, 7]]])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.swapaxes,
+ r"""
+swapaxes(input, axis0, axis1) -> Tensor
+
+Alias for :func:`torch.transpose`.
+
+This function is equivalent to NumPy's swapaxes function.
+
+Examples::
+
+ >>> x = torch.tensor([[[0,1],[2,3]],[[4,5],[6,7]]])
+ >>> x
+ tensor([[[0, 1],
+ [2, 3]],
+
+ [[4, 5],
+ [6, 7]]])
+ >>> torch.swapaxes(x, 0, 1)
+ tensor([[[0, 1],
+ [4, 5]],
+
+ [[2, 3],
+ [6, 7]]])
+ >>> torch.swapaxes(x, 0, 2)
+ tensor([[[0, 4],
+ [2, 6]],
+
+ [[1, 5],
+ [3, 7]]])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.narrow,
+ r"""
+narrow(input, dim, start, length) -> Tensor
+
+Returns a new tensor that is a narrowed version of :attr:`input` tensor. The
+dimension :attr:`dim` is input from :attr:`start` to ``start + length``. The
+returned tensor and :attr:`input` tensor share the same underlying storage.
+
+Args:
+ input (Tensor): the tensor to narrow
+ dim (int): the dimension along which to narrow
+ start (int or Tensor): index of the element to start the narrowed dimension
+ from. Can be negative, which means indexing from the end of `dim`. If
+ `Tensor`, it must be an 0-dim integral `Tensor` (bools not allowed)
+ length (int): length of the narrowed dimension, must be weakly positive
+
+Example::
+
+ >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+ >>> torch.narrow(x, 0, 0, 2)
+ tensor([[ 1, 2, 3],
+ [ 4, 5, 6]])
+ >>> torch.narrow(x, 1, 1, 2)
+ tensor([[ 2, 3],
+ [ 5, 6],
+ [ 8, 9]])
+ >>> torch.narrow(x, -1, torch.tensor(-1), 1)
+ tensor([[3],
+ [6],
+ [9]])
+""",
+)
+
+add_docstr(
+ torch.narrow_copy,
+ r"""
+narrow_copy(input, dim, start, length, *, out=None) -> Tensor
+
+Same as :meth:`Tensor.narrow` except this returns a copy rather
+than shared storage. This is primarily for sparse tensors, which
+do not have a shared-storage narrow method.
+
+Args:
+ input (Tensor): the tensor to narrow
+ dim (int): the dimension along which to narrow
+ start (int): index of the element to start the narrowed dimension from. Can
+ be negative, which means indexing from the end of `dim`
+ length (int): length of the narrowed dimension, must be weakly positive
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+ >>> torch.narrow_copy(x, 0, 0, 2)
+ tensor([[ 1, 2, 3],
+ [ 4, 5, 6]])
+ >>> torch.narrow_copy(x, 1, 1, 2)
+ tensor([[ 2, 3],
+ [ 5, 6],
+ [ 8, 9]])
+ >>> s = torch.arange(16).reshape(2, 2, 2, 2).to_sparse(2)
+ >>> torch.narrow_copy(s, 0, 0, 1)
+ tensor(indices=tensor([[0, 0],
+ [0, 1]]),
+ values=tensor([[[0, 1],
+ [2, 3]],
+
+ [[4, 5],
+ [6, 7]]]),
+ size=(1, 2, 2, 2), nnz=2, layout=torch.sparse_coo)
+
+.. seealso::
+
+ :func:`torch.narrow` for a non copy variant
+
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.nan_to_num,
+ r"""
+nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None) -> Tensor
+
+Replaces :literal:`NaN`, positive infinity, and negative infinity values in :attr:`input`
+with the values specified by :attr:`nan`, :attr:`posinf`, and :attr:`neginf`, respectively.
+By default, :literal:`NaN`\ s are replaced with zero, positive infinity is replaced with the
+greatest finite value representable by :attr:`input`'s dtype, and negative infinity
+is replaced with the least finite value representable by :attr:`input`'s dtype.
+
+Args:
+ {input}
+ nan (Number, optional): the value to replace :literal:`NaN`\s with. Default is zero.
+ posinf (Number, optional): if a Number, the value to replace positive infinity values with.
+ If None, positive infinity values are replaced with the greatest finite value representable by :attr:`input`'s dtype.
+ Default is None.
+ neginf (Number, optional): if a Number, the value to replace negative infinity values with.
+ If None, negative infinity values are replaced with the lowest finite value representable by :attr:`input`'s dtype.
+ Default is None.
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> x = torch.tensor([float('nan'), float('inf'), -float('inf'), 3.14])
+ >>> torch.nan_to_num(x)
+ tensor([ 0.0000e+00, 3.4028e+38, -3.4028e+38, 3.1400e+00])
+ >>> torch.nan_to_num(x, nan=2.0)
+ tensor([ 2.0000e+00, 3.4028e+38, -3.4028e+38, 3.1400e+00])
+ >>> torch.nan_to_num(x, nan=2.0, posinf=1.0)
+ tensor([ 2.0000e+00, 1.0000e+00, -3.4028e+38, 3.1400e+00])
+
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.ne,
+ r"""
+ne(input, other, *, out=None) -> Tensor
+
+Computes :math:`\text{input} \neq \text{other}` element-wise.
+"""
+ + r"""
+
+The second argument can be a number or a tensor whose shape is
+:ref:`broadcastable <broadcasting-semantics>` with the first argument.
+
+Args:
+ input (Tensor): the tensor to compare
+ other (Tensor or float): the tensor or value to compare
+
+Keyword args:
+ {out}
+
+Returns:
+ A boolean tensor that is True where :attr:`input` is not equal to :attr:`other` and False elsewhere
+
+Example::
+
+ >>> torch.ne(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
+ tensor([[False, True], [True, False]])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.not_equal,
+ r"""
+not_equal(input, other, *, out=None) -> Tensor
+
+Alias for :func:`torch.ne`.
+""",
+)
+
+add_docstr(
+ torch.neg,
+ r"""
+neg(input, *, out=None) -> Tensor
+
+Returns a new tensor with the negative of the elements of :attr:`input`.
+
+.. math::
+ \text{out} = -1 \times \text{input}
+"""
+ + r"""
+Args:
+ {input}
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.randn(5)
+ >>> a
+ tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940])
+ >>> torch.neg(a)
+ tensor([-0.0090, 0.2262, 0.0682, 0.2866, -0.3940])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.negative,
+ r"""
+negative(input, *, out=None) -> Tensor
+
+Alias for :func:`torch.neg`
+""",
+)
+
+add_docstr(
+ torch.nextafter,
+ r"""
+nextafter(input, other, *, out=None) -> Tensor
+
+Return the next floating-point value after :attr:`input` towards :attr:`other`, elementwise.
+
+The shapes of ``input`` and ``other`` must be
+:ref:`broadcastable <broadcasting-semantics>`.
+
+Args:
+ input (Tensor): the first input tensor
+ other (Tensor): the second input tensor
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> eps = torch.finfo(torch.float32).eps
+ >>> torch.nextafter(torch.tensor([1.0, 2.0]), torch.tensor([2.0, 1.0])) == torch.tensor([eps + 1, 2 - eps])
+ tensor([True, True])
+
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.nonzero,
+ r"""
+nonzero(input, *, out=None, as_tuple=False) -> LongTensor or tuple of LongTensors
+
+.. note::
+ :func:`torch.nonzero(..., as_tuple=False) <torch.nonzero>` (default) returns a
+ 2-D tensor where each row is the index for a nonzero value.
+
+ :func:`torch.nonzero(..., as_tuple=True) <torch.nonzero>` returns a tuple of 1-D
+ index tensors, allowing for advanced indexing, so ``x[x.nonzero(as_tuple=True)]``
+ gives all nonzero values of tensor ``x``. Of the returned tuple, each index tensor
+ contains nonzero indices for a certain dimension.
+
+ See below for more details on the two behaviors.
+
+ When :attr:`input` is on CUDA, :func:`torch.nonzero() <torch.nonzero>` causes
+ host-device synchronization.
+
+**When** :attr:`as_tuple` **is** ``False`` **(default)**:
+
+Returns a tensor containing the indices of all non-zero elements of
+:attr:`input`. Each row in the result contains the indices of a non-zero
+element in :attr:`input`. The result is sorted lexicographically, with
+the last index changing the fastest (C-style).
+
+If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor
+:attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of
+non-zero elements in the :attr:`input` tensor.
+
+**When** :attr:`as_tuple` **is** ``True``:
+
+Returns a tuple of 1-D tensors, one for each dimension in :attr:`input`,
+each containing the indices (in that dimension) of all non-zero elements of
+:attr:`input` .
+
+If :attr:`input` has :math:`n` dimensions, then the resulting tuple contains :math:`n`
+tensors of size :math:`z`, where :math:`z` is the total number of
+non-zero elements in the :attr:`input` tensor.
+
+As a special case, when :attr:`input` has zero dimensions and a nonzero scalar
+value, it is treated as a one-dimensional tensor with one element.
+
+Args:
+ {input}
+
+Keyword args:
+ out (LongTensor, optional): the output tensor containing indices
+
+Returns:
+ LongTensor or tuple of LongTensor: If :attr:`as_tuple` is ``False``, the output
+ tensor containing indices. If :attr:`as_tuple` is ``True``, one 1-D tensor for
+ each dimension, containing the indices of each nonzero element along that
+ dimension.
+
+Example::
+
+ >>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]))
+ tensor([[ 0],
+ [ 1],
+ [ 2],
+ [ 4]])
+ >>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0],
+ ... [0.0, 0.4, 0.0, 0.0],
+ ... [0.0, 0.0, 1.2, 0.0],
+ ... [0.0, 0.0, 0.0,-0.4]]))
+ tensor([[ 0, 0],
+ [ 1, 1],
+ [ 2, 2],
+ [ 3, 3]])
+ >>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]), as_tuple=True)
+ (tensor([0, 1, 2, 4]),)
+ >>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0],
+ ... [0.0, 0.4, 0.0, 0.0],
+ ... [0.0, 0.0, 1.2, 0.0],
+ ... [0.0, 0.0, 0.0,-0.4]]), as_tuple=True)
+ (tensor([0, 1, 2, 3]), tensor([0, 1, 2, 3]))
+ >>> torch.nonzero(torch.tensor(5), as_tuple=True)
+ (tensor([0]),)
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.normal,
+ r"""
+normal(mean, std, *, generator=None, out=None) -> Tensor
+
+Returns a tensor of random numbers drawn from separate normal distributions
+whose mean and standard deviation are given.
+
+The :attr:`mean` is a tensor with the mean of
+each output element's normal distribution
+
+The :attr:`std` is a tensor with the standard deviation of
+each output element's normal distribution
+
+The shapes of :attr:`mean` and :attr:`std` don't need to match, but the
+total number of elements in each tensor need to be the same.
+
+.. note:: When the shapes do not match, the shape of :attr:`mean`
+ is used as the shape for the returned output tensor
+
+.. note:: When :attr:`std` is a CUDA tensor, this function synchronizes
+ its device with the CPU.
+
+Args:
+ mean (Tensor): the tensor of per-element means
+ std (Tensor): the tensor of per-element standard deviations
+
+Keyword args:
+ {generator}
+ {out}
+
+Example::
+
+ >>> torch.normal(mean=torch.arange(1., 11.), std=torch.arange(1, 0, -0.1))
+ tensor([ 1.0425, 3.5672, 2.7969, 4.2925, 4.7229, 6.2134,
+ 8.0505, 8.1408, 9.0563, 10.0566])
+
+.. function:: normal(mean=0.0, std, *, out=None) -> Tensor
+ :noindex:
+
+Similar to the function above, but the means are shared among all drawn
+elements.
+
+Args:
+ mean (float, optional): the mean for all distributions
+ std (Tensor): the tensor of per-element standard deviations
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> torch.normal(mean=0.5, std=torch.arange(1., 6.))
+ tensor([-1.2793, -1.0732, -2.0687, 5.1177, -1.2303])
+
+.. function:: normal(mean, std=1.0, *, out=None) -> Tensor
+ :noindex:
+
+Similar to the function above, but the standard deviations are shared among
+all drawn elements.
+
+Args:
+ mean (Tensor): the tensor of per-element means
+ std (float, optional): the standard deviation for all distributions
+
+Keyword args:
+ out (Tensor, optional): the output tensor
+
+Example::
+
+ >>> torch.normal(mean=torch.arange(1., 6.))
+ tensor([ 1.1552, 2.6148, 2.6535, 5.8318, 4.2361])
+
+.. function:: normal(mean, std, size, *, out=None) -> Tensor
+ :noindex:
+
+Similar to the function above, but the means and standard deviations are shared
+among all drawn elements. The resulting tensor has size given by :attr:`size`.
+
+Args:
+ mean (float): the mean for all distributions
+ std (float): the standard deviation for all distributions
+ size (int...): a sequence of integers defining the shape of the output tensor.
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> torch.normal(2, 3, size=(1, 4))
+ tensor([[-1.3987, -1.9544, 3.6048, 0.7909]])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.numel,
+ r"""
+numel(input) -> int
+
+Returns the total number of elements in the :attr:`input` tensor.
+
+Args:
+ {input}
+
+Example::
+
+ >>> a = torch.randn(1, 2, 3, 4, 5)
+ >>> torch.numel(a)
+ 120
+ >>> a = torch.zeros(4,4)
+ >>> torch.numel(a)
+ 16
+
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.ones,
+ r"""
+ones(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
+
+Returns a tensor filled with the scalar value `1`, with the shape defined
+by the variable argument :attr:`size`.
+
+Args:
+ size (int...): a sequence of integers defining the shape of the output tensor.
+ Can be a variable number of arguments or a collection like a list or tuple.
+
+Keyword arguments:
+ {out}
+ {dtype}
+ {layout}
+ {device}
+ {requires_grad}
+
+Example::
+
+ >>> torch.ones(2, 3)
+ tensor([[ 1., 1., 1.],
+ [ 1., 1., 1.]])
+
+ >>> torch.ones(5)
+ tensor([ 1., 1., 1., 1., 1.])
+
+""".format(
+ **factory_common_args
+ ),
+)
+
+add_docstr(
+ torch.ones_like,
+ r"""
+ones_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
+
+Returns a tensor filled with the scalar value `1`, with the same size as
+:attr:`input`. ``torch.ones_like(input)`` is equivalent to
+``torch.ones(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
+
+.. warning::
+ As of 0.4, this function does not support an :attr:`out` keyword. As an alternative,
+ the old ``torch.ones_like(input, out=output)`` is equivalent to
+ ``torch.ones(input.size(), out=output)``.
+
+Args:
+ {input}
+
+Keyword arguments:
+ {dtype}
+ {layout}
+ {device}
+ {requires_grad}
+ {memory_format}
+
+Example::
+
+ >>> input = torch.empty(2, 3)
+ >>> torch.ones_like(input)
+ tensor([[ 1., 1., 1.],
+ [ 1., 1., 1.]])
+""".format(
+ **factory_like_common_args
+ ),
+)
+
+add_docstr(
+ torch.orgqr,
+ r"""
+orgqr(input, tau) -> Tensor
+
+Alias for :func:`torch.linalg.householder_product`.
+""",
+)
+
+add_docstr(
+ torch.ormqr,
+ r"""
+ormqr(input, tau, other, left=True, transpose=False, *, out=None) -> Tensor
+
+Computes the matrix-matrix multiplication of a product of Householder matrices with a general matrix.
+
+Multiplies a :math:`m \times n` matrix `C` (given by :attr:`other`) with a matrix `Q`,
+where `Q` is represented using Householder reflectors `(input, tau)`.
+See `Representation of Orthogonal or Unitary Matrices`_ for further details.
+
+If :attr:`left` is `True` then `op(Q)` times `C` is computed, otherwise the result is `C` times `op(Q)`.
+When :attr:`left` is `True`, the implicit matrix `Q` has size :math:`m \times m`.
+It has size :math:`n \times n` otherwise.
+If :attr:`transpose` is `True` then `op` is the conjugate transpose operation, otherwise it's a no-op.
+
+Supports inputs of float, double, cfloat and cdouble dtypes.
+Also supports batched inputs, and, if the input is batched, the output is batched with the same dimensions.
+
+.. seealso::
+ :func:`torch.geqrf` can be used to form the Householder representation `(input, tau)` of matrix `Q`
+ from the QR decomposition.
+
+.. note::
+ This function supports backward but it is only fast when ``(input, tau)`` do not require gradients
+ and/or ``tau.size(-1)`` is very small.
+
+
+Args:
+ input (Tensor): tensor of shape `(*, mn, k)` where `*` is zero or more batch dimensions
+ and `mn` equals to `m` or `n` depending on the :attr:`left`.
+ tau (Tensor): tensor of shape `(*, min(mn, k))` where `*` is zero or more batch dimensions.
+ other (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
+ left (bool): controls the order of multiplication.
+ transpose (bool): controls whether the matrix `Q` is conjugate transposed or not.
+
+Keyword args:
+ out (Tensor, optional): the output Tensor. Ignored if `None`. Default: `None`.
+
+.. _Representation of Orthogonal or Unitary Matrices:
+ https://www.netlib.org/lapack/lug/node128.html
+""",
+)
+
+add_docstr(
+ torch.permute,
+ r"""
+permute(input, dims) -> Tensor
+
+Returns a view of the original tensor :attr:`input` with its dimensions permuted.
+
+Args:
+ {input}
+ dims (tuple of int): The desired ordering of dimensions
+
+Example:
+ >>> x = torch.randn(2, 3, 5)
+ >>> x.size()
+ torch.Size([2, 3, 5])
+ >>> torch.permute(x, (2, 0, 1)).size()
+ torch.Size([5, 2, 3])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.poisson,
+ r"""
+poisson(input, generator=None) -> Tensor
+
+Returns a tensor of the same size as :attr:`input` with each element
+sampled from a Poisson distribution with rate parameter given by the corresponding
+element in :attr:`input` i.e.,
+
+.. math::
+ \text{{out}}_i \sim \text{{Poisson}}(\text{{input}}_i)
+
+:attr:`input` must be non-negative.
+
+Args:
+ input (Tensor): the input tensor containing the rates of the Poisson distribution
+
+Keyword args:
+ {generator}
+
+Example::
+
+ >>> rates = torch.rand(4, 4) * 5 # rate parameter between 0 and 5
+ >>> torch.poisson(rates)
+ tensor([[9., 1., 3., 5.],
+ [8., 6., 6., 0.],
+ [0., 4., 5., 3.],
+ [2., 1., 4., 2.]])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.polygamma,
+ r"""
+polygamma(n, input, *, out=None) -> Tensor
+
+Alias for :func:`torch.special.polygamma`.
+""",
+)
+
+add_docstr(
+ torch.positive,
+ r"""
+positive(input) -> Tensor
+
+Returns :attr:`input`.
+Throws a runtime error if :attr:`input` is a bool tensor.
+"""
+ + r"""
+Args:
+ {input}
+
+Example::
+
+ >>> t = torch.randn(5)
+ >>> t
+ tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940])
+ >>> torch.positive(t)
+ tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.pow,
+ r"""
+pow(input, exponent, *, out=None) -> Tensor
+
+Takes the power of each element in :attr:`input` with :attr:`exponent` and
+returns a tensor with the result.
+
+:attr:`exponent` can be either a single ``float`` number or a `Tensor`
+with the same number of elements as :attr:`input`.
+
+When :attr:`exponent` is a scalar value, the operation applied is:
+
+.. math::
+ \text{out}_i = x_i ^ \text{exponent}
+
+When :attr:`exponent` is a tensor, the operation applied is:
+
+.. math::
+ \text{out}_i = x_i ^ {\text{exponent}_i}
+"""
+ + r"""
+When :attr:`exponent` is a tensor, the shapes of :attr:`input`
+and :attr:`exponent` must be :ref:`broadcastable <broadcasting-semantics>`.
+
+Args:
+ {input}
+ exponent (float or tensor): the exponent value
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.randn(4)
+ >>> a
+ tensor([ 0.4331, 1.2475, 0.6834, -0.2791])
+ >>> torch.pow(a, 2)
+ tensor([ 0.1875, 1.5561, 0.4670, 0.0779])
+ >>> exp = torch.arange(1., 5.)
+
+ >>> a = torch.arange(1., 5.)
+ >>> a
+ tensor([ 1., 2., 3., 4.])
+ >>> exp
+ tensor([ 1., 2., 3., 4.])
+ >>> torch.pow(a, exp)
+ tensor([ 1., 4., 27., 256.])
+
+.. function:: pow(self, exponent, *, out=None) -> Tensor
+ :noindex:
+
+:attr:`self` is a scalar ``float`` value, and :attr:`exponent` is a tensor.
+The returned tensor :attr:`out` is of the same shape as :attr:`exponent`
+
+The operation applied is:
+
+.. math::
+ \text{{out}}_i = \text{{self}} ^ {{\text{{exponent}}_i}}
+
+Args:
+ self (float): the scalar base value for the power operation
+ exponent (Tensor): the exponent tensor
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> exp = torch.arange(1., 5.)
+ >>> base = 2
+ >>> torch.pow(base, exp)
+ tensor([ 2., 4., 8., 16.])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.float_power,
+ r"""
+float_power(input, exponent, *, out=None) -> Tensor
+
+Raises :attr:`input` to the power of :attr:`exponent`, elementwise, in double precision.
+If neither input is complex returns a ``torch.float64`` tensor,
+and if one or more inputs is complex returns a ``torch.complex128`` tensor.
+
+.. note::
+ This function always computes in double precision, unlike :func:`torch.pow`,
+ which implements more typical :ref:`type promotion <type-promotion-doc>`.
+ This is useful when the computation needs to be performed in a wider or more precise dtype,
+ or the results of the computation may contain fractional values not representable in the input dtypes,
+ like when an integer base is raised to a negative integer exponent.
+
+Args:
+ input (Tensor or Number): the base value(s)
+ exponent (Tensor or Number): the exponent value(s)
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.randint(10, (4,))
+ >>> a
+ tensor([6, 4, 7, 1])
+ >>> torch.float_power(a, 2)
+ tensor([36., 16., 49., 1.], dtype=torch.float64)
+
+ >>> a = torch.arange(1, 5)
+ >>> a
+ tensor([ 1, 2, 3, 4])
+ >>> exp = torch.tensor([2, -3, 4, -5])
+ >>> exp
+ tensor([ 2, -3, 4, -5])
+ >>> torch.float_power(a, exp)
+ tensor([1.0000e+00, 1.2500e-01, 8.1000e+01, 9.7656e-04], dtype=torch.float64)
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.prod,
+ r"""
+prod(input, *, dtype=None) -> Tensor
+
+Returns the product of all elements in the :attr:`input` tensor.
+
+Args:
+ {input}
+
+Keyword args:
+ {dtype}
+
+Example::
+
+ >>> a = torch.randn(1, 3)
+ >>> a
+ tensor([[-0.8020, 0.5428, -1.5854]])
+ >>> torch.prod(a)
+ tensor(0.6902)
+
+.. function:: prod(input, dim, keepdim=False, *, dtype=None) -> Tensor
+ :noindex:
+
+Returns the product of each row of the :attr:`input` tensor in the given
+dimension :attr:`dim`.
+
+{keepdim_details}
+
+Args:
+ {input}
+ {dim}
+ {keepdim}
+
+Keyword args:
+ {dtype}
+
+Example::
+
+ >>> a = torch.randn(4, 2)
+ >>> a
+ tensor([[ 0.5261, -0.3837],
+ [ 1.1857, -0.2498],
+ [-1.1646, 0.0705],
+ [ 1.1131, -1.0629]])
+ >>> torch.prod(a, 1)
+ tensor([-0.2018, -0.2962, -0.0821, -1.1831])
+""".format(
+ **single_dim_common
+ ),
+)
+
+add_docstr(
+ torch.promote_types,
+ r"""
+promote_types(type1, type2) -> dtype
+
+Returns the :class:`torch.dtype` with the smallest size and scalar kind that is
+not smaller nor of lower kind than either `type1` or `type2`. See type promotion
+:ref:`documentation <type-promotion-doc>` for more information on the type
+promotion logic.
+
+Args:
+ type1 (:class:`torch.dtype`)
+ type2 (:class:`torch.dtype`)
+
+Example::
+
+ >>> torch.promote_types(torch.int32, torch.float32)
+ torch.float32
+ >>> torch.promote_types(torch.uint8, torch.long)
+ torch.long
+""",
+)
+
+add_docstr(
+ torch.qr,
+ r"""
+qr(input, some=True, *, out=None) -> (Tensor, Tensor)
+
+Computes the QR decomposition of a matrix or a batch of matrices :attr:`input`,
+and returns a namedtuple (Q, R) of tensors such that :math:`\text{input} = Q R`
+with :math:`Q` being an orthogonal matrix or batch of orthogonal matrices and
+:math:`R` being an upper triangular matrix or batch of upper triangular matrices.
+
+If :attr:`some` is ``True``, then this function returns the thin (reduced) QR factorization.
+Otherwise, if :attr:`some` is ``False``, this function returns the complete QR factorization.
+
+.. warning::
+
+ :func:`torch.qr` is deprecated in favor of :func:`torch.linalg.qr`
+ and will be removed in a future PyTorch release. The boolean parameter :attr:`some` has been
+ replaced with a string parameter :attr:`mode`.
+
+ ``Q, R = torch.qr(A)`` should be replaced with
+
+ .. code:: python
+
+ Q, R = torch.linalg.qr(A)
+
+ ``Q, R = torch.qr(A, some=False)`` should be replaced with
+
+ .. code:: python
+
+ Q, R = torch.linalg.qr(A, mode="complete")
+
+.. warning::
+ If you plan to backpropagate through QR, note that the current backward implementation
+ is only well-defined when the first :math:`\min(input.size(-1), input.size(-2))`
+ columns of :attr:`input` are linearly independent.
+ This behavior will probably change once QR supports pivoting.
+
+.. note:: This function uses LAPACK for CPU inputs and MAGMA for CUDA inputs,
+ and may produce different (valid) decompositions on different device types
+ or different platforms.
+
+Args:
+ input (Tensor): the input tensor of size :math:`(*, m, n)` where `*` is zero or more
+ batch dimensions consisting of matrices of dimension :math:`m \times n`.
+ some (bool, optional): Set to ``True`` for reduced QR decomposition and ``False`` for
+ complete QR decomposition. If `k = min(m, n)` then:
+
+ * ``some=True`` : returns `(Q, R)` with dimensions (m, k), (k, n) (default)
+
+ * ``some=False`` : returns `(Q, R)` with dimensions (m, m), (m, n)
+
+Keyword args:
+ out (tuple, optional): tuple of `Q` and `R` tensors.
+ The dimensions of `Q` and `R` are detailed in the description of :attr:`some` above.
+
+Example::
+
+ >>> a = torch.tensor([[12., -51, 4], [6, 167, -68], [-4, 24, -41]])
+ >>> q, r = torch.qr(a)
+ >>> q
+ tensor([[-0.8571, 0.3943, 0.3314],
+ [-0.4286, -0.9029, -0.0343],
+ [ 0.2857, -0.1714, 0.9429]])
+ >>> r
+ tensor([[ -14.0000, -21.0000, 14.0000],
+ [ 0.0000, -175.0000, 70.0000],
+ [ 0.0000, 0.0000, -35.0000]])
+ >>> torch.mm(q, r).round()
+ tensor([[ 12., -51., 4.],
+ [ 6., 167., -68.],
+ [ -4., 24., -41.]])
+ >>> torch.mm(q.t(), q).round()
+ tensor([[ 1., 0., 0.],
+ [ 0., 1., -0.],
+ [ 0., -0., 1.]])
+ >>> a = torch.randn(3, 4, 5)
+ >>> q, r = torch.qr(a, some=False)
+ >>> torch.allclose(torch.matmul(q, r), a)
+ True
+ >>> torch.allclose(torch.matmul(q.mT, q), torch.eye(4))
+ True
+""",
+)
+
+add_docstr(
+ torch.rad2deg,
+ r"""
+rad2deg(input, *, out=None) -> Tensor
+
+Returns a new tensor with each of the elements of :attr:`input`
+converted from angles in radians to degrees.
+
+Args:
+ {input}
+
+Keyword arguments:
+ {out}
+
+Example::
+
+ >>> a = torch.tensor([[3.142, -3.142], [6.283, -6.283], [1.570, -1.570]])
+ >>> torch.rad2deg(a)
+ tensor([[ 180.0233, -180.0233],
+ [ 359.9894, -359.9894],
+ [ 89.9544, -89.9544]])
+
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.deg2rad,
+ r"""
+deg2rad(input, *, out=None) -> Tensor
+
+Returns a new tensor with each of the elements of :attr:`input`
+converted from angles in degrees to radians.
+
+Args:
+ {input}
+
+Keyword arguments:
+ {out}
+
+Example::
+
+ >>> a = torch.tensor([[180.0, -180.0], [360.0, -360.0], [90.0, -90.0]])
+ >>> torch.deg2rad(a)
+ tensor([[ 3.1416, -3.1416],
+ [ 6.2832, -6.2832],
+ [ 1.5708, -1.5708]])
+
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.heaviside,
+ r"""
+heaviside(input, values, *, out=None) -> Tensor
+
+Computes the Heaviside step function for each element in :attr:`input`.
+The Heaviside step function is defined as:
+
+.. math::
+ \text{heaviside}(input, values) = \begin{cases}
+ 0, & \text{if input < 0}\\
+ values, & \text{if input == 0}\\
+ 1, & \text{if input > 0}
+ \end{cases}
+"""
+ + r"""
+
+Args:
+ {input}
+ values (Tensor): The values to use where :attr:`input` is zero.
+
+Keyword arguments:
+ {out}
+
+Example::
+
+ >>> input = torch.tensor([-1.5, 0, 2.0])
+ >>> values = torch.tensor([0.5])
+ >>> torch.heaviside(input, values)
+ tensor([0.0000, 0.5000, 1.0000])
+ >>> values = torch.tensor([1.2, -2.0, 3.5])
+ >>> torch.heaviside(input, values)
+ tensor([0., -2., 1.])
+
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.rand,
+ """
+rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, \
+requires_grad=False, pin_memory=False) -> Tensor
+"""
+ + r"""
+Returns a tensor filled with random numbers from a uniform distribution
+on the interval :math:`[0, 1)`
+
+The shape of the tensor is defined by the variable argument :attr:`size`.
+
+Args:
+ size (int...): a sequence of integers defining the shape of the output tensor.
+ Can be a variable number of arguments or a collection like a list or tuple.
+
+Keyword args:
+ {generator}
+ {out}
+ {dtype}
+ {layout}
+ {device}
+ {requires_grad}
+ {pin_memory}
+
+Example::
+
+ >>> torch.rand(4)
+ tensor([ 0.5204, 0.2503, 0.3525, 0.5673])
+ >>> torch.rand(2, 3)
+ tensor([[ 0.8237, 0.5781, 0.6879],
+ [ 0.3816, 0.7249, 0.0998]])
+""".format(
+ **factory_common_args
+ ),
+)
+
+add_docstr(
+ torch.rand_like,
+ r"""
+rand_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
+
+Returns a tensor with the same size as :attr:`input` that is filled with
+random numbers from a uniform distribution on the interval :math:`[0, 1)`.
+``torch.rand_like(input)`` is equivalent to
+``torch.rand(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
+
+Args:
+ {input}
+
+Keyword args:
+ {dtype}
+ {layout}
+ {device}
+ {requires_grad}
+ {memory_format}
+
+""".format(
+ **factory_like_common_args
+ ),
+)
+
+add_docstr(
+ torch.randint,
+ """
+randint(low=0, high, size, \\*, generator=None, out=None, \
+dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
+
+Returns a tensor filled with random integers generated uniformly
+between :attr:`low` (inclusive) and :attr:`high` (exclusive).
+
+The shape of the tensor is defined by the variable argument :attr:`size`.
+
+.. note::
+ With the global dtype default (``torch.float32``), this function returns
+ a tensor with dtype ``torch.int64``.
+
+Args:
+ low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
+ high (int): One above the highest integer to be drawn from the distribution.
+ size (tuple): a tuple defining the shape of the output tensor.
+
+Keyword args:
+ {generator}
+ {out}
+ dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. Default: if ``None``,
+ this function returns a tensor with dtype ``torch.int64``.
+ {layout}
+ {device}
+ {requires_grad}
+
+Example::
+
+ >>> torch.randint(3, 5, (3,))
+ tensor([4, 3, 4])
+
+
+ >>> torch.randint(10, (2, 2))
+ tensor([[0, 2],
+ [5, 5]])
+
+
+ >>> torch.randint(3, 10, (2, 2))
+ tensor([[4, 5],
+ [6, 7]])
+
+
+""".format(
+ **factory_common_args
+ ),
+)
+
+add_docstr(
+ torch.randint_like,
+ """
+randint_like(input, low=0, high, \\*, dtype=None, layout=torch.strided, device=None, requires_grad=False, \
+memory_format=torch.preserve_format) -> Tensor
+
+Returns a tensor with the same shape as Tensor :attr:`input` filled with
+random integers generated uniformly between :attr:`low` (inclusive) and
+:attr:`high` (exclusive).
+
+.. note::
+ With the global dtype default (``torch.float32``), this function returns
+ a tensor with dtype ``torch.int64``.
+
+Args:
+ {input}
+ low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
+ high (int): One above the highest integer to be drawn from the distribution.
+
+Keyword args:
+ {dtype}
+ {layout}
+ {device}
+ {requires_grad}
+ {memory_format}
+
+""".format(
+ **factory_like_common_args
+ ),
+)
+
+add_docstr(
+ torch.randn,
+ """
+randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, \
+pin_memory=False) -> Tensor
+"""
+ + r"""
+
+Returns a tensor filled with random numbers from a normal distribution
+with mean `0` and variance `1` (also called the standard normal
+distribution).
+
+.. math::
+ \text{{out}}_{{i}} \sim \mathcal{{N}}(0, 1)
+
+For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and
+unit variance as
+
+.. math::
+ \text{{out}}_{{i}} \sim \mathcal{{CN}}(0, 1)
+
+This is equivalent to separately sampling the real :math:`(\operatorname{{Re}})` and imaginary
+:math:`(\operatorname{{Im}})` part of :math:`\text{{out}}_i` as
+
+.. math::
+ \operatorname{{Re}}(\text{{out}}_{{i}}) \sim \mathcal{{N}}(0, \frac{{1}}{{2}}),\quad
+ \operatorname{{Im}}(\text{{out}}_{{i}}) \sim \mathcal{{N}}(0, \frac{{1}}{{2}})
+
+The shape of the tensor is defined by the variable argument :attr:`size`.
+
+
+Args:
+ size (int...): a sequence of integers defining the shape of the output tensor.
+ Can be a variable number of arguments or a collection like a list or tuple.
+
+Keyword args:
+ {generator}
+ {out}
+ {dtype}
+ {layout}
+ {device}
+ {requires_grad}
+ {pin_memory}
+
+Example::
+
+ >>> torch.randn(4)
+ tensor([-2.1436, 0.9966, 2.3426, -0.6366])
+ >>> torch.randn(2, 3)
+ tensor([[ 1.5954, 2.8929, -1.0923],
+ [ 1.1719, -0.4709, -0.1996]])
+
+.. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution
+""".format(
+ **factory_common_args
+ ),
+)
+
+add_docstr(
+ torch.randn_like,
+ r"""
+randn_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
+
+Returns a tensor with the same size as :attr:`input` that is filled with
+random numbers from a normal distribution with mean 0 and variance 1. Please refer to :func:`torch.randn` for the
+sampling process of complex dtypes. ``torch.randn_like(input)`` is equivalent to
+``torch.randn(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
+
+Args:
+ {input}
+
+Keyword args:
+ {dtype}
+ {layout}
+ {device}
+ {requires_grad}
+ {memory_format}
+
+""".format(
+ **factory_like_common_args
+ ),
+)
+
+add_docstr(
+ torch.randperm,
+ """
+randperm(n, *, generator=None, out=None, dtype=torch.int64, layout=torch.strided, \
+device=None, requires_grad=False, pin_memory=False) -> Tensor
+"""
+ + r"""
+Returns a random permutation of integers from ``0`` to ``n - 1``.
+
+Args:
+ n (int): the upper bound (exclusive)
+
+Keyword args:
+ {generator}
+ {out}
+ dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
+ Default: ``torch.int64``.
+ {layout}
+ {device}
+ {requires_grad}
+ {pin_memory}
+
+Example::
+
+ >>> torch.randperm(4)
+ tensor([2, 1, 0, 3])
+""".format(
+ **factory_common_args
+ ),
+)
+
+add_docstr(
+ torch.tensor,
+ r"""
+tensor(data, *, dtype=None, device=None, requires_grad=False, pin_memory=False) -> Tensor
+
+Constructs a tensor with no autograd history (also known as a "leaf tensor", see :doc:`/notes/autograd`) by copying :attr:`data`.
+
+.. warning::
+
+ When working with tensors prefer using :func:`torch.Tensor.clone`,
+ :func:`torch.Tensor.detach`, and :func:`torch.Tensor.requires_grad_` for
+ readability. Letting `t` be a tensor, ``torch.tensor(t)`` is equivalent to
+ ``t.clone().detach()``, and ``torch.tensor(t, requires_grad=True)``
+ is equivalent to ``t.clone().detach().requires_grad_(True)``.
+
+.. seealso::
+
+ :func:`torch.as_tensor` preserves autograd history and avoids copies where possible.
+ :func:`torch.from_numpy` creates a tensor that shares storage with a NumPy array.
+
+Args:
+ {data}
+
+Keyword args:
+ {dtype}
+ device (:class:`torch.device`, optional): the device of the constructed tensor. If None and data is a tensor
+ then the device of data is used. If None and data is not a tensor then
+ the result tensor is constructed on the current device.
+ {requires_grad}
+ {pin_memory}
+
+
+Example::
+
+ >>> torch.tensor([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]])
+ tensor([[ 0.1000, 1.2000],
+ [ 2.2000, 3.1000],
+ [ 4.9000, 5.2000]])
+
+ >>> torch.tensor([0, 1]) # Type inference on data
+ tensor([ 0, 1])
+
+ >>> torch.tensor([[0.11111, 0.222222, 0.3333333]],
+ ... dtype=torch.float64,
+ ... device=torch.device('cuda:0')) # creates a double tensor on a CUDA device
+ tensor([[ 0.1111, 0.2222, 0.3333]], dtype=torch.float64, device='cuda:0')
+
+ >>> torch.tensor(3.14159) # Create a zero-dimensional (scalar) tensor
+ tensor(3.1416)
+
+ >>> torch.tensor([]) # Create an empty tensor (of size (0,))
+ tensor([])
+""".format(
+ **factory_data_common_args
+ ),
+)
+
+add_docstr(
+ torch.range,
+ r"""
+range(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
+
+Returns a 1-D tensor of size :math:`\left\lfloor \frac{\text{end} - \text{start}}{\text{step}} \right\rfloor + 1`
+with values from :attr:`start` to :attr:`end` with step :attr:`step`. Step is
+the gap between two values in the tensor.
+
+.. math::
+ \text{out}_{i+1} = \text{out}_i + \text{step}.
+"""
+ + r"""
+.. warning::
+ This function is deprecated and will be removed in a future release because its behavior is inconsistent with
+ Python's range builtin. Instead, use :func:`torch.arange`, which produces values in [start, end).
+
+Args:
+ start (float): the starting value for the set of points. Default: ``0``.
+ end (float): the ending value for the set of points
+ step (float): the gap between each pair of adjacent points. Default: ``1``.
+
+Keyword args:
+ {out}
+ {dtype} If `dtype` is not given, infer the data type from the other input
+ arguments. If any of `start`, `end`, or `step` are floating-point, the
+ `dtype` is inferred to be the default dtype, see
+ :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to
+ be `torch.int64`.
+ {layout}
+ {device}
+ {requires_grad}
+
+Example::
+
+ >>> torch.range(1, 4)
+ tensor([ 1., 2., 3., 4.])
+ >>> torch.range(1, 4, 0.5)
+ tensor([ 1.0000, 1.5000, 2.0000, 2.5000, 3.0000, 3.5000, 4.0000])
+""".format(
+ **factory_common_args
+ ),
+)
+
+add_docstr(
+ torch.arange,
+ r"""
+arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
+
+Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil`
+with values from the interval ``[start, end)`` taken with common difference
+:attr:`step` beginning from `start`.
+
+Note that non-integer :attr:`step` is subject to floating point rounding errors when
+comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end`
+in such cases.
+
+.. math::
+ \text{out}_{i+1} = \text{out}_{i} + \text{step}
+"""
+ + r"""
+Args:
+ start (Number): the starting value for the set of points. Default: ``0``.
+ end (Number): the ending value for the set of points
+ step (Number): the gap between each pair of adjacent points. Default: ``1``.
+
+Keyword args:
+ {out}
+ {dtype} If `dtype` is not given, infer the data type from the other input
+ arguments. If any of `start`, `end`, or `step` are floating-point, the
+ `dtype` is inferred to be the default dtype, see
+ :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to
+ be `torch.int64`.
+ {layout}
+ {device}
+ {requires_grad}
+
+Example::
+
+ >>> torch.arange(5)
+ tensor([ 0, 1, 2, 3, 4])
+ >>> torch.arange(1, 4)
+ tensor([ 1, 2, 3])
+ >>> torch.arange(1, 2.5, 0.5)
+ tensor([ 1.0000, 1.5000, 2.0000])
+""".format(
+ **factory_common_args
+ ),
+)
+
+add_docstr(
+ torch.ravel,
+ r"""
+ravel(input) -> Tensor
+
+Return a contiguous flattened tensor. A copy is made only if needed.
+
+Args:
+ {input}
+
+Example::
+
+ >>> t = torch.tensor([[[1, 2],
+ ... [3, 4]],
+ ... [[5, 6],
+ ... [7, 8]]])
+ >>> torch.ravel(t)
+ tensor([1, 2, 3, 4, 5, 6, 7, 8])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.remainder,
+ r"""
+remainder(input, other, *, out=None) -> Tensor
+
+Computes
+`Python's modulus operation <https://docs.python.org/3/reference/expressions.html#binary-arithmetic-operations>`_
+entrywise. The result has the same sign as the divisor :attr:`other` and its absolute value
+is less than that of :attr:`other`.
+
+It may also be defined in terms of :func:`torch.div` as
+
+.. code:: python
+
+ torch.remainder(a, b) == a - a.div(b, rounding_mode="floor") * b
+
+Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
+:ref:`type promotion <type-promotion-doc>`, and integer and float inputs.
+
+.. note::
+ Complex inputs are not supported. In some cases, it is not mathematically
+ possible to satisfy the definition of a modulo operation with complex numbers.
+ See :func:`torch.fmod` for how division by zero is handled.
+
+.. seealso::
+
+ :func:`torch.fmod` which implements C++'s `std::fmod <https://en.cppreference.com/w/cpp/numeric/math/fmod>`_.
+ This one is defined in terms of division rounding towards zero.
+
+Args:
+ input (Tensor or Scalar): the dividend
+ other (Tensor or Scalar): the divisor
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
+ tensor([ 1., 0., 1., 1., 0., 1.])
+ >>> torch.remainder(torch.tensor([1, 2, 3, 4, 5]), -1.5)
+ tensor([ -0.5000, -1.0000, 0.0000, -0.5000, -1.0000 ])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.renorm,
+ r"""
+renorm(input, p, dim, maxnorm, *, out=None) -> Tensor
+
+Returns a tensor where each sub-tensor of :attr:`input` along dimension
+:attr:`dim` is normalized such that the `p`-norm of the sub-tensor is lower
+than the value :attr:`maxnorm`
+
+.. note:: If the norm of a row is lower than `maxnorm`, the row is unchanged
+
+Args:
+ {input}
+ p (float): the power for the norm computation
+ dim (int): the dimension to slice over to get the sub-tensors
+ maxnorm (float): the maximum norm to keep each sub-tensor under
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> x = torch.ones(3, 3)
+ >>> x[1].fill_(2)
+ tensor([ 2., 2., 2.])
+ >>> x[2].fill_(3)
+ tensor([ 3., 3., 3.])
+ >>> x
+ tensor([[ 1., 1., 1.],
+ [ 2., 2., 2.],
+ [ 3., 3., 3.]])
+ >>> torch.renorm(x, 1, 0, 5)
+ tensor([[ 1.0000, 1.0000, 1.0000],
+ [ 1.6667, 1.6667, 1.6667],
+ [ 1.6667, 1.6667, 1.6667]])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.reshape,
+ r"""
+reshape(input, shape) -> Tensor
+
+Returns a tensor with the same data and number of elements as :attr:`input`,
+but with the specified shape. When possible, the returned tensor will be a view
+of :attr:`input`. Otherwise, it will be a copy. Contiguous inputs and inputs
+with compatible strides can be reshaped without copying, but you should not
+depend on the copying vs. viewing behavior.
+
+See :meth:`torch.Tensor.view` on when it is possible to return a view.
+
+A single dimension may be -1, in which case it's inferred from the remaining
+dimensions and the number of elements in :attr:`input`.
+
+Args:
+ input (Tensor): the tensor to be reshaped
+ shape (tuple of int): the new shape
+
+Example::
+
+ >>> a = torch.arange(4.)
+ >>> torch.reshape(a, (2, 2))
+ tensor([[ 0., 1.],
+ [ 2., 3.]])
+ >>> b = torch.tensor([[0, 1], [2, 3]])
+ >>> torch.reshape(b, (-1,))
+ tensor([ 0, 1, 2, 3])
+""",
+)
+
+
+add_docstr(
+ torch.result_type,
+ r"""
+result_type(tensor1, tensor2) -> dtype
+
+Returns the :class:`torch.dtype` that would result from performing an arithmetic
+operation on the provided input tensors. See type promotion :ref:`documentation <type-promotion-doc>`
+for more information on the type promotion logic.
+
+Args:
+ tensor1 (Tensor or Number): an input tensor or number
+ tensor2 (Tensor or Number): an input tensor or number
+
+Example::
+
+ >>> torch.result_type(torch.tensor([1, 2], dtype=torch.int), 1.0)
+ torch.float32
+ >>> torch.result_type(torch.tensor([1, 2], dtype=torch.uint8), torch.tensor(1))
+ torch.uint8
+""",
+)
+
+add_docstr(
+ torch.row_stack,
+ r"""
+row_stack(tensors, *, out=None) -> Tensor
+
+Alias of :func:`torch.vstack`.
+""",
+)
+
+add_docstr(
+ torch.round,
+ r"""
+round(input, *, decimals=0, out=None) -> Tensor
+
+Rounds elements of :attr:`input` to the nearest integer.
+
+For integer inputs, follows the array-api convention of returning a
+copy of the input tensor.
+The return type of output is same as that of input's dtype.
+
+.. note::
+ This function implements the "round half to even" to
+ break ties when a number is equidistant from two
+ integers (e.g. `round(2.5)` is 2).
+
+ When the :attr:`decimals` argument is specified the
+ algorithm used is similar to NumPy's `around`. This
+ algorithm is fast but inexact and it can easily
+ overflow for low precision dtypes.
+ Eg. `round(tensor([10000], dtype=torch.float16), decimals=3)` is `inf`.
+
+.. seealso::
+ :func:`torch.ceil`, which rounds up.
+ :func:`torch.floor`, which rounds down.
+ :func:`torch.trunc`, which rounds towards zero.
+
+Args:
+ {input}
+ decimals (int): Number of decimal places to round to (default: 0).
+ If decimals is negative, it specifies the number of positions
+ to the left of the decimal point.
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> torch.round(torch.tensor((4.7, -2.3, 9.1, -7.7)))
+ tensor([ 5., -2., 9., -8.])
+
+ >>> # Values equidistant from two integers are rounded towards
+ >>> # the nearest even value (zero is treated as even)
+ >>> torch.round(torch.tensor([-0.5, 0.5, 1.5, 2.5]))
+ tensor([-0., 0., 2., 2.])
+
+ >>> # A positive decimals argument rounds to that decimal place
+ >>> torch.round(torch.tensor([0.1234567]), decimals=3)
+ tensor([0.1230])
+
+ >>> # A negative decimals argument rounds to the left of the decimal
+ >>> torch.round(torch.tensor([1200.1234567]), decimals=-3)
+ tensor([1000.])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.rsqrt,
+ r"""
+rsqrt(input, *, out=None) -> Tensor
+
+Returns a new tensor with the reciprocal of the square-root of each of
+the elements of :attr:`input`.
+
+.. math::
+ \text{out}_{i} = \frac{1}{\sqrt{\text{input}_{i}}}
+"""
+ + r"""
+Args:
+ {input}
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.randn(4)
+ >>> a
+ tensor([-0.0370, 0.2970, 1.5420, -0.9105])
+ >>> torch.rsqrt(a)
+ tensor([ nan, 1.8351, 0.8053, nan])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.scatter,
+ r"""
+scatter(input, dim, index, src) -> Tensor
+
+Out-of-place version of :meth:`torch.Tensor.scatter_`
+""",
+)
+
+add_docstr(
+ torch.scatter_add,
+ r"""
+scatter_add(input, dim, index, src) -> Tensor
+
+Out-of-place version of :meth:`torch.Tensor.scatter_add_`
+""",
+)
+
+add_docstr(
+ torch.scatter_reduce,
+ r"""
+scatter_reduce(input, dim, index, src, reduce, *, include_self=True) -> Tensor
+
+Out-of-place version of :meth:`torch.Tensor.scatter_reduce_`
+""",
+)
+
+add_docstr(
+ torch.select,
+ r"""
+select(input, dim, index) -> Tensor
+
+Slices the :attr:`input` tensor along the selected dimension at the given index.
+This function returns a view of the original tensor with the given dimension removed.
+
+.. note:: If :attr:`input` is a sparse tensor and returning a view of
+ the tensor is not possible, a RuntimeError exception is
+ raised. If this is the case, consider using
+ :func:`torch.select_copy` function.
+
+Args:
+ {input}
+ dim (int): the dimension to slice
+ index (int): the index to select with
+
+.. note::
+
+ :meth:`select` is equivalent to slicing. For example,
+ ``tensor.select(0, index)`` is equivalent to ``tensor[index]`` and
+ ``tensor.select(2, index)`` is equivalent to ``tensor[:,:,index]``.
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.select_scatter,
+ r"""
+select_scatter(input, src, dim, index) -> Tensor
+
+Embeds the values of the :attr:`src` tensor into :attr:`input` at the given index.
+This function returns a tensor with fresh storage; it does not create a view.
+
+
+Args:
+ {input}
+ src (Tensor): The tensor to embed into :attr:`input`
+ dim (int): the dimension to insert the slice into.
+ index (int): the index to select with
+
+.. note::
+
+ :attr:`src` must be of the proper size in order to be embedded
+ into :attr:`input`. Specifically, it should have the same shape as
+ ``torch.select(input, dim, index)``
+
+Example::
+
+ >>> a = torch.zeros(2, 2)
+ >>> b = torch.ones(2)
+ >>> a.select_scatter(b, 0, 0)
+ tensor([[1., 1.],
+ [0., 0.]])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.slice_scatter,
+ r"""
+slice_scatter(input, src, dim=0, start=None, end=None, step=1) -> Tensor
+
+Embeds the values of the :attr:`src` tensor into :attr:`input` at the given
+dimension.
+This function returns a tensor with fresh storage; it does not create a view.
+
+
+Args:
+ {input}
+ src (Tensor): The tensor to embed into :attr:`input`
+ dim (int): the dimension to insert the slice into
+ start (Optional[int]): the start index of where to insert the slice
+ end (Optional[int]): the end index of where to insert the slice
+ step (int): how many elements to skip in between each element of the slice
+
+Example::
+
+ >>> a = torch.zeros(8, 8)
+ >>> b = torch.ones(2, 8)
+ >>> a.slice_scatter(b, start=6)
+ tensor([[0., 0., 0., 0., 0., 0., 0., 0.],
+ [0., 0., 0., 0., 0., 0., 0., 0.],
+ [0., 0., 0., 0., 0., 0., 0., 0.],
+ [0., 0., 0., 0., 0., 0., 0., 0.],
+ [0., 0., 0., 0., 0., 0., 0., 0.],
+ [0., 0., 0., 0., 0., 0., 0., 0.],
+ [1., 1., 1., 1., 1., 1., 1., 1.],
+ [1., 1., 1., 1., 1., 1., 1., 1.]])
+
+ >>> b = torch.ones(8, 2)
+ >>> a.slice_scatter(b, dim=1, start=2, end=6, step=2)
+ tensor([[0., 0., 1., 0., 1., 0., 0., 0.],
+ [0., 0., 1., 0., 1., 0., 0., 0.],
+ [0., 0., 1., 0., 1., 0., 0., 0.],
+ [0., 0., 1., 0., 1., 0., 0., 0.],
+ [0., 0., 1., 0., 1., 0., 0., 0.],
+ [0., 0., 1., 0., 1., 0., 0., 0.],
+ [0., 0., 1., 0., 1., 0., 0., 0.],
+ [0., 0., 1., 0., 1., 0., 0., 0.]])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.set_flush_denormal,
+ r"""
+set_flush_denormal(mode) -> bool
+
+Disables denormal floating numbers on CPU.
+
+Returns ``True`` if your system supports flushing denormal numbers and it
+successfully configures flush denormal mode. :meth:`~torch.set_flush_denormal`
+is only supported on x86 architectures supporting SSE3.
+
+Args:
+ mode (bool): Controls whether to enable flush denormal mode or not
+
+Example::
+
+ >>> torch.set_flush_denormal(True)
+ True
+ >>> torch.tensor([1e-323], dtype=torch.float64)
+ tensor([ 0.], dtype=torch.float64)
+ >>> torch.set_flush_denormal(False)
+ True
+ >>> torch.tensor([1e-323], dtype=torch.float64)
+ tensor(9.88131e-324 *
+ [ 1.0000], dtype=torch.float64)
+""",
+)
+
+add_docstr(
+ torch.set_num_threads,
+ r"""
+set_num_threads(int)
+
+Sets the number of threads used for intraop parallelism on CPU.
+
+.. warning::
+ To ensure that the correct number of threads is used, set_num_threads
+ must be called before running eager, JIT or autograd code.
+""",
+)
+
+add_docstr(
+ torch.set_num_interop_threads,
+ r"""
+set_num_interop_threads(int)
+
+Sets the number of threads used for interop parallelism
+(e.g. in JIT interpreter) on CPU.
+
+.. warning::
+ Can only be called once and before any inter-op parallel work
+ is started (e.g. JIT execution).
+""",
+)
+
+add_docstr(
+ torch.sigmoid,
+ r"""
+sigmoid(input, *, out=None) -> Tensor
+
+Alias for :func:`torch.special.expit`.
+""",
+)
+
+add_docstr(
+ torch.logit,
+ r"""
+logit(input, eps=None, *, out=None) -> Tensor
+
+Alias for :func:`torch.special.logit`.
+""",
+)
+
+add_docstr(
+ torch.sign,
+ r"""
+sign(input, *, out=None) -> Tensor
+
+Returns a new tensor with the signs of the elements of :attr:`input`.
+
+.. math::
+ \text{out}_{i} = \operatorname{sgn}(\text{input}_{i})
+"""
+ + r"""
+Args:
+ {input}
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.tensor([0.7, -1.2, 0., 2.3])
+ >>> a
+ tensor([ 0.7000, -1.2000, 0.0000, 2.3000])
+ >>> torch.sign(a)
+ tensor([ 1., -1., 0., 1.])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.signbit,
+ r"""
+signbit(input, *, out=None) -> Tensor
+
+Tests if each element of :attr:`input` has its sign bit set or not.
+
+Args:
+ {input}
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.tensor([0.7, -1.2, 0., 2.3])
+ >>> torch.signbit(a)
+ tensor([ False, True, False, False])
+ >>> a = torch.tensor([-0.0, 0.0])
+ >>> torch.signbit(a)
+ tensor([ True, False])
+
+.. note::
+ signbit handles signed zeros, so negative zero (-0) returns True.
+
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.sgn,
+ r"""
+sgn(input, *, out=None) -> Tensor
+
+This function is an extension of torch.sign() to complex tensors.
+It computes a new tensor whose elements have
+the same angles as the corresponding elements of :attr:`input` and
+absolute values (i.e. magnitudes) of one for complex tensors and
+is equivalent to torch.sign() for non-complex tensors.
+
+.. math::
+ \text{out}_{i} = \begin{cases}
+ 0 & |\text{input}_i| == 0 \\
+ \frac{\text{input}_i}{|\text{input}_i|} & \text{otherwise}
+ \end{cases}
+
+"""
+ + r"""
+Args:
+ {input}
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> t = torch.tensor([3+4j, 7-24j, 0, 1+2j])
+ >>> t.sgn()
+ tensor([0.6000+0.8000j, 0.2800-0.9600j, 0.0000+0.0000j, 0.4472+0.8944j])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.sin,
+ r"""
+sin(input, *, out=None) -> Tensor
+
+Returns a new tensor with the sine of the elements of :attr:`input`.
+
+.. math::
+ \text{out}_{i} = \sin(\text{input}_{i})
+"""
+ + r"""
+Args:
+ {input}
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.randn(4)
+ >>> a
+ tensor([-0.5461, 0.1347, -2.7266, -0.2746])
+ >>> torch.sin(a)
+ tensor([-0.5194, 0.1343, -0.4032, -0.2711])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.sinc,
+ r"""
+sinc(input, *, out=None) -> Tensor
+
+Alias for :func:`torch.special.sinc`.
+""",
+)
+
+add_docstr(
+ torch.sinh,
+ r"""
+sinh(input, *, out=None) -> Tensor
+
+Returns a new tensor with the hyperbolic sine of the elements of
+:attr:`input`.
+
+.. math::
+ \text{out}_{i} = \sinh(\text{input}_{i})
+"""
+ + r"""
+Args:
+ {input}
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.randn(4)
+ >>> a
+ tensor([ 0.5380, -0.8632, -0.1265, 0.9399])
+ >>> torch.sinh(a)
+ tensor([ 0.5644, -0.9744, -0.1268, 1.0845])
+
+.. note::
+ When :attr:`input` is on the CPU, the implementation of torch.sinh may use
+ the Sleef library, which rounds very large results to infinity or negative
+ infinity. See the Sleef library documentation for details.
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.sort,
+ r"""
+sort(input, dim=-1, descending=False, stable=False, *, out=None) -> (Tensor, LongTensor)
+
+Sorts the elements of the :attr:`input` tensor along a given dimension
+in ascending order by value.
+
+If :attr:`dim` is not given, the last dimension of the `input` is chosen.
+
+If :attr:`descending` is ``True`` then the elements are sorted in descending
+order by value.
+
+If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
+the order of equivalent elements.
+
+A namedtuple of (values, indices) is returned, where the `values` are the
+sorted values and `indices` are the indices of the elements in the original
+`input` tensor.
+
+Args:
+ {input}
+ dim (int, optional): the dimension to sort along
+ descending (bool, optional): controls the sorting order (ascending or descending)
+ stable (bool, optional): makes the sorting routine stable, which guarantees that the order
+ of equivalent elements is preserved.
+
+Keyword args:
+ out (tuple, optional): the output tuple of (`Tensor`, `LongTensor`) that can
+ be optionally given to be used as output buffers
+
+Example::
+
+ >>> x = torch.randn(3, 4)
+ >>> sorted, indices = torch.sort(x)
+ >>> sorted
+ tensor([[-0.2162, 0.0608, 0.6719, 2.3332],
+ [-0.5793, 0.0061, 0.6058, 0.9497],
+ [-0.5071, 0.3343, 0.9553, 1.0960]])
+ >>> indices
+ tensor([[ 1, 0, 2, 3],
+ [ 3, 1, 0, 2],
+ [ 0, 3, 1, 2]])
+
+ >>> sorted, indices = torch.sort(x, 0)
+ >>> sorted
+ tensor([[-0.5071, -0.2162, 0.6719, -0.5793],
+ [ 0.0608, 0.0061, 0.9497, 0.3343],
+ [ 0.6058, 0.9553, 1.0960, 2.3332]])
+ >>> indices
+ tensor([[ 2, 0, 0, 1],
+ [ 0, 1, 1, 2],
+ [ 1, 2, 2, 0]])
+ >>> x = torch.tensor([0, 1] * 9)
+ >>> x.sort()
+ torch.return_types.sort(
+ values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
+ indices=tensor([ 2, 16, 4, 6, 14, 8, 0, 10, 12, 9, 17, 15, 13, 11, 7, 5, 3, 1]))
+ >>> x.sort(stable=True)
+ torch.return_types.sort(
+ values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
+ indices=tensor([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 1, 3, 5, 7, 9, 11, 13, 15, 17]))
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.argsort,
+ r"""
+argsort(input, dim=-1, descending=False, stable=False) -> Tensor
+
+Returns the indices that sort a tensor along a given dimension in ascending
+order by value.
+
+This is the second value returned by :meth:`torch.sort`. See its documentation
+for the exact semantics of this method.
+
+If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
+the order of equivalent elements. If ``False``, the relative order of values
+which compare equal is not guaranteed. ``True`` is slower.
+
+Args:
+ {input}
+ dim (int, optional): the dimension to sort along
+ descending (bool, optional): controls the sorting order (ascending or descending)
+ stable (bool, optional): controls the relative order of equivalent elements
+
+Example::
+
+ >>> a = torch.randn(4, 4)
+ >>> a
+ tensor([[ 0.0785, 1.5267, -0.8521, 0.4065],
+ [ 0.1598, 0.0788, -0.0745, -1.2700],
+ [ 1.2208, 1.0722, -0.7064, 1.2564],
+ [ 0.0669, -0.2318, -0.8229, -0.9280]])
+
+
+ >>> torch.argsort(a, dim=1)
+ tensor([[2, 0, 3, 1],
+ [3, 2, 1, 0],
+ [2, 1, 0, 3],
+ [3, 2, 1, 0]])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.msort,
+ r"""
+msort(input, *, out=None) -> Tensor
+
+Sorts the elements of the :attr:`input` tensor along its first dimension
+in ascending order by value.
+
+.. note:: `torch.msort(t)` is equivalent to `torch.sort(t, dim=0)[0]`.
+ See also :func:`torch.sort`.
+
+Args:
+ {input}
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> t = torch.randn(3, 4)
+ >>> t
+ tensor([[-0.1321, 0.4370, -1.2631, -1.1289],
+ [-2.0527, -1.1250, 0.2275, 0.3077],
+ [-0.0881, -0.1259, -0.5495, 1.0284]])
+ >>> torch.msort(t)
+ tensor([[-2.0527, -1.1250, -1.2631, -1.1289],
+ [-0.1321, -0.1259, -0.5495, 0.3077],
+ [-0.0881, 0.4370, 0.2275, 1.0284]])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.sparse_compressed_tensor,
+ r"""sparse_compressed_tensor(compressed_indices, plain_indices, values, size=None, """
+ r"""*, dtype=None, layout=None, device=None, requires_grad=False, check_invariants=None) -> Tensor
+
+Constructs a :ref:`sparse tensor in Compressed Sparse format - CSR,
+CSC, BSR, or BSC - ` with specified values at
+the given :attr:`compressed_indices` and :attr:`plain_indices`. Sparse
+matrix multiplication operations in Compressed Sparse format are
+typically faster than that for sparse tensors in COO format. Make sure you
+have a look at :ref:`the note on the data type of the indices
+`.
+
+{sparse_factory_device_note}
+
+Args:
+ compressed_indices (array_like): (B+1)-dimensional array of size
+ ``(*batchsize, compressed_dim_size + 1)``. The last element of
+ each batch is the number of non-zero elements or blocks. This
+ tensor encodes the index in ``values`` and ``plain_indices``
+ depending on where the given compressed dimension (row or
+ column) starts. Each successive number in the tensor
+ subtracted by the number before it denotes the number of
+ elements or blocks in a given compressed dimension.
+ plain_indices (array_like): Plain dimension (column or row)
+ co-ordinates of each element or block in values. (B+1)-dimensional
+ tensor with the same length as values.
+
+ values (array_like): Initial values for the tensor. Can be a list,
+ tuple, NumPy ``ndarray``, scalar, and other types that
+ represents a (1+K)-dimensional (for CSR and CSC layouts) or
+ (1+2+K)-dimensional tensor (for BSR and BSC layouts) where
+ ``K`` is the number of dense dimensions.
+ size (list, tuple, :class:`torch.Size`, optional): Size of the
+ sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols *
+ blocksize[1], *densesize)`` where ``blocksize[0] ==
+ blocksize[1] == 1`` for CSR and CSC formats. If not provided,
+ the size will be inferred as the minimum size big enough to
+ hold all non-zero elements or blocks.
+
+Keyword args:
+ dtype (:class:`torch.dtype`, optional): the desired data type of
+ returned tensor. Default: if None, infers data type from
+ :attr:`values`.
+ layout (:class:`torch.layout`, required): the desired layout of
+ returned tensor: :attr:`torch.sparse_csr`,
+ :attr:`torch.sparse_csc`, :attr:`torch.sparse_bsr`, or
+ :attr:`torch.sparse_bsc`.
+ device (:class:`torch.device`, optional): the desired device of
+ returned tensor. Default: if None, uses the current device
+ for the default tensor type (see
+ :func:`torch.set_default_device`). :attr:`device` will be
+ the CPU for CPU tensor types and the current CUDA device for
+ CUDA tensor types.
+ {requires_grad}
+ {check_invariants}
+
+Example::
+ >>> compressed_indices = [0, 2, 4]
+ >>> plain_indices = [0, 1, 0, 1]
+ >>> values = [1, 2, 3, 4]
+ >>> torch.sparse_compressed_tensor(torch.tensor(compressed_indices, dtype=torch.int64),
+ ... torch.tensor(plain_indices, dtype=torch.int64),
+ ... torch.tensor(values), dtype=torch.double, layout=torch.sparse_csr)
+ tensor(crow_indices=tensor([0, 2, 4]),
+ col_indices=tensor([0, 1, 0, 1]),
+ values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4,
+ dtype=torch.float64, layout=torch.sparse_csr)
+""".format(
+ **factory_common_args
+ ),
+)
+
+add_docstr(
+ torch.sparse_csr_tensor,
+ r"""sparse_csr_tensor(crow_indices, col_indices, values, size=None, """
+ r"""*, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor
+
+Constructs a :ref:`sparse tensor in CSR (Compressed Sparse Row) ` with specified
+values at the given :attr:`crow_indices` and :attr:`col_indices`. Sparse matrix multiplication operations
+in CSR format are typically faster than that for sparse tensors in COO format. Make sure you have a look
+at :ref:`the note on the data type of the indices `.
+
+{sparse_factory_device_note}
+
+Args:
+ crow_indices (array_like): (B+1)-dimensional array of size
+ ``(*batchsize, nrows + 1)``. The last element of each batch
+ is the number of non-zeros. This tensor encodes the index in
+ values and col_indices depending on where the given row
+ starts. Each successive number in the tensor subtracted by the
+ number before it denotes the number of elements in a given
+ row.
+ col_indices (array_like): Column co-ordinates of each element in
+ values. (B+1)-dimensional tensor with the same length
+ as values.
+ values (array_like): Initial values for the tensor. Can be a list,
+ tuple, NumPy ``ndarray``, scalar, and other types that
+ represents a (1+K)-dimensional tensor where ``K`` is the number
+ of dense dimensions.
+ size (list, tuple, :class:`torch.Size`, optional): Size of the
+ sparse tensor: ``(*batchsize, nrows, ncols, *densesize)``. If
+ not provided, the size will be inferred as the minimum size
+ big enough to hold all non-zero elements.
+
+Keyword args:
+ dtype (:class:`torch.dtype`, optional): the desired data type of
+ returned tensor. Default: if None, infers data type from
+ :attr:`values`.
+ device (:class:`torch.device`, optional): the desired device of
+ returned tensor. Default: if None, uses the current device
+ for the default tensor type (see
+ :func:`torch.set_default_device`). :attr:`device` will be
+ the CPU for CPU tensor types and the current CUDA device for
+ CUDA tensor types.
+ {requires_grad}
+ {check_invariants}
+
+Example::
+ >>> crow_indices = [0, 2, 4]
+ >>> col_indices = [0, 1, 0, 1]
+ >>> values = [1, 2, 3, 4]
+ >>> torch.sparse_csr_tensor(torch.tensor(crow_indices, dtype=torch.int64),
+ ... torch.tensor(col_indices, dtype=torch.int64),
+ ... torch.tensor(values), dtype=torch.double)
+ tensor(crow_indices=tensor([0, 2, 4]),
+ col_indices=tensor([0, 1, 0, 1]),
+ values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4,
+ dtype=torch.float64, layout=torch.sparse_csr)
+""".format(
+ **factory_common_args
+ ),
+)
+
+add_docstr(
+ torch.sparse_csc_tensor,
+ r"""sparse_csc_tensor(ccol_indices, row_indices, values, size=None, """
+ r"""*, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor
+
+Constructs a :ref:`sparse tensor in CSC (Compressed Sparse Column)
+` with specified values at the given
+:attr:`ccol_indices` and :attr:`row_indices`. Sparse matrix
+multiplication operations in CSC format are typically faster than that
+for sparse tensors in COO format. Make sure you have a look at :ref:`the
+note on the data type of the indices `.
+
+{sparse_factory_device_note}
+
+Args:
+ ccol_indices (array_like): (B+1)-dimensional array of size
+ ``(*batchsize, ncols + 1)``. The last element of each batch
+ is the number of non-zeros. This tensor encodes the index in
+ values and row_indices depending on where the given column
+ starts. Each successive number in the tensor subtracted by the
+ number before it denotes the number of elements in a given
+ column.
+ row_indices (array_like): Row co-ordinates of each element in
+ values. (B+1)-dimensional tensor with the same length as
+ values.
+ values (array_like): Initial values for the tensor. Can be a list,
+ tuple, NumPy ``ndarray``, scalar, and other types that
+ represents a (1+K)-dimensional tensor where ``K`` is the number
+ of dense dimensions.
+ size (list, tuple, :class:`torch.Size`, optional): Size of the
+ sparse tensor: ``(*batchsize, nrows, ncols, *densesize)``. If
+ not provided, the size will be inferred as the minimum size
+ big enough to hold all non-zero elements.
+
+Keyword args:
+ dtype (:class:`torch.dtype`, optional): the desired data type of
+ returned tensor. Default: if None, infers data type from
+ :attr:`values`.
+ device (:class:`torch.device`, optional): the desired device of
+ returned tensor. Default: if None, uses the current device
+ for the default tensor type (see
+ :func:`torch.set_default_device`). :attr:`device` will be
+ the CPU for CPU tensor types and the current CUDA device for
+ CUDA tensor types.
+ {requires_grad}
+ {check_invariants}
+
+Example::
+ >>> ccol_indices = [0, 2, 4]
+ >>> row_indices = [0, 1, 0, 1]
+ >>> values = [1, 2, 3, 4]
+ >>> torch.sparse_csc_tensor(torch.tensor(ccol_indices, dtype=torch.int64),
+ ... torch.tensor(row_indices, dtype=torch.int64),
+ ... torch.tensor(values), dtype=torch.double)
+ tensor(ccol_indices=tensor([0, 2, 4]),
+ row_indices=tensor([0, 1, 0, 1]),
+ values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4,
+ dtype=torch.float64, layout=torch.sparse_csc)
+""".format(
+ **factory_common_args
+ ),
+)
+
+add_docstr(
+ torch.sparse_bsr_tensor,
+ r"""sparse_bsr_tensor(crow_indices, col_indices, values, size=None, """
+ r"""*, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor
+
+Constructs a :ref:`sparse tensor in BSR (Block Compressed Sparse Row)
+` with specified 2-dimensional blocks at the given
+:attr:`crow_indices` and :attr:`col_indices`. Sparse matrix
+multiplication operations in BSR format are typically faster than that
+for sparse tensors in COO format. Make sure you have a look at :ref:`the
+note on the data type of the indices `.
+
+{sparse_factory_device_note}
+
+Args:
+ crow_indices (array_like): (B+1)-dimensional array of size
+ ``(*batchsize, nrowblocks + 1)``. The last element of each
+ batch is the number of non-zeros. This tensor encodes the
+ block index in values and col_indices depending on where the
+ given row block starts. Each successive number in the tensor
+ subtracted by the number before it denotes the number of
+ blocks in a given row.
+ col_indices (array_like): Column block co-ordinates of each block
+ in values. (B+1)-dimensional tensor with the same length as
+ values.
+ values (array_like): Initial values for the tensor. Can be a list,
+ tuple, NumPy ``ndarray``, scalar, and other types that
+ represents a (1 + 2 + K)-dimensional tensor where ``K`` is the
+ number of dense dimensions.
+ size (list, tuple, :class:`torch.Size`, optional): Size of the
+ sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols *
+ blocksize[1], *densesize)`` where ``blocksize ==
+ values.shape[1:3]``. If not provided, the size will be
+ inferred as the minimum size big enough to hold all non-zero
+ blocks.
+
+Keyword args:
+ dtype (:class:`torch.dtype`, optional): the desired data type of
+ returned tensor. Default: if None, infers data type from
+ :attr:`values`.
+ device (:class:`torch.device`, optional): the desired device of
+ returned tensor. Default: if None, uses the current device
+ for the default tensor type (see
+ :func:`torch.set_default_device`). :attr:`device` will be
+ the CPU for CPU tensor types and the current CUDA device for
+ CUDA tensor types.
+ {requires_grad}
+ {check_invariants}
+
+Example::
+ >>> crow_indices = [0, 1, 2]
+ >>> col_indices = [0, 1]
+ >>> values = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
+ >>> torch.sparse_bsr_tensor(torch.tensor(crow_indices, dtype=torch.int64),
+ ... torch.tensor(col_indices, dtype=torch.int64),
+ ... torch.tensor(values), dtype=torch.double)
+ tensor(crow_indices=tensor([0, 1, 2]),
+ col_indices=tensor([0, 1]),
+ values=tensor([[[1., 2.],
+ [3., 4.]],
+ [[5., 6.],
+ [7., 8.]]]), size=(2, 2), nnz=2, dtype=torch.float64,
+ layout=torch.sparse_bsr)
+""".format(
+ **factory_common_args
+ ),
+)
+
+add_docstr(
+ torch.sparse_bsc_tensor,
+ r"""sparse_bsc_tensor(ccol_indices, row_indices, values, size=None, """
+ r"""*, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor
+
+Constructs a :ref:`sparse tensor in BSC (Block Compressed Sparse
+Column) ` with specified 2-dimensional blocks at the
+given :attr:`ccol_indices` and :attr:`row_indices`. Sparse matrix
+multiplication operations in BSC format are typically faster than that
+for sparse tensors in COO format. Make sure you have a look at :ref:`the
+note on the data type of the indices `.
+
+{sparse_factory_device_note}
+
+Args:
+ ccol_indices (array_like): (B+1)-dimensional array of size
+ ``(*batchsize, ncolblocks + 1)``. The last element of each
+ batch is the number of non-zeros. This tensor encodes the
+ index in values and row_indices depending on where the given
+ column starts. Each successive number in the tensor subtracted
+ by the number before it denotes the number of elements in a
+ given column.
+ row_indices (array_like): Row block co-ordinates of each block in
+ values. (B+1)-dimensional tensor with the same length
+ as values.
+ values (array_like): Initial blocks for the tensor. Can be a list,
+ tuple, NumPy ``ndarray``, and other types that
+ represents a (1 + 2 + K)-dimensional tensor where ``K`` is the
+ number of dense dimensions.
+ size (list, tuple, :class:`torch.Size`, optional): Size of the
+ sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols *
+ blocksize[1], *densesize)`` If not provided, the size will be
+ inferred as the minimum size big enough to hold all non-zero
+ blocks.
+
+Keyword args:
+ dtype (:class:`torch.dtype`, optional): the desired data type of
+ returned tensor. Default: if None, infers data type from
+ :attr:`values`.
+ device (:class:`torch.device`, optional): the desired device of
+ returned tensor. Default: if None, uses the current device
+ for the default tensor type (see
+ :func:`torch.set_default_device`). :attr:`device` will be
+ the CPU for CPU tensor types and the current CUDA device for
+ CUDA tensor types.
+ {requires_grad}
+ {check_invariants}
+
+Example::
+ >>> ccol_indices = [0, 1, 2]
+ >>> row_indices = [0, 1]
+ >>> values = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
+ >>> torch.sparse_bsc_tensor(torch.tensor(ccol_indices, dtype=torch.int64),
+ ... torch.tensor(row_indices, dtype=torch.int64),
+ ... torch.tensor(values), dtype=torch.double)
+ tensor(ccol_indices=tensor([0, 1, 2]),
+ row_indices=tensor([0, 1]),
+ values=tensor([[[1., 2.],
+ [3., 4.]],
+ [[5., 6.],
+ [7., 8.]]]), size=(2, 2), nnz=2, dtype=torch.float64,
+ layout=torch.sparse_bsc)
+""".format(
+ **factory_common_args
+ ),
+)
+
+add_docstr(
+ torch.sparse_coo_tensor,
+ r"""sparse_coo_tensor(indices, values, size=None, """
+ r"""*, dtype=None, device=None, requires_grad=False, check_invariants=None, is_coalesced=None) -> Tensor
+
+Constructs a :ref:`sparse tensor in COO(rdinate) format
+` with specified values at the given
+:attr:`indices`.
+
+.. note::
+
+ This function returns an :ref:`uncoalesced tensor
+ ` when :attr:`is_coalesced` is
+ unspecified or ``None``.
+
+{sparse_factory_device_note}
+
+Args:
+ indices (array_like): Initial data for the tensor. Can be a list, tuple,
+ NumPy ``ndarray``, scalar, and other types. Will be cast to a :class:`torch.LongTensor`
+ internally. The indices are the coordinates of the non-zero values in the matrix, and thus
+ should be two-dimensional where the first dimension is the number of tensor dimensions and
+ the second dimension is the number of non-zero values.
+ values (array_like): Initial values for the tensor. Can be a list, tuple,
+ NumPy ``ndarray``, scalar, and other types.
+ size (list, tuple, or :class:`torch.Size`, optional): Size of the sparse tensor. If not
+ provided the size will be inferred as the minimum size big enough to hold all non-zero
+ elements.
+
+Keyword args:
+ dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
+ Default: if None, infers data type from :attr:`values`.
+ device (:class:`torch.device`, optional): the desired device of returned tensor.
+ Default: if None, uses the current device for the default tensor type
+ (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
+ for CPU tensor types and the current CUDA device for CUDA tensor types.
+ {requires_grad}
+ {check_invariants}
+ is_coalesced (bool, optional): When ``True``, the caller is
+ responsible for providing tensor indices that correspond to a
+ coalesced tensor. If the :attr:`check_invariants` flag is
+ False, no error will be raised if the prerequisites are not
+ met and this will lead to silently incorrect results. To force
+ coalescing, please use :meth:`coalesce` on the resulting
+ Tensor.
+ Default: None: except for trivial cases (e.g. nnz < 2) the
+ resulting Tensor has is_coalesced set to ``False``.
+
+Example::
+
+ >>> i = torch.tensor([[0, 1, 1],
+ ... [2, 0, 2]])
+ >>> v = torch.tensor([3, 4, 5], dtype=torch.float32)
+ >>> torch.sparse_coo_tensor(i, v, [2, 4])
+ tensor(indices=tensor([[0, 1, 1],
+ [2, 0, 2]]),
+ values=tensor([3., 4., 5.]),
+ size=(2, 4), nnz=3, layout=torch.sparse_coo)
+
+ >>> torch.sparse_coo_tensor(i, v) # Shape inference
+ tensor(indices=tensor([[0, 1, 1],
+ [2, 0, 2]]),
+ values=tensor([3., 4., 5.]),
+ size=(2, 3), nnz=3, layout=torch.sparse_coo)
+
+ >>> torch.sparse_coo_tensor(i, v, [2, 4],
+ ... dtype=torch.float64,
+ ... device=torch.device('cuda:0'))
+ tensor(indices=tensor([[0, 1, 1],
+ [2, 0, 2]]),
+ values=tensor([3., 4., 5.]),
+ device='cuda:0', size=(2, 4), nnz=3, dtype=torch.float64,
+ layout=torch.sparse_coo)
+
+ # Create an empty sparse tensor with the following invariants:
+ # 1. sparse_dim + dense_dim = len(SparseTensor.shape)
+ # 2. SparseTensor._indices().shape = (sparse_dim, nnz)
+ # 3. SparseTensor._values().shape = (nnz, SparseTensor.shape[sparse_dim:])
+ #
+ # For instance, to create an empty sparse tensor with nnz = 0, dense_dim = 0 and
+ # sparse_dim = 1 (hence indices is a 2D tensor of shape = (1, 0))
+ >>> S = torch.sparse_coo_tensor(torch.empty([1, 0]), [], [1])
+ tensor(indices=tensor([], size=(1, 0)),
+ values=tensor([], size=(0,)),
+ size=(1,), nnz=0, layout=torch.sparse_coo)
+
+ # and to create an empty sparse tensor with nnz = 0, dense_dim = 1 and
+ # sparse_dim = 1
+ >>> S = torch.sparse_coo_tensor(torch.empty([1, 0]), torch.empty([0, 2]), [1, 2])
+ tensor(indices=tensor([], size=(1, 0)),
+ values=tensor([], size=(0, 2)),
+ size=(1, 2), nnz=0, layout=torch.sparse_coo)
+
+.. _torch.sparse: https://pytorch.org/docs/stable/sparse.html
+""".format(
+ **factory_common_args
+ ),
+)
+
+add_docstr(
+ torch.sqrt,
+ r"""
+sqrt(input, *, out=None) -> Tensor
+
+Returns a new tensor with the square-root of the elements of :attr:`input`.
+
+.. math::
+ \text{out}_{i} = \sqrt{\text{input}_{i}}
+"""
+ + r"""
+Args:
+ {input}
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.randn(4)
+ >>> a
+ tensor([-2.0755, 1.0226, 0.0831, 0.4806])
+ >>> torch.sqrt(a)
+ tensor([ nan, 1.0112, 0.2883, 0.6933])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.square,
+ r"""
+square(input, *, out=None) -> Tensor
+
+Returns a new tensor with the square of the elements of :attr:`input`.
+
+Args:
+ {input}
+
+Keyword args:
+ {out}
+
+Example::
+
+ >>> a = torch.randn(4)
+ >>> a
+ tensor([-2.0755, 1.0226, 0.0831, 0.4806])
+ >>> torch.square(a)
+ tensor([ 4.3077, 1.0457, 0.0069, 0.2310])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.squeeze,
+ r"""
+squeeze(input, dim=None) -> Tensor
+
+Returns a tensor with all specified dimensions of :attr:`input` of size `1` removed.
+
+For example, if `input` is of shape:
+:math:`(A \times 1 \times B \times C \times 1 \times D)` then the `input.squeeze()`
+will be of shape: :math:`(A \times B \times C \times D)`.
+
+When :attr:`dim` is given, a squeeze operation is done only in the given
+dimension(s). If `input` is of shape: :math:`(A \times 1 \times B)`,
+``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)``
+will squeeze the tensor to the shape :math:`(A \times B)`.
+
+.. note:: The returned tensor shares the storage with the input tensor,
+ so changing the contents of one will change the contents of the other.
+
+.. warning:: If the tensor has a batch dimension of size 1, then `squeeze(input)`
+ will also remove the batch dimension, which can lead to unexpected
+ errors. Consider specifying only the dims you wish to be squeezed.
+
+Args:
+ {input}
+ dim (int or tuple of ints, optional): if given, the input will be squeezed
+ only in the specified dimensions.
+
+ .. versionchanged:: 2.0
+ :attr:`dim` now accepts tuples of dimensions.
+
+Example::
+
+ >>> x = torch.zeros(2, 1, 2, 1, 2)
+ >>> x.size()
+ torch.Size([2, 1, 2, 1, 2])
+ >>> y = torch.squeeze(x)
+ >>> y.size()
+ torch.Size([2, 2, 2])
+ >>> y = torch.squeeze(x, 0)
+ >>> y.size()
+ torch.Size([2, 1, 2, 1, 2])
+ >>> y = torch.squeeze(x, 1)
+ >>> y.size()
+ torch.Size([2, 2, 1, 2])
+ >>> y = torch.squeeze(x, (1, 2, 3))
+ >>> y.size()
+ torch.Size([2, 2, 2])
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.std,
+ r"""
+std(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor
+
+Calculates the standard deviation over the dimensions specified by :attr:`dim`.
+:attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
+reduce over all dimensions.
+
+The standard deviation (:math:`\sigma`) is calculated as
+
+.. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
+
+where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
+sample mean, :math:`N` is the number of samples and :math:`\delta N` is
+the :attr:`correction`.
+"""
+ + r"""
+
+{keepdim_details}
+
+Args:
+ {input}
+ {dim}
+
+Keyword args:
+ correction (int): difference between the sample size and sample degrees of freedom.
+ Defaults to `Bessel's correction`_, ``correction=1``.
+
+ .. versionchanged:: 2.0
+ Previously this argument was called ``unbiased`` and was a boolean
+ with ``True`` corresponding to ``correction=1`` and ``False`` being
+ ``correction=0``.
+ {keepdim}
+ {out}
+
+Example:
+
+ >>> a = torch.tensor(
+ ... [[ 0.2035, 1.2959, 1.8101, -0.4644],
+ ... [ 1.5027, -0.3270, 0.5905, 0.6538],
+ ... [-1.5745, 1.3330, -0.5596, -0.6548],
+ ... [ 0.1264, -0.5080, 1.6420, 0.1992]])
+ >>> torch.std(a, dim=1, keepdim=True)
+ tensor([[1.0311],
+ [0.7477],
+ [1.2204],
+ [0.9087]])
+
+.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
+
+""".format(
+ **multi_dim_common
+ ),
+)
+
+add_docstr(
+ torch.std_mean,
+ r"""
+std_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)
+
+Calculates the standard deviation and mean over the dimensions specified by
+:attr:`dim`. :attr:`dim` can be a single dimension, list of dimensions, or
+``None`` to reduce over all dimensions.
+
+The standard deviation (:math:`\sigma`) is calculated as
+
+.. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
+
+where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
+sample mean, :math:`N` is the number of samples and :math:`\delta N` is
+the :attr:`correction`.
+
+"""
+ + r"""
+
+{keepdim_details}
+
+Args:
+ {input}
+ {opt_dim}
+
+Keyword args:
+ correction (int): difference between the sample size and sample degrees of freedom.
+ Defaults to `Bessel's correction`_, ``correction=1``.
+
+ .. versionchanged:: 2.0
+ Previously this argument was called ``unbiased`` and was a boolean
+ with ``True`` corresponding to ``correction=1`` and ``False`` being
+ ``correction=0``.
+ {keepdim}
+ {out}
+
+Returns:
+ A tuple (std, mean) containing the standard deviation and mean.
+
+Example:
+
+ >>> a = torch.tensor(
+ ... [[ 0.2035, 1.2959, 1.8101, -0.4644],
+ ... [ 1.5027, -0.3270, 0.5905, 0.6538],
+ ... [-1.5745, 1.3330, -0.5596, -0.6548],
+ ... [ 0.1264, -0.5080, 1.6420, 0.1992]])
+ >>> torch.std_mean(a, dim=0, keepdim=True)
+ (tensor([[1.2620, 1.0028, 1.0957, 0.6038]]),
+ tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]]))
+
+.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
+
+""".format(
+ **multi_dim_common
+ ),
+)
+
+add_docstr(
+ torch.sub,
+ r"""
+sub(input, other, *, alpha=1, out=None) -> Tensor
+
+Subtracts :attr:`other`, scaled by :attr:`alpha`, from :attr:`input`.
+
+.. math::
+ \text{out}_i = \text{input}_i - \text{alpha} \times \text{other}_i
+"""
+ + r"""
+
+Supports :ref:`broadcasting to a common shape `,
+:ref:`type promotion `, and integer, float, and complex inputs.
+
+Args:
+ {input}
+ other (Tensor or Number): the tensor or number to subtract from :attr:`input`.
+
+Keyword args:
+ alpha (Number): the multiplier for :attr:`other`.
+ {out}
+
+Example::
+
+ >>> a = torch.tensor((1, 2))
+ >>> b = torch.tensor((0, 1))
+ >>> torch.sub(a, b, alpha=2)
+ tensor([1, 0])
+""".format(
+ **common_args
+ ),
+)
+
# torch.subtract is a pure alias, so its docstring simply points at torch.sub.
add_docstr(
    torch.subtract,
    r"""
subtract(input, other, *, alpha=1, out=None) -> Tensor

Alias for :func:`torch.sub`.
""",
)

# torch.sum has two overloads (full reduction and per-dimension reduction);
# both are documented in one string, the second via ``.. function::``.
add_docstr(
    torch.sum,
    r"""
sum(input, *, dtype=None) -> Tensor

Returns the sum of all elements in the :attr:`input` tensor.

Args:
    {input}

Keyword args:
    {dtype}

Example::

    >>> a = torch.randn(1, 3)
    >>> a
    tensor([[ 0.1133, -0.9567, 0.2958]])
    >>> torch.sum(a)
    tensor(-0.5475)

.. function:: sum(input, dim, keepdim=False, *, dtype=None) -> Tensor
    :noindex:

Returns the sum of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
reduce over all of them.

{keepdim_details}

Args:
    {input}
    {opt_dim}
    {keepdim}

Keyword args:
    {dtype}

Example::

    >>> a = torch.randn(4, 4)
    >>> a
    tensor([[ 0.0569, -0.2475, 0.0737, -0.3429],
            [-0.2993, 0.9138, 0.9337, -1.6864],
            [ 0.1132, 0.7892, -0.1003, 0.5688],
            [ 0.3637, -0.9906, -0.4752, -1.5197]])
    >>> torch.sum(a, 1)
    tensor([-0.4598, -0.1381, 1.3708, -2.6217])
    >>> b = torch.arange(4 * 5 * 6).view(4, 5, 6)
    >>> torch.sum(b, (2, 1))
    tensor([ 435., 1335., 2235., 3135.])
""".format(
        **multi_dim_common
    ),
)

# Like torch.sum but NaN-aware (NaNs counted as zero); also documents the
# per-dimension overload.
add_docstr(
    torch.nansum,
    r"""
nansum(input, *, dtype=None) -> Tensor

Returns the sum of all elements, treating Not a Numbers (NaNs) as zero.

Args:
    {input}

Keyword args:
    {dtype}

Example::

    >>> a = torch.tensor([1., 2., float('nan'), 4.])
    >>> torch.nansum(a)
    tensor(7.)

.. function:: nansum(input, dim, keepdim=False, *, dtype=None) -> Tensor
    :noindex:

Returns the sum of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`, treating Not a Numbers (NaNs) as zero.
If :attr:`dim` is a list of dimensions, reduce over all of them.

{keepdim_details}

Args:
    {input}
    {opt_dim}
    {keepdim}

Keyword args:
    {dtype}

Example::

    >>> torch.nansum(torch.tensor([1., float("nan")]))
    1.0
    >>> a = torch.tensor([[1, 2], [3., float("nan")]])
    >>> torch.nansum(a)
    tensor(6.)
    >>> torch.nansum(a, dim=0)
    tensor([4., 2.])
    >>> torch.nansum(a, dim=1)
    tensor([3., 3.])
""".format(
        **multi_dim_common
    ),
)
+
# Register the docstring for the deprecated torch.svd (superseded by
# torch.linalg.svd). Defect fixed here: grammar in the numerical-stability
# warning ("as they depends on" -> "as they depend on").
add_docstr(
    torch.svd,
    r"""
svd(input, some=True, compute_uv=True, *, out=None) -> (Tensor, Tensor, Tensor)

Computes the singular value decomposition of either a matrix or batch of
matrices :attr:`input`. The singular value decomposition is represented as a
namedtuple `(U, S, V)`, such that :attr:`input` :math:`= U \text{diag}(S) V^{\text{H}}`.
where :math:`V^{\text{H}}` is the transpose of `V` for real inputs,
and the conjugate transpose of `V` for complex inputs.
If :attr:`input` is a batch of matrices, then `U`, `S`, and `V` are also
batched with the same batch dimensions as :attr:`input`.

If :attr:`some` is `True` (default), the method returns the reduced singular
value decomposition. In this case, if the last two dimensions of :attr:`input` are
`m` and `n`, then the returned `U` and `V` matrices will contain only
`min(n, m)` orthonormal columns.

If :attr:`compute_uv` is `False`, the returned `U` and `V` will be
zero-filled matrices of shape `(m, m)` and `(n, n)`
respectively, and the same device as :attr:`input`. The argument :attr:`some`
has no effect when :attr:`compute_uv` is `False`.

Supports :attr:`input` of float, double, cfloat and cdouble data types.
The dtypes of `U` and `V` are the same as :attr:`input`'s. `S` will
always be real-valued, even if :attr:`input` is complex.

.. warning::

    :func:`torch.svd` is deprecated in favor of :func:`torch.linalg.svd`
    and will be removed in a future PyTorch release.

    ``U, S, V = torch.svd(A, some=some, compute_uv=True)`` (default) should be replaced with

    .. code:: python

        U, S, Vh = torch.linalg.svd(A, full_matrices=not some)
        V = Vh.mH

    ``_, S, _ = torch.svd(A, some=some, compute_uv=False)`` should be replaced with

    .. code:: python

        S = torch.linalg.svdvals(A)

.. note:: Differences with :func:`torch.linalg.svd`:

    * :attr:`some` is the opposite of
      :func:`torch.linalg.svd`'s :attr:`full_matrices`. Note that
      default value for both is `True`, so the default behavior is
      effectively the opposite.
    * :func:`torch.svd` returns `V`, whereas :func:`torch.linalg.svd` returns
      `Vh`, that is, :math:`V^{\text{H}}`.
    * If :attr:`compute_uv` is `False`, :func:`torch.svd` returns zero-filled
      tensors for `U` and `Vh`, whereas :func:`torch.linalg.svd` returns
      empty tensors.

.. note:: The singular values are returned in descending order. If :attr:`input` is a batch of matrices,
          then the singular values of each matrix in the batch are returned in descending order.

.. note:: The `S` tensor can only be used to compute gradients if :attr:`compute_uv` is `True`.

.. note:: When :attr:`some` is `False`, the gradients on `U[..., :, min(m, n):]`
          and `V[..., :, min(m, n):]` will be ignored in the backward pass, as those vectors
          can be arbitrary bases of the corresponding subspaces.

.. note:: The implementation of :func:`torch.linalg.svd` on CPU uses LAPACK's routine `?gesdd`
          (a divide-and-conquer algorithm) instead of `?gesvd` for speed. Analogously,
          on GPU, it uses cuSOLVER's routines `gesvdj` and `gesvdjBatched` on CUDA 10.1.243
          and later, and MAGMA's routine `gesdd` on earlier versions of CUDA.

.. note:: The returned `U` will not be contiguous. The matrix (or batch of matrices) will
          be represented as a column-major matrix (i.e. Fortran-contiguous).

.. warning:: The gradients with respect to `U` and `V` will only be finite when the input does not
             have zero nor repeated singular values.

.. warning:: If the distance between any two singular values is close to zero, the gradients with respect to
             `U` and `V` will be numerically unstable, as they depend on
             :math:`\frac{1}{\min_{i \neq j} \sigma_i^2 - \sigma_j^2}`. The same happens when the matrix
             has small singular values, as these gradients also depend on `S⁻¹`.

.. warning:: For complex-valued :attr:`input` the singular value decomposition is not unique,
             as `U` and `V` may be multiplied by an arbitrary phase factor :math:`e^{i \phi}` on every column.
             The same happens when :attr:`input` has repeated singular values, where one may multiply
             the columns of the spanning subspace in `U` and `V` by a rotation matrix
             and `the resulting vectors will span the same subspace`_.
             Different platforms, like NumPy, or inputs on different device types,
             may produce different `U` and `V` tensors.

Args:
    input (Tensor): the input tensor of size `(*, m, n)` where `*` is zero or more
        batch dimensions consisting of `(m, n)` matrices.
    some (bool, optional): controls whether to compute the reduced or full decomposition, and
        consequently, the shape of returned `U` and `V`. Default: `True`.
    compute_uv (bool, optional): controls whether to compute `U` and `V`. Default: `True`.

Keyword args:
    out (tuple, optional): the output tuple of tensors

Example::

    >>> a = torch.randn(5, 3)
    >>> a
    tensor([[ 0.2364, -0.7752, 0.6372],
            [ 1.7201, 0.7394, -0.0504],
            [-0.3371, -1.0584, 0.5296],
            [ 0.3550, -0.4022, 1.5569],
            [ 0.2445, -0.0158, 1.1414]])
    >>> u, s, v = torch.svd(a)
    >>> u
    tensor([[ 0.4027, 0.0287, 0.5434],
            [-0.1946, 0.8833, 0.3679],
            [ 0.4296, -0.2890, 0.5261],
            [ 0.6604, 0.2717, -0.2618],
            [ 0.4234, 0.2481, -0.4733]])
    >>> s
    tensor([2.3289, 2.0315, 0.7806])
    >>> v
    tensor([[-0.0199, 0.8766, 0.4809],
            [-0.5080, 0.4054, -0.7600],
            [ 0.8611, 0.2594, -0.4373]])
    >>> torch.dist(a, torch.mm(torch.mm(u, torch.diag(s)), v.t()))
    tensor(8.6531e-07)
    >>> a_big = torch.randn(7, 5, 3)
    >>> u, s, v = torch.svd(a_big)
    >>> torch.dist(a_big, torch.matmul(torch.matmul(u, torch.diag_embed(s)), v.mT))
    tensor(2.6503e-06)

.. _the resulting vectors will span the same subspace:
    (https://en.wikipedia.org/wiki/Singular_value_decomposition#Singular_values,_singular_vectors,_and_their_relation_to_the_SVD)
""",
)
+
+
# torch.t: 2-D transpose convenience wrapper (no-op for 0-D/1-D inputs).
add_docstr(
    torch.t,
    r"""
t(input) -> Tensor

Expects :attr:`input` to be <= 2-D tensor and transposes dimensions 0
and 1.

0-D and 1-D tensors are returned as is. When input is a 2-D tensor this
is equivalent to ``transpose(input, 0, 1)``.

Args:
    {input}

Example::

    >>> x = torch.randn(())
    >>> x
    tensor(0.1995)
    >>> torch.t(x)
    tensor(0.1995)
    >>> x = torch.randn(3)
    >>> x
    tensor([ 2.4320, -0.4608, 0.7702])
    >>> torch.t(x)
    tensor([ 2.4320, -0.4608, 0.7702])
    >>> x = torch.randn(2, 3)
    >>> x
    tensor([[ 0.4875, 0.9158, -0.5872],
            [ 0.3938, -0.6929, 0.6932]])
    >>> torch.t(x)
    tensor([[ 0.4875, 0.3938],
            [ 0.9158, -0.6929],
            [-0.5872, 0.6932]])

See also :func:`torch.transpose`.
""".format(
        **common_args
    ),
)

# torch.flip: reverse along the given axes (always copies, unlike np.flip).
add_docstr(
    torch.flip,
    r"""
flip(input, dims) -> Tensor

Reverse the order of an n-D tensor along given axis in dims.

.. note::
    `torch.flip` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.flip`,
    which returns a view in constant time. Since copying a tensor's data is more work than viewing that data,
    `torch.flip` is expected to be slower than `np.flip`.

Args:
    {input}
    dims (a list or tuple): axis to flip on

Example::

    >>> x = torch.arange(8).view(2, 2, 2)
    >>> x
    tensor([[[ 0, 1],
             [ 2, 3]],

            [[ 4, 5],
             [ 6, 7]]])
    >>> torch.flip(x, [0, 1])
    tensor([[[ 6, 7],
             [ 4, 5]],

            [[ 2, 3],
             [ 0, 1]]])
""".format(
        **common_args
    ),
)

# torch.fliplr: left/right flip of a >= 2-D tensor (copying, unlike np.fliplr).
add_docstr(
    torch.fliplr,
    r"""
fliplr(input) -> Tensor

Flip tensor in the left/right direction, returning a new tensor.

Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.

Note:
    Requires the tensor to be at least 2-D.

.. note::
    `torch.fliplr` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.fliplr`,
    which returns a view in constant time. Since copying a tensor's data is more work than viewing that data,
    `torch.fliplr` is expected to be slower than `np.fliplr`.

Args:
    input (Tensor): Must be at least 2-dimensional.

Example::

    >>> x = torch.arange(4).view(2, 2)
    >>> x
    tensor([[0, 1],
            [2, 3]])
    >>> torch.fliplr(x)
    tensor([[1, 0],
            [3, 2]])
""".format(
        **common_args
    ),
)

# torch.flipud: up/down flip of a >= 1-D tensor (copying, unlike np.flipud).
add_docstr(
    torch.flipud,
    r"""
flipud(input) -> Tensor

Flip tensor in the up/down direction, returning a new tensor.

Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.

Note:
    Requires the tensor to be at least 1-D.

.. note::
    `torch.flipud` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.flipud`,
    which returns a view in constant time. Since copying a tensor's data is more work than viewing that data,
    `torch.flipud` is expected to be slower than `np.flipud`.

Args:
    input (Tensor): Must be at least 1-dimensional.

Example::

    >>> x = torch.arange(4).view(2, 2)
    >>> x
    tensor([[0, 1],
            [2, 3]])
    >>> torch.flipud(x)
    tensor([[2, 3],
            [0, 1]])
""".format(
        **common_args
    ),
)
+
# torch.roll: circular shift along one or more dimensions (flattens first
# when dims is None).
add_docstr(
    torch.roll,
    r"""
roll(input, shifts, dims=None) -> Tensor

Roll the tensor :attr:`input` along the given dimension(s). Elements that are
shifted beyond the last position are re-introduced at the first position. If
:attr:`dims` is `None`, the tensor will be flattened before rolling and then
restored to the original shape.

Args:
    {input}
    shifts (int or tuple of ints): The number of places by which the elements
        of the tensor are shifted. If shifts is a tuple, dims must be a tuple of
        the same size, and each dimension will be rolled by the corresponding
        value
    dims (int or tuple of ints): Axis along which to roll

Example::

    >>> x = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8]).view(4, 2)
    >>> x
    tensor([[1, 2],
            [3, 4],
            [5, 6],
            [7, 8]])
    >>> torch.roll(x, 1)
    tensor([[8, 1],
            [2, 3],
            [4, 5],
            [6, 7]])
    >>> torch.roll(x, 1, 0)
    tensor([[7, 8],
            [1, 2],
            [3, 4],
            [5, 6]])
    >>> torch.roll(x, -1, 0)
    tensor([[3, 4],
            [5, 6],
            [7, 8],
            [1, 2]])
    >>> torch.roll(x, shifts=(2, 1), dims=(0, 1))
    tensor([[6, 5],
            [8, 7],
            [2, 1],
            [4, 3]])
""".format(
        **common_args
    ),
)

# torch.rot90: rotate by k * 90 degrees in the plane given by two axes.
add_docstr(
    torch.rot90,
    r"""
rot90(input, k=1, dims=[0,1]) -> Tensor

Rotate an n-D tensor by 90 degrees in the plane specified by dims axis.
Rotation direction is from the first towards the second axis if k > 0, and from the second towards the first for k < 0.

Args:
    {input}
    k (int): number of times to rotate. Default value is 1
    dims (a list or tuple): axis to rotate. Default value is [0, 1]

Example::

    >>> x = torch.arange(4).view(2, 2)
    >>> x
    tensor([[0, 1],
            [2, 3]])
    >>> torch.rot90(x, 1, [0, 1])
    tensor([[1, 3],
            [0, 2]])

    >>> x = torch.arange(8).view(2, 2, 2)
    >>> x
    tensor([[[0, 1],
             [2, 3]],

            [[4, 5],
             [6, 7]]])
    >>> torch.rot90(x, 1, [1, 2])
    tensor([[[1, 3],
             [0, 2]],

            [[5, 7],
             [4, 6]]])
""".format(
        **common_args
    ),
)

# torch.take: gather elements by flat (1-D) indices.
add_docstr(
    torch.take,
    r"""
take(input, index) -> Tensor

Returns a new tensor with the elements of :attr:`input` at the given indices.
The input tensor is treated as if it were viewed as a 1-D tensor. The result
takes the same shape as the indices.

Args:
    {input}
    index (LongTensor): the indices into tensor

Example::

    >>> src = torch.tensor([[4, 3, 5],
    ...                     [6, 7, 8]])
    >>> torch.take(src, torch.tensor([0, 2, 5]))
    tensor([ 4, 5, 8])
""".format(
        **common_args
    ),
)

# torch.take_along_dim: numpy.take_along_axis analogue; pairs with
# index-producing functions such as argmax/argsort.
add_docstr(
    torch.take_along_dim,
    r"""
take_along_dim(input, indices, dim=None, *, out=None) -> Tensor

Selects values from :attr:`input` at the 1-dimensional indices from :attr:`indices` along the given :attr:`dim`.

If :attr:`dim` is None, the input array is treated as if it has been flattened to 1d.

Functions that return indices along a dimension, like :func:`torch.argmax` and :func:`torch.argsort`,
are designed to work with this function. See the examples below.

.. note::
    This function is similar to NumPy's `take_along_axis`.
    See also :func:`torch.gather`.

Args:
    {input}
    indices (tensor): the indices into :attr:`input`. Must have long dtype.
    dim (int, optional): dimension to select along.

Keyword args:
    {out}

Example::

    >>> t = torch.tensor([[10, 30, 20], [60, 40, 50]])
    >>> max_idx = torch.argmax(t)
    >>> torch.take_along_dim(t, max_idx)
    tensor([60])
    >>> sorted_idx = torch.argsort(t, dim=1)
    >>> torch.take_along_dim(t, sorted_idx, dim=1)
    tensor([[10, 20, 30],
            [40, 50, 60]])
""".format(
        **common_args
    ),
)
+
# torch.tan: elementwise tangent. Docstring split in two so .format() does
# not consume the :math: braces in the formula.
add_docstr(
    torch.tan,
    r"""
tan(input, *, out=None) -> Tensor

Returns a new tensor with the tangent of the elements of :attr:`input`.

.. math::
    \text{out}_{i} = \tan(\text{input}_{i})
"""
    + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4)
    >>> a
    tensor([-1.2027, -1.7687, 0.4412, -1.3856])
    >>> torch.tan(a)
    tensor([-2.5930, 4.9859, 0.4722, -5.3366])
""".format(
        **common_args
    ),
)

# torch.tanh: elementwise hyperbolic tangent (same two-part split as tan).
add_docstr(
    torch.tanh,
    r"""
tanh(input, *, out=None) -> Tensor

Returns a new tensor with the hyperbolic tangent of the elements
of :attr:`input`.

.. math::
    \text{out}_{i} = \tanh(\text{input}_{i})
"""
    + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4)
    >>> a
    tensor([ 0.8986, -0.7279, 1.1745, 0.2611])
    >>> torch.tanh(a)
    tensor([ 0.7156, -0.6218, 0.8257, 0.2553])
""".format(
        **common_args
    ),
)

add_docstr(
    # torch.softmax doc str. Point this to torch.nn.functional.softmax
    torch.softmax,
    r"""
softmax(input, dim, *, dtype=None) -> Tensor

Alias for :func:`torch.nn.functional.softmax`.
""",
)

# torch.topk: k largest (or smallest) values plus their indices along a dim.
add_docstr(
    torch.topk,
    r"""
topk(input, k, dim=None, largest=True, sorted=True, *, out=None) -> (Tensor, LongTensor)

Returns the :attr:`k` largest elements of the given :attr:`input` tensor along
a given dimension.

If :attr:`dim` is not given, the last dimension of the `input` is chosen.

If :attr:`largest` is ``False`` then the `k` smallest elements are returned.

A namedtuple of `(values, indices)` is returned with the `values` and
`indices` of the largest `k` elements of each row of the `input` tensor in the
given dimension `dim`.

The boolean option :attr:`sorted` if ``True``, will make sure that the returned
`k` elements are themselves sorted

Args:
    {input}
    k (int): the k in "top-k"
    dim (int, optional): the dimension to sort along
    largest (bool, optional): controls whether to return largest or
        smallest elements
    sorted (bool, optional): controls whether to return the elements
        in sorted order

Keyword args:
    out (tuple, optional): the output tuple of (Tensor, LongTensor) that can be
        optionally given to be used as output buffers

Example::

    >>> x = torch.arange(1., 6.)
    >>> x
    tensor([ 1., 2., 3., 4., 5.])
    >>> torch.topk(x, 3)
    torch.return_types.topk(values=tensor([5., 4., 3.]), indices=tensor([4, 3, 2]))
""".format(
        **common_args
    ),
)

# torch.trace: sum of the main diagonal of a 2-D matrix.
add_docstr(
    torch.trace,
    r"""
trace(input) -> Tensor

Returns the sum of the elements of the diagonal of the input 2-D matrix.

Example::

    >>> x = torch.arange(1., 10.).view(3, 3)
    >>> x
    tensor([[ 1., 2., 3.],
            [ 4., 5., 6.],
            [ 7., 8., 9.]])
    >>> torch.trace(x)
    tensor(15.)
""",
)
+
# Register the docstring for torch.transpose. Defects fixed here: both
# ":ref:`sparse tensor `" cross-references had lost their <sparse-docs>
# link targets (broken RST), and "` SparseBSR`" had a stray leading space
# inside the backticks.
add_docstr(
    torch.transpose,
    r"""
transpose(input, dim0, dim1) -> Tensor

Returns a tensor that is a transposed version of :attr:`input`.
The given dimensions :attr:`dim0` and :attr:`dim1` are swapped.

If :attr:`input` is a strided tensor then the resulting :attr:`out`
tensor shares its underlying storage with the :attr:`input` tensor, so
changing the content of one would change the content of the other.

If :attr:`input` is a :ref:`sparse tensor <sparse-docs>` then the
resulting :attr:`out` tensor *does not* share the underlying storage
with the :attr:`input` tensor.

If :attr:`input` is a :ref:`sparse tensor <sparse-docs>` with compressed
layout (SparseCSR, SparseBSR, SparseCSC or SparseBSC) the arguments
:attr:`dim0` and :attr:`dim1` must be both batch dimensions, or must
both be sparse dimensions. The batch dimensions of a sparse tensor are the
dimensions preceding the sparse dimensions.

.. note::
    Transpositions which interchange the sparse dimensions of a `SparseCSR`
    or `SparseCSC` layout tensor will result in the layout changing between
    the two options. Transposition of the sparse dimensions of a `SparseBSR`
    or `SparseBSC` layout tensor will likewise generate a result with the
    opposite layout.


Args:
    {input}
    dim0 (int): the first dimension to be transposed
    dim1 (int): the second dimension to be transposed

Example::

    >>> x = torch.randn(2, 3)
    >>> x
    tensor([[ 1.0028, -0.9893, 0.5809],
            [-0.1669, 0.7299, 0.4942]])
    >>> torch.transpose(x, 0, 1)
    tensor([[ 1.0028, -0.1669],
            [-0.9893, 0.7299],
            [ 0.5809, 0.4942]])

See also :func:`torch.t`.
""".format(
        **common_args
    ),
)
+
# Register the docstring for the deprecated torch.triangular_solve
# (superseded by torch.linalg.solve_triangular, which reverses the argument
# order). Defect fixed here: typo "zero of more batch dimensions" ->
# "zero or more batch dimensions".
add_docstr(
    torch.triangular_solve,
    r"""
triangular_solve(b, A, upper=True, transpose=False, unitriangular=False, *, out=None) -> (Tensor, Tensor)

Solves a system of equations with a square upper or lower triangular invertible matrix :math:`A`
and multiple right-hand sides :math:`b`.

In symbols, it solves :math:`AX = b` and assumes :math:`A` is square upper-triangular
(or lower-triangular if :attr:`upper`\ `= False`) and does not have zeros on the diagonal.

`torch.triangular_solve(b, A)` can take in 2D inputs `b, A` or inputs that are
batches of 2D matrices. If the inputs are batches, then returns
batched outputs `X`

If the diagonal of :attr:`A` contains zeros or elements that are very close to zero and
:attr:`unitriangular`\ `= False` (default) or if the input matrix is badly conditioned,
the result may contain `NaN` s.

Supports input of float, double, cfloat and cdouble data types.

.. warning::

    :func:`torch.triangular_solve` is deprecated in favor of :func:`torch.linalg.solve_triangular`
    and will be removed in a future PyTorch release.
    :func:`torch.linalg.solve_triangular` has its arguments reversed and does not return a
    copy of one of the inputs.

    ``X = torch.triangular_solve(B, A).solution`` should be replaced with

    .. code:: python

        X = torch.linalg.solve_triangular(A, B)

Args:
    b (Tensor): multiple right-hand sides of size :math:`(*, m, k)` where
        :math:`*` is zero or more batch dimensions
    A (Tensor): the input triangular coefficient matrix of size :math:`(*, m, m)`
        where :math:`*` is zero or more batch dimensions
    upper (bool, optional): whether :math:`A` is upper or lower triangular. Default: ``True``.
    transpose (bool, optional): solves `op(A)X = b` where `op(A) = A^T` if this flag is ``True``,
        and `op(A) = A` if it is ``False``. Default: ``False``.
    unitriangular (bool, optional): whether :math:`A` is unit triangular.
        If True, the diagonal elements of :math:`A` are assumed to be
        1 and not referenced from :math:`A`. Default: ``False``.

Keyword args:
    out ((Tensor, Tensor), optional): tuple of two tensors to write
        the output to. Ignored if `None`. Default: `None`.

Returns:
    A namedtuple `(solution, cloned_coefficient)` where `cloned_coefficient`
    is a clone of :math:`A` and `solution` is the solution :math:`X` to :math:`AX = b`
    (or whatever variant of the system of equations, depending on the keyword arguments.)

Examples::

    >>> A = torch.randn(2, 2).triu()
    >>> A
    tensor([[ 1.1527, -1.0753],
            [ 0.0000, 0.7986]])
    >>> b = torch.randn(2, 3)
    >>> b
    tensor([[-0.0210, 2.3513, -1.5492],
            [ 1.5429, 0.7403, -1.0243]])
    >>> torch.triangular_solve(b, A)
    torch.return_types.triangular_solve(
    solution=tensor([[ 1.7841, 2.9046, -2.5405],
            [ 1.9320, 0.9270, -1.2826]]),
    cloned_coefficient=tensor([[ 1.1527, -1.0753],
            [ 0.0000, 0.7986]]))
""",
)
+
# torch.tril: lower-triangular part of a matrix (or batch), rest zeroed.
# Split into two raw strings so .format() does not eat the :math: braces.
add_docstr(
    torch.tril,
    r"""
tril(input, diagonal=0, *, out=None) -> Tensor

Returns the lower triangular part of the matrix (2-D tensor) or batch of matrices
:attr:`input`, the other elements of the result tensor :attr:`out` are set to 0.

The lower triangular part of the matrix is defined as the elements on and
below the diagonal.

The argument :attr:`diagonal` controls which diagonal to consider. If
:attr:`diagonal` = 0, all elements on and below the main diagonal are
retained. A positive value includes just as many diagonals above the main
diagonal, and similarly a negative value excludes just as many diagonals below
the main diagonal. The main diagonal are the set of indices
:math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where
:math:`d_{1}, d_{2}` are the dimensions of the matrix.
"""
    + r"""
Args:
    {input}
    diagonal (int, optional): the diagonal to consider

Keyword args:
    {out}

Example::

    >>> a = torch.randn(3, 3)
    >>> a
    tensor([[-1.0813, -0.8619, 0.7105],
            [ 0.0935, 0.1380, 2.2112],
            [-0.3409, -0.9828, 0.0289]])
    >>> torch.tril(a)
    tensor([[-1.0813, 0.0000, 0.0000],
            [ 0.0935, 0.1380, 0.0000],
            [-0.3409, -0.9828, 0.0289]])

    >>> b = torch.randn(4, 6)
    >>> b
    tensor([[ 1.2219, 0.5653, -0.2521, -0.2345, 1.2544, 0.3461],
            [ 0.4785, -0.4477, 0.6049, 0.6368, 0.8775, 0.7145],
            [ 1.1502, 3.2716, -1.1243, -0.5413, 0.3615, 0.6864],
            [-0.0614, -0.7344, -1.3164, -0.7648, -1.4024, 0.0978]])
    >>> torch.tril(b, diagonal=1)
    tensor([[ 1.2219, 0.5653, 0.0000, 0.0000, 0.0000, 0.0000],
            [ 0.4785, -0.4477, 0.6049, 0.0000, 0.0000, 0.0000],
            [ 1.1502, 3.2716, -1.1243, -0.5413, 0.0000, 0.0000],
            [-0.0614, -0.7344, -1.3164, -0.7648, -1.4024, 0.0000]])
    >>> torch.tril(b, diagonal=-1)
    tensor([[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
            [ 0.4785, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
            [ 1.1502, 3.2716, 0.0000, 0.0000, 0.0000, 0.0000],
            [-0.0614, -0.7344, -1.3164, 0.0000, 0.0000, 0.0000]])
""".format(
        **common_args
    ),
)
+
# docstr is split in two parts to avoid format mis-capturing :math: braces '{}'
# as common args.
# torch.tril_indices: 2-by-N row/column coordinates of the lower triangle
# of a row-by-col matrix.
add_docstr(
    torch.tril_indices,
    r"""
tril_indices(row, col, offset=0, *, dtype=torch.long, device='cpu', layout=torch.strided) -> Tensor

Returns the indices of the lower triangular part of a :attr:`row`-by-
:attr:`col` matrix in a 2-by-N Tensor, where the first row contains row
coordinates of all indices and the second row contains column coordinates.
Indices are ordered based on rows and then columns.

The lower triangular part of the matrix is defined as the elements on and
below the diagonal.

The argument :attr:`offset` controls which diagonal to consider. If
:attr:`offset` = 0, all elements on and below the main diagonal are
retained. A positive value includes just as many diagonals above the main
diagonal, and similarly a negative value excludes just as many diagonals below
the main diagonal. The main diagonal are the set of indices
:math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]`
where :math:`d_{1}, d_{2}` are the dimensions of the matrix.

.. note::
    When running on CUDA, ``row * col`` must be less than :math:`2^{59}` to
    prevent overflow during calculation.
"""
    + r"""
Args:
    row (``int``): number of rows in the 2-D matrix.
    col (``int``): number of columns in the 2-D matrix.
    offset (``int``): diagonal offset from the main diagonal.
        Default: if not provided, 0.

Keyword args:
    dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
        Default: if ``None``, ``torch.long``.
    {device}
    layout (:class:`torch.layout`, optional): currently only support ``torch.strided``.

Example::

    >>> a = torch.tril_indices(3, 3)
    >>> a
    tensor([[0, 1, 1, 2, 2, 2],
            [0, 0, 1, 0, 1, 2]])

    >>> a = torch.tril_indices(4, 3, -1)
    >>> a
    tensor([[1, 2, 2, 3, 3, 3],
            [0, 0, 1, 0, 1, 2]])

    >>> a = torch.tril_indices(4, 3, 1)
    >>> a
    tensor([[0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3],
            [0, 1, 0, 1, 2, 0, 1, 2, 0, 1, 2]])
""".format(
        **factory_common_args
    ),
)
+
# torch.triu: upper-triangular part of a matrix (or batch), rest zeroed.
# Split into two raw strings so .format() does not eat the :math: braces.
add_docstr(
    torch.triu,
    r"""
triu(input, diagonal=0, *, out=None) -> Tensor

Returns the upper triangular part of a matrix (2-D tensor) or batch of matrices
:attr:`input`, the other elements of the result tensor :attr:`out` are set to 0.

The upper triangular part of the matrix is defined as the elements on and
above the diagonal.

The argument :attr:`diagonal` controls which diagonal to consider. If
:attr:`diagonal` = 0, all elements on and above the main diagonal are
retained. A positive value excludes just as many diagonals above the main
diagonal, and similarly a negative value includes just as many diagonals below
the main diagonal. The main diagonal are the set of indices
:math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where
:math:`d_{1}, d_{2}` are the dimensions of the matrix.
"""
    + r"""
Args:
    {input}
    diagonal (int, optional): the diagonal to consider

Keyword args:
    {out}

Example::

    >>> a = torch.randn(3, 3)
    >>> a
    tensor([[ 0.2309, 0.5207, 2.0049],
            [ 0.2072, -1.0680, 0.6602],
            [ 0.3480, -0.5211, -0.4573]])
    >>> torch.triu(a)
    tensor([[ 0.2309, 0.5207, 2.0049],
            [ 0.0000, -1.0680, 0.6602],
            [ 0.0000, 0.0000, -0.4573]])
    >>> torch.triu(a, diagonal=1)
    tensor([[ 0.0000, 0.5207, 2.0049],
            [ 0.0000, 0.0000, 0.6602],
            [ 0.0000, 0.0000, 0.0000]])
    >>> torch.triu(a, diagonal=-1)
    tensor([[ 0.2309, 0.5207, 2.0049],
            [ 0.2072, -1.0680, 0.6602],
            [ 0.0000, -0.5211, -0.4573]])

    >>> b = torch.randn(4, 6)
    >>> b
    tensor([[ 0.5876, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235],
            [-0.2447, 0.9556, -1.2919, 1.3378, -0.1768, -1.0857],
            [ 0.4333, 0.3146, 0.6576, -1.0432, 0.9348, -0.4410],
            [-0.9888, 1.0679, -1.3337, -1.6556, 0.4798, 0.2830]])
    >>> torch.triu(b, diagonal=1)
    tensor([[ 0.0000, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235],
            [ 0.0000, 0.0000, -1.2919, 1.3378, -0.1768, -1.0857],
            [ 0.0000, 0.0000, 0.0000, -1.0432, 0.9348, -0.4410],
            [ 0.0000, 0.0000, 0.0000, 0.0000, 0.4798, 0.2830]])
    >>> torch.triu(b, diagonal=-1)
    tensor([[ 0.5876, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235],
            [-0.2447, 0.9556, -1.2919, 1.3378, -0.1768, -1.0857],
            [ 0.0000, 0.3146, 0.6576, -1.0432, 0.9348, -0.4410],
            [ 0.0000, 0.0000, -1.3337, -1.6556, 0.4798, 0.2830]])
""".format(
        **common_args
    ),
)
+
# docstr is split in two parts to avoid format mis-capturing :math: braces '{}'
# as common args.
# torch.triu_indices: 2-by-N row/column coordinates of the upper triangle
# of a row-by-col matrix.
add_docstr(
    torch.triu_indices,
    r"""
triu_indices(row, col, offset=0, *, dtype=torch.long, device='cpu', layout=torch.strided) -> Tensor

Returns the indices of the upper triangular part of a :attr:`row` by
:attr:`col` matrix in a 2-by-N Tensor, where the first row contains row
coordinates of all indices and the second row contains column coordinates.
Indices are ordered based on rows and then columns.

The upper triangular part of the matrix is defined as the elements on and
above the diagonal.

The argument :attr:`offset` controls which diagonal to consider. If
:attr:`offset` = 0, all elements on and above the main diagonal are
retained. A positive value excludes just as many diagonals above the main
diagonal, and similarly a negative value includes just as many diagonals below
the main diagonal. The main diagonal are the set of indices
:math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]`
where :math:`d_{1}, d_{2}` are the dimensions of the matrix.

.. note::
    When running on CUDA, ``row * col`` must be less than :math:`2^{59}` to
    prevent overflow during calculation.
"""
    + r"""
Args:
    row (``int``): number of rows in the 2-D matrix.
    col (``int``): number of columns in the 2-D matrix.
    offset (``int``): diagonal offset from the main diagonal.
        Default: if not provided, 0.

Keyword args:
    dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
        Default: if ``None``, ``torch.long``.
    {device}
    layout (:class:`torch.layout`, optional): currently only support ``torch.strided``.

Example::

    >>> a = torch.triu_indices(3, 3)
    >>> a
    tensor([[0, 0, 0, 1, 1, 2],
            [0, 1, 2, 1, 2, 2]])

    >>> a = torch.triu_indices(4, 3, -1)
    >>> a
    tensor([[0, 0, 0, 1, 1, 1, 2, 2, 3],
            [0, 1, 2, 0, 1, 2, 1, 2, 2]])

    >>> a = torch.triu_indices(4, 3, 1)
    >>> a
    tensor([[0, 0, 1],
            [1, 2, 2]])
""".format(
        **factory_common_args
    ),
)

# torch.true_divide: alias for torch.div without any rounding.
add_docstr(
    torch.true_divide,
    r"""
true_divide(dividend, divisor, *, out) -> Tensor

Alias for :func:`torch.div` with ``rounding_mode=None``.
""",
)

# torch.trunc: elementwise rounding toward zero (copy for integer inputs).
add_docstr(
    torch.trunc,
    r"""
trunc(input, *, out=None) -> Tensor

Returns a new tensor with the truncated integer values of
the elements of :attr:`input`.

For integer inputs, follows the array-api convention of returning a
copy of the input tensor.

Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4)
    >>> a
    tensor([ 3.4742, 0.5466, -0.8008, -0.9079])
    >>> torch.trunc(a)
    tensor([ 3., 0., -0., -0.])
""".format(
        **common_args
    ),
)

# Fake-quantization (QAT) helper: simulates a quantize/dequantize round trip
# on the whole tensor with a single scale/zero_point pair.
add_docstr(
    torch.fake_quantize_per_tensor_affine,
    r"""
fake_quantize_per_tensor_affine(input, scale, zero_point, quant_min, quant_max) -> Tensor

Returns a new tensor with the data in :attr:`input` fake quantized using :attr:`scale`,
:attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`.

.. math::
    \text{output} = (
        min(
            \text{quant\_max},
            max(
                \text{quant\_min},
                \text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point}
            )
        ) - \text{zero\_point}
    ) \times \text{scale}

Args:
    input (Tensor): the input value(s), ``torch.float32`` tensor
    scale (double scalar or ``float32`` Tensor): quantization scale
    zero_point (int64 scalar or ``int32`` Tensor): quantization zero_point
    quant_min (int64): lower bound of the quantized domain
    quant_max (int64): upper bound of the quantized domain

Returns:
    Tensor: A newly fake_quantized ``torch.float32`` tensor

Example::

    >>> x = torch.randn(4)
    >>> x
    tensor([ 0.0552, 0.9730, 0.3973, -1.0780])
    >>> torch.fake_quantize_per_tensor_affine(x, 0.1, 0, 0, 255)
    tensor([0.1000, 1.0000, 0.4000, 0.0000])
    >>> torch.fake_quantize_per_tensor_affine(x, torch.tensor(0.1), torch.tensor(0), 0, 255)
    tensor([0.1000, 1.0000, 0.4000, 0.0000])
""",
)
+
# Docstring for torch.fake_quantize_per_channel_affine: per-channel variant —
# scale/zero_point are 1-D tensors indexed along `axis`.
add_docstr(
    torch.fake_quantize_per_channel_affine,
    r"""
fake_quantize_per_channel_affine(input, scale, zero_point, axis, quant_min, quant_max) -> Tensor

Returns a new tensor with the data in :attr:`input` fake quantized per channel using :attr:`scale`,
:attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`, across the channel specified by :attr:`axis`.

.. math::
    \text{output} = (
        min(
            \text{quant\_max},
            max(
                \text{quant\_min},
                \text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point}
            )
        ) - \text{zero\_point}
    ) \times \text{scale}

Args:
    input (Tensor): the input value(s), in ``torch.float32``
    scale (Tensor): quantization scale, per channel in ``torch.float32``
    zero_point (Tensor): quantization zero_point, per channel in ``torch.int32`` or ``torch.half`` or ``torch.float32``
    axis (int32): channel axis
    quant_min (int64): lower bound of the quantized domain
    quant_max (int64): upper bound of the quantized domain

Returns:
    Tensor: A newly fake_quantized per channel ``torch.float32`` tensor

Example::

    >>> x = torch.randn(2, 2, 2)
    >>> x
    tensor([[[-0.2525, -0.0466],
             [ 0.3491, -0.2168]],

            [[-0.5906,  1.6258],
             [ 0.6444, -0.0542]]])
    >>> scales = (torch.randn(2) + 1) * 0.05
    >>> scales
    tensor([0.0475, 0.0486])
    >>> zero_points = torch.zeros(2).to(torch.int32)
    >>> zero_points
    tensor([0, 0])
    >>> torch.fake_quantize_per_channel_affine(x, scales, zero_points, 1, 0, 255)
    tensor([[[0.0000, 0.0000],
             [0.3405, 0.0000]],

            [[0.0000, 1.6134],
             [0.6323, 0.0000]]])
""",
)

# Docstring for torch.fix: NumPy-compatible alias of torch.trunc.
add_docstr(
    torch.fix,
    r"""
fix(input, *, out=None) -> Tensor

Alias for :func:`torch.trunc`
""",
)

# Docstring for torch.unsqueeze: insert a size-1 dimension; the result is a
# view sharing storage with the input.
add_docstr(
    torch.unsqueeze,
    r"""
unsqueeze(input, dim) -> Tensor

Returns a new tensor with a dimension of size one inserted at the
specified position.

The returned tensor shares the same underlying data with this tensor.

A :attr:`dim` value within the range ``[-input.dim() - 1, input.dim() + 1)``
can be used. Negative :attr:`dim` will correspond to :meth:`unsqueeze`
applied at :attr:`dim` = ``dim + input.dim() + 1``.

Args:
    {input}
    dim (int): the index at which to insert the singleton dimension

Example::

    >>> x = torch.tensor([1, 2, 3, 4])
    >>> torch.unsqueeze(x, 0)
    tensor([[ 1,  2,  3,  4]])
    >>> torch.unsqueeze(x, 1)
    tensor([[ 1],
            [ 2],
            [ 3],
            [ 4]])
""".format(
        **common_args
    ),
)
+
# Docstring for torch.var. Split into two concatenated segments: the first is
# pure reST/math (left unformatted so its braces survive), the second is
# .format()-ed with the shared multi-dim reduction blurbs.
add_docstr(
    torch.var,
    r"""
var(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor

Calculates the variance over the dimensions specified by :attr:`dim`. :attr:`dim`
can be a single dimension, list of dimensions, or ``None`` to reduce over all
dimensions.

The variance (:math:`\sigma^2`) is calculated as

.. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2

where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
sample mean, :math:`N` is the number of samples and :math:`\delta N` is
the :attr:`correction`.
"""
    + r"""

{keepdim_details}

Args:
    {input}
    {opt_dim}

Keyword args:
    correction (int): difference between the sample size and sample degrees of freedom.
        Defaults to `Bessel's correction`_, ``correction=1``.

        .. versionchanged:: 2.0
            Previously this argument was called ``unbiased`` and was a boolean
            with ``True`` corresponding to ``correction=1`` and ``False`` being
            ``correction=0``.
    {keepdim}
    {out}

Example:

    >>> a = torch.tensor(
    ...     [[ 0.2035,  1.2959,  1.8101, -0.4644],
    ...      [ 1.5027, -0.3270,  0.5905,  0.6538],
    ...      [-1.5745,  1.3330, -0.5596, -0.6548],
    ...      [ 0.1264, -0.5080,  1.6420,  0.1992]])
    >>> torch.var(a, dim=1, keepdim=True)
    tensor([[1.0631],
            [0.5590],
            [1.4893],
            [0.8258]])

.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction

""".format(
        **multi_dim_common
    ),
)

# Docstring for torch.var_mean: fused variance+mean reduction; same two-segment
# structure and correction semantics as torch.var above.
add_docstr(
    torch.var_mean,
    r"""
var_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)

Calculates the variance and mean over the dimensions specified by :attr:`dim`.
:attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
reduce over all dimensions.

The variance (:math:`\sigma^2`) is calculated as

.. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2

where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
sample mean, :math:`N` is the number of samples and :math:`\delta N` is
the :attr:`correction`.
"""
    + r"""

{keepdim_details}

Args:
    {input}
    {opt_dim}

Keyword args:
    correction (int): difference between the sample size and sample degrees of freedom.
        Defaults to `Bessel's correction`_, ``correction=1``.

        .. versionchanged:: 2.0
            Previously this argument was called ``unbiased`` and was a boolean
            with ``True`` corresponding to ``correction=1`` and ``False`` being
            ``correction=0``.
    {keepdim}
    {out}

Returns:
    A tuple (var, mean) containing the variance and mean.

Example:

    >>> a = torch.tensor(
    ...     [[ 0.2035,  1.2959,  1.8101, -0.4644],
    ...      [ 1.5027, -0.3270,  0.5905,  0.6538],
    ...      [-1.5745,  1.3330, -0.5596, -0.6548],
    ...      [ 0.1264, -0.5080,  1.6420,  0.1992]])
    >>> torch.var_mean(a, dim=0, keepdim=True)
    (tensor([[1.5926, 1.0056, 1.2005, 0.3646]]),
     tensor([[ 0.0645,  0.4485,  0.8707, -0.0665]]))

.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction

""".format(
        **multi_dim_common
    ),
)
+
# Docstring for torch.zeros: zero-filled factory; shared keyword-arg blurbs are
# injected via .format(**factory_common_args).
add_docstr(
    torch.zeros,
    r"""
zeros(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

Returns a tensor filled with the scalar value `0`, with the shape defined
by the variable argument :attr:`size`.

Args:
    size (int...): a sequence of integers defining the shape of the output tensor.
        Can be a variable number of arguments or a collection like a list or tuple.

Keyword args:
    {out}
    {dtype}
    {layout}
    {device}
    {requires_grad}

Example::

    >>> torch.zeros(2, 3)
    tensor([[ 0.,  0.,  0.],
            [ 0.,  0.,  0.]])

    >>> torch.zeros(5)
    tensor([ 0.,  0.,  0.,  0.,  0.])
""".format(
        **factory_common_args
    ),
)

# Docstring for torch.zeros_like: "_like" factory mirroring input metadata;
# uses the factory_like_common_args blurbs (adds {memory_format}, no {out}).
add_docstr(
    torch.zeros_like,
    r"""
zeros_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor

Returns a tensor filled with the scalar value `0`, with the same size as
:attr:`input`. ``torch.zeros_like(input)`` is equivalent to
``torch.zeros(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.

.. warning::
    As of 0.4, this function does not support an :attr:`out` keyword. As an alternative,
    the old ``torch.zeros_like(input, out=output)`` is equivalent to
    ``torch.zeros(input.size(), out=output)``.

Args:
    {input}

Keyword args:
    {dtype}
    {layout}
    {device}
    {requires_grad}
    {memory_format}

Example::

    >>> input = torch.empty(2, 3)
    >>> torch.zeros_like(input)
    tensor([[ 0.,  0.,  0.],
            [ 0.,  0.,  0.]])
""".format(
        **factory_like_common_args
    ),
)
+
# Docstring for torch.empty: uninitialized-data factory. Non-raw string so the
# trailing backslashes join the long signature onto one rendered line.
add_docstr(
    torch.empty,
    """
empty(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False, \
memory_format=torch.contiguous_format) -> Tensor

Returns a tensor filled with uninitialized data. The shape of the tensor is
defined by the variable argument :attr:`size`.

.. note::
    If :func:`torch.use_deterministic_algorithms()` and
    :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
    ``True``, the output tensor is initialized to prevent any possible
    nondeterministic behavior from using the data as an input to an operation.
    Floating point and complex tensors are filled with NaN, and integer tensors
    are filled with the maximum value.

Args:
    size (int...): a sequence of integers defining the shape of the output tensor.
        Can be a variable number of arguments or a collection like a list or tuple.

Keyword args:
    {out}
    {dtype}
    {layout}
    {device}
    {requires_grad}
    {pin_memory}
    {memory_format}

Example::

    >>> torch.empty((2,3), dtype=torch.int64)
    tensor([[ 9.4064e+13,  2.8000e+01,  9.3493e+13],
            [ 7.5751e+18,  7.1428e+18,  7.5955e+18]])
""".format(
        **factory_common_args
    ),
)

# Docstring for torch.empty_like: uninitialized "_like" factory; carries the
# same determinism note as torch.empty.
add_docstr(
    torch.empty_like,
    r"""
empty_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor

Returns an uninitialized tensor with the same size as :attr:`input`.
``torch.empty_like(input)`` is equivalent to
``torch.empty(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.

.. note::
    If :func:`torch.use_deterministic_algorithms()` and
    :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
    ``True``, the output tensor is initialized to prevent any possible
    nondeterministic behavior from using the data as an input to an operation.
    Floating point and complex tensors are filled with NaN, and integer tensors
    are filled with the maximum value.

Args:
    {input}

Keyword args:
    {dtype}
    {layout}
    {device}
    {requires_grad}
    {memory_format}

Example::

    >>> a=torch.empty((2,3), dtype=torch.int32, device = 'cuda')
    >>> torch.empty_like(a)
    tensor([[0, 0, 0],
            [0, 0, 0]], device='cuda:0', dtype=torch.int32)
""".format(
        **factory_like_common_args
    ),
)

# Docstring for torch.empty_strided: explicit size+stride factory; warns that
# self-overlapping layouts have undefined behavior.
add_docstr(
    torch.empty_strided,
    r"""
empty_strided(size, stride, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor

Creates a tensor with the specified :attr:`size` and :attr:`stride` and filled with undefined data.

.. warning::
    If the constructed tensor is "overlapped" (with multiple indices referring to the same element
    in memory) its behavior is undefined.

.. note::
    If :func:`torch.use_deterministic_algorithms()` and
    :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
    ``True``, the output tensor is initialized to prevent any possible
    nondeterministic behavior from using the data as an input to an operation.
    Floating point and complex tensors are filled with NaN, and integer tensors
    are filled with the maximum value.

Args:
    size (tuple of int): the shape of the output tensor
    stride (tuple of int): the strides of the output tensor

Keyword args:
    {dtype}
    {layout}
    {device}
    {requires_grad}
    {pin_memory}

Example::

    >>> a = torch.empty_strided((2, 3), (1, 2))
    >>> a
    tensor([[8.9683e-44, 4.4842e-44, 5.1239e+07],
            [0.0000e+00, 0.0000e+00, 3.0705e-41]])
    >>> a.stride()
    (1, 2)
    >>> a.size()
    torch.Size([2, 3])
""".format(
        **factory_common_args
    ),
)
+
# Docstring for torch.empty_permuted: dense uninitialized factory with an
# explicit physical dimension order (generalized NCHW/NHWC).
add_docstr(
    torch.empty_permuted,
    r"""
empty_permuted(size, physical_layout, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor

Creates an uninitialized, non-overlapping and dense tensor with the
specified :attr:`size`, with :attr:`physical_layout` specifying how the
dimensions are physically laid out in memory (each logical dimension is listed
from outermost to innermost). :attr:`physical_layout` is a generalization
of NCHW/NHWC notation: if each dimension is assigned a number according to
what order they occur in size (N=0, C=1, H=2, W=3), then NCHW is ``(0, 1, 2, 3)``
while NHWC is ``(0, 2, 3, 1)``. Equivalently, the strides of the output
tensor ``t`` are such that ``t.stride(physical_layout[i]) == contiguous_strides[i]``
(notably, this function is *not* equivalent to ``torch.empty(size).permute(physical_layout)``).

Unlike :func:`torch.empty_strided`, this is guaranteed to produce a dense
tensor with no overlaps. If possible, prefer using this function over
:func:`torch.empty_strided` or manual use of :func:`torch.as_strided`.

.. note::
    If :func:`torch.use_deterministic_algorithms()` and
    :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
    ``True``, the output tensor is initialized to prevent any possible
    nondeterministic behavior from using the data as an input to an operation.
    Floating point and complex tensors are filled with NaN, and integer tensors
    are filled with the maximum value.

Args:
    size (tuple of int): the shape of the output tensor
    physical_layout (tuple of int): the ordering of dimensions physically in memory

Keyword args:
    {dtype}
    {layout}
    {device}
    {requires_grad}
    {pin_memory}

Examples:

    >>> torch.empty((2, 3, 5, 7)).stride()
    (105, 35, 7, 1)
    >>> torch.empty_permuted((2, 3, 5, 7), (0, 1, 2, 3)).stride()
    (105, 35, 7, 1)
    >>> torch.empty((2, 3, 5, 7), memory_format=torch.channels_last).stride()
    (105, 1, 21, 3)
    >>> torch.empty_permuted((2, 3, 5, 7), (0, 2, 3, 1)).stride()
    (105, 1, 21, 3)
    >>> torch.empty_permuted((2, 3, 5, 7), (0, 2, 3, 1)).dim_order()
    (0, 2, 3, 1)
""".format(
        **factory_common_args
    ),
)

# Docstring for torch.full: constant-filled factory; dtype is inferred from
# fill_value when not given.
add_docstr(
    torch.full,
    r"""
full(size, fill_value, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

Creates a tensor of size :attr:`size` filled with :attr:`fill_value`. The
tensor's dtype is inferred from :attr:`fill_value`.

Args:
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.
    fill_value (Scalar): the value to fill the output tensor with.

Keyword args:
    {out}
    {dtype}
    {layout}
    {device}
    {requires_grad}

Example::

    >>> torch.full((2, 3), 3.141592)
    tensor([[ 3.1416,  3.1416,  3.1416],
            [ 3.1416,  3.1416,  3.1416]])
""".format(
        **factory_common_args
    ),
)

# Docstring for torch.full_like. Non-raw string: ``\\*`` renders as a literal
# ``\*`` (escaped star for reST) and the bare ``\`` joins the signature line.
add_docstr(
    torch.full_like,
    """
full_like(input, fill_value, \\*, dtype=None, layout=torch.strided, device=None, requires_grad=False, \
memory_format=torch.preserve_format) -> Tensor

Returns a tensor with the same size as :attr:`input` filled with :attr:`fill_value`.
``torch.full_like(input, fill_value)`` is equivalent to
``torch.full(input.size(), fill_value, dtype=input.dtype, layout=input.layout, device=input.device)``.

Args:
    {input}
    fill_value: the number to fill the output tensor with.

Keyword args:
    {dtype}
    {layout}
    {device}
    {requires_grad}
    {memory_format}
""".format(
        **factory_like_common_args
    ),
)

# Docstring for torch.det: alias of torch.linalg.det.
add_docstr(
    torch.det,
    r"""
det(input) -> Tensor

Alias for :func:`torch.linalg.det`
""",
)
+
# Docstring for torch.where (ternary select). Fix: the cross-reference read
# ``:ref:`broadcastable ``` with no link target (broken reST); restored the
# ``<broadcasting-semantics>`` anchor used by the other torch docstrings.
add_docstr(
    torch.where,
    r"""
where(condition, input, other, *, out=None) -> Tensor

Return a tensor of elements selected from either :attr:`input` or :attr:`other`, depending on :attr:`condition`.

The operation is defined as:

.. math::
    \text{out}_i = \begin{cases}
        \text{input}_i & \text{if } \text{condition}_i \\
        \text{other}_i & \text{otherwise} \\
    \end{cases}
"""
    + r"""
.. note::
    The tensors :attr:`condition`, :attr:`input`, :attr:`other` must be :ref:`broadcastable <broadcasting-semantics>`.

Arguments:
    condition (BoolTensor): When True (nonzero), yield input, otherwise yield other
    input (Tensor or Scalar): value (if :attr:`input` is a scalar) or values selected at indices
        where :attr:`condition` is ``True``
    other (Tensor or Scalar): value (if :attr:`other` is a scalar) or values selected at indices
        where :attr:`condition` is ``False``

Keyword args:
    {out}

Returns:
    Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`input`, :attr:`other`

Example::

    >>> x = torch.randn(3, 2)
    >>> y = torch.ones(3, 2)
    >>> x
    tensor([[-0.4620,  0.3139],
            [ 0.3898, -0.7197],
            [ 0.0478, -0.1657]])
    >>> torch.where(x > 0, 1.0, 0.0)
    tensor([[0., 1.],
            [1., 0.],
            [1., 0.]])
    >>> torch.where(x > 0, x, y)
    tensor([[ 1.0000,  0.3139],
            [ 0.3898,  1.0000],
            [ 0.0478,  1.0000]])
    >>> x = torch.randn(2, 2, dtype=torch.double)
    >>> x
    tensor([[ 1.0779,  0.0383],
            [-0.8785, -1.1089]], dtype=torch.float64)
    >>> torch.where(x > 0, x, 0.)
    tensor([[1.0779, 0.0383],
            [0.0000, 0.0000]], dtype=torch.float64)

.. function:: where(condition) -> tuple of LongTensor
   :noindex:

``torch.where(condition)`` is identical to
``torch.nonzero(condition, as_tuple=True)``.

.. note::
    See also :func:`torch.nonzero`.
""".format(
        **common_args
    ),
)
+
# Docstring for torch.logdet. Fixes: typo "unstable in when" -> "unstable
# when"; the example printed a batched 3x2x2 tensor right after creating a
# plain 3x3 matrix — added the missing batched assignment so the doctest
# narrative is consistent.
add_docstr(
    torch.logdet,
    r"""
logdet(input) -> Tensor

Calculates log determinant of a square matrix or batches of square matrices.

It returns ``-inf`` if the input has a determinant of zero, and ``NaN`` if it has
a negative determinant.

.. note::
    Backward through :meth:`logdet` internally uses SVD results when :attr:`input`
    is not invertible. In this case, double backward through :meth:`logdet` will
    be unstable when :attr:`input` doesn't have distinct singular values. See
    :func:`torch.linalg.svd` for details.

.. seealso::

    :func:`torch.linalg.slogdet` computes the sign (resp. angle) and natural logarithm of the
    absolute value of the determinant of real-valued (resp. complex) square matrices.

Arguments:
    input (Tensor): the input tensor of size ``(*, n, n)`` where ``*`` is zero or more
        batch dimensions.

Example::

    >>> A = torch.randn(3, 3)
    >>> torch.det(A)
    tensor(0.2611)
    >>> torch.logdet(A)
    tensor(-1.3430)
    >>> A = torch.randn(3, 2, 2)
    >>> A
    tensor([[[ 0.9254, -0.6213],
             [-0.5787,  1.6843]],

            [[ 0.3242, -0.9665],
             [ 0.4539, -0.0887]],

            [[ 1.1336, -0.4025],
             [-0.7089,  0.9032]]])
    >>> A.det()
    tensor([1.1990, 0.4099, 0.7386])
    >>> A.det().log()
    tensor([ 0.1815, -0.8917, -0.3031])
""",
)
+
# Docstring for torch.slogdet: alias of torch.linalg.slogdet.
add_docstr(
    torch.slogdet,
    r"""
slogdet(input) -> (Tensor, Tensor)

Alias for :func:`torch.linalg.slogdet`
""",
)

# Docstring for torch.pinverse: alias of torch.linalg.pinv.
add_docstr(
    torch.pinverse,
    r"""
pinverse(input, rcond=1e-15) -> Tensor

Alias for :func:`torch.linalg.pinv`
""",
)
+
# Docstring for torch.hann_window. Fix: the periodic/symmetric equivalence
# snippet read ``torch.hann_window(L + 1, periodic=False)[:-1])`` with an
# unbalanced trailing parenthesis; removed it.
add_docstr(
    torch.hann_window,
    """
hann_window(window_length, periodic=True, *, dtype=None, \
layout=torch.strided, device=None, requires_grad=False) -> Tensor
"""
    + r"""
Hann window function.

.. math::
    w[n] = \frac{1}{2}\ \left[1 - \cos \left( \frac{2 \pi n}{N - 1} \right)\right] =
            \sin^2 \left( \frac{\pi n}{N - 1} \right),

where :math:`N` is the full window size.

The input :attr:`window_length` is a positive integer controlling the
returned window size. :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.hann_window(L, periodic=True)`` equal to
``torch.hann_window(L + 1, periodic=False)[:-1]``.

.. note::
    If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
"""
    + r"""
Arguments:
    window_length (int): the size of returned window
    periodic (bool, optional): If True, returns a window to be used as periodic
        function. If False, return a symmetric window.

Keyword args:
    {dtype} Only floating point types are supported.
    layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
        ``torch.strided`` (dense layout) is supported.
    {device}
    {requires_grad}

Returns:
    Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window

""".format(
        **factory_common_args
    ),
)
+
+
# Docstring for torch.hamming_window (generalized Hann with alpha/beta).
# Fix: removed the unbalanced trailing parenthesis in
# ``torch.hamming_window(L + 1, periodic=False)[:-1])``.
add_docstr(
    torch.hamming_window,
    """
hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype=None, \
layout=torch.strided, device=None, requires_grad=False) -> Tensor
"""
    + r"""
Hamming window function.

.. math::
    w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right),

where :math:`N` is the full window size.

The input :attr:`window_length` is a positive integer controlling the
returned window size. :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.hamming_window(L, periodic=True)`` equal to
``torch.hamming_window(L + 1, periodic=False)[:-1]``.

.. note::
    If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.

.. note::
    This is a generalized version of :meth:`torch.hann_window`.
"""
    + r"""
Arguments:
    window_length (int): the size of returned window
    periodic (bool, optional): If True, returns a window to be used as periodic
        function. If False, return a symmetric window.
    alpha (float, optional): The coefficient :math:`\alpha` in the equation above
    beta (float, optional): The coefficient :math:`\beta` in the equation above

Keyword args:
    {dtype} Only floating point types are supported.
    layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
        ``torch.strided`` (dense layout) is supported.
    {device}
    {requires_grad}

Returns:
    Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window.

""".format(
        **factory_common_args
    ),
)
+
+
# Docstring for torch.bartlett_window (triangular window).
# Fix: removed the unbalanced trailing parenthesis in
# ``torch.bartlett_window(L + 1, periodic=False)[:-1])``.
add_docstr(
    torch.bartlett_window,
    """
bartlett_window(window_length, periodic=True, *, dtype=None, \
layout=torch.strided, device=None, requires_grad=False) -> Tensor
"""
    + r"""
Bartlett window function.

.. math::
    w[n] = 1 - \left| \frac{2n}{N-1} - 1 \right| = \begin{cases}
        \frac{2n}{N - 1} & \text{if } 0 \leq n \leq \frac{N - 1}{2} \\
        2 - \frac{2n}{N - 1} & \text{if } \frac{N - 1}{2} < n < N \\
    \end{cases},

where :math:`N` is the full window size.

The input :attr:`window_length` is a positive integer controlling the
returned window size. :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.bartlett_window(L, periodic=True)`` equal to
``torch.bartlett_window(L + 1, periodic=False)[:-1]``.

.. note::
    If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
"""
    + r"""
Arguments:
    window_length (int): the size of returned window
    periodic (bool, optional): If True, returns a window to be used as periodic
        function. If False, return a symmetric window.

Keyword args:
    {dtype} Only floating point types are supported.
    layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
        ``torch.strided`` (dense layout) is supported.
    {device}
    {requires_grad}

Returns:
    Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window

""".format(
        **factory_common_args
    ),
)
+
+
# Docstring for torch.blackman_window.
# Fix: removed the unbalanced trailing parenthesis in
# ``torch.blackman_window(L + 1, periodic=False)[:-1])``.
add_docstr(
    torch.blackman_window,
    """
blackman_window(window_length, periodic=True, *, dtype=None, \
layout=torch.strided, device=None, requires_grad=False) -> Tensor
"""
    + r"""
Blackman window function.

.. math::
    w[n] = 0.42 - 0.5 \cos \left( \frac{2 \pi n}{N - 1} \right) + 0.08 \cos \left( \frac{4 \pi n}{N - 1} \right)

where :math:`N` is the full window size.

The input :attr:`window_length` is a positive integer controlling the
returned window size. :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.blackman_window(L, periodic=True)`` equal to
``torch.blackman_window(L + 1, periodic=False)[:-1]``.

.. note::
    If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
"""
    + r"""
Arguments:
    window_length (int): the size of returned window
    periodic (bool, optional): If True, returns a window to be used as periodic
        function. If False, return a symmetric window.

Keyword args:
    {dtype} Only floating point types are supported.
    layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
        ``torch.strided`` (dense layout) is supported.
    {device}
    {requires_grad}

Returns:
    Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window

""".format(
        **factory_common_args
    ),
)
+
+
# Docstring for torch.kaiser_window (Bessel-based window with shape beta).
# Fix: removed the unbalanced trailing parenthesis in
# ``torch.kaiser_window(L + 1, B, periodic=False)[:-1])``.
add_docstr(
    torch.kaiser_window,
    """
kaiser_window(window_length, periodic=True, beta=12.0, *, dtype=None, \
layout=torch.strided, device=None, requires_grad=False) -> Tensor
"""
    + r"""
Computes the Kaiser window with window length :attr:`window_length` and shape parameter :attr:`beta`.

Let I_0 be the zeroth order modified Bessel function of the first kind (see :func:`torch.i0`) and
``N = L - 1`` if :attr:`periodic` is False and ``L`` if :attr:`periodic` is True,
where ``L`` is the :attr:`window_length`. This function computes:

.. math::
    out_i = I_0 \left( \beta \sqrt{1 - \left( {\frac{i - N/2}{N/2}} \right) ^2 } \right) / I_0( \beta )

Calling ``torch.kaiser_window(L, B, periodic=True)`` is equivalent to calling
``torch.kaiser_window(L + 1, B, periodic=False)[:-1]``.
The :attr:`periodic` argument is intended as a helpful shorthand
to produce a periodic window as input to functions like :func:`torch.stft`.

.. note::
    If :attr:`window_length` is one, then the returned window is a single element tensor containing a one.

"""
    + r"""
Args:
    window_length (int): length of the window.
    periodic (bool, optional): If True, returns a periodic window suitable for use in spectral analysis.
        If False, returns a symmetric window suitable for use in filter design.
    beta (float, optional): shape parameter for the window.

Keyword args:
    {dtype}
    layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
        ``torch.strided`` (dense layout) is supported.
    {device}
    {requires_grad}

""".format(
        **factory_common_args
    ),
)
+
+
# Docstring for torch.vander (Vandermonde matrix). The whole text is
# .format()-ed, so literal braces in the math are doubled (``{{...}}``).
add_docstr(
    torch.vander,
    """
vander(x, N=None, increasing=False) -> Tensor
"""
    + r"""
Generates a Vandermonde matrix.

The columns of the output matrix are elementwise powers of the input vector :math:`x^{{(N-1)}}, x^{{(N-2)}}, ..., x^0`.
If increasing is True, the order of the columns is reversed :math:`x^0, x^1, ..., x^{{(N-1)}}`. Such a
matrix with a geometric progression in each row is named for Alexandre-Theophile Vandermonde.

Arguments:
    x (Tensor): 1-D input tensor.
    N (int, optional): Number of columns in the output. If N is not specified,
        a square array is returned :math:`(N = len(x))`.
    increasing (bool, optional): Order of the powers of the columns. If True,
        the powers increase from left to right, if False (the default) they are reversed.

Returns:
    Tensor: Vandermonde matrix. If increasing is False, the first column is :math:`x^{{(N-1)}}`,
        the second :math:`x^{{(N-2)}}` and so forth. If increasing is True, the columns
        are :math:`x^0, x^1, ..., x^{{(N-1)}}`.

Example::

    >>> x = torch.tensor([1, 2, 3, 5])
    >>> torch.vander(x)
    tensor([[  1,   1,   1,   1],
            [  8,   4,   2,   1],
            [ 27,   9,   3,   1],
            [125,  25,   5,   1]])
    >>> torch.vander(x, N=3)
    tensor([[ 1,  1,  1],
            [ 4,  2,  1],
            [ 9,  3,  1],
            [25,  5,  1]])
    >>> torch.vander(x, N=3, increasing=True)
    tensor([[ 1,  1,  1],
            [ 1,  2,  4],
            [ 1,  3,  9],
            [ 1,  5, 25]])

""".format(
        **factory_common_args
    ),
)
+
+
# Docstring for torch.unbind. Fix: the doctest used ``>>>`` on the two
# continuation lines of a multi-line call; doctest requires ``...`` for
# continuations, so the example could never run as written.
add_docstr(
    torch.unbind,
    r"""
unbind(input, dim=0) -> seq

Removes a tensor dimension.

Returns a tuple of all slices along a given dimension, already without it.

Arguments:
    input (Tensor): the tensor to unbind
    dim (int): dimension to remove

Example::

    >>> torch.unbind(torch.tensor([[1, 2, 3],
    ...                            [4, 5, 6],
    ...                            [7, 8, 9]]))
    (tensor([1, 2, 3]), tensor([4, 5, 6]), tensor([7, 8, 9]))
""",
)
+
+
# Docstring for torch.combinations: tensor analogue of itertools.combinations
# / combinations_with_replacement. (The example invokes ``itertools`` without
# showing the import — presumably intentional shorthand; verify against the
# rendered docs if it matters.)
add_docstr(
    torch.combinations,
    r"""
combinations(input, r=2, with_replacement=False) -> seq

Compute combinations of length :math:`r` of the given tensor. The behavior is similar to
python's `itertools.combinations` when `with_replacement` is set to `False`, and
`itertools.combinations_with_replacement` when `with_replacement` is set to `True`.

Arguments:
    input (Tensor): 1D vector.
    r (int, optional): number of elements to combine
    with_replacement (bool, optional): whether to allow duplication in combination

Returns:
    Tensor: A tensor equivalent to converting all the input tensors into lists, do
    `itertools.combinations` or `itertools.combinations_with_replacement` on these
    lists, and finally convert the resulting list into tensor.

Example::

    >>> a = [1, 2, 3]
    >>> list(itertools.combinations(a, r=2))
    [(1, 2), (1, 3), (2, 3)]
    >>> list(itertools.combinations(a, r=3))
    [(1, 2, 3)]
    >>> list(itertools.combinations_with_replacement(a, r=2))
    [(1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3)]
    >>> tensor_a = torch.tensor(a)
    >>> torch.combinations(tensor_a)
    tensor([[1, 2],
            [1, 3],
            [2, 3]])
    >>> torch.combinations(tensor_a, r=3)
    tensor([[1, 2, 3]])
    >>> torch.combinations(tensor_a, with_replacement=True)
    tensor([[1, 1],
            [1, 2],
            [1, 3],
            [2, 2],
            [2, 3],
            [3, 3]])

""",
)
+
+add_docstr(
+ torch.trapezoid,
+ r"""
+trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor
+
+Computes the `trapezoidal rule `_ along
+:attr:`dim`. By default the spacing between elements is assumed to be 1, but
+:attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be
+used to specify arbitrary spacing along :attr:`dim`.
+
+
+Assuming :attr:`y` is a one-dimensional tensor with elements :math:`{y_0, y_1, ..., y_n}`,
+the default computation is
+
+.. math::
+ \begin{aligned}
+ \sum_{i = 1}^{n-1} \frac{1}{2} (y_i + y_{i-1})
+ \end{aligned}
+
+When :attr:`dx` is specified the computation becomes
+
+.. math::
+ \begin{aligned}
+ \sum_{i = 1}^{n-1} \frac{\Delta x}{2} (y_i + y_{i-1})
+ \end{aligned}
+
+effectively multiplying the result by :attr:`dx`. When :attr:`x` is specified,
+assuming :attr:`x` is also a one-dimensional tensor with
+elements :math:`{x_0, x_1, ..., x_n}`, the computation becomes
+
+.. math::
+ \begin{aligned}
+ \sum_{i = 1}^{n-1} \frac{(x_i - x_{i-1})}{2} (y_i + y_{i-1})
+ \end{aligned}
+
+When :attr:`x` and :attr:`y` have the same size, the computation is as described above and no broadcasting is needed.
+The broadcasting behavior of this function is as follows when their sizes are different. For both :attr:`x`
+and :attr:`y`, the function computes the difference between consecutive elements along
+dimension :attr:`dim`. This effectively creates two tensors, `x_diff` and `y_diff`, that have
+the same shape as the original tensors except their lengths along the dimension :attr:`dim` is reduced by 1.
+After that, those two tensors are broadcast together to compute final output as part of the trapezoidal rule.
+See the examples below for details.
+
+.. note::
+ The trapezoidal rule is a technique for approximating the definite integral of a function
+ by averaging its left and right Riemann sums. The approximation becomes more accurate as
+ the resolution of the partition increases.
+
+Arguments:
+ y (Tensor): Values to use when computing the trapezoidal rule.
+ x (Tensor): If specified, defines spacing between values as specified above.
+
+Keyword arguments:
+ dx (float): constant spacing between values. If neither :attr:`x` or :attr:`dx`
+ are specified then this defaults to 1. Effectively multiplies the result by its value.
+ dim (int): The dimension along which to compute the trapezoidal rule.
+ The last (inner-most) dimension by default.
+
+Examples::
+
+ >>> # Computes the trapezoidal rule in 1D, spacing is implicitly 1
+ >>> y = torch.tensor([1, 5, 10])
+ >>> torch.trapezoid(y)
+ tensor(10.5)
+
+ >>> # Computes the same trapezoidal rule directly to verify
+ >>> (1 + 10 + 10) / 2
+ 10.5
+
+ >>> # Computes the trapezoidal rule in 1D with constant spacing of 2
+ >>> # NOTE: the result is the same as before, but multiplied by 2
+ >>> torch.trapezoid(y, dx=2)
+ 21.0
+
+ >>> # Computes the trapezoidal rule in 1D with arbitrary spacing
+ >>> x = torch.tensor([1, 3, 6])
+ >>> torch.trapezoid(y, x)
+ 28.5
+
+ >>> # Computes the same trapezoidal rule directly to verify
+ >>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2
+ 28.5
+
+ >>> # Computes the trapezoidal rule for each row of a 3x3 matrix
+ >>> y = torch.arange(9).reshape(3, 3)
+ tensor([[0, 1, 2],
+ [3, 4, 5],
+ [6, 7, 8]])
+ >>> torch.trapezoid(y)
+ tensor([ 2., 8., 14.])
+
+ >>> # Computes the trapezoidal rule for each column of the matrix
+ >>> torch.trapezoid(y, dim=0)
+ tensor([ 6., 8., 10.])
+
+ >>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix
+ >>> # with the same arbitrary spacing
+ >>> y = torch.ones(3, 3)
+ >>> x = torch.tensor([1, 3, 6])
+ >>> torch.trapezoid(y, x)
+ tensor([5., 5., 5.])
+
+ >>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix
+ >>> # with different arbitrary spacing per row
+ >>> y = torch.ones(3, 3)
+ >>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]])
+ >>> torch.trapezoid(y, x)
+ tensor([2., 4., 6.])
+""",
+)
+
+add_docstr(
+ torch.trapz,
+ r"""
+trapz(y, x, *, dim=-1) -> Tensor
+
+Alias for :func:`torch.trapezoid`.
+""",
+)
+
+add_docstr(
+ torch.cumulative_trapezoid,
+ r"""
+cumulative_trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor
+
+Cumulatively computes the `trapezoidal rule <https://en.wikipedia.org/wiki/Trapezoidal_rule>`_
+along :attr:`dim`. By default the spacing between elements is assumed to be 1, but
+:attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be
+used to specify arbitrary spacing along :attr:`dim`.
+
+For more details, please read :func:`torch.trapezoid`. The difference between :func:`torch.trapezoid`
+and this function is that :func:`torch.trapezoid` returns a value for each integration,
+whereas this function returns a cumulative value for every spacing within the integration. This
+is analogous to how `.sum` returns a value and `.cumsum` returns a cumulative sum.
+
+Arguments:
+ y (Tensor): Values to use when computing the trapezoidal rule.
+ x (Tensor): If specified, defines spacing between values as specified above.
+
+Keyword arguments:
+ dx (float): constant spacing between values. If neither :attr:`x` or :attr:`dx`
+ are specified then this defaults to 1. Effectively multiplies the result by its value.
+ dim (int): The dimension along which to compute the trapezoidal rule.
+ The last (inner-most) dimension by default.
+
+Examples::
+
+ >>> # Cumulatively computes the trapezoidal rule in 1D, spacing is implicitly 1.
+ >>> y = torch.tensor([1, 5, 10])
+ >>> torch.cumulative_trapezoid(y)
+ tensor([3., 10.5])
+
+ >>> # Computes the same trapezoidal rule directly up to each element to verify
+ >>> (1 + 5) / 2
+ 3.0
+ >>> (1 + 10 + 10) / 2
+ 10.5
+
+ >>> # Cumulatively computes the trapezoidal rule in 1D with constant spacing of 2
+ >>> # NOTE: the result is the same as before, but multiplied by 2
+ >>> torch.cumulative_trapezoid(y, dx=2)
+ tensor([6., 21.])
+
+ >>> # Cumulatively computes the trapezoidal rule in 1D with arbitrary spacing
+ >>> x = torch.tensor([1, 3, 6])
+ >>> torch.cumulative_trapezoid(y, x)
+ tensor([6., 28.5])
+
+ >>> # Computes the same trapezoidal rule directly up to each element to verify
+ >>> ((3 - 1) * (1 + 5)) / 2
+ 6.0
+ >>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2
+ 28.5
+
+ >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 matrix
+ >>> y = torch.arange(9).reshape(3, 3)
+ tensor([[0, 1, 2],
+ [3, 4, 5],
+ [6, 7, 8]])
+ >>> torch.cumulative_trapezoid(y)
+ tensor([[ 0.5, 2.],
+ [ 3.5, 8.],
+ [ 6.5, 14.]])
+
+ >>> # Cumulatively computes the trapezoidal rule for each column of the matrix
+ >>> torch.cumulative_trapezoid(y, dim=0)
+ tensor([[ 1.5, 2.5, 3.5],
+ [ 6.0, 8.0, 10.0]])
+
+ >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix
+ >>> # with the same arbitrary spacing
+ >>> y = torch.ones(3, 3)
+ >>> x = torch.tensor([1, 3, 6])
+ >>> torch.cumulative_trapezoid(y, x)
+ tensor([[2., 5.],
+ [2., 5.],
+ [2., 5.]])
+
+ >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix
+ >>> # with different arbitrary spacing per row
+ >>> y = torch.ones(3, 3)
+ >>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]])
+ >>> torch.cumulative_trapezoid(y, x)
+ tensor([[1., 2.],
+ [2., 4.],
+ [3., 6.]])
+""",
+)
+
+add_docstr(
+ torch.repeat_interleave,
+ r"""
+repeat_interleave(input, repeats, dim=None, *, output_size=None) -> Tensor
+
+Repeat elements of a tensor.
+
+.. warning::
+
+ This is different from :meth:`torch.Tensor.repeat` but similar to ``numpy.repeat``.
+
+Args:
+ {input}
+ repeats (Tensor or int): The number of repetitions for each element.
+ repeats is broadcasted to fit the shape of the given axis.
+ dim (int, optional): The dimension along which to repeat values.
+ By default, use the flattened input array, and return a flat output
+ array.
+
+Keyword args:
+ output_size (int, optional): Total output size for the given axis
+ (e.g. sum of repeats). If given, it will avoid stream synchronization
+ needed to calculate output shape of the tensor.
+
+Returns:
+ Tensor: Repeated tensor which has the same shape as input, except along the given axis.
+
+Example::
+
+ >>> x = torch.tensor([1, 2, 3])
+ >>> x.repeat_interleave(2)
+ tensor([1, 1, 2, 2, 3, 3])
+ >>> y = torch.tensor([[1, 2], [3, 4]])
+ >>> torch.repeat_interleave(y, 2)
+ tensor([1, 1, 2, 2, 3, 3, 4, 4])
+ >>> torch.repeat_interleave(y, 3, dim=1)
+ tensor([[1, 1, 1, 2, 2, 2],
+ [3, 3, 3, 4, 4, 4]])
+ >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0)
+ tensor([[1, 2],
+ [3, 4],
+ [3, 4]])
+ >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0, output_size=3)
+ tensor([[1, 2],
+ [3, 4],
+ [3, 4]])
+
+If the `repeats` is `tensor([n1, n2, n3, ...])`, then the output will be
+`tensor([0, 0, ..., 1, 1, ..., 2, 2, ..., ...])` where `0` appears `n1` times,
+`1` appears `n2` times, `2` appears `n3` times, etc.
+
+.. function:: repeat_interleave(repeats, *) -> Tensor
+ :noindex:
+
+Repeats 0 repeats[0] times, 1 repeats[1] times, 2 repeats[2] times, etc.
+
+Args:
+ repeats (Tensor): The number of repetitions for each element.
+
+Returns:
+ Tensor: Repeated tensor of size `sum(repeats)`.
+
+Example::
+
+ >>> torch.repeat_interleave(torch.tensor([1, 2, 3]))
+ tensor([0, 1, 1, 2, 2, 2])
+
+""".format(
+ **common_args
+ ),
+)
+
+add_docstr(
+ torch.tile,
+ r"""
+tile(input, dims) -> Tensor
+
+Constructs a tensor by repeating the elements of :attr:`input`.
+The :attr:`dims` argument specifies the number of repetitions
+in each dimension.
+
+If :attr:`dims` specifies fewer dimensions than :attr:`input` has, then
+ones are prepended to :attr:`dims` until all dimensions are specified.
+For example, if :attr:`input` has shape (8, 6, 4, 2) and :attr:`dims`
+is (2, 2), then :attr:`dims` is treated as (1, 1, 2, 2).
+
+Analogously, if :attr:`input` has fewer dimensions than :attr:`dims`
+specifies, then :attr:`input` is treated as if it were unsqueezed at
+dimension zero until it has as many dimensions as :attr:`dims` specifies.
+For example, if :attr:`input` has shape (4, 2) and :attr:`dims`
+is (3, 3, 2, 2), then :attr:`input` is treated as if it had the
+shape (1, 1, 4, 2).
+
+.. note::
+
+ This function is similar to NumPy's tile function.
+
+Args:
+ input (Tensor): the tensor whose elements to repeat.
+ dims (tuple): the number of repetitions per dimension.
+
+Example::
+
+ >>> x = torch.tensor([1, 2, 3])
+ >>> x.tile((2,))
+ tensor([1, 2, 3, 1, 2, 3])
+ >>> y = torch.tensor([[1, 2], [3, 4]])
+ >>> torch.tile(y, (2, 2))
+ tensor([[1, 2, 1, 2],
+ [3, 4, 3, 4],
+ [1, 2, 1, 2],
+ [3, 4, 3, 4]])
+""",
+)
+
+add_docstr(
+ torch.quantize_per_tensor,
+ r"""
+quantize_per_tensor(input, scale, zero_point, dtype) -> Tensor
+
+Converts a float tensor to a quantized tensor with given scale and zero point.
+
+Arguments:
+ input (Tensor): float tensor or list of tensors to quantize
+ scale (float or Tensor): scale to apply in quantization formula
+ zero_point (int or Tensor): offset in integer value that maps to float zero
+ dtype (:class:`torch.dtype`): the desired data type of returned tensor.
+ Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32``
+
+Returns:
+ Tensor: A newly quantized tensor or list of quantized tensors.
+
+Example::
+
+ >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8)
+ tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
+ quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10)
+ >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8).int_repr()
+ tensor([ 0, 10, 20, 30], dtype=torch.uint8)
+ >>> torch.quantize_per_tensor([torch.tensor([-1.0, 0.0]), torch.tensor([-2.0, 2.0])],
+ ... torch.tensor([0.1, 0.2]), torch.tensor([10, 20]), torch.quint8)
+ (tensor([-1., 0.], size=(2,), dtype=torch.quint8,
+ quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10),
+ tensor([-2., 2.], size=(2,), dtype=torch.quint8,
+ quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=20))
+ >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.tensor(0.1), torch.tensor(10), torch.quint8)
+ tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
+ quantization_scheme=torch.per_tensor_affine, scale=0.10, zero_point=10)
+""",
+)
+
+add_docstr(
+ torch.quantize_per_tensor_dynamic,
+ r"""
+quantize_per_tensor_dynamic(input, dtype, reduce_range) -> Tensor
+
+Converts a float tensor to a quantized tensor with scale and zero_point calculated
+dynamically based on the input.
+
+Arguments:
+ input (Tensor): float tensor or list of tensors to quantize
+ dtype (:class:`torch.dtype`): the desired data type of returned tensor.
+ Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``
+ reduce_range (bool): a flag to indicate whether to reduce the range of quantized
+ data by 1 bit; this is required to avoid instruction overflow on some hardware
+
+Returns:
+ Tensor: A newly (dynamically) quantized tensor
+
+Example::
+
+ >>> t = torch.quantize_per_tensor_dynamic(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.quint8, False)
+ >>> print(t)
+ tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
+ quantization_scheme=torch.per_tensor_affine, scale=0.011764705882352941,
+ zero_point=85)
+ >>> t.int_repr()
+ tensor([ 0, 85, 170, 255], dtype=torch.uint8)
+""",
+)
+
+add_docstr(
+ torch.quantize_per_channel,
+ r"""
+quantize_per_channel(input, scales, zero_points, axis, dtype) -> Tensor
+
+Converts a float tensor to a per-channel quantized tensor with given scales and zero points.
+
+Arguments:
+ input (Tensor): float tensor to quantize
+ scales (Tensor): float 1D tensor of scales to use, size should match ``input.size(axis)``
+ zero_points (int): integer 1D tensor of offset to use, size should match ``input.size(axis)``
+ axis (int): dimension on which apply per-channel quantization
+ dtype (:class:`torch.dtype`): the desired data type of returned tensor.
+ Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32``
+
+Returns:
+ Tensor: A newly quantized tensor
+
+Example::
+
+ >>> x = torch.tensor([[-1.0, 0.0], [1.0, 2.0]])
+ >>> torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8)
+ tensor([[-1., 0.],
+ [ 1., 2.]], size=(2, 2), dtype=torch.quint8,
+ quantization_scheme=torch.per_channel_affine,
+ scale=tensor([0.1000, 0.0100], dtype=torch.float64),
+ zero_point=tensor([10, 0]), axis=0)
+ >>> torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8).int_repr()
+ tensor([[ 0, 10],
+ [100, 200]], dtype=torch.uint8)
+""",
+)
+
+
+add_docstr(
+ torch.quantized_batch_norm,
+ r"""
+quantized_batch_norm(input, weight=None, bias=None, mean, var, eps, output_scale, output_zero_point) -> Tensor
+
+Applies batch normalization on a 4D (NCHW) quantized tensor.
+
+.. math::
+
+ y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
+
+Arguments:
+ input (Tensor): quantized tensor
+ weight (Tensor): float tensor that corresponds to the gamma, size C
+ bias (Tensor): float tensor that corresponds to the beta, size C
+ mean (Tensor): float mean value in batch normalization, size C
+ var (Tensor): float tensor for variance, size C
+ eps (float): a value added to the denominator for numerical stability.
+ output_scale (float): output quantized tensor scale
+ output_zero_point (int): output quantized tensor zero_point
+
+Returns:
+ Tensor: A quantized tensor with batch normalization applied.
+
+Example::
+
+ >>> qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2), 1.5, 3, torch.quint8)
+ >>> torch.quantized_batch_norm(qx, torch.ones(2), torch.zeros(2), torch.rand(2), torch.rand(2), 0.00001, 0.2, 2)
+ tensor([[[[-0.2000, -0.2000],
+ [ 1.6000, -0.2000]],
+
+ [[-0.4000, -0.4000],
+ [-0.4000, 0.6000]]],
+
+
+ [[[-0.2000, -0.2000],
+ [-0.2000, -0.2000]],
+
+ [[ 0.6000, -0.4000],
+ [ 0.6000, -0.4000]]]], size=(2, 2, 2, 2), dtype=torch.quint8,
+ quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=2)
+""",
+)
+
+
+add_docstr(
+ torch.quantized_max_pool1d,
+ r"""
+quantized_max_pool1d(input, kernel_size, stride=[], padding=0, dilation=1, ceil_mode=False) -> Tensor
+
+Applies a 1D max pooling over an input quantized tensor composed of several input planes.
+
+Arguments:
+ input (Tensor): quantized tensor
+ kernel_size (list of int): the size of the sliding window
+ stride (``list of int``, optional): the stride of the sliding window
+ padding (``list of int``, optional): padding to be added on both sides, must be >= 0 and <= kernel_size / 2
+ dilation (``list of int``, optional): The stride between elements within a sliding window, must be > 0. Default 1
+ ceil_mode (bool, optional): If True, will use ceil instead of floor to compute the output shape.
+ Defaults to False.
+
+
+Returns:
+ Tensor: A quantized tensor with max_pool1d applied.
+
+Example::
+
+ >>> qx = torch.quantize_per_tensor(torch.rand(2, 2), 1.5, 3, torch.quint8)
+ >>> torch.quantized_max_pool1d(qx, [2])
+ tensor([[0.0000],
+ [1.5000]], size=(2, 1), dtype=torch.quint8,
+ quantization_scheme=torch.per_tensor_affine, scale=1.5, zero_point=3)
+""",
+)
+
+
+add_docstr(
+ torch.quantized_max_pool2d,
+ r"""
+quantized_max_pool2d(input, kernel_size, stride=[], padding=0, dilation=1, ceil_mode=False) -> Tensor
+
+Applies a 2D max pooling over an input quantized tensor composed of several input planes.
+
+Arguments:
+ input (Tensor): quantized tensor
+ kernel_size (``list of int``): the size of the sliding window
+ stride (``list of int``, optional): the stride of the sliding window
+ padding (``list of int``, optional): padding to be added on both sides, must be >= 0 and <= kernel_size / 2
+ dilation (``list of int``, optional): The stride between elements within a sliding window, must be > 0. Default 1
+ ceil_mode (bool, optional): If True, will use ceil instead of floor to compute the output shape.
+ Defaults to False.
+
+
+Returns:
+ Tensor: A quantized tensor with max_pool2d applied.
+
+Example::
+
+ >>> qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2), 1.5, 3, torch.quint8)
+ >>> torch.quantized_max_pool2d(qx, [2,2])
+ tensor([[[[1.5000]],
+
+ [[1.5000]]],
+
+
+ [[[0.0000]],
+
+ [[0.0000]]]], size=(2, 2, 1, 1), dtype=torch.quint8,
+ quantization_scheme=torch.per_tensor_affine, scale=1.5, zero_point=3)
+""",
+)
+
+
+add_docstr(
+ torch.Generator,
+ r"""
+Generator(device='cpu') -> Generator
+
+Creates and returns a generator object that manages the state of the algorithm which
+produces pseudo random numbers. Used as a keyword argument in many :ref:`inplace-random-sampling`
+functions.
+
+Arguments:
+ device (:class:`torch.device`, optional): the desired device for the generator.
+
+Returns:
+ Generator: A torch.Generator object.
+
+Example::
+
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
+ >>> g_cpu = torch.Generator()
+ >>> g_cuda = torch.Generator(device='cuda')
+""",
+)
+
+
+add_docstr(
+ torch.Generator.set_state,
+ r"""
+Generator.set_state(new_state) -> void
+
+Sets the Generator state.
+
+Arguments:
+ new_state (torch.ByteTensor): The desired state.
+
+Example::
+
+ >>> g_cpu = torch.Generator()
+ >>> g_cpu_other = torch.Generator()
+ >>> g_cpu.set_state(g_cpu_other.get_state())
+""",
+)
+
+
+add_docstr(
+ torch.Generator.get_state,
+ r"""
+Generator.get_state() -> Tensor
+
+Returns the Generator state as a ``torch.ByteTensor``.
+
+Returns:
+ Tensor: A ``torch.ByteTensor`` which contains all the necessary bits
+ to restore a Generator to a specific point in time.
+
+Example::
+
+ >>> g_cpu = torch.Generator()
+ >>> g_cpu.get_state()
+""",
+)
+
+
+add_docstr(
+ torch.Generator.manual_seed,
+ r"""
+Generator.manual_seed(seed) -> Generator
+
+Sets the seed for generating random numbers. Returns a `torch.Generator` object. Any 32-bit integer is a valid seed.
+
+Arguments:
+ seed (int): The desired seed. Value must be within the inclusive range
+ `[-0x8000_0000_0000_0000, 0xffff_ffff_ffff_ffff]`. Otherwise, a RuntimeError
+ is raised. Negative inputs are remapped to positive values with the formula
+ `0xffff_ffff_ffff_ffff + seed`.
+
+Returns:
+ Generator: A torch.Generator object.
+
+Example::
+
+ >>> g_cpu = torch.Generator()
+ >>> g_cpu.manual_seed(2147483647)
+""",
+)
+
+
+add_docstr(
+ torch.Generator.initial_seed,
+ r"""
+Generator.initial_seed() -> int
+
+Returns the initial seed for generating random numbers.
+
+Example::
+
+ >>> g_cpu = torch.Generator()
+ >>> g_cpu.initial_seed()
+ 2147483647
+""",
+)
+
+
+add_docstr(
+ torch.Generator.seed,
+ r"""
+Generator.seed() -> int
+
+Gets a non-deterministic random number from std::random_device or the current
+time and uses it to seed a Generator.
+
+Example::
+
+ >>> g_cpu = torch.Generator()
+ >>> g_cpu.seed()
+ 1516516984916
+""",
+)
+
+
+add_docstr(
+ torch.Generator.device,
+ r"""
+Generator.device -> device
+
+Gets the current device of the generator.
+
+Example::
+
+ >>> g_cpu = torch.Generator()
+ >>> g_cpu.device
+ device(type='cpu')
+""",
+)
+
+add_docstr(
+ torch._assert_async,
+ r"""
+_assert_async(tensor) -> void
+
+Asynchronously assert that the contents of tensor are nonzero. For CPU tensors,
+this is equivalent to ``assert tensor`` or ``assert tensor.is_nonzero()``; for
+CUDA tensors, we DO NOT synchronize and you may only find out the assertion
+failed at a later CUDA kernel launch. Asynchronous assertion can be helpful for
+testing invariants in CUDA tensors without giving up performance. This function
+is NOT intended to be used for regular error checking, as it will trash your CUDA
+context if the assert fails (forcing you to restart your PyTorch process.)
+
+Args:
+ tensor (Tensor): a one element tensor to test to see if it is nonzero. Zero
+ elements (including False for boolean tensors) cause an assertion failure
+ to be raised.
+""",
+)
+
+add_docstr(
+ torch.searchsorted,
+ r"""
+searchsorted(sorted_sequence, values, *, out_int32=False, right=False, side='left', out=None, sorter=None) -> Tensor
+
+Find the indices from the *innermost* dimension of :attr:`sorted_sequence` such that, if the
+corresponding values in :attr:`values` were inserted before the indices, when sorted, the order
+of the corresponding *innermost* dimension within :attr:`sorted_sequence` would be preserved.
+Return a new tensor with the same size as :attr:`values`. More formally,
+the returned index satisfies the following rules:
+
+.. list-table::
+ :widths: 12 10 78
+ :header-rows: 1
+
+ * - :attr:`sorted_sequence`
+ - :attr:`right`
+ - *returned index satisfies*
+ * - 1-D
+ - False
+ - ``sorted_sequence[i-1] < values[m][n]...[l][x] <= sorted_sequence[i]``
+ * - 1-D
+ - True
+ - ``sorted_sequence[i-1] <= values[m][n]...[l][x] < sorted_sequence[i]``
+ * - N-D
+ - False
+ - ``sorted_sequence[m][n]...[l][i-1] < values[m][n]...[l][x] <= sorted_sequence[m][n]...[l][i]``
+ * - N-D
+ - True
+ - ``sorted_sequence[m][n]...[l][i-1] <= values[m][n]...[l][x] < sorted_sequence[m][n]...[l][i]``
+
+Args:
+ sorted_sequence (Tensor): N-D or 1-D tensor, containing monotonically increasing sequence on the *innermost*
+ dimension unless :attr:`sorter` is provided, in which case the sequence does not
+ need to be sorted
+ values (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s).
+
+Keyword args:
+ out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise.
+ Default value is False, i.e. default output data type is torch.int64.
+ right (bool, optional): if False, return the first suitable location that is found. If True, return the
+ last such index. If no suitable index found, return 0 for non-numerical value
+ (e.g. nan, inf) or the size of the *innermost* dimension within :attr:`sorted_sequence`
+ (one past the last index of the *innermost* dimension). In other words, if False,
+ gets the lower bound index for each value in :attr:`values` on the corresponding
+ *innermost* dimension of the :attr:`sorted_sequence`. If True, gets the upper
+ bound index instead. Default value is False. :attr:`side` does the same and is
+ preferred. It will error if :attr:`side` is set to "left" while this is True.
+ side (str, optional): the same as :attr:`right` but preferred. "left" corresponds to False for :attr:`right`
+ and "right" corresponds to True for :attr:`right`. It will error if this is set to
+ "left" while :attr:`right` is True.
+ out (Tensor, optional): the output tensor, must be the same size as :attr:`values` if provided.
+ sorter (LongTensor, optional): if provided, a tensor matching the shape of the unsorted
+ :attr:`sorted_sequence` containing a sequence of indices that sort it in the
+ ascending order on the innermost dimension
+
+
+Example::
+
+ >>> sorted_sequence = torch.tensor([[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]])
+ >>> sorted_sequence
+ tensor([[ 1, 3, 5, 7, 9],
+ [ 2, 4, 6, 8, 10]])
+ >>> values = torch.tensor([[3, 6, 9], [3, 6, 9]])
+ >>> values
+ tensor([[3, 6, 9],
+ [3, 6, 9]])
+ >>> torch.searchsorted(sorted_sequence, values)
+ tensor([[1, 3, 4],
+ [1, 2, 4]])
+ >>> torch.searchsorted(sorted_sequence, values, side='right')
+ tensor([[2, 3, 5],
+ [1, 3, 4]])
+
+ >>> sorted_sequence_1d = torch.tensor([1, 3, 5, 7, 9])
+ >>> sorted_sequence_1d
+ tensor([1, 3, 5, 7, 9])
+ >>> torch.searchsorted(sorted_sequence_1d, values)
+ tensor([[1, 3, 4],
+ [1, 3, 4]])
+""",
+)
+
+add_docstr(
+ torch.bucketize,
+ r"""
+bucketize(input, boundaries, *, out_int32=False, right=False, out=None) -> Tensor
+
+Returns the indices of the buckets to which each value in the :attr:`input` belongs, where the
+boundaries of the buckets are set by :attr:`boundaries`. Return a new tensor with the same size
+as :attr:`input`. If :attr:`right` is False (default), then the left boundary is open. Note that
+this behavior is opposite the behavior of
+`numpy.digitize <https://numpy.org/doc/stable/reference/generated/numpy.digitize.html>`_.
+More formally, the returned index satisfies the following rules:
+
+.. list-table::
+ :widths: 15 85
+ :header-rows: 1
+
+ * - :attr:`right`
+ - *returned index satisfies*
+ * - False
+ - ``boundaries[i-1] < input[m][n]...[l][x] <= boundaries[i]``
+ * - True
+ - ``boundaries[i-1] <= input[m][n]...[l][x] < boundaries[i]``
+
+Args:
+ input (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s).
+ boundaries (Tensor): 1-D tensor, must contain a strictly increasing sequence, or the return value is undefined.
+
+Keyword args:
+ out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise.
+ Default value is False, i.e. default output data type is torch.int64.
+ right (bool, optional): if False, return the first suitable location that is found. If True, return the
+ last such index. If no suitable index found, return 0 for non-numerical value
+ (e.g. nan, inf) or the size of :attr:`boundaries` (one past the last index).
+ In other words, if False, gets the lower bound index for each value in :attr:`input`
+ from :attr:`boundaries`. If True, gets the upper bound index instead.
+ Default value is False.
+ out (Tensor, optional): the output tensor, must be the same size as :attr:`input` if provided.
+
+
+Example::
+
+ >>> boundaries = torch.tensor([1, 3, 5, 7, 9])
+ >>> boundaries
+ tensor([1, 3, 5, 7, 9])
+ >>> v = torch.tensor([[3, 6, 9], [3, 6, 9]])
+ >>> v
+ tensor([[3, 6, 9],
+ [3, 6, 9]])
+ >>> torch.bucketize(v, boundaries)
+ tensor([[1, 3, 4],
+ [1, 3, 4]])
+ >>> torch.bucketize(v, boundaries, right=True)
+ tensor([[2, 3, 5],
+ [2, 3, 5]])
+""",
+)
+
+add_docstr(
+ torch.view_as_real_copy,
+ r"""
+Performs the same operation as :func:`torch.view_as_real`, but all output tensors
+are freshly created instead of aliasing the input.
+""",
+)
+
+add_docstr(
+ torch.view_as_complex_copy,
+ r"""
+Performs the same operation as :func:`torch.view_as_complex`, but all output tensors
+are freshly created instead of aliasing the input.
+""",
+)
+
+add_docstr(
+ torch.as_strided_copy,
+ r"""
+Performs the same operation as :func:`torch.as_strided`, but all output tensors
+are freshly created instead of aliasing the input.
+""",
+)
+
+add_docstr(
+ torch.diagonal_copy,
+ r"""
+Performs the same operation as :func:`torch.diagonal`, but all output tensors
+are freshly created instead of aliasing the input.
+""",
+)
+
+add_docstr(
+ torch.expand_copy,
+ r"""
+Performs the same operation as :func:`torch.expand`, but all output tensors
+are freshly created instead of aliasing the input.
+""",
+)
+
+add_docstr(
+ torch.permute_copy,
+ r"""
+Performs the same operation as :func:`torch.permute`, but all output tensors
+are freshly created instead of aliasing the input.
+""",
+)
+
+add_docstr(
+ torch.select_copy,
+ r"""
+Performs the same operation as :func:`torch.select`, but all output tensors
+are freshly created instead of aliasing the input.
+""",
+)
+
+add_docstr(
+ torch.detach_copy,
+ r"""
+Performs the same operation as :func:`torch.detach`, but all output tensors
+are freshly created instead of aliasing the input.
+""",
+)
+
+add_docstr(
+ torch.slice_copy,
+ r"""
+Performs the same operation as :func:`torch.slice`, but all output tensors
+are freshly created instead of aliasing the input.
+""",
+)
+
+add_docstr(
+ torch.split_copy,
+ r"""
+Performs the same operation as :func:`torch.split`, but all output tensors
+are freshly created instead of aliasing the input.
+""",
+)
+
+add_docstr(
+ torch.split_with_sizes_copy,
+ r"""
+Performs the same operation as :func:`torch.split_with_sizes`, but all output tensors
+are freshly created instead of aliasing the input.
+""",
+)
+
+add_docstr(
+ torch.squeeze_copy,
+ r"""
+Performs the same operation as :func:`torch.squeeze`, but all output tensors
+are freshly created instead of aliasing the input.
+""",
+)
+
+add_docstr(
+ torch.t_copy,
+ r"""
+Performs the same operation as :func:`torch.t`, but all output tensors
+are freshly created instead of aliasing the input.
+""",
+)
+
+add_docstr(
+ torch.transpose_copy,
+ r"""
+Performs the same operation as :func:`torch.transpose`, but all output tensors
+are freshly created instead of aliasing the input.
+""",
+)
+
+add_docstr(
+ torch.unsqueeze_copy,
+ r"""
+Performs the same operation as :func:`torch.unsqueeze`, but all output tensors
+are freshly created instead of aliasing the input.
+""",
+)
+
+add_docstr(
+ torch.indices_copy,
+ r"""
+Performs the same operation as :func:`torch.indices`, but all output tensors
+are freshly created instead of aliasing the input.
+""",
+)
+
+add_docstr(
+ torch.values_copy,
+ r"""
+Performs the same operation as :func:`torch.values`, but all output tensors
+are freshly created instead of aliasing the input.
+""",
+)
+
+add_docstr(
+ torch.crow_indices_copy,
+ r"""
+Performs the same operation as :func:`torch.crow_indices`, but all output tensors
+are freshly created instead of aliasing the input.
+""",
+)
+
+add_docstr(
+ torch.col_indices_copy,
+ r"""
+Performs the same operation as :func:`torch.col_indices`, but all output tensors
+are freshly created instead of aliasing the input.
+""",
+)
+
+add_docstr(
+ torch.unbind_copy,
+ r"""
+Performs the same operation as :func:`torch.unbind`, but all output tensors
+are freshly created instead of aliasing the input.
+""",
+)
+
+add_docstr(
+ torch.view_copy,
+ r"""
+Performs the same operation as :func:`torch.view`, but all output tensors
+are freshly created instead of aliasing the input.
+""",
+)
+
+add_docstr(
+ torch.unfold_copy,
+ r"""
+Performs the same operation as :func:`torch.unfold`, but all output tensors
+are freshly created instead of aliasing the input.
+""",
+)
+
+add_docstr(
+ torch.alias_copy,
+ r"""
+Performs the same operation as :func:`torch.alias`, but all output tensors
+are freshly created instead of aliasing the input.
+""",
+)
+
+for unary_base_func_name in (
+ "exp",
+ "sqrt",
+ "abs",
+ "acos",
+ "asin",
+ "atan",
+ "ceil",
+ "cos",
+ "cosh",
+ "erf",
+ "erfc",
+ "expm1",
+ "floor",
+ "log",
+ "log10",
+ "log1p",
+ "log2",
+ "neg",
+ "tan",
+ "tanh",
+ "sin",
+ "sinh",
+ "round",
+ "lgamma",
+ "frac",
+ "reciprocal",
+ "sigmoid",
+ "trunc",
+ "zero",
+):
+ unary_foreach_func_name = f"_foreach_{unary_base_func_name}"
+ if hasattr(torch, unary_foreach_func_name):
+ add_docstr(
+ getattr(torch, unary_foreach_func_name),
+ rf"""
+{unary_foreach_func_name}(self: List[Tensor]) -> List[Tensor]
+
+Apply :func:`torch.{unary_base_func_name}` to each Tensor of the input list.
+ """,
+ )
+ unary_inplace_foreach_func_name = f"{unary_foreach_func_name}_"
+ if hasattr(torch, unary_inplace_foreach_func_name):
+ add_docstr(
+ getattr(torch, unary_inplace_foreach_func_name),
+ rf"""
+{unary_inplace_foreach_func_name}(self: List[Tensor]) -> None
+
+Apply :func:`torch.{unary_base_func_name}` to each Tensor of the input list.
+ """,
+ )
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_utils.py b/env-llmeval/lib/python3.10/site-packages/torch/_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..8fa1f3e58cfa361b43195a951cd75069af269365
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_utils.py
@@ -0,0 +1,918 @@
+import copyreg
+import functools
+import sys
+import traceback
+import warnings
+from collections import defaultdict
+from contextlib import nullcontext
+from typing import Any, DefaultDict, List, Optional
+
+import torch
+
+
def _type(self, dtype=None, non_blocking=False, **kwargs):
    """Returns the type if `dtype` is not provided, else casts this object to
    the specified type.

    If this is already of the correct type, no copy is performed and the
    original object is returned.

    Args:
        dtype (type or string): The desired type
        non_blocking (bool): If ``True``, and the source is in pinned memory
            and destination is on the GPU or vice versa, the copy is performed
            asynchronously with respect to the host. Otherwise, the argument
            has no effect.
        **kwargs: For compatibility, may contain the key ``async`` in place of
            the ``non_blocking`` argument. The ``async`` arg is deprecated.
    """
    non_blocking = _get_async_or_non_blocking("type", non_blocking, kwargs)
    # With no target dtype, report this object's fully-qualified type name.
    if dtype is None:
        return self.__module__ + "." + self.__class__.__name__

    if isinstance(dtype, str):
        dtype = _import_dotted_name(dtype)
    if dtype == type(self):
        return self
    if self.is_sparse:
        if not dtype.is_sparse:
            raise RuntimeError("Cannot cast sparse tensor to dense tensor")
        # Sparse objects are cast piecewise: values take the target dtype,
        # indices always stay LongTensor (the ".sparse" namespace is dropped
        # to get the dense type names for the two components).
        new_module_name = dtype.__module__.replace(".sparse", "")
        new_values_type_name = new_module_name + "." + dtype.__name__
        new_values = torch.Tensor._values(self).type(new_values_type_name, non_blocking)
        new_indices_type_name = new_module_name + ".LongTensor"
        new_indices = torch.Tensor._indices(self).type(
            new_indices_type_name, non_blocking
        )
        return dtype(new_indices, new_values, self.size())
    if dtype.is_sparse:
        raise RuntimeError("Cannot cast dense tensor to sparse tensor")
    return dtype(self.size()).copy_(self, non_blocking)
+
+
def _hpu(self, device=None, non_blocking=False, **kwargs):
    """Returns a copy of this object in HPU memory.

    If this object is already in HPU memory and on the correct device, then
    no copy is performed and the original object is returned.

    Args:
        device (int): The destination HPU id. Defaults to the current device.
        non_blocking (bool): If ``True`` and the source is in pinned memory,
            the copy will be asynchronous with respect to the host. Otherwise,
            the argument has no effect.
        **kwargs: For compatibility, may contain the key ``async`` in place of
            the ``non_blocking`` argument.
    """
    non_blocking = _get_async_or_non_blocking("hpu", non_blocking, kwargs)
    # The HPU backend registers itself as `torch.hpu` when its package loads.
    hpu = getattr(torch, "hpu", None)
    assert hpu is not None, "HPU device module is not loaded"
    if self.is_hpu:
        if device is None:
            device = hpu.current_device()
        if self.get_device() == device:
            return self
    else:
        if device is None:
            # NOTE(review): -1 is assumed to mean "current device" inside
            # hpu.device() (same convention as _cuda below) — confirm against
            # the HPU backend.
            device = -1
    with hpu.device(device):
        assert not self.is_sparse, "sparse storage is not supported for HPU tensors"
        untyped_storage = torch.UntypedStorage(self.size(), device=torch.device("hpu"))
        untyped_storage.copy_(self, non_blocking)
        return untyped_storage
+
+
def _cuda(self, device=None, non_blocking=False, **kwargs):
    """Returns a copy of this object in CUDA memory.

    If this object is already in CUDA memory and on the correct device, then
    no copy is performed and the original object is returned.

    Args:
        device (int): The destination GPU id. Defaults to the current device.
        non_blocking (bool): If ``True`` and the source is in pinned memory,
            the copy will be asynchronous with respect to the host. Otherwise,
            the argument has no effect.
        **kwargs: For compatibility, may contain the key ``async`` in place of
            the ``non_blocking`` argument.
    """
    non_blocking = _get_async_or_non_blocking("cuda", non_blocking, kwargs)
    if self.is_cuda:
        if device is None:
            device = torch.cuda.current_device()
        if self.get_device() == device:
            return self
    else:
        if device is None:
            # -1 makes torch.cuda.device() a no-op, i.e. use the current device.
            device = -1
    with torch.cuda.device(device):
        if self.is_sparse:
            # Sparse objects are copied piecewise: move indices and values,
            # then reassemble with the CUDA sparse type of the same name.
            new_type = getattr(torch.cuda.sparse, self.__class__.__name__)
            indices = torch.Tensor._indices(self).cuda(device, non_blocking)
            values = torch.Tensor._values(self).cuda(device, non_blocking)
            return new_type(indices, values, self.size())
        else:
            untyped_storage = torch.UntypedStorage(
                self.size(), device=torch.device("cuda")
            )
            untyped_storage.copy_(self, non_blocking)
            return untyped_storage
+
+
+def _get_async_or_non_blocking(function_name, non_blocking, kwargs):
+ """Return the non-blocking flag given the function name and kwargs.
+
+ Args:
+ function_name (str): the name of the function being used.
+ non_blocking (bool): the default value.
+ **kwargs (dict): the kwargs passed to the function.
+ """
+ if not kwargs:
+ return non_blocking
+ if len(kwargs) != 1 or "async" not in kwargs:
+ message = "{}() got an unexpected keyword argument '{}'"
+ argument = list(kwargs.keys()).pop()
+ raise TypeError(message.format(function_name, argument))
+ warnings.warn("'async' is deprecated; use 'non_blocking'")
+ return kwargs["async"]
+
+
+# Note [Don't serialize hooks]
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# Since time immemorial, we have serialized the backward hooks associated with
+# variables. This kind of half-worked--Python can pickle global functions
+# (but not closures!)--but there were problems.
+#
+# - It's fragile. If you serialize a backward hook into a saved
+# model, and then you rename the function associated with the hook,
+# now your saved model is broken and you can't load it anymore.
+#
+# - It's not actually used. The standard recommendation is to
+# serialize the *state_dict* of a model, not the model itself
+# (since this is more stable to code changes affecting the model
+# serialization), and the state dict saves "data" only, thus
+# stripping the backward hooks. In some cases, hooks are
+# essential to the well-functioning of a model (e.g., DDP),
+# but DDP already manages readding the hooks!
+#
+# - We didn't serialize them in many cases. Prior to #10220, we
+# were dropping backward hooks in ForkingPickler. We "fixed" this
+# to be convenient with other serialization sites, but lack of
+# serializing backward hooks wasn't actually the root cause of
+# the bug.
+#
+# With these cases in mind, we have decided that a better strategy
+# is to just NOT serialize hooks at all.
+#
+# Since this is a BC-breaking change, we should warn when we previously
# serialized a hook, but no longer do so. This will be done by adding a special
# sentinel property to hooks that will be used to suppress this warning. If a hook
# has the property _torch_serialize_ignore, we will not emit a warning if we
+# attempt to serialize a Tensor with this hook attached to it.
+#
+# By the way, when _backward_hooks is skipped, we must give an EMPTY
+# OrderedDict(), if you pass a None you'll run afoul #12219.
+
+
# TODO: Once we decide to break serialization FC, `storage` no longer needs to
# be a TypedStorage
def _rebuild_tensor(storage, storage_offset, size, stride):
    """Reconstruct a tensor that views `storage` with the given geometry.

    Used by the pickle machinery when deserializing tensors.
    """
    # first construct a tensor with the correct dtype/device
    t = torch.tensor([], dtype=storage.dtype, device=storage._untyped_storage.device)
    return t.set_(storage._untyped_storage, storage_offset, size, stride)
+
+
def get_tensor_metadata(tensor):
    # Tensor's Metadata for serializing.
    # Currently, this only returns a dict[string, bool] specifying whether
    # `conj` or `neg` bit is set.
    assert isinstance(tensor, torch.Tensor)
    return torch._C._get_tensor_metadata(tensor)  # type: ignore[attr-defined]
+
+
def set_tensor_metadata(tensor, metadata):
    # See `get_tensor_metadata` above: applies a dict of tensor bits
    # (e.g. conj/neg) produced during serialization back onto `tensor`.
    assert isinstance(metadata, dict)
    assert isinstance(tensor, torch.Tensor)
    torch._C._set_tensor_metadata(tensor, metadata)  # type: ignore[attr-defined]
+
+
def _rebuild_tensor_v2(
    storage, storage_offset, size, stride, requires_grad, backward_hooks, metadata=None
):
    """Rebuild a tensor (v2 pickle format): geometry plus ``requires_grad``,
    optional conj/neg-bit metadata, and legacy ``backward_hooks``."""
    tensor = _rebuild_tensor(storage, storage_offset, size, stride)
    tensor.requires_grad = requires_grad
    if metadata:
        set_tensor_metadata(tensor, metadata)

    # NB: This line exists only for backwards compatibility; the
    # general expectation is that backward_hooks is an empty
    # OrderedDict. See Note [Don't serialize hooks]
    tensor._backward_hooks = backward_hooks
    return tensor
+
+
def _rebuild_tensor_v3(
    storage,
    storage_offset,
    size,
    stride,
    requires_grad,
    backward_hooks,
    dtype,
    metadata=None,
):
    """Rebuild a tensor (v3 pickle format).

    Unlike v2, the tensor dtype is carried explicitly and may differ from the
    storage's dtype, so the tensor is created from `dtype` and then mapped
    onto the untyped storage.
    """
    t = torch.tensor(
        [],
        dtype=dtype,
        device=storage._untyped_storage.device,
        requires_grad=requires_grad,
    )
    t.set_(storage._untyped_storage, storage_offset, size, stride)
    if metadata:
        set_tensor_metadata(t, metadata)
    # Legacy; expected to be an empty OrderedDict. See Note [Don't serialize hooks]
    t._backward_hooks = backward_hooks
    return t
+
+
+_sparse_tensors_to_validate: List["torch.Tensor"] = []
+
+
# In _legacy_load() in serialization.py we unpickle storages after the sparse
# tensors have been already unpickled. Those storages contain data necessary for
# validating sparse tensors: indices and values. That's why sparse tensors are
# first unpickled without any validation, and then this function is called just
# before _legacy_load() returns, so that all the sparse tensors can be validated
# in bulk.
#
# The same procedure must be followed by _load() in serialization.py because due
# to Pickler semantics, we have to use the same (non-validating) function for
# unpickling sparse tensors, regardless of the caller.
def _validate_loaded_sparse_tensors():
    """Validate every sparse tensor queued in `_sparse_tensors_to_validate`,
    clearing the queue afterwards (even if validation fails)."""
    try:
        for t in _sparse_tensors_to_validate:
            if t.layout is torch.sparse_coo:
                torch._validate_sparse_coo_tensor_args(
                    t._indices(), t._values(), t.size(), t.is_coalesced()
                )
            elif t.layout in {
                torch.sparse_csr,
                torch.sparse_csc,
                torch.sparse_bsr,
                torch.sparse_bsc,
            }:
                # TODO: Validation currently involves an expensive traversal
                # on CPU, which may include a device transfer.
                if t.layout in {torch.sparse_csr, torch.sparse_bsr}:
                    compressed_indices, plain_indices = (
                        t.crow_indices(),
                        t.col_indices(),
                    )
                else:
                    compressed_indices, plain_indices = (
                        t.ccol_indices(),
                        t.row_indices(),
                    )
                torch._validate_sparse_compressed_tensor_args(
                    compressed_indices, plain_indices, t.values(), t.size(), t.layout
                )
            else:
                raise NotImplementedError(
                    f"_validate_loaded_sparse_tensors for layout `{t.layout}`"
                )

    finally:
        _sparse_tensors_to_validate.clear()
+
+
def _rebuild_sparse_tensor(layout, data):
    """
    Rebuilds a sparse tensor from its sparse storage representation.

    The rebuilt tensor is NOT validated here; it is queued in
    `_sparse_tensors_to_validate` for bulk validation after loading.

    Args:
        layout (str): The sparse storage layout of the tensor.
        data (tuple): The tensor's sparse storage representation.
    """
    if layout == torch.sparse_coo:
        if len(data) == 3:
            # For BC: older checkpoints did not serialize the coalesced flag.
            indices, values, size = data
            is_coalesced = None
        else:
            indices, values, size, is_coalesced = data
        result = torch.sparse_coo_tensor(
            indices, values, size, check_invariants=False, is_coalesced=is_coalesced
        )
        _sparse_tensors_to_validate.append(result)
        return result

    elif layout in {
        torch.sparse_csr,
        torch.sparse_csc,
        torch.sparse_bsr,
        torch.sparse_bsc,
    }:
        compressed_indices, plain_indices, values, size = data
        result = torch.sparse_compressed_tensor(
            compressed_indices,
            plain_indices,
            values,
            size,
            layout=layout,
            check_invariants=False,
        )
        _sparse_tensors_to_validate.append(result)
        return result

    raise NotImplementedError(f"rebuilding sparse tensor for layout {layout}")
+
+
def _rebuild_nested_tensor(buffer, sizes, strides, storage_offsets):
    """Reconstruct a nested tensor as views over the flat `buffer` with
    per-component sizes/strides/offsets."""
    return torch._nested_view_from_buffer(buffer, sizes, strides, storage_offsets)
+
+
+def _rebuild_device_tensor_from_numpy(data, dtype, device, requires_grad):
+ tensor = torch.from_numpy(data).to(dtype=dtype, device=device)
+ tensor.requires_grad = requires_grad
+ return tensor
+
+
+# Should not be used, only here to be able to load Tensors serialized with older versions of pytorch
+_rebuild_xla_tensor = _rebuild_device_tensor_from_numpy
+
+
+def _rebuild_meta_tensor_no_storage(dtype, size, stride, requires_grad):
+ return torch.empty_strided(
+ size, stride, dtype=dtype, device="meta", requires_grad=requires_grad
+ )
+
+
def _rebuild_wrapper_subclass(
    cls, dtype, size, stride, storage_offset, layout, device, requires_grad
):
    """Rebuild a storage-less tensor wrapper subclass (`cls`) purely from
    its serialized metadata."""
    return torch.Tensor._make_wrapper_subclass(  # type: ignore[attr-defined]
        cls,
        size,
        strides=stride,
        storage_offset=storage_offset,
        layout=layout,
        device=device,
        requires_grad=requires_grad,
    )
+
+
# TODO: Once we decide to break serialization FC, `storage` no longer needs to
# be a TypedStorage
def _rebuild_qtensor(
    storage,
    storage_offset,
    size,
    stride,
    quantizer_params,
    requires_grad,
    backward_hooks,
):
    """Rebuild a quantized tensor from its storage and quantizer params.

    `quantizer_params` is a tuple whose first element is the qscheme; the
    remaining elements depend on the scheme (per-tensor scale/zero_point, or
    per-channel scales/zero_points/axis).
    """
    qscheme = quantizer_params[0]
    if qscheme == torch.per_tensor_affine:
        _, scale, zero_point = quantizer_params
        tensor = torch._empty_affine_quantized(
            size,
            scale=scale,
            zero_point=zero_point,
            dtype=storage.dtype,
            device=storage.device,
        )
    elif qscheme in (torch.per_channel_affine, torch.per_channel_affine_float_qparams):
        _, scales, zero_points, axis = quantizer_params
        # scales/zero_points may arrive as plain lists (presumably from older
        # serialized files); convert to the tensor dtypes each scheme expects.
        if type(scales) is list and type(zero_points) is list:
            if qscheme == torch.per_channel_affine:
                scales = torch.tensor(scales, dtype=torch.double, device=storage.device)
                zero_points = torch.tensor(
                    zero_points, dtype=torch.long, device=storage.device
                )
            else:
                scales = torch.tensor(scales, dtype=torch.float, device=storage.device)
                zero_points = torch.tensor(
                    zero_points, dtype=torch.float, device=storage.device
                )
        tensor = torch._empty_per_channel_affine_quantized(
            size,
            scales=scales,
            zero_points=zero_points,
            axis=axis,
            dtype=storage.dtype,
            device=storage.device,
        )
    else:
        raise RuntimeError(f"Can't deserialize quantized tensor with qscheme {qscheme}")
    tensor.set_(storage, storage_offset, size, stride)
    tensor.requires_grad = requires_grad
    # NB: This line exists only for backwards compatibility; the
    # general expectation is that backward_hooks is an empty
    # OrderedDict. See Note [Don't serialize hooks]
    tensor._backward_hooks = backward_hooks
    return tensor
+
+
+def _rebuild_parameter(data, requires_grad, backward_hooks):
+ param = torch.nn.Parameter(data, requires_grad)
+ # NB: This line exists only for backwards compatibility; the
+ # general expectation is that backward_hooks is an empty
+ # OrderedDict. See Note [Don't serialize hooks]
+ param._backward_hooks = backward_hooks
+
+ return param
+
+
def _rebuild_parameter_with_state(data, requires_grad, backward_hooks, state):
    """Reconstruct an ``nn.Parameter`` and restore python attribute `state`
    (as produced by `_get_obj_state`) onto it."""
    param = torch.nn.Parameter(data, requires_grad)
    # NB: This line exists only for backwards compatibility; the
    # general expectation is that backward_hooks is an empty
    # OrderedDict. See Note [Don't serialize hooks]
    param._backward_hooks = backward_hooks

    # Restore state on Parameter like python attr.
    param = _set_obj_state(param, state)
    return param
+
+
+def _get_obj_state(obj):
+ # Get the state of the python subclass
+ # This loosely mimicks the function on the object class but since Tensor do not inherit
+ # from it, we cannot call that function directly
+ # https://github.com/python/cpython/blob/c83919bd635f4433f1c6ae8504996a9fe3c215e5/Objects/typeobject.c#L4891
+ # Note that starting with Python 3.11, this `__getstate__` is always defined and thus
+ # the else branch will never be taken.
+ getstate_fn = getattr(obj, "__getstate__", None)
+ if getstate_fn:
+ state = getstate_fn()
+ else:
+ slots_to_save = copyreg._slotnames(obj.__class__) # type: ignore[attr-defined]
+ if slots_to_save:
+ state = (
+ obj.__dict__,
+ {
+ name: getattr(obj, name)
+ for name in slots_to_save
+ if hasattr(obj, name)
+ },
+ )
+ else:
+ state = obj.__dict__
+
+ return state
+
+
+def _set_obj_state(obj, state):
+ if isinstance(state, tuple):
+ if not len(state) == 2:
+ raise RuntimeError(f"Invalid serialized state: {state}")
+ dict_state = state[0]
+ slots_state = state[1]
+ else:
+ dict_state = state
+ slots_state = None
+
+ # Starting with Python 3.11, the __dict__ attribute is lazily created
+ # and is serialized as None when not needed.
+ if dict_state:
+ for k, v in dict_state.items():
+ setattr(obj, k, v)
+
+ if slots_state:
+ for k, v in slots_state.items():
+ setattr(obj, k, v)
+ return obj
+
+
+def _import_dotted_name(name):
+ components = name.split(".")
+ obj = __import__(components[0])
+ for component in components[1:]:
+ obj = getattr(obj, component)
+ return obj
+
+
+# Taken from python 3.5 docs
+def _accumulate(iterable, fn=lambda x, y: x + y):
+ "Return running totals"
+ # _accumulate([1,2,3,4,5]) --> 1 3 6 10 15
+ # _accumulate([1,2,3,4,5], operator.mul) --> 1 2 6 24 120
+ it = iter(iterable)
+ try:
+ total = next(it)
+ except StopIteration:
+ return
+ yield total
+ for element in it:
+ total = fn(total, element)
+ yield total
+
+
def _flatten_dense_tensors(tensors):
    """Flatten dense tensors into a contiguous 1D buffer. Assume tensors are of
    same dense type.

    Since inputs are dense, the resulting tensor will be a concatenated 1D
    buffer. Element-wise operation on this buffer will be equivalent to
    operating individually.

    Args:
        tensors (Iterable[Tensor]): dense tensors to flatten.

    Returns:
        A contiguous 1D buffer containing input tensors.
    """
    # Delegates to the C++ implementation.
    return torch._C._nn.flatten_dense_tensors(tensors)
+
+
+def _flatten_sparse_tensors(tensors):
+ """Flatten sparse tensors into two contiguous 1D buffers, one of indices and
+ one of values. Assume tensors are of same sparse type.
+
+ Args:
+ tensors (Iterable[Tensor]): sparse tensors to flatten.
+
+ Returns:
+ A tuple of two contiguous 1D buffers, one containing input tensors'
+ indices and the other containing the values.
+ """
+ flat_indices = torch._C._nn.flatten_dense_tensors(
+ [torch.Tensor._indices(t) for t in tensors]
+ )
+ flat_values = torch._C._nn.flatten_dense_tensors(
+ [torch.Tensor._values(t) for t in tensors]
+ )
+ return flat_indices, flat_values
+
+
def _unflatten_dense_tensors(flat, tensors):
    """View a flat buffer using the sizes of tensors. Assume that tensors are of
    same dense type, and that flat is given by _flatten_dense_tensors.

    Args:
        flat (Tensor): flattened dense tensors to unflatten.
        tensors (Iterable[Tensor]): dense tensors whose sizes will be used to
            unflatten flat.

    Returns:
        Unflattened dense tensors with sizes same as tensors and values from
        flat.
    """
    # Delegates to the C++ implementation.
    return torch._C._nn.unflatten_dense_tensors(flat, tensors)
+
+
def _unflatten_sparse_tensors(flat, tensors):
    """View flat buffer (containing indices and values) using the sizes of
    tensors. Assume that tensors are of same sparse type, and that flat is given
    by _flatten_sparse_tensors.

    Args:
        flat (tuple(Tensor, Tensor)): flattened indices and values of sparse
            tensors to unflatten.
        tensors (Iterable[Tensor]): sparse tensors whose sizes will be used to
            unflatten flat.

    Returns:
        Unflattened sparse tensors with sizes same as tensors and values from
        flat.
    """
    flat_indices, flat_values = flat
    indices = torch._C._nn.unflatten_dense_tensors(
        flat_indices, [torch.Tensor._indices(t) for t in tensors]
    )
    values = torch._C._nn.unflatten_dense_tensors(
        flat_values, [torch.Tensor._values(t) for t in tensors]
    )
    outputs = []
    for t, i, v in zip(tensors, indices, values):
        # Legacy constructor: `t.new(indices, values, size)` builds a sparse
        # tensor of the same type as `t` from the unflattened pieces.
        outputs.append(t.new(i, v, t.size()))
    return tuple(outputs)
+
+
+def _reorder_tensors_as(tensors, ordered_tensors):
+ """Assume that tensors are of same order as ordered_tensors within their
+ types, e.g., from _take_tensors. Reorder them to be of same order as
+ ordered_tensors.
+
+ Args:
+ tensors (Iterable[Tensor]): tensors to be reordered. They should be of
+ the same order as ordered_tensors within their own types.
+ ordered_tensors (Iterable[Tensor]): tensors whose order will be the
+ reference.
+
+ Returns:
+ Ordered tuple of tensors with contents from tensors and order of
+ ordered_tensors.
+ """
+ type_dict = defaultdict(list)
+ for tensor in tensors:
+ type_dict[tensor.type()].append(tensor)
+ type_dict_ = {t: iter(coll) for t, coll in type_dict.items()}
+ return tuple(next(type_dict_[tensor.type()]) for tensor in ordered_tensors)
+
+
def _take_tensors(tensors, size_limit):
    """Group tensors into chunks. This generator yields a chunk at each time,
    each containing tensors of same type up to certain byte limit in total size.

    Args:
        tensors (Sequence): A sequence of tensors to be separated into chunks.
        size_limit (int): The limit of each chunk in bytes.

    Yields:
        Blocks of tensors of same type and within size_limit. The yielded
        tensors are only ordered as the original sequence within its types.
    """
    # Per-type accumulator: type name -> [tensors in current chunk, bytes so far]
    buf_dict: DefaultDict[str, List] = defaultdict(lambda: [[], 0])
    for tensor in tensors:
        t = tensor.type()
        if tensor.is_sparse:
            # Sparse size = bytes of indices + bytes of values.
            indices = torch.Tensor._indices(tensor)
            values = torch.Tensor._values(tensor)
            size = (
                indices.numel() * indices.element_size()
                + values.numel() * values.element_size()
            )
        else:
            size = tensor.numel() * tensor.element_size()
        buf_and_size = buf_dict[t]
        # Flush first if adding this tensor would overflow a non-empty chunk;
        # a single oversized tensor still gets its own chunk.
        if buf_and_size[1] + size > size_limit and buf_and_size[1] > 0:
            yield buf_and_size[0]
            buf_and_size = buf_dict[t] = [[], 0]
        buf_and_size[0].append(tensor)
        buf_and_size[1] += size
    # Flush the remaining partial chunk of every type.
    for buf, _ in buf_dict.values():
        if len(buf) > 0:
            yield buf
+
+
# annotation decorator to get annotations in a way that is compatible
# with both Python 2 and 3
def annotate(ret, **kwargs):
    """Decorator attaching `kwargs` as parameter annotations and `ret` as the
    return annotation of the wrapped function."""

    def dec(fun):
        annotations = dict(kwargs)
        annotations["return"] = ret
        fun.__annotations__ = annotations
        return fun

    return dec
+
+
+def render_call(fn, args, kwargs):
+ str_fn = torch.overrides.resolve_name(fn)
+ if str_fn is None:
+ str_fn = str(fn)
+
+ str_args: List[str] = []
+ with torch._tensor_str.printoptions(threshold=0, edgeitems=0):
+ str_args.extend(repr(a) for a in args)
+ str_args.extend(f"{k}={repr(v)}" for k, v in kwargs.items())
+ r = f"{str_fn}({', '.join(str_args)})"
+ return r
+
+
+# NOTE [ Python Traceback Reference Cycle Problem ]
+#
+# When using sys.exc_info(), it is important to **not** store the exc_info[2],
+# which is the traceback, because otherwise you will run into the traceback
# reference cycle problem, i.e., the traceback holding a reference to the frame,
# and the frame (which holds references to all the objects in its temporary scope)
# holding a reference to the traceback.
+
+
class KeyErrorMessage(str):
    r"""str subclass whose repr is the string itself (no quoting/escaping).

    KeyError calls repr() on its argument, which would mangle multi-line
    messages; this keeps them readable.
    """

    def __repr__(self):
        return self
+
+
class ExceptionWrapper:
    r"""Wraps an exception plus traceback to communicate across threads"""

    def __init__(self, exc_info=None, where="in background"):
        # Do NOT keep a reference to exc_info itself (in particular the
        # traceback object); see
        # NOTE [ Python Traceback Reference Cycle Problem ]
        info = sys.exc_info() if exc_info is None else exc_info
        self.exc_type = info[0]
        self.exc_msg = "".join(traceback.format_exception(*info))
        self.where = where

    def reraise(self):
        r"""Reraises the wrapped exception in the current thread"""
        # Format a message such as: "Caught ValueError in DataLoader worker
        # process 2. Original Traceback:", followed by the traceback.
        full_msg = f"Caught {self.exc_type.__name__} {self.where}.\nOriginal {self.exc_msg}"
        if self.exc_type == KeyError:
            # KeyError calls repr() on its argument (usually a dict key). This
            # makes stack traces unreadable. It will not be changed in Python
            # (https://bugs.python.org/issue2651), so we work around it.
            full_msg = KeyErrorMessage(full_msg)
        elif getattr(self.exc_type, "message", None):
            # Some exceptions have first argument as non-str but explicitly
            # have message field
            raise self.exc_type(message=full_msg)
        try:
            rebuilt = self.exc_type(full_msg)
        except TypeError:
            # If the exception takes multiple arguments, don't try to
            # instantiate since we don't know how to
            raise RuntimeError(full_msg) from None
        raise rebuilt
+
+
def _get_available_device_type():
    """Return the type of the available accelerator ("cuda", "xpu", or a
    custom privateuse1 backend name), or None when no accelerator is
    available."""
    if torch.cuda.is_available():
        return "cuda"
    if hasattr(torch, "xpu") and torch.xpu.is_available():  # type: ignore[attr-defined]
        return "xpu"
    custom_backend_name = torch._C._get_privateuse1_backend_name()
    custom_device_mod = getattr(torch, custom_backend_name, None)
    if custom_device_mod and custom_device_mod.is_available():
        return custom_backend_name
    # add more available device types here
    return None
+
+
def _get_device_attr(get_member):
    """Apply `get_member` to the device module of the available accelerator
    (e.g. ``torch.cuda``); returns None when no accelerator is available."""
    device_type = _get_available_device_type()
    if device_type and device_type.lower() == "cuda":
        return get_member(torch.cuda)
    if device_type and device_type.lower() == "xpu":
        return get_member(torch.xpu)  # type: ignore[attr-defined]
    if device_type == torch._C._get_privateuse1_backend_name():
        return get_member(getattr(torch, device_type))
    # add more available device types here
    return None
+
+
def _get_current_device_index():
    # current device index of the available accelerator (None if there is none)
    return _get_device_attr(lambda m: m.current_device())


def _get_all_device_indices():
    # all device indices of the available accelerator (None if there is none)
    return _get_device_attr(lambda m: list(range(m.device_count())))


def _get_devices_properties(device_ids):
    # device properties for each requested device id
    return [_get_device_attr(lambda m: m.get_device_properties(i)) for i in device_ids]
+
+
def get_current_device_index() -> int:
    r"""Checks if there are CUDA devices available and
    returns the device index of the current default CUDA device.
    Returns -1 in case there are no CUDA devices available.
    Arguments: ``None``
    """
    # Kept free of lambdas so it stays TorchScript-scriptable
    # (see _get_device_index).
    if torch.cuda.device_count() == 0:
        return -1
    return torch.cuda.current_device()
+
+
def _get_device_index(
    device: Any, optional: bool = False, allow_cpu: bool = False
) -> int:
    r"""Gets the device index from :attr:`device`, which can be a torch.device
    object, a Python integer, or ``None``.

    If :attr:`device` is a torch.device object, returns the device index if it
    has index. Note that for a device without a specified index,
    i.e., ``torch.device('xxx')``, this will return the current default
    device of that type if :attr:`optional` is ``True``. If :attr:`allow_cpu` is ``True``,
    CPU devices will be accepted and ``-1`` will be returned in this case.

    If :attr:`device` is a Python integer, it is returned as is.

    If :attr:`device` is ``None``, this will return the current default
    device of the supported runtime platform if :attr:`optional` is ``True``.
    i.e., the current default CUDA device will be returned if CUDA runtime is supported.
    """
    if isinstance(device, str):
        device = torch.device(device)
    device_idx: Optional[int] = None
    if isinstance(device, torch.device):
        if not allow_cpu and device.type == "cpu":
            raise ValueError(f"Expected a non cpu device, but got: {device}")
        # CPU is reported as -1; other device types use their explicit index,
        # which may still be None when the device string carried no index.
        device_idx = -1 if device.type == "cpu" else device.index
    if isinstance(device, int):
        device_idx = device
    if device_idx is None:
        if optional:
            # The eager API _get_current_device_index uses `lambda` functions which are
            # not supported in JIT and hence not scriptable. The JIT equivalent API to get
            # the current device index is `get_current_device_index()` which can
            # be scripted. We use is_scripting to check the mode we are in and call the
            # appropriate API.
            if torch.jit.is_scripting():
                device_idx = get_current_device_index()
            else:
                device_idx = _get_current_device_index()
        else:
            raise ValueError(
                f"Expected a torch.device with a specified index or an integer, but got:{device}"
            )
    return device_idx
+
+
+def _handle_complex(tensor):
+ """
+ Returns a real view of a tensor if complex dtype else just the tensor
+ need to check if a UninitializedParameter because otherwise checking is_complex is an error for a LazyModule
+ """
+ return (
+ torch.view_as_real(tensor)
+ if not isinstance(tensor, torch.nn.UninitializedParameter)
+ and tensor.is_complex()
+ else tensor
+ )
+
+
+def _element_size(dtype):
+ """
+ Returns the element size for a dtype, in bytes
+ """
+ if not isinstance(dtype, torch.dtype):
+ raise RuntimeError(f"expected torch.dtype, but got {type(dtype)}")
+
+ if dtype.is_complex:
+ return torch.finfo(dtype).bits >> 2
+ elif dtype.is_floating_point:
+ return torch.finfo(dtype).bits >> 3
+ elif dtype == torch.bool:
+ # NOTE: torch.bool is not supported in torch.iinfo()
+ return 1
+ else:
+ return torch.iinfo(dtype).bits >> 3
+
+
+class _ClassPropertyDescriptor:
+ def __init__(self, fget, fset=None):
+ self.fget = fget
+
+ def __get__(self, instance, owner=None):
+ if owner is None:
+ owner = type(instance)
+ return self.fget.__get__(instance, owner)()
+
+
+def classproperty(func):
+ if not isinstance(func, (classmethod, staticmethod)):
+ func = classmethod(func)
+ return _ClassPropertyDescriptor(func)
+
+
# Whether we are compiling with torch.compile or not
def is_compiling():
    """Return whether we are currently compiling with torch.compile.

    This eager-mode definition is a constant ``False``.
    """
    return False
+
+
def _functionalize_sync(t):
    """Run the C++ functionalization sync on `t` (propagates pending updates).

    This code lives in python instead of C++ since conditioning on a certain
    python subclass is much more of a pain in C++.

    Args:
        t: a plain tensor (C++ FunctionalTensorWrapper) or a python
            FunctionalTensor whose inner `.elem` should be synced.
    """
    from torch._subclasses.functional_tensor import FunctionalTensor

    # Bug fix: the previous version also imported maybe_disable_functional_mode
    # and built an unused `ctx` context-manager selection from it — dead code,
    # now removed.
    if isinstance(t, FunctionalTensor):
        # If a FunctionalTensorMode is active while syncing, we don't want it to intercept any ops that get called
        # when we sync our inner tensor.
        # Why?
        # (1) If there are input mutations in the graph, then they will be re-applied during
        #     AOTAutograd when we call _sync() from inside of our functionalization kernels.
        # (2) _sync() causes us to regenerate the updated tensor from the updated base,
        #     which dispatches to a bunch of view ops
        # (3) The input to these view ops is our inner FunctionalTensorWrapper
        #     (since the sync was called from C++), not the python FunctionalTensor
        # (4) if a python FunctionalTensorMode is active, it will complain when it intercepts
        #     the view op, since it will see an input that is a C++ FunctionalTensorWrapper
        #     (aka a normal torch.Tensor) instead of a python `FunctionalTensor`.
        maybe_functional_mode = torch._C._unset_dispatch_mode(
            torch._C._TorchDispatchModeKey.FUNCTIONAL
        )
        try:
            torch._functionalize_sync(t.elem)  # type: ignore[attr-defined]
        finally:
            # Re-install the mode we popped, even if the sync raised.
            if maybe_functional_mode is not None:
                torch._C._set_dispatch_mode(maybe_functional_mode)
    else:
        torch._functionalize_sync(t)  # type: ignore[attr-defined]
+
+
+@functools.lru_cache(2)
+def _get_device_module(device_type: str):
+ device_module = getattr(torch, device_type, None)
+ if device_module is None:
+ raise RuntimeError(
+ f"Device '{device_type}' does not have a corresponding module registered as 'torch.{device_type}'."
+ )
+ return device_module
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_vmap_internals.py b/env-llmeval/lib/python3.10/site-packages/torch/_vmap_internals.py
new file mode 100644
index 0000000000000000000000000000000000000000..8440abccb23904e935878e245b390465e04b5db0
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_vmap_internals.py
@@ -0,0 +1,237 @@
+import functools
+import warnings
+from typing import Any, Callable, List, Optional, Tuple, Union
+
+import torch
+from torch import Tensor
+from torch.utils._pytree import _broadcast_to_and_flatten, tree_flatten, tree_unflatten
+
+in_dims_t = Union[int, Tuple]
+out_dims_t = Union[int, Tuple[int, ...]]
+
+
+# Checks that all args-to-be-batched have the same batch dim size
+def _validate_and_get_batch_size(
+ flat_in_dims: List[Optional[int]], flat_args: List
+) -> int:
+ batch_sizes = [
+ arg.size(in_dim)
+ for in_dim, arg in zip(flat_in_dims, flat_args)
+ if in_dim is not None
+ ]
+ if batch_sizes and any(size != batch_sizes[0] for size in batch_sizes):
+ raise ValueError(
+ f"vmap: Expected all tensors to have the same size in the mapped "
+ f"dimension, got sizes {batch_sizes} for the mapped dimension"
+ )
+ return batch_sizes[0]
+
+
+def _num_outputs(batched_outputs: Union[Tensor, Tuple[Tensor, ...]]) -> int:
+ if isinstance(batched_outputs, tuple):
+ return len(batched_outputs)
+ return 1
+
+
+# If value is a tuple, check it has length `num_elements`.
+# If value is not a tuple, make a tuple with `value` repeated `num_elements` times
+def _as_tuple(
+ value: Any, num_elements: int, error_message_lambda: Callable[[], str]
+) -> Tuple:
+ if not isinstance(value, tuple):
+ return (value,) * num_elements
+ if len(value) != num_elements:
+ raise ValueError(error_message_lambda())
+ return value
+
+
# Creates BatchedTensors for every Tensor in arg that should be batched.
# Returns the (potentially) batched arguments and the batch_size.
def _create_batched_inputs(
    in_dims: in_dims_t, args: Tuple, vmap_level: int, func: Callable
) -> Tuple[Tuple, int]:
    """Validate ``in_dims`` against ``args`` and wrap to-be-batched Tensors.

    Returns the rebuilt (pytree) arguments — every Tensor with a non-None
    in_dim wrapped via ``torch._add_batch_dim`` — together with the common
    batch size.
    """
    if not isinstance(in_dims, (int, tuple)):
        raise ValueError(
            f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(): "
            f"expected `in_dims` to be int or a (potentially nested) tuple "
            f"matching the structure of inputs, got: {type(in_dims)}."
        )
    if not args:
        raise ValueError(
            f"vmap({_get_name(func)})(): got no inputs. Maybe you forgot to add "
            f"inputs, or you are trying to vmap over a function with no inputs. "
            f"The latter is unsupported."
        )

    flat_args, args_spec = tree_flatten(args)
    flat_in_dims = _broadcast_to_and_flatten(in_dims, args_spec)
    if flat_in_dims is None:
        raise ValueError(
            f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(): "
            f"in_dims is not compatible with the structure of `inputs`. "
            f"in_dims has structure {tree_flatten(in_dims)[1]} but inputs "
            f"has structure {args_spec}."
        )

    # Per-argument validation: each non-None in_dim must be an int naming a
    # real dimension of a Tensor argument.
    for arg, in_dim in zip(flat_args, flat_in_dims):
        if in_dim is None:
            continue
        if not isinstance(in_dim, int):
            raise ValueError(
                f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(): "
                f"Got in_dim={in_dim} for an input but in_dim must be either "
                f"an integer dimension or None."
            )
        if not isinstance(arg, Tensor):
            raise ValueError(
                f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(): "
                f"Got in_dim={in_dim} for an input but the input is of type "
                f"{type(arg)}. We cannot vmap over non-Tensor arguments, "
                f"please use None as the respective in_dim"
            )
        if in_dim < 0 or in_dim >= arg.dim():
            raise ValueError(
                f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(): "
                f"Got in_dim={in_dim} for some input, but that input is a Tensor "
                f"of dimensionality {arg.dim()} so expected in_dim to satisfy "
                f"0 <= in_dim < {arg.dim()}."
            )

    batch_size = _validate_and_get_batch_size(flat_in_dims, flat_args)
    # See NOTE [Ignored _remove_batch_dim, _add_batch_dim]
    batched_inputs = [
        arg if in_dim is None else torch._add_batch_dim(arg, in_dim, vmap_level)
        for in_dim, arg in zip(flat_in_dims, flat_args)
    ]
    return tree_unflatten(batched_inputs, args_spec), batch_size
+
+
# Undos the batching (and any batch dimensions) associated with the `vmap_level`.
def _unwrap_batched(
    batched_outputs: Union[Tensor, Tuple[Tensor, ...]],
    out_dims: out_dims_t,
    vmap_level: int,
    batch_size: int,
    func: Callable,
    allow_none_pass_through: bool = False,
) -> Tuple:
    """Strip the batch dim added at ``vmap_level`` from every output.

    ``out_dims`` states where the mapped dimension lands in each output. With
    ``allow_none_pass_through`` set, ``None`` outputs (e.g. from
    ``autograd.grad`` on unused inputs) are forwarded untouched.
    """
    num_outputs = _num_outputs(batched_outputs)
    out_dims_as_tuple = _as_tuple(
        out_dims,
        num_outputs,
        lambda: f"vmap({_get_name(func)}, ..., out_dims={out_dims}): `out_dims` must "
        f"have one dim per output (got {num_outputs} outputs) of {_get_name(func)}.",
    )

    # NOTE [Ignored _remove_batch_dim, _add_batch_dim]
    # There is something wrong with our type bindings for functions that begin
    # with '_', see #40397.
    if isinstance(batched_outputs, Tensor):
        return torch._remove_batch_dim(  # type: ignore[return-value]
            batched_outputs, vmap_level, batch_size, out_dims_as_tuple[0]
        )

    def unwrap_one(out, out_dim):
        if allow_none_pass_through and out is None:
            return None
        return torch._remove_batch_dim(out, vmap_level, batch_size, out_dim)

    return tuple(
        unwrap_one(out, out_dim)
        for out, out_dim in zip(batched_outputs, out_dims_as_tuple)
    )
+
+
# Checks that `fn` returned one or more Tensors and nothing else.
# NB: A python function that return multiple arguments returns a single tuple,
# so we are effectively checking that `outputs` is a single Tensor or a tuple of
# Tensors.
def _validate_outputs(outputs: Any, func: Callable) -> None:
    """Raise ``ValueError`` unless ``outputs`` is a Tensor or tuple of Tensors."""
    if isinstance(outputs, Tensor):
        return
    if not isinstance(outputs, tuple):
        raise ValueError(
            f"vmap({_get_name(func)}, ...): `{_get_name(func)}` must only return "
            f"Tensors, got type {type(outputs)} as the return."
        )
    for idx, output in enumerate(outputs):
        if not isinstance(output, Tensor):
            raise ValueError(
                f"vmap({_get_name(func)}, ...): `{_get_name(func)}` must only return "
                f"Tensors, got type {type(output)} for return {idx}."
            )
+
+
def _check_out_dims_is_int_or_int_tuple(out_dims: out_dims_t, func: Callable) -> None:
    """Validate that ``out_dims`` is an int or a tuple of ints, else raise."""
    if isinstance(out_dims, int):
        return
    is_int_tuple = isinstance(out_dims, tuple) and all(
        isinstance(out_dim, int) for out_dim in out_dims
    )
    if not is_int_tuple:
        raise ValueError(
            f"vmap({_get_name(func)}, ..., out_dims={out_dims}): `out_dims` must be "
            f"an int or a tuple of int representing where in the outputs the "
            f"vmapped dimension should appear."
        )
+
+
+def _get_name(func: Callable):
+ if hasattr(func, "__name__"):
+ return func.__name__
+
+ # Not all callables have __name__, in fact, only static functions/methods do.
+ # A callable created via functools.partial or an nn.Module, to name some
+ # examples, don't have a __name__.
+ return repr(func)
+
+
# vmap(func)(inputs) wraps all Tensor inputs to be batched in BatchedTensors,
# sends those into func, and then unwraps the output BatchedTensors. Operations
# on BatchedTensors perform the batched operations that the user is asking for.
def vmap(func: Callable, in_dims: in_dims_t = 0, out_dims: out_dims_t = 0) -> Callable:
    """
    Please use torch.vmap instead of this API.
    """
    # Deprecated shim: warn once per call site (stacklevel=2 points the warning
    # at the caller), then delegate to the warning-free implementation below.
    warnings.warn(
        "Please use torch.vmap instead of torch._vmap_internals.vmap. ",
        stacklevel=2,
    )
    return _vmap(func, in_dims, out_dims)
+
+
# A version of vmap but without the initial "experimental prototype" warning
def _vmap(
    func: Callable,
    in_dims: in_dims_t = 0,
    out_dims: out_dims_t = 0,
    allow_none_pass_through: bool = False,
) -> Callable:
    """Return a wrapper that runs ``func`` under vmap-mode batching.

    ``allow_none_pass_through`` is a temporary workaround for wrapping calls
    into the autograd engine (``autograd.grad``), which may return None for
    unused inputs; see
    https://github.com/facebookresearch/functorch/issues/159.
    """

    @functools.wraps(func)
    def wrapped(*args):
        _check_out_dims_is_int_or_int_tuple(out_dims, func)
        vmap_level = torch._C._vmapmode_increment_nesting()
        # The increment/decrement pair brackets the whole call so nesting
        # bookkeeping stays balanced even when `func` raises.
        try:
            batched_args, batch_size = _create_batched_inputs(
                in_dims, args, vmap_level, func
            )
            raw_outputs = func(*batched_args)
            if not allow_none_pass_through:
                _validate_outputs(raw_outputs, func)
            return _unwrap_batched(
                raw_outputs,
                out_dims,
                vmap_level,
                batch_size,
                func,
                allow_none_pass_through=allow_none_pass_through,
            )
        finally:
            torch._C._vmapmode_decrement_nesting()

    return wrapped
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_weights_only_unpickler.py b/env-llmeval/lib/python3.10/site-packages/torch/_weights_only_unpickler.py
new file mode 100644
index 0000000000000000000000000000000000000000..2acf049a384aa558321e5be171e4eb1fa7d30182
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_weights_only_unpickler.py
@@ -0,0 +1,304 @@
+# Unpickler restricted to loading only state dicts
+# Restrict constructing types to a list defined in _get_allowed_globals()
+# Restrict BUILD operation to `Tensor`, `Parameter` and `OrderedDict` types only
+# Restrict APPEND/APPENDS to `list`
+# In `GLOBALS` operation do not do class lookup by name, but rather rely on dictionary
+# defined by `_get_allowed_globals()` method, that contains:
+# - torch types (Storage, dtypes, Tensor, `torch.Size`),
+# - `torch._utils._rebuild` functions.
+# - `torch.nn.Parameter`
+# - `collections.OrderedDict`
+
+# Based of https://github.com/python/cpython/blob/main/Lib/pickle.py
+# Expected to be useful for loading PyTorch model weights
+# For example:
+# data = urllib.request.urlopen('https://download.pytorch.org/models/resnet50-0676ba61.pth').read()
+# buf = io.BytesIO(data)
+# weights = torch.load(buf, weights_only = True)
+
+import functools as _functools
+from collections import OrderedDict
+from pickle import (
+ APPEND,
+ APPENDS,
+ BINFLOAT,
+ BINGET,
+ BININT,
+ BININT1,
+ BININT2,
+ BINPERSID,
+ BINPUT,
+ BINUNICODE,
+ BUILD,
+ bytes_types,
+ decode_long,
+ EMPTY_DICT,
+ EMPTY_LIST,
+ EMPTY_SET,
+ EMPTY_TUPLE,
+ GLOBAL,
+ LONG1,
+ LONG_BINGET,
+ LONG_BINPUT,
+ MARK,
+ NEWFALSE,
+ NEWOBJ,
+ NEWTRUE,
+ NONE,
+ PROTO,
+ REDUCE,
+ SETITEM,
+ SETITEMS,
+ SHORT_BINSTRING,
+ STOP,
+ TUPLE,
+ TUPLE1,
+ TUPLE2,
+ TUPLE3,
+ UnpicklingError,
+)
+from struct import unpack
+from sys import maxsize
+from typing import Any, Dict, List
+
+import torch
+
+
+# Unpickling machinery
+@_functools.lru_cache(maxsize=1)
+def _get_allowed_globals():
+ rc: Dict[str, Any] = {
+ "collections.OrderedDict": OrderedDict,
+ "torch.nn.parameter.Parameter": torch.nn.Parameter,
+ "torch.serialization._get_layout": torch.serialization._get_layout,
+ "torch.Size": torch.Size,
+ "torch.Tensor": torch.Tensor,
+ }
+ # dtype
+ for t in [
+ torch.complex32,
+ torch.complex64,
+ torch.complex128,
+ torch.float8_e5m2,
+ torch.float8_e4m3fn,
+ torch.float16,
+ torch.float32,
+ torch.float64,
+ torch.int8,
+ torch.int16,
+ torch.int32,
+ torch.int64,
+ ]:
+ rc[str(t)] = t
+ # Tensor classes
+ for tt in torch._tensor_classes:
+ rc[f"{tt.__module__}.{tt.__name__}"] = tt
+ # Storage classes
+ for ts in torch._storage_classes:
+ if ts not in (torch.storage.TypedStorage, torch.storage.UntypedStorage):
+ # Wrap legacy storage types in a dummy class
+ rc[f"{ts.__module__}.{ts.__name__}"] = torch.serialization.StorageType(
+ ts.__name__
+ )
+ else:
+ rc[f"{ts.__module__}.{ts.__name__}"] = ts
+ # Rebuild functions
+ for f in [
+ torch._utils._rebuild_parameter,
+ torch._utils._rebuild_tensor,
+ torch._utils._rebuild_tensor_v2,
+ torch._utils._rebuild_tensor_v3,
+ torch._utils._rebuild_sparse_tensor,
+ torch._utils._rebuild_meta_tensor_no_storage,
+ torch._utils._rebuild_nested_tensor,
+ ]:
+ rc[f"torch._utils.{f.__name__}"] = f
+
+ # Handles Tensor Subclasses, Tensor's with attributes.
+ # NOTE: It calls into above rebuild functions for regular Tensor types.
+ rc["torch._tensor._rebuild_from_type_v2"] = torch._tensor._rebuild_from_type_v2
+ return rc
+
+
+class Unpickler:
    def __init__(self, file, *, encoding: str = "bytes"):
        # `encoding` controls how SHORT_BINSTRING payloads are decoded in
        # load(); the default "bytes" leaves them as raw bytes.
        self.encoding = encoding
        # Bind the file's read primitives directly; load() consumes the
        # pickle stream through these.
        self.readline = file.readline
        self.read = file.read
        # Memo table backing pickle's PUT/GET back-reference opcodes.
        self.memo: Dict[int, Any] = {}
+
+ def load(self):
+ """Read a pickled object representation from the open file.
+
+ Return the reconstituted object hierarchy specified in the file.
+ """
+ self.metastack = []
+ self.stack: List[Any] = []
+ self.append = self.stack.append
+ read = self.read
+ readline = self.readline
+ while True:
+ key = read(1)
+ if not key:
+ raise EOFError
+ assert isinstance(key, bytes_types)
+ # Risky operators
+ if key[0] == GLOBAL[0]:
+ module = readline()[:-1].decode("utf-8")
+ name = readline()[:-1].decode("utf-8")
+ full_path = f"{module}.{name}"
+ if full_path in _get_allowed_globals():
+ self.append(_get_allowed_globals()[full_path])
+ else:
+ raise RuntimeError(f"Unsupported class {full_path}")
+ elif key[0] == NEWOBJ[0]:
+ args = self.stack.pop()
+ cls = self.stack.pop()
+ if cls is not torch.nn.Parameter:
+ raise RuntimeError(f"Trying to instantiate unsupported class {cls}")
+ self.append(torch.nn.Parameter(*args))
+ elif key[0] == REDUCE[0]:
+ args = self.stack.pop()
+ func = self.stack[-1]
+ if func not in _get_allowed_globals().values():
+ raise RuntimeError(
+ f"Trying to call reduce for unrecognized function {func}"
+ )
+ self.stack[-1] = func(*args)
+ elif key[0] == BUILD[0]:
+ state = self.stack.pop()
+ inst = self.stack[-1]
+ if type(inst) is torch.Tensor:
+ # Legacy unpickling
+ inst.set_(*state)
+ elif type(inst) is torch.nn.Parameter:
+ inst.__setstate__(state)
+ elif type(inst) is OrderedDict:
+ inst.__dict__.update(state)
+ else:
+ raise RuntimeError(
+ f"Can only build Tensor, parameter or dict objects, but got {type(inst)}"
+ )
+ # Stack manipulation
+ elif key[0] == APPEND[0]:
+ item = self.stack.pop()
+ list_obj = self.stack[-1]
+ if type(list_obj) is not list:
+ raise RuntimeError(
+ f"Can only append to lists, but got {type(list_obj)}"
+ )
+ list_obj.append(item)
+ elif key[0] == APPENDS[0]:
+ items = self.pop_mark()
+ list_obj = self.stack[-1]
+ if type(list_obj) is not list:
+ raise RuntimeError(
+ f"Can only extend lists, but got {type(list_obj)}"
+ )
+ list_obj.extend(items)
+ elif key[0] == SETITEM[0]:
+ (v, k) = (self.stack.pop(), self.stack.pop())
+ self.stack[-1][k] = v
+ elif key[0] == SETITEMS[0]:
+ items = self.pop_mark()
+ for i in range(0, len(items), 2):
+ self.stack[-1][items[i]] = items[i + 1]
+ elif key[0] == MARK[0]:
+ self.metastack.append(self.stack)
+ self.stack = []
+ self.append = self.stack.append
+ elif key[0] == TUPLE[0]:
+ items = self.pop_mark()
+ self.append(tuple(items))
+ elif key[0] == TUPLE1[0]:
+ self.stack[-1] = (self.stack[-1],)
+ elif key[0] == TUPLE2[0]:
+ self.stack[-2:] = [(self.stack[-2], self.stack[-1])]
+ elif key[0] == TUPLE3[0]:
+ self.stack[-3:] = [(self.stack[-3], self.stack[-2], self.stack[-1])]
+ # Basic types construction
+ elif key[0] == NONE[0]:
+ self.append(None)
+ elif key[0] == NEWFALSE[0]:
+ self.append(False)
+ elif key[0] == NEWTRUE[0]:
+ self.append(True)
+ elif key[0] == EMPTY_TUPLE[0]:
+ self.append(())
+ elif key[0] == EMPTY_LIST[0]:
+ self.append([])
+ elif key[0] == EMPTY_DICT[0]:
+ self.append({})
+ elif key[0] == EMPTY_SET[0]:
+ self.append(set())
+ elif key[0] == BININT[0]:
+ self.append(unpack("d", self.read(8))[0])
+ elif key[0] == BINUNICODE[0]:
+ strlen = unpack(" maxsize:
+ raise RuntimeError("String is too long")
+ strval = str(read(strlen), "utf-8", "surrogatepass")
+ self.append(strval)
+ elif key[0] == SHORT_BINSTRING[0]:
+ strlen = read(1)[0]
+ strdata = read(strlen)
+ if self.encoding != "bytes":
+ strdata = strdata.decode(self.encoding, "strict")
+ self.append(strdata)
+ elif key[0] == BINPERSID[0]:
+ pid = self.stack.pop()
+ # Only allow persistent load of storage
+ if type(pid) is not tuple and not type(pid) is not int:
+ raise RuntimeError(
+ f"persistent_load id must be tuple or int, but got {type(pid)}"
+ )
+ if (
+ type(pid) is tuple
+ and len(pid) > 0
+ and torch.serialization._maybe_decode_ascii(pid[0]) != "storage"
+ ):
+ raise RuntimeError(
+ f"Only persistent_load of storage is allowed, but got {pid[0]}"
+ )
+ self.append(self.persistent_load(pid))
+ elif key[0] in [BINGET[0], LONG_BINGET[0]]:
+ idx = (read(1) if key[0] == BINGET[0] else unpack(".enabled and friends when running our
+# test suite, where it's very easy to forget to undo the change
+# later.
+__allow_nonbracketed_mutation_flag = True
+
+
def disable_global_flags():
    """Freeze backend flags: after this call, ContextProp setters reject direct
    assignment (the bracketed ``flags()`` context managers still work)."""
    global __allow_nonbracketed_mutation_flag
    __allow_nonbracketed_mutation_flag = False
+
+
def flags_frozen():
    """Return True once ``disable_global_flags()`` has been called, i.e. direct
    mutation of backend flags is no longer allowed."""
    return not __allow_nonbracketed_mutation_flag
+
+
@contextmanager
def __allow_nonbracketed_mutation():
    """Temporarily re-enable flag mutation (used by the ``flags()`` context
    managers in backend submodules), restoring the prior state on exit."""
    global __allow_nonbracketed_mutation_flag
    old = __allow_nonbracketed_mutation_flag
    __allow_nonbracketed_mutation_flag = True
    try:
        yield
    finally:
        __allow_nonbracketed_mutation_flag = old
+
+
class ContextProp:
    """Data descriptor backing module-level backend flags.

    Reads always delegate to ``getter``. Writes go through ``setter`` but are
    rejected with ``RuntimeError`` once ``disable_global_flags()`` has frozen
    the flags.
    """

    def __init__(self, getter, setter):
        self.getter = getter
        self.setter = setter

    def __get__(self, obj, objtype):
        return self.getter()

    def __set__(self, obj, val):
        if flags_frozen():
            # `obj` is the PropModule instance, so __name__ names the module.
            raise RuntimeError(
                "not allowed to set %s flags "
                "after disable_global_flags; please use flags() context manager instead"
                % obj.__name__
            )
        self.setter(val)
+
+
class PropModule(types.ModuleType):
    # Module stand-in that proxies unknown attribute reads to a wrapped module
    # `m`; subclasses add ContextProp class attributes to intercept flag
    # access (see MkldnnModule below).
    def __init__(self, m, name):
        super().__init__(name)
        self.m = m

    def __getattr__(self, attr):
        # Calls m.__getattribute__ directly rather than getattr(); unlike
        # getattr this does NOT fall back to any __getattr__ hook on the
        # wrapped module — presumably intentional, verify before changing.
        return self.m.__getattribute__(attr)
+
+
+from torch.backends import (
+ cpu as cpu,
+ cuda as cuda,
+ cudnn as cudnn,
+ mkl as mkl,
+ mkldnn as mkldnn,
+ mps as mps,
+ openmp as openmp,
+ quantized as quantized,
+)
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/mkldnn/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/backends/mkldnn/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b2f936e9791b42d7e7943ff70d1ff627e2a8fe8d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/backends/mkldnn/__init__.py
@@ -0,0 +1,91 @@
+import sys
+from contextlib import contextmanager
+
+import torch
+from torch.backends import __allow_nonbracketed_mutation, ContextProp, PropModule
+
+
+def is_available():
+ r"""Return whether PyTorch is built with MKL-DNN support."""
+ return torch._C._has_mkldnn
+
+
# Verbosity levels accepted by the `verbose` context manager below and
# forwarded to torch._C._verbose.mkldnn_set_verbose.
VERBOSE_OFF = 0
VERBOSE_ON = 1
VERBOSE_ON_CREATION = 2
+
+
class verbose:
    """
    On-demand oneDNN (former MKL-DNN) verbosing functionality.

    To make it easier to debug performance issues, oneDNN can dump verbose
    messages containing information like kernel size, input data size and
    execution duration while executing the kernel. The verbosing functionality
    can be invoked via an environment variable named `DNNL_VERBOSE`. However,
    this methodology dumps messages in all steps. Those are a large amount of
    verbose messages. Moreover, for investigating the performance issues,
    generally taking verbose messages for one single iteration is enough.
    This on-demand verbosing functionality makes it possible to control scope
    for verbose message dumping. In the following example, verbose messages
    will be dumped out for the second inference only.

    .. highlight:: python
    .. code-block:: python

        import torch
        model(data)
        with torch.backends.mkldnn.verbose(torch.backends.mkldnn.VERBOSE_ON):
            model(data)

    Args:
        level: Verbose level
            - ``VERBOSE_OFF``: Disable verbosing
            - ``VERBOSE_ON``:  Enable verbosing
            - ``VERBOSE_ON_CREATION``: Enable verbosing, including oneDNN kernel creation
    """

    def __init__(self, level):
        self.level = level

    def __enter__(self):
        if self.level == VERBOSE_OFF:
            # Nothing to enable, but still return self so that
            # `with ... as v` binds the context object consistently.
            # (Previously this path implicitly returned None.)
            return self
        st = torch._C._verbose.mkldnn_set_verbose(self.level)
        assert (
            st
        ), "Failed to set MKLDNN into verbose mode. Please consider to disable this verbose scope."
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always restore quiet mode on exit; never suppress exceptions.
        torch._C._verbose.mkldnn_set_verbose(VERBOSE_OFF)
        return False
+
+
def set_flags(_enabled):
    """Set the global mkldnn-enabled flag and return the previous value as a
    1-tuple (the shape the ``flags`` context manager saves and restores)."""
    previous = (torch._C._get_mkldnn_enabled(),)
    torch._C._set_mkldnn_enabled(_enabled)
    return previous
+
+
@contextmanager
def flags(enabled=False):
    """Context manager that pins the mkldnn-enabled flag for the duration of
    the ``with`` block and restores the previous value afterwards."""
    with __allow_nonbracketed_mutation():
        saved = set_flags(enabled)
    try:
        yield
    finally:
        with __allow_nonbracketed_mutation():
            set_flags(saved[0])
+
+
class MkldnnModule(PropModule):
    # Replacement module for torch.backends.mkldnn (installed via the
    # sys.modules swap below) so `torch.backends.mkldnn.enabled` reads and
    # writes go through the ContextProp descriptor.
    def __init__(self, m, name):
        super().__init__(m, name)

    # Proxies torch._C's mkldnn flag; assignment is refused after
    # torch.backends.disable_global_flags() has been called.
    enabled = ContextProp(torch._C._get_mkldnn_enabled, torch._C._set_mkldnn_enabled)
+
+
+# Cool stuff from torch/backends/cudnn/__init__.py and
+# https://stackoverflow.com/questions/2447353/getattr-on-a-module/7668273#7668273
+sys.modules[__name__] = MkldnnModule(sys.modules[__name__], __name__)
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/mkldnn/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/backends/mkldnn/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4e63df73d2cca235fe2b9c446ea7a0277def49c4
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/backends/mkldnn/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/mps/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/backends/mps/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..513f1bd06346993222e905e3134b14d3993403ce
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/backends/mps/__init__.py
@@ -0,0 +1,47 @@
+from functools import lru_cache as _lru_cache
+
+import torch
+from ...library import Library as _Library
+
+__all__ = ["is_built", "is_available", "is_macos13_or_newer"]
+
+
+def is_built() -> bool:
+ r"""Return whether PyTorch is built with MPS support.
+
+ Note that this doesn't necessarily mean MPS is available; just that
+ if this PyTorch binary were run a machine with working MPS drivers
+ and devices, we would be able to use it.
+ """
+ return torch._C._has_mps
+
+
@_lru_cache
def is_available() -> bool:
    r"""Return True if an MPS device is currently usable (result is cached)."""
    return torch._C._mps_is_available()
+
+
+@_lru_cache
+def is_macos13_or_newer(minor: int = 0) -> bool:
+ r"""Return a bool indicating whether MPS is running on MacOS 13 or newer."""
+ return torch._C._mps_is_on_macos_13_or_newer(minor)
+
+
+_lib = None
+
+
def _init():
    r"""Register prims as implementation of var_mean and group_norm.

    No-op unless this build has MPS support; runs at most once (the
    module-level ``_lib`` handle doubles as the "already done" marker and
    keeps the registrations alive).
    """
    global _lib
    if not is_built() or _lib is not None:
        return
    # Imported lazily so non-MPS builds never pay for these modules.
    from ..._decomp.decompositions import (
        native_group_norm_backward as _native_group_norm_backward,
    )
    from ..._refs import native_group_norm as _native_group_norm, var_mean as _var_mean

    _lib = _Library("aten", "IMPL")
    _lib.impl("var_mean.correction", _var_mean, "MPS")
    _lib.impl("native_group_norm", _native_group_norm, "MPS")
    _lib.impl("native_group_norm_backward", _native_group_norm_backward, "MPS")
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/mps/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/backends/mps/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ab55392b59b0219ffeb318aab41d4d5abff3fe89
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/backends/mps/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/openmp/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/backends/openmp/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4a7fcca12d0c8be54a3a1d733facf2cf9f2e6aaa
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/backends/openmp/__init__.py
@@ -0,0 +1,6 @@
+import torch
+
+
def is_available():
    r"""Report whether this PyTorch binary was compiled with OpenMP support."""
    return torch._C.has_openmp
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/openmp/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/backends/openmp/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2e2592655ed492a569b6c8eb024015591fb3611e
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/backends/openmp/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/opt_einsum/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/backends/opt_einsum/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e66cd37542d124ef2227d40a9d0f88d277a0660
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/backends/opt_einsum/__init__.py
@@ -0,0 +1,110 @@
+import sys
+import warnings
+from contextlib import contextmanager
+from functools import lru_cache as _lru_cache
+from typing import Any
+
+from torch.backends import __allow_nonbracketed_mutation, ContextProp, PropModule
+
+try:
+ import opt_einsum as _opt_einsum # type: ignore[import]
+except ImportError:
+ _opt_einsum = None
+
+
@_lru_cache
def is_available() -> bool:
    r"""Return True when the opt_einsum package imported successfully (cached)."""
    return _opt_einsum is not None
+
+
def get_opt_einsum() -> Any:
    r"""Return the imported opt_einsum package, or None when it is unavailable."""
    return _opt_einsum
+
+
def _set_enabled(_enabled: bool) -> None:
    """Setter behind the ``enabled`` ContextProp; refuses to enable path
    optimization when the opt_einsum package is missing."""
    global enabled
    if _enabled and not is_available():
        raise ValueError(
            f"opt_einsum is not available, so setting `enabled` to {_enabled} will not reap "
            "the benefits of calculating an optimal path for einsum. torch.einsum will "
            "fall back to contracting from left to right. To enable this optimal path "
            "calculation, please install opt-einsum."
        )
    enabled = _enabled
+
+
def _get_enabled() -> bool:
    """Getter behind the ``enabled`` ContextProp."""
    return enabled
+
+
def _set_strategy(_strategy: str) -> None:
    """Setter behind the ``strategy`` ContextProp.

    Only meaningful when opt_einsum is both installed and enabled; the value
    must be one of "auto", "greedy", "optimal".
    """
    global strategy
    if not is_available():
        raise ValueError(
            f"opt_einsum is not available, so setting `strategy` to {_strategy} will not be meaningful. "
            "torch.einsum will bypass path calculation and simply contract from left to right. "
            "Please install opt_einsum or unset `strategy`."
        )
    if not enabled:
        raise ValueError(
            f"opt_einsum is not enabled, so setting a `strategy` to {_strategy} will not be meaningful. "
            "torch.einsum will bypass path calculation and simply contract from left to right. "
            "Please set `enabled` to `True` as well or unset `strategy`."
        )
    if _strategy not in ("auto", "greedy", "optimal"):
        raise ValueError(
            f"`strategy` must be one of the following: [auto, greedy, optimal] but is {_strategy}"
        )
    strategy = _strategy
+
+
def _get_strategy() -> str:
    """Getter behind the ``strategy`` ContextProp."""
    return strategy
+
+
def set_flags(_enabled=None, _strategy=None):
    """Apply any non-None flag values and return the previous
    ``(enabled, strategy)`` pair so callers can restore it later."""
    saved = (enabled, strategy if is_available() else None)
    if _enabled is not None:
        _set_enabled(_enabled)
    if _strategy is not None:
        _set_strategy(_strategy)
    return saved
+
+
@contextmanager
def flags(enabled=None, strategy=None):
    """Context manager that pins ``enabled``/``strategy`` for the ``with``
    block and restores the previous values on exit."""
    with __allow_nonbracketed_mutation():
        saved = set_flags(enabled, strategy)
    try:
        yield
    finally:
        # recover the previous values
        with __allow_nonbracketed_mutation():
            set_flags(*saved)
+
+
+# The magic here is to allow us to intercept code like this:
+#
+# torch.backends.opt_einsum.enabled = True
+
+
class OptEinsumModule(PropModule):
    # Replacement module object for torch.backends.opt_einsum, installed by
    # the sys.modules swap below; attribute reads fall through
    # PropModule.__getattr__ to the original module's globals.
    def __init__(self, m, name):
        super().__init__(m, name)

        # NOTE(review): these ContextProp globals appear to be overwritten by
        # the plain `enabled = ...` / `strategy = ...` assignments at the
        # bottom of this file, which execute after the sys.modules swap —
        # confirm whether the descriptor-based validation is reachable here.
        global enabled
        enabled = ContextProp(_get_enabled, _set_enabled)
        global strategy
        strategy = None
        # A strategy descriptor only makes sense when opt_einsum is importable.
        if is_available():
            strategy = ContextProp(_get_strategy, _set_strategy)
+
+
+# This is the sys.modules replacement trick, see
+# https://stackoverflow.com/questions/2447353/getattr-on-a-module/7668273#7668273
+sys.modules[__name__] = OptEinsumModule(sys.modules[__name__], __name__)
+
+enabled = True if is_available() else False
+strategy = "auto" if is_available() else None
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/opt_einsum/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/backends/opt_einsum/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3b11266783974e28bccd8a7c6d4a12d34e8d7b38
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/backends/opt_einsum/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/xnnpack/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/backends/xnnpack/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c26dc11deb47b96d35611f52c813e8295606c298
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/backends/xnnpack/__init__.py
@@ -0,0 +1,28 @@
+import sys
+import types
+
+import torch
+
+
+class _XNNPACKEnabled:
+ def __get__(self, obj, objtype):
+ return torch._C._is_xnnpack_enabled()
+
+ def __set__(self, obj, val):
+ raise RuntimeError("Assignment not supported")
+
+
class XNNPACKEngine(types.ModuleType):
    # Module stand-in for torch.backends.xnnpack (see the sys.modules swap
    # below): `enabled` becomes a read-only descriptor while everything else
    # is proxied to the original module.
    def __init__(self, m, name):
        super().__init__(name)
        self.m = m

    def __getattr__(self, attr):
        # Direct __getattribute__ call (not getattr): does NOT fall back to a
        # __getattr__ hook on the wrapped module — verify before changing.
        return self.m.__getattribute__(attr)

    # Reads report torch._C._is_xnnpack_enabled(); assignment raises.
    enabled = _XNNPACKEnabled()
+
+
+# This is the sys.modules replacement trick, see
+# https://stackoverflow.com/questions/2447353/getattr-on-a-module/7668273#7668273
+sys.modules[__name__] = XNNPACKEngine(sys.modules[__name__], __name__)
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/backends/xnnpack/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/backends/xnnpack/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c14825db2be97d8c1809b674610d3bcb70bc8fdb
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/backends/xnnpack/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/functional.py b/env-llmeval/lib/python3.10/site-packages/torch/functional.py
new file mode 100644
index 0000000000000000000000000000000000000000..a6c124177a0c61d409c7a58b0a165fc1d89ca4dd
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/functional.py
@@ -0,0 +1,1978 @@
+from typing import (
+ List, Tuple, Optional, Union, Any, Sequence, TYPE_CHECKING
+)
+import operator
+import itertools
+
+import torch
+from torch._C import _add_docstr
+import torch.nn.functional as F
+from ._lowrank import svd_lowrank, pca_lowrank
+from .overrides import (
+ has_torch_function, has_torch_function_unary, has_torch_function_variadic,
+ handle_torch_function)
+from ._jit_internal import boolean_dispatch
+from ._jit_internal import _overload as overload
+
+Tensor = torch.Tensor
+from torch import _VF
+
# Public API of this module; these names are re-exported as torch.* functions.
__all__ = [
    'atleast_1d',
    'atleast_2d',
    'atleast_3d',
    'align_tensors',
    'broadcast_shapes',
    'broadcast_tensors',
    'cartesian_prod',
    'block_diag',
    'cdist',
    'chain_matmul',
    'einsum',
    'istft',
    'lu',
    'norm',
    'meshgrid',
    'pca_lowrank',
    'split',
    'stft',
    'svd_lowrank',
    'tensordot',
    'unique',
    'unique_consecutive',
    'unravel_index',
]
+
+
def broadcast_tensors(*tensors):
    r"""broadcast_tensors(*tensors) -> List of Tensors

    Broadcasts the given tensors according to :ref:`broadcasting-semantics`.

    Args:
        *tensors: any number of tensors of the same type

    .. warning::

        Broadcast outputs may alias a single memory location for multiple
        elements, so in-place (especially vectorized) writes can misbehave.
        Clone the results before writing to them.

    Example::

        >>> x = torch.arange(3).view(1, 3)
        >>> y = torch.arange(2).view(2, 1)
        >>> a, b = torch.broadcast_tensors(x, y)
        >>> a.size()
        torch.Size([2, 3])
        >>> a
        tensor([[0, 1, 2],
                [0, 1, 2]])
    """
    # Python-level wrapper so the variadic call cooperates with __torch_function__.
    if has_torch_function(tensors):
        return handle_torch_function(broadcast_tensors, tensors, *tensors)
    return _VF.broadcast_tensors(tensors)  # type: ignore[attr-defined]
+
+
def broadcast_shapes(*shapes):
    r"""broadcast_shapes(*shapes) -> Size

    Similar to :func:`broadcast_tensors` but operates on shapes directly,
    skipping the intermediate tensors that
    ``torch.broadcast_tensors(*map(torch.empty, shapes))[0].shape`` would
    create.  Useful e.g. for broadcasting batch shapes of mean vectors
    against covariance matrices.

    Example::

        >>> torch.broadcast_shapes((2,), (3, 1), (1, 1, 1))
        torch.Size([1, 3, 2])

    Args:
        \*shapes (torch.Size): Shapes of tensors.

    Returns:
        shape (torch.Size): A shape compatible with all input shapes.

    Raises:
        RuntimeError: If shapes are incompatible.
    """
    # TODO Move this to C++ once the jit has better support for torch.Size.
    if torch.jit.is_tracing():
        # The eager implementation below would hardcode concrete sizes into a
        # trace, breaking replays — so broadcast real (empty) tensors instead
        # and read the shape off the result.
        with torch.no_grad():
            scalar = torch.zeros((), device="cpu")
            tensors = [scalar.expand(shape) for shape in shapes]
            tensors = broadcast_tensors(*tensors)
            return tensors[0].shape

    # Rank of the result: a bare int counts as a 1-d shape.
    max_len = 0
    for shape in shapes:
        if isinstance(shape, (int, torch.SymInt)):
            max_len = max(max_len, 1)
        elif isinstance(shape, (tuple, list)):
            max_len = max(max_len, len(shape))
    result = [1] * max_len

    for shape in shapes:
        if isinstance(shape, (int, torch.SymInt)):
            shape = (shape,)
        if not isinstance(shape, (tuple, list)):
            raise RuntimeError("Input shapes should be of type ints, a tuple of ints, or a list of ints, got ", shape)
        # Walk dimensions right-to-left, folding each into the accumulator.
        for i in range(-1, -1 - len(shape), -1):
            if shape[i] < 0:
                raise RuntimeError(f"Trying to create tensor with negative dimension ({shape[i]}): ({shape[i]})")
            if shape[i] == 1 or shape[i] == result[i]:
                continue
            if result[i] != 1:
                raise RuntimeError("Shape mismatch: objects cannot be broadcast to a single shape")
            result[i] = shape[i]
    return torch.Size(result)
+
+
def split(
    tensor: Tensor, split_size_or_sections: Union[int, List[int]], dim: int = 0
) -> Tuple[Tensor, ...]:
    r"""Splits the tensor into chunks. Each chunk is a view of the original tensor.

    With an integer :attr:`split_size_or_sections`, :attr:`tensor` is divided
    into equally sized chunks along :attr:`dim` (the last chunk is smaller if
    the dimension's size is not divisible by :attr:`split_size`).  With a
    list, one chunk is produced per entry, the entry giving that chunk's size
    in :attr:`dim`.

    Args:
        tensor (Tensor): tensor to split.
        split_size_or_sections (int) or (list(int)): size of a single chunk or
            list of sizes for each chunk
        dim (int): dimension along which to split the tensor.

    Example::

        >>> a = torch.arange(10).reshape(5, 2)
        >>> torch.split(a, 2)
        (tensor([[0, 1],
                 [2, 3]]),
         tensor([[4, 5],
                 [6, 7]]),
         tensor([[8, 9]]))
        >>> torch.split(a, [1, 4])
        (tensor([[0, 1]]),
         tensor([[2, 3],
                 [4, 5],
                 [6, 7],
                 [8, 9]]))
    """
    if has_torch_function_unary(tensor):
        return handle_torch_function(
            split, (tensor,), tensor, split_size_or_sections, dim=dim)
    # Overwriting reason:
    # The int-vs-list dispatch to the two ATen functions lives on the Tensor
    # method in _tensor.py, so simply delegate to it here.
    return tensor.split(split_size_or_sections, dim)
+
+
def einsum(*args: Any) -> Tensor:
    r"""einsum(equation, *operands) -> Tensor

    Sums the product of the elements of the input :attr:`operands` along
    dimensions specified using a notation based on the Einstein summation
    convention.

    Every dimension of each operand is labeled with a subscript (letters in
    `[a-zA-Z]`), operands separated by commas, e.g. ``'ij,jk'``.  Dimensions
    sharing a subscript must be broadcastable; a subscript repeated within
    one operand takes that operand's diagonal.  Subscripts appearing exactly
    once form the output (sorted alphabetically) unless an explicit output is
    given after ``'->'``; dimensions whose subscripts are absent from the
    output are summed over.  For example, matrix multiplication is
    ``torch.einsum("ij,jk->ik", A, B)``.  An ellipsis (``'...'``) broadcasts
    the dimensions it covers (and, unlike NumPy, may be summed over when
    omitted from an explicit output).

    A sublist format is also accepted: each operand is followed by a list of
    integer subscripts in `[0, 52)` (``Ellipsis`` allowed), with an optional
    trailing output sublist, e.g.
    ``torch.einsum(op1, sublist1, op2, sublist2, ..., [sublist_out])``.

    When opt_einsum is available and enabled
    (``torch.backends.opt_einsum.enabled``), contractions of three or more
    operands are reordered per ``torch.backends.opt_einsum.strategy``
    ('auto', 'greedy' or 'optimal') to save time or memory; otherwise the
    contraction proceeds left to right.

    Args:
        equation (str): The subscripts for the Einstein summation.
        operands (List[Tensor]): The tensors to compute the Einstein summation of.

    Examples::

        >>> As = torch.randn(3, 2, 5)
        >>> Bs = torch.randn(3, 5, 4)
        >>> torch.einsum('bij,bjk->bik', As, Bs).shape
        torch.Size([3, 2, 4])
        >>> torch.einsum('...ij->...ji', torch.randn(2, 3, 4, 5)).shape
        torch.Size([2, 3, 5, 4])
    """
    import torch.backends.opt_einsum as opt_einsum
    # Variadic Python wrapper around the ATen implementation.
    if len(args) < 2:
        raise ValueError('einsum(): must specify the equation string and at least one operand, '
                         'or at least one operand and its subscripts list')

    if isinstance(args[0], torch.Tensor):
        # Sublist format: operands interleaved with their subscript lists,
        # optionally followed by an output sublist.  Rebuild the string
        # equation and collect the operands.
        def parse_subscript(n: int) -> str:
            if n == Ellipsis:
                return '...'
            if 0 <= n < 26:
                return chr(ord('A') + n)
            if 26 <= n < 52:
                return chr(ord('a') + n - 26)
            raise ValueError('einsum(): subscript in subscript list is not within the valid range [0, 52)')

        equation = ','.join(''.join(parse_subscript(s) for s in l) for l in args[1::2])

        if len(args) % 2 == 1:
            # An odd argument count means an explicit output sublist was given.
            equation += '->' + ''.join(parse_subscript(s) for s in args[-1])
            operands = args[:-1:2]
        else:
            operands = args[::2]
    else:
        equation = args[0]
        operands = args[1:]

    if has_torch_function(operands):
        return handle_torch_function(einsum, operands, equation, *operands)

    if len(operands) == 1 and isinstance(operands[0], (list, tuple)):
        # Legacy interface: all operands passed as one list argument.
        # Recurse in case the list holds values with __torch_function__
        # (the original implementation omitted this hop).
        return einsum(equation, *operands[0])

    if len(operands) <= 2 or not opt_einsum.enabled:
        # Contracting at most once needs no path planning, and the user may
        # have disabled opt_einsum explicitly.
        return _VF.einsum(equation, operands)  # type: ignore[attr-defined]

    path = None
    if opt_einsum.is_available():
        _opt_einsum = opt_einsum.get_opt_einsum()
        tupled_path = _opt_einsum.contract_path(equation, *operands, optimize=opt_einsum.strategy)[0]
        # The C++ side expects a flat index list rather than pairs.
        path = list(itertools.chain.from_iterable(tupled_path))
    return _VF.einsum(equation, operands, path=path)  # type: ignore[attr-defined]
+
+
+# This wrapper exists to support variadic args.
if TYPE_CHECKING:
    # The JIT doesn't understand Union, so only add type annotation for mypy
    def meshgrid(*tensors: Union[Tensor, List[Tensor]],
                 indexing: Optional[str] = None) -> Tuple[Tensor, ...]:
        return _meshgrid(*tensors, indexing=indexing)
else:
    def meshgrid(*tensors, indexing: Optional[str] = None) -> Tuple[Tensor, ...]:
        r"""Creates grids of coordinates specified by the 1D inputs in `attr`:tensors.

        Given :math:`N` 1-D tensors :math:`T_0 \ldots T_{N-1}` of sizes
        :math:`S_0 \ldots S_{N-1}` (0-D inputs are treated as 1-D tensors of
        one element), this produces :math:`N` tensors
        :math:`G_0 \ldots G_{N-1}`, each of shape
        :math:`(S_0, ..., S_{N-1})`, where :math:`G_i` is :math:`T_i`
        expanded to the full result shape.  Commonly used to build
        coordinate grids, e.g. for plotting.

        .. warning::
            `torch.meshgrid(*tensors)` currently behaves like
            `numpy.meshgrid(*arrays, indexing='ij')`; the default is planned
            to move to `indexing='xy'` in the future.
            https://github.com/pytorch/pytorch/issues/50276 tracks
            this migration to NumPy's behavior.

        .. seealso::

            :func:`torch.cartesian_prod` yields the same pairings collected
            into a single tensor of vectors.

        Args:
            tensors (list of Tensor): list of scalars or 1 dimensional tensors. Scalars will be
                treated as tensors of size :math:`(1,)` automatically

            indexing: (str, optional): the indexing mode, either "xy"
                or "ij", defaults to "ij". See warning for future changes.

                With "xy" the first output dimension follows the second
                input's cardinality and the second follows the first's;
                with "ij" the dimensions follow the inputs' order.

        Returns:
            seq (sequence of Tensors): :math:`N` tensors, each of shape
            :math:`(S_0, ..., S_{N-1})`.

        Example::

            >>> x = torch.tensor([1, 2, 3])
            >>> y = torch.tensor([4, 5, 6])
            >>> grid_x, grid_y = torch.meshgrid(x, y, indexing='ij')
            >>> grid_x
            tensor([[1, 1, 1],
                    [2, 2, 2],
                    [3, 3, 3]])
            >>> grid_y
            tensor([[4, 5, 6],
                    [4, 5, 6],
                    [4, 5, 6]])

            The pairings across the grid — (1, 4), (1, 5), ..., (3, 6) —
            match the cartesian product:
            >>> torch.equal(torch.cat(tuple(torch.dstack([grid_x, grid_y]))),
            ...             torch.cartesian_prod(x, y))
            True
        """
        return _meshgrid(*tensors, indexing=indexing)
+
+
+def _meshgrid(*tensors, indexing: Optional[str]):
+ if has_torch_function(tensors):
+ return handle_torch_function(meshgrid, tensors, *tensors, indexing=indexing)
+ if len(tensors) == 1 and isinstance(tensors[0], (list, tuple)):
+ # the old interface of passing the operands as one list argument
+ tensors = tensors[0] # type: ignore[assignment]
+
+ # Continue allowing call of old method that takes no indexing
+ # kwarg for forward compatibility reasons.
+ #
+ # Remove this two weeks after landing.
+ kwargs = {} if indexing is None else {'indexing': indexing}
+ return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]
+
+
def stft(input: Tensor, n_fft: int, hop_length: Optional[int] = None,
         win_length: Optional[int] = None, window: Optional[Tensor] = None,
         center: bool = True, pad_mode: str = 'reflect', normalized: bool = False,
         onesided: Optional[bool] = None,
         return_complex: Optional[bool] = None) -> Tensor:
    r"""Short-time Fourier transform (STFT).

    Computes the Fourier transform of short overlapping windows of
    :attr:`input` (a 1-D time sequence or a 2-D batch of sequences), giving
    the frequency content of the signal as it changes over time.  Ignoring
    the optional batch dimension:

    .. math::
        X[\omega, m] = \sum_{k = 0}^{\text{win\_length-1}}%
            \text{window}[k]\ \text{input}[m \times \text{hop\_length} + k]\ %
            \exp\left(- j \frac{2 \pi \cdot \omega k}{\text{n\_fft}}\right),

    where :math:`m` indexes the sliding window and :math:`\omega` the
    frequency (:math:`0 \leq \omega < \text{n\_fft}` for ``onesided=False``,
    or :math:`0 \leq \omega < \lfloor \text{n\_fft} / 2 \rfloor + 1` for
    ``onesided=True``).  The interface is modeled after (but is *not* a
    drop-in replacement for) librosa_'s stft.

    .. _librosa: https://librosa.org/doc/latest/generated/librosa.stft.html

    .. warning::
        Since 1.8.0 :attr:`return_complex` must be given explicitly for real
        inputs; ``return_complex=False`` is deprecated in favor of ``True``
        (use :func:`torch.view_as_real` to recover the real layout).

    .. warning::
        From version 2.1 a warning is emitted when no :attr:`window` is
        specified (a rectangular window is assumed, which may cause
        artifacts); prefer a tapered window such as :func:`torch.hann_window`.

    Args:
        input (Tensor): the input tensor of shape `(B?, L)` where `B?` is an
            optional batch dimension
        n_fft (int): size of Fourier transform
        hop_length (int, optional): the distance between neighboring sliding
            window frames. Default: ``None`` (treated as ``floor(n_fft / 4)``)
        win_length (int, optional): the size of window frame and STFT filter.
            Default: ``None`` (treated as equal to :attr:`n_fft`)
        window (Tensor, optional): optional 1-D window of length `<= n_fft`,
            zero-padded on both sides to :attr:`n_fft` when shorter.
            Default: ``None`` (treated as a window of all ones)
        center (bool, optional): whether to pad :attr:`input` on both sides so
            that the :math:`t`-th frame is centered at time
            :math:`t \times \text{hop\_length}`. Default: ``True``
        pad_mode (str, optional): padding method used when :attr:`center` is
            ``True``; see :meth:`torch.nn.functional.pad`. Default: ``"reflect"``
        normalized (bool, optional): whether the result is multiplied by
            :math:`(\text{frame\_length})^{-0.5}`. Default: ``False``
        onesided (bool, optional): whether to return only the non-redundant
            half of the spectrum (conjugate symmetry of real input; not
            possible for complex input or window).
            Default: ``True`` for real :attr:`input` and :attr:`window`,
            ``False`` otherwise.
        return_complex (bool, optional): whether to return a complex tensor,
            or a real tensor with an extra last dimension for the real and
            imaginary components.

            .. versionchanged:: 2.0
                ``return_complex`` is now a required argument for real inputs,
                as the default is being transitioned to ``True``.

            .. deprecated:: 2.0
                ``return_complex=False`` is deprecated, instead use
                ``return_complex=True``. :func:`torch.view_as_real` on the
                output recovers the deprecated format.

    Returns:
        Tensor: the STFT result of shape `(B?, N, T, C?)` where
        - `B?` is an optional batch dimension from the input
        - `N` is the number of frequency samples, `(n_fft // 2) + 1` for
          `onesided=True`, or otherwise `n_fft`
        - `T` is the number of frames, `1 + L // hop_length` for
          `center=True`, or `1 + (L - n_fft) // hop_length` otherwise
        - `C?` is an optional length-2 real/imaginary dimension, present
          when `return_complex=False`.
    """
    if has_torch_function_unary(input):
        return handle_torch_function(
            stft, (input,), input, n_fft, hop_length=hop_length, win_length=win_length,
            window=window, center=center, pad_mode=pad_mode, normalized=normalized,
            onesided=onesided, return_complex=return_complex)
    # NOTE: Do not edit. This code will be removed once the forward-compatibility
    # period is over for PR #73432
    if center:
        signal_dim = input.dim()
        extended_shape = [1] * (3 - signal_dim) + list(input.size())
        pad = int(n_fft // 2)
        input = F.pad(input.view(extended_shape), [pad, pad], pad_mode)
        input = input.view(input.shape[-signal_dim:])
    return _VF.stft(input, n_fft, hop_length, win_length, window,  # type: ignore[attr-defined]
                    normalized, onesided, return_complex)
+
+
# Documentation-only binding: attach the docstring below to the C++-backed
# torch.istft via _add_docstr (there is no Python implementation here).
istft = _add_docstr(
    torch.istft,
    "istft(input, n_fft, hop_length=None, win_length=None, window=None, center=True, "
    "normalized=False, onesided=None, length=None, return_complex=False) -> Tensor:\n"
    r"""
Inverse short time Fourier Transform. This is expected to be the inverse of :func:`~torch.stft`.

.. warning::
    From version 2.1, a warning will be provided if a :attr:`window` is
    not specified. In a future release, this attribute will be required.
    Please provide the same window used in the stft call.

It has the same parameters (+ additional optional parameter of :attr:`length`) and it should return the
least squares estimation of the original signal. The algorithm will check using the NOLA condition (
nonzero overlap).

Important consideration in the parameters :attr:`window` and :attr:`center` so that the envelop
created by the summation of all the windows is never zero at certain point in time. Specifically,
:math:`\sum_{t=-\infty}^{\infty} |w|^2[n-t\times hop\_length] \cancel{=} 0`.

Since :func:`~torch.stft` discards elements at the end of the signal if they do not fit in a frame,
``istft`` may return a shorter signal than the original signal (can occur if :attr:`center` is False
since the signal isn't padded). If `length` is given in the arguments and is longer than expected,
``istft`` will pad zeros to the end of the returned signal.

If :attr:`center` is ``True``, then there will be padding e.g. ``'constant'``, ``'reflect'``, etc.
Left padding can be trimmed off exactly because they can be calculated but right padding cannot be
calculated without additional information.

Example: Suppose the last window is:
``[17, 18, 0, 0, 0]`` vs ``[18, 0, 0, 0, 0]``

The :attr:`n_fft`, :attr:`hop_length`, :attr:`win_length` are all the same which prevents the calculation
of right padding. These additional values could be zeros or a reflection of the signal so providing
:attr:`length` could be useful. If :attr:`length` is ``None`` then padding will be aggressively removed
(some loss of signal).

[1] D. W. Griffin and J. S. Lim, "Signal estimation from modified short-time Fourier transform,"
IEEE Trans. ASSP, vol.32, no.2, pp.236-243, Apr. 1984.

Args:
    input (Tensor): The input tensor. Expected to be in the format of :func:`~torch.stft`,
        output. That is a complex tensor of shape `(B?, N, T)` where

        - `B?` is an optional batch dimension
        - `N` is the number of frequency samples, `(n_fft // 2) + 1`
          for onesided input, or otherwise `n_fft`.
        - `T` is the number of frames, `1 + length // hop_length` for centered stft,
          or `1 + (length - n_fft) // hop_length` otherwise.

        .. versionchanged:: 2.0
            Real datatype inputs are no longer supported. Input must now have a
            complex datatype, as returned by ``stft(..., return_complex=True)``.
    n_fft (int): Size of Fourier transform
    hop_length (Optional[int]): The distance between neighboring sliding window frames.
        (Default: ``n_fft // 4``)
    win_length (Optional[int]): The size of window frame and STFT filter. (Default: ``n_fft``)
    window (Optional[torch.Tensor]): The optional window function.
        Shape must be 1d and `<= n_fft`
        (Default: ``torch.ones(win_length)``)
    center (bool): Whether :attr:`input` was padded on both sides so that the :math:`t`-th frame is
        centered at time :math:`t \times \text{hop\_length}`.
        (Default: ``True``)
    normalized (bool): Whether the STFT was normalized. (Default: ``False``)
    onesided (Optional[bool]): Whether the STFT was onesided.
        (Default: ``True`` if `n_fft != fft_size` in the input size)
    length (Optional[int]): The amount to trim the signal by (i.e. the
        original signal length). Defaults to `(T - 1) * hop_length` for
        centered stft, or `n_fft + (T - 1) * hop_length` otherwise, where `T`
        is the number of input frames.
    return_complex (Optional[bool]):
        Whether the output should be complex, or if the input should be
        assumed to derive from a real signal and window.
        Note that this is incompatible with ``onesided=True``.
        (Default: ``False``)

Returns:
    Tensor: Least squares estimation of the original signal of shape `(B?, length)` where
        `B?` is an optional batch dimension from the input tensor.
""")
+
+
if TYPE_CHECKING:
    # These _impl functions return a variable number of tensors as output with
    # __torch_function__; tuple unpacking is done already rather than being
    # done by the caller of the _impl function
    _unique_impl_out = Any
else:
    # At runtime the _impl functions always produce a 3-tuple of tensors;
    # callers drop the components they did not request.
    _unique_impl_out = Tuple[Tensor, Tensor, Tensor]
+
+
+def _unique_impl(input: Tensor, sorted: bool = True,
+ return_inverse: bool = False, return_counts: bool = False,
+ dim: Optional[int] = None) -> _unique_impl_out:
+ r"""unique(input, sorted=True, return_inverse=False, return_counts=False, dim=None) -> Tuple[Tensor, Tensor, Tensor]
+
+ Returns the unique elements of the input tensor.
+
+ .. note:: This function is different from :func:`torch.unique_consecutive` in the sense that
+ this function also eliminates non-consecutive duplicate values.
+
+ .. note:: Currently in the CUDA implementation and the CPU implementation,
+ `torch.unique` always sort the tensor at the beginning regardless of the `sort` argument.
+ Sorting could be slow, so if your input tensor is already sorted, it is recommended to use
+ :func:`torch.unique_consecutive` which avoids the sorting.
+
+ Args:
+ input (Tensor): the input tensor
+ sorted (bool): Whether to sort the unique elements in ascending order
+ before returning as output.
+ return_inverse (bool): Whether to also return the indices for where
+ elements in the original input ended up in the returned unique list.
+ return_counts (bool): Whether to also return the counts for each unique
+ element.
+ dim (int, optional): the dimension to operate upon. If ``None``, the
+ unique of the flattened input is returned. Otherwise, each of the
+ tensors indexed by the given dimension is treated as one of the
+ elements to apply the unique operation upon. See examples for more
+ details. Default: ``None``
+
+ Returns:
+ (Tensor, Tensor (optional), Tensor (optional)): A tensor or a tuple of tensors containing
+
+ - **output** (*Tensor*): the output list of unique scalar elements.
+ - **inverse_indices** (*Tensor*): (optional) if
+ :attr:`return_inverse` is True, there will be an additional
+ returned tensor (same shape as input) representing the indices
+ for where elements in the original input map to in the output;
+ otherwise, this function will only return a single tensor.
+ - **counts** (*Tensor*): (optional) if
+ :attr:`return_counts` is True, there will be an additional
+ returned tensor (same shape as output or output.size(dim),
+ if dim was specified) representing the number of occurrences
+ for each unique value or tensor.
+
+ Example::
+
+ >>> output = torch.unique(torch.tensor([1, 3, 2, 3], dtype=torch.long))
+ >>> output
+ tensor([1, 2, 3])
+
+ >>> output, inverse_indices = torch.unique(
+ ... torch.tensor([1, 3, 2, 3], dtype=torch.long), sorted=True, return_inverse=True)
+ >>> output
+ tensor([1, 2, 3])
+ >>> inverse_indices
+ tensor([0, 2, 1, 2])
+
+ >>> output, inverse_indices = torch.unique(
+ ... torch.tensor([[1, 3], [2, 3]], dtype=torch.long), sorted=True, return_inverse=True)
+ >>> output
+ tensor([1, 2, 3])
+ >>> inverse_indices
+ tensor([[0, 2],
+ [1, 2]])
+
+ >>> a = torch.tensor([
+ ... [
+ ... [1, 1, 0, 0],
+ ... [1, 1, 0, 0],
+ ... [0, 0, 1, 1],
+ ... ],
+ ... [
+ ... [0, 0, 1, 1],
+ ... [0, 0, 1, 1],
+ ... [1, 1, 1, 1],
+ ... ],
+ ... [
+ ... [1, 1, 0, 0],
+ ... [1, 1, 0, 0],
+ ... [0, 0, 1, 1],
+ ... ],
+ ... ])
+
+ >>> # If we call `torch.unique(a, dim=0)`, each of the tensors `a[idx, :, :]`
+ >>> # will be compared. We can see that `a[0, :, :]` and `a[2, :, :]` match
+ >>> # each other, so one of them will be removed.
+ >>> (a[0, :, :] == a[2, :, :]).all()
+ tensor(True)
+ >>> a_unique_dim0 = torch.unique(a, dim=0)
+ >>> a_unique_dim0
+ tensor([[[0, 0, 1, 1],
+ [0, 0, 1, 1],
+ [1, 1, 1, 1]],
+ [[1, 1, 0, 0],
+ [1, 1, 0, 0],
+ [0, 0, 1, 1]]])
+
+ >>> # Notice which sub-tensors from `a` match with the sub-tensors from
+ >>> # `a_unique_dim0`:
+ >>> (a_unique_dim0[0, :, :] == a[1, :, :]).all()
+ tensor(True)
+ >>> (a_unique_dim0[1, :, :] == a[0, :, :]).all()
+ tensor(True)
+
+ >>> # For `torch.unique(a, dim=1)`, each of the tensors `a[:, idx, :]` are
+ >>> # compared. `a[:, 0, :]` and `a[:, 1, :]` match each other, so one of
+ >>> # them will be removed.
+ >>> (a[:, 0, :] == a[:, 1, :]).all()
+ tensor(True)
+ >>> torch.unique(a, dim=1)
+ tensor([[[0, 0, 1, 1],
+ [1, 1, 0, 0]],
+ [[1, 1, 1, 1],
+ [0, 0, 1, 1]],
+ [[0, 0, 1, 1],
+ [1, 1, 0, 0]]])
+
+ >>> # For `torch.unique(a, dim=2)`, the tensors `a[:, :, idx]` are compared.
+ >>> # `a[:, :, 0]` and `a[:, :, 1]` match each other. Also, `a[:, :, 2]` and
+ >>> # `a[:, :, 3]` match each other as well. So in this case, two of the
+ >>> # sub-tensors will be removed.
+ >>> (a[:, :, 0] == a[:, :, 1]).all()
+ tensor(True)
+ >>> (a[:, :, 2] == a[:, :, 3]).all()
+ tensor(True)
+ >>> torch.unique(a, dim=2)
+ tensor([[[0, 1],
+ [0, 1],
+ [1, 0]],
+ [[1, 0],
+ [1, 0],
+ [1, 1]],
+ [[0, 1],
+ [0, 1],
+ [1, 0]]])
+ """
+ if has_torch_function_unary(input):
+ return handle_torch_function(
+ unique, (input,), input, sorted=sorted, return_inverse=return_inverse,
+ return_counts=return_counts, dim=dim)
+
+ if dim is not None:
+ output, inverse_indices, counts = _VF.unique_dim(
+ input,
+ dim,
+ sorted=sorted,
+ return_inverse=return_inverse,
+ return_counts=return_counts,
+ )
+ else:
+ output, inverse_indices, counts = torch._unique2(
+ input,
+ sorted=sorted,
+ return_inverse=return_inverse,
+ return_counts=return_counts,
+ )
+ return output, inverse_indices, counts
+
+
+def _unique_consecutive_impl(input: Tensor, return_inverse: bool = False,
+ return_counts: bool = False,
+ dim: Optional[int] = None) -> _unique_impl_out:
+ r"""Eliminates all but the first element from every consecutive group of equivalent elements.
+
+ .. note:: This function is different from :func:`torch.unique` in the sense that this function
+ only eliminates consecutive duplicate values. This semantics is similar to `std::unique`
+ in C++.
+
+ Args:
+ input (Tensor): the input tensor
+ return_inverse (bool): Whether to also return the indices for where
+ elements in the original input ended up in the returned unique list.
+ return_counts (bool): Whether to also return the counts for each unique
+ element.
+ dim (int): the dimension to apply unique. If ``None``, the unique of the
+ flattened input is returned. default: ``None``
+
+ Returns:
+ (Tensor, Tensor (optional), Tensor (optional)): A tensor or a tuple of tensors containing
+
+ - **output** (*Tensor*): the output list of unique scalar elements.
+ - **inverse_indices** (*Tensor*): (optional) if
+ :attr:`return_inverse` is True, there will be an additional
+ returned tensor (same shape as input) representing the indices
+ for where elements in the original input map to in the output;
+ otherwise, this function will only return a single tensor.
+ - **counts** (*Tensor*): (optional) if
+ :attr:`return_counts` is True, there will be an additional
+ returned tensor (same shape as output or output.size(dim),
+ if dim was specified) representing the number of occurrences
+ for each unique value or tensor.
+
+ Example::
+
+ >>> x = torch.tensor([1, 1, 2, 2, 3, 1, 1, 2])
+ >>> output = torch.unique_consecutive(x)
+ >>> output
+ tensor([1, 2, 3, 1, 2])
+
+ >>> output, inverse_indices = torch.unique_consecutive(x, return_inverse=True)
+ >>> output
+ tensor([1, 2, 3, 1, 2])
+ >>> inverse_indices
+ tensor([0, 0, 1, 1, 2, 3, 3, 4])
+
+ >>> output, counts = torch.unique_consecutive(x, return_counts=True)
+ >>> output
+ tensor([1, 2, 3, 1, 2])
+ >>> counts
+ tensor([2, 2, 1, 2, 1])
+ """
+ if has_torch_function_unary(input):
+ return handle_torch_function(
+ unique_consecutive, (input,), input, return_inverse=return_inverse,
+ return_counts=return_counts, dim=dim)
+ output, inverse_indices, counts = _VF.unique_consecutive( # type: ignore[attr-defined]
+ input, return_inverse=return_inverse, return_counts=return_counts, dim=dim)
+ return output, inverse_indices, counts
+
+
def _return_counts(input, sorted=True, return_inverse=False, return_counts=False, dim=None):
    # type: (Tensor, bool, bool, bool, Optional[int]) -> Tuple[Tensor, Tensor]

    # With a __torch_function__ override in play, forward the impl's result
    # unmodified.
    if has_torch_function_unary(input):
        return _unique_impl(input, sorted, return_inverse, return_counts, dim)

    # Keep only (output, counts) from the impl's 3-tuple.
    result = _unique_impl(input, sorted, return_inverse, return_counts, dim)
    return result[0], result[2]
+
+
def _return_output(input, sorted=True, return_inverse=False, return_counts=False, dim=None):
    # type: (Tensor, bool, bool, bool, Optional[int]) -> Tensor

    # With a __torch_function__ override in play, forward the impl's result
    # unmodified.
    if has_torch_function_unary(input):
        return _unique_impl(input, sorted, return_inverse, return_counts, dim)

    # Keep only the unique values from the impl's 3-tuple.
    result = _unique_impl(input, sorted, return_inverse, return_counts, dim)
    return result[0]
+
+
def _return_inverse(input, sorted=True, return_inverse=False, return_counts=False, dim=None):
    # type: (Tensor, bool, bool, bool, Optional[int]) -> Tuple[Tensor, Tensor]

    # With a __torch_function__ override in play, forward the impl's result
    # unmodified.
    if has_torch_function_unary(input):
        return _unique_impl(input, sorted, return_inverse, return_counts, dim)

    # Keep only (output, inverse_indices) from the impl's 3-tuple.
    result = _unique_impl(input, sorted, return_inverse, return_counts, dim)
    return result[0], result[1]
+
+
# `unique`'s return arity depends on two boolean flags, so the public entry
# point is assembled from `boolean_dispatch` wrappers: TorchScript must know
# the flag values statically to resolve the output tuple type.
# `arg_index=3` is the positional index of `return_counts` in
# (input, sorted, return_inverse, return_counts, dim).
_return_inverse_false = boolean_dispatch(
    arg_name='return_counts',
    arg_index=3,
    default=False,
    if_true=_return_counts,
    if_false=_return_output,
    module_name=__name__,
    func_name='unique')

# When return_inverse=True and return_counts=True the full 3-tuple from
# _unique_impl is returned as-is.
_return_inverse_true = boolean_dispatch(
    arg_name='return_counts',
    arg_index=3,
    default=False,
    if_true=_unique_impl,
    if_false=_return_inverse,
    module_name=__name__,
    func_name='unique')

# The return type of unique depends on `return_inverse`, and `return_counts` so in order to
# resolve the output type in TorchScript we need to statically know the value of both parameters

unique = boolean_dispatch(
    arg_name='return_inverse',
    arg_index=2,
    default=False,
    if_true=_return_inverse_true,
    if_false=_return_inverse_false,
    module_name=__name__,
    func_name='unique')
# Surface the impl's docstring as torch.unique's public documentation.
unique.__doc__ = _unique_impl.__doc__
+
+
def _consecutive_return_counts(input, return_inverse=False, return_counts=False, dim=None):
    # type: (Tensor, bool, bool, Optional[int]) -> Tuple[Tensor, Tensor]

    # With a __torch_function__ override in play, forward the impl's result
    # unmodified.
    if has_torch_function_unary(input):
        return _unique_consecutive_impl(input, return_inverse, return_counts, dim)

    # Keep only (output, counts) from the impl's 3-tuple.
    result = _unique_consecutive_impl(input, return_inverse, return_counts, dim)
    return result[0], result[2]
+
+
def _consecutive_return_output(input, return_inverse=False, return_counts=False, dim=None):
    # type: (Tensor, bool, bool, Optional[int]) -> Tensor

    # With a __torch_function__ override in play, forward the impl's result
    # unmodified.
    if has_torch_function_unary(input):
        return _unique_consecutive_impl(input, return_inverse, return_counts, dim)

    # Keep only the unique values from the impl's 3-tuple.
    result = _unique_consecutive_impl(input, return_inverse, return_counts, dim)
    return result[0]
+
+
def _consecutive_return_inverse(input, return_inverse=False, return_counts=False, dim=None):
    # type: (Tensor, bool, bool, Optional[int]) -> Tuple[Tensor, Tensor]

    # With a __torch_function__ override in play, forward the impl's result
    # unmodified.
    if has_torch_function_unary(input):
        return _unique_consecutive_impl(input, return_inverse, return_counts, dim)

    # Keep only (output, inverse_indices) from the impl's 3-tuple.
    result = _unique_consecutive_impl(input, return_inverse, return_counts, dim)
    return result[0], result[1]
+
+
# Same boolean_dispatch assembly as `unique` above, for unique_consecutive.
# NOTE(review): in the _consecutive_* signatures (input, return_inverse,
# return_counts, dim), `return_inverse` is positional index 1 and
# `return_counts` is index 2, yet the dispatchers below use arg_index=1 for
# return_counts and arg_index=2 for return_inverse — they look swapped for
# positional TorchScript calls. This matches the upstream wiring, so confirm
# intent before changing.
_consecutive_return_inverse_false = boolean_dispatch(
    arg_name='return_counts',
    arg_index=1,
    default=False,
    if_true=_consecutive_return_counts,
    if_false=_consecutive_return_output,
    module_name=__name__,
    func_name='unique_consecutive')

_consecutive_return_inverse_true = boolean_dispatch(
    arg_name='return_counts',
    arg_index=1,
    default=False,
    if_true=_unique_consecutive_impl,
    if_false=_consecutive_return_inverse,
    module_name=__name__,
    func_name='unique_consecutive')

# The return type of unique_consecutive depends on `return_inverse`, and `return_counts` so in order to
# resolve the output type in TorchScript we need to statically know the value of both parameters

unique_consecutive = boolean_dispatch(
    arg_name='return_inverse',
    arg_index=2,
    default=False,
    if_true=_consecutive_return_inverse_true,
    if_false=_consecutive_return_inverse_false,
    module_name=__name__,
    func_name='unique_consecutive')
# Surface the impl's docstring as torch.unique_consecutive's documentation.
unique_consecutive.__doc__ = _unique_consecutive_impl.__doc__
+
if TYPE_CHECKING:
    pass
    # There's no good way to use this type annotation without breaking JIT
    # overloads. So leave untyped for mypy for now.
else:
    # Runtime @overload registrations so TorchScript can resolve the four
    # accepted types of `dims` (int, tuple of lists, list of lists, Tensor).
    # The bodies are intentionally empty; the real implementation follows.
    @overload
    def tensordot(a, b, dims: int = 2, out: Optional[torch.Tensor] = None):
        pass

    @overload  # noqa: F811
    def tensordot(a, b, dims: Tuple[List[int], List[int]], out: Optional[torch.Tensor] = None):  # noqa: F811
        pass

    @overload  # noqa: F811
    def tensordot(a, b, dims: List[List[int]], out: Optional[torch.Tensor] = None):  # noqa: F811
        pass

    @overload  # noqa: F811
    def tensordot(a, b, dims: torch.Tensor, out: Optional[torch.Tensor] = None):  # noqa: F811
        pass
+
+
def tensordot(a, b, dims=2, out: Optional[torch.Tensor] = None):  # noqa: F811
    r"""Returns a contraction of a and b over multiple dimensions.

    :attr:`tensordot` implements a generalized matrix product.

    Args:
        a (Tensor): Left tensor to contract
        b (Tensor): Right tensor to contract
        dims (int or Tuple[List[int], List[int]] or List[List[int]] containing two lists or Tensor): number of dimensions to
            contract or explicit lists of dimensions for :attr:`a` and
            :attr:`b` respectively

    When called with a non-negative integer argument :attr:`dims` = :math:`d`, and
    the number of dimensions of :attr:`a` and :attr:`b` is :math:`m` and :math:`n`,
    respectively, :func:`~torch.tensordot` computes

    .. math::
        r_{i_0,...,i_{m-d}, i_d,...,i_n}
          = \sum_{k_0,...,k_{d-1}} a_{i_0,...,i_{m-d},k_0,...,k_{d-1}} \times b_{k_0,...,k_{d-1}, i_d,...,i_n}.

    When called with :attr:`dims` of the list form, the given dimensions will be contracted
    in place of the last :math:`d` of :attr:`a` and the first :math:`d` of :math:`b`. The sizes
    in these dimensions must match, but :func:`~torch.tensordot` will deal with broadcasted
    dimensions.

    Examples::

        >>> a = torch.arange(60.).reshape(3, 4, 5)
        >>> b = torch.arange(24.).reshape(4, 3, 2)
        >>> torch.tensordot(a, b, dims=([1, 0], [0, 1]))
        tensor([[4400., 4730.],
                [4532., 4874.],
                [4664., 5018.],
                [4796., 5162.],
                [4928., 5306.]])

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
        >>> a = torch.randn(3, 4, 5, device='cuda')
        >>> b = torch.randn(4, 5, 6, device='cuda')
        >>> c = torch.tensordot(a, b, dims=2).cpu()
        tensor([[ 8.3504, -2.5436,  6.2922,  2.7556, -1.0732,  3.2741],
                [ 3.3161,  0.0704,  5.0187, -0.4079, -4.3126,  4.8744],
                [ 0.8223,  3.9445,  3.2168, -0.2400,  3.4117,  1.7780]])

        >>> a = torch.randn(3, 5, 4, 6)
        >>> b = torch.randn(6, 4, 5, 3)
        >>> torch.tensordot(a, b, dims=([2, 1, 3], [1, 2, 0]))
        tensor([[  7.7193,  -2.4867, -10.3204],
                [  1.5513, -14.4737,  -6.5113],
                [ -0.2850,   4.2573,  -3.5997]])
    """
    if has_torch_function_variadic(a, b):
        return handle_torch_function(tensordot, (a, b), a, b, dims=dims, out=out)

    # Reject unsupported `dims` types up front with a descriptive error.
    if not isinstance(dims, (tuple, list, torch.Tensor, int, torch.SymInt)):
        raise RuntimeError("tensordot expects dims to be int or "
                           + "Tuple[List[int], List[int]] or "
                           + "List[List[int]] containing two lists, but got "
                           + f"dims={dims}")

    # Normalize every accepted `dims` form into two explicit dimension lists,
    # one for `a` and one for `b`.
    dims_a: List[int] = []
    dims_b: List[int] = []

    if isinstance(dims, (tuple, list)):
        dims_a, dims_b = dims

    if isinstance(dims, torch.Tensor):
        num_elements = dims.numel()
        if num_elements > 1:
            # A 2-row tensor gives the explicit dim lists for a and b.
            assert dims.size()[0] == 2
            dims_a = torch.jit.annotate(List[int], dims[0].tolist())
            dims_b = torch.jit.annotate(List[int], dims[1].tolist())
        else:
            # A scalar tensor is treated like an integer dims count.
            dims_val = int(dims.item())
            if dims_val < 0:
                raise RuntimeError(f"tensordot expects dims >= 0, but got dims={dims}")
            dims_a = list(range(-dims_val, 0))
            dims_b = list(range(dims_val))

    if isinstance(dims, (int, torch.SymInt)):
        if dims < 0:
            raise RuntimeError(f"tensordot expects dims >= 0, but got dims={dims}")
        if dims > min(a.dim(), b.dim()):
            raise RuntimeError(f"tensordot expects dims < ndim_a or ndim_b, but got dims={dims}")
        # Contract the last `dims` dimensions of `a` with the first `dims` of `b`.
        dims_a = list(range(-dims, 0))
        dims_b = list(range(dims))

    if out is None:
        return _VF.tensordot(a, b, dims_a, dims_b)  # type: ignore[attr-defined]
    else:
        return _VF.tensordot(a, b, dims_a, dims_b, out=out)  # type: ignore[attr-defined]
+
+
def cartesian_prod(*tensors: Tensor) -> Tensor:
    """Compute the cartesian product of the given sequence of tensors.

    The behavior mirrors python's `itertools.product` applied to the
    elements of the inputs.

    Args:
        *tensors: any number of 1 dimensional tensors.

    Returns:
        Tensor: A tensor equivalent to converting all the input tensors into lists,
        running `itertools.product` on these lists, and finally converting the
        resulting list into a tensor.

    Example::

        >>> import itertools
        >>> a = [1, 2, 3]
        >>> b = [4, 5]
        >>> list(itertools.product(a, b))
        [(1, 4), (1, 5), (2, 4), (2, 5), (3, 4), (3, 5)]
        >>> tensor_a = torch.tensor(a)
        >>> tensor_b = torch.tensor(b)
        >>> torch.cartesian_prod(tensor_a, tensor_b)
        tensor([[1, 4],
                [1, 5],
                [2, 4],
                [2, 5],
                [3, 4],
                [3, 5]])
    """
    # This wrapper exists to support variadic args; route through
    # __torch_function__ overrides before calling the native op.
    if has_torch_function(tensors):
        return handle_torch_function(cartesian_prod, tensors, *tensors)
    return _VF.cartesian_prod(tensors)  # type: ignore[attr-defined]
+
+
def block_diag(*tensors):
    """Construct a block diagonal matrix out of the provided tensors.

    Args:
        *tensors: One or more tensors with 0, 1, or 2 dimensions.

    Returns:
        Tensor: A 2 dimensional tensor with all the input tensors arranged in
        order such that their upper left and lower right corners are
        diagonally adjacent. All other elements are set to 0.

    Example::

        >>> import torch
        >>> A = torch.tensor([[0, 1], [1, 0]])
        >>> B = torch.tensor([[3, 4, 5], [6, 7, 8]])
        >>> C = torch.tensor(7)
        >>> D = torch.tensor([1, 2, 3])
        >>> E = torch.tensor([[4], [5], [6]])
        >>> torch.block_diag(A, B, C, D, E)
        tensor([[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 3, 4, 5, 0, 0, 0, 0, 0],
                [0, 0, 6, 7, 8, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 7, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 1, 2, 3, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 4],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 5],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 6]])
    """
    # This wrapper exists to support variadic args; give __torch_function__
    # overrides first crack before calling the native op.
    if has_torch_function(tensors):
        return handle_torch_function(block_diag, tensors, *tensors)
    return torch._C._VariableFunctions.block_diag(tensors)  # type: ignore[attr-defined]
+
+
def cdist(x1, x2, p=2., compute_mode='use_mm_for_euclid_dist_if_necessary'):
    # type: (Tensor, Tensor, float, str) -> (Tensor)
    r"""Computes the batched p-norm distance between each pair of the two collections of row vectors.

    Args:
        x1 (Tensor): input tensor of shape :math:`B \times P \times M`.
        x2 (Tensor): input tensor of shape :math:`B \times R \times M`.
        p: p value for the p-norm distance to calculate between each vector pair
            :math:`\in [0, \infty]`.
        compute_mode:
            'use_mm_for_euclid_dist_if_necessary' - will use matrix multiplication approach to calculate
            euclidean distance (p = 2) if P > 25 or R > 25
            'use_mm_for_euclid_dist' - will always use matrix multiplication approach to calculate
            euclidean distance (p = 2)
            'donot_use_mm_for_euclid_dist' - will never use matrix multiplication approach to calculate
            euclidean distance (p = 2)
            Default: use_mm_for_euclid_dist_if_necessary.

    If x1 has shape :math:`B \times P \times M` and x2 has shape :math:`B \times R \times M` then the
    output will have shape :math:`B \times P \times R`.

    This function is equivalent to `scipy.spatial.distance.cdist(input,'minkowski', p=p)`
    if :math:`p \in (0, \infty)`. When :math:`p = 0` it is equivalent to
    `scipy.spatial.distance.cdist(input, 'hamming') * M`. When :math:`p = \infty`, the closest
    scipy function is `scipy.spatial.distance.cdist(xn, lambda x, y: np.abs(x - y).max())`.

    Example:

        >>> a = torch.tensor([[0.9041,  0.0196], [-0.3108, -2.4423], [-0.4821,  1.059]])
        >>> a
        tensor([[ 0.9041,  0.0196],
                [-0.3108, -2.4423],
                [-0.4821,  1.0590]])
        >>> b = torch.tensor([[-2.1763, -0.4713], [-0.6986,  1.3702]])
        >>> b
        tensor([[-2.1763, -0.4713],
                [-0.6986,  1.3702]])
        >>> torch.cdist(a, b, p=2)
        tensor([[3.1193, 2.0959],
                [2.7138, 3.8322],
                [2.2830, 0.3791]])
    """
    if has_torch_function_variadic(x1, x2):
        return handle_torch_function(
            cdist, (x1, x2), x1, x2, p=p, compute_mode=compute_mode)
    # The native op encodes compute_mode as: None = heuristic,
    # 1 = always use mm for euclidean, 2 = never use mm for euclidean.
    if compute_mode == 'use_mm_for_euclid_dist_if_necessary':
        return _VF.cdist(x1, x2, p, None)  # type: ignore[attr-defined]
    elif compute_mode == 'use_mm_for_euclid_dist':
        return _VF.cdist(x1, x2, p, 1)  # type: ignore[attr-defined]
    elif compute_mode == 'donot_use_mm_for_euclid_dist':
        return _VF.cdist(x1, x2, p, 2)  # type: ignore[attr-defined]
    else:
        raise ValueError(f"{compute_mode} is not a valid value for compute_mode")
+
+
def atleast_1d(*tensors):
    r"""
    Returns a 1-dimensional view of each input tensor with zero dimensions.
    Input tensors with one or more dimensions are returned as-is.

    Args:
        input (Tensor or list of Tensors)

    Returns:
        output (Tensor or tuple of Tensors)

    Example::

        >>> x = torch.arange(2)
        >>> x
        tensor([0, 1])
        >>> torch.atleast_1d(x)
        tensor([0, 1])
        >>> x = torch.tensor(1.)
        >>> x
        tensor(1.)
        >>> torch.atleast_1d(x)
        tensor([1.])
        >>> x = torch.tensor(0.5)
        >>> y = torch.tensor(1.)
        >>> torch.atleast_1d((x, y))
        (tensor([0.5000]), tensor([1.]))
    """
    # This wrapper exists to support variadic args.
    if has_torch_function(tensors):
        return handle_torch_function(atleast_1d, tensors, *tensors)
    # A single argument is unwrapped so the native op returns a bare tensor
    # rather than a one-element tuple.
    arg = tensors[0] if len(tensors) == 1 else tensors
    return _VF.atleast_1d(arg)  # type: ignore[attr-defined]
+
+
def atleast_2d(*tensors):
    r"""
    Returns a 2-dimensional view of each input tensor with zero dimensions.
    Input tensors with two or more dimensions are returned as-is.

    Args:
        input (Tensor or list of Tensors)

    Returns:
        output (Tensor or tuple of Tensors)

    Example::

        >>> x = torch.tensor(1.)
        >>> x
        tensor(1.)
        >>> torch.atleast_2d(x)
        tensor([[1.]])
        >>> x = torch.arange(4).view(2, 2)
        >>> x
        tensor([[0, 1],
                [2, 3]])
        >>> torch.atleast_2d(x)
        tensor([[0, 1],
                [2, 3]])
        >>> x = torch.tensor(0.5)
        >>> y = torch.tensor(1.)
        >>> torch.atleast_2d((x, y))
        (tensor([[0.5000]]), tensor([[1.]]))
    """
    # This wrapper exists to support variadic args.
    if has_torch_function(tensors):
        return handle_torch_function(atleast_2d, tensors, *tensors)
    # A single argument is unwrapped so the native op returns a bare tensor
    # rather than a one-element tuple.
    arg = tensors[0] if len(tensors) == 1 else tensors
    return _VF.atleast_2d(arg)  # type: ignore[attr-defined]
+
+
def atleast_3d(*tensors):
    r"""
    Returns a 3-dimensional view of each input tensor with zero dimensions.
    Input tensors with three or more dimensions are returned as-is.

    Args:
        input (Tensor or list of Tensors)

    Returns:
        output (Tensor or tuple of Tensors)

    Example:

        >>> x = torch.tensor(0.5)
        >>> x
        tensor(0.5000)
        >>> torch.atleast_3d(x)
        tensor([[[0.5000]]])
        >>> y = torch.arange(4).view(2, 2)
        >>> y
        tensor([[0, 1],
                [2, 3]])
        >>> torch.atleast_3d(y)
        tensor([[[0],
                 [1]],

                [[2],
                 [3]]])
        >>> x = torch.tensor(1).view(1, 1, 1)
        >>> x
        tensor([[[1]]])
        >>> torch.atleast_3d(x)
        tensor([[[1]]])
        >>> x = torch.tensor(0.5)
        >>> y = torch.tensor(1.)
        >>> torch.atleast_3d((x, y))
        (tensor([[[0.5000]]]), tensor([[[1.]]]))
    """
    # This wrapper exists to support variadic args.
    if has_torch_function(tensors):
        return handle_torch_function(atleast_3d, tensors, *tensors)
    # A single argument is unwrapped so the native op returns a bare tensor
    # rather than a one-element tuple.
    arg = tensors[0] if len(tensors) == 1 else tensors
    return _VF.atleast_3d(arg)  # type: ignore[attr-defined]
+
+
if TYPE_CHECKING:
    pass
    # There's no good way to use this type annotation; cannot rename norm() to
    # _norm_impl() in a way that doesn't break JIT overloads. So leave untyped
    # for mypy for now.
    # def norm(input: Tensor,
    #          p: Optional[Union[str, Number]] = "fro",
    #          dim: Optional[Union[int, List[int]]] = None,
    #          keepdim: bool = False,
    #          out: Optional[Tensor] = None,
    #          dtype: _dtype = None) -> Tensor:
    #     return _norm_impl(input, p, dim, keepdim, out, dtype)
else:
    # Runtime @overload registrations so TorchScript can resolve the
    # str-vs-number `p` and int-vs-list `dim` combinations. Bodies are empty;
    # the real implementation follows.
    # TODO: type dim as BroadcastingList when
    # https://github.com/pytorch/pytorch/issues/33782 is fixed
    @overload
    def norm(input, p="fro", dim=None, keepdim=False, out=None, dtype=None):
        # type: (Tensor, str, Optional[List[int]], bool, Optional[Tensor], Optional[int]) -> Tensor
        pass

    @overload  # noqa: F811
    def norm(input, p="fro", dim=None, keepdim=False, out=None, dtype=None):  # noqa: F811
        # type: (Tensor, Optional[number], Optional[List[int]], bool, Optional[Tensor], Optional[int]) -> Tensor
        pass

    @overload  # noqa: F811
    def norm(input, p="fro", dim=None, keepdim=False, out=None, dtype=None):  # noqa: F811
        # type: (Tensor, Optional[number], Optional[int], bool, Optional[Tensor], Optional[int]) -> Tensor
        pass

    @overload  # noqa: F811
    def norm(input, p="fro", dim=None, keepdim=False, out=None, dtype=None):  # noqa: F811
        # type: (Tensor, str, Optional[int], bool, Optional[Tensor], Optional[int]) -> Tensor
        pass
+
+
def norm(input, p: Optional[Union[float, str]] = "fro", dim=None, keepdim=False, out=None, dtype=None):  # noqa: F811
    r"""Returns the matrix norm or vector norm of a given tensor.

    .. warning::

        torch.norm is deprecated and may be removed in a future PyTorch release.
        Its documentation and behavior may be incorrect, and it is no longer
        actively maintained.

        Use :func:`torch.linalg.vector_norm` when computing vector norms and
        :func:`torch.linalg.matrix_norm` when computing matrix norms.
        For a function with a similar behavior as this one see :func:`torch.linalg.norm`.
        Note, however, the signature for these functions is slightly different than the
        signature for ``torch.norm``.

    Args:
        input (Tensor): The input tensor. Its data type must be either a floating
            point or complex type. For complex inputs, the norm is calculated using the
            absolute value of each element. If the input is complex and neither
            :attr:`dtype` nor :attr:`out` is specified, the result's data type will
            be the corresponding floating point type (e.g. float if :attr:`input` is
            complexfloat).

        p (int, float, inf, -inf, 'fro', 'nuc', optional): the order of norm. Default: ``'fro'``
            The following norms can be calculated:

            ======  ==============  ==========================
            ord     matrix norm     vector norm
            ======  ==============  ==========================
            'fro'   Frobenius norm  --
            'nuc'   nuclear norm    --
            Number  --              sum(abs(x)**ord)**(1./ord)
            ======  ==============  ==========================

            The vector norm can be calculated across any number of dimensions.
            The corresponding dimensions of :attr:`input` are flattened into
            one dimension, and the norm is calculated on the flattened
            dimension.

            Frobenius norm produces the same result as ``p=2`` in all cases
            except when :attr:`dim` is a list of three or more dims, in which
            case Frobenius norm throws an error.

            Nuclear norm can only be calculated across exactly two dimensions.

        dim (int, tuple of ints, list of ints, optional):
            Specifies which dimension or dimensions of :attr:`input` to
            calculate the norm across. If :attr:`dim` is ``None``, the norm will
            be calculated across all dimensions of :attr:`input`. If the norm
            type indicated by :attr:`p` does not support the specified number of
            dimensions, an error will occur.
        keepdim (bool, optional): whether the output tensors have :attr:`dim`
            retained or not. Ignored if :attr:`dim` = ``None`` and
            :attr:`out` = ``None``. Default: ``False``
        out (Tensor, optional): the output tensor. Ignored if
            :attr:`dim` = ``None`` and :attr:`out` = ``None``.
        dtype (:class:`torch.dtype`, optional): the desired data type of
            returned tensor. If specified, the input tensor is casted to
            :attr:`dtype` while performing the operation. Default: None.

    .. note::
        Even though ``p='fro'`` supports any number of dimensions, the true
        mathematical definition of Frobenius norm only applies to tensors with
        exactly two dimensions. :func:`torch.linalg.matrix_norm` with ``ord='fro'``
        aligns with the mathematical definition, since it can only be applied across
        exactly two dimensions.

    Example::

        >>> import torch
        >>> a = torch.arange(9, dtype= torch.float) - 4
        >>> b = a.reshape((3, 3))
        >>> torch.norm(a)
        tensor(7.7460)
        >>> torch.norm(b)
        tensor(7.7460)
        >>> torch.norm(a, float('inf'))
        tensor(4.)
        >>> torch.norm(b, float('inf'))
        tensor(4.)
        >>> c = torch.tensor([[ 1, 2, 3], [-1, 1, 4]] , dtype=torch.float)
        >>> torch.norm(c, dim=0)
        tensor([1.4142, 2.2361, 5.0000])
        >>> torch.norm(c, dim=1)
        tensor([3.7417, 4.2426])
        >>> torch.norm(c, p=1, dim=1)
        tensor([6., 6.])
        >>> d = torch.arange(8, dtype=torch.float).reshape(2, 2, 2)
        >>> torch.norm(d, dim=(1, 2))
        tensor([ 3.7417, 11.2250])
        >>> torch.norm(d[0, :, :]), torch.norm(d[1, :, :])
        (tensor(3.7417), tensor(11.2250))
    """

    if has_torch_function_unary(input):
        return handle_torch_function(
            norm, (input,), input, p=p, dim=dim, keepdim=keepdim, out=out, dtype=dtype)

    # NB. All the repeated code and weird python is to please TorchScript.
    # For a more compact implementation see the relevant function in `_refs/__init__.py`

    # Fast path: map onto torch.linalg.{vector,matrix}_norm for strided
    # tensors on the listed device types.
    # We don't do this for MPS or sparse tensors
    if input.layout == torch.strided and input.device.type in \
            ("cpu", "cuda", "meta", torch.utils.backend_registration._privateuse1_backend_name):
        if dim is not None:
            if isinstance(dim, (int, torch.SymInt)):
                # TorchScript needs a List[int]; wrap a bare int dim.
                _dim = [dim]
            else:
                _dim = dim
        else:
            _dim = None  # type: ignore[assignment]

        if isinstance(p, str):
            # 'fro' over at most 2 dims is numerically the same as the p=2
            # vector norm over those dims.
            if p == "fro" and (dim is None or isinstance(dim, (int, torch.SymInt)) or len(dim) <= 2):
                if out is None:
                    return torch.linalg.vector_norm(input, 2, _dim, keepdim, dtype=dtype)
                else:
                    return torch.linalg.vector_norm(input, 2, _dim, keepdim, dtype=dtype, out=out)

            # Here we either call the nuclear norm, or we call matrix_norm with some arguments
            # that will throw an error
            if _dim is None:
                _dim = list(range(input.ndim))
            if out is None:
                return torch.linalg.matrix_norm(input, p, _dim, keepdim, dtype=dtype)
            else:
                return torch.linalg.matrix_norm(input, p, _dim, keepdim, dtype=dtype, out=out)
        else:
            # NB. p should be Union[str, number], not Optional!
            _p = 2.0 if p is None else p
            if out is None:
                return torch.linalg.vector_norm(input, _p, _dim, keepdim, dtype=dtype)
            else:
                return torch.linalg.vector_norm(input, _p, _dim, keepdim, dtype=dtype, out=out)

    # Legacy path below (other layouts / device types), implemented on _VF ops.
    ndim = input.dim()

    # catch default case
    if dim is None and out is None and dtype is None and p is not None:
        if isinstance(p, str):
            if p == "fro":
                return _VF.frobenius_norm(input, dim=(), keepdim=keepdim)
        if not isinstance(p, str):
            _dim = [i for i in range(ndim)]  # noqa: C416 TODO: rewrite as list(range(m))
            return _VF.norm(input, p, dim=_dim, keepdim=keepdim)  # type: ignore[attr-defined]

    # TODO: when https://github.com/pytorch/pytorch/issues/33782 is fixed
    # remove the overloads where dim is an int and replace with BraodcastingList1
    # and remove next four lines, replace _dim with dim
    if dim is not None:
        if isinstance(dim, (int, torch.SymInt)):
            _dim = [dim]
        else:
            _dim = dim
    else:
        _dim = None  # type: ignore[assignment]

    if isinstance(p, str):
        if p == "fro":
            if dtype is not None:
                raise ValueError("dtype argument is not supported in frobenius norm")

            if _dim is None:
                _dim = list(range(ndim))
            if out is None:
                return _VF.frobenius_norm(input, _dim, keepdim=keepdim)  # type: ignore[arg-type]
            else:
                return _VF.frobenius_norm(input, _dim, keepdim=keepdim, out=out)  # type: ignore[arg-type]
        elif p == "nuc":
            if dtype is not None:
                raise ValueError("dtype argument is not supported in nuclear norm")
            if _dim is None:
                if out is None:
                    return _VF.nuclear_norm(input, keepdim=keepdim)  # type: ignore[arg-type]
                else:
                    return _VF.nuclear_norm(input, keepdim=keepdim, out=out)  # type: ignore[arg-type]
            else:
                if out is None:
                    return _VF.nuclear_norm(input, _dim, keepdim=keepdim)  # type: ignore[arg-type]
                else:
                    return _VF.nuclear_norm(input, _dim, keepdim=keepdim, out=out)  # type: ignore[arg-type]
        raise RuntimeError(f"only valid string values are 'fro' and 'nuc', found {p}")
    else:
        if _dim is None:
            _dim = list(range(ndim))

        if out is None:
            if dtype is None:
                return _VF.norm(input, p, _dim, keepdim=keepdim)  # type: ignore[attr-defined]
            else:
                return _VF.norm(input, p, _dim, keepdim=keepdim, dtype=dtype)  # type: ignore[attr-defined]
        else:
            if dtype is None:
                return _VF.norm(input, p, _dim, keepdim=keepdim, out=out)  # type: ignore[attr-defined]
            else:
                return _VF.norm(input, p, _dim, keepdim=keepdim, dtype=dtype, out=out)  # type: ignore[attr-defined]
+
def unravel_index(indices: Tensor, shape: Union[int, Sequence[int], torch.Size]) -> List[Tensor]:
    r"""Converts a tensor of flat indices into a tuple of coordinate tensors that
    index into an arbitrary tensor of the specified shape.

    Args:
        indices (Tensor): An integer tensor containing indices into the
            flattened version of an arbitrary tensor of shape :attr:`shape`.
            All elements must be in the range ``[0, prod(shape) - 1]``.

        shape (int, sequence of ints, or torch.Size): The shape of the arbitrary
            tensor. All elements must be non-negative.

    Returns:
        tuple of Tensors: Each ``i``-th tensor in the output corresponds with
        dimension ``i`` of :attr:`shape`. Each tensor has the same shape as
        ``indices`` and contains one index into dimension ``i`` for each of the
        flat indices given by ``indices``.

    Example::

        >>> import torch
        >>> torch.unravel_index(torch.tensor(4), (3, 2))
        (tensor(2),
         tensor(0))

        >>> torch.unravel_index(torch.tensor([4, 1]), (3, 2))
        (tensor([2, 0]),
         tensor([0, 1]))

        >>> torch.unravel_index(torch.tensor([0, 1, 2, 3, 4, 5]), (3, 2))
        (tensor([0, 0, 1, 1, 2, 2]),
         tensor([0, 1, 0, 1, 0, 1]))

        >>> torch.unravel_index(torch.tensor([1234, 5678]), (10, 10, 10, 10))
        (tensor([1, 5]),
         tensor([2, 6]),
         tensor([3, 7]),
         tensor([4, 8]))

        >>> torch.unravel_index(torch.tensor([[1234], [5678]]), (10, 10, 10, 10))
        (tensor([[1], [5]]),
         tensor([[2], [6]]),
         tensor([[3], [7]]),
         tensor([[4], [8]]))

        >>> torch.unravel_index(torch.tensor([[1234], [5678]]), (100, 100))
        (tensor([[12], [56]]),
         tensor([[34], [78]]))
    """
    if has_torch_function_unary(indices):
        return handle_torch_function(
            unravel_index, (indices,), indices, shape=shape)
    # The helper stacks per-dimension coordinates along a trailing axis;
    # unbind turns that into one tensor per dimension of `shape`.
    res_tensor = _unravel_index(indices, shape)
    return res_tensor.unbind(-1)
+
+def _unravel_index(indices: Tensor, shape: Union[int, Sequence[int]]) -> Tensor:
+ torch._check_type(
+ not indices.is_complex() and not indices.is_floating_point() and not indices.dtype == torch.bool,
+ lambda: f"expected 'indices' to be integer dtype, but got {indices.dtype}")
+
+ torch._check_type(
+ isinstance(shape, (int, torch.SymInt, Sequence)),
+ lambda: f"expected 'shape' to be int or sequence of ints, but got {type(shape)}")
+
+ if isinstance(shape, (int, torch.SymInt)):
+ shape = torch.Size([shape])
+ else:
+ for dim in shape:
+ torch._check_type(
+ isinstance(dim, (int, torch.SymInt)),
+ lambda: f"expected 'shape' sequence to only contain ints, but got {type(dim)}")
+ shape = torch.Size(shape)
+
+ torch._check_value(
+ all(dim >= 0 for dim in shape),
+ lambda: f"'shape' cannot have negative values, but got {tuple(shape)}")
+
+ coefs = list(reversed(list(itertools.accumulate(reversed(shape[1:] + torch.Size([1])), func=operator.mul))))
+ return indices.unsqueeze(-1).floor_divide(
+ torch.tensor(coefs, device=indices.device, dtype=torch.int64)
+ ) % torch.tensor(shape, device=indices.device, dtype=torch.int64)
+
def chain_matmul(*matrices, out=None):
    r"""Returns the matrix product of the :math:`N` 2-D tensors. The product is
    computed using the matrix chain order algorithm, which picks the
    multiplication order with the lowest arithmetic cost (`[CLRS]`_).
    :math:`N` must be at least 2; for :math:`N = 2` this is a plain
    matrix-matrix product, and for :math:`N = 1` the input is returned as is.

    .. warning::

        :func:`torch.chain_matmul` is deprecated and will be removed in a future PyTorch release.
        Use :func:`torch.linalg.multi_dot` instead, which accepts a list of two or more tensors
        rather than multiple arguments.

    Args:
        matrices (Tensors...): a sequence of 2 or more 2-D tensors whose product is to be determined.
        out (Tensor, optional): the output tensor. Ignored if :attr:`out` = ``None``.

    Returns:
        Tensor: if the :math:`i^{th}` tensor was of dimensions :math:`p_{i} \times p_{i + 1}`, then the product
        would be of dimensions :math:`p_{1} \times p_{N + 1}`.

    Example::

        >>> # xdoctest: +SKIP
        >>> a, b = torch.randn(3, 4), torch.randn(4, 5)
        >>> c, d = torch.randn(5, 6), torch.randn(6, 7)
        >>> # will raise a deprecation warning
        >>> torch.chain_matmul(a, b, c, d).shape
        torch.Size([3, 7])

    .. _`[CLRS]`: https://mitpress.mit.edu/books/introduction-algorithms-third-edition
    """
    # This wrapper exists to support variadic args.
    if has_torch_function(matrices):
        return handle_torch_function(chain_matmul, matrices, *matrices)

    if out is None:
        return _VF.chain_matmul(matrices)  # type: ignore[attr-defined]
    return _VF.chain_matmul(matrices, out=out)  # type: ignore[attr-defined]
+
+
+def _lu_impl(A, pivot=True, get_infos=False, out=None):
+ # type: (Tensor, bool, bool, Any) -> Tuple[Tensor, Tensor, Tensor]
+ r"""Computes the LU factorization of a matrix or batches of matrices
+ :attr:`A`. Returns a tuple containing the LU factorization and
+ pivots of :attr:`A`. Pivoting is done if :attr:`pivot` is set to
+ ``True``.
+
+ .. warning::
+
+ :func:`torch.lu` is deprecated in favor of :func:`torch.linalg.lu_factor`
+ and :func:`torch.linalg.lu_factor_ex`. :func:`torch.lu` will be removed in a
+ future PyTorch release.
+ ``LU, pivots, info = torch.lu(A, compute_pivots)`` should be replaced with
+
+ .. code:: python
+
+ LU, pivots = torch.linalg.lu_factor(A, compute_pivots)
+
+ ``LU, pivots, info = torch.lu(A, compute_pivots, get_infos=True)`` should be replaced with
+
+ .. code:: python
+
+ LU, pivots, info = torch.linalg.lu_factor_ex(A, compute_pivots)
+
+ .. note::
+ * The returned permutation matrix for every matrix in the batch is
+ represented by a 1-indexed vector of size ``min(A.shape[-2], A.shape[-1])``.
+ ``pivots[i] == j`` represents that in the ``i``-th step of the algorithm,
+ the ``i``-th row was permuted with the ``j-1``-th row.
+ * LU factorization with :attr:`pivot` = ``False`` is not available
+ for CPU, and attempting to do so will throw an error. However,
+ LU factorization with :attr:`pivot` = ``False`` is available for
+ CUDA.
+ * This function does not check if the factorization was successful
+ or not if :attr:`get_infos` is ``True`` since the status of the
+ factorization is present in the third element of the return tuple.
+ * In the case of batches of square matrices with size less or equal
+ to 32 on a CUDA device, the LU factorization is repeated for
+ singular matrices due to the bug in the MAGMA library
+ (see magma issue 13).
+ * ``L``, ``U``, and ``P`` can be derived using :func:`torch.lu_unpack`.
+
+ .. warning::
+ The gradients of this function will only be finite when :attr:`A` is full rank.
+ This is because the LU decomposition is just differentiable at full rank matrices.
+ Furthermore, if :attr:`A` is close to not being full rank,
+ the gradient will be numerically unstable as it depends on the computation of :math:`L^{-1}` and :math:`U^{-1}`.
+
+ Args:
+ A (Tensor): the tensor to factor of size :math:`(*, m, n)`
+ pivot (bool, optional): controls whether pivoting is done. Default: ``True``
+ get_infos (bool, optional): if set to ``True``, returns an info IntTensor.
+ Default: ``False``
+ out (tuple, optional): optional output tuple. If :attr:`get_infos` is ``True``,
+ then the elements in the tuple are Tensor, IntTensor,
+ and IntTensor. If :attr:`get_infos` is ``False``, then the
+ elements in the tuple are Tensor, IntTensor. Default: ``None``
+
+ Returns:
+ (Tensor, IntTensor, IntTensor (optional)): A tuple of tensors containing
+
+ - **factorization** (*Tensor*): the factorization of size :math:`(*, m, n)`
+
+ - **pivots** (*IntTensor*): the pivots of size :math:`(*, \text{min}(m, n))`.
+ ``pivots`` stores all the intermediate transpositions of rows.
+ The final permutation ``perm`` could be reconstructed by
+ applying ``swap(perm[i], perm[pivots[i] - 1])`` for ``i = 0, ..., pivots.size(-1) - 1``,
+ where ``perm`` is initially the identity permutation of :math:`m` elements
+ (essentially this is what :func:`torch.lu_unpack` is doing).
+
+ - **infos** (*IntTensor*, *optional*): if :attr:`get_infos` is ``True``, this is a tensor of
+ size :math:`(*)` where non-zero values indicate whether factorization for the matrix or
+ each minibatch has succeeded or failed
+
+ Example::
+
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK)
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
+ >>> A = torch.randn(2, 3, 3)
+ >>> A_LU, pivots = torch.lu(A)
+ >>> A_LU
+ tensor([[[ 1.3506, 2.5558, -0.0816],
+ [ 0.1684, 1.1551, 0.1940],
+ [ 0.1193, 0.6189, -0.5497]],
+
+ [[ 0.4526, 1.2526, -0.3285],
+ [-0.7988, 0.7175, -0.9701],
+ [ 0.2634, -0.9255, -0.3459]]])
+ >>> pivots
+ tensor([[ 3, 3, 3],
+ [ 3, 3, 3]], dtype=torch.int32)
+ >>> A_LU, pivots, info = torch.lu(A, get_infos=True)
+ >>> if info.nonzero().size(0) == 0:
+ ... print('LU factorization succeeded for all samples!')
+ LU factorization succeeded for all samples!
+ """
+ # If get_infos is True, then we don't need to check for errors and vice versa
+ return torch._lu_with_info(A, pivot=pivot, check_errors=(not get_infos))
+
# For static analysis a generic Sequence is the most permissive type for the
# user-supplied `out` argument; at runtime (TorchScript) a concrete
# List[Tensor] annotation is required instead.
if TYPE_CHECKING:
    _ListOrSeq = Sequence[Tensor]
else:
    _ListOrSeq = List[Tensor]
+
+
+def _check_list_size(out_len: int, get_infos: bool, out: _ListOrSeq) -> None:
+ get_infos_int = 1 if get_infos else 0
+ if out_len - get_infos_int != 2:
+ raise TypeError(f"expected tuple of {2 + int(get_infos)} elements but got {out_len}")
+ if not isinstance(out, (tuple, list)):
+ raise TypeError(f"argument 'out' must be tuple of Tensors, not {type(out).__name__}")
+
+
def _lu_with_infos(A, pivot=True, get_infos=False, out=None):
    # type: (Tensor, bool, bool, Optional[Tuple[Tensor, Tensor, Tensor]]) -> Tuple[Tensor, Tensor, Tensor]
    """``lu`` variant returning (A_LU, pivots, infos); selected by
    boolean_dispatch when ``get_infos`` is True."""
    if has_torch_function_unary(A):
        return handle_torch_function(
            lu, (A,), A, pivot=pivot, get_infos=get_infos, out=out)
    factorization = _lu_impl(A, pivot, get_infos, out)
    if out is None:
        return factorization  # A_LU, pivots, infos
    # Copy the freshly computed results into the caller-provided tensors.
    _check_list_size(len(out), get_infos, out)
    for dst, src in zip(out, factorization):
        dst.resize_as_(src).copy_(src)
    return out
+
+
def _lu_no_infos(A, pivot=True, get_infos=False, out=None):
    # type: (Tensor, bool, bool, Optional[Tuple[Tensor, Tensor]]) -> Tuple[Tensor, Tensor]
    """``lu`` variant returning (A_LU, pivots); selected by boolean_dispatch
    when ``get_infos`` is False."""
    # Check for a torch_function override first so we exit early if one applies.
    if has_torch_function_unary(A):
        return handle_torch_function(
            lu, (A,), A, pivot=pivot, get_infos=get_infos, out=out)
    factorization = _lu_impl(A, pivot, get_infos, out)
    if out is not None:
        _check_list_size(len(out), get_infos, out)
        for dst, src in zip(out, factorization):
            dst.resize_as_(src).copy_(src)
        return out
    # Drop the trailing info tensor: only the factorization and pivots were asked for.
    return factorization[0], factorization[1]
+
# The return type of lu depends on `get_infos`, so in order to resolve the output type
# of lu in TorchScript we need to statically know the value of `get_infos`:
# boolean_dispatch routes to _lu_with_infos when get_infos=True and to
# _lu_no_infos otherwise.
lu = boolean_dispatch(
    arg_name='get_infos',
    arg_index=2,
    default=False,
    if_true=_lu_with_infos,
    if_false=_lu_no_infos,
    module_name=__name__,
    func_name='lu')
# Expose the full user-facing documentation (kept on _lu_impl) on the
# public dispatcher object.
lu.__doc__ = _lu_impl.__doc__
+
+
def align_tensors(*tensors):
    """Placeholder for named-tensor alignment; unconditionally raises.

    Raises:
        RuntimeError: always -- the feature is not implemented yet.
    """
    message = '`align_tensors` not yet implemented.'
    raise RuntimeError(message)
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/hub.py b/env-llmeval/lib/python3.10/site-packages/torch/hub.py
new file mode 100644
index 0000000000000000000000000000000000000000..f276f49a9ce04f9cc503918f2ba3bab8a98a3c7f
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/hub.py
@@ -0,0 +1,770 @@
+import contextlib
+import errno
+import hashlib
+import json
+import os
+import re
+import shutil
+import sys
+import tempfile
+import torch
+import uuid
+import warnings
+import zipfile
+from pathlib import Path
+from typing import Dict, Optional, Any
+from urllib.error import HTTPError, URLError
+from urllib.request import urlopen, Request
+from urllib.parse import urlparse # noqa: F401
+from torch.serialization import MAP_LOCATION
+
class _Faketqdm:  # type: ignore[no-redef]
    """Minimal stand-in for ``tqdm`` used when the real package is absent.

    Implements just enough of the tqdm API (``update``, ``close``, ``write``,
    context-manager protocol) to render download progress on stderr.
    """

    def __init__(self, total=None, disable=False,
                 unit=None, *args, **kwargs):
        # Extra *args/**kwargs are accepted and dropped on purpose so call
        # sites written against real tqdm keep working.
        self.total = total
        self.disable = disable
        self.n = 0

    def update(self, n):
        if self.disable:
            return
        self.n += n
        # With a known total render a percentage, otherwise a raw byte count.
        if self.total is not None:
            sys.stderr.write(f"\r{100 * self.n / float(self.total):.1f}%")
        else:
            sys.stderr.write(f"\r{self.n:.1f} bytes")
        sys.stderr.flush()

    def set_description(self, *args, **kwargs):
        # Intentionally a no-op; use real tqdm for descriptions.
        pass

    def write(self, s):
        sys.stderr.write(f"{s}\n")

    def close(self):
        self.disable = True

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.disable:
            return
        sys.stderr.write('\n')
+
# Prefer the real tqdm progress bar when the package is installed; otherwise
# fall back to the lightweight _Faketqdm shim defined above.
try:
    from tqdm import tqdm  # If tqdm is installed use it, otherwise use the fake wrapper
except ImportError:
    tqdm = _Faketqdm
+
# Public API of torch.hub.
__all__ = [
    'download_url_to_file',
    'get_dir',
    'help',
    'list',
    'load',
    'load_state_dict_from_url',
    'set_dir',
]

# matches bfd8deac from resnet18-bfd8deac.pth
HASH_REGEX = re.compile(r'-([a-f0-9]*)\.')

# Repository owners that are implicitly trusted (no confirmation prompt).
_TRUSTED_REPO_OWNERS = ("facebookresearch", "facebookincubator", "pytorch", "fairinternal")
# Environment variable names consulted for authentication and cache locations.
ENV_GITHUB_TOKEN = 'GITHUB_TOKEN'
ENV_TORCH_HOME = 'TORCH_HOME'
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
DEFAULT_CACHE_DIR = '~/.cache'
# Attribute name that a hubconf module uses to declare required packages.
VAR_DEPENDENCY = 'dependencies'
# Filename of the per-repo entrypoint module.
MODULE_HUBCONF = 'hubconf.py'
READ_DATA_CHUNK = 8192
# Overridden via set_dir(); when None, get_dir() derives the default path.
_hub_dir: Optional[str] = None
+
+
+@contextlib.contextmanager
+def _add_to_sys_path(path):
+ sys.path.insert(0, path)
+ try:
+ yield
+ finally:
+ sys.path.remove(path)
+
+
+# Copied from tools/shared/module_loader to be included in torch package
+def _import_module(name, path):
+ import importlib.util
+ from importlib.abc import Loader
+ spec = importlib.util.spec_from_file_location(name, path)
+ assert spec is not None
+ module = importlib.util.module_from_spec(spec)
+ assert isinstance(spec.loader, Loader)
+ spec.loader.exec_module(module)
+ return module
+
+
+def _remove_if_exists(path):
+ if os.path.exists(path):
+ if os.path.isfile(path):
+ os.remove(path)
+ else:
+ shutil.rmtree(path)
+
+
+def _git_archive_link(repo_owner, repo_name, ref):
+ # See https://docs.github.com/en/rest/reference/repos#download-a-repository-archive-zip
+ return f"https://github.com/{repo_owner}/{repo_name}/zipball/{ref}"
+
+
+def _load_attr_from_module(module, func_name):
+ # Check if callable is defined in the module
+ if func_name not in dir(module):
+ return None
+ return getattr(module, func_name)
+
+
def _get_torch_home():
    """Resolve the torch cache root: ``$TORCH_HOME`` if set, otherwise
    ``$XDG_CACHE_HOME/torch``, falling back to ``~/.cache/torch``."""
    cache_parent = os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR)
    fallback = os.path.join(cache_parent, 'torch')
    return os.path.expanduser(os.getenv(ENV_TORCH_HOME, fallback))
+
+
def _parse_repo_info(github):
    """Split a "repo_owner/repo_name[:ref]" spec into (owner, name, ref).

    When no ref is given, probes github.com to choose between 'main' and
    'master'; when offline, falls back to whichever of the two is already
    present in the local cache.
    """
    if ':' in github:
        repo_info, ref = github.split(':')
    else:
        repo_info, ref = github, None
    repo_owner, repo_name = repo_info.split('/')

    if ref is None:
        # The ref wasn't specified by the user, so we need to figure out the
        # default branch: main or master. Our assumption is that if main exists
        # then it's the default branch, otherwise it's master.
        try:
            with urlopen(f"https://github.com/{repo_owner}/{repo_name}/tree/main/"):
                ref = 'main'
        except HTTPError as e:
            # 404 means there is no 'main' branch; any other HTTP failure is
            # unexpected and propagated.
            if e.code == 404:
                ref = 'master'
            else:
                raise
        except URLError as e:
            # No internet connection, need to check for cache as last resort
            for possible_ref in ("main", "master"):
                if os.path.exists(f"{get_dir()}/{repo_owner}_{repo_name}_{possible_ref}"):
                    ref = possible_ref
                    break
            if ref is None:
                raise RuntimeError(
                    "It looks like there is no internet connection and the "
                    f"repo could not be found in the cache ({get_dir()})"
                ) from e
    return repo_owner, repo_name, ref
+
+
def _read_url(url):
    """Fetch ``url`` and decode the response body using the charset declared
    in its headers (UTF-8 when none is declared)."""
    with urlopen(url) as response:
        charset = response.headers.get_content_charset('utf-8')
        return response.read().decode(charset)
+
+
def _validate_not_a_forked_repo(repo_owner, repo_name, ref):
    """Verify that ``ref`` names a branch, tag, or commit-sha prefix of the
    upstream repo via the GitHub API; raise ValueError if it cannot be found
    (e.g. when the ref only exists on a fork).
    """
    # Use urlopen to avoid depending on local git.
    headers = {'Accept': 'application/vnd.github.v3+json'}
    token = os.environ.get(ENV_GITHUB_TOKEN)
    if token is not None:
        # Authenticated requests get higher API rate limits.
        headers['Authorization'] = f'token {token}'
    # Page through both the branches and the tags listings, 100 per page.
    for url_prefix in (
            f'https://api.github.com/repos/{repo_owner}/{repo_name}/branches',
            f'https://api.github.com/repos/{repo_owner}/{repo_name}/tags'):
        page = 0
        while True:
            page += 1
            url = f'{url_prefix}?per_page=100&page={page}'
            response = json.loads(_read_url(Request(url, headers=headers)))
            # Empty response means no more data to process
            if not response:
                break
            for br in response:
                # A match on the ref name, or on a commit-sha prefix, validates it.
                if br['name'] == ref or br['commit']['sha'].startswith(ref):
                    return

    raise ValueError(f'Cannot find {ref} in https://github.com/{repo_owner}/{repo_name}. '
                     'If it\'s a commit from a forked repo, please call hub.load() with forked repo directly.')
+
+
def _get_cache_or_reload(github, force_reload, trust_repo, calling_fn, verbose=True, skip_validation=False):
    """Return the path of a local checkout for ``github`` ("owner/repo[:ref]"),
    downloading and unpacking the GitHub zipball into the hub dir unless a
    cached checkout already exists.

    Args:
        github: repo spec, e.g. "pytorch/vision:0.10".
        force_reload: if True, ignore any cached checkout and re-download.
        trust_repo: trust policy, forwarded to _check_repo_is_trusted.
        calling_fn: name of the public API entry point, used in warning text.
        verbose: if True, report cache hits on stderr.
        skip_validation: if True, skip the forked-repo ownership check.
    """
    # Setup hub_dir to save downloaded files
    hub_dir = get_dir()
    if not os.path.exists(hub_dir):
        os.makedirs(hub_dir)
    # Parse github repo information
    repo_owner, repo_name, ref = _parse_repo_info(github)
    # Github allows branch name with slash '/',
    # this causes confusion with path on both Linux and Windows.
    # Backslash is not allowed in Github branch name so no need to
    # to worry about it.
    normalized_br = ref.replace('/', '_')
    # Github renames folder repo-v1.x.x to repo-1.x.x
    # We don't know the repo name before downloading the zip file
    # and inspect name from it.
    # To check if cached repo exists, we need to normalize folder names.
    owner_name_branch = '_'.join([repo_owner, repo_name, normalized_br])
    repo_dir = os.path.join(hub_dir, owner_name_branch)
    # Check that the repo is in the trusted list
    _check_repo_is_trusted(repo_owner, repo_name, owner_name_branch, trust_repo=trust_repo, calling_fn=calling_fn)

    use_cache = (not force_reload) and os.path.exists(repo_dir)

    if use_cache:
        if verbose:
            sys.stderr.write(f'Using cache found in {repo_dir}\n')
    else:
        # Validate the tag/branch is from the original repo instead of a forked repo
        if not skip_validation:
            _validate_not_a_forked_repo(repo_owner, repo_name, ref)

        # Download into a temporary zip next to the final checkout location.
        cached_file = os.path.join(hub_dir, normalized_br + '.zip')
        _remove_if_exists(cached_file)

        try:
            url = _git_archive_link(repo_owner, repo_name, ref)
            sys.stderr.write(f'Downloading: \"{url}\" to {cached_file}\n')
            download_url_to_file(url, cached_file, progress=False)
        except HTTPError as err:
            if err.code == 300:
                # Getting a 300 Multiple Choices error likely means that the ref is both a tag and a branch
                # in the repo. This can be disambiguated by explicitely using refs/heads/ or refs/tags
                # See https://git-scm.com/book/en/v2/Git-Internals-Git-References
                # Here, we do the same as git: we throw a warning, and assume the user wanted the branch
                warnings.warn(
                    f"The ref {ref} is ambiguous. Perhaps it is both a tag and a branch in the repo? "
                    "Torchhub will now assume that it's a branch. "
                    "You can disambiguate tags and branches by explicitly passing refs/heads/branch_name or "
                    "refs/tags/tag_name as the ref. That might require using skip_validation=True."
                )
                disambiguated_branch_ref = f"refs/heads/{ref}"
                url = _git_archive_link(repo_owner, repo_name, ref=disambiguated_branch_ref)
                download_url_to_file(url, cached_file, progress=False)
            else:
                raise

        with zipfile.ZipFile(cached_file) as cached_zipfile:
            # The zipball's single top-level folder carries the real repo name.
            extraced_repo_name = cached_zipfile.infolist()[0].filename
            extracted_repo = os.path.join(hub_dir, extraced_repo_name)
            _remove_if_exists(extracted_repo)
            # Unzip the code and rename the base folder
            cached_zipfile.extractall(hub_dir)

        _remove_if_exists(cached_file)
        _remove_if_exists(repo_dir)
        shutil.move(extracted_repo, repo_dir)  # rename the repo

    return repo_dir
+
+
def _check_repo_is_trusted(repo_owner, repo_name, owner_name_branch, trust_repo, calling_fn="load"):
    """Enforce the ``trust_repo`` policy before code from a hub repo is run.

    A repo counts as trusted when its "owner_name" is recorded in the
    ``trusted_list`` file, when a checkout already exists in the hub dir
    (legacy behaviour, pre-dating the allowlist), or when the owner is in
    ``_TRUSTED_REPO_OWNERS``.

    Args:
        repo_owner: GitHub owner of the repo.
        repo_name: name of the repo.
        owner_name_branch: "<owner>_<name>_<branch>" cache-folder name.
        trust_repo: None (warn), True (trust), False (always prompt),
            or "check" (prompt only when not already trusted).
        calling_fn: public API name used in the warning text.

    Raises:
        Exception: if the user declines to trust the repository.
        ValueError: if an interactive answer is not recognized.
    """
    hub_dir = get_dir()
    filepath = os.path.join(hub_dir, "trusted_list")

    if not os.path.exists(filepath):
        Path(filepath).touch()
    with open(filepath) as file:
        trusted_repos = tuple(line.strip() for line in file)

    # To minimize friction of introducing the new trust_repo mechanism, we consider that
    # if a repo was already downloaded by torchhub, then it is already trusted (even if it's not in the allowlist)
    trusted_repos_legacy = next(os.walk(hub_dir))[1]

    owner_name = '_'.join([repo_owner, repo_name])
    is_trusted = (
        owner_name in trusted_repos
        or owner_name_branch in trusted_repos_legacy
        or repo_owner in _TRUSTED_REPO_OWNERS
    )

    # TODO: Remove `None` option in 2.0 and change the default to "check"
    if trust_repo is None:
        if not is_trusted:
            # BUG FIX: the second string fragment was missing its f-prefix,
            # so the literal text "{calling_fn}" leaked into the warning.
            warnings.warn(
                "You are about to download and run code from an untrusted repository. In a future release, this won't "
                f"be allowed. To add the repository to your trusted list, change the command to {calling_fn}(..., "
                "trust_repo=False) and a command prompt will appear asking for an explicit confirmation of trust, "
                f"or {calling_fn}(..., trust_repo=True), which will assume that the prompt is to be answered with "
                f"'yes'. You can also use {calling_fn}(..., trust_repo='check') which will only prompt for "
                f"confirmation if the repo is not already trusted. This will eventually be the default behaviour")
        return

    if (trust_repo is False) or (trust_repo == "check" and not is_trusted):
        response = input(
            f"The repository {owner_name} does not belong to the list of trusted repositories and as such cannot be downloaded. "
            "Do you trust this repository and wish to add it to the trusted list of repositories (y/N)?")
        if response.lower() in ("y", "yes"):
            if is_trusted:
                print("The repository is already trusted.")
        elif response.lower() in ("n", "no", ""):
            raise Exception("Untrusted repository.")
        else:
            raise ValueError(f"Unrecognized response {response}.")

    # At this point we're sure that the user trusts the repo (or wants to trust it)
    if not is_trusted:
        with open(filepath, "a") as file:
            file.write(owner_name + "\n")
+
+
+def _check_module_exists(name):
+ import importlib.util
+ return importlib.util.find_spec(name) is not None
+
+
def _check_dependencies(m):
    """Raise RuntimeError if any package listed in the hubconf module's
    ``dependencies`` attribute is not importable."""
    dependencies = _load_attr_from_module(m, VAR_DEPENDENCY)
    if dependencies is None:
        # The hubconf declares no dependencies; nothing to verify.
        return
    missing_deps = [pkg for pkg in dependencies if not _check_module_exists(pkg)]
    if missing_deps:
        raise RuntimeError(f"Missing dependencies: {', '.join(missing_deps)}")
+
+
def _load_entry_from_hubconf(m, model):
    """Look up the callable named ``model`` in hubconf module ``m``, after
    verifying the module's declared dependencies are installed."""
    if not isinstance(model, str):
        raise ValueError('Invalid input: model should be a string of function name')

    # Note that if a missing dependency is imported at top level of hubconf, it will
    # throw before this function. It's a chicken and egg situation where we have to
    # load hubconf to know what're the dependencies, but to import hubconf it requires
    # a missing package. This is fine, Python will throw proper error message for users.
    _check_dependencies(m)

    func = _load_attr_from_module(m, model)
    if callable(func):
        return func
    # Covers both "attribute absent" (None) and "present but not callable".
    raise RuntimeError(f'Cannot find callable {model} in hubconf')
+
+
def get_dir():
    r"""
    Get the Torch Hub cache directory used for storing downloaded models & weights.

    Unless overridden with :func:`~torch.hub.set_dir`, the directory is
    ``$TORCH_HOME/hub``, where ``$TORCH_HOME`` defaults to
    ``$XDG_CACHE_HOME/torch`` and ``$XDG_CACHE_HOME`` falls back to
    ``~/.cache`` when the environment variable is not set.
    """
    # Issue warning to move data if old env is set
    if os.getenv('TORCH_HUB'):
        warnings.warn('TORCH_HUB is deprecated, please use env TORCH_HOME instead')

    # A directory chosen via set_dir() always wins over environment defaults.
    if _hub_dir is not None:
        return _hub_dir
    return os.path.join(_get_torch_home(), 'hub')
+
+
def set_dir(d):
    r"""
    Optionally set the Torch Hub directory used to save downloaded models & weights.

    Args:
        d (str): path to a local folder to save downloaded models & weights.
    """
    global _hub_dir
    # Expand a leading "~" so subsequent os.path operations get a real path.
    _hub_dir = os.path.expanduser(d)
+
+
def list(github, force_reload=False, skip_validation=False, trust_repo=None):
    r"""
    List all callable entrypoints available in the repo specified by ``github``.

    Args:
        github (str): a string with format "repo_owner/repo_name[:ref]" with an optional
            ref (tag or branch). If ``ref`` is not specified, the default branch is assumed
            to be ``main`` if it exists, and otherwise ``master``.
            Example: 'pytorch/vision:0.10'
        force_reload (bool, optional): whether to discard the existing cache and force a fresh
            download. Default is ``False``.
        skip_validation (bool, optional): if ``False``, torchhub will check that the branch or
            commit specified by the ``github`` argument properly belongs to the repo owner.
            This will make requests to the GitHub API; you can specify a non-default GitHub
            token by setting the ``GITHUB_TOKEN`` environment variable. Default is ``False``.
        trust_repo (bool, str or None): ``"check"``, ``True``, ``False`` or ``None``.
            Introduced in v1.12; helps ensure users only run code from repos they trust.

            - If ``False``, a prompt will ask the user whether the repo should be trusted.
            - If ``True``, the repo will be added to the trusted list and loaded without
              requiring explicit confirmation.
            - If ``"check"``, the repo will be checked against the list of trusted repos in
              the cache; if absent, behaviour falls back to ``trust_repo=False``.
            - If ``None``: a warning is raised inviting the user to set ``trust_repo``
              explicitly. Present for backward compatibility only; removed in v2.0.

            Default is ``None`` and will eventually change to ``"check"`` in v2.0.

    Returns:
        list: The available callables entrypoint

    Example:
        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_HUB)
        >>> entrypoints = torch.hub.list('pytorch/vision', force_reload=True)
    """
    repo_dir = _get_cache_or_reload(github, force_reload, trust_repo, "list",
                                    verbose=True, skip_validation=skip_validation)

    with _add_to_sys_path(repo_dir):
        hub_module = _import_module(MODULE_HUBCONF, os.path.join(repo_dir, MODULE_HUBCONF))

    # Entrypoints are the public callables defined in hubconf.py; names with a
    # leading '_' are treated as internal helpers.
    return [name for name in dir(hub_module)
            if callable(getattr(hub_module, name)) and not name.startswith('_')]
+
+
def help(github, model, force_reload=False, skip_validation=False, trust_repo=None):
    r"""
    Show the docstring of entrypoint ``model``.

    Args:
        github (str): a string with format "repo_owner/repo_name[:ref]" with an optional
            ref (a tag or a branch). If ``ref`` is not specified, the default branch is
            assumed to be ``main`` if it exists, and otherwise ``master``.
            Example: 'pytorch/vision:0.10'
        model (str): a string of entrypoint name defined in repo's ``hubconf.py``
        force_reload (bool, optional): whether to discard the existing cache and force a fresh
            download. Default is ``False``.
        skip_validation (bool, optional): if ``False``, torchhub will check that the ref
            specified by the ``github`` argument properly belongs to the repo owner. This
            will make requests to the GitHub API; you can specify a non-default GitHub token
            by setting the ``GITHUB_TOKEN`` environment variable. Default is ``False``.
        trust_repo (bool, str or None): ``"check"``, ``True``, ``False`` or ``None``.
            Introduced in v1.12; helps ensure users only run code from repos they trust.

            - If ``False``, a prompt will ask the user whether the repo should be trusted.
            - If ``True``, the repo will be added to the trusted list and loaded without
              requiring explicit confirmation.
            - If ``"check"``, the repo will be checked against the list of trusted repos in
              the cache; if absent, behaviour falls back to ``trust_repo=False``.
            - If ``None``: a warning is raised inviting the user to set ``trust_repo``
              explicitly. Present for backward compatibility only; removed in v2.0.

            Default is ``None`` and will eventually change to ``"check"`` in v2.0.

    Example:
        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_HUB)
        >>> print(torch.hub.help('pytorch/vision', 'resnet18', force_reload=True))
    """
    repo_dir = _get_cache_or_reload(github, force_reload, trust_repo, "help",
                                    verbose=True, skip_validation=skip_validation)

    with _add_to_sys_path(repo_dir):
        hub_module = _import_module(MODULE_HUBCONF, os.path.join(repo_dir, MODULE_HUBCONF))

    # The entrypoint's own docstring is the help text.
    entry = _load_entry_from_hubconf(hub_module, model)
    return entry.__doc__
+
+
def load(repo_or_dir, model, *args, source='github', trust_repo=None, force_reload=False, verbose=True,
         skip_validation=False,
         **kwargs):
    r"""
    Load a model from a github repo or a local directory.

    Note: Loading a model is the typical use case, but this can also be used to
    for loading other objects such as tokenizers, loss functions, etc.

    If ``source`` is 'github', ``repo_or_dir`` is expected to be of the form
    ``repo_owner/repo_name[:ref]`` with an optional ref (a tag or a branch).
    If ``source`` is 'local', ``repo_or_dir`` is expected to be a path to a
    local directory.

    Args:
        repo_or_dir (str): If ``source`` is 'github', a github repo spec
            ``repo_owner/repo_name[:ref]`` with an optional ref (tag or branch),
            for example 'pytorch/vision:0.10'. If ``ref`` is not specified, the
            default branch is assumed to be ``main`` if it exists, and otherwise
            ``master``. If ``source`` is 'local', a path to a local directory.
        model (str): the name of a callable (entrypoint) defined in the
            repo/dir's ``hubconf.py``.
        *args (optional): the corresponding args for callable ``model``.
        source (str, optional): 'github' or 'local'. Specifies how
            ``repo_or_dir`` is to be interpreted. Default is 'github'.
        trust_repo (bool, str or None): ``"check"``, ``True``, ``False`` or ``None``.
            Introduced in v1.12; helps ensure users only run code from repos they trust.

            - If ``False``, a prompt will ask the user whether the repo should be trusted.
            - If ``True``, the repo will be added to the trusted list and loaded without
              requiring explicit confirmation.
            - If ``"check"``, the repo will be checked against the list of trusted repos in
              the cache; if absent, behaviour falls back to ``trust_repo=False``.
            - If ``None``: a warning is raised inviting the user to set ``trust_repo``
              explicitly. Present for backward compatibility only; removed in v2.0.

            Default is ``None`` and will eventually change to ``"check"`` in v2.0.
        force_reload (bool, optional): whether to force a fresh download of the github repo
            unconditionally. Has no effect if ``source = 'local'``. Default is ``False``.
        verbose (bool, optional): If ``False``, mute messages about hitting local caches.
            Note that the message about first download cannot be muted. Has no effect if
            ``source = 'local'``. Default is ``True``.
        skip_validation (bool, optional): if ``False``, torchhub will check that the branch
            or commit specified by the ``github`` argument properly belongs to the repo
            owner. This will make requests to the GitHub API; you can specify a non-default
            GitHub token by setting the ``GITHUB_TOKEN`` environment variable.
            Default is ``False``.
        **kwargs (optional): the corresponding kwargs for callable ``model``.

    Returns:
        The output of the ``model`` callable when called with the given
        ``*args`` and ``**kwargs``.

    Example:
        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_HUB)
        >>> # from a github repo
        >>> repo = 'pytorch/vision'
        >>> model = torch.hub.load(repo, 'resnet50', weights='ResNet50_Weights.IMAGENET1K_V1')
        >>> # from a local directory
        >>> path = '/some/local/path/pytorch/vision'
        >>> # xdoctest: +SKIP
        >>> model = torch.hub.load(path, 'resnet50', weights='ResNet50_Weights.DEFAULT')
    """
    source = source.lower()
    if source not in ('github', 'local'):
        raise ValueError(
            f'Unknown source: "{source}". Allowed values: "github" | "local".')

    # For github sources, materialize (or reuse) a local checkout first; from
    # then on both sources are handled identically by _load_local.
    if source == 'github':
        repo_or_dir = _get_cache_or_reload(repo_or_dir, force_reload, trust_repo, "load",
                                           verbose=verbose, skip_validation=skip_validation)

    return _load_local(repo_or_dir, model, *args, **kwargs)
+
+
def _load_local(hubconf_dir, model, *args, **kwargs):
    r"""
    Load a model from a local directory with a ``hubconf.py``.

    Args:
        hubconf_dir (str): path to a local directory that contains a
            ``hubconf.py``.
        model (str): name of an entrypoint defined in the directory's
            ``hubconf.py``.
        *args (optional): the corresponding args for callable ``model``.
        **kwargs (optional): the corresponding kwargs for callable ``model``.

    Returns:
        a single model with corresponding pretrained weights.

    Example:
        >>> # xdoctest: +SKIP("stub local path")
        >>> path = '/some/local/path/pytorch/vision'
        >>> model = _load_local(path, 'resnet50', weights='ResNet50_Weights.IMAGENET1K_V1')
    """
    # Temporarily place the hub directory on sys.path so that hubconf.py can
    # resolve imports relative to itself while we load it.
    with _add_to_sys_path(hubconf_dir):
        hubconf_file = os.path.join(hubconf_dir, MODULE_HUBCONF)
        hubconf_module = _import_module(MODULE_HUBCONF, hubconf_file)
        entrypoint = _load_entry_from_hubconf(hubconf_module, model)
        result = entrypoint(*args, **kwargs)

    return result
+
+
def download_url_to_file(url: str, dst: str, hash_prefix: Optional[str] = None,
                         progress: bool = True) -> None:
    r"""Download object at the given URL to a local path.

    Args:
        url (str): URL of the object to download
        dst (str): Full path where object will be saved, e.g. ``/tmp/temporary_file``
        hash_prefix (str, optional): If not None, the SHA256 downloaded file should start with ``hash_prefix``.
            Default: None
        progress (bool, optional): whether or not to display a progress bar to stderr
            Default: True

    Raises:
        RuntimeError: if ``hash_prefix`` is given and the downloaded file's
            SHA256 digest does not start with it.

    Example:
        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_HUB)
        >>> # xdoctest: +REQUIRES(POSIX)
        >>> torch.hub.download_url_to_file('https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth', '/tmp/temporary_file')

    """
    file_size = None
    req = Request(url, headers={"User-Agent": "torch.hub"})
    u = urlopen(req)
    # Fix: close the HTTP response explicitly instead of leaking it until GC.
    try:
        meta = u.info()
        if hasattr(meta, 'getheaders'):
            content_length = meta.getheaders("Content-Length")
        else:
            content_length = meta.get_all("Content-Length")
        if content_length is not None and len(content_length) > 0:
            file_size = int(content_length[0])

        # We deliberately save it in a temp file and move it after
        # download is complete. This prevents a local working checkpoint
        # being overridden by a broken download.
        # We deliberately do not use NamedTemporaryFile to avoid restrictive
        # file permissions being applied to the downloaded file.
        dst = os.path.expanduser(dst)
        for _ in range(tempfile.TMP_MAX):
            tmp_dst = dst + '.' + uuid.uuid4().hex + '.partial'
            try:
                f = open(tmp_dst, 'w+b')
            except FileExistsError:
                continue
            break
        else:
            raise FileExistsError(errno.EEXIST, 'No usable temporary file name found')

        try:
            if hash_prefix is not None:
                sha256 = hashlib.sha256()
            with tqdm(total=file_size, disable=not progress,
                      unit='B', unit_scale=True, unit_divisor=1024) as pbar:
                while True:
                    buffer = u.read(8192)
                    if len(buffer) == 0:
                        break
                    f.write(buffer)
                    if hash_prefix is not None:
                        sha256.update(buffer)
                    pbar.update(len(buffer))

            f.close()
            if hash_prefix is not None:
                digest = sha256.hexdigest()
                if digest[:len(hash_prefix)] != hash_prefix:
                    raise RuntimeError(f'invalid hash value (expected "{hash_prefix}", got "{digest}")')
            shutil.move(f.name, dst)
        finally:
            # Safe to call close() twice; remove the partial file on failure.
            f.close()
            if os.path.exists(f.name):
                os.remove(f.name)
    finally:
        u.close()
+
+
+# Hub used to support automatically extracts from zipfile manually compressed by users.
+# The legacy zip format expects only one file from torch.save() < 1.6 in the zip.
+# We should remove this support since zipfile is now default zipfile format for torch.save().
+def _is_legacy_zip_format(filename: str) -> bool:
+ if zipfile.is_zipfile(filename):
+ infolist = zipfile.ZipFile(filename).infolist()
+ return len(infolist) == 1 and not infolist[0].is_dir()
+ return False
+
+
def _legacy_zip_load(filename: str, model_dir: str, map_location: MAP_LOCATION, weights_only: bool) -> Dict[str, Any]:
    """Extract a legacy (pre-1.6) single-file zip checkpoint and torch.load it.

    Args:
        filename (str): path to the zip archive; must contain exactly one file.
        model_dir (str): directory the member is extracted into.
        map_location: passed through to :func:`torch.load`.
        weights_only (bool): passed through to :func:`torch.load`.

    Raises:
        RuntimeError: if the archive does not contain exactly one member.
    """
    warnings.warn('Falling back to the old format < 1.6. This support will be '
                  'deprecated in favor of default zipfile format introduced in 1.6. '
                  'Please redo torch.save() to save it in the new zipfile format.')
    # Note: extractall() defaults to overwrite file if exists. No need to clean up beforehand.
    # We deliberately don't handle tarfile here since our legacy serialization format was in tar.
    # E.g. resnet18-5c106cde.pth which is widely used.
    with zipfile.ZipFile(filename) as f:
        members = f.infolist()
        if len(members) != 1:
            raise RuntimeError('Only one file(not dir) is allowed in the zipfile')
        f.extractall(model_dir)
        # Fix: typo `extraced_name` -> `extracted_name` (local variable only).
        extracted_name = members[0].filename
        extracted_file = os.path.join(model_dir, extracted_name)
    return torch.load(extracted_file, map_location=map_location, weights_only=weights_only)
+
+
def load_state_dict_from_url(
    url: str,
    model_dir: Optional[str] = None,
    map_location: MAP_LOCATION = None,
    progress: bool = True,
    check_hash: bool = False,
    file_name: Optional[str] = None,
    weights_only: bool = False,
) -> Dict[str, Any]:
    r"""Loads the Torch serialized object at the given URL.

    If downloaded file is a zip file, it will be automatically
    decompressed.

    If the object is already present in `model_dir`, it's deserialized and
    returned.
    The default value of ``model_dir`` is ``<hub_dir>/checkpoints`` where
    ``hub_dir`` is the directory returned by :func:`~torch.hub.get_dir`.

    Args:
        url (str): URL of the object to download
        model_dir (str, optional): directory in which to save the object
        map_location (optional): a function or a dict specifying how to remap storage locations (see torch.load)
        progress (bool, optional): whether or not to display a progress bar to stderr.
            Default: True
        check_hash(bool, optional): If True, the filename part of the URL should follow the naming convention
            ``filename-<sha256>.ext`` where ``<sha256>`` is the first eight or more
            digits of the SHA256 hash of the contents of the file. The hash is used to
            ensure unique names and to verify the contents of the file.
            Default: False
        file_name (str, optional): name for the downloaded file. Filename from ``url`` will be used if not set.
        weights_only(bool, optional): If True, only weights will be loaded and no complex pickled objects.
            Recommended for untrusted sources. See :func:`~torch.load` for more details.

    Example:
        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_HUB)
        >>> state_dict = torch.hub.load_state_dict_from_url('https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth')

    """
    # Issue warning to move data if old env is set
    if os.getenv('TORCH_MODEL_ZOO'):
        warnings.warn('TORCH_MODEL_ZOO is deprecated, please use env TORCH_HOME instead')

    if model_dir is None:
        hub_dir = get_dir()
        model_dir = os.path.join(hub_dir, 'checkpoints')

    # `exist_ok=True` replaces the old try/except-on-EEXIST dance; any other
    # OSError still propagates.
    os.makedirs(model_dir, exist_ok=True)

    parts = urlparse(url)
    filename = os.path.basename(parts.path)
    if file_name is not None:
        filename = file_name
    cached_file = os.path.join(model_dir, filename)
    if not os.path.exists(cached_file):
        sys.stderr.write(f'Downloading: "{url}" to {cached_file}\n')
        hash_prefix = None
        if check_hash:
            r = HASH_REGEX.search(filename)  # r is Optional[Match[str]]
            hash_prefix = r.group(1) if r else None
        download_url_to_file(url, cached_file, hash_prefix, progress=progress)

    if _is_legacy_zip_format(cached_file):
        return _legacy_zip_load(cached_file, model_dir, map_location, weights_only)
    return torch.load(cached_file, map_location=map_location, weights_only=weights_only)
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/library.py b/env-llmeval/lib/python3.10/site-packages/torch/library.py
new file mode 100644
index 0000000000000000000000000000000000000000..c48c2f68c944cef74e7c88f07311c5a6c3ce417a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/library.py
@@ -0,0 +1,502 @@
+from ._ops import OpOverload
+from typing import Any, Optional, Set, List
+import traceback
+import torch
+import weakref
+import functools
+import inspect
+import re
+import sys
+
+__all__ = [
+ 'Library',
+ 'impl',
+ 'define',
+ 'fallthrough_kernel',
+ 'impl_abstract',
+ 'get_ctx',
+]
+
+# Set containing the combination of (namespace, operator, DispatchKey) for which a new kernel has been registered
+# The keys in the set are of the form `namespace + "/" + op_name + "/" + dispatch_key`.
+# This set is maintained to ensure that two libraries don't try to override the exact same functionality to avoid
+# libraries calling into kernels not intended to be called.
+_impls: Set[str] = set()
+_defs: Set[str] = set()
+
+# prim is reserved by TorchScript interpreter
+_reserved_namespaces = ['prim']
+
def fallthrough_kernel():
    """
    A dummy function to pass to ``Library.impl`` in order to register a fallthrough.
    """
    # NOTE(review): presumably ``Library.impl`` recognizes this sentinel and
    # installs a dispatcher fallthrough instead of calling it — this function
    # body should therefore be unreachable.
    raise NotImplementedError("fallthrough_kernel() should never be called.")
+
class Library:
    """
    A class to create libraries that can be used to register new operators or
    override operators in existing libraries from Python.
    A user can optionally pass in a dispatch keyname if they only want to register
    kernels corresponding to only one specific dispatch key.

    To create a library to override operators in an existing library (with name ns), set the kind to "IMPL".
    To create a new library (with name ns) to register new operators, set the kind to "DEF".
    To create a fragment of a possibly existing library to register operators (and bypass
    the limitation that there is only one library for a given namespace), set the kind to
    "FRAGMENT".

    Args:
        ns: library name
        kind: "DEF", "IMPL" (default: "IMPL"), "FRAGMENT"
        dispatch_key: PyTorch dispatch key (default: "")
    """
    def __init__(self, ns, kind, dispatch_key=""):
        if kind not in ('IMPL', 'DEF', 'FRAGMENT'):
            raise ValueError("Unsupported kind: ", kind)

        if ns in _reserved_namespaces and (kind == "DEF" or kind == 'FRAGMENT'):
            raise ValueError(ns, " is a reserved namespace. Please try creating a library with another name.")

        # Record the caller's file/line so the C++ dispatcher can report where
        # this library was created from.
        frame = traceback.extract_stack(limit=3)[0]
        filename, lineno = frame.filename, frame.lineno
        self.m: Optional[Any] = torch._C._dispatch_library(kind, ns, dispatch_key, filename, lineno)
        self.ns = ns
        self._op_defs: Set[str] = set()
        self._op_impls: Set[str] = set()
        self._registration_handles: List["torch._library.utils.RegistrationHandle"] = []
        self.kind = kind
        self.dispatch_key = dispatch_key
        # Use a finalizer to setup the "destructor" instead of __del__.
        # Python __del__ can lead to weird things (globals and locals may already
        # be gone when __del__ actually gets called!). finalizers help the
        # situation because it lets us capture references and keeps them alive
        weakref.finalize(self, _del_library, _impls, self._op_impls, _defs, self._op_defs, self._registration_handles)

    def __repr__(self):
        # Fix: dropped the stray trailing '>' that produced mismatched
        # brackets in the printed representation.
        return f"Library(kind={self.kind}, ns={self.ns}, dispatch_key={self.dispatch_key})"

    def define(self, schema, alias_analysis="", *, tags=()):
        r'''Defines a new operator and its semantics in the ns namespace.

        Args:
            schema: function schema to define a new operator.
            alias_analysis (optional): Indicates if the aliasing properties of the operator arguments can be
                inferred from the schema (default behavior) or not ("CONSERVATIVE").
            tags (Tag | Sequence[Tag]): one or more torch.Tag to apply to this
                operator. Tagging an operator changes the operator's behavior
                under various PyTorch subsystems; please read the docs for the
                torch.Tag carefully before applying it.

        Returns:
            name of the operator as inferred from the schema.

        Example::
            >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LIBRARY)
            >>> my_lib = Library("foo", "DEF")
            >>> my_lib.define("sum(Tensor self) -> Tensor")
        '''
        # This is added because we also want to disallow PURE_FUNCTION alias analysis which is a valid
        # AliasAnalysis type in C++
        if alias_analysis not in ["", "FROM_SCHEMA", "CONSERVATIVE"]:
            raise RuntimeError(f"Invalid alias_analysis type {alias_analysis}")
        assert self.m is not None
        if isinstance(tags, torch.Tag):
            tags = (tags,)
        result = self.m.define(schema, alias_analysis, tuple(tags))
        qualname = self.ns + "::" + schema.split("(")[0]
        self._op_defs.add(qualname)
        _defs.add(qualname)
        return result

    def impl(self, op_name, fn, dispatch_key=''):
        r'''Registers the function implementation for an operator defined in the library.

        Args:
            op_name: operator name (along with the overload) or OpOverload object.
            fn: function that's the operator implementation for the input dispatch key or :func:`~fallthrough_kernel`
                to register a fallthrough.
            dispatch_key: dispatch key that the input function should be registered for. By default, it uses
                the dispatch key that the library was created with.

        Example::
            >>> my_lib = Library("aten", "IMPL")
            >>> def div_cpu(self, other):
            >>>     return self * (1 / other)
            >>> my_lib.impl("div.Tensor", div_cpu, "CPU")
        '''
        if not callable(fn):
            raise TypeError(f"Input function is required to be a callable but found type {type(fn)}")
        if dispatch_key == '':
            dispatch_key = self.dispatch_key

        if isinstance(op_name, str):
            name = op_name
        elif isinstance(op_name, OpOverload):
            name = op_name._schema.name
            overload_name = op_name._schema.overload_name
            if overload_name != '':
                name = name + '.' + overload_name
        else:
            raise RuntimeError("impl should be passed either a name or an OpOverload object as the first argument")

        key = self.ns + "/" + name.split("::")[-1] + "/" + dispatch_key
        if key in _impls:
            # TODO: in future, add more info about where the existing function is registered (this info is
            # today already returned by the C++ warning when impl is called but we error out before that)
            raise RuntimeError("This is not allowed since there's already a kernel registered from python overriding {}"
                               "'s behavior for {} dispatch key and {} namespace.".
                               format(name.split("::")[-1], dispatch_key, self.ns))

        if dispatch_key == "Meta":
            dispatcher_op_name = name
            if '::' not in dispatcher_op_name:
                dispatcher_op_name = f'{self.ns}::{dispatcher_op_name}'

            # Internally, we shouldn't be registering meta kernels for any operators that
            # have CompositeImplicitAutograd kernels.
            # Instead, we should be letting those decompositions run, and writing meta kernels
            # only for the base operators.
            if torch._C._dispatch_has_kernel_for_dispatch_key(dispatcher_op_name, "CompositeImplicitAutograd"):
                raise RuntimeError(
                    f"We should not register a meta kernel directly to the operator '{name}',"
                    " because it has a CompositeImplicitAutograd kernel in core."
                    " Instead we should let the operator decompose, and ensure that we have meta kernels"
                    " for the base ops that it decomposes into.")

        assert self.m is not None
        self.m.impl(name, dispatch_key if dispatch_key != "" else "CompositeImplicitAutograd", fn)

        _impls.add(key)
        self._op_impls.add(key)

    def _destroy(self):
        # Drop the C++ library handle and destroy all Python-side registration
        # handles owned by this Library.
        self.m = None
        for handle in self._registration_handles:
            handle.destroy()
        self._registration_handles.clear()
+
+
+def _del_library(captured_impls, op_impls, captured_defs, op_defs, registration_handles):
+ captured_impls -= op_impls
+ captured_defs -= op_defs
+ for handle in registration_handles:
+ handle.destroy()
+
+
+_keep_alive = []
+
+
+NAMELESS_SCHEMA = re.compile(r"\(.*\) -> .*")
+
+
@functools.singledispatch
def define(qualname, schema, *, lib=None, tags=()):
    r"""Defines a new operator.

    In PyTorch, defining an op (short for "operator") is a two step-process:
    - we need to define the op (by providing an operator name and schema)
    - we need to implement behavior for how the operator interacts with
      various PyTorch subsystems, like CPU/CUDA Tensors, Autograd, etc.

    This entrypoint defines the custom operator (the first step)
    you must then perform the second step by calling various
    ``impl_*`` APIs, like :func:`torch.library.impl` or
    :func:`torch.library.impl_abstract`.

    Args:
        qualname (str): The qualified name for the operator. Should be
            a string that looks like "namespace::name", e.g. "aten::sin".
            Operators in PyTorch need a namespace to
            avoid name collisions; a given operator may only be created once.
            If you are writing a Python library, we recommend the namespace to
            be the name of your top-level module.
        schema (str): The schema of the operator. E.g. "(Tensor x) -> Tensor"
            for an op that accepts one Tensor and returns one Tensor. It does
            not contain the operator name (that is passed in ``qualname``).
        lib (Optional[Library]): If provided, the lifetime of this operator
            will be tied to the lifetime of the Library object.
        tags (Tag | Sequence[Tag]): one or more torch.Tag to apply to this
            operator. Tagging an operator changes the operator's behavior
            under various PyTorch subsystems; please read the docs for the
            torch.Tag carefully before applying it.

    Example::
        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LIBRARY)
        >>> import torch
        >>> import numpy as np
        >>>
        >>> # Define the operator
        >>> torch.library.define("mylib::sin", "(Tensor x) -> Tensor")
        >>>
        >>> # Add implementations for the operator
        >>> @torch.library.impl("mylib::sin", "cpu")
        >>> def f(x):
        >>>     return torch.from_numpy(np.sin(x.numpy()))
        >>>
        >>> # Call the new operator from torch.ops.
        >>> x = torch.randn(3)
        >>> y = torch.ops.mylib.sin(x)
        >>> assert torch.allclose(y, x.sin())

    """
    if not isinstance(qualname, str):
        raise ValueError(
            f"define(qualname, schema): expected qualname "
            f"to be instance of str, got {type(qualname)}")
    namespace, name = torch._library.utils.parse_namespace(qualname)
    if lib is None:
        # An anonymous FRAGMENT library; kept alive for the process lifetime
        # so the definition is never finalized away.
        lib = Library(namespace, "FRAGMENT")
        _keep_alive.append(lib)
    if not NAMELESS_SCHEMA.fullmatch(schema):
        raise ValueError(
            f"define(qualname, schema, ...): expected schema "
            f"to look like e.g. \"(Tensor x) -> Tensor\" but "
            f"got \"{schema}\"")
    lib.define(name + schema, alias_analysis="", tags=tags)
+
+
@define.register
def _(lib: Library, schema, alias_analysis=""):
    """The old torch.library.define.
    We're keeping this around for BC reasons
    """
    def decorator(func):
        # Define the schema on the given Library, then immediately register
        # ``func`` as its implementation.
        op_name = lib.define(schema, alias_analysis)
        lib.impl(op_name, func)
        return func
    return decorator
+
+
@functools.singledispatch
def impl(qualname, types, func=None, *, lib=None):
    """Register an implementation for a device type for this operator.

    You may pass "default" for ``types`` to register this implementation as the
    default implementation for ALL device types.
    Please only use this if the implementation truly supports all device types;
    for example, this is true if it is a composition of built-in PyTorch operators.

    Some valid types are: "cpu", "cuda", "xla", "mps", "ipu", "xpu".

    Args:
        qualname (str): Should be a string that looks like "namespace::operator_name".
        types (str | Sequence[str]): The device types to register an impl to.
        func (optional): the implementation to register. If omitted, this
            returns a decorator that performs the registration.
        lib (Optional[Library]): If provided, the lifetime of this registration
            will be tied to the lifetime of the Library object.

    Examples:
        >>> import torch
        >>> import numpy as np
        >>>
        >>> # Define the operator
        >>> torch.library.define("mylibrary::sin", "(Tensor x) -> Tensor")
        >>>
        >>> # Add implementations for the cpu device
        >>> @torch.library.impl("mylibrary::sin", "cpu")
        >>> def f(x):
        >>>     return torch.from_numpy(np.sin(x.numpy()))
        >>>
        >>> x = torch.randn(3)
        >>> y = torch.ops.mylibrary.sin(x)
        >>> assert torch.allclose(y, x.sin())
    """
    if isinstance(types, str):
        types = (types,)
    # Dispatch-key names to register against, deduplicated.
    keys = set({})
    for typ in types:
        is_dispatch_key = torch._C._parse_dispatch_key(typ)
        if is_dispatch_key:
            # We also support passing a DispatchKey to impl. Please prefer using
            # the higher-level torch.library APIs and only pass DispatchKey to
            # torch.library.impl with caution (or even better, don't use this
            # option and file an issue on GitHub for what you need).
            # We don't advertise this to users because
            # it is very easy to shoot yourself in the foot.
            keys.add(typ)
        else:
            keys.add(_device_type_to_key(typ))

    def register(func):
        namespace, _ = torch._library.utils.parse_namespace(qualname)
        if lib is None:
            # Anonymous FRAGMENT library kept alive for the process lifetime
            # so the registration is never finalized away.
            use_lib = Library(namespace, "FRAGMENT")
            _keep_alive.append(use_lib)
        else:
            use_lib = lib
        for key in keys:
            use_lib.impl(qualname, func, key)

    # Decorator form when ``func`` is omitted; direct registration otherwise.
    if func is None:
        return register
    else:
        register(func)
+
+
+def _device_type_to_key(device_type: str) -> str:
+ if device_type == "default":
+ # This is technically not correct, because although all device_type
+ # DispatchKeys are included in CompositeExplicitAutograd,
+ # not everything in CompositeExplicitAutograd is associated with a
+ # device_type. I don't really care that much about the difference.
+ return "CompositeExplicitAutograd"
+ return torch._C._dispatch_key_for_device(device_type)
+
+
@impl.register
def _(lib: Library, name, dispatch_key=""):
    """Legacy torch.library.impl API. Kept around for BC"""
    # Returns a decorator that registers ``f`` as the kernel for ``name`` on
    # ``dispatch_key`` within the given Library, leaving ``f`` unchanged.
    def wrap(f):
        lib.impl(name, f, dispatch_key)
        return f
    return wrap
+
+
+
def impl_abstract(qualname, func=None, *, lib=None, _stacklevel=1):
    r"""Register an abstract implementation for this operator.

    An "abstract implementation" specifies the behavior of this operator on
    Tensors that carry no data. Given some input Tensors with certain properties
    (sizes/strides/storage_offset/device), it specifies what the properties of
    the output Tensors are.

    The abstract implementation has the same signature as the operator.
    It is run for both FakeTensors and meta tensors. To write an abstract
    implementation, assume that all Tensor inputs to the operator are
    regular CPU/CUDA/Meta tensors, but they do not have storage, and
    you are trying to return regular CPU/CUDA/Meta tensor(s) as output.
    The abstract implementation must consist of only PyTorch operations
    (and may not directly access the storage or data of any input or
    intermediate Tensors).

    This API may be used as a decorator (see examples).

    For a detailed guide on custom ops, please see
    https://docs.google.com/document/d/1W--T6wz8IY8fOI0Vm8BF44PdBgs283QvpelJZWieQWQ/edit

    Args:
        qualname (str): "namespace::operator_name" of the operator to register
            the abstract impl for.
        func (optional): the abstract implementation. If omitted, this returns
            a decorator that performs the registration.
        lib (Optional[Library]): If provided, the lifetime of this registration
            is tied to the lifetime of the Library object.
        _stacklevel (int): internal; how many frames up the caller sits, used
            to attribute the registration to the calling module.

    Examples:
        >>> import torch
        >>> import numpy as np
        >>> from torch import Tensor
        >>>
        >>> # Example 1: an operator without data-dependent output shape
        >>> torch.library.define(
        >>>     "mylib::custom_linear",
        >>>     "(Tensor x, Tensor weight, Tensor bias) -> Tensor")
        >>>
        >>> @torch.library.impl_abstract("mylib::custom_linear")
        >>> def custom_linear_abstract(x, weight, bias):
        >>>     assert x.dim() == 2
        >>>     assert weight.dim() == 2
        >>>     assert bias.dim() == 1
        >>>     assert x.shape[1] == weight.shape[1]
        >>>     assert weight.shape[0] == bias.shape[0]
        >>>     assert x.device == weight.device
        >>>
        >>>     return (x @ weight.t()) + bias
        >>>
        >>> # Example 2: an operator with data-dependent output shape
        >>> torch.library.define("mylib::custom_nonzero", "(Tensor x) -> Tensor")
        >>>
        >>> @torch.library.impl_abstract("mylib::custom_nonzero")
        >>> def custom_nonzero_abstract(x):
        >>>     # Number of nonzero-elements is data-dependent.
        >>>     # Since we cannot peek at the data in an abstract impl,
        >>>     # we use the ctx object to construct a new symint that
        >>>     # represents the data-dependent size.
        >>>     ctx = torch.library.get_ctx()
        >>>     nnz = ctx.new_dynamic_size()
        >>>     shape = [nnz, x.dim()]
        >>>     result = x.new_empty(shape, dtype=torch.int64)
        >>>     return result
        >>>
        >>> @torch.library.impl("mylib::custom_nonzero", "cpu")
        >>> def custom_nonzero_cpu(x):
        >>>     x_np = x.numpy()
        >>>     res = np.stack(np.nonzero(x_np), axis=1)
        >>>     return torch.tensor(res, device=x.device)

    """
    # Capture where the registration happened for error reporting.
    source = torch._library.utils.get_source(_stacklevel + 1)
    frame = sys._getframe(_stacklevel)
    caller_module = inspect.getmodule(frame)
    # Can be none if you call impl_abstract from somewhere there isn't a module
    # (e.g. __main__)
    caller_module_name = None if caller_module is None else caller_module.__name__

    # TODO(rzou): We're gonna need to stage this change with torchvision,
    # since torchvision is github first.
    if caller_module_name is not None and caller_module_name.startswith("torchvision."):
        caller_module_name = None

    def inner(func):
        entry = torch._library.simple_registry.singleton.find(qualname)
        if caller_module_name is not None:
            # Defer the pystub consistency check to the first invocation.
            func_to_register = _check_pystubs_once(func, qualname, caller_module_name)
        else:
            func_to_register = func

        handle = entry.abstract_impl.register(func_to_register, source)
        if lib is not None:
            lib._registration_handles.append(handle)
        return func

    # Decorator form when ``func`` is omitted.
    if func is None:
        return inner
    return inner(func)
+
+
def _check_pystubs_once(func, qualname, actual_module_name):
    """Wrap ``func`` so the first call verifies pystub consistency.

    If the op was defined in C++, then we want to make sure there was an
    m.impl_abstract_pystub(module, ...) call and that the module is the
    same as the module that called torch.library.impl_abstract. The check
    runs once; subsequent calls go straight through to ``func``.
    """
    checked = False

    @functools.wraps(func)  # keep the wrapped impl's name/docstring visible
    def inner(*args, **kwargs):
        nonlocal checked
        if checked:
            return func(*args, **kwargs)

        op = torch._library.utils.lookup_op(qualname)
        if op._defined_in_python:
            checked = True
            return func(*args, **kwargs)

        maybe_pystub = torch._C._dispatch_pystub(
            op._schema.name,
            op._schema.overload_name)
        if not maybe_pystub:
            # Fix: added the missing space between the two concatenated
            # sentences ("...find one. Please add...").
            raise RuntimeError(
                f"Operator '{qualname}' was defined in C++ and has a Python "
                f"abstract impl. In this situation, it is required to have a "
                f"C++ `m.impl_abstract_pystub` call, but we could not find one. "
                f"Please add a call to `m.impl_abstract_pystub(\"{actual_module_name}\");` "
                f"to the C++ TORCH_LIBRARY block the operator was "
                f"defined in.")
        pystub_module = maybe_pystub[0]
        if actual_module_name != pystub_module:
            raise RuntimeError(
                f"Operator '{qualname}' specified that its python abstract impl "
                f"is in the Python module '{pystub_module}' but it was actually found "
                f"in '{actual_module_name}'. Please either move the abstract impl "
                f"or correct the m.impl_abstract_pystub call.")
        checked = True
        return func(*args, **kwargs)
    return inner
+
+
+# NOTE [ctx inside the fake implementation]
+# If a user has an operator with data-dependent output shape, then when writing
+# a fake implementation they must query the current ctx and use methods on the
+# ctx to construct a new unbacked symint.
+#
+# This is done via us setting the global_ctx_getter function every time a fake
+# implementation is invoked.
def get_ctx() -> "torch._library.abstract_impl.AbstractImplCtx":
    """get_ctx() returns the current AbstractImplCtx object.

    Calling ``get_ctx()`` is only valid inside of an abstract impl
    (see :func:`torch.library.impl_abstract` for more usage details).
    """
    # Delegates to the global ctx getter installed while a fake/abstract
    # implementation is being invoked; see NOTE [ctx inside the fake
    # implementation] above.
    return torch._library.abstract_impl.global_ctx_getter()
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8cbb1fb07ff885d5fc4d26667e5fb4a1670efb9e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/__init__.py
@@ -0,0 +1,78 @@
+"""torch.multiprocessing is a wrapper around the native :mod:`multiprocessing` module.
+
+It registers custom reducers, that use shared memory to provide shared
+views on the same data in different processes. Once the tensor/storage is moved
+to shared_memory (see :func:`~torch.Tensor.share_memory_`), it will be possible
+to send it to other processes without making any copies.
+
+The API is 100% compatible with the original module - it's enough to change
+``import multiprocessing`` to ``import torch.multiprocessing`` to have all the
+tensors sent through the queues or shared via other mechanisms, moved to shared
+memory.
+
+Because of the similarity of APIs we do not document most of this package
+contents, and we recommend referring to very good docs of the original module.
+"""
+import multiprocessing
+import sys
+
+import torch
+from .reductions import init_reductions
+
+__all__ = ["set_sharing_strategy", "get_sharing_strategy", "get_all_sharing_strategies"]
+
+
+from multiprocessing import * # noqa: F403
+
+
+__all__ += multiprocessing.__all__ # noqa: PLE0605 type: ignore[attr-defined]
+
+
+# This call adds a Linux specific prctl(2) wrapper function to this module.
+# See https://github.com/pytorch/pytorch/pull/14391 for more information.
+torch._C._multiprocessing_init()
+
+
+"""Add helper function to spawn N processes and wait for completion of any of
+them. This depends `mp.get_context` which was added in Python 3.4."""
+from .spawn import (
+ ProcessContext,
+ ProcessExitedException,
+ ProcessRaisedException,
+ spawn,
+ SpawnContext,
+ start_processes,
+)
+
+
+if sys.platform == "darwin" or sys.platform == "win32":
+ _sharing_strategy = "file_system"
+ _all_sharing_strategies = {"file_system"}
+else:
+ _sharing_strategy = "file_descriptor"
+ _all_sharing_strategies = {"file_descriptor", "file_system"}
+
+
def set_sharing_strategy(new_strategy):
    """Set the strategy for sharing CPU tensors.

    Args:
        new_strategy (str): Name of the selected strategy. Should be one of
            the values returned by :func:`get_all_sharing_strategies()`.

    Raises:
        ValueError: if ``new_strategy`` is not a supported strategy on this
            system.
    """
    global _sharing_strategy
    # Validate with a real exception: `assert` is stripped under `python -O`,
    # which would silently accept an unsupported strategy.
    if new_strategy not in _all_sharing_strategies:
        raise ValueError(
            f"Invalid sharing strategy: {new_strategy!r}. Expected one of "
            f"{sorted(_all_sharing_strategies)}")
    _sharing_strategy = new_strategy
+
+
def get_sharing_strategy():
    """Return the current strategy for sharing CPU tensors."""
    # Module-level state written by :func:`set_sharing_strategy`.
    return _sharing_strategy
+
+
def get_all_sharing_strategies():
    """Return a set of sharing strategies supported on a current system."""
    # Return a copy so callers cannot mutate the module's internal set of
    # supported strategies.
    return set(_all_sharing_strategies)
+
+
+init_reductions()
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..99b2db22c4f143a9aac369108cf3b3a691b21721
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/_atfork.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/_atfork.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1d95d10f0993c6620141d9af10fe4abd6e01aa53
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/_atfork.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/pool.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/pool.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1572dc1d706b968f74cbf93d4c1c0f4455143f44
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/pool.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/queue.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/queue.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c5b6bb97c2f7b4dc1074ae8c524f488e3fe4d917
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/queue.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/reductions.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/reductions.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4049d83b2ad450b18aab18033dd8b3d00114245b
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/reductions.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/spawn.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/spawn.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..59eff19f057dd2ed8f3997eee2d6ace4def814e9
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/spawn.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/_atfork.py b/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/_atfork.py
new file mode 100644
index 0000000000000000000000000000000000000000..92a3280fee78b538230dfa63862c4681c1a5b186
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/_atfork.py
@@ -0,0 +1,33 @@
+import sys
+
+__all__ = ["register_after_fork"]
+
# Platform-specific backend for register_after_fork(). Windows has no
# os.fork(), so we route through multiprocessing's own after-fork hook;
# on every other platform we use the interpreter-level os.register_at_fork().
if sys.platform == "win32":
    import multiprocessing.util as _util

    def _register(func):
        # multiprocessing.util.register_after_fork(key, callable) invokes
        # ``callable(key)`` in the child; adapt that to a zero-arg callback.
        def wrapper(arg):
            func()

        _util.register_after_fork(_register, wrapper)

else:
    import os

    def _register(func):
        # Fires in the child after *any* os.fork(), not only for processes
        # created through the multiprocessing module.
        os.register_at_fork(after_in_child=func)
+
+
def register_after_fork(func):
    """Register a callable to be executed in the child process after a fork.

    Note:
        In python < 3.7 this will only work with processes created using the
        ``multiprocessing`` module. In python >= 3.7 it also works with
        ``os.fork()``.

    Args:
        func (function): Function taking no arguments to be called in the child after fork

    """
    # ``_register`` is chosen at import time based on sys.platform (see above).
    _register(func)
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/pool.py b/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/pool.py
new file mode 100644
index 0000000000000000000000000000000000000000..6915203566469cfaf7170d87894ce03cc8348dd5
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/pool.py
@@ -0,0 +1,52 @@
+import multiprocessing.pool
+import multiprocessing.util as util
+
+from .queue import SimpleQueue
+
+
def clean_worker(*args, **kwargs):
    """Drop-in replacement for ``multiprocessing.pool.worker``.

    Runs the stock worker loop, then forces a garbage-collection pass:
    regular multiprocessing workers don't fully clean up after themselves,
    so we trigger gc explicitly to make sure all destructors are called
    before the worker process exits.
    """
    multiprocessing.pool.worker(*args, **kwargs)
    import gc

    gc.collect()
+
+
class Pool(multiprocessing.pool.Pool):
    """Pool implementation which uses our version of SimpleQueue.

    This lets us pass tensors in shared memory across processes instead of
    serializing the underlying data.
    """

    def _setup_queues(self):
        # Override the stock pipe setup so both directions use the
        # ForkingPickler-backed SimpleQueue defined in this package.
        self._inqueue = SimpleQueue()
        self._outqueue = SimpleQueue()
        # _quick_put/_quick_get bypass queue locking, mirroring the parent
        # class's internal protocol.
        self._quick_put = self._inqueue._writer.send
        self._quick_get = self._outqueue._reader.recv

    def _repopulate_pool(self):
        """Increase the number of pool processes to the specified number.

        Bring the number of pool processes up to the specified number, for use after
        reaping workers which have exited.
        """
        for i in range(self._processes - len(self._pool)):
            # changed worker -> clean_worker
            args = (
                self._inqueue,
                self._outqueue,
                self._initializer,
                self._initargs,
                self._maxtasksperchild,
            )
            # Newer multiprocessing versions pass an extra _wrap_exception
            # flag to worker(); mirror that only when the attribute exists.
            if hasattr(self, "_wrap_exception"):
                args += (self._wrap_exception,)
            w = self.Process(target=clean_worker, args=args)
            self._pool.append(w)
            w.name = w.name.replace("Process", "PoolWorker")
            w.daemon = True
            w.start()
            util.debug("added worker")
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/queue.py b/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/queue.py
new file mode 100644
index 0000000000000000000000000000000000000000..99da145e75f1a9f6fb2467251948bc74361cbc02
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/queue.py
@@ -0,0 +1,42 @@
+import io
+import multiprocessing.queues
+import pickle
+from multiprocessing.reduction import ForkingPickler
+
+
class ConnectionWrapper:
    """Proxy class for _multiprocessing.Connection which uses ForkingPickler for object serialization."""

    def __init__(self, conn):
        self.conn = conn

    def send(self, obj):
        # Serialize through ForkingPickler so any reducers registered for
        # torch types (shared-memory tensors, storages) take effect.
        buffer = io.BytesIO()
        pickler = ForkingPickler(buffer, pickle.HIGHEST_PROTOCOL)
        pickler.dump(obj)
        self.send_bytes(buffer.getvalue())

    def recv(self):
        return pickle.loads(self.recv_bytes())

    def __getattr__(self, name):
        # Anything we don't implement falls through to the raw connection
        # (send_bytes, recv_bytes, fileno, close, ...).
        if "conn" in self.__dict__:
            return getattr(self.conn, name)
        raise AttributeError(f"'{type(self).__name__}' object has no attribute 'conn'")
+
+
class Queue(multiprocessing.queues.Queue):
    """multiprocessing Queue whose pipe ends serialize via ForkingPickler."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Replace both raw pipe ends with wrappers, then rebind the bound
        # methods the parent class cached during its own __init__.
        wrapped_reader = ConnectionWrapper(self._reader)
        wrapped_writer = ConnectionWrapper(self._writer)
        self._reader: ConnectionWrapper = wrapped_reader
        self._writer: ConnectionWrapper = wrapped_writer
        self._send = wrapped_writer.send
        self._recv = wrapped_reader.recv
+
+
class SimpleQueue(multiprocessing.queues.SimpleQueue):
    """SimpleQueue variant whose connections serialize via ForkingPickler."""

    def _make_methods(self):
        # Wrap the raw pipe ends exactly once; _make_methods may run again
        # (e.g. after the queue is unpickled in a child process), and the
        # guard prevents double-wrapping.
        if not isinstance(self._reader, ConnectionWrapper):
            self._reader: ConnectionWrapper = ConnectionWrapper(self._reader)
            self._writer: ConnectionWrapper = ConnectionWrapper(self._writer)
        super()._make_methods()  # type: ignore[misc]
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/reductions.py b/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/reductions.py
new file mode 100644
index 0000000000000000000000000000000000000000..f5eb0a6abd86f2d2036032aec894298862a322cf
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/reductions.py
@@ -0,0 +1,594 @@
+import multiprocessing
+import os
+import threading
+from multiprocessing.reduction import ForkingPickler
+from multiprocessing.util import register_after_fork
+from typing import Union
+
+import torch
+import torch.utils.hooks
+from torch._namedtensor_internals import check_serializing_named_tensor
+
+try:
+ # Early load resource_sharer to prevent a partially initialized instance
+ # from being inherited in a forked child process. The reduce_storage method
+ # requires this module indirectly through DupFd(). The built-in mp.Queue
+ # class pickles arguments in a background thread which may overlap with the
+ # fork.
+ import multiprocessing.resource_sharer
+except ImportError:
+ pass
+
+
class StorageWeakRef:
    r"""A weak reference to a Storage.

    The cdata member is a Python number containing the integer representation of
    the Storage pointer.
    """

    __slots__ = ["cdata", "_free_weak_ref"]

    def __init__(self, storage):
        self.cdata = storage._weak_ref()
        # Save a direct reference to _free_weak_ref because the `torch` module
        # might be cleared during Python shutdown before this module is cleared.
        self._free_weak_ref = torch.Storage._free_weak_ref  # type: ignore[attr-defined]

    @classmethod
    def from_weakref(cls, cdata):
        """Alternate constructor from an already-obtained weak-ref pointer."""
        instance = cls.__new__(cls)
        instance.cdata = cdata
        instance._free_weak_ref = torch.Storage._free_weak_ref  # type: ignore[attr-defined]
        return instance

    def expired(self):
        """Return True if the referenced Storage has already been freed."""
        return torch.Storage._expired(self.cdata)  # type: ignore[attr-defined]

    def __del__(self):
        self._free_weak_ref(self.cdata)

    def __hash__(self):
        return self.cdata

    def __eq__(self, other):
        if id(self) == id(other):
            return True
        # Fix: comparing against a non-StorageWeakRef (e.g. ``ref == None``)
        # used to raise AttributeError on ``other.cdata``. Return
        # NotImplemented so Python falls back to the other operand / identity.
        if not isinstance(other, StorageWeakRef):
            return NotImplemented
        return self.cdata == other.cdata
+
+
class SharedCache(dict):
    """Dictionary from multiprocessing handles to StorageWeakRef."""

    def __init__(self):
        # free_dead_references() runs whenever the size exceeds the current
        # limit; the limit itself scales with the number of live entries.
        self.limit = 128
        # `fork` inherits lock state, so forking while the lock is held could
        # deadlock the child. Reset the lock to a fresh object after fork,
        # following the multiprocessing library's own design.
        self._after_fork()
        register_after_fork(self, SharedCache._after_fork)

    def _after_fork(self):
        self.lock = threading.Lock()

    def get(self, key):
        with self.lock:
            return dict.get(self, key)

    def __setitem__(self, key, storage_ref):
        with self.lock:
            dict.__setitem__(self, key, storage_ref)
            if len(self) > self.limit:
                self.free_dead_references()

    def free_dead_references(self):
        # Drop entries whose storage is gone; grow the limit with the number
        # of entries that are still alive (never below the 128 floor).
        alive = 0
        for handle, ref in list(self.items()):
            if not ref.expired():
                alive += 1
            else:
                del self[handle]
        self.limit = max(128, 2 * alive)
+
+
# Process-global mapping from multiprocessing handles to StorageWeakRef
# objects, so a storage received several times is materialized only once.
shared_cache = SharedCache()
+
+
def rebuild_event(device, handle):
    # Reopen a CUDA event in the receiving process from its IPC handle.
    return torch.cuda.Event.from_ipc_handle(device, handle)
+
+
def reduce_event(event):
    # Pickle a torch.cuda.Event as (device, IPC handle) so the receiver can
    # rebuild the same underlying CUDA event via rebuild_event().
    handle = event.ipc_handle()
    return (rebuild_event, (event.device, handle))
+
+
def rebuild_tensor(cls, storage, metadata):
    """Reconstruct a tensor of class ``cls`` on top of the shared ``storage``.

    ``metadata`` is the (storage_offset, size, stride, requires_grad) tuple
    produced by reduce_tensor().
    """
    offset, size, stride, requires_grad = metadata
    tensor = torch._utils._rebuild_tensor(storage, offset, size, stride)
    if cls != torch.nn.parameter.Parameter:
        tensor.requires_grad = requires_grad
        return tensor
    # requires_grad has to go through the Parameter constructor rather than
    # be assigned afterwards, because it's an important check for Integer
    # Tensors to have requires_grad=False (or else they raise an error).
    return torch.nn.parameter.Parameter(tensor, requires_grad=requires_grad)
+
+
def rebuild_cuda_tensor(
    tensor_cls,
    tensor_size,
    tensor_stride,
    tensor_offset,
    storage_cls,
    dtype,
    storage_device,
    storage_handle,
    storage_size_bytes,
    storage_offset_bytes,
    requires_grad,
    ref_counter_handle,
    ref_counter_offset,
    event_handle,
    event_sync_required,
):
    """Reconstruct a CUDA tensor from the CUDA-IPC metadata emitted by reduce_tensor().

    The storage is looked up in (or inserted into) the process-wide
    shared_cache keyed by (storage_handle, storage_offset_bytes), so a given
    IPC allocation is only opened once per receiving process.
    """
    # If storage_handle is None, storage points to nullptr.
    if storage_handle is None or storage_size_bytes == 0:
        storage = storage_cls(0, dtype=dtype, device=storage_device, _internal=True)
    else:
        storage = storage_from_cache(
            storage_cls, (storage_handle, storage_offset_bytes)
        )
        if storage is None:
            torch.cuda._lazy_init()
            storage = storage_cls._new_shared_cuda(
                storage_device,
                storage_handle,
                storage_size_bytes,
                storage_offset_bytes,
                ref_counter_handle,
                ref_counter_offset,
                event_handle,
                event_sync_required,
            )
            shared_cache[(storage_handle, storage_offset_bytes)] = StorageWeakRef(
                storage
            )
        else:
            # We already ref counting this Storage, but producer needs new ref-counters to be released.
            storage_cls._release_ipc_counter(
                ref_counter_handle, ref_counter_offset, device=storage_device
            )

    # Normalize to the untyped storage before wrapping it back up with dtype.
    _storage = (
        storage
        if isinstance(storage, torch.UntypedStorage)
        else storage._untyped_storage
    )

    t = torch._utils._rebuild_tensor(
        torch.storage.TypedStorage(wrap_storage=_storage, dtype=dtype, _internal=True),
        tensor_offset,
        tensor_size,
        tensor_stride,
    )

    if tensor_cls == torch.nn.parameter.Parameter:
        # It is crucial for integer tensors to receive
        # the requires_grad=False as an argument in the constructor
        t = torch.nn.parameter.Parameter(t, requires_grad=requires_grad)
    else:
        t.requires_grad = requires_grad

    return t
+
+
def reduce_tensor(tensor):
    """Pickle hook: reduce ``tensor`` to a ``(rebuild_fn, args)`` pair.

    Dispatches on the tensor kind: non-subclass nested tensors and sparse
    tensors are reduced component-wise; CUDA storages are shared over CUDA
    IPC; everything else falls back to sharing the CPU storage.
    """
    if tensor.requires_grad and not tensor.is_leaf:
        raise RuntimeError(
            "Cowardly refusing to serialize non-leaf tensor which requires_grad, "
            "since autograd does not support crossing process boundaries.  "
            "If you just want to transfer the data, call detach() on the tensor "
            "before serializing (e.g., putting it on the queue)."
        )

    check_serializing_named_tensor(tensor)
    torch.utils.hooks.warn_if_has_hooks(tensor)

    # Note [CUDA IPC and the caching allocator]
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # When you send a CUDA tensor over IPC, you might expect that you will
    # get out the same storage from the other end.  However, the CUDA caching
    # allocator makes it difficult to preserve this invariant.  Consider
    # the following situation: a tensor of size 0x100 points to offset 0x20 of
    # a storage at 0xA100 of size 0x100.  (For simplicity, all of these
    # sizes are given in bytes).  HOWEVER, with the caching allocator, this storage
    # might be part of a larger cudaMalloc allocation 0xA000 of size 0x4000.
    #
    # When we want to send this CUDA tensor over IPC, we must send the
    # *entire* cudaMalloc allocation, i.e., the 0xA000 region, not just
    # the storage 0xA100 (because that is what CUDA supports).  So, on the
    # other end, there simply isn't any way to say, "Wait, you gave me
    # a bigger region (0xA000) than the one I wanted (0xA100)".
    #
    # OK, so if you sent the cudaMalloc allocation, can you just wrap that up as
    # one storage itself? No, because this cudaMalloc allocation might contain
    # storages of mixed types: float, bytes, double... If you make the entire
    # allocation a single storage of a type A, we'll hit an error when constructing
    # a tensor of type B on the storage.
    #
    # cudaIpcMemHandle is an identifier to access the sender cudaMalloc allocation on the
    # receiver side. However, cudaIpcMemHandles from each device in a given process may
    # only be opened by one context per device per other process.
    # If we open and close a memory handle multiples times in a process, CUDA is allowed
    # to give it a different address; similarly, once we close the memory, we're not
    # allowed to access it(and the storage/tensor built on top of it), even if it is
    # still live in the original process. As we cannot make a cudaMalloc allocation
    # to a single storage in one go, this requires us to cache the device pointer for
    # each cudaIpcMemHandle on C++ side to reconstruct types of storages, while keep
    # the old ones alives.
    # See [https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__DEVICE.html]
    #
    # This is fine, because all we need to do is to save our position in the allocation,
    # and reconstruct storage and tensor from it.
    # 0xA000 ->  -------CUDA Allocation------
    #           |                            |
    #           |                            |
    #           |                            |
    #           |                            |
    # 0xA100 ->  --------storage1 begin------
    #           |                            |
    # 0xA120 ->  --------tensor1 begin ------
    #           |                            |
    #           |                            |
    #           |                            |
    #           |                            |
    #           |                            |
    # 0xA160 ->  --------tensor1 end---------
    #           |                            |
    #           |                            |
    #           |                            |
    # 0xA200 ->  --------storage1 end--------
    #           |                            |
    # 0xE000 ->  --------CUDA allocation-----
    #
    # To send tensor1, the following info are required from sender to receiver for
    # storage recontruction.
    #   1. cudaIpcMemHandle of 0xA000(which can be mapped to a basePtr in receiver process).
    #      basePtr may not be exactly 0xA000 since it's a different process.
    #   2. offset(0xA100) of storage1 in the CUDA allocation.
    #   3. size of storage1(0x100).
    #
    # On receiver side:
    #   1. Get the devPtr of the MemHandle to access the memory, reconstruct a storage
    #      of the same type using (basePtr, offset, size).
    #   2. we can reconstruct the tensor on top of the reconstructed storage
    #   Tensor(size=0x040, offset=0x020, storage=Storage(data=basePtr+0xA100, size=0x0100))
    #
    # This strategy has a few implications:
    #
    # 1. When we serialize a CUDA tensor for IPC, we cannot do it all in one
    #    go (non-compositionally), and this requires to have a global map
    #    memHandle -> devPtr for each process.
    #
    # 2. We MUST NOT let the new IPC tensor be resizable.  Originally, a resize
    #    of the storage beyond 0x100 would merely have caused us to do a
    #    reallocation.  You don't really want to do this, but if you did,
    #    all that would happen is that you would lose IPC sharing.  But if
    #    you do this in the new world, we will happily let you write out of
    #    bounds of your "allocation", clobbering unrelated data in the cached
    #    allocator block.  BAD!
    #
    # By the way, in old versions of PyTorch, we supported this situation
    # natively using a "storage view", which permitted multiple storages to be
    # views on each other.  But this was the *only* use of storage views, so we
    # eliminated it so that we could just use tensor views to implement the same
    # thing.
    #

    # TODO: Handle distinguishing between subclass and non-subclass versions of NT better
    # https://github.com/pytorch/pytorch/issues/110543
    from torch.nested._internal.nested_tensor import NestedTensor

    if tensor.is_nested and not isinstance(tensor, NestedTensor):
        return reduce_nested_tensor(tensor)

    if tensor.layout in {
        torch.sparse_coo,
        torch.sparse_csr,
        torch.sparse_bsr,
        torch.sparse_csc,
        torch.sparse_bsc,
    }:
        return reduce_sparse_tensor(tensor)

    storage = tensor._typed_storage()

    if storage._untyped_storage.device.type == "cuda":
        (
            device,
            handle,
            storage_size_bytes,
            storage_offset_bytes,
            ref_counter_handle,
            ref_counter_offset,
            event_handle,
            event_sync_required,
        ) = storage._share_cuda_()
        tensor_offset = tensor.storage_offset()
        shared_cache[handle] = StorageWeakRef(storage)
        # _backward_hooks purposely omitted here, see
        # Note [Don't serialize hooks]
        return (
            rebuild_cuda_tensor,
            (
                type(tensor),
                tensor.size(),
                tensor.stride(),
                tensor_offset,  # tensor offset in its storage
                type(storage),
                tensor.dtype,
                device,
                handle,  # identifier which CUDA allocation is the storage in.
                storage_size_bytes,  # size(in bytes) of the storage
                storage_offset_bytes,  # offset(in bytes) of the storage in the CUDA allocation
                tensor.requires_grad,
                ref_counter_handle,
                ref_counter_offset,
                event_handle,
                event_sync_required,
            ),
        )

    # _backward_hooks purposely omitted here, see Note [Don't serialize hooks]
    metadata = (
        tensor.storage_offset(),
        tensor.size(),
        tensor.stride(),
        tensor.requires_grad,
    )
    return (rebuild_tensor, (type(tensor), storage, metadata))
+
+
def rebuild_nested_tensor(
    rebuild_buffer_func,
    rebuild_buffer_args,
    rebuild_sizes_func,
    rebuild_sizes_args,
    rebuild_strides_func,
    rebuild_strides_args,
    rebuild_offsets_func,
    rebuild_offsets_args,
):
    """Reassemble a nested tensor from its independently reduced components.

    Each (func, args) pair comes from reduce_tensor() applied to one of the
    buffer/sizes/strides/offsets tensors in reduce_nested_tensor().
    """
    buffer = rebuild_buffer_func(*rebuild_buffer_args)
    sizes = rebuild_sizes_func(*rebuild_sizes_args)
    strides = rebuild_strides_func(*rebuild_strides_args)
    offsets = rebuild_offsets_func(*rebuild_offsets_args)
    return torch._nested_view_from_buffer_copy(buffer, sizes, strides, offsets)
+
+
def reduce_nested_tensor(nt):
    """Reduce a nested tensor by reducing its buffer and metadata tensors separately."""
    rebuild_buffer_func, rebuild_buffer_args = reduce_tensor(nt.values())
    rebuild_sizes_func, rebuild_sizes_args = reduce_tensor(nt._nested_tensor_size())
    rebuild_strides_func, rebuild_strides_args = reduce_tensor(
        nt._nested_tensor_strides()
    )
    rebuild_offsets_func, rebuild_offsets_args = reduce_tensor(
        nt._nested_tensor_storage_offsets()
    )

    return (
        rebuild_nested_tensor,
        (
            rebuild_buffer_func,
            rebuild_buffer_args,
            rebuild_sizes_func,
            rebuild_sizes_args,
            rebuild_strides_func,
            rebuild_strides_args,
            rebuild_offsets_func,
            rebuild_offsets_args,
        ),
    )
+
+
+def rebuild_sparse_coo_tensor(
+ rebuild_indices_func,
+ rebuild_indices_args,
+ rebuild_values_func,
+ rebuild_values_args,
+ shape,
+ is_coalesced,
+):
+ indices = rebuild_indices_func(*rebuild_indices_args)
+ values = rebuild_values_func(*rebuild_values_args)
+ return torch.sparse_coo_tensor(indices, values, shape, is_coalesced=is_coalesced)
+
+
def rebuild_sparse_compressed_tensor(
    rebuild_compressed_indices_func,
    rebuild_compressed_indices_args,
    rebuild_plain_indices_func,
    rebuild_plain_indices_args,
    rebuild_values_func,
    rebuild_values_args,
    shape,
    layout,
):
    """Reassemble a sparse CSR/CSC/BSR/BSC tensor from its reduced components."""
    compressed = rebuild_compressed_indices_func(*rebuild_compressed_indices_args)
    plain = rebuild_plain_indices_func(*rebuild_plain_indices_args)
    vals = rebuild_values_func(*rebuild_values_args)
    return torch.sparse_compressed_tensor(compressed, plain, vals, shape, layout=layout)
+
+
def reduce_sparse_tensor(sparse):
    """Reduce a sparse tensor by reducing each of its component tensors.

    COO tensors ship (indices, values); compressed layouts ship
    (compressed indices, plain indices, values) plus shape and layout.
    """
    if sparse.layout is torch.sparse_coo:
        rebuild_indices_func, rebuild_indices_args = reduce_tensor(sparse._indices())
        rebuild_values_func, rebuild_values_args = reduce_tensor(sparse._values())
        return (
            rebuild_sparse_coo_tensor,
            (
                rebuild_indices_func,
                rebuild_indices_args,
                rebuild_values_func,
                rebuild_values_args,
                sparse.shape,
                sparse.is_coalesced(),
            ),
        )
    else:
        # Compressed layouts differ only in which index accessors apply.
        if sparse.layout in {torch.sparse_csr, torch.sparse_bsr}:
            compressed_indices = sparse.crow_indices()
            plain_indices = sparse.col_indices()
        elif sparse.layout in {torch.sparse_csc, torch.sparse_bsc}:
            compressed_indices = sparse.ccol_indices()
            plain_indices = sparse.row_indices()
        else:
            raise NotImplementedError(sparse.layout)
        (
            rebuild_compressed_indices_func,
            rebuild_compressed_indices_args,
        ) = reduce_tensor(compressed_indices)
        rebuild_plain_indices_func, rebuild_plain_indices_args = reduce_tensor(
            plain_indices
        )
        rebuild_values_func, rebuild_values_args = reduce_tensor(sparse.values())
        return (
            rebuild_sparse_compressed_tensor,
            (
                rebuild_compressed_indices_func,
                rebuild_compressed_indices_args,
                rebuild_plain_indices_func,
                rebuild_plain_indices_args,
                rebuild_values_func,
                rebuild_values_args,
                sparse.shape,
                sparse.layout,
            ),
        )
+
+
def fd_id(fd):
    """Return a tuple that uniquely identifies the open file behind ``fd``.

    In Mac OS, this doesn't work with shared memory handles, which is why we
    don't support the "file_descriptor" sharing method on that platform.
    """
    info = os.fstat(fd)
    return (info.st_ino, info.st_dev)
+
+
def storage_from_cache(cls, key):
    """Return a live UntypedStorage for ``key`` from shared_cache, or None."""
    ref = shared_cache.get(key)
    if ref is None:
        return None
    # Revive a strong storage reference from the cached weak pointer.
    return torch.UntypedStorage._new_with_weak_ptr(ref.cdata)
+
+
def rebuild_storage_fd(cls, df, size):
    """Reconstruct a shared CPU storage from a duplicated file descriptor.

    Args:
        cls: storage class to rebuild.
        df: multiprocessing ``DupFd``; ``detach()`` yields a local fd.
        size: size of the shared storage.
    """
    fd = df.detach()
    try:
        storage = storage_from_cache(cls, fd_id(fd))
        if storage is not None:
            return storage
        storage = cls._new_shared_fd_cpu(fd, size)
        shared_cache[fd_id(fd)] = StorageWeakRef(storage)
        return storage
    finally:
        # The storage holds its own reference to the mapping; the local fd
        # duplicate must always be closed, even if rebuilding fails.
        os.close(fd)
+
+
def rebuild_storage_filename(cls, manager, handle, size, dtype=None):
    """Reconstruct a CPU storage shared through the file_system strategy.

    ``dtype`` is present (appended by reduce_storage) only when the original
    storage was a TypedStorage; ``size`` is then in elements, not bytes.
    """
    storage: Union[torch.TypedStorage, torch.UntypedStorage] = storage_from_cache(
        cls, handle
    )
    if storage is not None:
        # Already mapped in this process; drop the extra shared refcount the
        # sender took on our behalf.
        return storage._shared_decref()
    if dtype is None:
        storage = torch.UntypedStorage._new_shared_filename_cpu(manager, handle, size)
    else:
        byte_size = size * torch._utils._element_size(dtype)
        untyped_storage: torch.UntypedStorage = (
            torch.UntypedStorage._new_shared_filename_cpu(manager, handle, byte_size)
        )
        storage = torch.TypedStorage(
            wrap_storage=untyped_storage, dtype=dtype, _internal=True
        )
    shared_cache[handle] = StorageWeakRef(storage)
    return storage._shared_decref()
+
+
def rebuild_storage_empty(cls):
    # Size-0 storages cannot be mmapped, so they are simply recreated.
    return cls()
+
+
def rebuild_typed_storage(storage, dtype):
    # Re-wrap the received untyped storage with its dtype.
    return torch.storage.TypedStorage(wrap_storage=storage, dtype=dtype, _internal=True)
+
+
# Use for torch.storage.TypedStorage
def reduce_typed_storage(storage):
    # Ship the underlying untyped storage plus the dtype needed to re-wrap it.
    return (rebuild_typed_storage, (storage._untyped_storage, storage.dtype))
+
+
def rebuild_typed_storage_child(storage, storage_type):
    # Re-wrap the received untyped storage as the concrete subclass
    # (e.g. torch.FloatStorage), which carries its own dtype.
    return storage_type(wrap_storage=storage, _internal=True)
+
+
# Use for child classes of torch.storage.TypedStorage, like torch.FloatStorage
def reduce_typed_storage_child(storage):
    # The subclass type itself encodes the dtype, so only the untyped storage
    # and the class need to travel.
    return (rebuild_typed_storage_child, (storage._untyped_storage, type(storage)))
+
+
def reduce_storage(storage):
    """Pickle hook for CPU storages: choose a sharing strategy and reduce.

    file_system strategy shares via a named shm file; otherwise sharing goes
    through a duplicated file descriptor. Size-0 storages are special-cased
    since they cannot be mmapped. CUDA storages must be pickled as tensors.
    """
    from . import get_sharing_strategy

    if storage.is_cuda:
        raise RuntimeError(
            "Cannot pickle CUDA storage; try pickling a CUDA tensor instead"
        )
    elif get_sharing_strategy() == "file_system":
        metadata = storage._share_filename_cpu_()
        cache_key = metadata[1]
        rebuild = rebuild_storage_filename
        if isinstance(storage, torch.TypedStorage):
            metadata += (storage.dtype,)
        storage._shared_incref()
    elif storage.size() == 0:
        # This is special cased because Empty tensors
        # (with size 0) cannot be mmapped.
        return (rebuild_storage_empty, (type(storage),))
    else:
        fd, size = storage._share_fd_cpu_()
        df = multiprocessing.reduction.DupFd(fd)
        cache_key = fd_id(fd)
        metadata = (df, size)
        rebuild = rebuild_storage_fd  # type: ignore[assignment]

    shared_cache[cache_key] = StorageWeakRef(storage)
    return (rebuild, (type(storage),) + metadata)
+
+
def init_reductions():
    """Register all torch reducers with ForkingPickler.

    After this runs, tensors, storages and CUDA events placed on
    multiprocessing queues are serialized via the shared-memory-aware
    reduce_* functions in this module.
    """
    ForkingPickler.register(torch.cuda.Event, reduce_event)

    for t in torch._storage_classes:
        if t.__name__ == "UntypedStorage":
            ForkingPickler.register(t, reduce_storage)
        else:
            ForkingPickler.register(t, reduce_typed_storage_child)

    ForkingPickler.register(torch.storage.TypedStorage, reduce_typed_storage)

    for t in torch._tensor_classes:
        ForkingPickler.register(t, reduce_tensor)

    # TODO: Maybe this should be in tensor_classes? :)
    ForkingPickler.register(torch.Tensor, reduce_tensor)
    ForkingPickler.register(torch.nn.parameter.Parameter, reduce_tensor)
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/spawn.py b/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/spawn.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea43df98c542358e9582fd18f76b4703960a878b
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/multiprocessing/spawn.py
@@ -0,0 +1,241 @@
+import multiprocessing
+import multiprocessing.connection
+import signal
+import sys
+import warnings
+from typing import Optional
+
+from . import _prctl_pr_set_pdeathsig # type: ignore[attr-defined]
+
+
class ProcessException(Exception):
    """Base class for failures of a child process in a spawn context.

    Attributes:
        msg: Human-readable description of the failure.
        error_index: Index of the failed process within the spawn context.
        pid: OS process id of the failed process (also exposed as
            ``error_pid``, the name declared in ``__slots__``).
    """

    __slots__ = ["error_index", "error_pid"]

    def __init__(self, msg: str, error_index: int, pid: int):
        super().__init__(msg)
        self.msg = msg
        self.error_index = error_index
        self.pid = pid
        # Fix: ``error_pid`` was declared in __slots__ but never assigned, so
        # reading the declared attribute raised AttributeError. Fill the slot
        # while keeping ``pid`` for backward compatibility.
        self.error_pid = pid

    def __reduce__(self):
        # Support pickling across the process boundary.
        return type(self), (self.msg, self.error_index, self.pid)
+
+
class ProcessRaisedException(ProcessException):
    """Exception raised when a process failed due to an exception raised by the code."""

    def __init__(
        self,
        msg: str,
        error_index: int,
        error_pid: int,
    ):
        # ``msg`` carries the child's formatted traceback (see _wrap / join).
        super().__init__(msg, error_index, error_pid)
+
+
class ProcessExitedException(ProcessException):
    """Exception raised when a process failed due to signal or exited with a specific code."""

    __slots__ = ["exit_code"]

    def __init__(
        self,
        msg: str,
        error_index: int,
        error_pid: int,
        exit_code: int,
        signal_name: Optional[str] = None,
    ):
        super().__init__(msg, error_index, error_pid)
        # exit_code is negative when the process was killed by a signal, in
        # which case signal_name carries the signal's symbolic name.
        self.exit_code = exit_code
        self.signal_name = signal_name

    def __reduce__(self):
        return (
            type(self),
            (self.msg, self.error_index, self.pid, self.exit_code, self.signal_name),
        )
+
+
def _wrap(fn, i, args, error_queue):
    """Child-process entry point: run ``fn(i, *args)`` and report failures.

    On an uncaught exception, the formatted traceback is pushed onto
    ``error_queue`` for the parent to re-raise, and the child exits with
    status 1.
    """
    # prctl(2) is a Linux specific system call.
    # On other systems the following function call has no effect.
    # This is set to ensure that non-daemonic child processes can
    # terminate if their parent terminates before they do.
    _prctl_pr_set_pdeathsig(signal.SIGINT)

    try:
        fn(i, *args)
    except KeyboardInterrupt:
        pass  # SIGINT; Killed by parent, do nothing
    except Exception:
        # Propagate exception to parent process, keeping original traceback
        import traceback

        error_queue.put(traceback.format_exc())
        sys.exit(1)
+
+
class ProcessContext:
    """Tracks a set of spawned processes and their per-process error queues.

    ``sentinels`` maps each process's wait-handle to its index so that
    ``join`` can wait on all remaining processes at once.
    """

    def __init__(self, processes, error_queues):
        self.error_queues = error_queues
        self.processes = processes
        self.sentinels = {
            process.sentinel: index for index, process in enumerate(processes)
        }

    def pids(self):
        # OS process ids, in spawn order.
        return [int(process.pid) for process in self.processes]

    def join(self, timeout=None):
        r"""Join one or more processes within spawn context.

        Attempt to join one or more processes in this spawn context.
        If one of them exited with a non-zero exit status, this function
        kills the remaining processes and raises an exception with the cause
        of the first process exiting.

        Returns ``True`` if all processes have been joined successfully,
        ``False`` if there are more processes that need to be joined.

        Args:
            timeout (float): Wait this long before giving up on waiting.
        """
        # Ensure this function can be called even when we're done.
        if len(self.sentinels) == 0:
            return True

        # Wait for any process to fail or all of them to succeed.
        ready = multiprocessing.connection.wait(
            self.sentinels.keys(),
            timeout=timeout,
        )

        error_index = None
        for sentinel in ready:
            index = self.sentinels.pop(sentinel)
            process = self.processes[index]
            process.join()
            if process.exitcode != 0:
                error_index = index
                break

        # Return if there was no error.
        if error_index is None:
            # Return whether or not all processes have been joined.
            return len(self.sentinels) == 0

        # Assume failure. Terminate processes that are still alive.
        for process in self.processes:
            if process.is_alive():
                process.terminate()
            process.join()

        # There won't be an error on the queue if the process crashed.
        failed_process = self.processes[error_index]
        if self.error_queues[error_index].empty():
            exitcode = self.processes[error_index].exitcode
            if exitcode < 0:
                # Negative exitcode means the process was killed by a signal.
                name = signal.Signals(-exitcode).name
                raise ProcessExitedException(
                    "process %d terminated with signal %s" % (error_index, name),
                    error_index=error_index,
                    error_pid=failed_process.pid,
                    exit_code=exitcode,
                    signal_name=name,
                )
            else:
                raise ProcessExitedException(
                    "process %d terminated with exit code %d" % (error_index, exitcode),
                    error_index=error_index,
                    error_pid=failed_process.pid,
                    exit_code=exitcode,
                )

        original_trace = self.error_queues[error_index].get()
        msg = "\n\n-- Process %d terminated with the following error:\n" % error_index
        msg += original_trace
        raise ProcessRaisedException(msg, error_index, failed_process.pid)
+
+
class SpawnContext(ProcessContext):
    """Deprecated alias of ProcessContext, kept for pre-1.4 callers."""

    def __init__(self, processes, error_queues):
        warnings.warn("SpawnContext is renamed to ProcessContext since 1.4 release.")
        super().__init__(processes, error_queues)
+
+
+# Note: [start_processes]
+# mp.start_processes handles both start_method='spawn' and 'fork'. It's supposed to be a
+# more generalized API than mp.spawn. Currently we only document mp.spawn as it's the
+# CUDA compatible start_method. However, in environments like Ipython notebooks, 'fork'
+# works better than 'spawn'. Every helper function we created for mp.spawn is indeed
+# general enough, and backends like XLA can reuse them in Colab notebooks as well.
+# Currently we only add this API first, we can consider adding it to documentation as
+# needed in the future.
def start_processes(
    fn, args=(), nprocs=1, join=True, daemon=False, start_method="spawn"
):
    """Start ``nprocs`` processes running ``fn(i, *args)`` via ``start_method``.

    Returns a :class:`ProcessContext` immediately when ``join`` is ``False``;
    otherwise blocks until every process finishes, raising on failure.
    """
    mp = multiprocessing.get_context(start_method)
    error_queues = []
    processes = []
    for rank in range(nprocs):
        # One error queue per child, read back by ProcessContext.join().
        queue = mp.SimpleQueue()
        proc = mp.Process(
            target=_wrap,
            args=(fn, rank, args, queue),
            daemon=daemon,
        )
        proc.start()
        error_queues.append(queue)
        processes.append(proc)

    context = ProcessContext(processes, error_queues)
    if not join:
        return context

    # Loop on join until it returns True or raises an exception.
    while not context.join():
        pass
+
+
def spawn(fn, args=(), nprocs=1, join=True, daemon=False, start_method="spawn"):
    r"""Spawns ``nprocs`` processes that run ``fn`` with ``args``.

    If one of the processes exits with a non-zero exit status, the
    remaining processes are killed and an exception is raised with the
    cause of termination. In the case an exception was caught in the
    child process, it is forwarded and its traceback is included in
    the exception raised in the parent process.

    Args:
        fn (function): Function is called as the entrypoint of the
            spawned process. This function must be defined at the top
            level of a module so it can be pickled and spawned. This
            is a requirement imposed by multiprocessing.

            The function is called as ``fn(i, *args)``, where ``i`` is
            the process index and ``args`` is the passed through tuple
            of arguments.

        args (tuple): Arguments passed to ``fn``.
        nprocs (int): Number of processes to spawn.
        join (bool): Perform a blocking join on all processes.
        daemon (bool): The spawned processes' daemon flag. If set to True,
                       daemonic processes will be created.
        start_method (str): (deprecated) this method will always use ``spawn``
                               as the start method. To use a different start method
                               use ``start_processes()``.

    Returns:
        None if ``join`` is ``True``,
        :class:`~ProcessContext` if ``join`` is ``False``

    """
    # Non-"spawn" values are warned about but not honored; the call below
    # always forces start_method="spawn".
    if start_method != "spawn":
        msg = (
            "This method only supports start_method=spawn (got: %s).\n"
            "To use a different start_method use:\n\t\t"
            " torch.multiprocessing.start_processes(...)" % start_method
        )
        warnings.warn(msg)
    return start_processes(fn, args, nprocs, join, daemon, start_method="spawn")
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/py.typed b/env-llmeval/lib/python3.10/site-packages/torch/py.typed
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/quasirandom.py b/env-llmeval/lib/python3.10/site-packages/torch/quasirandom.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c9b949c55651c42895c1a1afb6d9050d41aca2f
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/quasirandom.py
@@ -0,0 +1,180 @@
+import torch
+from typing import Optional
+
+
class SobolEngine:
    r"""Engine for generating (optionally scrambled) Sobol sequences, a family
    of low-discrepancy quasi-random sequences.

    Supports sampling in up to 21201 dimensions, using the direction numbers
    from https://web.maths.unsw.edu.au/~fkuo/sobol/ obtained with the search
    criterion D(6) — the choice recommended by the authors.

    References:
        - Art B. Owen. Scrambling Sobol and Niederreiter-Xing points.
          Journal of Complexity, 14(4):466-489, December 1998.

        - I. M. Sobol. The distribution of points in a cube and the accurate
          evaluation of integrals.
          Zh. Vychisl. Mat. i Mat. Phys., 7:784-802, 1967.

    Args:
        dimension (Int): dimensionality of the sequence to be drawn
        scramble (bool, optional): if ``True``, produce scrambled Sobol
            sequences, which can have better properties. Default: ``False``.
        seed (Int, optional): seed for the scrambling RNG; when omitted a
            random seed is used. Default: ``None``

    Examples::

        >>> # xdoctest: +SKIP("unseeded random state")
        >>> soboleng = torch.quasirandom.SobolEngine(dimension=5)
        >>> soboleng.draw(3)
        tensor([[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.5000, 0.5000, 0.5000, 0.5000, 0.5000],
                [0.7500, 0.2500, 0.2500, 0.2500, 0.7500]])
    """
    MAXBIT = 30
    MAXDIM = 21201

    def __init__(self, dimension, scramble=False, seed=None):
        if not 1 <= dimension <= self.MAXDIM:
            raise ValueError("Supported range of dimensionality "
                             f"for SobolEngine is [1, {self.MAXDIM}]")

        self.seed = seed
        self.scramble = scramble
        self.dimension = dimension

        cpu = torch.device("cpu")

        # Direction numbers are kept on CPU as 64-bit integers and filled in
        # by the C++ kernel.
        self.sobolstate = torch.zeros(dimension, self.MAXBIT, device=cpu, dtype=torch.long)
        torch._sobol_engine_initialize_state_(self.sobolstate, self.dimension)

        if self.scramble:
            self._scramble()
        else:
            self.shift = torch.zeros(self.dimension, device=cpu, dtype=torch.long)

        self.quasi = self.shift.clone(memory_format=torch.contiguous_format)
        # First point of the (shifted) sequence, cached separately because the
        # C++ drawing kernel produces points starting from the second one.
        self._first_point = (self.quasi / 2 ** self.MAXBIT).reshape(1, -1)
        self.num_generated = 0

    def draw(self, n: int = 1, out: Optional[torch.Tensor] = None,
             dtype: torch.dtype = torch.float32) -> torch.Tensor:
        r"""Draw the next :attr:`n` points of the sequence as a tensor of size
        :math:`(n, dimension)`. Successive calls continue the same sequence.

        Args:
            n (Int, optional): number of points to draw. Default: 1
            out (Tensor, optional): the output tensor
            dtype (:class:`torch.dtype`, optional): desired dtype of the
                returned tensor. Default: ``torch.float32``
        """
        if self.num_generated:
            result, self.quasi = torch._sobol_engine_draw(
                self.quasi, n, self.sobolstate, self.dimension, self.num_generated - 1, dtype=dtype,
            )
        elif n == 1:
            result = self._first_point.to(dtype)
        else:
            rest, self.quasi = torch._sobol_engine_draw(
                self.quasi, n - 1, self.sobolstate, self.dimension, self.num_generated, dtype=dtype,
            )
            result = torch.cat((self._first_point, rest), dim=-2)

        self.num_generated += n

        if out is None:
            return result
        out.resize_as_(result).copy_(result)
        return out

    def draw_base2(self, m: int, out: Optional[torch.Tensor] = None,
                   dtype: torch.dtype = torch.float32) -> torch.Tensor:
        r"""Draw the next :attr:`2**m` points of the sequence as a tensor of
        size :math:`(2**m, dimension)`. Successive calls continue the same
        sequence.

        Args:
            m (Int): the (base2) exponent of the number of points to draw.
            out (Tensor, optional): the output tensor
            dtype (:class:`torch.dtype`, optional): desired dtype of the
                returned tensor. Default: ``torch.float32``
        """
        n = 2 ** m
        total_n = self.num_generated + n
        # Balance properties hold only when the running total stays a power
        # of two; reject anything else.
        if total_n & (total_n - 1):
            raise ValueError("The balance properties of Sobol' points require "
                             f"n to be a power of 2. {self.num_generated} points have been "
                             f"previously generated, then: n={self.num_generated}+2**{m}={total_n}. "
                             "If you still want to do this, please use "
                             "'SobolEngine.draw()' instead."
                             )
        return self.draw(n=n, out=out, dtype=dtype)

    def reset(self):
        r"""Rewind the ``SobolEngine`` to its initial state."""
        self.quasi.copy_(self.shift)
        self.num_generated = 0
        return self

    def fast_forward(self, n):
        r"""Advance the state of the ``SobolEngine`` by :attr:`n` steps —
        equivalent to drawing :attr:`n` samples and discarding them.

        Args:
            n (Int): number of steps to fast-forward by.
        """
        already = self.num_generated
        if already == 0:
            torch._sobol_engine_ff_(self.quasi, n - 1, self.sobolstate, self.dimension, already)
        else:
            torch._sobol_engine_ff_(self.quasi, n, self.sobolstate, self.dimension, already - 1)
        self.num_generated += n
        return self

    def _scramble(self):
        gen: Optional[torch.Generator] = None
        if self.seed is not None:
            gen = torch.Generator()
            gen.manual_seed(self.seed)

        cpu = torch.device("cpu")

        # Random digital shift: one random bit per (dimension, bit) packed
        # into a single integer per dimension.
        shift_bits = torch.randint(2, (self.dimension, self.MAXBIT), device=cpu, generator=gen)
        powers = torch.pow(2, torch.arange(0, self.MAXBIT, device=cpu))
        self.shift = torch.mv(shift_bits, powers)

        # One random lower-triangular bit matrix per dimension.
        ltm = torch.randint(2, (self.dimension, self.MAXBIT, self.MAXBIT), device=cpu, generator=gen).tril()

        torch._sobol_engine_scramble_(self.sobolstate, ltm, self.dimension)

    def __repr__(self):
        parts = [f'dimension={self.dimension}']
        if self.scramble:
            parts.append('scramble=True')
        if self.seed is not None:
            parts.append(f'seed={self.seed}')
        return f"{self.__class__.__name__}({', '.join(parts)})"
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/random.py b/env-llmeval/lib/python3.10/site-packages/torch/random.py
new file mode 100644
index 0000000000000000000000000000000000000000..668443a2b2dd0b35db2f01882d1c7f991c70f22e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/random.py
@@ -0,0 +1,175 @@
+import contextlib
+from typing import Generator
+import warnings
+
+from torch._C import default_generator
+import torch
+
+
def set_rng_state(new_state: torch.Tensor) -> None:
    r"""Install ``new_state`` as the state of the default CPU generator.

    .. note: CPU-only. To seed CPU and CUDA together, use
        torch.manual_seed(seed) instead.

    Args:
        new_state (torch.ByteTensor): the state to install
    """
    default_generator.set_state(new_state)
+
+
def get_rng_state() -> torch.Tensor:
    r"""Return the default CPU generator's state as a `torch.ByteTensor`."""
    return default_generator.get_state()
+
+
def manual_seed(seed) -> torch._C.Generator:
    r"""Seed the default CPU generator and every available device RNG
    (CUDA, MPS, XPU and any registered custom backend); return the default
    `torch.Generator`.

    Args:
        seed (int): the desired seed. Must lie within the inclusive range
            `[-0x8000_0000_0000_0000, 0xffff_ffff_ffff_ffff]`, otherwise a
            RuntimeError is raised. Negative inputs are remapped to positive
            values via `0xffff_ffff_ffff_ffff + seed`.
    """
    seed = int(seed)

    # Skip seeding device RNGs in a process forked after their init
    # (re-initializing them there would be unsafe).
    import torch.cuda
    if not torch.cuda._is_in_bad_fork():
        torch.cuda.manual_seed_all(seed)

    import torch.mps
    if not torch.mps._is_in_bad_fork():
        torch.mps.manual_seed(seed)

    if hasattr(torch, 'xpu') and not torch.xpu._is_in_bad_fork():
        torch.xpu.manual_seed_all(seed)

    _seed_custom_device(seed)

    return default_generator.manual_seed(seed)
+
+
def seed() -> int:
    r"""Seed all RNGs from a non-deterministic source and return the 64-bit
    seed that was used.
    """
    new_seed = default_generator.seed()

    # Propagate the same seed to every device RNG, skipping any that is in a
    # bad fork state (process forked after device init).
    import torch.cuda
    if not torch.cuda._is_in_bad_fork():
        torch.cuda.manual_seed_all(new_seed)

    import torch.mps
    if not torch.mps._is_in_bad_fork():
        torch.mps.manual_seed(new_seed)

    if hasattr(torch, 'xpu') and not torch.xpu._is_in_bad_fork():
        torch.xpu.manual_seed_all(new_seed)

    _seed_custom_device(new_seed)

    return new_seed
+
+
+def _seed_custom_device(seed) -> None:
+ r"""Sets the seed to generate random numbers for custom device.
+
+ Args:
+ seed (int): The desired seed.
+
+ See [Note: support the custom device with privateuse1]
+ """
+ seed = int(seed)
+ custom_backend_name = torch._C._get_privateuse1_backend_name()
+ if hasattr(torch, custom_backend_name):
+ custom_device_mod = getattr(torch, custom_backend_name)
+ _bad_fork_name = "_is_in_bad_fork"
+ _seed_all_name = "manual_seed_all"
+ if hasattr(custom_device_mod, _bad_fork_name) and hasattr(custom_device_mod, _seed_all_name):
+ if not getattr(custom_device_mod, _bad_fork_name)():
+ getattr(custom_device_mod, _seed_all_name)(seed)
+ else:
+ message = f"Set seed for `{custom_backend_name}` device does not take effect, please add API's "
+ message += f"`{_bad_fork_name}` and `{_seed_all_name}` to `{custom_backend_name}` device module."
+ warnings.warn(message, UserWarning, stacklevel=3)
+
+
def initial_seed() -> int:
    r"""Return the seed the default CPU generator was last seeded with, as a
    Python `long`.
    """
    return default_generator.initial_seed()
+
+
# Emit the "many devices" warning at most once per process.
_fork_rng_warned_already = False


@contextlib.contextmanager
def fork_rng(devices=None, enabled=True, _caller="fork_rng", _devices_kw="devices", device_type="cuda") -> Generator:
    """
    Forks the RNG, so that when you return, the RNG is reset
    to the state that it was previously in.

    Args:
        devices (iterable of Device IDs): devices for which to fork
            the RNG. CPU RNG state is always forked. By default, :meth:`fork_rng` operates
            on all devices, but will emit a warning if your machine has a lot
            of devices, since this function will run very slowly in that case.
            If you explicitly specify devices, this warning will be suppressed
        enabled (bool): if ``False``, the RNG is not forked. This is a convenience
            argument for easily disabling the context manager without having
            to delete it and unindent your Python code under it.
        device_type (str): device type str, default is `cuda`. As for custom device,
            see details in [Note: support the custom device with privateuse1]
    """

    device_type = torch.device(device_type).type
    device_mod = getattr(torch, device_type, None)
    if device_mod is None:
        raise RuntimeError(f"torch has no module of `{device_type}`, you should register " +
                           "a module by `torch._register_device_module`.")
    global _fork_rng_warned_already

    # Internal arguments:
    #   _caller: the function which called fork_rng, which the user used
    #   _devices_kw: the devices keyword of _caller

    if not enabled:
        yield
        return

    if devices is None:
        num_devices = device_mod.device_count()
        if num_devices > 1 and not _fork_rng_warned_already:
            # NOTE: the "{device_type.upper()}_VISIBLE_DEVICES=" suggestion below
            # was previously emitted literally as "device.upper()_VISIBLE_DEVICES="
            # due to a missing f-string interpolation; fixed here.
            message = (f"{device_type.upper()} reports that you have {num_devices} available devices, and "
                       f"you have used {_caller} without explicitly specifying which devices are being used. "
                       f"For safety, we initialize *every* {device_type.upper()} device by default, which can "
                       f"be quite slow if you have a lot of {device_type.upper()}s. If you know that you are only"
                       f" making use of a few {device_type.upper()} devices, set the environment variable "
                       f"{device_type.upper()}_VISIBLE_DEVICES or the '{_devices_kw}' keyword argument of {_caller} "
                       "with the set of devices you are actually using. For example, if you are using CPU only, "
                       f"set {device_type.upper()}_VISIBLE_DEVICES= or devices=[]; if you are using device 0 only, "
                       f"set {device_type.upper()}_VISIBLE_DEVICES=0 or devices=[0]. To initialize all devices "
                       f"and suppress this warning, set the '{_devices_kw}' keyword argument to "
                       f"`range(torch.{device_type}.device_count())`.")
            warnings.warn(message)
            _fork_rng_warned_already = True
        devices = list(range(num_devices))
    else:
        # Protect against user passing us a generator; we need to traverse this
        # multiple times but a generator will be exhausted upon first traversal
        devices = list(devices)

    cpu_rng_state = torch.get_rng_state()
    device_rng_states = [device_mod.get_rng_state(device) for device in devices]

    try:
        yield
    finally:
        # Restore CPU state first, then each device's state, even if the body raised.
        torch.set_rng_state(cpu_rng_state)
        for device, device_rng_state in zip(devices, device_rng_states):
            device_mod.set_rng_state(device_rng_state, device)
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/return_types.py b/env-llmeval/lib/python3.10/site-packages/torch/return_types.py
new file mode 100644
index 0000000000000000000000000000000000000000..b1284c813387e71d5d0be90e1f6bf349b6bcf68e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/return_types.py
@@ -0,0 +1,34 @@
+import torch
+import inspect
+
# Public names of this module; extended below with every public return type.
__all__ = ["pytree_register_structseq"]

# error: Module has no attribute "_return_types"
# The C-level module holding all structseq return types for torch ops.
return_types = torch._C._return_types  # type: ignore[attr-defined]
+
+def pytree_register_structseq(cls):
+ def structseq_flatten(structseq):
+ return list(structseq), None
+
+ def structseq_unflatten(values, context):
+ return cls(values)
+
+ torch.utils._pytree.register_pytree_node(cls, structseq_flatten, structseq_unflatten)
+
# Re-export every entry of torch._C._return_types at module level, extend
# __all__ with the public ones, and register each structseq type with pytree.
for name in dir(return_types):
    if name.startswith('__'):
        continue

    _attr = getattr(return_types, name)
    globals()[name] = _attr

    if not name.startswith('_'):
        __all__.append(name)

    # Today everything in torch.return_types is a structseq, aka a "namedtuple"-like
    # thing defined by the Python C-API. We're going to need to modify this when that
    # is no longer the case.
    # NB: I don't know how to check that something is a "structseq" so we do a fuzzy
    # check for tuple
    if inspect.isclass(_attr) and issubclass(_attr, tuple):
        pytree_register_structseq(_attr)
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/return_types.pyi b/env-llmeval/lib/python3.10/site-packages/torch/return_types.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..f617e000fff88ed114b46e36fa89aa5379d0b6ea
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/return_types.pyi
@@ -0,0 +1,172 @@
+# @generated from torch/_C/return_types.pyi
+
+from typing import (
+ Any,
+ Callable,
+ ContextManager,
+ Iterator,
+ List,
+ Literal,
+ NamedTuple,
+ Optional,
+ overload,
+ Sequence,
+ Tuple,
+ TypeVar,
+ Union,
+)
+
+from torch import contiguous_format, Generator, inf, memory_format, strided, Tensor, SymInt
+from torch.types import (
+ _bool,
+ _device,
+ _dtype,
+ _float,
+ _int,
+ _layout,
+ _qscheme,
+ _size,
+ Number,
+)
+
# Typed stubs mirroring the structseq return types exposed at runtime by
# torch._C._return_types; each field name matches the structseq attribute.

# Return types of private/internal ops (leading underscore).
class _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(NamedTuple):
    output: Tensor
    mask: Tensor

class _fused_moving_avg_obs_fq_helper(NamedTuple):
    output: Tensor
    mask: Tensor

class _linalg_det(NamedTuple):
    result: Tensor
    LU: Tensor
    pivots: Tensor

class _linalg_eigh(NamedTuple):
    eigenvalues: Tensor
    eigenvectors: Tensor

class _linalg_slogdet(NamedTuple):
    sign: Tensor
    logabsdet: Tensor
    LU: Tensor
    pivots: Tensor

class _linalg_solve_ex(NamedTuple):
    result: Tensor
    LU: Tensor
    pivots: Tensor
    info: Tensor

class _linalg_svd(NamedTuple):
    U: Tensor
    S: Tensor
    Vh: Tensor

class _lu_with_info(NamedTuple):
    LU: Tensor
    pivots: Tensor
    info: Tensor

class _scaled_dot_product_efficient_attention(NamedTuple):
    output: Tensor
    log_sumexp: Tensor
    philox_seed: Tensor
    philox_offset: Tensor

class _scaled_dot_product_flash_attention(NamedTuple):
    output: Tensor
    logsumexp: Tensor
    cum_seq_q: Tensor
    cum_seq_k: Tensor
    max_q: Union[_int, SymInt]
    max_k: Union[_int, SymInt]
    philox_seed: Tensor
    philox_offset: Tensor
    debug_attn_mask: Tensor

class _unpack_dual(NamedTuple):
    primal: Tensor
    tangent: Tensor

# Return types of public ops (also re-exported via torch.return_types).
class aminmax(NamedTuple):
    min: Tensor
    max: Tensor

class cummax(NamedTuple):
    values: Tensor
    indices: Tensor

class cummin(NamedTuple):
    values: Tensor
    indices: Tensor

class frexp(NamedTuple):
    mantissa: Tensor
    exponent: Tensor

class geqrf(NamedTuple):
    a: Tensor
    tau: Tensor

class histogram(NamedTuple):
    hist: Tensor
    bin_edges: Tensor

class histogramdd(NamedTuple):
    hist: Tensor
    bin_edges: List[Tensor]

class kthvalue(NamedTuple):
    values: Tensor
    indices: Tensor

class lu_unpack(NamedTuple):
    P: Tensor
    L: Tensor
    U: Tensor

class max(NamedTuple):
    values: Tensor
    indices: Tensor

class median(NamedTuple):
    values: Tensor
    indices: Tensor

class min(NamedTuple):
    values: Tensor
    indices: Tensor

class mode(NamedTuple):
    values: Tensor
    indices: Tensor

class nanmedian(NamedTuple):
    values: Tensor
    indices: Tensor

class qr(NamedTuple):
    Q: Tensor
    R: Tensor

class slogdet(NamedTuple):
    sign: Tensor
    logabsdet: Tensor

class sort(NamedTuple):
    values: Tensor
    indices: Tensor

class svd(NamedTuple):
    U: Tensor
    S: Tensor
    V: Tensor

class topk(NamedTuple):
    values: Tensor
    indices: Tensor

class triangular_solve(NamedTuple):
    solution: Tensor
    cloned_coefficient: Tensor
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/serialization.py b/env-llmeval/lib/python3.10/site-packages/torch/serialization.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d02efd53f27b39088c2ba39c7e9162888f3f79a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/serialization.py
@@ -0,0 +1,1448 @@
+import difflib
+import os
+import io
+import shutil
+import struct
+import sys
+import torch
+import tarfile
+import tempfile
+import warnings
+from contextlib import closing, contextmanager
+from enum import Enum
+from ._utils import _import_dotted_name
+from torch._sources import get_source_lines_and_file
+from torch.types import Storage
+from torch.storage import _get_dtype_from_pickle_storage_type
+from typing import Any, BinaryIO, Callable, cast, Dict, Optional, Type, Tuple, Union, IO
+from typing_extensions import TypeAlias # Python 3.10+
+import copyreg
+import pickle
+import pathlib
+import torch._weights_only_unpickler as _weights_only_unpickler
+
# Pickle protocol used by torch.save; kept at 2 for compatibility of files.
DEFAULT_PROTOCOL = 2

# Byte sizes of native-order C types, used by the legacy serialization format.
LONG_SIZE = struct.Struct('=l').size
INT_SIZE = struct.Struct('=i').size
SHORT_SIZE = struct.Struct('=h').size

# Sentinels written at the head of legacy serialized files so that the loader
# can recognize them and reject incompatible protocols.
MAGIC_NUMBER = 0x1950a86a20f9469cfc6c
PROTOCOL_VERSION = 1001
STORAGE_KEY_SEPARATOR = ','

# Aliases for the kinds of objects torch.save / torch.load accept.
FILE_LIKE: TypeAlias = Union[str, os.PathLike, BinaryIO, IO[bytes]]
MAP_LOCATION: TypeAlias = Optional[Union[Callable[[torch.Tensor, str], torch.Tensor], torch.device, str, Dict[str, str]]]
STORAGE: TypeAlias = Union[Storage, torch.storage.TypedStorage, torch.UntypedStorage]

# Public API of this module.
__all__ = [
    'SourceChangeWarning',
    'mkdtemp',
    'register_package',
    'check_module_version_greater_or_equal',
    'validate_cuda_device',
    'validate_hpu_device',
    'location_tag',
    'default_restore_location',
    'normalize_storage_type',
    'storage_to_tensor_type',
    'save',
    'load',
    'StorageType',
    'LoadEndianness',
    'get_default_load_endianness',
    'set_default_load_endianness',
]
+
+
class SourceChangeWarning(Warning):
    """Warning emitted when a deserialized class's source code differs from
    the source recorded at save time."""
+
+
@contextmanager
def mkdtemp():
    """Yield a fresh temporary directory, deleting it (and its contents) on
    exit."""
    tmp_dir = tempfile.mkdtemp()
    try:
        yield tmp_dir
    finally:
        shutil.rmtree(tmp_dir)
+
+
# Registry of (priority, tagger, deserializer) triples, kept sorted by priority.
_package_registry = []
+
class LoadEndianness(Enum):
    # Fallback byte orders torch.load can assume for checkpoints that carry
    # no byteorder record (see get/set_default_load_endianness).
    NATIVE = 1
    LITTLE = 2
    BIG = 3

# Current fallback byte order; None means native byte order.
_default_load_endian: Optional[LoadEndianness] = None
+
def get_default_load_endianness() -> Optional[LoadEndianness]:
    '''Return the fallback byte order used when a saved checkpoint carries no
    byteorder mark; ``None`` means "native" byte order.

    Returns:
        default_load_endian: Optional[LoadEndianness]
    '''
    return _default_load_endian
+
def set_default_load_endianness(endianness):
    '''Set the fallback byte order used when a saved checkpoint carries no
    byteorder mark; by default it is "native" byte order.

    Args:
        endianness: the new fallback byte order (a LoadEndianness, or None
            for native)

    Raises:
        TypeError: if ``endianness`` is neither a LoadEndianness nor None.
    '''
    global _default_load_endian
    if endianness is not None and not isinstance(endianness, LoadEndianness):
        raise TypeError("Invalid argument type in function set_default_load_endianness")
    _default_load_endian = endianness
+
+def _is_zipfile(f) -> bool:
+ # This is a stricter implementation than zipfile.is_zipfile().
+ # zipfile.is_zipfile() is True if the magic number appears anywhere in the
+ # binary. Since we expect the files here to be generated by torch.save or
+ # torch.jit.save, it's safe to only check the start bytes and avoid
+ # collisions and assume the zip has only 1 file.
+ # See bugs.python.org/issue28494.
+
+ start = f.tell()
+ # Read the first few bytes and match against the ZIP file signature
+ local_header_magic_number = b'PK\x03\x04'
+ read_bytes = f.read(len(local_header_magic_number))
+ f.seek(start)
+ return read_bytes == local_header_magic_number
+
+
def register_package(
    priority: int,
    tagger: Callable[[STORAGE], Optional[str]],
    deserializer: Callable[[STORAGE, str], Optional[STORAGE]]
):
    '''
    Register a tagger/deserializer pair for storage objects, ordered by priority.

    Tagging associates a device with a storage object at save time, while
    deserializing moves a storage object to an appropriate device at load
    time. The registered :attr:`tagger` and :attr:`deserializer` callables are
    tried in the order given by :attr:`priority` until one returns a value
    that is not `None`.

    To override the deserialization behavior for a device in the global
    registry, register a tagger with a higher priority than the existing one.
    New devices can be supported the same way.

    Args:
        priority: priority of this pair; a lower value indicates higher
            priority.
        tagger: callable taking a storage object and returning its tagged
            device as a string, or None.
        deserializer: callable taking a storage object and a device string and
            returning the storage on the appropriate device, or None.

    Returns:
        `None`

    Example:
        >>> def ipu_tag(obj):
        >>>     if obj.device.type == 'ipu':
        >>>         return 'ipu'
        >>> def ipu_deserialize(obj, location):
        >>>     if location.startswith('ipu'):
        >>>         ipu = getattr(torch, "ipu", None)
        >>>         assert ipu is not None, "IPU device module is not loaded"
        >>>         assert torch.ipu.is_available(), "ipu is not available"
        >>>         return obj.ipu(location)
        >>> torch.serialization.register_package(11, ipu_tag, ipu_deserialize)
    '''
    _package_registry.append((priority, tagger, deserializer))
    _package_registry.sort()
+
+
def check_module_version_greater_or_equal(module, req_version_tuple, error_if_malformed=True):
    '''
    Check whether ``module``'s version satisfies a required minimum.

    Usually a module's version string looks like 'x.y.z' and maps to the tuple
    (x, y, z), but sometimes it has an unexpected format. If the version
    string does not match the given tuple's format up to the tuple's length,
    either raise or emit a warning depending on ``error_if_malformed``.

    Args:
        module: the module whose version is checked
        req_version_tuple: tuple (usually of ints) with the required version
        error_if_malformed: whether to raise when the version string is
            malformed (otherwise warn and assume the requirement is met)

    Returns:
        requirement_is_met: bool
    '''
    try:
        fields = module.__version__.split('.')
        # Coerce each version field to the type of the corresponding required
        # field so that the tuple comparison below is well-defined.
        parsed_version = tuple(
            type(required)(fields[idx]) for idx, required in enumerate(req_version_tuple)
        )
        requirement_is_met = parsed_version >= req_version_tuple
    except Exception as e:
        message = (
            f"'{module.__name__}' module version string is malformed '{module.__version__}' and cannot be compared"
            f" with tuple {str(req_version_tuple)}"
        )
        if error_if_malformed:
            raise RuntimeError(message) from e
        warnings.warn(message + ', but continuing assuming that requirement is met')
        requirement_is_met = True

    return requirement_is_met
+
+
+def _cpu_tag(obj):
+ if obj.device.type == 'cpu':
+ return 'cpu'
+
+
+def _cuda_tag(obj):
+ if obj.device.type == 'cuda':
+ return 'cuda:' + str(obj.device.index)
+
+def _hpu_tag(obj):
+ if obj.device.type == 'hpu':
+ return 'hpu:' + str(obj.device.index)
+
+def _mps_tag(obj):
+ if obj.device.type == 'mps':
+ return 'mps'
+
+
+def _meta_tag(obj):
+ if obj.device.type == 'meta':
+ return 'meta'
+
+
+def _privateuse1_tag(obj):
+ backend_name = torch._C._get_privateuse1_backend_name()
+ if obj.device.type == backend_name:
+ if obj.device.index is None:
+ return backend_name
+ else:
+ return backend_name + ':' + str(obj.device.index)
+
+
+def _cpu_deserialize(obj, location):
+ if location == 'cpu':
+ return obj
+
+
def validate_cuda_device(location):
    # Resolve a 'cuda[:N]' location string to a device index, raising a
    # descriptive RuntimeError when CUDA is unavailable or the index is out
    # of range on this machine.
    device = torch.cuda._utils._get_device_index(location, True)

    if not torch.cuda.is_available():
        raise RuntimeError('Attempting to deserialize object on a CUDA '
                           'device but torch.cuda.is_available() is False. '
                           'If you are running on a CPU-only machine, '
                           'please use torch.load with map_location=torch.device(\'cpu\') '
                           'to map your storages to the CPU.')
    device_count = torch.cuda.device_count()
    if device >= device_count:
        raise RuntimeError('Attempting to deserialize object on CUDA device '
                           f'{device} but torch.cuda.device_count() is {device_count}. Please use '
                           'torch.load with map_location to map your storages '
                           'to an existing device.')
    return device
+
+
+def _cuda_deserialize(obj, location):
+ if location.startswith('cuda'):
+ device = validate_cuda_device(location)
+ if getattr(obj, "_torch_load_uninitialized", False):
+ with torch.cuda.device(device):
+ return torch.UntypedStorage(obj.nbytes(), device=torch.device(location))
+ else:
+ return obj.cuda(device)
+
+
def validate_hpu_device(location):
    # Resolve an 'hpu[:N]' location string to a device index, raising when
    # the HPU module is missing/unavailable or the index is out of range.
    hpu = getattr(torch, "hpu", None)
    assert hpu is not None, "HPU device module is not loaded"
    device = hpu._utils._get_device_index(location, optional=True)

    if not hpu.is_available():
        raise RuntimeError('Attempting to deserialize object on a HPU '
                           'device but torch.hpu.is_available() is False. '
                           'If you are running on a CPU-only machine, '
                           'please use torch.load with map_location=torch.device(\'cpu\') '
                           'to map your storages to the CPU.')
    device_count = hpu.device_count()
    if device >= device_count:
        raise RuntimeError('Attempting to deserialize object on HPU device '
                           f'{device} but torch.hpu.device_count() is {device_count}. Please use '
                           'torch.load with map_location to map your storages '
                           'to an existing device.')
    return device
+
+
+def _hpu_deserialize(obj, location):
+ if location.startswith('hpu'):
+ hpu = getattr(torch, "hpu", None)
+ assert hpu is not None, "HPU device module is not loaded"
+ device = validate_hpu_device(location)
+ if getattr(obj, "_torch_load_uninitialized", False):
+ with hpu.device(device):
+ return torch.UntypedStorage(obj.nbytes(), device=torch.device(location))
+ else:
+ return obj.hpu(device)
+
+
+def _mps_deserialize(obj, location):
+ if location.startswith('mps'):
+ return obj.mps()
+
+
+def _meta_deserialize(obj, location):
+ if location == 'meta':
+ return torch.UntypedStorage(obj.nbytes(), device='meta')
+
+
def _validate_privateuse1_device(location, backend_name):
    '''
    Check whether the device index of privateuse1 is valid

    Register a device_module of privateuse1 by torch._register_device_module.
    Implement the following methods in device_module like cuda:
    device_module._utils._get_device_index(location, True),
    device_module.device_count().

    Args:
        location: string of device
        backend_name: the name of privateuse1, which can be renamed

    Returns:
        device_index: int
    '''
    if not hasattr(torch, backend_name):
        raise RuntimeError(f'The {backend_name.upper()} device module is not registered. '
                           'If you are running on a CPU-only machine, '
                           'please use torch.load with map_location=torch.device(\'cpu\') '
                           'to map your storages to the CPU.')
    device_module = getattr(torch, backend_name)
    # Prefer the backend's own index parser when provided; otherwise fall
    # back to torch.device parsing with index 0 as the default.
    if hasattr(device_module, '_utils') and hasattr(device_module._utils, '_get_device_index'):
        device_index = device_module._utils._get_device_index(location, True)
    else:
        device = torch.device(location)
        device_index = device.index if device.index else 0
    # Availability and range checks are best-effort: only performed when the
    # backend module exposes the corresponding hooks.
    if hasattr(device_module, 'is_available') and not device_module.is_available():
        raise RuntimeError(f'Attempting to deserialize object on a {backend_name.upper()} '
                           f'device but torch.{backend_name}.is_available() is False. '
                           'If you are running on a CPU-only machine, '
                           'please use torch.load with map_location=torch.device(\'cpu\') '
                           'to map your storages to the CPU.')
    if hasattr(device_module, 'device_count'):
        device_count = device_module.device_count()
        if device_index >= device_count:
            raise RuntimeError(f'Attempting to deserialize object on {backend_name.upper()} device '
                               f'{device_index} but torch.{backend_name}.device_count() is {device_count}. '
                               'Please use torch.load with map_location to map your storages '
                               'to an existing device.')
    return device_index
+
+
+def _privateuse1_deserialize(obj, location):
+ backend_name = torch._C._get_privateuse1_backend_name()
+ if location.startswith(backend_name):
+ if not hasattr(obj, backend_name):
+ raise RuntimeError(f'Attempting to load the storages to the {backend_name.upper()} device '
+ f'but torch.storage._StorageBase.{backend_name}() or '
+ f'torch.storage.TypedStorage.{backend_name}() is not generated. '
+ 'Please use torch.utils.generate_methods_for_privateuse1_backend '
+ f'to generate storage.{backend_name}() method first.')
+ device_index = _validate_privateuse1_device(location, backend_name)
+ return getattr(obj, backend_name)(device_index)
+
+
+register_package(10, _cpu_tag, _cpu_deserialize)
+register_package(20, _cuda_tag, _cuda_deserialize)
+register_package(21, _mps_tag, _mps_deserialize)
+register_package(22, _meta_tag, _meta_deserialize)
+register_package(23, _privateuse1_tag, _privateuse1_deserialize)
+register_package(24, _hpu_tag, _hpu_deserialize)
+
+
def location_tag(storage: Union[Storage, torch.storage.TypedStorage, torch.UntypedStorage]):
    """Return the location string for ``storage`` from the first registered
    tagger that recognizes it; raise if none does."""
    for _, tagger, _ in _package_registry:
        tag = tagger(storage)
        if tag:
            return tag
    raise RuntimeError("don't know how to determine data location of "
                       + torch.typename(storage))
+
+
def default_restore_location(storage, location):
    """Restore ``storage`` through the first registered deserializer that
    accepts ``location``; raise if none does."""
    for _, _, deserializer in _package_registry:
        restored = deserializer(storage, location)
        if restored is not None:
            return restored
    raise RuntimeError("don't know how to restore data location of "
                       + torch.typename(storage) + " (tagged with "
                       + location + ")")
+
+
def normalize_storage_type(storage_type):
    # Map a storage class to its canonical torch.* counterpart by name.
    return getattr(torch, storage_type.__name__)
+
+
def storage_to_tensor_type(storage):
    # Resolve e.g. FloatStorage -> FloatTensor inside the storage's own module.
    cls = type(storage)
    module = _import_dotted_name(cls.__module__)
    return getattr(module, cls.__name__.replace('Storage', 'Tensor'))
+
+
+def _is_path(name_or_buffer):
+ return isinstance(name_or_buffer, (str, pathlib.Path))
+
+
+class _opener:
+ def __init__(self, file_like):
+ self.file_like = file_like
+
+ def __enter__(self):
+ return self.file_like
+
+ def __exit__(self, *args):
+ pass
+
+
class _open_file(_opener):
    """Opener that owns the underlying file object and closes it on exit."""

    def __init__(self, name, mode):
        super().__init__(open(name, mode))

    def __exit__(self, *args):
        self.file_like.close()
+
+
class _open_buffer_reader(_opener):
    # Wraps a caller-provided readable buffer; validates up front that it is
    # seekable, which the load paths require. The buffer is never closed here
    # since the caller owns it.
    def __init__(self, buffer):
        super().__init__(buffer)
        _check_seekable(buffer)
+
+
class _open_buffer_writer(_opener):
    """Opener for caller-owned writable buffers: flushed on exit, but never
    closed (the caller owns the buffer's lifetime)."""

    def __exit__(self, *args):
        self.file_like.flush()
+
+
def _open_file_like(name_or_buffer, mode):
    """Dispatch to a path-based or buffer-based opener depending on input."""
    if _is_path(name_or_buffer):
        return _open_file(name_or_buffer, mode)
    if 'w' in mode:
        return _open_buffer_writer(name_or_buffer)
    if 'r' in mode:
        return _open_buffer_reader(name_or_buffer)
    raise RuntimeError(f"Expected 'r' or 'w' in mode but got {mode}")
+
+
class _open_zipfile_reader(_opener):
    """Opener wrapping ``torch._C.PyTorchFileReader`` for zip-format checkpoints."""

    def __init__(self, name_or_buffer) -> None:
        super().__init__(torch._C.PyTorchFileReader(name_or_buffer))
+
+
class _open_zipfile_writer_file(_opener):
    """Opener that writes a zip-format checkpoint to a named file.

    Probes whether the filename is pure ASCII; non-ASCII names are routed
    through a Python ``io.FileIO`` stream because the C++ writer only
    accepts ASCII paths.
    """

    def __init__(self, name) -> None:
        self.file_stream = None
        self.name = str(name)
        try:
            self.name.encode('ascii')
        except UnicodeEncodeError:
            # PyTorchFileWriter only supports ascii filename.
            # For filenames with non-ascii characters, we rely on Python
            # for writing out the file.
            self.file_stream = io.FileIO(self.name, mode='w')
            super().__init__(torch._C.PyTorchFileWriter(self.file_stream))
        else:
            super().__init__(torch._C.PyTorchFileWriter(self.name))

    def __exit__(self, *args) -> None:
        # Finalize the archive before releasing the underlying stream.
        self.file_like.write_end_of_file()
        if self.file_stream is not None:
            self.file_stream.close()
+
+
class _open_zipfile_writer_buffer(_opener):
    """Opener that writes a zip-format checkpoint into a writable buffer.

    Raises AttributeError if the buffer has no ``write`` at all, and
    TypeError if ``write`` exists but is not callable — mirroring what the
    C++ writer would otherwise fail on less clearly.
    """

    def __init__(self, buffer) -> None:
        if not callable(getattr(buffer, "write", None)):
            msg = f"Buffer of {str(type(buffer)).strip('<>')} has no callable attribute 'write'"
            if not hasattr(buffer, "write"):
                raise AttributeError(msg)
            raise TypeError(msg)
        self.buffer = buffer
        super().__init__(torch._C.PyTorchFileWriter(buffer))

    def __exit__(self, *args) -> None:
        # Finalize the archive, then flush (but do not close) the caller's buffer.
        self.file_like.write_end_of_file()
        self.buffer.flush()
+
+
def _open_zipfile_writer(name_or_buffer):
    """Choose the zip-file writer class for a path or a buffer target."""
    container: Type[_opener]
    if _is_path(name_or_buffer):
        container = _open_zipfile_writer_file
    else:
        container = _open_zipfile_writer_buffer
    writer = container(name_or_buffer)
    return writer
+
+
+def _is_compressed_file(f) -> bool:
+ compress_modules = ['gzip']
+ try:
+ return f.__module__ in compress_modules
+ except AttributeError:
+ return False
+
+
def _should_read_directly(f):
    """
    Checks if f is a file that should be read directly. It should be read
    directly if it is backed by a real file (has a fileno) and is not a
    compressed file (e.g. gzip)
    """
    if _is_compressed_file(f):
        return False
    try:
        return f.fileno() >= 0
    except io.UnsupportedOperation:
        # In-memory buffers (e.g. io.BytesIO) raise this from fileno().
        return False
    except AttributeError:
        # Object doesn't expose fileno() at all.
        return False
+
+
+def _check_seekable(f) -> bool:
+
+ def raise_err_msg(patterns, e):
+ for p in patterns:
+ if p in str(e):
+ msg = (str(e) + ". You can only torch.load from a file that is seekable."
+ + " Please pre-load the data into a buffer like io.BytesIO and"
+ + " try to load from it instead.")
+ raise type(e)(msg)
+ raise e
+
+ try:
+ f.seek(f.tell())
+ return True
+ except (io.UnsupportedOperation, AttributeError) as e:
+ raise_err_msg(["seek", "tell"], e)
+ return False
+
+
+def _check_dill_version(pickle_module) -> None:
+ '''Checks if using dill as the pickle module, and if so, checks if it is the correct version.
+ If dill version is lower than 0.3.1, a ValueError is raised.
+
+ Args:
+ pickle_module: module used for pickling metadata and objects
+
+ '''
+ if pickle_module is not None and pickle_module.__name__ == 'dill':
+ required_dill_version = (0, 3, 1)
+ if not check_module_version_greater_or_equal(pickle_module, required_dill_version, False):
+ raise ValueError((
+ "'torch' supports dill >= {}, but you have dill {}."
+ " Please upgrade dill or switch to 'pickle'"
+ ).format(
+ '.'.join([str(num) for num in required_dill_version]),
+ pickle_module.__version__
+ ))
+
+
+def _check_save_filelike(f):
+ if not isinstance(f, (str, os.PathLike)) and not hasattr(f, 'write'):
+ raise AttributeError(
+ "expected 'f' to be string, path, or a file-like object with "
+ "a 'write' attribute")
+
+
def save(
    obj: object,
    f: FILE_LIKE,
    pickle_module: Any = pickle,
    pickle_protocol: int = DEFAULT_PROTOCOL,
    _use_new_zipfile_serialization: bool = True,
    _disable_byteorder_record: bool = False
) -> None:
    # Reference: https://github.com/pytorch/pytorch/issues/54354
    # The first line of this docstring overrides the one Sphinx generates for the
    # documentation. We need it so that Sphinx doesn't leak `pickle`s path from
    # the build environment into the rendered docs.
    """save(obj, f, pickle_module=pickle, pickle_protocol=DEFAULT_PROTOCOL, _use_new_zipfile_serialization=True)

    Saves an object to a disk file or writable buffer.

    Args:
        obj: the object to be saved
        f: a file-like object (must implement ``write`` and ``flush``), or a
            string / os.PathLike containing a file name
        pickle_module: module used for pickling metadata and objects
        pickle_protocol: can be specified to override the default pickle protocol
        _use_new_zipfile_serialization: when True (default) write the zip-based
            format; when False fall back to the legacy tar-style format
        _disable_byteorder_record: when True, omit the ``byteorder`` record
            from the zip archive

    Example:
        >>> # xdoctest: +SKIP("makes cwd dirty")
        >>> # Save to file
        >>> x = torch.tensor([0, 1, 2, 3, 4])
        >>> torch.save(x, 'tensor.pt')
        >>> # Save to io.BytesIO buffer
        >>> buffer = io.BytesIO()
        >>> torch.save(x, buffer)
    """
    torch._C._log_api_usage_once("torch.save")
    _check_dill_version(pickle_module)
    _check_save_filelike(f)

    if _use_new_zipfile_serialization:
        with _open_zipfile_writer(f) as opened_zipfile:
            _save(obj, opened_zipfile, pickle_module, pickle_protocol, _disable_byteorder_record)
            return
    else:
        with _open_file_like(f, 'wb') as opened_file:
            _legacy_save(obj, opened_file, pickle_module, pickle_protocol)
+
+
def _legacy_save(obj, f, pickle_module, pickle_protocol) -> None:
    """Serialize ``obj`` to ``f`` in the legacy (pre-zipfile) format.

    Writes, in order: magic number, protocol version, sys_info dict, the
    pickled object graph (storages replaced by persistent ids), the sorted
    list of storage keys, and finally the raw bytes of each storage.
    """
    import torch.nn as nn
    serialized_container_types = {}
    serialized_storages = {}

    # Since loading storages that view the same data with different dtypes is
    # not supported, we need to keep track of the dtype associated with each
    # storage data_ptr and throw an error if the dtype is ever different.
    # TODO: This feature could be added in the future
    storage_dtypes: Dict[int, torch.dtype] = {}

    def persistent_id(obj: Any) -> Optional[Tuple]:
        # FIXME: the docs say that persistent_id should only return a string
        # but torch store returns tuples. This works only in the binary protocol
        # see
        # https://docs.python.org/2/library/pickle.html#pickling-and-unpickling-external-objects
        # https://github.com/python/cpython/blob/master/Lib/pickle.py#L527-L537
        if isinstance(obj, type) and issubclass(obj, nn.Module):
            # nn.Module *classes* are stored with their source code so a
            # later load can warn when the class definition has changed.
            if obj in serialized_container_types:
                return None
            serialized_container_types[obj] = True
            source_file = source = None
            try:
                source_lines, _, source_file = get_source_lines_and_file(obj)
                source = ''.join(source_lines)
            except Exception:  # saving the source is optional, so we can ignore any errors
                warnings.warn("Couldn't retrieve source code for container of "
                              "type " + obj.__name__ + ". It won't be checked "
                              "for correctness upon loading.")
            return ('module', obj, source_file, source)

        if isinstance(obj, torch.storage.TypedStorage) or torch.is_storage(obj):
            storage: torch.UntypedStorage

            if isinstance(obj, torch.storage.TypedStorage):
                # TODO: Once we decide to break serialization FC, this case
                # can be deleted
                storage = obj._untyped_storage
                storage_dtype = obj.dtype
                storage_type_str = obj._pickle_storage_type()
                storage_type = getattr(torch, storage_type_str)
                dtype = obj.dtype
                storage_numel = obj._size()

            elif isinstance(obj, torch.UntypedStorage):
                storage = obj
                storage_dtype = torch.uint8
                storage_type = normalize_storage_type(type(obj))
                dtype = torch.uint8
                storage_numel = storage.nbytes()
            else:
                raise TypeError(f'type not recognized: {type(obj)}')

            # If storage is allocated, ensure that any other saved storages
            # pointing to the same data all have the same dtype. If storage is
            # not allocated, don't perform this check
            if storage.data_ptr() != 0:
                if storage.data_ptr() in storage_dtypes:
                    if storage_dtype != storage_dtypes[storage.data_ptr()]:
                        raise RuntimeError(
                            'Cannot save multiple tensors or storages that '
                            'view the same data as different types')
                else:
                    storage_dtypes[storage.data_ptr()] = storage_dtype

            view_metadata: Optional[Tuple[str, int, int]]

            # Offset is always 0, but we keep it for backwards compatibility
            # with the old serialization format (which supported storage views)
            offset = 0
            storage_key = str(storage._cdata)
            location = location_tag(storage)

            # TODO: There's an issue here with FC. It might be impossible to
            # solve, but it's worth noting. Imagine we save a list `[storage,
            # tensor]`, where `tensor.storage()` is the same as `storage`, and
            # `tensor.element_size() > 1`. Let's say that `tensor.dtype ==
            # torch.float`. The storage will be serialized with element size
            # of 1, since we're choosing to serialize the first occurrence of
            # a duplicate storage. Since this legacy serialization format saves
            # the numel of the storage, rather than nbytes directly, we'll be
            # effectively saving nbytes in this case. We'll be able to load it
            # and the tensor back up with no problems in _this_ and future
            # versions of pytorch, but in older versions, here's the problem:
            # the storage will be loaded up as a UntypedStorage, and then the
            # FloatTensor will loaded and the UntypedStorage will be assigned to
            # it. Since the storage dtype does not match the tensor dtype, this
            # will cause an error. If we reverse the list, like `[tensor,
            # storage]`, then we will save the `tensor.storage()` as a faked
            # `FloatStorage`, and the saved size will be the correct
            # dtype-specific numel count that old versions expect. `tensor`
            # will be able to load up properly in old versions, pointing to
            # a FloatStorage. However, `storage` is still being translated to
            # a UntypedStorage, and it will try to resolve to the same
            # FloatStorage that `tensor` contains. This will also cause an
            # error. It doesn't seem like there's any way around this.
            # Probably, we just cannot maintain FC for the legacy format if the
            # saved list contains both a tensor and a storage that point to the
            # same data. We should still be able to maintain FC for lists of
            # just tensors, as long as all views share the same dtype as the
            # tensor they are viewing.

            if storage_key not in serialized_storages:
                serialized_storages[storage_key] = (storage, dtype)
            # NOTE(review): this comparison is trivially False (both sides are
            # storage._cdata), so view_metadata is always None — presumably a
            # leftover from when storage views were supported. Confirm intent.
            is_view = storage._cdata != storage._cdata
            if is_view:
                view_metadata = (str(storage._cdata), offset, storage.nbytes())
            else:
                view_metadata = None

            res = ('storage',
                   storage_type,
                   storage_key,
                   location,
                   storage_numel,
                   view_metadata)
            return res
        return None

    sys_info = dict(
        protocol_version=PROTOCOL_VERSION,
        little_endian=sys.byteorder == 'little',
        type_sizes=dict(
            short=SHORT_SIZE,
            int=INT_SIZE,
            long=LONG_SIZE,
        ),
    )

    # Header records, then the object graph with storages hoisted out.
    pickle_module.dump(MAGIC_NUMBER, f, protocol=pickle_protocol)
    pickle_module.dump(PROTOCOL_VERSION, f, protocol=pickle_protocol)
    pickle_module.dump(sys_info, f, protocol=pickle_protocol)
    pickler = pickle_module.Pickler(f, protocol=pickle_protocol)
    pickler.persistent_id = persistent_id
    pickler.dump(obj)

    # Raw storage payloads follow, in sorted-key order so loading is deterministic.
    serialized_storage_keys = sorted(serialized_storages.keys())
    pickle_module.dump(serialized_storage_keys, f, protocol=pickle_protocol)
    f.flush()
    for key in serialized_storage_keys:
        storage, dtype = serialized_storages[key]
        storage._write_file(f, _should_read_directly(f), True, torch._utils._element_size(dtype))
+
+
def _save(obj, zip_file, pickle_module, pickle_protocol, _disable_byteorder_record):
    """Serialize ``obj`` into an already-open zip archive (new format).

    Writes `data.pkl` (the pickled object graph with storages replaced by
    persistent ids), an optional `byteorder` record, and one `data/<key>`
    record per referenced storage.
    """
    serialized_storages = {}
    id_map: Dict[int, str] = {}

    # Since loading storages that view the same data with different dtypes is
    # not supported, we need to keep track of the dtype associated with each
    # storage data_ptr and throw an error if the dtype is ever different.
    # TODO: This feature could be added in the future
    storage_dtypes: Dict[int, torch.dtype] = {}

    def persistent_id(obj):
        # FIXME: the docs say that persistent_id should only return a string
        # but torch store returns tuples. This works only in the binary protocol
        # see
        # https://docs.python.org/2/library/pickle.html#pickling-and-unpickling-external-objects
        # https://github.com/python/cpython/blob/master/Lib/pickle.py#L527-L537
        if isinstance(obj, torch.storage.TypedStorage) or torch.is_storage(obj):

            if isinstance(obj, torch.storage.TypedStorage):
                # TODO: Once we decide to break serialization FC, this case
                # can be deleted
                storage = obj._untyped_storage
                storage_dtype = obj.dtype
                storage_type_str = obj._pickle_storage_type()
                storage_type = getattr(torch, storage_type_str)
                storage_numel = obj._size()

            else:
                storage = obj
                storage_dtype = torch.uint8
                storage_type = normalize_storage_type(type(obj))
                storage_numel = storage.nbytes()

            # If storage is allocated, ensure that any other saved storages
            # pointing to the same data all have the same dtype. If storage is
            # not allocated, don't perform this check
            if storage.data_ptr() != 0:
                if storage.data_ptr() in storage_dtypes:
                    if storage_dtype != storage_dtypes[storage.data_ptr()]:
                        raise RuntimeError(
                            'Cannot save multiple tensors or storages that '
                            'view the same data as different types')
                else:
                    storage_dtypes[storage.data_ptr()] = storage_dtype

            # Keys are small sequential integers assigned per unique storage.
            storage_key = id_map.setdefault(storage._cdata, str(len(id_map)))
            location = location_tag(storage)
            serialized_storages[storage_key] = storage

            return ('storage',
                    storage_type,
                    storage_key,
                    location,
                    storage_numel)

        return None

    # Write the pickle data for `obj`
    data_buf = io.BytesIO()
    pickler = pickle_module.Pickler(data_buf, protocol=pickle_protocol)
    pickler.persistent_id = persistent_id
    pickler.dump(obj)
    data_value = data_buf.getvalue()
    zip_file.write_record('data.pkl', data_value, len(data_value))

    # Write byte order marker
    if not _disable_byteorder_record:
        if sys.byteorder not in ['little', 'big']:
            raise ValueError('Unknown endianness type: ' + sys.byteorder)

        zip_file.write_record('byteorder', sys.byteorder, len(sys.byteorder))

    # Write each tensor to a file named tensor/the_tensor_key in the zip archive
    for key in sorted(serialized_storages.keys()):
        name = f'data/{key}'
        storage = serialized_storages[key]
        # given that we copy things around anyway, we might use storage.cpu()
        # this means to that to get tensors serialized, you need to implement
        # .cpu() on the underlying Storage
        if storage.device.type != 'cpu':
            storage = storage.cpu()
        # Now that it is on the CPU we can directly copy it into the zip file
        num_bytes = storage.nbytes()
        zip_file.write_record(name, storage.data_ptr(), num_bytes)
+
+
def load(
    f: FILE_LIKE,
    map_location: MAP_LOCATION = None,
    pickle_module: Any = None,
    *,
    weights_only: bool = False,
    mmap: Optional[bool] = None,
    **pickle_load_args: Any
) -> Any:
    # Reference: https://github.com/pytorch/pytorch/issues/54354
    # The first line of this docstring overrides the one Sphinx generates for the
    # documentation. We need it so that Sphinx doesn't leak `pickle`s path from
    # the build environment into the rendered docs.
    """load(f, map_location=None, pickle_module=pickle, *, weights_only=False, mmap=None, **pickle_load_args)

    Loads an object saved with :func:`torch.save` from a file or buffer.

    Args:
        f: a file-like object (must implement ``read``, ``readline``, ``tell``
            and ``seek``), or a string / os.PathLike containing a file name
        map_location: a function, :class:`torch.device`, string, or dict
            specifying how to remap storage locations
        pickle_module: module used for unpickling metadata and objects; must be
            left as None when ``weights_only`` is True
        weights_only: restrict unpickling to tensors and primitive types via
            the safe weights-only unpickler
        mmap: memory-map the checkpoint file instead of copying storages into
            memory; requires ``f`` to be a string filename and a zipfile-format
            checkpoint
        pickle_load_args: optional keyword arguments passed through to the
            unpickler (e.g. ``encoding``; defaults to ``'utf-8'``)

    Example:
        >>> # xdoctest: +SKIP("undefined filepaths")
        >>> torch.load('tensors.pt', weights_only=True)
        >>> # Load all tensors onto the CPU
        >>> torch.load('tensors.pt', map_location=torch.device('cpu'), weights_only=True)
        >>> # Load from an io.BytesIO object (weights_only=False can be unsafe)
        >>> with open('tensor.pt', 'rb') as f:
        ...     buffer = io.BytesIO(f.read())
        >>> torch.load(buffer, weights_only=False)
    """
    torch._C._log_api_usage_once("torch.load")
    UNSAFE_MESSAGE = (
        "Weights only load failed. Re-running `torch.load` with `weights_only` set to `False`"
        " will likely succeed, but it can result in arbitrary code execution."
        "Do it only if you get the file from a trusted source. WeightsUnpickler error: "
    )
    # Add ability to force safe only weight loads via environment variable
    if os.getenv("TORCH_FORCE_WEIGHTS_ONLY_LOAD", "0").lower() in ['1', 'y', 'yes', 'true']:
        weights_only = True

    if weights_only:
        if pickle_module is not None:
            raise RuntimeError("Can not safely load weights when explicit pickle_module is specified")
    else:
        if pickle_module is None:
            pickle_module = pickle

    # make flipping default BC-compatible
    if mmap is None:
        mmap = False

    _check_dill_version(pickle_module)

    if 'encoding' not in pickle_load_args.keys():
        pickle_load_args['encoding'] = 'utf-8'

    with _open_file_like(f, 'rb') as opened_file:
        if _is_zipfile(opened_file):
            # The zipfile reader is going to advance the current file position.
            # If we want to actually tail call to torch.jit.load, we need to
            # reset back to the original position.
            orig_position = opened_file.tell()
            overall_storage = None
            with _open_zipfile_reader(opened_file) as opened_zipfile:
                if _is_torchscript_zip(opened_zipfile):
                    warnings.warn("'torch.load' received a zip file that looks like a TorchScript archive"
                                  " dispatching to 'torch.jit.load' (call 'torch.jit.load' directly to"
                                  " silence this warning)", UserWarning)
                    opened_file.seek(orig_position)
                    return torch.jit.load(opened_file, map_location=map_location)
                if mmap:
                    if not isinstance(f, str):
                        raise ValueError("f must be a string filename in order to use mmap argument")
                    size = os.path.getsize(f)
                    overall_storage = torch.UntypedStorage.from_file(f, False, size)
                if weights_only:
                    try:
                        return _load(opened_zipfile,
                                     map_location,
                                     _weights_only_unpickler,
                                     overall_storage=overall_storage,
                                     **pickle_load_args)
                    except RuntimeError as e:
                        raise pickle.UnpicklingError(UNSAFE_MESSAGE + str(e)) from None
                return _load(opened_zipfile,
                             map_location,
                             pickle_module,
                             overall_storage=overall_storage,
                             **pickle_load_args)
        if mmap:
            raise RuntimeError("mmap can only be used with files saved with "
                               "`torch.save(_use_new_zipfile_serialization=True), "
                               "please torch.save your checkpoint with this option in order to use mmap.")
        if weights_only:
            try:
                return _legacy_load(opened_file, map_location, _weights_only_unpickler, **pickle_load_args)
            except RuntimeError as e:
                raise pickle.UnpicklingError(UNSAFE_MESSAGE + str(e)) from None
        return _legacy_load(opened_file, map_location, pickle_module, **pickle_load_args)
+
+
+# Register pickling support for layout instances such as
+# torch.sparse_coo, etc
+def _get_layout(name):
+ """Get layout extension object from its string representation.
+ """
+ cache = _get_layout.cache # type: ignore[attr-defined]
+ if not cache:
+ for v in torch.__dict__.values():
+ if isinstance(v, torch.layout):
+ cache[str(v)] = v
+ return cache[name]
+
+# There are yet not good way to type annotate function attributes https://github.com/python/mypy/issues/2087
+_get_layout.cache = {} # type: ignore[attr-defined]
+copyreg.pickle(torch.layout, lambda obj: (_get_layout, (str(obj),)))
+
+
+def _legacy_load(f, map_location, pickle_module, **pickle_load_args):
+ deserialized_objects: Dict[int, Any] = {}
+
+ restore_location = _get_restore_location(map_location)
+
+ class UnpicklerWrapper(pickle_module.Unpickler): # type: ignore[name-defined]
+
+ def find_class(self, mod_name, name):
+ if type(name) is str and 'Storage' in name:
+ try:
+ return StorageType(name)
+ except KeyError:
+ pass
+ return super().find_class(mod_name, name)
+
+ def _check_container_source(container_type, source_file, original_source):
+ try:
+ current_source = ''.join(get_source_lines_and_file(container_type)[0])
+ except Exception: # saving the source is optional, so we can ignore any errors
+ warnings.warn("Couldn't retrieve source code for container of "
+ "type " + container_type.__name__ + ". It won't be checked "
+ "for correctness upon loading.")
+ return
+ if original_source != current_source:
+ if container_type.dump_patches:
+ file_name = container_type.__name__ + '.patch'
+ diff = difflib.unified_diff(current_source.split('\n'),
+ original_source.split('\n'),
+ source_file,
+ source_file, lineterm="")
+ lines = '\n'.join(diff)
+ try:
+ with open(file_name, 'a+') as f:
+ file_size = f.seek(0, 2)
+ f.seek(0)
+ if file_size == 0:
+ f.write(lines)
+ elif file_size != len(lines) or f.read() != lines:
+ raise OSError
+ msg = ("Saved a reverse patch to " + file_name + ". "
+ "Run `patch -p0 < " + file_name + "` to revert your "
+ "changes.")
+ except OSError:
+ msg = ("Tried to save a patch, but couldn't create a "
+ "writable file " + file_name + ". Make sure it "
+ "doesn't exist and your working directory is "
+ "writable.")
+ else:
+ msg = ("you can retrieve the original source code by "
+ "accessing the object's source attribute or set "
+ "`torch.nn.Module.dump_patches = True` and use the "
+ "patch tool to revert the changes.")
+ msg = f"source code of class '{torch.typename(container_type)}' has changed. {msg}"
+ warnings.warn(msg, SourceChangeWarning)
+
+ def legacy_load(f):
+ deserialized_objects: Dict[int, Any] = {}
+
+ def persistent_load(saved_id):
+ if isinstance(saved_id, tuple):
+ # Ignore containers that don't have any sources saved
+ if all(saved_id[1:]):
+ _check_container_source(*saved_id)
+ return saved_id[0]
+ return deserialized_objects[int(saved_id)]
+
+ with closing(tarfile.open(fileobj=f, mode='r:', format=tarfile.PAX_FORMAT)) as tar, \
+ mkdtemp() as tmpdir:
+
+ tar.extract('storages', path=tmpdir)
+ with open(os.path.join(tmpdir, 'storages'), 'rb', 0) as f:
+ num_storages = pickle_module.load(f, **pickle_load_args)
+ for i in range(num_storages):
+ args = pickle_module.load(f, **pickle_load_args)
+ key, location, storage_type = args
+ dtype = storage_type._dtype
+ obj = cast(Storage, torch.UntypedStorage)._new_with_file(f, torch._utils._element_size(dtype))
+ obj = restore_location(obj, location)
+ # TODO: Once we decide to break serialization FC, we can
+ # stop wrapping with TypedStorage
+ deserialized_objects[key] = torch.storage.TypedStorage(
+ wrap_storage=obj,
+ dtype=dtype,
+ _internal=True)
+
+ storage_views = pickle_module.load(f, **pickle_load_args)
+ for target_cdata, root_cdata, offset, numel in storage_views:
+ root = deserialized_objects[root_cdata]
+ element_size = torch._utils._element_size(root.dtype)
+ offset_bytes = offset * element_size
+ # TODO: Once we decide to break serialization FC, we can
+ # stop wrapping with TypedStorage
+ deserialized_objects[target_cdata] = torch.storage.TypedStorage(
+ wrap_storage=root._untyped_storage[offset_bytes:offset_bytes + numel * element_size],
+ dtype=root.dtype,
+ _internal=True)
+
+ tar.extract('tensors', path=tmpdir)
+ with open(os.path.join(tmpdir, 'tensors'), 'rb', 0) as f:
+ num_tensors = pickle_module.load(f, **pickle_load_args)
+ for _ in range(num_tensors):
+ args = pickle_module.load(f, **pickle_load_args)
+ key, storage_id, original_tensor_type = args
+ storage = deserialized_objects[storage_id]
+ ndim, = struct.unpack(' str:
+ # When using encoding='bytes' in Py3, some **internal** keys stored as
+ # strings in Py2 are loaded as bytes. This function decodes them with
+ # ascii encoding, one that Py3 uses by default.
+ #
+ # NOTE: This should only be used on internal keys (e.g., `typename` and
+ # `location` in `persistent_load` below!
+ if isinstance(bytes_str, bytes):
+ return bytes_str.decode('ascii')
+ return bytes_str
+
+
+def _get_restore_location(map_location):
+ if map_location is None:
+ restore_location = default_restore_location
+ elif isinstance(map_location, dict):
+ def restore_location(storage, location):
+ location = map_location.get(location, location)
+ return default_restore_location(storage, location)
+ elif isinstance(map_location, (str, bytes)):
+ def restore_location(storage, location):
+ return default_restore_location(storage, map_location)
+ elif isinstance(map_location, torch.device):
+ def restore_location(storage, location):
+ return default_restore_location(storage, str(map_location))
+ else:
+ def restore_location(storage, location):
+ result = map_location(storage, location)
+ if result is None:
+ result = default_restore_location(storage, location)
+ return result
+ return restore_location
+
+
class StorageType:
    """Stand-in for legacy storage classes during unpickling.

    Records only the dtype implied by the legacy storage-type name
    (e.g. 'FloatStorage' -> torch.float32).
    """

    def __init__(self, name):
        self._dtype = _get_dtype_from_pickle_storage_type(name)

    @property
    def dtype(self):
        return self._dtype

    def __str__(self):
        return 'StorageType(dtype={})'.format(self.dtype)
+
+
def _load(zip_file, map_location, pickle_module, pickle_file='data.pkl', overall_storage=None, **pickle_load_args):
    """Deserialize a zip-format checkpoint from an open ``zip_file`` reader.

    ``overall_storage``, when provided, is a memory-mapped view of the whole
    file used to serve storages without copying (the mmap path in torch.load).
    """
    restore_location = _get_restore_location(map_location)

    # Cache of storages already materialized, keyed by their record key.
    loaded_storages = {}

    # check if byteswapping is needed
    byteordername = 'byteorder'
    byteorderdata = None
    if zip_file.has_record(byteordername):
        byteorderdata = zip_file.get_record(byteordername)
        if byteorderdata not in [b'little', b'big']:
            raise ValueError('Unknown endianness type: ' + byteorderdata.decode())
    elif get_default_load_endianness() == LoadEndianness.LITTLE or \
            get_default_load_endianness() is None:
        # No marker in the archive: assume little-endian unless configured otherwise.
        byteorderdata = b'little'
    elif get_default_load_endianness() == LoadEndianness.BIG:
        byteorderdata = b'big'
    elif get_default_load_endianness() == LoadEndianness.NATIVE:
        pass
    else:
        raise ValueError('Invalid load endianness type')

    if not zip_file.has_record(byteordername) and \
            get_default_load_endianness() is None and \
            sys.byteorder == 'big':
        # Default behaviour was changed
        # See https://github.com/pytorch/pytorch/issues/101688
        warnings.warn("The default load endianness for checkpoints without a byteorder mark "
                      "on big endian machines was changed from 'native' to 'little' endian, "
                      "to avoid this behavior please use "
                      "torch.serialization.set_default_load_endianness to set "
                      "the desired default load endianness",
                      UserWarning)

    def load_tensor(dtype, numel, key, location):
        # Materialize one storage record, byteswapping and remapping its
        # device location as needed.
        name = f'data/{key}'
        if overall_storage is not None:
            # mmap path: slice the file-backed storage instead of copying.
            storage_offset = zip_file.get_record_offset(name)
            storage = overall_storage[storage_offset:storage_offset + numel]
        else:
            storage = zip_file.get_storage_from_record(name, numel, torch.UntypedStorage)._typed_storage()._untyped_storage
        # swap here if byteswapping is needed
        if byteorderdata is not None:
            if byteorderdata.decode() != sys.byteorder:
                storage.byteswap(dtype)

        # TODO: Once we decide to break serialization FC, we can
        # stop wrapping with TypedStorage
        typed_storage = torch.storage.TypedStorage(
            wrap_storage=restore_location(storage, location),
            dtype=dtype,
            _internal=True)

        if typed_storage._data_ptr() != 0:
            loaded_storages[key] = typed_storage

        return typed_storage

    def persistent_load(saved_id):
        assert isinstance(saved_id, tuple)
        typename = _maybe_decode_ascii(saved_id[0])
        data = saved_id[1:]

        assert typename == 'storage', \
            f"Unknown typename for persistent_load, expected 'storage' but got '{typename}'"
        storage_type, key, location, numel = data
        if storage_type is torch.UntypedStorage:
            dtype = torch.uint8
        else:
            dtype = storage_type.dtype

        if key in loaded_storages:
            typed_storage = loaded_storages[key]
        else:
            nbytes = numel * torch._utils._element_size(dtype)
            typed_storage = load_tensor(dtype, nbytes, key, _maybe_decode_ascii(location))

        return typed_storage

    load_module_mapping: Dict[str, str] = {
        # See https://github.com/pytorch/pytorch/pull/51633
        'torch.tensor': 'torch._tensor'
    }

    # Need to subclass Unpickler instead of directly monkey-patching the find_class method
    # because it's marked readonly in pickle.
    # The type: ignore is because mypy can't statically determine the type of this class.
    class UnpicklerWrapper(pickle_module.Unpickler):  # type: ignore[name-defined]
        # from https://stackoverflow.com/questions/13398462/unpickling-python-objects-with-a-changed-module-path/13405732
        # Lets us override the imports that pickle uses when unpickling an object.
        # This is useful for maintaining BC if we change a module path that tensor instantiation relies on.
        def find_class(self, mod_name, name):
            if type(name) is str and 'Storage' in name:
                try:
                    return StorageType(name)
                except KeyError:
                    pass
            mod_name = load_module_mapping.get(mod_name, mod_name)
            return super().find_class(mod_name, name)

    # Load the data (which may in turn use `persistent_load` to load tensors)
    data_file = io.BytesIO(zip_file.get_record(pickle_file))

    unpickler = UnpicklerWrapper(data_file, **pickle_load_args)
    unpickler.persistent_load = persistent_load
    result = unpickler.load()

    torch._utils._validate_loaded_sparse_tensors()
    torch._C._log_api_usage_metadata(
        "torch.load.metadata", {"serialization_id": zip_file.serialization_id()}
    )
    return result
+
+
+def _is_torchscript_zip(zip_file):
+ return 'constants.pkl' in zip_file.get_all_records()
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/storage.py b/env-llmeval/lib/python3.10/site-packages/torch/storage.py
new file mode 100644
index 0000000000000000000000000000000000000000..f65c0806accda17640c987a78fee5b47fdea9b45
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/storage.py
@@ -0,0 +1,1200 @@
+import io
+
+import torch
+from ._utils import _type, _cuda, _hpu
+from torch.types import Storage
+from typing import cast, Any, Dict as _Dict, Optional as _Optional, TypeVar, Type, Union
+import copy
+import collections
+from functools import lru_cache
+import warnings
+import threading
+import functools
+try:
+ import numpy as np
+ HAS_NUMPY = True
+except ModuleNotFoundError:
+ np = None # type: ignore[assignment]
+
# Guards _share_memory_map. The map records, per storage ``_cdata`` handle,
# the RLock held by the thread currently moving that storage to shared
# memory (used by the _share_memory_lock_protected decorator below).
_share_memory_lock = threading.Lock()
_share_memory_map: _Dict[int, threading.RLock] = {}

# Type variable covering both storage flavors, so helpers can be annotated
# to return the same storage type they were given.
T = TypeVar('T', bound='Union[_StorageBase, TypedStorage]')
class _StorageBase:
    """Python-level interface shared by storage objects.

    The one-line ``...`` methods are typing stubs whose real implementations
    are supplied by the C extension (the sharing primitives are defined in
    torch/csrc/generic/StorageSharing.cpp, per the comment below); the
    remaining methods implement pure-Python conveniences on top of them.
    """
    _cdata: Any  # opaque handle identifying the underlying C storage
    is_sparse: bool = False
    is_sparse_csr: bool = False
    device: torch.device

    # --- typing stubs implemented natively ---------------------------------
    def __init__(self, *args, **kwargs): ... # noqa: E704
    def __len__(self) -> int: ... # type: ignore[empty-body] # noqa: E704
    def __getitem__(self, idx): ... # noqa: E704
    def __setitem__(self, *args, **kwargs): ... # noqa: E704
    def copy_(self, source: T, non_blocking: _Optional[bool] = None) -> T: ... # type: ignore[empty-body] # noqa: E704
    def new(self) -> T: ... # type: ignore[empty-body, misc, type-var] # noqa: E704
    def nbytes(self) -> int: ... # type: ignore[empty-body] # noqa: E704

    def size(self) -> int:
        # For an untyped storage, "size" is simply its byte count.
        return self.nbytes()

    def type(self, dtype: _Optional[str] = None, non_blocking: bool = False) -> T: ... # type: ignore[empty-body, misc, type-var] # noqa: E704
    def cuda(self, device=None, non_blocking=False, **kwargs) -> T: ... # type: ignore[empty-body, misc, type-var] # noqa: E704
    def hpu(self, device=None, non_blocking=False, **kwargs) -> T: ... # type: ignore[empty-body, misc, type-var] # noqa: E704
    def element_size(self) -> int: ... # type: ignore[empty-body, type-var] # noqa: E704

    def get_device(self) -> int:
        """Return the device index of this storage's device."""
        return self.device.index

    def data_ptr(self) -> int: ... # type: ignore[empty-body] # noqa: E704

    # Defined in torch/csrc/generic/StorageSharing.cpp
    def _share_filename_cpu_(self, *args, **kwargs): ... # noqa: E704
    def _share_fd_cpu_(self, *args, **kwargs): ... # noqa: E704
    @classmethod
    def _new_using_filename_cpu(cls: Type[T], size: int) -> T: ... # type: ignore[empty-body] # noqa: E704
    @classmethod
    def _new_using_fd_cpu(cls: Type[T], size: int) -> T: ... # type: ignore[empty-body] # noqa: E704
    @classmethod
    def from_buffer(cls: Type[T], *args, **kwargs) -> T: ... # type: ignore[empty-body] # noqa: E704
    @classmethod
    def _new_shared_filename_cpu(cls: Type[T], manager, obj, size, *, device=None, dtype=None) -> T: ... # type: ignore[empty-body] # noqa: E704
    @classmethod
    def _release_ipc_counter_cuda(cls: Type[T], *args, **kwargs) -> T: ... # type: ignore[empty-body] # noqa: E704
    @classmethod
    def _new_with_weak_ptr(cls: Type[T], *args, **kwargs) -> T: ... # type: ignore[empty-body] # noqa: E704
    def _shared_decref(self) -> T: ... # type: ignore[empty-body, misc, type-var] # noqa: E704
    def _write_file(self, *args, **kwargs): ... # noqa: E704
    def resize_(self, size: int): ... # noqa: E704
    def _weak_ref(self, *args, **kwargs) -> T: ... # type: ignore[empty-body, misc, type-var] # noqa: E704
    def _set_from_file(self, *args, **kwargs): ... # noqa: E704
    def _set_cdata(self, *args, **kwargs): ... # noqa: E704
    def _share_cuda_(self, *args, **kwargs): ... # noqa: E704
    def is_shared(self) -> bool: ... # type: ignore[empty-body] # noqa: E704
    @classmethod
    def _new_shared_cuda(cls: Type[T], *args, **kwargs) -> T: ... # type: ignore[empty-body] # noqa: E704
    def _shared_incref(self, *args, **kwargs): ... # noqa: E704
    @classmethod
    def _free_weak_ref(cls, *args, **kwargs): ... # noqa: E704
    @property
    def is_cuda(self): ... # noqa: E704
    @property
    def is_hpu(self): ... # noqa: E704
    @classmethod
    def from_file(cls, filename, shared, nbytes) -> T: ... # type: ignore[empty-body, misc, type-var] # noqa: E704
    @classmethod
    def _expired(cls, *args, **kwargs) -> T: ... # type: ignore[empty-body, misc, type-var] # noqa: E704
    def _byteswap(self, *args, **kwargs): ... # noqa: E704
    def _get_filename(self, *args, **kwargs) -> _Optional[str]: ... # type: ignore[empty-body, misc] # noqa: E704

    def __str__(self):
        info_str = (
            f'[{torch.typename(self)}(device={self.device}) '
            f'of size {len(self)}]')
        # Meta storages have no data to print, only the summary line.
        if self.device.type == 'meta':
            return '...\n' + info_str
        else:
            data_str = ' ' + '\n '.join(str(self[i]) for i in range(self.size()))
            return data_str + '\n' + info_str

    def __repr__(self):
        return str(self)

    def __iter__(self):
        return iter(self[i] for i in range(self.size()))

    def __copy__(self):
        return self.clone()

    def __deepcopy__(self, memo):
        # A 'torch' sub-memo keyed by _cdata ensures storages aliased by
        # several tensors are deep-copied only once per deepcopy pass.
        memo = memo.setdefault('torch', {})
        if self._cdata in memo:
            return memo[self._cdata]
        new_storage = self.clone()
        memo[self._cdata] = new_storage
        return new_storage

    def __reduce__(self):
        # Pickle via torch.save's legacy (non-zipfile) format.
        b = io.BytesIO()
        torch.save(self, b, _use_new_zipfile_serialization=False)
        return (_load_from_bytes, (b.getvalue(),))

    def __sizeof__(self):
        return super().__sizeof__() + self.size()

    def clone(self):
        """Return a copy of this storage."""
        return type(self)(self.nbytes(), device=self.device).copy_(self)

    def tolist(self):
        """Return a list containing the elements of this storage."""
        return list(self)

    def cpu(self):
        """Return a CPU copy of this storage if it's not already on the CPU."""
        if self.device.type != 'cpu':
            return torch.UntypedStorage(self.size()).copy_(self, False)
        else:
            return self

    def mps(self):
        """Return a MPS copy of this storage if it's not already on the MPS."""
        if self.device.type != 'mps':
            return torch.UntypedStorage(self.size(), device="mps").copy_(self, False)
        else:
            return self

    def _to(self, dtype):
        # Cast by viewing the bytes as a uint8 tensor and converting; clone
        # when the conversion was a no-op so the result never aliases self.
        if not isinstance(dtype, torch.dtype):
            raise TypeError(f"Argument 'dtype' must be torch.dtype, not {type(dtype)}")
        storage = torch.tensor([], dtype=torch.uint8, device=self.device).set_(cast(Storage, self)).to(dtype)._typed_storage()
        if storage.data_ptr() == self.data_ptr():
            storage = storage.clone()
        return storage

    def double(self):
        """Casts this storage to double type."""
        return self._to(torch.double)

    def float(self):
        """Casts this storage to float type."""
        return self._to(torch.float)

    def half(self):
        """Casts this storage to half type."""
        return self._to(torch.half)

    def long(self):
        """Casts this storage to long type."""
        return self._to(torch.long)

    def int(self):
        """Casts this storage to int type."""
        return self._to(torch.int)

    def short(self):
        """Casts this storage to short type."""
        return self._to(torch.short)

    def char(self):
        """Casts this storage to char type."""
        return self._to(torch.int8)

    def byte(self):
        """Casts this storage to byte type."""
        return self._to(torch.uint8)

    def bool(self):
        """Casts this storage to bool type."""
        return self._to(torch.bool)

    def bfloat16(self):
        """Casts this storage to bfloat16 type."""
        return self._to(torch.bfloat16)

    def complex_double(self):
        """Casts this storage to complex double type."""
        return self._to(torch.cdouble)

    def complex_float(self):
        """Casts this storage to complex float type."""
        return self._to(torch.cfloat)

    def float8_e5m2(self):
        """Casts this storage to float8_e5m2 type"""
        return self._to(torch.float8_e5m2)

    def float8_e4m3fn(self):
        """Casts this storage to float8_e4m3fn type"""
        return self._to(torch.float8_e4m3fn)

    def is_pinned(self, device: Union[str, torch.device] = 'cuda'):
        r"""Determine whether the CPU storage is already pinned on device.

        Args:
            device (str or torch.device): The device to pin memory on. Default: ``'cuda'``.

        Returns:
            A boolean variable.
        """
        return torch.tensor([], dtype=torch.uint8, device=self.device).set_(
            cast(Storage, self)).is_pinned(device)

    def pin_memory(self, device: Union[str, torch.device] = 'cuda'):
        r"""Copy the CPU storage to pinned memory, if it's not already pinned.

        Args:
            device (str or torch.device): The device to pin memory on. Default: ``'cuda'``.

        Returns:
            A pinned CPU storage.
        """
        if self.device.type != 'cpu':
            raise TypeError(f"cannot pin '{self.type()}' only CPU memory can be pinned")

        pinned_tensor = torch.tensor([], dtype=torch.uint8, device=self.device).set_(
            cast(Storage, self)).pin_memory(device)
        return pinned_tensor.untyped_storage()

    def share_memory_(self):
        """See :meth:`torch.UntypedStorage.share_memory_`"""
        from torch.multiprocessing import get_sharing_strategy
        if self.device.type in ["cuda", torch._C._get_privateuse1_backend_name()]:
            pass  # CUDA or PrivateUse1 doesn't use POSIX shared memory
        elif get_sharing_strategy() == 'file_system':
            self._share_filename_cpu_()
        else:
            self._share_fd_cpu_()
        return self

    @classmethod
    def _new_shared(cls, size, *, device='cpu'):
        """Create a new storage in shared memory with the same data type."""
        from torch.multiprocessing import get_sharing_strategy
        device = torch.device(device)
        if device.type in ["cuda", torch._C._get_privateuse1_backend_name()]:
            return cls(size, device=device)
        elif get_sharing_strategy() == 'file_system':
            return cls._new_using_filename_cpu(size)
        else:
            return cls._new_using_fd_cpu(size)

    def untyped(self):
        # An untyped storage is already its own "untyped" view.
        return self

    def byteswap(self, dtype):
        """Swap bytes in underlying data."""
        elem_size = torch._utils._element_size(dtype)
        # for complex types, don't swap first and second numbers
        if dtype.is_complex:
            elem_size = max(int(elem_size / 2), 1)
        self._byteswap(elem_size)
+
+
def _share_memory_lock_protected(fn):
    """Decorator serializing share-memory operations on the same storage.

    The first thread to enter for a given storage (keyed by ``_cdata``)
    installs and holds an RLock in ``_share_memory_map``; any other thread
    arriving for the same storage blocks until that lock is released, then
    proceeds (the sharing work is already done or redone idempotently —
    NOTE(review): idempotence of ``fn`` is assumed here, confirm against the
    wrapped C functions). The global ``_share_memory_lock`` only protects the
    map itself, so distinct storages do not serialize against each other.
    """
    @functools.wraps(fn)
    def wrapper(self, *args, **kwargs):
        to_free = None   # set when this thread owns the per-storage lock
        to_wait = None   # set when another thread owns it and we must wait
        with _share_memory_lock:
            key = self._cdata
            if key in _share_memory_map:
                to_wait = _share_memory_map[key]
            else:
                _share_memory_map[key] = threading.RLock()
                _share_memory_map[key].acquire()
                to_free = key

        # If we're already in the process of sharing the storage, wait
        # for it to be done.
        if to_wait is not None:
            with to_wait:
                pass

        try:
            return fn(self, *args, **kwargs)
        finally:
            # If we acquired the storage lock here and we're done working on it
            # we can now release it and free the entry.
            if to_free is not None:
                # Ensure that the cdata from the storage didn't change and only
                # the data_ptr did.
                assert self._cdata == to_free
                with _share_memory_lock:
                    _share_memory_map[to_free].release()
                    del _share_memory_map[to_free]
    return wrapper
+
class UntypedStorage(torch._C.StorageBase, _StorageBase):
    """Untyped (byte-addressed) storage; combines the native implementation
    (``torch._C.StorageBase``) with the Python conveniences of
    ``_StorageBase``."""

    def __getitem__(self, *args, **kwargs):
        # Meta storages carry no data, so element access is unsupported.
        if self.device.type == 'meta':
            raise NotImplementedError("Not available for 'meta' device type")
        return super().__getitem__(*args, **kwargs)

    @property
    def is_cuda(self):
        # True iff the storage lives on a CUDA device.
        return self.device.type == 'cuda'

    @property
    def is_hpu(self):
        # True iff the storage lives on an HPU device.
        return self.device.type == 'hpu'

    @property
    def filename(self) -> _Optional[str]:
        """Returns the file name associated with this storage if the storage was memory mapped from a file,
        or ``None`` if the storage was not created by memory mapping a file."""
        return self._get_filename()

    @_share_memory_lock_protected
    def share_memory_(self, *args, **kwargs):
        """
        Moves the storage to shared memory.

        This is a no-op for storages already in shared memory and for CUDA
        storages, which do not need to be moved for sharing across processes.
        Storages in shared memory cannot be resized.

        Note that to mitigate issues like `this `_
        it is thread safe to call this function from multiple threads on the same object.
        It is NOT thread safe though to call any other function on self without proper
        synchronization. Please see :doc:`/notes/multiprocessing` for more details.

        .. note::
            When all references to a storage in shared memory are deleted, the associated shared memory
            object will also be deleted. PyTorch has a special cleanup process to ensure that this happens
            even if the current process exits unexpectedly.

        It is worth noting the difference between :meth:`share_memory_` and :meth:`from_file` with ``shared = True``

        #. ``share_memory_`` uses `shm_open(3) `_ to create a
           POSIX shared memory object while :meth:`from_file` uses
           `open(2) `_ to open the filename passed by the user.
        #. Both use an `mmap(2) call `_ with ``MAP_SHARED``
           to map the file/object into the current virtual address space
        #. ``share_memory_`` will call ``shm_unlink(3)`` on the object after mapping it to make sure the shared memory
           object is freed when no process has the object open. ``torch.from_file(shared=True)`` does not unlink the
           file. This file is persistent and will remain until it is deleted by the user.

        Returns:
            ``self``
        """
        return super().share_memory_(*args, **kwargs)

    @_share_memory_lock_protected
    def _share_fd_cpu_(self, *args, **kwargs):
        # fd-based sharing, serialized per-storage by the decorator.
        return super()._share_fd_cpu_(*args, **kwargs)

    @_share_memory_lock_protected
    def _share_filename_cpu_(self, *args, **kwargs):
        # filename-based sharing, serialized per-storage by the decorator.
        return super()._share_filename_cpu_(*args, **kwargs)
+
+def _load_from_bytes(b):
+ return torch.load(io.BytesIO(b))
+
+
# Bind the shared implementations from torch._utils (imported at the top of
# this module) onto _StorageBase, replacing the typing stubs declared in the
# class body.
_StorageBase.type = _type  # type: ignore[assignment]
_StorageBase.cuda = _cuda  # type: ignore[assignment]
_StorageBase.hpu = _hpu  # type: ignore[assignment]
+
+
+@lru_cache(maxsize=None)
+def _dtype_to_storage_type_map():
+ # NOTE: We should no longer add dtypes to this map. This map
+ # is only used for BC/FC with older PyTorch versions. Going forward,
+ # new dtypes of TypedStorage should not translate to a legacy
+ # Storage class. Instead, new dtypes of TypedStorage should
+ # be serialized as an UntypedStorage paired with a torch.dtype
+ return {
+ torch.double: 'DoubleStorage',
+ torch.float: 'FloatStorage',
+ torch.half: 'HalfStorage',
+ torch.long: 'LongStorage',
+ torch.int: 'IntStorage',
+ torch.int16: 'ShortStorage',
+ torch.int8: 'CharStorage',
+ torch.uint8: 'ByteStorage',
+ torch.bool: 'BoolStorage',
+ torch.bfloat16: 'BFloat16Storage',
+ torch.cdouble: 'ComplexDoubleStorage',
+ torch.cfloat: 'ComplexFloatStorage',
+ torch.qint8: 'QInt8Storage',
+ torch.qint32: 'QInt32Storage',
+ torch.quint8: 'QUInt8Storage',
+ torch.quint4x2: 'QUInt4x2Storage',
+ torch.quint2x4: 'QUInt2x4Storage',
+ }
+
@lru_cache(maxsize=None)
def _storage_type_to_dtype_map():
    """Inverse of :func:`_dtype_to_storage_type_map`: legacy Storage class
    name -> dtype."""
    return {name: dtype for dtype, name in _dtype_to_storage_type_map().items()}
+
+def _get_storage_from_sequence(sequence, dtype, device):
+ if dtype in [torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8]:
+ interpret_dtypes = {
+ torch.quint8: torch.uint8,
+ torch.quint4x2: torch.uint8,
+ torch.quint2x4: torch.uint8,
+ torch.qint32: torch.int32,
+ torch.qint8: torch.int8
+ }
+ tmp_tensor = torch.tensor(
+ sequence,
+ dtype=interpret_dtypes[dtype],
+ device=device)
+
+ else:
+ tmp_tensor = torch.tensor(
+ sequence,
+ dtype=dtype,
+ device=device)
+
+ return tmp_tensor._typed_storage()._untyped_storage
+
def _isint(x):
    """True for Python ints and, when numpy is importable, numpy integer
    scalars as well."""
    if not HAS_NUMPY:
        return isinstance(x, int)
    return isinstance(x, (int, np.integer))
+
# When True, _warn_typed_storage_removal emits its deprecation warning on
# every call instead of only the first one.
_always_warn_typed_storage_removal = False

def _get_always_warn_typed_storage_removal():
    """Return the current value of the always-warn flag."""
    return _always_warn_typed_storage_removal

def _set_always_warn_typed_storage_removal(always_warn):
    """Set the always-warn flag (must be a bool)."""
    global _always_warn_typed_storage_removal
    assert isinstance(always_warn, bool)
    _always_warn_typed_storage_removal = always_warn
+
def _warn_typed_storage_removal(stacklevel=2):
    """Emit the TypedStorage deprecation warning.

    By default the warning fires only once per process (tracked via a
    ``has_warned`` attribute on this function); the always-warn flag forces
    it on every call. ``stacklevel`` is bumped by one to account for this
    wrapper frame.
    """
    global _always_warn_typed_storage_removal

    def is_first_time():
        # The one-shot state lives on the function object itself.
        return not _warn_typed_storage_removal.__dict__.get('has_warned', False)

    if not (_get_always_warn_typed_storage_removal() or is_first_time()):
        return
    warnings.warn(
        "TypedStorage is deprecated. It will be removed in the future and "
        "UntypedStorage will be the only storage class. This should only matter "
        "to you if you are using storages directly. To access UntypedStorage "
        "directly, use tensor.untyped_storage() instead of tensor.storage()",
        UserWarning,
        stacklevel=stacklevel + 1)
    _warn_typed_storage_removal.__dict__['has_warned'] = True
+
def _reset_warn_typed_storage_removal():
    """Clear the one-shot flag so the next deprecation call warns again."""
    _warn_typed_storage_removal.has_warned = False
+
+def _get_device_from_module(module: str):
+ if module.split(".")[-1] in ["cuda", torch._C._get_privateuse1_backend_name()]:
+ return module.split(".")[-1]
+ else:
+ return "cpu"
+
+class TypedStorage:
+ is_sparse = False
+
+ dtype: torch.dtype
+
    @property
    def _dtype(self):
        # Internal alias for ``dtype`` that does not trip the deprecation
        # warning machinery used by the public accessors.
        return self.dtype

    @property
    def filename(self) -> _Optional[str]:
        """Returns the file name associated with this storage if the storage was memory mapped from a file,
        or ``None`` if the storage was not created by memory mapping a file."""
        return self._untyped_storage.filename

    def fill_(self, value):
        """Fill the entire storage with ``value`` in place and return ``self``."""
        _warn_typed_storage_removal()
        self._setitem(slice(0, self._size()), value)
        return self
+
    def __new__(cls, *args, wrap_storage=None, dtype=None, device=None, _internal=False):
        """Construct a TypedStorage, or redirect construction from a legacy
        per-dtype subclass.

        For subclasses, ``dtype`` and ``device`` are implied by the class
        itself (``cls._dtype`` and the class's module name) and must not be
        passed explicitly; construction is redirected to TypedStorage.
        ``_internal=True`` suppresses the deprecation warning for internal
        callers.
        """
        if not _internal:
            _warn_typed_storage_removal()

        # The abstract legacy base can never be instantiated directly.
        if cls == torch.storage._LegacyStorage:
            raise RuntimeError("Only child classes of _LegacyStorage can be instantiated")

        if cls == TypedStorage:
            # Plain TypedStorage: argument validation happens in __init__.
            return super().__new__(cls)

        else:
            arg_error_msg = (
                f'{cls}.__new__ received an invalid combination '
                f'of arguments. Expected one of:\n'
                ' * no arguments\n'
                ' * (int size)\n'
                ' * (Sequence data)\n'
                ' * (*, UntypedStorage wrap_storage)')

            if device is not None:
                raise RuntimeError(
                    arg_error_msg +
                    "\nKeyword argument 'device' cannot be specified")

            if dtype is not None:
                raise RuntimeError(
                    arg_error_msg +
                    "\nKeyword argument 'dtype' cannot be specified")

            if wrap_storage is None:
                if len(args) > 1:
                    raise RuntimeError(
                        arg_error_msg +
                        "\nToo many positional arguments")

                if len(args) == 1 and not _isint(args[0]) and not isinstance(args[0], collections.abc.Sequence):
                    raise TypeError(
                        arg_error_msg +
                        f"\nArgument type not recognized: {type(args[0])}")

                # dtype/device come from the subclass, not the caller.
                return TypedStorage(
                    *args,
                    dtype=cls._dtype,
                    device=_get_device_from_module(cls.__module__),
                    _internal=True)

            else:
                if len(args) != 0:
                    raise RuntimeError(
                        arg_error_msg +
                        "\nNo positional arguments should be given when using "
                        "'wrap_storage'")

                if not isinstance(wrap_storage, torch.UntypedStorage):
                    raise TypeError(
                        arg_error_msg +
                        f"\nArgument 'wrap_storage' must be UntypedStorage, but got {type(wrap_storage)}")

                # The wrapped storage's device must agree with the device
                # implied by the subclass's module.
                cls_device = _get_device_from_module(cls.__module__)

                if wrap_storage.device.type != cls_device:
                    raise RuntimeError(
                        arg_error_msg +
                        f"\nDevice of 'wrap_storage' must be {cls_device}"
                        f", but got {wrap_storage.device.type}")

                return TypedStorage(
                    *args,
                    wrap_storage=wrap_storage,
                    dtype=cls.dtype,
                    _internal=True)
+
    def __init__(self, *args, device=None, dtype=None, wrap_storage=None, _internal=False):
        """Initialize from one of: nothing (empty), an int size, a sequence
        of values, or an existing UntypedStorage via ``wrap_storage``.

        When wrapping, ``dtype`` is required and ``device`` is forbidden
        (the wrapped storage already carries it). Otherwise ``dtype`` and
        ``device`` default to the global default dtype and CPU.
        ``_internal=True`` suppresses the deprecation warning.
        """
        if not _internal:
            _warn_typed_storage_removal()
        arg_error_msg = (
            'TypedStorage.__init__ received an invalid combination '
            'of arguments. Expected one of:\n'
            ' * (*, torch.device device, torch.dtype dtype)\n'
            ' * (int size, *, torch.device device, torch.dtype dtype)\n'
            ' * (Sequence data, *, torch.device device, torch.dtype dtype)\n'
            ' * (*, UntypedStorage wrap_storage, torch.dtype dtype)')

        if wrap_storage is not None:
            if len(args) != 0:
                raise RuntimeError(
                    arg_error_msg +
                    "\nNo positional arguments should be given when using "
                    "'wrap_storage'")

            if dtype is None:
                raise RuntimeError(
                    arg_error_msg +
                    "\nArgument 'dtype' must be specified")

            if not isinstance(dtype, torch.dtype):
                raise TypeError(
                    arg_error_msg +
                    f"\nArgument 'dtype' must be torch.dtype, not {type(dtype)}")

            if device is not None:
                raise RuntimeError(
                    arg_error_msg +
                    "\nArgument 'device' should not be specified when 'wrap_storage' is given")

            self.dtype = dtype

            if not isinstance(wrap_storage, torch.UntypedStorage):
                raise TypeError(
                    arg_error_msg +
                    f"\nArgument 'wrap_storage' must be UntypedStorage, but got {type(wrap_storage)}")

            self._untyped_storage = wrap_storage

        else:
            self.dtype = torch.get_default_dtype() if dtype is None else dtype
            device = torch.device('cpu' if device is None else device)

            # Quantized data is CPU-only.
            if self.dtype in [torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8]:
                if device.type == 'cuda':
                    raise RuntimeError("Cannot create CUDA storage with quantized dtype")

            if len(args) == 0:
                # Empty storage on the requested device.
                self._untyped_storage = torch.UntypedStorage(device=device)

            elif len(args) == 1:
                if _isint(args[0]):
                    # Size in elements -> bytes for the untyped backing store.
                    self._untyped_storage = torch.UntypedStorage(int(args[0]) * self._element_size(), device=device)
                elif isinstance(args[0], collections.abc.Sequence):
                    self._untyped_storage = _get_storage_from_sequence(args[0], self.dtype, device)
                else:
                    raise TypeError(
                        arg_error_msg +
                        f"\nArgument type not recognized: {type(args[0])}")

            else:
                raise RuntimeError(
                    arg_error_msg +
                    "\nToo many positional arguments")
+
    @property
    def is_cuda(self):
        # True iff the backing untyped storage lives on a CUDA device.
        _warn_typed_storage_removal()
        return self._untyped_storage.device.type == 'cuda'

    @property
    def is_hpu(self):
        # True iff the backing untyped storage lives on an HPU device.
        _warn_typed_storage_removal()
        return self._untyped_storage.device.type == 'hpu'

    def untyped(self):
        """Return the internal :class:`torch.UntypedStorage`."""
        _warn_typed_storage_removal()
        return self._untyped_storage

    def _new_wrapped_storage(self, untyped_storage):
        # Wrap *untyped_storage* in the same class as ``self`` (subclasses
        # carry their own dtype, TypedStorage needs it passed explicitly).
        assert type(untyped_storage) == torch.UntypedStorage

        if type(self) == TypedStorage:
            return TypedStorage(
                wrap_storage=untyped_storage,
                dtype=self.dtype,
                _internal=True)
        else:
            return type(self)(wrap_storage=untyped_storage)

    def __len__(self):
        _warn_typed_storage_removal()
        return self._size()

    def _maybe_wrap_index(self, idx, is_stop=False):
        # Normalize an (possibly None or negative) index to [0, size],
        # raising IndexError when out of range. ``is_stop`` selects slice-stop
        # semantics (size allowed, None -> size) over element semantics
        # (size excluded, None -> 0).
        if idx is None:
            if is_stop:
                return self._size()
            else:
                return 0

        else:
            if type(idx) != int:
                raise TypeError(
                    f"can't index a {type(self)} with {type(idx)}")
            if is_stop:
                if (idx > self._size()) or (idx < -self._size()):
                    raise IndexError(
                        f'index {idx} out of range for storage of size {self.size()}')
                if idx > 0:
                    return idx
                else:
                    # Wrap negative (or zero) stop indices modulo the size.
                    return idx % self._size()
            else:
                if (idx >= self._size()) or (idx < -self._size()):
                    raise IndexError(
                        f'index {idx} out of range for storage of size {self.size()}')
                return idx % self._size()

    def __setitem__(self, idx, value):
        _warn_typed_storage_removal()
        return self._setitem(idx, value)

    def _setitem(self, idx, value):
        # Assignment is implemented by viewing the storage through a
        # temporary tensor and indexing that tensor.
        if not isinstance(idx, (int, slice)):
            raise RuntimeError(f"can't index a {type(self)} with {type(idx)}")
        if torch.is_storage(value):
            raise RuntimeError(f'cannot set item with value type {type(value)}')
        if self.dtype in [torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8]:
            # Quantized dtypes cannot back a tensor directly; reinterpret the
            # bytes through the matching integer dtype.
            interpret_dtypes = {
                torch.quint8: torch.uint8,
                torch.quint4x2: torch.uint8,
                torch.quint2x4: torch.uint8,
                torch.qint32: torch.int32,
                torch.qint8: torch.int8
            }
            tmp_dtype = interpret_dtypes[self.dtype]
            tmp_tensor = torch.tensor([], dtype=tmp_dtype, device=self._untyped_storage.device)
            tmp_tensor.set_(TypedStorage(
                wrap_storage=self._untyped_storage,
                dtype=tmp_dtype,
                _internal=True))
        else:
            tmp_tensor = torch.tensor([], dtype=self.dtype, device=self._untyped_storage.device).set_(self)

        tmp_tensor[idx] = value

    def __getitem__(self, idx):
        _warn_typed_storage_removal()
        return self._getitem(idx)

    def _getitem(self, idx):
        if self._untyped_storage.device.type == 'meta':
            raise NotImplementedError("Not available for 'meta' device type")

        # NOTE: Before TypedStorage existed, indexing with a slice used to be
        # possible for Storage objects. However, it would return
        # a storage view, which would be a hassle to implement in TypedStorage,
        # so it was disabled
        if isinstance(idx, slice):
            raise RuntimeError('slices are only supported in UntypedStorage.__getitem__')
        elif not isinstance(idx, int):
            raise RuntimeError(f"can't index a {type(self)} with {type(idx)}")

        if self.dtype in [torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8]:
            # Read quantized data through the matching integer dtype.
            interpret_dtypes = {
                torch.quint8: torch.uint8,
                torch.quint4x2: torch.uint8,
                torch.quint2x4: torch.uint8,
                torch.qint32: torch.int32,
                torch.qint8: torch.int8
            }
            return TypedStorage(
                wrap_storage=self._untyped_storage,
                dtype=interpret_dtypes[self.dtype],
                _internal=True)._getitem(idx)

        idx_wrapped = self._maybe_wrap_index(idx)
        tmp_tensor = torch.tensor([], dtype=self.dtype, device=self._untyped_storage.device).set_(self)
        return tmp_tensor[idx_wrapped].item()
+
    def copy_(self, source: T, non_blocking: _Optional[bool] = None):
        """Copy the data from *source* (typed or untyped storage) into this
        storage in place and return ``self``."""
        _warn_typed_storage_removal()
        if isinstance(source, TypedStorage):
            self._untyped_storage.copy_(source._untyped_storage, non_blocking)  # type: ignore[arg-type]
        else:
            self._untyped_storage.copy_(source, non_blocking)  # type: ignore[arg-type]
        return self

    def nbytes(self):
        """Total number of bytes in the backing storage."""
        _warn_typed_storage_removal()
        return self._nbytes()

    # For internal use only, to avoid deprecation warning
    def _nbytes(self):
        return self._untyped_storage.nbytes()

    def type(self, dtype: _Optional[str] = None, non_blocking: bool = False) -> Union[T, str]:
        """With no ``dtype``: return this storage's type string (preferring
        the legacy class name when one exists); otherwise delegate the cast
        to the untyped storage."""
        _warn_typed_storage_removal()
        if dtype is None:
            legacy_class = self._get_legacy_storage_class()

            if legacy_class is not None:
                return legacy_class.__module__ + '.' + legacy_class.__name__

            return '.'.join([self.__module__, type(self).__name__])

        else:
            return self._untyped_storage.type(dtype, non_blocking)

    def cuda(self, device=None, non_blocking=False, **kwargs) -> T:  # type: ignore[misc, type-var]
        """Return a CUDA copy of this storage (quantized dtypes unsupported)."""
        _warn_typed_storage_removal()
        if self.dtype in [torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8]:
            raise RuntimeError("Cannot create CUDA storage with quantized dtype")
        cuda_storage: torch.UntypedStorage = self._untyped_storage.cuda(device, non_blocking, **kwargs)
        return self._new_wrapped_storage(cuda_storage)

    def hpu(self, device=None, non_blocking=False, **kwargs) -> T:  # type: ignore[misc, type-var]
        """Return an HPU copy of this storage (quantized dtypes unsupported)."""
        _warn_typed_storage_removal()
        if self.dtype in [torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8]:
            raise RuntimeError("Cannot create HPU storage with quantized dtype")
        hpu_storage: torch.UntypedStorage = self._untyped_storage.hpu(device, non_blocking, **kwargs)
        return self._new_wrapped_storage(hpu_storage)

    def element_size(self):
        """Size in bytes of one element of this storage's dtype."""
        _warn_typed_storage_removal()
        return self._element_size()

    # For internal use only, to avoid deprecation warning
    def _element_size(self):
        return torch._utils._element_size(self.dtype)

    def get_device(self) -> int:
        """Device index of the backing storage."""
        _warn_typed_storage_removal()
        return self._untyped_storage.get_device()

    def __str__(self):
        _warn_typed_storage_removal()
        info_str = (
            f'[{torch.typename(self)}(dtype={self.dtype}, '
            f'device={self.device}) of size {len(self)}]')
        # Meta storages have no data to print, only the summary line.
        if self.device.type == 'meta':
            return '...\n' + info_str
        else:
            data_str = ' ' + '\n '.join(str(self[i]) for i in range(self.size()))
            return data_str + '\n' + info_str

    def __repr__(self):
        _warn_typed_storage_removal()
        return str(self)

    def __iter__(self):
        _warn_typed_storage_removal()
        return iter(self[i] for i in range(self.size()))

    def __copy__(self):
        # Shallow copy still duplicates the untyped buffer (copy.copy of an
        # untyped storage clones it).
        _warn_typed_storage_removal()
        return self._new_wrapped_storage(copy.copy(self._untyped_storage))

    def __deepcopy__(self, memo):
        _warn_typed_storage_removal()
        return self._deepcopy(memo)

    # For internal use only, to avoid deprecation warning
    def _deepcopy(self, memo):
        return self._new_wrapped_storage(copy.deepcopy(self._untyped_storage, memo))

    def __sizeof__(self):
        _warn_typed_storage_removal()
        return super().__sizeof__() + self.nbytes()
+
    def clone(self):
        """Return a copy of this storage."""
        _warn_typed_storage_removal()
        return self._new_wrapped_storage(self._untyped_storage.clone())

    def tolist(self):
        """Return a list containing the elements of this storage."""
        _warn_typed_storage_removal()
        return list(self)

    def cpu(self):
        """Return a CPU copy of this storage if it's not already on the CPU."""
        _warn_typed_storage_removal()
        return self._new_wrapped_storage(self._untyped_storage.cpu())

    def is_pinned(self, device: Union[str, torch.device] = 'cuda'):
        r"""Determine whether the CPU TypedStorage is already pinned on device.

        Args:
            device (str or torch.device): The device to pin memory on. Default: ``'cuda'``

        Returns:
            A boolean variable.
        """
        _warn_typed_storage_removal()
        return self._untyped_storage.is_pinned(device)

    def pin_memory(self, device: Union[str, torch.device] = 'cuda'):
        r"""Copy the CPU TypedStorage to pinned memory, if it's not already pinned.

        Args:
            device (str or torch.device): The device to pin memory on. Default: ``'cuda'``.

        Returns:
            A pinned CPU storage.
        """
        _warn_typed_storage_removal()
        return self._new_wrapped_storage(self._untyped_storage.pin_memory(device=device))

    def share_memory_(self):
        """See :meth:`torch.UntypedStorage.share_memory_`"""
        _warn_typed_storage_removal()
        return self._share_memory_()

    # For internal use only, to avoid deprecation warning
    def _share_memory_(self):
        self._untyped_storage.share_memory_()
        return self

    def _new_shared(self, size, *, device=None):
        """Create a new storage in shared memory with the same data type."""
        if device is None:
            device = 'cpu'
        device = torch.device(device)
        # ``size`` is in elements; the untyped backing store is sized in bytes.
        untyped_storage = torch.UntypedStorage._new_shared(size * self._element_size(), device=device)
        return TypedStorage(
            wrap_storage=untyped_storage,
            dtype=self.dtype,
            _internal=True)

    @property
    def _cdata(self):
        # Expose the underlying C storage handle of the untyped backing store.
        return self._untyped_storage._cdata

    @property
    def device(self):
        _warn_typed_storage_removal()
        return self._untyped_storage.device

    def size(self):
        """Number of elements (not bytes) in this storage."""
        _warn_typed_storage_removal()
        return self._size()

    # For internal use only, to avoid deprecation warning
    def _size(self):
        # NB: don't indirect through __len__, as that requires
        # an int to be returned
        return self._untyped_storage.nbytes() // self._element_size()

    def pickle_storage_type(self):
        """Legacy storage class name used when pickling this dtype."""
        _warn_typed_storage_removal()
        return self._pickle_storage_type()

    # For internal use only, to avoid deprecation warning
    def _pickle_storage_type(self):
        try:
            return _dtype_to_storage_type_map()[self.dtype]
        except KeyError as e:
            raise KeyError(f'dtype {self.dtype} is not recognized') from e

    def __reduce__(self):
        # Pickle via torch.save's legacy (non-zipfile) format.
        b = io.BytesIO()
        torch.save(self, b, _use_new_zipfile_serialization=False)
        return (_load_from_bytes, (b.getvalue(),))

    def data_ptr(self):
        _warn_typed_storage_removal()
        return self._data_ptr()

    # For internal use only, to avoid deprecation warning
    def _data_ptr(self):
        return self._untyped_storage.data_ptr()

    def resize_(self, size):
        """Resize this storage to ``size`` elements in place."""
        _warn_typed_storage_removal()
        self._resize_(size)

    # For internal use only, to avoid deprecation warning
    def _resize_(self, size):
        # Element count -> byte count for the untyped backing store.
        self._untyped_storage.resize_(size * self._element_size())
+
+ @classmethod
+ def _free_weak_ref(cls, *args, **kwargs):
+ return UntypedStorage._free_weak_ref(*args, **kwargs)
+
+ def _weak_ref(self, *args, **kwargs):
+ return self._untyped_storage._weak_ref(*args, **kwargs)
+
+ @classmethod
+ def from_buffer(cls, *args, **kwargs):
+ _warn_typed_storage_removal()
+ return cls._from_buffer(*args, **kwargs)
+
+ @classmethod
+ def _from_buffer(cls, *args, dtype=None, device=None, **kwargs):
+ if cls == TypedStorage:
+ dtype = torch.get_default_dtype() if dtype is None else dtype
+ device = torch.device('cpu' if device is None else device)
+ if device.type != 'cpu':
+ raise RuntimeError(f'TypedStorage.from_buffer: Not available for device {device.type}')
+ untyped_storage: torch.UntypedStorage = torch.UntypedStorage.from_buffer(*args, dtype=dtype, **kwargs)
+
+ else:
+ if dtype is not None or len(args) == 5:
+ raise RuntimeError(
+ "from_buffer: 'dtype' can only be specified in "
+ "UntypedStorage.from_buffer and TypedStorage.from_buffer")
+ if device is not None:
+ raise RuntimeError(
+ "from_buffer: 'device' can only be specified in "
+ "UntypedStorage.from_buffer and TypedStorage.from_buffer")
+
+ dtype = cls._dtype
+ untyped_storage = torch.UntypedStorage.from_buffer(*args, dtype=dtype, **kwargs)
+
+ return TypedStorage(
+ wrap_storage=untyped_storage,
+ dtype=dtype,
+ _internal=True)
+
+ def _to(self, dtype):
+ if not isinstance(dtype, torch.dtype):
+ raise TypeError(f"Argument 'dtype' must be torch.dtype, not {type(dtype)}")
+ storage = torch.tensor([], dtype=self.dtype, device=self.device).set_(self).to(dtype)._typed_storage()
+ if storage.data_ptr() == self.data_ptr():
+ storage = storage.clone()
+ return storage
+
    def double(self):
        """Casts this storage to double type."""
        # Public wrapper: warn, then cast via _to().
        _warn_typed_storage_removal()
        return self._to(torch.double)
+
    def float(self):
        """Casts this storage to float type."""
        # Public wrapper: warn, then cast via _to().
        _warn_typed_storage_removal()
        return self._to(torch.float)
+
    def half(self):
        """Casts this storage to half type."""
        # Public wrapper: warn, then cast via _to().
        _warn_typed_storage_removal()
        return self._to(torch.half)
+
    def long(self):
        """Casts this storage to long type."""
        # Public wrapper: warn, then cast via _to().
        _warn_typed_storage_removal()
        return self._to(torch.long)
+
    def int(self):
        """Casts this storage to int type."""
        # Public wrapper: warn, then cast via _to().
        _warn_typed_storage_removal()
        return self._to(torch.int)
+
    def short(self):
        """Casts this storage to short type."""
        # Public wrapper: warn, then cast via _to().
        _warn_typed_storage_removal()
        return self._to(torch.short)
+
    def char(self):
        """Casts this storage to char type."""
        # 'char' maps to torch.int8.
        _warn_typed_storage_removal()
        return self._to(torch.int8)
+
    def byte(self):
        """Casts this storage to byte type."""
        # 'byte' maps to torch.uint8.
        _warn_typed_storage_removal()
        return self._to(torch.uint8)
+
    def bool(self):
        """Casts this storage to bool type."""
        # Public wrapper: warn, then cast via _to().
        _warn_typed_storage_removal()
        return self._to(torch.bool)
+
    def bfloat16(self):
        """Casts this storage to bfloat16 type."""
        # Public wrapper: warn, then cast via _to().
        _warn_typed_storage_removal()
        return self._to(torch.bfloat16)
+
    def complex_double(self):
        """Casts this storage to complex double type."""
        # 'complex double' maps to torch.cdouble (complex128).
        _warn_typed_storage_removal()
        return self._to(torch.cdouble)
+
    def complex_float(self):
        """Casts this storage to complex float type."""
        # 'complex float' maps to torch.cfloat (complex64).
        _warn_typed_storage_removal()
        return self._to(torch.cfloat)
+
    def float8_e5m2(self):
        """Casts this storage to float8_e5m2 type."""
        # Public wrapper: warn, then cast via _to().
        _warn_typed_storage_removal()
        return self._to(torch.float8_e5m2)
+
    def float8_e4m3fn(self):
        """Casts this storage to float8_e4m3fn type."""
        # Public wrapper: warn, then cast via _to().
        _warn_typed_storage_removal()
        return self._to(torch.float8_e4m3fn)
+
+ @classmethod
+ def from_file(cls, filename, shared, size):
+ """from_file(filename, shared=False, size=0) -> Storage
+
+ Creates a CPU storage backed by a memory-mapped file.
+
+ If ``shared`` is ``True``, then memory is shared between all processes.
+ All changes are written to the file. If ``shared`` is ``False``, then the changes on
+ the storage do not affect the file.
+
+ ``size`` is the number of elements in the storage. If ``shared`` is ``False``,
+ then the file must contain at least ``size * sizeof(Type)`` bytes
+ (``Type`` is the type of storage). If ``shared`` is ``True`` the file will be created if needed.
+
+ Args:
+ filename (str): file name to map
+ shared (bool): whether to share memory (whether ``MAP_SHARED`` or ``MAP_PRIVATE`` is passed to the
+ underlying `mmap(2) call `_)
+ size (int): number of elements in the storage
+ """
+ _warn_typed_storage_removal()
+ if cls == TypedStorage:
+ raise RuntimeError('from_file can only be called on derived classes')
+ untyped_storage: UntypedStorage = UntypedStorage.from_file(
+ filename,
+ shared,
+ size * torch._utils._element_size(cls.dtype))
+ storage = cls(wrap_storage=untyped_storage)
+ return storage
+
    @classmethod
    def _expired(cls, *args, **kwargs):
        # Pass-through to the untyped storage's weak-reference expiry check.
        return UntypedStorage._expired(*args, **kwargs)
+
    def _write_file(self, *args, **kwargs):
        # Delegate file serialization to the backing untyped storage.
        return self._untyped_storage._write_file(*args, **kwargs)
+
    def _set_from_file(self, *args, **kwargs):
        # Delegate file deserialization to the backing untyped storage.
        return self._untyped_storage._set_from_file(*args, **kwargs)
+
    def _set_cdata(self, *args, **kwargs):
        # Delegate to the backing untyped storage.
        return self._untyped_storage._set_cdata(*args, **kwargs)
+
    def _share_cuda_(self, *args, **kwargs):
        # Delegate CUDA IPC sharing to the backing untyped storage.
        return self._untyped_storage._share_cuda_(*args, **kwargs)
+
    def is_shared(self):
        """Public wrapper around :meth:`_is_shared` that emits the
        deprecation warning."""
        _warn_typed_storage_removal()
        return self._is_shared()
+
    # For internal use only, to avoid deprecation warning
    def _is_shared(self):
        # Delegate to the backing untyped storage.
        return self._untyped_storage.is_shared()
+
    @classmethod
    def _new_shared_cuda(cls, *args, **kwargs):
        # Pass-through to the untyped CUDA shared-storage constructor.
        return torch.UntypedStorage._new_shared_cuda(*args, **kwargs)
+
    def _share_filename_cpu_(self, *args, **kwargs):
        manager_handle, storage_handle, size = self._untyped_storage._share_filename_cpu_(*args, **kwargs)
        # The untyped call reports its size in bytes; convert to elements.
        return manager_handle, storage_handle, size // self._element_size()
+
    def _shared_decref(self):
        # Drop one shared-memory reference on the backing untyped storage;
        # returns self for call chaining.
        self._untyped_storage._shared_decref()
        return self
+
    @classmethod
    def _release_ipc_counter(cls, *args, device=None, **kwargs):
        # NOTE(review): ``device`` is accepted for signature compatibility
        # but ignored; the call always targets the CUDA IPC counter helper.
        return torch.UntypedStorage._release_ipc_counter_cuda(*args, **kwargs)
+
    def _shared_incref(self, *args, **kwargs):
        # Add one shared-memory reference on the backing untyped storage.
        return self._untyped_storage._shared_incref(*args, **kwargs)
+
    def _share_fd_cpu_(self, *args, **kwargs):
        fd, size = self._untyped_storage._share_fd_cpu_(*args, **kwargs)
        # The untyped call reports its size in bytes; convert to elements.
        return fd, size // self._element_size()
+
+ def _get_legacy_storage_class(self):
+ if self.dtype not in _dtype_to_storage_type_map():
+ return None
+
+ storage_name = _dtype_to_storage_type_map()[self.dtype]
+
+ if self.device.type not in ['cpu', 'cuda', torch._C._get_privateuse1_backend_name()]:
+ return None
+
+ module = torch if self.device.type == 'cpu' else getattr(torch, self.device.type)
+
+ try:
+ return getattr(module, storage_name)
+ except AttributeError:
+ return None
+
# Re-attach the docstrings of the module-level helpers that back these
# bound methods, so help(TypedStorage.type) etc. stay informative.
TypedStorage.type.__doc__ = _type.__doc__
TypedStorage.cuda.__doc__ = _cuda.__doc__
TypedStorage.hpu.__doc__ = _hpu.__doc__
+
class _LegacyStorageMeta(type):
    # dtype of the concrete legacy storage class (set on subclasses).
    dtype: torch.dtype

    def __instancecheck__(cls, instance):
        """A plain TypedStorage counts as an instance of a legacy storage
        class when its device type and dtype both match the class."""
        if type(instance) is not TypedStorage:
            return False
        expected_device = _get_device_from_module(cls.__module__)
        return (expected_device == instance.device.type) and (cls.dtype == instance.dtype)
+
class _LegacyStorage(TypedStorage, metaclass=_LegacyStorageMeta):
    """Shared base for the deprecated per-dtype storage classes."""

    @classmethod
    def _new_shared(cls, size):
        """Create a new storage in shared memory with the same data type."""
        # ``size`` is in elements; the untyped allocator wants bytes.
        nbytes = size * cls()._element_size()
        return cls(wrap_storage=torch.UntypedStorage._new_shared(nbytes))

    @classmethod
    def _release_ipc_counter(cls, *args, **kwargs):
        # Delegate straight to the CUDA IPC helper on the untyped storage.
        return torch.UntypedStorage._release_ipc_counter_cuda(*args, **kwargs)

    @classmethod
    def _new_shared_filename(cls, manager, obj, size):
        # ``size`` is in elements; convert to bytes for the untyped ctor.
        bytes_size = size * torch._utils._element_size(cls.dtype)
        return cls(wrap_storage=torch.UntypedStorage._new_shared_filename_cpu(manager, obj, bytes_size))
+
def _get_dtype_from_pickle_storage_type(pickle_storage_type: str):
    """Inverse of ``_pickle_storage_type``: map a legacy storage-type name
    (e.g. ``'FloatStorage'``) back to its ``torch.dtype``."""
    mapping = _storage_type_to_dtype_map()
    try:
        return mapping[pickle_storage_type]
    except KeyError as e:
        raise KeyError(
            f'pickle storage type "{pickle_storage_type}" is not recognized') from e
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/torch_version.py b/env-llmeval/lib/python3.10/site-packages/torch/torch_version.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d3b5aed2fa5f6e096eb4904e366df7861ab9c41
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/torch_version.py
@@ -0,0 +1,56 @@
+from typing import Any, Iterable
+from .version import __version__ as internal_version
+from ._vendor.packaging.version import Version, InvalidVersion
+
+__all__ = ['TorchVersion']
+
+
class TorchVersion(str):
    """A string with magic powers to compare to both Version and iterables!

    Prior to 1.10.0 torch.__version__ was stored as a str, and many users
    compared against torch.__version__ as if it were a str. In order not to
    break them we have TorchVersion which masquerades as a str while also
    having the ability to compare against both packaging.version.Version as
    well as tuples of values, eg. (1, 2, 1)

    Examples:
        Comparing a TorchVersion object to a Version object
            TorchVersion('1.10.0a') > Version('1.10.0a')
        Comparing a TorchVersion object to a Tuple object
            TorchVersion('1.10.0a') > (1, 2)    # 1.2
            TorchVersion('1.10.0a') > (1, 2, 1) # 1.2.1
        Comparing a TorchVersion object against a string
            TorchVersion('1.10.0a') > '1.2'
            TorchVersion('1.10.0a') > '1.2.1'
    """

    def _convert_to_version(self, inp: Any) -> Any:
        """Coerce *inp* (Version, str, or iterable of parts) to a Version.

        Raises:
            InvalidVersion: if *inp* is none of the supported kinds.
        """
        if isinstance(inp, Version):
            return inp
        elif isinstance(inp, str):
            return Version(inp)
        elif isinstance(inp, Iterable):
            # Join the parts assuming the tuple looks like (MAJOR, MINOR, ?PATCH)
            # Examples:
            #   * (1) -> Version("1")
            #   * (1, 20) -> Version("1.20")
            #   * (1, 20, 1) -> Version("1.20.1")
            return Version('.'.join(str(item) for item in inp))
        else:
            raise InvalidVersion(inp)

    def _cmp_wrapper(self, cmp: Any, method: str) -> bool:
        """Compare as a Version when possible, else as a plain str."""
        try:
            return getattr(Version(self), method)(self._convert_to_version(cmp))
        except InvalidVersion:
            # Fall back to regular string comparison if dealing with an
            # invalid (non-PEP-440) version like 'parrot'.
            return getattr(super(), method)(cmp)
+
+
# Attach the rich-comparison methods that route through _cmp_wrapper. The
# ``method=cmp_method`` default argument binds each name at definition time,
# avoiding the late-binding-closure pitfall of loops over lambdas.
for cmp_method in ["__gt__", "__lt__", "__eq__", "__ge__", "__le__"]:
    setattr(TorchVersion, cmp_method, lambda x, y, method=cmp_method: x._cmp_wrapper(y, method))

# Re-export the package version wrapped in the comparison-aware type.
__version__ = TorchVersion(internal_version)
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/types.py b/env-llmeval/lib/python3.10/site-packages/torch/types.py
new file mode 100644
index 0000000000000000000000000000000000000000..22c01e3bb9795ec2ca23d6149ebbbfc0ab19bb7e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/types.py
@@ -0,0 +1,79 @@
+import torch
+from typing import Any, List, Optional, Sequence, Tuple, Union
+
+import builtins
+
# Convenience aliases for common composite types that we need
# to talk about in PyTorch

# A single tensor or any sequence of tensors.
_TensorOrTensors = Union[torch.Tensor, Sequence[torch.Tensor]]
# As above, but also admitting autograd GradientEdge objects (forward
# references because torch.autograd imports this module).
_TensorOrTensorsOrGradEdge = Union[
    torch.Tensor, Sequence[torch.Tensor],
    "torch.autograd.graph.GradientEdge",
    Sequence["torch.autograd.graph.GradientEdge"]]

# In some cases, these basic types are shadowed by corresponding
# top-level values. The underscore variants let us refer to these
# types. See https://github.com/python/mypy/issues/4146 for why this
# workaround is necessary
_int = builtins.int
_float = builtins.float
_bool = builtins.bool
_complex = builtins.complex

_dtype = torch.dtype
_device = torch.device
_qscheme = torch.qscheme
# Anything acceptable where a size is expected.
_size = Union[torch.Size, List[_int], Tuple[_int, ...]]
_layout = torch.layout
_dispatchkey = Union[str, torch._C.DispatchKey]

# Meta-type for "numeric" things; matches our docs
Number = Union[builtins.int, builtins.float, builtins.bool]

# Meta-type for "device-like" things. Not to be confused with 'device' (a
# literal device object). This nomenclature is consistent with PythonArgParser.
# None means use the default device (typically CPU)
Device = Optional[Union[_device, str, _int]]
# Remove Optional from the module namespace now that Device is defined.
del Optional
+
+# Storage protocol implemented by ${Type}StorageBase classes
+
class Storage:
    """Structural protocol for the ``${Type}StorageBase`` classes.

    Only the attribute and method signatures matter; the bodies are
    intentionally empty (hence the ``empty-body`` type-ignores).
    """
    _cdata: int  # NOTE(review): presumably the native storage handle -- confirm
    device: torch.device  # device the storage lives on
    dtype: torch.dtype  # element type of the storage
    _torch_load_uninitialized: bool  # NOTE(review): flag consulted by torch.load internals -- confirm

    def __deepcopy__(self, memo) -> 'Storage':  # type: ignore[empty-body]
        ...

    # NOTE(review): parameter named ``int`` shadows the builtin; kept as-is
    # because renaming would change the keyword-call interface.
    def _new_shared(self, int) -> 'Storage':  # type: ignore[empty-body]
        ...

    def _write_file(self, f: Any, is_real_file: _bool, save_size: _bool, element_size: int) -> None:
        ...

    def element_size(self) -> int:  # type: ignore[empty-body]
        ...

    def is_shared(self) -> bool:  # type: ignore[empty-body]
        ...

    def share_memory_(self) -> 'Storage':  # type: ignore[empty-body]
        ...

    def nbytes(self) -> int:  # type: ignore[empty-body]
        ...

    def cpu(self) -> 'Storage':  # type: ignore[empty-body]
        ...

    def data_ptr(self) -> int:  # type: ignore[empty-body]
        ...

    def from_file(self, filename: str, shared: bool = False, nbytes: int = 0) -> 'Storage':  # type: ignore[empty-body]
        ...

    def _new_with_file(self, f: Any, element_size: int) -> 'Storage':  # type: ignore[empty-body]
        ...