diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/__pycache__/__config__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/__pycache__/__config__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..122c7779a025edf044e4872e43e23f9ca3e01928 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/__pycache__/__config__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33f356b5c0fc8dd48eed277ad56ef88390269374 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/__pycache__/_distributor_init.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/__pycache__/_distributor_init.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d1f90fc63d9bc3444b7d53244941e9da2af7a49 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/__pycache__/_distributor_init.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/__pycache__/conftest.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b3b7c70a433176118aea554a25e9a386ebf6b233 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/__pycache__/conftest.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/__pycache__/version.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/__pycache__/version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa0cdd93501cd88cda0281231fbebc9f306e8c0f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/__pycache__/version.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/constants/__init__.py b/llmeval-env/lib/python3.10/site-packages/scipy/constants/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ce2805070eef1d77567ecf094aa08049d0b0a797 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/constants/__init__.py @@ -0,0 +1,347 @@ +r""" +================================== +Constants (:mod:`scipy.constants`) +================================== + +.. currentmodule:: scipy.constants + +Physical and mathematical constants and units. 
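+
+A short usage example (a doctest-style sketch; the float reprs shown are
+those printed on a typical 64-bit build):
+
+    >>> from scipy import constants
+    >>> constants.pi              # mathematical constant
+    3.141592653589793
+    >>> constants.speed_of_light  # physical constant, in m/s
+    299792458.0
+    >>> constants.kilo            # SI prefix, a plain scale factor
+    1000.0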
+ + +Mathematical constants +====================== + +================ ================================================================= +``pi`` Pi +``golden`` Golden ratio +``golden_ratio`` Golden ratio +================ ================================================================= + + +Physical constants +================== + +=========================== ================================================================= +``c`` speed of light in vacuum +``speed_of_light`` speed of light in vacuum +``mu_0`` the magnetic constant :math:`\mu_0` +``epsilon_0`` the electric constant (vacuum permittivity), :math:`\epsilon_0` +``h`` the Planck constant :math:`h` +``Planck`` the Planck constant :math:`h` +``hbar`` :math:`\hbar = h/(2\pi)` +``G`` Newtonian constant of gravitation +``gravitational_constant`` Newtonian constant of gravitation +``g`` standard acceleration of gravity +``e`` elementary charge +``elementary_charge`` elementary charge +``R`` molar gas constant +``gas_constant`` molar gas constant +``alpha`` fine-structure constant +``fine_structure`` fine-structure constant +``N_A`` Avogadro constant +``Avogadro`` Avogadro constant +``k`` Boltzmann constant +``Boltzmann`` Boltzmann constant +``sigma`` Stefan-Boltzmann constant :math:`\sigma` +``Stefan_Boltzmann`` Stefan-Boltzmann constant :math:`\sigma` +``Wien`` Wien displacement law constant +``Rydberg`` Rydberg constant +``m_e`` electron mass +``electron_mass`` electron mass +``m_p`` proton mass +``proton_mass`` proton mass +``m_n`` neutron mass +``neutron_mass`` neutron mass +=========================== ================================================================= + + +Constants database +------------------ + +In addition to the above variables, :mod:`scipy.constants` also contains the +2018 CODATA recommended values [CODATA2018]_ database containing more physical +constants. + +.. autosummary:: + :toctree: generated/ + + value -- Value in physical_constants indexed by key + unit -- Unit in physical_constants indexed by key + precision -- Relative precision in physical_constants indexed by key + find -- Return list of physical_constant keys with a given string + ConstantWarning -- Constant sought not in newest CODATA data set + +.. data:: physical_constants + + Dictionary of physical constants, of the format + ``physical_constants[name] = (value, unit, uncertainty)``. 
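+
+   For example (doctest style; ``elementary charge`` is exactly defined in
+   the 2018 CODATA set, so its stored uncertainty is zero):
+
+   >>> from scipy import constants
+   >>> constants.physical_constants['elementary charge']
+   (1.602176634e-19, 'C', 0.0)
+   >>> constants.value('elementary charge')
+   1.602176634e-19
+   >>> constants.unit('elementary charge')
+   'C'
+   >>> constants.precision('elementary charge')
+   0.0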
+
+Available constants:
+
+====================================================================== ====
+%(constant_names)s
+====================================================================== ====
+
+
+Units
+=====
+
+SI prefixes
+-----------
+
+============ =================================================================
+``quetta``   :math:`10^{30}`
+``ronna``    :math:`10^{27}`
+``yotta``    :math:`10^{24}`
+``zetta``    :math:`10^{21}`
+``exa``      :math:`10^{18}`
+``peta``     :math:`10^{15}`
+``tera``     :math:`10^{12}`
+``giga``     :math:`10^{9}`
+``mega``     :math:`10^{6}`
+``kilo``     :math:`10^{3}`
+``hecto``    :math:`10^{2}`
+``deka``     :math:`10^{1}`
+``deci``     :math:`10^{-1}`
+``centi``    :math:`10^{-2}`
+``milli``    :math:`10^{-3}`
+``micro``    :math:`10^{-6}`
+``nano``     :math:`10^{-9}`
+``pico``     :math:`10^{-12}`
+``femto``    :math:`10^{-15}`
+``atto``     :math:`10^{-18}`
+``zepto``    :math:`10^{-21}`
+``yocto``    :math:`10^{-24}`
+``ronto``    :math:`10^{-27}`
+``quecto``   :math:`10^{-30}`
+============ =================================================================
+
+Binary prefixes
+---------------
+
+============ =================================================================
+``kibi``     :math:`2^{10}`
+``mebi``     :math:`2^{20}`
+``gibi``     :math:`2^{30}`
+``tebi``     :math:`2^{40}`
+``pebi``     :math:`2^{50}`
+``exbi``     :math:`2^{60}`
+``zebi``     :math:`2^{70}`
+``yobi``     :math:`2^{80}`
+============ =================================================================
+
+Mass
+----
+
+================= ============================================================
+``gram``          :math:`10^{-3}` kg
+``metric_ton``    :math:`10^{3}` kg
+``grain``         one grain in kg
+``lb``            one pound (avoirdupois) in kg
+``pound``         one pound (avoirdupois) in kg
+``blob``          one inch version of a slug in kg (added in 1.0.0)
+``slinch``        one inch version of a slug in kg (added in 1.0.0)
+``slug``          one slug in kg (added in 1.0.0)
+``oz``            one ounce in kg
+``ounce``         one ounce in kg
+``stone``         one stone in kg
+``long_ton``      one long ton in kg
+``short_ton``     one short ton in kg
+``troy_ounce``    one Troy ounce in kg
+``troy_pound``    one Troy pound in kg
+``carat``         one carat in kg
+``m_u``           atomic mass constant (in kg)
+``u``             atomic mass constant (in kg)
+``atomic_mass``   atomic mass constant (in kg)
+================= ============================================================
+
+Angle
+-----
+
+================= ============================================================
+``degree``        degree in radians
+``arcmin``        arc minute in radians
+``arcminute``     arc minute in radians
+``arcsec``        arc second in radians
+``arcsecond``     arc second in radians
+================= ============================================================
+
+
+Time
+----
+
+================= ============================================================
+``minute``        one minute in seconds
+``hour``          one hour in seconds
+``day``           one day in seconds
+``week``          one week in seconds
+``year``          one year (365 days) in seconds
+``Julian_year``   one Julian year (365.25 days) in seconds
+================= ============================================================
+
+
+Length
+------
+
+===================== ============================================================
+``inch``              one inch in meters
+``foot``              one foot in meters
+``yard``              one yard in meters
+``mile``              one mile in meters
+``mil``               one mil in meters
+``pt``                one point in meters
+``point``             one point in meters
+``survey_foot``       one survey foot in meters
+``survey_mile``       one survey mile in meters
+``nautical_mile``     one nautical mile in meters
+``fermi``             one
Fermi in meters +``angstrom`` one Angstrom in meters +``micron`` one micron in meters +``au`` one astronomical unit in meters +``astronomical_unit`` one astronomical unit in meters +``light_year`` one light year in meters +``parsec`` one parsec in meters +===================== ============================================================ + +Pressure +-------- + +================= ============================================================ +``atm`` standard atmosphere in pascals +``atmosphere`` standard atmosphere in pascals +``bar`` one bar in pascals +``torr`` one torr (mmHg) in pascals +``mmHg`` one torr (mmHg) in pascals +``psi`` one psi in pascals +================= ============================================================ + +Area +---- + +================= ============================================================ +``hectare`` one hectare in square meters +``acre`` one acre in square meters +================= ============================================================ + + +Volume +------ + +=================== ======================================================== +``liter`` one liter in cubic meters +``litre`` one liter in cubic meters +``gallon`` one gallon (US) in cubic meters +``gallon_US`` one gallon (US) in cubic meters +``gallon_imp`` one gallon (UK) in cubic meters +``fluid_ounce`` one fluid ounce (US) in cubic meters +``fluid_ounce_US`` one fluid ounce (US) in cubic meters +``fluid_ounce_imp`` one fluid ounce (UK) in cubic meters +``bbl`` one barrel in cubic meters +``barrel`` one barrel in cubic meters +=================== ======================================================== + +Speed +----- + +================== ========================================================== +``kmh`` kilometers per hour in meters per second +``mph`` miles per hour in meters per second +``mach`` one Mach (approx., at 15 C, 1 atm) in meters per second +``speed_of_sound`` one Mach (approx., at 15 C, 1 atm) in meters per second +``knot`` one knot in meters per second +================== ========================================================== + + +Temperature +----------- + +===================== ======================================================= +``zero_Celsius`` zero of Celsius scale in Kelvin +``degree_Fahrenheit`` one Fahrenheit (only differences) in Kelvins +===================== ======================================================= + +.. 
autosummary:: + :toctree: generated/ + + convert_temperature + +Energy +------ + +==================== ======================================================= +``eV`` one electron volt in Joules +``electron_volt`` one electron volt in Joules +``calorie`` one calorie (thermochemical) in Joules +``calorie_th`` one calorie (thermochemical) in Joules +``calorie_IT`` one calorie (International Steam Table calorie, 1956) in Joules +``erg`` one erg in Joules +``Btu`` one British thermal unit (International Steam Table) in Joules +``Btu_IT`` one British thermal unit (International Steam Table) in Joules +``Btu_th`` one British thermal unit (thermochemical) in Joules +``ton_TNT`` one ton of TNT in Joules +==================== ======================================================= + +Power +----- + +==================== ======================================================= +``hp`` one horsepower in watts +``horsepower`` one horsepower in watts +==================== ======================================================= + +Force +----- + +==================== ======================================================= +``dyn`` one dyne in newtons +``dyne`` one dyne in newtons +``lbf`` one pound force in newtons +``pound_force`` one pound force in newtons +``kgf`` one kilogram force in newtons +``kilogram_force`` one kilogram force in newtons +==================== ======================================================= + +Optics +------ + +.. autosummary:: + :toctree: generated/ + + lambda2nu + nu2lambda + +References +========== + +.. [CODATA2018] CODATA Recommended Values of the Fundamental + Physical Constants 2018. + + https://physics.nist.gov/cuu/Constants/ + +""" # noqa: E501 +# Modules contributed by BasSw (wegwerp@gmail.com) +from ._codata import * +from ._constants import * +from ._codata import _obsolete_constants, physical_constants + +# Deprecated namespaces, to be removed in v2.0.0 +from . import codata, constants + +_constant_names_list = [(_k.lower(), _k, _v) + for _k, _v in physical_constants.items() + if _k not in _obsolete_constants] +_constant_names = "\n".join(["``{}``{} {} {}".format(_x[1], " "*(66-len(_x[1])), + _x[2][0], _x[2][1]) + for _x in sorted(_constant_names_list)]) +if __doc__: + __doc__ = __doc__ % dict(constant_names=_constant_names) + +del _constant_names +del _constant_names_list + +__all__ = [s for s in dir() if not s.startswith('_')] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/constants/_codata.py b/llmeval-env/lib/python3.10/site-packages/scipy/constants/_codata.py new file mode 100644 index 0000000000000000000000000000000000000000..0f2fd4580fac82e53ed372219e77ddf843f2c68b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/constants/_codata.py @@ -0,0 +1,1748 @@ +""" +Fundamental Physical Constants +------------------------------ + +These constants are taken from CODATA Recommended Values of the Fundamental +Physical Constants 2018. + +Object +------ +physical_constants : dict + A dictionary containing physical constants. Keys are the names of physical + constants, values are tuples (value, units, precision). + +Functions +--------- +value(key): + Returns the value of the physical constant(key). +unit(key): + Returns the units of the physical constant(key). +precision(key): + Returns the relative precision of the physical constant(key). +find(sub): + Prints or returns list of keys containing the string sub, default is all. 
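+
+Example
+-------
+A short doctest-style sketch of the helpers above (the key list returned
+by ``find`` is longer than the fragment checked here):
+
+>>> from scipy.constants import find, value
+>>> keys = find('proton mass')   # substring search over the keys
+>>> 'proton mass' in keys
+True
+>>> value('proton mass')         # in kg, 2018 CODATA
+1.67262192369e-27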
+ +Source +------ +The values of the constants provided at this site are recommended for +international use by CODATA and are the latest available. Termed the "2018 +CODATA recommended values," they are generally recognized worldwide for use in +all fields of science and technology. The values became available on 20 May +2019 and replaced the 2014 CODATA set. Also available is an introduction to the +constants for non-experts at + +https://physics.nist.gov/cuu/Constants/introduction.html + +References +---------- +Theoretical and experimental publications relevant to the fundamental constants +and closely related precision measurements published since the mid 1980s, but +also including many older papers of particular interest, some of which date +back to the 1800s. To search the bibliography, visit + +https://physics.nist.gov/cuu/Constants/ + +""" + +# Compiled by Charles Harris, dated October 3, 2002 +# updated to 2002 values by BasSw, 2006 +# Updated to 2006 values by Vincent Davis June 2010 +# Updated to 2014 values by Joseph Booker, 2015 +# Updated to 2018 values by Jakob Jakobson, 2019 + +from __future__ import annotations + +import warnings + +from typing import Any + +__all__ = ['physical_constants', 'value', 'unit', 'precision', 'find', + 'ConstantWarning'] + +""" +Source: https://physics.nist.gov/cuu/Constants/ + +The values of the constants provided at this site are recommended for +international use by CODATA and are the latest available. Termed the "2018 +CODATA recommended values," they are generally recognized worldwide for use in +all fields of science and technology. The values became available on 20 May +2019 and replaced the 2014 CODATA set. +""" + +# +# Source: https://physics.nist.gov/cuu/Constants/ +# + +# Quantity Value Uncertainty Unit +# ---------------------------------------------------- --------------------- -------------------- ------------- +txt2002 = """\ +Wien displacement law constant 2.897 7685e-3 0.000 0051e-3 m K +atomic unit of 1st hyperpolarizablity 3.206 361 51e-53 0.000 000 28e-53 C^3 m^3 J^-2 +atomic unit of 2nd hyperpolarizablity 6.235 3808e-65 0.000 0011e-65 C^4 m^4 J^-3 +atomic unit of electric dipole moment 8.478 353 09e-30 0.000 000 73e-30 C m +atomic unit of electric polarizablity 1.648 777 274e-41 0.000 000 016e-41 C^2 m^2 J^-1 +atomic unit of electric quadrupole moment 4.486 551 24e-40 0.000 000 39e-40 C m^2 +atomic unit of magn. dipole moment 1.854 801 90e-23 0.000 000 16e-23 J T^-1 +atomic unit of magn. flux density 2.350 517 42e5 0.000 000 20e5 T +deuteron magn. moment 0.433 073 482e-26 0.000 000 038e-26 J T^-1 +deuteron magn. moment to Bohr magneton ratio 0.466 975 4567e-3 0.000 000 0050e-3 +deuteron magn. moment to nuclear magneton ratio 0.857 438 2329 0.000 000 0092 +deuteron-electron magn. moment ratio -4.664 345 548e-4 0.000 000 050e-4 +deuteron-proton magn. moment ratio 0.307 012 2084 0.000 000 0045 +deuteron-neutron magn. moment ratio -0.448 206 52 0.000 000 11 +electron gyromagn. ratio 1.760 859 74e11 0.000 000 15e11 s^-1 T^-1 +electron gyromagn. ratio over 2 pi 28 024.9532 0.0024 MHz T^-1 +electron magn. moment -928.476 412e-26 0.000 080e-26 J T^-1 +electron magn. moment to Bohr magneton ratio -1.001 159 652 1859 0.000 000 000 0038 +electron magn. moment to nuclear magneton ratio -1838.281 971 07 0.000 000 85 +electron magn. moment anomaly 1.159 652 1859e-3 0.000 000 0038e-3 +electron to shielded proton magn. moment ratio -658.227 5956 0.000 0071 +electron to shielded helion magn. 
moment ratio 864.058 255 0.000 010 +electron-deuteron magn. moment ratio -2143.923 493 0.000 023 +electron-muon magn. moment ratio 206.766 9894 0.000 0054 +electron-neutron magn. moment ratio 960.920 50 0.000 23 +electron-proton magn. moment ratio -658.210 6862 0.000 0066 +magn. constant 12.566 370 614...e-7 0 N A^-2 +magn. flux quantum 2.067 833 72e-15 0.000 000 18e-15 Wb +muon magn. moment -4.490 447 99e-26 0.000 000 40e-26 J T^-1 +muon magn. moment to Bohr magneton ratio -4.841 970 45e-3 0.000 000 13e-3 +muon magn. moment to nuclear magneton ratio -8.890 596 98 0.000 000 23 +muon-proton magn. moment ratio -3.183 345 118 0.000 000 089 +neutron gyromagn. ratio 1.832 471 83e8 0.000 000 46e8 s^-1 T^-1 +neutron gyromagn. ratio over 2 pi 29.164 6950 0.000 0073 MHz T^-1 +neutron magn. moment -0.966 236 45e-26 0.000 000 24e-26 J T^-1 +neutron magn. moment to Bohr magneton ratio -1.041 875 63e-3 0.000 000 25e-3 +neutron magn. moment to nuclear magneton ratio -1.913 042 73 0.000 000 45 +neutron to shielded proton magn. moment ratio -0.684 996 94 0.000 000 16 +neutron-electron magn. moment ratio 1.040 668 82e-3 0.000 000 25e-3 +neutron-proton magn. moment ratio -0.684 979 34 0.000 000 16 +proton gyromagn. ratio 2.675 222 05e8 0.000 000 23e8 s^-1 T^-1 +proton gyromagn. ratio over 2 pi 42.577 4813 0.000 0037 MHz T^-1 +proton magn. moment 1.410 606 71e-26 0.000 000 12e-26 J T^-1 +proton magn. moment to Bohr magneton ratio 1.521 032 206e-3 0.000 000 015e-3 +proton magn. moment to nuclear magneton ratio 2.792 847 351 0.000 000 028 +proton magn. shielding correction 25.689e-6 0.015e-6 +proton-neutron magn. moment ratio -1.459 898 05 0.000 000 34 +shielded helion gyromagn. ratio 2.037 894 70e8 0.000 000 18e8 s^-1 T^-1 +shielded helion gyromagn. ratio over 2 pi 32.434 1015 0.000 0028 MHz T^-1 +shielded helion magn. moment -1.074 553 024e-26 0.000 000 093e-26 J T^-1 +shielded helion magn. moment to Bohr magneton ratio -1.158 671 474e-3 0.000 000 014e-3 +shielded helion magn. moment to nuclear magneton ratio -2.127 497 723 0.000 000 025 +shielded helion to proton magn. moment ratio -0.761 766 562 0.000 000 012 +shielded helion to shielded proton magn. moment ratio -0.761 786 1313 0.000 000 0033 +shielded helion gyromagn. ratio 2.037 894 70e8 0.000 000 18e8 s^-1 T^-1 +shielded helion gyromagn. ratio over 2 pi 32.434 1015 0.000 0028 MHz T^-1 +shielded proton magn. moment 1.410 570 47e-26 0.000 000 12e-26 J T^-1 +shielded proton magn. moment to Bohr magneton ratio 1.520 993 132e-3 0.000 000 016e-3 +shielded proton magn. 
moment to nuclear magneton ratio 2.792 775 604 0.000 000 030 +{220} lattice spacing of silicon 192.015 5965e-12 0.000 0070e-12 m""" + +txt2006 = """\ +lattice spacing of silicon 192.015 5762 e-12 0.000 0050 e-12 m +alpha particle-electron mass ratio 7294.299 5365 0.000 0031 +alpha particle mass 6.644 656 20 e-27 0.000 000 33 e-27 kg +alpha particle mass energy equivalent 5.971 919 17 e-10 0.000 000 30 e-10 J +alpha particle mass energy equivalent in MeV 3727.379 109 0.000 093 MeV +alpha particle mass in u 4.001 506 179 127 0.000 000 000 062 u +alpha particle molar mass 4.001 506 179 127 e-3 0.000 000 000 062 e-3 kg mol^-1 +alpha particle-proton mass ratio 3.972 599 689 51 0.000 000 000 41 +Angstrom star 1.000 014 98 e-10 0.000 000 90 e-10 m +atomic mass constant 1.660 538 782 e-27 0.000 000 083 e-27 kg +atomic mass constant energy equivalent 1.492 417 830 e-10 0.000 000 074 e-10 J +atomic mass constant energy equivalent in MeV 931.494 028 0.000 023 MeV +atomic mass unit-electron volt relationship 931.494 028 e6 0.000 023 e6 eV +atomic mass unit-hartree relationship 3.423 177 7149 e7 0.000 000 0049 e7 E_h +atomic mass unit-hertz relationship 2.252 342 7369 e23 0.000 000 0032 e23 Hz +atomic mass unit-inverse meter relationship 7.513 006 671 e14 0.000 000 011 e14 m^-1 +atomic mass unit-joule relationship 1.492 417 830 e-10 0.000 000 074 e-10 J +atomic mass unit-kelvin relationship 1.080 9527 e13 0.000 0019 e13 K +atomic mass unit-kilogram relationship 1.660 538 782 e-27 0.000 000 083 e-27 kg +atomic unit of 1st hyperpolarizability 3.206 361 533 e-53 0.000 000 081 e-53 C^3 m^3 J^-2 +atomic unit of 2nd hyperpolarizability 6.235 380 95 e-65 0.000 000 31 e-65 C^4 m^4 J^-3 +atomic unit of action 1.054 571 628 e-34 0.000 000 053 e-34 J s +atomic unit of charge 1.602 176 487 e-19 0.000 000 040 e-19 C +atomic unit of charge density 1.081 202 300 e12 0.000 000 027 e12 C m^-3 +atomic unit of current 6.623 617 63 e-3 0.000 000 17 e-3 A +atomic unit of electric dipole mom. 8.478 352 81 e-30 0.000 000 21 e-30 C m +atomic unit of electric field 5.142 206 32 e11 0.000 000 13 e11 V m^-1 +atomic unit of electric field gradient 9.717 361 66 e21 0.000 000 24 e21 V m^-2 +atomic unit of electric polarizability 1.648 777 2536 e-41 0.000 000 0034 e-41 C^2 m^2 J^-1 +atomic unit of electric potential 27.211 383 86 0.000 000 68 V +atomic unit of electric quadrupole mom. 4.486 551 07 e-40 0.000 000 11 e-40 C m^2 +atomic unit of energy 4.359 743 94 e-18 0.000 000 22 e-18 J +atomic unit of force 8.238 722 06 e-8 0.000 000 41 e-8 N +atomic unit of length 0.529 177 208 59 e-10 0.000 000 000 36 e-10 m +atomic unit of mag. dipole mom. 1.854 801 830 e-23 0.000 000 046 e-23 J T^-1 +atomic unit of mag. flux density 2.350 517 382 e5 0.000 000 059 e5 T +atomic unit of magnetizability 7.891 036 433 e-29 0.000 000 027 e-29 J T^-2 +atomic unit of mass 9.109 382 15 e-31 0.000 000 45 e-31 kg +atomic unit of momentum 1.992 851 565 e-24 0.000 000 099 e-24 kg m s^-1 +atomic unit of permittivity 1.112 650 056... 
e-10 (exact) F m^-1 +atomic unit of time 2.418 884 326 505 e-17 0.000 000 000 016 e-17 s +atomic unit of velocity 2.187 691 2541 e6 0.000 000 0015 e6 m s^-1 +Avogadro constant 6.022 141 79 e23 0.000 000 30 e23 mol^-1 +Bohr magneton 927.400 915 e-26 0.000 023 e-26 J T^-1 +Bohr magneton in eV/T 5.788 381 7555 e-5 0.000 000 0079 e-5 eV T^-1 +Bohr magneton in Hz/T 13.996 246 04 e9 0.000 000 35 e9 Hz T^-1 +Bohr magneton in inverse meters per tesla 46.686 4515 0.000 0012 m^-1 T^-1 +Bohr magneton in K/T 0.671 7131 0.000 0012 K T^-1 +Bohr radius 0.529 177 208 59 e-10 0.000 000 000 36 e-10 m +Boltzmann constant 1.380 6504 e-23 0.000 0024 e-23 J K^-1 +Boltzmann constant in eV/K 8.617 343 e-5 0.000 015 e-5 eV K^-1 +Boltzmann constant in Hz/K 2.083 6644 e10 0.000 0036 e10 Hz K^-1 +Boltzmann constant in inverse meters per kelvin 69.503 56 0.000 12 m^-1 K^-1 +characteristic impedance of vacuum 376.730 313 461... (exact) ohm +classical electron radius 2.817 940 2894 e-15 0.000 000 0058 e-15 m +Compton wavelength 2.426 310 2175 e-12 0.000 000 0033 e-12 m +Compton wavelength over 2 pi 386.159 264 59 e-15 0.000 000 53 e-15 m +conductance quantum 7.748 091 7004 e-5 0.000 000 0053 e-5 S +conventional value of Josephson constant 483 597.9 e9 (exact) Hz V^-1 +conventional value of von Klitzing constant 25 812.807 (exact) ohm +Cu x unit 1.002 076 99 e-13 0.000 000 28 e-13 m +deuteron-electron mag. mom. ratio -4.664 345 537 e-4 0.000 000 039 e-4 +deuteron-electron mass ratio 3670.482 9654 0.000 0016 +deuteron g factor 0.857 438 2308 0.000 000 0072 +deuteron mag. mom. 0.433 073 465 e-26 0.000 000 011 e-26 J T^-1 +deuteron mag. mom. to Bohr magneton ratio 0.466 975 4556 e-3 0.000 000 0039 e-3 +deuteron mag. mom. to nuclear magneton ratio 0.857 438 2308 0.000 000 0072 +deuteron mass 3.343 583 20 e-27 0.000 000 17 e-27 kg +deuteron mass energy equivalent 3.005 062 72 e-10 0.000 000 15 e-10 J +deuteron mass energy equivalent in MeV 1875.612 793 0.000 047 MeV +deuteron mass in u 2.013 553 212 724 0.000 000 000 078 u +deuteron molar mass 2.013 553 212 724 e-3 0.000 000 000 078 e-3 kg mol^-1 +deuteron-neutron mag. mom. ratio -0.448 206 52 0.000 000 11 +deuteron-proton mag. mom. ratio 0.307 012 2070 0.000 000 0024 +deuteron-proton mass ratio 1.999 007 501 08 0.000 000 000 22 +deuteron rms charge radius 2.1402 e-15 0.0028 e-15 m +electric constant 8.854 187 817... e-12 (exact) F m^-1 +electron charge to mass quotient -1.758 820 150 e11 0.000 000 044 e11 C kg^-1 +electron-deuteron mag. mom. ratio -2143.923 498 0.000 018 +electron-deuteron mass ratio 2.724 437 1093 e-4 0.000 000 0012 e-4 +electron g factor -2.002 319 304 3622 0.000 000 000 0015 +electron gyromag. ratio 1.760 859 770 e11 0.000 000 044 e11 s^-1 T^-1 +electron gyromag. ratio over 2 pi 28 024.953 64 0.000 70 MHz T^-1 +electron mag. mom. -928.476 377 e-26 0.000 023 e-26 J T^-1 +electron mag. mom. anomaly 1.159 652 181 11 e-3 0.000 000 000 74 e-3 +electron mag. mom. to Bohr magneton ratio -1.001 159 652 181 11 0.000 000 000 000 74 +electron mag. mom. to nuclear magneton ratio -1838.281 970 92 0.000 000 80 +electron mass 9.109 382 15 e-31 0.000 000 45 e-31 kg +electron mass energy equivalent 8.187 104 38 e-14 0.000 000 41 e-14 J +electron mass energy equivalent in MeV 0.510 998 910 0.000 000 013 MeV +electron mass in u 5.485 799 0943 e-4 0.000 000 0023 e-4 u +electron molar mass 5.485 799 0943 e-7 0.000 000 0023 e-7 kg mol^-1 +electron-muon mag. mom. ratio 206.766 9877 0.000 0052 +electron-muon mass ratio 4.836 331 71 e-3 0.000 000 12 e-3 +electron-neutron mag. 
mom. ratio 960.920 50 0.000 23 +electron-neutron mass ratio 5.438 673 4459 e-4 0.000 000 0033 e-4 +electron-proton mag. mom. ratio -658.210 6848 0.000 0054 +electron-proton mass ratio 5.446 170 2177 e-4 0.000 000 0024 e-4 +electron-tau mass ratio 2.875 64 e-4 0.000 47 e-4 +electron to alpha particle mass ratio 1.370 933 555 70 e-4 0.000 000 000 58 e-4 +electron to shielded helion mag. mom. ratio 864.058 257 0.000 010 +electron to shielded proton mag. mom. ratio -658.227 5971 0.000 0072 +electron volt 1.602 176 487 e-19 0.000 000 040 e-19 J +electron volt-atomic mass unit relationship 1.073 544 188 e-9 0.000 000 027 e-9 u +electron volt-hartree relationship 3.674 932 540 e-2 0.000 000 092 e-2 E_h +electron volt-hertz relationship 2.417 989 454 e14 0.000 000 060 e14 Hz +electron volt-inverse meter relationship 8.065 544 65 e5 0.000 000 20 e5 m^-1 +electron volt-joule relationship 1.602 176 487 e-19 0.000 000 040 e-19 J +electron volt-kelvin relationship 1.160 4505 e4 0.000 0020 e4 K +electron volt-kilogram relationship 1.782 661 758 e-36 0.000 000 044 e-36 kg +elementary charge 1.602 176 487 e-19 0.000 000 040 e-19 C +elementary charge over h 2.417 989 454 e14 0.000 000 060 e14 A J^-1 +Faraday constant 96 485.3399 0.0024 C mol^-1 +Faraday constant for conventional electric current 96 485.3401 0.0048 C_90 mol^-1 +Fermi coupling constant 1.166 37 e-5 0.000 01 e-5 GeV^-2 +fine-structure constant 7.297 352 5376 e-3 0.000 000 0050 e-3 +first radiation constant 3.741 771 18 e-16 0.000 000 19 e-16 W m^2 +first radiation constant for spectral radiance 1.191 042 759 e-16 0.000 000 059 e-16 W m^2 sr^-1 +hartree-atomic mass unit relationship 2.921 262 2986 e-8 0.000 000 0042 e-8 u +hartree-electron volt relationship 27.211 383 86 0.000 000 68 eV +Hartree energy 4.359 743 94 e-18 0.000 000 22 e-18 J +Hartree energy in eV 27.211 383 86 0.000 000 68 eV +hartree-hertz relationship 6.579 683 920 722 e15 0.000 000 000 044 e15 Hz +hartree-inverse meter relationship 2.194 746 313 705 e7 0.000 000 000 015 e7 m^-1 +hartree-joule relationship 4.359 743 94 e-18 0.000 000 22 e-18 J +hartree-kelvin relationship 3.157 7465 e5 0.000 0055 e5 K +hartree-kilogram relationship 4.850 869 34 e-35 0.000 000 24 e-35 kg +helion-electron mass ratio 5495.885 2765 0.000 0052 +helion mass 5.006 411 92 e-27 0.000 000 25 e-27 kg +helion mass energy equivalent 4.499 538 64 e-10 0.000 000 22 e-10 J +helion mass energy equivalent in MeV 2808.391 383 0.000 070 MeV +helion mass in u 3.014 932 2473 0.000 000 0026 u +helion molar mass 3.014 932 2473 e-3 0.000 000 0026 e-3 kg mol^-1 +helion-proton mass ratio 2.993 152 6713 0.000 000 0026 +hertz-atomic mass unit relationship 4.439 821 6294 e-24 0.000 000 0064 e-24 u +hertz-electron volt relationship 4.135 667 33 e-15 0.000 000 10 e-15 eV +hertz-hartree relationship 1.519 829 846 006 e-16 0.000 000 000010e-16 E_h +hertz-inverse meter relationship 3.335 640 951... 
e-9 (exact) m^-1 +hertz-joule relationship 6.626 068 96 e-34 0.000 000 33 e-34 J +hertz-kelvin relationship 4.799 2374 e-11 0.000 0084 e-11 K +hertz-kilogram relationship 7.372 496 00 e-51 0.000 000 37 e-51 kg +inverse fine-structure constant 137.035 999 679 0.000 000 094 +inverse meter-atomic mass unit relationship 1.331 025 0394 e-15 0.000 000 0019 e-15 u +inverse meter-electron volt relationship 1.239 841 875 e-6 0.000 000 031 e-6 eV +inverse meter-hartree relationship 4.556 335 252 760 e-8 0.000 000 000 030 e-8 E_h +inverse meter-hertz relationship 299 792 458 (exact) Hz +inverse meter-joule relationship 1.986 445 501 e-25 0.000 000 099 e-25 J +inverse meter-kelvin relationship 1.438 7752 e-2 0.000 0025 e-2 K +inverse meter-kilogram relationship 2.210 218 70 e-42 0.000 000 11 e-42 kg +inverse of conductance quantum 12 906.403 7787 0.000 0088 ohm +Josephson constant 483 597.891 e9 0.012 e9 Hz V^-1 +joule-atomic mass unit relationship 6.700 536 41 e9 0.000 000 33 e9 u +joule-electron volt relationship 6.241 509 65 e18 0.000 000 16 e18 eV +joule-hartree relationship 2.293 712 69 e17 0.000 000 11 e17 E_h +joule-hertz relationship 1.509 190 450 e33 0.000 000 075 e33 Hz +joule-inverse meter relationship 5.034 117 47 e24 0.000 000 25 e24 m^-1 +joule-kelvin relationship 7.242 963 e22 0.000 013 e22 K +joule-kilogram relationship 1.112 650 056... e-17 (exact) kg +kelvin-atomic mass unit relationship 9.251 098 e-14 0.000 016 e-14 u +kelvin-electron volt relationship 8.617 343 e-5 0.000 015 e-5 eV +kelvin-hartree relationship 3.166 8153 e-6 0.000 0055 e-6 E_h +kelvin-hertz relationship 2.083 6644 e10 0.000 0036 e10 Hz +kelvin-inverse meter relationship 69.503 56 0.000 12 m^-1 +kelvin-joule relationship 1.380 6504 e-23 0.000 0024 e-23 J +kelvin-kilogram relationship 1.536 1807 e-40 0.000 0027 e-40 kg +kilogram-atomic mass unit relationship 6.022 141 79 e26 0.000 000 30 e26 u +kilogram-electron volt relationship 5.609 589 12 e35 0.000 000 14 e35 eV +kilogram-hartree relationship 2.061 486 16 e34 0.000 000 10 e34 E_h +kilogram-hertz relationship 1.356 392 733 e50 0.000 000 068 e50 Hz +kilogram-inverse meter relationship 4.524 439 15 e41 0.000 000 23 e41 m^-1 +kilogram-joule relationship 8.987 551 787... e16 (exact) J +kilogram-kelvin relationship 6.509 651 e39 0.000 011 e39 K +lattice parameter of silicon 543.102 064 e-12 0.000 014 e-12 m +Loschmidt constant (273.15 K, 101.325 kPa) 2.686 7774 e25 0.000 0047 e25 m^-3 +mag. constant 12.566 370 614... e-7 (exact) N A^-2 +mag. flux quantum 2.067 833 667 e-15 0.000 000 052 e-15 Wb +molar gas constant 8.314 472 0.000 015 J mol^-1 K^-1 +molar mass constant 1 e-3 (exact) kg mol^-1 +molar mass of carbon-12 12 e-3 (exact) kg mol^-1 +molar Planck constant 3.990 312 6821 e-10 0.000 000 0057 e-10 J s mol^-1 +molar Planck constant times c 0.119 626 564 72 0.000 000 000 17 J m mol^-1 +molar volume of ideal gas (273.15 K, 100 kPa) 22.710 981 e-3 0.000 040 e-3 m^3 mol^-1 +molar volume of ideal gas (273.15 K, 101.325 kPa) 22.413 996 e-3 0.000 039 e-3 m^3 mol^-1 +molar volume of silicon 12.058 8349 e-6 0.000 0011 e-6 m^3 mol^-1 +Mo x unit 1.002 099 55 e-13 0.000 000 53 e-13 m +muon Compton wavelength 11.734 441 04 e-15 0.000 000 30 e-15 m +muon Compton wavelength over 2 pi 1.867 594 295 e-15 0.000 000 047 e-15 m +muon-electron mass ratio 206.768 2823 0.000 0052 +muon g factor -2.002 331 8414 0.000 000 0012 +muon mag. mom. -4.490 447 86 e-26 0.000 000 16 e-26 J T^-1 +muon mag. mom. anomaly 1.165 920 69 e-3 0.000 000 60 e-3 +muon mag. mom. 
to Bohr magneton ratio -4.841 970 49 e-3 0.000 000 12 e-3 +muon mag. mom. to nuclear magneton ratio -8.890 597 05 0.000 000 23 +muon mass 1.883 531 30 e-28 0.000 000 11 e-28 kg +muon mass energy equivalent 1.692 833 510 e-11 0.000 000 095 e-11 J +muon mass energy equivalent in MeV 105.658 3668 0.000 0038 MeV +muon mass in u 0.113 428 9256 0.000 000 0029 u +muon molar mass 0.113 428 9256 e-3 0.000 000 0029 e-3 kg mol^-1 +muon-neutron mass ratio 0.112 454 5167 0.000 000 0029 +muon-proton mag. mom. ratio -3.183 345 137 0.000 000 085 +muon-proton mass ratio 0.112 609 5261 0.000 000 0029 +muon-tau mass ratio 5.945 92 e-2 0.000 97 e-2 +natural unit of action 1.054 571 628 e-34 0.000 000 053 e-34 J s +natural unit of action in eV s 6.582 118 99 e-16 0.000 000 16 e-16 eV s +natural unit of energy 8.187 104 38 e-14 0.000 000 41 e-14 J +natural unit of energy in MeV 0.510 998 910 0.000 000 013 MeV +natural unit of length 386.159 264 59 e-15 0.000 000 53 e-15 m +natural unit of mass 9.109 382 15 e-31 0.000 000 45 e-31 kg +natural unit of momentum 2.730 924 06 e-22 0.000 000 14 e-22 kg m s^-1 +natural unit of momentum in MeV/c 0.510 998 910 0.000 000 013 MeV/c +natural unit of time 1.288 088 6570 e-21 0.000 000 0018 e-21 s +natural unit of velocity 299 792 458 (exact) m s^-1 +neutron Compton wavelength 1.319 590 8951 e-15 0.000 000 0020 e-15 m +neutron Compton wavelength over 2 pi 0.210 019 413 82 e-15 0.000 000 000 31 e-15 m +neutron-electron mag. mom. ratio 1.040 668 82 e-3 0.000 000 25 e-3 +neutron-electron mass ratio 1838.683 6605 0.000 0011 +neutron g factor -3.826 085 45 0.000 000 90 +neutron gyromag. ratio 1.832 471 85 e8 0.000 000 43 e8 s^-1 T^-1 +neutron gyromag. ratio over 2 pi 29.164 6954 0.000 0069 MHz T^-1 +neutron mag. mom. -0.966 236 41 e-26 0.000 000 23 e-26 J T^-1 +neutron mag. mom. to Bohr magneton ratio -1.041 875 63 e-3 0.000 000 25 e-3 +neutron mag. mom. to nuclear magneton ratio -1.913 042 73 0.000 000 45 +neutron mass 1.674 927 211 e-27 0.000 000 084 e-27 kg +neutron mass energy equivalent 1.505 349 505 e-10 0.000 000 075 e-10 J +neutron mass energy equivalent in MeV 939.565 346 0.000 023 MeV +neutron mass in u 1.008 664 915 97 0.000 000 000 43 u +neutron molar mass 1.008 664 915 97 e-3 0.000 000 000 43 e-3 kg mol^-1 +neutron-muon mass ratio 8.892 484 09 0.000 000 23 +neutron-proton mag. mom. ratio -0.684 979 34 0.000 000 16 +neutron-proton mass ratio 1.001 378 419 18 0.000 000 000 46 +neutron-tau mass ratio 0.528 740 0.000 086 +neutron to shielded proton mag. mom. 
ratio -0.684 996 94 0.000 000 16 +Newtonian constant of gravitation 6.674 28 e-11 0.000 67 e-11 m^3 kg^-1 s^-2 +Newtonian constant of gravitation over h-bar c 6.708 81 e-39 0.000 67 e-39 (GeV/c^2)^-2 +nuclear magneton 5.050 783 24 e-27 0.000 000 13 e-27 J T^-1 +nuclear magneton in eV/T 3.152 451 2326 e-8 0.000 000 0045 e-8 eV T^-1 +nuclear magneton in inverse meters per tesla 2.542 623 616 e-2 0.000 000 064 e-2 m^-1 T^-1 +nuclear magneton in K/T 3.658 2637 e-4 0.000 0064 e-4 K T^-1 +nuclear magneton in MHz/T 7.622 593 84 0.000 000 19 MHz T^-1 +Planck constant 6.626 068 96 e-34 0.000 000 33 e-34 J s +Planck constant in eV s 4.135 667 33 e-15 0.000 000 10 e-15 eV s +Planck constant over 2 pi 1.054 571 628 e-34 0.000 000 053 e-34 J s +Planck constant over 2 pi in eV s 6.582 118 99 e-16 0.000 000 16 e-16 eV s +Planck constant over 2 pi times c in MeV fm 197.326 9631 0.000 0049 MeV fm +Planck length 1.616 252 e-35 0.000 081 e-35 m +Planck mass 2.176 44 e-8 0.000 11 e-8 kg +Planck mass energy equivalent in GeV 1.220 892 e19 0.000 061 e19 GeV +Planck temperature 1.416 785 e32 0.000 071 e32 K +Planck time 5.391 24 e-44 0.000 27 e-44 s +proton charge to mass quotient 9.578 833 92 e7 0.000 000 24 e7 C kg^-1 +proton Compton wavelength 1.321 409 8446 e-15 0.000 000 0019 e-15 m +proton Compton wavelength over 2 pi 0.210 308 908 61 e-15 0.000 000 000 30 e-15 m +proton-electron mass ratio 1836.152 672 47 0.000 000 80 +proton g factor 5.585 694 713 0.000 000 046 +proton gyromag. ratio 2.675 222 099 e8 0.000 000 070 e8 s^-1 T^-1 +proton gyromag. ratio over 2 pi 42.577 4821 0.000 0011 MHz T^-1 +proton mag. mom. 1.410 606 662 e-26 0.000 000 037 e-26 J T^-1 +proton mag. mom. to Bohr magneton ratio 1.521 032 209 e-3 0.000 000 012 e-3 +proton mag. mom. to nuclear magneton ratio 2.792 847 356 0.000 000 023 +proton mag. shielding correction 25.694 e-6 0.014 e-6 +proton mass 1.672 621 637 e-27 0.000 000 083 e-27 kg +proton mass energy equivalent 1.503 277 359 e-10 0.000 000 075 e-10 J +proton mass energy equivalent in MeV 938.272 013 0.000 023 MeV +proton mass in u 1.007 276 466 77 0.000 000 000 10 u +proton molar mass 1.007 276 466 77 e-3 0.000 000 000 10 e-3 kg mol^-1 +proton-muon mass ratio 8.880 243 39 0.000 000 23 +proton-neutron mag. mom. ratio -1.459 898 06 0.000 000 34 +proton-neutron mass ratio 0.998 623 478 24 0.000 000 000 46 +proton rms charge radius 0.8768 e-15 0.0069 e-15 m +proton-tau mass ratio 0.528 012 0.000 086 +quantum of circulation 3.636 947 5199 e-4 0.000 000 0050 e-4 m^2 s^-1 +quantum of circulation times 2 7.273 895 040 e-4 0.000 000 010 e-4 m^2 s^-1 +Rydberg constant 10 973 731.568 527 0.000 073 m^-1 +Rydberg constant times c in Hz 3.289 841 960 361 e15 0.000 000 000 022 e15 Hz +Rydberg constant times hc in eV 13.605 691 93 0.000 000 34 eV +Rydberg constant times hc in J 2.179 871 97 e-18 0.000 000 11 e-18 J +Sackur-Tetrode constant (1 K, 100 kPa) -1.151 7047 0.000 0044 +Sackur-Tetrode constant (1 K, 101.325 kPa) -1.164 8677 0.000 0044 +second radiation constant 1.438 7752 e-2 0.000 0025 e-2 m K +shielded helion gyromag. ratio 2.037 894 730 e8 0.000 000 056 e8 s^-1 T^-1 +shielded helion gyromag. ratio over 2 pi 32.434 101 98 0.000 000 90 MHz T^-1 +shielded helion mag. mom. -1.074 552 982 e-26 0.000 000 030 e-26 J T^-1 +shielded helion mag. mom. to Bohr magneton ratio -1.158 671 471 e-3 0.000 000 014 e-3 +shielded helion mag. mom. to nuclear magneton ratio -2.127 497 718 0.000 000 025 +shielded helion to proton mag. mom. 
ratio -0.761 766 558 0.000 000 011 +shielded helion to shielded proton mag. mom. ratio -0.761 786 1313 0.000 000 0033 +shielded proton gyromag. ratio 2.675 153 362 e8 0.000 000 073 e8 s^-1 T^-1 +shielded proton gyromag. ratio over 2 pi 42.576 3881 0.000 0012 MHz T^-1 +shielded proton mag. mom. 1.410 570 419 e-26 0.000 000 038 e-26 J T^-1 +shielded proton mag. mom. to Bohr magneton ratio 1.520 993 128 e-3 0.000 000 017 e-3 +shielded proton mag. mom. to nuclear magneton ratio 2.792 775 598 0.000 000 030 +speed of light in vacuum 299 792 458 (exact) m s^-1 +standard acceleration of gravity 9.806 65 (exact) m s^-2 +standard atmosphere 101 325 (exact) Pa +Stefan-Boltzmann constant 5.670 400 e-8 0.000 040 e-8 W m^-2 K^-4 +tau Compton wavelength 0.697 72 e-15 0.000 11 e-15 m +tau Compton wavelength over 2 pi 0.111 046 e-15 0.000 018 e-15 m +tau-electron mass ratio 3477.48 0.57 +tau mass 3.167 77 e-27 0.000 52 e-27 kg +tau mass energy equivalent 2.847 05 e-10 0.000 46 e-10 J +tau mass energy equivalent in MeV 1776.99 0.29 MeV +tau mass in u 1.907 68 0.000 31 u +tau molar mass 1.907 68 e-3 0.000 31 e-3 kg mol^-1 +tau-muon mass ratio 16.8183 0.0027 +tau-neutron mass ratio 1.891 29 0.000 31 +tau-proton mass ratio 1.893 90 0.000 31 +Thomson cross section 0.665 245 8558 e-28 0.000 000 0027 e-28 m^2 +triton-electron mag. mom. ratio -1.620 514 423 e-3 0.000 000 021 e-3 +triton-electron mass ratio 5496.921 5269 0.000 0051 +triton g factor 5.957 924 896 0.000 000 076 +triton mag. mom. 1.504 609 361 e-26 0.000 000 042 e-26 J T^-1 +triton mag. mom. to Bohr magneton ratio 1.622 393 657 e-3 0.000 000 021 e-3 +triton mag. mom. to nuclear magneton ratio 2.978 962 448 0.000 000 038 +triton mass 5.007 355 88 e-27 0.000 000 25 e-27 kg +triton mass energy equivalent 4.500 387 03 e-10 0.000 000 22 e-10 J +triton mass energy equivalent in MeV 2808.920 906 0.000 070 MeV +triton mass in u 3.015 500 7134 0.000 000 0025 u +triton molar mass 3.015 500 7134 e-3 0.000 000 0025 e-3 kg mol^-1 +triton-neutron mag. mom. ratio -1.557 185 53 0.000 000 37 +triton-proton mag. mom. 
ratio 1.066 639 908 0.000 000 010 +triton-proton mass ratio 2.993 717 0309 0.000 000 0025 +unified atomic mass unit 1.660 538 782 e-27 0.000 000 083 e-27 kg +von Klitzing constant 25 812.807 557 0.000 018 ohm +weak mixing angle 0.222 55 0.000 56 +Wien frequency displacement law constant 5.878 933 e10 0.000 010 e10 Hz K^-1 +Wien wavelength displacement law constant 2.897 7685 e-3 0.000 0051 e-3 m K""" + +txt2010 = """\ +{220} lattice spacing of silicon 192.015 5714 e-12 0.000 0032 e-12 m +alpha particle-electron mass ratio 7294.299 5361 0.000 0029 +alpha particle mass 6.644 656 75 e-27 0.000 000 29 e-27 kg +alpha particle mass energy equivalent 5.971 919 67 e-10 0.000 000 26 e-10 J +alpha particle mass energy equivalent in MeV 3727.379 240 0.000 082 MeV +alpha particle mass in u 4.001 506 179 125 0.000 000 000 062 u +alpha particle molar mass 4.001 506 179 125 e-3 0.000 000 000 062 e-3 kg mol^-1 +alpha particle-proton mass ratio 3.972 599 689 33 0.000 000 000 36 +Angstrom star 1.000 014 95 e-10 0.000 000 90 e-10 m +atomic mass constant 1.660 538 921 e-27 0.000 000 073 e-27 kg +atomic mass constant energy equivalent 1.492 417 954 e-10 0.000 000 066 e-10 J +atomic mass constant energy equivalent in MeV 931.494 061 0.000 021 MeV +atomic mass unit-electron volt relationship 931.494 061 e6 0.000 021 e6 eV +atomic mass unit-hartree relationship 3.423 177 6845 e7 0.000 000 0024 e7 E_h +atomic mass unit-hertz relationship 2.252 342 7168 e23 0.000 000 0016 e23 Hz +atomic mass unit-inverse meter relationship 7.513 006 6042 e14 0.000 000 0053 e14 m^-1 +atomic mass unit-joule relationship 1.492 417 954 e-10 0.000 000 066 e-10 J +atomic mass unit-kelvin relationship 1.080 954 08 e13 0.000 000 98 e13 K +atomic mass unit-kilogram relationship 1.660 538 921 e-27 0.000 000 073 e-27 kg +atomic unit of 1st hyperpolarizability 3.206 361 449 e-53 0.000 000 071 e-53 C^3 m^3 J^-2 +atomic unit of 2nd hyperpolarizability 6.235 380 54 e-65 0.000 000 28 e-65 C^4 m^4 J^-3 +atomic unit of action 1.054 571 726 e-34 0.000 000 047 e-34 J s +atomic unit of charge 1.602 176 565 e-19 0.000 000 035 e-19 C +atomic unit of charge density 1.081 202 338 e12 0.000 000 024 e12 C m^-3 +atomic unit of current 6.623 617 95 e-3 0.000 000 15 e-3 A +atomic unit of electric dipole mom. 8.478 353 26 e-30 0.000 000 19 e-30 C m +atomic unit of electric field 5.142 206 52 e11 0.000 000 11 e11 V m^-1 +atomic unit of electric field gradient 9.717 362 00 e21 0.000 000 21 e21 V m^-2 +atomic unit of electric polarizability 1.648 777 2754 e-41 0.000 000 0016 e-41 C^2 m^2 J^-1 +atomic unit of electric potential 27.211 385 05 0.000 000 60 V +atomic unit of electric quadrupole mom. 4.486 551 331 e-40 0.000 000 099 e-40 C m^2 +atomic unit of energy 4.359 744 34 e-18 0.000 000 19 e-18 J +atomic unit of force 8.238 722 78 e-8 0.000 000 36 e-8 N +atomic unit of length 0.529 177 210 92 e-10 0.000 000 000 17 e-10 m +atomic unit of mag. dipole mom. 1.854 801 936 e-23 0.000 000 041 e-23 J T^-1 +atomic unit of mag. flux density 2.350 517 464 e5 0.000 000 052 e5 T +atomic unit of magnetizability 7.891 036 607 e-29 0.000 000 013 e-29 J T^-2 +atomic unit of mass 9.109 382 91 e-31 0.000 000 40 e-31 kg +atomic unit of mom.um 1.992 851 740 e-24 0.000 000 088 e-24 kg m s^-1 +atomic unit of permittivity 1.112 650 056... 
e-10 (exact) F m^-1 +atomic unit of time 2.418 884 326 502e-17 0.000 000 000 012e-17 s +atomic unit of velocity 2.187 691 263 79 e6 0.000 000 000 71 e6 m s^-1 +Avogadro constant 6.022 141 29 e23 0.000 000 27 e23 mol^-1 +Bohr magneton 927.400 968 e-26 0.000 020 e-26 J T^-1 +Bohr magneton in eV/T 5.788 381 8066 e-5 0.000 000 0038 e-5 eV T^-1 +Bohr magneton in Hz/T 13.996 245 55 e9 0.000 000 31 e9 Hz T^-1 +Bohr magneton in inverse meters per tesla 46.686 4498 0.000 0010 m^-1 T^-1 +Bohr magneton in K/T 0.671 713 88 0.000 000 61 K T^-1 +Bohr radius 0.529 177 210 92 e-10 0.000 000 000 17 e-10 m +Boltzmann constant 1.380 6488 e-23 0.000 0013 e-23 J K^-1 +Boltzmann constant in eV/K 8.617 3324 e-5 0.000 0078 e-5 eV K^-1 +Boltzmann constant in Hz/K 2.083 6618 e10 0.000 0019 e10 Hz K^-1 +Boltzmann constant in inverse meters per kelvin 69.503 476 0.000 063 m^-1 K^-1 +characteristic impedance of vacuum 376.730 313 461... (exact) ohm +classical electron radius 2.817 940 3267 e-15 0.000 000 0027 e-15 m +Compton wavelength 2.426 310 2389 e-12 0.000 000 0016 e-12 m +Compton wavelength over 2 pi 386.159 268 00 e-15 0.000 000 25 e-15 m +conductance quantum 7.748 091 7346 e-5 0.000 000 0025 e-5 S +conventional value of Josephson constant 483 597.9 e9 (exact) Hz V^-1 +conventional value of von Klitzing constant 25 812.807 (exact) ohm +Cu x unit 1.002 076 97 e-13 0.000 000 28 e-13 m +deuteron-electron mag. mom. ratio -4.664 345 537 e-4 0.000 000 039 e-4 +deuteron-electron mass ratio 3670.482 9652 0.000 0015 +deuteron g factor 0.857 438 2308 0.000 000 0072 +deuteron mag. mom. 0.433 073 489 e-26 0.000 000 010 e-26 J T^-1 +deuteron mag. mom. to Bohr magneton ratio 0.466 975 4556 e-3 0.000 000 0039 e-3 +deuteron mag. mom. to nuclear magneton ratio 0.857 438 2308 0.000 000 0072 +deuteron mass 3.343 583 48 e-27 0.000 000 15 e-27 kg +deuteron mass energy equivalent 3.005 062 97 e-10 0.000 000 13 e-10 J +deuteron mass energy equivalent in MeV 1875.612 859 0.000 041 MeV +deuteron mass in u 2.013 553 212 712 0.000 000 000 077 u +deuteron molar mass 2.013 553 212 712 e-3 0.000 000 000 077 e-3 kg mol^-1 +deuteron-neutron mag. mom. ratio -0.448 206 52 0.000 000 11 +deuteron-proton mag. mom. ratio 0.307 012 2070 0.000 000 0024 +deuteron-proton mass ratio 1.999 007 500 97 0.000 000 000 18 +deuteron rms charge radius 2.1424 e-15 0.0021 e-15 m +electric constant 8.854 187 817... e-12 (exact) F m^-1 +electron charge to mass quotient -1.758 820 088 e11 0.000 000 039 e11 C kg^-1 +electron-deuteron mag. mom. ratio -2143.923 498 0.000 018 +electron-deuteron mass ratio 2.724 437 1095 e-4 0.000 000 0011 e-4 +electron g factor -2.002 319 304 361 53 0.000 000 000 000 53 +electron gyromag. ratio 1.760 859 708 e11 0.000 000 039 e11 s^-1 T^-1 +electron gyromag. ratio over 2 pi 28 024.952 66 0.000 62 MHz T^-1 +electron-helion mass ratio 1.819 543 0761 e-4 0.000 000 0017 e-4 +electron mag. mom. -928.476 430 e-26 0.000 021 e-26 J T^-1 +electron mag. mom. anomaly 1.159 652 180 76 e-3 0.000 000 000 27 e-3 +electron mag. mom. to Bohr magneton ratio -1.001 159 652 180 76 0.000 000 000 000 27 +electron mag. mom. to nuclear magneton ratio -1838.281 970 90 0.000 000 75 +electron mass 9.109 382 91 e-31 0.000 000 40 e-31 kg +electron mass energy equivalent 8.187 105 06 e-14 0.000 000 36 e-14 J +electron mass energy equivalent in MeV 0.510 998 928 0.000 000 011 MeV +electron mass in u 5.485 799 0946 e-4 0.000 000 0022 e-4 u +electron molar mass 5.485 799 0946 e-7 0.000 000 0022 e-7 kg mol^-1 +electron-muon mag. mom. 
ratio 206.766 9896 0.000 0052 +electron-muon mass ratio 4.836 331 66 e-3 0.000 000 12 e-3 +electron-neutron mag. mom. ratio 960.920 50 0.000 23 +electron-neutron mass ratio 5.438 673 4461 e-4 0.000 000 0032 e-4 +electron-proton mag. mom. ratio -658.210 6848 0.000 0054 +electron-proton mass ratio 5.446 170 2178 e-4 0.000 000 0022 e-4 +electron-tau mass ratio 2.875 92 e-4 0.000 26 e-4 +electron to alpha particle mass ratio 1.370 933 555 78 e-4 0.000 000 000 55 e-4 +electron to shielded helion mag. mom. ratio 864.058 257 0.000 010 +electron to shielded proton mag. mom. ratio -658.227 5971 0.000 0072 +electron-triton mass ratio 1.819 200 0653 e-4 0.000 000 0017 e-4 +electron volt 1.602 176 565 e-19 0.000 000 035 e-19 J +electron volt-atomic mass unit relationship 1.073 544 150 e-9 0.000 000 024 e-9 u +electron volt-hartree relationship 3.674 932 379 e-2 0.000 000 081 e-2 E_h +electron volt-hertz relationship 2.417 989 348 e14 0.000 000 053 e14 Hz +electron volt-inverse meter relationship 8.065 544 29 e5 0.000 000 18 e5 m^-1 +electron volt-joule relationship 1.602 176 565 e-19 0.000 000 035 e-19 J +electron volt-kelvin relationship 1.160 4519 e4 0.000 0011 e4 K +electron volt-kilogram relationship 1.782 661 845 e-36 0.000 000 039 e-36 kg +elementary charge 1.602 176 565 e-19 0.000 000 035 e-19 C +elementary charge over h 2.417 989 348 e14 0.000 000 053 e14 A J^-1 +Faraday constant 96 485.3365 0.0021 C mol^-1 +Faraday constant for conventional electric current 96 485.3321 0.0043 C_90 mol^-1 +Fermi coupling constant 1.166 364 e-5 0.000 005 e-5 GeV^-2 +fine-structure constant 7.297 352 5698 e-3 0.000 000 0024 e-3 +first radiation constant 3.741 771 53 e-16 0.000 000 17 e-16 W m^2 +first radiation constant for spectral radiance 1.191 042 869 e-16 0.000 000 053 e-16 W m^2 sr^-1 +hartree-atomic mass unit relationship 2.921 262 3246 e-8 0.000 000 0021 e-8 u +hartree-electron volt relationship 27.211 385 05 0.000 000 60 eV +Hartree energy 4.359 744 34 e-18 0.000 000 19 e-18 J +Hartree energy in eV 27.211 385 05 0.000 000 60 eV +hartree-hertz relationship 6.579 683 920 729 e15 0.000 000 000 033 e15 Hz +hartree-inverse meter relationship 2.194 746 313 708 e7 0.000 000 000 011 e7 m^-1 +hartree-joule relationship 4.359 744 34 e-18 0.000 000 19 e-18 J +hartree-kelvin relationship 3.157 7504 e5 0.000 0029 e5 K +hartree-kilogram relationship 4.850 869 79 e-35 0.000 000 21 e-35 kg +helion-electron mass ratio 5495.885 2754 0.000 0050 +helion g factor -4.255 250 613 0.000 000 050 +helion mag. mom. -1.074 617 486 e-26 0.000 000 027 e-26 J T^-1 +helion mag. mom. to Bohr magneton ratio -1.158 740 958 e-3 0.000 000 014 e-3 +helion mag. mom. to nuclear magneton ratio -2.127 625 306 0.000 000 025 +helion mass 5.006 412 34 e-27 0.000 000 22 e-27 kg +helion mass energy equivalent 4.499 539 02 e-10 0.000 000 20 e-10 J +helion mass energy equivalent in MeV 2808.391 482 0.000 062 MeV +helion mass in u 3.014 932 2468 0.000 000 0025 u +helion molar mass 3.014 932 2468 e-3 0.000 000 0025 e-3 kg mol^-1 +helion-proton mass ratio 2.993 152 6707 0.000 000 0025 +hertz-atomic mass unit relationship 4.439 821 6689 e-24 0.000 000 0031 e-24 u +hertz-electron volt relationship 4.135 667 516 e-15 0.000 000 091 e-15 eV +hertz-hartree relationship 1.519 829 8460045e-16 0.000 000 0000076e-16 E_h +hertz-inverse meter relationship 3.335 640 951... 
e-9 (exact) m^-1 +hertz-joule relationship 6.626 069 57 e-34 0.000 000 29 e-34 J +hertz-kelvin relationship 4.799 2434 e-11 0.000 0044 e-11 K +hertz-kilogram relationship 7.372 496 68 e-51 0.000 000 33 e-51 kg +inverse fine-structure constant 137.035 999 074 0.000 000 044 +inverse meter-atomic mass unit relationship 1.331 025 051 20 e-15 0.000 000 000 94 e-15 u +inverse meter-electron volt relationship 1.239 841 930 e-6 0.000 000 027 e-6 eV +inverse meter-hartree relationship 4.556 335 252 755 e-8 0.000 000 000 023 e-8 E_h +inverse meter-hertz relationship 299 792 458 (exact) Hz +inverse meter-joule relationship 1.986 445 684 e-25 0.000 000 088 e-25 J +inverse meter-kelvin relationship 1.438 7770 e-2 0.000 0013 e-2 K +inverse meter-kilogram relationship 2.210 218 902 e-42 0.000 000 098 e-42 kg +inverse of conductance quantum 12 906.403 7217 0.000 0042 ohm +Josephson constant 483 597.870 e9 0.011 e9 Hz V^-1 +joule-atomic mass unit relationship 6.700 535 85 e9 0.000 000 30 e9 u +joule-electron volt relationship 6.241 509 34 e18 0.000 000 14 e18 eV +joule-hartree relationship 2.293 712 48 e17 0.000 000 10 e17 E_h +joule-hertz relationship 1.509 190 311 e33 0.000 000 067 e33 Hz +joule-inverse meter relationship 5.034 117 01 e24 0.000 000 22 e24 m^-1 +joule-kelvin relationship 7.242 9716 e22 0.000 0066 e22 K +joule-kilogram relationship 1.112 650 056... e-17 (exact) kg +kelvin-atomic mass unit relationship 9.251 0868 e-14 0.000 0084 e-14 u +kelvin-electron volt relationship 8.617 3324 e-5 0.000 0078 e-5 eV +kelvin-hartree relationship 3.166 8114 e-6 0.000 0029 e-6 E_h +kelvin-hertz relationship 2.083 6618 e10 0.000 0019 e10 Hz +kelvin-inverse meter relationship 69.503 476 0.000 063 m^-1 +kelvin-joule relationship 1.380 6488 e-23 0.000 0013 e-23 J +kelvin-kilogram relationship 1.536 1790 e-40 0.000 0014 e-40 kg +kilogram-atomic mass unit relationship 6.022 141 29 e26 0.000 000 27 e26 u +kilogram-electron volt relationship 5.609 588 85 e35 0.000 000 12 e35 eV +kilogram-hartree relationship 2.061 485 968 e34 0.000 000 091 e34 E_h +kilogram-hertz relationship 1.356 392 608 e50 0.000 000 060 e50 Hz +kilogram-inverse meter relationship 4.524 438 73 e41 0.000 000 20 e41 m^-1 +kilogram-joule relationship 8.987 551 787... e16 (exact) J +kilogram-kelvin relationship 6.509 6582 e39 0.000 0059 e39 K +lattice parameter of silicon 543.102 0504 e-12 0.000 0089 e-12 m +Loschmidt constant (273.15 K, 100 kPa) 2.651 6462 e25 0.000 0024 e25 m^-3 +Loschmidt constant (273.15 K, 101.325 kPa) 2.686 7805 e25 0.000 0024 e25 m^-3 +mag. constant 12.566 370 614... e-7 (exact) N A^-2 +mag. flux quantum 2.067 833 758 e-15 0.000 000 046 e-15 Wb +molar gas constant 8.314 4621 0.000 0075 J mol^-1 K^-1 +molar mass constant 1 e-3 (exact) kg mol^-1 +molar mass of carbon-12 12 e-3 (exact) kg mol^-1 +molar Planck constant 3.990 312 7176 e-10 0.000 000 0028 e-10 J s mol^-1 +molar Planck constant times c 0.119 626 565 779 0.000 000 000 084 J m mol^-1 +molar volume of ideal gas (273.15 K, 100 kPa) 22.710 953 e-3 0.000 021 e-3 m^3 mol^-1 +molar volume of ideal gas (273.15 K, 101.325 kPa) 22.413 968 e-3 0.000 020 e-3 m^3 mol^-1 +molar volume of silicon 12.058 833 01 e-6 0.000 000 80 e-6 m^3 mol^-1 +Mo x unit 1.002 099 52 e-13 0.000 000 53 e-13 m +muon Compton wavelength 11.734 441 03 e-15 0.000 000 30 e-15 m +muon Compton wavelength over 2 pi 1.867 594 294 e-15 0.000 000 047 e-15 m +muon-electron mass ratio 206.768 2843 0.000 0052 +muon g factor -2.002 331 8418 0.000 000 0013 +muon mag. mom. 
-4.490 448 07 e-26 0.000 000 15 e-26 J T^-1 +muon mag. mom. anomaly 1.165 920 91 e-3 0.000 000 63 e-3 +muon mag. mom. to Bohr magneton ratio -4.841 970 44 e-3 0.000 000 12 e-3 +muon mag. mom. to nuclear magneton ratio -8.890 596 97 0.000 000 22 +muon mass 1.883 531 475 e-28 0.000 000 096 e-28 kg +muon mass energy equivalent 1.692 833 667 e-11 0.000 000 086 e-11 J +muon mass energy equivalent in MeV 105.658 3715 0.000 0035 MeV +muon mass in u 0.113 428 9267 0.000 000 0029 u +muon molar mass 0.113 428 9267 e-3 0.000 000 0029 e-3 kg mol^-1 +muon-neutron mass ratio 0.112 454 5177 0.000 000 0028 +muon-proton mag. mom. ratio -3.183 345 107 0.000 000 084 +muon-proton mass ratio 0.112 609 5272 0.000 000 0028 +muon-tau mass ratio 5.946 49 e-2 0.000 54 e-2 +natural unit of action 1.054 571 726 e-34 0.000 000 047 e-34 J s +natural unit of action in eV s 6.582 119 28 e-16 0.000 000 15 e-16 eV s +natural unit of energy 8.187 105 06 e-14 0.000 000 36 e-14 J +natural unit of energy in MeV 0.510 998 928 0.000 000 011 MeV +natural unit of length 386.159 268 00 e-15 0.000 000 25 e-15 m +natural unit of mass 9.109 382 91 e-31 0.000 000 40 e-31 kg +natural unit of mom.um 2.730 924 29 e-22 0.000 000 12 e-22 kg m s^-1 +natural unit of mom.um in MeV/c 0.510 998 928 0.000 000 011 MeV/c +natural unit of time 1.288 088 668 33 e-21 0.000 000 000 83 e-21 s +natural unit of velocity 299 792 458 (exact) m s^-1 +neutron Compton wavelength 1.319 590 9068 e-15 0.000 000 0011 e-15 m +neutron Compton wavelength over 2 pi 0.210 019 415 68 e-15 0.000 000 000 17 e-15 m +neutron-electron mag. mom. ratio 1.040 668 82 e-3 0.000 000 25 e-3 +neutron-electron mass ratio 1838.683 6605 0.000 0011 +neutron g factor -3.826 085 45 0.000 000 90 +neutron gyromag. ratio 1.832 471 79 e8 0.000 000 43 e8 s^-1 T^-1 +neutron gyromag. ratio over 2 pi 29.164 6943 0.000 0069 MHz T^-1 +neutron mag. mom. -0.966 236 47 e-26 0.000 000 23 e-26 J T^-1 +neutron mag. mom. to Bohr magneton ratio -1.041 875 63 e-3 0.000 000 25 e-3 +neutron mag. mom. to nuclear magneton ratio -1.913 042 72 0.000 000 45 +neutron mass 1.674 927 351 e-27 0.000 000 074 e-27 kg +neutron mass energy equivalent 1.505 349 631 e-10 0.000 000 066 e-10 J +neutron mass energy equivalent in MeV 939.565 379 0.000 021 MeV +neutron mass in u 1.008 664 916 00 0.000 000 000 43 u +neutron molar mass 1.008 664 916 00 e-3 0.000 000 000 43 e-3 kg mol^-1 +neutron-muon mass ratio 8.892 484 00 0.000 000 22 +neutron-proton mag. mom. ratio -0.684 979 34 0.000 000 16 +neutron-proton mass difference 2.305 573 92 e-30 0.000 000 76 e-30 +neutron-proton mass difference energy equivalent 2.072 146 50 e-13 0.000 000 68 e-13 +neutron-proton mass difference energy equivalent in MeV 1.293 332 17 0.000 000 42 +neutron-proton mass difference in u 0.001 388 449 19 0.000 000 000 45 +neutron-proton mass ratio 1.001 378 419 17 0.000 000 000 45 +neutron-tau mass ratio 0.528 790 0.000 048 +neutron to shielded proton mag. mom. 
ratio -0.684 996 94 0.000 000 16 +Newtonian constant of gravitation 6.673 84 e-11 0.000 80 e-11 m^3 kg^-1 s^-2 +Newtonian constant of gravitation over h-bar c 6.708 37 e-39 0.000 80 e-39 (GeV/c^2)^-2 +nuclear magneton 5.050 783 53 e-27 0.000 000 11 e-27 J T^-1 +nuclear magneton in eV/T 3.152 451 2605 e-8 0.000 000 0022 e-8 eV T^-1 +nuclear magneton in inverse meters per tesla 2.542 623 527 e-2 0.000 000 056 e-2 m^-1 T^-1 +nuclear magneton in K/T 3.658 2682 e-4 0.000 0033 e-4 K T^-1 +nuclear magneton in MHz/T 7.622 593 57 0.000 000 17 MHz T^-1 +Planck constant 6.626 069 57 e-34 0.000 000 29 e-34 J s +Planck constant in eV s 4.135 667 516 e-15 0.000 000 091 e-15 eV s +Planck constant over 2 pi 1.054 571 726 e-34 0.000 000 047 e-34 J s +Planck constant over 2 pi in eV s 6.582 119 28 e-16 0.000 000 15 e-16 eV s +Planck constant over 2 pi times c in MeV fm 197.326 9718 0.000 0044 MeV fm +Planck length 1.616 199 e-35 0.000 097 e-35 m +Planck mass 2.176 51 e-8 0.000 13 e-8 kg +Planck mass energy equivalent in GeV 1.220 932 e19 0.000 073 e19 GeV +Planck temperature 1.416 833 e32 0.000 085 e32 K +Planck time 5.391 06 e-44 0.000 32 e-44 s +proton charge to mass quotient 9.578 833 58 e7 0.000 000 21 e7 C kg^-1 +proton Compton wavelength 1.321 409 856 23 e-15 0.000 000 000 94 e-15 m +proton Compton wavelength over 2 pi 0.210 308 910 47 e-15 0.000 000 000 15 e-15 m +proton-electron mass ratio 1836.152 672 45 0.000 000 75 +proton g factor 5.585 694 713 0.000 000 046 +proton gyromag. ratio 2.675 222 005 e8 0.000 000 063 e8 s^-1 T^-1 +proton gyromag. ratio over 2 pi 42.577 4806 0.000 0010 MHz T^-1 +proton mag. mom. 1.410 606 743 e-26 0.000 000 033 e-26 J T^-1 +proton mag. mom. to Bohr magneton ratio 1.521 032 210 e-3 0.000 000 012 e-3 +proton mag. mom. to nuclear magneton ratio 2.792 847 356 0.000 000 023 +proton mag. shielding correction 25.694 e-6 0.014 e-6 +proton mass 1.672 621 777 e-27 0.000 000 074 e-27 kg +proton mass energy equivalent 1.503 277 484 e-10 0.000 000 066 e-10 J +proton mass energy equivalent in MeV 938.272 046 0.000 021 MeV +proton mass in u 1.007 276 466 812 0.000 000 000 090 u +proton molar mass 1.007 276 466 812 e-3 0.000 000 000 090 e-3 kg mol^-1 +proton-muon mass ratio 8.880 243 31 0.000 000 22 +proton-neutron mag. mom. ratio -1.459 898 06 0.000 000 34 +proton-neutron mass ratio 0.998 623 478 26 0.000 000 000 45 +proton rms charge radius 0.8775 e-15 0.0051 e-15 m +proton-tau mass ratio 0.528 063 0.000 048 +quantum of circulation 3.636 947 5520 e-4 0.000 000 0024 e-4 m^2 s^-1 +quantum of circulation times 2 7.273 895 1040 e-4 0.000 000 0047 e-4 m^2 s^-1 +Rydberg constant 10 973 731.568 539 0.000 055 m^-1 +Rydberg constant times c in Hz 3.289 841 960 364 e15 0.000 000 000 017 e15 Hz +Rydberg constant times hc in eV 13.605 692 53 0.000 000 30 eV +Rydberg constant times hc in J 2.179 872 171 e-18 0.000 000 096 e-18 J +Sackur-Tetrode constant (1 K, 100 kPa) -1.151 7078 0.000 0023 +Sackur-Tetrode constant (1 K, 101.325 kPa) -1.164 8708 0.000 0023 +second radiation constant 1.438 7770 e-2 0.000 0013 e-2 m K +shielded helion gyromag. ratio 2.037 894 659 e8 0.000 000 051 e8 s^-1 T^-1 +shielded helion gyromag. ratio over 2 pi 32.434 100 84 0.000 000 81 MHz T^-1 +shielded helion mag. mom. -1.074 553 044 e-26 0.000 000 027 e-26 J T^-1 +shielded helion mag. mom. to Bohr magneton ratio -1.158 671 471 e-3 0.000 000 014 e-3 +shielded helion mag. mom. to nuclear magneton ratio -2.127 497 718 0.000 000 025 +shielded helion to proton mag. mom. 
ratio -0.761 766 558 0.000 000 011 +shielded helion to shielded proton mag. mom. ratio -0.761 786 1313 0.000 000 0033 +shielded proton gyromag. ratio 2.675 153 268 e8 0.000 000 066 e8 s^-1 T^-1 +shielded proton gyromag. ratio over 2 pi 42.576 3866 0.000 0010 MHz T^-1 +shielded proton mag. mom. 1.410 570 499 e-26 0.000 000 035 e-26 J T^-1 +shielded proton mag. mom. to Bohr magneton ratio 1.520 993 128 e-3 0.000 000 017 e-3 +shielded proton mag. mom. to nuclear magneton ratio 2.792 775 598 0.000 000 030 +speed of light in vacuum 299 792 458 (exact) m s^-1 +standard acceleration of gravity 9.806 65 (exact) m s^-2 +standard atmosphere 101 325 (exact) Pa +standard-state pressure 100 000 (exact) Pa +Stefan-Boltzmann constant 5.670 373 e-8 0.000 021 e-8 W m^-2 K^-4 +tau Compton wavelength 0.697 787 e-15 0.000 063 e-15 m +tau Compton wavelength over 2 pi 0.111 056 e-15 0.000 010 e-15 m +tau-electron mass ratio 3477.15 0.31 +tau mass 3.167 47 e-27 0.000 29 e-27 kg +tau mass energy equivalent 2.846 78 e-10 0.000 26 e-10 J +tau mass energy equivalent in MeV 1776.82 0.16 MeV +tau mass in u 1.907 49 0.000 17 u +tau molar mass 1.907 49 e-3 0.000 17 e-3 kg mol^-1 +tau-muon mass ratio 16.8167 0.0015 +tau-neutron mass ratio 1.891 11 0.000 17 +tau-proton mass ratio 1.893 72 0.000 17 +Thomson cross section 0.665 245 8734 e-28 0.000 000 0013 e-28 m^2 +triton-electron mass ratio 5496.921 5267 0.000 0050 +triton g factor 5.957 924 896 0.000 000 076 +triton mag. mom. 1.504 609 447 e-26 0.000 000 038 e-26 J T^-1 +triton mag. mom. to Bohr magneton ratio 1.622 393 657 e-3 0.000 000 021 e-3 +triton mag. mom. to nuclear magneton ratio 2.978 962 448 0.000 000 038 +triton mass 5.007 356 30 e-27 0.000 000 22 e-27 kg +triton mass energy equivalent 4.500 387 41 e-10 0.000 000 20 e-10 J +triton mass energy equivalent in MeV 2808.921 005 0.000 062 MeV +triton mass in u 3.015 500 7134 0.000 000 0025 u +triton molar mass 3.015 500 7134 e-3 0.000 000 0025 e-3 kg mol^-1 +triton-proton mass ratio 2.993 717 0308 0.000 000 0025 +unified atomic mass unit 1.660 538 921 e-27 0.000 000 073 e-27 kg +von Klitzing constant 25 812.807 4434 0.000 0084 ohm +weak mixing angle 0.2223 0.0021 +Wien frequency displacement law constant 5.878 9254 e10 0.000 0053 e10 Hz K^-1 +Wien wavelength displacement law constant 2.897 7721 e-3 0.000 0026 e-3 m K""" + +txt2014 = """\ +{220} lattice spacing of silicon 192.015 5714 e-12 0.000 0032 e-12 m +alpha particle-electron mass ratio 7294.299 541 36 0.000 000 24 +alpha particle mass 6.644 657 230 e-27 0.000 000 082 e-27 kg +alpha particle mass energy equivalent 5.971 920 097 e-10 0.000 000 073 e-10 J +alpha particle mass energy equivalent in MeV 3727.379 378 0.000 023 MeV +alpha particle mass in u 4.001 506 179 127 0.000 000 000 063 u +alpha particle molar mass 4.001 506 179 127 e-3 0.000 000 000 063 e-3 kg mol^-1 +alpha particle-proton mass ratio 3.972 599 689 07 0.000 000 000 36 +Angstrom star 1.000 014 95 e-10 0.000 000 90 e-10 m +atomic mass constant 1.660 539 040 e-27 0.000 000 020 e-27 kg +atomic mass constant energy equivalent 1.492 418 062 e-10 0.000 000 018 e-10 J +atomic mass constant energy equivalent in MeV 931.494 0954 0.000 0057 MeV +atomic mass unit-electron volt relationship 931.494 0954 e6 0.000 0057 e6 eV +atomic mass unit-hartree relationship 3.423 177 6902 e7 0.000 000 0016 e7 E_h +atomic mass unit-hertz relationship 2.252 342 7206 e23 0.000 000 0010 e23 Hz +atomic mass unit-inverse meter relationship 7.513 006 6166 e14 0.000 000 0034 e14 m^-1 +atomic mass unit-joule relationship 1.492 
418 062 e-10 0.000 000 018 e-10 J +atomic mass unit-kelvin relationship 1.080 954 38 e13 0.000 000 62 e13 K +atomic mass unit-kilogram relationship 1.660 539 040 e-27 0.000 000 020 e-27 kg +atomic unit of 1st hyperpolarizability 3.206 361 329 e-53 0.000 000 020 e-53 C^3 m^3 J^-2 +atomic unit of 2nd hyperpolarizability 6.235 380 085 e-65 0.000 000 077 e-65 C^4 m^4 J^-3 +atomic unit of action 1.054 571 800 e-34 0.000 000 013 e-34 J s +atomic unit of charge 1.602 176 6208 e-19 0.000 000 0098 e-19 C +atomic unit of charge density 1.081 202 3770 e12 0.000 000 0067 e12 C m^-3 +atomic unit of current 6.623 618 183 e-3 0.000 000 041 e-3 A +atomic unit of electric dipole mom. 8.478 353 552 e-30 0.000 000 052 e-30 C m +atomic unit of electric field 5.142 206 707 e11 0.000 000 032 e11 V m^-1 +atomic unit of electric field gradient 9.717 362 356 e21 0.000 000 060 e21 V m^-2 +atomic unit of electric polarizability 1.648 777 2731 e-41 0.000 000 0011 e-41 C^2 m^2 J^-1 +atomic unit of electric potential 27.211 386 02 0.000 000 17 V +atomic unit of electric quadrupole mom. 4.486 551 484 e-40 0.000 000 028 e-40 C m^2 +atomic unit of energy 4.359 744 650 e-18 0.000 000 054 e-18 J +atomic unit of force 8.238 723 36 e-8 0.000 000 10 e-8 N +atomic unit of length 0.529 177 210 67 e-10 0.000 000 000 12 e-10 m +atomic unit of mag. dipole mom. 1.854 801 999 e-23 0.000 000 011 e-23 J T^-1 +atomic unit of mag. flux density 2.350 517 550 e5 0.000 000 014 e5 T +atomic unit of magnetizability 7.891 036 5886 e-29 0.000 000 0090 e-29 J T^-2 +atomic unit of mass 9.109 383 56 e-31 0.000 000 11 e-31 kg +atomic unit of mom.um 1.992 851 882 e-24 0.000 000 024 e-24 kg m s^-1 +atomic unit of permittivity 1.112 650 056... e-10 (exact) F m^-1 +atomic unit of time 2.418 884 326509e-17 0.000 000 000014e-17 s +atomic unit of velocity 2.187 691 262 77 e6 0.000 000 000 50 e6 m s^-1 +Avogadro constant 6.022 140 857 e23 0.000 000 074 e23 mol^-1 +Bohr magneton 927.400 9994 e-26 0.000 0057 e-26 J T^-1 +Bohr magneton in eV/T 5.788 381 8012 e-5 0.000 000 0026 e-5 eV T^-1 +Bohr magneton in Hz/T 13.996 245 042 e9 0.000 000 086 e9 Hz T^-1 +Bohr magneton in inverse meters per tesla 46.686 448 14 0.000 000 29 m^-1 T^-1 +Bohr magneton in K/T 0.671 714 05 0.000 000 39 K T^-1 +Bohr radius 0.529 177 210 67 e-10 0.000 000 000 12 e-10 m +Boltzmann constant 1.380 648 52 e-23 0.000 000 79 e-23 J K^-1 +Boltzmann constant in eV/K 8.617 3303 e-5 0.000 0050 e-5 eV K^-1 +Boltzmann constant in Hz/K 2.083 6612 e10 0.000 0012 e10 Hz K^-1 +Boltzmann constant in inverse meters per kelvin 69.503 457 0.000 040 m^-1 K^-1 +characteristic impedance of vacuum 376.730 313 461... (exact) ohm +classical electron radius 2.817 940 3227 e-15 0.000 000 0019 e-15 m +Compton wavelength 2.426 310 2367 e-12 0.000 000 0011 e-12 m +Compton wavelength over 2 pi 386.159 267 64 e-15 0.000 000 18 e-15 m +conductance quantum 7.748 091 7310 e-5 0.000 000 0018 e-5 S +conventional value of Josephson constant 483 597.9 e9 (exact) Hz V^-1 +conventional value of von Klitzing constant 25 812.807 (exact) ohm +Cu x unit 1.002 076 97 e-13 0.000 000 28 e-13 m +deuteron-electron mag. mom. ratio -4.664 345 535 e-4 0.000 000 026 e-4 +deuteron-electron mass ratio 3670.482 967 85 0.000 000 13 +deuteron g factor 0.857 438 2311 0.000 000 0048 +deuteron mag. mom. 0.433 073 5040 e-26 0.000 000 0036 e-26 J T^-1 +deuteron mag. mom. to Bohr magneton ratio 0.466 975 4554 e-3 0.000 000 0026 e-3 +deuteron mag. mom. 
to nuclear magneton ratio 0.857 438 2311 0.000 000 0048 +deuteron mass 3.343 583 719 e-27 0.000 000 041 e-27 kg +deuteron mass energy equivalent 3.005 063 183 e-10 0.000 000 037 e-10 J +deuteron mass energy equivalent in MeV 1875.612 928 0.000 012 MeV +deuteron mass in u 2.013 553 212 745 0.000 000 000 040 u +deuteron molar mass 2.013 553 212 745 e-3 0.000 000 000 040 e-3 kg mol^-1 +deuteron-neutron mag. mom. ratio -0.448 206 52 0.000 000 11 +deuteron-proton mag. mom. ratio 0.307 012 2077 0.000 000 0015 +deuteron-proton mass ratio 1.999 007 500 87 0.000 000 000 19 +deuteron rms charge radius 2.1413 e-15 0.0025 e-15 m +electric constant 8.854 187 817... e-12 (exact) F m^-1 +electron charge to mass quotient -1.758 820 024 e11 0.000 000 011 e11 C kg^-1 +electron-deuteron mag. mom. ratio -2143.923 499 0.000 012 +electron-deuteron mass ratio 2.724 437 107 484 e-4 0.000 000 000 096 e-4 +electron g factor -2.002 319 304 361 82 0.000 000 000 000 52 +electron gyromag. ratio 1.760 859 644 e11 0.000 000 011 e11 s^-1 T^-1 +electron gyromag. ratio over 2 pi 28 024.951 64 0.000 17 MHz T^-1 +electron-helion mass ratio 1.819 543 074 854 e-4 0.000 000 000 088 e-4 +electron mag. mom. -928.476 4620 e-26 0.000 0057 e-26 J T^-1 +electron mag. mom. anomaly 1.159 652 180 91 e-3 0.000 000 000 26 e-3 +electron mag. mom. to Bohr magneton ratio -1.001 159 652 180 91 0.000 000 000 000 26 +electron mag. mom. to nuclear magneton ratio -1838.281 972 34 0.000 000 17 +electron mass 9.109 383 56 e-31 0.000 000 11 e-31 kg +electron mass energy equivalent 8.187 105 65 e-14 0.000 000 10 e-14 J +electron mass energy equivalent in MeV 0.510 998 9461 0.000 000 0031 MeV +electron mass in u 5.485 799 090 70 e-4 0.000 000 000 16 e-4 u +electron molar mass 5.485 799 090 70 e-7 0.000 000 000 16 e-7 kg mol^-1 +electron-muon mag. mom. ratio 206.766 9880 0.000 0046 +electron-muon mass ratio 4.836 331 70 e-3 0.000 000 11 e-3 +electron-neutron mag. mom. ratio 960.920 50 0.000 23 +electron-neutron mass ratio 5.438 673 4428 e-4 0.000 000 0027 e-4 +electron-proton mag. mom. ratio -658.210 6866 0.000 0020 +electron-proton mass ratio 5.446 170 213 52 e-4 0.000 000 000 52 e-4 +electron-tau mass ratio 2.875 92 e-4 0.000 26 e-4 +electron to alpha particle mass ratio 1.370 933 554 798 e-4 0.000 000 000 045 e-4 +electron to shielded helion mag. mom. ratio 864.058 257 0.000 010 +electron to shielded proton mag. mom. 
ratio -658.227 5971 0.000 0072 +electron-triton mass ratio 1.819 200 062 203 e-4 0.000 000 000 084 e-4 +electron volt 1.602 176 6208 e-19 0.000 000 0098 e-19 J +electron volt-atomic mass unit relationship 1.073 544 1105 e-9 0.000 000 0066 e-9 u +electron volt-hartree relationship 3.674 932 248 e-2 0.000 000 023 e-2 E_h +electron volt-hertz relationship 2.417 989 262 e14 0.000 000 015 e14 Hz +electron volt-inverse meter relationship 8.065 544 005 e5 0.000 000 050 e5 m^-1 +electron volt-joule relationship 1.602 176 6208 e-19 0.000 000 0098 e-19 J +electron volt-kelvin relationship 1.160 452 21 e4 0.000 000 67 e4 K +electron volt-kilogram relationship 1.782 661 907 e-36 0.000 000 011 e-36 kg +elementary charge 1.602 176 6208 e-19 0.000 000 0098 e-19 C +elementary charge over h 2.417 989 262 e14 0.000 000 015 e14 A J^-1 +Faraday constant 96 485.332 89 0.000 59 C mol^-1 +Faraday constant for conventional electric current 96 485.3251 0.0012 C_90 mol^-1 +Fermi coupling constant 1.166 3787 e-5 0.000 0006 e-5 GeV^-2 +fine-structure constant 7.297 352 5664 e-3 0.000 000 0017 e-3 +first radiation constant 3.741 771 790 e-16 0.000 000 046 e-16 W m^2 +first radiation constant for spectral radiance 1.191 042 953 e-16 0.000 000 015 e-16 W m^2 sr^-1 +hartree-atomic mass unit relationship 2.921 262 3197 e-8 0.000 000 0013 e-8 u +hartree-electron volt relationship 27.211 386 02 0.000 000 17 eV +Hartree energy 4.359 744 650 e-18 0.000 000 054 e-18 J +Hartree energy in eV 27.211 386 02 0.000 000 17 eV +hartree-hertz relationship 6.579 683 920 711 e15 0.000 000 000 039 e15 Hz +hartree-inverse meter relationship 2.194 746 313 702 e7 0.000 000 000 013 e7 m^-1 +hartree-joule relationship 4.359 744 650 e-18 0.000 000 054 e-18 J +hartree-kelvin relationship 3.157 7513 e5 0.000 0018 e5 K +hartree-kilogram relationship 4.850 870 129 e-35 0.000 000 060 e-35 kg +helion-electron mass ratio 5495.885 279 22 0.000 000 27 +helion g factor -4.255 250 616 0.000 000 050 +helion mag. mom. -1.074 617 522 e-26 0.000 000 014 e-26 J T^-1 +helion mag. mom. to Bohr magneton ratio -1.158 740 958 e-3 0.000 000 014 e-3 +helion mag. mom. to nuclear magneton ratio -2.127 625 308 0.000 000 025 +helion mass 5.006 412 700 e-27 0.000 000 062 e-27 kg +helion mass energy equivalent 4.499 539 341 e-10 0.000 000 055 e-10 J +helion mass energy equivalent in MeV 2808.391 586 0.000 017 MeV +helion mass in u 3.014 932 246 73 0.000 000 000 12 u +helion molar mass 3.014 932 246 73 e-3 0.000 000 000 12 e-3 kg mol^-1 +helion-proton mass ratio 2.993 152 670 46 0.000 000 000 29 +hertz-atomic mass unit relationship 4.439 821 6616 e-24 0.000 000 0020 e-24 u +hertz-electron volt relationship 4.135 667 662 e-15 0.000 000 025 e-15 eV +hertz-hartree relationship 1.5198298460088 e-16 0.0000000000090e-16 E_h +hertz-inverse meter relationship 3.335 640 951... 
e-9 (exact) m^-1 +hertz-joule relationship 6.626 070 040 e-34 0.000 000 081 e-34 J +hertz-kelvin relationship 4.799 2447 e-11 0.000 0028 e-11 K +hertz-kilogram relationship 7.372 497 201 e-51 0.000 000 091 e-51 kg +inverse fine-structure constant 137.035 999 139 0.000 000 031 +inverse meter-atomic mass unit relationship 1.331 025 049 00 e-15 0.000 000 000 61 e-15 u +inverse meter-electron volt relationship 1.239 841 9739 e-6 0.000 000 0076 e-6 eV +inverse meter-hartree relationship 4.556 335 252 767 e-8 0.000 000 000 027 e-8 E_h +inverse meter-hertz relationship 299 792 458 (exact) Hz +inverse meter-joule relationship 1.986 445 824 e-25 0.000 000 024 e-25 J +inverse meter-kelvin relationship 1.438 777 36 e-2 0.000 000 83 e-2 K +inverse meter-kilogram relationship 2.210 219 057 e-42 0.000 000 027 e-42 kg +inverse of conductance quantum 12 906.403 7278 0.000 0029 ohm +Josephson constant 483 597.8525 e9 0.0030 e9 Hz V^-1 +joule-atomic mass unit relationship 6.700 535 363 e9 0.000 000 082 e9 u +joule-electron volt relationship 6.241 509 126 e18 0.000 000 038 e18 eV +joule-hartree relationship 2.293 712 317 e17 0.000 000 028 e17 E_h +joule-hertz relationship 1.509 190 205 e33 0.000 000 019 e33 Hz +joule-inverse meter relationship 5.034 116 651 e24 0.000 000 062 e24 m^-1 +joule-kelvin relationship 7.242 9731 e22 0.000 0042 e22 K +joule-kilogram relationship 1.112 650 056... e-17 (exact) kg +kelvin-atomic mass unit relationship 9.251 0842 e-14 0.000 0053 e-14 u +kelvin-electron volt relationship 8.617 3303 e-5 0.000 0050 e-5 eV +kelvin-hartree relationship 3.166 8105 e-6 0.000 0018 e-6 E_h +kelvin-hertz relationship 2.083 6612 e10 0.000 0012 e10 Hz +kelvin-inverse meter relationship 69.503 457 0.000 040 m^-1 +kelvin-joule relationship 1.380 648 52 e-23 0.000 000 79 e-23 J +kelvin-kilogram relationship 1.536 178 65 e-40 0.000 000 88 e-40 kg +kilogram-atomic mass unit relationship 6.022 140 857 e26 0.000 000 074 e26 u +kilogram-electron volt relationship 5.609 588 650 e35 0.000 000 034 e35 eV +kilogram-hartree relationship 2.061 485 823 e34 0.000 000 025 e34 E_h +kilogram-hertz relationship 1.356 392 512 e50 0.000 000 017 e50 Hz +kilogram-inverse meter relationship 4.524 438 411 e41 0.000 000 056 e41 m^-1 +kilogram-joule relationship 8.987 551 787... e16 (exact) J +kilogram-kelvin relationship 6.509 6595 e39 0.000 0037 e39 K +lattice parameter of silicon 543.102 0504 e-12 0.000 0089 e-12 m +Loschmidt constant (273.15 K, 100 kPa) 2.651 6467 e25 0.000 0015 e25 m^-3 +Loschmidt constant (273.15 K, 101.325 kPa) 2.686 7811 e25 0.000 0015 e25 m^-3 +mag. constant 12.566 370 614... e-7 (exact) N A^-2 +mag. flux quantum 2.067 833 831 e-15 0.000 000 013 e-15 Wb +molar gas constant 8.314 4598 0.000 0048 J mol^-1 K^-1 +molar mass constant 1 e-3 (exact) kg mol^-1 +molar mass of carbon-12 12 e-3 (exact) kg mol^-1 +molar Planck constant 3.990 312 7110 e-10 0.000 000 0018 e-10 J s mol^-1 +molar Planck constant times c 0.119 626 565 582 0.000 000 000 054 J m mol^-1 +molar volume of ideal gas (273.15 K, 100 kPa) 22.710 947 e-3 0.000 013 e-3 m^3 mol^-1 +molar volume of ideal gas (273.15 K, 101.325 kPa) 22.413 962 e-3 0.000 013 e-3 m^3 mol^-1 +molar volume of silicon 12.058 832 14 e-6 0.000 000 61 e-6 m^3 mol^-1 +Mo x unit 1.002 099 52 e-13 0.000 000 53 e-13 m +muon Compton wavelength 11.734 441 11 e-15 0.000 000 26 e-15 m +muon Compton wavelength over 2 pi 1.867 594 308 e-15 0.000 000 042 e-15 m +muon-electron mass ratio 206.768 2826 0.000 0046 +muon g factor -2.002 331 8418 0.000 000 0013 +muon mag. mom. 
-4.490 448 26 e-26 0.000 000 10 e-26 J T^-1 +muon mag. mom. anomaly 1.165 920 89 e-3 0.000 000 63 e-3 +muon mag. mom. to Bohr magneton ratio -4.841 970 48 e-3 0.000 000 11 e-3 +muon mag. mom. to nuclear magneton ratio -8.890 597 05 0.000 000 20 +muon mass 1.883 531 594 e-28 0.000 000 048 e-28 kg +muon mass energy equivalent 1.692 833 774 e-11 0.000 000 043 e-11 J +muon mass energy equivalent in MeV 105.658 3745 0.000 0024 MeV +muon mass in u 0.113 428 9257 0.000 000 0025 u +muon molar mass 0.113 428 9257 e-3 0.000 000 0025 e-3 kg mol^-1 +muon-neutron mass ratio 0.112 454 5167 0.000 000 0025 +muon-proton mag. mom. ratio -3.183 345 142 0.000 000 071 +muon-proton mass ratio 0.112 609 5262 0.000 000 0025 +muon-tau mass ratio 5.946 49 e-2 0.000 54 e-2 +natural unit of action 1.054 571 800 e-34 0.000 000 013 e-34 J s +natural unit of action in eV s 6.582 119 514 e-16 0.000 000 040 e-16 eV s +natural unit of energy 8.187 105 65 e-14 0.000 000 10 e-14 J +natural unit of energy in MeV 0.510 998 9461 0.000 000 0031 MeV +natural unit of length 386.159 267 64 e-15 0.000 000 18 e-15 m +natural unit of mass 9.109 383 56 e-31 0.000 000 11 e-31 kg +natural unit of mom.um 2.730 924 488 e-22 0.000 000 034 e-22 kg m s^-1 +natural unit of mom.um in MeV/c 0.510 998 9461 0.000 000 0031 MeV/c +natural unit of time 1.288 088 667 12 e-21 0.000 000 000 58 e-21 s +natural unit of velocity 299 792 458 (exact) m s^-1 +neutron Compton wavelength 1.319 590 904 81 e-15 0.000 000 000 88 e-15 m +neutron Compton wavelength over 2 pi 0.210 019 415 36 e-15 0.000 000 000 14 e-15 m +neutron-electron mag. mom. ratio 1.040 668 82 e-3 0.000 000 25 e-3 +neutron-electron mass ratio 1838.683 661 58 0.000 000 90 +neutron g factor -3.826 085 45 0.000 000 90 +neutron gyromag. ratio 1.832 471 72 e8 0.000 000 43 e8 s^-1 T^-1 +neutron gyromag. ratio over 2 pi 29.164 6933 0.000 0069 MHz T^-1 +neutron mag. mom. -0.966 236 50 e-26 0.000 000 23 e-26 J T^-1 +neutron mag. mom. to Bohr magneton ratio -1.041 875 63 e-3 0.000 000 25 e-3 +neutron mag. mom. to nuclear magneton ratio -1.913 042 73 0.000 000 45 +neutron mass 1.674 927 471 e-27 0.000 000 021 e-27 kg +neutron mass energy equivalent 1.505 349 739 e-10 0.000 000 019 e-10 J +neutron mass energy equivalent in MeV 939.565 4133 0.000 0058 MeV +neutron mass in u 1.008 664 915 88 0.000 000 000 49 u +neutron molar mass 1.008 664 915 88 e-3 0.000 000 000 49 e-3 kg mol^-1 +neutron-muon mass ratio 8.892 484 08 0.000 000 20 +neutron-proton mag. mom. ratio -0.684 979 34 0.000 000 16 +neutron-proton mass difference 2.305 573 77 e-30 0.000 000 85 e-30 +neutron-proton mass difference energy equivalent 2.072 146 37 e-13 0.000 000 76 e-13 +neutron-proton mass difference energy equivalent in MeV 1.293 332 05 0.000 000 48 +neutron-proton mass difference in u 0.001 388 449 00 0.000 000 000 51 +neutron-proton mass ratio 1.001 378 418 98 0.000 000 000 51 +neutron-tau mass ratio 0.528 790 0.000 048 +neutron to shielded proton mag. mom. 
ratio -0.684 996 94 0.000 000 16 +Newtonian constant of gravitation 6.674 08 e-11 0.000 31 e-11 m^3 kg^-1 s^-2 +Newtonian constant of gravitation over h-bar c 6.708 61 e-39 0.000 31 e-39 (GeV/c^2)^-2 +nuclear magneton 5.050 783 699 e-27 0.000 000 031 e-27 J T^-1 +nuclear magneton in eV/T 3.152 451 2550 e-8 0.000 000 0015 e-8 eV T^-1 +nuclear magneton in inverse meters per tesla 2.542 623 432 e-2 0.000 000 016 e-2 m^-1 T^-1 +nuclear magneton in K/T 3.658 2690 e-4 0.000 0021 e-4 K T^-1 +nuclear magneton in MHz/T 7.622 593 285 0.000 000 047 MHz T^-1 +Planck constant 6.626 070 040 e-34 0.000 000 081 e-34 J s +Planck constant in eV s 4.135 667 662 e-15 0.000 000 025 e-15 eV s +Planck constant over 2 pi 1.054 571 800 e-34 0.000 000 013 e-34 J s +Planck constant over 2 pi in eV s 6.582 119 514 e-16 0.000 000 040 e-16 eV s +Planck constant over 2 pi times c in MeV fm 197.326 9788 0.000 0012 MeV fm +Planck length 1.616 229 e-35 0.000 038 e-35 m +Planck mass 2.176 470 e-8 0.000 051 e-8 kg +Planck mass energy equivalent in GeV 1.220 910 e19 0.000 029 e19 GeV +Planck temperature 1.416 808 e32 0.000 033 e32 K +Planck time 5.391 16 e-44 0.000 13 e-44 s +proton charge to mass quotient 9.578 833 226 e7 0.000 000 059 e7 C kg^-1 +proton Compton wavelength 1.321 409 853 96 e-15 0.000 000 000 61 e-15 m +proton Compton wavelength over 2 pi 0.210 308910109e-15 0.000 000 000097e-15 m +proton-electron mass ratio 1836.152 673 89 0.000 000 17 +proton g factor 5.585 694 702 0.000 000 017 +proton gyromag. ratio 2.675 221 900 e8 0.000 000 018 e8 s^-1 T^-1 +proton gyromag. ratio over 2 pi 42.577 478 92 0.000 000 29 MHz T^-1 +proton mag. mom. 1.410 606 7873 e-26 0.000 000 0097 e-26 J T^-1 +proton mag. mom. to Bohr magneton ratio 1.521 032 2053 e-3 0.000 000 0046 e-3 +proton mag. mom. to nuclear magneton ratio 2.792 847 3508 0.000 000 0085 +proton mag. shielding correction 25.691 e-6 0.011 e-6 +proton mass 1.672 621 898 e-27 0.000 000 021 e-27 kg +proton mass energy equivalent 1.503 277 593 e-10 0.000 000 018 e-10 J +proton mass energy equivalent in MeV 938.272 0813 0.000 0058 MeV +proton mass in u 1.007 276 466 879 0.000 000 000 091 u +proton molar mass 1.007 276 466 879 e-3 0.000 000 000 091 e-3 kg mol^-1 +proton-muon mass ratio 8.880 243 38 0.000 000 20 +proton-neutron mag. mom. ratio -1.459 898 05 0.000 000 34 +proton-neutron mass ratio 0.998 623 478 44 0.000 000 000 51 +proton rms charge radius 0.8751 e-15 0.0061 e-15 m +proton-tau mass ratio 0.528 063 0.000 048 +quantum of circulation 3.636 947 5486 e-4 0.000 000 0017 e-4 m^2 s^-1 +quantum of circulation times 2 7.273 895 0972 e-4 0.000 000 0033 e-4 m^2 s^-1 +Rydberg constant 10 973 731.568 508 0.000 065 m^-1 +Rydberg constant times c in Hz 3.289 841 960 355 e15 0.000 000 000 019 e15 Hz +Rydberg constant times hc in eV 13.605 693 009 0.000 000 084 eV +Rydberg constant times hc in J 2.179 872 325 e-18 0.000 000 027 e-18 J +Sackur-Tetrode constant (1 K, 100 kPa) -1.151 7084 0.000 0014 +Sackur-Tetrode constant (1 K, 101.325 kPa) -1.164 8714 0.000 0014 +second radiation constant 1.438 777 36 e-2 0.000 000 83 e-2 m K +shielded helion gyromag. ratio 2.037 894 585 e8 0.000 000 027 e8 s^-1 T^-1 +shielded helion gyromag. ratio over 2 pi 32.434 099 66 0.000 000 43 MHz T^-1 +shielded helion mag. mom. -1.074 553 080 e-26 0.000 000 014 e-26 J T^-1 +shielded helion mag. mom. to Bohr magneton ratio -1.158 671 471 e-3 0.000 000 014 e-3 +shielded helion mag. mom. to nuclear magneton ratio -2.127 497 720 0.000 000 025 +shielded helion to proton mag. mom. 
ratio -0.761 766 5603 0.000 000 0092 +shielded helion to shielded proton mag. mom. ratio -0.761 786 1313 0.000 000 0033 +shielded proton gyromag. ratio 2.675 153 171 e8 0.000 000 033 e8 s^-1 T^-1 +shielded proton gyromag. ratio over 2 pi 42.576 385 07 0.000 000 53 MHz T^-1 +shielded proton mag. mom. 1.410 570 547 e-26 0.000 000 018 e-26 J T^-1 +shielded proton mag. mom. to Bohr magneton ratio 1.520 993 128 e-3 0.000 000 017 e-3 +shielded proton mag. mom. to nuclear magneton ratio 2.792 775 600 0.000 000 030 +speed of light in vacuum 299 792 458 (exact) m s^-1 +standard acceleration of gravity 9.806 65 (exact) m s^-2 +standard atmosphere 101 325 (exact) Pa +standard-state pressure 100 000 (exact) Pa +Stefan-Boltzmann constant 5.670 367 e-8 0.000 013 e-8 W m^-2 K^-4 +tau Compton wavelength 0.697 787 e-15 0.000 063 e-15 m +tau Compton wavelength over 2 pi 0.111 056 e-15 0.000 010 e-15 m +tau-electron mass ratio 3477.15 0.31 +tau mass 3.167 47 e-27 0.000 29 e-27 kg +tau mass energy equivalent 2.846 78 e-10 0.000 26 e-10 J +tau mass energy equivalent in MeV 1776.82 0.16 MeV +tau mass in u 1.907 49 0.000 17 u +tau molar mass 1.907 49 e-3 0.000 17 e-3 kg mol^-1 +tau-muon mass ratio 16.8167 0.0015 +tau-neutron mass ratio 1.891 11 0.000 17 +tau-proton mass ratio 1.893 72 0.000 17 +Thomson cross section 0.665 245 871 58 e-28 0.000 000 000 91 e-28 m^2 +triton-electron mass ratio 5496.921 535 88 0.000 000 26 +triton g factor 5.957 924 920 0.000 000 028 +triton mag. mom. 1.504 609 503 e-26 0.000 000 012 e-26 J T^-1 +triton mag. mom. to Bohr magneton ratio 1.622 393 6616 e-3 0.000 000 0076 e-3 +triton mag. mom. to nuclear magneton ratio 2.978 962 460 0.000 000 014 +triton mass 5.007 356 665 e-27 0.000 000 062 e-27 kg +triton mass energy equivalent 4.500 387 735 e-10 0.000 000 055 e-10 J +triton mass energy equivalent in MeV 2808.921 112 0.000 017 MeV +triton mass in u 3.015 500 716 32 0.000 000 000 11 u +triton molar mass 3.015 500 716 32 e-3 0.000 000 000 11 e-3 kg mol^-1 +triton-proton mass ratio 2.993 717 033 48 0.000 000 000 22 +unified atomic mass unit 1.660 539 040 e-27 0.000 000 020 e-27 kg +von Klitzing constant 25 812.807 4555 0.000 0059 ohm +weak mixing angle 0.2223 0.0021 +Wien frequency displacement law constant 5.878 9238 e10 0.000 0034 e10 Hz K^-1 +Wien wavelength displacement law constant 2.897 7729 e-3 0.000 0017 e-3 m K""" + +txt2018 = """\ +alpha particle-electron mass ratio 7294.299 541 42 0.000 000 24 +alpha particle mass 6.644 657 3357 e-27 0.000 000 0020 e-27 kg +alpha particle mass energy equivalent 5.971 920 1914 e-10 0.000 000 0018 e-10 J +alpha particle mass energy equivalent in MeV 3727.379 4066 0.000 0011 MeV +alpha particle mass in u 4.001 506 179 127 0.000 000 000 063 u +alpha particle molar mass 4.001 506 1777 e-3 0.000 000 0012 e-3 kg mol^-1 +alpha particle-proton mass ratio 3.972 599 690 09 0.000 000 000 22 +alpha particle relative atomic mass 4.001 506 179 127 0.000 000 000 063 +Angstrom star 1.000 014 95 e-10 0.000 000 90 e-10 m +atomic mass constant 1.660 539 066 60 e-27 0.000 000 000 50 e-27 kg +atomic mass constant energy equivalent 1.492 418 085 60 e-10 0.000 000 000 45 e-10 J +atomic mass constant energy equivalent in MeV 931.494 102 42 0.000 000 28 MeV +atomic mass unit-electron volt relationship 9.314 941 0242 e8 0.000 000 0028 e8 eV +atomic mass unit-hartree relationship 3.423 177 6874 e7 0.000 000 0010 e7 E_h +atomic mass unit-hertz relationship 2.252 342 718 71 e23 0.000 000 000 68 e23 Hz +atomic mass unit-inverse meter relationship 7.513 006 6104 e14 0.000 
000 0023 e14 m^-1 +atomic mass unit-joule relationship 1.492 418 085 60 e-10 0.000 000 000 45 e-10 J +atomic mass unit-kelvin relationship 1.080 954 019 16 e13 0.000 000 000 33 e13 K +atomic mass unit-kilogram relationship 1.660 539 066 60 e-27 0.000 000 000 50 e-27 kg +atomic unit of 1st hyperpolarizability 3.206 361 3061 e-53 0.000 000 0015 e-53 C^3 m^3 J^-2 +atomic unit of 2nd hyperpolarizability 6.235 379 9905 e-65 0.000 000 0038 e-65 C^4 m^4 J^-3 +atomic unit of action 1.054 571 817... e-34 (exact) J s +atomic unit of charge 1.602 176 634 e-19 (exact) C +atomic unit of charge density 1.081 202 384 57 e12 0.000 000 000 49 e12 C m^-3 +atomic unit of current 6.623 618 237 510 e-3 0.000 000 000 013 e-3 A +atomic unit of electric dipole mom. 8.478 353 6255 e-30 0.000 000 0013 e-30 C m +atomic unit of electric field 5.142 206 747 63 e11 0.000 000 000 78 e11 V m^-1 +atomic unit of electric field gradient 9.717 362 4292 e21 0.000 000 0029 e21 V m^-2 +atomic unit of electric polarizability 1.648 777 274 36 e-41 0.000 000 000 50 e-41 C^2 m^2 J^-1 +atomic unit of electric potential 27.211 386 245 988 0.000 000 000 053 V +atomic unit of electric quadrupole mom. 4.486 551 5246 e-40 0.000 000 0014 e-40 C m^2 +atomic unit of energy 4.359 744 722 2071 e-18 0.000 000 000 0085 e-18 J +atomic unit of force 8.238 723 4983 e-8 0.000 000 0012 e-8 N +atomic unit of length 5.291 772 109 03 e-11 0.000 000 000 80 e-11 m +atomic unit of mag. dipole mom. 1.854 802 015 66 e-23 0.000 000 000 56 e-23 J T^-1 +atomic unit of mag. flux density 2.350 517 567 58 e5 0.000 000 000 71 e5 T +atomic unit of magnetizability 7.891 036 6008 e-29 0.000 000 0048 e-29 J T^-2 +atomic unit of mass 9.109 383 7015 e-31 0.000 000 0028 e-31 kg +atomic unit of momentum 1.992 851 914 10 e-24 0.000 000 000 30 e-24 kg m s^-1 +atomic unit of permittivity 1.112 650 055 45 e-10 0.000 000 000 17 e-10 F m^-1 +atomic unit of time 2.418 884 326 5857 e-17 0.000 000 000 0047 e-17 s +atomic unit of velocity 2.187 691 263 64 e6 0.000 000 000 33 e6 m s^-1 +Avogadro constant 6.022 140 76 e23 (exact) mol^-1 +Bohr magneton 9.274 010 0783 e-24 0.000 000 0028 e-24 J T^-1 +Bohr magneton in eV/T 5.788 381 8060 e-5 0.000 000 0017 e-5 eV T^-1 +Bohr magneton in Hz/T 1.399 624 493 61 e10 0.000 000 000 42 e10 Hz T^-1 +Bohr magneton in inverse meter per tesla 46.686 447 783 0.000 000 014 m^-1 T^-1 +Bohr magneton in K/T 0.671 713 815 63 0.000 000 000 20 K T^-1 +Bohr radius 5.291 772 109 03 e-11 0.000 000 000 80 e-11 m +Boltzmann constant 1.380 649 e-23 (exact) J K^-1 +Boltzmann constant in eV/K 8.617 333 262... e-5 (exact) eV K^-1 +Boltzmann constant in Hz/K 2.083 661 912... e10 (exact) Hz K^-1 +Boltzmann constant in inverse meter per kelvin 69.503 480 04... (exact) m^-1 K^-1 +characteristic impedance of vacuum 376.730 313 668 0.000 000 057 ohm +classical electron radius 2.817 940 3262 e-15 0.000 000 0013 e-15 m +Compton wavelength 2.426 310 238 67 e-12 0.000 000 000 73 e-12 m +conductance quantum 7.748 091 729... e-5 (exact) S +conventional value of ampere-90 1.000 000 088 87... (exact) A +conventional value of coulomb-90 1.000 000 088 87... (exact) C +conventional value of farad-90 0.999 999 982 20... (exact) F +conventional value of henry-90 1.000 000 017 79... (exact) H +conventional value of Josephson constant 483 597.9 e9 (exact) Hz V^-1 +conventional value of ohm-90 1.000 000 017 79... (exact) ohm +conventional value of volt-90 1.000 000 106 66... 
(exact) V +conventional value of von Klitzing constant 25 812.807 (exact) ohm +conventional value of watt-90 1.000 000 195 53... (exact) W +Cu x unit 1.002 076 97 e-13 0.000 000 28 e-13 m +deuteron-electron mag. mom. ratio -4.664 345 551 e-4 0.000 000 012 e-4 +deuteron-electron mass ratio 3670.482 967 88 0.000 000 13 +deuteron g factor 0.857 438 2338 0.000 000 0022 +deuteron mag. mom. 4.330 735 094 e-27 0.000 000 011 e-27 J T^-1 +deuteron mag. mom. to Bohr magneton ratio 4.669 754 570 e-4 0.000 000 012 e-4 +deuteron mag. mom. to nuclear magneton ratio 0.857 438 2338 0.000 000 0022 +deuteron mass 3.343 583 7724 e-27 0.000 000 0010 e-27 kg +deuteron mass energy equivalent 3.005 063 231 02 e-10 0.000 000 000 91 e-10 J +deuteron mass energy equivalent in MeV 1875.612 942 57 0.000 000 57 MeV +deuteron mass in u 2.013 553 212 745 0.000 000 000 040 u +deuteron molar mass 2.013 553 212 05 e-3 0.000 000 000 61 e-3 kg mol^-1 +deuteron-neutron mag. mom. ratio -0.448 206 53 0.000 000 11 +deuteron-proton mag. mom. ratio 0.307 012 209 39 0.000 000 000 79 +deuteron-proton mass ratio 1.999 007 501 39 0.000 000 000 11 +deuteron relative atomic mass 2.013 553 212 745 0.000 000 000 040 +deuteron rms charge radius 2.127 99 e-15 0.000 74 e-15 m +electron charge to mass quotient -1.758 820 010 76 e11 0.000 000 000 53 e11 C kg^-1 +electron-deuteron mag. mom. ratio -2143.923 4915 0.000 0056 +electron-deuteron mass ratio 2.724 437 107 462 e-4 0.000 000 000 096 e-4 +electron g factor -2.002 319 304 362 56 0.000 000 000 000 35 +electron gyromag. ratio 1.760 859 630 23 e11 0.000 000 000 53 e11 s^-1 T^-1 +electron gyromag. ratio in MHz/T 28 024.951 4242 0.000 0085 MHz T^-1 +electron-helion mass ratio 1.819 543 074 573 e-4 0.000 000 000 079 e-4 +electron mag. mom. -9.284 764 7043 e-24 0.000 000 0028 e-24 J T^-1 +electron mag. mom. anomaly 1.159 652 181 28 e-3 0.000 000 000 18 e-3 +electron mag. mom. to Bohr magneton ratio -1.001 159 652 181 28 0.000 000 000 000 18 +electron mag. mom. to nuclear magneton ratio -1838.281 971 88 0.000 000 11 +electron mass 9.109 383 7015 e-31 0.000 000 0028 e-31 kg +electron mass energy equivalent 8.187 105 7769 e-14 0.000 000 0025 e-14 J +electron mass energy equivalent in MeV 0.510 998 950 00 0.000 000 000 15 MeV +electron mass in u 5.485 799 090 65 e-4 0.000 000 000 16 e-4 u +electron molar mass 5.485 799 0888 e-7 0.000 000 0017 e-7 kg mol^-1 +electron-muon mag. mom. ratio 206.766 9883 0.000 0046 +electron-muon mass ratio 4.836 331 69 e-3 0.000 000 11 e-3 +electron-neutron mag. mom. ratio 960.920 50 0.000 23 +electron-neutron mass ratio 5.438 673 4424 e-4 0.000 000 0026 e-4 +electron-proton mag. mom. ratio -658.210 687 89 0.000 000 20 +electron-proton mass ratio 5.446 170 214 87 e-4 0.000 000 000 33 e-4 +electron relative atomic mass 5.485 799 090 65 e-4 0.000 000 000 16 e-4 +electron-tau mass ratio 2.875 85 e-4 0.000 19 e-4 +electron to alpha particle mass ratio 1.370 933 554 787 e-4 0.000 000 000 045 e-4 +electron to shielded helion mag. mom. ratio 864.058 257 0.000 010 +electron to shielded proton mag. mom. ratio -658.227 5971 0.000 0072 +electron-triton mass ratio 1.819 200 062 251 e-4 0.000 000 000 090 e-4 +electron volt 1.602 176 634 e-19 (exact) J +electron volt-atomic mass unit relationship 1.073 544 102 33 e-9 0.000 000 000 32 e-9 u +electron volt-hartree relationship 3.674 932 217 5655 e-2 0.000 000 000 0071 e-2 E_h +electron volt-hertz relationship 2.417 989 242... e14 (exact) Hz +electron volt-inverse meter relationship 8.065 543 937... 
e5 (exact) m^-1 +electron volt-joule relationship 1.602 176 634 e-19 (exact) J +electron volt-kelvin relationship 1.160 451 812... e4 (exact) K +electron volt-kilogram relationship 1.782 661 921... e-36 (exact) kg +elementary charge 1.602 176 634 e-19 (exact) C +elementary charge over h-bar 1.519 267 447... e15 (exact) A J^-1 +Faraday constant 96 485.332 12... (exact) C mol^-1 +Fermi coupling constant 1.166 3787 e-5 0.000 0006 e-5 GeV^-2 +fine-structure constant 7.297 352 5693 e-3 0.000 000 0011 e-3 +first radiation constant 3.741 771 852... e-16 (exact) W m^2 +first radiation constant for spectral radiance 1.191 042 972... e-16 (exact) W m^2 sr^-1 +hartree-atomic mass unit relationship 2.921 262 322 05 e-8 0.000 000 000 88 e-8 u +hartree-electron volt relationship 27.211 386 245 988 0.000 000 000 053 eV +Hartree energy 4.359 744 722 2071 e-18 0.000 000 000 0085 e-18 J +Hartree energy in eV 27.211 386 245 988 0.000 000 000 053 eV +hartree-hertz relationship 6.579 683 920 502 e15 0.000 000 000 013 e15 Hz +hartree-inverse meter relationship 2.194 746 313 6320 e7 0.000 000 000 0043 e7 m^-1 +hartree-joule relationship 4.359 744 722 2071 e-18 0.000 000 000 0085 e-18 J +hartree-kelvin relationship 3.157 750 248 0407 e5 0.000 000 000 0061 e5 K +hartree-kilogram relationship 4.850 870 209 5432 e-35 0.000 000 000 0094 e-35 kg +helion-electron mass ratio 5495.885 280 07 0.000 000 24 +helion g factor -4.255 250 615 0.000 000 050 +helion mag. mom. -1.074 617 532 e-26 0.000 000 013 e-26 J T^-1 +helion mag. mom. to Bohr magneton ratio -1.158 740 958 e-3 0.000 000 014 e-3 +helion mag. mom. to nuclear magneton ratio -2.127 625 307 0.000 000 025 +helion mass 5.006 412 7796 e-27 0.000 000 0015 e-27 kg +helion mass energy equivalent 4.499 539 4125 e-10 0.000 000 0014 e-10 J +helion mass energy equivalent in MeV 2808.391 607 43 0.000 000 85 MeV +helion mass in u 3.014 932 247 175 0.000 000 000 097 u +helion molar mass 3.014 932 246 13 e-3 0.000 000 000 91 e-3 kg mol^-1 +helion-proton mass ratio 2.993 152 671 67 0.000 000 000 13 +helion relative atomic mass 3.014 932 247 175 0.000 000 000 097 +helion shielding shift 5.996 743 e-5 0.000 010 e-5 +hertz-atomic mass unit relationship 4.439 821 6652 e-24 0.000 000 0013 e-24 u +hertz-electron volt relationship 4.135 667 696... e-15 (exact) eV +hertz-hartree relationship 1.519 829 846 0570 e-16 0.000 000 000 0029 e-16 E_h +hertz-inverse meter relationship 3.335 640 951... e-9 (exact) m^-1 +hertz-joule relationship 6.626 070 15 e-34 (exact) J +hertz-kelvin relationship 4.799 243 073... e-11 (exact) K +hertz-kilogram relationship 7.372 497 323... e-51 (exact) kg +hyperfine transition frequency of Cs-133 9 192 631 770 (exact) Hz +inverse fine-structure constant 137.035 999 084 0.000 000 021 +inverse meter-atomic mass unit relationship 1.331 025 050 10 e-15 0.000 000 000 40 e-15 u +inverse meter-electron volt relationship 1.239 841 984... e-6 (exact) eV +inverse meter-hartree relationship 4.556 335 252 9120 e-8 0.000 000 000 0088 e-8 E_h +inverse meter-hertz relationship 299 792 458 (exact) Hz +inverse meter-joule relationship 1.986 445 857... e-25 (exact) J +inverse meter-kelvin relationship 1.438 776 877... e-2 (exact) K +inverse meter-kilogram relationship 2.210 219 094... e-42 (exact) kg +inverse of conductance quantum 12 906.403 72... (exact) ohm +Josephson constant 483 597.848 4... e9 (exact) Hz V^-1 +joule-atomic mass unit relationship 6.700 535 2565 e9 0.000 000 0020 e9 u +joule-electron volt relationship 6.241 509 074... 
e18 (exact) eV +joule-hartree relationship 2.293 712 278 3963 e17 0.000 000 000 0045 e17 E_h +joule-hertz relationship 1.509 190 179... e33 (exact) Hz +joule-inverse meter relationship 5.034 116 567... e24 (exact) m^-1 +joule-kelvin relationship 7.242 970 516... e22 (exact) K +joule-kilogram relationship 1.112 650 056... e-17 (exact) kg +kelvin-atomic mass unit relationship 9.251 087 3014 e-14 0.000 000 0028 e-14 u +kelvin-electron volt relationship 8.617 333 262... e-5 (exact) eV +kelvin-hartree relationship 3.166 811 563 4556 e-6 0.000 000 000 0061 e-6 E_h +kelvin-hertz relationship 2.083 661 912... e10 (exact) Hz +kelvin-inverse meter relationship 69.503 480 04... (exact) m^-1 +kelvin-joule relationship 1.380 649 e-23 (exact) J +kelvin-kilogram relationship 1.536 179 187... e-40 (exact) kg +kilogram-atomic mass unit relationship 6.022 140 7621 e26 0.000 000 0018 e26 u +kilogram-electron volt relationship 5.609 588 603... e35 (exact) eV +kilogram-hartree relationship 2.061 485 788 7409 e34 0.000 000 000 0040 e34 E_h +kilogram-hertz relationship 1.356 392 489... e50 (exact) Hz +kilogram-inverse meter relationship 4.524 438 335... e41 (exact) m^-1 +kilogram-joule relationship 8.987 551 787... e16 (exact) J +kilogram-kelvin relationship 6.509 657 260... e39 (exact) K +lattice parameter of silicon 5.431 020 511 e-10 0.000 000 089 e-10 m +lattice spacing of ideal Si (220) 1.920 155 716 e-10 0.000 000 032 e-10 m +Loschmidt constant (273.15 K, 100 kPa) 2.651 645 804... e25 (exact) m^-3 +Loschmidt constant (273.15 K, 101.325 kPa) 2.686 780 111... e25 (exact) m^-3 +luminous efficacy 683 (exact) lm W^-1 +mag. flux quantum 2.067 833 848... e-15 (exact) Wb +molar gas constant 8.314 462 618... (exact) J mol^-1 K^-1 +molar mass constant 0.999 999 999 65 e-3 0.000 000 000 30 e-3 kg mol^-1 +molar mass of carbon-12 11.999 999 9958 e-3 0.000 000 0036 e-3 kg mol^-1 +molar Planck constant 3.990 312 712... e-10 (exact) J Hz^-1 mol^-1 +molar volume of ideal gas (273.15 K, 100 kPa) 22.710 954 64... e-3 (exact) m^3 mol^-1 +molar volume of ideal gas (273.15 K, 101.325 kPa) 22.413 969 54... e-3 (exact) m^3 mol^-1 +molar volume of silicon 1.205 883 199 e-5 0.000 000 060 e-5 m^3 mol^-1 +Mo x unit 1.002 099 52 e-13 0.000 000 53 e-13 m +muon Compton wavelength 1.173 444 110 e-14 0.000 000 026 e-14 m +muon-electron mass ratio 206.768 2830 0.000 0046 +muon g factor -2.002 331 8418 0.000 000 0013 +muon mag. mom. -4.490 448 30 e-26 0.000 000 10 e-26 J T^-1 +muon mag. mom. anomaly 1.165 920 89 e-3 0.000 000 63 e-3 +muon mag. mom. to Bohr magneton ratio -4.841 970 47 e-3 0.000 000 11 e-3 +muon mag. mom. to nuclear magneton ratio -8.890 597 03 0.000 000 20 +muon mass 1.883 531 627 e-28 0.000 000 042 e-28 kg +muon mass energy equivalent 1.692 833 804 e-11 0.000 000 038 e-11 J +muon mass energy equivalent in MeV 105.658 3755 0.000 0023 MeV +muon mass in u 0.113 428 9259 0.000 000 0025 u +muon molar mass 1.134 289 259 e-4 0.000 000 025 e-4 kg mol^-1 +muon-neutron mass ratio 0.112 454 5170 0.000 000 0025 +muon-proton mag. mom. ratio -3.183 345 142 0.000 000 071 +muon-proton mass ratio 0.112 609 5264 0.000 000 0025 +muon-tau mass ratio 5.946 35 e-2 0.000 40 e-2 +natural unit of action 1.054 571 817... e-34 (exact) J s +natural unit of action in eV s 6.582 119 569... 
e-16 (exact) eV s +natural unit of energy 8.187 105 7769 e-14 0.000 000 0025 e-14 J +natural unit of energy in MeV 0.510 998 950 00 0.000 000 000 15 MeV +natural unit of length 3.861 592 6796 e-13 0.000 000 0012 e-13 m +natural unit of mass 9.109 383 7015 e-31 0.000 000 0028 e-31 kg +natural unit of momentum 2.730 924 530 75 e-22 0.000 000 000 82 e-22 kg m s^-1 +natural unit of momentum in MeV/c 0.510 998 950 00 0.000 000 000 15 MeV/c +natural unit of time 1.288 088 668 19 e-21 0.000 000 000 39 e-21 s +natural unit of velocity 299 792 458 (exact) m s^-1 +neutron Compton wavelength 1.319 590 905 81 e-15 0.000 000 000 75 e-15 m +neutron-electron mag. mom. ratio 1.040 668 82 e-3 0.000 000 25 e-3 +neutron-electron mass ratio 1838.683 661 73 0.000 000 89 +neutron g factor -3.826 085 45 0.000 000 90 +neutron gyromag. ratio 1.832 471 71 e8 0.000 000 43 e8 s^-1 T^-1 +neutron gyromag. ratio in MHz/T 29.164 6931 0.000 0069 MHz T^-1 +neutron mag. mom. -9.662 3651 e-27 0.000 0023 e-27 J T^-1 +neutron mag. mom. to Bohr magneton ratio -1.041 875 63 e-3 0.000 000 25 e-3 +neutron mag. mom. to nuclear magneton ratio -1.913 042 73 0.000 000 45 +neutron mass 1.674 927 498 04 e-27 0.000 000 000 95 e-27 kg +neutron mass energy equivalent 1.505 349 762 87 e-10 0.000 000 000 86 e-10 J +neutron mass energy equivalent in MeV 939.565 420 52 0.000 000 54 MeV +neutron mass in u 1.008 664 915 95 0.000 000 000 49 u +neutron molar mass 1.008 664 915 60 e-3 0.000 000 000 57 e-3 kg mol^-1 +neutron-muon mass ratio 8.892 484 06 0.000 000 20 +neutron-proton mag. mom. ratio -0.684 979 34 0.000 000 16 +neutron-proton mass difference 2.305 574 35 e-30 0.000 000 82 e-30 kg +neutron-proton mass difference energy equivalent 2.072 146 89 e-13 0.000 000 74 e-13 J +neutron-proton mass difference energy equivalent in MeV 1.293 332 36 0.000 000 46 MeV +neutron-proton mass difference in u 1.388 449 33 e-3 0.000 000 49 e-3 u +neutron-proton mass ratio 1.001 378 419 31 0.000 000 000 49 +neutron relative atomic mass 1.008 664 915 95 0.000 000 000 49 +neutron-tau mass ratio 0.528 779 0.000 036 +neutron to shielded proton mag. mom. ratio -0.684 996 94 0.000 000 16 +Newtonian constant of gravitation 6.674 30 e-11 0.000 15 e-11 m^3 kg^-1 s^-2 +Newtonian constant of gravitation over h-bar c 6.708 83 e-39 0.000 15 e-39 (GeV/c^2)^-2 +nuclear magneton 5.050 783 7461 e-27 0.000 000 0015 e-27 J T^-1 +nuclear magneton in eV/T 3.152 451 258 44 e-8 0.000 000 000 96 e-8 eV T^-1 +nuclear magneton in inverse meter per tesla 2.542 623 413 53 e-2 0.000 000 000 78 e-2 m^-1 T^-1 +nuclear magneton in K/T 3.658 267 7756 e-4 0.000 000 0011 e-4 K T^-1 +nuclear magneton in MHz/T 7.622 593 2291 0.000 000 0023 MHz T^-1 +Planck constant 6.626 070 15 e-34 (exact) J Hz^-1 +Planck constant in eV/Hz 4.135 667 696... e-15 (exact) eV Hz^-1 +Planck length 1.616 255 e-35 0.000 018 e-35 m +Planck mass 2.176 434 e-8 0.000 024 e-8 kg +Planck mass energy equivalent in GeV 1.220 890 e19 0.000 014 e19 GeV +Planck temperature 1.416 784 e32 0.000 016 e32 K +Planck time 5.391 247 e-44 0.000 060 e-44 s +proton charge to mass quotient 9.578 833 1560 e7 0.000 000 0029 e7 C kg^-1 +proton Compton wavelength 1.321 409 855 39 e-15 0.000 000 000 40 e-15 m +proton-electron mass ratio 1836.152 673 43 0.000 000 11 +proton g factor 5.585 694 6893 0.000 000 0016 +proton gyromag. ratio 2.675 221 8744 e8 0.000 000 0011 e8 s^-1 T^-1 +proton gyromag. ratio in MHz/T 42.577 478 518 0.000 000 018 MHz T^-1 +proton mag. mom. 1.410 606 797 36 e-26 0.000 000 000 60 e-26 J T^-1 +proton mag. mom. 
to Bohr magneton ratio 1.521 032 202 30 e-3 0.000 000 000 46 e-3 +proton mag. mom. to nuclear magneton ratio 2.792 847 344 63 0.000 000 000 82 +proton mag. shielding correction 2.5689 e-5 0.0011 e-5 +proton mass 1.672 621 923 69 e-27 0.000 000 000 51 e-27 kg +proton mass energy equivalent 1.503 277 615 98 e-10 0.000 000 000 46 e-10 J +proton mass energy equivalent in MeV 938.272 088 16 0.000 000 29 MeV +proton mass in u 1.007 276 466 621 0.000 000 000 053 u +proton molar mass 1.007 276 466 27 e-3 0.000 000 000 31 e-3 kg mol^-1 +proton-muon mass ratio 8.880 243 37 0.000 000 20 +proton-neutron mag. mom. ratio -1.459 898 05 0.000 000 34 +proton-neutron mass ratio 0.998 623 478 12 0.000 000 000 49 +proton relative atomic mass 1.007 276 466 621 0.000 000 000 053 +proton rms charge radius 8.414 e-16 0.019 e-16 m +proton-tau mass ratio 0.528 051 0.000 036 +quantum of circulation 3.636 947 5516 e-4 0.000 000 0011 e-4 m^2 s^-1 +quantum of circulation times 2 7.273 895 1032 e-4 0.000 000 0022 e-4 m^2 s^-1 +reduced Compton wavelength 3.861 592 6796 e-13 0.000 000 0012 e-13 m +reduced muon Compton wavelength 1.867 594 306 e-15 0.000 000 042 e-15 m +reduced neutron Compton wavelength 2.100 194 1552 e-16 0.000 000 0012 e-16 m +reduced Planck constant 1.054 571 817... e-34 (exact) J s +reduced Planck constant in eV s 6.582 119 569... e-16 (exact) eV s +reduced Planck constant times c in MeV fm 197.326 980 4... (exact) MeV fm +reduced proton Compton wavelength 2.103 089 103 36 e-16 0.000 000 000 64 e-16 m +reduced tau Compton wavelength 1.110 538 e-16 0.000 075 e-16 m +Rydberg constant 10 973 731.568 160 0.000 021 m^-1 +Rydberg constant times c in Hz 3.289 841 960 2508 e15 0.000 000 000 0064 e15 Hz +Rydberg constant times hc in eV 13.605 693 122 994 0.000 000 000 026 eV +Rydberg constant times hc in J 2.179 872 361 1035 e-18 0.000 000 000 0042 e-18 J +Sackur-Tetrode constant (1 K, 100 kPa) -1.151 707 537 06 0.000 000 000 45 +Sackur-Tetrode constant (1 K, 101.325 kPa) -1.164 870 523 58 0.000 000 000 45 +second radiation constant 1.438 776 877... e-2 (exact) m K +shielded helion gyromag. ratio 2.037 894 569 e8 0.000 000 024 e8 s^-1 T^-1 +shielded helion gyromag. ratio in MHz/T 32.434 099 42 0.000 000 38 MHz T^-1 +shielded helion mag. mom. -1.074 553 090 e-26 0.000 000 013 e-26 J T^-1 +shielded helion mag. mom. to Bohr magneton ratio -1.158 671 471 e-3 0.000 000 014 e-3 +shielded helion mag. mom. to nuclear magneton ratio -2.127 497 719 0.000 000 025 +shielded helion to proton mag. mom. ratio -0.761 766 5618 0.000 000 0089 +shielded helion to shielded proton mag. mom. ratio -0.761 786 1313 0.000 000 0033 +shielded proton gyromag. ratio 2.675 153 151 e8 0.000 000 029 e8 s^-1 T^-1 +shielded proton gyromag. ratio in MHz/T 42.576 384 74 0.000 000 46 MHz T^-1 +shielded proton mag. mom. 1.410 570 560 e-26 0.000 000 015 e-26 J T^-1 +shielded proton mag. mom. to Bohr magneton ratio 1.520 993 128 e-3 0.000 000 017 e-3 +shielded proton mag. mom. to nuclear magneton ratio 2.792 775 599 0.000 000 030 +shielding difference of d and p in HD 2.0200 e-8 0.0020 e-8 +shielding difference of t and p in HT 2.4140 e-8 0.0020 e-8 +speed of light in vacuum 299 792 458 (exact) m s^-1 +standard acceleration of gravity 9.806 65 (exact) m s^-2 +standard atmosphere 101 325 (exact) Pa +standard-state pressure 100 000 (exact) Pa +Stefan-Boltzmann constant 5.670 374 419... 
e-8 (exact) W m^-2 K^-4 +tau Compton wavelength 6.977 71 e-16 0.000 47 e-16 m +tau-electron mass ratio 3477.23 0.23 +tau energy equivalent 1776.86 0.12 MeV +tau mass 3.167 54 e-27 0.000 21 e-27 kg +tau mass energy equivalent 2.846 84 e-10 0.000 19 e-10 J +tau mass in u 1.907 54 0.000 13 u +tau molar mass 1.907 54 e-3 0.000 13 e-3 kg mol^-1 +tau-muon mass ratio 16.8170 0.0011 +tau-neutron mass ratio 1.891 15 0.000 13 +tau-proton mass ratio 1.893 76 0.000 13 +Thomson cross section 6.652 458 7321 e-29 0.000 000 0060 e-29 m^2 +triton-electron mass ratio 5496.921 535 73 0.000 000 27 +triton g factor 5.957 924 931 0.000 000 012 +triton mag. mom. 1.504 609 5202 e-26 0.000 000 0030 e-26 J T^-1 +triton mag. mom. to Bohr magneton ratio 1.622 393 6651 e-3 0.000 000 0032 e-3 +triton mag. mom. to nuclear magneton ratio 2.978 962 4656 0.000 000 0059 +triton mass 5.007 356 7446 e-27 0.000 000 0015 e-27 kg +triton mass energy equivalent 4.500 387 8060 e-10 0.000 000 0014 e-10 J +triton mass energy equivalent in MeV 2808.921 132 98 0.000 000 85 MeV +triton mass in u 3.015 500 716 21 0.000 000 000 12 u +triton molar mass 3.015 500 715 17 e-3 0.000 000 000 92 e-3 kg mol^-1 +triton-proton mass ratio 2.993 717 034 14 0.000 000 000 15 +triton relative atomic mass 3.015 500 716 21 0.000 000 000 12 +triton to proton mag. mom. ratio 1.066 639 9191 0.000 000 0021 +unified atomic mass unit 1.660 539 066 60 e-27 0.000 000 000 50 e-27 kg +vacuum electric permittivity 8.854 187 8128 e-12 0.000 000 0013 e-12 F m^-1 +vacuum mag. permeability 1.256 637 062 12 e-6 0.000 000 000 19 e-6 N A^-2 +von Klitzing constant 25 812.807 45... (exact) ohm +weak mixing angle 0.222 90 0.000 30 +Wien frequency displacement law constant 5.878 925 757... e10 (exact) Hz K^-1 +Wien wavelength displacement law constant 2.897 771 955... 
e-3 (exact) m K +W to Z mass ratio 0.881 53 0.000 17 """ + +# ----------------------------------------------------------------------------- + +physical_constants: dict[str, tuple[float, str, float]] = {} + + +def parse_constants_2002to2014(d: str) -> dict[str, tuple[float, str, float]]: + constants = {} + for line in d.split('\n'): + name = line[:55].rstrip() + val = float(line[55:77].replace(' ', '').replace('...', '')) + uncert = float(line[77:99].replace(' ', '').replace('(exact)', '0')) + units = line[99:].rstrip() + constants[name] = (val, units, uncert) + return constants + + +def parse_constants_2018toXXXX(d: str) -> dict[str, tuple[float, str, float]]: + constants = {} + for line in d.split('\n'): + name = line[:60].rstrip() + val = float(line[60:85].replace(' ', '').replace('...', '')) + uncert = float(line[85:110].replace(' ', '').replace('(exact)', '0')) + units = line[110:].rstrip() + constants[name] = (val, units, uncert) + return constants + + +_physical_constants_2002 = parse_constants_2002to2014(txt2002) +_physical_constants_2006 = parse_constants_2002to2014(txt2006) +_physical_constants_2010 = parse_constants_2002to2014(txt2010) +_physical_constants_2014 = parse_constants_2002to2014(txt2014) +_physical_constants_2018 = parse_constants_2018toXXXX(txt2018) + + +physical_constants.update(_physical_constants_2002) +physical_constants.update(_physical_constants_2006) +physical_constants.update(_physical_constants_2010) +physical_constants.update(_physical_constants_2014) +physical_constants.update(_physical_constants_2018) +_current_constants = _physical_constants_2018 +_current_codata = "CODATA 2018" + +# check obsolete values +_obsolete_constants = {} +for k in physical_constants: + if k not in _current_constants: + _obsolete_constants[k] = True + +# generate some additional aliases +_aliases = {} +for k in _physical_constants_2002: + if 'magn.' in k: + _aliases[k] = k.replace('magn.', 'mag.') +for k in _physical_constants_2006: + if 'momentum' in k: + _aliases[k] = k.replace('momentum', 'mom.um') +for k in _physical_constants_2018: + if 'momentum' in k: + _aliases[k] = k.replace('momentum', 'mom.um') + +# CODATA 2018: renamed and no longer exact; use as aliases +_aliases['mag. constant'] = 'vacuum mag. 
permeability' +_aliases['electric constant'] = 'vacuum electric permittivity' + + +class ConstantWarning(DeprecationWarning): + """Accessing a constant no longer in current CODATA data set""" + pass + + +def _check_obsolete(key: str) -> None: + if key in _obsolete_constants and key not in _aliases: + warnings.warn(f"Constant '{key}' is not in current {_current_codata} data set", + ConstantWarning, stacklevel=3) + + +def value(key: str) -> float: + """ + Value in physical_constants indexed by key + + Parameters + ---------- + key : Python string + Key in dictionary `physical_constants` + + Returns + ------- + value : float + Value in `physical_constants` corresponding to `key` + + Examples + -------- + >>> from scipy import constants + >>> constants.value('elementary charge') + 1.602176634e-19 + + """ + _check_obsolete(key) + return physical_constants[key][0] + + +def unit(key: str) -> str: + """ + Unit in physical_constants indexed by key + + Parameters + ---------- + key : Python string + Key in dictionary `physical_constants` + + Returns + ------- + unit : Python string + Unit in `physical_constants` corresponding to `key` + + Examples + -------- + >>> from scipy import constants + >>> constants.unit('proton mass') + 'kg' + + """ + _check_obsolete(key) + return physical_constants[key][1] + + +def precision(key: str) -> float: + """ + Relative precision in physical_constants indexed by key + + Parameters + ---------- + key : Python string + Key in dictionary `physical_constants` + + Returns + ------- + prec : float + Relative precision in `physical_constants` corresponding to `key` + + Examples + -------- + >>> from scipy import constants + >>> constants.precision('proton mass') + 5.1e-37 + + """ + _check_obsolete(key) + return physical_constants[key][2] / physical_constants[key][0] + + +def find(sub: str | None = None, disp: bool = False) -> Any: + """ + Return list of physical_constant keys containing a given string. + + Parameters + ---------- + sub : str + Sub-string to search keys for. By default, return all keys. + disp : bool + If True, print the keys that are found and return None. + Otherwise, return the list of keys without printing anything. + + Returns + ------- + keys : list or None + If `disp` is False, the list of keys is returned. + Otherwise, None is returned. + + Examples + -------- + >>> from scipy.constants import find, physical_constants + + Which keys in the ``physical_constants`` dictionary contain 'boltzmann'? + + >>> find('boltzmann') + ['Boltzmann constant', + 'Boltzmann constant in Hz/K', + 'Boltzmann constant in eV/K', + 'Boltzmann constant in inverse meter per kelvin', + 'Stefan-Boltzmann constant'] + + Get the constant called 'Boltzmann constant in Hz/K': + + >>> physical_constants['Boltzmann constant in Hz/K'] + (20836619120.0, 'Hz K^-1', 0.0) + + Find constants with 'radius' in the key: + + >>> find('radius') + ['Bohr radius', + 'classical electron radius', + 'deuteron rms charge radius', + 'proton rms charge radius'] + >>> physical_constants['classical electron radius'] + (2.8179403262e-15, 'm', 1.3e-24) + + """ + if sub is None: + result = list(_current_constants.keys()) + else: + result = [key for key in _current_constants + if sub.lower() in key.lower()] + + result.sort() + if disp: + for key in result: + print(key) + return + else: + return result + + +c = value('speed of light in vacuum') +mu0 = value('vacuum mag. 
permeability') +epsilon0 = value('vacuum electric permittivity') + +# Table is lacking some digits for exact values: calculate from definition +exact_values = { + 'joule-kilogram relationship': (1 / (c * c), 'kg', 0.0), + 'kilogram-joule relationship': (c * c, 'J', 0.0), + 'hertz-inverse meter relationship': (1 / c, 'm^-1', 0.0), +} + +# sanity check +for key in exact_values: + val = physical_constants[key][0] + if abs(exact_values[key][0] - val) / val > 1e-9: + raise ValueError("Constants.codata: exact values too far off.") + if exact_values[key][2] == 0 and physical_constants[key][2] != 0: + raise ValueError("Constants.codata: value not exact") + +physical_constants.update(exact_values) + +_tested_keys = ['natural unit of velocity', + 'natural unit of action', + 'natural unit of action in eV s', + 'natural unit of mass', + 'natural unit of energy', + 'natural unit of energy in MeV', + 'natural unit of mom.um', + 'natural unit of mom.um in MeV/c', + 'natural unit of length', + 'natural unit of time'] + +# finally, insert aliases for values +for k, v in list(_aliases.items()): + if v in _current_constants or v in _tested_keys: + physical_constants[k] = physical_constants[v] + else: + del _aliases[k] diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/constants/_constants.py b/llmeval-env/lib/python3.10/site-packages/scipy/constants/_constants.py new file mode 100644 index 0000000000000000000000000000000000000000..fa379828ddd62bedc92f2e0e81b51ce550ca90fd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/constants/_constants.py @@ -0,0 +1,362 @@ +""" +Collection of physical constants and conversion factors. + +Most constants are in SI units, so you can do +print('10 mile per minute is', 10*mile/minute, 'm/s or', 10*mile/(minute*knot), 'knots') + +The list is not meant to be comprehensive, but just convenient for everyday use. +""" + +from __future__ import annotations + +import math as _math +from typing import TYPE_CHECKING, Any + +from ._codata import value as _cd +import numpy as _np + +if TYPE_CHECKING: + import numpy.typing as npt + +""" +BasSw 2006 +physical constants: imported from CODATA +unit conversion: see e.g., NIST special publication 811 +Use at own risk: double-check values before depending on them. +Some constants exist in a few variants, which are marked with suffixes. +The ones without any suffix should be the most common ones.
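+ +For example, one mile per hour expressed in SI units (an illustrative check; +the value 0.44704 m/s follows exactly from the definitions above): + + >>> from scipy.constants import mile, hour + >>> round(mile / hour, 5) # metres per second + 0.44704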
+""" + +__all__ = [ + 'Avogadro', 'Boltzmann', 'Btu', 'Btu_IT', 'Btu_th', 'G', + 'Julian_year', 'N_A', 'Planck', 'R', 'Rydberg', + 'Stefan_Boltzmann', 'Wien', 'acre', 'alpha', + 'angstrom', 'arcmin', 'arcminute', 'arcsec', + 'arcsecond', 'astronomical_unit', 'atm', + 'atmosphere', 'atomic_mass', 'atto', 'au', 'bar', + 'barrel', 'bbl', 'blob', 'c', 'calorie', + 'calorie_IT', 'calorie_th', 'carat', 'centi', + 'convert_temperature', 'day', 'deci', 'degree', + 'degree_Fahrenheit', 'deka', 'dyn', 'dyne', 'e', + 'eV', 'electron_mass', 'electron_volt', + 'elementary_charge', 'epsilon_0', 'erg', + 'exa', 'exbi', 'femto', 'fermi', 'fine_structure', + 'fluid_ounce', 'fluid_ounce_US', 'fluid_ounce_imp', + 'foot', 'g', 'gallon', 'gallon_US', 'gallon_imp', + 'gas_constant', 'gibi', 'giga', 'golden', 'golden_ratio', + 'grain', 'gram', 'gravitational_constant', 'h', 'hbar', + 'hectare', 'hecto', 'horsepower', 'hour', 'hp', + 'inch', 'k', 'kgf', 'kibi', 'kilo', 'kilogram_force', + 'kmh', 'knot', 'lambda2nu', 'lb', 'lbf', + 'light_year', 'liter', 'litre', 'long_ton', 'm_e', + 'm_n', 'm_p', 'm_u', 'mach', 'mebi', 'mega', + 'metric_ton', 'micro', 'micron', 'mil', 'mile', + 'milli', 'minute', 'mmHg', 'mph', 'mu_0', 'nano', + 'nautical_mile', 'neutron_mass', 'nu2lambda', + 'ounce', 'oz', 'parsec', 'pebi', 'peta', + 'pi', 'pico', 'point', 'pound', 'pound_force', + 'proton_mass', 'psi', 'pt', 'quecto', 'quetta', 'ronna', 'ronto', + 'short_ton', 'sigma', 'slinch', 'slug', 'speed_of_light', + 'speed_of_sound', 'stone', 'survey_foot', + 'survey_mile', 'tebi', 'tera', 'ton_TNT', + 'torr', 'troy_ounce', 'troy_pound', 'u', + 'week', 'yard', 'year', 'yobi', 'yocto', + 'yotta', 'zebi', 'zepto', 'zero_Celsius', 'zetta' +] + + +# mathematical constants +pi = _math.pi +golden = golden_ratio = (1 + _math.sqrt(5)) / 2 + +# SI prefixes +quetta = 1e30 +ronna = 1e27 +yotta = 1e24 +zetta = 1e21 +exa = 1e18 +peta = 1e15 +tera = 1e12 +giga = 1e9 +mega = 1e6 +kilo = 1e3 +hecto = 1e2 +deka = 1e1 +deci = 1e-1 +centi = 1e-2 +milli = 1e-3 +micro = 1e-6 +nano = 1e-9 +pico = 1e-12 +femto = 1e-15 +atto = 1e-18 +zepto = 1e-21 +yocto = 1e-24 +ronto = 1e-27 +quecto = 1e-30 + +# binary prefixes +kibi = 2**10 +mebi = 2**20 +gibi = 2**30 +tebi = 2**40 +pebi = 2**50 +exbi = 2**60 +zebi = 2**70 +yobi = 2**80 + +# physical constants +c = speed_of_light = _cd('speed of light in vacuum') +mu_0 = _cd('vacuum mag. 
permeability') +epsilon_0 = _cd('vacuum electric permittivity') +h = Planck = _cd('Planck constant') +hbar = h / (2 * pi) +G = gravitational_constant = _cd('Newtonian constant of gravitation') +g = _cd('standard acceleration of gravity') +e = elementary_charge = _cd('elementary charge') +R = gas_constant = _cd('molar gas constant') +alpha = fine_structure = _cd('fine-structure constant') +N_A = Avogadro = _cd('Avogadro constant') +k = Boltzmann = _cd('Boltzmann constant') +sigma = Stefan_Boltzmann = _cd('Stefan-Boltzmann constant') +Wien = _cd('Wien wavelength displacement law constant') +Rydberg = _cd('Rydberg constant') + +# mass in kg +gram = 1e-3 +metric_ton = 1e3 +grain = 64.79891e-6 +lb = pound = 7000 * grain # avoirdupois +blob = slinch = pound * g / 0.0254 # lbf*s**2/in (added in 1.0.0) +slug = blob / 12 # lbf*s**2/foot (added in 1.0.0) +oz = ounce = pound / 16 +stone = 14 * pound +long_ton = 2240 * pound +short_ton = 2000 * pound + +troy_ounce = 480 * grain # only for metals / gems +troy_pound = 12 * troy_ounce +carat = 200e-6 + +m_e = electron_mass = _cd('electron mass') +m_p = proton_mass = _cd('proton mass') +m_n = neutron_mass = _cd('neutron mass') +m_u = u = atomic_mass = _cd('atomic mass constant') + +# angle in rad +degree = pi / 180 +arcmin = arcminute = degree / 60 +arcsec = arcsecond = arcmin / 60 + +# time in second +minute = 60.0 +hour = 60 * minute +day = 24 * hour +week = 7 * day +year = 365 * day +Julian_year = 365.25 * day + +# length in meter +inch = 0.0254 +foot = 12 * inch +yard = 3 * foot +mile = 1760 * yard +mil = inch / 1000 +pt = point = inch / 72 # typography +survey_foot = 1200.0 / 3937 +survey_mile = 5280 * survey_foot +nautical_mile = 1852.0 +fermi = 1e-15 +angstrom = 1e-10 +micron = 1e-6 +au = astronomical_unit = 149597870700.0 +light_year = Julian_year * c +parsec = au / arcsec + +# pressure in pascal +atm = atmosphere = _cd('standard atmosphere') +bar = 1e5 +torr = mmHg = atm / 760 +psi = pound * g / (inch * inch) + +# area in meter**2 +hectare = 1e4 +acre = 43560 * foot**2 + +# volume in meter**3 +litre = liter = 1e-3 +gallon = gallon_US = 231 * inch**3 # US +# pint = gallon_US / 8 +fluid_ounce = fluid_ounce_US = gallon_US / 128 +bbl = barrel = 42 * gallon_US # for oil + +gallon_imp = 4.54609e-3 # UK +fluid_ounce_imp = gallon_imp / 160 + +# speed in meter per second +kmh = 1e3 / hour +mph = mile / hour +# approx value of mach at 15 degrees in 1 atm. Is this a common value? +mach = speed_of_sound = 340.5 +knot = nautical_mile / hour + +# temperature in kelvin +zero_Celsius = 273.15 +degree_Fahrenheit = 1/1.8 # only for differences + +# energy in joule +eV = electron_volt = elementary_charge # * 1 Volt +calorie = calorie_th = 4.184 +calorie_IT = 4.1868 +erg = 1e-7 +Btu_th = pound * degree_Fahrenheit * calorie_th / gram +Btu = Btu_IT = pound * degree_Fahrenheit * calorie_IT / gram +ton_TNT = 1e9 * calorie_th +# Wh = watt_hour + +# power in watt +hp = horsepower = 550 * foot * pound * g + +# force in newton +dyn = dyne = 1e-5 +lbf = pound_force = pound * g +kgf = kilogram_force = g # * 1 kg + +# functions for conversions that are not linear + + +def convert_temperature( + val: npt.ArrayLike, + old_scale: str, + new_scale: str, +) -> Any: + """ + Convert from a temperature scale to another one among Celsius, Kelvin, + Fahrenheit, and Rankine scales. + + Parameters + ---------- + val : array_like + Value(s) of the temperature(s) to be converted expressed in the + original scale. 
+ old_scale : str + Specifies as a string the original scale from which the temperature + value(s) will be converted. Supported scales are Celsius ('Celsius', + 'celsius', 'C' or 'c'), Kelvin ('Kelvin', 'kelvin', 'K', 'k'), + Fahrenheit ('Fahrenheit', 'fahrenheit', 'F' or 'f'), and Rankine + ('Rankine', 'rankine', 'R', 'r'). + new_scale : str + Specifies as a string the new scale to which the temperature + value(s) will be converted. Supported scales are Celsius ('Celsius', + 'celsius', 'C' or 'c'), Kelvin ('Kelvin', 'kelvin', 'K', 'k'), + Fahrenheit ('Fahrenheit', 'fahrenheit', 'F' or 'f'), and Rankine + ('Rankine', 'rankine', 'R', 'r'). + + Returns + ------- + res : float or array of floats + Value(s) of the converted temperature(s) expressed in the new scale. + + Notes + ----- + .. versionadded:: 0.18.0 + + Examples + -------- + >>> from scipy.constants import convert_temperature + >>> import numpy as np + >>> convert_temperature(np.array([-40, 40]), 'Celsius', 'Kelvin') + array([ 233.15, 313.15]) + + """ + # Convert from `old_scale` to Kelvin + if old_scale.lower() in ['celsius', 'c']: + tempo = _np.asanyarray(val) + zero_Celsius + elif old_scale.lower() in ['kelvin', 'k']: + tempo = _np.asanyarray(val) + elif old_scale.lower() in ['fahrenheit', 'f']: + tempo = (_np.asanyarray(val) - 32) * 5 / 9 + zero_Celsius + elif old_scale.lower() in ['rankine', 'r']: + tempo = _np.asanyarray(val) * 5 / 9 + else: + raise NotImplementedError("%s scale is unsupported: supported scales " + "are Celsius, Kelvin, Fahrenheit, and " + "Rankine" % old_scale) + # and from Kelvin to `new_scale`. + if new_scale.lower() in ['celsius', 'c']: + res = tempo - zero_Celsius + elif new_scale.lower() in ['kelvin', 'k']: + res = tempo + elif new_scale.lower() in ['fahrenheit', 'f']: + res = (tempo - zero_Celsius) * 9 / 5 + 32 + elif new_scale.lower() in ['rankine', 'r']: + res = tempo * 9 / 5 + else: + raise NotImplementedError("'%s' scale is unsupported: supported " + "scales are 'Celsius', 'Kelvin', " + "'Fahrenheit', and 'Rankine'" % new_scale) + + return res + + +# optics + + +def lambda2nu(lambda_: npt.ArrayLike) -> Any: + """ + Convert wavelength to optical frequency + + Parameters + ---------- + lambda_ : array_like + Wavelength(s) to be converted. + + Returns + ------- + nu : float or array of floats + Equivalent optical frequency. + + Notes + ----- + Computes ``nu = c / lambda`` where c = 299792458.0, i.e., the + (vacuum) speed of light in meters/second. + + Examples + -------- + >>> from scipy.constants import lambda2nu, speed_of_light + >>> import numpy as np + >>> lambda2nu(np.array((1, speed_of_light))) + array([ 2.99792458e+08, 1.00000000e+00]) + + """ + return c / _np.asanyarray(lambda_) + + +def nu2lambda(nu: npt.ArrayLike) -> Any: + """ + Convert optical frequency to wavelength. + + Parameters + ---------- + nu : array_like + Optical frequency to be converted. + + Returns + ------- + lambda : float or array of floats + Equivalent wavelength(s). + + Notes + ----- + Computes ``lambda = c / nu`` where c = 299792458.0, i.e., the + (vacuum) speed of light in meters/second. 
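+ + Since the conversion is just a division of ``c`` by the argument, + `nu2lambda` and `lambda2nu` perform the same operation, and each + undoes the other: ``nu2lambda(lambda2nu(x)) == x`` up to rounding.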
+ + Examples + -------- + >>> from scipy.constants import nu2lambda, speed_of_light + >>> import numpy as np + >>> nu2lambda(np.array((1, speed_of_light))) + array([ 2.99792458e+08, 1.00000000e+00]) + + """ + return c / _np.asanyarray(nu) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/constants/codata.py b/llmeval-env/lib/python3.10/site-packages/scipy/constants/codata.py new file mode 100644 index 0000000000000000000000000000000000000000..72177f20545d673d5bbb179c705f72cdbb1afcc4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/constants/codata.py @@ -0,0 +1,24 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.constants` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'physical_constants', 'value', 'unit', 'precision', 'find', + 'ConstantWarning', 'txt2002', 'txt2006', 'txt2010', 'txt2014', + 'txt2018', 'parse_constants_2002to2014', + 'parse_constants_2018toXXXX', 'k', 'c', 'mu0', 'epsilon0', + 'exact_values', 'key', 'val', 'v' + +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="constants", module="codata", + private_modules=["_codata"], all=__all__, + attribute=name) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/constants/constants.py b/llmeval-env/lib/python3.10/site-packages/scipy/constants/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..855901ba802881090b99b7e8972de741331c7ab9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/constants/constants.py @@ -0,0 +1,53 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.constants` namespace for importing the functions +# included below. 
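+# +# A minimal sketch of the intended behaviour (assuming the shim warns via +# `_sub_module_deprecation`, as imported below): +# +# import warnings +# from scipy.constants import constants # this deprecated module +# with warnings.catch_warnings(record=True) as w: +# warnings.simplefilter("always") +# _ = constants.golden # forwarded to scipy.constants.golden +# assert any(issubclass(i.category, DeprecationWarning) for i in w)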
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'Avogadro', 'Boltzmann', 'Btu', 'Btu_IT', 'Btu_th', 'G', + 'Julian_year', 'N_A', 'Planck', 'R', 'Rydberg', + 'Stefan_Boltzmann', 'Wien', 'acre', 'alpha', + 'angstrom', 'arcmin', 'arcminute', 'arcsec', + 'arcsecond', 'astronomical_unit', 'atm', + 'atmosphere', 'atomic_mass', 'atto', 'au', 'bar', + 'barrel', 'bbl', 'blob', 'c', 'calorie', + 'calorie_IT', 'calorie_th', 'carat', 'centi', + 'convert_temperature', 'day', 'deci', 'degree', + 'degree_Fahrenheit', 'deka', 'dyn', 'dyne', 'e', + 'eV', 'electron_mass', 'electron_volt', + 'elementary_charge', 'epsilon_0', 'erg', + 'exa', 'exbi', 'femto', 'fermi', 'fine_structure', + 'fluid_ounce', 'fluid_ounce_US', 'fluid_ounce_imp', + 'foot', 'g', 'gallon', 'gallon_US', 'gallon_imp', + 'gas_constant', 'gibi', 'giga', 'golden', 'golden_ratio', + 'grain', 'gram', 'gravitational_constant', 'h', 'hbar', + 'hectare', 'hecto', 'horsepower', 'hour', 'hp', + 'inch', 'k', 'kgf', 'kibi', 'kilo', 'kilogram_force', + 'kmh', 'knot', 'lambda2nu', 'lb', 'lbf', + 'light_year', 'liter', 'litre', 'long_ton', 'm_e', + 'm_n', 'm_p', 'm_u', 'mach', 'mebi', 'mega', + 'metric_ton', 'micro', 'micron', 'mil', 'mile', + 'milli', 'minute', 'mmHg', 'mph', 'mu_0', 'nano', + 'nautical_mile', 'neutron_mass', 'nu2lambda', + 'ounce', 'oz', 'parsec', 'pebi', 'peta', + 'pi', 'pico', 'point', 'pound', 'pound_force', + 'proton_mass', 'psi', 'pt', 'short_ton', + 'sigma', 'slinch', 'slug', 'speed_of_light', + 'speed_of_sound', 'stone', 'survey_foot', + 'survey_mile', 'tebi', 'tera', 'ton_TNT', + 'torr', 'troy_ounce', 'troy_pound', 'u', + 'week', 'yard', 'year', 'yobi', 'yocto', + 'yotta', 'zebi', 'zepto', 'zero_Celsius', 'zetta' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="constants", module="constants", + private_modules=["_constants"], all=__all__, + attribute=name) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/constants/tests/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/constants/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6952aaf017a58389dca916e5f77e6f739eb4412 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/constants/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/__init__.py b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..10f4b39e48e2d6c0b042582ca65f572bde6ba575 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/__init__.py @@ -0,0 +1,103 @@ +""" +========================================================= +Legacy discrete Fourier transforms (:mod:`scipy.fftpack`) +========================================================= + +.. legacy:: + + New code should use :mod:`scipy.fft`. + +Fast Fourier Transforms (FFTs) +============================== + +.. 
autosummary:: + :toctree: generated/ + + fft - Fast (discrete) Fourier Transform (FFT) + ifft - Inverse FFT + fft2 - 2-D FFT + ifft2 - 2-D inverse FFT + fftn - N-D FFT + ifftn - N-D inverse FFT + rfft - FFT of strictly real-valued sequence + irfft - Inverse of rfft + dct - Discrete cosine transform + idct - Inverse discrete cosine transform + dctn - N-D Discrete cosine transform + idctn - N-D Inverse discrete cosine transform + dst - Discrete sine transform + idst - Inverse discrete sine transform + dstn - N-D Discrete sine transform + idstn - N-D Inverse discrete sine transform + +Differential and pseudo-differential operators +============================================== + +.. autosummary:: + :toctree: generated/ + + diff - Differentiation and integration of periodic sequences + tilbert - Tilbert transform: cs_diff(x,h,h) + itilbert - Inverse Tilbert transform: sc_diff(x,h,h) + hilbert - Hilbert transform: cs_diff(x,inf,inf) + ihilbert - Inverse Hilbert transform: sc_diff(x,inf,inf) + cs_diff - cosh/sinh pseudo-derivative of periodic sequences + sc_diff - sinh/cosh pseudo-derivative of periodic sequences + ss_diff - sinh/sinh pseudo-derivative of periodic sequences + cc_diff - cosh/cosh pseudo-derivative of periodic sequences + shift - Shift periodic sequences + +Helper functions +================ + +.. autosummary:: + :toctree: generated/ + + fftshift - Shift the zero-frequency component to the center of the spectrum + ifftshift - The inverse of `fftshift` + fftfreq - Return the Discrete Fourier Transform sample frequencies + rfftfreq - DFT sample frequencies (for usage with rfft, irfft) + next_fast_len - Find the optimal length to zero-pad an FFT for speed + +Note that ``fftshift``, ``ifftshift`` and ``fftfreq`` are numpy functions +exposed by ``fftpack``; importing them from ``numpy`` should be preferred. + +Convolutions (:mod:`scipy.fftpack.convolve`) +============================================ + +.. module:: scipy.fftpack.convolve + +.. autosummary:: + :toctree: generated/ + + convolve + convolve_z + init_convolution_kernel + destroy_convolve_cache + +""" + + +__all__ = ['fft','ifft','fftn','ifftn','rfft','irfft', + 'fft2','ifft2', + 'diff', + 'tilbert','itilbert','hilbert','ihilbert', + 'sc_diff','cs_diff','cc_diff','ss_diff', + 'shift', + 'fftfreq', 'rfftfreq', + 'fftshift', 'ifftshift', + 'next_fast_len', + 'dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn' + ] + +from ._basic import * +from ._pseudo_diffs import * +from ._helper import * +from ._realtransforms import * + +# Deprecated namespaces, to be removed in v2.0.0 +from . 
import basic, helper, pseudo_diffs, realtransforms + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..534833c002cf9cc5ee6eb483971128fd25da2af6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_basic.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_basic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7468a3f39fd356ac86ef67fe82c9b8a16663cd62 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_basic.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_helper.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_helper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ddffdb92ad7755c467f13d11e2142ad0893b5645 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_helper.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_pseudo_diffs.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_pseudo_diffs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4a7c08172c638462eda085d8a08fc8b6df4de56 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_pseudo_diffs.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_realtransforms.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_realtransforms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98dc573c8a0c47c80b24c474eb25f3faa84fedfd Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_realtransforms.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/__pycache__/basic.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/__pycache__/basic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4820bcd860d6e780180530e15657e6c531d99bca Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/__pycache__/basic.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/__pycache__/helper.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/__pycache__/helper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f2e40de9d974aaa4b74d12b6277affc1214b9c6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/__pycache__/helper.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/__pycache__/pseudo_diffs.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/__pycache__/pseudo_diffs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea0e22c9457efcb24fb4ac7dac2f4093ace2c145 Binary files 
/dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/__pycache__/pseudo_diffs.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/__pycache__/realtransforms.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/__pycache__/realtransforms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec6ae602eb5453513d3bdc309c6e466c7c2abab3 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/__pycache__/realtransforms.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/_basic.py b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..59c85ae4b364464a66489ef221f7f7ac45624694 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/_basic.py @@ -0,0 +1,428 @@ +""" +Discrete Fourier Transforms - _basic.py +""" +# Created by Pearu Peterson, August, September 2002 +__all__ = ['fft','ifft','fftn','ifftn','rfft','irfft', + 'fft2','ifft2'] + +from scipy.fft import _pocketfft +from ._helper import _good_shape + + +def fft(x, n=None, axis=-1, overwrite_x=False): + """ + Return discrete Fourier transform of real or complex sequence. + + The returned complex array contains ``y(0), y(1),..., y(n-1)``, where + + ``y(j) = (x * exp(-2*pi*sqrt(-1)*j*np.arange(n)/n)).sum()``. + + Parameters + ---------- + x : array_like + Array to Fourier transform. + n : int, optional + Length of the Fourier transform. If ``n < x.shape[axis]``, `x` is + truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The + default results in ``n = x.shape[axis]``. + axis : int, optional + Axis along which the fft's are computed; the default is over the + last axis (i.e., ``axis=-1``). + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + + Returns + ------- + z : complex ndarray + with the elements:: + + [y(0),y(1),..,y(n/2),y(1-n/2),...,y(-1)] if n is even + [y(0),y(1),..,y((n-1)/2),y(-(n-1)/2),...,y(-1)] if n is odd + + where:: + + y(j) = sum[k=0..n-1] x[k] * exp(-sqrt(-1)*j*k* 2*pi/n), j = 0..n-1 + + See Also + -------- + ifft : Inverse FFT + rfft : FFT of a real sequence + + Notes + ----- + The packing of the result is "standard": If ``A = fft(a, n)``, then + ``A[0]`` contains the zero-frequency term, ``A[1:n/2]`` contains the + positive-frequency terms, and ``A[n/2:]`` contains the negative-frequency + terms, in order of decreasingly negative frequency. So, for an 8-point + transform, the frequencies of the result are [0, 1, 2, 3, -4, -3, -2, -1]. + To rearrange the fft output so that the zero-frequency component is + centered, like [-4, -3, -2, -1, 0, 1, 2, 3], use `fftshift`. + + Both single and double precision routines are implemented. Half precision + inputs will be converted to single precision. Non-floating-point inputs + will be converted to double precision. Long-double precision inputs are + not supported. + + This function is most efficient when `n` is a power of two, and least + efficient when `n` is prime. + + Note that if ``x`` is real-valued, then ``A[j] == A[n-j].conjugate()``. + If ``x`` is real-valued and ``n`` is even, then ``A[n/2]`` is real. + + If the data type of `x` is real, a "real FFT" algorithm is automatically + used, which roughly halves the computation time.
To increase efficiency + a little further, use `rfft`, which does the same calculation, but only + outputs half of the symmetrical spectrum. If the data is both real and + symmetrical, the `dct` can again double the efficiency by generating + half of the spectrum from half of the signal. + + Examples + -------- + >>> import numpy as np + >>> from scipy.fftpack import fft, ifft + >>> x = np.arange(5) + >>> np.allclose(fft(ifft(x)), x, atol=1e-15) # within numerical accuracy. + True + + """ + return _pocketfft.fft(x, n, axis, None, overwrite_x) + + +def ifft(x, n=None, axis=-1, overwrite_x=False): + """ + Return discrete inverse Fourier transform of real or complex sequence. + + The returned complex array contains ``y(0), y(1),..., y(n-1)``, where + + ``y(j) = (x * exp(2*pi*sqrt(-1)*j*np.arange(n)/n)).mean()``. + + Parameters + ---------- + x : array_like + Transformed data to invert. + n : int, optional + Length of the inverse Fourier transform. If ``n < x.shape[axis]``, + `x` is truncated. If ``n > x.shape[axis]``, `x` is zero-padded. + The default results in ``n = x.shape[axis]``. + axis : int, optional + Axis along which the ifft's are computed; the default is over the + last axis (i.e., ``axis=-1``). + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + + Returns + ------- + ifft : complex ndarray + The inverse discrete Fourier transform. + + See Also + -------- + fft : Forward FFT + + Notes + ----- + Both single and double precision routines are implemented. Half precision + inputs will be converted to single precision. Non-floating-point inputs + will be converted to double precision. Long-double precision inputs are + not supported. + + This function is most efficient when `n` is a power of two, and least + efficient when `n` is prime. + + If the data type of `x` is real, a "real IFFT" algorithm is automatically + used, which roughly halves the computation time. + + Examples + -------- + >>> from scipy.fftpack import fft, ifft + >>> import numpy as np + >>> x = np.arange(5) + >>> np.allclose(ifft(fft(x)), x, atol=1e-15) # within numerical accuracy. + True + + """ + return _pocketfft.ifft(x, n, axis, None, overwrite_x) + + +def rfft(x, n=None, axis=-1, overwrite_x=False): + """ + Discrete Fourier transform of a real sequence. + + Parameters + ---------- + x : array_like, real-valued + The data to transform. + n : int, optional + Defines the length of the Fourier transform. If `n` is not specified + (the default) then ``n = x.shape[axis]``. If ``n < x.shape[axis]``, + `x` is truncated, if ``n > x.shape[axis]``, `x` is zero-padded. + axis : int, optional + The axis along which the transform is applied. The default is the + last axis. + overwrite_x : bool, optional + If set to True, the contents of `x` can be overwritten. Default is + False. + + Returns + ------- + z : real ndarray + The returned real array contains:: + + [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2))] if n is even + [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2)),Im(y(n/2))] if n is odd + + where:: + + y(j) = sum[k=0..n-1] x[k] * exp(-sqrt(-1)*j*k*2*pi/n) + j = 0..n-1 + + See Also + -------- + fft, irfft, scipy.fft.rfft + + Notes + ----- + Within numerical accuracy, ``y == rfft(irfft(y))``. + + Both single and double precision routines are implemented. Half precision + inputs will be converted to single precision. Non-floating-point inputs + will be converted to double precision. Long-double precision inputs are + not supported.
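+ + The packed real output carries the same information as the complex output + of `scipy.fft.rfft`; for instance (a minimal check): + + >>> import numpy as np + >>> import scipy.fft + >>> from scipy.fftpack import rfft + >>> z = scipy.fft.rfft([9, -9, 1, 3]) + >>> np.allclose(rfft([9, -9, 1, 3]), + ... [z[0].real, z[1].real, z[1].imag, z[2].real]) + True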
+ + To get an output with a complex datatype, consider using the newer + function `scipy.fft.rfft`. + + Examples + -------- + >>> from scipy.fftpack import fft, rfft + >>> a = [9, -9, 1, 3] + >>> fft(a) + array([ 4. +0.j, 8.+12.j, 16. +0.j, 8.-12.j]) + >>> rfft(a) + array([ 4., 8., 12., 16.]) + + """ + return _pocketfft.rfft_fftpack(x, n, axis, None, overwrite_x) + + +def irfft(x, n=None, axis=-1, overwrite_x=False): + """ + Return inverse discrete Fourier transform of real sequence x. + + The contents of `x` are interpreted as the output of the `rfft` + function. + + Parameters + ---------- + x : array_like + Transformed data to invert. + n : int, optional + Length of the inverse Fourier transform. + If n < x.shape[axis], x is truncated. + If n > x.shape[axis], x is zero-padded. + The default results in n = x.shape[axis]. + axis : int, optional + Axis along which the ifft's are computed; the default is over + the last axis (i.e., axis=-1). + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + + Returns + ------- + irfft : ndarray of floats + The inverse discrete Fourier transform. + + See Also + -------- + rfft, ifft, scipy.fft.irfft + + Notes + ----- + The returned real array contains:: + + [y(0),y(1),...,y(n-1)] + + where for n is even:: + + y(j) = 1/n (sum[k=1..n/2-1] (x[2*k-1]+sqrt(-1)*x[2*k]) + * exp(sqrt(-1)*j*k* 2*pi/n) + + c.c. + x[0] + (-1)**(j) x[n-1]) + + and for n is odd:: + + y(j) = 1/n (sum[k=1..(n-1)/2] (x[2*k-1]+sqrt(-1)*x[2*k]) + * exp(sqrt(-1)*j*k* 2*pi/n) + + c.c. + x[0]) + + c.c. denotes complex conjugate of preceding expression. + + For details on input parameters, see `rfft`. + + To process (conjugate-symmetric) frequency-domain data with a complex + datatype, consider using the newer function `scipy.fft.irfft`. + + Examples + -------- + >>> from scipy.fftpack import rfft, irfft + >>> a = [1.0, 2.0, 3.0, 4.0, 5.0] + >>> irfft(a) + array([ 2.6 , -3.16405192, 1.24398433, -1.14955713, 1.46962473]) + >>> irfft(rfft(a)) + array([1., 2., 3., 4., 5.]) + + """ + return _pocketfft.irfft_fftpack(x, n, axis, None, overwrite_x) + + +def fftn(x, shape=None, axes=None, overwrite_x=False): + """ + Return multidimensional discrete Fourier transform. + + The returned array contains:: + + y[j_1,..,j_d] = sum[k_1=0..n_1-1, ..., k_d=0..n_d-1] + x[k_1,..,k_d] * prod[i=1..d] exp(-sqrt(-1)*2*pi/n_i * j_i * k_i) + + where d = len(x.shape) and n = x.shape. + + Parameters + ---------- + x : array_like + The (N-D) array to transform. + shape : int or array_like of ints or None, optional + The shape of the result. If both `shape` and `axes` (see below) are + None, `shape` is ``x.shape``; if `shape` is None but `axes` is + not None, then `shape` is ``numpy.take(x.shape, axes, axis=0)``. + If ``shape[i] > x.shape[i]``, the ith dimension is padded with zeros. + If ``shape[i] < x.shape[i]``, the ith dimension is truncated to + length ``shape[i]``. + If any element of `shape` is -1, the size of the corresponding + dimension of `x` is used. + axes : int or array_like of ints or None, optional + The axes of `x` (`y` if `shape` is not None) along which the + transform is applied. + The default is over all axes. + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed. Default is False. + + Returns + ------- + y : complex-valued N-D NumPy array + The (N-D) DFT of the input array. + + See Also + -------- + ifftn + + Notes + ----- + If ``x`` is real-valued, then + ``y[..., j_i, ...] == y[..., n_i-j_i, ...].conjugate()``. 
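+ + For example: + + >>> import numpy as np + >>> from scipy.fftpack import fftn + >>> y = fftn(np.arange(8.).reshape(2, 4)) + >>> np.allclose(y[1, 1], y[-1, -1].conjugate()) + True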
+ + Both single and double precision routines are implemented. Half precision + inputs will be converted to single precision. Non-floating-point inputs + will be converted to double precision. Long-double precision inputs are + not supported. + + Examples + -------- + >>> import numpy as np + >>> from scipy.fftpack import fftn, ifftn + >>> y = (-np.arange(16), 8 - np.arange(16), np.arange(16)) + >>> np.allclose(y, fftn(ifftn(y))) + True + + """ + shape = _good_shape(x, shape, axes) + return _pocketfft.fftn(x, shape, axes, None, overwrite_x) + + +def ifftn(x, shape=None, axes=None, overwrite_x=False): + """ + Return inverse multidimensional discrete Fourier transform. + + The sequence can be of an arbitrary type. + + The returned array contains:: + + y[j_1,..,j_d] = 1/p * sum[k_1=0..n_1-1, ..., k_d=0..n_d-1] + x[k_1,..,k_d] * prod[i=1..d] exp(sqrt(-1)*2*pi/n_i * j_i * k_i) + + where ``d = len(x.shape)``, ``n = x.shape``, and ``p = prod[i=1..d] n_i``. + + For description of parameters see `fftn`. + + See Also + -------- + fftn : for detailed information. + + Examples + -------- + >>> from scipy.fftpack import fftn, ifftn + >>> import numpy as np + >>> y = (-np.arange(16), 8 - np.arange(16), np.arange(16)) + >>> np.allclose(y, ifftn(fftn(y))) + True + + """ + shape = _good_shape(x, shape, axes) + return _pocketfft.ifftn(x, shape, axes, None, overwrite_x) + + +def fft2(x, shape=None, axes=(-2,-1), overwrite_x=False): + """ + 2-D discrete Fourier transform. + + Return the 2-D discrete Fourier transform of the 2-D argument + `x`. + + See Also + -------- + fftn : for detailed information. + + Examples + -------- + >>> import numpy as np + >>> from scipy.fftpack import fft2, ifft2 + >>> y = np.mgrid[:5, :5][0] + >>> y + array([[0, 0, 0, 0, 0], + [1, 1, 1, 1, 1], + [2, 2, 2, 2, 2], + [3, 3, 3, 3, 3], + [4, 4, 4, 4, 4]]) + >>> np.allclose(y, ifft2(fft2(y))) + True + """ + return fftn(x,shape,axes,overwrite_x) + + +def ifft2(x, shape=None, axes=(-2,-1), overwrite_x=False): + """ + 2-D discrete inverse Fourier transform of real or complex sequence. + + Return inverse 2-D discrete Fourier transform of + arbitrary type sequence x. + + See `ifft` for more information. + + See Also + -------- + fft2, ifft + + Examples + -------- + >>> import numpy as np + >>> from scipy.fftpack import fft2, ifft2 + >>> y = np.mgrid[:5, :5][0] + >>> y + array([[0, 0, 0, 0, 0], + [1, 1, 1, 1, 1], + [2, 2, 2, 2, 2], + [3, 3, 3, 3, 3], + [4, 4, 4, 4, 4]]) + >>> np.allclose(y, fft2(ifft2(y))) + True + + """ + return ifftn(x,shape,axes,overwrite_x) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/_helper.py b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..ee0dd7b0f8d6dce5fe717fe585f5af82a7d0c651 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/_helper.py @@ -0,0 +1,115 @@ +import operator + +import numpy as np +from numpy.fft import fftshift, ifftshift, fftfreq + +import scipy.fft._pocketfft.helper as _helper + +__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq', 'next_fast_len'] + + +def rfftfreq(n, d=1.0): + """DFT sample frequencies (for usage with rfft, irfft). + + The returned float array contains the frequency bins in + cycles/unit (with zero at the start) given a window length `n` and a + sample spacing `d`:: + + f = [0,1,1,2,2,...,n/2-1,n/2-1,n/2]/(d*n) if n is even + f = [0,1,1,2,2,...,n/2-1,n/2-1,n/2,n/2]/(d*n) if n is odd + + Parameters + ---------- + n : int + Window length. 
+ d : scalar, optional + Sample spacing. Default is 1. + + Returns + ------- + out : ndarray + The array of length `n`, containing the sample frequencies. + + Examples + -------- + >>> import numpy as np + >>> from scipy import fftpack + >>> sig = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float) + >>> sig_fft = fftpack.rfft(sig) + >>> n = sig_fft.size + >>> timestep = 0.1 + >>> freq = fftpack.rfftfreq(n, d=timestep) + >>> freq + array([ 0. , 1.25, 1.25, 2.5 , 2.5 , 3.75, 3.75, 5. ]) + + """ + n = operator.index(n) + if n < 0: + raise ValueError("n = %s is not valid. " + "n must be a nonnegative integer." % n) + + return (np.arange(1, n + 1, dtype=int) // 2) / float(n * d) + + +def next_fast_len(target): + """ + Find the next fast size of input data to `fft`, for zero-padding, etc. + + SciPy's FFTPACK has efficient functions for radix {2, 3, 4, 5}, so this + returns the next composite of the prime factors 2, 3, and 5 which is + greater than or equal to `target`. (These are also known as 5-smooth + numbers, regular numbers, or Hamming numbers.) + + Parameters + ---------- + target : int + Length to start searching from. Must be a positive integer. + + Returns + ------- + out : int + The first 5-smooth number greater than or equal to `target`. + + Notes + ----- + .. versionadded:: 0.18.0 + + Examples + -------- + On a particular machine, an FFT of prime length takes 133 ms: + + >>> from scipy import fftpack + >>> import numpy as np + >>> rng = np.random.default_rng() + >>> min_len = 10007 # prime length is worst case for speed + >>> a = rng.standard_normal(min_len) + >>> b = fftpack.fft(a) + + Zero-padding to the next 5-smooth length reduces computation time to + 211 us, a speedup of 630 times: + + >>> fftpack.next_fast_len(min_len) + 10125 + >>> b = fftpack.fft(a, 10125) + + Rounding up to the next power of 2 is not optimal, taking 367 us to + compute, 1.7 times as long as the 5-smooth size: + + >>> b = fftpack.fft(a, 16384) + + """ + # Real transforms use regular sizes so this is backwards compatible + return _helper.good_size(target, True) + + +def _good_shape(x, shape, axes): + """Ensure that shape argument is valid for scipy.fftpack + + scipy.fftpack does not support len(shape) < x.ndim when axes is not given. + """ + if shape is not None and axes is None: + shape = _helper._iterable_of_int(shape, 'shape') + if len(shape) != np.ndim(x): + raise ValueError("when given, axes and shape arguments" + " have to be of the same length") + return shape diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/_pseudo_diffs.py b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/_pseudo_diffs.py new file mode 100644 index 0000000000000000000000000000000000000000..b8ef40efc07484b3bf594ae3ff904cd85f498fc9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/_pseudo_diffs.py @@ -0,0 +1,551 @@ +""" +Differential and pseudo-differential operators. +""" +# Created by Pearu Peterson, September 2002 + +__all__ = ['diff', + 'tilbert','itilbert','hilbert','ihilbert', + 'cs_diff','cc_diff','sc_diff','ss_diff', + 'shift'] + +from numpy import pi, asarray, sin, cos, sinh, cosh, tanh, iscomplexobj +from . import convolve + +from scipy.fft._pocketfft.helper import _datacopied + + +_cache = {} + + +def diff(x,order=1,period=None, _cache=_cache): + """ + Return kth derivative (or integral) of a periodic sequence x. 
+ + If x_j and y_j are Fourier coefficients of periodic functions x + and y, respectively, then:: + + y_j = pow(sqrt(-1)*j*2*pi/period, order) * x_j + y_0 = 0 if order is not 0. + + Parameters + ---------- + x : array_like + Input array. + order : int, optional + The order of differentiation. Default order is 1. If order is + negative, then integration is carried out under the assumption + that ``x_0 == 0``. + period : float, optional + The assumed period of the sequence. Default is ``2*pi``. + + Notes + ----- + If ``sum(x, axis=0) = 0`` then ``diff(diff(x, k), -k) == x`` (within + numerical accuracy). + + For odd order and even ``len(x)``, the Nyquist mode is taken zero. + + """ + tmp = asarray(x) + if order == 0: + return tmp + if iscomplexobj(tmp): + return diff(tmp.real,order,period)+1j*diff(tmp.imag,order,period) + if period is not None: + c = 2*pi/period + else: + c = 1.0 + n = len(x) + omega = _cache.get((n,order,c)) + if omega is None: + if len(_cache) > 20: + while _cache: + _cache.popitem() + + def kernel(k,order=order,c=c): + if k: + return pow(c*k,order) + return 0 + omega = convolve.init_convolution_kernel(n,kernel,d=order, + zero_nyquist=1) + _cache[(n,order,c)] = omega + overwrite_x = _datacopied(tmp, x) + return convolve.convolve(tmp,omega,swap_real_imag=order % 2, + overwrite_x=overwrite_x) + + +del _cache + + +_cache = {} + + +def tilbert(x, h, period=None, _cache=_cache): + """ + Return h-Tilbert transform of a periodic sequence x. + + If x_j and y_j are Fourier coefficients of periodic functions x + and y, respectively, then:: + + y_j = sqrt(-1)*coth(j*h*2*pi/period) * x_j + y_0 = 0 + + Parameters + ---------- + x : array_like + The input array to transform. + h : float + Defines the parameter of the Tilbert transform. + period : float, optional + The assumed period of the sequence. Default period is ``2*pi``. + + Returns + ------- + tilbert : ndarray + The result of the transform. + + Notes + ----- + If ``sum(x, axis=0) == 0`` and ``n = len(x)`` is odd, then + ``tilbert(itilbert(x)) == x``. + + If ``2 * pi * h / period`` is approximately 10 or larger, then + numerically ``tilbert == hilbert`` + (theoretically oo-Tilbert == Hilbert). + + For even ``len(x)``, the Nyquist mode of ``x`` is taken zero. + + """ + tmp = asarray(x) + if iscomplexobj(tmp): + return tilbert(tmp.real, h, period) + \ + 1j * tilbert(tmp.imag, h, period) + + if period is not None: + h = h * 2 * pi / period + + n = len(x) + omega = _cache.get((n, h)) + if omega is None: + if len(_cache) > 20: + while _cache: + _cache.popitem() + + def kernel(k, h=h): + if k: + return 1.0/tanh(h*k) + + return 0 + + omega = convolve.init_convolution_kernel(n, kernel, d=1) + _cache[(n,h)] = omega + + overwrite_x = _datacopied(tmp, x) + return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x) + + +del _cache + + +_cache = {} + + +def itilbert(x,h,period=None, _cache=_cache): + """ + Return inverse h-Tilbert transform of a periodic sequence x. + + If ``x_j`` and ``y_j`` are Fourier coefficients of periodic functions x + and y, respectively, then:: + + y_j = -sqrt(-1)*tanh(j*h*2*pi/period) * x_j + y_0 = 0 + + For more details, see `tilbert`. 
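+ + Examples + -------- + For a zero-mean sequence of odd length, `itilbert` inverts `tilbert` + (see the Notes of `tilbert`): + + >>> import numpy as np + >>> from scipy.fftpack import tilbert, itilbert + >>> x = np.sin(2 * np.pi * np.arange(5) / 5) # zero mean, odd length + >>> np.allclose(tilbert(itilbert(x, 1), 1), x) + True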
+ + """ + tmp = asarray(x) + if iscomplexobj(tmp): + return itilbert(tmp.real,h,period) + \ + 1j*itilbert(tmp.imag,h,period) + if period is not None: + h = h*2*pi/period + n = len(x) + omega = _cache.get((n,h)) + if omega is None: + if len(_cache) > 20: + while _cache: + _cache.popitem() + + def kernel(k,h=h): + if k: + return -tanh(h*k) + return 0 + omega = convolve.init_convolution_kernel(n,kernel,d=1) + _cache[(n,h)] = omega + overwrite_x = _datacopied(tmp, x) + return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x) + + +del _cache + + +_cache = {} + + +def hilbert(x, _cache=_cache): + """ + Return Hilbert transform of a periodic sequence x. + + If x_j and y_j are Fourier coefficients of periodic functions x + and y, respectively, then:: + + y_j = sqrt(-1)*sign(j) * x_j + y_0 = 0 + + Parameters + ---------- + x : array_like + The input array, should be periodic. + _cache : dict, optional + Dictionary that contains the kernel used to do a convolution with. + + Returns + ------- + y : ndarray + The transformed input. + + See Also + -------- + scipy.signal.hilbert : Compute the analytic signal, using the Hilbert + transform. + + Notes + ----- + If ``sum(x, axis=0) == 0`` then ``hilbert(ihilbert(x)) == x``. + + For even len(x), the Nyquist mode of x is taken zero. + + The sign of the returned transform does not have a factor -1 that is more + often than not found in the definition of the Hilbert transform. Note also + that `scipy.signal.hilbert` does have an extra -1 factor compared to this + function. + + """ + tmp = asarray(x) + if iscomplexobj(tmp): + return hilbert(tmp.real)+1j*hilbert(tmp.imag) + n = len(x) + omega = _cache.get(n) + if omega is None: + if len(_cache) > 20: + while _cache: + _cache.popitem() + + def kernel(k): + if k > 0: + return 1.0 + elif k < 0: + return -1.0 + return 0.0 + omega = convolve.init_convolution_kernel(n,kernel,d=1) + _cache[n] = omega + overwrite_x = _datacopied(tmp, x) + return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x) + + +del _cache + + +def ihilbert(x): + """ + Return inverse Hilbert transform of a periodic sequence x. + + If ``x_j`` and ``y_j`` are Fourier coefficients of periodic functions x + and y, respectively, then:: + + y_j = -sqrt(-1)*sign(j) * x_j + y_0 = 0 + + """ + return -hilbert(x) + + +_cache = {} + + +def cs_diff(x, a, b, period=None, _cache=_cache): + """ + Return (a,b)-cosh/sinh pseudo-derivative of a periodic sequence. + + If ``x_j`` and ``y_j`` are Fourier coefficients of periodic functions x + and y, respectively, then:: + + y_j = -sqrt(-1)*cosh(j*a*2*pi/period)/sinh(j*b*2*pi/period) * x_j + y_0 = 0 + + Parameters + ---------- + x : array_like + The array to take the pseudo-derivative from. + a, b : float + Defines the parameters of the cosh/sinh pseudo-differential + operator. + period : float, optional + The period of the sequence. Default period is ``2*pi``. + + Returns + ------- + cs_diff : ndarray + Pseudo-derivative of periodic sequence `x`. + + Notes + ----- + For even len(`x`), the Nyquist mode of `x` is taken as zero. 
+ + """ + tmp = asarray(x) + if iscomplexobj(tmp): + return cs_diff(tmp.real,a,b,period) + \ + 1j*cs_diff(tmp.imag,a,b,period) + if period is not None: + a = a*2*pi/period + b = b*2*pi/period + n = len(x) + omega = _cache.get((n,a,b)) + if omega is None: + if len(_cache) > 20: + while _cache: + _cache.popitem() + + def kernel(k,a=a,b=b): + if k: + return -cosh(a*k)/sinh(b*k) + return 0 + omega = convolve.init_convolution_kernel(n,kernel,d=1) + _cache[(n,a,b)] = omega + overwrite_x = _datacopied(tmp, x) + return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x) + + +del _cache + + +_cache = {} + + +def sc_diff(x, a, b, period=None, _cache=_cache): + """ + Return (a,b)-sinh/cosh pseudo-derivative of a periodic sequence x. + + If x_j and y_j are Fourier coefficients of periodic functions x + and y, respectively, then:: + + y_j = sqrt(-1)*sinh(j*a*2*pi/period)/cosh(j*b*2*pi/period) * x_j + y_0 = 0 + + Parameters + ---------- + x : array_like + Input array. + a,b : float + Defines the parameters of the sinh/cosh pseudo-differential + operator. + period : float, optional + The period of the sequence x. Default is 2*pi. + + Notes + ----- + ``sc_diff(cs_diff(x,a,b),b,a) == x`` + For even ``len(x)``, the Nyquist mode of x is taken as zero. + + """ + tmp = asarray(x) + if iscomplexobj(tmp): + return sc_diff(tmp.real,a,b,period) + \ + 1j*sc_diff(tmp.imag,a,b,period) + if period is not None: + a = a*2*pi/period + b = b*2*pi/period + n = len(x) + omega = _cache.get((n,a,b)) + if omega is None: + if len(_cache) > 20: + while _cache: + _cache.popitem() + + def kernel(k,a=a,b=b): + if k: + return sinh(a*k)/cosh(b*k) + return 0 + omega = convolve.init_convolution_kernel(n,kernel,d=1) + _cache[(n,a,b)] = omega + overwrite_x = _datacopied(tmp, x) + return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x) + + +del _cache + + +_cache = {} + + +def ss_diff(x, a, b, period=None, _cache=_cache): + """ + Return (a,b)-sinh/sinh pseudo-derivative of a periodic sequence x. + + If x_j and y_j are Fourier coefficients of periodic functions x + and y, respectively, then:: + + y_j = sinh(j*a*2*pi/period)/sinh(j*b*2*pi/period) * x_j + y_0 = a/b * x_0 + + Parameters + ---------- + x : array_like + The array to take the pseudo-derivative from. + a,b + Defines the parameters of the sinh/sinh pseudo-differential + operator. + period : float, optional + The period of the sequence x. Default is ``2*pi``. + + Notes + ----- + ``ss_diff(ss_diff(x,a,b),b,a) == x`` + + """ + tmp = asarray(x) + if iscomplexobj(tmp): + return ss_diff(tmp.real,a,b,period) + \ + 1j*ss_diff(tmp.imag,a,b,period) + if period is not None: + a = a*2*pi/period + b = b*2*pi/period + n = len(x) + omega = _cache.get((n,a,b)) + if omega is None: + if len(_cache) > 20: + while _cache: + _cache.popitem() + + def kernel(k,a=a,b=b): + if k: + return sinh(a*k)/sinh(b*k) + return float(a)/b + omega = convolve.init_convolution_kernel(n,kernel) + _cache[(n,a,b)] = omega + overwrite_x = _datacopied(tmp, x) + return convolve.convolve(tmp,omega,overwrite_x=overwrite_x) + + +del _cache + + +_cache = {} + + +def cc_diff(x, a, b, period=None, _cache=_cache): + """ + Return (a,b)-cosh/cosh pseudo-derivative of a periodic sequence. + + If x_j and y_j are Fourier coefficients of periodic functions x + and y, respectively, then:: + + y_j = cosh(j*a*2*pi/period)/cosh(j*b*2*pi/period) * x_j + + Parameters + ---------- + x : array_like + The array to take the pseudo-derivative from. 
a,b : float + Defines the parameters of the cosh/cosh pseudo-differential + operator. + period : float, optional + The period of the sequence x. Default is ``2*pi``. + + Returns + ------- + cc_diff : ndarray + Pseudo-derivative of periodic sequence `x`. + + Notes + ----- + ``cc_diff(cc_diff(x,a,b),b,a) == x`` + + """ + tmp = asarray(x) + if iscomplexobj(tmp): + return cc_diff(tmp.real,a,b,period) + \ + 1j*cc_diff(tmp.imag,a,b,period) + if period is not None: + a = a*2*pi/period + b = b*2*pi/period + n = len(x) + omega = _cache.get((n,a,b)) + if omega is None: + if len(_cache) > 20: + while _cache: + _cache.popitem() + + def kernel(k,a=a,b=b): + return cosh(a*k)/cosh(b*k) + omega = convolve.init_convolution_kernel(n,kernel) + _cache[(n,a,b)] = omega + overwrite_x = _datacopied(tmp, x) + return convolve.convolve(tmp,omega,overwrite_x=overwrite_x) + + +del _cache + + +_cache = {} + + +def shift(x, a, period=None, _cache=_cache): + """ + Shift periodic sequence x by a: y(u) = x(u+a). + + If x_j and y_j are Fourier coefficients of periodic functions x + and y, respectively, then:: + + y_j = exp(j*a*2*pi/period*sqrt(-1)) * x_j + + Parameters + ---------- + x : array_like + The array to shift. + a : float + Defines the amount by which the sequence is shifted. + period : float, optional + The period of the sequences x and y. Default period is ``2*pi``. + """ + tmp = asarray(x) + if iscomplexobj(tmp): + return shift(tmp.real,a,period)+1j*shift(tmp.imag,a,period) + if period is not None: + a = a*2*pi/period + n = len(x) + omega = _cache.get((n,a)) + if omega is None: + if len(_cache) > 20: + while _cache: + _cache.popitem() + + def kernel_real(k,a=a): + return cos(a*k) + + def kernel_imag(k,a=a): + return sin(a*k) + omega_real = convolve.init_convolution_kernel(n,kernel_real,d=0, + zero_nyquist=0) + omega_imag = convolve.init_convolution_kernel(n,kernel_imag,d=1, + zero_nyquist=0) + _cache[(n,a)] = omega_real,omega_imag + else: + omega_real,omega_imag = omega + overwrite_x = _datacopied(tmp, x) + return convolve.convolve_z(tmp,omega_real,omega_imag, + overwrite_x=overwrite_x) + + +del _cache diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/_realtransforms.py b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/_realtransforms.py new file mode 100644 index 0000000000000000000000000000000000000000..f56f68fce4ea447b6b946b14d7610dc4ef07c47c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/_realtransforms.py @@ -0,0 +1,598 @@ +""" +Real spectrum transforms (DCT, DST, MDCT) +""" + +__all__ = ['dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn'] + +from scipy.fft import _pocketfft +from ._helper import _good_shape + +_inverse_typemap = {1: 1, 2: 3, 3: 2, 4: 4} + + +def dctn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False): + """ + Return multidimensional Discrete Cosine Transform along the specified axes. + + Parameters + ---------- + x : array_like + The input array. + type : {1, 2, 3, 4}, optional + Type of the DCT (see Notes). Default type is 2. + shape : int or array_like of ints or None, optional + The shape of the result. If both `shape` and `axes` (see below) are + None, `shape` is ``x.shape``; if `shape` is None but `axes` is + not None, then `shape` is ``numpy.take(x.shape, axes, axis=0)``. + If ``shape[i] > x.shape[i]``, the ith dimension is padded with zeros. + If ``shape[i] < x.shape[i]``, the ith dimension is truncated to + length ``shape[i]``.
If any element of `shape` is -1, the size of the corresponding + dimension of `x` is used. + axes : int or array_like of ints or None, optional + Axes along which the DCT is computed. + The default is over all axes. + norm : {None, 'ortho'}, optional + Normalization mode (see Notes). Default is None. + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + + Returns + ------- + y : ndarray of real + The transformed input array. + + See Also + -------- + idctn : Inverse multidimensional DCT + + Notes + ----- + For full details of the DCT types and normalization modes, as well as + references, see `dct`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.fftpack import dctn, idctn + >>> rng = np.random.default_rng() + >>> y = rng.standard_normal((16, 16)) + >>> np.allclose(y, idctn(dctn(y, norm='ortho'), norm='ortho')) + True + + """ + shape = _good_shape(x, shape, axes) + return _pocketfft.dctn(x, type, shape, axes, norm, overwrite_x) + + +def idctn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False): + """ + Return multidimensional inverse Discrete Cosine Transform along the specified axes. + + Parameters + ---------- + x : array_like + The input array. + type : {1, 2, 3, 4}, optional + Type of the DCT (see Notes). Default type is 2. + shape : int or array_like of ints or None, optional + The shape of the result. If both `shape` and `axes` (see below) are + None, `shape` is ``x.shape``; if `shape` is None but `axes` is + not None, then `shape` is ``numpy.take(x.shape, axes, axis=0)``. + If ``shape[i] > x.shape[i]``, the ith dimension is padded with zeros. + If ``shape[i] < x.shape[i]``, the ith dimension is truncated to + length ``shape[i]``. + If any element of `shape` is -1, the size of the corresponding + dimension of `x` is used. + axes : int or array_like of ints or None, optional + Axes along which the IDCT is computed. + The default is over all axes. + norm : {None, 'ortho'}, optional + Normalization mode (see Notes). Default is None. + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + + Returns + ------- + y : ndarray of real + The transformed input array. + + See Also + -------- + dctn : multidimensional DCT + + Notes + ----- + For full details of the IDCT types and normalization modes, as well as + references, see `idct`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.fftpack import dctn, idctn + >>> rng = np.random.default_rng() + >>> y = rng.standard_normal((16, 16)) + >>> np.allclose(y, idctn(dctn(y, norm='ortho'), norm='ortho')) + True + + """ + type = _inverse_typemap[type] + shape = _good_shape(x, shape, axes) + return _pocketfft.dctn(x, type, shape, axes, norm, overwrite_x) + + +def dstn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False): + """ + Return multidimensional Discrete Sine Transform along the specified axes. + + Parameters + ---------- + x : array_like + The input array. + type : {1, 2, 3, 4}, optional + Type of the DST (see Notes). Default type is 2. + shape : int or array_like of ints or None, optional + The shape of the result. If both `shape` and `axes` (see below) are + None, `shape` is ``x.shape``; if `shape` is None but `axes` is + not None, then `shape` is ``numpy.take(x.shape, axes, axis=0)``. + If ``shape[i] > x.shape[i]``, the ith dimension is padded with zeros. + If ``shape[i] < x.shape[i]``, the ith dimension is truncated to + length ``shape[i]``.
If any element of `shape` is -1, the size of the corresponding + dimension of `x` is used. + axes : int or array_like of ints or None, optional + Axes along which the DST is computed. + The default is over all axes. + norm : {None, 'ortho'}, optional + Normalization mode (see Notes). Default is None. + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + + Returns + ------- + y : ndarray of real + The transformed input array. + + See Also + -------- + idstn : Inverse multidimensional DST + + Notes + ----- + For full details of the DST types and normalization modes, as well as + references, see `dst`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.fftpack import dstn, idstn + >>> rng = np.random.default_rng() + >>> y = rng.standard_normal((16, 16)) + >>> np.allclose(y, idstn(dstn(y, norm='ortho'), norm='ortho')) + True + + """ + shape = _good_shape(x, shape, axes) + return _pocketfft.dstn(x, type, shape, axes, norm, overwrite_x) + + +def idstn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False): + """ + Return multidimensional inverse Discrete Sine Transform along the specified axes. + + Parameters + ---------- + x : array_like + The input array. + type : {1, 2, 3, 4}, optional + Type of the DST (see Notes). Default type is 2. + shape : int or array_like of ints or None, optional + The shape of the result. If both `shape` and `axes` (see below) are + None, `shape` is ``x.shape``; if `shape` is None but `axes` is + not None, then `shape` is ``numpy.take(x.shape, axes, axis=0)``. + If ``shape[i] > x.shape[i]``, the ith dimension is padded with zeros. + If ``shape[i] < x.shape[i]``, the ith dimension is truncated to + length ``shape[i]``. + If any element of `shape` is -1, the size of the corresponding + dimension of `x` is used. + axes : int or array_like of ints or None, optional + Axes along which the IDST is computed. + The default is over all axes. + norm : {None, 'ortho'}, optional + Normalization mode (see Notes). Default is None. + overwrite_x : bool, optional + If True, the contents of `x` can be destroyed; the default is False. + + Returns + ------- + y : ndarray of real + The transformed input array. + + See Also + -------- + dstn : multidimensional DST + + Notes + ----- + For full details of the IDST types and normalization modes, as well as + references, see `idst`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.fftpack import dstn, idstn + >>> rng = np.random.default_rng() + >>> y = rng.standard_normal((16, 16)) + >>> np.allclose(y, idstn(dstn(y, norm='ortho'), norm='ortho')) + True + + """ + type = _inverse_typemap[type] + shape = _good_shape(x, shape, axes) + return _pocketfft.dstn(x, type, shape, axes, norm, overwrite_x) + + +def dct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False): + r""" + Return the Discrete Cosine Transform of arbitrary type sequence x. + + Parameters + ---------- + x : array_like + The input array. + type : {1, 2, 3, 4}, optional + Type of the DCT (see Notes). Default type is 2. + n : int, optional + Length of the transform. If ``n < x.shape[axis]``, `x` is + truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The + default results in ``n = x.shape[axis]``. + axis : int, optional + Axis along which the dct is computed; the default is over the + last axis (i.e., ``axis=-1``). + norm : {None, 'ortho'}, optional + Normalization mode (see Notes). Default is None.
+def dct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):
+    r"""
+    Return the Discrete Cosine Transform of arbitrary type sequence x.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    type : {1, 2, 3, 4}, optional
+        Type of the DCT (see Notes). Default type is 2.
+    n : int, optional
+        Length of the transform. If ``n < x.shape[axis]``, `x` is
+        truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The
+        default results in ``n = x.shape[axis]``.
+    axis : int, optional
+        Axis along which the dct is computed; the default is over the
+        last axis (i.e., ``axis=-1``).
+    norm : {None, 'ortho'}, optional
+        Normalization mode (see Notes). Default is None.
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+
+    Returns
+    -------
+    y : ndarray of real
+        The transformed input array.
+
+    See Also
+    --------
+    idct : Inverse DCT
+
+    Notes
+    -----
+    For a single dimension array ``x``, ``dct(x, norm='ortho')`` is equal to
+    MATLAB ``dct(x)``.
+
+    There are, theoretically, 8 types of the DCT; only the first 4 types are
+    implemented in SciPy. 'The' DCT generally refers to DCT type 2, and 'the'
+    Inverse DCT generally refers to DCT type 3.
+
+    **Type I**
+
+    There are several definitions of the DCT-I; we use the following
+    (for ``norm=None``)
+
+    .. math::
+
+       y_k = x_0 + (-1)^k x_{N-1} + 2 \sum_{n=1}^{N-2} x_n \cos\left(
+       \frac{\pi k n}{N-1} \right)
+
+    If ``norm='ortho'``, ``x[0]`` and ``x[N-1]`` are multiplied by a scaling
+    factor of :math:`\sqrt{2}`, and ``y[k]`` is multiplied by a scaling factor
+    ``f``
+
+    .. math::
+
+       f = \begin{cases}
+       \frac{1}{2}\sqrt{\frac{1}{N-1}} & \text{if }k=0\text{ or }N-1, \\
+       \frac{1}{2}\sqrt{\frac{2}{N-1}} & \text{otherwise} \end{cases}
+
+    .. versionadded:: 1.2.0
+       Orthonormalization in DCT-I.
+
+    .. note::
+       The DCT-I is only supported for input size > 1.
+
+    **Type II**
+
+    There are several definitions of the DCT-II; we use the following
+    (for ``norm=None``)
+
+    .. math::
+
+       y_k = 2 \sum_{n=0}^{N-1} x_n \cos\left(\frac{\pi k(2n+1)}{2N} \right)
+
+    If ``norm='ortho'``, ``y[k]`` is multiplied by a scaling factor ``f``
+
+    .. math::
+
+       f = \begin{cases}
+       \sqrt{\frac{1}{4N}} & \text{if }k=0, \\
+       \sqrt{\frac{1}{2N}} & \text{otherwise} \end{cases}
+
+    which makes the corresponding matrix of coefficients orthonormal
+    (``O @ O.T = np.eye(N)``).
+
+    **Type III**
+
+    There are several definitions; we use the following (for ``norm=None``)
+
+    .. math::
+
+       y_k = x_0 + 2 \sum_{n=1}^{N-1} x_n \cos\left(\frac{\pi(2k+1)n}{2N}\right)
+
+    or, for ``norm='ortho'``
+
+    .. math::
+
+       y_k = \frac{x_0}{\sqrt{N}} + \sqrt{\frac{2}{N}} \sum_{n=1}^{N-1} x_n
+       \cos\left(\frac{\pi(2k+1)n}{2N}\right)
+
+    The (unnormalized) DCT-III is the inverse of the (unnormalized) DCT-II, up
+    to a factor `2N`. The orthonormalized DCT-III is exactly the inverse of
+    the orthonormalized DCT-II.
+
+    **Type IV**
+
+    There are several definitions of the DCT-IV; we use the following
+    (for ``norm=None``)
+
+    .. math::
+
+       y_k = 2 \sum_{n=0}^{N-1} x_n \cos\left(\frac{\pi(2k+1)(2n+1)}{4N} \right)
+
+    If ``norm='ortho'``, ``y[k]`` is multiplied by a scaling factor ``f``
+
+    .. math::
+
+       f = \frac{1}{\sqrt{2N}}
+
+    .. versionadded:: 1.2.0
+       Support for DCT-IV.
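+
+    As a quick numerical check of the relationships above (a minimal
+    doctest; the input values are arbitrary): the unnormalized DCT-III
+    undoes the unnormalized DCT-II up to the factor ``2*N``, and the
+    ``norm='ortho'`` DCT-II matrix is orthonormal:
+
+    >>> import numpy as np
+    >>> from scipy.fftpack import dct
+    >>> x = np.arange(5.0)
+    >>> N = len(x)
+    >>> np.allclose(dct(dct(x, type=2), type=3) / (2 * N), x)
+    True
+    >>> O = dct(np.eye(N), norm='ortho', axis=0)
+    >>> np.allclose(O @ O.T, np.eye(N))
+    True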
+
+    References
+    ----------
+    .. [1] 'A Fast Cosine Transform in One and Two Dimensions', by J.
+           Makhoul, `IEEE Transactions on Acoustics, Speech and Signal
+           Processing` vol. 28(1), pp. 27-34,
+           :doi:`10.1109/TASSP.1980.1163351` (1980).
+    .. [2] Wikipedia, "Discrete cosine transform",
+           https://en.wikipedia.org/wiki/Discrete_cosine_transform
+
+    Examples
+    --------
+    The Type 1 DCT is equivalent to the FFT (though faster) for real,
+    even-symmetrical inputs. The output is also real and even-symmetrical.
+    Half of the FFT input is used to generate half of the FFT output:
+
+    >>> from scipy.fftpack import fft, dct
+    >>> import numpy as np
+    >>> fft(np.array([4., 3., 5., 10., 5., 3.])).real
+    array([ 30., -8., 6., -2., 6., -8.])
+    >>> dct(np.array([4., 3., 5., 10.]), 1)
+    array([ 30., -8., 6., -2.])
+
+    """
+    return _pocketfft.dct(x, type, n, axis, norm, overwrite_x)
+
+
+def idct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):
+    """
+    Return the Inverse Discrete Cosine Transform of an arbitrary type sequence.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    type : {1, 2, 3, 4}, optional
+        Type of the DCT (see Notes). Default type is 2.
+    n : int, optional
+        Length of the transform. If ``n < x.shape[axis]``, `x` is
+        truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The
+        default results in ``n = x.shape[axis]``.
+    axis : int, optional
+        Axis along which the idct is computed; the default is over the
+        last axis (i.e., ``axis=-1``).
+    norm : {None, 'ortho'}, optional
+        Normalization mode (see Notes). Default is None.
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+
+    Returns
+    -------
+    idct : ndarray of real
+        The transformed input array.
+
+    See Also
+    --------
+    dct : Forward DCT
+
+    Notes
+    -----
+    For a single dimension array `x`, ``idct(x, norm='ortho')`` is equal to
+    MATLAB ``idct(x)``.
+
+    'The' IDCT is the IDCT of type 2, which is the same as DCT of type 3.
+
+    IDCT of type 1 is the DCT of type 1, IDCT of type 2 is the DCT of type
+    3, and IDCT of type 3 is the DCT of type 2. IDCT of type 4 is the DCT
+    of type 4. For the definition of these types, see `dct`.
+
+    Examples
+    --------
+    The Type 1 DCT is equivalent to the DFT for real, even-symmetrical
+    inputs. The output is also real and even-symmetrical. Half of the IFFT
+    input is used to generate half of the IFFT output:
+
+    >>> from scipy.fftpack import ifft, idct
+    >>> import numpy as np
+    >>> ifft(np.array([ 30., -8., 6., -2., 6., -8.])).real
+    array([ 4., 3., 5., 10., 5., 3.])
+    >>> idct(np.array([ 30., -8., 6., -2.]), 1) / 6
+    array([ 4., 3., 5., 10.])
+
+    """
+    type = _inverse_typemap[type]
+    return _pocketfft.dct(x, type, n, axis, norm, overwrite_x)
+
+
+def dst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):
+    r"""
+    Return the Discrete Sine Transform of arbitrary type sequence x.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    type : {1, 2, 3, 4}, optional
+        Type of the DST (see Notes). Default type is 2.
+    n : int, optional
+        Length of the transform. If ``n < x.shape[axis]``, `x` is
+        truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The
+        default results in ``n = x.shape[axis]``.
+    axis : int, optional
+        Axis along which the dst is computed; the default is over the
+        last axis (i.e., ``axis=-1``).
+    norm : {None, 'ortho'}, optional
+        Normalization mode (see Notes). Default is None.
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+
+    Returns
+    -------
+    dst : ndarray of reals
+        The transformed input array.
+
+    See Also
+    --------
+    idst : Inverse DST
+
+    Notes
+    -----
+    For a single dimension array ``x``, there are, theoretically, 8 types of
+    the DST for different combinations of even/odd boundary conditions and
+    boundary offsets [1]_; only the first 4 types are implemented in SciPy.
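+
+    As a quick round-trip check (a minimal doctest; the values are
+    arbitrary): with ``norm='ortho'`` the default DST-II is inverted exactly
+    by `idst`, and the unnormalized DST-I inverts itself up to the factor
+    ``2*(N+1)`` noted under Type I below:
+
+    >>> import numpy as np
+    >>> from scipy.fftpack import dst, idst
+    >>> x = np.arange(5.0)
+    >>> np.allclose(idst(dst(x, norm='ortho'), norm='ortho'), x)
+    True
+    >>> np.allclose(dst(dst(x, type=1), type=1) / (2 * (len(x) + 1)), x)
+    True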
+
+    **Type I**
+
+    There are several definitions of the DST-I; we use the following
+    for ``norm=None``. DST-I assumes the input is odd around `n=-1` and
+    `n=N`.
+
+    .. math::
+
+        y_k = 2 \sum_{n=0}^{N-1} x_n \sin\left(\frac{\pi(k+1)(n+1)}{N+1}\right)
+
+    Note that the DST-I is only supported for input size > 1.
+    The (unnormalized) DST-I is its own inverse, up to a factor `2(N+1)`.
+    The orthonormalized DST-I is exactly its own inverse.
+
+    **Type II**
+
+    There are several definitions of the DST-II; we use the following for
+    ``norm=None``. DST-II assumes the input is odd around `n=-1/2` and
+    `n=N-1/2`; the output is odd around :math:`k=-1` and even around `k=N-1`
+
+    .. math::
+
+        y_k = 2 \sum_{n=0}^{N-1} x_n \sin\left(\frac{\pi(k+1)(2n+1)}{2N}\right)
+
+    If ``norm='ortho'``, ``y[k]`` is multiplied by a scaling factor ``f``
+
+    .. math::
+
+        f = \begin{cases}
+        \sqrt{\frac{1}{4N}} & \text{if }k = 0, \\
+        \sqrt{\frac{1}{2N}} & \text{otherwise} \end{cases}
+
+    **Type III**
+
+    There are several definitions of the DST-III; we use the following (for
+    ``norm=None``). DST-III assumes the input is odd around `n=-1` and even
+    around `n=N-1`
+
+    .. math::
+
+        y_k = (-1)^k x_{N-1} + 2 \sum_{n=0}^{N-2} x_n \sin\left(
+        \frac{\pi(2k+1)(n+1)}{2N}\right)
+
+    The (unnormalized) DST-III is the inverse of the (unnormalized) DST-II, up
+    to a factor `2N`. The orthonormalized DST-III is exactly the inverse of the
+    orthonormalized DST-II.
+
+    .. versionadded:: 0.11.0
+
+    **Type IV**
+
+    There are several definitions of the DST-IV; we use the following (for
+    ``norm=None``). DST-IV assumes the input is odd around `n=-0.5` and even
+    around `n=N-0.5`
+
+    .. math::
+
+        y_k = 2 \sum_{n=0}^{N-1} x_n \sin\left(\frac{\pi(2k+1)(2n+1)}{4N}\right)
+
+    The (unnormalized) DST-IV is its own inverse, up to a factor `2N`. The
+    orthonormalized DST-IV is exactly its own inverse.
+
+    .. versionadded:: 1.2.0
+       Support for DST-IV.
+
+    References
+    ----------
+    .. [1] Wikipedia, "Discrete sine transform",
+           https://en.wikipedia.org/wiki/Discrete_sine_transform
+
+    """
+    return _pocketfft.dst(x, type, n, axis, norm, overwrite_x)
+
+
+def idst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):
+    """
+    Return the Inverse Discrete Sine Transform of an arbitrary type sequence.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    type : {1, 2, 3, 4}, optional
+        Type of the DST (see Notes). Default type is 2.
+    n : int, optional
+        Length of the transform. If ``n < x.shape[axis]``, `x` is
+        truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The
+        default results in ``n = x.shape[axis]``.
+    axis : int, optional
+        Axis along which the idst is computed; the default is over the
+        last axis (i.e., ``axis=-1``).
+    norm : {None, 'ortho'}, optional
+        Normalization mode (see Notes). Default is None.
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+
+    Returns
+    -------
+    idst : ndarray of real
+        The transformed input array.
+
+    See Also
+    --------
+    dst : Forward DST
+
+    Notes
+    -----
+    'The' IDST is the IDST of type 2, which is the same as DST of type 3.
+
+    IDST of type 1 is the DST of type 1, IDST of type 2 is the DST of type
+    3, and IDST of type 3 is the DST of type 2. For the definition of these
+    types, see `dst`.
+
+    ..
versionadded:: 0.11.0 + + """ + type = _inverse_typemap[type] + return _pocketfft.dst(x, type, n, axis, norm, overwrite_x) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/basic.py b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/basic.py new file mode 100644 index 0000000000000000000000000000000000000000..553f456fe1561c28928ecc4ebe2238459cc60443 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/basic.py @@ -0,0 +1,20 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.fftpack` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'fft','ifft','fftn','ifftn','rfft','irfft', + 'fft2','ifft2' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="fftpack", module="basic", + private_modules=["_basic"], all=__all__, + attribute=name) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/convolve.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/convolve.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..a52eeb49d01317365bd80950faefd7117d9e1fec Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/convolve.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/helper.py b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/helper.py new file mode 100644 index 0000000000000000000000000000000000000000..fcc7000c215f8a7605a2a59b5767b27b2fcd969d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/helper.py @@ -0,0 +1,19 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.fftpack` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'fftshift', 'ifftshift', 'fftfreq', 'rfftfreq', 'next_fast_len' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="fftpack", module="helper", + private_modules=["_helper"], all=__all__, + attribute=name) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/pseudo_diffs.py b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/pseudo_diffs.py new file mode 100644 index 0000000000000000000000000000000000000000..07a245cdc41edfc4904b8d6e5d5cd4b76a4f3328 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/pseudo_diffs.py @@ -0,0 +1,22 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.fftpack` namespace for importing the functions +# included below. 
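+#
+# A minimal sketch of the intended behavior (illustrative doctest; assumes
+# SciPy's private deprecation helper keeps its current semantics): accessing
+# an attribute through this module still resolves, but emits a
+# DeprecationWarning pointing at the public `scipy.fftpack` namespace.
+#
+#   >>> import warnings
+#   >>> from scipy.fftpack import pseudo_diffs
+#   >>> with warnings.catch_warnings(record=True) as w:
+#   ...     warnings.simplefilter("always")
+#   ...     func = pseudo_diffs.diff
+#   >>> any(issubclass(i.category, DeprecationWarning) for i in w)
+#   True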
+ +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'diff', + 'tilbert', 'itilbert', 'hilbert', 'ihilbert', + 'cs_diff', 'cc_diff', 'sc_diff', 'ss_diff', + 'shift', 'iscomplexobj', 'convolve' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="fftpack", module="pseudo_diffs", + private_modules=["_pseudo_diffs"], all=__all__, + attribute=name) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/realtransforms.py b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/realtransforms.py new file mode 100644 index 0000000000000000000000000000000000000000..9a392198fccf213bc988a79058bd69515e39f510 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/realtransforms.py @@ -0,0 +1,19 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.fftpack` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="fftpack", module="realtransforms", + private_modules=["_realtransforms"], all=__all__, + attribute=name) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/__init__.py b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4b87232848b0b424e5caa7f931415ab0abc04441 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_basic.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_basic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a67e37551352562d2eb731bd144ce2629f5bbba Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_basic.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_helper.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_helper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..250d0bda5ae5a988db2083766149fa4cf5960234 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_helper.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_import.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_import.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a439e268d3c8a82a2a6cdbed1544b06ab634b758 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_import.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_pseudo_diffs.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_pseudo_diffs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4db45ae8284b881f4f5a91e7a12001e82fc835f6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_pseudo_diffs.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_real_transforms.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_real_transforms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3859a543c08d2ec5549f194b2f8eab1dbab899f7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_real_transforms.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/test_basic.py b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/test_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..a7c4b1de867fb4eadc72e17e2e70a02c9ba190a5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/test_basic.py @@ -0,0 +1,873 @@ +# Created by Pearu Peterson, September 2002 + +from numpy.testing import (assert_, assert_equal, assert_array_almost_equal, + assert_array_almost_equal_nulp, assert_array_less) +import pytest +from pytest import raises as assert_raises +from scipy.fftpack import ifft, fft, fftn, ifftn, rfft, irfft, fft2 + +from numpy import (arange, array, asarray, zeros, dot, exp, pi, + swapaxes, double, cdouble) +import numpy as np +import numpy.fft +from numpy.random import rand + +# "large" composite numbers supported by FFTPACK +LARGE_COMPOSITE_SIZES = [ + 2**13, + 2**5 * 3**5, + 2**3 * 3**3 * 5**2, +] +SMALL_COMPOSITE_SIZES = [ + 2, + 2*3*5, + 2*2*3*3, +] +# prime +LARGE_PRIME_SIZES = [ + 2011 +] +SMALL_PRIME_SIZES = [ + 29 +] + + +def _assert_close_in_norm(x, y, rtol, size, rdt): + # helper function for testing + err_msg = f"size: {size} rdt: {rdt}" + assert_array_less(np.linalg.norm(x - y), rtol*np.linalg.norm(x), err_msg) + + +def random(size): + return rand(*size) + + +def direct_dft(x): + x = asarray(x) + n = len(x) + y = zeros(n, dtype=cdouble) + w = -arange(n)*(2j*pi/n) + for i in range(n): + y[i] = dot(exp(i*w), x) + return y + + +def direct_idft(x): + x = asarray(x) + n = len(x) + y = zeros(n, dtype=cdouble) + w = arange(n)*(2j*pi/n) + for i in range(n): + y[i] = dot(exp(i*w), x)/n + return y + + +def direct_dftn(x): + x = asarray(x) + for axis in range(len(x.shape)): + x = fft(x, axis=axis) + return x + + +def direct_idftn(x): + x = asarray(x) + for axis in range(len(x.shape)): + x = ifft(x, axis=axis) + return x + + +def direct_rdft(x): + x = asarray(x) + n = len(x) + w = -arange(n)*(2j*pi/n) + r = zeros(n, dtype=double) + for i in range(n//2+1): + y = dot(exp(i*w), x) + if i: + r[2*i-1] = y.real + if 2*i < n: + r[2*i] = y.imag + else: + r[0] = y.real + return r + + +def direct_irdft(x): + x = asarray(x) + n = len(x) + x1 = zeros(n, dtype=cdouble) + for i in range(n//2+1): + if i: + if 2*i < n: + x1[i] = x[2*i-1] + 1j*x[2*i] + x1[n-i] = x[2*i-1] - 1j*x[2*i] + else: + x1[i] = x[2*i-1] + else: + x1[0] = x[0] + return direct_idft(x1).real + + +class _TestFFTBase: + def setup_method(self): + self.cdt = None + self.rdt = None + np.random.seed(1234) + + def 
test_definition(self): + x = np.array([1,2,3,4+1j,1,2,3,4+2j], dtype=self.cdt) + y = fft(x) + assert_equal(y.dtype, self.cdt) + y1 = direct_dft(x) + assert_array_almost_equal(y,y1) + x = np.array([1,2,3,4+0j,5], dtype=self.cdt) + assert_array_almost_equal(fft(x),direct_dft(x)) + + def test_n_argument_real(self): + x1 = np.array([1,2,3,4], dtype=self.rdt) + x2 = np.array([1,2,3,4], dtype=self.rdt) + y = fft([x1,x2],n=4) + assert_equal(y.dtype, self.cdt) + assert_equal(y.shape,(2,4)) + assert_array_almost_equal(y[0],direct_dft(x1)) + assert_array_almost_equal(y[1],direct_dft(x2)) + + def _test_n_argument_complex(self): + x1 = np.array([1,2,3,4+1j], dtype=self.cdt) + x2 = np.array([1,2,3,4+1j], dtype=self.cdt) + y = fft([x1,x2],n=4) + assert_equal(y.dtype, self.cdt) + assert_equal(y.shape,(2,4)) + assert_array_almost_equal(y[0],direct_dft(x1)) + assert_array_almost_equal(y[1],direct_dft(x2)) + + def test_invalid_sizes(self): + assert_raises(ValueError, fft, []) + assert_raises(ValueError, fft, [[1,1],[2,2]], -5) + + +class TestDoubleFFT(_TestFFTBase): + def setup_method(self): + self.cdt = np.complex128 + self.rdt = np.float64 + + +class TestSingleFFT(_TestFFTBase): + def setup_method(self): + self.cdt = np.complex64 + self.rdt = np.float32 + + reason = ("single-precision FFT implementation is partially disabled, " + "until accuracy issues with large prime powers are resolved") + + @pytest.mark.xfail(run=False, reason=reason) + def test_notice(self): + pass + + +class TestFloat16FFT: + + def test_1_argument_real(self): + x1 = np.array([1, 2, 3, 4], dtype=np.float16) + y = fft(x1, n=4) + assert_equal(y.dtype, np.complex64) + assert_equal(y.shape, (4, )) + assert_array_almost_equal(y, direct_dft(x1.astype(np.float32))) + + def test_n_argument_real(self): + x1 = np.array([1, 2, 3, 4], dtype=np.float16) + x2 = np.array([1, 2, 3, 4], dtype=np.float16) + y = fft([x1, x2], n=4) + assert_equal(y.dtype, np.complex64) + assert_equal(y.shape, (2, 4)) + assert_array_almost_equal(y[0], direct_dft(x1.astype(np.float32))) + assert_array_almost_equal(y[1], direct_dft(x2.astype(np.float32))) + + +class _TestIFFTBase: + def setup_method(self): + np.random.seed(1234) + + def test_definition(self): + x = np.array([1,2,3,4+1j,1,2,3,4+2j], self.cdt) + y = ifft(x) + y1 = direct_idft(x) + assert_equal(y.dtype, self.cdt) + assert_array_almost_equal(y,y1) + + x = np.array([1,2,3,4+0j,5], self.cdt) + assert_array_almost_equal(ifft(x),direct_idft(x)) + + def test_definition_real(self): + x = np.array([1,2,3,4,1,2,3,4], self.rdt) + y = ifft(x) + assert_equal(y.dtype, self.cdt) + y1 = direct_idft(x) + assert_array_almost_equal(y,y1) + + x = np.array([1,2,3,4,5], dtype=self.rdt) + assert_equal(y.dtype, self.cdt) + assert_array_almost_equal(ifft(x),direct_idft(x)) + + def test_random_complex(self): + for size in [1,51,111,100,200,64,128,256,1024]: + x = random([size]).astype(self.cdt) + x = random([size]).astype(self.cdt) + 1j*x + y1 = ifft(fft(x)) + y2 = fft(ifft(x)) + assert_equal(y1.dtype, self.cdt) + assert_equal(y2.dtype, self.cdt) + assert_array_almost_equal(y1, x) + assert_array_almost_equal(y2, x) + + def test_random_real(self): + for size in [1,51,111,100,200,64,128,256,1024]: + x = random([size]).astype(self.rdt) + y1 = ifft(fft(x)) + y2 = fft(ifft(x)) + assert_equal(y1.dtype, self.cdt) + assert_equal(y2.dtype, self.cdt) + assert_array_almost_equal(y1, x) + assert_array_almost_equal(y2, x) + + def test_size_accuracy(self): + # Sanity check for the accuracy for prime and non-prime sized inputs + if self.rdt == 
np.float32: + rtol = 1e-5 + elif self.rdt == np.float64: + rtol = 1e-10 + + for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES: + np.random.seed(1234) + x = np.random.rand(size).astype(self.rdt) + y = ifft(fft(x)) + _assert_close_in_norm(x, y, rtol, size, self.rdt) + y = fft(ifft(x)) + _assert_close_in_norm(x, y, rtol, size, self.rdt) + + x = (x + 1j*np.random.rand(size)).astype(self.cdt) + y = ifft(fft(x)) + _assert_close_in_norm(x, y, rtol, size, self.rdt) + y = fft(ifft(x)) + _assert_close_in_norm(x, y, rtol, size, self.rdt) + + def test_invalid_sizes(self): + assert_raises(ValueError, ifft, []) + assert_raises(ValueError, ifft, [[1,1],[2,2]], -5) + + +class TestDoubleIFFT(_TestIFFTBase): + def setup_method(self): + self.cdt = np.complex128 + self.rdt = np.float64 + + +class TestSingleIFFT(_TestIFFTBase): + def setup_method(self): + self.cdt = np.complex64 + self.rdt = np.float32 + + +class _TestRFFTBase: + def setup_method(self): + np.random.seed(1234) + + def test_definition(self): + for t in [[1, 2, 3, 4, 1, 2, 3, 4], [1, 2, 3, 4, 1, 2, 3, 4, 5]]: + x = np.array(t, dtype=self.rdt) + y = rfft(x) + y1 = direct_rdft(x) + assert_array_almost_equal(y,y1) + assert_equal(y.dtype, self.rdt) + + def test_invalid_sizes(self): + assert_raises(ValueError, rfft, []) + assert_raises(ValueError, rfft, [[1,1],[2,2]], -5) + + # See gh-5790 + class MockSeries: + def __init__(self, data): + self.data = np.asarray(data) + + def __getattr__(self, item): + try: + return getattr(self.data, item) + except AttributeError as e: + raise AttributeError("'MockSeries' object " + f"has no attribute '{item}'") from e + + def test_non_ndarray_with_dtype(self): + x = np.array([1., 2., 3., 4., 5.]) + xs = _TestRFFTBase.MockSeries(x) + + expected = [1, 2, 3, 4, 5] + rfft(xs) + + # Data should not have been overwritten + assert_equal(x, expected) + assert_equal(xs.data, expected) + + def test_complex_input(self): + assert_raises(TypeError, rfft, np.arange(4, dtype=np.complex64)) + + +class TestRFFTDouble(_TestRFFTBase): + def setup_method(self): + self.cdt = np.complex128 + self.rdt = np.float64 + + +class TestRFFTSingle(_TestRFFTBase): + def setup_method(self): + self.cdt = np.complex64 + self.rdt = np.float32 + + +class _TestIRFFTBase: + def setup_method(self): + np.random.seed(1234) + + def test_definition(self): + x1 = [1,2,3,4,1,2,3,4] + x1_1 = [1,2+3j,4+1j,2+3j,4,2-3j,4-1j,2-3j] + x2 = [1,2,3,4,1,2,3,4,5] + x2_1 = [1,2+3j,4+1j,2+3j,4+5j,4-5j,2-3j,4-1j,2-3j] + + def _test(x, xr): + y = irfft(np.array(x, dtype=self.rdt)) + y1 = direct_irdft(x) + assert_equal(y.dtype, self.rdt) + assert_array_almost_equal(y,y1, decimal=self.ndec) + assert_array_almost_equal(y,ifft(xr), decimal=self.ndec) + + _test(x1, x1_1) + _test(x2, x2_1) + + def test_random_real(self): + for size in [1,51,111,100,200,64,128,256,1024]: + x = random([size]).astype(self.rdt) + y1 = irfft(rfft(x)) + y2 = rfft(irfft(x)) + assert_equal(y1.dtype, self.rdt) + assert_equal(y2.dtype, self.rdt) + assert_array_almost_equal(y1, x, decimal=self.ndec, + err_msg="size=%d" % size) + assert_array_almost_equal(y2, x, decimal=self.ndec, + err_msg="size=%d" % size) + + def test_size_accuracy(self): + # Sanity check for the accuracy for prime and non-prime sized inputs + if self.rdt == np.float32: + rtol = 1e-5 + elif self.rdt == np.float64: + rtol = 1e-10 + + for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES: + np.random.seed(1234) + x = np.random.rand(size).astype(self.rdt) + y = irfft(rfft(x)) + _assert_close_in_norm(x, y, rtol, size, self.rdt) + y = 
rfft(irfft(x)) + _assert_close_in_norm(x, y, rtol, size, self.rdt) + + def test_invalid_sizes(self): + assert_raises(ValueError, irfft, []) + assert_raises(ValueError, irfft, [[1,1],[2,2]], -5) + + def test_complex_input(self): + assert_raises(TypeError, irfft, np.arange(4, dtype=np.complex64)) + + +# self.ndec is bogus; we should have a assert_array_approx_equal for number of +# significant digits + +class TestIRFFTDouble(_TestIRFFTBase): + def setup_method(self): + self.cdt = np.complex128 + self.rdt = np.float64 + self.ndec = 14 + + +class TestIRFFTSingle(_TestIRFFTBase): + def setup_method(self): + self.cdt = np.complex64 + self.rdt = np.float32 + self.ndec = 5 + + +class Testfft2: + def setup_method(self): + np.random.seed(1234) + + def test_regression_244(self): + """FFT returns wrong result with axes parameter.""" + # fftn (and hence fft2) used to break when both axes and shape were + # used + x = numpy.ones((4, 4, 2)) + y = fft2(x, shape=(8, 8), axes=(-3, -2)) + y_r = numpy.fft.fftn(x, s=(8, 8), axes=(-3, -2)) + assert_array_almost_equal(y, y_r) + + def test_invalid_sizes(self): + assert_raises(ValueError, fft2, [[]]) + assert_raises(ValueError, fft2, [[1, 1], [2, 2]], (4, -3)) + + +class TestFftnSingle: + def setup_method(self): + np.random.seed(1234) + + def test_definition(self): + x = [[1, 2, 3], + [4, 5, 6], + [7, 8, 9]] + y = fftn(np.array(x, np.float32)) + assert_(y.dtype == np.complex64, + msg="double precision output with single precision") + + y_r = np.array(fftn(x), np.complex64) + assert_array_almost_equal_nulp(y, y_r) + + @pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES) + def test_size_accuracy_small(self, size): + x = np.random.rand(size, size) + 1j*np.random.rand(size, size) + y1 = fftn(x.real.astype(np.float32)) + y2 = fftn(x.real.astype(np.float64)).astype(np.complex64) + + assert_equal(y1.dtype, np.complex64) + assert_array_almost_equal_nulp(y1, y2, 2000) + + @pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES) + def test_size_accuracy_large(self, size): + x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3) + y1 = fftn(x.real.astype(np.float32)) + y2 = fftn(x.real.astype(np.float64)).astype(np.complex64) + + assert_equal(y1.dtype, np.complex64) + assert_array_almost_equal_nulp(y1, y2, 2000) + + def test_definition_float16(self): + x = [[1, 2, 3], + [4, 5, 6], + [7, 8, 9]] + y = fftn(np.array(x, np.float16)) + assert_equal(y.dtype, np.complex64) + y_r = np.array(fftn(x), np.complex64) + assert_array_almost_equal_nulp(y, y_r) + + @pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES) + def test_float16_input_small(self, size): + x = np.random.rand(size, size) + 1j*np.random.rand(size, size) + y1 = fftn(x.real.astype(np.float16)) + y2 = fftn(x.real.astype(np.float64)).astype(np.complex64) + + assert_equal(y1.dtype, np.complex64) + assert_array_almost_equal_nulp(y1, y2, 5e5) + + @pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES) + def test_float16_input_large(self, size): + x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3) + y1 = fftn(x.real.astype(np.float16)) + y2 = fftn(x.real.astype(np.float64)).astype(np.complex64) + + assert_equal(y1.dtype, np.complex64) + assert_array_almost_equal_nulp(y1, y2, 2e6) + + +class TestFftn: + def setup_method(self): + np.random.seed(1234) + + def test_definition(self): + x = [[1, 2, 3], + [4, 5, 6], + [7, 8, 9]] + y = fftn(x) + assert_array_almost_equal(y, direct_dftn(x)) + + x = random((20, 26)) + 
assert_array_almost_equal(fftn(x), direct_dftn(x)) + + x = random((5, 4, 3, 20)) + assert_array_almost_equal(fftn(x), direct_dftn(x)) + + def test_axes_argument(self): + # plane == ji_plane, x== kji_space + plane1 = [[1, 2, 3], + [4, 5, 6], + [7, 8, 9]] + plane2 = [[10, 11, 12], + [13, 14, 15], + [16, 17, 18]] + plane3 = [[19, 20, 21], + [22, 23, 24], + [25, 26, 27]] + ki_plane1 = [[1, 2, 3], + [10, 11, 12], + [19, 20, 21]] + ki_plane2 = [[4, 5, 6], + [13, 14, 15], + [22, 23, 24]] + ki_plane3 = [[7, 8, 9], + [16, 17, 18], + [25, 26, 27]] + jk_plane1 = [[1, 10, 19], + [4, 13, 22], + [7, 16, 25]] + jk_plane2 = [[2, 11, 20], + [5, 14, 23], + [8, 17, 26]] + jk_plane3 = [[3, 12, 21], + [6, 15, 24], + [9, 18, 27]] + kj_plane1 = [[1, 4, 7], + [10, 13, 16], [19, 22, 25]] + kj_plane2 = [[2, 5, 8], + [11, 14, 17], [20, 23, 26]] + kj_plane3 = [[3, 6, 9], + [12, 15, 18], [21, 24, 27]] + ij_plane1 = [[1, 4, 7], + [2, 5, 8], + [3, 6, 9]] + ij_plane2 = [[10, 13, 16], + [11, 14, 17], + [12, 15, 18]] + ij_plane3 = [[19, 22, 25], + [20, 23, 26], + [21, 24, 27]] + ik_plane1 = [[1, 10, 19], + [2, 11, 20], + [3, 12, 21]] + ik_plane2 = [[4, 13, 22], + [5, 14, 23], + [6, 15, 24]] + ik_plane3 = [[7, 16, 25], + [8, 17, 26], + [9, 18, 27]] + ijk_space = [jk_plane1, jk_plane2, jk_plane3] + ikj_space = [kj_plane1, kj_plane2, kj_plane3] + jik_space = [ik_plane1, ik_plane2, ik_plane3] + jki_space = [ki_plane1, ki_plane2, ki_plane3] + kij_space = [ij_plane1, ij_plane2, ij_plane3] + x = array([plane1, plane2, plane3]) + + assert_array_almost_equal(fftn(x), + fftn(x, axes=(-3, -2, -1))) # kji_space + assert_array_almost_equal(fftn(x), fftn(x, axes=(0, 1, 2))) + assert_array_almost_equal(fftn(x, axes=(0, 2)), fftn(x, axes=(0, -1))) + y = fftn(x, axes=(2, 1, 0)) # ijk_space + assert_array_almost_equal(swapaxes(y, -1, -3), fftn(ijk_space)) + y = fftn(x, axes=(2, 0, 1)) # ikj_space + assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -1, -2), + fftn(ikj_space)) + y = fftn(x, axes=(1, 2, 0)) # jik_space + assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -3, -2), + fftn(jik_space)) + y = fftn(x, axes=(1, 0, 2)) # jki_space + assert_array_almost_equal(swapaxes(y, -2, -3), fftn(jki_space)) + y = fftn(x, axes=(0, 2, 1)) # kij_space + assert_array_almost_equal(swapaxes(y, -2, -1), fftn(kij_space)) + + y = fftn(x, axes=(-2, -1)) # ji_plane + assert_array_almost_equal(fftn(plane1), y[0]) + assert_array_almost_equal(fftn(plane2), y[1]) + assert_array_almost_equal(fftn(plane3), y[2]) + + y = fftn(x, axes=(1, 2)) # ji_plane + assert_array_almost_equal(fftn(plane1), y[0]) + assert_array_almost_equal(fftn(plane2), y[1]) + assert_array_almost_equal(fftn(plane3), y[2]) + + y = fftn(x, axes=(-3, -2)) # kj_plane + assert_array_almost_equal(fftn(x[:, :, 0]), y[:, :, 0]) + assert_array_almost_equal(fftn(x[:, :, 1]), y[:, :, 1]) + assert_array_almost_equal(fftn(x[:, :, 2]), y[:, :, 2]) + + y = fftn(x, axes=(-3, -1)) # ki_plane + assert_array_almost_equal(fftn(x[:, 0, :]), y[:, 0, :]) + assert_array_almost_equal(fftn(x[:, 1, :]), y[:, 1, :]) + assert_array_almost_equal(fftn(x[:, 2, :]), y[:, 2, :]) + + y = fftn(x, axes=(-1, -2)) # ij_plane + assert_array_almost_equal(fftn(ij_plane1), swapaxes(y[0], -2, -1)) + assert_array_almost_equal(fftn(ij_plane2), swapaxes(y[1], -2, -1)) + assert_array_almost_equal(fftn(ij_plane3), swapaxes(y[2], -2, -1)) + + y = fftn(x, axes=(-1, -3)) # ik_plane + assert_array_almost_equal(fftn(ik_plane1), + swapaxes(y[:, 0, :], -1, -2)) + assert_array_almost_equal(fftn(ik_plane2), + swapaxes(y[:, 1, :], -1, -2)) 
+ assert_array_almost_equal(fftn(ik_plane3), + swapaxes(y[:, 2, :], -1, -2)) + + y = fftn(x, axes=(-2, -3)) # jk_plane + assert_array_almost_equal(fftn(jk_plane1), + swapaxes(y[:, :, 0], -1, -2)) + assert_array_almost_equal(fftn(jk_plane2), + swapaxes(y[:, :, 1], -1, -2)) + assert_array_almost_equal(fftn(jk_plane3), + swapaxes(y[:, :, 2], -1, -2)) + + y = fftn(x, axes=(-1,)) # i_line + for i in range(3): + for j in range(3): + assert_array_almost_equal(fft(x[i, j, :]), y[i, j, :]) + y = fftn(x, axes=(-2,)) # j_line + for i in range(3): + for j in range(3): + assert_array_almost_equal(fft(x[i, :, j]), y[i, :, j]) + y = fftn(x, axes=(0,)) # k_line + for i in range(3): + for j in range(3): + assert_array_almost_equal(fft(x[:, i, j]), y[:, i, j]) + + y = fftn(x, axes=()) # point + assert_array_almost_equal(y, x) + + def test_shape_argument(self): + small_x = [[1, 2, 3], + [4, 5, 6]] + large_x1 = [[1, 2, 3, 0], + [4, 5, 6, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]] + + y = fftn(small_x, shape=(4, 4)) + assert_array_almost_equal(y, fftn(large_x1)) + + y = fftn(small_x, shape=(3, 4)) + assert_array_almost_equal(y, fftn(large_x1[:-1])) + + def test_shape_axes_argument(self): + small_x = [[1, 2, 3], + [4, 5, 6], + [7, 8, 9]] + large_x1 = array([[1, 2, 3, 0], + [4, 5, 6, 0], + [7, 8, 9, 0], + [0, 0, 0, 0]]) + y = fftn(small_x, shape=(4, 4), axes=(-2, -1)) + assert_array_almost_equal(y, fftn(large_x1)) + y = fftn(small_x, shape=(4, 4), axes=(-1, -2)) + + assert_array_almost_equal(y, swapaxes( + fftn(swapaxes(large_x1, -1, -2)), -1, -2)) + + def test_shape_axes_argument2(self): + # Change shape of the last axis + x = numpy.random.random((10, 5, 3, 7)) + y = fftn(x, axes=(-1,), shape=(8,)) + assert_array_almost_equal(y, fft(x, axis=-1, n=8)) + + # Change shape of an arbitrary axis which is not the last one + x = numpy.random.random((10, 5, 3, 7)) + y = fftn(x, axes=(-2,), shape=(8,)) + assert_array_almost_equal(y, fft(x, axis=-2, n=8)) + + # Change shape of axes: cf #244, where shape and axes were mixed up + x = numpy.random.random((4, 4, 2)) + y = fftn(x, axes=(-3, -2), shape=(8, 8)) + assert_array_almost_equal(y, + numpy.fft.fftn(x, axes=(-3, -2), s=(8, 8))) + + def test_shape_argument_more(self): + x = zeros((4, 4, 2)) + with assert_raises(ValueError, + match="when given, axes and shape arguments" + " have to be of the same length"): + fftn(x, shape=(8, 8, 2, 1)) + + def test_invalid_sizes(self): + with assert_raises(ValueError, + match="invalid number of data points" + r" \(\[1, 0\]\) specified"): + fftn([[]]) + + with assert_raises(ValueError, + match="invalid number of data points" + r" \(\[4, -3\]\) specified"): + fftn([[1, 1], [2, 2]], (4, -3)) + + +class TestIfftn: + dtype = None + cdtype = None + + def setup_method(self): + np.random.seed(1234) + + @pytest.mark.parametrize('dtype,cdtype,maxnlp', + [(np.float64, np.complex128, 2000), + (np.float32, np.complex64, 3500)]) + def test_definition(self, dtype, cdtype, maxnlp): + x = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]], dtype=dtype) + y = ifftn(x) + assert_equal(y.dtype, cdtype) + assert_array_almost_equal_nulp(y, direct_idftn(x), maxnlp) + + x = random((20, 26)) + assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp) + + x = random((5, 4, 3, 20)) + assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp) + + @pytest.mark.parametrize('maxnlp', [2000, 3500]) + @pytest.mark.parametrize('size', [1, 2, 51, 32, 64, 92]) + def test_random_complex(self, maxnlp, size): + x = random([size, size]) + 1j*random([size, size]) + 
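+        # The fftn/ifftn round trip should reproduce x to within `maxnlp`
+        # units in the last place (the looser bound covers single precision).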
assert_array_almost_equal_nulp(ifftn(fftn(x)), x, maxnlp) + assert_array_almost_equal_nulp(fftn(ifftn(x)), x, maxnlp) + + def test_invalid_sizes(self): + with assert_raises(ValueError, + match="invalid number of data points" + r" \(\[1, 0\]\) specified"): + ifftn([[]]) + + with assert_raises(ValueError, + match="invalid number of data points" + r" \(\[4, -3\]\) specified"): + ifftn([[1, 1], [2, 2]], (4, -3)) + + +class FakeArray: + def __init__(self, data): + self._data = data + self.__array_interface__ = data.__array_interface__ + + +class FakeArray2: + def __init__(self, data): + self._data = data + + def __array__(self, dtype=None, copy=None): + return self._data + + +class TestOverwrite: + """Check input overwrite behavior of the FFT functions.""" + + real_dtypes = (np.float32, np.float64) + dtypes = real_dtypes + (np.complex64, np.complex128) + fftsizes = [8, 16, 32] + + def _check(self, x, routine, fftsize, axis, overwrite_x): + x2 = x.copy() + for fake in [lambda x: x, FakeArray, FakeArray2]: + routine(fake(x2), fftsize, axis, overwrite_x=overwrite_x) + + sig = "{}({}{!r}, {!r}, axis={!r}, overwrite_x={!r})".format( + routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x) + if not overwrite_x: + assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig) + + def _check_1d(self, routine, dtype, shape, axis, overwritable_dtypes, + fftsize, overwrite_x): + np.random.seed(1234) + if np.issubdtype(dtype, np.complexfloating): + data = np.random.randn(*shape) + 1j*np.random.randn(*shape) + else: + data = np.random.randn(*shape) + data = data.astype(dtype) + + self._check(data, routine, fftsize, axis, + overwrite_x=overwrite_x) + + @pytest.mark.parametrize('dtype', dtypes) + @pytest.mark.parametrize('fftsize', fftsizes) + @pytest.mark.parametrize('overwrite_x', [True, False]) + @pytest.mark.parametrize('shape,axes', [((16,), -1), + ((16, 2), 0), + ((2, 16), 1)]) + def test_fft_ifft(self, dtype, fftsize, overwrite_x, shape, axes): + overwritable = (np.complex128, np.complex64) + self._check_1d(fft, dtype, shape, axes, overwritable, + fftsize, overwrite_x) + self._check_1d(ifft, dtype, shape, axes, overwritable, + fftsize, overwrite_x) + + @pytest.mark.parametrize('dtype', real_dtypes) + @pytest.mark.parametrize('fftsize', fftsizes) + @pytest.mark.parametrize('overwrite_x', [True, False]) + @pytest.mark.parametrize('shape,axes', [((16,), -1), + ((16, 2), 0), + ((2, 16), 1)]) + def test_rfft_irfft(self, dtype, fftsize, overwrite_x, shape, axes): + overwritable = self.real_dtypes + self._check_1d(irfft, dtype, shape, axes, overwritable, + fftsize, overwrite_x) + self._check_1d(rfft, dtype, shape, axes, overwritable, + fftsize, overwrite_x) + + def _check_nd_one(self, routine, dtype, shape, axes, overwritable_dtypes, + overwrite_x): + np.random.seed(1234) + if np.issubdtype(dtype, np.complexfloating): + data = np.random.randn(*shape) + 1j*np.random.randn(*shape) + else: + data = np.random.randn(*shape) + data = data.astype(dtype) + + def fftshape_iter(shp): + if len(shp) <= 0: + yield () + else: + for j in (shp[0]//2, shp[0], shp[0]*2): + for rest in fftshape_iter(shp[1:]): + yield (j,) + rest + + if axes is None: + part_shape = shape + else: + part_shape = tuple(np.take(shape, axes)) + + for fftshape in fftshape_iter(part_shape): + self._check(data, routine, fftshape, axes, + overwrite_x=overwrite_x) + if data.ndim > 1: + self._check(data.T, routine, fftshape, axes, + overwrite_x=overwrite_x) + + @pytest.mark.parametrize('dtype', dtypes) + @pytest.mark.parametrize('overwrite_x', 
[True, False]) + @pytest.mark.parametrize('shape,axes', [((16,), None), + ((16,), (0,)), + ((16, 2), (0,)), + ((2, 16), (1,)), + ((8, 16), None), + ((8, 16), (0, 1)), + ((8, 16, 2), (0, 1)), + ((8, 16, 2), (1, 2)), + ((8, 16, 2), (0,)), + ((8, 16, 2), (1,)), + ((8, 16, 2), (2,)), + ((8, 16, 2), None), + ((8, 16, 2), (0, 1, 2))]) + def test_fftn_ifftn(self, dtype, overwrite_x, shape, axes): + overwritable = (np.complex128, np.complex64) + self._check_nd_one(fftn, dtype, shape, axes, overwritable, + overwrite_x) + self._check_nd_one(ifftn, dtype, shape, axes, overwritable, + overwrite_x) + + +@pytest.mark.parametrize('func', [fftn, ifftn, fft2]) +def test_shape_axes_ndarray(func): + # Test fftn and ifftn work with NumPy arrays for shape and axes arguments + # Regression test for gh-13342 + a = np.random.rand(10, 10) + + expect = func(a, shape=(5, 5)) + actual = func(a, shape=np.array([5, 5])) + assert_equal(expect, actual) + + expect = func(a, axes=(-1,)) + actual = func(a, axes=np.array([-1,])) + assert_equal(expect, actual) + + expect = func(a, shape=(4, 7), axes=(1, 0)) + actual = func(a, shape=np.array([4, 7]), axes=np.array([1, 0])) + assert_equal(expect, actual) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/test_helper.py b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/test_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..5e7be04f3c0291502b50b101db82d299aadc7772 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/test_helper.py @@ -0,0 +1,54 @@ +# Created by Pearu Peterson, September 2002 + +__usage__ = """ +Build fftpack: + python setup_fftpack.py build +Run tests if scipy is installed: + python -c 'import scipy;scipy.fftpack.test()' +Run tests if fftpack is not installed: + python tests/test_helper.py [] +""" + +from numpy.testing import assert_array_almost_equal +from scipy.fftpack import fftshift, ifftshift, fftfreq, rfftfreq + +from numpy import pi, random + +class TestFFTShift: + + def test_definition(self): + x = [0,1,2,3,4,-4,-3,-2,-1] + y = [-4,-3,-2,-1,0,1,2,3,4] + assert_array_almost_equal(fftshift(x),y) + assert_array_almost_equal(ifftshift(y),x) + x = [0,1,2,3,4,-5,-4,-3,-2,-1] + y = [-5,-4,-3,-2,-1,0,1,2,3,4] + assert_array_almost_equal(fftshift(x),y) + assert_array_almost_equal(ifftshift(y),x) + + def test_inverse(self): + for n in [1,4,9,100,211]: + x = random.random((n,)) + assert_array_almost_equal(ifftshift(fftshift(x)),x) + + +class TestFFTFreq: + + def test_definition(self): + x = [0,1,2,3,4,-4,-3,-2,-1] + assert_array_almost_equal(9*fftfreq(9),x) + assert_array_almost_equal(9*pi*fftfreq(9,pi),x) + x = [0,1,2,3,4,-5,-4,-3,-2,-1] + assert_array_almost_equal(10*fftfreq(10),x) + assert_array_almost_equal(10*pi*fftfreq(10,pi),x) + + +class TestRFFTFreq: + + def test_definition(self): + x = [0,1,1,2,2,3,3,4,4] + assert_array_almost_equal(9*rfftfreq(9),x) + assert_array_almost_equal(9*pi*rfftfreq(9,pi),x) + x = [0,1,1,2,2,3,3,4,4,5] + assert_array_almost_equal(10*rfftfreq(10),x) + assert_array_almost_equal(10*pi*rfftfreq(10,pi),x) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/test_import.py b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/test_import.py new file mode 100644 index 0000000000000000000000000000000000000000..8a978d9651e54c3dd2b3ae5e15ac6635f08d8cc1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/test_import.py @@ -0,0 +1,31 @@ +"""Test possibility of patching fftpack with pyfftw. 
+ +No module source outside of scipy.fftpack should contain an import of +the form `from scipy.fftpack import ...`, so that a simple replacement +of scipy.fftpack by the corresponding fftw interface completely swaps +the two FFT implementations. + +Because this simply inspects source files, we only need to run the test +on one version of Python. +""" + + +from pathlib import Path +import re +import tokenize +from numpy.testing import assert_ +import scipy + +class TestFFTPackImport: + def test_fftpack_import(self): + base = Path(scipy.__file__).parent + regexp = r"\s*from.+\.fftpack import .*\n" + for path in base.rglob("*.py"): + if base / "fftpack" in path.parents: + continue + # use tokenize to auto-detect encoding on systems where no + # default encoding is defined (e.g., LANG='C') + with tokenize.open(str(path)) as file: + assert_(all(not re.fullmatch(regexp, line) + for line in file), + f"{path} contains an import from fftpack") diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/test_pseudo_diffs.py b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/test_pseudo_diffs.py new file mode 100644 index 0000000000000000000000000000000000000000..cec131caced4ccf9cf7c34255f7693769e2ebb12 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/test_pseudo_diffs.py @@ -0,0 +1,380 @@ +# Created by Pearu Peterson, September 2002 + +__usage__ = """ +Build fftpack: + python setup_fftpack.py build +Run tests if scipy is installed: + python -c 'import scipy;scipy.fftpack.test()' +Run tests if fftpack is not installed: + python tests/test_pseudo_diffs.py [] +""" + +from numpy.testing import (assert_equal, assert_almost_equal, + assert_array_almost_equal) +from scipy.fftpack import (diff, fft, ifft, tilbert, itilbert, hilbert, + ihilbert, shift, fftfreq, cs_diff, sc_diff, + ss_diff, cc_diff) + +import numpy as np +from numpy import arange, sin, cos, pi, exp, tanh, sum, sign +from numpy.random import random + + +def direct_diff(x,k=1,period=None): + fx = fft(x) + n = len(fx) + if period is None: + period = 2*pi + w = fftfreq(n)*2j*pi/period*n + if k < 0: + w = 1 / w**k + w[0] = 0.0 + else: + w = w**k + if n > 2000: + w[250:n-250] = 0.0 + return ifft(w*fx).real + + +def direct_tilbert(x,h=1,period=None): + fx = fft(x) + n = len(fx) + if period is None: + period = 2*pi + w = fftfreq(n)*h*2*pi/period*n + w[0] = 1 + w = 1j/tanh(w) + w[0] = 0j + return ifft(w*fx) + + +def direct_itilbert(x,h=1,period=None): + fx = fft(x) + n = len(fx) + if period is None: + period = 2*pi + w = fftfreq(n)*h*2*pi/period*n + w = -1j*tanh(w) + return ifft(w*fx) + + +def direct_hilbert(x): + fx = fft(x) + n = len(fx) + w = fftfreq(n)*n + w = 1j*sign(w) + return ifft(w*fx) + + +def direct_ihilbert(x): + return -direct_hilbert(x) + + +def direct_shift(x,a,period=None): + n = len(x) + if period is None: + k = fftfreq(n)*1j*n + else: + k = fftfreq(n)*2j*pi/period*n + return ifft(fft(x)*exp(k*a)).real + + +class TestDiff: + + def test_definition(self): + for n in [16,17,64,127,32]: + x = arange(n)*2*pi/n + assert_array_almost_equal(diff(sin(x)),direct_diff(sin(x))) + assert_array_almost_equal(diff(sin(x),2),direct_diff(sin(x),2)) + assert_array_almost_equal(diff(sin(x),3),direct_diff(sin(x),3)) + assert_array_almost_equal(diff(sin(x),4),direct_diff(sin(x),4)) + assert_array_almost_equal(diff(sin(x),5),direct_diff(sin(x),5)) + assert_array_almost_equal(diff(sin(2*x),3),direct_diff(sin(2*x),3)) + assert_array_almost_equal(diff(sin(2*x),4),direct_diff(sin(2*x),4)) + 
assert_array_almost_equal(diff(cos(x)),direct_diff(cos(x))) + assert_array_almost_equal(diff(cos(x),2),direct_diff(cos(x),2)) + assert_array_almost_equal(diff(cos(x),3),direct_diff(cos(x),3)) + assert_array_almost_equal(diff(cos(x),4),direct_diff(cos(x),4)) + assert_array_almost_equal(diff(cos(2*x)),direct_diff(cos(2*x))) + assert_array_almost_equal(diff(sin(x*n/8)),direct_diff(sin(x*n/8))) + assert_array_almost_equal(diff(cos(x*n/8)),direct_diff(cos(x*n/8))) + for k in range(5): + assert_array_almost_equal(diff(sin(4*x),k),direct_diff(sin(4*x),k)) + assert_array_almost_equal(diff(cos(4*x),k),direct_diff(cos(4*x),k)) + + def test_period(self): + for n in [17,64]: + x = arange(n)/float(n) + assert_array_almost_equal(diff(sin(2*pi*x),period=1), + 2*pi*cos(2*pi*x)) + assert_array_almost_equal(diff(sin(2*pi*x),3,period=1), + -(2*pi)**3*cos(2*pi*x)) + + def test_sin(self): + for n in [32,64,77]: + x = arange(n)*2*pi/n + assert_array_almost_equal(diff(sin(x)),cos(x)) + assert_array_almost_equal(diff(cos(x)),-sin(x)) + assert_array_almost_equal(diff(sin(x),2),-sin(x)) + assert_array_almost_equal(diff(sin(x),4),sin(x)) + assert_array_almost_equal(diff(sin(4*x)),4*cos(4*x)) + assert_array_almost_equal(diff(sin(sin(x))),cos(x)*cos(sin(x))) + + def test_expr(self): + for n in [64,77,100,128,256,512,1024,2048,4096,8192][:5]: + x = arange(n)*2*pi/n + f = sin(x)*cos(4*x)+exp(sin(3*x)) + df = cos(x)*cos(4*x)-4*sin(x)*sin(4*x)+3*cos(3*x)*exp(sin(3*x)) + ddf = -17*sin(x)*cos(4*x)-8*cos(x)*sin(4*x)\ + - 9*sin(3*x)*exp(sin(3*x))+9*cos(3*x)**2*exp(sin(3*x)) + d1 = diff(f) + assert_array_almost_equal(d1,df) + assert_array_almost_equal(diff(df),ddf) + assert_array_almost_equal(diff(f,2),ddf) + assert_array_almost_equal(diff(ddf,-1),df) + + def test_expr_large(self): + for n in [2048,4096]: + x = arange(n)*2*pi/n + f = sin(x)*cos(4*x)+exp(sin(3*x)) + df = cos(x)*cos(4*x)-4*sin(x)*sin(4*x)+3*cos(3*x)*exp(sin(3*x)) + ddf = -17*sin(x)*cos(4*x)-8*cos(x)*sin(4*x)\ + - 9*sin(3*x)*exp(sin(3*x))+9*cos(3*x)**2*exp(sin(3*x)) + assert_array_almost_equal(diff(f),df) + assert_array_almost_equal(diff(df),ddf) + assert_array_almost_equal(diff(ddf,-1),df) + assert_array_almost_equal(diff(f,2),ddf) + + def test_int(self): + n = 64 + x = arange(n)*2*pi/n + assert_array_almost_equal(diff(sin(x),-1),-cos(x)) + assert_array_almost_equal(diff(sin(x),-2),-sin(x)) + assert_array_almost_equal(diff(sin(x),-4),sin(x)) + assert_array_almost_equal(diff(2*cos(2*x),-1),sin(2*x)) + + def test_random_even(self): + for k in [0,2,4,6]: + for n in [60,32,64,56,55]: + f = random((n,)) + af = sum(f,axis=0)/n + f = f-af + # zeroing Nyquist mode: + f = diff(diff(f,1),-1) + assert_almost_equal(sum(f,axis=0),0.0) + assert_array_almost_equal(diff(diff(f,k),-k),f) + assert_array_almost_equal(diff(diff(f,-k),k),f) + + def test_random_odd(self): + for k in [0,1,2,3,4,5,6]: + for n in [33,65,55]: + f = random((n,)) + af = sum(f,axis=0)/n + f = f-af + assert_almost_equal(sum(f,axis=0),0.0) + assert_array_almost_equal(diff(diff(f,k),-k),f) + assert_array_almost_equal(diff(diff(f,-k),k),f) + + def test_zero_nyquist(self): + for k in [0,1,2,3,4,5,6]: + for n in [32,33,64,56,55]: + f = random((n,)) + af = sum(f,axis=0)/n + f = f-af + # zeroing Nyquist mode: + f = diff(diff(f,1),-1) + assert_almost_equal(sum(f,axis=0),0.0) + assert_array_almost_equal(diff(diff(f,k),-k),f) + assert_array_almost_equal(diff(diff(f,-k),k),f) + + +class TestTilbert: + + def test_definition(self): + for h in [0.1,0.5,1,5.5,10]: + for n in [16,17,64,127]: + x = arange(n)*2*pi/n + y = 
tilbert(sin(x),h) + y1 = direct_tilbert(sin(x),h) + assert_array_almost_equal(y,y1) + assert_array_almost_equal(tilbert(sin(x),h), + direct_tilbert(sin(x),h)) + assert_array_almost_equal(tilbert(sin(2*x),h), + direct_tilbert(sin(2*x),h)) + + def test_random_even(self): + for h in [0.1,0.5,1,5.5,10]: + for n in [32,64,56]: + f = random((n,)) + af = sum(f,axis=0)/n + f = f-af + assert_almost_equal(sum(f,axis=0),0.0) + assert_array_almost_equal(direct_tilbert(direct_itilbert(f,h),h),f) + + def test_random_odd(self): + for h in [0.1,0.5,1,5.5,10]: + for n in [33,65,55]: + f = random((n,)) + af = sum(f,axis=0)/n + f = f-af + assert_almost_equal(sum(f,axis=0),0.0) + assert_array_almost_equal(itilbert(tilbert(f,h),h),f) + assert_array_almost_equal(tilbert(itilbert(f,h),h),f) + + +class TestITilbert: + + def test_definition(self): + for h in [0.1,0.5,1,5.5,10]: + for n in [16,17,64,127]: + x = arange(n)*2*pi/n + y = itilbert(sin(x),h) + y1 = direct_itilbert(sin(x),h) + assert_array_almost_equal(y,y1) + assert_array_almost_equal(itilbert(sin(x),h), + direct_itilbert(sin(x),h)) + assert_array_almost_equal(itilbert(sin(2*x),h), + direct_itilbert(sin(2*x),h)) + + +class TestHilbert: + + def test_definition(self): + for n in [16,17,64,127]: + x = arange(n)*2*pi/n + y = hilbert(sin(x)) + y1 = direct_hilbert(sin(x)) + assert_array_almost_equal(y,y1) + assert_array_almost_equal(hilbert(sin(2*x)), + direct_hilbert(sin(2*x))) + + def test_tilbert_relation(self): + for n in [16,17,64,127]: + x = arange(n)*2*pi/n + f = sin(x)+cos(2*x)*sin(x) + y = hilbert(f) + y1 = direct_hilbert(f) + assert_array_almost_equal(y,y1) + y2 = tilbert(f,h=10) + assert_array_almost_equal(y,y2) + + def test_random_odd(self): + for n in [33,65,55]: + f = random((n,)) + af = sum(f,axis=0)/n + f = f-af + assert_almost_equal(sum(f,axis=0),0.0) + assert_array_almost_equal(ihilbert(hilbert(f)),f) + assert_array_almost_equal(hilbert(ihilbert(f)),f) + + def test_random_even(self): + for n in [32,64,56]: + f = random((n,)) + af = sum(f,axis=0)/n + f = f-af + # zeroing Nyquist mode: + f = diff(diff(f,1),-1) + assert_almost_equal(sum(f,axis=0),0.0) + assert_array_almost_equal(direct_hilbert(direct_ihilbert(f)),f) + assert_array_almost_equal(hilbert(ihilbert(f)),f) + + +class TestIHilbert: + + def test_definition(self): + for n in [16,17,64,127]: + x = arange(n)*2*pi/n + y = ihilbert(sin(x)) + y1 = direct_ihilbert(sin(x)) + assert_array_almost_equal(y,y1) + assert_array_almost_equal(ihilbert(sin(2*x)), + direct_ihilbert(sin(2*x))) + + def test_itilbert_relation(self): + for n in [16,17,64,127]: + x = arange(n)*2*pi/n + f = sin(x)+cos(2*x)*sin(x) + y = ihilbert(f) + y1 = direct_ihilbert(f) + assert_array_almost_equal(y,y1) + y2 = itilbert(f,h=10) + assert_array_almost_equal(y,y2) + + +class TestShift: + + def test_definition(self): + for n in [18,17,64,127,32,2048,256]: + x = arange(n)*2*pi/n + for a in [0.1,3]: + assert_array_almost_equal(shift(sin(x),a),direct_shift(sin(x),a)) + assert_array_almost_equal(shift(sin(x),a),sin(x+a)) + assert_array_almost_equal(shift(cos(x),a),cos(x+a)) + assert_array_almost_equal(shift(cos(2*x)+sin(x),a), + cos(2*(x+a))+sin(x+a)) + assert_array_almost_equal(shift(exp(sin(x)),a),exp(sin(x+a))) + assert_array_almost_equal(shift(sin(x),2*pi),sin(x)) + assert_array_almost_equal(shift(sin(x),pi),-sin(x)) + assert_array_almost_equal(shift(sin(x),pi/2),cos(x)) + + +class TestOverwrite: + """Check input overwrite behavior """ + + real_dtypes = (np.float32, np.float64) + dtypes = real_dtypes + (np.complex64, 
np.complex128) + + def _check(self, x, routine, *args, **kwargs): + x2 = x.copy() + routine(x2, *args, **kwargs) + sig = routine.__name__ + if args: + sig += repr(args) + if kwargs: + sig += repr(kwargs) + assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig) + + def _check_1d(self, routine, dtype, shape, *args, **kwargs): + np.random.seed(1234) + if np.issubdtype(dtype, np.complexfloating): + data = np.random.randn(*shape) + 1j*np.random.randn(*shape) + else: + data = np.random.randn(*shape) + data = data.astype(dtype) + self._check(data, routine, *args, **kwargs) + + def test_diff(self): + for dtype in self.dtypes: + self._check_1d(diff, dtype, (16,)) + + def test_tilbert(self): + for dtype in self.dtypes: + self._check_1d(tilbert, dtype, (16,), 1.6) + + def test_itilbert(self): + for dtype in self.dtypes: + self._check_1d(itilbert, dtype, (16,), 1.6) + + def test_hilbert(self): + for dtype in self.dtypes: + self._check_1d(hilbert, dtype, (16,)) + + def test_cs_diff(self): + for dtype in self.dtypes: + self._check_1d(cs_diff, dtype, (16,), 1.0, 4.0) + + def test_sc_diff(self): + for dtype in self.dtypes: + self._check_1d(sc_diff, dtype, (16,), 1.0, 4.0) + + def test_ss_diff(self): + for dtype in self.dtypes: + self._check_1d(ss_diff, dtype, (16,), 1.0, 4.0) + + def test_cc_diff(self): + for dtype in self.dtypes: + self._check_1d(cc_diff, dtype, (16,), 1.0, 4.0) + + def test_shift(self): + for dtype in self.dtypes: + self._check_1d(shift, dtype, (16,), 1.0) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/test_real_transforms.py b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/test_real_transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..6108d460c7864bdc5dd9425bddf93576fac5b39d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/fftpack/tests/test_real_transforms.py @@ -0,0 +1,815 @@ +from os.path import join, dirname + +import numpy as np +from numpy.testing import assert_array_almost_equal, assert_equal +import pytest +from pytest import raises as assert_raises + +from scipy.fftpack._realtransforms import ( + dct, idct, dst, idst, dctn, idctn, dstn, idstn) + +# Matlab reference data +MDATA = np.load(join(dirname(__file__), 'test.npz')) +X = [MDATA['x%d' % i] for i in range(8)] +Y = [MDATA['y%d' % i] for i in range(8)] + +# FFTW reference data: the data are organized as follows: +# * SIZES is an array containing all available sizes +# * for every type (1, 2, 3, 4) and every size, the array dct_type_size +# contains the output of the DCT applied to the input np.linspace(0, size-1, +# size) +FFTWDATA_DOUBLE = np.load(join(dirname(__file__), 'fftw_double_ref.npz')) +FFTWDATA_SINGLE = np.load(join(dirname(__file__), 'fftw_single_ref.npz')) +FFTWDATA_SIZES = FFTWDATA_DOUBLE['sizes'] + + +def fftw_dct_ref(type, size, dt): + x = np.linspace(0, size-1, size).astype(dt) + dt = np.result_type(np.float32, dt) + if dt == np.float64: + data = FFTWDATA_DOUBLE + elif dt == np.float32: + data = FFTWDATA_SINGLE + else: + raise ValueError() + y = (data['dct_%d_%d' % (type, size)]).astype(dt) + return x, y, dt + + +def fftw_dst_ref(type, size, dt): + x = np.linspace(0, size-1, size).astype(dt) + dt = np.result_type(np.float32, dt) + if dt == np.float64: + data = FFTWDATA_DOUBLE + elif dt == np.float32: + data = FFTWDATA_SINGLE + else: + raise ValueError() + y = (data['dst_%d_%d' % (type, size)]).astype(dt) + return x, y, dt + + +def dct_2d_ref(x, **kwargs): + """Calculate reference values for testing dct2.""" + x = 
np.array(x, copy=True) + for row in range(x.shape[0]): + x[row, :] = dct(x[row, :], **kwargs) + for col in range(x.shape[1]): + x[:, col] = dct(x[:, col], **kwargs) + return x + + +def idct_2d_ref(x, **kwargs): + """Calculate reference values for testing idct2.""" + x = np.array(x, copy=True) + for row in range(x.shape[0]): + x[row, :] = idct(x[row, :], **kwargs) + for col in range(x.shape[1]): + x[:, col] = idct(x[:, col], **kwargs) + return x + + +def dst_2d_ref(x, **kwargs): + """Calculate reference values for testing dst2.""" + x = np.array(x, copy=True) + for row in range(x.shape[0]): + x[row, :] = dst(x[row, :], **kwargs) + for col in range(x.shape[1]): + x[:, col] = dst(x[:, col], **kwargs) + return x + + +def idst_2d_ref(x, **kwargs): + """Calculate reference values for testing idst2.""" + x = np.array(x, copy=True) + for row in range(x.shape[0]): + x[row, :] = idst(x[row, :], **kwargs) + for col in range(x.shape[1]): + x[:, col] = idst(x[:, col], **kwargs) + return x + + +def naive_dct1(x, norm=None): + """Calculate textbook definition version of DCT-I.""" + x = np.array(x, copy=True) + N = len(x) + M = N-1 + y = np.zeros(N) + m0, m = 1, 2 + if norm == 'ortho': + m0 = np.sqrt(1.0/M) + m = np.sqrt(2.0/M) + for k in range(N): + for n in range(1, N-1): + y[k] += m*x[n]*np.cos(np.pi*n*k/M) + y[k] += m0 * x[0] + y[k] += m0 * x[N-1] * (1 if k % 2 == 0 else -1) + if norm == 'ortho': + y[0] *= 1/np.sqrt(2) + y[N-1] *= 1/np.sqrt(2) + return y + + +def naive_dst1(x, norm=None): + """Calculate textbook definition version of DST-I.""" + x = np.array(x, copy=True) + N = len(x) + M = N+1 + y = np.zeros(N) + for k in range(N): + for n in range(N): + y[k] += 2*x[n]*np.sin(np.pi*(n+1.0)*(k+1.0)/M) + if norm == 'ortho': + y *= np.sqrt(0.5/M) + return y + + +def naive_dct4(x, norm=None): + """Calculate textbook definition version of DCT-IV.""" + x = np.array(x, copy=True) + N = len(x) + y = np.zeros(N) + for k in range(N): + for n in range(N): + y[k] += x[n]*np.cos(np.pi*(n+0.5)*(k+0.5)/(N)) + if norm == 'ortho': + y *= np.sqrt(2.0/N) + else: + y *= 2 + return y + + +def naive_dst4(x, norm=None): + """Calculate textbook definition version of DST-IV.""" + x = np.array(x, copy=True) + N = len(x) + y = np.zeros(N) + for k in range(N): + for n in range(N): + y[k] += x[n]*np.sin(np.pi*(n+0.5)*(k+0.5)/(N)) + if norm == 'ortho': + y *= np.sqrt(2.0/N) + else: + y *= 2 + return y + + +class TestComplex: + def test_dct_complex64(self): + y = dct(1j*np.arange(5, dtype=np.complex64)) + x = 1j*dct(np.arange(5)) + assert_array_almost_equal(x, y) + + def test_dct_complex(self): + y = dct(np.arange(5)*1j) + x = 1j*dct(np.arange(5)) + assert_array_almost_equal(x, y) + + def test_idct_complex(self): + y = idct(np.arange(5)*1j) + x = 1j*idct(np.arange(5)) + assert_array_almost_equal(x, y) + + def test_dst_complex64(self): + y = dst(np.arange(5, dtype=np.complex64)*1j) + x = 1j*dst(np.arange(5)) + assert_array_almost_equal(x, y) + + def test_dst_complex(self): + y = dst(np.arange(5)*1j) + x = 1j*dst(np.arange(5)) + assert_array_almost_equal(x, y) + + def test_idst_complex(self): + y = idst(np.arange(5)*1j) + x = 1j*idst(np.arange(5)) + assert_array_almost_equal(x, y) + + +class _TestDCTBase: + def setup_method(self): + self.rdt = None + self.dec = 14 + self.type = None + + def test_definition(self): + for i in FFTWDATA_SIZES: + x, yr, dt = fftw_dct_ref(self.type, i, self.rdt) + y = dct(x, type=self.type) + assert_equal(y.dtype, dt) + # XXX: we divide by np.max(y) because the tests fail otherwise. 
We + # should really use something like assert_array_approx_equal. The + # difference is due to fftw using a better algorithm w.r.t error + # propagation compared to the ones from fftpack. + assert_array_almost_equal(y / np.max(y), yr / np.max(y), decimal=self.dec, + err_msg="Size %d failed" % i) + + def test_axis(self): + nt = 2 + for i in [7, 8, 9, 16, 32, 64]: + x = np.random.randn(nt, i) + y = dct(x, type=self.type) + for j in range(nt): + assert_array_almost_equal(y[j], dct(x[j], type=self.type), + decimal=self.dec) + + x = x.T + y = dct(x, axis=0, type=self.type) + for j in range(nt): + assert_array_almost_equal(y[:,j], dct(x[:,j], type=self.type), + decimal=self.dec) + + +class _TestDCTIBase(_TestDCTBase): + def test_definition_ortho(self): + # Test orthonormal mode. + dt = np.result_type(np.float32, self.rdt) + for xr in X: + x = np.array(xr, dtype=self.rdt) + y = dct(x, norm='ortho', type=1) + y2 = naive_dct1(x, norm='ortho') + assert_equal(y.dtype, dt) + assert_array_almost_equal(y / np.max(y), y2 / np.max(y), decimal=self.dec) + +class _TestDCTIIBase(_TestDCTBase): + def test_definition_matlab(self): + # Test correspondence with MATLAB (orthonormal mode). + dt = np.result_type(np.float32, self.rdt) + for xr, yr in zip(X, Y): + x = np.array(xr, dtype=dt) + y = dct(x, norm="ortho", type=2) + assert_equal(y.dtype, dt) + assert_array_almost_equal(y, yr, decimal=self.dec) + + +class _TestDCTIIIBase(_TestDCTBase): + def test_definition_ortho(self): + # Test orthonormal mode. + dt = np.result_type(np.float32, self.rdt) + for xr in X: + x = np.array(xr, dtype=self.rdt) + y = dct(x, norm='ortho', type=2) + xi = dct(y, norm="ortho", type=3) + assert_equal(xi.dtype, dt) + assert_array_almost_equal(xi, x, decimal=self.dec) + +class _TestDCTIVBase(_TestDCTBase): + def test_definition_ortho(self): + # Test orthonormal mode.
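# A minimal standalone sketch (illustrative only, not part of the original
# suite) of the property exercised here: the orthonormalized fftpack DCT-IV
# should agree with the textbook definition in naive_dct4 above:
#
#     x = np.linspace(0, 1, 16)
#     np.testing.assert_allclose(dct(x, type=4, norm='ortho'),
#                                naive_dct4(x, norm='ortho'), rtol=1e-10)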
+ dt = np.result_type(np.float32, self.rdt) + for xr in X: + x = np.array(xr, dtype=self.rdt) + y = dct(x, norm='ortho', type=4) + y2 = naive_dct4(x, norm='ortho') + assert_equal(y.dtype, dt) + assert_array_almost_equal(y / np.max(y), y2 / np.max(y), decimal=self.dec) + + +class TestDCTIDouble(_TestDCTIBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 10 + self.type = 1 + + +class TestDCTIFloat(_TestDCTIBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 4 + self.type = 1 + + +class TestDCTIInt(_TestDCTIBase): + def setup_method(self): + self.rdt = int + self.dec = 5 + self.type = 1 + + +class TestDCTIIDouble(_TestDCTIIBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 10 + self.type = 2 + + +class TestDCTIIFloat(_TestDCTIIBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 5 + self.type = 2 + + +class TestDCTIIInt(_TestDCTIIBase): + def setup_method(self): + self.rdt = int + self.dec = 5 + self.type = 2 + + +class TestDCTIIIDouble(_TestDCTIIIBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 14 + self.type = 3 + + +class TestDCTIIIFloat(_TestDCTIIIBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 5 + self.type = 3 + + +class TestDCTIIIInt(_TestDCTIIIBase): + def setup_method(self): + self.rdt = int + self.dec = 5 + self.type = 3 + + +class TestDCTIVDouble(_TestDCTIVBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 12 + self.type = 4 + + +class TestDCTIVFloat(_TestDCTIVBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 5 + self.type = 4 + + +class TestDCTIVInt(_TestDCTIVBase): + def setup_method(self): + self.rdt = int + self.dec = 5 + self.type = 4 + + +class _TestIDCTBase: + def setup_method(self): + self.rdt = None + self.dec = 14 + self.type = None + + def test_definition(self): + for i in FFTWDATA_SIZES: + xr, yr, dt = fftw_dct_ref(self.type, i, self.rdt) + x = idct(yr, type=self.type) + if self.type == 1: + x /= 2 * (i-1) + else: + x /= 2 * i + assert_equal(x.dtype, dt) + # XXX: we divide by np.max(x) because the tests fail otherwise. We + # should really use something like assert_array_approx_equal. The + # difference is due to fftw using a better algorithm w.r.t error + # propagation compared to the ones from fftpack.
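# The divisions by 2*(i-1) and 2*i above reflect that fftpack's inverse
# transforms are unnormalized: with norm=None, idct(dct(x, type=2), type=2)
# returns 2*N*x, while the DCT-I round trip scales by 2*(N-1). A quick
# illustrative sketch (not part of the original suite):
#
#     x = np.random.randn(16)
#     np.testing.assert_allclose(idct(dct(x, type=2), type=2) / (2 * 16), x)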
+ assert_array_almost_equal(x / np.max(x), xr / np.max(x), decimal=self.dec, + err_msg="Size %d failed" % i) + + +class TestIDCTIDouble(_TestIDCTBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 10 + self.type = 1 + + +class TestIDCTIFloat(_TestIDCTBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 4 + self.type = 1 + + +class TestIDCTIInt(_TestIDCTBase): + def setup_method(self): + self.rdt = int + self.dec = 4 + self.type = 1 + + +class TestIDCTIIDouble(_TestIDCTBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 10 + self.type = 2 + + +class TestIDCTIIFloat(_TestIDCTBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 5 + self.type = 2 + + +class TestIDCTIIInt(_TestIDCTBase): + def setup_method(self): + self.rdt = int + self.dec = 5 + self.type = 2 + + +class TestIDCTIIIDouble(_TestIDCTBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 14 + self.type = 3 + + +class TestIDCTIIIFloat(_TestIDCTBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 5 + self.type = 3 + + +class TestIDCTIIIInt(_TestIDCTBase): + def setup_method(self): + self.rdt = int + self.dec = 5 + self.type = 3 + +class TestIDCTIVDouble(_TestIDCTBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 12 + self.type = 4 + + +class TestIDCTIVFloat(_TestIDCTBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 5 + self.type = 4 + + +class TestIDCTIVInt(_TestIDCTBase): + def setup_method(self): + self.rdt = int + self.dec = 5 + self.type = 4 + +class _TestDSTBase: + def setup_method(self): + self.rdt = None # dtype + self.dec = None # number of decimals to match + self.type = None # dst type + + def test_definition(self): + for i in FFTWDATA_SIZES: + xr, yr, dt = fftw_dst_ref(self.type, i, self.rdt) + y = dst(xr, type=self.type) + assert_equal(y.dtype, dt) + # XXX: we divide by np.max(y) because the tests fail otherwise. We + # should really use something like assert_array_approx_equal. The + # difference is due to fftw using a better algorithm w.r.t error + # propagation compared to the ones from fftpack. + assert_array_almost_equal(y / np.max(y), yr / np.max(y), decimal=self.dec, + err_msg="Size %d failed" % i) + + +class _TestDSTIBase(_TestDSTBase): + def test_definition_ortho(self): + # Test orthonormal mode. + dt = np.result_type(np.float32, self.rdt) + for xr in X: + x = np.array(xr, dtype=self.rdt) + y = dst(x, norm='ortho', type=1) + y2 = naive_dst1(x, norm='ortho') + assert_equal(y.dtype, dt) + assert_array_almost_equal(y / np.max(y), y2 / np.max(y), decimal=self.dec) + +class _TestDSTIVBase(_TestDSTBase): + def test_definition_ortho(self): + # Test orthonormal mode.
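# A related property worth noting (an illustrative aside, not from the
# original suite): the orthonormalized DST-IV matrix is symmetric and
# orthogonal, so applying the transform twice recovers the input:
#
#     x = np.random.randn(16)
#     y = dst(dst(x, type=4, norm='ortho'), type=4, norm='ortho')
#     np.testing.assert_allclose(y, x)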
+ dt = np.result_type(np.float32, self.rdt) + for xr in X: + x = np.array(xr, dtype=self.rdt) + y = dst(x, norm='ortho', type=4) + y2 = naive_dst4(x, norm='ortho') + assert_equal(y.dtype, dt) + assert_array_almost_equal(y, y2, decimal=self.dec) + +class TestDSTIDouble(_TestDSTIBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 12 + self.type = 1 + + +class TestDSTIFloat(_TestDSTIBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 4 + self.type = 1 + + +class TestDSTIInt(_TestDSTIBase): + def setup_method(self): + self.rdt = int + self.dec = 5 + self.type = 1 + + +class TestDSTIIDouble(_TestDSTBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 14 + self.type = 2 + + +class TestDSTIIFloat(_TestDSTBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 6 + self.type = 2 + + +class TestDSTIIInt(_TestDSTBase): + def setup_method(self): + self.rdt = int + self.dec = 6 + self.type = 2 + + +class TestDSTIIIDouble(_TestDSTBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 14 + self.type = 3 + + +class TestDSTIIIFloat(_TestDSTBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 7 + self.type = 3 + + +class TestDSTIIIInt(_TestDSTBase): + def setup_method(self): + self.rdt = int + self.dec = 7 + self.type = 3 + + +class TestDSTIVDouble(_TestDSTIVBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 12 + self.type = 4 + + +class TestDSTIVFloat(_TestDSTIVBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 4 + self.type = 4 + + +class TestDSTIVInt(_TestDSTIVBase): + def setup_method(self): + self.rdt = int + self.dec = 5 + self.type = 4 + + +class _TestIDSTBase: + def setup_method(self): + self.rdt = None + self.dec = None + self.type = None + + def test_definition(self): + for i in FFTWDATA_SIZES: + xr, yr, dt = fftw_dst_ref(self.type, i, self.rdt) + x = idst(yr, type=self.type) + if self.type == 1: + x /= 2 * (i+1) + else: + x /= 2 * i + assert_equal(x.dtype, dt) + # XXX: we divide by np.max(x) because the tests fail otherwise. We + # should really use something like assert_array_approx_equal. The + # difference is due to fftw using a better algorithm w.r.t error + # propagation compared to the ones from fftpack. 
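# As with the cosine transforms, the scaling below comes from fftpack's
# unnormalized inverses: the DST-I round trip scales by 2*(N+1), while
# types 2/3 scale by 2*N. An illustrative sketch (not part of the suite):
#
#     x = np.random.randn(16)
#     np.testing.assert_allclose(idst(dst(x, type=1), type=1) / (2 * 17), x)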
+ assert_array_almost_equal(x / np.max(x), xr / np.max(x), decimal=self.dec, + err_msg="Size %d failed" % i) + + +class TestIDSTIDouble(_TestIDSTBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 12 + self.type = 1 + + +class TestIDSTIFloat(_TestIDSTBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 4 + self.type = 1 + + +class TestIDSTIInt(_TestIDSTBase): + def setup_method(self): + self.rdt = int + self.dec = 4 + self.type = 1 + + +class TestIDSTIIDouble(_TestIDSTBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 14 + self.type = 2 + + +class TestIDSTIIFloat(_TestIDSTBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 6 + self.type = 2 + + +class TestIDSTIIInt(_TestIDSTBase): + def setup_method(self): + self.rdt = int + self.dec = 6 + self.type = 2 + + +class TestIDSTIIIDouble(_TestIDSTBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 14 + self.type = 3 + + +class TestIDSTIIIFloat(_TestIDSTBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 6 + self.type = 3 + + +class TestIDSTIIIInt(_TestIDSTBase): + def setup_method(self): + self.rdt = int + self.dec = 6 + self.type = 3 + + +class TestIDSTIVDouble(_TestIDSTBase): + def setup_method(self): + self.rdt = np.float64 + self.dec = 12 + self.type = 4 + + +class TestIDSTIVFloat(_TestIDSTBase): + def setup_method(self): + self.rdt = np.float32 + self.dec = 6 + self.type = 4 + + +class TestIDSTIVInt(_TestIDSTBase): + def setup_method(self): + self.rdt = int + self.dec = 6 + self.type = 4 + + +class TestOverwrite: + """Check input overwrite behavior.""" + + real_dtypes = [np.float32, np.float64] + + def _check(self, x, routine, type, fftsize, axis, norm, overwrite_x, **kw): + x2 = x.copy() + routine(x2, type, fftsize, axis, norm, overwrite_x=overwrite_x) + + sig = "{}({}{!r}, {!r}, axis={!r}, overwrite_x={!r})".format( + routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x) + if not overwrite_x: + assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig) + + def _check_1d(self, routine, dtype, shape, axis): + np.random.seed(1234) + if np.issubdtype(dtype, np.complexfloating): + data = np.random.randn(*shape) + 1j*np.random.randn(*shape) + else: + data = np.random.randn(*shape) + data = data.astype(dtype) + + for type in [1, 2, 3, 4]: + for overwrite_x in [True, False]: + for norm in [None, 'ortho']: + self._check(data, routine, type, None, axis, norm, + overwrite_x) + + def test_dct(self): + for dtype in self.real_dtypes: + self._check_1d(dct, dtype, (16,), -1) + self._check_1d(dct, dtype, (16, 2), 0) + self._check_1d(dct, dtype, (2, 16), 1) + + def test_idct(self): + for dtype in self.real_dtypes: + self._check_1d(idct, dtype, (16,), -1) + self._check_1d(idct, dtype, (16, 2), 0) + self._check_1d(idct, dtype, (2, 16), 1) + + def test_dst(self): + for dtype in self.real_dtypes: + self._check_1d(dst, dtype, (16,), -1) + self._check_1d(dst, dtype, (16, 2), 0) + self._check_1d(dst, dtype, (2, 16), 1) + + def test_idst(self): + for dtype in self.real_dtypes: + self._check_1d(idst, dtype, (16,), -1) + self._check_1d(idst, dtype, (16, 2), 0) + self._check_1d(idst, dtype, (2, 16), 1) + + +class Test_DCTN_IDCTN: + dec = 14 + dct_type = [1, 2, 3, 4] + norms = [None, 'ortho'] + rstate = np.random.RandomState(1234) + shape = (32, 16) + data = rstate.randn(*shape) + + @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn), + (dstn, idstn)]) + @pytest.mark.parametrize('axes', [None, + 1, (1,), [1], + 0, (0,), [0], + (0, 1), [0,
1], + (-2, -1), [-2, -1]]) + @pytest.mark.parametrize('dct_type', dct_type) + @pytest.mark.parametrize('norm', ['ortho']) + def test_axes_round_trip(self, fforward, finverse, axes, dct_type, norm): + tmp = fforward(self.data, type=dct_type, axes=axes, norm=norm) + tmp = finverse(tmp, type=dct_type, axes=axes, norm=norm) + assert_array_almost_equal(self.data, tmp, decimal=12) + + @pytest.mark.parametrize('fforward,fforward_ref', [(dctn, dct_2d_ref), + (dstn, dst_2d_ref)]) + @pytest.mark.parametrize('dct_type', dct_type) + @pytest.mark.parametrize('norm', norms) + def test_dctn_vs_2d_reference(self, fforward, fforward_ref, + dct_type, norm): + y1 = fforward(self.data, type=dct_type, axes=None, norm=norm) + y2 = fforward_ref(self.data, type=dct_type, norm=norm) + assert_array_almost_equal(y1, y2, decimal=11) + + @pytest.mark.parametrize('finverse,finverse_ref', [(idctn, idct_2d_ref), + (idstn, idst_2d_ref)]) + @pytest.mark.parametrize('dct_type', dct_type) + @pytest.mark.parametrize('norm', [None, 'ortho']) + def test_idctn_vs_2d_reference(self, finverse, finverse_ref, + dct_type, norm): + fdata = dctn(self.data, type=dct_type, norm=norm) + y1 = finverse(fdata, type=dct_type, norm=norm) + y2 = finverse_ref(fdata, type=dct_type, norm=norm) + assert_array_almost_equal(y1, y2, decimal=11) + + @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn), + (dstn, idstn)]) + def test_axes_and_shape(self, fforward, finverse): + with assert_raises(ValueError, + match="when given, axes and shape arguments" + " have to be of the same length"): + fforward(self.data, shape=self.data.shape[0], axes=(0, 1)) + + with assert_raises(ValueError, + match="when given, axes and shape arguments" + " have to be of the same length"): + fforward(self.data, shape=self.data.shape[0], axes=None) + + with assert_raises(ValueError, + match="when given, axes and shape arguments" + " have to be of the same length"): + fforward(self.data, shape=self.data.shape, axes=0) + + @pytest.mark.parametrize('fforward', [dctn, dstn]) + def test_shape(self, fforward): + tmp = fforward(self.data, shape=(128, 128), axes=None) + assert_equal(tmp.shape, (128, 128)) + + @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn), + (dstn, idstn)]) + @pytest.mark.parametrize('axes', [1, (1,), [1], + 0, (0,), [0]]) + def test_shape_is_none_with_axes(self, fforward, finverse, axes): + tmp = fforward(self.data, shape=None, axes=axes, norm='ortho') + tmp = finverse(tmp, shape=None, axes=axes, norm='ortho') + assert_array_almost_equal(self.data, tmp, decimal=self.dec) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/integrate/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/integrate/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91b710df1ba896d850f777ded4de96c9a85eac98 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/integrate/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quadrature.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quadrature.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9585437e35fa4a6bde02886ced6e5f895e6730f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quadrature.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/integrate/__pycache__/dop.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/scipy/integrate/__pycache__/dop.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5ce404a2134d738a638cd700f46af8b6db831b6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/integrate/__pycache__/dop.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/README b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/README new file mode 100644 index 0000000000000000000000000000000000000000..a355e0c447f2d36275877225fcbddd42f14636dd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/README @@ -0,0 +1,76 @@ +From the website for the L-BFGS-B code (available at +http://www.ece.northwestern.edu/~nocedal/lbfgsb.html): + +""" +L-BFGS-B is a limited-memory quasi-Newton code for bound-constrained +optimization, i.e. for problems where the only constraints are of the +form l <= x <= u. +""" + +This is a Python wrapper (using F2PY) written by David M. Cooke + and released as version 0.9 on April 9, 2004. +The wrapper was slightly modified by Joonas Paalasmaa for the 3.0 version +in March 2012. + +License of L-BFGS-B (Fortran code) +================================== + +The version included here (in lbfgsb.f) is 3.0 (released April 25, 2011). It was +written by Ciyou Zhu, Richard Byrd, and Jorge Nocedal. It +carries the following condition for use: + + """ + This software is freely available, but we expect that all publications + describing work using this software, or all commercial products using it, + quote at least one of the references given below. This software is released + under the BSD License. + + References + * R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound + Constrained Optimization, (1995), SIAM Journal on Scientific and + Statistical Computing, 16, 5, pp. 1190-1208. + * C. Zhu, R. H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B, + FORTRAN routines for large scale bound constrained optimization (1997), + ACM Transactions on Mathematical Software, 23, 4, pp. 550 - 560. + * J.L. Morales and J. Nocedal. L-BFGS-B: Remark on Algorithm 778: L-BFGS-B, + FORTRAN routines for large scale bound constrained optimization (2011), + ACM Transactions on Mathematical Software, 38, 1. + """ + +The Python wrapper +================== + +This code uses F2PY (http://cens.ioc.ee/projects/f2py2e/) to generate +the wrapper around the Fortran code. + +The Python code and wrapper are copyrighted 2004 by David M. Cooke. + +Example usage +============= + +An example of the usage is given at the bottom of the lbfgsb.py file. +Run it with 'python lbfgsb.py'. + +License for the Python wrapper +============================== + +Copyright (c) 2004 David M. Cooke + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__init__.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b103e89cc47e4921152b1443ba41e07d25d5fc90 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__init__.py @@ -0,0 +1,451 @@ +""" +===================================================== +Optimization and root finding (:mod:`scipy.optimize`) +===================================================== + +.. currentmodule:: scipy.optimize + +.. toctree:: + :hidden: + + optimize.cython_optimize + +SciPy ``optimize`` provides functions for minimizing (or maximizing) +objective functions, possibly subject to constraints. It includes +solvers for nonlinear problems (with support for both local and global +optimization algorithms), linear programming, constrained +and nonlinear least-squares, root finding, and curve fitting. + +Common functions and objects, shared across different solvers, are: + +.. autosummary:: + :toctree: generated/ + + show_options - Show specific options for optimization solvers. + OptimizeResult - The optimization result returned by some optimizers. + OptimizeWarning - The optimization encountered problems. + + +Optimization +============ + +Scalar functions optimization +----------------------------- + +.. autosummary:: + :toctree: generated/ + + minimize_scalar - Interface for minimizers of univariate functions. + +The `minimize_scalar` function supports the following methods: + +.. toctree:: + + optimize.minimize_scalar-brent + optimize.minimize_scalar-bounded + optimize.minimize_scalar-golden + +Local (multivariate) optimization +--------------------------------- + +.. autosummary:: + :toctree: generated/ + + minimize - Interface for minimizers of multivariate functions. + +The `minimize` function supports the following methods: + +.. toctree:: + + optimize.minimize-neldermead + optimize.minimize-powell + optimize.minimize-cg + optimize.minimize-bfgs + optimize.minimize-newtoncg + optimize.minimize-lbfgsb + optimize.minimize-tnc + optimize.minimize-cobyla + optimize.minimize-slsqp + optimize.minimize-trustconstr + optimize.minimize-dogleg + optimize.minimize-trustncg + optimize.minimize-trustkrylov + optimize.minimize-trustexact + +Constraints are passed to the `minimize` function as a single object or +as a list of objects from the following classes: + +.. autosummary:: + :toctree: generated/ + + NonlinearConstraint - Class defining general nonlinear constraints. + LinearConstraint - Class defining general linear constraints. + +Simple bound constraints are handled separately and there is a special class +for them: + +.. autosummary:: + :toctree: generated/ + + Bounds - Bound constraints. + +Quasi-Newton strategies implementing the `HessianUpdateStrategy` +interface can be used to approximate the Hessian in the `minimize` +function (available only for the 'trust-constr' method). Available +quasi-Newton methods implementing this interface are: + +.. autosummary:: + :toctree: generated/ + + BFGS - Broyden-Fletcher-Goldfarb-Shanno (BFGS) Hessian update strategy. + SR1 - Symmetric-rank-1 Hessian update strategy. + +.. _global_optimization: + +Global optimization +------------------- + +..
autosummary:: + :toctree: generated/ + + basinhopping - Basinhopping stochastic optimizer. + brute - Brute force searching optimizer. + differential_evolution - Stochastic optimizer using differential evolution. + + shgo - Simplicial homology global optimizer. + dual_annealing - Dual annealing stochastic optimizer. + direct - DIRECT (Dividing Rectangles) optimizer. + +Least-squares and curve fitting +=============================== + +Nonlinear least-squares +----------------------- + +.. autosummary:: + :toctree: generated/ + + least_squares - Solve a nonlinear least-squares problem with bounds on the variables. + +Linear least-squares +-------------------- + +.. autosummary:: + :toctree: generated/ + + nnls - Linear least-squares problem with non-negativity constraint. + lsq_linear - Linear least-squares problem with bound constraints. + isotonic_regression - Least squares problem of isotonic regression via PAVA. + +Curve fitting +------------- + +.. autosummary:: + :toctree: generated/ + + curve_fit -- Fit curve to a set of points. + +Root finding +============ + +Scalar functions +---------------- +.. autosummary:: + :toctree: generated/ + + root_scalar - Unified interface for nonlinear solvers of scalar functions. + brentq - quadratic interpolation Brent method. + brenth - Brent method, modified by Harris with hyperbolic extrapolation. + ridder - Ridder's method. + bisect - Bisection method. + newton - Newton's method (also Secant and Halley's methods). + toms748 - Alefeld, Potra & Shi Algorithm 748. + RootResults - The root finding result returned by some root finders. + +The `root_scalar` function supports the following methods: + +.. toctree:: + + optimize.root_scalar-brentq + optimize.root_scalar-brenth + optimize.root_scalar-bisect + optimize.root_scalar-ridder + optimize.root_scalar-newton + optimize.root_scalar-toms748 + optimize.root_scalar-secant + optimize.root_scalar-halley + + + +The table below lists situations and appropriate methods, along with +*asymptotic* convergence rates per iteration (and per function evaluation) +for successful convergence to a simple root(*). +Bisection is the slowest of them all, adding one bit of accuracy for each +function evaluation, but is guaranteed to converge. +The other bracketing methods all (eventually) increase the number of accurate +bits by about 50% for every function evaluation. +The derivative-based methods, all built on `newton`, can converge quite quickly +if the initial value is close to the root. They can also be applied to +functions defined on (a subset of) the complex plane. + ++-------------+----------+----------+-----------+-------------+-------------+----------------+ +| Domain of f | Bracket? | Derivatives? | Solvers | Convergence | ++ + +----------+-----------+ +-------------+----------------+ +| | | `fprime` | `fprime2` | | Guaranteed? 
| Rate(s)(*) | ++=============+==========+==========+===========+=============+=============+================+ +| `R` | Yes | N/A | N/A | - bisection | - Yes | - 1 "Linear" | +| | | | | - brentq | - Yes | - >=1, <= 1.62 | +| | | | | - brenth | - Yes | - >=1, <= 1.62 | +| | | | | - ridder | - Yes | - 2.0 (1.41) | +| | | | | - toms748 | - Yes | - 2.7 (1.65) | ++-------------+----------+----------+-----------+-------------+-------------+----------------+ +| `R` or `C` | No | No | No | secant | No | 1.62 (1.62) | ++-------------+----------+----------+-----------+-------------+-------------+----------------+ +| `R` or `C` | No | Yes | No | newton | No | 2.00 (1.41) | ++-------------+----------+----------+-----------+-------------+-------------+----------------+ +| `R` or `C` | No | Yes | Yes | halley | No | 3.00 (1.44) | ++-------------+----------+----------+-----------+-------------+-------------+----------------+ + +.. seealso:: + + `scipy.optimize.cython_optimize` -- Typed Cython versions of root finding functions + +Fixed point finding: + +.. autosummary:: + :toctree: generated/ + + fixed_point - Single-variable fixed-point solver. + +Multidimensional +---------------- + +.. autosummary:: + :toctree: generated/ + + root - Unified interface for nonlinear solvers of multivariate functions. + +The `root` function supports the following methods: + +.. toctree:: + + optimize.root-hybr + optimize.root-lm + optimize.root-broyden1 + optimize.root-broyden2 + optimize.root-anderson + optimize.root-linearmixing + optimize.root-diagbroyden + optimize.root-excitingmixing + optimize.root-krylov + optimize.root-dfsane + +Linear programming / MILP +========================= + +.. autosummary:: + :toctree: generated/ + + milp -- Mixed integer linear programming. + linprog -- Unified interface for minimizers of linear programming problems. + +The `linprog` function supports the following methods: + +.. toctree:: + + optimize.linprog-simplex + optimize.linprog-interior-point + optimize.linprog-revised_simplex + optimize.linprog-highs-ipm + optimize.linprog-highs-ds + optimize.linprog-highs + +The simplex, interior-point, and revised simplex methods support callback +functions, such as: + +.. autosummary:: + :toctree: generated/ + + linprog_verbose_callback -- Sample callback function for linprog (simplex). + +Assignment problems +=================== + +.. autosummary:: + :toctree: generated/ + + linear_sum_assignment -- Solves the linear-sum assignment problem. + quadratic_assignment -- Solves the quadratic assignment problem. + +The `quadratic_assignment` function supports the following methods: + +.. toctree:: + + optimize.qap-faq + optimize.qap-2opt + +Utilities +========= + +Finite-difference approximation +------------------------------- + +.. autosummary:: + :toctree: generated/ + + approx_fprime - Approximate the gradient of a scalar function. + check_grad - Check the supplied derivative using finite differences. + + +Line search +----------- + +.. autosummary:: + :toctree: generated/ + + bracket - Bracket a minimum, given two starting points. + line_search - Return a step that satisfies the strong Wolfe conditions. + +Hessian approximation +--------------------- + +.. autosummary:: + :toctree: generated/ + + LbfgsInvHessProduct - Linear operator for L-BFGS approximate inverse Hessian. + HessianUpdateStrategy - Interface for implementing Hessian update strategies + +Benchmark problems +------------------ + +.. autosummary:: + :toctree: generated/ + + rosen - The Rosenbrock function. 
+ rosen_der - The derivative of the Rosenbrock function. + rosen_hess - The Hessian matrix of the Rosenbrock function. + rosen_hess_prod - Product of the Rosenbrock Hessian with a vector. + +Legacy functions +================ + +The functions below are not recommended for use in new scripts; +all of these methods are accessible via newer, more consistent +interfaces provided by the functions above. + +Optimization +------------ + +General-purpose multivariate methods: + +.. autosummary:: + :toctree: generated/ + + fmin - Nelder-Mead Simplex algorithm. + fmin_powell - Powell's (modified) conjugate direction method. + fmin_cg - Non-linear (Polak-Ribiere) conjugate gradient algorithm. + fmin_bfgs - Quasi-Newton method (Broyden-Fletcher-Goldfarb-Shanno). + fmin_ncg - Line-search Newton Conjugate Gradient. + +Constrained multivariate methods: + +.. autosummary:: + :toctree: generated/ + + fmin_l_bfgs_b - Zhu, Byrd, and Nocedal's constrained optimizer. + fmin_tnc - Truncated Newton code. + fmin_cobyla - Constrained optimization by linear approximation. + fmin_slsqp - Minimization using sequential least-squares programming. + +Univariate (scalar) minimization methods: + +.. autosummary:: + :toctree: generated/ + + fminbound - Bounded minimization of a scalar function. + brent - 1-D function minimization using Brent method. + golden - 1-D function minimization using Golden Section method. + +Least-squares +------------- + +.. autosummary:: + :toctree: generated/ + + leastsq - Minimize the sum of squares of M equations in N unknowns. + +Root finding +------------ + +General nonlinear solvers: + +.. autosummary:: + :toctree: generated/ + + fsolve - Non-linear multivariable equation solver. + broyden1 - Broyden's first method. + broyden2 - Broyden's second method. + NoConvergence - Exception raised when nonlinear solver does not converge. + +Large-scale nonlinear solvers: + +.. autosummary:: + :toctree: generated/ + + newton_krylov + anderson + + BroydenFirst + InverseJacobian + KrylovJacobian + +Simple iteration solvers: + +.. autosummary:: + :toctree: generated/ + + excitingmixing + linearmixing + diagbroyden + +""" # noqa: E501 + +from ._optimize import * +from ._minimize import * +from ._root import * +from ._root_scalar import * +from ._minpack_py import * +from ._zeros_py import * +from ._lbfgsb_py import fmin_l_bfgs_b, LbfgsInvHessProduct +from ._tnc import fmin_tnc +from ._cobyla_py import fmin_cobyla +from ._nonlin import * +from ._slsqp_py import fmin_slsqp +from ._nnls import nnls +from ._basinhopping import basinhopping +from ._linprog import linprog, linprog_verbose_callback +from ._lsap import linear_sum_assignment +from ._differentialevolution import differential_evolution +from ._lsq import least_squares, lsq_linear +from ._isotonic import isotonic_regression +from ._constraints import (NonlinearConstraint, + LinearConstraint, + Bounds) +from ._hessian_update_strategy import HessianUpdateStrategy, BFGS, SR1 +from ._shgo import shgo +from ._dual_annealing import dual_annealing +from ._qap import quadratic_assignment +from ._direct_py import direct +from ._milp import milp + +# Deprecated namespaces, to be removed in v2.0.0 +from .
import ( + cobyla, lbfgsb, linesearch, minpack, minpack2, moduleTNC, nonlin, optimize, + slsqp, tnc, zeros +) + +__all__ = [s for s in dir() if not s.startswith('_')] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_basinhopping.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_basinhopping.py new file mode 100644 index 0000000000000000000000000000000000000000..d874a708b9a22ba72be1e63a18a082298e84bbe8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_basinhopping.py @@ -0,0 +1,753 @@ +""" +basinhopping: The basinhopping global optimization algorithm +""" +import numpy as np +import math +import inspect +import scipy.optimize +from scipy._lib._util import check_random_state + +__all__ = ['basinhopping'] + + +_params = (inspect.Parameter('res_new', kind=inspect.Parameter.KEYWORD_ONLY), + inspect.Parameter('res_old', kind=inspect.Parameter.KEYWORD_ONLY)) +_new_accept_test_signature = inspect.Signature(parameters=_params) + + +class Storage: + """ + Class used to store the lowest energy structure + """ + def __init__(self, minres): + self._add(minres) + + def _add(self, minres): + self.minres = minres + self.minres.x = np.copy(minres.x) + + def update(self, minres): + if minres.success and (minres.fun < self.minres.fun + or not self.minres.success): + self._add(minres) + return True + else: + return False + + def get_lowest(self): + return self.minres + + +class BasinHoppingRunner: + """This class implements the core of the basinhopping algorithm. + + x0 : ndarray + The starting coordinates. + minimizer : callable + The local minimizer, with signature ``result = minimizer(x)``. + The return value is an `optimize.OptimizeResult` object. + step_taking : callable + This function displaces the coordinates randomly. Signature should + be ``x_new = step_taking(x)``. Note that `x` may be modified in-place. + accept_tests : list of callables + Each test is passed the kwargs `f_new`, `x_new`, `f_old` and + `x_old`. These tests will be used to judge whether or not to accept + the step. The acceptable return values are True, False, or ``"force + accept"``. If any of the tests return False then the step is rejected. + If ``"force accept"``, then this will override any other tests in + order to accept the step. This can be used, for example, to forcefully + escape from a local minimum that ``basinhopping`` is trapped in. + disp : bool, optional + Display status messages. 
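    As an illustration (a hypothetical accept test, not part of the
    original docstring), a callable that vetoes any step leaving a
    feasible box could be written as::

        def accept_in_box(f_new=None, x_new=None, f_old=None, x_old=None):
            # reject any candidate minimum outside [-5, 5] in any coordinate
            return bool(np.all(np.abs(x_new) <= 5))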
+ + """ + def __init__(self, x0, minimizer, step_taking, accept_tests, disp=False): + self.x = np.copy(x0) + self.minimizer = minimizer + self.step_taking = step_taking + self.accept_tests = accept_tests + self.disp = disp + + self.nstep = 0 + + # initialize return object + self.res = scipy.optimize.OptimizeResult() + self.res.minimization_failures = 0 + + # do initial minimization + minres = minimizer(self.x) + if not minres.success: + self.res.minimization_failures += 1 + if self.disp: + print("warning: basinhopping: local minimization failure") + self.x = np.copy(minres.x) + self.energy = minres.fun + self.incumbent_minres = minres # best minimize result found so far + if self.disp: + print("basinhopping step %d: f %g" % (self.nstep, self.energy)) + + # initialize storage class + self.storage = Storage(minres) + + if hasattr(minres, "nfev"): + self.res.nfev = minres.nfev + if hasattr(minres, "njev"): + self.res.njev = minres.njev + if hasattr(minres, "nhev"): + self.res.nhev = minres.nhev + + def _monte_carlo_step(self): + """Do one Monte Carlo iteration + + Randomly displace the coordinates, minimize, and decide whether + or not to accept the new coordinates. + """ + # Take a random step. Make a copy of x because the step_taking + # algorithm might change x in place + x_after_step = np.copy(self.x) + x_after_step = self.step_taking(x_after_step) + + # do a local minimization + minres = self.minimizer(x_after_step) + x_after_quench = minres.x + energy_after_quench = minres.fun + if not minres.success: + self.res.minimization_failures += 1 + if self.disp: + print("warning: basinhopping: local minimization failure") + if hasattr(minres, "nfev"): + self.res.nfev += minres.nfev + if hasattr(minres, "njev"): + self.res.njev += minres.njev + if hasattr(minres, "nhev"): + self.res.nhev += minres.nhev + + # accept the move based on self.accept_tests. If any test is False, + # then reject the step. If any test returns the special string + # 'force accept', then accept the step regardless. This can be used + # to forcefully escape from a local minimum if normal basin hopping + # steps are not sufficient. + accept = True + for test in self.accept_tests: + if inspect.signature(test) == _new_accept_test_signature: + testres = test(res_new=minres, res_old=self.incumbent_minres) + else: + testres = test(f_new=energy_after_quench, x_new=x_after_quench, + f_old=self.energy, x_old=self.x) + + if testres == 'force accept': + accept = True + break + elif testres is None: + raise ValueError("accept_tests must return True, False, or " + "'force accept'") + elif not testres: + accept = False + + # Report the result of the acceptance test to the take step class. 
+ # This is for adaptive step taking + if hasattr(self.step_taking, "report"): + self.step_taking.report(accept, f_new=energy_after_quench, + x_new=x_after_quench, f_old=self.energy, + x_old=self.x) + + return accept, minres + + def one_cycle(self): + """Do one cycle of the basinhopping algorithm + """ + self.nstep += 1 + new_global_min = False + + accept, minres = self._monte_carlo_step() + + if accept: + self.energy = minres.fun + self.x = np.copy(minres.x) + self.incumbent_minres = minres # best minimize result found so far + new_global_min = self.storage.update(minres) + + # print some information + if self.disp: + self.print_report(minres.fun, accept) + if new_global_min: + print("found new global minimum on step %d with function" + " value %g" % (self.nstep, self.energy)) + + # save some variables as BasinHoppingRunner attributes + self.xtrial = minres.x + self.energy_trial = minres.fun + self.accept = accept + + return new_global_min + + def print_report(self, energy_trial, accept): + """print a status update""" + minres = self.storage.get_lowest() + print("basinhopping step %d: f %g trial_f %g accepted %d " + " lowest_f %g" % (self.nstep, self.energy, energy_trial, + accept, minres.fun)) + + +class AdaptiveStepsize: + """ + Class to implement adaptive stepsize. + + This class wraps the step taking class and modifies the stepsize to + ensure the true acceptance rate is as close as possible to the target. + + Parameters + ---------- + takestep : callable + The step taking routine. Must contain modifiable attribute + takestep.stepsize + accept_rate : float, optional + The target step acceptance rate + interval : int, optional + Interval for how often to update the stepsize + factor : float, optional + The step size is multiplied or divided by this factor upon each + update. + verbose : bool, optional + Print information about each update + + """ + def __init__(self, takestep, accept_rate=0.5, interval=50, factor=0.9, + verbose=True): + self.takestep = takestep + self.target_accept_rate = accept_rate + self.interval = interval + self.factor = factor + self.verbose = verbose + + self.nstep = 0 + self.nstep_tot = 0 + self.naccept = 0 + + def __call__(self, x): + return self.take_step(x) + + def _adjust_step_size(self): + old_stepsize = self.takestep.stepsize + accept_rate = float(self.naccept) / self.nstep + if accept_rate > self.target_accept_rate: + # We're accepting too many steps. This generally means we're + # trapped in a basin. Take bigger steps. + self.takestep.stepsize /= self.factor + else: + # We're not accepting enough steps. Take smaller steps. + self.takestep.stepsize *= self.factor + if self.verbose: + print("adaptive stepsize: acceptance rate {:f} target {:f} new " + "stepsize {:g} old stepsize {:g}".format(accept_rate, + self.target_accept_rate, self.takestep.stepsize, + old_stepsize)) + + def take_step(self, x): + self.nstep += 1 + self.nstep_tot += 1 + if self.nstep % self.interval == 0: + self._adjust_step_size() + return self.takestep(x) + + def report(self, accept, **kwargs): + "called by basinhopping to report the result of the step" + if accept: + self.naccept += 1 + + +class RandomDisplacement: + """Add a random displacement of maximum size `stepsize` to each coordinate. + + Calling this updates `x` in-place. 
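    For example (a sketch assuming a fixed seed; not part of the original
    docstring)::

        >>> take_step = RandomDisplacement(stepsize=0.5, random_gen=1234)
        >>> x = np.zeros(3)
        >>> x = take_step(x)  # x is perturbed in place and also returned
        >>> bool(np.all(np.abs(x) <= 0.5))
        True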
+ + Parameters + ---------- + stepsize : float, optional + Maximum stepsize in any dimension + random_gen : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + + """ + + def __init__(self, stepsize=0.5, random_gen=None): + self.stepsize = stepsize + self.random_gen = check_random_state(random_gen) + + def __call__(self, x): + x += self.random_gen.uniform(-self.stepsize, self.stepsize, + np.shape(x)) + return x + + +class MinimizerWrapper: + """ + wrap a minimizer function as a minimizer class + """ + def __init__(self, minimizer, func=None, **kwargs): + self.minimizer = minimizer + self.func = func + self.kwargs = kwargs + + def __call__(self, x0): + if self.func is None: + return self.minimizer(x0, **self.kwargs) + else: + return self.minimizer(self.func, x0, **self.kwargs) + + +class Metropolis: + """Metropolis acceptance criterion. + + Parameters + ---------- + T : float + The "temperature" parameter for the accept or reject criterion. + random_gen : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + Random number generator used for acceptance test. + + """ + + def __init__(self, T, random_gen=None): + # Avoid ZeroDivisionError since "MBH can be regarded as a special case + # of the BH framework with the Metropolis criterion, where temperature + # T = 0." (Reject all steps that increase energy.) + self.beta = 1.0 / T if T != 0 else float('inf') + self.random_gen = check_random_state(random_gen) + + def accept_reject(self, res_new, res_old): + """ + Assuming the local search underlying res_new was successful: + If new energy is lower than old, it will always be accepted. + If new is higher than old, there is a chance it will be accepted, + less likely for larger differences. + """ + with np.errstate(invalid='ignore'): + # The energy values being fed to Metropolis are 1-length arrays, and if + # they are equal, their difference is 0, which gets multiplied by beta, + # which is inf, and array([0]) * float('inf') causes + # + # RuntimeWarning: invalid value encountered in multiply + # + # Ignore this warning so when the algorithm is on a flat plane, it always + # accepts the step, to try to move off the plane. + prod = -(res_new.fun - res_old.fun) * self.beta + w = math.exp(min(0, prod)) + + rand = self.random_gen.uniform() + return w >= rand and (res_new.success or not res_old.success) + + def __call__(self, *, res_new, res_old): + """ + f_new and f_old are mandatory in kwargs + """ + return bool(self.accept_reject(res_new, res_old)) + + +def basinhopping(func, x0, niter=100, T=1.0, stepsize=0.5, + minimizer_kwargs=None, take_step=None, accept_test=None, + callback=None, interval=50, disp=False, niter_success=None, + seed=None, *, target_accept_rate=0.5, stepwise_factor=0.9): + """Find the global minimum of a function using the basin-hopping algorithm. + + Basin-hopping is a two-phase method that combines a global stepping + algorithm with local minimization at each step. 
Designed to mimic + the natural process of energy minimization of clusters of atoms, it works + well for similar problems with "funnel-like, but rugged" energy landscapes + [5]_. + + As the step-taking, step acceptance, and minimization methods are all + customizable, this function can also be used to implement other two-phase + methods. + + Parameters + ---------- + func : callable ``f(x, *args)`` + Function to be optimized. ``args`` can be passed as an optional item + in the dict `minimizer_kwargs` + x0 : array_like + Initial guess. + niter : integer, optional + The number of basin-hopping iterations. There will be a total of + ``niter + 1`` runs of the local minimizer. + T : float, optional + The "temperature" parameter for the acceptance or rejection criterion. + Higher "temperatures" mean that larger jumps in function value will be + accepted. For best results `T` should be comparable to the + separation (in function value) between local minima. + stepsize : float, optional + Maximum step size for use in the random displacement. + minimizer_kwargs : dict, optional + Extra keyword arguments to be passed to the local minimizer + `scipy.optimize.minimize` Some important options could be: + + method : str + The minimization method (e.g. ``"L-BFGS-B"``) + args : tuple + Extra arguments passed to the objective function (`func`) and + its derivatives (Jacobian, Hessian). + + take_step : callable ``take_step(x)``, optional + Replace the default step-taking routine with this routine. The default + step-taking routine is a random displacement of the coordinates, but + other step-taking algorithms may be better for some systems. + `take_step` can optionally have the attribute ``take_step.stepsize``. + If this attribute exists, then `basinhopping` will adjust + ``take_step.stepsize`` in order to try to optimize the global minimum + search. + accept_test : callable, ``accept_test(f_new=f_new, x_new=x_new, f_old=fold, x_old=x_old)``, optional + Define a test which will be used to judge whether to accept the + step. This will be used in addition to the Metropolis test based on + "temperature" `T`. The acceptable return values are True, + False, or ``"force accept"``. If any of the tests return False + then the step is rejected. If the latter, then this will override any + other tests in order to accept the step. This can be used, for example, + to forcefully escape from a local minimum that `basinhopping` is + trapped in. + callback : callable, ``callback(x, f, accept)``, optional + A callback function which will be called for all minima found. ``x`` + and ``f`` are the coordinates and function value of the trial minimum, + and ``accept`` is whether that minimum was accepted. This can + be used, for example, to save the lowest N minima found. Also, + `callback` can be used to specify a user defined stop criterion by + optionally returning True to stop the `basinhopping` routine. + interval : integer, optional + interval for how often to update the `stepsize` + disp : bool, optional + Set to True to print status messages + niter_success : integer, optional + Stop the run if the global minimum candidate remains the same for this + number of iterations. + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. 
+ If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + Specify `seed` for repeatable minimizations. The random numbers + generated with this seed only affect the default Metropolis + `accept_test` and the default `take_step`. If you supply your own + `take_step` and `accept_test`, and these functions use random + number generation, then those functions are responsible for the state + of their random number generator. + target_accept_rate : float, optional + The target acceptance rate that is used to adjust the `stepsize`. + If the current acceptance rate is greater than the target, + then the `stepsize` is increased. Otherwise, it is decreased. + Range is (0, 1). Default is 0.5. + + .. versionadded:: 1.8.0 + + stepwise_factor : float, optional + The `stepsize` is multiplied or divided by this stepwise factor upon + each update. Range is (0, 1). Default is 0.9. + + .. versionadded:: 1.8.0 + + Returns + ------- + res : OptimizeResult + The optimization result represented as a `OptimizeResult` object. + Important attributes are: ``x`` the solution array, ``fun`` the value + of the function at the solution, and ``message`` which describes the + cause of the termination. The ``OptimizeResult`` object returned by the + selected minimizer at the lowest minimum is also contained within this + object and can be accessed through the ``lowest_optimization_result`` + attribute. See `OptimizeResult` for a description of other attributes. + + See Also + -------- + minimize : + The local minimization function called once for each basinhopping step. + `minimizer_kwargs` is passed to this routine. + + Notes + ----- + Basin-hopping is a stochastic algorithm which attempts to find the global + minimum of a smooth scalar function of one or more variables [1]_ [2]_ [3]_ + [4]_. The algorithm in its current form was described by David Wales and + Jonathan Doye [2]_ http://www-wales.ch.cam.ac.uk/. + + The algorithm is iterative with each cycle composed of the following + features + + 1) random perturbation of the coordinates + + 2) local minimization + + 3) accept or reject the new coordinates based on the minimized function + value + + The acceptance test used here is the Metropolis criterion of standard Monte + Carlo algorithms, although there are many other possibilities [3]_. + + This global minimization method has been shown to be extremely efficient + for a wide variety of problems in physics and chemistry. It is + particularly useful when the function has many minima separated by large + barriers. See the `Cambridge Cluster Database + `_ for databases of molecular + systems that have been optimized primarily using basin-hopping. This + database includes minimization problems exceeding 300 degrees of freedom. + + See the free software program `GMIN `_ + for a Fortran implementation of basin-hopping. This implementation has many + variations of the procedure described above, including more + advanced step taking algorithms and alternate acceptance criterion. + + For stochastic global optimization there is no way to determine if the true + global minimum has actually been found. Instead, as a consistency check, + the algorithm can be run from a number of different random starting points + to ensure the lowest minimum found in each example has converged to the + global minimum. For this reason, `basinhopping` will by default simply + run for the number of iterations `niter` and return the lowest minimum + found. 
It is left to the user to ensure that this is in fact the global + minimum. + + Choosing `stepsize`: This is a crucial parameter in `basinhopping` and + depends on the problem being solved. The step is chosen uniformly in the + region from x0-stepsize to x0+stepsize, in each dimension. Ideally, it + should be comparable to the typical separation (in argument values) between + local minima of the function being optimized. `basinhopping` will, by + default, adjust `stepsize` to find an optimal value, but this may take + many iterations. You will get quicker results if you set a sensible + initial value for ``stepsize``. + + Choosing `T`: The parameter `T` is the "temperature" used in the + Metropolis criterion. Basinhopping steps are always accepted if + ``func(xnew) < func(xold)``. Otherwise, they are accepted with + probability:: + + exp( -(func(xnew) - func(xold)) / T ) + + So, for best results, `T` should be comparable to the typical + difference (in function values) between local minima. (The height of + "walls" between local minima is irrelevant.) + + If `T` is 0, the algorithm becomes Monotonic Basin-Hopping, in which all + steps that increase energy are rejected. + + .. versionadded:: 0.12.0 + + References + ---------- + .. [1] Wales, David J. 2003, Energy Landscapes, Cambridge University Press, + Cambridge, UK. + .. [2] Wales, D J, and Doye J P K, Global Optimization by Basin-Hopping and + the Lowest Energy Structures of Lennard-Jones Clusters Containing up to + 110 Atoms. Journal of Physical Chemistry A, 1997, 101, 5111. + .. [3] Li, Z. and Scheraga, H. A., Monte Carlo-minimization approach to the + multiple-minima problem in protein folding, Proc. Natl. Acad. Sci. USA, + 1987, 84, 6611. + .. [4] Wales, D. J. and Scheraga, H. A., Global optimization of clusters, + crystals, and biomolecules, Science, 1999, 285, 1368. + .. [5] Olson, B., Hashmi, I., Molloy, K., and Shehu, A., Basin Hopping as + a General and Versatile Optimization Framework for the Characterization + of Biological Macromolecules, Advances in Artificial Intelligence, + Volume 2012 (2012), Article ID 674832, :doi:`10.1155/2012/674832` + + Examples + -------- + The following example is a 1-D minimization problem, with many + local minima superimposed on a parabola. + + >>> import numpy as np + >>> from scipy.optimize import basinhopping + >>> func = lambda x: np.cos(14.5 * x - 0.3) + (x + 0.2) * x + >>> x0 = [1.] + + Basinhopping, internally, uses a local minimization algorithm. We will use + the parameter `minimizer_kwargs` to tell basinhopping which algorithm to + use and how to set up that minimizer. This parameter will be passed to + `scipy.optimize.minimize`. + + >>> minimizer_kwargs = {"method": "BFGS"} + >>> ret = basinhopping(func, x0, minimizer_kwargs=minimizer_kwargs, + ... niter=200) + >>> print("global minimum: x = %.4f, f(x) = %.4f" % (ret.x, ret.fun)) + global minimum: x = -0.1951, f(x) = -1.0009 + + Next consider a 2-D minimization problem. Also, this time, we + will use gradient information to significantly speed up the search. + + >>> def func2d(x): + ... f = np.cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + + ... 0.2) * x[0] + ... df = np.zeros(2) + ... df[0] = -14.5 * np.sin(14.5 * x[0] - 0.3) + 2. * x[0] + 0.2 + ... df[1] = 2. * x[1] + 0.2 + ... return f, df + + We'll also use a different local minimization algorithm. Also, we must tell + the minimizer that our function returns both energy and gradient (Jacobian).
+
+ >>> minimizer_kwargs = {"method":"L-BFGS-B", "jac":True}
+ >>> x0 = [1.0, 1.0]
+ >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs,
+ ... niter=200)
+ >>> print("global minimum: x = [%.4f, %.4f], f(x) = %.4f" % (ret.x[0],
+ ... ret.x[1],
+ ... ret.fun))
+ global minimum: x = [-0.1951, -0.1000], f(x) = -1.0109
+
+ Here is an example using a custom step-taking routine. Imagine you want
+ the first coordinate to take larger steps than the rest of the coordinates.
+ This can be implemented like so:
+
+ >>> class MyTakeStep:
+ ... def __init__(self, stepsize=0.5):
+ ... self.stepsize = stepsize
+ ... self.rng = np.random.default_rng()
+ ... def __call__(self, x):
+ ... s = self.stepsize
+ ... x[0] += self.rng.uniform(-2.*s, 2.*s)
+ ... x[1:] += self.rng.uniform(-s, s, x[1:].shape)
+ ... return x
+
+ Since ``MyTakeStep.stepsize`` exists, basinhopping will adjust the magnitude
+ of `stepsize` to optimize the search. We'll use the same 2-D function as
+ before.
+
+ >>> mytakestep = MyTakeStep()
+ >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs,
+ ... niter=200, take_step=mytakestep)
+ >>> print("global minimum: x = [%.4f, %.4f], f(x) = %.4f" % (ret.x[0],
+ ... ret.x[1],
+ ... ret.fun))
+ global minimum: x = [-0.1951, -0.1000], f(x) = -1.0109
+
+ Now, let's do an example using a custom callback function which prints the
+ value of every minimum found.
+
+ >>> def print_fun(x, f, accepted):
+ ... print("at minimum %.4f accepted %d" % (f, int(accepted)))
+
+ We'll run it for only 10 basinhopping steps this time.
+
+ >>> rng = np.random.default_rng()
+ >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs,
+ ... niter=10, callback=print_fun, seed=rng)
+ at minimum 0.4159 accepted 1
+ at minimum -0.4317 accepted 1
+ at minimum -1.0109 accepted 1
+ at minimum -0.9073 accepted 1
+ at minimum -0.4317 accepted 0
+ at minimum -0.1021 accepted 1
+ at minimum -0.7425 accepted 1
+ at minimum -0.9073 accepted 1
+ at minimum -0.4317 accepted 0
+ at minimum -0.7425 accepted 1
+ at minimum -0.9073 accepted 1
+
+ The minimum at -1.0109 is actually the global minimum, found here already
+ on the second basinhopping iteration.
+
+ """ # numpy/numpydoc#87 # noqa: E501
+ if target_accept_rate <= 0. or target_accept_rate >= 1.:
+ raise ValueError('target_accept_rate has to be in range (0, 1)')
+ if stepwise_factor <= 0.
or stepwise_factor >= 1.:
+ raise ValueError('stepwise_factor has to be in range (0, 1)')
+
+ x0 = np.array(x0)
+
+ # set up the np.random generator
+ rng = check_random_state(seed)
+
+ # set up minimizer
+ if minimizer_kwargs is None:
+ minimizer_kwargs = dict()
+ wrapped_minimizer = MinimizerWrapper(scipy.optimize.minimize, func,
+ **minimizer_kwargs)
+
+ # set up step-taking algorithm
+ if take_step is not None:
+ if not callable(take_step):
+ raise TypeError("take_step must be callable")
+ # if take_step.stepsize exists then use AdaptiveStepsize to control
+ # take_step.stepsize
+ if hasattr(take_step, "stepsize"):
+ take_step_wrapped = AdaptiveStepsize(
+ take_step, interval=interval,
+ accept_rate=target_accept_rate,
+ factor=stepwise_factor,
+ verbose=disp)
+ else:
+ take_step_wrapped = take_step
+ else:
+ # use default
+ displace = RandomDisplacement(stepsize=stepsize, random_gen=rng)
+ take_step_wrapped = AdaptiveStepsize(displace, interval=interval,
+ accept_rate=target_accept_rate,
+ factor=stepwise_factor,
+ verbose=disp)
+
+ # set up accept tests
+ accept_tests = []
+ if accept_test is not None:
+ if not callable(accept_test):
+ raise TypeError("accept_test must be callable")
+ accept_tests = [accept_test]
+
+ # use default
+ metropolis = Metropolis(T, random_gen=rng)
+ accept_tests.append(metropolis)
+
+ if niter_success is None:
+ niter_success = niter + 2
+
+ bh = BasinHoppingRunner(x0, wrapped_minimizer, take_step_wrapped,
+ accept_tests, disp=disp)
+
+ # The wrapped minimizer is called once during construction of
+ # BasinHoppingRunner, so run the callback
+ if callable(callback):
+ callback(bh.storage.minres.x, bh.storage.minres.fun, True)
+
+ # start main iteration loop
+ count, i = 0, 0
+ message = ["requested number of basinhopping iterations completed"
+ " successfully"]
+ for i in range(niter):
+ new_global_min = bh.one_cycle()
+
+ if callable(callback):
+ # should we pass a copy of x?
+ val = callback(bh.xtrial, bh.energy_trial, bh.accept)
+ if val is not None:
+ if val:
+ message = ["callback function requested stop early by "
+ "returning True"]
+ break
+
+ count += 1
+ if new_global_min:
+ count = 0
+ elif count > niter_success:
+ message = ["success condition satisfied"]
+ break
+
+ # prepare return object
+ res = bh.res
+ res.lowest_optimization_result = bh.storage.get_lowest()
+ res.x = np.copy(res.lowest_optimization_result.x)
+ res.fun = res.lowest_optimization_result.fun
+ res.message = message
+ res.nit = i + 1
+ res.success = res.lowest_optimization_result.success
+ return res
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_dcsrch.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_dcsrch.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8b4df4763ba4f699869431a0b6528383c2f0328
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_dcsrch.py
@@ -0,0 +1,728 @@
+import numpy as np
+
+"""
+# 2023 - ported from minpack2.dcsrch, dcstep (Fortran) to Python
+c MINPACK-1 Project. June 1983.
+c Argonne National Laboratory.
+c Jorge J. More' and David J. Thuente.
+c
+c MINPACK-2 Project. November 1993.
+c Argonne National Laboratory and University of Minnesota.
+c Brett M. Averick, Richard G. Carter, and Jorge J. More'.
+"""
+
+# NOTE this file was linted by black on first commit, and can be kept that way.
+
+
+class DCSRCH:
+ """
+ Parameters
+ ----------
+ phi : callable phi(alpha)
+ Function at point `alpha`
+ derphi : callable phi'(alpha)
+ Objective function derivative.
Returns a scalar.
+ ftol : float
+ A nonnegative tolerance for the sufficient decrease condition.
+ gtol : float
+ A nonnegative tolerance for the curvature condition.
+ xtol : float
+ A nonnegative relative tolerance for an acceptable step. The
+ subroutine exits with a warning if the relative difference between
+ sty and stx is less than xtol.
+ stpmin : float
+ A nonnegative lower bound for the step.
+ stpmax : float
+ A nonnegative upper bound for the step.
+
+ Notes
+ -----
+
+ This subroutine finds a step that satisfies a sufficient
+ decrease condition and a curvature condition.
+
+ Each call of the subroutine updates an interval with
+ endpoints stx and sty. The interval is initially chosen
+ so that it contains a minimizer of the modified function
+
+ psi(stp) = f(stp) - f(0) - ftol*stp*f'(0).
+
+ If psi(stp) <= 0 and f'(stp) >= 0 for some step, then the
+ interval is chosen so that it contains a minimizer of f.
+
+ The algorithm is designed to find a step that satisfies
+ the sufficient decrease condition
+
+ f(stp) <= f(0) + ftol*stp*f'(0),
+
+ and the curvature condition
+
+ abs(f'(stp)) <= gtol*abs(f'(0)).
+
+ If ftol is less than gtol and if, for example, the function
+ is bounded below, then there is always a step which satisfies
+ both conditions.
+
+ If no step can be found that satisfies both conditions, then
+ the algorithm stops with a warning. In this case stp only
+ satisfies the sufficient decrease condition.
+
+ A typical invocation of dcsrch has the following outline:
+
+ Evaluate the function at stp = 0.0d0; store in f.
+ Evaluate the gradient at stp = 0.0d0; store in g.
+ Choose a starting step stp.
+
+ task = 'START'
+ 10 continue
+ call dcsrch(stp,f,g,ftol,gtol,xtol,task,stpmin,stpmax,
+ isave,dsave)
+ if (task .eq. 'FG') then
+ Evaluate the function and the gradient at stp
+ go to 10
+ end if
+
+ NOTE: The user must not alter work arrays between calls.
+
+ The subroutine statement is
+
+ subroutine dcsrch(f,g,stp,ftol,gtol,xtol,stpmin,stpmax,
+ task,isave,dsave)
+ where
+
+ stp is a double precision variable.
+ On entry stp is the current estimate of a satisfactory
+ step. On initial entry, a positive initial estimate
+ must be provided.
+ On exit stp is the current estimate of a satisfactory step
+ if task = 'FG'. If task = 'CONV' then stp satisfies
+ the sufficient decrease and curvature condition.
+
+ f is a double precision variable.
+ On initial entry f is the value of the function at 0.
+ On subsequent entries f is the value of the
+ function at stp.
+ On exit f is the value of the function at stp.
+
+ g is a double precision variable.
+ On initial entry g is the derivative of the function at 0.
+ On subsequent entries g is the derivative of the
+ function at stp.
+ On exit g is the derivative of the function at stp.
+
+ ftol is a double precision variable.
+ On entry ftol specifies a nonnegative tolerance for the
+ sufficient decrease condition.
+ On exit ftol is unchanged.
+
+ gtol is a double precision variable.
+ On entry gtol specifies a nonnegative tolerance for the
+ curvature condition.
+ On exit gtol is unchanged.
+
+ xtol is a double precision variable.
+ On entry xtol specifies a nonnegative relative tolerance
+ for an acceptable step. The subroutine exits with a
+ warning if the relative difference between sty and stx
+ is less than xtol.
+
+ On exit xtol is unchanged.
+
+ task is a character variable of length at least 60.
+ On initial entry task must be set to 'START'.
+ On exit task indicates the required action: + + If task(1:2) = 'FG' then evaluate the function and + derivative at stp and call dcsrch again. + + If task(1:4) = 'CONV' then the search is successful. + + If task(1:4) = 'WARN' then the subroutine is not able + to satisfy the convergence conditions. The exit value of + stp contains the best point found during the search. + + If task(1:5) = 'ERROR' then there is an error in the + input arguments. + + On exit with convergence, a warning or an error, the + variable task contains additional information. + + stpmin is a double precision variable. + On entry stpmin is a nonnegative lower bound for the step. + On exit stpmin is unchanged. + + stpmax is a double precision variable. + On entry stpmax is a nonnegative upper bound for the step. + On exit stpmax is unchanged. + + isave is an integer work array of dimension 2. + + dsave is a double precision work array of dimension 13. + + Subprograms called + + MINPACK-2 ... dcstep + MINPACK-1 Project. June 1983. + Argonne National Laboratory. + Jorge J. More' and David J. Thuente. + + MINPACK-2 Project. November 1993. + Argonne National Laboratory and University of Minnesota. + Brett M. Averick, Richard G. Carter, and Jorge J. More'. + """ + + def __init__(self, phi, derphi, ftol, gtol, xtol, stpmin, stpmax): + self.stage = None + self.ginit = None + self.gtest = None + self.gx = None + self.gy = None + self.finit = None + self.fx = None + self.fy = None + self.stx = None + self.sty = None + self.stmin = None + self.stmax = None + self.width = None + self.width1 = None + + # leave all assessment of tolerances/limits to the first call of + # this object + self.ftol = ftol + self.gtol = gtol + self.xtol = xtol + self.stpmin = stpmin + self.stpmax = stpmax + + self.phi = phi + self.derphi = derphi + + def __call__(self, alpha1, phi0=None, derphi0=None, maxiter=100): + """ + Parameters + ---------- + alpha1 : float + alpha1 is the current estimate of a satisfactory + step. A positive initial estimate must be provided. + phi0 : float + the value of `phi` at 0 (if known). + derphi0 : float + the derivative of `derphi` at 0 (if known). + maxiter : int + + Returns + ------- + alpha : float + Step size, or None if no suitable step was found. + phi : float + Value of `phi` at the new point `alpha`. + phi0 : float + Value of `phi` at `alpha=0`. + task : bytes + On exit task indicates status information. + + If task[:4] == b'CONV' then the search is successful. + + If task[:4] == b'WARN' then the subroutine is not able + to satisfy the convergence conditions. The exit value of + stp contains the best point found during the search. + + If task[:5] == b'ERROR' then there is an error in the + input arguments. + """ + if phi0 is None: + phi0 = self.phi(0.0) + if derphi0 is None: + derphi0 = self.derphi(0.0) + + phi1 = phi0 + derphi1 = derphi0 + + task = b"START" + for i in range(maxiter): + stp, phi1, derphi1, task = self._iterate( + alpha1, phi1, derphi1, task + ) + + if not np.isfinite(stp): + task = b"WARN" + stp = None + break + + if task[:2] == b"FG": + alpha1 = stp + phi1 = self.phi(stp) + derphi1 = self.derphi(stp) + else: + break + else: + # maxiter reached, the line search did not converge + stp = None + task = b"WARNING: dcsrch did not converge within max iterations" + + if task[:5] == b"ERROR" or task[:4] == b"WARN": + stp = None # failed + + return stp, phi1, phi0, task + + def _iterate(self, stp, f, g, task): + """ + Parameters + ---------- + stp : float + The current estimate of a satisfactory step. 
On initial entry, a + positive initial estimate must be provided. + f : float + On first call f is the value of the function at 0. On subsequent + entries f should be the value of the function at stp. + g : float + On initial entry g is the derivative of the function at 0. On + subsequent entries g is the derivative of the function at stp. + task : bytes + On initial entry task must be set to 'START'. + + On exit with convergence, a warning or an error, the + variable task contains additional information. + + + Returns + ------- + stp, f, g, task: tuple + + stp : float + the current estimate of a satisfactory step if task = 'FG'. If + task = 'CONV' then stp satisfies the sufficient decrease and + curvature condition. + f : float + the value of the function at stp. + g : float + the derivative of the function at stp. + task : bytes + On exit task indicates the required action: + + If task(1:2) == b'FG' then evaluate the function and + derivative at stp and call dcsrch again. + + If task(1:4) == b'CONV' then the search is successful. + + If task(1:4) == b'WARN' then the subroutine is not able + to satisfy the convergence conditions. The exit value of + stp contains the best point found during the search. + + If task(1:5) == b'ERROR' then there is an error in the + input arguments. + """ + p5 = 0.5 + p66 = 0.66 + xtrapl = 1.1 + xtrapu = 4.0 + + if task[:5] == b"START": + if stp < self.stpmin: + task = b"ERROR: STP .LT. STPMIN" + if stp > self.stpmax: + task = b"ERROR: STP .GT. STPMAX" + if g >= 0: + task = b"ERROR: INITIAL G .GE. ZERO" + if self.ftol < 0: + task = b"ERROR: FTOL .LT. ZERO" + if self.gtol < 0: + task = b"ERROR: GTOL .LT. ZERO" + if self.xtol < 0: + task = b"ERROR: XTOL .LT. ZERO" + if self.stpmin < 0: + task = b"ERROR: STPMIN .LT. ZERO" + if self.stpmax < self.stpmin: + task = b"ERROR: STPMAX .LT. STPMIN" + + if task[:5] == b"ERROR": + return stp, f, g, task + + # Initialize local variables. + + self.brackt = False + self.stage = 1 + self.finit = f + self.ginit = g + self.gtest = self.ftol * self.ginit + self.width = self.stpmax - self.stpmin + self.width1 = self.width / p5 + + # The variables stx, fx, gx contain the values of the step, + # function, and derivative at the best step. + # The variables sty, fy, gy contain the value of the step, + # function, and derivative at sty. + # The variables stp, f, g contain the values of the step, + # function, and derivative at stp. + + self.stx = 0.0 + self.fx = self.finit + self.gx = self.ginit + self.sty = 0.0 + self.fy = self.finit + self.gy = self.ginit + self.stmin = 0 + self.stmax = stp + xtrapu * stp + task = b"FG" + return stp, f, g, task + + # in the original Fortran this was a location to restore variables + # we don't need to do that because they're attributes. + + # If psi(stp) <= 0 and f'(stp) >= 0 for some step, then the + # algorithm enters the second stage. 
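+ # Concretely: gtest = ftol * f'(0) was set on the START pass, so the
+ # threshold ftest = f(0) + stp * gtest computed below is the
+ # sufficient-decrease bound, and ``f <= ftest`` is equivalent to
+ # psi(stp) <= 0 for the modified function psi defined in the class
+ # docstring.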
+ ftest = self.finit + stp * self.gtest + + if self.stage == 1 and f <= ftest and g >= 0: + self.stage = 2 + + # test for warnings + if self.brackt and (stp <= self.stmin or stp >= self.stmax): + task = b"WARNING: ROUNDING ERRORS PREVENT PROGRESS" + if self.brackt and self.stmax - self.stmin <= self.xtol * self.stmax: + task = b"WARNING: XTOL TEST SATISFIED" + if stp == self.stpmax and f <= ftest and g <= self.gtest: + task = b"WARNING: STP = STPMAX" + if stp == self.stpmin and (f > ftest or g >= self.gtest): + task = b"WARNING: STP = STPMIN" + + # test for convergence + if f <= ftest and abs(g) <= self.gtol * -self.ginit: + task = b"CONVERGENCE" + + # test for termination + if task[:4] == b"WARN" or task[:4] == b"CONV": + return stp, f, g, task + + # A modified function is used to predict the step during the + # first stage if a lower function value has been obtained but + # the decrease is not sufficient. + if self.stage == 1 and f <= self.fx and f > ftest: + # Define the modified function and derivative values. + fm = f - stp * self.gtest + fxm = self.fx - self.stx * self.gtest + fym = self.fy - self.sty * self.gtest + gm = g - self.gtest + gxm = self.gx - self.gtest + gym = self.gy - self.gtest + + # Call dcstep to update stx, sty, and to compute the new step. + # dcstep can have several operations which can produce NaN + # e.g. inf/inf. Filter these out. + with np.errstate(invalid="ignore", over="ignore"): + tup = dcstep( + self.stx, + fxm, + gxm, + self.sty, + fym, + gym, + stp, + fm, + gm, + self.brackt, + self.stmin, + self.stmax, + ) + self.stx, fxm, gxm, self.sty, fym, gym, stp, self.brackt = tup + + # Reset the function and derivative values for f + self.fx = fxm + self.stx * self.gtest + self.fy = fym + self.sty * self.gtest + self.gx = gxm + self.gtest + self.gy = gym + self.gtest + + else: + # Call dcstep to update stx, sty, and to compute the new step. + # dcstep can have several operations which can produce NaN + # e.g. inf/inf. Filter these out. + + with np.errstate(invalid="ignore", over="ignore"): + tup = dcstep( + self.stx, + self.fx, + self.gx, + self.sty, + self.fy, + self.gy, + stp, + f, + g, + self.brackt, + self.stmin, + self.stmax, + ) + ( + self.stx, + self.fx, + self.gx, + self.sty, + self.fy, + self.gy, + stp, + self.brackt, + ) = tup + + # Decide if a bisection step is needed + if self.brackt: + if abs(self.sty - self.stx) >= p66 * self.width1: + stp = self.stx + p5 * (self.sty - self.stx) + self.width1 = self.width + self.width = abs(self.sty - self.stx) + + # Set the minimum and maximum steps allowed for stp. + if self.brackt: + self.stmin = min(self.stx, self.sty) + self.stmax = max(self.stx, self.sty) + else: + self.stmin = stp + xtrapl * (stp - self.stx) + self.stmax = stp + xtrapu * (stp - self.stx) + + # Force the step to be within the bounds stpmax and stpmin. + stp = np.clip(stp, self.stpmin, self.stpmax) + + # If further progress is not possible, let stp be the best + # point obtained during the search. + if ( + self.brackt + and (stp <= self.stmin or stp >= self.stmax) + or ( + self.brackt + and self.stmax - self.stmin <= self.xtol * self.stmax + ) + ): + stp = self.stx + + # Obtain another function and derivative + task = b"FG" + return stp, f, g, task + + +def dcstep(stx, fx, dx, sty, fy, dy, stp, fp, dp, brackt, stpmin, stpmax): + """ + Subroutine dcstep + + This subroutine computes a safeguarded step for a search + procedure and updates an interval that contains a step that + satisfies a sufficient decrease and a curvature condition. 
+ + The parameter stx contains the step with the least function + value. If brackt is set to .true. then a minimizer has + been bracketed in an interval with endpoints stx and sty. + The parameter stp contains the current step. + The subroutine assumes that if brackt is set to .true. then + + min(stx,sty) < stp < max(stx,sty), + + and that the derivative at stx is negative in the direction + of the step. + + The subroutine statement is + + subroutine dcstep(stx,fx,dx,sty,fy,dy,stp,fp,dp,brackt, + stpmin,stpmax) + + where + + stx is a double precision variable. + On entry stx is the best step obtained so far and is an + endpoint of the interval that contains the minimizer. + On exit stx is the updated best step. + + fx is a double precision variable. + On entry fx is the function at stx. + On exit fx is the function at stx. + + dx is a double precision variable. + On entry dx is the derivative of the function at + stx. The derivative must be negative in the direction of + the step, that is, dx and stp - stx must have opposite + signs. + On exit dx is the derivative of the function at stx. + + sty is a double precision variable. + On entry sty is the second endpoint of the interval that + contains the minimizer. + On exit sty is the updated endpoint of the interval that + contains the minimizer. + + fy is a double precision variable. + On entry fy is the function at sty. + On exit fy is the function at sty. + + dy is a double precision variable. + On entry dy is the derivative of the function at sty. + On exit dy is the derivative of the function at the exit sty. + + stp is a double precision variable. + On entry stp is the current step. If brackt is set to .true. + then on input stp must be between stx and sty. + On exit stp is a new trial step. + + fp is a double precision variable. + On entry fp is the function at stp + On exit fp is unchanged. + + dp is a double precision variable. + On entry dp is the derivative of the function at stp. + On exit dp is unchanged. + + brackt is an logical variable. + On entry brackt specifies if a minimizer has been bracketed. + Initially brackt must be set to .false. + On exit brackt specifies if a minimizer has been bracketed. + When a minimizer is bracketed brackt is set to .true. + + stpmin is a double precision variable. + On entry stpmin is a lower bound for the step. + On exit stpmin is unchanged. + + stpmax is a double precision variable. + On entry stpmax is an upper bound for the step. + On exit stpmax is unchanged. + + MINPACK-1 Project. June 1983 + Argonne National Laboratory. + Jorge J. More' and David J. Thuente. + + MINPACK-2 Project. November 1993. + Argonne National Laboratory and University of Minnesota. + Brett M. Averick and Jorge J. More'. + + """ + sgn_dp = np.sign(dp) + sgn_dx = np.sign(dx) + + # sgnd = dp * (dx / abs(dx)) + sgnd = sgn_dp * sgn_dx + + # First case: A higher function value. The minimum is bracketed. + # If the cubic step is closer to stx than the quadratic step, the + # cubic step is taken, otherwise the average of the cubic and + # quadratic steps is taken. 
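+ # In the branches below, theta, s and gamma are the (rescaled,
+ # overflow-safe) quantities of the cubic interpolating
+ # (stx, fx, dx) and (stp, fp, dp); stpc is the resulting cubic step,
+ # while stpq is the minimizer of the quadratic interpolating fx, dx
+ # and fp.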
+ if fp > fx: + theta = 3.0 * (fx - fp) / (stp - stx) + dx + dp + s = max(abs(theta), abs(dx), abs(dp)) + gamma = s * np.sqrt((theta / s) ** 2 - (dx / s) * (dp / s)) + if stp < stx: + gamma *= -1 + p = (gamma - dx) + theta + q = ((gamma - dx) + gamma) + dp + r = p / q + stpc = stx + r * (stp - stx) + stpq = stx + ((dx / ((fx - fp) / (stp - stx) + dx)) / 2.0) * (stp - stx) + if abs(stpc - stx) <= abs(stpq - stx): + stpf = stpc + else: + stpf = stpc + (stpq - stpc) / 2.0 + brackt = True + elif sgnd < 0.0: + # Second case: A lower function value and derivatives of opposite + # sign. The minimum is bracketed. If the cubic step is farther from + # stp than the secant step, the cubic step is taken, otherwise the + # secant step is taken. + theta = 3 * (fx - fp) / (stp - stx) + dx + dp + s = max(abs(theta), abs(dx), abs(dp)) + gamma = s * np.sqrt((theta / s) ** 2 - (dx / s) * (dp / s)) + if stp > stx: + gamma *= -1 + p = (gamma - dp) + theta + q = ((gamma - dp) + gamma) + dx + r = p / q + stpc = stp + r * (stx - stp) + stpq = stp + (dp / (dp - dx)) * (stx - stp) + if abs(stpc - stp) > abs(stpq - stp): + stpf = stpc + else: + stpf = stpq + brackt = True + elif abs(dp) < abs(dx): + # Third case: A lower function value, derivatives of the same sign, + # and the magnitude of the derivative decreases. + + # The cubic step is computed only if the cubic tends to infinity + # in the direction of the step or if the minimum of the cubic + # is beyond stp. Otherwise the cubic step is defined to be the + # secant step. + theta = 3 * (fx - fp) / (stp - stx) + dx + dp + s = max(abs(theta), abs(dx), abs(dp)) + + # The case gamma = 0 only arises if the cubic does not tend + # to infinity in the direction of the step. + gamma = s * np.sqrt(max(0, (theta / s) ** 2 - (dx / s) * (dp / s))) + if stp > stx: + gamma = -gamma + p = (gamma - dp) + theta + q = (gamma + (dx - dp)) + gamma + r = p / q + if r < 0 and gamma != 0: + stpc = stp + r * (stx - stp) + elif stp > stx: + stpc = stpmax + else: + stpc = stpmin + stpq = stp + (dp / (dp - dx)) * (stx - stp) + + if brackt: + # A minimizer has been bracketed. If the cubic step is + # closer to stp than the secant step, the cubic step is + # taken, otherwise the secant step is taken. + if abs(stpc - stp) < abs(stpq - stp): + stpf = stpc + else: + stpf = stpq + + if stp > stx: + stpf = min(stp + 0.66 * (sty - stp), stpf) + else: + stpf = max(stp + 0.66 * (sty - stp), stpf) + else: + # A minimizer has not been bracketed. If the cubic step is + # farther from stp than the secant step, the cubic step is + # taken, otherwise the secant step is taken. + if abs(stpc - stp) > abs(stpq - stp): + stpf = stpc + else: + stpf = stpq + stpf = np.clip(stpf, stpmin, stpmax) + + else: + # Fourth case: A lower function value, derivatives of the same sign, + # and the magnitude of the derivative does not decrease. If the + # minimum is not bracketed, the step is either stpmin or stpmax, + # otherwise the cubic step is taken. + if brackt: + theta = 3.0 * (fp - fy) / (sty - stp) + dy + dp + s = max(abs(theta), abs(dy), abs(dp)) + gamma = s * np.sqrt((theta / s) ** 2 - (dy / s) * (dp / s)) + if stp > sty: + gamma = -gamma + p = (gamma - dp) + theta + q = ((gamma - dp) + gamma) + dy + r = p / q + stpc = stp + r * (sty - stp) + stpf = stpc + elif stp > stx: + stpf = stpmax + else: + stpf = stpmin + + # Update the interval which contains a minimizer. 
+ if fp > fx: + sty = stp + fy = fp + dy = dp + else: + if sgnd < 0: + sty = stx + fy = fx + dy = dx + stx = stp + fx = fp + dx = dp + + # Compute the new step. + stp = stpf + + return stx, fx, dx, sty, fy, dy, stp, brackt diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_differentiable_functions.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_differentiable_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..5df1196abf56e4b3e2dbc9cf3c879f8631c6d083 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_differentiable_functions.py @@ -0,0 +1,646 @@ +import numpy as np +import scipy.sparse as sps +from ._numdiff import approx_derivative, group_columns +from ._hessian_update_strategy import HessianUpdateStrategy +from scipy.sparse.linalg import LinearOperator +from scipy._lib._array_api import atleast_nd, array_namespace + + +FD_METHODS = ('2-point', '3-point', 'cs') + + +class ScalarFunction: + """Scalar function and its derivatives. + + This class defines a scalar function F: R^n->R and methods for + computing or approximating its first and second derivatives. + + Parameters + ---------- + fun : callable + evaluates the scalar function. Must be of the form ``fun(x, *args)``, + where ``x`` is the argument in the form of a 1-D array and ``args`` is + a tuple of any additional fixed parameters needed to completely specify + the function. Should return a scalar. + x0 : array-like + Provides an initial set of variables for evaluating fun. Array of real + elements of size (n,), where 'n' is the number of independent + variables. + args : tuple, optional + Any additional fixed parameters needed to completely specify the scalar + function. + grad : {callable, '2-point', '3-point', 'cs'} + Method for computing the gradient vector. + If it is a callable, it should be a function that returns the gradient + vector: + + ``grad(x, *args) -> array_like, shape (n,)`` + + where ``x`` is an array with shape (n,) and ``args`` is a tuple with + the fixed parameters. + Alternatively, the keywords {'2-point', '3-point', 'cs'} can be used + to select a finite difference scheme for numerical estimation of the + gradient with a relative step size. These finite difference schemes + obey any specified `bounds`. + hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy} + Method for computing the Hessian matrix. If it is callable, it should + return the Hessian matrix: + + ``hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n)`` + + where x is a (n,) ndarray and `args` is a tuple with the fixed + parameters. Alternatively, the keywords {'2-point', '3-point', 'cs'} + select a finite difference scheme for numerical estimation. Or, objects + implementing `HessianUpdateStrategy` interface can be used to + approximate the Hessian. + Whenever the gradient is estimated via finite-differences, the Hessian + cannot be estimated with options {'2-point', '3-point', 'cs'} and needs + to be estimated using one of the quasi-Newton strategies. + finite_diff_rel_step : None or array_like + Relative step size to use. The absolute step size is computed as + ``h = finite_diff_rel_step * sign(x0) * max(1, abs(x0))``, possibly + adjusted to fit into the bounds. For ``method='3-point'`` the sign + of `h` is ignored. If None then finite_diff_rel_step is selected + automatically, + finite_diff_bounds : tuple of array_like + Lower and upper bounds on independent variables. Defaults to no bounds, + (-np.inf, np.inf). 
Each bound must match the size of `x0` or be a + scalar, in the latter case the bound will be the same for all + variables. Use it to limit the range of function evaluation. + epsilon : None or array_like, optional + Absolute step size to use, possibly adjusted to fit into the bounds. + For ``method='3-point'`` the sign of `epsilon` is ignored. By default + relative steps are used, only if ``epsilon is not None`` are absolute + steps used. + + Notes + ----- + This class implements a memoization logic. There are methods `fun`, + `grad`, hess` and corresponding attributes `f`, `g` and `H`. The following + things should be considered: + + 1. Use only public methods `fun`, `grad` and `hess`. + 2. After one of the methods is called, the corresponding attribute + will be set. However, a subsequent call with a different argument + of *any* of the methods may overwrite the attribute. + """ + def __init__(self, fun, x0, args, grad, hess, finite_diff_rel_step, + finite_diff_bounds, epsilon=None): + if not callable(grad) and grad not in FD_METHODS: + raise ValueError( + f"`grad` must be either callable or one of {FD_METHODS}." + ) + + if not (callable(hess) or hess in FD_METHODS + or isinstance(hess, HessianUpdateStrategy)): + raise ValueError( + f"`hess` must be either callable, HessianUpdateStrategy" + f" or one of {FD_METHODS}." + ) + + if grad in FD_METHODS and hess in FD_METHODS: + raise ValueError("Whenever the gradient is estimated via " + "finite-differences, we require the Hessian " + "to be estimated using one of the " + "quasi-Newton strategies.") + + self.xp = xp = array_namespace(x0) + _x = atleast_nd(x0, ndim=1, xp=xp) + _dtype = xp.float64 + if xp.isdtype(_x.dtype, "real floating"): + _dtype = _x.dtype + + # promotes to floating + self.x = xp.astype(_x, _dtype) + self.x_dtype = _dtype + self.n = self.x.size + self.nfev = 0 + self.ngev = 0 + self.nhev = 0 + self.f_updated = False + self.g_updated = False + self.H_updated = False + + self._lowest_x = None + self._lowest_f = np.inf + + finite_diff_options = {} + if grad in FD_METHODS: + finite_diff_options["method"] = grad + finite_diff_options["rel_step"] = finite_diff_rel_step + finite_diff_options["abs_step"] = epsilon + finite_diff_options["bounds"] = finite_diff_bounds + if hess in FD_METHODS: + finite_diff_options["method"] = hess + finite_diff_options["rel_step"] = finite_diff_rel_step + finite_diff_options["abs_step"] = epsilon + finite_diff_options["as_linear_operator"] = True + + # Function evaluation + def fun_wrapped(x): + self.nfev += 1 + # Send a copy because the user may overwrite it. + # Overwriting results in undefined behaviour because + # fun(self.x) will change self.x, with the two no longer linked. + fx = fun(np.copy(x), *args) + # Make sure the function returns a true scalar + if not np.isscalar(fx): + try: + fx = np.asarray(fx).item() + except (TypeError, ValueError) as e: + raise ValueError( + "The user-provided objective function " + "must return a scalar value." 
+ ) from e + + if fx < self._lowest_f: + self._lowest_x = x + self._lowest_f = fx + + return fx + + def update_fun(): + self.f = fun_wrapped(self.x) + + self._update_fun_impl = update_fun + self._update_fun() + + # Gradient evaluation + if callable(grad): + def grad_wrapped(x): + self.ngev += 1 + return np.atleast_1d(grad(np.copy(x), *args)) + + def update_grad(): + self.g = grad_wrapped(self.x) + + elif grad in FD_METHODS: + def update_grad(): + self._update_fun() + self.ngev += 1 + self.g = approx_derivative(fun_wrapped, self.x, f0=self.f, + **finite_diff_options) + + self._update_grad_impl = update_grad + self._update_grad() + + # Hessian Evaluation + if callable(hess): + self.H = hess(np.copy(x0), *args) + self.H_updated = True + self.nhev += 1 + + if sps.issparse(self.H): + def hess_wrapped(x): + self.nhev += 1 + return sps.csr_matrix(hess(np.copy(x), *args)) + self.H = sps.csr_matrix(self.H) + + elif isinstance(self.H, LinearOperator): + def hess_wrapped(x): + self.nhev += 1 + return hess(np.copy(x), *args) + + else: + def hess_wrapped(x): + self.nhev += 1 + return np.atleast_2d(np.asarray(hess(np.copy(x), *args))) + self.H = np.atleast_2d(np.asarray(self.H)) + + def update_hess(): + self.H = hess_wrapped(self.x) + + elif hess in FD_METHODS: + def update_hess(): + self._update_grad() + self.H = approx_derivative(grad_wrapped, self.x, f0=self.g, + **finite_diff_options) + return self.H + + update_hess() + self.H_updated = True + elif isinstance(hess, HessianUpdateStrategy): + self.H = hess + self.H.initialize(self.n, 'hess') + self.H_updated = True + self.x_prev = None + self.g_prev = None + + def update_hess(): + self._update_grad() + self.H.update(self.x - self.x_prev, self.g - self.g_prev) + + self._update_hess_impl = update_hess + + if isinstance(hess, HessianUpdateStrategy): + def update_x(x): + self._update_grad() + self.x_prev = self.x + self.g_prev = self.g + # ensure that self.x is a copy of x. Don't store a reference + # otherwise the memoization doesn't work properly. + + _x = atleast_nd(x, ndim=1, xp=self.xp) + self.x = self.xp.astype(_x, self.x_dtype) + self.f_updated = False + self.g_updated = False + self.H_updated = False + self._update_hess() + else: + def update_x(x): + # ensure that self.x is a copy of x. Don't store a reference + # otherwise the memoization doesn't work properly. + _x = atleast_nd(x, ndim=1, xp=self.xp) + self.x = self.xp.astype(_x, self.x_dtype) + self.f_updated = False + self.g_updated = False + self.H_updated = False + self._update_x_impl = update_x + + def _update_fun(self): + if not self.f_updated: + self._update_fun_impl() + self.f_updated = True + + def _update_grad(self): + if not self.g_updated: + self._update_grad_impl() + self.g_updated = True + + def _update_hess(self): + if not self.H_updated: + self._update_hess_impl() + self.H_updated = True + + def fun(self, x): + if not np.array_equal(x, self.x): + self._update_x_impl(x) + self._update_fun() + return self.f + + def grad(self, x): + if not np.array_equal(x, self.x): + self._update_x_impl(x) + self._update_grad() + return self.g + + def hess(self, x): + if not np.array_equal(x, self.x): + self._update_x_impl(x) + self._update_hess() + return self.H + + def fun_and_grad(self, x): + if not np.array_equal(x, self.x): + self._update_x_impl(x) + self._update_fun() + self._update_grad() + return self.f, self.g + + +class VectorFunction: + """Vector function and its derivatives. 
+ + This class defines a vector function F: R^n->R^m and methods for + computing or approximating its first and second derivatives. + + Notes + ----- + This class implements a memoization logic. There are methods `fun`, + `jac`, hess` and corresponding attributes `f`, `J` and `H`. The following + things should be considered: + + 1. Use only public methods `fun`, `jac` and `hess`. + 2. After one of the methods is called, the corresponding attribute + will be set. However, a subsequent call with a different argument + of *any* of the methods may overwrite the attribute. + """ + def __init__(self, fun, x0, jac, hess, + finite_diff_rel_step, finite_diff_jac_sparsity, + finite_diff_bounds, sparse_jacobian): + if not callable(jac) and jac not in FD_METHODS: + raise ValueError(f"`jac` must be either callable or one of {FD_METHODS}.") + + if not (callable(hess) or hess in FD_METHODS + or isinstance(hess, HessianUpdateStrategy)): + raise ValueError("`hess` must be either callable," + f"HessianUpdateStrategy or one of {FD_METHODS}.") + + if jac in FD_METHODS and hess in FD_METHODS: + raise ValueError("Whenever the Jacobian is estimated via " + "finite-differences, we require the Hessian to " + "be estimated using one of the quasi-Newton " + "strategies.") + + self.xp = xp = array_namespace(x0) + _x = atleast_nd(x0, ndim=1, xp=xp) + _dtype = xp.float64 + if xp.isdtype(_x.dtype, "real floating"): + _dtype = _x.dtype + + # promotes to floating + self.x = xp.astype(_x, _dtype) + self.x_dtype = _dtype + + self.n = self.x.size + self.nfev = 0 + self.njev = 0 + self.nhev = 0 + self.f_updated = False + self.J_updated = False + self.H_updated = False + + finite_diff_options = {} + if jac in FD_METHODS: + finite_diff_options["method"] = jac + finite_diff_options["rel_step"] = finite_diff_rel_step + if finite_diff_jac_sparsity is not None: + sparsity_groups = group_columns(finite_diff_jac_sparsity) + finite_diff_options["sparsity"] = (finite_diff_jac_sparsity, + sparsity_groups) + finite_diff_options["bounds"] = finite_diff_bounds + self.x_diff = np.copy(self.x) + if hess in FD_METHODS: + finite_diff_options["method"] = hess + finite_diff_options["rel_step"] = finite_diff_rel_step + finite_diff_options["as_linear_operator"] = True + self.x_diff = np.copy(self.x) + if jac in FD_METHODS and hess in FD_METHODS: + raise ValueError("Whenever the Jacobian is estimated via " + "finite-differences, we require the Hessian to " + "be estimated using one of the quasi-Newton " + "strategies.") + + # Function evaluation + def fun_wrapped(x): + self.nfev += 1 + return np.atleast_1d(fun(x)) + + def update_fun(): + self.f = fun_wrapped(self.x) + + self._update_fun_impl = update_fun + update_fun() + + self.v = np.zeros_like(self.f) + self.m = self.v.size + + # Jacobian Evaluation + if callable(jac): + self.J = jac(self.x) + self.J_updated = True + self.njev += 1 + + if (sparse_jacobian or + sparse_jacobian is None and sps.issparse(self.J)): + def jac_wrapped(x): + self.njev += 1 + return sps.csr_matrix(jac(x)) + self.J = sps.csr_matrix(self.J) + self.sparse_jacobian = True + + elif sps.issparse(self.J): + def jac_wrapped(x): + self.njev += 1 + return jac(x).toarray() + self.J = self.J.toarray() + self.sparse_jacobian = False + + else: + def jac_wrapped(x): + self.njev += 1 + return np.atleast_2d(jac(x)) + self.J = np.atleast_2d(self.J) + self.sparse_jacobian = False + + def update_jac(): + self.J = jac_wrapped(self.x) + + elif jac in FD_METHODS: + self.J = approx_derivative(fun_wrapped, self.x, f0=self.f, + 
**finite_diff_options) + self.J_updated = True + + if (sparse_jacobian or + sparse_jacobian is None and sps.issparse(self.J)): + def update_jac(): + self._update_fun() + self.J = sps.csr_matrix( + approx_derivative(fun_wrapped, self.x, f0=self.f, + **finite_diff_options)) + self.J = sps.csr_matrix(self.J) + self.sparse_jacobian = True + + elif sps.issparse(self.J): + def update_jac(): + self._update_fun() + self.J = approx_derivative(fun_wrapped, self.x, f0=self.f, + **finite_diff_options).toarray() + self.J = self.J.toarray() + self.sparse_jacobian = False + + else: + def update_jac(): + self._update_fun() + self.J = np.atleast_2d( + approx_derivative(fun_wrapped, self.x, f0=self.f, + **finite_diff_options)) + self.J = np.atleast_2d(self.J) + self.sparse_jacobian = False + + self._update_jac_impl = update_jac + + # Define Hessian + if callable(hess): + self.H = hess(self.x, self.v) + self.H_updated = True + self.nhev += 1 + + if sps.issparse(self.H): + def hess_wrapped(x, v): + self.nhev += 1 + return sps.csr_matrix(hess(x, v)) + self.H = sps.csr_matrix(self.H) + + elif isinstance(self.H, LinearOperator): + def hess_wrapped(x, v): + self.nhev += 1 + return hess(x, v) + + else: + def hess_wrapped(x, v): + self.nhev += 1 + return np.atleast_2d(np.asarray(hess(x, v))) + self.H = np.atleast_2d(np.asarray(self.H)) + + def update_hess(): + self.H = hess_wrapped(self.x, self.v) + elif hess in FD_METHODS: + def jac_dot_v(x, v): + return jac_wrapped(x).T.dot(v) + + def update_hess(): + self._update_jac() + self.H = approx_derivative(jac_dot_v, self.x, + f0=self.J.T.dot(self.v), + args=(self.v,), + **finite_diff_options) + update_hess() + self.H_updated = True + elif isinstance(hess, HessianUpdateStrategy): + self.H = hess + self.H.initialize(self.n, 'hess') + self.H_updated = True + self.x_prev = None + self.J_prev = None + + def update_hess(): + self._update_jac() + # When v is updated before x was updated, then x_prev and + # J_prev are None and we need this check. + if self.x_prev is not None and self.J_prev is not None: + delta_x = self.x - self.x_prev + delta_g = self.J.T.dot(self.v) - self.J_prev.T.dot(self.v) + self.H.update(delta_x, delta_g) + + self._update_hess_impl = update_hess + + if isinstance(hess, HessianUpdateStrategy): + def update_x(x): + self._update_jac() + self.x_prev = self.x + self.J_prev = self.J + _x = atleast_nd(x, ndim=1, xp=self.xp) + self.x = self.xp.astype(_x, self.x_dtype) + self.f_updated = False + self.J_updated = False + self.H_updated = False + self._update_hess() + else: + def update_x(x): + _x = atleast_nd(x, ndim=1, xp=self.xp) + self.x = self.xp.astype(_x, self.x_dtype) + self.f_updated = False + self.J_updated = False + self.H_updated = False + + self._update_x_impl = update_x + + def _update_v(self, v): + if not np.array_equal(v, self.v): + self.v = v + self.H_updated = False + + def _update_x(self, x): + if not np.array_equal(x, self.x): + self._update_x_impl(x) + + def _update_fun(self): + if not self.f_updated: + self._update_fun_impl() + self.f_updated = True + + def _update_jac(self): + if not self.J_updated: + self._update_jac_impl() + self.J_updated = True + + def _update_hess(self): + if not self.H_updated: + self._update_hess_impl() + self.H_updated = True + + def fun(self, x): + self._update_x(x) + self._update_fun() + return self.f + + def jac(self, x): + self._update_x(x) + self._update_jac() + return self.J + + def hess(self, x, v): + # v should be updated before x. 
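+ # (Updating x first would let update_x() trigger _update_hess(),
+ # which reads self.v, with a stale multiplier vector when a
+ # HessianUpdateStrategy is in use.)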
+ self._update_v(v) + self._update_x(x) + self._update_hess() + return self.H + + +class LinearVectorFunction: + """Linear vector function and its derivatives. + + Defines a linear function F = A x, where x is N-D vector and + A is m-by-n matrix. The Jacobian is constant and equals to A. The Hessian + is identically zero and it is returned as a csr matrix. + """ + def __init__(self, A, x0, sparse_jacobian): + if sparse_jacobian or sparse_jacobian is None and sps.issparse(A): + self.J = sps.csr_matrix(A) + self.sparse_jacobian = True + elif sps.issparse(A): + self.J = A.toarray() + self.sparse_jacobian = False + else: + # np.asarray makes sure A is ndarray and not matrix + self.J = np.atleast_2d(np.asarray(A)) + self.sparse_jacobian = False + + self.m, self.n = self.J.shape + + self.xp = xp = array_namespace(x0) + _x = atleast_nd(x0, ndim=1, xp=xp) + _dtype = xp.float64 + if xp.isdtype(_x.dtype, "real floating"): + _dtype = _x.dtype + + # promotes to floating + self.x = xp.astype(_x, _dtype) + self.x_dtype = _dtype + + self.f = self.J.dot(self.x) + self.f_updated = True + + self.v = np.zeros(self.m, dtype=float) + self.H = sps.csr_matrix((self.n, self.n)) + + def _update_x(self, x): + if not np.array_equal(x, self.x): + _x = atleast_nd(x, ndim=1, xp=self.xp) + self.x = self.xp.astype(_x, self.x_dtype) + self.f_updated = False + + def fun(self, x): + self._update_x(x) + if not self.f_updated: + self.f = self.J.dot(x) + self.f_updated = True + return self.f + + def jac(self, x): + self._update_x(x) + return self.J + + def hess(self, x, v): + self._update_x(x) + self.v = v + return self.H + + +class IdentityVectorFunction(LinearVectorFunction): + """Identity vector function and its derivatives. + + The Jacobian is the identity matrix, returned as a dense array when + `sparse_jacobian=False` and as a csr matrix otherwise. The Hessian is + identically zero and it is returned as a csr matrix. 
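+
+ A minimal illustrative sketch of the dense case (these helpers are
+ private, so this is for orientation only):
+
+ >>> import numpy as np
+ >>> fn = IdentityVectorFunction(np.array([1., 2.]), sparse_jacobian=False)
+ >>> fn.fun(np.array([1., 2.]))
+ array([1., 2.])
+ >>> fn.jac(np.array([1., 2.]))
+ array([[1., 0.],
+ [0., 1.]])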
+ """ + def __init__(self, x0, sparse_jacobian): + n = len(x0) + if sparse_jacobian or sparse_jacobian is None: + A = sps.eye(n, format='csr') + sparse_jacobian = True + else: + A = np.eye(n) + sparse_jacobian = False + super().__init__(A, x0, sparse_jacobian) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_differentialevolution.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_differentialevolution.py new file mode 100644 index 0000000000000000000000000000000000000000..2510d7190cdefd12e511d5df24c2c605e3428f45 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_differentialevolution.py @@ -0,0 +1,1897 @@ +""" +differential_evolution: The differential evolution global optimization algorithm +Added by Andrew Nelson 2014 +""" +import warnings + +import numpy as np +from scipy.optimize import OptimizeResult, minimize +from scipy.optimize._optimize import _status_message, _wrap_callback +from scipy._lib._util import check_random_state, MapWrapper, _FunctionWrapper + +from scipy.optimize._constraints import (Bounds, new_bounds_to_old, + NonlinearConstraint, LinearConstraint) +from scipy.sparse import issparse + +__all__ = ['differential_evolution'] + + +_MACHEPS = np.finfo(np.float64).eps + + +def differential_evolution(func, bounds, args=(), strategy='best1bin', + maxiter=1000, popsize=15, tol=0.01, + mutation=(0.5, 1), recombination=0.7, seed=None, + callback=None, disp=False, polish=True, + init='latinhypercube', atol=0, updating='immediate', + workers=1, constraints=(), x0=None, *, + integrality=None, vectorized=False): + """Finds the global minimum of a multivariate function. + + The differential evolution method [1]_ is stochastic in nature. It does + not use gradient methods to find the minimum, and can search large areas + of candidate space, but often requires larger numbers of function + evaluations than conventional gradient-based techniques. + + The algorithm is due to Storn and Price [2]_. + + Parameters + ---------- + func : callable + The objective function to be minimized. Must be in the form + ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array + and ``args`` is a tuple of any additional fixed parameters needed to + completely specify the function. The number of parameters, N, is equal + to ``len(x)``. + bounds : sequence or `Bounds` + Bounds for variables. There are two ways to specify the bounds: + + 1. Instance of `Bounds` class. + 2. ``(min, max)`` pairs for each element in ``x``, defining the + finite lower and upper bounds for the optimizing argument of + `func`. + + The total number of bounds is used to determine the number of + parameters, N. If there are parameters whose bounds are equal the total + number of free parameters is ``N - N_equal``. + + args : tuple, optional + Any additional fixed parameters needed to + completely specify the objective function. + strategy : {str, callable}, optional + The differential evolution strategy to use. Should be one of: + + - 'best1bin' + - 'best1exp' + - 'rand1bin' + - 'rand1exp' + - 'rand2bin' + - 'rand2exp' + - 'randtobest1bin' + - 'randtobest1exp' + - 'currenttobest1bin' + - 'currenttobest1exp' + - 'best2exp' + - 'best2bin' + + The default is 'best1bin'. Strategies that may be implemented are + outlined in 'Notes'. + Alternatively the differential evolution strategy can be customized by + providing a callable that constructs a trial vector. 
The callable must
+ have the form ``strategy(candidate: int, population: np.ndarray, rng=None)``,
+ where ``candidate`` is an integer specifying which entry of the
+ population is being evolved, ``population`` is an array of shape
+ ``(S, N)`` containing all the population members (where S is the
+ total population size), and ``rng`` is the random number generator
+ being used within the solver.
+ ``candidate`` will be in the range ``[0, S)``.
+ ``strategy`` must return a trial vector with shape `(N,)`. The
+ fitness of this trial vector is compared against the fitness of
+ ``population[candidate]``.
+
+ .. versionchanged:: 1.12.0
+ Customization of evolution strategy via a callable.
+
+ maxiter : int, optional
+ The maximum number of generations over which the entire population is
+ evolved. The maximum number of function evaluations (with no polishing)
+ is: ``(maxiter + 1) * popsize * (N - N_equal)``.
+ popsize : int, optional
+ A multiplier for setting the total population size. The population has
+ ``popsize * (N - N_equal)`` individuals. This keyword is overridden if
+ an initial population is supplied via the `init` keyword. When using
+ ``init='sobol'`` the population size is calculated as the next power
+ of 2 after ``popsize * (N - N_equal)``.
+ tol : float, optional
+ Relative tolerance for convergence, the solving stops when
+ ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
+ where `atol` and `tol` are the absolute and relative tolerances,
+ respectively.
+ mutation : float or tuple(float, float), optional
+ The mutation constant. In the literature this is also known as
+ differential weight, being denoted by F.
+ If specified as a float it should be in the range [0, 2].
+ If specified as a tuple ``(min, max)`` dithering is employed. Dithering
+ randomly changes the mutation constant on a generation-by-generation
+ basis. The mutation constant for that generation is taken from
+ ``U[min, max)``. Dithering can help speed convergence significantly.
+ Increasing the mutation constant increases the search radius, but will
+ slow down convergence.
+ recombination : float, optional
+ The recombination constant, should be in the range [0, 1]. In the
+ literature this is also known as the crossover probability, being
+ denoted by CR. Increasing this value allows a larger number of mutants
+ to progress into the next generation, but at the cost of population
+ stability.
+ seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
+ If `seed` is None (or `np.random`), the `numpy.random.RandomState`
+ singleton is used.
+ If `seed` is an int, a new ``RandomState`` instance is used,
+ seeded with `seed`.
+ If `seed` is already a ``Generator`` or ``RandomState`` instance then
+ that instance is used.
+ Specify `seed` for repeatable minimizations.
+ disp : bool, optional
+ Prints the evaluated `func` at every iteration.
+ callback : callable, optional
+ A callable called after each iteration. Has the signature:
+
+ ``callback(intermediate_result: OptimizeResult)``
+
+ where ``intermediate_result`` is a keyword parameter containing an
+ `OptimizeResult` with attributes ``x`` and ``fun``, the best solution
+ found so far and the objective function. Note that the name
+ of the parameter must be ``intermediate_result`` for the callback
+ to be passed an `OptimizeResult`.
+
+ The callback also supports a signature like:
+
+ ``callback(x, convergence: float=val)``
+
+ ``val`` represents the fractional value of the population convergence.
When ``val`` is greater than ``1.0``, the function halts.
+
+ Introspection is used to determine which of the signatures is invoked.
+
+ Global minimization will halt if the callback raises ``StopIteration``
+ or returns ``True``; any polishing is still carried out.
+
+ .. versionchanged:: 1.12.0
+ callback accepts the ``intermediate_result`` keyword.
+
+ polish : bool, optional
+ If True (default), then `scipy.optimize.minimize` with the `L-BFGS-B`
+ method is used to polish the best population member at the end, which
+ can improve the minimization slightly. If a constrained problem is
+ being studied then the `trust-constr` method is used instead. For large
+ problems with many constraints, polishing can take a long time due to
+ the Jacobian computations.
+ init : str or array-like, optional
+ Specify which type of population initialization is performed. Should be
+ one of:
+
+ - 'latinhypercube'
+ - 'sobol'
+ - 'halton'
+ - 'random'
+ - array specifying the initial population. The array should have
+ shape ``(S, N)``, where S is the total population size and N is
+ the number of parameters.
+ `init` is clipped to `bounds` before use.
+
+ The default is 'latinhypercube'. Latin Hypercube sampling tries to
+ maximize coverage of the available parameter space.
+
+ 'sobol' and 'halton' are superior alternatives that maximize coverage
+ of the parameter space even further. 'sobol' will enforce an initial
+ population size which is calculated as the next power of 2 after
+ ``popsize * (N - N_equal)``. 'halton' has no requirements but is a bit
+ less efficient. See `scipy.stats.qmc` for more details.
+
+ 'random' initializes the population randomly - this has the drawback
+ that clustering can occur, preventing the whole of the parameter space
+ from being covered. An array can be used, for example, to create a
+ tight bunch of initial guesses in a location where the solution is
+ known to exist, thereby reducing the time for convergence.
+ atol : float, optional
+ Absolute tolerance for convergence, the solving stops when
+ ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
+ where `atol` and `tol` are the absolute and relative tolerances,
+ respectively.
+ updating : {'immediate', 'deferred'}, optional
+ If ``'immediate'``, the best solution vector is continuously updated
+ within a single generation [4]_. This can lead to faster convergence as
+ trial vectors can take advantage of continuous improvements in the best
+ solution.
+ With ``'deferred'``, the best solution vector is updated once per
+ generation. Only ``'deferred'`` is compatible with parallelization or
+ vectorization, and the `workers` and `vectorized` keywords can
+ override this option.
+
+ .. versionadded:: 1.2.0
+
+ workers : int or map-like callable, optional
+ If `workers` is an int the population is subdivided into `workers`
+ sections and evaluated in parallel
+ (uses ``multiprocessing.Pool``).
+ Supply -1 to use all available CPU cores.
+ Alternatively supply a map-like callable, such as
+ `multiprocessing.Pool.map` for evaluating the population in parallel.
+ This evaluation is carried out as ``workers(func, iterable)``.
+ This option will override the `updating` keyword to
+ ``updating='deferred'`` if ``workers != 1``.
+ This option overrides the `vectorized` keyword if ``workers != 1``.
+ Requires that `func` be pickleable.
+
+ ..
versionadded:: 1.2.0
+
+ constraints : {NonlinearConstraint, LinearConstraint, Bounds}
+ Constraints on the solver, over and above those applied by the `bounds`
+ keyword. Uses the approach by Lampinen [5]_.
+
+ .. versionadded:: 1.4.0
+
+ x0 : None or array-like, optional
+ Provides an initial guess to the minimization. Once the population has
+ been initialized this vector replaces the first (best) member. This
+ replacement is done even if `init` is given an initial population.
+ ``x0.shape == (N,)``.
+
+ .. versionadded:: 1.7.0
+
+ integrality : 1-D array, optional
+ For each decision variable, a boolean value indicating whether the
+ decision variable is constrained to integer values. The array is
+ broadcast to ``(N,)``.
+ If any decision variables are constrained to be integral, they will not
+ be changed during polishing.
+ Only integer values lying between the lower and upper bounds are used.
+ If there are no integer values lying between the bounds then a
+ `ValueError` is raised.
+
+ .. versionadded:: 1.9.0
+
+ vectorized : bool, optional
+ If ``vectorized is True``, `func` is sent an `x` array with
+ ``x.shape == (N, S)``, and is expected to return an array of shape
+ ``(S,)``, where `S` is the number of solution vectors to be calculated.
+ If constraints are applied, each of the functions used to construct
+ a `Constraint` object should accept an `x` array with
+ ``x.shape == (N, S)``, and return an array of shape ``(M, S)``, where
+ `M` is the number of constraint components.
+ This option is an alternative to the parallelization offered by
+ `workers`, and may help in optimization speed by reducing interpreter
+ overhead from multiple function calls. This keyword is ignored if
+ ``workers != 1``.
+ This option will override the `updating` keyword to
+ ``updating='deferred'``.
+ See the notes section for further discussion on when to use
+ ``'vectorized'``, and when to use ``'workers'``.
+
+ .. versionadded:: 1.9.0
+
+ Returns
+ -------
+ res : OptimizeResult
+ The optimization result represented as an `OptimizeResult` object.
+ Important attributes are: ``x`` the solution array, ``success`` a
+ Boolean flag indicating if the optimizer exited successfully,
+ ``message`` which describes the cause of the termination,
+ ``population`` the solution vectors present in the population, and
+ ``population_energies`` the value of the objective function for each
+ entry in ``population``.
+ See `OptimizeResult` for a description of other attributes. If `polish`
+ was employed, and a lower minimum was obtained by the polishing, then
+ OptimizeResult also contains the ``jac`` attribute.
+ If the eventual solution does not satisfy the applied constraints,
+ ``success`` will be `False`.
+
+ Notes
+ -----
+ Differential evolution is a stochastic, population-based method that is
+ useful for global optimization problems. At each pass through the
+ population the algorithm mutates each candidate solution by mixing with
+ other candidate solutions to create a trial candidate. There are several
+ strategies [3]_ for creating trial candidates, which suit some problems
+ more than others. The 'best1bin' strategy is a good starting point for
+ many systems. In this strategy two members of the population are randomly
+ chosen. Their difference is used to mutate the best member (the 'best' in
+ 'best1bin'), :math:`x_0`, so far:
+
+ .. math::
+
+ b' = x_0 + mutation * (x_{r_0} - x_{r_1})
+
+ A trial vector is then constructed.
+    Starting with a randomly chosen ith parameter, the trial is
+    sequentially filled (in modulo) with parameters from ``b'`` or the
+    original candidate. The choice of whether to use ``b'`` or the original
+    candidate is made with a binomial distribution (the 'bin' in
+    'best1bin') - a random number in [0, 1) is generated. If this number is
+    less than the `recombination` constant then the parameter is loaded
+    from ``b'``, otherwise it is loaded from the original candidate. The
+    final parameter is always loaded from ``b'``. Once the trial candidate
+    is built its fitness is assessed. If the trial is better than the
+    original candidate then it takes its place. If it is also better than
+    the best overall candidate it also replaces that.
+
+    The other strategies available are outlined in Qiang and
+    Mitchell (2014) [3]_.
+
+    .. math::
+            rand1* : b' = x_{r_0} + mutation*(x_{r_1} - x_{r_2})
+
+            rand2* : b' = x_{r_0} + mutation*(x_{r_1} + x_{r_2}
+                                              - x_{r_3} - x_{r_4})
+
+            best1* : b' = x_0 + mutation*(x_{r_0} - x_{r_1})
+
+            best2* : b' = x_0 + mutation*(x_{r_0} + x_{r_1}
+                                          - x_{r_2} - x_{r_3})
+
+            currenttobest1* : b' = x_i + mutation*(x_0 - x_i
+                                                   + x_{r_0} - x_{r_1})
+
+            randtobest1* : b' = x_{r_0} + mutation*(x_0 - x_{r_0}
+                                                    + x_{r_1} - x_{r_2})
+
+    where the integers :math:`r_0, r_1, r_2, r_3, r_4` are chosen randomly
+    from the interval [0, NP) with `NP` being the total population size and
+    the original candidate having index `i`. The user can fully customize
+    the generation of the trial candidates by supplying a callable to
+    ``strategy``.
+
+    To improve your chances of finding a global minimum use higher
+    `popsize` values, with higher `mutation` (and dithering), but lower
+    `recombination` values. This has the effect of widening the search
+    radius, but slowing convergence.
+
+    By default the best solution vector is updated continuously within a
+    single iteration (``updating='immediate'``). This is a modification
+    [4]_ of the original differential evolution algorithm which can lead to
+    faster convergence as trial vectors can immediately benefit from
+    improved solutions. To use the original Storn and Price behaviour,
+    updating the best solution once per iteration, set
+    ``updating='deferred'``.
+    The ``'deferred'`` approach is compatible with both parallelization and
+    vectorization (the `workers` and `vectorized` keywords). These may
+    improve minimization speed by using computer resources more
+    efficiently. The `workers` option distributes the calculations over
+    multiple processors. By default the Python `multiprocessing` module is
+    used, but other approaches are also possible, such as the Message
+    Passing Interface (MPI) used on clusters [6]_ [7]_. The overhead from
+    these approaches (creating new Processes, etc) may be significant,
+    meaning that computational speed doesn't necessarily scale with the
+    number of processors used. Parallelization is best suited to
+    computationally expensive objective functions. If the objective
+    function is less expensive, then ``'vectorized'`` may aid by only
+    calling the objective function once per iteration, rather than multiple
+    times for all the population members; the interpreter overhead is
+    reduced.
+
+    .. versionadded:: 0.15.0
+
+    References
+    ----------
+    .. [1] Differential evolution, Wikipedia,
+           http://en.wikipedia.org/wiki/Differential_evolution
+    .. [2] Storn, R and Price, K, Differential Evolution - a Simple and
+           Efficient Heuristic for Global Optimization over Continuous
+           Spaces, Journal of Global Optimization, 1997, 11, 341 - 359.
+    .. [3] Qiang, J., Mitchell, C., A Unified Differential Evolution
+           Algorithm for Global Optimization, 2014,
+           https://www.osti.gov/servlets/purl/1163659
+    .. [4] Wormington, M., Panaccione, C., Matney, K. M., Bowen, D. K.,
+           Characterization of structures from X-ray scattering data using
+           genetic algorithms, Phil. Trans. R. Soc. Lond. A, 1999, 357,
+           2827-2848
+    .. [5] Lampinen, J., A constraint handling approach for the
+           differential evolution algorithm. Proceedings of the 2002
+           Congress on Evolutionary Computation. CEC'02 (Cat. No.
+           02TH8600). Vol. 2. IEEE, 2002.
+    .. [6] https://mpi4py.readthedocs.io/en/stable/
+    .. [7] https://schwimmbad.readthedocs.io/en/latest/
+
+    Examples
+    --------
+    Let us consider the problem of minimizing the Rosenbrock function. This
+    function is implemented in `rosen` in `scipy.optimize`.
+
+    >>> import numpy as np
+    >>> from scipy.optimize import rosen, differential_evolution
+    >>> bounds = [(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)]
+    >>> result = differential_evolution(rosen, bounds)
+    >>> result.x, result.fun
+    (array([1., 1., 1., 1., 1.]), 1.9216496320061384e-19)
+
+    Now repeat, but with parallelization.
+
+    >>> result = differential_evolution(rosen, bounds, updating='deferred',
+    ...                                 workers=2)
+    >>> result.x, result.fun
+    (array([1., 1., 1., 1., 1.]), 1.9216496320061384e-19)
+
+    Let's do a constrained minimization.
+
+    >>> from scipy.optimize import LinearConstraint, Bounds
+
+    We add the constraint that the sum of ``x[0]`` and ``x[1]`` must be
+    less than or equal to 1.9. This is a linear constraint, which may be
+    written ``A @ x <= 1.9``, where ``A = array([[1, 1]])``. This can be
+    encoded as a `LinearConstraint` instance:
+
+    >>> lc = LinearConstraint([[1, 1]], -np.inf, 1.9)
+
+    Specify limits using a `Bounds` object.
+
+    >>> bounds = Bounds([0., 0.], [2., 2.])
+    >>> result = differential_evolution(rosen, bounds, constraints=lc,
+    ...                                 seed=1)
+    >>> result.x, result.fun
+    (array([0.96632622, 0.93367155]), 0.0011352416852625719)
+
+    Next find the minimum of the Ackley function
+    (https://en.wikipedia.org/wiki/Test_functions_for_optimization).
+
+    >>> def ackley(x):
+    ...     arg1 = -0.2 * np.sqrt(0.5 * (x[0] ** 2 + x[1] ** 2))
+    ...     arg2 = 0.5 * (np.cos(2. * np.pi * x[0]) + np.cos(2. * np.pi * x[1]))
+    ...     return -20. * np.exp(arg1) - np.exp(arg2) + 20. + np.e
+    >>> bounds = [(-5, 5), (-5, 5)]
+    >>> result = differential_evolution(ackley, bounds, seed=1)
+    >>> result.x, result.fun
+    (array([0., 0.]), 4.440892098500626e-16)
+
+    The Ackley function is written in a vectorized manner, so the
+    ``'vectorized'`` keyword can be employed. Note the reduced number of
+    function evaluations.
+
+    >>> result = differential_evolution(
+    ...     ackley, bounds, vectorized=True, updating='deferred', seed=1
+    ... )
+    >>> result.x, result.fun
+    (array([0., 0.]), 4.440892098500626e-16)
+
+    The following custom strategy function mimics 'best1bin':
+
+    >>> def custom_strategy_fn(candidate, population, rng=None):
+    ...     parameter_count = population.shape[-1]
+    ...     mutation, recombination = 0.7, 0.9
+    ...     trial = np.copy(population[candidate])
+    ...     fill_point = rng.choice(parameter_count)
+    ...
+    ...     pool = np.arange(len(population))
+    ...     rng.shuffle(pool)
+    ...
+    ...     # two unique random indices that aren't equal to candidate
+    ...     idxs = []
+    ...
while len(idxs) < 2 and len(pool) > 0: + ... idx = pool[0] + ... pool = pool[1:] + ... if idx != candidate: + ... idxs.append(idx) + ... + ... r0, r1 = idxs[:2] + ... + ... bprime = (population[0] + mutation * + ... (population[r0] - population[r1])) + ... + ... crossovers = rng.uniform(size=parameter_count) + ... crossovers = crossovers < recombination + ... crossovers[fill_point] = True + ... trial = np.where(crossovers, bprime, trial) + ... return trial + + """ + + # using a context manager means that any created Pool objects are + # cleared up. + with DifferentialEvolutionSolver(func, bounds, args=args, + strategy=strategy, + maxiter=maxiter, + popsize=popsize, tol=tol, + mutation=mutation, + recombination=recombination, + seed=seed, polish=polish, + callback=callback, + disp=disp, init=init, atol=atol, + updating=updating, + workers=workers, + constraints=constraints, + x0=x0, + integrality=integrality, + vectorized=vectorized) as solver: + ret = solver.solve() + + return ret + + +class DifferentialEvolutionSolver: + + """This class implements the differential evolution solver + + Parameters + ---------- + func : callable + The objective function to be minimized. Must be in the form + ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array + and ``args`` is a tuple of any additional fixed parameters needed to + completely specify the function. The number of parameters, N, is equal + to ``len(x)``. + bounds : sequence or `Bounds` + Bounds for variables. There are two ways to specify the bounds: + + 1. Instance of `Bounds` class. + 2. ``(min, max)`` pairs for each element in ``x``, defining the + finite lower and upper bounds for the optimizing argument of + `func`. + + The total number of bounds is used to determine the number of + parameters, N. If there are parameters whose bounds are equal the total + number of free parameters is ``N - N_equal``. + args : tuple, optional + Any additional fixed parameters needed to + completely specify the objective function. + strategy : {str, callable}, optional + The differential evolution strategy to use. Should be one of: + + - 'best1bin' + - 'best1exp' + - 'rand1bin' + - 'rand1exp' + - 'rand2bin' + - 'rand2exp' + - 'randtobest1bin' + - 'randtobest1exp' + - 'currenttobest1bin' + - 'currenttobest1exp' + - 'best2exp' + - 'best2bin' + + The default is 'best1bin'. Strategies that may be + implemented are outlined in 'Notes'. + + Alternatively the differential evolution strategy can be customized + by providing a callable that constructs a trial vector. The callable + must have the form + ``strategy(candidate: int, population: np.ndarray, rng=None)``, + where ``candidate`` is an integer specifying which entry of the + population is being evolved, ``population`` is an array of shape + ``(S, N)`` containing all the population members (where S is the + total population size), and ``rng`` is the random number generator + being used within the solver. + ``candidate`` will be in the range ``[0, S)``. + ``strategy`` must return a trial vector with shape `(N,)`. The + fitness of this trial vector is compared against the fitness of + ``population[candidate]``. + maxiter : int, optional + The maximum number of generations over which the entire population is + evolved. The maximum number of function evaluations (with no polishing) + is: ``(maxiter + 1) * popsize * (N - N_equal)`` + popsize : int, optional + A multiplier for setting the total population size. The population has + ``popsize * (N - N_equal)`` individuals. 
+        This keyword is overridden if an initial population is supplied via
+        the `init` keyword. When using ``init='sobol'`` the population size
+        is calculated as the next power of 2 after
+        ``popsize * (N - N_equal)``.
+    tol : float, optional
+        Relative tolerance for convergence, the solving stops when
+        ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
+        where `atol` and `tol` are the absolute and relative tolerances
+        respectively.
+    mutation : float or tuple(float, float), optional
+        The mutation constant. In the literature this is also known as
+        differential weight, being denoted by F.
+        If specified as a float it should be in the range [0, 2].
+        If specified as a tuple ``(min, max)`` dithering is employed.
+        Dithering randomly changes the mutation constant on a generation by
+        generation basis. The mutation constant for that generation is
+        taken from U[min, max). Dithering can help speed convergence
+        significantly. Increasing the mutation constant increases the
+        search radius, but will slow down convergence.
+    recombination : float, optional
+        The recombination constant, should be in the range [0, 1]. In the
+        literature this is also known as the crossover probability, being
+        denoted by CR. Increasing this value allows a larger number of
+        mutants to progress into the next generation, but at the risk of
+        population stability.
+    seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
+        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
+        singleton is used.
+        If `seed` is an int, a new ``RandomState`` instance is used,
+        seeded with `seed`.
+        If `seed` is already a ``Generator`` or ``RandomState`` instance
+        then that instance is used.
+        Specify `seed` for repeatable minimizations.
+    disp : bool, optional
+        Prints the evaluated `func` at every iteration.
+    callback : callable, optional
+        A callable called after each iteration. Has the signature:
+
+        ``callback(intermediate_result: OptimizeResult)``
+
+        where ``intermediate_result`` is a keyword parameter containing an
+        `OptimizeResult` with attributes ``x`` and ``fun``, the best
+        solution found so far and the objective function value. Note that
+        the name of the parameter must be ``intermediate_result`` for the
+        callback to be passed an `OptimizeResult`.
+
+        The callback also supports a signature like:
+
+        ``callback(x, convergence: float=val)``
+
+        ``val`` represents the fractional value of the population
+        convergence. When ``val`` is greater than ``1.0``, the function
+        halts.
+
+        Introspection is used to determine which of the signatures is
+        invoked.
+
+        Global minimization will halt if the callback raises
+        ``StopIteration`` or returns ``True``; any polishing is still
+        carried out.
+
+        .. versionchanged:: 1.12.0
+            callback accepts the ``intermediate_result`` keyword.
+
+    polish : bool, optional
+        If True (default), then `scipy.optimize.minimize` with the `L-BFGS-B`
+        method is used to polish the best population member at the end,
+        which can improve the minimization slightly. If a constrained
+        problem is being studied then the `trust-constr` method is used
+        instead. For large problems with many constraints, polishing can
+        take a long time due to the Jacobian computations.
+    maxfun : int, optional
+        Set the maximum number of function evaluations. However, it
+        probably makes more sense to set `maxiter` instead.
+    init : str or array-like, optional
+        Specify which type of population initialization is performed.
+        Should be one of:
+
+        - 'latinhypercube'
+        - 'sobol'
+        - 'halton'
+        - 'random'
+        - array specifying the initial population. The array should have
+          shape ``(S, N)``, where S is the total population size and
+          N is the number of parameters.
+          `init` is clipped to `bounds` before use.
+
+        The default is 'latinhypercube'. Latin Hypercube sampling tries to
+        maximize coverage of the available parameter space.
+
+        'sobol' and 'halton' are superior alternatives that cover the
+        parameter space even more evenly. 'sobol' will enforce an initial
+        population size which is calculated as the next power of 2 after
+        ``popsize * (N - N_equal)``. 'halton' has no size requirement but
+        is a bit less efficient. See `scipy.stats.qmc` for more details.
+
+        'random' initializes the population randomly - this has the
+        drawback that clustering can occur, preventing the whole of
+        parameter space being covered. An array can be used, for example,
+        to create a tight bunch of initial guesses in a location where the
+        solution is known to exist, thereby reducing time for convergence.
+    atol : float, optional
+        Absolute tolerance for convergence, the solving stops when
+        ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
+        where `atol` and `tol` are the absolute and relative tolerances
+        respectively.
+    updating : {'immediate', 'deferred'}, optional
+        If ``'immediate'``, the best solution vector is continuously updated
+        within a single generation [4]_. This can lead to faster
+        convergence as trial vectors can take advantage of continuous
+        improvements in the best solution.
+        With ``'deferred'``, the best solution vector is updated once per
+        generation. Only ``'deferred'`` is compatible with parallelization
+        or vectorization, and the `workers` and `vectorized` keywords can
+        override this option.
+    workers : int or map-like callable, optional
+        If `workers` is an int the population is subdivided into `workers`
+        sections and evaluated in parallel (uses `multiprocessing.Pool`).
+        Supply `-1` to use all cores available to the Process.
+        Alternatively supply a map-like callable, such as
+        `multiprocessing.Pool.map` for evaluating the population in
+        parallel. This evaluation is carried out as
+        ``workers(func, iterable)``.
+        This option will override the `updating` keyword to
+        ``updating='deferred'`` if ``workers != 1``.
+        Requires that `func` be pickleable.
+    constraints : {NonlinearConstraint, LinearConstraint, Bounds}
+        Constraints on the solver, over and above those applied by the
+        `bounds` kwd. Uses the approach by Lampinen.
+    x0 : None or array-like, optional
+        Provides an initial guess to the minimization. Once the population
+        has been initialized this vector replaces the first (best) member.
+        This replacement is done even if `init` is given an initial
+        population. ``x0.shape == (N,)``.
+    integrality : 1-D array, optional
+        For each decision variable, a boolean value indicating whether the
+        decision variable is constrained to integer values. The array is
+        broadcast to ``(N,)``.
+        If any decision variables are constrained to be integral, they will
+        not be changed during polishing.
+        Only integer values lying between the lower and upper bounds are
+        used. If there are no integer values lying between the bounds then
+        a `ValueError` is raised.
+ vectorized : bool, optional + If ``vectorized is True``, `func` is sent an `x` array with + ``x.shape == (N, S)``, and is expected to return an array of shape + ``(S,)``, where `S` is the number of solution vectors to be calculated. + If constraints are applied, each of the functions used to construct + a `Constraint` object should accept an `x` array with + ``x.shape == (N, S)``, and return an array of shape ``(M, S)``, where + `M` is the number of constraint components. + This option is an alternative to the parallelization offered by + `workers`, and may help in optimization speed. This keyword is + ignored if ``workers != 1``. + This option will override the `updating` keyword to + ``updating='deferred'``. + """ + + # Dispatch of mutation strategy method (binomial or exponential). + _binomial = {'best1bin': '_best1', + 'randtobest1bin': '_randtobest1', + 'currenttobest1bin': '_currenttobest1', + 'best2bin': '_best2', + 'rand2bin': '_rand2', + 'rand1bin': '_rand1'} + _exponential = {'best1exp': '_best1', + 'rand1exp': '_rand1', + 'randtobest1exp': '_randtobest1', + 'currenttobest1exp': '_currenttobest1', + 'best2exp': '_best2', + 'rand2exp': '_rand2'} + + __init_error_msg = ("The population initialization method must be one of " + "'latinhypercube' or 'random', or an array of shape " + "(S, N) where N is the number of parameters and S>5") + + def __init__(self, func, bounds, args=(), + strategy='best1bin', maxiter=1000, popsize=15, + tol=0.01, mutation=(0.5, 1), recombination=0.7, seed=None, + maxfun=np.inf, callback=None, disp=False, polish=True, + init='latinhypercube', atol=0, updating='immediate', + workers=1, constraints=(), x0=None, *, integrality=None, + vectorized=False): + + if callable(strategy): + # a callable strategy is going to be stored in self.strategy anyway + pass + elif strategy in self._binomial: + self.mutation_func = getattr(self, self._binomial[strategy]) + elif strategy in self._exponential: + self.mutation_func = getattr(self, self._exponential[strategy]) + else: + raise ValueError("Please select a valid mutation strategy") + self.strategy = strategy + + self.callback = _wrap_callback(callback, "differential_evolution") + self.polish = polish + + # set the updating / parallelisation options + if updating in ['immediate', 'deferred']: + self._updating = updating + + self.vectorized = vectorized + + # want to use parallelisation, but updating is immediate + if workers != 1 and updating == 'immediate': + warnings.warn("differential_evolution: the 'workers' keyword has" + " overridden updating='immediate' to" + " updating='deferred'", UserWarning, stacklevel=2) + self._updating = 'deferred' + + if vectorized and workers != 1: + warnings.warn("differential_evolution: the 'workers' keyword" + " overrides the 'vectorized' keyword", stacklevel=2) + self.vectorized = vectorized = False + + if vectorized and updating == 'immediate': + warnings.warn("differential_evolution: the 'vectorized' keyword" + " has overridden updating='immediate' to updating" + "='deferred'", UserWarning, stacklevel=2) + self._updating = 'deferred' + + # an object with a map method. + if vectorized: + def maplike_for_vectorized_func(func, x): + # send an array (N, S) to the user func, + # expect to receive (S,). 
Transposition is required because + # internally the population is held as (S, N) + return np.atleast_1d(func(x.T)) + workers = maplike_for_vectorized_func + + self._mapwrapper = MapWrapper(workers) + + # relative and absolute tolerances for convergence + self.tol, self.atol = tol, atol + + # Mutation constant should be in [0, 2). If specified as a sequence + # then dithering is performed. + self.scale = mutation + if (not np.all(np.isfinite(mutation)) or + np.any(np.array(mutation) >= 2) or + np.any(np.array(mutation) < 0)): + raise ValueError('The mutation constant must be a float in ' + 'U[0, 2), or specified as a tuple(min, max)' + ' where min < max and min, max are in U[0, 2).') + + self.dither = None + if hasattr(mutation, '__iter__') and len(mutation) > 1: + self.dither = [mutation[0], mutation[1]] + self.dither.sort() + + self.cross_over_probability = recombination + + # we create a wrapped function to allow the use of map (and Pool.map + # in the future) + self.func = _FunctionWrapper(func, args) + self.args = args + + # convert tuple of lower and upper bounds to limits + # [(low_0, high_0), ..., (low_n, high_n] + # -> [[low_0, ..., low_n], [high_0, ..., high_n]] + if isinstance(bounds, Bounds): + self.limits = np.array(new_bounds_to_old(bounds.lb, + bounds.ub, + len(bounds.lb)), + dtype=float).T + else: + self.limits = np.array(bounds, dtype='float').T + + if (np.size(self.limits, 0) != 2 or not + np.all(np.isfinite(self.limits))): + raise ValueError('bounds should be a sequence containing finite ' + 'real valued (min, max) pairs for each value' + ' in x') + + if maxiter is None: # the default used to be None + maxiter = 1000 + self.maxiter = maxiter + if maxfun is None: # the default used to be None + maxfun = np.inf + self.maxfun = maxfun + + # population is scaled to between [0, 1]. + # We have to scale between parameter <-> population + # save these arguments for _scale_parameter and + # _unscale_parameter. This is an optimization + self.__scale_arg1 = 0.5 * (self.limits[0] + self.limits[1]) + self.__scale_arg2 = np.fabs(self.limits[0] - self.limits[1]) + with np.errstate(divide='ignore'): + # if lb == ub then the following line will be 1/0, which is why + # we ignore the divide by zero warning. The result from 1/0 is + # inf, so replace those values by 0. + self.__recip_scale_arg2 = 1 / self.__scale_arg2 + self.__recip_scale_arg2[~np.isfinite(self.__recip_scale_arg2)] = 0 + + self.parameter_count = np.size(self.limits, 1) + + self.random_number_generator = check_random_state(seed) + + # Which parameters are going to be integers? + if np.any(integrality): + # # user has provided a truth value for integer constraints + integrality = np.broadcast_to( + integrality, + self.parameter_count + ) + integrality = np.asarray(integrality, bool) + # For integrality parameters change the limits to only allow + # integer values lying between the limits. 
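+            # A worked illustration (hypothetical values): integer bounds
+            # of (1, 3) are widened below to just inside (0.5, 3.5), so
+            # that np.round in _scale_parameters can produce each of 1, 2
+            # and 3 with roughly equal probability. np.nextafter keeps the
+            # interval open, preventing rounding to 0 or 4.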
+ lb, ub = np.copy(self.limits) + + lb = np.ceil(lb) + ub = np.floor(ub) + if not (lb[integrality] <= ub[integrality]).all(): + # there's a parameter that doesn't have an integer value + # lying between the limits + raise ValueError("One of the integrality constraints does not" + " have any possible integer values between" + " the lower/upper bounds.") + nlb = np.nextafter(lb[integrality] - 0.5, np.inf) + nub = np.nextafter(ub[integrality] + 0.5, -np.inf) + + self.integrality = integrality + self.limits[0, self.integrality] = nlb + self.limits[1, self.integrality] = nub + else: + self.integrality = False + + # check for equal bounds + eb = self.limits[0] == self.limits[1] + eb_count = np.count_nonzero(eb) + + # default population initialization is a latin hypercube design, but + # there are other population initializations possible. + # the minimum is 5 because 'best2bin' requires a population that's at + # least 5 long + # 202301 - reduced population size to account for parameters with + # equal bounds. If there are no varying parameters set N to at least 1 + self.num_population_members = max( + 5, + popsize * max(1, self.parameter_count - eb_count) + ) + self.population_shape = (self.num_population_members, + self.parameter_count) + + self._nfev = 0 + # check first str otherwise will fail to compare str with array + if isinstance(init, str): + if init == 'latinhypercube': + self.init_population_lhs() + elif init == 'sobol': + # must be Ns = 2**m for Sobol' + n_s = int(2 ** np.ceil(np.log2(self.num_population_members))) + self.num_population_members = n_s + self.population_shape = (self.num_population_members, + self.parameter_count) + self.init_population_qmc(qmc_engine='sobol') + elif init == 'halton': + self.init_population_qmc(qmc_engine='halton') + elif init == 'random': + self.init_population_random() + else: + raise ValueError(self.__init_error_msg) + else: + self.init_population_array(init) + + if x0 is not None: + # scale to within unit interval and + # ensure parameters are within bounds. + x0_scaled = self._unscale_parameters(np.asarray(x0)) + if ((x0_scaled > 1.0) | (x0_scaled < 0.0)).any(): + raise ValueError( + "Some entries in x0 lay outside the specified bounds" + ) + self.population[0] = x0_scaled + + # infrastructure for constraints + self.constraints = constraints + self._wrapped_constraints = [] + + if hasattr(constraints, '__len__'): + # sequence of constraints, this will also deal with default + # keyword parameter + for c in constraints: + self._wrapped_constraints.append( + _ConstraintWrapper(c, self.x) + ) + else: + self._wrapped_constraints = [ + _ConstraintWrapper(constraints, self.x) + ] + self.total_constraints = np.sum( + [c.num_constr for c in self._wrapped_constraints] + ) + self.constraint_violation = np.zeros((self.num_population_members, 1)) + self.feasible = np.ones(self.num_population_members, bool) + + self.disp = disp + + def init_population_lhs(self): + """ + Initializes the population with Latin Hypercube Sampling. + Latin Hypercube Sampling ensures that each parameter is uniformly + sampled over its range. + """ + rng = self.random_number_generator + + # Each parameter range needs to be sampled uniformly. The scaled + # parameter range ([0, 1)) needs to be split into + # `self.num_population_members` segments, each of which has the following + # size: + segsize = 1.0 / self.num_population_members + + # Within each segment we sample from a uniform random distribution. + # We need to do this sampling for each parameter. 
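+        # For example, with 4 population members the segments are
+        # [0, 0.25), [0.25, 0.5), [0.5, 0.75) and [0.75, 1); one uniform
+        # draw is taken inside each segment for every parameter, and the
+        # per-parameter shuffle below decorrelates the columns.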
+        samples = (segsize * rng.uniform(size=self.population_shape)
+
+                   # Offset each segment to cover the entire parameter
+                   # range [0, 1)
+                   + np.linspace(0., 1., self.num_population_members,
+                                 endpoint=False)[:, np.newaxis])
+
+        # Create an array for population of candidate solutions.
+        self.population = np.zeros_like(samples)
+
+        # Initialize population of candidate solutions by permutation of
+        # the random samples.
+        for j in range(self.parameter_count):
+            order = rng.permutation(range(self.num_population_members))
+            self.population[:, j] = samples[order, j]
+
+        # reset population energies
+        self.population_energies = np.full(self.num_population_members,
+                                           np.inf)
+
+        # reset number of function evaluations counter
+        self._nfev = 0
+
+    def init_population_qmc(self, qmc_engine):
+        """Initializes the population with a QMC method.
+
+        QMC methods ensure that each parameter is uniformly
+        sampled over its range.
+
+        Parameters
+        ----------
+        qmc_engine : str
+            The QMC method to use for initialization. Can be one of
+            ``latinhypercube``, ``sobol`` or ``halton``.
+
+        """
+        from scipy.stats import qmc
+
+        rng = self.random_number_generator
+
+        # Create an array for population of candidate solutions.
+        if qmc_engine == 'latinhypercube':
+            sampler = qmc.LatinHypercube(d=self.parameter_count, seed=rng)
+        elif qmc_engine == 'sobol':
+            sampler = qmc.Sobol(d=self.parameter_count, seed=rng)
+        elif qmc_engine == 'halton':
+            sampler = qmc.Halton(d=self.parameter_count, seed=rng)
+        else:
+            raise ValueError(self.__init_error_msg)
+
+        self.population = sampler.random(n=self.num_population_members)
+
+        # reset population energies
+        self.population_energies = np.full(self.num_population_members,
+                                           np.inf)
+
+        # reset number of function evaluations counter
+        self._nfev = 0
+
+    def init_population_random(self):
+        """
+        Initializes the population at random. This type of initialization
+        can produce clustering; Latin Hypercube sampling is generally
+        better.
+        """
+        rng = self.random_number_generator
+        self.population = rng.uniform(size=self.population_shape)
+
+        # reset population energies
+        self.population_energies = np.full(self.num_population_members,
+                                           np.inf)
+
+        # reset number of function evaluations counter
+        self._nfev = 0
+
+    def init_population_array(self, init):
+        """
+        Initializes the population with a user specified population.
+
+        Parameters
+        ----------
+        init : np.ndarray
+            Array specifying the initial population. The array should
+            have shape (S, N), where N is the number of parameters.
+            The population is clipped to the lower and upper bounds.
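+
+            For example (a hypothetical call; assumes ``solver`` was built
+            with three parameters, each bounded by ``(0, 2)``)::
+
+                init = np.random.uniform(0, 2, size=(10, 3))
+                solver.init_population_array(init)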
+ """ + # make sure you're using a float array + popn = np.asarray(init, dtype=np.float64) + + if (np.size(popn, 0) < 5 or + popn.shape[1] != self.parameter_count or + len(popn.shape) != 2): + raise ValueError("The population supplied needs to have shape" + " (S, len(x)), where S > 4.") + + # scale values and clip to bounds, assigning to population + self.population = np.clip(self._unscale_parameters(popn), 0, 1) + + self.num_population_members = np.size(self.population, 0) + + self.population_shape = (self.num_population_members, + self.parameter_count) + + # reset population energies + self.population_energies = np.full(self.num_population_members, + np.inf) + + # reset number of function evaluations counter + self._nfev = 0 + + @property + def x(self): + """ + The best solution from the solver + """ + return self._scale_parameters(self.population[0]) + + @property + def convergence(self): + """ + The standard deviation of the population energies divided by their + mean. + """ + if np.any(np.isinf(self.population_energies)): + return np.inf + return (np.std(self.population_energies) / + (np.abs(np.mean(self.population_energies)) + _MACHEPS)) + + def converged(self): + """ + Return True if the solver has converged. + """ + if np.any(np.isinf(self.population_energies)): + return False + + return (np.std(self.population_energies) <= + self.atol + + self.tol * np.abs(np.mean(self.population_energies))) + + def solve(self): + """ + Runs the DifferentialEvolutionSolver. + + Returns + ------- + res : OptimizeResult + The optimization result represented as a `OptimizeResult` object. + Important attributes are: ``x`` the solution array, ``success`` a + Boolean flag indicating if the optimizer exited successfully, + ``message`` which describes the cause of the termination, + ``population`` the solution vectors present in the population, and + ``population_energies`` the value of the objective function for + each entry in ``population``. + See `OptimizeResult` for a description of other attributes. If + `polish` was employed, and a lower minimum was obtained by the + polishing, then OptimizeResult also contains the ``jac`` attribute. + If the eventual solution does not satisfy the applied constraints + ``success`` will be `False`. + """ + nit, warning_flag = 0, False + status_message = _status_message['success'] + + # The population may have just been initialized (all entries are + # np.inf). If it has you have to calculate the initial energies. + # Although this is also done in the evolve generator it's possible + # that someone can set maxiter=0, at which point we still want the + # initial energies to be calculated (the following loop isn't run). + if np.all(np.isinf(self.population_energies)): + self.feasible, self.constraint_violation = ( + self._calculate_population_feasibilities(self.population)) + + # only work out population energies for feasible solutions + self.population_energies[self.feasible] = ( + self._calculate_population_energies( + self.population[self.feasible])) + + self._promote_lowest_energy() + + # do the optimization. 
+ for nit in range(1, self.maxiter + 1): + # evolve the population by a generation + try: + next(self) + except StopIteration: + warning_flag = True + if self._nfev > self.maxfun: + status_message = _status_message['maxfev'] + elif self._nfev == self.maxfun: + status_message = ('Maximum number of function evaluations' + ' has been reached.') + break + + if self.disp: + print(f"differential_evolution step {nit}: f(x)=" + f" {self.population_energies[0]}" + ) + + if self.callback: + c = self.tol / (self.convergence + _MACHEPS) + res = self._result(nit=nit, message="in progress") + res.convergence = c + try: + warning_flag = bool(self.callback(res)) + except StopIteration: + warning_flag = True + + if warning_flag: + status_message = 'callback function requested stop early' + + # should the solver terminate? + if warning_flag or self.converged(): + break + + else: + status_message = _status_message['maxiter'] + warning_flag = True + + DE_result = self._result( + nit=nit, message=status_message, warning_flag=warning_flag + ) + + if self.polish and not np.all(self.integrality): + # can't polish if all the parameters are integers + if np.any(self.integrality): + # set the lower/upper bounds equal so that any integrality + # constraints work. + limits, integrality = self.limits, self.integrality + limits[0, integrality] = DE_result.x[integrality] + limits[1, integrality] = DE_result.x[integrality] + + polish_method = 'L-BFGS-B' + + if self._wrapped_constraints: + polish_method = 'trust-constr' + + constr_violation = self._constraint_violation_fn(DE_result.x) + if np.any(constr_violation > 0.): + warnings.warn("differential evolution didn't find a " + "solution satisfying the constraints, " + "attempting to polish from the least " + "infeasible solution", + UserWarning, stacklevel=2) + if self.disp: + print(f"Polishing solution with '{polish_method}'") + result = minimize(self.func, + np.copy(DE_result.x), + method=polish_method, + bounds=self.limits.T, + constraints=self.constraints) + + self._nfev += result.nfev + DE_result.nfev = self._nfev + + # Polishing solution is only accepted if there is an improvement in + # cost function, the polishing was successful and the solution lies + # within the bounds. 
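+            # (A polish step that fails, or that is started from the least
+            # infeasible solution, can return a worse or out-of-bounds
+            # point, hence all three conditions are re-checked below.)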
+ if (result.fun < DE_result.fun and + result.success and + np.all(result.x <= self.limits[1]) and + np.all(self.limits[0] <= result.x)): + DE_result.fun = result.fun + DE_result.x = result.x + DE_result.jac = result.jac + # to keep internal state consistent + self.population_energies[0] = result.fun + self.population[0] = self._unscale_parameters(result.x) + + if self._wrapped_constraints: + DE_result.constr = [c.violation(DE_result.x) for + c in self._wrapped_constraints] + DE_result.constr_violation = np.max( + np.concatenate(DE_result.constr)) + DE_result.maxcv = DE_result.constr_violation + if DE_result.maxcv > 0: + # if the result is infeasible then success must be False + DE_result.success = False + DE_result.message = ("The solution does not satisfy the " + f"constraints, MAXCV = {DE_result.maxcv}") + + return DE_result + + def _result(self, **kwds): + # form an intermediate OptimizeResult + nit = kwds.get('nit', None) + message = kwds.get('message', None) + warning_flag = kwds.get('warning_flag', False) + result = OptimizeResult( + x=self.x, + fun=self.population_energies[0], + nfev=self._nfev, + nit=nit, + message=message, + success=(warning_flag is not True), + population=self._scale_parameters(self.population), + population_energies=self.population_energies + ) + if self._wrapped_constraints: + result.constr = [c.violation(result.x) + for c in self._wrapped_constraints] + result.constr_violation = np.max(np.concatenate(result.constr)) + result.maxcv = result.constr_violation + if result.maxcv > 0: + result.success = False + + return result + + def _calculate_population_energies(self, population): + """ + Calculate the energies of a population. + + Parameters + ---------- + population : ndarray + An array of parameter vectors normalised to [0, 1] using lower + and upper limits. Has shape ``(np.size(population, 0), N)``. + + Returns + ------- + energies : ndarray + An array of energies corresponding to each population member. If + maxfun will be exceeded during this call, then the number of + function evaluations will be reduced and energies will be + right-padded with np.inf. 
Has shape ``(np.size(population, 0),)`` + """ + num_members = np.size(population, 0) + # S is the number of function evals left to stay under the + # maxfun budget + S = min(num_members, self.maxfun - self._nfev) + + energies = np.full(num_members, np.inf) + + parameters_pop = self._scale_parameters(population) + try: + calc_energies = list( + self._mapwrapper(self.func, parameters_pop[0:S]) + ) + calc_energies = np.squeeze(calc_energies) + except (TypeError, ValueError) as e: + # wrong number of arguments for _mapwrapper + # or wrong length returned from the mapper + raise RuntimeError( + "The map-like callable must be of the form f(func, iterable), " + "returning a sequence of numbers the same length as 'iterable'" + ) from e + + if calc_energies.size != S: + if self.vectorized: + raise RuntimeError("The vectorized function must return an" + " array of shape (S,) when given an array" + " of shape (len(x), S)") + raise RuntimeError("func(x, *args) must return a scalar value") + + energies[0:S] = calc_energies + + if self.vectorized: + self._nfev += 1 + else: + self._nfev += S + + return energies + + def _promote_lowest_energy(self): + # swaps 'best solution' into first population entry + + idx = np.arange(self.num_population_members) + feasible_solutions = idx[self.feasible] + if feasible_solutions.size: + # find the best feasible solution + idx_t = np.argmin(self.population_energies[feasible_solutions]) + l = feasible_solutions[idx_t] + else: + # no solution was feasible, use 'best' infeasible solution, which + # will violate constraints the least + l = np.argmin(np.sum(self.constraint_violation, axis=1)) + + self.population_energies[[0, l]] = self.population_energies[[l, 0]] + self.population[[0, l], :] = self.population[[l, 0], :] + self.feasible[[0, l]] = self.feasible[[l, 0]] + self.constraint_violation[[0, l], :] = ( + self.constraint_violation[[l, 0], :]) + + def _constraint_violation_fn(self, x): + """ + Calculates total constraint violation for all the constraints, for a + set of solutions. + + Parameters + ---------- + x : ndarray + Solution vector(s). Has shape (S, N), or (N,), where S is the + number of solutions to investigate and N is the number of + parameters. + + Returns + ------- + cv : ndarray + Total violation of constraints. Has shape ``(S, M)``, where M is + the total number of constraint components (which is not necessarily + equal to len(self._wrapped_constraints)). + """ + # how many solution vectors you're calculating constraint violations + # for + S = np.size(x) // self.parameter_count + _out = np.zeros((S, self.total_constraints)) + offset = 0 + for con in self._wrapped_constraints: + # the input/output of the (vectorized) constraint function is + # {(N, S), (N,)} --> (M, S) + # The input to _constraint_violation_fn is (S, N) or (N,), so + # transpose to pass it to the constraint. The output is transposed + # from (M, S) to (S, M) for further use. + c = con.violation(x.T).T + + # The shape of c should be (M,), (1, M), or (S, M). Check for + # those shapes, as an incorrect shape indicates that the + # user constraint function didn't return the right thing, and + # the reshape operation will fail. Intercept the wrong shape + # to give a reasonable error message. I'm not sure what failure + # modes an inventive user will come up with. + if c.shape[-1] != con.num_constr or (S > 1 and c.shape[0] != S): + raise RuntimeError("An array returned from a Constraint has" + " the wrong shape. 
If `vectorized is False`" + " the Constraint should return an array of" + " shape (M,). If `vectorized is True` then" + " the Constraint must return an array of" + " shape (M, S), where S is the number of" + " solution vectors and M is the number of" + " constraint components in a given" + " Constraint object.") + + # the violation function may return a 1D array, but is it a + # sequence of constraints for one solution (S=1, M>=1), or the + # value of a single constraint for a sequence of solutions + # (S>=1, M=1) + c = np.reshape(c, (S, con.num_constr)) + _out[:, offset:offset + con.num_constr] = c + offset += con.num_constr + + return _out + + def _calculate_population_feasibilities(self, population): + """ + Calculate the feasibilities of a population. + + Parameters + ---------- + population : ndarray + An array of parameter vectors normalised to [0, 1] using lower + and upper limits. Has shape ``(np.size(population, 0), N)``. + + Returns + ------- + feasible, constraint_violation : ndarray, ndarray + Boolean array of feasibility for each population member, and an + array of the constraint violation for each population member. + constraint_violation has shape ``(np.size(population, 0), M)``, + where M is the number of constraints. + """ + num_members = np.size(population, 0) + if not self._wrapped_constraints: + # shortcut for no constraints + return np.ones(num_members, bool), np.zeros((num_members, 1)) + + # (S, N) + parameters_pop = self._scale_parameters(population) + + if self.vectorized: + # (S, M) + constraint_violation = np.array( + self._constraint_violation_fn(parameters_pop) + ) + else: + # (S, 1, M) + constraint_violation = np.array([self._constraint_violation_fn(x) + for x in parameters_pop]) + # if you use the list comprehension in the line above it will + # create an array of shape (S, 1, M), because each iteration + # generates an array of (1, M). In comparison the vectorized + # version returns (S, M). It's therefore necessary to remove axis 1 + constraint_violation = constraint_violation[:, 0] + + feasible = ~(np.sum(constraint_violation, axis=1) > 0) + + return feasible, constraint_violation + + def __iter__(self): + return self + + def __enter__(self): + return self + + def __exit__(self, *args): + return self._mapwrapper.__exit__(*args) + + def _accept_trial(self, energy_trial, feasible_trial, cv_trial, + energy_orig, feasible_orig, cv_orig): + """ + Trial is accepted if: + * it satisfies all constraints and provides a lower or equal objective + function value, while both the compared solutions are feasible + - or - + * it is feasible while the original solution is infeasible, + - or - + * it is infeasible, but provides a lower or equal constraint violation + for all constraint functions. + + This test corresponds to section III of Lampinen [1]_. 
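+
+        For example, a feasible trial beats an infeasible original
+        regardless of their energies, while two feasible solutions are
+        compared on energy alone.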
+ + Parameters + ---------- + energy_trial : float + Energy of the trial solution + feasible_trial : float + Feasibility of trial solution + cv_trial : array-like + Excess constraint violation for the trial solution + energy_orig : float + Energy of the original solution + feasible_orig : float + Feasibility of original solution + cv_orig : array-like + Excess constraint violation for the original solution + + Returns + ------- + accepted : bool + + """ + if feasible_orig and feasible_trial: + return energy_trial <= energy_orig + elif feasible_trial and not feasible_orig: + return True + elif not feasible_trial and (cv_trial <= cv_orig).all(): + # cv_trial < cv_orig would imply that both trial and orig are not + # feasible + return True + + return False + + def __next__(self): + """ + Evolve the population by a single generation + + Returns + ------- + x : ndarray + The best solution from the solver. + fun : float + Value of objective function obtained from the best solution. + """ + # the population may have just been initialized (all entries are + # np.inf). If it has you have to calculate the initial energies + if np.all(np.isinf(self.population_energies)): + self.feasible, self.constraint_violation = ( + self._calculate_population_feasibilities(self.population)) + + # only need to work out population energies for those that are + # feasible + self.population_energies[self.feasible] = ( + self._calculate_population_energies( + self.population[self.feasible])) + + self._promote_lowest_energy() + + if self.dither is not None: + self.scale = self.random_number_generator.uniform(self.dither[0], + self.dither[1]) + + if self._updating == 'immediate': + # update best solution immediately + for candidate in range(self.num_population_members): + if self._nfev > self.maxfun: + raise StopIteration + + # create a trial solution + trial = self._mutate(candidate) + + # ensuring that it's in the range [0, 1) + self._ensure_constraint(trial) + + # scale from [0, 1) to the actual parameter value + parameters = self._scale_parameters(trial) + + # determine the energy of the objective function + if self._wrapped_constraints: + cv = self._constraint_violation_fn(parameters) + feasible = False + energy = np.inf + if not np.sum(cv) > 0: + # solution is feasible + feasible = True + energy = self.func(parameters) + self._nfev += 1 + else: + feasible = True + cv = np.atleast_2d([0.]) + energy = self.func(parameters) + self._nfev += 1 + + # compare trial and population member + if self._accept_trial(energy, feasible, cv, + self.population_energies[candidate], + self.feasible[candidate], + self.constraint_violation[candidate]): + self.population[candidate] = trial + self.population_energies[candidate] = np.squeeze(energy) + self.feasible[candidate] = feasible + self.constraint_violation[candidate] = cv + + # if the trial candidate is also better than the best + # solution then promote it. + if self._accept_trial(energy, feasible, cv, + self.population_energies[0], + self.feasible[0], + self.constraint_violation[0]): + self._promote_lowest_energy() + + elif self._updating == 'deferred': + # update best solution once per generation + if self._nfev >= self.maxfun: + raise StopIteration + + # 'deferred' approach, vectorised form. 
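+            # All trials for the generation are built first, their energies
+            # are then evaluated in one batch (possibly in parallel via the
+            # map-like wrapper), and replacements are applied afterwards
+            # with np.where masks.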
+ # create trial solutions + trial_pop = np.array( + [self._mutate(i) for i in range(self.num_population_members)]) + + # enforce bounds + self._ensure_constraint(trial_pop) + + # determine the energies of the objective function, but only for + # feasible trials + feasible, cv = self._calculate_population_feasibilities(trial_pop) + trial_energies = np.full(self.num_population_members, np.inf) + + # only calculate for feasible entries + trial_energies[feasible] = self._calculate_population_energies( + trial_pop[feasible]) + + # which solutions are 'improved'? + loc = [self._accept_trial(*val) for val in + zip(trial_energies, feasible, cv, self.population_energies, + self.feasible, self.constraint_violation)] + loc = np.array(loc) + self.population = np.where(loc[:, np.newaxis], + trial_pop, + self.population) + self.population_energies = np.where(loc, + trial_energies, + self.population_energies) + self.feasible = np.where(loc, + feasible, + self.feasible) + self.constraint_violation = np.where(loc[:, np.newaxis], + cv, + self.constraint_violation) + + # make sure the best solution is updated if updating='deferred'. + # put the lowest energy into the best solution position. + self._promote_lowest_energy() + + return self.x, self.population_energies[0] + + def _scale_parameters(self, trial): + """Scale from a number between 0 and 1 to parameters.""" + # trial either has shape (N, ) or (L, N), where L is the number of + # solutions being scaled + scaled = self.__scale_arg1 + (trial - 0.5) * self.__scale_arg2 + if np.any(self.integrality): + i = np.broadcast_to(self.integrality, scaled.shape) + scaled[i] = np.round(scaled[i]) + return scaled + + def _unscale_parameters(self, parameters): + """Scale from parameters to a number between 0 and 1.""" + return (parameters - self.__scale_arg1) * self.__recip_scale_arg2 + 0.5 + + def _ensure_constraint(self, trial): + """Make sure the parameters lie between the limits.""" + mask = np.where((trial > 1) | (trial < 0)) + trial[mask] = self.random_number_generator.uniform(size=mask[0].shape) + + def _mutate(self, candidate): + """Create a trial vector based on a mutation strategy.""" + rng = self.random_number_generator + + if callable(self.strategy): + _population = self._scale_parameters(self.population) + trial = np.array( + self.strategy(candidate, _population, rng=rng), dtype=float + ) + if trial.shape != (self.parameter_count,): + raise RuntimeError( + "strategy must have signature" + " f(candidate: int, population: np.ndarray, rng=None)" + " returning an array of shape (N,)" + ) + return self._unscale_parameters(trial) + + trial = np.copy(self.population[candidate]) + fill_point = rng.choice(self.parameter_count) + + if self.strategy in ['currenttobest1exp', 'currenttobest1bin']: + bprime = self.mutation_func(candidate, + self._select_samples(candidate, 5)) + else: + bprime = self.mutation_func(self._select_samples(candidate, 5)) + + if self.strategy in self._binomial: + crossovers = rng.uniform(size=self.parameter_count) + crossovers = crossovers < self.cross_over_probability + # the last one is always from the bprime vector for binomial + # If you fill in modulo with a loop you have to set the last one to + # true. If you don't use a loop then you can have any random entry + # be True. 
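+            # e.g. with recombination=0.7 each parameter comes from bprime
+            # with probability 0.7; forcing the fill_point entry guarantees
+            # the trial differs from the original in at least one parameter.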
+ crossovers[fill_point] = True + trial = np.where(crossovers, bprime, trial) + return trial + + elif self.strategy in self._exponential: + i = 0 + crossovers = rng.uniform(size=self.parameter_count) + crossovers = crossovers < self.cross_over_probability + crossovers[0] = True + while (i < self.parameter_count and crossovers[i]): + trial[fill_point] = bprime[fill_point] + fill_point = (fill_point + 1) % self.parameter_count + i += 1 + + return trial + + def _best1(self, samples): + """best1bin, best1exp""" + r0, r1 = samples[:2] + return (self.population[0] + self.scale * + (self.population[r0] - self.population[r1])) + + def _rand1(self, samples): + """rand1bin, rand1exp""" + r0, r1, r2 = samples[:3] + return (self.population[r0] + self.scale * + (self.population[r1] - self.population[r2])) + + def _randtobest1(self, samples): + """randtobest1bin, randtobest1exp""" + r0, r1, r2 = samples[:3] + bprime = np.copy(self.population[r0]) + bprime += self.scale * (self.population[0] - bprime) + bprime += self.scale * (self.population[r1] - + self.population[r2]) + return bprime + + def _currenttobest1(self, candidate, samples): + """currenttobest1bin, currenttobest1exp""" + r0, r1 = samples[:2] + bprime = (self.population[candidate] + self.scale * + (self.population[0] - self.population[candidate] + + self.population[r0] - self.population[r1])) + return bprime + + def _best2(self, samples): + """best2bin, best2exp""" + r0, r1, r2, r3 = samples[:4] + bprime = (self.population[0] + self.scale * + (self.population[r0] + self.population[r1] - + self.population[r2] - self.population[r3])) + + return bprime + + def _rand2(self, samples): + """rand2bin, rand2exp""" + r0, r1, r2, r3, r4 = samples + bprime = (self.population[r0] + self.scale * + (self.population[r1] + self.population[r2] - + self.population[r3] - self.population[r4])) + + return bprime + + def _select_samples(self, candidate, number_samples): + """ + obtain random integers from range(self.num_population_members), + without replacement. You can't have the original candidate either. + """ + pool = np.arange(self.num_population_members) + self.random_number_generator.shuffle(pool) + + idxs = [] + while len(idxs) < number_samples and len(pool) > 0: + idx = pool[0] + pool = pool[1:] + if idx != candidate: + idxs.append(idx) + + return idxs + + +class _ConstraintWrapper: + """Object to wrap/evaluate user defined constraints. + + Very similar in practice to `PreparedConstraint`, except that no evaluation + of jac/hess is performed (explicit or implicit). + + If created successfully, it will contain the attributes listed below. + + Parameters + ---------- + constraint : {`NonlinearConstraint`, `LinearConstraint`, `Bounds`} + Constraint to check and prepare. + x0 : array_like + Initial vector of independent variables, shape (N,) + + Attributes + ---------- + fun : callable + Function defining the constraint wrapped by one of the convenience + classes. + bounds : 2-tuple + Contains lower and upper bounds for the constraints --- lb and ub. + These are converted to ndarray and have a size equal to the number of + the constraints. + + Notes + ----- + _ConstraintWrapper.fun and _ConstraintWrapper.violation can get sent + arrays of shape (N, S) or (N,), where S is the number of vectors of shape + (N,) to consider constraints for. 
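+
+    A hypothetical example: wrapping ``LinearConstraint([[1, 1]], -np.inf,
+    1.9)`` gives ``num_constr == 1``, and ``violation(x)`` returns how far
+    ``x[0] + x[1]`` exceeds 1.9 (zero where the constraint is satisfied).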
+ """ + def __init__(self, constraint, x0): + self.constraint = constraint + + if isinstance(constraint, NonlinearConstraint): + def fun(x): + x = np.asarray(x) + return np.atleast_1d(constraint.fun(x)) + elif isinstance(constraint, LinearConstraint): + def fun(x): + if issparse(constraint.A): + A = constraint.A + else: + A = np.atleast_2d(constraint.A) + + res = A.dot(x) + # x either has shape (N, S) or (N) + # (M, N) x (N, S) --> (M, S) + # (M, N) x (N,) --> (M,) + # However, if (M, N) is a matrix then: + # (M, N) * (N,) --> (M, 1), we need this to be (M,) + if x.ndim == 1 and res.ndim == 2: + # deal with case that constraint.A is an np.matrix + # see gh20041 + res = np.asarray(res)[:, 0] + + return res + elif isinstance(constraint, Bounds): + def fun(x): + return np.asarray(x) + else: + raise ValueError("`constraint` of an unknown type is passed.") + + self.fun = fun + + lb = np.asarray(constraint.lb, dtype=float) + ub = np.asarray(constraint.ub, dtype=float) + + x0 = np.asarray(x0) + + # find out the number of constraints + f0 = fun(x0) + self.num_constr = m = f0.size + self.parameter_count = x0.size + + if lb.ndim == 0: + lb = np.resize(lb, m) + if ub.ndim == 0: + ub = np.resize(ub, m) + + self.bounds = (lb, ub) + + def __call__(self, x): + return np.atleast_1d(self.fun(x)) + + def violation(self, x): + """How much the constraint is exceeded by. + + Parameters + ---------- + x : array-like + Vector of independent variables, (N, S), where N is number of + parameters and S is the number of solutions to be investigated. + + Returns + ------- + excess : array-like + How much the constraint is exceeded by, for each of the + constraints specified by `_ConstraintWrapper.fun`. + Has shape (M, S) where M is the number of constraint components. + """ + # expect ev to have shape (num_constr, S) or (num_constr,) + ev = self.fun(np.asarray(x)) + + try: + excess_lb = np.maximum(self.bounds[0] - ev.T, 0) + excess_ub = np.maximum(ev.T - self.bounds[1], 0) + except ValueError as e: + raise RuntimeError("An array returned from a Constraint has" + " the wrong shape. If `vectorized is False`" + " the Constraint should return an array of" + " shape (M,). 
If `vectorized is True` then" + " the Constraint must return an array of" + " shape (M, S), where S is the number of" + " solution vectors and M is the number of" + " constraint components in a given" + " Constraint object.") from e + + v = (excess_lb + excess_ub).T + return v diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_direct.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_direct.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..52d4dd69994a38d7db77949e42c646280926879f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_direct.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_direct_py.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_direct_py.py new file mode 100644 index 0000000000000000000000000000000000000000..440cbb5ae866462b6299b1e12d4a6ba1e407fd62 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_direct_py.py @@ -0,0 +1,278 @@ +from __future__ import annotations +from typing import ( # noqa: UP035 + Any, Callable, Iterable, TYPE_CHECKING +) + +import numpy as np +from scipy.optimize import OptimizeResult +from ._constraints import old_bound_to_new, Bounds +from ._direct import direct as _direct # type: ignore + +if TYPE_CHECKING: + import numpy.typing as npt + +__all__ = ['direct'] + +ERROR_MESSAGES = ( + "Number of function evaluations done is larger than maxfun={}", + "Number of iterations is larger than maxiter={}", + "u[i] < l[i] for some i", + "maxfun is too large", + "Initialization failed", + "There was an error in the creation of the sample points", + "An error occurred while the function was sampled", + "Maximum number of levels has been reached.", + "Forced stop", + "Invalid arguments", + "Out of memory", +) + +SUCCESS_MESSAGES = ( + ("The best function value found is within a relative error={} " + "of the (known) global optimum f_min"), + ("The volume of the hyperrectangle containing the lowest function value " + "found is below vol_tol={}"), + ("The side length measure of the hyperrectangle containing the lowest " + "function value found is below len_tol={}"), +) + + +def direct( + func: Callable[[npt.ArrayLike, tuple[Any]], float], + bounds: Iterable | Bounds, + *, + args: tuple = (), + eps: float = 1e-4, + maxfun: int | None = None, + maxiter: int = 1000, + locally_biased: bool = True, + f_min: float = -np.inf, + f_min_rtol: float = 1e-4, + vol_tol: float = 1e-16, + len_tol: float = 1e-6, + callback: Callable[[npt.ArrayLike], None] | None = None +) -> OptimizeResult: + """ + Finds the global minimum of a function using the + DIRECT algorithm. + + Parameters + ---------- + func : callable + The objective function to be minimized. + ``func(x, *args) -> float`` + where ``x`` is an 1-D array with shape (n,) and ``args`` is a tuple of + the fixed parameters needed to completely specify the function. + bounds : sequence or `Bounds` + Bounds for variables. There are two ways to specify the bounds: + + 1. Instance of `Bounds` class. + 2. ``(min, max)`` pairs for each element in ``x``. + + args : tuple, optional + Any additional fixed parameters needed to + completely specify the objective function. + eps : float, optional + Minimal required difference of the objective function values + between the current best hyperrectangle and the next potentially + optimal hyperrectangle to be divided. 
In consequence, `eps` serves as a + tradeoff between local and global search: the smaller, the more local + the search becomes. Default is 1e-4. + maxfun : int or None, optional + Approximate upper bound on objective function evaluations. + If `None`, will be automatically set to ``1000 * N`` where ``N`` + represents the number of dimensions. Will be capped if necessary to + limit DIRECT's RAM usage to app. 1GiB. This will only occur for very + high dimensional problems and excessive `max_fun`. Default is `None`. + maxiter : int, optional + Maximum number of iterations. Default is 1000. + locally_biased : bool, optional + If `True` (default), use the locally biased variant of the + algorithm known as DIRECT_L. If `False`, use the original unbiased + DIRECT algorithm. For hard problems with many local minima, + `False` is recommended. + f_min : float, optional + Function value of the global optimum. Set this value only if the + global optimum is known. Default is ``-np.inf``, so that this + termination criterion is deactivated. + f_min_rtol : float, optional + Terminate the optimization once the relative error between the + current best minimum `f` and the supplied global minimum `f_min` + is smaller than `f_min_rtol`. This parameter is only used if + `f_min` is also set. Must lie between 0 and 1. Default is 1e-4. + vol_tol : float, optional + Terminate the optimization once the volume of the hyperrectangle + containing the lowest function value is smaller than `vol_tol` + of the complete search space. Must lie between 0 and 1. + Default is 1e-16. + len_tol : float, optional + If `locally_biased=True`, terminate the optimization once half of + the normalized maximal side length of the hyperrectangle containing + the lowest function value is smaller than `len_tol`. + If `locally_biased=False`, terminate the optimization once half of + the normalized diagonal of the hyperrectangle containing the lowest + function value is smaller than `len_tol`. Must lie between 0 and 1. + Default is 1e-6. + callback : callable, optional + A callback function with signature ``callback(xk)`` where ``xk`` + represents the best function value found so far. + + Returns + ------- + res : OptimizeResult + The optimization result represented as a ``OptimizeResult`` object. + Important attributes are: ``x`` the solution array, ``success`` a + Boolean flag indicating if the optimizer exited successfully and + ``message`` which describes the cause of the termination. See + `OptimizeResult` for a description of other attributes. + + Notes + ----- + DIviding RECTangles (DIRECT) is a deterministic global + optimization algorithm capable of minimizing a black box function with + its variables subject to lower and upper bound constraints by sampling + potential solutions in the search space [1]_. The algorithm starts by + normalising the search space to an n-dimensional unit hypercube. + It samples the function at the center of this hypercube and at 2n + (n is the number of variables) more points, 2 in each coordinate + direction. Using these function values, DIRECT then divides the + domain into hyperrectangles, each having exactly one of the sampling + points as its center. In each iteration, DIRECT chooses, using the `eps` + parameter which defaults to 1e-4, some of the existing hyperrectangles + to be further divided. 
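+    (Heuristically, only hyperrectangles whose objective values could
+    improve on the current best by at least `eps` are considered
+    potentially optimal.)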
This division process continues until either the + maximum number of iterations or maximum function evaluations allowed + are exceeded, or the hyperrectangle containing the minimal value found + so far becomes small enough. If `f_min` is specified, the optimization + will stop once this function value is reached within a relative tolerance. + The locally biased variant of DIRECT (originally called DIRECT_L) [2]_ is + used by default. It makes the search more locally biased and more + efficient for cases with only a few local minima. + + A note about termination criteria: `vol_tol` refers to the volume of the + hyperrectangle containing the lowest function value found so far. This + volume decreases exponentially with increasing dimensionality of the + problem. Therefore `vol_tol` should be decreased to avoid premature + termination of the algorithm for higher dimensions. This does not hold + for `len_tol`: it refers either to half of the maximal side length + (for ``locally_biased=True``) or half of the diagonal of the + hyperrectangle (for ``locally_biased=False``). + + This code is based on the DIRECT 2.0.4 Fortran code by Gablonsky et al. at + https://ctk.math.ncsu.edu/SOFTWARE/DIRECTv204.tar.gz . + This original version was initially converted via f2c and then cleaned up + and reorganized by Steven G. Johnson, August 2007, for the NLopt project. + The `direct` function wraps the C implementation. + + .. versionadded:: 1.9.0 + + References + ---------- + .. [1] Jones, D.R., Perttunen, C.D. & Stuckman, B.E. Lipschitzian + optimization without the Lipschitz constant. J Optim Theory Appl + 79, 157-181 (1993). + .. [2] Gablonsky, J., Kelley, C. A Locally-Biased form of the DIRECT + Algorithm. Journal of Global Optimization 21, 27-37 (2001). + + Examples + -------- + The following example is a 2-D problem with four local minima: minimizing + the Styblinski-Tang function + (https://en.wikipedia.org/wiki/Test_functions_for_optimization). + + >>> from scipy.optimize import direct, Bounds + >>> def styblinski_tang(pos): + ... x, y = pos + ... return 0.5 * (x**4 - 16*x**2 + 5*x + y**4 - 16*y**2 + 5*y) + >>> bounds = Bounds([-4., -4.], [4., 4.]) + >>> result = direct(styblinski_tang, bounds) + >>> result.x, result.fun, result.nfev + array([-2.90321597, -2.90321597]), -78.3323279095383, 2011 + + The correct global minimum was found but with a huge number of function + evaluations (2011). Loosening the termination tolerances `vol_tol` and + `len_tol` can be used to stop DIRECT earlier. 
+ + >>> result = direct(styblinski_tang, bounds, len_tol=1e-3) + >>> result.x, result.fun, result.nfev + array([-2.9044353, -2.9044353]), -78.33230330754142, 207 + + """ + # convert bounds to new Bounds class if necessary + if not isinstance(bounds, Bounds): + if isinstance(bounds, list) or isinstance(bounds, tuple): + lb, ub = old_bound_to_new(bounds) + bounds = Bounds(lb, ub) + else: + message = ("bounds must be a sequence or " + "instance of Bounds class") + raise ValueError(message) + + lb = np.ascontiguousarray(bounds.lb, dtype=np.float64) + ub = np.ascontiguousarray(bounds.ub, dtype=np.float64) + + # validate bounds + # check that lower bounds are smaller than upper bounds + if not np.all(lb < ub): + raise ValueError('Bounds are not consistent min < max') + # check for infs + if (np.any(np.isinf(lb)) or np.any(np.isinf(ub))): + raise ValueError("Bounds must not be inf.") + + # validate tolerances + if (vol_tol < 0 or vol_tol > 1): + raise ValueError("vol_tol must be between 0 and 1.") + if (len_tol < 0 or len_tol > 1): + raise ValueError("len_tol must be between 0 and 1.") + if (f_min_rtol < 0 or f_min_rtol > 1): + raise ValueError("f_min_rtol must be between 0 and 1.") + + # validate maxfun and maxiter + if maxfun is None: + maxfun = 1000 * lb.shape[0] + if not isinstance(maxfun, int): + raise ValueError("maxfun must be of type int.") + if maxfun < 0: + raise ValueError("maxfun must be > 0.") + if not isinstance(maxiter, int): + raise ValueError("maxiter must be of type int.") + if maxiter < 0: + raise ValueError("maxiter must be > 0.") + + # validate boolean parameters + if not isinstance(locally_biased, bool): + raise ValueError("locally_biased must be True or False.") + + def _func_wrap(x, args=None): + x = np.asarray(x) + if args is None: + f = func(x) + else: + f = func(x, *args) + # always return a float + return np.asarray(f).item() + + # TODO: fix disp argument + x, fun, ret_code, nfev, nit = _direct( + _func_wrap, + np.asarray(lb), np.asarray(ub), + args, + False, eps, maxfun, maxiter, + locally_biased, + f_min, f_min_rtol, + vol_tol, len_tol, callback + ) + + format_val = (maxfun, maxiter, f_min_rtol, vol_tol, len_tol) + if ret_code > 2: + message = SUCCESS_MESSAGES[ret_code - 3].format( + format_val[ret_code - 1]) + elif 0 < ret_code <= 2: + message = ERROR_MESSAGES[ret_code - 1].format(format_val[ret_code - 1]) + elif 0 > ret_code > -100: + message = ERROR_MESSAGES[abs(ret_code) + 1] + else: + message = ERROR_MESSAGES[ret_code + 99] + + return OptimizeResult(x=np.asarray(x), fun=fun, status=ret_code, + success=ret_code > 2, message=message, + nfev=nfev, nit=nit) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_dual_annealing.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_dual_annealing.py new file mode 100644 index 0000000000000000000000000000000000000000..0dd9eed9dbf9670ee6c435fa2cbd60e55971e804 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_dual_annealing.py @@ -0,0 +1,715 @@ +# Dual Annealing implementation. +# Copyright (c) 2018 Sylvain Gubian , +# Yang Xiang +# Author: Sylvain Gubian, Yang Xiang, PMP S.A. 
+
+"""
+A Dual Annealing global optimization algorithm
+"""
+
+import numpy as np
+from scipy.optimize import OptimizeResult
+from scipy.optimize import minimize, Bounds
+from scipy.special import gammaln
+from scipy._lib._util import check_random_state
+from scipy.optimize._constraints import new_bounds_to_old
+
+__all__ = ['dual_annealing']
+
+
+class VisitingDistribution:
+    """
+    Class used to generate new coordinates based on the distorted
+    Cauchy-Lorentz distribution. Depending on the step within the strategy
+    chain, the class implements the strategy for generating new location
+    changes.
+
+    Parameters
+    ----------
+    lb : array_like
+        A 1-D NumPy ndarray containing lower bounds of the generated
+        components. Neither NaN nor inf is allowed.
+    ub : array_like
+        A 1-D NumPy ndarray containing upper bounds for the generated
+        components. Neither NaN nor inf is allowed.
+    visiting_param : float
+        Parameter for the visiting distribution. Default value is 2.62.
+        Higher values give the visiting distribution a heavier tail, which
+        makes the algorithm jump to more distant regions.
+        The value range is (1, 3]. Its value is fixed for the life of the
+        object.
+    rand_gen : {`~numpy.random.RandomState`, `~numpy.random.Generator`}
+        A `~numpy.random.RandomState` or `~numpy.random.Generator` instance
+        used as the source of random numbers.
+
+    """
+    TAIL_LIMIT = 1.e8
+    MIN_VISIT_BOUND = 1.e-10
+
+    def __init__(self, lb, ub, visiting_param, rand_gen):
+        # if you wish to make _visiting_param adjustable during the life of
+        # the object then _factor2, _factor3, _factor5, _d1, _factor6 will
+        # have to be dynamically calculated in `visit_fn`. They're factored
+        # out here so they don't need to be recalculated all the time.
+        self._visiting_param = visiting_param
+        self.rand_gen = rand_gen
+        self.lower = lb
+        self.upper = ub
+        self.bound_range = ub - lb
+
+        # these are invariant numbers unless visiting_param changes
+        self._factor2 = np.exp((4.0 - self._visiting_param) * np.log(
+            self._visiting_param - 1.0))
+        self._factor3 = np.exp((2.0 - self._visiting_param) * np.log(2.0)
+                               / (self._visiting_param - 1.0))
+        self._factor4_p = np.sqrt(np.pi) * self._factor2 / (self._factor3 * (
+            3.0 - self._visiting_param))
+
+        self._factor5 = 1.0 / (self._visiting_param - 1.0) - 0.5
+        self._d1 = 2.0 - self._factor5
+        self._factor6 = np.pi * (1.0 - self._factor5) / np.sin(
+            np.pi * (1.0 - self._factor5)) / np.exp(gammaln(self._d1))
+
+    def visiting(self, x, step, temperature):
+        """ Based on the step in the strategy chain, new coordinates are
+        generated by changing either all components at the same time or
+        only one of them; the new values are computed with the `visit_fn`
+        method.
+        """
+        dim = x.size
+        if step < dim:
+            # Changing all coordinates with a new visiting value
+            visits = self.visit_fn(temperature, dim)
+            upper_sample, lower_sample = self.rand_gen.uniform(size=2)
+            visits[visits > self.TAIL_LIMIT] = self.TAIL_LIMIT * upper_sample
+            visits[visits < -self.TAIL_LIMIT] = -self.TAIL_LIMIT * lower_sample
+            x_visit = visits + x
+            a = x_visit - self.lower
+            b = np.fmod(a, self.bound_range) + self.bound_range
+            x_visit = np.fmod(b, self.bound_range) + self.lower
+            x_visit[np.fabs(
+                x_visit - self.lower) < self.MIN_VISIT_BOUND] += 1.e-10
+        else:
+            # Changing only one coordinate at a time based on the strategy
+            # chain step
+            x_visit = np.copy(x)
+            visit = self.visit_fn(temperature, 1)[0]
+            if visit > self.TAIL_LIMIT:
+                visit = self.TAIL_LIMIT * self.rand_gen.uniform()
+            elif visit < -self.TAIL_LIMIT:
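+                # Resample extreme negative visits uniformly within
+                # TAIL_LIMIT, as done for the positive tail above.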
+                visit = -self.TAIL_LIMIT * self.rand_gen.uniform()
+            index = step - dim
+            x_visit[index] = visit + x[index]
+            a = x_visit[index] - self.lower[index]
+            b = np.fmod(a, self.bound_range[index]) + self.bound_range[index]
+            x_visit[index] = np.fmod(b, self.bound_range[
+                index]) + self.lower[index]
+            if np.fabs(x_visit[index] - self.lower[
+                    index]) < self.MIN_VISIT_BOUND:
+                x_visit[index] += self.MIN_VISIT_BOUND
+        return x_visit
+
+    def visit_fn(self, temperature, dim):
+        """ Formula Visita from p. 405 of reference [2] """
+        x, y = self.rand_gen.normal(size=(dim, 2)).T
+
+        factor1 = np.exp(np.log(temperature) / (self._visiting_param - 1.0))
+        factor4 = self._factor4_p * factor1
+
+        # sigmax
+        x *= np.exp(-(self._visiting_param - 1.0) * np.log(
+            self._factor6 / factor4) / (3.0 - self._visiting_param))
+
+        den = np.exp((self._visiting_param - 1.0) * np.log(np.fabs(y)) /
+                     (3.0 - self._visiting_param))
+
+        return x / den
+
+
+class EnergyState:
+    """
+    Class used to record the energy state. At any time, it knows the
+    coordinates currently in use and the most recent best location.
+
+    Parameters
+    ----------
+    lower : array_like
+        A 1-D NumPy ndarray containing lower bounds for generating the
+        initial random components in the `reset` method.
+    upper : array_like
+        A 1-D NumPy ndarray containing upper bounds for generating the
+        initial random components in the `reset` method.
+        Neither NaN nor inf is allowed.
+    callback : callable, ``callback(x, f, context)``, optional
+        A callback function which will be called for all minima found.
+        ``x`` and ``f`` are the coordinates and function value of the
+        latest minimum found, and ``context`` has value in [0, 1, 2].
+    """
+    # Maximum number of trials for generating a valid starting point
+    MAX_REINIT_COUNT = 1000
+
+    def __init__(self, lower, upper, callback=None):
+        self.ebest = None
+        self.current_energy = None
+        self.current_location = None
+        self.xbest = None
+        self.lower = lower
+        self.upper = upper
+        self.callback = callback
+
+    def reset(self, func_wrapper, rand_gen, x0=None):
+        """
+        Initialize the current location in the search domain. If `x0` is not
+        provided, a random location within the bounds is generated.
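+        If the objective function returns NaN or an infinite value at the
+        chosen location, new random locations are drawn, up to
+        ``MAX_REINIT_COUNT`` times.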
+ """ + if x0 is None: + self.current_location = rand_gen.uniform(self.lower, self.upper, + size=len(self.lower)) + else: + self.current_location = np.copy(x0) + init_error = True + reinit_counter = 0 + while init_error: + self.current_energy = func_wrapper.fun(self.current_location) + if self.current_energy is None: + raise ValueError('Objective function is returning None') + if (not np.isfinite(self.current_energy) or np.isnan( + self.current_energy)): + if reinit_counter >= EnergyState.MAX_REINIT_COUNT: + init_error = False + message = ( + 'Stopping algorithm because function ' + 'create NaN or (+/-) infinity values even with ' + 'trying new random parameters' + ) + raise ValueError(message) + self.current_location = rand_gen.uniform(self.lower, + self.upper, + size=self.lower.size) + reinit_counter += 1 + else: + init_error = False + # If first time reset, initialize ebest and xbest + if self.ebest is None and self.xbest is None: + self.ebest = self.current_energy + self.xbest = np.copy(self.current_location) + # Otherwise, we keep them in case of reannealing reset + + def update_best(self, e, x, context): + self.ebest = e + self.xbest = np.copy(x) + if self.callback is not None: + val = self.callback(x, e, context) + if val is not None: + if val: + return ('Callback function requested to stop early by ' + 'returning True') + + def update_current(self, e, x): + self.current_energy = e + self.current_location = np.copy(x) + + +class StrategyChain: + """ + Class that implements within a Markov chain the strategy for location + acceptance and local search decision making. + + Parameters + ---------- + acceptance_param : float + Parameter for acceptance distribution. It is used to control the + probability of acceptance. The lower the acceptance parameter, the + smaller the probability of acceptance. Default value is -5.0 with + a range (-1e4, -5]. + visit_dist : VisitingDistribution + Instance of `VisitingDistribution` class. + func_wrapper : ObjectiveFunWrapper + Instance of `ObjectiveFunWrapper` class. + minimizer_wrapper: LocalSearchWrapper + Instance of `LocalSearchWrapper` class. + rand_gen : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + energy_state: EnergyState + Instance of `EnergyState` class. 
+ + """ + + def __init__(self, acceptance_param, visit_dist, func_wrapper, + minimizer_wrapper, rand_gen, energy_state): + # Local strategy chain minimum energy and location + self.emin = energy_state.current_energy + self.xmin = np.array(energy_state.current_location) + # Global optimizer state + self.energy_state = energy_state + # Acceptance parameter + self.acceptance_param = acceptance_param + # Visiting distribution instance + self.visit_dist = visit_dist + # Wrapper to objective function + self.func_wrapper = func_wrapper + # Wrapper to the local minimizer + self.minimizer_wrapper = minimizer_wrapper + self.not_improved_idx = 0 + self.not_improved_max_idx = 1000 + self._rand_gen = rand_gen + self.temperature_step = 0 + self.K = 100 * len(energy_state.current_location) + + def accept_reject(self, j, e, x_visit): + r = self._rand_gen.uniform() + pqv_temp = 1.0 - ((1.0 - self.acceptance_param) * + (e - self.energy_state.current_energy) / self.temperature_step) + if pqv_temp <= 0.: + pqv = 0. + else: + pqv = np.exp(np.log(pqv_temp) / ( + 1. - self.acceptance_param)) + + if r <= pqv: + # We accept the new location and update state + self.energy_state.update_current(e, x_visit) + self.xmin = np.copy(self.energy_state.current_location) + + # No improvement for a long time + if self.not_improved_idx >= self.not_improved_max_idx: + if j == 0 or self.energy_state.current_energy < self.emin: + self.emin = self.energy_state.current_energy + self.xmin = np.copy(self.energy_state.current_location) + + def run(self, step, temperature): + self.temperature_step = temperature / float(step + 1) + self.not_improved_idx += 1 + for j in range(self.energy_state.current_location.size * 2): + if j == 0: + if step == 0: + self.energy_state_improved = True + else: + self.energy_state_improved = False + x_visit = self.visit_dist.visiting( + self.energy_state.current_location, j, temperature) + # Calling the objective function + e = self.func_wrapper.fun(x_visit) + if e < self.energy_state.current_energy: + # We have got a better energy value + self.energy_state.update_current(e, x_visit) + if e < self.energy_state.ebest: + val = self.energy_state.update_best(e, x_visit, 0) + if val is not None: + if val: + return val + self.energy_state_improved = True + self.not_improved_idx = 0 + else: + # We have not improved but do we accept the new location? 
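+                # accept_reject draws from the generalized Metropolis
+                # acceptance probability given in the dual_annealing
+                # docstring: p = min{1, [1 - (1 - q_a)*beta*dE]^(1/(1 - q_a))}
+                # with q_a = acceptance_param and beta = 1/temperature_step.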
+ self.accept_reject(j, e, x_visit) + if self.func_wrapper.nfev >= self.func_wrapper.maxfun: + return ('Maximum number of function call reached ' + 'during annealing') + # End of StrategyChain loop + + def local_search(self): + # Decision making for performing a local search + # based on strategy chain results + # If energy has been improved or no improvement since too long, + # performing a local search with the best strategy chain location + if self.energy_state_improved: + # Global energy has improved, let's see if LS improves further + e, x = self.minimizer_wrapper.local_search(self.energy_state.xbest, + self.energy_state.ebest) + if e < self.energy_state.ebest: + self.not_improved_idx = 0 + val = self.energy_state.update_best(e, x, 1) + if val is not None: + if val: + return val + self.energy_state.update_current(e, x) + if self.func_wrapper.nfev >= self.func_wrapper.maxfun: + return ('Maximum number of function call reached ' + 'during local search') + # Check probability of a need to perform a LS even if no improvement + do_ls = False + if self.K < 90 * len(self.energy_state.current_location): + pls = np.exp(self.K * ( + self.energy_state.ebest - self.energy_state.current_energy) / + self.temperature_step) + if pls >= self._rand_gen.uniform(): + do_ls = True + # Global energy not improved, let's see what LS gives + # on the best strategy chain location + if self.not_improved_idx >= self.not_improved_max_idx: + do_ls = True + if do_ls: + e, x = self.minimizer_wrapper.local_search(self.xmin, self.emin) + self.xmin = np.copy(x) + self.emin = e + self.not_improved_idx = 0 + self.not_improved_max_idx = self.energy_state.current_location.size + if e < self.energy_state.ebest: + val = self.energy_state.update_best( + self.emin, self.xmin, 2) + if val is not None: + if val: + return val + self.energy_state.update_current(e, x) + if self.func_wrapper.nfev >= self.func_wrapper.maxfun: + return ('Maximum number of function call reached ' + 'during dual annealing') + + +class ObjectiveFunWrapper: + + def __init__(self, func, maxfun=1e7, *args): + self.func = func + self.args = args + # Number of objective function evaluations + self.nfev = 0 + # Number of gradient function evaluation if used + self.ngev = 0 + # Number of hessian of the objective function if used + self.nhev = 0 + self.maxfun = maxfun + + def fun(self, x): + self.nfev += 1 + return self.func(x, *self.args) + + +class LocalSearchWrapper: + """ + Class used to wrap around the minimizer used for local search + Default local minimizer is SciPy minimizer L-BFGS-B + """ + + LS_MAXITER_RATIO = 6 + LS_MAXITER_MIN = 100 + LS_MAXITER_MAX = 1000 + + def __init__(self, search_bounds, func_wrapper, *args, **kwargs): + self.func_wrapper = func_wrapper + self.kwargs = kwargs + self.jac = self.kwargs.get('jac', None) + self.minimizer = minimize + bounds_list = list(zip(*search_bounds)) + self.lower = np.array(bounds_list[0]) + self.upper = np.array(bounds_list[1]) + + # If no minimizer specified, use SciPy minimize with 'L-BFGS-B' method + if not self.kwargs: + n = len(self.lower) + ls_max_iter = min(max(n * self.LS_MAXITER_RATIO, + self.LS_MAXITER_MIN), + self.LS_MAXITER_MAX) + self.kwargs['method'] = 'L-BFGS-B' + self.kwargs['options'] = { + 'maxiter': ls_max_iter, + } + self.kwargs['bounds'] = list(zip(self.lower, self.upper)) + elif callable(self.jac): + def wrapped_jac(x): + return self.jac(x, *args) + self.kwargs['jac'] = wrapped_jac + + def local_search(self, x, e): + # Run local search from the given x location where energy value is e + 
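+        # The wrapped minimizer is scipy.optimize.minimize; unless the
+        # caller supplied minimizer_kwargs, this runs L-BFGS-B restricted
+        # to the search bounds (see LocalSearchWrapper.__init__).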
x_tmp = np.copy(x) + mres = self.minimizer(self.func_wrapper.fun, x, **self.kwargs) + if 'njev' in mres: + self.func_wrapper.ngev += mres.njev + if 'nhev' in mres: + self.func_wrapper.nhev += mres.nhev + # Check if is valid value + is_finite = np.all(np.isfinite(mres.x)) and np.isfinite(mres.fun) + in_bounds = np.all(mres.x >= self.lower) and np.all( + mres.x <= self.upper) + is_valid = is_finite and in_bounds + + # Use the new point only if it is valid and return a better results + if is_valid and mres.fun < e: + return mres.fun, mres.x + else: + return e, x_tmp + + +def dual_annealing(func, bounds, args=(), maxiter=1000, + minimizer_kwargs=None, initial_temp=5230., + restart_temp_ratio=2.e-5, visit=2.62, accept=-5.0, + maxfun=1e7, seed=None, no_local_search=False, + callback=None, x0=None): + """ + Find the global minimum of a function using Dual Annealing. + + Parameters + ---------- + func : callable + The objective function to be minimized. Must be in the form + ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array + and ``args`` is a tuple of any additional fixed parameters needed to + completely specify the function. + bounds : sequence or `Bounds` + Bounds for variables. There are two ways to specify the bounds: + + 1. Instance of `Bounds` class. + 2. Sequence of ``(min, max)`` pairs for each element in `x`. + + args : tuple, optional + Any additional fixed parameters needed to completely specify the + objective function. + maxiter : int, optional + The maximum number of global search iterations. Default value is 1000. + minimizer_kwargs : dict, optional + Extra keyword arguments to be passed to the local minimizer + (`minimize`). Some important options could be: + ``method`` for the minimizer method to use and ``args`` for + objective function additional arguments. + initial_temp : float, optional + The initial temperature, use higher values to facilitates a wider + search of the energy landscape, allowing dual_annealing to escape + local minima that it is trapped in. Default value is 5230. Range is + (0.01, 5.e4]. + restart_temp_ratio : float, optional + During the annealing process, temperature is decreasing, when it + reaches ``initial_temp * restart_temp_ratio``, the reannealing process + is triggered. Default value of the ratio is 2e-5. Range is (0, 1). + visit : float, optional + Parameter for visiting distribution. Default value is 2.62. Higher + values give the visiting distribution a heavier tail, this makes + the algorithm jump to a more distant region. The value range is (1, 3]. + accept : float, optional + Parameter for acceptance distribution. It is used to control the + probability of acceptance. The lower the acceptance parameter, the + smaller the probability of acceptance. Default value is -5.0 with + a range (-1e4, -5]. + maxfun : int, optional + Soft limit for the number of objective function calls. If the + algorithm is in the middle of a local search, this number will be + exceeded, the algorithm will stop just after the local search is + done. Default value is 1e7. + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + Specify `seed` for repeatable minimizations. 
The random numbers + generated with this seed only affect the visiting distribution function + and new coordinates generation. + no_local_search : bool, optional + If `no_local_search` is set to True, a traditional Generalized + Simulated Annealing will be performed with no local search + strategy applied. + callback : callable, optional + A callback function with signature ``callback(x, f, context)``, + which will be called for all minima found. + ``x`` and ``f`` are the coordinates and function value of the + latest minimum found, and ``context`` has value in [0, 1, 2], with the + following meaning: + + - 0: minimum detected in the annealing process. + - 1: detection occurred in the local search process. + - 2: detection done in the dual annealing process. + + If the callback implementation returns True, the algorithm will stop. + x0 : ndarray, shape(n,), optional + Coordinates of a single N-D starting point. + + Returns + ------- + res : OptimizeResult + The optimization result represented as a `OptimizeResult` object. + Important attributes are: ``x`` the solution array, ``fun`` the value + of the function at the solution, and ``message`` which describes the + cause of the termination. + See `OptimizeResult` for a description of other attributes. + + Notes + ----- + This function implements the Dual Annealing optimization. This stochastic + approach derived from [3]_ combines the generalization of CSA (Classical + Simulated Annealing) and FSA (Fast Simulated Annealing) [1]_ [2]_ coupled + to a strategy for applying a local search on accepted locations [4]_. + An alternative implementation of this same algorithm is described in [5]_ + and benchmarks are presented in [6]_. This approach introduces an advanced + method to refine the solution found by the generalized annealing + process. This algorithm uses a distorted Cauchy-Lorentz visiting + distribution, with its shape controlled by the parameter :math:`q_{v}` + + .. math:: + + g_{q_{v}}(\\Delta x(t)) \\propto \\frac{ \\ + \\left[T_{q_{v}}(t) \\right]^{-\\frac{D}{3-q_{v}}}}{ \\ + \\left[{1+(q_{v}-1)\\frac{(\\Delta x(t))^{2}} { \\ + \\left[T_{q_{v}}(t)\\right]^{\\frac{2}{3-q_{v}}}}}\\right]^{ \\ + \\frac{1}{q_{v}-1}+\\frac{D-1}{2}}} + + Where :math:`t` is the artificial time. This visiting distribution is used + to generate a trial jump distance :math:`\\Delta x(t)` of variable + :math:`x(t)` under artificial temperature :math:`T_{q_{v}}(t)`. + + From the starting point, after calling the visiting distribution + function, the acceptance probability is computed as follows: + + .. math:: + + p_{q_{a}} = \\min{\\{1,\\left[1-(1-q_{a}) \\beta \\Delta E \\right]^{ \\ + \\frac{1}{1-q_{a}}}\\}} + + Where :math:`q_{a}` is a acceptance parameter. For :math:`q_{a}<1`, zero + acceptance probability is assigned to the cases where + + .. math:: + + [1-(1-q_{a}) \\beta \\Delta E] < 0 + + The artificial temperature :math:`T_{q_{v}}(t)` is decreased according to + + .. math:: + + T_{q_{v}}(t) = T_{q_{v}}(1) \\frac{2^{q_{v}-1}-1}{\\left( \\ + 1 + t\\right)^{q_{v}-1}-1} + + Where :math:`q_{v}` is the visiting parameter. + + .. versionadded:: 1.2.0 + + References + ---------- + .. [1] Tsallis C. Possible generalization of Boltzmann-Gibbs + statistics. Journal of Statistical Physics, 52, 479-487 (1998). + .. [2] Tsallis C, Stariolo DA. Generalized Simulated Annealing. + Physica A, 233, 395-406 (1996). + .. [3] Xiang Y, Sun DY, Fan W, Gong XG. Generalized Simulated + Annealing Algorithm and Its Application to the Thomson Model. 
+ Physics Letters A, 233, 216-220 (1997). + .. [4] Xiang Y, Gong XG. Efficiency of Generalized Simulated + Annealing. Physical Review E, 62, 4473 (2000). + .. [5] Xiang Y, Gubian S, Suomela B, Hoeng J. Generalized + Simulated Annealing for Efficient Global Optimization: the GenSA + Package for R. The R Journal, Volume 5/1 (2013). + .. [6] Mullen, K. Continuous Global Optimization in R. Journal of + Statistical Software, 60(6), 1 - 45, (2014). + :doi:`10.18637/jss.v060.i06` + + Examples + -------- + The following example is a 10-D problem, with many local minima. + The function involved is called Rastrigin + (https://en.wikipedia.org/wiki/Rastrigin_function) + + >>> import numpy as np + >>> from scipy.optimize import dual_annealing + >>> func = lambda x: np.sum(x*x - 10*np.cos(2*np.pi*x)) + 10*np.size(x) + >>> lw = [-5.12] * 10 + >>> up = [5.12] * 10 + >>> ret = dual_annealing(func, bounds=list(zip(lw, up))) + >>> ret.x + array([-4.26437714e-09, -3.91699361e-09, -1.86149218e-09, -3.97165720e-09, + -6.29151648e-09, -6.53145322e-09, -3.93616815e-09, -6.55623025e-09, + -6.05775280e-09, -5.00668935e-09]) # random + >>> ret.fun + 0.000000 + + """ + + if isinstance(bounds, Bounds): + bounds = new_bounds_to_old(bounds.lb, bounds.ub, len(bounds.lb)) + + if x0 is not None and not len(x0) == len(bounds): + raise ValueError('Bounds size does not match x0') + + lu = list(zip(*bounds)) + lower = np.array(lu[0]) + upper = np.array(lu[1]) + # Check that restart temperature ratio is correct + if restart_temp_ratio <= 0. or restart_temp_ratio >= 1.: + raise ValueError('Restart temperature ratio has to be in range (0, 1)') + # Checking bounds are valid + if (np.any(np.isinf(lower)) or np.any(np.isinf(upper)) or np.any( + np.isnan(lower)) or np.any(np.isnan(upper))): + raise ValueError('Some bounds values are inf values or nan values') + # Checking that bounds are consistent + if not np.all(lower < upper): + raise ValueError('Bounds are not consistent min < max') + # Checking that bounds are the same length + if not len(lower) == len(upper): + raise ValueError('Bounds do not have the same dimensions') + + # Wrapper for the objective function + func_wrapper = ObjectiveFunWrapper(func, maxfun, *args) + + # minimizer_kwargs has to be a dict, not None + minimizer_kwargs = minimizer_kwargs or {} + + minimizer_wrapper = LocalSearchWrapper( + bounds, func_wrapper, *args, **minimizer_kwargs) + + # Initialization of random Generator for reproducible runs if seed provided + rand_state = check_random_state(seed) + # Initialization of the energy state + energy_state = EnergyState(lower, upper, callback) + energy_state.reset(func_wrapper, rand_state, x0) + # Minimum value of annealing temperature reached to perform + # re-annealing + temperature_restart = initial_temp * restart_temp_ratio + # VisitingDistribution instance + visit_dist = VisitingDistribution(lower, upper, visit, rand_state) + # Strategy chain instance + strategy_chain = StrategyChain(accept, visit_dist, func_wrapper, + minimizer_wrapper, rand_state, energy_state) + need_to_stop = False + iteration = 0 + message = [] + # OptimizeResult object to be returned + optimize_res = OptimizeResult() + optimize_res.success = True + optimize_res.status = 0 + + t1 = np.exp((visit - 1) * np.log(2.0)) - 1.0 + # Run the search loop + while not need_to_stop: + for i in range(maxiter): + # Compute temperature for this step + s = float(i) + 2.0 + t2 = np.exp((visit - 1) * np.log(s)) - 1.0 + temperature = initial_temp * t1 / t2 + if iteration >= maxiter: + 
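+                # The global iteration budget is exhausted: record the
+                # reason and leave the outer annealing loop.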
message.append("Maximum number of iteration reached") + need_to_stop = True + break + # Need a re-annealing process? + if temperature < temperature_restart: + energy_state.reset(func_wrapper, rand_state) + break + # starting strategy chain + val = strategy_chain.run(i, temperature) + if val is not None: + message.append(val) + need_to_stop = True + optimize_res.success = False + break + # Possible local search at the end of the strategy chain + if not no_local_search: + val = strategy_chain.local_search() + if val is not None: + message.append(val) + need_to_stop = True + optimize_res.success = False + break + iteration += 1 + + # Setting the OptimizeResult values + optimize_res.x = energy_state.xbest + optimize_res.fun = energy_state.ebest + optimize_res.nit = iteration + optimize_res.nfev = func_wrapper.nfev + optimize_res.njev = func_wrapper.ngev + optimize_res.nhev = func_wrapper.nhev + optimize_res.message = message + return optimize_res diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_group_columns.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_group_columns.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..1b74ea88cdf158f14688b803c4304a5d06412fbc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_group_columns.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_hessian_update_strategy.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_hessian_update_strategy.py new file mode 100644 index 0000000000000000000000000000000000000000..b8529e51e83b968fea24b0859c434b2440c47575 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_hessian_update_strategy.py @@ -0,0 +1,430 @@ +"""Hessian update strategies for quasi-Newton optimization methods.""" +import numpy as np +from numpy.linalg import norm +from scipy.linalg import get_blas_funcs +from warnings import warn + + +__all__ = ['HessianUpdateStrategy', 'BFGS', 'SR1'] + + +class HessianUpdateStrategy: + """Interface for implementing Hessian update strategies. + + Many optimization methods make use of Hessian (or inverse Hessian) + approximations, such as the quasi-Newton methods BFGS, SR1, L-BFGS. + Some of these approximations, however, do not actually need to store + the entire matrix or can compute the internal matrix product with a + given vector in a very efficiently manner. This class serves as an + abstract interface between the optimization algorithm and the + quasi-Newton update strategies, giving freedom of implementation + to store and update the internal matrix as efficiently as possible. + Different choices of initialization and update procedure will result + in different quasi-Newton strategies. + + Four methods should be implemented in derived classes: ``initialize``, + ``update``, ``dot`` and ``get_matrix``. + + Notes + ----- + Any instance of a class that implements this interface, + can be accepted by the method ``minimize`` and used by + the compatible solvers to approximate the Hessian (or + inverse Hessian) used by the optimization algorithms. + """ + + def initialize(self, n, approx_type): + """Initialize internal matrix. + + Allocate internal memory for storing and updating + the Hessian or its inverse. + + Parameters + ---------- + n : int + Problem dimension. + approx_type : {'hess', 'inv_hess'} + Selects either the Hessian or the inverse Hessian. 
+ When set to 'hess' the Hessian will be stored and updated. + When set to 'inv_hess' its inverse will be used instead. + """ + raise NotImplementedError("The method ``initialize(n, approx_type)``" + " is not implemented.") + + def update(self, delta_x, delta_grad): + """Update internal matrix. + + Update Hessian matrix or its inverse (depending on how 'approx_type' + is defined) using information about the last evaluated points. + + Parameters + ---------- + delta_x : ndarray + The difference between two points the gradient + function have been evaluated at: ``delta_x = x2 - x1``. + delta_grad : ndarray + The difference between the gradients: + ``delta_grad = grad(x2) - grad(x1)``. + """ + raise NotImplementedError("The method ``update(delta_x, delta_grad)``" + " is not implemented.") + + def dot(self, p): + """Compute the product of the internal matrix with the given vector. + + Parameters + ---------- + p : array_like + 1-D array representing a vector. + + Returns + ------- + Hp : array + 1-D represents the result of multiplying the approximation matrix + by vector p. + """ + raise NotImplementedError("The method ``dot(p)``" + " is not implemented.") + + def get_matrix(self): + """Return current internal matrix. + + Returns + ------- + H : ndarray, shape (n, n) + Dense matrix containing either the Hessian + or its inverse (depending on how 'approx_type' + is defined). + """ + raise NotImplementedError("The method ``get_matrix(p)``" + " is not implemented.") + + +class FullHessianUpdateStrategy(HessianUpdateStrategy): + """Hessian update strategy with full dimensional internal representation. + """ + _syr = get_blas_funcs('syr', dtype='d') # Symmetric rank 1 update + _syr2 = get_blas_funcs('syr2', dtype='d') # Symmetric rank 2 update + # Symmetric matrix-vector product + _symv = get_blas_funcs('symv', dtype='d') + + def __init__(self, init_scale='auto'): + self.init_scale = init_scale + # Until initialize is called we can't really use the class, + # so it makes sense to set everything to None. + self.first_iteration = None + self.approx_type = None + self.B = None + self.H = None + + def initialize(self, n, approx_type): + """Initialize internal matrix. + + Allocate internal memory for storing and updating + the Hessian or its inverse. + + Parameters + ---------- + n : int + Problem dimension. + approx_type : {'hess', 'inv_hess'} + Selects either the Hessian or the inverse Hessian. + When set to 'hess' the Hessian will be stored and updated. + When set to 'inv_hess' its inverse will be used instead. + """ + self.first_iteration = True + self.n = n + self.approx_type = approx_type + if approx_type not in ('hess', 'inv_hess'): + raise ValueError("`approx_type` must be 'hess' or 'inv_hess'.") + # Create matrix + if self.approx_type == 'hess': + self.B = np.eye(n, dtype=float) + else: + self.H = np.eye(n, dtype=float) + + def _auto_scale(self, delta_x, delta_grad): + # Heuristic to scale matrix at first iteration. + # Described in Nocedal and Wright "Numerical Optimization" + # p.143 formula (6.20). + s_norm2 = np.dot(delta_x, delta_x) + y_norm2 = np.dot(delta_grad, delta_grad) + ys = np.abs(np.dot(delta_grad, delta_x)) + if ys == 0.0 or y_norm2 == 0 or s_norm2 == 0: + return 1 + if self.approx_type == 'hess': + return y_norm2 / ys + else: + return ys / y_norm2 + + def _update_implementation(self, delta_x, delta_grad): + raise NotImplementedError("The method ``_update_implementation``" + " is not implemented.") + + def update(self, delta_x, delta_grad): + """Update internal matrix. 
+ + Update Hessian matrix or its inverse (depending on how 'approx_type' + is defined) using information about the last evaluated points. + + Parameters + ---------- + delta_x : ndarray + The difference between two points the gradient + function have been evaluated at: ``delta_x = x2 - x1``. + delta_grad : ndarray + The difference between the gradients: + ``delta_grad = grad(x2) - grad(x1)``. + """ + if np.all(delta_x == 0.0): + return + if np.all(delta_grad == 0.0): + warn('delta_grad == 0.0. Check if the approximated ' + 'function is linear. If the function is linear ' + 'better results can be obtained by defining the ' + 'Hessian as zero instead of using quasi-Newton ' + 'approximations.', + UserWarning, stacklevel=2) + return + if self.first_iteration: + # Get user specific scale + if self.init_scale == "auto": + scale = self._auto_scale(delta_x, delta_grad) + else: + scale = float(self.init_scale) + # Scale initial matrix with ``scale * np.eye(n)`` + if self.approx_type == 'hess': + self.B *= scale + else: + self.H *= scale + self.first_iteration = False + self._update_implementation(delta_x, delta_grad) + + def dot(self, p): + """Compute the product of the internal matrix with the given vector. + + Parameters + ---------- + p : array_like + 1-D array representing a vector. + + Returns + ------- + Hp : array + 1-D represents the result of multiplying the approximation matrix + by vector p. + """ + if self.approx_type == 'hess': + return self._symv(1, self.B, p) + else: + return self._symv(1, self.H, p) + + def get_matrix(self): + """Return the current internal matrix. + + Returns + ------- + M : ndarray, shape (n, n) + Dense matrix containing either the Hessian or its inverse + (depending on how `approx_type` was defined). + """ + if self.approx_type == 'hess': + M = np.copy(self.B) + else: + M = np.copy(self.H) + li = np.tril_indices_from(M, k=-1) + M[li] = M.T[li] + return M + + +class BFGS(FullHessianUpdateStrategy): + """Broyden-Fletcher-Goldfarb-Shanno (BFGS) Hessian update strategy. + + Parameters + ---------- + exception_strategy : {'skip_update', 'damp_update'}, optional + Define how to proceed when the curvature condition is violated. + Set it to 'skip_update' to just skip the update. Or, alternatively, + set it to 'damp_update' to interpolate between the actual BFGS + result and the unmodified matrix. Both exceptions strategies + are explained in [1]_, p.536-537. + min_curvature : float + This number, scaled by a normalization factor, defines the + minimum curvature ``dot(delta_grad, delta_x)`` allowed to go + unaffected by the exception strategy. By default is equal to + 1e-8 when ``exception_strategy = 'skip_update'`` and equal + to 0.2 when ``exception_strategy = 'damp_update'``. + init_scale : {float, 'auto'} + Matrix scale at first iteration. At the first + iteration the Hessian matrix or its inverse will be initialized + with ``init_scale*np.eye(n)``, where ``n`` is the problem dimension. + Set it to 'auto' in order to use an automatic heuristic for choosing + the initial scale. The heuristic is described in [1]_, p.143. + By default uses 'auto'. + + Notes + ----- + The update is based on the description in [1]_, p.140. + + References + ---------- + .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization" + Second Edition (2006). 
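+
+    Examples
+    --------
+    A minimal sketch of the update interface (the numbers below are
+    arbitrary illustrations, not reference output):
+
+    >>> import numpy as np
+    >>> from scipy.optimize import BFGS
+    >>> hess = BFGS()
+    >>> hess.initialize(n=2, approx_type='hess')
+    >>> hess.update(delta_x=np.array([1.0, 0.5]),
+    ...             delta_grad=np.array([0.3, 0.1]))
+    >>> B = hess.get_matrix()  # dense (2, 2) Hessian approximation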
+ """ + + def __init__(self, exception_strategy='skip_update', min_curvature=None, + init_scale='auto'): + if exception_strategy == 'skip_update': + if min_curvature is not None: + self.min_curvature = min_curvature + else: + self.min_curvature = 1e-8 + elif exception_strategy == 'damp_update': + if min_curvature is not None: + self.min_curvature = min_curvature + else: + self.min_curvature = 0.2 + else: + raise ValueError("`exception_strategy` must be 'skip_update' " + "or 'damp_update'.") + + super().__init__(init_scale) + self.exception_strategy = exception_strategy + + def _update_inverse_hessian(self, ys, Hy, yHy, s): + """Update the inverse Hessian matrix. + + BFGS update using the formula: + + ``H <- H + ((H*y).T*y + s.T*y)/(s.T*y)^2 * (s*s.T) + - 1/(s.T*y) * ((H*y)*s.T + s*(H*y).T)`` + + where ``s = delta_x`` and ``y = delta_grad``. This formula is + equivalent to (6.17) in [1]_ written in a more efficient way + for implementation. + + References + ---------- + .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization" + Second Edition (2006). + """ + self.H = self._syr2(-1.0 / ys, s, Hy, a=self.H) + self.H = self._syr((ys+yHy)/ys**2, s, a=self.H) + + def _update_hessian(self, ys, Bs, sBs, y): + """Update the Hessian matrix. + + BFGS update using the formula: + + ``B <- B - (B*s)*(B*s).T/s.T*(B*s) + y*y^T/s.T*y`` + + where ``s`` is short for ``delta_x`` and ``y`` is short + for ``delta_grad``. Formula (6.19) in [1]_. + + References + ---------- + .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization" + Second Edition (2006). + """ + self.B = self._syr(1.0 / ys, y, a=self.B) + self.B = self._syr(-1.0 / sBs, Bs, a=self.B) + + def _update_implementation(self, delta_x, delta_grad): + # Auxiliary variables w and z + if self.approx_type == 'hess': + w = delta_x + z = delta_grad + else: + w = delta_grad + z = delta_x + # Do some common operations + wz = np.dot(w, z) + Mw = self.dot(w) + wMw = Mw.dot(w) + # Guarantee that wMw > 0 by reinitializing matrix. + # While this is always true in exact arithmetic, + # indefinite matrix may appear due to roundoff errors. + if wMw <= 0.0: + scale = self._auto_scale(delta_x, delta_grad) + # Reinitialize matrix + if self.approx_type == 'hess': + self.B = scale * np.eye(self.n, dtype=float) + else: + self.H = scale * np.eye(self.n, dtype=float) + # Do common operations for new matrix + Mw = self.dot(w) + wMw = Mw.dot(w) + # Check if curvature condition is violated + if wz <= self.min_curvature * wMw: + # If the option 'skip_update' is set + # we just skip the update when the condition + # is violated. + if self.exception_strategy == 'skip_update': + return + # If the option 'damp_update' is set we + # interpolate between the actual BFGS + # result and the unmodified matrix. + elif self.exception_strategy == 'damp_update': + update_factor = (1-self.min_curvature) / (1 - wz/wMw) + z = update_factor*z + (1-update_factor)*Mw + wz = np.dot(w, z) + # Update matrix + if self.approx_type == 'hess': + self._update_hessian(wz, Mw, wMw, z) + else: + self._update_inverse_hessian(wz, Mw, wMw, z) + + +class SR1(FullHessianUpdateStrategy): + """Symmetric-rank-1 Hessian update strategy. + + Parameters + ---------- + min_denominator : float + This number, scaled by a normalization factor, + defines the minimum denominator magnitude allowed + in the update. When the condition is violated we skip + the update. By default uses ``1e-8``. + init_scale : {float, 'auto'}, optional + Matrix scale at first iteration. 
At the first + iteration the Hessian matrix or its inverse will be initialized + with ``init_scale*np.eye(n)``, where ``n`` is the problem dimension. + Set it to 'auto' in order to use an automatic heuristic for choosing + the initial scale. The heuristic is described in [1]_, p.143. + By default uses 'auto'. + + Notes + ----- + The update is based on the description in [1]_, p.144-146. + + References + ---------- + .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization" + Second Edition (2006). + """ + + def __init__(self, min_denominator=1e-8, init_scale='auto'): + self.min_denominator = min_denominator + super().__init__(init_scale) + + def _update_implementation(self, delta_x, delta_grad): + # Auxiliary variables w and z + if self.approx_type == 'hess': + w = delta_x + z = delta_grad + else: + w = delta_grad + z = delta_x + # Do some common operations + Mw = self.dot(w) + z_minus_Mw = z - Mw + denominator = np.dot(w, z_minus_Mw) + # If the denominator is too small + # we just skip the update. + if np.abs(denominator) <= self.min_denominator*norm(w)*norm(z_minus_Mw): + return + # Update matrix + if self.approx_type == 'hess': + self.B = self._syr(1/denominator, z_minus_Mw, a=self.B) + else: + self.H = self._syr(1/denominator, z_minus_Mw, a=self.H) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_isotonic.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_isotonic.py new file mode 100644 index 0000000000000000000000000000000000000000..929481e0226183f434d4b9de220e372197b1287a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_isotonic.py @@ -0,0 +1,158 @@ +from __future__ import annotations +from typing import TYPE_CHECKING + +import numpy as np + +from ._optimize import OptimizeResult +from ._pava_pybind import pava + +if TYPE_CHECKING: + import numpy.typing as npt + + +__all__ = ["isotonic_regression"] + + +def isotonic_regression( + y: npt.ArrayLike, + *, + weights: npt.ArrayLike | None = None, + increasing: bool = True, +) -> OptimizeResult: + r"""Nonparametric isotonic regression. + + A (not strictly) monotonically increasing array `x` with the same length + as `y` is calculated by the pool adjacent violators algorithm (PAVA), see + [1]_. See the Notes section for more details. + + Parameters + ---------- + y : (N,) array_like + Response variable. + weights : (N,) array_like or None + Case weights. + increasing : bool + If True, fit monotonic increasing, i.e. isotonic, regression. + If False, fit a monotonic decreasing, i.e. antitonic, regression. + Default is True. + + Returns + ------- + res : OptimizeResult + The optimization result represented as a ``OptimizeResult`` object. + Important attributes are: + + - ``x``: The isotonic regression solution, i.e. an increasing (or + decreasing) array of the same length than y, with elements in the + range from min(y) to max(y). + - ``weights`` : Array with the sum of case weights for each block + (or pool) B. + - ``blocks``: Array of length B+1 with the indices of the start + positions of each block (or pool) B. The j-th block is given by + ``x[blocks[j]:blocks[j+1]]`` for which all values are the same. + + Notes + ----- + Given data :math:`y` and case weights :math:`w`, the isotonic regression + solves the following optimization problem: + + .. math:: + + \operatorname{argmin}_{x_i} \sum_i w_i (y_i - x_i)^2 \quad + \text{subject to } x_i \leq x_j \text{ whenever } i \leq j \,. 
+ + For every input value :math:`y_i`, it generates a value :math:`x_i` such + that :math:`x` is increasing (but not strictly), i.e. + :math:`x_i \leq x_{i+1}`. This is accomplished by the PAVA. + The solution consists of pools or blocks, i.e. neighboring elements of + :math:`x`, e.g. :math:`x_i` and :math:`x_{i+1}`, that all have the same + value. + + Most interestingly, the solution stays the same if the squared loss is + replaced by the wide class of Bregman functions which are the unique + class of strictly consistent scoring functions for the mean, see [2]_ + and references therein. + + The implemented version of PAVA according to [1]_ has a computational + complexity of O(N) with input size N. + + References + ---------- + .. [1] Busing, F. M. T. A. (2022). + Monotone Regression: A Simple and Fast O(n) PAVA Implementation. + Journal of Statistical Software, Code Snippets, 102(1), 1-25. + :doi:`10.18637/jss.v102.c01` + .. [2] Jordan, A.I., Mühlemann, A. & Ziegel, J.F. + Characterizing the optimal solutions to the isotonic regression + problem for identifiable functionals. + Ann Inst Stat Math 74, 489-514 (2022). + :doi:`10.1007/s10463-021-00808-0` + + Examples + -------- + This example demonstrates that ``isotonic_regression`` really solves a + constrained optimization problem. + + >>> import numpy as np + >>> from scipy.optimize import isotonic_regression, minimize + >>> y = [1.5, 1.0, 4.0, 6.0, 5.7, 5.0, 7.8, 9.0, 7.5, 9.5, 9.0] + >>> def objective(yhat, y): + ... return np.sum((yhat - y)**2) + >>> def constraint(yhat, y): + ... # This is for a monotonically increasing regression. + ... return np.diff(yhat) + >>> result = minimize(objective, x0=y, args=(y,), + ... constraints=[{'type': 'ineq', + ... 'fun': lambda x: constraint(x, y)}]) + >>> result.x + array([1.25 , 1.25 , 4. , 5.56666667, 5.56666667, + 5.56666667, 7.8 , 8.25 , 8.25 , 9.25 , + 9.25 ]) + >>> result = isotonic_regression(y) + >>> result.x + array([1.25 , 1.25 , 4. , 5.56666667, 5.56666667, + 5.56666667, 7.8 , 8.25 , 8.25 , 9.25 , + 9.25 ]) + + The big advantage of ``isotonic_regression`` compared to calling + ``minimize`` is that it is more user friendly, i.e. one does not need to + define objective and constraint functions, and that it is orders of + magnitudes faster. On commodity hardware (in 2023), for normal distributed + input y of length 1000, the minimizer takes about 4 seconds, while + ``isotonic_regression`` takes about 200 microseconds. + """ + yarr = np.asarray(y) # Check yarr.ndim == 1 is implicit (pybind11) in pava. + if weights is None: + warr = np.ones_like(yarr) + else: + warr = np.asarray(weights) + + if not (yarr.ndim == warr.ndim == 1 and yarr.shape[0] == warr.shape[0]): + raise ValueError( + "Input arrays y and w must have one dimension of equal length." + ) + if np.any(warr <= 0): + raise ValueError("Weights w must be strictly positive.") + + order = slice(None) if increasing else slice(None, None, -1) + x = np.array(yarr[order], order="C", dtype=np.float64, copy=True) + wx = np.array(warr[order], order="C", dtype=np.float64, copy=True) + n = x.shape[0] + r = np.full(shape=n + 1, fill_value=-1, dtype=np.intp) + x, wx, r, b = pava(x, wx, r) + # Now that we know the number of blocks b, we only keep the relevant part + # of r and wx. + # As information: Due to the pava implementation, after the last block + # index, there might be smaller numbers appended to r, e.g. + # r = [0, 10, 8, 7] which in the end should be r = [0, 10]. 
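+    # E.g. with r = [0, 10, 8, 7], b = 1 block, so r[:b + 1] = [0, 10]:
+    # the single block spans x[0:10].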
+ r = r[:b + 1] + wx = wx[:b] + if not increasing: + x = x[::-1] + wx = wx[::-1] + r = r[-1] - r[::-1] + return OptimizeResult( + x=x, + weights=wx, + blocks=r, + ) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linprog_ip.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linprog_ip.py new file mode 100644 index 0000000000000000000000000000000000000000..73bca3037f0e548f2420ba6be220446e94ddeb69 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linprog_ip.py @@ -0,0 +1,1126 @@ +"""Interior-point method for linear programming + +The *interior-point* method uses the primal-dual path following algorithm +outlined in [1]_. This algorithm supports sparse constraint matrices and +is typically faster than the simplex methods, especially for large, sparse +problems. Note, however, that the solution returned may be slightly less +accurate than those of the simplex methods and will not, in general, +correspond with a vertex of the polytope defined by the constraints. + + .. versionadded:: 1.0.0 + +References +---------- +.. [1] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. +""" +# Author: Matt Haberland + +import numpy as np +import scipy as sp +import scipy.sparse as sps +from warnings import warn +from scipy.linalg import LinAlgError +from ._optimize import OptimizeWarning, OptimizeResult, _check_unknown_options +from ._linprog_util import _postsolve +has_umfpack = True +has_cholmod = True +try: + import sksparse # noqa: F401 + from sksparse.cholmod import cholesky as cholmod # noqa: F401 + from sksparse.cholmod import analyze as cholmod_analyze +except ImportError: + has_cholmod = False +try: + import scikits.umfpack # test whether to use factorized # noqa: F401 +except ImportError: + has_umfpack = False + + +def _get_solver(M, sparse=False, lstsq=False, sym_pos=True, + cholesky=True, permc_spec='MMD_AT_PLUS_A'): + """ + Given solver options, return a handle to the appropriate linear system + solver. + + Parameters + ---------- + M : 2-D array + As defined in [4] Equation 8.31 + sparse : bool (default = False) + True if the system to be solved is sparse. This is typically set + True when the original ``A_ub`` and ``A_eq`` arrays are sparse. + lstsq : bool (default = False) + True if the system is ill-conditioned and/or (nearly) singular and + thus a more robust least-squares solver is desired. This is sometimes + needed as the solution is approached. + sym_pos : bool (default = True) + True if the system matrix is symmetric positive definite + Sometimes this needs to be set false as the solution is approached, + even when the system should be symmetric positive definite, due to + numerical difficulties. + cholesky : bool (default = True) + True if the system is to be solved by Cholesky, rather than LU, + decomposition. This is typically faster unless the problem is very + small or prone to numerical difficulties. + permc_spec : str (default = 'MMD_AT_PLUS_A') + Sparsity preservation strategy used by SuperLU. Acceptable values are: + + - ``NATURAL``: natural ordering. + - ``MMD_ATA``: minimum degree ordering on the structure of A^T A. + - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A. + - ``COLAMD``: approximate minimum degree column ordering. + + See SuperLU documentation. 
+
+    Returns
+    -------
+    solve : function
+        Handle to the appropriate solver function
+
+    """
+    try:
+        if sparse:
+            if lstsq:
+                def solve(r, sym_pos=False):
+                    return sps.linalg.lsqr(M, r)[0]
+            elif cholesky:
+                try:
+                    # Will raise an exception in the first call,
+                    # or when the matrix changes due to a new problem
+                    _get_solver.cholmod_factor.cholesky_inplace(M)
+                except Exception:
+                    _get_solver.cholmod_factor = cholmod_analyze(M)
+                    _get_solver.cholmod_factor.cholesky_inplace(M)
+                solve = _get_solver.cholmod_factor
+            else:
+                if has_umfpack and sym_pos:
+                    solve = sps.linalg.factorized(M)
+                else:  # factorized doesn't pass permc_spec
+                    solve = sps.linalg.splu(M, permc_spec=permc_spec).solve
+
+        else:
+            if lstsq:  # sometimes necessary as solution is approached
+                def solve(r):
+                    return sp.linalg.lstsq(M, r)[0]
+            elif cholesky:
+                L = sp.linalg.cho_factor(M)
+
+                def solve(r):
+                    return sp.linalg.cho_solve(L, r)
+            else:
+                # this seems to cache the matrix factorization, so solving
+                # with multiple right hand sides is much faster
+                def solve(r, sym_pos=sym_pos):
+                    if sym_pos:
+                        return sp.linalg.solve(M, r, assume_a="pos")
+                    else:
+                        return sp.linalg.solve(M, r)
+    # There are many things that can go wrong here, and it's hard to say
+    # what all of them are. It doesn't really matter: if the matrix can't be
+    # factorized, return None. _get_solver will be called again with
+    # different inputs, and a new routine will try to factorize the matrix.
+    except KeyboardInterrupt:
+        raise
+    except Exception:
+        return None
+    return solve
+
+
+def _get_delta(A, b, c, x, y, z, tau, kappa, gamma, eta, sparse=False,
+               lstsq=False, sym_pos=True, cholesky=True, pc=True, ip=False,
+               permc_spec='MMD_AT_PLUS_A'):
+    """
+    Given the standard-form problem defined by ``A``, ``b``, and ``c``;
+    current variable estimates ``x``, ``y``, ``z``, ``tau``, and ``kappa``;
+    algorithmic parameters ``gamma`` and ``eta``;
+    and options ``sparse``, ``lstsq``, ``sym_pos``, ``cholesky``, ``pc``
+    (predictor-corrector), and ``ip`` (initial point improvement),
+    get the search direction for increments to the variable estimates.
+
+    Parameters
+    ----------
+    As defined in [4], except:
+    sparse : bool
+        True if the system to be solved is sparse. This is typically set
+        True when the original ``A_ub`` and ``A_eq`` arrays are sparse.
+    lstsq : bool
+        True if the system is ill-conditioned and/or (nearly) singular and
+        thus a more robust least-squares solver is desired. This is sometimes
+        needed as the solution is approached.
+    sym_pos : bool
+        True if the system matrix is symmetric positive definite.
+        Sometimes this needs to be set false as the solution is approached,
+        even when the system should be symmetric positive definite, due to
+        numerical difficulties.
+    cholesky : bool
+        True if the system is to be solved by Cholesky, rather than LU,
+        decomposition. This is typically faster unless the problem is very
+        small or prone to numerical difficulties.
+    pc : bool
+        True if the predictor-corrector method of Mehrotra is to be used.
+        This is almost always (if not always) beneficial. Even though it
+        requires the solution of an additional linear system, the
+        factorization is typically (implicitly) reused, so the solution is
+        efficient and the number of algorithm iterations is typically
+        reduced.
+    ip : bool
+        True if the improved initial point suggestion due to [4] Section 4.3
+        is desired. It's unclear whether this is beneficial.
+ permc_spec : str (default = 'MMD_AT_PLUS_A') + (Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos = + True``.) A matrix is factorized in each iteration of the algorithm. + This option specifies how to permute the columns of the matrix for + sparsity preservation. Acceptable values are: + + - ``NATURAL``: natural ordering. + - ``MMD_ATA``: minimum degree ordering on the structure of A^T A. + - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A. + - ``COLAMD``: approximate minimum degree column ordering. + + This option can impact the convergence of the + interior point algorithm; test different values to determine which + performs best for your problem. For more information, refer to + ``scipy.sparse.linalg.splu``. + + Returns + ------- + Search directions as defined in [4] + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + + """ + if A.shape[0] == 0: + # If there are no constraints, some solvers fail (understandably) + # rather than returning empty solution. This gets the job done. + sparse, lstsq, sym_pos, cholesky = False, False, True, False + n_x = len(x) + + # [4] Equation 8.8 + r_P = b * tau - A.dot(x) + r_D = c * tau - A.T.dot(y) - z + r_G = c.dot(x) - b.transpose().dot(y) + kappa + mu = (x.dot(z) + tau * kappa) / (n_x + 1) + + # Assemble M from [4] Equation 8.31 + Dinv = x / z + + if sparse: + M = A.dot(sps.diags(Dinv, 0, format="csc").dot(A.T)) + else: + M = A.dot(Dinv.reshape(-1, 1) * A.T) + solve = _get_solver(M, sparse, lstsq, sym_pos, cholesky, permc_spec) + + # pc: "predictor-corrector" [4] Section 4.1 + # In development this option could be turned off + # but it always seems to improve performance substantially + n_corrections = 1 if pc else 0 + + i = 0 + alpha, d_x, d_z, d_tau, d_kappa = 0, 0, 0, 0, 0 + while i <= n_corrections: + # Reference [4] Eq. 8.6 + rhatp = eta(gamma) * r_P + rhatd = eta(gamma) * r_D + rhatg = eta(gamma) * r_G + + # Reference [4] Eq. 8.7 + rhatxs = gamma * mu - x * z + rhattk = gamma * mu - tau * kappa + + if i == 1: + if ip: # if the correction is to get "initial point" + # Reference [4] Eq. 8.23 + rhatxs = ((1 - alpha) * gamma * mu - + x * z - alpha**2 * d_x * d_z) + rhattk = ((1 - alpha) * gamma * mu - + tau * kappa - + alpha**2 * d_tau * d_kappa) + else: # if the correction is for "predictor-corrector" + # Reference [4] Eq. 8.13 + rhatxs -= d_x * d_z + rhattk -= d_tau * d_kappa + + # sometimes numerical difficulties arise as the solution is approached + # this loop tries to solve the equations using a sequence of functions + # for solve. For dense systems, the order is: + # 1. scipy.linalg.cho_factor/scipy.linalg.cho_solve, + # 2. scipy.linalg.solve w/ sym_pos = True, + # 3. scipy.linalg.solve w/ sym_pos = False, and if all else fails + # 4. scipy.linalg.lstsq + # For sparse systems, the order is: + # 1. sksparse.cholmod.cholesky (if available) + # 2. scipy.sparse.linalg.factorized (if umfpack available) + # 3. scipy.sparse.linalg.splu + # 4. 
scipy.sparse.linalg.lsqr + solved = False + while not solved: + try: + # [4] Equation 8.28 + p, q = _sym_solve(Dinv, A, c, b, solve) + # [4] Equation 8.29 + u, v = _sym_solve(Dinv, A, rhatd - + (1 / x) * rhatxs, rhatp, solve) + if np.any(np.isnan(p)) or np.any(np.isnan(q)): + raise LinAlgError + solved = True + except (LinAlgError, ValueError, TypeError) as e: + # Usually this doesn't happen. If it does, it happens when + # there are redundant constraints or when approaching the + # solution. If so, change solver. + if cholesky: + cholesky = False + warn( + "Solving system with option 'cholesky':True " + "failed. It is normal for this to happen " + "occasionally, especially as the solution is " + "approached. However, if you see this frequently, " + "consider setting option 'cholesky' to False.", + OptimizeWarning, stacklevel=5) + elif sym_pos: + sym_pos = False + warn( + "Solving system with option 'sym_pos':True " + "failed. It is normal for this to happen " + "occasionally, especially as the solution is " + "approached. However, if you see this frequently, " + "consider setting option 'sym_pos' to False.", + OptimizeWarning, stacklevel=5) + elif not lstsq: + lstsq = True + warn( + "Solving system with option 'sym_pos':False " + "failed. This may happen occasionally, " + "especially as the solution is " + "approached. However, if you see this frequently, " + "your problem may be numerically challenging. " + "If you cannot improve the formulation, consider " + "setting 'lstsq' to True. Consider also setting " + "`presolve` to True, if it is not already.", + OptimizeWarning, stacklevel=5) + else: + raise e + solve = _get_solver(M, sparse, lstsq, sym_pos, + cholesky, permc_spec) + # [4] Results after 8.29 + d_tau = ((rhatg + 1 / tau * rhattk - (-c.dot(u) + b.dot(v))) / + (1 / tau * kappa + (-c.dot(p) + b.dot(q)))) + d_x = u + p * d_tau + d_y = v + q * d_tau + + # [4] Relations between after 8.25 and 8.26 + d_z = (1 / x) * (rhatxs - z * d_x) + d_kappa = 1 / tau * (rhattk - kappa * d_tau) + + # [4] 8.12 and "Let alpha be the maximal possible step..." before 8.23 + alpha = _get_step(x, d_x, z, d_z, tau, d_tau, kappa, d_kappa, 1) + if ip: # initial point - see [4] 4.4 + gamma = 10 + else: # predictor-corrector, [4] definition after 8.12 + beta1 = 0.1 # [4] pg. 220 (Table 8.1) + gamma = (1 - alpha)**2 * min(beta1, (1 - alpha)) + i += 1 + + return d_x, d_y, d_z, d_tau, d_kappa + + +def _sym_solve(Dinv, A, r1, r2, solve): + """ + An implementation of [4] equation 8.31 and 8.32 + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + + """ + # [4] 8.31 + r = r2 + A.dot(Dinv * r1) + v = solve(r) + # [4] 8.32 + u = Dinv * (A.T.dot(v) - r1) + return u, v + + +def _get_step(x, d_x, z, d_z, tau, d_tau, kappa, d_kappa, alpha0): + """ + An implementation of [4] equation 8.21 + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. 
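+
+    Examples
+    --------
+    A small illustration of the ratio test (a sketch; the values below are
+    arbitrary, chosen only for this example): only components moving toward
+    zero limit the step, so with ``x = [1, 2]`` and ``d_x = [-2, 1]`` the
+    binding primal ratio is ``1/2``, no dual component is decreasing, and
+    the step is capped at 0.5:
+
+    >>> import numpy as np
+    >>> x, d_x = np.array([1., 2.]), np.array([-2., 1.])
+    >>> z, d_z = np.array([1., 1.]), np.array([1., 1.])
+    >>> float(_get_step(x, d_x, z, d_z, tau=1.0, d_tau=1.0,
+    ...                 kappa=1.0, d_kappa=1.0, alpha0=1.0))
+    0.5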
+ + """ + # [4] 4.3 Equation 8.21, ignoring 8.20 requirement + # same step is taken in primal and dual spaces + # alpha0 is basically beta3 from [4] Table 8.1, but instead of beta3 + # the value 1 is used in Mehrota corrector and initial point correction + i_x = d_x < 0 + i_z = d_z < 0 + alpha_x = alpha0 * np.min(x[i_x] / -d_x[i_x]) if np.any(i_x) else 1 + alpha_tau = alpha0 * tau / -d_tau if d_tau < 0 else 1 + alpha_z = alpha0 * np.min(z[i_z] / -d_z[i_z]) if np.any(i_z) else 1 + alpha_kappa = alpha0 * kappa / -d_kappa if d_kappa < 0 else 1 + alpha = np.min([1, alpha_x, alpha_tau, alpha_z, alpha_kappa]) + return alpha + + +def _get_message(status): + """ + Given problem status code, return a more detailed message. + + Parameters + ---------- + status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + Returns + ------- + message : str + A string descriptor of the exit status of the optimization. + + """ + messages = ( + ["Optimization terminated successfully.", + "The iteration limit was reached before the algorithm converged.", + "The algorithm terminated successfully and determined that the " + "problem is infeasible.", + "The algorithm terminated successfully and determined that the " + "problem is unbounded.", + "Numerical difficulties were encountered before the problem " + "converged. Please check your problem formulation for errors, " + "independence of linear equality constraints, and reasonable " + "scaling and matrix condition numbers. If you continue to " + "encounter this error, please submit a bug report." + ]) + return messages[status] + + +def _do_step(x, y, z, tau, kappa, d_x, d_y, d_z, d_tau, d_kappa, alpha): + """ + An implementation of [4] Equation 8.9 + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + + """ + x = x + alpha * d_x + tau = tau + alpha * d_tau + z = z + alpha * d_z + kappa = kappa + alpha * d_kappa + y = y + alpha * d_y + return x, y, z, tau, kappa + + +def _get_blind_start(shape): + """ + Return the starting point from [4] 4.4 + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + + """ + m, n = shape + x0 = np.ones(n) + y0 = np.zeros(m) + z0 = np.ones(n) + tau0 = 1 + kappa0 = 1 + return x0, y0, z0, tau0, kappa0 + + +def _indicators(A, b, c, c0, x, y, z, tau, kappa): + """ + Implementation of several equations from [4] used as indicators of + the status of optimization. + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. 
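+
+    In the notation of the code below, the returned indicators are the
+    scaled residuals of [4] Section 4.5 (a plain-text restatement; a
+    trailing ``0`` marks evaluation at the blind starting point)::
+
+        rho_p  = ||r_p(x, tau)||      / max(1, ||r_p0||)
+        rho_d  = ||r_d(y, z, tau)||   / max(1, ||r_d0||)
+        rho_g  = ||r_g(x, y, kappa)|| / max(1, ||r_g0||)
+        rho_mu = mu(x, tau, z, kappa) / mu_0
+        rho_A  = ||c @ x - b @ y||    / (tau + ||b @ y||)
+
+    The normalization makes the indicators dimensionless, so a single
+    tolerance can be applied to all of them.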
+ + """ + + # residuals for termination are relative to initial values + x0, y0, z0, tau0, kappa0 = _get_blind_start(A.shape) + + # See [4], Section 4 - The Homogeneous Algorithm, Equation 8.8 + def r_p(x, tau): + return b * tau - A.dot(x) + + def r_d(y, z, tau): + return c * tau - A.T.dot(y) - z + + def r_g(x, y, kappa): + return kappa + c.dot(x) - b.dot(y) + + # np.dot unpacks if they are arrays of size one + def mu(x, tau, z, kappa): + return (x.dot(z) + np.dot(tau, kappa)) / (len(x) + 1) + + obj = c.dot(x / tau) + c0 + + def norm(a): + return np.linalg.norm(a) + + # See [4], Section 4.5 - The Stopping Criteria + r_p0 = r_p(x0, tau0) + r_d0 = r_d(y0, z0, tau0) + r_g0 = r_g(x0, y0, kappa0) + mu_0 = mu(x0, tau0, z0, kappa0) + rho_A = norm(c.T.dot(x) - b.T.dot(y)) / (tau + norm(b.T.dot(y))) + rho_p = norm(r_p(x, tau)) / max(1, norm(r_p0)) + rho_d = norm(r_d(y, z, tau)) / max(1, norm(r_d0)) + rho_g = norm(r_g(x, y, kappa)) / max(1, norm(r_g0)) + rho_mu = mu(x, tau, z, kappa) / mu_0 + return rho_p, rho_d, rho_A, rho_g, rho_mu, obj + + +def _display_iter(rho_p, rho_d, rho_g, alpha, rho_mu, obj, header=False): + """ + Print indicators of optimization status to the console. + + Parameters + ---------- + rho_p : float + The (normalized) primal feasibility, see [4] 4.5 + rho_d : float + The (normalized) dual feasibility, see [4] 4.5 + rho_g : float + The (normalized) duality gap, see [4] 4.5 + alpha : float + The step size, see [4] 4.3 + rho_mu : float + The (normalized) path parameter, see [4] 4.5 + obj : float + The objective function value of the current iterate + header : bool + True if a header is to be printed + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + + """ + if header: + print("Primal Feasibility ", + "Dual Feasibility ", + "Duality Gap ", + "Step ", + "Path Parameter ", + "Objective ") + + # no clue why this works + fmt = '{0:<20.13}{1:<20.13}{2:<20.13}{3:<17.13}{4:<20.13}{5:<20.13}' + print(fmt.format( + float(rho_p), + float(rho_d), + float(rho_g), + alpha if isinstance(alpha, str) else float(alpha), + float(rho_mu), + float(obj))) + + +def _ip_hsd(A, b, c, c0, alpha0, beta, maxiter, disp, tol, sparse, lstsq, + sym_pos, cholesky, pc, ip, permc_spec, callback, postsolve_args): + r""" + Solve a linear programming problem in standard form: + + Minimize:: + + c @ x + + Subject to:: + + A @ x == b + x >= 0 + + using the interior point method of [4]. + + Parameters + ---------- + A : 2-D array + 2-D array such that ``A @ x``, gives the values of the equality + constraints at ``x``. + b : 1-D array + 1-D array of values representing the RHS of each equality constraint + (row) in ``A`` (for standard form problem). + c : 1-D array + Coefficients of the linear objective function to be minimized (for + standard form problem). + c0 : float + Constant term in objective function due to fixed (and eliminated) + variables. (Purely for display.) + alpha0 : float + The maximal step size for Mehrota's predictor-corrector search + direction; see :math:`\beta_3`of [4] Table 8.1 + beta : float + The desired reduction of the path parameter :math:`\mu` (see [6]_) + maxiter : int + The maximum number of iterations of the algorithm. + disp : bool + Set to ``True`` if indicators of optimization status are to be printed + to the console each iteration. 
+ tol : float + Termination tolerance; see [4]_ Section 4.5. + sparse : bool + Set to ``True`` if the problem is to be treated as sparse. However, + the inputs ``A_eq`` and ``A_ub`` should nonetheless be provided as + (dense) arrays rather than sparse matrices. + lstsq : bool + Set to ``True`` if the problem is expected to be very poorly + conditioned. This should always be left as ``False`` unless severe + numerical difficulties are frequently encountered, and a better option + would be to improve the formulation of the problem. + sym_pos : bool + Leave ``True`` if the problem is expected to yield a well conditioned + symmetric positive definite normal equation matrix (almost always). + cholesky : bool + Set to ``True`` if the normal equations are to be solved by explicit + Cholesky decomposition followed by explicit forward/backward + substitution. This is typically faster for moderate, dense problems + that are numerically well-behaved. + pc : bool + Leave ``True`` if the predictor-corrector method of Mehrota is to be + used. This is almost always (if not always) beneficial. + ip : bool + Set to ``True`` if the improved initial point suggestion due to [4]_ + Section 4.3 is desired. It's unclear whether this is beneficial. + permc_spec : str (default = 'MMD_AT_PLUS_A') + (Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos = + True``.) A matrix is factorized in each iteration of the algorithm. + This option specifies how to permute the columns of the matrix for + sparsity preservation. Acceptable values are: + + - ``NATURAL``: natural ordering. + - ``MMD_ATA``: minimum degree ordering on the structure of A^T A. + - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A. + - ``COLAMD``: approximate minimum degree column ordering. + + This option can impact the convergence of the + interior point algorithm; test different values to determine which + performs best for your problem. For more information, refer to + ``scipy.sparse.linalg.splu``. + callback : callable, optional + If a callback function is provided, it will be called within each + iteration of the algorithm. The callback function must accept a single + `scipy.optimize.OptimizeResult` consisting of the following fields: + + x : 1-D array + Current solution vector + fun : float + Current value of the objective function + success : bool + True only when an algorithm has completed successfully, + so this is always False as the callback function is called + only while the algorithm is still iterating. + slack : 1-D array + The values of the slack variables. Each slack variable + corresponds to an inequality constraint. If the slack is zero, + the corresponding constraint is active. + con : 1-D array + The (nominally zero) residuals of the equality constraints, + that is, ``b - A_eq @ x`` + phase : int + The phase of the algorithm being executed. This is always + 1 for the interior-point method because it has only one phase. + status : int + For revised simplex, this is always 0 because if a different + status is detected, the algorithm terminates. + nit : int + The number of iterations performed. + message : str + A string descriptor of the exit status of the optimization. + postsolve_args : tuple + Data needed by _postsolve to convert the solution to the standard-form + problem into the solution to the original problem. + + Returns + ------- + x_hat : float + Solution vector (for standard form problem). 
+ status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + message : str + A string descriptor of the exit status of the optimization. + iteration : int + The number of iterations taken to solve the problem + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear + Programming based on Newton's Method." Unpublished Course Notes, + March 2004. Available 2/25/2017 at: + https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf + + """ + + iteration = 0 + + # default initial point + x, y, z, tau, kappa = _get_blind_start(A.shape) + + # first iteration is special improvement of initial point + ip = ip if pc else False + + # [4] 4.5 + rho_p, rho_d, rho_A, rho_g, rho_mu, obj = _indicators( + A, b, c, c0, x, y, z, tau, kappa) + go = rho_p > tol or rho_d > tol or rho_A > tol # we might get lucky : ) + + if disp: + _display_iter(rho_p, rho_d, rho_g, "-", rho_mu, obj, header=True) + if callback is not None: + x_o, fun, slack, con = _postsolve(x/tau, postsolve_args) + res = OptimizeResult({'x': x_o, 'fun': fun, 'slack': slack, + 'con': con, 'nit': iteration, 'phase': 1, + 'complete': False, 'status': 0, + 'message': "", 'success': False}) + callback(res) + + status = 0 + message = "Optimization terminated successfully." + + if sparse: + A = sps.csc_matrix(A) + + while go: + + iteration += 1 + + if ip: # initial point + # [4] Section 4.4 + gamma = 1 + + def eta(g): + return 1 + else: + # gamma = 0 in predictor step according to [4] 4.1 + # if predictor/corrector is off, use mean of complementarity [6] + # 5.1 / [4] Below Figure 10-4 + gamma = 0 if pc else beta * np.mean(z * x) + # [4] Section 4.1 + + def eta(g=gamma): + return 1 - g + + try: + # Solve [4] 8.6 and 8.7/8.13/8.23 + d_x, d_y, d_z, d_tau, d_kappa = _get_delta( + A, b, c, x, y, z, tau, kappa, gamma, eta, + sparse, lstsq, sym_pos, cholesky, pc, ip, permc_spec) + + if ip: # initial point + # [4] 4.4 + # Formula after 8.23 takes a full step regardless if this will + # take it negative + alpha = 1.0 + x, y, z, tau, kappa = _do_step( + x, y, z, tau, kappa, d_x, d_y, + d_z, d_tau, d_kappa, alpha) + x[x < 1] = 1 + z[z < 1] = 1 + tau = max(1, tau) + kappa = max(1, kappa) + ip = False # done with initial point + else: + # [4] Section 4.3 + alpha = _get_step(x, d_x, z, d_z, tau, + d_tau, kappa, d_kappa, alpha0) + # [4] Equation 8.9 + x, y, z, tau, kappa = _do_step( + x, y, z, tau, kappa, d_x, d_y, d_z, d_tau, d_kappa, alpha) + + except (LinAlgError, FloatingPointError, + ValueError, ZeroDivisionError): + # this can happen when sparse solver is used and presolve + # is turned off. Also observed ValueError in AppVeyor Python 3.6 + # Win32 build (PR #8676). I've never seen it otherwise. 
+ status = 4 + message = _get_message(status) + break + + # [4] 4.5 + rho_p, rho_d, rho_A, rho_g, rho_mu, obj = _indicators( + A, b, c, c0, x, y, z, tau, kappa) + go = rho_p > tol or rho_d > tol or rho_A > tol + + if disp: + _display_iter(rho_p, rho_d, rho_g, alpha, rho_mu, obj) + if callback is not None: + x_o, fun, slack, con = _postsolve(x/tau, postsolve_args) + res = OptimizeResult({'x': x_o, 'fun': fun, 'slack': slack, + 'con': con, 'nit': iteration, 'phase': 1, + 'complete': False, 'status': 0, + 'message': "", 'success': False}) + callback(res) + + # [4] 4.5 + inf1 = (rho_p < tol and rho_d < tol and rho_g < tol and tau < tol * + max(1, kappa)) + inf2 = rho_mu < tol and tau < tol * min(1, kappa) + if inf1 or inf2: + # [4] Lemma 8.4 / Theorem 8.3 + if b.transpose().dot(y) > tol: + status = 2 + else: # elif c.T.dot(x) < tol: ? Probably not necessary. + status = 3 + message = _get_message(status) + break + elif iteration >= maxiter: + status = 1 + message = _get_message(status) + break + + x_hat = x / tau + # [4] Statement after Theorem 8.2 + return x_hat, status, message, iteration + + +def _linprog_ip(c, c0, A, b, callback, postsolve_args, maxiter=1000, tol=1e-8, + disp=False, alpha0=.99995, beta=0.1, sparse=False, lstsq=False, + sym_pos=True, cholesky=None, pc=True, ip=False, + permc_spec='MMD_AT_PLUS_A', **unknown_options): + r""" + Minimize a linear objective function subject to linear + equality and non-negativity constraints using the interior point method + of [4]_. Linear programming is intended to solve problems + of the following form: + + Minimize:: + + c @ x + + Subject to:: + + A @ x == b + x >= 0 + + User-facing documentation is in _linprog_doc.py. + + Parameters + ---------- + c : 1-D array + Coefficients of the linear objective function to be minimized. + c0 : float + Constant term in objective function due to fixed (and eliminated) + variables. (Purely for display.) + A : 2-D array + 2-D array such that ``A @ x``, gives the values of the equality + constraints at ``x``. + b : 1-D array + 1-D array of values representing the right hand side of each equality + constraint (row) in ``A``. + callback : callable, optional + Callback function to be executed once per iteration. + postsolve_args : tuple + Data needed by _postsolve to convert the solution to the standard-form + problem into the solution to the original problem. + + Options + ------- + maxiter : int (default = 1000) + The maximum number of iterations of the algorithm. + tol : float (default = 1e-8) + Termination tolerance to be used for all termination criteria; + see [4]_ Section 4.5. + disp : bool (default = False) + Set to ``True`` if indicators of optimization status are to be printed + to the console each iteration. + alpha0 : float (default = 0.99995) + The maximal step size for Mehrota's predictor-corrector search + direction; see :math:`\beta_{3}` of [4]_ Table 8.1. + beta : float (default = 0.1) + The desired reduction of the path parameter :math:`\mu` (see [6]_) + when Mehrota's predictor-corrector is not in use (uncommon). + sparse : bool (default = False) + Set to ``True`` if the problem is to be treated as sparse after + presolve. If either ``A_eq`` or ``A_ub`` is a sparse matrix, + this option will automatically be set ``True``, and the problem + will be treated as sparse even during presolve. 
If your constraint + matrices contain mostly zeros and the problem is not very small (less + than about 100 constraints or variables), consider setting ``True`` + or providing ``A_eq`` and ``A_ub`` as sparse matrices. + lstsq : bool (default = False) + Set to ``True`` if the problem is expected to be very poorly + conditioned. This should always be left ``False`` unless severe + numerical difficulties are encountered. Leave this at the default + unless you receive a warning message suggesting otherwise. + sym_pos : bool (default = True) + Leave ``True`` if the problem is expected to yield a well conditioned + symmetric positive definite normal equation matrix + (almost always). Leave this at the default unless you receive + a warning message suggesting otherwise. + cholesky : bool (default = True) + Set to ``True`` if the normal equations are to be solved by explicit + Cholesky decomposition followed by explicit forward/backward + substitution. This is typically faster for problems + that are numerically well-behaved. + pc : bool (default = True) + Leave ``True`` if the predictor-corrector method of Mehrota is to be + used. This is almost always (if not always) beneficial. + ip : bool (default = False) + Set to ``True`` if the improved initial point suggestion due to [4]_ + Section 4.3 is desired. Whether this is beneficial or not + depends on the problem. + permc_spec : str (default = 'MMD_AT_PLUS_A') + (Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos = + True``, and no SuiteSparse.) + A matrix is factorized in each iteration of the algorithm. + This option specifies how to permute the columns of the matrix for + sparsity preservation. Acceptable values are: + + - ``NATURAL``: natural ordering. + - ``MMD_ATA``: minimum degree ordering on the structure of A^T A. + - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A. + - ``COLAMD``: approximate minimum degree column ordering. + + This option can impact the convergence of the + interior point algorithm; test different values to determine which + performs best for your problem. For more information, refer to + ``scipy.sparse.linalg.splu``. + unknown_options : dict + Optional arguments not used by this particular solver. If + `unknown_options` is non-empty a warning is issued listing all + unused options. + + Returns + ------- + x : 1-D array + Solution vector. + status : int + An integer representing the exit status of the optimization:: + + 0 : Optimization terminated successfully + 1 : Iteration limit reached + 2 : Problem appears to be infeasible + 3 : Problem appears to be unbounded + 4 : Serious numerical difficulties encountered + + message : str + A string descriptor of the exit status of the optimization. + iteration : int + The number of iterations taken to solve the problem. + + Notes + ----- + This method implements the algorithm outlined in [4]_ with ideas from [8]_ + and a structure inspired by the simpler methods of [6]_. + + The primal-dual path following method begins with initial 'guesses' of + the primal and dual variables of the standard form problem and iteratively + attempts to solve the (nonlinear) Karush-Kuhn-Tucker conditions for the + problem with a gradually reduced logarithmic barrier term added to the + objective. This particular implementation uses a homogeneous self-dual + formulation, which provides certificates of infeasibility or unboundedness + where applicable. 
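+
+    As a quick illustration, a small problem can be routed through this
+    implementation via the top-level interface (a minimal sketch; the
+    problem data are arbitrary, and the method name applies to SciPy
+    versions in which ``method='interior-point'`` is available):
+
+    >>> import numpy as np
+    >>> from scipy.optimize import linprog
+    >>> c = [-1., -2.]
+    >>> A_ub = [[1., 1.], [1., -1.]]
+    >>> b_ub = [2., 0.]
+    >>> res = linprog(c, A_ub=A_ub, b_ub=b_ub,
+    ...               method='interior-point')  # doctest: +SKIP
+    >>> np.round(res.x, 6)  # doctest: +SKIP
+    array([0., 2.])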
+ + The default initial point for the primal and dual variables is that + defined in [4]_ Section 4.4 Equation 8.22. Optionally (by setting initial + point option ``ip=True``), an alternate (potentially improved) starting + point can be calculated according to the additional recommendations of + [4]_ Section 4.4. + + A search direction is calculated using the predictor-corrector method + (single correction) proposed by Mehrota and detailed in [4]_ Section 4.1. + (A potential improvement would be to implement the method of multiple + corrections described in [4]_ Section 4.2.) In practice, this is + accomplished by solving the normal equations, [4]_ Section 5.1 Equations + 8.31 and 8.32, derived from the Newton equations [4]_ Section 5 Equations + 8.25 (compare to [4]_ Section 4 Equations 8.6-8.8). The advantage of + solving the normal equations rather than 8.25 directly is that the + matrices involved are symmetric positive definite, so Cholesky + decomposition can be used rather than the more expensive LU factorization. + + With default options, the solver used to perform the factorization depends + on third-party software availability and the conditioning of the problem. + + For dense problems, solvers are tried in the following order: + + 1. ``scipy.linalg.cho_factor`` + + 2. ``scipy.linalg.solve`` with option ``sym_pos=True`` + + 3. ``scipy.linalg.solve`` with option ``sym_pos=False`` + + 4. ``scipy.linalg.lstsq`` + + For sparse problems: + + 1. ``sksparse.cholmod.cholesky`` (if scikit-sparse and SuiteSparse are installed) + + 2. ``scipy.sparse.linalg.factorized`` + (if scikit-umfpack and SuiteSparse are installed) + + 3. ``scipy.sparse.linalg.splu`` (which uses SuperLU distributed with SciPy) + + 4. ``scipy.sparse.linalg.lsqr`` + + If the solver fails for any reason, successively more robust (but slower) + solvers are attempted in the order indicated. Attempting, failing, and + re-starting factorization can be time consuming, so if the problem is + numerically challenging, options can be set to bypass solvers that are + failing. Setting ``cholesky=False`` skips to solver 2, + ``sym_pos=False`` skips to solver 3, and ``lstsq=True`` skips + to solver 4 for both sparse and dense problems. + + Potential improvements for combatting issues associated with dense + columns in otherwise sparse problems are outlined in [4]_ Section 5.3 and + [10]_ Section 4.1-4.2; the latter also discusses the alleviation of + accuracy issues associated with the substitution approach to free + variables. + + After calculating the search direction, the maximum possible step size + that does not activate the non-negativity constraints is calculated, and + the smaller of this step size and unity is applied (as in [4]_ Section + 4.1.) [4]_ Section 4.3 suggests improvements for choosing the step size. + + The new point is tested according to the termination conditions of [4]_ + Section 4.5. The same tolerance, which can be set using the ``tol`` option, + is used for all checks. (A potential improvement would be to expose + the different tolerances to be set independently.) If optimality, + unboundedness, or infeasibility is detected, the solve procedure + terminates; otherwise it repeats. + + The expected problem formulation differs between the top level ``linprog`` + module and the method specific solvers. 
The method specific solvers expect a + problem in standard form: + + Minimize:: + + c @ x + + Subject to:: + + A @ x == b + x >= 0 + + Whereas the top level ``linprog`` module expects a problem of form: + + Minimize:: + + c @ x + + Subject to:: + + A_ub @ x <= b_ub + A_eq @ x == b_eq + lb <= x <= ub + + where ``lb = 0`` and ``ub = None`` unless set in ``bounds``. + + The original problem contains equality, upper-bound and variable constraints + whereas the method specific solver requires equality constraints and + variable non-negativity. + + ``linprog`` module converts the original problem to standard form by + converting the simple bounds to upper bound constraints, introducing + non-negative slack variables for inequality constraints, and expressing + unbounded variables as the difference between two non-negative variables. + + + References + ---------- + .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point + optimizer for linear programming: an implementation of the + homogeneous algorithm." High performance optimization. Springer US, + 2000. 197-232. + .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear + Programming based on Newton's Method." Unpublished Course Notes, + March 2004. Available 2/25/2017 at + https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf + .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear + programming." Mathematical Programming 71.2 (1995): 221-245. + .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear + programming." Athena Scientific 1 (1997): 997. + .. [10] Andersen, Erling D., et al. Implementation of interior point methods + for large scale linear programming. HEC/Universite de Geneve, 1996. + + """ + + _check_unknown_options(unknown_options) + + # These should be warnings, not errors + if (cholesky or cholesky is None) and sparse and not has_cholmod: + if cholesky: + warn("Sparse cholesky is only available with scikit-sparse. " + "Setting `cholesky = False`", + OptimizeWarning, stacklevel=3) + cholesky = False + + if sparse and lstsq: + warn("Option combination 'sparse':True and 'lstsq':True " + "is not recommended.", + OptimizeWarning, stacklevel=3) + + if lstsq and cholesky: + warn("Invalid option combination 'lstsq':True " + "and 'cholesky':True; option 'cholesky' has no effect when " + "'lstsq' is set True.", + OptimizeWarning, stacklevel=3) + + valid_permc_spec = ('NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A', 'COLAMD') + if permc_spec.upper() not in valid_permc_spec: + warn("Invalid permc_spec option: '" + str(permc_spec) + "'. " + "Acceptable values are 'NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A', " + "and 'COLAMD'. 
Reverting to default.", + OptimizeWarning, stacklevel=3) + permc_spec = 'MMD_AT_PLUS_A' + + # This can be an error + if not sym_pos and cholesky: + raise ValueError( + "Invalid option combination 'sym_pos':False " + "and 'cholesky':True: Cholesky decomposition is only possible " + "for symmetric positive definite matrices.") + + cholesky = cholesky or (cholesky is None and sym_pos and not lstsq) + + x, status, message, iteration = _ip_hsd(A, b, c, c0, alpha0, beta, + maxiter, disp, tol, sparse, + lstsq, sym_pos, cholesky, + pc, ip, permc_spec, callback, + postsolve_args) + + return x, status, message, iteration diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linprog_rs.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linprog_rs.py new file mode 100644 index 0000000000000000000000000000000000000000..826ceffce398a6f58bdfcd6264e2f14fc5f6f8ee --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_linprog_rs.py @@ -0,0 +1,572 @@ +"""Revised simplex method for linear programming + +The *revised simplex* method uses the method described in [1]_, except +that a factorization [2]_ of the basis matrix, rather than its inverse, +is efficiently maintained and used to solve the linear systems at each +iteration of the algorithm. + +.. versionadded:: 1.3.0 + +References +---------- +.. [1] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear + programming." Athena Scientific 1 (1997): 997. +.. [2] Bartels, Richard H. "A stabilization of the simplex method." + Journal in Numerische Mathematik 16.5 (1971): 414-434. + +""" +# Author: Matt Haberland + +import numpy as np +from numpy.linalg import LinAlgError + +from scipy.linalg import solve +from ._optimize import _check_unknown_options +from ._bglu_dense import LU +from ._bglu_dense import BGLU as BGLU +from ._linprog_util import _postsolve +from ._optimize import OptimizeResult + + +def _phase_one(A, b, x0, callback, postsolve_args, maxiter, tol, disp, + maxupdate, mast, pivot): + """ + The purpose of phase one is to find an initial basic feasible solution + (BFS) to the original problem. + + Generates an auxiliary problem with a trivial BFS and an objective that + minimizes infeasibility of the original problem. Solves the auxiliary + problem using the main simplex routine (phase two). This either yields + a BFS to the original problem or determines that the original problem is + infeasible. If feasible, phase one detects redundant rows in the original + constraint matrix and removes them, then chooses additional indices as + necessary to complete a basis/BFS for the original problem. + """ + + m, n = A.shape + status = 0 + + # generate auxiliary problem to get initial BFS + A, b, c, basis, x, status = _generate_auxiliary_problem(A, b, x0, tol) + + if status == 6: + residual = c.dot(x) + iter_k = 0 + return x, basis, A, b, residual, status, iter_k + + # solve auxiliary problem + phase_one_n = n + iter_k = 0 + x, basis, status, iter_k = _phase_two(c, A, x, basis, callback, + postsolve_args, + maxiter, tol, disp, + maxupdate, mast, pivot, + iter_k, phase_one_n) + + # check for infeasibility + residual = c.dot(x) + if status == 0 and residual > tol: + status = 2 + + # drive artificial variables out of basis + # TODO: test redundant row removal better + # TODO: make solve more efficient with BGLU? This could take a while. 
+ keep_rows = np.ones(m, dtype=bool) + for basis_column in basis[basis >= n]: + B = A[:, basis] + try: + basis_finder = np.abs(solve(B, A)) # inefficient + pertinent_row = np.argmax(basis_finder[:, basis_column]) + eligible_columns = np.ones(n, dtype=bool) + eligible_columns[basis[basis < n]] = 0 + eligible_column_indices = np.where(eligible_columns)[0] + index = np.argmax(basis_finder[:, :n] + [pertinent_row, eligible_columns]) + new_basis_column = eligible_column_indices[index] + if basis_finder[pertinent_row, new_basis_column] < tol: + keep_rows[pertinent_row] = False + else: + basis[basis == basis_column] = new_basis_column + except LinAlgError: + status = 4 + + # form solution to original problem + A = A[keep_rows, :n] + basis = basis[keep_rows] + x = x[:n] + m = A.shape[0] + return x, basis, A, b, residual, status, iter_k + + +def _get_more_basis_columns(A, basis): + """ + Called when the auxiliary problem terminates with artificial columns in + the basis, which must be removed and replaced with non-artificial + columns. Finds additional columns that do not make the matrix singular. + """ + m, n = A.shape + + # options for inclusion are those that aren't already in the basis + a = np.arange(m+n) + bl = np.zeros(len(a), dtype=bool) + bl[basis] = 1 + options = a[~bl] + options = options[options < n] # and they have to be non-artificial + + # form basis matrix + B = np.zeros((m, m)) + B[:, 0:len(basis)] = A[:, basis] + + if (basis.size > 0 and + np.linalg.matrix_rank(B[:, :len(basis)]) < len(basis)): + raise Exception("Basis has dependent columns") + + rank = 0 # just enter the loop + for i in range(n): # somewhat arbitrary, but we need another way out + # permute the options, and take as many as needed + new_basis = np.random.permutation(options)[:m-len(basis)] + B[:, len(basis):] = A[:, new_basis] # update the basis matrix + rank = np.linalg.matrix_rank(B) # check the rank + if rank == m: + break + + return np.concatenate((basis, new_basis)) + + +def _generate_auxiliary_problem(A, b, x0, tol): + """ + Modifies original problem to create an auxiliary problem with a trivial + initial basic feasible solution and an objective that minimizes + infeasibility in the original problem. + + Conceptually, this is done by stacking an identity matrix on the right of + the original constraint matrix, adding artificial variables to correspond + with each of these new columns, and generating a cost vector that is all + zeros except for ones corresponding with each of the new variables. + + A initial basic feasible solution is trivial: all variables are zero + except for the artificial variables, which are set equal to the + corresponding element of the right hand side `b`. + + Running the simplex method on this auxiliary problem drives all of the + artificial variables - and thus the cost - to zero if the original problem + is feasible. The original problem is declared infeasible otherwise. + + Much of the complexity below is to improve efficiency by using singleton + columns in the original problem where possible, thus generating artificial + variables only as necessary, and using an initial 'guess' basic feasible + solution. + """ + status = 0 + m, n = A.shape + + if x0 is not None: + x = x0 + else: + x = np.zeros(n) + + r = b - A@x # residual; this must be all zeros for feasibility + + A[r < 0] = -A[r < 0] # express problem with RHS positive for trivial BFS + b[r < 0] = -b[r < 0] # to the auxiliary problem + r[r < 0] *= -1 + + # Rows which we will need to find a trivial way to zero. 
+ # This should just be the rows where there is a nonzero residual. + # But then we would not necessarily have a column singleton in every row. + # This makes it difficult to find an initial basis. + if x0 is None: + nonzero_constraints = np.arange(m) + else: + nonzero_constraints = np.where(r > tol)[0] + + # these are (at least some of) the initial basis columns + basis = np.where(np.abs(x) > tol)[0] + + if len(nonzero_constraints) == 0 and len(basis) <= m: # already a BFS + c = np.zeros(n) + basis = _get_more_basis_columns(A, basis) + return A, b, c, basis, x, status + elif (len(nonzero_constraints) > m - len(basis) or + np.any(x < 0)): # can't get trivial BFS + c = np.zeros(n) + status = 6 + return A, b, c, basis, x, status + + # chooses existing columns appropriate for inclusion in initial basis + cols, rows = _select_singleton_columns(A, r) + + # find the rows we need to zero that we _can_ zero with column singletons + i_tofix = np.isin(rows, nonzero_constraints) + # these columns can't already be in the basis, though + # we are going to add them to the basis and change the corresponding x val + i_notinbasis = np.logical_not(np.isin(cols, basis)) + i_fix_without_aux = np.logical_and(i_tofix, i_notinbasis) + rows = rows[i_fix_without_aux] + cols = cols[i_fix_without_aux] + + # indices of the rows we can only zero with auxiliary variable + # these rows will get a one in each auxiliary column + arows = nonzero_constraints[np.logical_not( + np.isin(nonzero_constraints, rows))] + n_aux = len(arows) + acols = n + np.arange(n_aux) # indices of auxiliary columns + + basis_ng = np.concatenate((cols, acols)) # basis columns not from guess + basis_ng_rows = np.concatenate((rows, arows)) # rows we need to zero + + # add auxiliary singleton columns + A = np.hstack((A, np.zeros((m, n_aux)))) + A[arows, acols] = 1 + + # generate initial BFS + x = np.concatenate((x, np.zeros(n_aux))) + x[basis_ng] = r[basis_ng_rows]/A[basis_ng_rows, basis_ng] + + # generate costs to minimize infeasibility + c = np.zeros(n_aux + n) + c[acols] = 1 + + # basis columns correspond with nonzeros in guess, those with column + # singletons we used to zero remaining constraints, and any additional + # columns to get a full set (m columns) + basis = np.concatenate((basis, basis_ng)) + basis = _get_more_basis_columns(A, basis) # add columns as needed + + return A, b, c, basis, x, status + + +def _select_singleton_columns(A, b): + """ + Finds singleton columns for which the singleton entry is of the same sign + as the right-hand side; these columns are eligible for inclusion in an + initial basis. Determines the rows in which the singleton entries are + located. For each of these rows, returns the indices of the one singleton + column and its corresponding row. 
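+
+    For example (a small sketch; the arrays are invented for illustration):
+    in the matrix below, only the first two columns are singletons, and only
+    the first has an entry whose sign matches the corresponding right-hand
+    side, so a single (column, row) pair is returned.
+
+    >>> import numpy as np
+    >>> A = np.array([[1., 0., 2.],
+    ...               [0., 3., 4.]])
+    >>> b = np.array([1., -1.])
+    >>> _select_singleton_columns(A, b)
+    (array([0]), array([0]))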
+    """
+    # find indices of all singleton columns and corresponding row indices
+    column_indices = np.nonzero(np.sum(np.abs(A) != 0, axis=0) == 1)[0]
+    columns = A[:, column_indices]          # array of singleton columns
+    row_indices = np.zeros(len(column_indices), dtype=int)
+    nonzero_rows, nonzero_columns = np.nonzero(columns)
+    row_indices[nonzero_columns] = nonzero_rows   # corresponding row indices
+
+    # keep only singletons with entries that have same sign as RHS
+    # this is necessary because all elements of BFS must be non-negative
+    same_sign = A[row_indices, column_indices]*b[row_indices] >= 0
+    column_indices = column_indices[same_sign][::-1]
+    row_indices = row_indices[same_sign][::-1]
+    # Reversing the order so that steps below select rightmost columns
+    # for initial basis, which will tend to be slack variables. (If the
+    # guess corresponds with a basic feasible solution but a constraint
+    # is not satisfied with the corresponding slack variable zero, the slack
+    # variable must be basic.)
+
+    # for each row, keep rightmost singleton column with an entry in that row
+    unique_row_indices, first_columns = np.unique(row_indices,
+                                                  return_index=True)
+    return column_indices[first_columns], unique_row_indices
+
+
+def _find_nonzero_rows(A, tol):
+    """
+    Returns logical array indicating the locations of rows with at least
+    one nonzero element.
+    """
+    return np.any(np.abs(A) > tol, axis=1)
+
+
+def _select_enter_pivot(c_hat, bl, a, rule="bland", tol=1e-12):
+    """
+    Selects a pivot to enter the basis. Currently Bland's rule - the smallest
+    index that has a negative reduced cost - is the default.
+    """
+    if rule.lower() == "mrc":  # index with minimum reduced cost
+        return a[~bl][np.argmin(c_hat)]
+    else:  # smallest index w/ negative reduced cost
+        return a[~bl][c_hat < -tol][0]
+
+
+def _display_iter(phase, iteration, slack, con, fun):
+    """
+    Print indicators of optimization status to the console.
+    """
+    header = True if not iteration % 20 else False
+
+    if header:
+        print("Phase",
+              "Iteration",
+              "Minimum Slack      ",
+              "Constraint Residual",
+              "Objective          ")
+
+    # :<X.Y left aligns Y digits in X digit spaces
+    fmt = '{0:<6}{1:<10}{2:<20.13}{3:<20.13}{4:<20.13}'
+    try:
+        slack = np.min(slack)
+    except ValueError:
+        slack = "NA"
+    print(fmt.format(phase, iteration, slack, con, fun))
+
+
+def _display_and_callback(phase_one_n, x, postsolve_args, status,
+                          iteration, disp, callback):
+    if phase_one_n is not None:
+        phase = 1
+        x_postsolve = x[:phase_one_n]
+    else:
+        phase = 2
+        x_postsolve = x
+    x_o, fun, slack, con = _postsolve(x_postsolve, postsolve_args)
+
+    if callback is not None:
+        res = OptimizeResult({'x': x_o, 'fun': fun, 'slack': slack,
+                              'con': con, 'nit': iteration,
+                              'phase': phase, 'complete': False,
+                              'status': 0, 'message': "",
+                              'success': False})
+        callback(res)
+    if disp:
+        _display_iter(phase, iteration, slack, con, fun)
+
+
+def _phase_two(c, A, x, b, callback, postsolve_args, maxiter, tol, disp,
+               maxupdate, mast, pivot, iteration=0, phase_one_n=None):
+    """
+    The heart of the simplex method. Beginning with a basic feasible
+    solution, moves to adjacent basic feasible solutions with successively
+    lower reduced cost. Terminates when there are no basic feasible
+    solutions with lower reduced cost or if the problem is determined to be
+    unbounded.
+
+    This implementation follows the revised simplex method based on LU
+    decomposition: rather than maintaining a tableau or an inverse of the
+    basis matrix, we keep a factorization of the basis matrix that allows
+    efficient solution of linear systems while avoiding stability issues
+    associated with inverted matrices.
+    """
+    m, n = A.shape
+    status = 0
+    a = np.arange(n)                    # indices of columns of A
+    ab = np.arange(m)                   # indices of columns of B
+    if maxupdate:
+        # basis matrix factorization object; similar to B = A[:, b]
+        B = BGLU(A, b, maxupdate, mast)
+    else:
+        B = LU(A, b)
+
+    for iteration in range(iteration, iteration + maxiter):
+
+        if disp or callback is not None:
+            _display_and_callback(phase_one_n, x, postsolve_args, status,
+                                  iteration, disp, callback)
+
+        bl = np.zeros(len(a), dtype=bool)
+        bl[b] = 1
+
+        xb = x[b]                   # basic variables
+        cb = c[b]                   # basic costs
+
+        try:
+            v = B.solve(cb, transposed=True)  # similar to v = solve(B.T, cb)
+        except LinAlgError:
+            status = 4
+            break
+
+        c_hat = c - v.dot(A)        # reduced cost
+        c_hat = c_hat[~bl]
+
+        if np.all(c_hat >= -tol):  # all reduced costs positive -> terminate
+            break
+
+        j = _select_enter_pivot(c_hat, bl, a, rule=pivot, tol=tol)
+        u = B.solve(A[:, j])        # similar to u = solve(B, A[:, j])
+
+        i = u > tol                 # if none of the u are positive, unbounded
+        if not np.any(i):
+            status = 3
+            break
+
+        th = xb[i]/u[i]
+        l = np.argmin(th)           # implicitly selects smallest subscript
+        th_star = th[l]             # step size
+
+        x[b] = x[b] - th_star*u     # take step
+        x[j] = th_star
+        B.update(ab[i][l], j)       # modify basis
+        b = B.b                     # similar to b[ab[i][l]] = j
+
+    else:
+        # If the end of the for loop is reached (without a break statement),
+        # then another step has been taken, so the iteration counter should
+        # increment, info should be displayed, and callback should be called.
+        iteration += 1
+        status = 1
+        if disp or callback is not None:
+            _display_and_callback(phase_one_n, x, postsolve_args, status,
+                                  iteration, disp, callback)
+
+    return x, b, status, iteration
+
+
+def _linprog_rs(c, c0, A, b, x0, callback, postsolve_args,
+                maxiter=5000, tol=1e-12, disp=False,
+                maxupdate=10, mast=False, pivot="mrc",
+                **unknown_options):
+    """
+    Solve the following linear programming problem via a two-phase
+    revised simplex algorithm::
+
+        minimize:   c @ x
+
+        subject to: A @ x == b
+                    0 <= x < oo
+
+    User-facing documentation is in _linprog_doc.py.
+
+    Parameters
+    ----------
+    c : 1-D array
+        Coefficients of the linear objective function to be minimized. 
+ c0 : float + Constant term in objective function due to fixed (and eliminated) + variables. (Currently unused.) + A : 2-D array + 2-D array which, when matrix-multiplied by ``x``, gives the values of + the equality constraints at ``x``. + b : 1-D array + 1-D array of values representing the RHS of each equality constraint + (row) in ``A_eq``. + x0 : 1-D array, optional + Starting values of the independent variables, which will be refined by + the optimization algorithm. For the revised simplex method, these must + correspond with a basic feasible solution. + callback : callable, optional + If a callback function is provided, it will be called within each + iteration of the algorithm. The callback function must accept a single + `scipy.optimize.OptimizeResult` consisting of the following fields: + + x : 1-D array + Current solution vector. + fun : float + Current value of the objective function ``c @ x``. + success : bool + True only when an algorithm has completed successfully, + so this is always False as the callback function is called + only while the algorithm is still iterating. + slack : 1-D array + The values of the slack variables. Each slack variable + corresponds to an inequality constraint. If the slack is zero, + the corresponding constraint is active. + con : 1-D array + The (nominally zero) residuals of the equality constraints, + that is, ``b - A_eq @ x``. + phase : int + The phase of the algorithm being executed. + status : int + For revised simplex, this is always 0 because if a different + status is detected, the algorithm terminates. + nit : int + The number of iterations performed. + message : str + A string descriptor of the exit status of the optimization. + postsolve_args : tuple + Data needed by _postsolve to convert the solution to the standard-form + problem into the solution to the original problem. + + Options + ------- + maxiter : int + The maximum number of iterations to perform in either phase. + tol : float + The tolerance which determines when a solution is "close enough" to + zero in Phase 1 to be considered a basic feasible solution or close + enough to positive to serve as an optimal solution. + disp : bool + Set to ``True`` if indicators of optimization status are to be printed + to the console each iteration. + maxupdate : int + The maximum number of updates performed on the LU factorization. + After this many updates is reached, the basis matrix is factorized + from scratch. + mast : bool + Minimize Amortized Solve Time. If enabled, the average time to solve + a linear system using the basis factorization is measured. Typically, + the average solve time will decrease with each successive solve after + initial factorization, as factorization takes much more time than the + solve operation (and updates). Eventually, however, the updated + factorization becomes sufficiently complex that the average solve time + begins to increase. When this is detected, the basis is refactorized + from scratch. Enable this option to maximize speed at the risk of + nondeterministic behavior. Ignored if ``maxupdate`` is 0. + pivot : "mrc" or "bland" + Pivot rule: Minimum Reduced Cost (default) or Bland's rule. Choose + Bland's rule if iteration limit is reached and cycling is suspected. + unknown_options : dict + Optional arguments not used by this particular solver. If + `unknown_options` is non-empty a warning is issued listing all + unused options. + + Returns + ------- + x : 1-D array + Solution vector. 
+    status : int
+        An integer representing the exit status of the optimization::
+
+         0 : Optimization terminated successfully
+         1 : Iteration limit reached
+         2 : Problem appears to be infeasible
+         3 : Problem appears to be unbounded
+         4 : Numerical difficulties encountered
+         5 : No constraints; turn presolve on
+         6 : Guess x0 cannot be converted to a basic feasible solution
+
+    message : str
+        A string descriptor of the exit status of the optimization.
+    iteration : int
+        The number of iterations taken to solve the problem.
+    """
+
+    _check_unknown_options(unknown_options)
+
+    messages = ["Optimization terminated successfully.",
+                "Iteration limit reached.",
+                "The problem appears infeasible, as the phase one auxiliary "
+                "problem terminated successfully with a residual of {0:.1e}, "
+                "greater than the tolerance {1} required for the solution to "
+                "be considered feasible. Consider increasing the tolerance to "
+                "be greater than {0:.1e}. If this tolerance is unacceptably "
+                "large, the problem is likely infeasible.",
+                "The problem is unbounded, as the simplex algorithm found "
+                "a basic feasible solution from which there is a direction "
+                "with negative reduced cost in which all decision variables "
+                "increase.",
+                "Numerical difficulties encountered; consider trying "
+                "method='interior-point'.",
+                "Problems with no constraints are trivially solved; please "
+                "turn presolve on.",
+                "The guess x0 cannot be converted to a basic feasible "
+                "solution."
+                ]
+
+    if A.size == 0:  # address test_unbounded_below_no_presolve_corrected
+        return np.zeros(c.shape), 5, messages[5], 0
+
+    x, basis, A, b, residual, status, iteration = (
+        _phase_one(A, b, x0, callback, postsolve_args,
+                   maxiter, tol, disp, maxupdate, mast, pivot))
+
+    if status == 0:
+        x, basis, status, iteration = _phase_two(c, A, x, basis, callback,
+                                                 postsolve_args,
+                                                 maxiter, tol, disp,
+                                                 maxupdate, mast, pivot,
+                                                 iteration)
+
+    return x, status, messages[status].format(residual, tol), iteration
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsap.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsap.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..3113dfbb89439ae3c73682ba36114b036d5a43c6
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsap.cpython-310-x86_64-linux-gnu.so differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_milp.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_milp.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd9ecf52083f1312ad6fceff3e3917ff262d90ac
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_milp.py
@@ -0,0 +1,392 @@
+import warnings
+import numpy as np
+from scipy.sparse import csc_array, vstack, issparse
+from scipy._lib._util import VisibleDeprecationWarning
+from ._highs._highs_wrapper import _highs_wrapper  # type: ignore[import]
+from ._constraints import LinearConstraint, Bounds
+from ._optimize import OptimizeResult
+from ._linprog_highs import _highs_to_scipy_status_message
+
+
+def _constraints_to_components(constraints):
+    """
+    Convert sequence of constraints to a single set of components A, b_l, b_u.
+
+    `constraints` could be
+
+    1. A LinearConstraint
+    2. A tuple representing a LinearConstraint
+    3. An invalid object
+    4. A sequence composed entirely of objects of type 1/2
+    5. 
A sequence containing at least one object of type 3 + + We want to accept 1, 2, and 4 and reject 3 and 5. + """ + message = ("`constraints` (or each element within `constraints`) must be " + "convertible into an instance of " + "`scipy.optimize.LinearConstraint`.") + As = [] + b_ls = [] + b_us = [] + + # Accept case 1 by standardizing as case 4 + if isinstance(constraints, LinearConstraint): + constraints = [constraints] + else: + # Reject case 3 + try: + iter(constraints) + except TypeError as exc: + raise ValueError(message) from exc + + # Accept case 2 by standardizing as case 4 + if len(constraints) == 3: + # argument could be a single tuple representing a LinearConstraint + try: + constraints = [LinearConstraint(*constraints)] + except (TypeError, ValueError, VisibleDeprecationWarning): + # argument was not a tuple representing a LinearConstraint + pass + + # Address cases 4/5 + for constraint in constraints: + # if it's not a LinearConstraint or something that represents a + # LinearConstraint at this point, it's invalid + if not isinstance(constraint, LinearConstraint): + try: + constraint = LinearConstraint(*constraint) + except TypeError as exc: + raise ValueError(message) from exc + As.append(csc_array(constraint.A)) + b_ls.append(np.atleast_1d(constraint.lb).astype(np.float64)) + b_us.append(np.atleast_1d(constraint.ub).astype(np.float64)) + + if len(As) > 1: + A = vstack(As, format="csc") + b_l = np.concatenate(b_ls) + b_u = np.concatenate(b_us) + else: # avoid unnecessary copying + A = As[0] + b_l = b_ls[0] + b_u = b_us[0] + + return A, b_l, b_u + + +def _milp_iv(c, integrality, bounds, constraints, options): + # objective IV + if issparse(c): + raise ValueError("`c` must be a dense array.") + c = np.atleast_1d(c).astype(np.float64) + if c.ndim != 1 or c.size == 0 or not np.all(np.isfinite(c)): + message = ("`c` must be a one-dimensional array of finite numbers " + "with at least one element.") + raise ValueError(message) + + # integrality IV + if issparse(integrality): + raise ValueError("`integrality` must be a dense array.") + message = ("`integrality` must contain integers 0-3 and be broadcastable " + "to `c.shape`.") + if integrality is None: + integrality = 0 + try: + integrality = np.broadcast_to(integrality, c.shape).astype(np.uint8) + except ValueError: + raise ValueError(message) + if integrality.min() < 0 or integrality.max() > 3: + raise ValueError(message) + + # bounds IV + if bounds is None: + bounds = Bounds(0, np.inf) + elif not isinstance(bounds, Bounds): + message = ("`bounds` must be convertible into an instance of " + "`scipy.optimize.Bounds`.") + try: + bounds = Bounds(*bounds) + except TypeError as exc: + raise ValueError(message) from exc + + try: + lb = np.broadcast_to(bounds.lb, c.shape).astype(np.float64) + ub = np.broadcast_to(bounds.ub, c.shape).astype(np.float64) + except (ValueError, TypeError) as exc: + message = ("`bounds.lb` and `bounds.ub` must contain reals and " + "be broadcastable to `c.shape`.") + raise ValueError(message) from exc + + # constraints IV + if not constraints: + constraints = [LinearConstraint(np.empty((0, c.size)), + np.empty((0,)), np.empty((0,)))] + try: + A, b_l, b_u = _constraints_to_components(constraints) + except ValueError as exc: + message = ("`constraints` (or each element within `constraints`) must " + "be convertible into an instance of " + "`scipy.optimize.LinearConstraint`.") + raise ValueError(message) from exc + + if A.shape != (b_l.size, c.size): + message = "The shape of `A` must be (len(b_l), len(c))." 
+ raise ValueError(message) + indptr, indices, data = A.indptr, A.indices, A.data.astype(np.float64) + + # options IV + options = options or {} + supported_options = {'disp', 'presolve', 'time_limit', 'node_limit', + 'mip_rel_gap'} + unsupported_options = set(options).difference(supported_options) + if unsupported_options: + message = (f"Unrecognized options detected: {unsupported_options}. " + "These will be passed to HiGHS verbatim.") + warnings.warn(message, RuntimeWarning, stacklevel=3) + options_iv = {'log_to_console': options.pop("disp", False), + 'mip_max_nodes': options.pop("node_limit", None)} + options_iv.update(options) + + return c, integrality, lb, ub, indptr, indices, data, b_l, b_u, options_iv + + +def milp(c, *, integrality=None, bounds=None, constraints=None, options=None): + r""" + Mixed-integer linear programming + + Solves problems of the following form: + + .. math:: + + \min_x \ & c^T x \\ + \mbox{such that} \ & b_l \leq A x \leq b_u,\\ + & l \leq x \leq u, \\ + & x_i \in \mathbb{Z}, i \in X_i + + where :math:`x` is a vector of decision variables; + :math:`c`, :math:`b_l`, :math:`b_u`, :math:`l`, and :math:`u` are vectors; + :math:`A` is a matrix, and :math:`X_i` is the set of indices of + decision variables that must be integral. (In this context, a + variable that can assume only integer values is said to be "integral"; + it has an "integrality" constraint.) + + Alternatively, that's: + + minimize:: + + c @ x + + such that:: + + b_l <= A @ x <= b_u + l <= x <= u + Specified elements of x must be integers + + By default, ``l = 0`` and ``u = np.inf`` unless specified with + ``bounds``. + + Parameters + ---------- + c : 1D dense array_like + The coefficients of the linear objective function to be minimized. + `c` is converted to a double precision array before the problem is + solved. + integrality : 1D dense array_like, optional + Indicates the type of integrality constraint on each decision variable. + + ``0`` : Continuous variable; no integrality constraint. + + ``1`` : Integer variable; decision variable must be an integer + within `bounds`. + + ``2`` : Semi-continuous variable; decision variable must be within + `bounds` or take value ``0``. + + ``3`` : Semi-integer variable; decision variable must be an integer + within `bounds` or take value ``0``. + + By default, all variables are continuous. `integrality` is converted + to an array of integers before the problem is solved. + + bounds : scipy.optimize.Bounds, optional + Bounds on the decision variables. Lower and upper bounds are converted + to double precision arrays before the problem is solved. The + ``keep_feasible`` parameter of the `Bounds` object is ignored. If + not specified, all decision variables are constrained to be + non-negative. + constraints : sequence of scipy.optimize.LinearConstraint, optional + Linear constraints of the optimization problem. Arguments may be + one of the following: + + 1. A single `LinearConstraint` object + 2. A single tuple that can be converted to a `LinearConstraint` object + as ``LinearConstraint(*constraints)`` + 3. A sequence composed entirely of objects of type 1. and 2. + + Before the problem is solved, all values are converted to double + precision, and the matrices of constraint coefficients are converted to + instances of `scipy.sparse.csc_array`. The ``keep_feasible`` parameter + of `LinearConstraint` objects is ignored. + options : dict, optional + A dictionary of solver options. The following keys are recognized. 
+ + disp : bool (default: ``False``) + Set to ``True`` if indicators of optimization status are to be + printed to the console during optimization. + node_limit : int, optional + The maximum number of nodes (linear program relaxations) to solve + before stopping. Default is no maximum number of nodes. + presolve : bool (default: ``True``) + Presolve attempts to identify trivial infeasibilities, + identify trivial unboundedness, and simplify the problem before + sending it to the main solver. + time_limit : float, optional + The maximum number of seconds allotted to solve the problem. + Default is no time limit. + mip_rel_gap : float, optional + Termination criterion for MIP solver: solver will terminate when + the gap between the primal objective value and the dual objective + bound, scaled by the primal objective value, is <= mip_rel_gap. + + Returns + ------- + res : OptimizeResult + An instance of :class:`scipy.optimize.OptimizeResult`. The object + is guaranteed to have the following attributes. + + status : int + An integer representing the exit status of the algorithm. + + ``0`` : Optimal solution found. + + ``1`` : Iteration or time limit reached. + + ``2`` : Problem is infeasible. + + ``3`` : Problem is unbounded. + + ``4`` : Other; see message for details. + + success : bool + ``True`` when an optimal solution is found and ``False`` otherwise. + + message : str + A string descriptor of the exit status of the algorithm. + + The following attributes will also be present, but the values may be + ``None``, depending on the solution status. + + x : ndarray + The values of the decision variables that minimize the + objective function while satisfying the constraints. + fun : float + The optimal value of the objective function ``c @ x``. + mip_node_count : int + The number of subproblems or "nodes" solved by the MILP solver. + mip_dual_bound : float + The MILP solver's final estimate of the lower bound on the optimal + solution. + mip_gap : float + The difference between the primal objective value and the dual + objective bound, scaled by the primal objective value. + + Notes + ----- + `milp` is a wrapper of the HiGHS linear optimization software [1]_. The + algorithm is deterministic, and it typically finds the global optimum of + moderately challenging mixed-integer linear programs (when it exists). + + References + ---------- + .. [1] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J. + "HiGHS - high performance software for linear optimization." + https://highs.dev/ + .. [2] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised + simplex method." Mathematical Programming Computation, 10 (1), + 119-142, 2018. DOI: 10.1007/s12532-017-0130-5 + + Examples + -------- + Consider the problem at + https://en.wikipedia.org/wiki/Integer_programming#Example, which is + expressed as a maximization problem of two variables. Since `milp` requires + that the problem be expressed as a minimization problem, the objective + function coefficients on the decision variables are: + + >>> import numpy as np + >>> c = -np.array([0, 1]) + + Note the negative sign: we maximize the original objective function + by minimizing the negative of the objective function. + + We collect the coefficients of the constraints into arrays like: + + >>> A = np.array([[-1, 1], [3, 2], [2, 3]]) + >>> b_u = np.array([1, 12, 12]) + >>> b_l = np.full_like(b_u, -np.inf) + + Because there is no lower limit on these constraints, we have defined a + variable ``b_l`` full of values representing negative infinity. 
This may + be unfamiliar to users of `scipy.optimize.linprog`, which only accepts + "less than" (or "upper bound") inequality constraints of the form + ``A_ub @ x <= b_u``. By accepting both ``b_l`` and ``b_u`` of constraints + ``b_l <= A_ub @ x <= b_u``, `milp` makes it easy to specify "greater than" + inequality constraints, "less than" inequality constraints, and equality + constraints concisely. + + These arrays are collected into a single `LinearConstraint` object like: + + >>> from scipy.optimize import LinearConstraint + >>> constraints = LinearConstraint(A, b_l, b_u) + + The non-negativity bounds on the decision variables are enforced by + default, so we do not need to provide an argument for `bounds`. + + Finally, the problem states that both decision variables must be integers: + + >>> integrality = np.ones_like(c) + + We solve the problem like: + + >>> from scipy.optimize import milp + >>> res = milp(c=c, constraints=constraints, integrality=integrality) + >>> res.x + [1.0, 2.0] + + Note that had we solved the relaxed problem (without integrality + constraints): + + >>> res = milp(c=c, constraints=constraints) # OR: + >>> # from scipy.optimize import linprog; res = linprog(c, A, b_u) + >>> res.x + [1.8, 2.8] + + we would not have obtained the correct solution by rounding to the nearest + integers. + + Other examples are given :ref:`in the tutorial `. + + """ + args_iv = _milp_iv(c, integrality, bounds, constraints, options) + c, integrality, lb, ub, indptr, indices, data, b_l, b_u, options = args_iv + + highs_res = _highs_wrapper(c, indptr, indices, data, b_l, b_u, + lb, ub, integrality, options) + + res = {} + + # Convert to scipy-style status and message + highs_status = highs_res.get('status', None) + highs_message = highs_res.get('message', None) + status, message = _highs_to_scipy_status_message(highs_status, + highs_message) + res['status'] = status + res['message'] = message + res['success'] = (status == 0) + x = highs_res.get('x', None) + res['x'] = np.array(x) if x is not None else None + res['fun'] = highs_res.get('fun', None) + res['mip_node_count'] = highs_res.get('mip_node_count', None) + res['mip_dual_bound'] = highs_res.get('mip_dual_bound', None) + res['mip_gap'] = highs_res.get('mip_gap', None) + + return OptimizeResult(res) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_minpack2.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_minpack2.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..e33f9ab02aa2efd0c6c33ed2135942827917f41c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_minpack2.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_moduleTNC.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_moduleTNC.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..4d5e0f6ea4cafe723f9133d1ac59ecc673c3df89 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_moduleTNC.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_nnls.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_nnls.py new file mode 100644 index 0000000000000000000000000000000000000000..17fcdc9e4cc52b1839cd938f21a78256cfb19436 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_nnls.py @@ -0,0 
+1,164 @@ +import numpy as np +from scipy.linalg import solve, LinAlgWarning +import warnings + +__all__ = ['nnls'] + + +def nnls(A, b, maxiter=None, *, atol=None): + """ + Solve ``argmin_x || Ax - b ||_2`` for ``x>=0``. + + This problem, often called NonNegative Least Squares, is a convex + optimization problem with convex constraints. It typically arises when + ``x`` models quantities for which only nonnegative values are + attainable; for example, weights of ingredients, component costs, and so on. + + Parameters + ---------- + A : (m, n) ndarray + Coefficient array + b : (m,) ndarray, float + Right-hand side vector. + maxiter: int, optional + Maximum number of iterations. Default value is ``3 * n``. + atol: float + Tolerance value used in the algorithm to assess closeness to zero in + the projected residual ``(A.T @ (A x - b))`` entries. Increasing this + value relaxes the solution constraints. A typical relaxation value can + be selected as ``max(m, n) * np.linalg.norm(A, 1) * np.spacing(1.)``. + This value is not set by default since the norm operation becomes + expensive for large problems, hence it should be used only when necessary. + + Returns + ------- + x : ndarray + Solution vector. + rnorm : float + The 2-norm of the residual, ``|| Ax-b ||_2``. + + See Also + -------- + lsq_linear : Linear least squares with bounds on the variables + + Notes + ----- + The code is based on [2]_ which is an improved version of the classical + algorithm of [1]_. It utilizes an active set method and solves the KKT + (Karush-Kuhn-Tucker) conditions for the non-negative least squares problem. + + References + ---------- + .. [1] : Lawson C., Hanson R.J., "Solving Least Squares Problems", SIAM, + 1995, :doi:`10.1137/1.9781611971217` + .. [2] : Bro, Rasmus and de Jong, Sijmen, "A Fast Non-Negativity- + Constrained Least Squares Algorithm", Journal Of Chemometrics, 1997, + :doi:`10.1002/(SICI)1099-128X(199709/10)11:5<393::AID-CEM483>3.0.CO;2-L` + + Examples + -------- + >>> import numpy as np + >>> from scipy.optimize import nnls + ... + >>> A = np.array([[1, 0], [1, 0], [0, 1]]) + >>> b = np.array([2, 1, 1]) + >>> nnls(A, b) + (array([1.5, 1. ]), 0.7071067811865475) + + >>> b = np.array([-1, -1, -1]) + >>> nnls(A, b) + (array([0., 0.]), 1.7320508075688772) + + """ + + A = np.asarray_chkfinite(A) + b = np.asarray_chkfinite(b) + + if len(A.shape) != 2: + raise ValueError("Expected a two-dimensional array (matrix)" + + f", but the shape of A is {A.shape}") + if len(b.shape) != 1: + raise ValueError("Expected a one-dimensional array (vector)" + + f", but the shape of b is {b.shape}") + + m, n = A.shape + + if m != b.shape[0]: + raise ValueError( + "Incompatible dimensions. The first dimension of " + + f"A is {m}, while the shape of b is {(b.shape[0], )}") + + x, rnorm, mode = _nnls(A, b, maxiter, tol=atol) + if mode != 1: + raise RuntimeError("Maximum number of iterations reached.") + + return x, rnorm + + +def _nnls(A, b, maxiter=None, tol=None): + """ + This is a single RHS algorithm from ref [2] above. For multiple RHS + support, the algorithm is given in :doi:`10.1002/cem.889` + """ + m, n = A.shape + + AtA = A.T @ A + Atb = b @ A # Result is 1D - let NumPy figure it out + + if not maxiter: + maxiter = 3*n + if tol is None: + tol = 10 * max(m, n) * np.spacing(1.) + + # Initialize vars + x = np.zeros(n, dtype=np.float64) + s = np.zeros(n, dtype=np.float64) + # Inactive constraint switches + P = np.zeros(n, dtype=bool) + + # Projected residual + w = Atb.copy().astype(np.float64) # x=0. 
Skip (-AtA @ x) term + + # Overall iteration counter + # The outer loop is not counted; inner iterations accumulate across + # outer passes + iter = 0 + + while (not P.all()) and (w[~P] > tol).any(): # B + # Get the "most" active coeff index and move to inactive set + k = np.argmax(w * (~P)) # B.2 + P[k] = True # B.3 + + # Iteration solution + s[:] = 0. + # B.4 + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', message='Ill-conditioned matrix', + category=LinAlgWarning) + s[P] = solve(AtA[np.ix_(P, P)], Atb[P], assume_a='sym', check_finite=False) + + # Inner loop + while (iter < maxiter) and (s[P].min() < 0): # C.1 + iter += 1 + inds = P * (s < 0) + alpha = (x[inds] / (x[inds] - s[inds])).min() # C.2 + x *= (1 - alpha) + x += alpha*s + P[x <= tol] = False + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', message='Ill-conditioned matrix', + category=LinAlgWarning) + s[P] = solve(AtA[np.ix_(P, P)], Atb[P], assume_a='sym', + check_finite=False) + s[~P] = 0 # C.6 + + x[:] = s[:] + w[:] = Atb - AtA @ x + + if iter == maxiter: + # Typically the following line should return + # return x, np.linalg.norm(A@x - b), -1 + # however, at the top level, -1 raises an exception, so computing the + # norm here would be wasted work. Instead return dummy value 0. + return x, 0., -1 + + return x, np.linalg.norm(A@x - b), 1 diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_nonlin.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_nonlin.py new file mode 100644 index 0000000000000000000000000000000000000000..c3c429fd30da5ff138a7141b871126e4db3a681b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_nonlin.py @@ -0,0 +1,1584 @@ +# Copyright (C) 2009, Pauli Virtanen +# Distributed under the same license as SciPy. + +import inspect +import sys +import warnings + +import numpy as np +from numpy import asarray, dot, vdot + +from scipy.linalg import norm, solve, inv, qr, svd, LinAlgError +import scipy.sparse.linalg +import scipy.sparse +from scipy.linalg import get_blas_funcs +from scipy._lib._util import getfullargspec_no_self as _getfullargspec +from ._linesearch import scalar_search_wolfe1, scalar_search_armijo + + +__all__ = [ + 'broyden1', 'broyden2', 'anderson', 'linearmixing', + 'diagbroyden', 'excitingmixing', 'newton_krylov', + 'BroydenFirst', 'KrylovJacobian', 'InverseJacobian', 'NoConvergence'] + +#------------------------------------------------------------------------------ +# Utility functions +#------------------------------------------------------------------------------ + + +class NoConvergence(Exception): + """Exception raised when nonlinear solver fails to converge within the specified + `maxiter`.""" + pass + + +def maxnorm(x): + return np.absolute(x).max() + + +def _as_inexact(x): + """Return `x` as an array, of either floats or complex floats""" + x = asarray(x) + if not np.issubdtype(x.dtype, np.inexact): + return asarray(x, dtype=np.float64) + return x + + +def _array_like(x, x0): + """Return ndarray `x` as same array subclass and shape as `x0`""" + x = np.reshape(x, np.shape(x0)) + wrap = getattr(x0, '__array_wrap__', x.__array_wrap__) + return wrap(x) + + +def _safe_norm(v): + if not np.isfinite(v).all(): + return np.array(np.inf) + return norm(v) + +#------------------------------------------------------------------------------ +# Generic nonlinear solver machinery +#------------------------------------------------------------------------------ + + +_doc_parts = dict( + params_basic=""" + F : function(x) -> f + Function whose root to find; should take 
and return an array-like + object. + xin : array_like + Initial guess for the solution + """.strip(), + params_extra=""" + iter : int, optional + Number of iterations to make. If omitted (default), make as many + as required to meet tolerances. + verbose : bool, optional + Print status to stdout on every iteration. + maxiter : int, optional + Maximum number of iterations to make. If more are needed to + meet convergence, `NoConvergence` is raised. + f_tol : float, optional + Absolute tolerance (in max-norm) for the residual. + If omitted, default is 6e-6. + f_rtol : float, optional + Relative tolerance for the residual. If omitted, not used. + x_tol : float, optional + Absolute minimum step size, as determined from the Jacobian + approximation. If the step size is smaller than this, optimization + is terminated as successful. If omitted, not used. + x_rtol : float, optional + Relative minimum step size. If omitted, not used. + tol_norm : function(vector) -> scalar, optional + Norm to use in convergence check. Default is the maximum norm. + line_search : {None, 'armijo' (default), 'wolfe'}, optional + Which type of line search to use to determine the step size in the + direction given by the Jacobian approximation. Defaults to 'armijo'. + callback : function, optional + Optional callback function. It is called on every iteration as + ``callback(x, f)`` where `x` is the current solution and `f` + the corresponding residual. + + Returns + ------- + sol : ndarray + An array (of similar array type as `x0`) containing the final solution. + + Raises + ------ + NoConvergence + When a solution was not found. + + """.strip() +) + + +def _set_doc(obj): + if obj.__doc__: + obj.__doc__ = obj.__doc__ % _doc_parts + + +def nonlin_solve(F, x0, jacobian='krylov', iter=None, verbose=False, + maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, + tol_norm=None, line_search='armijo', callback=None, + full_output=False, raise_exception=True): + """ + Find a root of a function, in a way suitable for large-scale problems. + + Parameters + ---------- + %(params_basic)s + jacobian : Jacobian + A Jacobian approximation: `Jacobian` object or something that + `asjacobian` can transform to one. Alternatively, a string specifying + which of the builtin Jacobian approximations to use: + + krylov, broyden1, broyden2, anderson + diagbroyden, linearmixing, excitingmixing + + %(params_extra)s + full_output : bool + If True, returns a dictionary `info` containing convergence + information. + raise_exception : bool + If True, a `NoConvergence` exception is raised if no solution is found. + + See Also + -------- + asjacobian, Jacobian + + Notes + ----- + This algorithm implements the inexact Newton method, with + backtracking or full line searches. Several Jacobian + approximations are available, including Krylov and Quasi-Newton + methods. + + References + ---------- + .. [KIM] C. T. Kelley, \"Iterative Methods for Linear and Nonlinear + Equations\". Society for Industrial and Applied Mathematics. (1995) + https://archive.siam.org/books/kelley/fr16/ + + """ + # Can't use default parameters because it's being explicitly passed as None + # from the calling function, so we need to set it here. 
+ tol_norm = maxnorm if tol_norm is None else tol_norm + condition = TerminationCondition(f_tol=f_tol, f_rtol=f_rtol, + x_tol=x_tol, x_rtol=x_rtol, + iter=iter, norm=tol_norm) + + x0 = _as_inexact(x0) + def func(z): + return _as_inexact(F(_array_like(z, x0))).flatten() + x = x0.flatten() + + dx = np.full_like(x, np.inf) + Fx = func(x) + Fx_norm = norm(Fx) + + jacobian = asjacobian(jacobian) + jacobian.setup(x.copy(), Fx, func) + + if maxiter is None: + if iter is not None: + maxiter = iter + 1 + else: + maxiter = 100*(x.size+1) + + if line_search is True: + line_search = 'armijo' + elif line_search is False: + line_search = None + + if line_search not in (None, 'armijo', 'wolfe'): + raise ValueError("Invalid line search") + + # Solver tolerance selection + gamma = 0.9 + eta_max = 0.9999 + eta_threshold = 0.1 + eta = 1e-3 + + for n in range(maxiter): + status = condition.check(Fx, x, dx) + if status: + break + + # The tolerance, as computed for scipy.sparse.linalg.* routines + tol = min(eta, eta*Fx_norm) + dx = -jacobian.solve(Fx, tol=tol) + + if norm(dx) == 0: + raise ValueError("Jacobian inversion yielded zero vector. " + "This indicates a bug in the Jacobian " + "approximation.") + + # Line search, or Newton step + if line_search: + s, x, Fx, Fx_norm_new = _nonlin_line_search(func, x, Fx, dx, + line_search) + else: + s = 1.0 + x = x + dx + Fx = func(x) + Fx_norm_new = norm(Fx) + + jacobian.update(x.copy(), Fx) + + if callback: + callback(x, Fx) + + # Adjust forcing parameters for inexact methods + eta_A = gamma * Fx_norm_new**2 / Fx_norm**2 + if gamma * eta**2 < eta_threshold: + eta = min(eta_max, eta_A) + else: + eta = min(eta_max, max(eta_A, gamma*eta**2)) + + Fx_norm = Fx_norm_new + + # Print status + if verbose: + sys.stdout.write("%d: |F(x)| = %g; step %g\n" % ( + n, tol_norm(Fx), s)) + sys.stdout.flush() + else: + if raise_exception: + raise NoConvergence(_array_like(x, x0)) + else: + status = 2 + + if full_output: + info = {'nit': condition.iteration, + 'fun': Fx, + 'status': status, + 'success': status == 1, + 'message': {1: 'A solution was found at the specified ' + 'tolerance.', + 2: 'The maximum number of iterations allowed ' + 'has been reached.' + }[status] + } + return _array_like(x, x0), info + else: + return _array_like(x, x0) + + +_set_doc(nonlin_solve) + + +def _nonlin_line_search(func, x, Fx, dx, search_type='armijo', rdiff=1e-8, + smin=1e-2): + tmp_s = [0] + tmp_Fx = [Fx] + tmp_phi = [norm(Fx)**2] + s_norm = norm(x) / norm(dx) + + def phi(s, store=True): + if s == tmp_s[0]: + return tmp_phi[0] + xt = x + s*dx + v = func(xt) + p = _safe_norm(v)**2 + if store: + tmp_s[0] = s + tmp_phi[0] = p + tmp_Fx[0] = v + return p + + def derphi(s): + ds = (abs(s) + s_norm + 1) * rdiff + return (phi(s+ds, store=False) - phi(s)) / ds + + if search_type == 'wolfe': + s, phi1, phi0 = scalar_search_wolfe1(phi, derphi, tmp_phi[0], + xtol=1e-2, amin=smin) + elif search_type == 'armijo': + s, phi1 = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0], + amin=smin) + + if s is None: + # XXX: No suitable step length found. Take the full Newton step, + # and hope for the best. + s = 1.0 + + x = x + s*dx + if s == tmp_s[0]: + Fx = tmp_Fx[0] + else: + Fx = func(x) + Fx_norm = norm(Fx) + + return s, x, Fx, Fx_norm + + +class TerminationCondition: + """ + Termination condition for an iteration. 
It is terminated if + + - |F| < f_rtol*|F_0|, AND + - |F| < f_tol + + AND + + - |dx| < x_rtol*|x|, AND + - |dx| < x_tol + + """ + def __init__(self, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, + iter=None, norm=maxnorm): + + if f_tol is None: + f_tol = np.finfo(np.float64).eps ** (1./3) + if f_rtol is None: + f_rtol = np.inf + if x_tol is None: + x_tol = np.inf + if x_rtol is None: + x_rtol = np.inf + + self.x_tol = x_tol + self.x_rtol = x_rtol + self.f_tol = f_tol + self.f_rtol = f_rtol + + self.norm = norm + + self.iter = iter + + self.f0_norm = None + self.iteration = 0 + + def check(self, f, x, dx): + self.iteration += 1 + f_norm = self.norm(f) + x_norm = self.norm(x) + dx_norm = self.norm(dx) + + if self.f0_norm is None: + self.f0_norm = f_norm + + if f_norm == 0: + return 1 + + if self.iter is not None: + # backwards compatibility with SciPy 0.6.0 + return 2 * (self.iteration > self.iter) + + # NB: condition must succeed for rtol=inf even if norm == 0 + return int((f_norm <= self.f_tol + and f_norm/self.f_rtol <= self.f0_norm) + and (dx_norm <= self.x_tol + and dx_norm/self.x_rtol <= x_norm)) + + +#------------------------------------------------------------------------------ +# Generic Jacobian approximation +#------------------------------------------------------------------------------ + +class Jacobian: + """ + Common interface for Jacobians or Jacobian approximations. + + The optional methods are useful when implementing trust-region and + similar algorithms that often require evaluating transposes of the + Jacobian. + + Methods + ------- + solve + Returns J^-1 * v + update + Updates Jacobian to point `x` (where the function has residual `Fx`) + + matvec : optional + Returns J * v + rmatvec : optional + Returns J^H * v + rsolve : optional + Returns J^-H * v + matmat : optional + Returns J * V, where V is a dense matrix with dimensions (N,K). + todense : optional + Form the dense Jacobian matrix. Necessary for dense trust region + algorithms, and useful for testing. + + Attributes + ---------- + shape + Matrix dimensions (M, N) + dtype + Data type of the matrix. + func : callable, optional + Function the Jacobian corresponds to + + """ + + def __init__(self, **kw): + names = ["solve", "update", "matvec", "rmatvec", "rsolve", + "matmat", "todense", "shape", "dtype"] + for name, value in kw.items(): + if name not in names: + raise ValueError("Unknown keyword argument %s" % name) + if value is not None: + setattr(self, name, kw[name]) + + + if hasattr(self, "todense"): + def __array__(self, dtype=None, copy=None): + if dtype is not None: + raise ValueError(f"`dtype` must be None, was {dtype}") + return self.todense() + + def aspreconditioner(self): + return InverseJacobian(self) + + def solve(self, v, tol=0): + raise NotImplementedError + + def update(self, x, F): + pass + + def setup(self, x, F, func): + self.func = func + self.shape = (F.size, x.size) + self.dtype = F.dtype + if self.__class__.setup is Jacobian.setup: + # Call on the first point unless overridden + self.update(x, F) + + +class InverseJacobian: + def __init__(self, jacobian): + self.jacobian = jacobian + self.matvec = jacobian.solve + self.update = jacobian.update + if hasattr(jacobian, 'setup'): + self.setup = jacobian.setup + if hasattr(jacobian, 'rsolve'): + self.rmatvec = jacobian.rsolve + + @property + def shape(self): + return self.jacobian.shape + + @property + def dtype(self): + return self.jacobian.dtype + + +def asjacobian(J): + """ + Convert given object to one suitable for use as a Jacobian. 
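+ + Examples + -------- + A minimal sketch (the 2x2 array here is arbitrary): a constant Jacobian + may be given as a plain ndarray, and the returned object then solves + against it. + + >>> import numpy as np + >>> from scipy.optimize._nonlin import asjacobian + >>> jac = asjacobian(np.array([[1., 0.], [0., 2.]])) + >>> jac.solve(np.array([2., 2.])) + array([2., 1.])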
+ """ + spsolve = scipy.sparse.linalg.spsolve + if isinstance(J, Jacobian): + return J + elif inspect.isclass(J) and issubclass(J, Jacobian): + return J() + elif isinstance(J, np.ndarray): + if J.ndim > 2: + raise ValueError('array must have rank <= 2') + J = np.atleast_2d(np.asarray(J)) + if J.shape[0] != J.shape[1]: + raise ValueError('array must be square') + + return Jacobian(matvec=lambda v: dot(J, v), + rmatvec=lambda v: dot(J.conj().T, v), + solve=lambda v, tol=0: solve(J, v), + rsolve=lambda v, tol=0: solve(J.conj().T, v), + dtype=J.dtype, shape=J.shape) + elif scipy.sparse.issparse(J): + if J.shape[0] != J.shape[1]: + raise ValueError('matrix must be square') + return Jacobian(matvec=lambda v: J @ v, + rmatvec=lambda v: J.conj().T @ v, + solve=lambda v, tol=0: spsolve(J, v), + rsolve=lambda v, tol=0: spsolve(J.conj().T, v), + dtype=J.dtype, shape=J.shape) + elif hasattr(J, 'shape') and hasattr(J, 'dtype') and hasattr(J, 'solve'): + return Jacobian(matvec=getattr(J, 'matvec'), + rmatvec=getattr(J, 'rmatvec'), + solve=J.solve, + rsolve=getattr(J, 'rsolve'), + update=getattr(J, 'update'), + setup=getattr(J, 'setup'), + dtype=J.dtype, + shape=J.shape) + elif callable(J): + # Assume it's a function J(x) that returns the Jacobian + class Jac(Jacobian): + def update(self, x, F): + self.x = x + + def solve(self, v, tol=0): + m = J(self.x) + if isinstance(m, np.ndarray): + return solve(m, v) + elif scipy.sparse.issparse(m): + return spsolve(m, v) + else: + raise ValueError("Unknown matrix type") + + def matvec(self, v): + m = J(self.x) + if isinstance(m, np.ndarray): + return dot(m, v) + elif scipy.sparse.issparse(m): + return m @ v + else: + raise ValueError("Unknown matrix type") + + def rsolve(self, v, tol=0): + m = J(self.x) + if isinstance(m, np.ndarray): + return solve(m.conj().T, v) + elif scipy.sparse.issparse(m): + return spsolve(m.conj().T, v) + else: + raise ValueError("Unknown matrix type") + + def rmatvec(self, v): + m = J(self.x) + if isinstance(m, np.ndarray): + return dot(m.conj().T, v) + elif scipy.sparse.issparse(m): + return m.conj().T @ v + else: + raise ValueError("Unknown matrix type") + return Jac() + elif isinstance(J, str): + return dict(broyden1=BroydenFirst, + broyden2=BroydenSecond, + anderson=Anderson, + diagbroyden=DiagBroyden, + linearmixing=LinearMixing, + excitingmixing=ExcitingMixing, + krylov=KrylovJacobian)[J]() + else: + raise TypeError('Cannot convert object to a Jacobian') + + +#------------------------------------------------------------------------------ +# Broyden +#------------------------------------------------------------------------------ + +class GenericBroyden(Jacobian): + def setup(self, x0, f0, func): + Jacobian.setup(self, x0, f0, func) + self.last_f = f0 + self.last_x = x0 + + if hasattr(self, 'alpha') and self.alpha is None: + # Autoscale the initial Jacobian parameter + # unless we have already guessed the solution. + normf0 = norm(f0) + if normf0: + self.alpha = 0.5*max(norm(x0), 1) / normf0 + else: + self.alpha = 1.0 + + def _update(self, x, f, dx, df, dx_norm, df_norm): + raise NotImplementedError + + def update(self, x, f): + df = f - self.last_f + dx = x - self.last_x + self._update(x, f, dx, df, norm(dx), norm(df)) + self.last_f = f + self.last_x = x + + +class LowRankMatrix: + r""" + A matrix represented as + + .. math:: \alpha I + \sum_{n=0}^{n=M} c_n d_n^\dagger + + However, if the rank of the matrix reaches the dimension of the vectors, + full matrix representation will be used thereon. 
+ + """ + + def __init__(self, alpha, n, dtype): + self.alpha = alpha + self.cs = [] + self.ds = [] + self.n = n + self.dtype = dtype + self.collapsed = None + + @staticmethod + def _matvec(v, alpha, cs, ds): + axpy, scal, dotc = get_blas_funcs(['axpy', 'scal', 'dotc'], + cs[:1] + [v]) + w = alpha * v + for c, d in zip(cs, ds): + a = dotc(d, v) + w = axpy(c, w, w.size, a) + return w + + @staticmethod + def _solve(v, alpha, cs, ds): + """Evaluate w = M^-1 v""" + if len(cs) == 0: + return v/alpha + + # (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1 + + axpy, dotc = get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v]) + + c0 = cs[0] + A = alpha * np.identity(len(cs), dtype=c0.dtype) + for i, d in enumerate(ds): + for j, c in enumerate(cs): + A[i,j] += dotc(d, c) + + q = np.zeros(len(cs), dtype=c0.dtype) + for j, d in enumerate(ds): + q[j] = dotc(d, v) + q /= alpha + q = solve(A, q) + + w = v/alpha + for c, qc in zip(cs, q): + w = axpy(c, w, w.size, -qc) + + return w + + def matvec(self, v): + """Evaluate w = M v""" + if self.collapsed is not None: + return np.dot(self.collapsed, v) + return LowRankMatrix._matvec(v, self.alpha, self.cs, self.ds) + + def rmatvec(self, v): + """Evaluate w = M^H v""" + if self.collapsed is not None: + return np.dot(self.collapsed.T.conj(), v) + return LowRankMatrix._matvec(v, np.conj(self.alpha), self.ds, self.cs) + + def solve(self, v, tol=0): + """Evaluate w = M^-1 v""" + if self.collapsed is not None: + return solve(self.collapsed, v) + return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds) + + def rsolve(self, v, tol=0): + """Evaluate w = M^-H v""" + if self.collapsed is not None: + return solve(self.collapsed.T.conj(), v) + return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs) + + def append(self, c, d): + if self.collapsed is not None: + self.collapsed += c[:,None] * d[None,:].conj() + return + + self.cs.append(c) + self.ds.append(d) + + if len(self.cs) > c.size: + self.collapse() + + def __array__(self, dtype=None, copy=None): + if dtype is not None: + warnings.warn("LowRankMatrix is scipy-internal code, `dtype` " + f"should only be None but was {dtype} (not handled)", + stacklevel=3) + if copy is not None: + warnings.warn("LowRankMatrix is scipy-internal code, `copy` " + f"should only be None but was {copy} (not handled)", + stacklevel=3) + if self.collapsed is not None: + return self.collapsed + + Gm = self.alpha*np.identity(self.n, dtype=self.dtype) + for c, d in zip(self.cs, self.ds): + Gm += c[:,None]*d[None,:].conj() + return Gm + + def collapse(self): + """Collapse the low-rank matrix to a full-rank one.""" + self.collapsed = np.array(self) + self.cs = None + self.ds = None + self.alpha = None + + def restart_reduce(self, rank): + """ + Reduce the rank of the matrix by dropping all vectors. + """ + if self.collapsed is not None: + return + assert rank > 0 + if len(self.cs) > rank: + del self.cs[:] + del self.ds[:] + + def simple_reduce(self, rank): + """ + Reduce the rank of the matrix by dropping oldest vectors. + """ + if self.collapsed is not None: + return + assert rank > 0 + while len(self.cs) > rank: + del self.cs[0] + del self.ds[0] + + def svd_reduce(self, max_rank, to_retain=None): + """ + Reduce the rank of the matrix by retaining some SVD components. + + This corresponds to the \"Broyden Rank Reduction Inverse\" + algorithm described in [1]_. + + Note that the SVD decomposition can be done by solving only a + problem whose size is the effective rank of this matrix, which + is viable even for large problems. 
+ + Parameters + ---------- + max_rank : int + Maximum rank of this matrix after reduction. + to_retain : int, optional + Number of SVD components to retain when reduction is done + (ie. rank > max_rank). Default is ``max_rank - 2``. + + References + ---------- + .. [1] B.A. van der Rotten, PhD thesis, + \"A limited memory Broyden method to solve high-dimensional + systems of nonlinear equations\". Mathematisch Instituut, + Universiteit Leiden, The Netherlands (2003). + + https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf + + """ + if self.collapsed is not None: + return + + p = max_rank + if to_retain is not None: + q = to_retain + else: + q = p - 2 + + if self.cs: + p = min(p, len(self.cs[0])) + q = max(0, min(q, p-1)) + + m = len(self.cs) + if m < p: + # nothing to do + return + + C = np.array(self.cs).T + D = np.array(self.ds).T + + D, R = qr(D, mode='economic') + C = dot(C, R.T.conj()) + + U, S, WH = svd(C, full_matrices=False) + + C = dot(C, inv(WH)) + D = dot(D, WH.T.conj()) + + for k in range(q): + self.cs[k] = C[:,k].copy() + self.ds[k] = D[:,k].copy() + + del self.cs[q:] + del self.ds[q:] + + +_doc_parts['broyden_params'] = """ + alpha : float, optional + Initial guess for the Jacobian is ``(-1/alpha)``. + reduction_method : str or tuple, optional + Method used in ensuring that the rank of the Broyden matrix + stays low. Can either be a string giving the name of the method, + or a tuple of the form ``(method, param1, param2, ...)`` + that gives the name of the method and values for additional parameters. + + Methods available: + + - ``restart``: drop all matrix columns. Has no extra parameters. + - ``simple``: drop oldest matrix column. Has no extra parameters. + - ``svd``: keep only the most significant SVD components. + Takes an extra parameter, ``to_retain``, which determines the + number of SVD components to retain when rank reduction is done. + Default is ``max_rank - 2``. + + max_rank : int, optional + Maximum rank for the Broyden matrix. + Default is infinity (i.e., no rank reduction). + """.strip() + + +class BroydenFirst(GenericBroyden): + r""" + Find a root of a function, using Broyden's first Jacobian approximation. + + This method is also known as \"Broyden's good method\". + + Parameters + ---------- + %(params_basic)s + %(broyden_params)s + %(params_extra)s + + See Also + -------- + root : Interface to root finding algorithms for multivariate + functions. See ``method='broyden1'`` in particular. + + Notes + ----- + This algorithm implements the inverse Jacobian Quasi-Newton update + + .. math:: H_+ = H + (dx - H df) dx^\dagger H / ( dx^\dagger H df) + + which corresponds to Broyden's first Jacobian update + + .. math:: J_+ = J + (df - J dx) dx^\dagger / dx^\dagger dx + + + References + ---------- + .. [1] B.A. van der Rotten, PhD thesis, + \"A limited memory Broyden method to solve high-dimensional + systems of nonlinear equations\". Mathematisch Instituut, + Universiteit Leiden, The Netherlands (2003). + + https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf + + Examples + -------- + The following functions define a system of nonlinear equations + + >>> def fun(x): + ... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0, + ... 0.5 * (x[1] - x[0])**3 + x[1]] + + A solution can be obtained as follows. 
+ + >>> from scipy import optimize + >>> sol = optimize.broyden1(fun, [0, 0]) + >>> sol + array([0.84116396, 0.15883641]) + + """ + + def __init__(self, alpha=None, reduction_method='restart', max_rank=None): + GenericBroyden.__init__(self) + self.alpha = alpha + self.Gm = None + + if max_rank is None: + max_rank = np.inf + self.max_rank = max_rank + + if isinstance(reduction_method, str): + reduce_params = () + else: + reduce_params = reduction_method[1:] + reduction_method = reduction_method[0] + reduce_params = (max_rank - 1,) + reduce_params + + if reduction_method == 'svd': + self._reduce = lambda: self.Gm.svd_reduce(*reduce_params) + elif reduction_method == 'simple': + self._reduce = lambda: self.Gm.simple_reduce(*reduce_params) + elif reduction_method == 'restart': + self._reduce = lambda: self.Gm.restart_reduce(*reduce_params) + else: + raise ValueError("Unknown rank reduction method '%s'" % + reduction_method) + + def setup(self, x, F, func): + GenericBroyden.setup(self, x, F, func) + self.Gm = LowRankMatrix(-self.alpha, self.shape[0], self.dtype) + + def todense(self): + return inv(self.Gm) + + def solve(self, f, tol=0): + r = self.Gm.matvec(f) + if not np.isfinite(r).all(): + # singular; reset the Jacobian approximation + self.setup(self.last_x, self.last_f, self.func) + return self.Gm.matvec(f) + return r + + def matvec(self, f): + return self.Gm.solve(f) + + def rsolve(self, f, tol=0): + return self.Gm.rmatvec(f) + + def rmatvec(self, f): + return self.Gm.rsolve(f) + + def _update(self, x, f, dx, df, dx_norm, df_norm): + self._reduce() # reduce first to preserve secant condition + + v = self.Gm.rmatvec(dx) + c = dx - self.Gm.matvec(df) + d = v / vdot(df, v) + + self.Gm.append(c, d) + + +class BroydenSecond(BroydenFirst): + """ + Find a root of a function, using Broyden\'s second Jacobian approximation. + + This method is also known as \"Broyden's bad method\". + + Parameters + ---------- + %(params_basic)s + %(broyden_params)s + %(params_extra)s + + See Also + -------- + root : Interface to root finding algorithms for multivariate + functions. See ``method='broyden2'`` in particular. + + Notes + ----- + This algorithm implements the inverse Jacobian Quasi-Newton update + + .. math:: H_+ = H + (dx - H df) df^\\dagger / ( df^\\dagger df) + + corresponding to Broyden's second method. + + References + ---------- + .. [1] B.A. van der Rotten, PhD thesis, + \"A limited memory Broyden method to solve high-dimensional + systems of nonlinear equations\". Mathematisch Instituut, + Universiteit Leiden, The Netherlands (2003). + + https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf + + Examples + -------- + The following functions define a system of nonlinear equations + + >>> def fun(x): + ... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0, + ... 0.5 * (x[1] - x[0])**3 + x[1]] + + A solution can be obtained as follows. + + >>> from scipy import optimize + >>> sol = optimize.broyden2(fun, [0, 0]) + >>> sol + array([0.84116365, 0.15883529]) + + """ + + def _update(self, x, f, dx, df, dx_norm, df_norm): + self._reduce() # reduce first to preserve secant condition + + v = df + c = dx - self.Gm.matvec(df) + d = v / df_norm**2 + self.Gm.append(c, d) + + +#------------------------------------------------------------------------------ +# Broyden-like (restricted memory) +#------------------------------------------------------------------------------ + +class Anderson(GenericBroyden): + """ + Find a root of a function, using (extended) Anderson mixing. 
+ + The Jacobian is formed by seeking the 'best' solution in the space + spanned by the last `M` vectors. As a result, only an MxM matrix + inversion and MxN multiplications are required. [Ey]_ + + Parameters + ---------- + %(params_basic)s + alpha : float, optional + Initial guess for the Jacobian is (-1/alpha). + M : int, optional + Number of previous vectors to retain. Defaults to 5. + w0 : float, optional + Regularization parameter for numerical stability. + Good values are of the order of 0.01, compared to unity. + %(params_extra)s + + See Also + -------- + root : Interface to root finding algorithms for multivariate + functions. See ``method='anderson'`` in particular. + + References + ---------- + .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996). + + Examples + -------- + The following functions define a system of nonlinear equations + + >>> def fun(x): + ... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0, + ... 0.5 * (x[1] - x[0])**3 + x[1]] + + A solution can be obtained as follows. + + >>> from scipy import optimize + >>> sol = optimize.anderson(fun, [0, 0]) + >>> sol + array([0.84116588, 0.15883789]) + + """ + + # Note: + # + # Anderson method maintains a rank M approximation of the inverse Jacobian, + # + # J^-1 v ~ -v*alpha + (dX + alpha dF) A^-1 dF^H v + # A = W + dF^H dF + # W = w0^2 diag(dF^H dF) + # + # so that for w0 = 0 the secant condition applies for last M iterates, i.e., + # + # J^-1 df_j = dx_j + # + # for all j = 0 ... M-1. + # + # Moreover, (from Sherman-Morrison-Woodbury formula) + # + # J v ~ [ b I - b^2 C (I + b dF^H A^-1 C)^-1 dF^H ] v + # C = (dX + alpha dF) A^-1 + # b = -1/alpha + # + # and after simplification + # + # J v ~ -v/alpha + (dX/alpha + dF) (dF^H dX - alpha W)^-1 dF^H v + # + + def __init__(self, alpha=None, w0=0.01, M=5): + GenericBroyden.__init__(self) + self.alpha = alpha + self.M = M + self.dx = [] + self.df = [] + self.gamma = None + self.w0 = w0 + + def solve(self, f, tol=0): + dx = -self.alpha*f + + n = len(self.dx) + if n == 0: + return dx + + df_f = np.empty(n, dtype=f.dtype) + for k in range(n): + df_f[k] = vdot(self.df[k], f) + + try: + gamma = solve(self.a, df_f) + except LinAlgError: + # singular; reset the Jacobian approximation + del self.dx[:] + del self.df[:] + return dx + + for m in range(n): + dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m]) + return dx + + def matvec(self, f): + dx = -f/self.alpha + + n = len(self.dx) + if n == 0: + return dx + + df_f = np.empty(n, dtype=f.dtype) + for k in range(n): + df_f[k] = vdot(self.df[k], f) + + b = np.empty((n, n), dtype=f.dtype) + for i in range(n): + for j in range(n): + b[i,j] = vdot(self.df[i], self.dx[j]) + if i == j and self.w0 != 0: + b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha + gamma = solve(b, df_f) + + for m in range(n): + dx += gamma[m]*(self.df[m] + self.dx[m]/self.alpha) + return dx + + def _update(self, x, f, dx, df, dx_norm, df_norm): + if self.M == 0: + return + + self.dx.append(dx) + self.df.append(df) + + while len(self.dx) > self.M: + self.dx.pop(0) + self.df.pop(0) + + n = len(self.dx) + a = np.zeros((n, n), dtype=f.dtype) + + for i in range(n): + for j in range(i, n): + if i == j: + wd = self.w0**2 + else: + wd = 0 + a[i,j] = (1+wd)*vdot(self.df[i], self.df[j]) + + a += np.triu(a, 1).T.conj() + self.a = a + +#------------------------------------------------------------------------------ +# Simple iterations +#------------------------------------------------------------------------------ + + +class DiagBroyden(GenericBroyden): + """ + Find a root of a 
function, using diagonal Broyden Jacobian approximation. + + The Jacobian approximation is derived from previous iterations, by + retaining only the diagonal of Broyden matrices. + + .. warning:: + + This algorithm may be useful for specific problems, but whether + it will work may depend strongly on the problem. + + Parameters + ---------- + %(params_basic)s + alpha : float, optional + Initial guess for the Jacobian is (-1/alpha). + %(params_extra)s + + See Also + -------- + root : Interface to root finding algorithms for multivariate + functions. See ``method='diagbroyden'`` in particular. + + Examples + -------- + The following functions define a system of nonlinear equations + + >>> def fun(x): + ... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0, + ... 0.5 * (x[1] - x[0])**3 + x[1]] + + A solution can be obtained as follows. + + >>> from scipy import optimize + >>> sol = optimize.diagbroyden(fun, [0, 0]) + >>> sol + array([0.84116403, 0.15883384]) + + """ + + def __init__(self, alpha=None): + GenericBroyden.__init__(self) + self.alpha = alpha + + def setup(self, x, F, func): + GenericBroyden.setup(self, x, F, func) + self.d = np.full((self.shape[0],), 1 / self.alpha, dtype=self.dtype) + + def solve(self, f, tol=0): + return -f / self.d + + def matvec(self, f): + return -f * self.d + + def rsolve(self, f, tol=0): + return -f / self.d.conj() + + def rmatvec(self, f): + return -f * self.d.conj() + + def todense(self): + return np.diag(-self.d) + + def _update(self, x, f, dx, df, dx_norm, df_norm): + self.d -= (df + self.d*dx)*dx/dx_norm**2 + + +class LinearMixing(GenericBroyden): + """ + Find a root of a function, using a scalar Jacobian approximation. + + .. warning:: + + This algorithm may be useful for specific problems, but whether + it will work may depend strongly on the problem. + + Parameters + ---------- + %(params_basic)s + alpha : float, optional + The Jacobian approximation is (-1/alpha). + %(params_extra)s + + See Also + -------- + root : Interface to root finding algorithms for multivariate + functions. See ``method='linearmixing'`` in particular. + + """ + + def __init__(self, alpha=None): + GenericBroyden.__init__(self) + self.alpha = alpha + + def solve(self, f, tol=0): + return -f*self.alpha + + def matvec(self, f): + return -f/self.alpha + + def rsolve(self, f, tol=0): + return -f*np.conj(self.alpha) + + def rmatvec(self, f): + return -f/np.conj(self.alpha) + + def todense(self): + return np.diag(np.full(self.shape[0], -1/self.alpha)) + + def _update(self, x, f, dx, df, dx_norm, df_norm): + pass + + +class ExcitingMixing(GenericBroyden): + """ + Find a root of a function, using a tuned diagonal Jacobian approximation. + + The Jacobian matrix is diagonal and is tuned on each iteration. + + .. warning:: + + This algorithm may be useful for specific problems, but whether + it will work may depend strongly on the problem. + + See Also + -------- + root : Interface to root finding algorithms for multivariate + functions. See ``method='excitingmixing'`` in particular. + + Parameters + ---------- + %(params_basic)s + alpha : float, optional + Initial Jacobian approximation is (-1/alpha). + alphamax : float, optional + The entries of the diagonal Jacobian are kept in the range + ``[alpha, alphamax]``. 
+ %(params_extra)s + """ + + def __init__(self, alpha=None, alphamax=1.0): + GenericBroyden.__init__(self) + self.alpha = alpha + self.alphamax = alphamax + self.beta = None + + def setup(self, x, F, func): + GenericBroyden.setup(self, x, F, func) + self.beta = np.full((self.shape[0],), self.alpha, dtype=self.dtype) + + def solve(self, f, tol=0): + return -f*self.beta + + def matvec(self, f): + return -f/self.beta + + def rsolve(self, f, tol=0): + return -f*self.beta.conj() + + def rmatvec(self, f): + return -f/self.beta.conj() + + def todense(self): + return np.diag(-1/self.beta) + + def _update(self, x, f, dx, df, dx_norm, df_norm): + incr = f*self.last_f > 0 + self.beta[incr] += self.alpha + self.beta[~incr] = self.alpha + np.clip(self.beta, 0, self.alphamax, out=self.beta) + + +#------------------------------------------------------------------------------ +# Iterative/Krylov approximated Jacobians +#------------------------------------------------------------------------------ + +class KrylovJacobian(Jacobian): + r""" + Find a root of a function, using Krylov approximation for inverse Jacobian. + + This method is suitable for solving large-scale problems. + + Parameters + ---------- + %(params_basic)s + rdiff : float, optional + Relative step size to use in numerical differentiation. + method : str or callable, optional + Krylov method to use to approximate the Jacobian. Can be a string, + or a function implementing the same interface as the iterative + solvers in `scipy.sparse.linalg`. If a string, needs to be one of: + ``'lgmres'``, ``'gmres'``, ``'bicgstab'``, ``'cgs'``, ``'minres'``, + ``'tfqmr'``. + + The default is `scipy.sparse.linalg.lgmres`. + inner_maxiter : int, optional + Parameter to pass to the "inner" Krylov solver: maximum number of + iterations. Iteration will stop after maxiter steps even if the + specified tolerance has not been achieved. + inner_M : LinearOperator or InverseJacobian + Preconditioner for the inner Krylov iteration. + Note that you can also use inverse Jacobians as (adaptive) + preconditioners. For example, + + >>> from scipy.optimize import BroydenFirst, KrylovJacobian + >>> from scipy.optimize import InverseJacobian + >>> jac = BroydenFirst() + >>> kjac = KrylovJacobian(inner_M=InverseJacobian(jac)) + + If the preconditioner has a method named 'update', it will be called + as ``update(x, f)`` after each nonlinear step, with ``x`` giving + the current point, and ``f`` the current function value. + outer_k : int, optional + Size of the subspace kept across LGMRES nonlinear iterations. + See `scipy.sparse.linalg.lgmres` for details. + inner_kwargs : kwargs + Keyword parameters for the "inner" Krylov solver + (defined with `method`). Parameter names must start with + the `inner_` prefix, which will be stripped before being passed on + to the inner method. See, e.g., `scipy.sparse.linalg.gmres` for details. + %(params_extra)s + + See Also + -------- + root : Interface to root finding algorithms for multivariate + functions. See ``method='krylov'`` in particular. + scipy.sparse.linalg.gmres + scipy.sparse.linalg.lgmres + + Notes + ----- + This function implements a Newton-Krylov solver. The basic idea is + to compute the inverse of the Jacobian with an iterative Krylov + method. These methods require only evaluating the Jacobian-vector + products, which are conveniently approximated by a finite difference: + + .. 
math:: J v \approx (f(x + \omega*v/|v|) - f(x)) / \omega + + Due to the use of iterative matrix inverses, these methods can + deal with large nonlinear problems. + + SciPy's `scipy.sparse.linalg` module offers a selection of Krylov + solvers to choose from. The default here is `lgmres`, which is a + variant of restarted GMRES iteration that reuses some of the + information obtained in the previous Newton steps to invert + Jacobians in subsequent steps. + + For a review on Newton-Krylov methods, see for example [1]_, + and for the LGMRES sparse inverse method, see [2]_. + + References + ---------- + .. [1] C. T. Kelley, Solving Nonlinear Equations with Newton's Method, + SIAM, pp.57-83, 2003. + :doi:`10.1137/1.9780898718898.ch3` + .. [2] D.A. Knoll and D.E. Keyes, J. Comp. Phys. 193, 357 (2004). + :doi:`10.1016/j.jcp.2003.08.010` + .. [3] A.H. Baker and E.R. Jessup and T. Manteuffel, + SIAM J. Matrix Anal. Appl. 26, 962 (2005). + :doi:`10.1137/S0895479803422014` + + Examples + -------- + The following functions define a system of nonlinear equations + + >>> def fun(x): + ... return [x[0] + 0.5 * x[1] - 1.0, + ... 0.5 * (x[1] - x[0]) ** 2] + + A solution can be obtained as follows. + + >>> from scipy import optimize + >>> sol = optimize.newton_krylov(fun, [0, 0]) + >>> sol + array([0.66731771, 0.66536458]) + + """ + + def __init__(self, rdiff=None, method='lgmres', inner_maxiter=20, + inner_M=None, outer_k=10, **kw): + self.preconditioner = inner_M + self.rdiff = rdiff + # Note that this retrieves one of the named functions, or otherwise + # uses `method` as is (i.e., for a user-provided callable). + self.method = dict( + bicgstab=scipy.sparse.linalg.bicgstab, + gmres=scipy.sparse.linalg.gmres, + lgmres=scipy.sparse.linalg.lgmres, + cgs=scipy.sparse.linalg.cgs, + minres=scipy.sparse.linalg.minres, + tfqmr=scipy.sparse.linalg.tfqmr, + ).get(method, method) + + self.method_kw = dict(maxiter=inner_maxiter, M=self.preconditioner) + + if self.method is scipy.sparse.linalg.gmres: + # Replace GMRES's outer iteration with Newton steps + self.method_kw['restart'] = inner_maxiter + self.method_kw['maxiter'] = 1 + self.method_kw.setdefault('atol', 0) + elif self.method in (scipy.sparse.linalg.gcrotmk, + scipy.sparse.linalg.bicgstab, + scipy.sparse.linalg.cgs): + self.method_kw.setdefault('atol', 0) + elif self.method is scipy.sparse.linalg.lgmres: + self.method_kw['outer_k'] = outer_k + # Replace LGMRES's outer iteration with Newton steps + self.method_kw['maxiter'] = 1 + # Carry LGMRES's `outer_v` vectors across nonlinear iterations + self.method_kw.setdefault('outer_v', []) + self.method_kw.setdefault('prepend_outer_v', True) + # But don't carry the corresponding Jacobian*v products, in case + # the Jacobian changes a lot in the nonlinear step + # + # XXX: some trust-region inspired ideas might be more efficient... + # See e.g., Brown & Saad. But needs to be implemented separately + # since it's not an inexact Newton method. 
+ self.method_kw.setdefault('store_outer_Av', False) + self.method_kw.setdefault('atol', 0) + + for key, value in kw.items(): + if not key.startswith('inner_'): + raise ValueError("Unknown parameter %s" % key) + self.method_kw[key[6:]] = value + + def _update_diff_step(self): + mx = abs(self.x0).max() + mf = abs(self.f0).max() + self.omega = self.rdiff * max(1, mx) / max(1, mf) + + def matvec(self, v): + nv = norm(v) + if nv == 0: + return 0*v + sc = self.omega / nv + r = (self.func(self.x0 + sc*v) - self.f0) / sc + if not np.all(np.isfinite(r)) and np.all(np.isfinite(v)): + raise ValueError('Function returned non-finite results') + return r + + def solve(self, rhs, tol=0): + if 'rtol' in self.method_kw: + sol, info = self.method(self.op, rhs, **self.method_kw) + else: + sol, info = self.method(self.op, rhs, rtol=tol, **self.method_kw) + return sol + + def update(self, x, f): + self.x0 = x + self.f0 = f + self._update_diff_step() + + # Update also the preconditioner, if possible + if self.preconditioner is not None: + if hasattr(self.preconditioner, 'update'): + self.preconditioner.update(x, f) + + def setup(self, x, f, func): + Jacobian.setup(self, x, f, func) + self.x0 = x + self.f0 = f + self.op = scipy.sparse.linalg.aslinearoperator(self) + + if self.rdiff is None: + self.rdiff = np.finfo(x.dtype).eps ** (1./2) + + self._update_diff_step() + + # Setup also the preconditioner, if possible + if self.preconditioner is not None: + if hasattr(self.preconditioner, 'setup'): + self.preconditioner.setup(x, f, func) + + +#------------------------------------------------------------------------------ +# Wrapper functions +#------------------------------------------------------------------------------ + +def _nonlin_wrapper(name, jac): + """ + Construct a solver wrapper with given name and Jacobian approx. + + It inspects the keyword arguments of ``jac.__init__``, and allows the + same arguments to be used in the wrapper function, in addition to the + keyword arguments of `nonlin_solve`. + + """ + signature = _getfullargspec(jac.__init__) + args, varargs, varkw, defaults, kwonlyargs, kwdefaults, _ = signature + kwargs = list(zip(args[-len(defaults):], defaults)) + kw_str = ", ".join([f"{k}={v!r}" for k, v in kwargs]) + if kw_str: + kw_str = ", " + kw_str + kwkw_str = ", ".join([f"{k}={k}" for k, v in kwargs]) + if kwkw_str: + kwkw_str = kwkw_str + ", " + if kwonlyargs: + raise ValueError('Unexpected signature %s' % signature) + + # Construct the wrapper function so that its keyword arguments + # are visible in pydoc.help etc. 
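+ # (The generated source below is executed with exec() so that each + # wrapper, e.g. broyden1, exposes an explicit signature rather than a + # generic *args/**kwargs one.)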
+ wrapper = """ +def %(name)s(F, xin, iter=None %(kw)s, verbose=False, maxiter=None, + f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, + tol_norm=None, line_search='armijo', callback=None, **kw): + jac = %(jac)s(%(kwkw)s **kw) + return nonlin_solve(F, xin, jac, iter, verbose, maxiter, + f_tol, f_rtol, x_tol, x_rtol, tol_norm, line_search, + callback) +""" + + wrapper = wrapper % dict(name=name, kw=kw_str, jac=jac.__name__, + kwkw=kwkw_str) + ns = {} + ns.update(globals()) + exec(wrapper, ns) + func = ns[name] + func.__doc__ = jac.__doc__ + _set_doc(func) + return func + + +broyden1 = _nonlin_wrapper('broyden1', BroydenFirst) +broyden2 = _nonlin_wrapper('broyden2', BroydenSecond) +anderson = _nonlin_wrapper('anderson', Anderson) +linearmixing = _nonlin_wrapper('linearmixing', LinearMixing) +diagbroyden = _nonlin_wrapper('diagbroyden', DiagBroyden) +excitingmixing = _nonlin_wrapper('excitingmixing', ExcitingMixing) +newton_krylov = _nonlin_wrapper('newton_krylov', KrylovJacobian) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_qap.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_qap.py new file mode 100644 index 0000000000000000000000000000000000000000..094119c0ad6c1e4bba72978390c7273f10ac7fff --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_qap.py @@ -0,0 +1,731 @@ +import numpy as np +import operator +from . import (linear_sum_assignment, OptimizeResult) +from ._optimize import _check_unknown_options + +from scipy._lib._util import check_random_state +import itertools + +QUADRATIC_ASSIGNMENT_METHODS = ['faq', '2opt'] + +def quadratic_assignment(A, B, method="faq", options=None): + r""" + Approximates solution to the quadratic assignment problem and + the graph matching problem. + + Quadratic assignment solves problems of the following form: + + .. math:: + + \min_P & \ {\ \text{trace}(A^T P B P^T)}\\ + \mbox{s.t. } & {P \ \epsilon \ \mathcal{P}}\\ + + where :math:`\mathcal{P}` is the set of all permutation matrices, + and :math:`A` and :math:`B` are square matrices. + + Graph matching tries to *maximize* the same objective function. + This algorithm can be thought of as finding the alignment of the + nodes of two graphs that minimizes the number of induced edge + disagreements, or, in the case of weighted graphs, the sum of squared + edge weight differences. + + Note that the quadratic assignment problem is NP-hard. The results given + here are approximations and are not guaranteed to be optimal. + + + Parameters + ---------- + A : 2-D array, square + The square matrix :math:`A` in the objective function above. + + B : 2-D array, square + The square matrix :math:`B` in the objective function above. + + method : str in {'faq', '2opt'} (default: 'faq') + The algorithm used to solve the problem. + :ref:`'faq' ` (default) and + :ref:`'2opt' ` are available. + + options : dict, optional + A dictionary of solver options. All solvers support the following: + + maximize : bool (default: False) + Maximizes the objective function if ``True``. + + partial_match : 2-D array of integers, optional (default: None) + Fixes part of the matching. Also known as a "seed" [2]_. + + Each row of `partial_match` specifies a pair of matched nodes: + node ``partial_match[i, 0]`` of `A` is matched to node + ``partial_match[i, 1]`` of `B`. The array has shape ``(m, 2)``, + where ``m`` is not greater than the number of nodes, :math:`n`. 
+
+    rng : {None, int, `numpy.random.Generator`,
+           `numpy.random.RandomState`}, optional
+
+        If `rng` is None (or `np.random`), the `numpy.random.RandomState`
+        singleton is used.
+        If `rng` is an int, a new ``RandomState`` instance is used,
+        seeded with `rng`.
+        If `rng` is already a ``Generator`` or ``RandomState`` instance then
+        that instance is used.
+
+    For method-specific options, see
+    :func:`show_options('quadratic_assignment') <show_options>`.
+
+    Returns
+    -------
+    res : OptimizeResult
+        `OptimizeResult` containing the following fields.
+
+        col_ind : 1-D array
+            Column indices corresponding to the best permutation found of the
+            nodes of `B`.
+        fun : float
+            The objective value of the solution.
+        nit : int
+            The number of iterations performed during optimization.
+
+    Notes
+    -----
+    The default method :ref:`'faq' <optimize.qap-faq>` uses the Fast
+    Approximate QAP algorithm [1]_; it typically offers the best combination of
+    speed and accuracy.
+    Method :ref:`'2opt' <optimize.qap-2opt>` can be computationally expensive,
+    but may be a useful alternative, or it can be used to refine the solution
+    returned by another method.
+
+    References
+    ----------
+    .. [1] J.T. Vogelstein, J.M. Conroy, V. Lyzinski, L.J. Podrazik,
+           S.G. Kratzer, E.T. Harley, D.E. Fishkind, R.J. Vogelstein, and
+           C.E. Priebe, "Fast approximate quadratic programming for graph
+           matching," PLOS one, vol. 10, no. 4, p. e0121002, 2015,
+           :doi:`10.1371/journal.pone.0121002`
+
+    .. [2] D. Fishkind, S. Adali, H. Patsolic, L. Meng, D. Singh, V. Lyzinski,
+           C. Priebe, "Seeded graph matching", Pattern Recognit. 87 (2019):
+           203-215, :doi:`10.1016/j.patcog.2018.09.014`
+
+    .. [3] "2-opt," Wikipedia.
+           https://en.wikipedia.org/wiki/2-opt
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.optimize import quadratic_assignment
+    >>> A = np.array([[0, 80, 150, 170], [80, 0, 130, 100],
+    ...               [150, 130, 0, 120], [170, 100, 120, 0]])
+    >>> B = np.array([[0, 5, 2, 7], [0, 0, 3, 8],
+    ...               [0, 0, 0, 3], [0, 0, 0, 0]])
+    >>> res = quadratic_assignment(A, B)
+    >>> print(res)
+     fun: 3260
+     col_ind: [0 3 2 1]
+     nit: 9
+
+    To see the relationship between the returned ``col_ind`` and ``fun``,
+    use ``col_ind`` to form the best permutation matrix found, then evaluate
+    the objective function :math:`f(P) = trace(A^T P B P^T )`.
+
+    >>> perm = res['col_ind']
+    >>> P = np.eye(len(A), dtype=int)[perm]
+    >>> fun = np.trace(A.T @ P @ B @ P.T)
+    >>> print(fun)
+    3260
+
+    Alternatively, to avoid constructing the permutation matrix explicitly,
+    directly permute the rows and columns of the distance matrix.
+
+    >>> fun = np.trace(A.T @ B[perm][:, perm])
+    >>> print(fun)
+    3260
+
+    Although not guaranteed in general, ``quadratic_assignment`` happens to
+    have found the globally optimal solution.
+
+    >>> from itertools import permutations
+    >>> perm_opt, fun_opt = None, np.inf
+    >>> for perm in permutations([0, 1, 2, 3]):
+    ...     perm = np.array(perm)
+    ...     fun = np.trace(A.T @ B[perm][:, perm])
+    ...     if fun < fun_opt:
+    ...         fun_opt, perm_opt = fun, perm
+    >>> print(np.array_equal(perm_opt, res['col_ind']))
+    True
+
+    Here is an example for which the default method,
+    :ref:`'faq' <optimize.qap-faq>`, does not find the global optimum.
+
+    >>> A = np.array([[0, 5, 8, 6], [5, 0, 5, 1],
+    ...               [8, 5, 0, 2], [6, 1, 2, 0]])
+    >>> B = np.array([[0, 1, 8, 4], [1, 0, 5, 2],
+    ...               [8, 5, 0, 5], [4, 2, 5, 0]])
+    >>> res = quadratic_assignment(A, B)
+    >>> print(res)
+     fun: 178
+     col_ind: [1 0 3 2]
+     nit: 13
+
+    If accuracy is important, consider using :ref:`'2opt' <optimize.qap-2opt>`
+    to refine the solution.
+ + >>> guess = np.array([np.arange(len(A)), res.col_ind]).T + >>> res = quadratic_assignment(A, B, method="2opt", + ... options = {'partial_guess': guess}) + >>> print(res) + fun: 176 + col_ind: [1 2 3 0] + nit: 17 + + """ + + if options is None: + options = {} + + method = method.lower() + methods = {"faq": _quadratic_assignment_faq, + "2opt": _quadratic_assignment_2opt} + if method not in methods: + raise ValueError(f"method {method} must be in {methods}.") + res = methods[method](A, B, **options) + return res + + +def _calc_score(A, B, perm): + # equivalent to objective function but avoids matmul + return np.sum(A * B[perm][:, perm]) + + +def _common_input_validation(A, B, partial_match): + A = np.atleast_2d(A) + B = np.atleast_2d(B) + + if partial_match is None: + partial_match = np.array([[], []]).T + partial_match = np.atleast_2d(partial_match).astype(int) + + msg = None + if A.shape[0] != A.shape[1]: + msg = "`A` must be square" + elif B.shape[0] != B.shape[1]: + msg = "`B` must be square" + elif A.ndim != 2 or B.ndim != 2: + msg = "`A` and `B` must have exactly two dimensions" + elif A.shape != B.shape: + msg = "`A` and `B` matrices must be of equal size" + elif partial_match.shape[0] > A.shape[0]: + msg = "`partial_match` can have only as many seeds as there are nodes" + elif partial_match.shape[1] != 2: + msg = "`partial_match` must have two columns" + elif partial_match.ndim != 2: + msg = "`partial_match` must have exactly two dimensions" + elif (partial_match < 0).any(): + msg = "`partial_match` must contain only positive indices" + elif (partial_match >= len(A)).any(): + msg = "`partial_match` entries must be less than number of nodes" + elif (not len(set(partial_match[:, 0])) == len(partial_match[:, 0]) or + not len(set(partial_match[:, 1])) == len(partial_match[:, 1])): + msg = "`partial_match` column entries must be unique" + + if msg is not None: + raise ValueError(msg) + + return A, B, partial_match + + +def _quadratic_assignment_faq(A, B, + maximize=False, partial_match=None, rng=None, + P0="barycenter", shuffle_input=False, maxiter=30, + tol=0.03, **unknown_options): + r"""Solve the quadratic assignment problem (approximately). + + This function solves the Quadratic Assignment Problem (QAP) and the + Graph Matching Problem (GMP) using the Fast Approximate QAP Algorithm + (FAQ) [1]_. + + Quadratic assignment solves problems of the following form: + + .. math:: + + \min_P & \ {\ \text{trace}(A^T P B P^T)}\\ + \mbox{s.t. } & {P \ \epsilon \ \mathcal{P}}\\ + + where :math:`\mathcal{P}` is the set of all permutation matrices, + and :math:`A` and :math:`B` are square matrices. + + Graph matching tries to *maximize* the same objective function. + This algorithm can be thought of as finding the alignment of the + nodes of two graphs that minimizes the number of induced edge + disagreements, or, in the case of weighted graphs, the sum of squared + edge weight differences. + + Note that the quadratic assignment problem is NP-hard. The results given + here are approximations and are not guaranteed to be optimal. + + Parameters + ---------- + A : 2-D array, square + The square matrix :math:`A` in the objective function above. + B : 2-D array, square + The square matrix :math:`B` in the objective function above. + method : str in {'faq', '2opt'} (default: 'faq') + The algorithm used to solve the problem. This is the method-specific + documentation for 'faq'. + :ref:`'2opt' ` is also available. 
+
+    Options
+    -------
+    maximize : bool (default: False)
+        Maximizes the objective function if ``True``.
+    partial_match : 2-D array of integers, optional (default: None)
+        Fixes part of the matching. Also known as a "seed" [2]_.
+
+        Each row of `partial_match` specifies a pair of matched nodes:
+        node ``partial_match[i, 0]`` of `A` is matched to node
+        ``partial_match[i, 1]`` of `B`. The array has shape ``(m, 2)``, where
+        ``m`` is not greater than the number of nodes, :math:`n`.
+
+    rng : {None, int, `numpy.random.Generator`,
+           `numpy.random.RandomState`}, optional
+
+        If `rng` is None (or `np.random`), the `numpy.random.RandomState`
+        singleton is used.
+        If `rng` is an int, a new ``RandomState`` instance is used,
+        seeded with `rng`.
+        If `rng` is already a ``Generator`` or ``RandomState`` instance then
+        that instance is used.
+    P0 : 2-D array, "barycenter", or "randomized" (default: "barycenter")
+        Initial position. Must be a doubly-stochastic matrix [3]_.
+
+        If the initial position is an array, it must be a doubly stochastic
+        matrix of size :math:`m' \times m'` where :math:`m' = n - m`.
+
+        If ``"barycenter"`` (default), the initial position is the barycenter
+        of the Birkhoff polytope (the space of doubly stochastic matrices).
+        This is an :math:`m' \times m'` matrix with all entries equal to
+        :math:`1 / m'`.
+
+        If ``"randomized"``, the initial search position is
+        :math:`P_0 = (J + K) / 2`, where :math:`J` is the barycenter and
+        :math:`K` is a random doubly stochastic matrix.
+    shuffle_input : bool (default: False)
+        Set to `True` to resolve degenerate gradients randomly. For
+        non-degenerate gradients this option has no effect.
+    maxiter : int, positive (default: 30)
+        Integer specifying the max number of Frank-Wolfe iterations performed.
+    tol : float (default: 0.03)
+        Tolerance for termination. Frank-Wolfe iteration terminates when
+        :math:`\frac{\|P_{i}-P_{i+1}\|_F}{\sqrt{m'}} \leq tol`,
+        where :math:`i` is the iteration number.
+
+    Returns
+    -------
+    res : OptimizeResult
+        `OptimizeResult` containing the following fields.
+
+        col_ind : 1-D array
+            Column indices corresponding to the best permutation found of the
+            nodes of `B`.
+        fun : float
+            The objective value of the solution.
+        nit : int
+            The number of Frank-Wolfe iterations performed.
+
+    Notes
+    -----
+    The algorithm may be sensitive to the initial permutation matrix (or
+    search "position") due to the possibility of several local minima
+    within the feasible region. A barycenter initialization is more likely to
+    result in a better solution than a single random initialization. However,
+    calling ``quadratic_assignment`` several times with different random
+    initializations may result in a better optimum at the cost of longer
+    total execution time.
+
+    Examples
+    --------
+    As mentioned above, a barycenter initialization often results in a better
+    solution than a single random initialization.
+
+    >>> from numpy.random import default_rng
+    >>> rng = default_rng()
+    >>> n = 15
+    >>> A = rng.random((n, n))
+    >>> B = rng.random((n, n))
+    >>> res = quadratic_assignment(A, B)  # FAQ is default method
+    >>> print(res.fun)
+    46.871483385480545  # may vary
+
+    >>> options = {"P0": "randomized"}  # use randomized initialization
+    >>> res = quadratic_assignment(A, B, options=options)
+    >>> print(res.fun)
+    47.224831071310625  # may vary
+
+    However, consider running from several randomized initializations and
+    keeping the best result.
+
+    >>> res = min([quadratic_assignment(A, B, options=options)
+    ...            for i in range(30)], key=lambda x: x.fun)
+    >>> print(res.fun)
+    46.671852533681516  # may vary
+
+    The '2-opt' method can be used to further refine the results.
+
+    >>> options = {"partial_guess": np.array([np.arange(n), res.col_ind]).T}
+    >>> res = quadratic_assignment(A, B, method="2opt", options=options)
+    >>> print(res.fun)
+    46.47160735721583  # may vary
+
+    References
+    ----------
+    .. [1] J.T. Vogelstein, J.M. Conroy, V. Lyzinski, L.J. Podrazik,
+           S.G. Kratzer, E.T. Harley, D.E. Fishkind, R.J. Vogelstein, and
+           C.E. Priebe, "Fast approximate quadratic programming for graph
+           matching," PLOS one, vol. 10, no. 4, p. e0121002, 2015,
+           :doi:`10.1371/journal.pone.0121002`
+
+    .. [2] D. Fishkind, S. Adali, H. Patsolic, L. Meng, D. Singh, V. Lyzinski,
+           C. Priebe, "Seeded graph matching", Pattern Recognit. 87 (2019):
+           203-215, :doi:`10.1016/j.patcog.2018.09.014`
+
+    .. [3] "Doubly stochastic Matrix," Wikipedia.
+           https://en.wikipedia.org/wiki/Doubly_stochastic_matrix
+
+    """
+
+    _check_unknown_options(unknown_options)
+
+    maxiter = operator.index(maxiter)
+
+    # ValueError check
+    A, B, partial_match = _common_input_validation(A, B, partial_match)
+
+    msg = None
+    if isinstance(P0, str) and P0 not in {'barycenter', 'randomized'}:
+        msg = "Invalid 'P0' parameter string"
+    elif maxiter <= 0:
+        msg = "'maxiter' must be a positive integer"
+    elif tol <= 0:
+        msg = "'tol' must be a positive float"
+    if msg is not None:
+        raise ValueError(msg)
+
+    rng = check_random_state(rng)
+    n = len(A)  # number of vertices in graphs
+    n_seeds = len(partial_match)  # number of seeds
+    n_unseed = n - n_seeds
+
+    # [1] Algorithm 1 Line 1 - choose initialization
+    if not isinstance(P0, str):
+        P0 = np.atleast_2d(P0)
+        if P0.shape != (n_unseed, n_unseed):
+            msg = "`P0` matrix must have shape m' x m', where m'=n-m"
+        elif ((P0 < 0).any() or not np.allclose(np.sum(P0, axis=0), 1)
+              or not np.allclose(np.sum(P0, axis=1), 1)):
+            msg = "`P0` matrix must be doubly stochastic"
+        if msg is not None:
+            raise ValueError(msg)
+    elif P0 == 'barycenter':
+        P0 = np.ones((n_unseed, n_unseed)) / n_unseed
+    elif P0 == 'randomized':
+        J = np.ones((n_unseed, n_unseed)) / n_unseed
+        # generate a nxn matrix where each entry is a random number [0, 1]
+        # would use rand, but Generators don't have it
+        # would use random, but old mtrand.RandomStates don't have it
+        K = _doubly_stochastic(rng.uniform(size=(n_unseed, n_unseed)))
+        P0 = (J + K) / 2
+
+    # check trivial cases
+    if n == 0 or n_seeds == n:
+        score = _calc_score(A, B, partial_match[:, 1])
+        res = {"col_ind": partial_match[:, 1], "fun": score, "nit": 0}
+        return OptimizeResult(res)
+
+    obj_func_scalar = 1
+    if maximize:
+        obj_func_scalar = -1
+
+    nonseed_B = np.setdiff1d(range(n), partial_match[:, 1])
+    if shuffle_input:
+        nonseed_B = rng.permutation(nonseed_B)
+
+    nonseed_A = np.setdiff1d(range(n), partial_match[:, 0])
+    perm_A = np.concatenate([partial_match[:, 0], nonseed_A])
+    perm_B = np.concatenate([partial_match[:, 1], nonseed_B])
+
+    # definitions according to Seeded Graph Matching [2].
+    A11, A12, A21, A22 = _split_matrix(A[perm_A][:, perm_A], n_seeds)
+    B11, B12, B21, B22 = _split_matrix(B[perm_B][:, perm_B], n_seeds)
+    const_sum = A21 @ B21.T + A12.T @ B12
+
+    P = P0
+    # [1] Algorithm 1 Line 2 - loop while stopping criteria not met
+    for n_iter in range(1, maxiter+1):
+        # [1] Algorithm 1 Line 3 - compute the gradient of f(P) = -tr(APB^tP^t)
+        grad_fp = (const_sum + A22 @ P @ B22.T + A22.T @ P @ B22)
+        # [1] Algorithm 1 Line 4 - get direction Q by solving Eq. 8
+        _, cols = linear_sum_assignment(grad_fp, maximize=maximize)
+        Q = np.eye(n_unseed)[cols]
+
+        # [1] Algorithm 1 Line 5 - compute the step size
+        # Noting that e.g. trace(Ax) = trace(A)*x, expand and re-collect
+        # terms as ax**2 + bx + c. c does not affect location of minimum
+        # and can be ignored. Also, note that trace(A@B) = (A.T*B).sum();
+        # apply where possible for efficiency.
+        R = P - Q
+        b21 = ((R.T @ A21) * B21).sum()
+        b12 = ((R.T @ A12.T) * B12.T).sum()
+        AR22 = A22.T @ R
+        BR22 = B22 @ R.T
+        b22a = (AR22 * B22.T[cols]).sum()
+        b22b = (A22 * BR22[cols]).sum()
+        a = (AR22.T * BR22).sum()
+        b = b21 + b12 + b22a + b22b
+        # critical point of ax^2 + bx + c is at x = -b/(2*a)
+        # if a * obj_func_scalar > 0, it is a minimum
+        # if minimum is not in [0, 1], only endpoints need to be considered
+        if a*obj_func_scalar > 0 and 0 <= -b/(2*a) <= 1:
+            alpha = -b/(2*a)
+        else:
+            alpha = np.argmin([0, (b + a)*obj_func_scalar])
+
+        # [1] Algorithm 1 Line 6 - Update P
+        P_i1 = alpha * P + (1 - alpha) * Q
+        if np.linalg.norm(P - P_i1) / np.sqrt(n_unseed) < tol:
+            P = P_i1
+            break
+        P = P_i1
+    # [1] Algorithm 1 Line 7 - end main loop
+
+    # [1] Algorithm 1 Line 8 - project onto the set of permutation matrices
+    _, col = linear_sum_assignment(P, maximize=True)
+    perm = np.concatenate((np.arange(n_seeds), col + n_seeds))
+
+    unshuffled_perm = np.zeros(n, dtype=int)
+    unshuffled_perm[perm_A] = perm_B[perm]
+
+    score = _calc_score(A, B, unshuffled_perm)
+    res = {"col_ind": unshuffled_perm, "fun": score, "nit": n_iter}
+    return OptimizeResult(res)
+
+
+def _split_matrix(X, n):
+    # definitions according to Seeded Graph Matching [2].
+    upper, lower = X[:n], X[n:]
+    return upper[:, :n], upper[:, n:], lower[:, :n], lower[:, n:]
+
+
+def _doubly_stochastic(P, tol=1e-3):
+    # Adapted from @btaba implementation
+    # https://github.com/btaba/sinkhorn_knopp
+    # of Sinkhorn-Knopp algorithm
+    # https://projecteuclid.org/euclid.pjm/1102992505
+
+    max_iter = 1000
+    c = 1 / P.sum(axis=0)
+    r = 1 / (P @ c)
+    P_eps = P
+
+    for it in range(max_iter):
+        if ((np.abs(P_eps.sum(axis=1) - 1) < tol).all() and
+                (np.abs(P_eps.sum(axis=0) - 1) < tol).all()):
+            # All column/row sums ~= 1 within threshold
+            break
+
+        c = 1 / (r @ P)
+        r = 1 / (P @ c)
+        P_eps = r[:, None] * P * c
+
+    return P_eps
+
+
+def _quadratic_assignment_2opt(A, B, maximize=False, rng=None,
+                               partial_match=None,
+                               partial_guess=None,
+                               **unknown_options):
+    r"""Solve the quadratic assignment problem (approximately).
+
+    This function solves the Quadratic Assignment Problem (QAP) and the
+    Graph Matching Problem (GMP) using the 2-opt algorithm [1]_.
+
+    Quadratic assignment solves problems of the following form:
+
+    .. math::
+
+        \min_P & \ {\ \text{trace}(A^T P B P^T)}\\
+        \mbox{s.t. } & {P \ \epsilon \ \mathcal{P}}\\
+
+    where :math:`\mathcal{P}` is the set of all permutation matrices,
+    and :math:`A` and :math:`B` are square matrices.
+
+    Graph matching tries to *maximize* the same objective function.
+    This algorithm can be thought of as finding the alignment of the
+    nodes of two graphs that minimizes the number of induced edge
+    disagreements, or, in the case of weighted graphs, the sum of squared
+    edge weight differences.
+
+    Note that the quadratic assignment problem is NP-hard. The results given
+    here are approximations and are not guaranteed to be optimal.
+
+    Parameters
+    ----------
+    A : 2-D array, square
+        The square matrix :math:`A` in the objective function above.
+ B : 2-D array, square + The square matrix :math:`B` in the objective function above. + method : str in {'faq', '2opt'} (default: 'faq') + The algorithm used to solve the problem. This is the method-specific + documentation for '2opt'. + :ref:`'faq' ` is also available. + + Options + ------- + maximize : bool (default: False) + Maximizes the objective function if ``True``. + rng : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + partial_match : 2-D array of integers, optional (default: None) + Fixes part of the matching. Also known as a "seed" [2]_. + + Each row of `partial_match` specifies a pair of matched nodes: node + ``partial_match[i, 0]`` of `A` is matched to node + ``partial_match[i, 1]`` of `B`. The array has shape ``(m, 2)``, + where ``m`` is not greater than the number of nodes, :math:`n`. + + .. note:: + `partial_match` must be sorted by the first column. + + partial_guess : 2-D array of integers, optional (default: None) + A guess for the matching between the two matrices. Unlike + `partial_match`, `partial_guess` does not fix the indices; they are + still free to be optimized. + + Each row of `partial_guess` specifies a pair of matched nodes: node + ``partial_guess[i, 0]`` of `A` is matched to node + ``partial_guess[i, 1]`` of `B`. The array has shape ``(m, 2)``, + where ``m`` is not greater than the number of nodes, :math:`n`. + + .. note:: + `partial_guess` must be sorted by the first column. + + Returns + ------- + res : OptimizeResult + `OptimizeResult` containing the following fields. + + col_ind : 1-D array + Column indices corresponding to the best permutation found of the + nodes of `B`. + fun : float + The objective value of the solution. + nit : int + The number of iterations performed during optimization. + + Notes + ----- + This is a greedy algorithm that works similarly to bubble sort: beginning + with an initial permutation, it iteratively swaps pairs of indices to + improve the objective function until no such improvements are possible. + + References + ---------- + .. [1] "2-opt," Wikipedia. + https://en.wikipedia.org/wiki/2-opt + + .. [2] D. Fishkind, S. Adali, H. Patsolic, L. Meng, D. Singh, V. Lyzinski, + C. Priebe, "Seeded graph matching", Pattern Recognit. 
87 (2019): + 203-215, https://doi.org/10.1016/j.patcog.2018.09.014 + + """ + _check_unknown_options(unknown_options) + rng = check_random_state(rng) + A, B, partial_match = _common_input_validation(A, B, partial_match) + + N = len(A) + # check trivial cases + if N == 0 or partial_match.shape[0] == N: + score = _calc_score(A, B, partial_match[:, 1]) + res = {"col_ind": partial_match[:, 1], "fun": score, "nit": 0} + return OptimizeResult(res) + + if partial_guess is None: + partial_guess = np.array([[], []]).T + partial_guess = np.atleast_2d(partial_guess).astype(int) + + msg = None + if partial_guess.shape[0] > A.shape[0]: + msg = ("`partial_guess` can have only as " + "many entries as there are nodes") + elif partial_guess.shape[1] != 2: + msg = "`partial_guess` must have two columns" + elif partial_guess.ndim != 2: + msg = "`partial_guess` must have exactly two dimensions" + elif (partial_guess < 0).any(): + msg = "`partial_guess` must contain only positive indices" + elif (partial_guess >= len(A)).any(): + msg = "`partial_guess` entries must be less than number of nodes" + elif (not len(set(partial_guess[:, 0])) == len(partial_guess[:, 0]) or + not len(set(partial_guess[:, 1])) == len(partial_guess[:, 1])): + msg = "`partial_guess` column entries must be unique" + if msg is not None: + raise ValueError(msg) + + fixed_rows = None + if partial_match.size or partial_guess.size: + # use partial_match and partial_guess for initial permutation, + # but randomly permute the rest. + guess_rows = np.zeros(N, dtype=bool) + guess_cols = np.zeros(N, dtype=bool) + fixed_rows = np.zeros(N, dtype=bool) + fixed_cols = np.zeros(N, dtype=bool) + perm = np.zeros(N, dtype=int) + + rg, cg = partial_guess.T + guess_rows[rg] = True + guess_cols[cg] = True + perm[guess_rows] = cg + + # match overrides guess + rf, cf = partial_match.T + fixed_rows[rf] = True + fixed_cols[cf] = True + perm[fixed_rows] = cf + + random_rows = ~fixed_rows & ~guess_rows + random_cols = ~fixed_cols & ~guess_cols + perm[random_rows] = rng.permutation(np.arange(N)[random_cols]) + else: + perm = rng.permutation(np.arange(N)) + + best_score = _calc_score(A, B, perm) + + i_free = np.arange(N) + if fixed_rows is not None: + i_free = i_free[~fixed_rows] + + better = operator.gt if maximize else operator.lt + n_iter = 0 + done = False + while not done: + # equivalent to nested for loops i in range(N), j in range(i, N) + for i, j in itertools.combinations_with_replacement(i_free, 2): + n_iter += 1 + perm[i], perm[j] = perm[j], perm[i] + score = _calc_score(A, B, perm) + if better(score, best_score): + best_score = score + break + # faster to swap back than to create a new list every time + perm[i], perm[j] = perm[j], perm[i] + else: # no swaps made + done = True + + res = {"col_ind": perm, "fun": best_score, "nit": n_iter} + return OptimizeResult(res) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_remove_redundancy.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_remove_redundancy.py new file mode 100644 index 0000000000000000000000000000000000000000..cb81ad1696b768d2304b2fc42a80cc9678cbde00 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_remove_redundancy.py @@ -0,0 +1,522 @@ +""" +Routines for removing redundant (linearly dependent) equations from linear +programming equality constraints. 
+""" +# Author: Matt Haberland + +import numpy as np +from scipy.linalg import svd +from scipy.linalg.interpolative import interp_decomp +import scipy +from scipy.linalg.blas import dtrsm + + +def _row_count(A): + """ + Counts the number of nonzeros in each row of input array A. + Nonzeros are defined as any element with absolute value greater than + tol = 1e-13. This value should probably be an input to the function. + + Parameters + ---------- + A : 2-D array + An array representing a matrix + + Returns + ------- + rowcount : 1-D array + Number of nonzeros in each row of A + + """ + tol = 1e-13 + return np.array((abs(A) > tol).sum(axis=1)).flatten() + + +def _get_densest(A, eligibleRows): + """ + Returns the index of the densest row of A. Ignores rows that are not + eligible for consideration. + + Parameters + ---------- + A : 2-D array + An array representing a matrix + eligibleRows : 1-D logical array + Values indicate whether the corresponding row of A is eligible + to be considered + + Returns + ------- + i_densest : int + Index of the densest row in A eligible for consideration + + """ + rowCounts = _row_count(A) + return np.argmax(rowCounts * eligibleRows) + + +def _remove_zero_rows(A, b): + """ + Eliminates trivial equations from system of equations defined by Ax = b + and identifies trivial infeasibilities + + Parameters + ---------- + A : 2-D array + An array representing the left-hand side of a system of equations + b : 1-D array + An array representing the right-hand side of a system of equations + + Returns + ------- + A : 2-D array + An array representing the left-hand side of a system of equations + b : 1-D array + An array representing the right-hand side of a system of equations + status: int + An integer indicating the status of the removal operation + 0: No infeasibility identified + 2: Trivially infeasible + message : str + A string descriptor of the exit status of the optimization. + + """ + status = 0 + message = "" + i_zero = _row_count(A) == 0 + A = A[np.logical_not(i_zero), :] + if not np.allclose(b[i_zero], 0): + status = 2 + message = "There is a zero row in A_eq with a nonzero corresponding " \ + "entry in b_eq. The problem is infeasible." + b = b[np.logical_not(i_zero)] + return A, b, status, message + + +def bg_update_dense(plu, perm_r, v, j): + LU, p = plu + + vperm = v[perm_r] + u = dtrsm(1, LU, vperm, lower=1, diag=1) + LU[:j+1, j] = u[:j+1] + l = u[j+1:] + piv = LU[j, j] + LU[j+1:, j] += (l/piv) + return LU, p + + +def _remove_redundancy_pivot_dense(A, rhs, true_rank=None): + """ + Eliminates redundant equations from system of equations defined by Ax = b + and identifies infeasibilities. + + Parameters + ---------- + A : 2-D sparse matrix + An matrix representing the left-hand side of a system of equations + rhs : 1-D array + An array representing the right-hand side of a system of equations + + Returns + ------- + A : 2-D sparse matrix + A matrix representing the left-hand side of a system of equations + rhs : 1-D array + An array representing the right-hand side of a system of equations + status: int + An integer indicating the status of the system + 0: No infeasibility identified + 2: Trivially infeasible + message : str + A string descriptor of the exit status of the optimization. + + References + ---------- + .. [2] Andersen, Erling D. "Finding all linearly dependent rows in + large-scale linear programming." Optimization Methods and Software + 6.3 (1995): 219-227. 
+ + """ + tolapiv = 1e-8 + tolprimal = 1e-8 + status = 0 + message = "" + inconsistent = ("There is a linear combination of rows of A_eq that " + "results in zero, suggesting a redundant constraint. " + "However the same linear combination of b_eq is " + "nonzero, suggesting that the constraints conflict " + "and the problem is infeasible.") + A, rhs, status, message = _remove_zero_rows(A, rhs) + + if status != 0: + return A, rhs, status, message + + m, n = A.shape + + v = list(range(m)) # Artificial column indices. + b = list(v) # Basis column indices. + # This is better as a list than a set because column order of basis matrix + # needs to be consistent. + d = [] # Indices of dependent rows + perm_r = None + + A_orig = A + A = np.zeros((m, m + n), order='F') + np.fill_diagonal(A, 1) + A[:, m:] = A_orig + e = np.zeros(m) + + js_candidates = np.arange(m, m+n, dtype=int) # candidate columns for basis + # manual masking was faster than masked array + js_mask = np.ones(js_candidates.shape, dtype=bool) + + # Implements basic algorithm from [2] + # Uses some of the suggested improvements (removing zero rows and + # Bartels-Golub update idea). + # Removing column singletons would be easy, but it is not as important + # because the procedure is performed only on the equality constraint + # matrix from the original problem - not on the canonical form matrix, + # which would have many more column singletons due to slack variables + # from the inequality constraints. + # The thoughts on "crashing" the initial basis are only really useful if + # the matrix is sparse. + + lu = np.eye(m, order='F'), np.arange(m) # initial LU is trivial + perm_r = lu[1] + for i in v: + + e[i] = 1 + if i > 0: + e[i-1] = 0 + + try: # fails for i==0 and any time it gets ill-conditioned + j = b[i-1] + lu = bg_update_dense(lu, perm_r, A[:, j], i-1) + except Exception: + lu = scipy.linalg.lu_factor(A[:, b]) + LU, p = lu + perm_r = list(range(m)) + for i1, i2 in enumerate(p): + perm_r[i1], perm_r[i2] = perm_r[i2], perm_r[i1] + + pi = scipy.linalg.lu_solve(lu, e, trans=1) + + js = js_candidates[js_mask] + batch = 50 + + # This is a tiny bit faster than looping over columns individually, + # like for j in js: if abs(A[:,j].transpose().dot(pi)) > tolapiv: + for j_index in range(0, len(js), batch): + j_indices = js[j_index: min(j_index+batch, len(js))] + + c = abs(A[:, j_indices].transpose().dot(pi)) + if (c > tolapiv).any(): + j = js[j_index + np.argmax(c)] # very independent column + b[i] = j + js_mask[j-m] = False + break + else: + bibar = pi.T.dot(rhs.reshape(-1, 1)) + bnorm = np.linalg.norm(rhs) + if abs(bibar)/(1+bnorm) > tolprimal: # inconsistent + status = 2 + message = inconsistent + return A_orig, rhs, status, message + else: # dependent + d.append(i) + if true_rank is not None and len(d) == m - true_rank: + break # found all redundancies + + keep = set(range(m)) + keep = list(keep - set(d)) + return A_orig[keep, :], rhs[keep], status, message + + +def _remove_redundancy_pivot_sparse(A, rhs): + """ + Eliminates redundant equations from system of equations defined by Ax = b + and identifies infeasibilities. 
+ + Parameters + ---------- + A : 2-D sparse matrix + An matrix representing the left-hand side of a system of equations + rhs : 1-D array + An array representing the right-hand side of a system of equations + + Returns + ------- + A : 2-D sparse matrix + A matrix representing the left-hand side of a system of equations + rhs : 1-D array + An array representing the right-hand side of a system of equations + status: int + An integer indicating the status of the system + 0: No infeasibility identified + 2: Trivially infeasible + message : str + A string descriptor of the exit status of the optimization. + + References + ---------- + .. [2] Andersen, Erling D. "Finding all linearly dependent rows in + large-scale linear programming." Optimization Methods and Software + 6.3 (1995): 219-227. + + """ + + tolapiv = 1e-8 + tolprimal = 1e-8 + status = 0 + message = "" + inconsistent = ("There is a linear combination of rows of A_eq that " + "results in zero, suggesting a redundant constraint. " + "However the same linear combination of b_eq is " + "nonzero, suggesting that the constraints conflict " + "and the problem is infeasible.") + A, rhs, status, message = _remove_zero_rows(A, rhs) + + if status != 0: + return A, rhs, status, message + + m, n = A.shape + + v = list(range(m)) # Artificial column indices. + b = list(v) # Basis column indices. + # This is better as a list than a set because column order of basis matrix + # needs to be consistent. + k = set(range(m, m+n)) # Structural column indices. + d = [] # Indices of dependent rows + + A_orig = A + A = scipy.sparse.hstack((scipy.sparse.eye(m), A)).tocsc() + e = np.zeros(m) + + # Implements basic algorithm from [2] + # Uses only one of the suggested improvements (removing zero rows). + # Removing column singletons would be easy, but it is not as important + # because the procedure is performed only on the equality constraint + # matrix from the original problem - not on the canonical form matrix, + # which would have many more column singletons due to slack variables + # from the inequality constraints. + # The thoughts on "crashing" the initial basis sound useful, but the + # description of the procedure seems to assume a lot of familiarity with + # the subject; it is not very explicit. I already went through enough + # trouble getting the basic algorithm working, so I was not interested in + # trying to decipher this, too. (Overall, the paper is fraught with + # mistakes and ambiguities - which is strange, because the rest of + # Andersen's papers are quite good.) + # I tried and tried and tried to improve performance using the + # Bartels-Golub update. It works, but it's only practical if the LU + # factorization can be specialized as described, and that is not possible + # until the SciPy SuperLU interface permits control over column + # permutation - see issue #7700. + + for i in v: + B = A[:, b] + + e[i] = 1 + if i > 0: + e[i-1] = 0 + + pi = scipy.sparse.linalg.spsolve(B.transpose(), e).reshape(-1, 1) + + js = list(k-set(b)) # not efficient, but this is not the time sink... + + # Due to overhead, it tends to be faster (for problems tested) to + # compute the full matrix-vector product rather than individual + # vector-vector products (with the chance of terminating as soon + # as any are nonzero). For very large matrices, it might be worth + # it to compute, say, 100 or 1000 at a time and stop when a nonzero + # is found. 
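+        # A sketch of that batched idea (illustrative only, not implemented
+        # here; the dense routine above uses the same pattern):
+        #
+        #     batch = 100
+        #     for start in range(0, len(js), batch):
+        #         block = js[start:start + batch]
+        #         c = np.abs(A[:, block].transpose().dot(pi))
+        #         if (c > tolapiv).any():
+        #             j = block[int(np.argmax(c))]
+        #             break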
+
+        c = (np.abs(A[:, js].transpose().dot(pi)) > tolapiv).nonzero()[0]
+        if len(c) > 0:  # independent
+            j = js[c[0]]
+            # in a previous commit, the previous line was changed to choose
+            # index j corresponding with the maximum dot product.
+            # While this avoided issues with almost
+            # singular matrices, it slowed the routine in most NETLIB tests.
+            # I think this is because these columns were denser than the
+            # first column with nonzero dot product (c[0]).
+            # It would be nice to have a heuristic that balances sparsity with
+            # high dot product, but I don't think it's worth the time to
+            # develop one right now. Bartels-Golub update is a much higher
+            # priority.
+            b[i] = j  # replace artificial column
+        else:
+            bibar = pi.T.dot(rhs.reshape(-1, 1))
+            bnorm = np.linalg.norm(rhs)
+            if abs(bibar)/(1 + bnorm) > tolprimal:
+                status = 2
+                message = inconsistent
+                return A_orig, rhs, status, message
+            else:  # dependent
+                d.append(i)
+
+    keep = set(range(m))
+    keep = list(keep - set(d))
+    return A_orig[keep, :], rhs[keep], status, message
+
+
+def _remove_redundancy_svd(A, b):
+    """
+    Eliminates redundant equations from system of equations defined by Ax = b
+    and identifies infeasibilities.
+
+    Parameters
+    ----------
+    A : 2-D array
+        An array representing the left-hand side of a system of equations
+    b : 1-D array
+        An array representing the right-hand side of a system of equations
+
+    Returns
+    -------
+    A : 2-D array
+        An array representing the left-hand side of a system of equations
+    b : 1-D array
+        An array representing the right-hand side of a system of equations
+    status: int
+        An integer indicating the status of the system
+        0: No infeasibility identified
+        2: Trivially infeasible
+    message : str
+        A string descriptor of the exit status of the optimization.
+
+    References
+    ----------
+    .. [2] Andersen, Erling D. "Finding all linearly dependent rows in
+           large-scale linear programming." Optimization Methods and Software
+           6.3 (1995): 219-227.
+
+    """
+
+    A, b, status, message = _remove_zero_rows(A, b)
+
+    if status != 0:
+        return A, b, status, message
+
+    U, s, Vh = svd(A)
+    eps = np.finfo(float).eps
+    tol = s.max() * max(A.shape) * eps
+
+    m, n = A.shape
+    s_min = s[-1] if m <= n else 0
+
+    # this algorithm is faster than that of [2] when the nullspace is small
+    # but it could probably be improved by randomized algorithms and with
+    # a sparse implementation.
+    # it relies on repeated singular value decomposition to find linearly
+    # dependent rows (as identified by columns of U that correspond with zero
+    # singular values). Unfortunately, only one row can be removed per
+    # decomposition (I tried otherwise; doing so can cause problems.)
+    # It would be nice if we could do truncated SVD like sp.sparse.linalg.svds
+    # but that function is unreliable at finding singular values near zero.
+    # Finding max eigenvalue L of A A^T, then largest eigenvalue (and
+    # associated eigenvector) of -A A^T + L I (I is identity) via power
+    # iteration would also work in theory, but is only efficient if the
+    # smallest nonzero eigenvalue of A A^T is close to the largest nonzero
+    # eigenvalue.
+
+    while abs(s_min) < tol:
+        v = U[:, -1]  # TODO: return these so user can eliminate from problem?
+        # rows need to be represented in significant amount
+        eligibleRows = np.abs(v) > tol * 10e6
+        if not np.any(eligibleRows) or np.any(np.abs(v.dot(A)) > tol):
+            status = 4
+            message = ("Due to numerical issues, redundant equality "
+                       "constraints could not be removed automatically. "
+                       "Try providing your constraint matrices as sparse "
+                       "matrices to activate sparse presolve, try turning "
+                       "off redundancy removal, or try turning off presolve "
+                       "altogether.")
+            break
+        if np.any(np.abs(v.dot(b)) > tol * 100):  # factor of 100 to fix 10038 and 10349
+            status = 2
+            message = ("There is a linear combination of rows of A_eq that "
+                       "results in zero, suggesting a redundant constraint. "
+                       "However the same linear combination of b_eq is "
+                       "nonzero, suggesting that the constraints conflict "
+                       "and the problem is infeasible.")
+            break
+
+        i_remove = _get_densest(A, eligibleRows)
+        A = np.delete(A, i_remove, axis=0)
+        b = np.delete(b, i_remove)
+        U, s, Vh = svd(A)
+        m, n = A.shape
+        s_min = s[-1] if m <= n else 0
+
+    return A, b, status, message
+
+
+def _remove_redundancy_id(A, rhs, rank=None, randomized=True):
+    """Eliminates redundant equations from a system of equations.
+
+    Eliminates redundant equations from system of equations defined by Ax = b
+    and identifies infeasibilities.
+
+    Parameters
+    ----------
+    A : 2-D array
+        An array representing the left-hand side of a system of equations
+    rhs : 1-D array
+        An array representing the right-hand side of a system of equations
+    rank : int, optional
+        The rank of A
+    randomized: bool, optional
+        True for randomized interpolative decomposition
+
+    Returns
+    -------
+    A : 2-D array
+        An array representing the left-hand side of a system of equations
+    rhs : 1-D array
+        An array representing the right-hand side of a system of equations
+    status: int
+        An integer indicating the status of the system
+        0: No infeasibility identified
+        2: Trivially infeasible
+    message : str
+        A string descriptor of the exit status of the optimization.
+
+    """
+
+    status = 0
+    message = ""
+    inconsistent = ("There is a linear combination of rows of A_eq that "
+                    "results in zero, suggesting a redundant constraint. "
+                    "However the same linear combination of b_eq is "
+                    "nonzero, suggesting that the constraints conflict "
+                    "and the problem is infeasible.")
+
+    A, rhs, status, message = _remove_zero_rows(A, rhs)
+
+    if status != 0:
+        return A, rhs, status, message
+
+    m, n = A.shape
+
+    k = rank
+    if rank is None:
+        k = np.linalg.matrix_rank(A)
+
+    idx, proj = interp_decomp(A.T, k, rand=randomized)
+
+    # first k entries in idx are indices of the independent rows
+    # remaining entries are the indices of the m-k dependent rows
+    # proj provides linear combinations of rows of A2 that form the
+    # remaining m-k (dependent) rows. The same linear combination of entries
+    # in rhs2 must give the remaining m-k entries. If not, the system is
+    # inconsistent, and the problem is infeasible.
+    if not np.allclose(rhs[idx[:k]] @ proj, rhs[idx[k:]]):
+        status = 2
+        message = inconsistent
+
+    # sort indices because the other redundancy removal routines leave rows
+    # in original order and tests were written with that in mind
+    idx = sorted(idx[:k])
+    A2 = A[idx, :]
+    rhs2 = rhs[idx]
+    return A2, rhs2, status, message
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_krylov.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_krylov.py
new file mode 100644
index 0000000000000000000000000000000000000000..54e861ae2de02164966a33c437e5fdb08ba3006c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_krylov.py
@@ -0,0 +1,65 @@
+from ._trustregion import (_minimize_trust_region)
+from ._trlib import (get_trlib_quadratic_subproblem)
+
+__all__ = ['_minimize_trust_krylov']
+
+def _minimize_trust_krylov(fun, x0, args=(), jac=None, hess=None, hessp=None,
+                           inexact=True, **trust_region_options):
+    """
+    Minimization of a scalar function of one or more variables using
+    a nearly exact trust-region algorithm that only requires matrix
+    vector products with the Hessian matrix.
+
+    .. versionadded:: 1.0.0
+
+    Options
+    -------
+    inexact : bool, optional
+        Accuracy to solve subproblems. If True, requires fewer nonlinear
+        iterations, but more vector products.
+    """
+
+    if jac is None:
+        raise ValueError('Jacobian is required for Krylov trust-region '
+                         'minimization')
+    if hess is None and hessp is None:
+        raise ValueError('Either the Hessian or the Hessian-vector product '
+                         'is required for Krylov trust-region minimization')
+
+    # tol_rel specifies the termination tolerance relative to the initial
+    # gradient norm in the Krylov subspace iteration.
+
+    # - tol_rel_i specifies the tolerance for interior convergence.
+    # - tol_rel_b specifies the tolerance for boundary convergence.
+    #   in nonlinear programming applications it is not necessary to solve
+    #   the boundary case as exactly as the interior case.
+
+    # - setting tol_rel_i=-2 leads to a forcing sequence in the Krylov
+    #   subspace iteration leading to quadratic convergence if eventually
+    #   the trust region stays inactive.
+    # - setting tol_rel_b=-3 leads to a forcing sequence in the Krylov
+    #   subspace iteration leading to superlinear convergence as long
+    #   as the iterates hit the trust region boundary.
+
+    # For details consult the documentation of trlib_krylov_min
+    # in _trlib/trlib_krylov.h
+    #
+    # Optimality of this choice of parameters among a range of possibilities
+    # has been tested on the unconstrained subset of the CUTEst library.
+
+    if inexact:
+        return _minimize_trust_region(fun, x0, args=args, jac=jac,
+                                      hess=hess, hessp=hessp,
+                                      subproblem=get_trlib_quadratic_subproblem(
+                                          tol_rel_i=-2.0, tol_rel_b=-3.0,
+                                          disp=trust_region_options.get('disp', False)
+                                      ),
+                                      **trust_region_options)
+    else:
+        return _minimize_trust_region(fun, x0, args=args, jac=jac,
+                                      hess=hess, hessp=hessp,
+                                      subproblem=get_trlib_quadratic_subproblem(
+                                          tol_rel_i=1e-8, tol_rel_b=1e-6,
+                                          disp=trust_region_options.get('disp', False)
+                                      ),
+                                      **trust_region_options)
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_ncg.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_ncg.py
new file mode 100644
index 0000000000000000000000000000000000000000..fed17ff8b84eaf019c0ad69a03f260ca674477ad
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_trustregion_ncg.py
@@ -0,0 +1,126 @@
+"""Newton-CG trust-region optimization."""
+import math
+
+import numpy as np
+import scipy.linalg
+from ._trustregion import (_minimize_trust_region, BaseQuadraticSubproblem)
+
+__all__ = []
+
+
+def _minimize_trust_ncg(fun, x0, args=(), jac=None, hess=None, hessp=None,
+                        **trust_region_options):
+    """
+    Minimization of a scalar function of one or more variables using
+    the Newton conjugate gradient trust-region algorithm.
+
+    Options
+    -------
+    initial_trust_radius : float
+        Initial trust-region radius.
+    max_trust_radius : float
+        Maximum value of the trust-region radius. No steps that are longer
+        than this value will be proposed.
+    eta : float
+        Trust region related acceptance stringency for proposed steps.
+    gtol : float
+        Gradient norm must be less than `gtol` before successful
+        termination.
+
+    """
+    if jac is None:
+        raise ValueError('Jacobian is required for Newton-CG trust-region '
+                         'minimization')
+    if hess is None and hessp is None:
+        raise ValueError('Either the Hessian or the Hessian-vector product '
+                         'is required for Newton-CG trust-region minimization')
+    return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess,
+                                  hessp=hessp, subproblem=CGSteihaugSubproblem,
+                                  **trust_region_options)
+
+
+class CGSteihaugSubproblem(BaseQuadraticSubproblem):
+    """Quadratic subproblem solved by a conjugate gradient method"""
+    def solve(self, trust_radius):
+        """
+        Solve the subproblem using a conjugate gradient method.
+
+        Parameters
+        ----------
+        trust_radius : float
+            We are allowed to wander only this far away from the origin.
+
+        Returns
+        -------
+        p : ndarray
+            The proposed step.
+        hits_boundary : bool
+            True if the proposed step is on the boundary of the trust region.
+
+        Notes
+        -----
+        This is algorithm (7.2) of Nocedal and Wright 2nd edition.
+        Only the function that computes the Hessian-vector product is required.
+        The Hessian itself is not required, and the Hessian does
+        not need to be positive semidefinite.
+        """
+
+        # get the norm of jacobian and define the origin
+        p_origin = np.zeros_like(self.jac)
+
+        # define a default tolerance
+        tolerance = min(0.5, math.sqrt(self.jac_mag)) * self.jac_mag
+
+        # If the gradient is already small enough, accept the zero step
+        # without entering the CG iteration.
+        if self.jac_mag < tolerance:
+            hits_boundary = False
+            return p_origin, hits_boundary
+
+        # init the state for the first iteration
+        z = p_origin
+        r = self.jac
+        d = -r
+
+        # Search for the min of the approximation of the objective function.
+        while True:
+
+            # do an iteration
+            Bd = self.hessp(d)
+            dBd = np.dot(d, Bd)
+            if dBd <= 0:
+                # The search direction has nonpositive curvature:
+                # look at the two boundary points.
+ # Find both values of t to get the boundary points such that + # ||z + t d|| == trust_radius + # and then choose the one with the predicted min value. + ta, tb = self.get_boundaries_intersections(z, d, trust_radius) + pa = z + ta * d + pb = z + tb * d + if self(pa) < self(pb): + p_boundary = pa + else: + p_boundary = pb + hits_boundary = True + return p_boundary, hits_boundary + r_squared = np.dot(r, r) + alpha = r_squared / dBd + z_next = z + alpha * d + if scipy.linalg.norm(z_next) >= trust_radius: + # Find t >= 0 to get the boundary point such that + # ||z + t d|| == trust_radius + ta, tb = self.get_boundaries_intersections(z, d, trust_radius) + p_boundary = z + tb * d + hits_boundary = True + return p_boundary, hits_boundary + r_next = r + alpha * Bd + r_next_squared = np.dot(r_next, r_next) + if math.sqrt(r_next_squared) < tolerance: + hits_boundary = False + return z_next, hits_boundary + beta_next = r_next_squared / r_squared + d_next = -r_next + beta_next * d + + # update the state for the next iteration + z = z_next + r = r_next + d = d_next diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_tstutils.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_tstutils.py new file mode 100644 index 0000000000000000000000000000000000000000..344c6764a37471ed032ecd0b05f7d93e9f4a6536 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_tstutils.py @@ -0,0 +1,968 @@ +r""" +Parameters used in test and benchmark methods. + +Collections of test cases suitable for testing 1-D root-finders + 'original': The original benchmarking functions. + Real-valued functions of real-valued inputs on an interval + with a zero. + f1, .., f3 are continuous and infinitely differentiable + f4 has a left- and right- discontinuity at the root + f5 has a root at 1 replacing a 1st order pole + f6 is randomly positive on one side of the root, + randomly negative on the other. + f4 - f6 are not continuous at the root. + + 'aps': The test problems in the 1995 paper + TOMS "Algorithm 748: Enclosing Zeros of Continuous Functions" + by Alefeld, Potra and Shi. Real-valued functions of + real-valued inputs on an interval with a zero. + Suitable for methods which start with an enclosing interval, and + derivatives up to 2nd order. + + 'complex': Some complex-valued functions of complex-valued inputs. + No enclosing bracket is provided. + Suitable for methods which use one or more starting values, and + derivatives up to 2nd order. + + The test cases are provided as a list of dictionaries. The dictionary + keys will be a subset of: + ["f", "fprime", "fprime2", "args", "bracket", "smoothness", + "a", "b", "x0", "x1", "root", "ID"] +""" + +# Sources: +# [1] Alefeld, G. E. and Potra, F. A. and Shi, Yixun, +# "Algorithm 748: Enclosing Zeros of Continuous Functions", +# ACM Trans. Math. Softw. Volume 221(1995) +# doi = {10.1145/210089.210111}, +# [2] Chandrupatla, Tirupathi R. "A new hybrid quadratic/bisection algorithm +# for finding the zero of a nonlinear function without using derivatives." +# Advances in Engineering Software 28.3 (1997): 145-149. 
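+# A minimal usage sketch (illustrative only; assumes SciPy's `brentq` and the
+# dictionary keys listed in the module docstring above):
+#
+#     from scipy.optimize import brentq
+#
+#     def run_case(case):
+#         a, b = case["bracket"]  # enclosing interval for bracketing methods
+#         x = brentq(case["f"], a, b, args=case["args"])
+#         return case["ID"], x, abs(x - case["root"])
+#
+#     results = [run_case(case) for case in _ORIGINAL_TESTS_DICTS]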
+ +from random import random + +import numpy as np + +from scipy.optimize import _zeros_py as cc + +# "description" refers to the original functions +description = """ +f2 is a symmetric parabola, x**2 - 1 +f3 is a quartic polynomial with large hump in interval +f4 is step function with a discontinuity at 1 +f5 is a hyperbola with vertical asymptote at 1 +f6 has random values positive to left of 1, negative to right + +Of course, these are not real problems. They just test how the +'good' solvers behave in bad circumstances where bisection is +really the best. A good solver should not be much worse than +bisection in such circumstance, while being faster for smooth +monotone sorts of functions. +""" + + +def f1(x): + r"""f1 is a quadratic with roots at 0 and 1""" + return x * (x - 1.) + + +def f1_fp(x): + return 2 * x - 1 + + +def f1_fpp(x): + return 2 + + +def f2(x): + r"""f2 is a symmetric parabola, x**2 - 1""" + return x**2 - 1 + + +def f2_fp(x): + return 2 * x + + +def f2_fpp(x): + return 2 + + +def f3(x): + r"""A quartic with roots at 0, 1, 2 and 3""" + return x * (x - 1.) * (x - 2.) * (x - 3.) # x**4 - 6x**3 + 11x**2 - 6x + + +def f3_fp(x): + return 4 * x**3 - 18 * x**2 + 22 * x - 6 + + +def f3_fpp(x): + return 12 * x**2 - 36 * x + 22 + + +def f4(x): + r"""Piecewise linear, left- and right- discontinuous at x=1, the root.""" + if x > 1: + return 1.0 + .1 * x + if x < 1: + return -1.0 + .1 * x + return 0 + + +def f5(x): + r""" + Hyperbola with a pole at x=1, but pole replaced with 0. Not continuous at root. + """ + if x != 1: + return 1.0 / (1. - x) + return 0 + + +# f6(x) returns random value. Without memoization, calling twice with the +# same x returns different values, hence a "random value", not a +# "function with random values" +_f6_cache = {} +def f6(x): + v = _f6_cache.get(x, None) + if v is None: + if x > 1: + v = random() + elif x < 1: + v = -random() + else: + v = 0 + _f6_cache[x] = v + return v + + +# Each Original test case has +# - a function and its two derivatives, +# - additional arguments, +# - a bracket enclosing a root, +# - the order of differentiability (smoothness) on this interval +# - a starting value for methods which don't require a bracket +# - the root (inside the bracket) +# - an Identifier of the test case + +_ORIGINAL_TESTS_KEYS = [ + "f", "fprime", "fprime2", "args", "bracket", "smoothness", "x0", "root", "ID" +] +_ORIGINAL_TESTS = [ + [f1, f1_fp, f1_fpp, (), [0.5, np.sqrt(3)], np.inf, 0.6, 1.0, "original.01.00"], + [f2, f2_fp, f2_fpp, (), [0.5, np.sqrt(3)], np.inf, 0.6, 1.0, "original.02.00"], + [f3, f3_fp, f3_fpp, (), [0.5, np.sqrt(3)], np.inf, 0.6, 1.0, "original.03.00"], + [f4, None, None, (), [0.5, np.sqrt(3)], -1, 0.6, 1.0, "original.04.00"], + [f5, None, None, (), [0.5, np.sqrt(3)], -1, 0.6, 1.0, "original.05.00"], + [f6, None, None, (), [0.5, np.sqrt(3)], -np.inf, 0.6, 1.0, "original.05.00"] +] + +_ORIGINAL_TESTS_DICTS = [ + dict(zip(_ORIGINAL_TESTS_KEYS, testcase)) for testcase in _ORIGINAL_TESTS +] + +# ################## +# "APS" test cases +# Functions and test cases that appear in [1] + + +def aps01_f(x): + r"""Straightforward sum of trigonometric function and polynomial""" + return np.sin(x) - x / 2 + + +def aps01_fp(x): + return np.cos(x) - 1.0 / 2 + + +def aps01_fpp(x): + return -np.sin(x) + + +def aps02_f(x): + r"""poles at x=n**2, 1st and 2nd derivatives at root are also close to 0""" + ii = np.arange(1, 21) + return -2 * np.sum((2 * ii - 5)**2 / (x - ii**2)**3) + + +def aps02_fp(x): + ii = np.arange(1, 21) + return 6 * np.sum((2 * 
ii - 5)**2 / (x - ii**2)**4) + + +def aps02_fpp(x): + ii = np.arange(1, 21) + return 24 * np.sum((2 * ii - 5)**2 / (x - ii**2)**5) + + +def aps03_f(x, a, b): + r"""Rapidly changing at the root""" + return a * x * np.exp(b * x) + + +def aps03_fp(x, a, b): + return a * (b * x + 1) * np.exp(b * x) + + +def aps03_fpp(x, a, b): + return a * (b * (b * x + 1) + b) * np.exp(b * x) + + +def aps04_f(x, n, a): + r"""Medium-degree polynomial""" + return x**n - a + + +def aps04_fp(x, n, a): + return n * x**(n - 1) + + +def aps04_fpp(x, n, a): + return n * (n - 1) * x**(n - 2) + + +def aps05_f(x): + r"""Simple Trigonometric function""" + return np.sin(x) - 1.0 / 2 + + +def aps05_fp(x): + return np.cos(x) + + +def aps05_fpp(x): + return -np.sin(x) + + +def aps06_f(x, n): + r"""Exponential rapidly changing from -1 to 1 at x=0""" + return 2 * x * np.exp(-n) - 2 * np.exp(-n * x) + 1 + + +def aps06_fp(x, n): + return 2 * np.exp(-n) + 2 * n * np.exp(-n * x) + + +def aps06_fpp(x, n): + return -2 * n * n * np.exp(-n * x) + + +def aps07_f(x, n): + r"""Upside down parabola with parametrizable height""" + return (1 + (1 - n)**2) * x - (1 - n * x)**2 + + +def aps07_fp(x, n): + return (1 + (1 - n)**2) + 2 * n * (1 - n * x) + + +def aps07_fpp(x, n): + return -2 * n * n + + +def aps08_f(x, n): + r"""Degree n polynomial""" + return x * x - (1 - x)**n + + +def aps08_fp(x, n): + return 2 * x + n * (1 - x)**(n - 1) + + +def aps08_fpp(x, n): + return 2 - n * (n - 1) * (1 - x)**(n - 2) + + +def aps09_f(x, n): + r"""Upside down quartic with parametrizable height""" + return (1 + (1 - n)**4) * x - (1 - n * x)**4 + + +def aps09_fp(x, n): + return (1 + (1 - n)**4) + 4 * n * (1 - n * x)**3 + + +def aps09_fpp(x, n): + return -12 * n * (1 - n * x)**2 + + +def aps10_f(x, n): + r"""Exponential plus a polynomial""" + return np.exp(-n * x) * (x - 1) + x**n + + +def aps10_fp(x, n): + return np.exp(-n * x) * (-n * (x - 1) + 1) + n * x**(n - 1) + + +def aps10_fpp(x, n): + return (np.exp(-n * x) * (-n * (-n * (x - 1) + 1) + -n * x) + + n * (n - 1) * x**(n - 2)) + + +def aps11_f(x, n): + r"""Rational function with a zero at x=1/n and a pole at x=0""" + return (n * x - 1) / ((n - 1) * x) + + +def aps11_fp(x, n): + return 1 / (n - 1) / x**2 + + +def aps11_fpp(x, n): + return -2 / (n - 1) / x**3 + + +def aps12_f(x, n): + r"""nth root of x, with a zero at x=n""" + return np.power(x, 1.0 / n) - np.power(n, 1.0 / n) + + +def aps12_fp(x, n): + return np.power(x, (1.0 - n) / n) / n + + +def aps12_fpp(x, n): + return np.power(x, (1.0 - 2 * n) / n) * (1.0 / n) * (1.0 - n) / n + + +_MAX_EXPABLE = np.log(np.finfo(float).max) + + +def aps13_f(x): + r"""Function with *all* derivatives 0 at the root""" + if x == 0: + return 0 + # x2 = 1.0/x**2 + # if x2 > 708: + # return 0 + y = 1 / x**2 + if y > _MAX_EXPABLE: + return 0 + return x / np.exp(y) + + +def aps13_fp(x): + if x == 0: + return 0 + y = 1 / x**2 + if y > _MAX_EXPABLE: + return 0 + return (1 + 2 / x**2) / np.exp(y) + + +def aps13_fpp(x): + if x == 0: + return 0 + y = 1 / x**2 + if y > _MAX_EXPABLE: + return 0 + return 2 * (2 - x**2) / x**5 / np.exp(y) + + +def aps14_f(x, n): + r"""0 for negative x-values, trigonometric+linear for x positive""" + if x <= 0: + return -n / 20.0 + return n / 20.0 * (x / 1.5 + np.sin(x) - 1) + + +def aps14_fp(x, n): + if x <= 0: + return 0 + return n / 20.0 * (1.0 / 1.5 + np.cos(x)) + + +def aps14_fpp(x, n): + if x <= 0: + return 0 + return -n / 20.0 * (np.sin(x)) + + +def aps15_f(x, n): + r"""piecewise linear, constant outside of [0, 0.002/(1+n)]""" + if x < 0: + 
return -0.859 + if x > 2 * 1e-3 / (1 + n): + return np.e - 1.859 + return np.exp((n + 1) * x / 2 * 1000) - 1.859 + + +def aps15_fp(x, n): + if not 0 <= x <= 2 * 1e-3 / (1 + n): + return np.e - 1.859 + return np.exp((n + 1) * x / 2 * 1000) * (n + 1) / 2 * 1000 + + +def aps15_fpp(x, n): + if not 0 <= x <= 2 * 1e-3 / (1 + n): + return np.e - 1.859 + return np.exp((n + 1) * x / 2 * 1000) * (n + 1) / 2 * 1000 * (n + 1) / 2 * 1000 + + +# Each APS test case has +# - a function and its two derivatives, +# - additional arguments, +# - a bracket enclosing a root, +# - the order of differentiability of the function on this interval +# - a starting value for methods which don't require a bracket +# - the root (inside the bracket) +# - an Identifier of the test case +# +# Algorithm 748 is a bracketing algorithm so a bracketing interval was provided +# in [1] for each test case. Newton and Halley methods need a single +# starting point x0, which was chosen to be near the middle of the interval, +# unless that would have made the problem too easy. + +_APS_TESTS_KEYS = [ + "f", "fprime", "fprime2", "args", "bracket", "smoothness", "x0", "root", "ID" +] +_APS_TESTS = [ + [aps01_f, aps01_fp, aps01_fpp, (), [np.pi / 2, np.pi], np.inf, + 3, 1.89549426703398094e+00, "aps.01.00"], + [aps02_f, aps02_fp, aps02_fpp, (), [1 + 1e-9, 4 - 1e-9], np.inf, + 2, 3.02291534727305677e+00, "aps.02.00"], + [aps02_f, aps02_fp, aps02_fpp, (), [4 + 1e-9, 9 - 1e-9], np.inf, + 5, 6.68375356080807848e+00, "aps.02.01"], + [aps02_f, aps02_fp, aps02_fpp, (), [9 + 1e-9, 16 - 1e-9], np.inf, + 10, 1.12387016550022114e+01, "aps.02.02"], + [aps02_f, aps02_fp, aps02_fpp, (), [16 + 1e-9, 25 - 1e-9], np.inf, + 17, 1.96760000806234103e+01, "aps.02.03"], + [aps02_f, aps02_fp, aps02_fpp, (), [25 + 1e-9, 36 - 1e-9], np.inf, + 26, 2.98282273265047557e+01, "aps.02.04"], + [aps02_f, aps02_fp, aps02_fpp, (), [36 + 1e-9, 49 - 1e-9], np.inf, + 37, 4.19061161952894139e+01, "aps.02.05"], + [aps02_f, aps02_fp, aps02_fpp, (), [49 + 1e-9, 64 - 1e-9], np.inf, + 50, 5.59535958001430913e+01, "aps.02.06"], + [aps02_f, aps02_fp, aps02_fpp, (), [64 + 1e-9, 81 - 1e-9], np.inf, + 65, 7.19856655865877997e+01, "aps.02.07"], + [aps02_f, aps02_fp, aps02_fpp, (), [81 + 1e-9, 100 - 1e-9], np.inf, + 82, 9.00088685391666701e+01, "aps.02.08"], + [aps02_f, aps02_fp, aps02_fpp, (), [100 + 1e-9, 121 - 1e-9], np.inf, + 101, 1.10026532748330197e+02, "aps.02.09"], + [aps03_f, aps03_fp, aps03_fpp, (-40, -1), [-9, 31], np.inf, + -2, 0, "aps.03.00"], + [aps03_f, aps03_fp, aps03_fpp, (-100, -2), [-9, 31], np.inf, + -2, 0, "aps.03.01"], + [aps03_f, aps03_fp, aps03_fpp, (-200, -3), [-9, 31], np.inf, + -2, 0, "aps.03.02"], + [aps04_f, aps04_fp, aps04_fpp, (4, 0.2), [0, 5], np.inf, + 2.5, 6.68740304976422006e-01, "aps.04.00"], + [aps04_f, aps04_fp, aps04_fpp, (6, 0.2), [0, 5], np.inf, + 2.5, 7.64724491331730039e-01, "aps.04.01"], + [aps04_f, aps04_fp, aps04_fpp, (8, 0.2), [0, 5], np.inf, + 2.5, 8.17765433957942545e-01, "aps.04.02"], + [aps04_f, aps04_fp, aps04_fpp, (10, 0.2), [0, 5], np.inf, + 2.5, 8.51339922520784609e-01, "aps.04.03"], + [aps04_f, aps04_fp, aps04_fpp, (12, 0.2), [0, 5], np.inf, + 2.5, 8.74485272221167897e-01, "aps.04.04"], + [aps04_f, aps04_fp, aps04_fpp, (4, 1), [0, 5], np.inf, + 2.5, 1, "aps.04.05"], + [aps04_f, aps04_fp, aps04_fpp, (6, 1), [0, 5], np.inf, + 2.5, 1, "aps.04.06"], + [aps04_f, aps04_fp, aps04_fpp, (8, 1), [0, 5], np.inf, + 2.5, 1, "aps.04.07"], + [aps04_f, aps04_fp, aps04_fpp, (10, 1), [0, 5], np.inf, + 2.5, 1, "aps.04.08"], + [aps04_f, aps04_fp, 
aps04_fpp, (12, 1), [0, 5], np.inf, + 2.5, 1, "aps.04.09"], + [aps04_f, aps04_fp, aps04_fpp, (8, 1), [-0.95, 4.05], np.inf, + 1.5, 1, "aps.04.10"], + [aps04_f, aps04_fp, aps04_fpp, (10, 1), [-0.95, 4.05], np.inf, + 1.5, 1, "aps.04.11"], + [aps04_f, aps04_fp, aps04_fpp, (12, 1), [-0.95, 4.05], np.inf, + 1.5, 1, "aps.04.12"], + [aps04_f, aps04_fp, aps04_fpp, (14, 1), [-0.95, 4.05], np.inf, + 1.5, 1, "aps.04.13"], + [aps05_f, aps05_fp, aps05_fpp, (), [0, 1.5], np.inf, + 1.3, np.pi / 6, "aps.05.00"], + [aps06_f, aps06_fp, aps06_fpp, (1,), [0, 1], np.inf, + 0.5, 4.22477709641236709e-01, "aps.06.00"], + [aps06_f, aps06_fp, aps06_fpp, (2,), [0, 1], np.inf, + 0.5, 3.06699410483203705e-01, "aps.06.01"], + [aps06_f, aps06_fp, aps06_fpp, (3,), [0, 1], np.inf, + 0.5, 2.23705457654662959e-01, "aps.06.02"], + [aps06_f, aps06_fp, aps06_fpp, (4,), [0, 1], np.inf, + 0.5, 1.71719147519508369e-01, "aps.06.03"], + [aps06_f, aps06_fp, aps06_fpp, (5,), [0, 1], np.inf, + 0.4, 1.38257155056824066e-01, "aps.06.04"], + [aps06_f, aps06_fp, aps06_fpp, (20,), [0, 1], np.inf, + 0.1, 3.46573590208538521e-02, "aps.06.05"], + [aps06_f, aps06_fp, aps06_fpp, (40,), [0, 1], np.inf, + 5e-02, 1.73286795139986315e-02, "aps.06.06"], + [aps06_f, aps06_fp, aps06_fpp, (60,), [0, 1], np.inf, + 1.0 / 30, 1.15524530093324210e-02, "aps.06.07"], + [aps06_f, aps06_fp, aps06_fpp, (80,), [0, 1], np.inf, + 2.5e-02, 8.66433975699931573e-03, "aps.06.08"], + [aps06_f, aps06_fp, aps06_fpp, (100,), [0, 1], np.inf, + 2e-02, 6.93147180559945415e-03, "aps.06.09"], + [aps07_f, aps07_fp, aps07_fpp, (5,), [0, 1], np.inf, + 0.4, 3.84025518406218985e-02, "aps.07.00"], + [aps07_f, aps07_fp, aps07_fpp, (10,), [0, 1], np.inf, + 0.4, 9.90000999800049949e-03, "aps.07.01"], + [aps07_f, aps07_fp, aps07_fpp, (20,), [0, 1], np.inf, + 0.4, 2.49375003906201174e-03, "aps.07.02"], + [aps08_f, aps08_fp, aps08_fpp, (2,), [0, 1], np.inf, + 0.9, 0.5, "aps.08.00"], + [aps08_f, aps08_fp, aps08_fpp, (5,), [0, 1], np.inf, + 0.9, 3.45954815848242059e-01, "aps.08.01"], + [aps08_f, aps08_fp, aps08_fpp, (10,), [0, 1], np.inf, + 0.9, 2.45122333753307220e-01, "aps.08.02"], + [aps08_f, aps08_fp, aps08_fpp, (15,), [0, 1], np.inf, + 0.9, 1.95547623536565629e-01, "aps.08.03"], + [aps08_f, aps08_fp, aps08_fpp, (20,), [0, 1], np.inf, + 0.9, 1.64920957276440960e-01, "aps.08.04"], + [aps09_f, aps09_fp, aps09_fpp, (1,), [0, 1], np.inf, + 0.5, 2.75508040999484394e-01, "aps.09.00"], + [aps09_f, aps09_fp, aps09_fpp, (2,), [0, 1], np.inf, + 0.5, 1.37754020499742197e-01, "aps.09.01"], + [aps09_f, aps09_fp, aps09_fpp, (4,), [0, 1], np.inf, + 0.5, 1.03052837781564422e-02, "aps.09.02"], + [aps09_f, aps09_fp, aps09_fpp, (5,), [0, 1], np.inf, + 0.5, 3.61710817890406339e-03, "aps.09.03"], + [aps09_f, aps09_fp, aps09_fpp, (8,), [0, 1], np.inf, + 0.5, 4.10872918496395375e-04, "aps.09.04"], + [aps09_f, aps09_fp, aps09_fpp, (15,), [0, 1], np.inf, + 0.5, 2.59895758929076292e-05, "aps.09.05"], + [aps09_f, aps09_fp, aps09_fpp, (20,), [0, 1], np.inf, + 0.5, 7.66859512218533719e-06, "aps.09.06"], + [aps10_f, aps10_fp, aps10_fpp, (1,), [0, 1], np.inf, + 0.9, 4.01058137541547011e-01, "aps.10.00"], + [aps10_f, aps10_fp, aps10_fpp, (5,), [0, 1], np.inf, + 0.9, 5.16153518757933583e-01, "aps.10.01"], + [aps10_f, aps10_fp, aps10_fpp, (10,), [0, 1], np.inf, + 0.9, 5.39522226908415781e-01, "aps.10.02"], + [aps10_f, aps10_fp, aps10_fpp, (15,), [0, 1], np.inf, + 0.9, 5.48182294340655241e-01, "aps.10.03"], + [aps10_f, aps10_fp, aps10_fpp, (20,), [0, 1], np.inf, + 0.9, 5.52704666678487833e-01, "aps.10.04"], + [aps11_f, 
aps11_fp, aps11_fpp, (2,), [0.01, 1], np.inf, + 1e-02, 1.0 / 2, "aps.11.00"], + [aps11_f, aps11_fp, aps11_fpp, (5,), [0.01, 1], np.inf, + 1e-02, 1.0 / 5, "aps.11.01"], + [aps11_f, aps11_fp, aps11_fpp, (15,), [0.01, 1], np.inf, + 1e-02, 1.0 / 15, "aps.11.02"], + [aps11_f, aps11_fp, aps11_fpp, (20,), [0.01, 1], np.inf, + 1e-02, 1.0 / 20, "aps.11.03"], + [aps12_f, aps12_fp, aps12_fpp, (2,), [1, 100], np.inf, + 1.1, 2, "aps.12.00"], + [aps12_f, aps12_fp, aps12_fpp, (3,), [1, 100], np.inf, + 1.1, 3, "aps.12.01"], + [aps12_f, aps12_fp, aps12_fpp, (4,), [1, 100], np.inf, + 1.1, 4, "aps.12.02"], + [aps12_f, aps12_fp, aps12_fpp, (5,), [1, 100], np.inf, + 1.1, 5, "aps.12.03"], + [aps12_f, aps12_fp, aps12_fpp, (6,), [1, 100], np.inf, + 1.1, 6, "aps.12.04"], + [aps12_f, aps12_fp, aps12_fpp, (7,), [1, 100], np.inf, + 1.1, 7, "aps.12.05"], + [aps12_f, aps12_fp, aps12_fpp, (9,), [1, 100], np.inf, + 1.1, 9, "aps.12.06"], + [aps12_f, aps12_fp, aps12_fpp, (11,), [1, 100], np.inf, + 1.1, 11, "aps.12.07"], + [aps12_f, aps12_fp, aps12_fpp, (13,), [1, 100], np.inf, + 1.1, 13, "aps.12.08"], + [aps12_f, aps12_fp, aps12_fpp, (15,), [1, 100], np.inf, + 1.1, 15, "aps.12.09"], + [aps12_f, aps12_fp, aps12_fpp, (17,), [1, 100], np.inf, + 1.1, 17, "aps.12.10"], + [aps12_f, aps12_fp, aps12_fpp, (19,), [1, 100], np.inf, + 1.1, 19, "aps.12.11"], + [aps12_f, aps12_fp, aps12_fpp, (21,), [1, 100], np.inf, + 1.1, 21, "aps.12.12"], + [aps12_f, aps12_fp, aps12_fpp, (23,), [1, 100], np.inf, + 1.1, 23, "aps.12.13"], + [aps12_f, aps12_fp, aps12_fpp, (25,), [1, 100], np.inf, + 1.1, 25, "aps.12.14"], + [aps12_f, aps12_fp, aps12_fpp, (27,), [1, 100], np.inf, + 1.1, 27, "aps.12.15"], + [aps12_f, aps12_fp, aps12_fpp, (29,), [1, 100], np.inf, + 1.1, 29, "aps.12.16"], + [aps12_f, aps12_fp, aps12_fpp, (31,), [1, 100], np.inf, + 1.1, 31, "aps.12.17"], + [aps12_f, aps12_fp, aps12_fpp, (33,), [1, 100], np.inf, + 1.1, 33, "aps.12.18"], + [aps13_f, aps13_fp, aps13_fpp, (), [-1, 4], np.inf, + 1.5, 0, "aps.13.00"], + [aps14_f, aps14_fp, aps14_fpp, (1,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.00"], + [aps14_f, aps14_fp, aps14_fpp, (2,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.01"], + [aps14_f, aps14_fp, aps14_fpp, (3,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.02"], + [aps14_f, aps14_fp, aps14_fpp, (4,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.03"], + [aps14_f, aps14_fp, aps14_fpp, (5,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.04"], + [aps14_f, aps14_fp, aps14_fpp, (6,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.05"], + [aps14_f, aps14_fp, aps14_fpp, (7,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.06"], + [aps14_f, aps14_fp, aps14_fpp, (8,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.07"], + [aps14_f, aps14_fp, aps14_fpp, (9,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.08"], + [aps14_f, aps14_fp, aps14_fpp, (10,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.09"], + [aps14_f, aps14_fp, aps14_fpp, (11,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.10"], + [aps14_f, aps14_fp, aps14_fpp, (12,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.11"], + [aps14_f, aps14_fp, aps14_fpp, (13,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.12"], + [aps14_f, aps14_fp, aps14_fpp, (14,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.13"], + [aps14_f, aps14_fp, aps14_fpp, (15,), [-1000, np.pi / 2], 0, + 1, 
6.23806518961612433e-01, "aps.14.14"], + [aps14_f, aps14_fp, aps14_fpp, (16,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.15"], + [aps14_f, aps14_fp, aps14_fpp, (17,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.16"], + [aps14_f, aps14_fp, aps14_fpp, (18,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.17"], + [aps14_f, aps14_fp, aps14_fpp, (19,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.18"], + [aps14_f, aps14_fp, aps14_fpp, (20,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.19"], + [aps14_f, aps14_fp, aps14_fpp, (21,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.20"], + [aps14_f, aps14_fp, aps14_fpp, (22,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.21"], + [aps14_f, aps14_fp, aps14_fpp, (23,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.22"], + [aps14_f, aps14_fp, aps14_fpp, (24,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.23"], + [aps14_f, aps14_fp, aps14_fpp, (25,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.24"], + [aps14_f, aps14_fp, aps14_fpp, (26,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.25"], + [aps14_f, aps14_fp, aps14_fpp, (27,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.26"], + [aps14_f, aps14_fp, aps14_fpp, (28,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.27"], + [aps14_f, aps14_fp, aps14_fpp, (29,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.28"], + [aps14_f, aps14_fp, aps14_fpp, (30,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.29"], + [aps14_f, aps14_fp, aps14_fpp, (31,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.30"], + [aps14_f, aps14_fp, aps14_fpp, (32,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.31"], + [aps14_f, aps14_fp, aps14_fpp, (33,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.32"], + [aps14_f, aps14_fp, aps14_fpp, (34,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.33"], + [aps14_f, aps14_fp, aps14_fpp, (35,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.34"], + [aps14_f, aps14_fp, aps14_fpp, (36,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.35"], + [aps14_f, aps14_fp, aps14_fpp, (37,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.36"], + [aps14_f, aps14_fp, aps14_fpp, (38,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.37"], + [aps14_f, aps14_fp, aps14_fpp, (39,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.38"], + [aps14_f, aps14_fp, aps14_fpp, (40,), [-1000, np.pi / 2], 0, + 1, 6.23806518961612433e-01, "aps.14.39"], + [aps15_f, aps15_fp, aps15_fpp, (20,), [-1000, 1e-4], 0, + -2, 5.90513055942197166e-05, "aps.15.00"], + [aps15_f, aps15_fp, aps15_fpp, (21,), [-1000, 1e-4], 0, + -2, 5.63671553399369967e-05, "aps.15.01"], + [aps15_f, aps15_fp, aps15_fpp, (22,), [-1000, 1e-4], 0, + -2, 5.39164094555919196e-05, "aps.15.02"], + [aps15_f, aps15_fp, aps15_fpp, (23,), [-1000, 1e-4], 0, + -2, 5.16698923949422470e-05, "aps.15.03"], + [aps15_f, aps15_fp, aps15_fpp, (24,), [-1000, 1e-4], 0, + -2, 4.96030966991445609e-05, "aps.15.04"], + [aps15_f, aps15_fp, aps15_fpp, (25,), [-1000, 1e-4], 0, + -2, 4.76952852876389951e-05, "aps.15.05"], + [aps15_f, aps15_fp, aps15_fpp, (26,), [-1000, 1e-4], 0, + -2, 4.59287932399486662e-05, "aps.15.06"], + [aps15_f, aps15_fp, aps15_fpp, (27,), [-1000, 1e-4], 0, + -2, 4.42884791956647841e-05, "aps.15.07"], + [aps15_f, 
aps15_fp, aps15_fpp, (28,), [-1000, 1e-4], 0,
+     -2, 4.27612902578832391e-05, "aps.15.08"],
+    [aps15_f, aps15_fp, aps15_fpp, (29,), [-1000, 1e-4], 0,
+     -2, 4.13359139159538030e-05, "aps.15.09"],
+    [aps15_f, aps15_fp, aps15_fpp, (30,), [-1000, 1e-4], 0,
+     -2, 4.00024973380198076e-05, "aps.15.10"],
+    [aps15_f, aps15_fp, aps15_fpp, (31,), [-1000, 1e-4], 0,
+     -2, 3.87524192962066869e-05, "aps.15.11"],
+    [aps15_f, aps15_fp, aps15_fpp, (32,), [-1000, 1e-4], 0,
+     -2, 3.75781035599579910e-05, "aps.15.12"],
+    [aps15_f, aps15_fp, aps15_fpp, (33,), [-1000, 1e-4], 0,
+     -2, 3.64728652199592355e-05, "aps.15.13"],
+    [aps15_f, aps15_fp, aps15_fpp, (34,), [-1000, 1e-4], 0,
+     -2, 3.54307833565318273e-05, "aps.15.14"],
+    [aps15_f, aps15_fp, aps15_fpp, (35,), [-1000, 1e-4], 0,
+     -2, 3.44465949299614980e-05, "aps.15.15"],
+    [aps15_f, aps15_fp, aps15_fpp, (36,), [-1000, 1e-4], 0,
+     -2, 3.35156058778003705e-05, "aps.15.16"],
+    [aps15_f, aps15_fp, aps15_fpp, (37,), [-1000, 1e-4], 0,
+     -2, 3.26336162494372125e-05, "aps.15.17"],
+    [aps15_f, aps15_fp, aps15_fpp, (38,), [-1000, 1e-4], 0,
+     -2, 3.17968568584260013e-05, "aps.15.18"],
+    [aps15_f, aps15_fp, aps15_fpp, (39,), [-1000, 1e-4], 0,
+     -2, 3.10019354369653455e-05, "aps.15.19"],
+    [aps15_f, aps15_fp, aps15_fpp, (40,), [-1000, 1e-4], 0,
+     -2, 3.02457906702100968e-05, "aps.15.20"],
+    [aps15_f, aps15_fp, aps15_fpp, (100,), [-1000, 1e-4], 0,
+     -2, 1.22779942324615231e-05, "aps.15.21"],
+    [aps15_f, aps15_fp, aps15_fpp, (200,), [-1000, 1e-4], 0,
+     -2, 6.16953939044086617e-06, "aps.15.22"],
+    [aps15_f, aps15_fp, aps15_fpp, (300,), [-1000, 1e-4], 0,
+     -2, 4.11985852982928163e-06, "aps.15.23"],
+    [aps15_f, aps15_fp, aps15_fpp, (400,), [-1000, 1e-4], 0,
+     -2, 3.09246238772721682e-06, "aps.15.24"],
+    [aps15_f, aps15_fp, aps15_fpp, (500,), [-1000, 1e-4], 0,
+     -2, 2.47520442610501789e-06, "aps.15.25"],
+    [aps15_f, aps15_fp, aps15_fpp, (600,), [-1000, 1e-4], 0,
+     -2, 2.06335676785127107e-06, "aps.15.26"],
+    [aps15_f, aps15_fp, aps15_fpp, (700,), [-1000, 1e-4], 0,
+     -2, 1.76901200781542651e-06, "aps.15.27"],
+    [aps15_f, aps15_fp, aps15_fpp, (800,), [-1000, 1e-4], 0,
+     -2, 1.54816156988591016e-06, "aps.15.28"],
+    [aps15_f, aps15_fp, aps15_fpp, (900,), [-1000, 1e-4], 0,
+     -2, 1.37633453660223511e-06, "aps.15.29"],
+    [aps15_f, aps15_fp, aps15_fpp, (1000,), [-1000, 1e-4], 0,
+     -2, 1.23883857889971403e-06, "aps.15.30"]
+]
+
+_APS_TESTS_DICTS = [dict(zip(_APS_TESTS_KEYS, testcase)) for testcase in _APS_TESTS]
+
+
+# ##################
+# "complex" test cases
+# A few simple, complex-valued, functions, defined on the complex plane.
+
+
+def cplx01_f(z, n, a):
+    r"""z**n-a: Use to find the nth root of a"""
+    return z**n - a
+
+
+def cplx01_fp(z, n, a):
+    return n * z**(n - 1)
+
+
+def cplx01_fpp(z, n, a):
+    return n * (n - 1) * z**(n - 2)
+
+
+def cplx02_f(z, a):
+    r"""e**z - a: Use to find the log of a"""
+    return np.exp(z) - a
+
+
+def cplx02_fp(z, a):
+    return np.exp(z)
+
+
+def cplx02_fpp(z, a):
+    return np.exp(z)
+
+
+# Each "complex" test case has
+# - a function and its two derivatives,
+# - additional arguments,
+# - the order of differentiability of the function,
+# - two starting values x0 and x1,
+# - the root,
+# - an Identifier of the test case.
+#
+# Unlike the real-valued collections above, no bracketing interval is
+# provided: these cases are meant for methods such as secant, Newton and
+# Halley, which work from one or two starting points rather than a
+# sign-changing interval.
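+#
+# For illustration (hypothetical usage, not part of the original module):
+# the first case below finds a square root of -1 starting near 1 + 1j, e.g.
+#   newton(cplx01_f, x0=1 + 1j, fprime=cplx01_fp, args=(2, -1))  # -> 1j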
+ + +_COMPLEX_TESTS_KEYS = [ + "f", "fprime", "fprime2", "args", "smoothness", "x0", "x1", "root", "ID" +] +_COMPLEX_TESTS = [ + [cplx01_f, cplx01_fp, cplx01_fpp, (2, -1), np.inf, + (1 + 1j), (0.5 + 0.5j), 1j, "complex.01.00"], + [cplx01_f, cplx01_fp, cplx01_fpp, (3, 1), np.inf, + (-1 + 1j), (-0.5 + 2.0j), (-0.5 + np.sqrt(3) / 2 * 1.0j), + "complex.01.01"], + [cplx01_f, cplx01_fp, cplx01_fpp, (3, -1), np.inf, + 1j, (0.5 + 0.5j), (0.5 + np.sqrt(3) / 2 * 1.0j), + "complex.01.02"], + [cplx01_f, cplx01_fp, cplx01_fpp, (3, 8), np.inf, + 5, 4, 2, "complex.01.03"], + [cplx02_f, cplx02_fp, cplx02_fpp, (-1,), np.inf, + (1 + 2j), (0.5 + 0.5j), np.pi * 1.0j, "complex.02.00"], + [cplx02_f, cplx02_fp, cplx02_fpp, (1j,), np.inf, + (1 + 2j), (0.5 + 0.5j), np.pi * 0.5j, "complex.02.01"], +] + +_COMPLEX_TESTS_DICTS = [ + dict(zip(_COMPLEX_TESTS_KEYS, testcase)) for testcase in _COMPLEX_TESTS +] + + +def _add_a_b(tests): + r"""Add "a" and "b" keys to each test from the "bracket" value""" + for d in tests: + for k, v in zip(['a', 'b'], d.get('bracket', [])): + d[k] = v + + +_add_a_b(_ORIGINAL_TESTS_DICTS) +_add_a_b(_APS_TESTS_DICTS) +_add_a_b(_COMPLEX_TESTS_DICTS) + + +def get_tests(collection='original', smoothness=None): + r"""Return the requested collection of test cases, as an array of dicts with subset-specific keys + + Allowed values of collection: + 'original': The original benchmarking functions. + Real-valued functions of real-valued inputs on an interval with a zero. + f1, .., f3 are continuous and infinitely differentiable + f4 has a single discontinuity at the root + f5 has a root at 1 replacing a 1st order pole + f6 is randomly positive on one side of the root, randomly negative on the other + 'aps': The test problems in the TOMS "Algorithm 748: Enclosing Zeros of Continuous Functions" + paper by Alefeld, Potra and Shi. Real-valued functions of + real-valued inputs on an interval with a zero. + Suitable for methods which start with an enclosing interval, and + derivatives up to 2nd order. + 'complex': Some complex-valued functions of complex-valued inputs. + No enclosing bracket is provided. + Suitable for methods which use one or more starting values, and + derivatives up to 2nd order. 
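+    'chandrupatla': The test problems that appear in [2], by Chandrupatla.
+        Real-valued functions of real-valued inputs on an interval
+        with a zero.
+        Suitable for methods which start with an enclosing interval.
+        Each case also records the number of function evaluations
+        Chandrupatla's algorithm needed ("nfeval"), for comparison.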
+
+    The dictionary keys will be a subset of
+    ["f", "fprime", "fprime2", "args", "bracket", "a", "b", "smoothness", "x0", "x1", "root", "ID"]
+    """  # noqa: E501
+    collection = collection or "original"
+    subsets = {"aps": _APS_TESTS_DICTS,
+               "complex": _COMPLEX_TESTS_DICTS,
+               "original": _ORIGINAL_TESTS_DICTS,
+               "chandrupatla": _CHANDRUPATLA_TESTS_DICTS}
+    tests = subsets.get(collection, [])
+    if smoothness is not None:
+        tests = [tc for tc in tests if tc['smoothness'] >= smoothness]
+    return tests
+
+
+# Backwards compatibility
+methods = [cc.bisect, cc.ridder, cc.brenth, cc.brentq]
+mstrings = ['cc.bisect', 'cc.ridder', 'cc.brenth', 'cc.brentq']
+functions = [f2, f3, f4, f5, f6]
+fstrings = ['f2', 'f3', 'f4', 'f5', 'f6']
+
+# ##################
+# "Chandrupatla" test cases
+# Functions and test cases that appear in [2]
+
+def fun1(x):
+    return x**3 - 2*x - 5
+fun1.root = 2.0945514815423265  # additional precision using mpmath.findroot
+
+
+def fun2(x):
+    return 1 - 1/x**2
+fun2.root = 1
+
+
+def fun3(x):
+    return (x-3)**3
+fun3.root = 3
+
+
+def fun4(x):
+    return 6*(x-2)**5
+fun4.root = 2
+
+
+def fun5(x):
+    return x**9
+fun5.root = 0
+
+
+def fun6(x):
+    return x**19
+fun6.root = 0
+
+
+def fun7(x):
+    return 0 if abs(x) < 3.8e-4 else x*np.exp(-x**(-2))
+fun7.root = 0
+
+
+def fun8(x):
+    xi = 0.61489
+    return -(3062*(1-xi)*np.exp(-x))/(xi + (1-xi)*np.exp(-x)) - 1013 + 1628/x
+fun8.root = 1.0375360332870405
+
+
+def fun9(x):
+    return np.exp(x) - 2 - 0.01/x**2 + .000002/x**3
+fun9.root = 0.7032048403631358
+
+# Each "chandrupatla" test case has
+# - a function,
+# - a bracketing interval enclosing a root,
+# - the root,
+# - the number of function evaluations required by Chandrupatla's algorithm,
+# - an Identifier of the test case.
+#
+# Chandrupatla's is a bracketing algorithm, so a bracketing interval was
+# provided in [2] for each test case. No special support for testing with
+# secant/Newton/Halley is provided.
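+#
+# For illustration (hypothetical usage, not part of the original module):
+# after zipping with the keys below and applying _add_a_b, the first entry
+# becomes
+#   {"f": fun1, "bracket": [2, 3], "root": 2.0945514815423265,
+#    "nfeval": 7, "ID": "fun1.1", "a": 2, "b": 3}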
+ +_CHANDRUPATLA_TESTS_KEYS = ["f", "bracket", "root", "nfeval", "ID"] +_CHANDRUPATLA_TESTS = [ + [fun1, [2, 3], fun1.root, 7], + [fun1, [1, 10], fun1.root, 11], + [fun1, [1, 100], fun1.root, 14], + [fun1, [-1e4, 1e4], fun1.root, 23], + [fun1, [-1e10, 1e10], fun1.root, 43], + [fun2, [0.5, 1.51], fun2.root, 8], + [fun2, [1e-4, 1e4], fun2.root, 22], + [fun2, [1e-6, 1e6], fun2.root, 28], + [fun2, [1e-10, 1e10], fun2.root, 41], + [fun2, [1e-12, 1e12], fun2.root, 48], + [fun3, [0, 5], fun3.root, 21], + [fun3, [-10, 10], fun3.root, 23], + [fun3, [-1e4, 1e4], fun3.root, 36], + [fun3, [-1e6, 1e6], fun3.root, 45], + [fun3, [-1e10, 1e10], fun3.root, 55], + [fun4, [0, 5], fun4.root, 21], + [fun4, [-10, 10], fun4.root, 23], + [fun4, [-1e4, 1e4], fun4.root, 33], + [fun4, [-1e6, 1e6], fun4.root, 43], + [fun4, [-1e10, 1e10], fun4.root, 54], + [fun5, [-1, 4], fun5.root, 21], + [fun5, [-2, 5], fun5.root, 22], + [fun5, [-1, 10], fun5.root, 23], + [fun5, [-5, 50], fun5.root, 25], + [fun5, [-10, 100], fun5.root, 26], + [fun6, [-1., 4.], fun6.root, 21], + [fun6, [-2., 5.], fun6.root, 22], + [fun6, [-1., 10.], fun6.root, 23], + [fun6, [-5., 50.], fun6.root, 25], + [fun6, [-10., 100.], fun6.root, 26], + [fun7, [-1, 4], fun7.root, 8], + [fun7, [-2, 5], fun7.root, 8], + [fun7, [-1, 10], fun7.root, 11], + [fun7, [-5, 50], fun7.root, 18], + [fun7, [-10, 100], fun7.root, 19], + [fun8, [2e-4, 2], fun8.root, 9], + [fun8, [2e-4, 3], fun8.root, 10], + [fun8, [2e-4, 9], fun8.root, 11], + [fun8, [2e-4, 27], fun8.root, 12], + [fun8, [2e-4, 81], fun8.root, 14], + [fun9, [2e-4, 1], fun9.root, 7], + [fun9, [2e-4, 3], fun9.root, 8], + [fun9, [2e-4, 9], fun9.root, 10], + [fun9, [2e-4, 27], fun9.root, 11], + [fun9, [2e-4, 81], fun9.root, 13], +] +_CHANDRUPATLA_TESTS = [test + [f'{test[0].__name__}.{i%5+1}'] + for i, test in enumerate(_CHANDRUPATLA_TESTS)] + +_CHANDRUPATLA_TESTS_DICTS = [dict(zip(_CHANDRUPATLA_TESTS_KEYS, testcase)) + for testcase in _CHANDRUPATLA_TESTS] +_add_a_b(_CHANDRUPATLA_TESTS_DICTS) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_zeros_py.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_zeros_py.py new file mode 100644 index 0000000000000000000000000000000000000000..986031920d69578c1c7c470b03deae5b3d24c309 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_zeros_py.py @@ -0,0 +1,1403 @@ +import warnings +from collections import namedtuple +import operator +from . import _zeros +from ._optimize import OptimizeResult +import numpy as np + + +_iter = 100 +_xtol = 2e-12 +_rtol = 4 * np.finfo(float).eps + +__all__ = ['newton', 'bisect', 'ridder', 'brentq', 'brenth', 'toms748', + 'RootResults'] + +# Must agree with CONVERGED, SIGNERR, CONVERR, ... in zeros.h +_ECONVERGED = 0 +_ESIGNERR = -1 # used in _chandrupatla +_ECONVERR = -2 +_EVALUEERR = -3 +_ECALLBACK = -4 +_EINPROGRESS = 1 + +CONVERGED = 'converged' +SIGNERR = 'sign error' +CONVERR = 'convergence error' +VALUEERR = 'value error' +INPROGRESS = 'No error' + + +flag_map = {_ECONVERGED: CONVERGED, _ESIGNERR: SIGNERR, _ECONVERR: CONVERR, + _EVALUEERR: VALUEERR, _EINPROGRESS: INPROGRESS} + + +class RootResults(OptimizeResult): + """Represents the root finding result. + + Attributes + ---------- + root : float + Estimated root location. + iterations : int + Number of iterations needed to find the root. + function_calls : int + Number of times the function was called. + converged : bool + True if the routine converged. + flag : str + Description of the cause of termination. 
+    method : str
+        Root finding method used.
+
+    """
+
+    def __init__(self, root, iterations, function_calls, flag, method):
+        self.root = root
+        self.iterations = iterations
+        self.function_calls = function_calls
+        self.converged = flag == _ECONVERGED
+        if flag in flag_map:
+            self.flag = flag_map[flag]
+        else:
+            self.flag = flag
+        self.method = method
+
+
+def results_c(full_output, r, method):
+    if full_output:
+        x, funcalls, iterations, flag = r
+        results = RootResults(root=x,
+                              iterations=iterations,
+                              function_calls=funcalls,
+                              flag=flag, method=method)
+        return x, results
+    else:
+        return r
+
+
+def _results_select(full_output, r, method):
+    """Select from a tuple of (root, funccalls, iterations, flag)"""
+    x, funcalls, iterations, flag = r
+    if full_output:
+        results = RootResults(root=x,
+                              iterations=iterations,
+                              function_calls=funcalls,
+                              flag=flag, method=method)
+        return x, results
+    return x
+
+
+def _wrap_nan_raise(f):
+
+    def f_raise(x, *args):
+        fx = f(x, *args)
+        f_raise._function_calls += 1
+        if np.isnan(fx):
+            msg = (f'The function value at x={x} is NaN; '
+                   'solver cannot continue.')
+            err = ValueError(msg)
+            err._x = x
+            err._function_calls = f_raise._function_calls
+            raise err
+        return fx
+
+    f_raise._function_calls = 0
+    return f_raise
+
+
+def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50,
+           fprime2=None, x1=None, rtol=0.0,
+           full_output=False, disp=True):
+    """
+    Find a root of a real or complex function using the Newton-Raphson
+    (or secant or Halley's) method.
+
+    Find a root of the scalar-valued function `func` given a nearby scalar
+    starting point `x0`.
+    The Newton-Raphson method is used if the derivative `fprime` of `func`
+    is provided, otherwise the secant method is used. If the second order
+    derivative `fprime2` of `func` is also provided, then Halley's method is
+    used.
+
+    If `x0` is a sequence with more than one item, `newton` returns an array:
+    the roots of the function from each (scalar) starting point in `x0`.
+    In this case, `func` must be vectorized to return a sequence or array of
+    the same shape as its first argument. If `fprime` (`fprime2`) is given,
+    then its return must also have the same shape: each element is the first
+    (second) derivative of `func` with respect to its only variable evaluated
+    at each element of its first argument.
+
+    `newton` is for finding roots of scalar-valued functions of a single
+    variable. For problems involving several variables, see `root`.
+
+    Parameters
+    ----------
+    func : callable
+        The function whose root is wanted. It must be a function of a
+        single variable of the form ``f(x,a,b,c...)``, where ``a,b,c...``
+        are extra arguments that can be passed in the `args` parameter.
+    x0 : float, sequence, or ndarray
+        An initial estimate of the root that should be somewhere near the
+        actual root. If not scalar, then `func` must be vectorized and return
+        a sequence or array of the same shape as its first argument.
+    fprime : callable, optional
+        The derivative of the function when available and convenient. If it
+        is None (default), then the secant method is used.
+    args : tuple, optional
+        Extra arguments to be used in the function call.
+    tol : float, optional
+        The allowable error of the root's value. If `func` is complex-valued,
+        a larger `tol` is recommended as both the real and imaginary parts
+        of `x` contribute to ``|x - x0|``.
+    maxiter : int, optional
+        Maximum number of iterations.
+ fprime2 : callable, optional + The second order derivative of the function when available and + convenient. If it is None (default), then the normal Newton-Raphson + or the secant method is used. If it is not None, then Halley's method + is used. + x1 : float, optional + Another estimate of the root that should be somewhere near the + actual root. Used if `fprime` is not provided. + rtol : float, optional + Tolerance (relative) for termination. + full_output : bool, optional + If `full_output` is False (default), the root is returned. + If True and `x0` is scalar, the return value is ``(x, r)``, where ``x`` + is the root and ``r`` is a `RootResults` object. + If True and `x0` is non-scalar, the return value is ``(x, converged, + zero_der)`` (see Returns section for details). + disp : bool, optional + If True, raise a RuntimeError if the algorithm didn't converge, with + the error message containing the number of iterations and current + function value. Otherwise, the convergence status is recorded in a + `RootResults` return object. + Ignored if `x0` is not scalar. + *Note: this has little to do with displaying, however, + the `disp` keyword cannot be renamed for backwards compatibility.* + + Returns + ------- + root : float, sequence, or ndarray + Estimated location where function is zero. + r : `RootResults`, optional + Present if ``full_output=True`` and `x0` is scalar. + Object containing information about the convergence. In particular, + ``r.converged`` is True if the routine converged. + converged : ndarray of bool, optional + Present if ``full_output=True`` and `x0` is non-scalar. + For vector functions, indicates which elements converged successfully. + zero_der : ndarray of bool, optional + Present if ``full_output=True`` and `x0` is non-scalar. + For vector functions, indicates which elements had a zero derivative. + + See Also + -------- + root_scalar : interface to root solvers for scalar functions + root : interface to root solvers for multi-input, multi-output functions + + Notes + ----- + The convergence rate of the Newton-Raphson method is quadratic, + the Halley method is cubic, and the secant method is + sub-quadratic. This means that if the function is well-behaved + the actual error in the estimated root after the nth iteration + is approximately the square (cube for Halley) of the error + after the (n-1)th step. However, the stopping criterion used + here is the step size and there is no guarantee that a root + has been found. Consequently, the result should be verified. + Safer algorithms are brentq, brenth, ridder, and bisect, + but they all require that the root first be bracketed in an + interval where the function changes sign. The brentq algorithm + is recommended for general use in one dimensional problems + when such an interval has been found. + + When `newton` is used with arrays, it is best suited for the following + types of problems: + + * The initial guesses, `x0`, are all relatively the same distance from + the roots. + * Some or all of the extra arguments, `args`, are also arrays so that a + class of similar problems can be solved together. + * The size of the initial guesses, `x0`, is larger than O(100) elements. + Otherwise, a naive loop may perform as well or better than a vector. + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy import optimize + + >>> def f(x): + ... 
return (x**3 - 1) # only one real root at x = 1 + + ``fprime`` is not provided, use the secant method: + + >>> root = optimize.newton(f, 1.5) + >>> root + 1.0000000000000016 + >>> root = optimize.newton(f, 1.5, fprime2=lambda x: 6 * x) + >>> root + 1.0000000000000016 + + Only ``fprime`` is provided, use the Newton-Raphson method: + + >>> root = optimize.newton(f, 1.5, fprime=lambda x: 3 * x**2) + >>> root + 1.0 + + Both ``fprime2`` and ``fprime`` are provided, use Halley's method: + + >>> root = optimize.newton(f, 1.5, fprime=lambda x: 3 * x**2, + ... fprime2=lambda x: 6 * x) + >>> root + 1.0 + + When we want to find roots for a set of related starting values and/or + function parameters, we can provide both of those as an array of inputs: + + >>> f = lambda x, a: x**3 - a + >>> fder = lambda x, a: 3 * x**2 + >>> rng = np.random.default_rng() + >>> x = rng.standard_normal(100) + >>> a = np.arange(-50, 50) + >>> vec_res = optimize.newton(f, x, fprime=fder, args=(a, ), maxiter=200) + + The above is the equivalent of solving for each value in ``(x, a)`` + separately in a for-loop, just faster: + + >>> loop_res = [optimize.newton(f, x0, fprime=fder, args=(a0,), + ... maxiter=200) + ... for x0, a0 in zip(x, a)] + >>> np.allclose(vec_res, loop_res) + True + + Plot the results found for all values of ``a``: + + >>> analytical_result = np.sign(a) * np.abs(a)**(1/3) + >>> fig, ax = plt.subplots() + >>> ax.plot(a, analytical_result, 'o') + >>> ax.plot(a, vec_res, '.') + >>> ax.set_xlabel('$a$') + >>> ax.set_ylabel('$x$ where $f(x, a)=0$') + >>> plt.show() + + """ + if tol <= 0: + raise ValueError("tol too small (%g <= 0)" % tol) + maxiter = operator.index(maxiter) + if maxiter < 1: + raise ValueError("maxiter must be greater than 0") + if np.size(x0) > 1: + return _array_newton(func, x0, fprime, args, tol, maxiter, fprime2, + full_output) + + # Convert to float (don't use float(x0); this works also for complex x0) + # Use np.asarray because we want x0 to be a numpy object, not a Python + # object. e.g. np.complex(1+1j) > 0 is possible, but (1 + 1j) > 0 raises + # a TypeError + x0 = np.asarray(x0)[()] * 1.0 + p0 = x0 + funcalls = 0 + if fprime is not None: + # Newton-Raphson method + method = "newton" + for itr in range(maxiter): + # first evaluate fval + fval = func(p0, *args) + funcalls += 1 + # If fval is 0, a root has been found, then terminate + if fval == 0: + return _results_select( + full_output, (p0, funcalls, itr, _ECONVERGED), method) + fder = fprime(p0, *args) + funcalls += 1 + if fder == 0: + msg = "Derivative was zero." + if disp: + msg += ( + " Failed to converge after %d iterations, value is %s." + % (itr + 1, p0)) + raise RuntimeError(msg) + warnings.warn(msg, RuntimeWarning, stacklevel=2) + return _results_select( + full_output, (p0, funcalls, itr + 1, _ECONVERR), method) + newton_step = fval / fder + if fprime2: + fder2 = fprime2(p0, *args) + funcalls += 1 + method = "halley" + # Halley's method: + # newton_step /= (1.0 - 0.5 * newton_step * fder2 / fder) + # Only do it if denominator stays close enough to 1 + # Rationale: If 1-adj < 0, then Halley sends x in the + # opposite direction to Newton. Doesn't happen if x is close + # enough to root. 
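+                # (Algebraically, dividing the Newton step by 1 - adj, with
+                # adj = fval*fder2 / (2*fder**2), reproduces Halley's update
+                # x - 2*fval*fder / (2*fder**2 - fval*fder2).)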
+ adj = newton_step * fder2 / fder / 2 + if np.abs(adj) < 1: + newton_step /= 1.0 - adj + p = p0 - newton_step + if np.isclose(p, p0, rtol=rtol, atol=tol): + return _results_select( + full_output, (p, funcalls, itr + 1, _ECONVERGED), method) + p0 = p + else: + # Secant method + method = "secant" + if x1 is not None: + if x1 == x0: + raise ValueError("x1 and x0 must be different") + p1 = x1 + else: + eps = 1e-4 + p1 = x0 * (1 + eps) + p1 += (eps if p1 >= 0 else -eps) + q0 = func(p0, *args) + funcalls += 1 + q1 = func(p1, *args) + funcalls += 1 + if abs(q1) < abs(q0): + p0, p1, q0, q1 = p1, p0, q1, q0 + for itr in range(maxiter): + if q1 == q0: + if p1 != p0: + msg = "Tolerance of %s reached." % (p1 - p0) + if disp: + msg += ( + " Failed to converge after %d iterations, value is %s." + % (itr + 1, p1)) + raise RuntimeError(msg) + warnings.warn(msg, RuntimeWarning, stacklevel=2) + p = (p1 + p0) / 2.0 + return _results_select( + full_output, (p, funcalls, itr + 1, _ECONVERR), method) + else: + if abs(q1) > abs(q0): + p = (-q0 / q1 * p1 + p0) / (1 - q0 / q1) + else: + p = (-q1 / q0 * p0 + p1) / (1 - q1 / q0) + if np.isclose(p, p1, rtol=rtol, atol=tol): + return _results_select( + full_output, (p, funcalls, itr + 1, _ECONVERGED), method) + p0, q0 = p1, q1 + p1 = p + q1 = func(p1, *args) + funcalls += 1 + + if disp: + msg = ("Failed to converge after %d iterations, value is %s." + % (itr + 1, p)) + raise RuntimeError(msg) + + return _results_select(full_output, (p, funcalls, itr + 1, _ECONVERR), method) + + +def _array_newton(func, x0, fprime, args, tol, maxiter, fprime2, full_output): + """ + A vectorized version of Newton, Halley, and secant methods for arrays. + + Do not use this method directly. This method is called from `newton` + when ``np.size(x0) > 1`` is ``True``. For docstring, see `newton`. + """ + # Explicitly copy `x0` as `p` will be modified inplace, but the + # user's array should not be altered. 
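+    # Bookkeeping: `failures` marks elements that have not yet converged;
+    # `nz_der` marks elements whose derivative estimate is nonzero, the only
+    # ones for which a step can safely be taken.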
+ p = np.array(x0, copy=True) + + failures = np.ones_like(p, dtype=bool) + nz_der = np.ones_like(failures) + if fprime is not None: + # Newton-Raphson method + for iteration in range(maxiter): + # first evaluate fval + fval = np.asarray(func(p, *args)) + # If all fval are 0, all roots have been found, then terminate + if not fval.any(): + failures = fval.astype(bool) + break + fder = np.asarray(fprime(p, *args)) + nz_der = (fder != 0) + # stop iterating if all derivatives are zero + if not nz_der.any(): + break + # Newton step + dp = fval[nz_der] / fder[nz_der] + if fprime2 is not None: + fder2 = np.asarray(fprime2(p, *args)) + dp = dp / (1.0 - 0.5 * dp * fder2[nz_der] / fder[nz_der]) + # only update nonzero derivatives + p = np.asarray(p, dtype=np.result_type(p, dp, np.float64)) + p[nz_der] -= dp + failures[nz_der] = np.abs(dp) >= tol # items not yet converged + # stop iterating if there aren't any failures, not incl zero der + if not failures[nz_der].any(): + break + else: + # Secant method + dx = np.finfo(float).eps**0.33 + p1 = p * (1 + dx) + np.where(p >= 0, dx, -dx) + q0 = np.asarray(func(p, *args)) + q1 = np.asarray(func(p1, *args)) + active = np.ones_like(p, dtype=bool) + for iteration in range(maxiter): + nz_der = (q1 != q0) + # stop iterating if all derivatives are zero + if not nz_der.any(): + p = (p1 + p) / 2.0 + break + # Secant Step + dp = (q1 * (p1 - p))[nz_der] / (q1 - q0)[nz_der] + # only update nonzero derivatives + p = np.asarray(p, dtype=np.result_type(p, p1, dp, np.float64)) + p[nz_der] = p1[nz_der] - dp + active_zero_der = ~nz_der & active + p[active_zero_der] = (p1 + p)[active_zero_der] / 2.0 + active &= nz_der # don't assign zero derivatives again + failures[nz_der] = np.abs(dp) >= tol # not yet converged + # stop iterating if there aren't any failures, not incl zero der + if not failures[nz_der].any(): + break + p1, p = p, p1 + q0 = q1 + q1 = np.asarray(func(p1, *args)) + + zero_der = ~nz_der & failures # don't include converged with zero-ders + if zero_der.any(): + # Secant warnings + if fprime is None: + nonzero_dp = (p1 != p) + # non-zero dp, but infinite newton step + zero_der_nz_dp = (zero_der & nonzero_dp) + if zero_der_nz_dp.any(): + rms = np.sqrt( + sum((p1[zero_der_nz_dp] - p[zero_der_nz_dp]) ** 2) + ) + warnings.warn(f'RMS of {rms:g} reached', RuntimeWarning, stacklevel=3) + # Newton or Halley warnings + else: + all_or_some = 'all' if zero_der.all() else 'some' + msg = f'{all_or_some:s} derivatives were zero' + warnings.warn(msg, RuntimeWarning, stacklevel=3) + elif failures.any(): + all_or_some = 'all' if failures.all() else 'some' + msg = f'{all_or_some:s} failed to converge after {maxiter:d} iterations' + if failures.all(): + raise RuntimeError(msg) + warnings.warn(msg, RuntimeWarning, stacklevel=3) + + if full_output: + result = namedtuple('result', ('root', 'converged', 'zero_der')) + p = result(p, ~failures, zero_der) + + return p + + +def bisect(f, a, b, args=(), + xtol=_xtol, rtol=_rtol, maxiter=_iter, + full_output=False, disp=True): + """ + Find root of a function within an interval using bisection. + + Basic bisection routine to find a root of the function `f` between the + arguments `a` and `b`. `f(a)` and `f(b)` cannot have the same signs. + Slow but sure. + + Parameters + ---------- + f : function + Python function returning a number. `f` must be continuous, and + f(a) and f(b) must have opposite signs. + a : scalar + One end of the bracketing interval [a,b]. + b : scalar + The other end of the bracketing interval [a,b]. 
+ xtol : number, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The + parameter must be positive. + rtol : number, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The + parameter cannot be smaller than its default value of + ``4*np.finfo(float).eps``. + maxiter : int, optional + If convergence is not achieved in `maxiter` iterations, an error is + raised. Must be >= 0. + args : tuple, optional + Containing extra arguments for the function `f`. + `f` is called by ``apply(f, (x)+args)``. + full_output : bool, optional + If `full_output` is False, the root is returned. If `full_output` is + True, the return value is ``(x, r)``, where x is the root, and r is + a `RootResults` object. + disp : bool, optional + If True, raise RuntimeError if the algorithm didn't converge. + Otherwise, the convergence status is recorded in a `RootResults` + return object. + + Returns + ------- + root : float + Root of `f` between `a` and `b`. + r : `RootResults` (present if ``full_output = True``) + Object containing information about the convergence. In particular, + ``r.converged`` is True if the routine converged. + + Examples + -------- + + >>> def f(x): + ... return (x**2 - 1) + + >>> from scipy import optimize + + >>> root = optimize.bisect(f, 0, 2) + >>> root + 1.0 + + >>> root = optimize.bisect(f, -2, 0) + >>> root + -1.0 + + See Also + -------- + brentq, brenth, bisect, newton + fixed_point : scalar fixed-point finder + fsolve : n-dimensional root-finding + + """ + if not isinstance(args, tuple): + args = (args,) + maxiter = operator.index(maxiter) + if xtol <= 0: + raise ValueError("xtol too small (%g <= 0)" % xtol) + if rtol < _rtol: + raise ValueError(f"rtol too small ({rtol:g} < {_rtol:g})") + f = _wrap_nan_raise(f) + r = _zeros._bisect(f, a, b, xtol, rtol, maxiter, args, full_output, disp) + return results_c(full_output, r, "bisect") + + +def ridder(f, a, b, args=(), + xtol=_xtol, rtol=_rtol, maxiter=_iter, + full_output=False, disp=True): + """ + Find a root of a function in an interval using Ridder's method. + + Parameters + ---------- + f : function + Python function returning a number. f must be continuous, and f(a) and + f(b) must have opposite signs. + a : scalar + One end of the bracketing interval [a,b]. + b : scalar + The other end of the bracketing interval [a,b]. + xtol : number, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The + parameter must be positive. + rtol : number, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The + parameter cannot be smaller than its default value of + ``4*np.finfo(float).eps``. + maxiter : int, optional + If convergence is not achieved in `maxiter` iterations, an error is + raised. Must be >= 0. + args : tuple, optional + Containing extra arguments for the function `f`. + `f` is called by ``apply(f, (x)+args)``. + full_output : bool, optional + If `full_output` is False, the root is returned. If `full_output` is + True, the return value is ``(x, r)``, where `x` is the root, and `r` is + a `RootResults` object. + disp : bool, optional + If True, raise RuntimeError if the algorithm didn't converge. + Otherwise, the convergence status is recorded in any `RootResults` + return object. 
+ + Returns + ------- + root : float + Root of `f` between `a` and `b`. + r : `RootResults` (present if ``full_output = True``) + Object containing information about the convergence. + In particular, ``r.converged`` is True if the routine converged. + + See Also + -------- + brentq, brenth, bisect, newton : 1-D root-finding + fixed_point : scalar fixed-point finder + + Notes + ----- + Uses [Ridders1979]_ method to find a root of the function `f` between the + arguments `a` and `b`. Ridders' method is faster than bisection, but not + generally as fast as the Brent routines. [Ridders1979]_ provides the + classic description and source of the algorithm. A description can also be + found in any recent edition of Numerical Recipes. + + The routine used here diverges slightly from standard presentations in + order to be a bit more careful of tolerance. + + References + ---------- + .. [Ridders1979] + Ridders, C. F. J. "A New Algorithm for Computing a + Single Root of a Real Continuous Function." + IEEE Trans. Circuits Systems 26, 979-980, 1979. + + Examples + -------- + + >>> def f(x): + ... return (x**2 - 1) + + >>> from scipy import optimize + + >>> root = optimize.ridder(f, 0, 2) + >>> root + 1.0 + + >>> root = optimize.ridder(f, -2, 0) + >>> root + -1.0 + """ + if not isinstance(args, tuple): + args = (args,) + maxiter = operator.index(maxiter) + if xtol <= 0: + raise ValueError("xtol too small (%g <= 0)" % xtol) + if rtol < _rtol: + raise ValueError(f"rtol too small ({rtol:g} < {_rtol:g})") + f = _wrap_nan_raise(f) + r = _zeros._ridder(f, a, b, xtol, rtol, maxiter, args, full_output, disp) + return results_c(full_output, r, "ridder") + + +def brentq(f, a, b, args=(), + xtol=_xtol, rtol=_rtol, maxiter=_iter, + full_output=False, disp=True): + """ + Find a root of a function in a bracketing interval using Brent's method. + + Uses the classic Brent's method to find a root of the function `f` on + the sign changing interval [a , b]. Generally considered the best of the + rootfinding routines here. It is a safe version of the secant method that + uses inverse quadratic extrapolation. Brent's method combines root + bracketing, interval bisection, and inverse quadratic interpolation. It is + sometimes known as the van Wijngaarden-Dekker-Brent method. Brent (1973) + claims convergence is guaranteed for functions computable within [a,b]. + + [Brent1973]_ provides the classic description of the algorithm. Another + description can be found in a recent edition of Numerical Recipes, including + [PressEtal1992]_. A third description is at + http://mathworld.wolfram.com/BrentsMethod.html. It should be easy to + understand the algorithm just by reading our code. Our code diverges a bit + from standard presentations: we choose a different formula for the + extrapolation step. + + Parameters + ---------- + f : function + Python function returning a number. The function :math:`f` + must be continuous, and :math:`f(a)` and :math:`f(b)` must + have opposite signs. + a : scalar + One end of the bracketing interval :math:`[a, b]`. + b : scalar + The other end of the bracketing interval :math:`[a, b]`. + xtol : number, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The + parameter must be positive. For nice functions, Brent's + method will often satisfy the above condition with ``xtol/2`` + and ``rtol/2``. 
[Brent1973]_ + rtol : number, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The + parameter cannot be smaller than its default value of + ``4*np.finfo(float).eps``. For nice functions, Brent's + method will often satisfy the above condition with ``xtol/2`` + and ``rtol/2``. [Brent1973]_ + maxiter : int, optional + If convergence is not achieved in `maxiter` iterations, an error is + raised. Must be >= 0. + args : tuple, optional + Containing extra arguments for the function `f`. + `f` is called by ``apply(f, (x)+args)``. + full_output : bool, optional + If `full_output` is False, the root is returned. If `full_output` is + True, the return value is ``(x, r)``, where `x` is the root, and `r` is + a `RootResults` object. + disp : bool, optional + If True, raise RuntimeError if the algorithm didn't converge. + Otherwise, the convergence status is recorded in any `RootResults` + return object. + + Returns + ------- + root : float + Root of `f` between `a` and `b`. + r : `RootResults` (present if ``full_output = True``) + Object containing information about the convergence. In particular, + ``r.converged`` is True if the routine converged. + + Notes + ----- + `f` must be continuous. f(a) and f(b) must have opposite signs. + + Related functions fall into several classes: + + multivariate local optimizers + `fmin`, `fmin_powell`, `fmin_cg`, `fmin_bfgs`, `fmin_ncg` + nonlinear least squares minimizer + `leastsq` + constrained multivariate optimizers + `fmin_l_bfgs_b`, `fmin_tnc`, `fmin_cobyla` + global optimizers + `basinhopping`, `brute`, `differential_evolution` + local scalar minimizers + `fminbound`, `brent`, `golden`, `bracket` + N-D root-finding + `fsolve` + 1-D root-finding + `brenth`, `ridder`, `bisect`, `newton` + scalar fixed-point finder + `fixed_point` + + References + ---------- + .. [Brent1973] + Brent, R. P., + *Algorithms for Minimization Without Derivatives*. + Englewood Cliffs, NJ: Prentice-Hall, 1973. Ch. 3-4. + + .. [PressEtal1992] + Press, W. H.; Flannery, B. P.; Teukolsky, S. A.; and Vetterling, W. T. + *Numerical Recipes in FORTRAN: The Art of Scientific Computing*, 2nd ed. + Cambridge, England: Cambridge University Press, pp. 352-355, 1992. + Section 9.3: "Van Wijngaarden-Dekker-Brent Method." + + Examples + -------- + >>> def f(x): + ... return (x**2 - 1) + + >>> from scipy import optimize + + >>> root = optimize.brentq(f, -2, 0) + >>> root + -1.0 + + >>> root = optimize.brentq(f, 0, 2) + >>> root + 1.0 + """ + if not isinstance(args, tuple): + args = (args,) + maxiter = operator.index(maxiter) + if xtol <= 0: + raise ValueError("xtol too small (%g <= 0)" % xtol) + if rtol < _rtol: + raise ValueError(f"rtol too small ({rtol:g} < {_rtol:g})") + f = _wrap_nan_raise(f) + r = _zeros._brentq(f, a, b, xtol, rtol, maxiter, args, full_output, disp) + return results_c(full_output, r, "brentq") + + +def brenth(f, a, b, args=(), + xtol=_xtol, rtol=_rtol, maxiter=_iter, + full_output=False, disp=True): + """Find a root of a function in a bracketing interval using Brent's + method with hyperbolic extrapolation. + + A variation on the classic Brent routine to find a root of the function f + between the arguments a and b that uses hyperbolic extrapolation instead of + inverse quadratic extrapolation. Bus & Dekker (1975) guarantee convergence + for this method, claiming that the upper bound of function evaluations here + is 4 or 5 times that of bisection. + f(a) and f(b) cannot have the same signs. 
Generally, on a par with the + brent routine, but not as heavily tested. It is a safe version of the + secant method that uses hyperbolic extrapolation. + The version here is by Chuck Harris, and implements Algorithm M of + [BusAndDekker1975]_, where further details (convergence properties, + additional remarks and such) can be found + + Parameters + ---------- + f : function + Python function returning a number. f must be continuous, and f(a) and + f(b) must have opposite signs. + a : scalar + One end of the bracketing interval [a,b]. + b : scalar + The other end of the bracketing interval [a,b]. + xtol : number, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The + parameter must be positive. As with `brentq`, for nice + functions the method will often satisfy the above condition + with ``xtol/2`` and ``rtol/2``. + rtol : number, optional + The computed root ``x0`` will satisfy ``np.allclose(x, x0, + atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The + parameter cannot be smaller than its default value of + ``4*np.finfo(float).eps``. As with `brentq`, for nice functions + the method will often satisfy the above condition with + ``xtol/2`` and ``rtol/2``. + maxiter : int, optional + If convergence is not achieved in `maxiter` iterations, an error is + raised. Must be >= 0. + args : tuple, optional + Containing extra arguments for the function `f`. + `f` is called by ``apply(f, (x)+args)``. + full_output : bool, optional + If `full_output` is False, the root is returned. If `full_output` is + True, the return value is ``(x, r)``, where `x` is the root, and `r` is + a `RootResults` object. + disp : bool, optional + If True, raise RuntimeError if the algorithm didn't converge. + Otherwise, the convergence status is recorded in any `RootResults` + return object. + + Returns + ------- + root : float + Root of `f` between `a` and `b`. + r : `RootResults` (present if ``full_output = True``) + Object containing information about the convergence. In particular, + ``r.converged`` is True if the routine converged. + + See Also + -------- + fmin, fmin_powell, fmin_cg, fmin_bfgs, fmin_ncg : multivariate local optimizers + leastsq : nonlinear least squares minimizer + fmin_l_bfgs_b, fmin_tnc, fmin_cobyla : constrained multivariate optimizers + basinhopping, differential_evolution, brute : global optimizers + fminbound, brent, golden, bracket : local scalar minimizers + fsolve : N-D root-finding + brentq, brenth, ridder, bisect, newton : 1-D root-finding + fixed_point : scalar fixed-point finder + + References + ---------- + .. [BusAndDekker1975] + Bus, J. C. P., Dekker, T. J., + "Two Efficient Algorithms with Guaranteed Convergence for Finding a Zero + of a Function", ACM Transactions on Mathematical Software, Vol. 1, Issue + 4, Dec. 1975, pp. 330-345. Section 3: "Algorithm M". + :doi:`10.1145/355656.355659` + + Examples + -------- + >>> def f(x): + ... 
return (x**2 - 1) + + >>> from scipy import optimize + + >>> root = optimize.brenth(f, -2, 0) + >>> root + -1.0 + + >>> root = optimize.brenth(f, 0, 2) + >>> root + 1.0 + + """ + if not isinstance(args, tuple): + args = (args,) + maxiter = operator.index(maxiter) + if xtol <= 0: + raise ValueError("xtol too small (%g <= 0)" % xtol) + if rtol < _rtol: + raise ValueError(f"rtol too small ({rtol:g} < {_rtol:g})") + f = _wrap_nan_raise(f) + r = _zeros._brenth(f, a, b, xtol, rtol, maxiter, args, full_output, disp) + return results_c(full_output, r, "brenth") + + +################################ +# TOMS "Algorithm 748: Enclosing Zeros of Continuous Functions", by +# Alefeld, G. E. and Potra, F. A. and Shi, Yixun, +# See [1] + + +def _notclose(fs, rtol=_rtol, atol=_xtol): + # Ensure not None, not 0, all finite, and not very close to each other + notclosefvals = ( + all(fs) and all(np.isfinite(fs)) and + not any(any(np.isclose(_f, fs[i + 1:], rtol=rtol, atol=atol)) + for i, _f in enumerate(fs[:-1]))) + return notclosefvals + + +def _secant(xvals, fvals): + """Perform a secant step, taking a little care""" + # Secant has many "mathematically" equivalent formulations + # x2 = x0 - (x1 - x0)/(f1 - f0) * f0 + # = x1 - (x1 - x0)/(f1 - f0) * f1 + # = (-x1 * f0 + x0 * f1) / (f1 - f0) + # = (-f0 / f1 * x1 + x0) / (1 - f0 / f1) + # = (-f1 / f0 * x0 + x1) / (1 - f1 / f0) + x0, x1 = xvals[:2] + f0, f1 = fvals[:2] + if f0 == f1: + return np.nan + if np.abs(f1) > np.abs(f0): + x2 = (-f0 / f1 * x1 + x0) / (1 - f0 / f1) + else: + x2 = (-f1 / f0 * x0 + x1) / (1 - f1 / f0) + return x2 + + +def _update_bracket(ab, fab, c, fc): + """Update a bracket given (c, fc), return the discarded endpoints.""" + fa, fb = fab + idx = (0 if np.sign(fa) * np.sign(fc) > 0 else 1) + rx, rfx = ab[idx], fab[idx] + fab[idx] = fc + ab[idx] = c + return rx, rfx + + +def _compute_divided_differences(xvals, fvals, N=None, full=True, + forward=True): + """Return a matrix of divided differences for the xvals, fvals pairs + + DD[i, j] = f[x_{i-j}, ..., x_i] for 0 <= j <= i + + If full is False, just return the main diagonal(or last row): + f[a], f[a, b] and f[a, b, c]. + If forward is False, return f[c], f[b, c], f[a, b, c].""" + if full: + if forward: + xvals = np.asarray(xvals) + else: + xvals = np.array(xvals)[::-1] + M = len(xvals) + N = M if N is None else min(N, M) + DD = np.zeros([M, N]) + DD[:, 0] = fvals[:] + for i in range(1, N): + DD[i:, i] = (np.diff(DD[i - 1:, i - 1]) / + (xvals[i:] - xvals[:M - i])) + return DD + + xvals = np.asarray(xvals) + dd = np.array(fvals) + row = np.array(fvals) + idx2Use = (0 if forward else -1) + dd[0] = fvals[idx2Use] + for i in range(1, len(xvals)): + denom = xvals[i:i + len(row) - 1] - xvals[:len(row) - 1] + row = np.diff(row)[:] / denom + dd[i] = row[idx2Use] + return dd + + +def _interpolated_poly(xvals, fvals, x): + """Compute p(x) for the polynomial passing through the specified locations. 
+
+    Use Neville's algorithm to compute p(x) where p is the minimal degree
+    polynomial passing through the points xvals, fvals"""
+    xvals = np.asarray(xvals)
+    N = len(xvals)
+    Q = np.zeros([N, N])
+    D = np.zeros([N, N])
+    Q[:, 0] = fvals[:]
+    D[:, 0] = fvals[:]
+    for k in range(1, N):
+        alpha = D[k:, k - 1] - Q[k - 1:N - 1, k - 1]
+        diffik = xvals[0:N - k] - xvals[k:N]
+        Q[k:, k] = (xvals[k:] - x) / diffik * alpha
+        D[k:, k] = (xvals[:N - k] - x) / diffik * alpha
+    # Expect Q[-1, 1:] to be small relative to Q[-1, 0] as x approaches a root
+    return np.sum(Q[-1, 1:]) + Q[-1, 0]
+
+
+def _inverse_poly_zero(a, b, c, d, fa, fb, fc, fd):
+    """Inverse cubic interpolation f-values -> x-values
+
+    Given four points (fa, a), (fb, b), (fc, c), (fd, d) with
+    fa, fb, fc, fd all distinct, find poly IP(y) through the 4 points
+    and compute x=IP(0).
+    """
+    return _interpolated_poly([fa, fb, fc, fd], [a, b, c, d], 0)
+
+
+def _newton_quadratic(ab, fab, d, fd, k):
+    """Apply Newton-Raphson-like steps, using divided differences to approximate f'
+
+    ab is a real interval [a, b] containing a root,
+    fab holds the real values of f(a), f(b),
+    d is a real number outside [a, b],
+    k is the number of steps to apply.
+    """
+    a, b = ab
+    fa, fb = fab
+    _, B, A = _compute_divided_differences([a, b, d], [fa, fb, fd],
+                                           forward=True, full=False)
+
+    # _P is the quadratic polynomial through the 3 points
+    def _P(x):
+        # Horner evaluation of fa + B * (x - a) + A * (x - a) * (x - b)
+        return (A * (x - b) + B) * (x - a) + fa
+
+    if A == 0:
+        r = a - fa / B
+    else:
+        r = (a if np.sign(A) * np.sign(fa) > 0 else b)
+        # Apply k Newton-Raphson steps to _P(x), starting from x=r
+        for i in range(k):
+            r1 = r - _P(r) / (B + A * (2 * r - a - b))
+            if not (ab[0] < r1 < ab[1]):
+                if (ab[0] < r < ab[1]):
+                    return r
+                r = sum(ab) / 2.0
+                break
+            r = r1
+
+    return r
+
+
+class TOMS748Solver:
+    """Solve f(x, *args) == 0 using Algorithm 748 of Alefeld, Potra & Shi.
+    """
+    _MU = 0.5
+    _K_MIN = 1
+    _K_MAX = 100  # A very high value for real usage. Expect 1, 2, maybe 3.
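+    # Added commentary, not part of the original source: _MU is the minimum
+    # per-iteration shrinkage factor -- iterate() falls back to a bisection
+    # step whenever an iteration leaves the bracket wider than _MU times its
+    # starting width. _K_MIN and _K_MAX bound the user-supplied k, the number
+    # of "Newton-quadratic" sub-steps attempted in each iteration (see
+    # configure() below).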
+
+    def __init__(self):
+        self.f = None
+        self.args = None
+        self.function_calls = 0
+        self.iterations = 0
+        self.k = 2
+        # ab=[a,b] is a global interval containing a root
+        self.ab = [np.nan, np.nan]
+        # fab is function values at a, b
+        self.fab = [np.nan, np.nan]
+        self.d = None
+        self.fd = None
+        self.e = None
+        self.fe = None
+        self.disp = False
+        self.xtol = _xtol
+        self.rtol = _rtol
+        self.maxiter = _iter
+
+    def configure(self, xtol, rtol, maxiter, disp, k):
+        self.disp = disp
+        self.xtol = xtol
+        self.rtol = rtol
+        self.maxiter = maxiter
+        # Silently replace a low value of k with 1
+        self.k = max(k, self._K_MIN)
+        # Noisily replace a high value of k with self._K_MAX
+        if self.k > self._K_MAX:
+            msg = "toms748: Overriding k: ->%d" % self._K_MAX
+            warnings.warn(msg, RuntimeWarning, stacklevel=3)
+            self.k = self._K_MAX
+
+    def _callf(self, x, error=True):
+        """Call the user-supplied function, update book-keeping"""
+        fx = self.f(x, *self.args)
+        self.function_calls += 1
+        if not np.isfinite(fx) and error:
+            raise ValueError(f"Invalid function value: f({x:f}) -> {fx}")
+        return fx
+
+    def get_result(self, x, flag=_ECONVERGED):
+        r"""Package the result and statistics into a tuple."""
+        return (x, self.function_calls, self.iterations, flag)
+
+    def _update_bracket(self, c, fc):
+        return _update_bracket(self.ab, self.fab, c, fc)
+
+    def start(self, f, a, b, args=()):
+        r"""Prepare for the iterations."""
+        self.function_calls = 0
+        self.iterations = 0
+
+        self.f = f
+        self.args = args
+        self.ab[:] = [a, b]
+        if not np.isfinite(a) or np.imag(a) != 0:
+            raise ValueError("Invalid x value: %s" % a)
+        if not np.isfinite(b) or np.imag(b) != 0:
+            raise ValueError("Invalid x value: %s" % b)
+
+        fa = self._callf(a)
+        if not np.isfinite(fa) or np.imag(fa) != 0:
+            raise ValueError(f"Invalid function value: f({a:f}) -> {fa}")
+        if fa == 0:
+            return _ECONVERGED, a
+        fb = self._callf(b)
+        if not np.isfinite(fb) or np.imag(fb) != 0:
+            raise ValueError(f"Invalid function value: f({b:f}) -> {fb}")
+        if fb == 0:
+            return _ECONVERGED, b
+
+        if np.sign(fb) * np.sign(fa) > 0:
+            raise ValueError("f(a) and f(b) must have different signs, but "
+                             f"f({a:e})={fa:e}, f({b:e})={fb:e}")
+        self.fab[:] = [fa, fb]
+
+        return _EINPROGRESS, sum(self.ab) / 2.0
+
+    def get_status(self):
+        """Determine the current status."""
+        a, b = self.ab[:2]
+        if np.isclose(a, b, rtol=self.rtol, atol=self.xtol):
+            return _ECONVERGED, sum(self.ab) / 2.0
+        if self.iterations >= self.maxiter:
+            return _ECONVERR, sum(self.ab) / 2.0
+        return _EINPROGRESS, sum(self.ab) / 2.0
+
+    def iterate(self):
+        """Perform one step in the algorithm.
+
+        Implements Algorithm 4.1 (k=1) or 4.2 (k=2) in [APS1995]
+        """
+        self.iterations += 1
+        eps = np.finfo(float).eps
+        d, fd, e, fe = self.d, self.fd, self.e, self.fe
+        ab_width = self.ab[1] - self.ab[0]  # Need the start width below
+        c = None
+
+        for nsteps in range(2, self.k+2):
+            # If the f-values are sufficiently separated, perform an inverse
+            # polynomial interpolation step. Otherwise, perform nsteps
+            # repeats of an approximate Newton-Raphson step.
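+            # (Added note: _notclose() guards the interpolation step --
+            # inverse cubic interpolation through (f(a), a), (f(b), b),
+            # (f(d), d), (f(e), e) is only well defined when the four
+            # f-values are pairwise distinct, hence the strict
+            # rtol=0, atol=32*eps separation test below.)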
+            if _notclose(self.fab + [fd, fe], rtol=0, atol=32*eps):
+                c0 = _inverse_poly_zero(self.ab[0], self.ab[1], d, e,
+                                        self.fab[0], self.fab[1], fd, fe)
+                if self.ab[0] < c0 < self.ab[1]:
+                    c = c0
+            if c is None:
+                c = _newton_quadratic(self.ab, self.fab, d, fd, nsteps)
+
+            fc = self._callf(c)
+            if fc == 0:
+                return _ECONVERGED, c
+
+            # re-bracket
+            e, fe = d, fd
+            d, fd = self._update_bracket(c, fc)
+
+        # u is the endpoint with the smallest f-value
+        uix = (0 if np.abs(self.fab[0]) < np.abs(self.fab[1]) else 1)
+        u, fu = self.ab[uix], self.fab[uix]
+
+        _, A = _compute_divided_differences(self.ab, self.fab,
+                                            forward=(uix == 0), full=False)
+        c = u - 2 * fu / A
+        if np.abs(c - u) > 0.5 * (self.ab[1] - self.ab[0]):
+            c = sum(self.ab) / 2.0
+        else:
+            if np.isclose(c, u, rtol=eps, atol=0):
+                # c didn't change (much).
+                # Either because the f-values at the endpoints have vastly
+                # differing magnitudes, or because the root is very close to
+                # that endpoint
+                frs = np.frexp(self.fab)[1]
+                if frs[uix] < frs[1 - uix] - 50:  # Differ by more than 2**50
+                    c = (31 * self.ab[uix] + self.ab[1 - uix]) / 32
+                else:
+                    # Make a bigger adjustment, about the
+                    # size of the requested tolerance.
+                    mm = (1 if uix == 0 else -1)
+                    adj = mm * np.abs(c) * self.rtol + mm * self.xtol
+                    c = u + adj
+                if not self.ab[0] < c < self.ab[1]:
+                    c = sum(self.ab) / 2.0
+
+        fc = self._callf(c)
+        if fc == 0:
+            return _ECONVERGED, c
+
+        e, fe = d, fd
+        d, fd = self._update_bracket(c, fc)
+
+        # If the width of the new interval did not decrease enough, bisect
+        if self.ab[1] - self.ab[0] > self._MU * ab_width:
+            e, fe = d, fd
+            z = sum(self.ab) / 2.0
+            fz = self._callf(z)
+            if fz == 0:
+                return _ECONVERGED, z
+            d, fd = self._update_bracket(z, fz)
+
+        # Record d and e for next iteration
+        self.d, self.fd = d, fd
+        self.e, self.fe = e, fe
+
+        status, xn = self.get_status()
+        return status, xn
+
+    def solve(self, f, a, b, args=(),
+              xtol=_xtol, rtol=_rtol, k=2, maxiter=_iter, disp=True):
+        r"""Solve f(x) = 0 given an interval containing a root."""
+        self.configure(xtol=xtol, rtol=rtol, maxiter=maxiter, disp=disp, k=k)
+        status, xn = self.start(f, a, b, args)
+        if status == _ECONVERGED:
+            return self.get_result(xn)
+
+        # The first step only has two x-values.
+        c = _secant(self.ab, self.fab)
+        if not self.ab[0] < c < self.ab[1]:
+            c = sum(self.ab) / 2.0
+        fc = self._callf(c)
+        if fc == 0:
+            return self.get_result(c)
+
+        self.d, self.fd = self._update_bracket(c, fc)
+        self.e, self.fe = None, None
+        self.iterations += 1
+
+        while True:
+            status, xn = self.iterate()
+            if status == _ECONVERGED:
+                return self.get_result(xn)
+            if status == _ECONVERR:
+                fmt = "Failed to converge after %d iterations, bracket is %s"
+                if disp:
+                    msg = fmt % (self.iterations + 1, self.ab)
+                    raise RuntimeError(msg)
+                return self.get_result(xn, _ECONVERR)
+
+
+def toms748(f, a, b, args=(), k=1,
+            xtol=_xtol, rtol=_rtol, maxiter=_iter,
+            full_output=False, disp=True):
+    """
+    Find a root using TOMS Algorithm 748.
+
+    Implements the Algorithm 748 method of Alefeld, Potra and Shi to find a
+    root of the function `f` on the interval `[a, b]`, where `f(a)` and
+    `f(b)` must have opposite signs.
+
+    It uses a mixture of inverse cubic interpolation and
+    "Newton-quadratic" steps [APS1995]_.
+
+    Parameters
+    ----------
+    f : function
+        Python function returning a scalar. The function :math:`f`
+        must be continuous, and :math:`f(a)` and :math:`f(b)`
+        have opposite signs.
+    a : scalar
+        Lower boundary of the search interval.
+    b : scalar
+        Upper boundary of the search interval.
+    args : tuple, optional
+        Containing extra arguments for the function `f`.
+        `f` is called by ``f(x, *args)``.
+    k : int, optional
+        The number of Newton quadratic steps to perform per
+        iteration. ``k>=1``.
+    xtol : scalar, optional
+        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
+        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
+        parameter must be positive.
+    rtol : scalar, optional
+        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
+        atol=xtol, rtol=rtol)``, where ``x`` is the exact root.
+    maxiter : int, optional
+        If convergence is not achieved in `maxiter` iterations, an error is
+        raised. Must be >= 0.
+    full_output : bool, optional
+        If `full_output` is False, the root is returned. If `full_output` is
+        True, the return value is ``(x, r)``, where `x` is the root, and `r` is
+        a `RootResults` object.
+    disp : bool, optional
+        If True, raise RuntimeError if the algorithm didn't converge.
+        Otherwise, the convergence status is recorded in the `RootResults`
+        return object.
+
+    Returns
+    -------
+    root : float
+        Approximate root of `f`
+    r : `RootResults` (present if ``full_output = True``)
+        Object containing information about the convergence. In particular,
+        ``r.converged`` is True if the routine converged.
+
+    See Also
+    --------
+    brentq, brenth, ridder, bisect, newton
+    fsolve : find roots in N dimensions.
+
+    Notes
+    -----
+    `f` must be continuous.
+    Algorithm 748 with ``k=2`` is asymptotically the most efficient
+    algorithm known for finding roots of a four times continuously
+    differentiable function.
+    In contrast with Brent's algorithm, which may only decrease the length of
+    the enclosing bracket on the last step, Algorithm 748 decreases it each
+    iteration with the same asymptotic efficiency as it finds the root.
+
+    For easy statement of efficiency indices, assume that `f` has 4
+    continuous derivatives.
+    For ``k=1``, the convergence order is at least 2.7, and with
+    asymptotically about 2 function evaluations per iteration, the efficiency
+    index is approximately 1.65.
+    For ``k=2``, the order is about 4.6 with asymptotically 3 function
+    evaluations per iteration, and the efficiency index 1.66.
+    For higher values of `k`, the efficiency index approaches
+    the kth root of ``(3k-2)``, hence ``k=1`` or ``k=2`` are
+    usually appropriate.
+
+    References
+    ----------
+    .. [APS1995]
+       Alefeld, G. E. and Potra, F. A. and Shi, Yixun,
+       *Algorithm 748: Enclosing Zeros of Continuous Functions*,
+       ACM Trans. Math. Softw. Vol. 21, Issue 3, Sept. 1995, pp. 327-344.
+       :doi:`10.1145/210089.210111`
+
+    Examples
+    --------
+    >>> def f(x):
+    ...     return (x**3 - 1)  # only one real root at x = 1
+
+    >>> from scipy import optimize
+    >>> root, results = optimize.toms748(f, 0, 2, full_output=True)
+    >>> root
+    1.0
+    >>> results
+          converged: True
+               flag: converged
+     function_calls: 11
+         iterations: 5
+               root: 1.0
+             method: toms748
+    """
+    if xtol <= 0:
+        raise ValueError("xtol too small (%g <= 0)" % xtol)
+    if rtol < _rtol / 4:
+        raise ValueError(f"rtol too small ({rtol:g} < {_rtol/4:g})")
+    maxiter = operator.index(maxiter)
+    if maxiter < 1:
+        raise ValueError("maxiter must be greater than 0")
+    if not np.isfinite(a):
+        raise ValueError("a is not finite %s" % a)
+    if not np.isfinite(b):
+        raise ValueError("b is not finite %s" % b)
+    if a >= b:
+        raise ValueError(f"a and b are not an interval [{a}, {b}]")
+    if not k >= 1:
+        raise ValueError("k too small (%s < 1)" % k)
+
+    if not isinstance(args, tuple):
+        args = (args,)
+    f = _wrap_nan_raise(f)
+    solver = TOMS748Solver()
+    result = solver.solve(f, a, b, args=args, k=k, xtol=xtol, rtol=rtol,
+                          maxiter=maxiter, disp=disp)
+    x, function_calls, iterations, flag = result
+    return _results_select(full_output, (x, function_calls, iterations, flag),
+                           "toms748")
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/cython_optimize.pxd b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/cython_optimize.pxd
new file mode 100644
index 0000000000000000000000000000000000000000..d35f8da68b34d3a587f3a99326770d8550a2135c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/cython_optimize.pxd
@@ -0,0 +1,11 @@
+# Public Cython API declarations
+#
+# See doc/source/dev/contributor/public_cython_api.rst for guidelines
+
+
+# The following cimport statement provides legacy ABI
+# support. Changing it causes an ABI forward-compatibility break
+# (gh-11793), so we currently leave it as is (no further cimport
+# statements should be used in this file).
+from scipy.optimize.cython_optimize._zeros cimport (
+    brentq, brenth, ridder, bisect, zeros_full_output)
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/nonlin.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/nonlin.py
new file mode 100644
index 0000000000000000000000000000000000000000..38c43c3d848e9f142bae2e7f55ace4482569998e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/nonlin.py
@@ -0,0 +1,57 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.optimize` namespace for importing the functions
+# included below.
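+#
+# A minimal sketch of how this shim behaves (added illustration; the exact
+# warning message comes from `_sub_module_deprecation` and may differ):
+#
+#     >>> import warnings
+#     >>> with warnings.catch_warnings(record=True) as w:
+#     ...     warnings.simplefilter("always")
+#     ...     from scipy.optimize.nonlin import broyden1  # deprecated path
+#     >>> issubclass(w[-1].category, DeprecationWarning)
+#     True
+#
+# Prefer ``from scipy.optimize import broyden1`` instead.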
+
+from scipy._lib.deprecation import _sub_module_deprecation
+
+
+__all__ = [  # noqa: F822
+    'Anderson',
+    'BroydenFirst',
+    'BroydenSecond',
+    'DiagBroyden',
+    'ExcitingMixing',
+    'GenericBroyden',
+    'InverseJacobian',
+    'Jacobian',
+    'KrylovJacobian',
+    'LinAlgError',
+    'LinearMixing',
+    'LowRankMatrix',
+    'NoConvergence',
+    'TerminationCondition',
+    'anderson',
+    'asarray',
+    'asjacobian',
+    'broyden1',
+    'broyden2',
+    'diagbroyden',
+    'dot',
+    'excitingmixing',
+    'get_blas_funcs',
+    'inspect',
+    'inv',
+    'linearmixing',
+    'maxnorm',
+    'newton_krylov',
+    'nonlin_solve',
+    'norm',
+    'qr',
+    'scalar_search_armijo',
+    'scalar_search_wolfe1',
+    'scipy',
+    'solve',
+    'svd',
+    'sys',
+    'vdot',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    return _sub_module_deprecation(sub_package="optimize", module="nonlin",
+                                   private_modules=["_nonlin"], all=__all__,
+                                   attribute=name)
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tnc.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tnc.py
new file mode 100644
index 0000000000000000000000000000000000000000..92ff24432c681517aa82aa4ffed5e6c1eee10002
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/tnc.py
@@ -0,0 +1,44 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.optimize` namespace for importing the functions
+# included below.
+
+from scipy._lib.deprecation import _sub_module_deprecation
+
+
+__all__ = [  # noqa: F822
+    'CONSTANT',
+    'FCONVERGED',
+    'INFEASIBLE',
+    'LOCALMINIMUM',
+    'LSFAIL',
+    'MAXFUN',
+    'MSGS',
+    'MSG_ALL',
+    'MSG_EXIT',
+    'MSG_INFO',
+    'MSG_ITER',
+    'MSG_NONE',
+    'MSG_VERS',
+    'MemoizeJac',
+    'NOPROGRESS',
+    'OptimizeResult',
+    'RCSTRINGS',
+    'USERABORT',
+    'XCONVERGED',
+    'array',
+    'fmin_tnc',
+    'inf',
+    'moduleTNC',
+    'old_bound_to_new',
+    'zeros',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    return _sub_module_deprecation(sub_package="optimize", module="tnc",
+                                   private_modules=["_tnc"], all=__all__,
+                                   attribute=name)
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/zeros.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/zeros.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b8fc89eb5f9a93109f3fad31bc887df85998bae
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/zeros.py
@@ -0,0 +1,36 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.optimize` namespace for importing the functions
+# included below.
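+#
+# For example, prefer ``from scipy.optimize import brentq, toms748`` to
+# importing through this module: attribute access here is routed through the
+# module-level ``__getattr__`` below, which emits a DeprecationWarning
+# (added note).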
+
+from scipy._lib.deprecation import _sub_module_deprecation
+
+
+__all__ = [  # noqa: F822
+    'CONVERGED',
+    'CONVERR',
+    'INPROGRESS',
+    'RootResults',
+    'SIGNERR',
+    'TOMS748Solver',
+    'VALUEERR',
+    'bisect',
+    'brenth',
+    'brentq',
+    'flag_map',
+    'namedtuple',
+    'newton',
+    'operator',
+    'results_c',
+    'ridder',
+    'toms748',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    return _sub_module_deprecation(sub_package="optimize", module="zeros",
+                                   private_modules=["_zeros_py"], all=__all__,
+                                   attribute=name)
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/data/levy_stable/stable-Z1-pdf-sample-data.npy b/llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/data/levy_stable/stable-Z1-pdf-sample-data.npy
new file mode 100644
index 0000000000000000000000000000000000000000..6c41166721b891a801cdc6828804c6da7233d625
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/data/levy_stable/stable-Z1-pdf-sample-data.npy
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fee99512bab4ccc6569b47b924e4b034e1cdbab5624fafc7e120648bd5f7a128
+size 183688
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/data/levy_stable/stable-loc-scale-sample-data.npy b/llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/data/levy_stable/stable-loc-scale-sample-data.npy
new file mode 100644
index 0000000000000000000000000000000000000000..0a1460e407521836a9b73a081609af4ccdb6deae
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/data/levy_stable/stable-loc-scale-sample-data.npy
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f3c719edd5431fb9e7b9ecb6d19e3ca7a9095298bd19f226685b0fca40f0c073
+size 9328