diff --git a/ckpts/universal/global_step40/zero/13.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/13.mlp.dense_h_to_4h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..db1cd4f38cea0def3071f6dc985b444826e171eb --- /dev/null +++ b/ckpts/universal/global_step40/zero/13.mlp.dense_h_to_4h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dca47d9e08e05365f06d99de39e3906c525902dbfb9fd433f82336907ff8c8ee +size 33555627 diff --git a/ckpts/universal/global_step40/zero/13.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step40/zero/13.mlp.dense_h_to_4h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..5698d8b4265a8abc9196571244a9bf30eb5ee96e --- /dev/null +++ b/ckpts/universal/global_step40/zero/13.mlp.dense_h_to_4h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:346836985993271f5166b56fa60162ce854d4b23350c1eadb9ad378f144cfdf9 +size 33555533 diff --git a/venv/lib/python3.10/site-packages/scipy/datasets/__init__.py b/venv/lib/python3.10/site-packages/scipy/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..08e2e13478365fb7b227b461418d2f31e3cb76d2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/datasets/__init__.py @@ -0,0 +1,90 @@ +""" +================================ +Datasets (:mod:`scipy.datasets`) +================================ + +.. currentmodule:: scipy.datasets + +Dataset Methods +=============== + +.. autosummary:: + :toctree: generated/ + + ascent + face + electrocardiogram + +Utility Methods +=============== + +.. autosummary:: + :toctree: generated/ + + download_all -- Download all the dataset files to specified path. + clear_cache -- Clear cached dataset directory. 
+ + +Usage of Datasets +================= + +SciPy dataset methods can be simply called as follows: ``'()'`` +This downloads the dataset files over the network once, and saves the cache, +before returning a `numpy.ndarray` object representing the dataset. + +Note that the return data structure and data type might be different for +different dataset methods. For a more detailed example on usage, please look +into the particular dataset method documentation above. + + +How dataset retrieval and storage works +======================================= + +SciPy dataset files are stored within individual github repositories under the +SciPy GitHub organization, following a naming convention as +``'dataset-'``, for example `scipy.datasets.face` files live at +https://github.com/scipy/dataset-face. The `scipy.datasets` submodule utilizes +and depends on `Pooch `_, a Python +package built to simplify fetching data files. Pooch uses these repos to +retrieve the respective dataset files when calling the dataset function. + +A registry of all the datasets, essentially a mapping of filenames with their +SHA256 hash and repo urls are maintained, which Pooch uses to handle and verify +the downloads on function call. After downloading the dataset once, the files +are saved in the system cache directory under ``'scipy-data'``. + +Dataset cache locations may vary on different platforms. + +For macOS:: + + '~/Library/Caches/scipy-data' + +For Linux and other Unix-like platforms:: + + '~/.cache/scipy-data' # or the value of the XDG_CACHE_HOME env var, if defined + +For Windows:: + + 'C:\\Users\\\\AppData\\Local\\\\scipy-data\\Cache' + + +In environments with constrained network connectivity for various security +reasons or on systems without continuous internet connections, one may manually +load the cache of the datasets by placing the contents of the dataset repo in +the above mentioned cache directory to avoid fetching dataset errors without +the internet connectivity. 
+ +""" + + +from ._fetchers import face, ascent, electrocardiogram +from ._download_all import download_all +from ._utils import clear_cache + +__all__ = ['ascent', 'electrocardiogram', 'face', + 'download_all', 'clear_cache'] + + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/venv/lib/python3.10/site-packages/scipy/datasets/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/datasets/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5da67ad448bd9ab5eca952c72ce4f674dddf2d0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/datasets/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/datasets/__pycache__/_download_all.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/datasets/__pycache__/_download_all.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b02711481538cc74971bf668afec0fb37b23badc Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/datasets/__pycache__/_download_all.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/datasets/__pycache__/_fetchers.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/datasets/__pycache__/_fetchers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc3baf0ccf7690c6e3e02f8a84128c4b16f2e85b Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/datasets/__pycache__/_fetchers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/datasets/__pycache__/_registry.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/datasets/__pycache__/_registry.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cbc552901b5657c954c529bbdbfc22fee76dc058 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/scipy/datasets/__pycache__/_registry.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/datasets/__pycache__/_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/datasets/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02b2d8774e1446a7355c91c8c92448085a797569 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/datasets/__pycache__/_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/datasets/_download_all.py b/venv/lib/python3.10/site-packages/scipy/datasets/_download_all.py new file mode 100644 index 0000000000000000000000000000000000000000..255fdcaf22950848f458a7ed9ada183e0a2e630e --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/datasets/_download_all.py @@ -0,0 +1,57 @@ +""" +Platform independent script to download all the +`scipy.datasets` module data files. +This doesn't require a full scipy build. + +Run: python _download_all.py +""" + +import argparse +try: + import pooch +except ImportError: + pooch = None + + +if __package__ is None or __package__ == '': + # Running as python script, use absolute import + import _registry # type: ignore +else: + # Running as python module, use relative import + from . import _registry + + +def download_all(path=None): + """ + Utility method to download all the dataset files + for `scipy.datasets` module. + + Parameters + ---------- + path : str, optional + Directory path to download all the dataset files. + If None, default to the system cache_dir detected by pooch. + """ + if pooch is None: + raise ImportError("Missing optional dependency 'pooch' required " + "for scipy.datasets module. 
Please use pip or " + "conda to install 'pooch'.") + if path is None: + path = pooch.os_cache('scipy-data') + for dataset_name, dataset_hash in _registry.registry.items(): + pooch.retrieve(url=_registry.registry_urls[dataset_name], + known_hash=dataset_hash, + fname=dataset_name, path=path) + + +def main(): + parser = argparse.ArgumentParser(description='Download SciPy data files.') + parser.add_argument("path", nargs='?', type=str, + default=pooch.os_cache('scipy-data'), + help="Directory path to download all the data files.") + args = parser.parse_args() + download_all(args.path) + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/scipy/datasets/_fetchers.py b/venv/lib/python3.10/site-packages/scipy/datasets/_fetchers.py new file mode 100644 index 0000000000000000000000000000000000000000..51dfbc4498f9d55c61a8d22b9d7db8c21cfa5a68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/datasets/_fetchers.py @@ -0,0 +1,220 @@ +from numpy import array, frombuffer, load +from ._registry import registry, registry_urls + +try: + import pooch +except ImportError: + pooch = None + data_fetcher = None +else: + data_fetcher = pooch.create( + # Use the default cache folder for the operating system + # Pooch uses appdirs (https://github.com/ActiveState/appdirs) to + # select an appropriate directory for the cache on each platform. + path=pooch.os_cache("scipy-data"), + + # The remote data is on Github + # base_url is a required param, even though we override this + # using individual urls in the registry. + base_url="https://github.com/scipy/", + registry=registry, + urls=registry_urls + ) + + +def fetch_data(dataset_name, data_fetcher=data_fetcher): + if data_fetcher is None: + raise ImportError("Missing optional dependency 'pooch' required " + "for scipy.datasets module. Please use pip or " + "conda to install 'pooch'.") + # The "fetch" method returns the full path to the downloaded data file. 
+ return data_fetcher.fetch(dataset_name) + + +def ascent(): + """ + Get an 8-bit grayscale bit-depth, 512 x 512 derived image for easy + use in demos. + + The image is derived from accent-to-the-top.jpg at + http://www.public-domain-image.com/people-public-domain-images-pictures/ + + Parameters + ---------- + None + + Returns + ------- + ascent : ndarray + convenient image to use for testing and demonstration + + Examples + -------- + >>> import scipy.datasets + >>> ascent = scipy.datasets.ascent() + >>> ascent.shape + (512, 512) + >>> ascent.max() + 255 + + >>> import matplotlib.pyplot as plt + >>> plt.gray() + >>> plt.imshow(ascent) + >>> plt.show() + + """ + import pickle + + # The file will be downloaded automatically the first time this is run, + # returning the path to the downloaded file. Afterwards, Pooch finds + # it in the local cache and doesn't repeat the download. + fname = fetch_data("ascent.dat") + # Now we just need to load it with our standard Python tools. + with open(fname, 'rb') as f: + ascent = array(pickle.load(f)) + return ascent + + +def electrocardiogram(): + """ + Load an electrocardiogram as an example for a 1-D signal. + + The returned signal is a 5 minute long electrocardiogram (ECG), a medical + recording of the heart's electrical activity, sampled at 360 Hz. + + Returns + ------- + ecg : ndarray + The electrocardiogram in millivolt (mV) sampled at 360 Hz. + + Notes + ----- + The provided signal is an excerpt (19:35 to 24:35) from the `record 208`_ + (lead MLII) provided by the MIT-BIH Arrhythmia Database [1]_ on + PhysioNet [2]_. The excerpt includes noise induced artifacts, typical + heartbeats as well as pathological changes. + + .. _record 208: https://physionet.org/physiobank/database/html/mitdbdir/records.htm#208 + + .. versionadded:: 1.1.0 + + References + ---------- + .. [1] Moody GB, Mark RG. The impact of the MIT-BIH Arrhythmia Database. + IEEE Eng in Med and Biol 20(3):45-50 (May-June 2001). 
+ (PMID: 11446209); :doi:`10.13026/C2F305` + .. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh, + Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. PhysioBank, + PhysioToolkit, and PhysioNet: Components of a New Research Resource + for Complex Physiologic Signals. Circulation 101(23):e215-e220; + :doi:`10.1161/01.CIR.101.23.e215` + + Examples + -------- + >>> from scipy.datasets import electrocardiogram + >>> ecg = electrocardiogram() + >>> ecg + array([-0.245, -0.215, -0.185, ..., -0.405, -0.395, -0.385]) + >>> ecg.shape, ecg.mean(), ecg.std() + ((108000,), -0.16510875, 0.5992473991177294) + + As stated the signal features several areas with a different morphology. + E.g., the first few seconds show the electrical activity of a heart in + normal sinus rhythm as seen below. + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> fs = 360 + >>> time = np.arange(ecg.size) / fs + >>> plt.plot(time, ecg) + >>> plt.xlabel("time in s") + >>> plt.ylabel("ECG in mV") + >>> plt.xlim(9, 10.2) + >>> plt.ylim(-1, 1.5) + >>> plt.show() + + After second 16, however, the first premature ventricular contractions, + also called extrasystoles, appear. These have a different morphology + compared to typical heartbeats. The difference can easily be observed + in the following plot. + + >>> plt.plot(time, ecg) + >>> plt.xlabel("time in s") + >>> plt.ylabel("ECG in mV") + >>> plt.xlim(46.5, 50) + >>> plt.ylim(-2, 1.5) + >>> plt.show() + + At several points large artifacts disturb the recording, e.g.: + + >>> plt.plot(time, ecg) + >>> plt.xlabel("time in s") + >>> plt.ylabel("ECG in mV") + >>> plt.xlim(207, 215) + >>> plt.ylim(-2, 3.5) + >>> plt.show() + + Finally, examining the power spectrum reveals that most of the biosignal is + made up of lower frequencies. At 60 Hz the noise induced by the mains + electricity can be clearly observed. 
+ + >>> from scipy.signal import welch + >>> f, Pxx = welch(ecg, fs=fs, nperseg=2048, scaling="spectrum") + >>> plt.semilogy(f, Pxx) + >>> plt.xlabel("Frequency in Hz") + >>> plt.ylabel("Power spectrum of the ECG in mV**2") + >>> plt.xlim(f[[0, -1]]) + >>> plt.show() + """ + fname = fetch_data("ecg.dat") + with load(fname) as file: + ecg = file["ecg"].astype(int) # np.uint16 -> int + # Convert raw output of ADC to mV: (ecg - adc_zero) / adc_gain + ecg = (ecg - 1024) / 200.0 + return ecg + + +def face(gray=False): + """ + Get a 1024 x 768, color image of a raccoon face. + + raccoon-procyon-lotor.jpg at http://www.public-domain-image.com + + Parameters + ---------- + gray : bool, optional + If True return 8-bit grey-scale image, otherwise return a color image + + Returns + ------- + face : ndarray + image of a raccoon face + + Examples + -------- + >>> import scipy.datasets + >>> face = scipy.datasets.face() + >>> face.shape + (768, 1024, 3) + >>> face.max() + 255 + >>> face.dtype + dtype('uint8') + + >>> import matplotlib.pyplot as plt + >>> plt.gray() + >>> plt.imshow(face) + >>> plt.show() + + """ + import bz2 + fname = fetch_data("face.dat") + with open(fname, 'rb') as f: + rawdata = f.read() + face_data = bz2.decompress(rawdata) + face = frombuffer(face_data, dtype='uint8') + face.shape = (768, 1024, 3) + if gray is True: + face = (0.21 * face[:, :, 0] + 0.71 * face[:, :, 1] + + 0.07 * face[:, :, 2]).astype('uint8') + return face diff --git a/venv/lib/python3.10/site-packages/scipy/datasets/_registry.py b/venv/lib/python3.10/site-packages/scipy/datasets/_registry.py new file mode 100644 index 0000000000000000000000000000000000000000..969384ad9843159e766100bfa9755aed8102dd09 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/datasets/_registry.py @@ -0,0 +1,26 @@ +########################################################################## +# This file serves as the dataset registry for SciPy Datasets SubModule. 
+########################################################################## + + +# To generate the SHA256 hash, use the command +# openssl sha256 +registry = { + "ascent.dat": "03ce124c1afc880f87b55f6b061110e2e1e939679184f5614e38dacc6c1957e2", + "ecg.dat": "f20ad3365fb9b7f845d0e5c48b6fe67081377ee466c3a220b7f69f35c8958baf", + "face.dat": "9d8b0b4d081313e2b485748c770472e5a95ed1738146883d84c7030493e82886" +} + +registry_urls = { + "ascent.dat": "https://raw.githubusercontent.com/scipy/dataset-ascent/main/ascent.dat", + "ecg.dat": "https://raw.githubusercontent.com/scipy/dataset-ecg/main/ecg.dat", + "face.dat": "https://raw.githubusercontent.com/scipy/dataset-face/main/face.dat" +} + +# dataset method mapping with their associated filenames +# : ["filename1", "filename2", ...] +method_files_map = { + "ascent": ["ascent.dat"], + "electrocardiogram": ["ecg.dat"], + "face": ["face.dat"] +} diff --git a/venv/lib/python3.10/site-packages/scipy/datasets/_utils.py b/venv/lib/python3.10/site-packages/scipy/datasets/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8f644f8797d6e3256a16ec2c509eec725c726300 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/datasets/_utils.py @@ -0,0 +1,81 @@ +import os +import shutil +from ._registry import method_files_map + +try: + import platformdirs +except ImportError: + platformdirs = None # type: ignore[assignment] + + +def _clear_cache(datasets, cache_dir=None, method_map=None): + if method_map is None: + # Use SciPy Datasets method map + method_map = method_files_map + if cache_dir is None: + # Use default cache_dir path + if platformdirs is None: + # platformdirs is pooch dependency + raise ImportError("Missing optional dependency 'pooch' required " + "for scipy.datasets module. Please use pip or " + "conda to install 'pooch'.") + cache_dir = platformdirs.user_cache_dir("scipy-data") + + if not os.path.exists(cache_dir): + print(f"Cache Directory {cache_dir} doesn't exist. 
Nothing to clear.") + return + + if datasets is None: + print(f"Cleaning the cache directory {cache_dir}!") + shutil.rmtree(cache_dir) + else: + if not isinstance(datasets, (list, tuple)): + # single dataset method passed should be converted to list + datasets = [datasets, ] + for dataset in datasets: + assert callable(dataset) + dataset_name = dataset.__name__ # Name of the dataset method + if dataset_name not in method_map: + raise ValueError(f"Dataset method {dataset_name} doesn't " + "exist. Please check if the passed dataset " + "is a subset of the following dataset " + f"methods: {list(method_map.keys())}") + + data_files = method_map[dataset_name] + data_filepaths = [os.path.join(cache_dir, file) + for file in data_files] + for data_filepath in data_filepaths: + if os.path.exists(data_filepath): + print("Cleaning the file " + f"{os.path.split(data_filepath)[1]} " + f"for dataset {dataset_name}") + os.remove(data_filepath) + else: + print(f"Path {data_filepath} doesn't exist. " + "Nothing to clear.") + + +def clear_cache(datasets=None): + """ + Cleans the scipy datasets cache directory. + + If a scipy.datasets method or a list/tuple of the same is + provided, then clear_cache removes all the data files + associated to the passed dataset method callable(s). + + By default, it removes all the cached data files. 
+ + Parameters + ---------- + datasets : callable or list/tuple of callable or None + + Examples + -------- + >>> from scipy import datasets + >>> ascent_array = datasets.ascent() + >>> ascent_array.shape + (512, 512) + >>> datasets.clear_cache([datasets.ascent]) + Cleaning the file ascent.dat for dataset ascent + """ + _clear_cache(datasets) diff --git a/venv/lib/python3.10/site-packages/scipy/datasets/tests/__init__.py b/venv/lib/python3.10/site-packages/scipy/datasets/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a526fdcf20b4e782f8b5699c80db5e0926ebd0c9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/test_data.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/test_data.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..991d17c64e6dd01fe465aa685403c9b52e875005 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/test_data.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/datasets/tests/test_data.py b/venv/lib/python3.10/site-packages/scipy/datasets/tests/test_data.py new file mode 100644 index 0000000000000000000000000000000000000000..f94ebbe71b5ca91b87865153db572db430205443 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/datasets/tests/test_data.py @@ -0,0 +1,123 @@ +from scipy.datasets._registry import registry +from scipy.datasets._fetchers import data_fetcher +from scipy.datasets._utils import _clear_cache +from 
scipy.datasets import ascent, face, electrocardiogram, download_all +from numpy.testing import assert_equal, assert_almost_equal +import os +import pytest + +try: + import pooch +except ImportError: + raise ImportError("Missing optional dependency 'pooch' required " + "for scipy.datasets module. Please use pip or " + "conda to install 'pooch'.") + + +data_dir = data_fetcher.path # type: ignore + + +def _has_hash(path, expected_hash): + """Check if the provided path has the expected hash.""" + if not os.path.exists(path): + return False + return pooch.file_hash(path) == expected_hash + + +class TestDatasets: + + @pytest.fixture(scope='module', autouse=True) + def test_download_all(self): + # This fixture requires INTERNET CONNECTION + + # test_setup phase + download_all() + + yield + + def test_existence_all(self): + assert len(os.listdir(data_dir)) >= len(registry) + + def test_ascent(self): + assert_equal(ascent().shape, (512, 512)) + + # hash check + assert _has_hash(os.path.join(data_dir, "ascent.dat"), + registry["ascent.dat"]) + + def test_face(self): + assert_equal(face().shape, (768, 1024, 3)) + + # hash check + assert _has_hash(os.path.join(data_dir, "face.dat"), + registry["face.dat"]) + + def test_electrocardiogram(self): + # Test shape, dtype and stats of signal + ecg = electrocardiogram() + assert_equal(ecg.dtype, float) + assert_equal(ecg.shape, (108000,)) + assert_almost_equal(ecg.mean(), -0.16510875) + assert_almost_equal(ecg.std(), 0.5992473991177294) + + # hash check + assert _has_hash(os.path.join(data_dir, "ecg.dat"), + registry["ecg.dat"]) + + +def test_clear_cache(tmp_path): + # Note: `tmp_path` is a pytest fixture, it handles cleanup + dummy_basepath = tmp_path / "dummy_cache_dir" + dummy_basepath.mkdir() + + # Create three dummy dataset files for dummy dataset methods + dummy_method_map = {} + for i in range(4): + dummy_method_map[f"data{i}"] = [f"data{i}.dat"] + data_filepath = dummy_basepath / f"data{i}.dat" + data_filepath.write_text("") + 
+ # clear files associated to single dataset method data0 + # also test callable argument instead of list of callables + def data0(): + pass + _clear_cache(datasets=data0, cache_dir=dummy_basepath, + method_map=dummy_method_map) + assert not os.path.exists(dummy_basepath/"data0.dat") + + # clear files associated to multiple dataset methods "data3" and "data4" + def data1(): + pass + + def data2(): + pass + _clear_cache(datasets=[data1, data2], cache_dir=dummy_basepath, + method_map=dummy_method_map) + assert not os.path.exists(dummy_basepath/"data1.dat") + assert not os.path.exists(dummy_basepath/"data2.dat") + + # clear multiple dataset files "data3_0.dat" and "data3_1.dat" + # associated with dataset method "data3" + def data4(): + pass + # create files + (dummy_basepath / "data4_0.dat").write_text("") + (dummy_basepath / "data4_1.dat").write_text("") + + dummy_method_map["data4"] = ["data4_0.dat", "data4_1.dat"] + _clear_cache(datasets=[data4], cache_dir=dummy_basepath, + method_map=dummy_method_map) + assert not os.path.exists(dummy_basepath/"data4_0.dat") + assert not os.path.exists(dummy_basepath/"data4_1.dat") + + # wrong dataset method should raise ValueError since it + # doesn't exist in the dummy_method_map + def data5(): + pass + with pytest.raises(ValueError): + _clear_cache(datasets=[data5], cache_dir=dummy_basepath, + method_map=dummy_method_map) + + # remove all dataset cache + _clear_cache(datasets=None, cache_dir=dummy_basepath) + assert not os.path.exists(dummy_basepath) diff --git a/venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..43f2044abfe5bca6b762d6bc30ad65d88d94d9bc Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/_scipy_spectral_test_shim.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/_scipy_spectral_test_shim.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2f1cef28e013526b7b01d56850b437b1b34b961 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/_scipy_spectral_test_shim.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_result_type.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_result_type.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ad0664f29e9420bed8bd068f3f9d339f8fa801a Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_result_type.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_savitzky_golay.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_savitzky_golay.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1adfbe051cfb50ecfbecab206f0141227bef28a Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_savitzky_golay.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_signaltools.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_signaltools.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d57de5f4e43ba74c4624235b7310275eb6cc1070 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_signaltools.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_waveforms.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_waveforms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..512a28f2c3f18e193c01b793a54b85d1d6602a15 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_waveforms.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_wavelets.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_wavelets.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72c9179f18639e731314634927ed2a512db39af7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_wavelets.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/signal/windows/__init__.py b/venv/lib/python3.10/site-packages/scipy/signal/windows/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..967a7c758f69c1c8002d886d78832904c402d2b3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/signal/windows/__init__.py @@ -0,0 +1,52 @@ +""" +Window functions (:mod:`scipy.signal.windows`) +============================================== + +The suite of window functions for filtering and spectral estimation. + +.. currentmodule:: scipy.signal.windows + +.. autosummary:: + :toctree: generated/ + + get_window -- Return a window of a given length and type. 
+ + barthann -- Bartlett-Hann window + bartlett -- Bartlett window + blackman -- Blackman window + blackmanharris -- Minimum 4-term Blackman-Harris window + bohman -- Bohman window + boxcar -- Boxcar window + chebwin -- Dolph-Chebyshev window + cosine -- Cosine window + dpss -- Discrete prolate spheroidal sequences + exponential -- Exponential window + flattop -- Flat top window + gaussian -- Gaussian window + general_cosine -- Generalized Cosine window + general_gaussian -- Generalized Gaussian window + general_hamming -- Generalized Hamming window + hamming -- Hamming window + hann -- Hann window + kaiser -- Kaiser window + kaiser_bessel_derived -- Kaiser-Bessel derived window + lanczos -- Lanczos window also known as a sinc window + nuttall -- Nuttall's minimum 4-term Blackman-Harris window + parzen -- Parzen window + taylor -- Taylor window + triang -- Triangular window + tukey -- Tukey window + +""" + +from ._windows import * + +# Deprecated namespaces, to be removed in v2.0.0 +from . 
import windows + +__all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall', + 'blackmanharris', 'flattop', 'bartlett', 'barthann', + 'hamming', 'kaiser', 'kaiser_bessel_derived', 'gaussian', + 'general_gaussian', 'general_cosine', 'general_hamming', + 'chebwin', 'cosine', 'hann', 'exponential', 'tukey', 'taylor', + 'get_window', 'dpss', 'lanczos'] diff --git a/venv/lib/python3.10/site-packages/scipy/signal/windows/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/signal/windows/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..81f735e9d4b3a48fab08d6a3905ca598fba8a1ec Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/signal/windows/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/signal/windows/__pycache__/_windows.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/signal/windows/__pycache__/_windows.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f6ec0190774502dd625e3820f50435ed5e910ee Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/signal/windows/__pycache__/_windows.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/signal/windows/__pycache__/windows.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/signal/windows/__pycache__/windows.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d1990704078b7d79360ffd76838de56ad0c642f Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/signal/windows/__pycache__/windows.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/signal/windows/_windows.py b/venv/lib/python3.10/site-packages/scipy/signal/windows/_windows.py new file mode 100644 index 0000000000000000000000000000000000000000..bafd48b2457633c569dae640ca83158163820513 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/scipy/signal/windows/_windows.py @@ -0,0 +1,2374 @@ +"""The suite of window functions.""" + +import operator +import warnings + +import numpy as np +from scipy import linalg, special, fft as sp_fft + +__all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall', + 'blackmanharris', 'flattop', 'bartlett', 'barthann', + 'hamming', 'kaiser', 'kaiser_bessel_derived', 'gaussian', + 'general_cosine', 'general_gaussian', 'general_hamming', + 'chebwin', 'cosine', 'hann', 'exponential', 'tukey', 'taylor', + 'dpss', 'get_window', 'lanczos'] + + +def _len_guards(M): + """Handle small or incorrect window lengths""" + if int(M) != M or M < 0: + raise ValueError('Window length M must be a non-negative integer') + return M <= 1 + + +def _extend(M, sym): + """Extend window by 1 sample if needed for DFT-even symmetry""" + if not sym: + return M + 1, True + else: + return M, False + + +def _truncate(w, needed): + """Truncate window by 1 sample if needed for DFT-even symmetry""" + if needed: + return w[:-1] + else: + return w + + +def general_cosine(M, a, sym=True): + r""" + Generic weighted sum of cosine terms window + + Parameters + ---------- + M : int + Number of points in the output window + a : array_like + Sequence of weighting coefficients. This uses the convention of being + centered on the origin, so these will typically all be positive + numbers, not alternating sign. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The array of window values. + + References + ---------- + .. [1] A. Nuttall, "Some windows with very good sidelobe behavior," IEEE + Transactions on Acoustics, Speech, and Signal Processing, vol. 29, + no. 1, pp. 84-91, Feb 1981. :doi:`10.1109/TASSP.1981.1163506`. + .. [2] Heinzel G. 
et al., "Spectrum and spectral density estimation by the + Discrete Fourier transform (DFT), including a comprehensive list of + window functions and some new flat-top windows", February 15, 2002 + https://holometer.fnal.gov/GH_FFT.pdf + + Examples + -------- + Heinzel describes a flat-top window named "HFT90D" with formula: [2]_ + + .. math:: w_j = 1 - 1.942604 \cos(z) + 1.340318 \cos(2z) + - 0.440811 \cos(3z) + 0.043097 \cos(4z) + + where + + .. math:: z = \frac{2 \pi j}{N}, j = 0...N - 1 + + Since this uses the convention of starting at the origin, to reproduce the + window, we need to convert every other coefficient to a positive number: + + >>> HFT90D = [1, 1.942604, 1.340318, 0.440811, 0.043097] + + The paper states that the highest sidelobe is at -90.2 dB. Reproduce + Figure 42 by plotting the window and its frequency response, and confirm + the sidelobe level in red: + + >>> import numpy as np + >>> from scipy.signal.windows import general_cosine + >>> from scipy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = general_cosine(1000, HFT90D, sym=False) + >>> plt.plot(window) + >>> plt.title("HFT90D window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 10000) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = np.abs(fftshift(A / abs(A).max())) + >>> response = 20 * np.log10(np.maximum(response, 1e-10)) + >>> plt.plot(freq, response) + >>> plt.axis([-50/1000, 50/1000, -140, 0]) + >>> plt.title("Frequency response of the HFT90D window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + >>> plt.axhline(-90.2, color='red') + >>> plt.show() + """ + if _len_guards(M): + return np.ones(M) + M, needs_trunc = _extend(M, sym) + + fac = np.linspace(-np.pi, np.pi, M) + w = np.zeros(M) + for k in range(len(a)): + w += a[k] * np.cos(k * fac) + + return _truncate(w, needs_trunc) + + +def boxcar(M, 
sym=True): + """Return a boxcar or rectangular window. + + Also known as a rectangular window or Dirichlet window, this is equivalent + to no window at all. + + Parameters + ---------- + M : int + Number of points in the output window. If zero, an empty array + is returned. An exception is thrown when it is negative. + sym : bool, optional + Whether the window is symmetric. (Has no effect for boxcar.) + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1. + + Examples + -------- + Plot the window and its frequency response: + + >>> import numpy as np + >>> from scipy import signal + >>> from scipy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = signal.windows.boxcar(51) + >>> plt.plot(window) + >>> plt.title("Boxcar window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) + >>> plt.plot(freq, response) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the boxcar window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + if _len_guards(M): + return np.ones(M) + M, needs_trunc = _extend(M, sym) + + w = np.ones(M, float) + + return _truncate(w, needs_trunc) + + +def triang(M, sym=True): + """Return a triangular window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero, an empty array + is returned. An exception is thrown when it is negative. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). 
+ + See Also + -------- + bartlett : A triangular window that touches zero + + Examples + -------- + Plot the window and its frequency response: + + >>> import numpy as np + >>> from scipy import signal + >>> from scipy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = signal.windows.triang(51) + >>> plt.plot(window) + >>> plt.title("Triangular window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = np.abs(fftshift(A / abs(A).max())) + >>> response = 20 * np.log10(np.maximum(response, 1e-10)) + >>> plt.plot(freq, response) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the triangular window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + if _len_guards(M): + return np.ones(M) + M, needs_trunc = _extend(M, sym) + + n = np.arange(1, (M + 1) // 2 + 1) + if M % 2 == 0: + w = (2 * n - 1.0) / M + w = np.r_[w, w[::-1]] + else: + w = 2 * n / (M + 1.0) + w = np.r_[w, w[-2::-1]] + + return _truncate(w, needs_trunc) + + +def parzen(M, sym=True): + """Return a Parzen window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero, an empty array + is returned. An exception is thrown when it is negative. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + References + ---------- + .. [1] E. Parzen, "Mathematical Considerations in the Estimation of + Spectra", Technometrics, Vol. 3, No. 2 (May, 1961), pp. 
167-190 + + Examples + -------- + Plot the window and its frequency response: + + >>> import numpy as np + >>> from scipy import signal + >>> from scipy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = signal.windows.parzen(51) + >>> plt.plot(window) + >>> plt.title("Parzen window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) + >>> plt.plot(freq, response) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the Parzen window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + if _len_guards(M): + return np.ones(M) + M, needs_trunc = _extend(M, sym) + + n = np.arange(-(M - 1) / 2.0, (M - 1) / 2.0 + 0.5, 1.0) + na = np.extract(n < -(M - 1) / 4.0, n) + nb = np.extract(abs(n) <= (M - 1) / 4.0, n) + wa = 2 * (1 - np.abs(na) / (M / 2.0)) ** 3.0 + wb = (1 - 6 * (np.abs(nb) / (M / 2.0)) ** 2.0 + + 6 * (np.abs(nb) / (M / 2.0)) ** 3.0) + w = np.r_[wa, wb, wa[::-1]] + + return _truncate(w, needs_trunc) + + +def bohman(M, sym=True): + """Return a Bohman window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero, an empty array + is returned. An exception is thrown when it is negative. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). 
+ + Examples + -------- + Plot the window and its frequency response: + + >>> import numpy as np + >>> from scipy import signal + >>> from scipy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = signal.windows.bohman(51) + >>> plt.plot(window) + >>> plt.title("Bohman window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2047) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) + >>> plt.plot(freq, response) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the Bohman window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + if _len_guards(M): + return np.ones(M) + M, needs_trunc = _extend(M, sym) + + fac = np.abs(np.linspace(-1, 1, M)[1:-1]) + w = (1 - fac) * np.cos(np.pi * fac) + 1.0 / np.pi * np.sin(np.pi * fac) + w = np.r_[0, w, 0] + + return _truncate(w, needs_trunc) + + +def blackman(M, sym=True): + r""" + Return a Blackman window. + + The Blackman window is a taper formed by using the first three terms of + a summation of cosines. It was designed to have close to the minimal + leakage possible. It is close to optimal, only slightly worse than a + Kaiser window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero, an empty array + is returned. An exception is thrown when it is negative. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + Notes + ----- + The Blackman window is defined as + + .. 
math:: w(n) = 0.42 - 0.5 \cos(2\pi n/M) + 0.08 \cos(4\pi n/M) + + The "exact Blackman" window was designed to null out the third and fourth + sidelobes, but has discontinuities at the boundaries, resulting in a + 6 dB/oct fall-off. This window is an approximation of the "exact" window, + which does not null the sidelobes as well, but is smooth at the edges, + improving the fall-off rate to 18 dB/oct. [3]_ + + Most references to the Blackman window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. It is known as a + "near optimal" tapering function, almost as good (by some measures) + as the Kaiser window. + + References + ---------- + .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power + spectra, Dover Publications, New York. + .. [2] Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. + Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. + .. [3] Harris, Fredric J. (Jan 1978). "On the use of Windows for Harmonic + Analysis with the Discrete Fourier Transform". Proceedings of the + IEEE 66 (1): 51-83. :doi:`10.1109/PROC.1978.10837`. 
+ + Examples + -------- + Plot the window and its frequency response: + + >>> import numpy as np + >>> from scipy import signal + >>> from scipy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = signal.windows.blackman(51) + >>> plt.plot(window) + >>> plt.title("Blackman window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = np.abs(fftshift(A / abs(A).max())) + >>> response = 20 * np.log10(np.maximum(response, 1e-10)) + >>> plt.plot(freq, response) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the Blackman window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + # Docstring adapted from NumPy's blackman function + return general_cosine(M, [0.42, 0.50, 0.08], sym) + + +def nuttall(M, sym=True): + """Return a minimum 4-term Blackman-Harris window according to Nuttall. + + This variation is called "Nuttall4c" by Heinzel. [2]_ + + Parameters + ---------- + M : int + Number of points in the output window. If zero, an empty array + is returned. An exception is thrown when it is negative. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + References + ---------- + .. [1] A. Nuttall, "Some windows with very good sidelobe behavior," IEEE + Transactions on Acoustics, Speech, and Signal Processing, vol. 29, + no. 1, pp. 84-91, Feb 1981. :doi:`10.1109/TASSP.1981.1163506`. + .. [2] Heinzel G. 
et al., "Spectrum and spectral density estimation by the + Discrete Fourier transform (DFT), including a comprehensive list of + window functions and some new flat-top windows", February 15, 2002 + https://holometer.fnal.gov/GH_FFT.pdf + + Examples + -------- + Plot the window and its frequency response: + + >>> import numpy as np + >>> from scipy import signal + >>> from scipy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = signal.windows.nuttall(51) + >>> plt.plot(window) + >>> plt.title("Nuttall window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) + >>> plt.plot(freq, response) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the Nuttall window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + return general_cosine(M, [0.3635819, 0.4891775, 0.1365995, 0.0106411], sym) + + +def blackmanharris(M, sym=True): + """Return a minimum 4-term Blackman-Harris window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero, an empty array + is returned. An exception is thrown when it is negative. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). 
+ + Examples + -------- + Plot the window and its frequency response: + + >>> import numpy as np + >>> from scipy import signal + >>> from scipy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = signal.windows.blackmanharris(51) + >>> plt.plot(window) + >>> plt.title("Blackman-Harris window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) + >>> plt.plot(freq, response) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the Blackman-Harris window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + return general_cosine(M, [0.35875, 0.48829, 0.14128, 0.01168], sym) + + +def flattop(M, sym=True): + """Return a flat top window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero, an empty array + is returned. An exception is thrown when it is negative. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + Notes + ----- + Flat top windows are used for taking accurate measurements of signal + amplitude in the frequency domain, with minimal scalloping error from the + center of a frequency bin to its edges, compared to others. This is a + 5th-order cosine window, with the 5 terms optimized to make the main lobe + maximally flat. [1]_ + + References + ---------- + .. [1] D'Antona, Gabriele, and A. Ferrero, "Digital Signal Processing for + Measurement Systems", Springer Media, 2006, p. 70 + :doi:`10.1007/0-387-28666-7`. 
+ + Examples + -------- + Plot the window and its frequency response: + + >>> import numpy as np + >>> from scipy import signal + >>> from scipy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = signal.windows.flattop(51) + >>> plt.plot(window) + >>> plt.title("Flat top window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) + >>> plt.plot(freq, response) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the flat top window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + a = [0.21557895, 0.41663158, 0.277263158, 0.083578947, 0.006947368] + return general_cosine(M, a, sym) + + +def bartlett(M, sym=True): + r""" + Return a Bartlett window. + + The Bartlett window is very similar to a triangular window, except + that the end points are at zero. It is often used in signal + processing for tapering a signal, without generating too much + ripple in the frequency domain. + + Parameters + ---------- + M : int + Number of points in the output window. If zero, an empty array + is returned. An exception is thrown when it is negative. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The triangular window, with the first and last samples equal to zero + and the maximum value normalized to 1 (though the value 1 does not + appear if `M` is even and `sym` is True). + + See Also + -------- + triang : A triangular window that does not touch zero at the ends + + Notes + ----- + The Bartlett window is defined as + + .. 
math:: w(n) = \frac{2}{M-1} \left( + \frac{M-1}{2} - \left|n - \frac{M-1}{2}\right| + \right) + + Most references to the Bartlett window come from the signal + processing literature, where it is used as one of many windowing + functions for smoothing values. Note that convolution with this + window produces linear interpolation. It is also known as an + apodization (which means"removing the foot", i.e. smoothing + discontinuities at the beginning and end of the sampled signal) or + tapering function. The Fourier transform of the Bartlett is the product + of two sinc functions. + Note the excellent discussion in Kanasewich. [2]_ + + References + ---------- + .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra", + Biometrika 37, 1-16, 1950. + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", + The University of Alberta Press, 1975, pp. 109-110. + .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal + Processing", Prentice-Hall, 1999, pp. 468-471. + .. [4] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function + .. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, + "Numerical Recipes", Cambridge University Press, 1986, page 429. 
+ + Examples + -------- + Plot the window and its frequency response: + + >>> import numpy as np + >>> from scipy import signal + >>> from scipy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = signal.windows.bartlett(51) + >>> plt.plot(window) + >>> plt.title("Bartlett window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) + >>> plt.plot(freq, response) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the Bartlett window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + # Docstring adapted from NumPy's bartlett function + if _len_guards(M): + return np.ones(M) + M, needs_trunc = _extend(M, sym) + + n = np.arange(0, M) + w = np.where(np.less_equal(n, (M - 1) / 2.0), + 2.0 * n / (M - 1), 2.0 - 2.0 * n / (M - 1)) + + return _truncate(w, needs_trunc) + + +def hann(M, sym=True): + r""" + Return a Hann window. + + The Hann window is a taper formed by using a raised cosine or sine-squared + with ends that touch zero. + + Parameters + ---------- + M : int + Number of points in the output window. If zero, an empty array + is returned. An exception is thrown when it is negative. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + Notes + ----- + The Hann window is defined as + + .. math:: w(n) = 0.5 - 0.5 \cos\left(\frac{2\pi{n}}{M-1}\right) + \qquad 0 \leq n \leq M-1 + + The window was named for Julius von Hann, an Austrian meteorologist. 
It is + also known as the Cosine Bell. It is sometimes erroneously referred to as + the "Hanning" window, from the use of "hann" as a verb in the original + paper and confusion with the very similar Hamming window. + + Most references to the Hann window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. + + References + ---------- + .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power + spectra, Dover Publications, New York. + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", + The University of Alberta Press, 1975, pp. 106-108. + .. [3] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function + .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, + "Numerical Recipes", Cambridge University Press, 1986, page 425. 
+ + Examples + -------- + Plot the window and its frequency response: + + >>> import numpy as np + >>> from scipy import signal + >>> from scipy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = signal.windows.hann(51) + >>> plt.plot(window) + >>> plt.title("Hann window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = np.abs(fftshift(A / abs(A).max())) + >>> response = 20 * np.log10(np.maximum(response, 1e-10)) + >>> plt.plot(freq, response) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the Hann window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + # Docstring adapted from NumPy's hanning function + return general_hamming(M, 0.5, sym) + + +def tukey(M, alpha=0.5, sym=True): + r"""Return a Tukey window, also known as a tapered cosine window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero, an empty array + is returned. An exception is thrown when it is negative. + alpha : float, optional + Shape parameter of the Tukey window, representing the fraction of the + window inside the cosine tapered region. + If zero, the Tukey window is equivalent to a rectangular window. + If one, the Tukey window is equivalent to a Hann window. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + References + ---------- + .. [1] Harris, Fredric J. (Jan 1978). "On the use of Windows for Harmonic + Analysis with the Discrete Fourier Transform". Proceedings of the + IEEE 66 (1): 51-83. 
:doi:`10.1109/PROC.1978.10837` + .. [2] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function#Tukey_window + + Examples + -------- + Plot the window and its frequency response: + + >>> import numpy as np + >>> from scipy import signal + >>> from scipy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = signal.windows.tukey(51) + >>> plt.plot(window) + >>> plt.title("Tukey window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + >>> plt.ylim([0, 1.1]) + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) + >>> plt.plot(freq, response) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the Tukey window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + if _len_guards(M): + return np.ones(M) + + if alpha <= 0: + return np.ones(M, 'd') + elif alpha >= 1.0: + return hann(M, sym=sym) + + M, needs_trunc = _extend(M, sym) + + n = np.arange(0, M) + width = int(np.floor(alpha*(M-1)/2.0)) + n1 = n[0:width+1] + n2 = n[width+1:M-width-1] + n3 = n[M-width-1:] + + w1 = 0.5 * (1 + np.cos(np.pi * (-1 + 2.0*n1/alpha/(M-1)))) + w2 = np.ones(n2.shape) + w3 = 0.5 * (1 + np.cos(np.pi * (-2.0/alpha + 1 + 2.0*n3/alpha/(M-1)))) + + w = np.concatenate((w1, w2, w3)) + + return _truncate(w, needs_trunc) + + +def barthann(M, sym=True): + """Return a modified Bartlett-Hann window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero, an empty array + is returned. An exception is thrown when it is negative. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. 
def general_hamming(M, alpha, sym=True):
    r"""Return a generalized Hamming window.

    The generalized Hamming window is a rectangular window multiplied by one
    period of a cosine:

    .. math:: w(n) = \alpha - \left(1 - \alpha\right)
              \cos\left(\frac{2\pi{n}}{M-1}\right) \qquad 0 \leq n \leq M-1

    The common Hamming (:math:`\alpha = 0.54`) and Hann (:math:`\alpha = 0.5`)
    windows are special cases of this family.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero, an empty array
        is returned. An exception is thrown when it is negative.
    alpha : float
        The window coefficient, :math:`\alpha`
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).

    See Also
    --------
    hamming, hann
    """
    # A generalized Hamming window is the two-term cosine series with
    # coefficients [alpha, 1 - alpha].
    coefficients = [alpha, 1. - alpha]
    return general_cosine(M, coefficients, sym)
def hamming(M, sym=True):
    r"""Return a Hamming window.

    The Hamming window is a taper formed by using a raised cosine with
    non-zero endpoints, optimized to minimize the nearest side lobe:

    .. math::  w(n) = 0.54 - 0.46 \cos\left(\frac{2\pi{n}}{M-1}\right)
               \qquad 0 \leq n \leq M-1

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero, an empty array
        is returned. An exception is thrown when it is negative.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).
    """
    # The Hamming window is the alpha = 0.54 member of the generalized
    # Hamming family.
    return general_hamming(M, 0.54, sym)
def kaiser(M, beta, sym=True):
    r"""Return a Kaiser window.

    The Kaiser window is a taper formed by using a Bessel function:

    .. math::  w(n) = I_0\left( \beta \sqrt{1-\frac{4n^2}{(M-1)^2}}
               \right)/I_0(\beta)

    with :math:`-\frac{M-1}{2} \leq n \leq \frac{M-1}{2}`, where :math:`I_0`
    is the modified zeroth-order Bessel function.  It is a very good
    approximation to the DPSS (Slepian) window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero, an empty array
        is returned. An exception is thrown when it is negative.
    beta : float
        Shape parameter, determines trade-off between main-lobe width and
        side lobe level. As beta gets large, the window narrows.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).
    """
    if _len_guards(M):
        return np.ones(M)
    M, needs_trunc = _extend(M, sym)

    # Sample positions relative to the center, scaled so the endpoints map
    # to +/-1 inside the square root (argument of I_0 vanishes there).
    samples = np.arange(0, M)
    half_span = (M - 1) / 2.0
    ratio = (samples - half_span) / half_span
    w = (special.i0(beta * np.sqrt(1 - ratio ** 2.0)) /
         special.i0(beta))

    return _truncate(w, needs_trunc)
def kaiser_bessel_derived(M, beta, *, sym=True):
    """Return a Kaiser-Bessel derived window.

    Designed for use with the modified discrete cosine transform (MDCT),
    mainly in audio signal processing and audio coding.  The result is
    normalized to fulfil the Princen-Bradley condition.

    .. versionadded:: 1.9.0

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero, an empty array
        is returned. An exception is thrown when it is negative.
        Note that this window is only defined for an even
        number of points.
    beta : float
        Kaiser window shape parameter.
    sym : bool, optional
        This parameter only exists to comply with the interface offered by
        the other window functions and to be callable by `get_window`.
        When True (default), generates a symmetric window, for use in filter
        design.

    Returns
    -------
    w : ndarray
        The window, normalized to fulfil the Princen-Bradley condition.

    See Also
    --------
    kaiser
    """
    if not sym:
        raise ValueError(
            "Kaiser-Bessel Derived windows are only defined for symmetric "
            "shapes"
        )
    if M < 1:
        return np.array([])
    if M % 2:
        raise ValueError(
            "Kaiser-Bessel Derived windows are only defined for even number "
            "of points"
        )

    # Build the first half from the cumulative sum of a Kaiser window of
    # length M/2 + 1, then mirror it to obtain the full window.
    base = kaiser(M // 2 + 1, beta)
    running = np.cumsum(base)
    first_half = np.sqrt(running[:-1] / running[-1])
    return np.concatenate((first_half, first_half[::-1]), axis=0)
def gaussian(M, std, sym=True):
    r"""Return a Gaussian window.

    The window is defined as

    .. math::  w(n) = e^{ -\frac{1}{2}\left(\frac{n}{\sigma}\right)^2 }

    with ``n`` measured from the window center.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero, an empty array
        is returned. An exception is thrown when it is negative.
    std : float
        The standard deviation, sigma.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).
    """
    if _len_guards(M):
        return np.ones(M)
    M, needs_trunc = _extend(M, sym)

    # Sample offsets from the window center (fractional for even M).
    centered = np.arange(0, M) - (M - 1.0) / 2.0
    denom = 2 * std * std
    w = np.exp(-centered ** 2 / denom)

    return _truncate(w, needs_trunc)
def general_gaussian(M, p, sig, sym=True):
    r"""Return a window with a generalized Gaussian shape.

    The window is defined as

    .. math::  w(n) = e^{ -\frac{1}{2}\left|\frac{n}{\sigma}\right|^{2p} }

    with the half-power point at :math:`(2 \log(2))^{1/(2 p)} \sigma`.
    ``p = 1`` is identical to `gaussian`; ``p = 0.5`` has the same shape as
    the Laplace distribution.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero, an empty array
        is returned. An exception is thrown when it is negative.
    p : float
        Shape parameter.
    sig : float
        The standard deviation, sigma.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).
    """
    if _len_guards(M):
        return np.ones(M)
    M, needs_trunc = _extend(M, sym)

    # Sample offsets from the window center (fractional for even M).
    centered = np.arange(0, M) - (M - 1.0) / 2.0
    w = np.exp(-0.5 * np.abs(centered / sig) ** (2 * p))

    return _truncate(w, needs_trunc)
# `chebwin` contributed by Kumar Appaiah.
def chebwin(M, at, sym=True):
    r"""Return a Dolph-Chebyshev window.

    This window optimizes for the narrowest main lobe width for a given order
    `M` and sidelobe equiripple attenuation `at`, using Chebyshev
    polynomials.  It was originally developed by Dolph to optimize the
    directionality of radio antenna arrays.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero, an empty array
        is returned. An exception is thrown when it is negative.
    at : float
        Attenuation (in dB).
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value always normalized to 1

    Notes
    -----
    Unlike most windows, the Dolph-Chebyshev is defined in terms of its
    frequency response:

    .. math:: W(k) = \frac
              {\cos\{M \cos^{-1}[\beta \cos(\frac{\pi k}{M})]\}}
              {\cosh[M \cosh^{-1}(\beta)]}

    where

    .. math:: \beta = \cosh \left [\frac{1}{M}
              \cosh^{-1}(10^\frac{A}{20}) \right ]

    and 0 <= abs(k) <= M-1. A is the attenuation in decibels (`at`).

    The time domain window is then generated using the IFFT, so
    power-of-two `M` are the fastest to generate, and prime number `M` are
    the slowest.

    The equiripple condition in the frequency domain creates impulses in the
    time domain, which appear at the ends of the window.

    References
    ----------
    .. [1] C. Dolph, "A current distribution for broadside arrays which
           optimizes the relationship between beam width and side-lobe level",
           Proceedings of the IEEE, Vol. 34, Issue 6
    .. [2] Peter Lynch, "The Dolph-Chebyshev Window: A Simple Optimal Filter",
           American Meteorological Society (April 1997)
           http://mathsci.ucd.ie/~plynch/Publications/Dolph.pdf
    .. [3] F. J. Harris, "On the use of windows for harmonic analysis with the
           discrete Fourier transforms", Proceedings of the IEEE, Vol. 66,
           No. 1, January 1978
    """
    # Low attenuation makes the window unsuitable for spectral analysis;
    # warn rather than raise so legacy callers keep working.
    if np.abs(at) < 45:
        warnings.warn("This window is not suitable for spectral analysis "
                      "for attenuation values lower than about 45dB because "
                      "the equivalent noise bandwidth of a Chebyshev window "
                      "does not grow monotonically with increasing sidelobe "
                      "attenuation when the attenuation is smaller than "
                      "about 45 dB.",
                      stacklevel=2)
    if _len_guards(M):
        return np.ones(M)
    M, needs_trunc = _extend(M, sym)

    # compute the parameter beta
    order = M - 1.0
    beta = np.cosh(1.0 / order * np.arccosh(10 ** (np.abs(at) / 20.)))
    # Frequency-domain sample points: x = beta * cos(pi*k/M), k = 0..M-1.
    k = np.r_[0:M] * 1.0
    x = beta * np.cos(np.pi * k / M)
    # Find the window's DFT coefficients
    # Use analytic definition of Chebyshev polynomial instead of expansion
    # from scipy.special. Using the expansion in scipy.special leads to errors.
    # T_order(x) evaluated piecewise: cosh form outside [-1, 1], cos form
    # inside; the (2*(M % 2) - 1) factor fixes the sign for x < -1.
    p = np.zeros(x.shape)
    p[x > 1] = np.cosh(order * np.arccosh(x[x > 1]))
    p[x < -1] = (2 * (M % 2) - 1) * np.cosh(order * np.arccosh(-x[x < -1]))
    p[np.abs(x) <= 1] = np.cos(order * np.arccos(x[np.abs(x) <= 1]))

    # Appropriate IDFT and filling up
    # depending on even/odd M
    if M % 2:
        # Odd length: the FFT of the real, even spectrum is real; mirror
        # the first half around the center sample.
        w = np.real(sp_fft.fft(p))
        n = (M + 1) // 2
        w = w[:n]
        w = np.concatenate((w[n - 1:0:-1], w))
    else:
        # Even length: apply a half-sample phase shift before the FFT,
        # then mirror (the k=0 term is excluded from the second half).
        p = p * np.exp(1.j * np.pi / M * np.r_[0:M])
        w = np.real(sp_fft.fft(p))
        n = M // 2 + 1
        w = np.concatenate((w[n - 1:0:-1], w[1:n]))
    # Normalize so the peak value is exactly 1.
    w = w / max(w)

    return _truncate(w, needs_trunc)
def cosine(M, sym=True):
    """Return a window with a simple cosine shape.

    .. versionadded:: 0.13.0

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero, an empty array
        is returned. An exception is thrown when it is negative.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).
    """
    if _len_guards(M):
        return np.ones(M)
    M, needs_trunc = _extend(M, sym)

    # Half a period of a sine, sampled at the midpoints of M equal
    # subintervals of [0, pi].
    midpoints = np.arange(0, M) + .5
    w = np.sin(np.pi / M * midpoints)

    return _truncate(w, needs_trunc)
def exponential(M, center=None, tau=1., sym=True):
    r"""Return an exponential (or Poisson) window.

    The window is defined as

    .. math::  w(n) = e^{-|n-center| / \tau}

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero, an empty array
        is returned. An exception is thrown when it is negative.
    center : float, optional
        Parameter defining the center location of the window function.
        The default value if not given is ``center = (M-1) / 2``. This
        parameter must take its default value for symmetric windows.
    tau : float, optional
        Parameter defining the decay. For ``center = 0`` use
        ``tau = -(M-1) / ln(x)`` if ``x`` is the fraction of the window
        remaining at the end.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).

    References
    ----------
    .. [1] S. Gade and H. Herlufsen, "Windows to FFT analysis (Part I)",
           Technical Review 3, Bruel & Kjaer, 1987.
    """
    # A symmetric window must peak at the midpoint, so an explicit center
    # is only meaningful for non-symmetric windows.
    if sym and center is not None:
        raise ValueError("If sym==True, center must be None.")
    if _len_guards(M):
        return np.ones(M)
    M, needs_trunc = _extend(M, sym)

    if center is None:
        center = (M-1) / 2

    distance = np.abs(np.arange(0, M) - center)
    w = np.exp(-distance / tau)

    return _truncate(w, needs_trunc)
def taylor(M, nbar=4, sll=30, norm=True, sym=True):
    """
    Return a Taylor window.

    The Taylor window taper function approximates the Dolph-Chebyshev window's
    constant sidelobe level for a parameterized number of near-in sidelobes,
    but then allows a taper beyond [2]_.

    The SAR (synthetic aperture radar) community commonly uses Taylor
    weighting for image formation processing because it provides strong,
    selectable sidelobe suppression with minimum broadening of the
    mainlobe [1]_.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero, an empty array
        is returned. An exception is thrown when it is negative.
    nbar : int, optional
        Number of nearly constant level sidelobes adjacent to the mainlobe.
    sll : float, optional
        Desired suppression of sidelobe level in decibels (dB) relative to the
        DC gain of the mainlobe. This should be a positive number.
    norm : bool, optional
        When True (default), divides the window by the largest (middle) value
        for odd-length windows or the value that would occur between the two
        repeated middle values for even-length windows such that all values
        are less than or equal to 1. When False the DC gain will remain at 1
        (0 dB) and the sidelobes will be `sll` dB down.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    out : array
        The window. When `norm` is True (default), the maximum value is
        normalized to 1 (though the value 1 does not appear if `M` is
        even and `sym` is True).

    See Also
    --------
    chebwin, kaiser, bartlett, blackman, hamming, hann

    References
    ----------
    .. [1] W. Carrara, R. Goodman, and R. Majewski, "Spotlight Synthetic
           Aperture Radar: Signal Processing Algorithms" Pages 512-513,
           July 1995.
    .. [2] Armin Doerry, "Catalog of Window Taper Functions for
           Sidelobe Control", 2017.
           https://www.researchgate.net/profile/Armin_Doerry/publication/316281181_Catalog_of_Window_Taper_Functions_for_Sidelobe_Control/links/58f92cb2a6fdccb121c9d54d/Catalog-of-Window-Taper-Functions-for-Sidelobe-Control.pdf
    """  # noqa: E501
    if _len_guards(M):
        return np.ones(M)
    M, needs_trunc = _extend(M, sym)

    # Original text uses a negative sidelobe level parameter and then negates
    # it in the calculation of B. To keep consistent with other methods we
    # assume the sidelobe level parameter to be positive.
    B = 10**(sll / 20)
    A = np.arccosh(B) / np.pi
    # Dilation factor that stretches the near-in zero locations.
    s2 = nbar**2 / (A**2 + (nbar - 0.5)**2)
    # Cosine-series harmonic indices 1 .. nbar-1.
    ma = np.arange(1, nbar)

    # Fm holds the Fourier coefficients of the taper; signs alternates
    # +1, -1, +1, ... over the harmonics.
    Fm = np.empty(nbar-1)
    signs = np.empty_like(ma)
    signs[::2] = 1
    signs[1::2] = -1
    m2 = ma*ma
    for mi, m in enumerate(ma):
        # Product over all harmonics in the numerator; the denominator
        # product skips the current harmonic (split at index mi).
        numer = signs[mi] * np.prod(1 - m2[mi]/s2/(A**2 + (ma - 0.5)**2))
        denom = 2 * np.prod(1 - m2[mi]/m2[:mi]) * np.prod(1 - m2[mi]/m2[mi+1:])
        Fm[mi] = numer / denom

    def W(n):
        # Evaluate the cosine series at (possibly fractional) sample
        # positions n; accepts scalars or arrays.
        return 1 + 2*np.dot(Fm, np.cos(
            2*np.pi*ma[:, np.newaxis]*(n-M/2.+0.5)/M))

    w = W(np.arange(M))

    # normalize (Note that this is not described in the original text [1])
    if norm:
        # W((M-1)/2) is the (inter-sample) peak of the window.
        scale = 1.0 / W((M - 1) / 2)
        w *= scale

    return _truncate(w, needs_trunc)
+ B = 10**(sll / 20) + A = np.arccosh(B) / np.pi + s2 = nbar**2 / (A**2 + (nbar - 0.5)**2) + ma = np.arange(1, nbar) + + Fm = np.empty(nbar-1) + signs = np.empty_like(ma) + signs[::2] = 1 + signs[1::2] = -1 + m2 = ma*ma + for mi, m in enumerate(ma): + numer = signs[mi] * np.prod(1 - m2[mi]/s2/(A**2 + (ma - 0.5)**2)) + denom = 2 * np.prod(1 - m2[mi]/m2[:mi]) * np.prod(1 - m2[mi]/m2[mi+1:]) + Fm[mi] = numer / denom + + def W(n): + return 1 + 2*np.dot(Fm, np.cos( + 2*np.pi*ma[:, np.newaxis]*(n-M/2.+0.5)/M)) + + w = W(np.arange(M)) + + # normalize (Note that this is not described in the original text [1]) + if norm: + scale = 1.0 / W((M - 1) / 2) + w *= scale + + return _truncate(w, needs_trunc) + + +def dpss(M, NW, Kmax=None, sym=True, norm=None, return_ratios=False): + """ + Compute the Discrete Prolate Spheroidal Sequences (DPSS). + + DPSS (or Slepian sequences) are often used in multitaper power spectral + density estimation (see [1]_). The first window in the sequence can be + used to maximize the energy concentration in the main lobe, and is also + called the Slepian window. + + Parameters + ---------- + M : int + Window length. + NW : float + Standardized half bandwidth corresponding to ``2*NW = BW/f0 = BW*M*dt`` + where ``dt`` is taken as 1. + Kmax : int | None, optional + Number of DPSS windows to return (orders ``0`` through ``Kmax-1``). + If None (default), return only a single window of shape ``(M,)`` + instead of an array of windows of shape ``(Kmax, M)``. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + norm : {2, 'approximate', 'subsample'} | None, optional + If 'approximate' or 'subsample', then the windows are normalized by the + maximum, and a correction scale-factor for even-length windows + is applied either using ``M**2/(M**2+NW)`` ("approximate") or + a FFT-based subsample shift ("subsample"), see Notes for details. 
+ If None, then "approximate" is used when ``Kmax=None`` and 2 otherwise + (which uses the l2 norm). + return_ratios : bool, optional + If True, also return the concentration ratios in addition to the + windows. + + Returns + ------- + v : ndarray, shape (Kmax, M) or (M,) + The DPSS windows. Will be 1D if `Kmax` is None. + r : ndarray, shape (Kmax,) or float, optional + The concentration ratios for the windows. Only returned if + `return_ratios` evaluates to True. Will be 0D if `Kmax` is None. + + Notes + ----- + This computation uses the tridiagonal eigenvector formulation given + in [2]_. + + The default normalization for ``Kmax=None``, i.e. window-generation mode, + simply using the l-infinity norm would create a window with two unity + values, which creates slight normalization differences between even and odd + orders. The approximate correction of ``M**2/float(M**2+NW)`` for even + sample numbers is used to counteract this effect (see Examples below). + + For very long signals (e.g., 1e6 elements), it can be useful to compute + windows orders of magnitude shorter and use interpolation (e.g., + `scipy.interpolate.interp1d`) to obtain tapers of length `M`, + but this in general will not preserve orthogonality between the tapers. + + .. versionadded:: 1.1 + + References + ---------- + .. [1] Percival DB, Walden WT. Spectral Analysis for Physical Applications: + Multitaper and Conventional Univariate Techniques. + Cambridge University Press; 1993. + .. [2] Slepian, D. Prolate spheroidal wave functions, Fourier analysis, and + uncertainty V: The discrete case. Bell System Technical Journal, + Volume 57 (1978), 1371430. + .. [3] Kaiser, JF, Schafer RW. On the Use of the I0-Sinh Window for + Spectrum Analysis. IEEE Transactions on Acoustics, Speech and + Signal Processing. ASSP-28 (1): 105-107; 1980. 
+ + Examples + -------- + We can compare the window to `kaiser`, which was invented as an alternative + that was easier to calculate [3]_ (example adapted from + `here `_): + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.signal import windows, freqz + >>> M = 51 + >>> fig, axes = plt.subplots(3, 2, figsize=(5, 7)) + >>> for ai, alpha in enumerate((1, 3, 5)): + ... win_dpss = windows.dpss(M, alpha) + ... beta = alpha*np.pi + ... win_kaiser = windows.kaiser(M, beta) + ... for win, c in ((win_dpss, 'k'), (win_kaiser, 'r')): + ... win /= win.sum() + ... axes[ai, 0].plot(win, color=c, lw=1.) + ... axes[ai, 0].set(xlim=[0, M-1], title=r'$\\alpha$ = %s' % alpha, + ... ylabel='Amplitude') + ... w, h = freqz(win) + ... axes[ai, 1].plot(w, 20 * np.log10(np.abs(h)), color=c, lw=1.) + ... axes[ai, 1].set(xlim=[0, np.pi], + ... title=r'$\\beta$ = %0.2f' % beta, + ... ylabel='Magnitude (dB)') + >>> for ax in axes.ravel(): + ... ax.grid(True) + >>> axes[2, 1].legend(['DPSS', 'Kaiser']) + >>> fig.tight_layout() + >>> plt.show() + + And here are examples of the first four windows, along with their + concentration ratios: + + >>> M = 512 + >>> NW = 2.5 + >>> win, eigvals = windows.dpss(M, NW, 4, return_ratios=True) + >>> fig, ax = plt.subplots(1) + >>> ax.plot(win.T, linewidth=1.) + >>> ax.set(xlim=[0, M-1], ylim=[-0.1, 0.1], xlabel='Samples', + ... title='DPSS, M=%d, NW=%0.1f' % (M, NW)) + >>> ax.legend(['win[%d] (%0.4f)' % (ii, ratio) + ... for ii, ratio in enumerate(eigvals)]) + >>> fig.tight_layout() + >>> plt.show() + + Using a standard :math:`l_{\\infty}` norm would produce two unity values + for even `M`, but only one unity value for odd `M`. This produces uneven + window power that can be counteracted by the approximate correction + ``M**2/float(M**2+NW)``, which can be selected by using + ``norm='approximate'`` (which is the same as ``norm=None`` when + ``Kmax=None``, as is the case here). 
Alternatively, the slower + ``norm='subsample'`` can be used, which uses subsample shifting in the + frequency domain (FFT) to compute the correction: + + >>> Ms = np.arange(1, 41) + >>> factors = (50, 20, 10, 5, 2.0001) + >>> energy = np.empty((3, len(Ms), len(factors))) + >>> for mi, M in enumerate(Ms): + ... for fi, factor in enumerate(factors): + ... NW = M / float(factor) + ... # Corrected using empirical approximation (default) + ... win = windows.dpss(M, NW) + ... energy[0, mi, fi] = np.sum(win ** 2) / np.sqrt(M) + ... # Corrected using subsample shifting + ... win = windows.dpss(M, NW, norm='subsample') + ... energy[1, mi, fi] = np.sum(win ** 2) / np.sqrt(M) + ... # Uncorrected (using l-infinity norm) + ... win /= win.max() + ... energy[2, mi, fi] = np.sum(win ** 2) / np.sqrt(M) + >>> fig, ax = plt.subplots(1) + >>> hs = ax.plot(Ms, energy[2], '-o', markersize=4, + ... markeredgecolor='none') + >>> leg = [hs[-1]] + >>> for hi, hh in enumerate(hs): + ... h1 = ax.plot(Ms, energy[0, :, hi], '-o', markersize=4, + ... color=hh.get_color(), markeredgecolor='none', + ... alpha=0.66) + ... h2 = ax.plot(Ms, energy[1, :, hi], '-o', markersize=4, + ... color=hh.get_color(), markeredgecolor='none', + ... alpha=0.33) + ... if hi == len(hs) - 1: + ... leg.insert(0, h1[0]) + ... leg.insert(0, h2[0]) + >>> ax.set(xlabel='M (samples)', ylabel=r'Power / $\\sqrt{M}$') + >>> ax.legend(leg, ['Uncorrected', r'Corrected: $\\frac{M^2}{M^2+NW}$', + ... 
'Corrected (subsample)']) + >>> fig.tight_layout() + + """ + if _len_guards(M): + return np.ones(M) + if norm is None: + norm = 'approximate' if Kmax is None else 2 + known_norms = (2, 'approximate', 'subsample') + if norm not in known_norms: + raise ValueError(f'norm must be one of {known_norms}, got {norm}') + if Kmax is None: + singleton = True + Kmax = 1 + else: + singleton = False + Kmax = operator.index(Kmax) + if not 0 < Kmax <= M: + raise ValueError('Kmax must be greater than 0 and less than M') + if NW >= M/2.: + raise ValueError('NW must be less than M/2.') + if NW <= 0: + raise ValueError('NW must be positive') + M, needs_trunc = _extend(M, sym) + W = float(NW) / M + nidx = np.arange(M) + + # Here we want to set up an optimization problem to find a sequence + # whose energy is maximally concentrated within band [-W,W]. + # Thus, the measure lambda(T,W) is the ratio between the energy within + # that band, and the total energy. This leads to the eigen-system + # (A - (l1)I)v = 0, where the eigenvector corresponding to the largest + # eigenvalue is the sequence with maximally concentrated energy. The + # collection of eigenvectors of this system are called Slepian + # sequences, or discrete prolate spheroidal sequences (DPSS). Only the + # first K, K = 2NW/dt orders of DPSS will exhibit good spectral + # concentration + # [see https://en.wikipedia.org/wiki/Spectral_concentration_problem] + + # Here we set up an alternative symmetric tri-diagonal eigenvalue + # problem such that + # (B - (l2)I)v = 0, and v are our DPSS (but eigenvalues l2 != l1) + # the main diagonal = ([M-1-2*t]/2)**2 cos(2PIW), t=[0,1,2,...,M-1] + # and the first off-diagonal = t(M-t)/2, t=[1,2,...,M-1] + # [see Percival and Walden, 1993] + d = ((M - 1 - 2 * nidx) / 2.) ** 2 * np.cos(2 * np.pi * W) + e = nidx[1:] * (M - nidx[1:]) / 2. 
+ + # only calculate the highest Kmax eigenvalues + w, windows = linalg.eigh_tridiagonal( + d, e, select='i', select_range=(M - Kmax, M - 1)) + w = w[::-1] + windows = windows[:, ::-1].T + + # By convention (Percival and Walden, 1993 pg 379) + # * symmetric tapers (k=0,2,4,...) should have a positive average. + fix_even = (windows[::2].sum(axis=1) < 0) + for i, f in enumerate(fix_even): + if f: + windows[2 * i] *= -1 + # * antisymmetric tapers should begin with a positive lobe + # (this depends on the definition of "lobe", here we'll take the first + # point above the numerical noise, which should be good enough for + # sufficiently smooth functions, and more robust than relying on an + # algorithm that uses max(abs(w)), which is susceptible to numerical + # noise problems) + thresh = max(1e-7, 1. / M) + for i, w in enumerate(windows[1::2]): + if w[w * w > thresh][0] < 0: + windows[2 * i + 1] *= -1 + + # Now find the eigenvalues of the original spectral concentration problem + # Use the autocorr sequence technique from Percival and Walden, 1993 pg 390 + if return_ratios: + dpss_rxx = _fftautocorr(windows) + r = 4 * W * np.sinc(2 * W * nidx) + r[0] = 2 * W + ratios = np.dot(dpss_rxx, r) + if singleton: + ratios = ratios[0] + # Deal with sym and Kmax=None + if norm != 2: + windows /= windows.max() + if M % 2 == 0: + if norm == 'approximate': + correction = M**2 / float(M**2 + NW) + else: + s = sp_fft.rfft(windows[0]) + shift = -(1 - 1./M) * np.arange(1, M//2 + 1) + s[1:] *= 2 * np.exp(-1j * np.pi * shift) + correction = M / s.real.sum() + windows *= correction + # else we're already l2 normed, so do nothing + if needs_trunc: + windows = windows[:, :-1] + if singleton: + windows = windows[0] + return (windows, ratios) if return_ratios else windows + + +def lanczos(M, *, sym=True): + r"""Return a Lanczos window also known as a sinc window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero, an empty array + is returned. 
An exception is thrown when it is negative. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + Notes + ----- + The Lanczos window is defined as + + .. math:: w(n) = sinc \left( \frac{2n}{M - 1} - 1 \right) + + where + + .. math:: sinc(x) = \frac{\sin(\pi x)}{\pi x} + + The Lanczos window has reduced Gibbs oscillations and is widely used for + filtering climate timeseries with good properties in the physical and + spectral domains. + + .. versionadded:: 1.10 + + References + ---------- + .. [1] Lanczos, C., and Teichmann, T. (1957). Applied analysis. + Physics Today, 10, 44. + .. [2] Duchon C. E. (1979) Lanczos Filtering in One and Two Dimensions. + Journal of Applied Meteorology, Vol 18, pp 1016-1022. + .. [3] Thomson, R. E. and Emery, W. J. (2014) Data Analysis Methods in + Physical Oceanography (Third Edition), Elsevier, pp 593-637. + .. 
[4] Wikipedia, "Window function", + http://en.wikipedia.org/wiki/Window_function + + Examples + -------- + Plot the window + + >>> import numpy as np + >>> from scipy.signal.windows import lanczos + >>> from scipy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots(1) + >>> window = lanczos(51) + >>> ax.plot(window) + >>> ax.set_title("Lanczos window") + >>> ax.set_ylabel("Amplitude") + >>> ax.set_xlabel("Sample") + >>> fig.tight_layout() + >>> plt.show() + + and its frequency response: + + >>> fig, ax = plt.subplots(1) + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) + >>> ax.plot(freq, response) + >>> ax.set_xlim(-0.5, 0.5) + >>> ax.set_ylim(-120, 0) + >>> ax.set_title("Frequency response of the lanczos window") + >>> ax.set_ylabel("Normalized magnitude [dB]") + >>> ax.set_xlabel("Normalized frequency [cycles per sample]") + >>> fig.tight_layout() + >>> plt.show() + """ + if _len_guards(M): + return np.ones(M) + M, needs_trunc = _extend(M, sym) + + # To make sure that the window is symmetric, we concatenate the right hand + # half of the window and the flipped one which is the left hand half of + # the window. + def _calc_right_side_lanczos(n, m): + return np.sinc(2. 
* np.arange(n, m) / (m - 1) - 1.0) + + if M % 2 == 0: + wh = _calc_right_side_lanczos(M/2, M) + w = np.r_[np.flip(wh), wh] + else: + wh = _calc_right_side_lanczos((M+1)/2, M) + w = np.r_[np.flip(wh), 1.0, wh] + + return _truncate(w, needs_trunc) + + +def _fftautocorr(x): + """Compute the autocorrelation of a real array and crop the result.""" + N = x.shape[-1] + use_N = sp_fft.next_fast_len(2*N-1) + x_fft = sp_fft.rfft(x, use_N, axis=-1) + cxy = sp_fft.irfft(x_fft * x_fft.conj(), n=use_N)[:, :N] + # Or equivalently (but in most cases slower): + # cxy = np.array([np.convolve(xx, yy[::-1], mode='full') + # for xx, yy in zip(x, x)])[:, N-1:2*N-1] + return cxy + + +_win_equiv_raw = { + ('barthann', 'brthan', 'bth'): (barthann, False), + ('bartlett', 'bart', 'brt'): (bartlett, False), + ('blackman', 'black', 'blk'): (blackman, False), + ('blackmanharris', 'blackharr', 'bkh'): (blackmanharris, False), + ('bohman', 'bman', 'bmn'): (bohman, False), + ('boxcar', 'box', 'ones', + 'rect', 'rectangular'): (boxcar, False), + ('chebwin', 'cheb'): (chebwin, True), + ('cosine', 'halfcosine'): (cosine, False), + ('dpss',): (dpss, True), + ('exponential', 'poisson'): (exponential, False), + ('flattop', 'flat', 'flt'): (flattop, False), + ('gaussian', 'gauss', 'gss'): (gaussian, True), + ('general cosine', 'general_cosine'): (general_cosine, True), + ('general gaussian', 'general_gaussian', + 'general gauss', 'general_gauss', 'ggs'): (general_gaussian, True), + ('general hamming', 'general_hamming'): (general_hamming, True), + ('hamming', 'hamm', 'ham'): (hamming, False), + ('hann', 'han'): (hann, False), + ('kaiser', 'ksr'): (kaiser, True), + ('kaiser bessel derived', 'kbd'): (kaiser_bessel_derived, True), + ('lanczos', 'sinc'): (lanczos, False), + ('nuttall', 'nutl', 'nut'): (nuttall, False), + ('parzen', 'parz', 'par'): (parzen, False), + ('taylor', 'taylorwin'): (taylor, False), + ('triangle', 'triang', 'tri'): (triang, False), + ('tukey', 'tuk'): (tukey, False), +} + +# Fill 
dict with all valid window name strings +_win_equiv = {} +for k, v in _win_equiv_raw.items(): + for key in k: + _win_equiv[key] = v[0] + +# Keep track of which windows need additional parameters +_needs_param = set() +for k, v in _win_equiv_raw.items(): + if v[1]: + _needs_param.update(k) + + +def get_window(window, Nx, fftbins=True): + """ + Return a window of a given length and type. + + Parameters + ---------- + window : string, float, or tuple + The type of window to create. See below for more details. + Nx : int + The number of samples in the window. + fftbins : bool, optional + If True (default), create a "periodic" window, ready to use with + `ifftshift` and be multiplied by the result of an FFT (see also + :func:`~scipy.fft.fftfreq`). + If False, create a "symmetric" window, for use in filter design. + + Returns + ------- + get_window : ndarray + Returns a window of length `Nx` and type `window` + + Notes + ----- + Window types: + + - `~scipy.signal.windows.boxcar` + - `~scipy.signal.windows.triang` + - `~scipy.signal.windows.blackman` + - `~scipy.signal.windows.hamming` + - `~scipy.signal.windows.hann` + - `~scipy.signal.windows.bartlett` + - `~scipy.signal.windows.flattop` + - `~scipy.signal.windows.parzen` + - `~scipy.signal.windows.bohman` + - `~scipy.signal.windows.blackmanharris` + - `~scipy.signal.windows.nuttall` + - `~scipy.signal.windows.barthann` + - `~scipy.signal.windows.cosine` + - `~scipy.signal.windows.exponential` + - `~scipy.signal.windows.tukey` + - `~scipy.signal.windows.taylor` + - `~scipy.signal.windows.lanczos` + - `~scipy.signal.windows.kaiser` (needs beta) + - `~scipy.signal.windows.kaiser_bessel_derived` (needs beta) + - `~scipy.signal.windows.gaussian` (needs standard deviation) + - `~scipy.signal.windows.general_cosine` (needs weighting coefficients) + - `~scipy.signal.windows.general_gaussian` (needs power, width) + - `~scipy.signal.windows.general_hamming` (needs window coefficient) + - `~scipy.signal.windows.dpss` (needs 
normalized half-bandwidth) + - `~scipy.signal.windows.chebwin` (needs attenuation) + + + If the window requires no parameters, then `window` can be a string. + + If the window requires parameters, then `window` must be a tuple + with the first argument the string name of the window, and the next + arguments the needed parameters. + + If `window` is a floating point number, it is interpreted as the beta + parameter of the `~scipy.signal.windows.kaiser` window. + + Each of the window types listed above is also the name of + a function that can be called directly to create a window of + that type. + + Examples + -------- + >>> from scipy import signal + >>> signal.get_window('triang', 7) + array([ 0.125, 0.375, 0.625, 0.875, 0.875, 0.625, 0.375]) + >>> signal.get_window(('kaiser', 4.0), 9) + array([ 0.08848053, 0.29425961, 0.56437221, 0.82160913, 0.97885093, + 0.97885093, 0.82160913, 0.56437221, 0.29425961]) + >>> signal.get_window(('exponential', None, 1.), 9) + array([ 0.011109 , 0.03019738, 0.082085 , 0.22313016, 0.60653066, + 0.60653066, 0.22313016, 0.082085 , 0.03019738]) + >>> signal.get_window(4.0, 9) + array([ 0.08848053, 0.29425961, 0.56437221, 0.82160913, 0.97885093, + 0.97885093, 0.82160913, 0.56437221, 0.29425961]) + + """ + sym = not fftbins + try: + beta = float(window) + except (TypeError, ValueError) as e: + args = () + if isinstance(window, tuple): + winstr = window[0] + if len(window) > 1: + args = window[1:] + elif isinstance(window, str): + if window in _needs_param: + raise ValueError("The '" + window + "' window needs one or " + "more parameters -- pass a tuple.") from e + else: + winstr = window + else: + raise ValueError("%s as window type is not supported." 
% + str(type(window))) from e + + try: + winfunc = _win_equiv[winstr] + except KeyError as e: + raise ValueError("Unknown window type.") from e + + if winfunc is dpss: + params = (Nx,) + args + (None,) + else: + params = (Nx,) + args + else: + winfunc = kaiser + params = (Nx, beta) + + return winfunc(*params, sym=sym) diff --git a/venv/lib/python3.10/site-packages/scipy/signal/windows/windows.py b/venv/lib/python3.10/site-packages/scipy/signal/windows/windows.py new file mode 100644 index 0000000000000000000000000000000000000000..a449bc1aaafc777538db27c699dfbb07b48a473b --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/signal/windows/windows.py @@ -0,0 +1,24 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.signal.windows` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall', + 'blackmanharris', 'flattop', 'bartlett', 'barthann', + 'hamming', 'kaiser', 'gaussian', 'general_cosine', + 'general_gaussian', 'general_hamming', 'chebwin', 'cosine', + 'hann', 'exponential', 'tukey', 'taylor', 'dpss', 'get_window', + 'linalg', 'sp_fft', 'k', 'v', 'key' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="signal.windows", module="windows", + private_modules=["_windows"], all=__all__, + attribute=name) diff --git a/venv/lib/python3.10/site-packages/scipy/special/__init__.py b/venv/lib/python3.10/site-packages/scipy/special/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d0e66b5a6416e6cd9bb796bb6efdb32fbad2e6d4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special/__init__.py @@ -0,0 +1,863 @@ +""" +======================================== +Special functions (:mod:`scipy.special`) +======================================== + +.. 
currentmodule:: scipy.special + +Almost all of the functions below accept NumPy arrays as input +arguments as well as single numbers. This means they follow +broadcasting and automatic array-looping rules. Technically, +they are `NumPy universal functions +`_. +Functions which do not accept NumPy arrays are marked by a warning +in the section description. + +.. seealso:: + + `scipy.special.cython_special` -- Typed Cython versions of special functions + + +Error handling +============== + +Errors are handled by returning NaNs or other appropriate values. +Some of the special function routines can emit warnings or raise +exceptions when an error occurs. By default this is disabled; to +query and control the current error handling state the following +functions are provided. + +.. autosummary:: + :toctree: generated/ + + geterr -- Get the current way of handling special-function errors. + seterr -- Set how special-function errors are handled. + errstate -- Context manager for special-function error handling. + SpecialFunctionWarning -- Warning that can be emitted by special functions. + SpecialFunctionError -- Exception that can be raised by special functions. + +Available functions +=================== + +Airy functions +-------------- + +.. autosummary:: + :toctree: generated/ + + airy -- Airy functions and their derivatives. + airye -- Exponentially scaled Airy functions and their derivatives. + ai_zeros -- Compute `nt` zeros and values of the Airy function Ai and its derivative. + bi_zeros -- Compute `nt` zeros and values of the Airy function Bi and its derivative. + itairy -- Integrals of Airy functions + + +Elliptic functions and integrals +-------------------------------- + +.. autosummary:: + :toctree: generated/ + + ellipj -- Jacobian elliptic functions. + ellipk -- Complete elliptic integral of the first kind. + ellipkm1 -- Complete elliptic integral of the first kind around `m` = 1. + ellipkinc -- Incomplete elliptic integral of the first kind. 
+ ellipe -- Complete elliptic integral of the second kind. + ellipeinc -- Incomplete elliptic integral of the second kind. + elliprc -- Degenerate symmetric integral RC. + elliprd -- Symmetric elliptic integral of the second kind. + elliprf -- Completely-symmetric elliptic integral of the first kind. + elliprg -- Completely-symmetric elliptic integral of the second kind. + elliprj -- Symmetric elliptic integral of the third kind. + +Bessel functions +---------------- + +.. autosummary:: + :toctree: generated/ + + jv -- Bessel function of the first kind of real order and \ + complex argument. + jve -- Exponentially scaled Bessel function of order `v`. + yn -- Bessel function of the second kind of integer order and \ + real argument. + yv -- Bessel function of the second kind of real order and \ + complex argument. + yve -- Exponentially scaled Bessel function of the second kind \ + of real order. + kn -- Modified Bessel function of the second kind of integer \ + order `n` + kv -- Modified Bessel function of the second kind of real order \ + `v` + kve -- Exponentially scaled modified Bessel function of the \ + second kind. + iv -- Modified Bessel function of the first kind of real order. + ive -- Exponentially scaled modified Bessel function of the \ + first kind. + hankel1 -- Hankel function of the first kind. + hankel1e -- Exponentially scaled Hankel function of the first kind. + hankel2 -- Hankel function of the second kind. + hankel2e -- Exponentially scaled Hankel function of the second kind. + wright_bessel -- Wright's generalized Bessel function. + +The following function does not accept NumPy arrays (it is not a +universal function): + +.. autosummary:: + :toctree: generated/ + + lmbda -- Jahnke-Emden Lambda function, Lambdav(x). + +Zeros of Bessel functions +^^^^^^^^^^^^^^^^^^^^^^^^^ + +The following functions do not accept NumPy arrays (they are not +universal functions): + +.. 
autosummary:: + :toctree: generated/ + + jnjnp_zeros -- Compute zeros of integer-order Bessel functions Jn and Jn'. + jnyn_zeros -- Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x). + jn_zeros -- Compute zeros of integer-order Bessel function Jn(x). + jnp_zeros -- Compute zeros of integer-order Bessel function derivative Jn'(x). + yn_zeros -- Compute zeros of integer-order Bessel function Yn(x). + ynp_zeros -- Compute zeros of integer-order Bessel function derivative Yn'(x). + y0_zeros -- Compute nt zeros of Bessel function Y0(z), and derivative at each zero. + y1_zeros -- Compute nt zeros of Bessel function Y1(z), and derivative at each zero. + y1p_zeros -- Compute nt zeros of Bessel derivative Y1'(z), and value at each zero. + +Faster versions of common Bessel functions +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated/ + + j0 -- Bessel function of the first kind of order 0. + j1 -- Bessel function of the first kind of order 1. + y0 -- Bessel function of the second kind of order 0. + y1 -- Bessel function of the second kind of order 1. + i0 -- Modified Bessel function of order 0. + i0e -- Exponentially scaled modified Bessel function of order 0. + i1 -- Modified Bessel function of order 1. + i1e -- Exponentially scaled modified Bessel function of order 1. + k0 -- Modified Bessel function of the second kind of order 0, :math:`K_0`. + k0e -- Exponentially scaled modified Bessel function K of order 0 + k1 -- Modified Bessel function of the second kind of order 1, :math:`K_1(x)`. + k1e -- Exponentially scaled modified Bessel function K of order 1. + +Integrals of Bessel functions +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated/ + + itj0y0 -- Integrals of Bessel functions of order 0. + it2j0y0 -- Integrals related to Bessel functions of order 0. + iti0k0 -- Integrals of modified Bessel functions of order 0. + it2i0k0 -- Integrals related to modified Bessel functions of order 0. 
+   riccati_jn -- Compute Riccati-Bessel function of the first kind and its derivative.
+   riccati_yn -- Compute Riccati-Bessel function of the second kind and its derivative.
+   btdtrib -- Inverse of `btdtr` with respect to `b`.
+ ncfdtrinc -- Calculate non-centrality parameter for non-central F distribution. + +Noncentral t distribution +^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated/ + + nctdtr -- Cumulative distribution function of the non-central `t` distribution. + nctdtridf -- Calculate degrees of freedom for non-central t distribution. + nctdtrit -- Inverse cumulative distribution function of the non-central t distribution. + nctdtrinc -- Calculate non-centrality parameter for non-central t distribution. + +Normal distribution +^^^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated/ + + nrdtrimn -- Calculate mean of normal distribution given other params. + nrdtrisd -- Calculate standard deviation of normal distribution given other params. + ndtr -- Normal cumulative distribution function. + log_ndtr -- Logarithm of normal cumulative distribution function. + ndtri -- Inverse of `ndtr` vs x. + ndtri_exp -- Inverse of `log_ndtr` vs x. + +Poisson distribution +^^^^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated/ + + pdtr -- Poisson cumulative distribution function. + pdtrc -- Poisson survival function. + pdtri -- Inverse to `pdtr` vs m. + pdtrik -- Inverse to `pdtr` vs k. + +Student t distribution +^^^^^^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated/ + + stdtr -- Student t distribution cumulative distribution function. + stdtridf -- Inverse of `stdtr` vs df. + stdtrit -- Inverse of `stdtr` vs `t`. + +Chi square distribution +^^^^^^^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated/ + + chdtr -- Chi square cumulative distribution function. + chdtrc -- Chi square survival function. + chdtri -- Inverse to `chdtrc`. + chdtriv -- Inverse to `chdtr` vs `v`. + +Non-central chi square distribution +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated/ + + chndtr -- Non-central chi square cumulative distribution function. + chndtridf -- Inverse to `chndtr` vs `df`. + chndtrinc -- Inverse to `chndtr` vs `nc`. 
+ chndtrix -- Inverse to `chndtr` vs `x`. + +Kolmogorov distribution +^^^^^^^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated/ + + smirnov -- Kolmogorov-Smirnov complementary cumulative distribution function. + smirnovi -- Inverse to `smirnov`. + kolmogorov -- Complementary cumulative distribution function of Kolmogorov distribution. + kolmogi -- Inverse function to `kolmogorov`. + +Box-Cox transformation +^^^^^^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated/ + + boxcox -- Compute the Box-Cox transformation. + boxcox1p -- Compute the Box-Cox transformation of 1 + `x`. + inv_boxcox -- Compute the inverse of the Box-Cox transformation. + inv_boxcox1p -- Compute the inverse of the Box-Cox transformation. + + +Sigmoidal functions +^^^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated/ + + logit -- Logit ufunc for ndarrays. + expit -- Logistic sigmoid function. + log_expit -- Logarithm of the logistic sigmoid function. + +Miscellaneous +^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated/ + + tklmbda -- Tukey-Lambda cumulative distribution function. + owens_t -- Owen's T Function. + + +Information Theory functions +---------------------------- + +.. autosummary:: + :toctree: generated/ + + entr -- Elementwise function for computing entropy. + rel_entr -- Elementwise function for computing relative entropy. + kl_div -- Elementwise function for computing Kullback-Leibler divergence. + huber -- Huber loss function. + pseudo_huber -- Pseudo-Huber loss function. + + +Gamma and related functions +--------------------------- + +.. autosummary:: + :toctree: generated/ + + gamma -- Gamma function. + gammaln -- Logarithm of the absolute value of the Gamma function for real inputs. + loggamma -- Principal branch of the logarithm of the Gamma function. + gammasgn -- Sign of the gamma function. + gammainc -- Regularized lower incomplete gamma function. + gammaincinv -- Inverse to `gammainc`. 
+   rgamma -- Reciprocal of the gamma function.
+ sph_harm -- Compute spherical harmonics. + +The following functions do not accept NumPy arrays (they are not +universal functions): + +.. autosummary:: + :toctree: generated/ + + clpmn -- Associated Legendre function of the first kind for complex arguments. + lpn -- Legendre function of the first kind. + lqn -- Legendre function of the second kind. + lpmn -- Sequence of associated Legendre functions of the first kind. + lqmn -- Sequence of associated Legendre functions of the second kind. + +Ellipsoidal harmonics +--------------------- + +.. autosummary:: + :toctree: generated/ + + ellip_harm -- Ellipsoidal harmonic functions E^p_n(l). + ellip_harm_2 -- Ellipsoidal harmonic functions F^p_n(l). + ellip_normal -- Ellipsoidal harmonic normalization constants gamma^p_n. + +Orthogonal polynomials +---------------------- + +The following functions evaluate values of orthogonal polynomials: + +.. autosummary:: + :toctree: generated/ + + assoc_laguerre -- Compute the generalized (associated) Laguerre polynomial of degree n and order k. + eval_legendre -- Evaluate Legendre polynomial at a point. + eval_chebyt -- Evaluate Chebyshev polynomial of the first kind at a point. + eval_chebyu -- Evaluate Chebyshev polynomial of the second kind at a point. + eval_chebyc -- Evaluate Chebyshev polynomial of the first kind on [-2, 2] at a point. + eval_chebys -- Evaluate Chebyshev polynomial of the second kind on [-2, 2] at a point. + eval_jacobi -- Evaluate Jacobi polynomial at a point. + eval_laguerre -- Evaluate Laguerre polynomial at a point. + eval_genlaguerre -- Evaluate generalized Laguerre polynomial at a point. + eval_hermite -- Evaluate physicist's Hermite polynomial at a point. + eval_hermitenorm -- Evaluate probabilist's (normalized) Hermite polynomial at a point. + eval_gegenbauer -- Evaluate Gegenbauer polynomial at a point. + eval_sh_legendre -- Evaluate shifted Legendre polynomial at a point. 
+ eval_sh_chebyt -- Evaluate shifted Chebyshev polynomial of the first kind at a point. + eval_sh_chebyu -- Evaluate shifted Chebyshev polynomial of the second kind at a point. + eval_sh_jacobi -- Evaluate shifted Jacobi polynomial at a point. + +The following functions compute roots and quadrature weights for +orthogonal polynomials: + +.. autosummary:: + :toctree: generated/ + + roots_legendre -- Gauss-Legendre quadrature. + roots_chebyt -- Gauss-Chebyshev (first kind) quadrature. + roots_chebyu -- Gauss-Chebyshev (second kind) quadrature. + roots_chebyc -- Gauss-Chebyshev (first kind) quadrature. + roots_chebys -- Gauss-Chebyshev (second kind) quadrature. + roots_jacobi -- Gauss-Jacobi quadrature. + roots_laguerre -- Gauss-Laguerre quadrature. + roots_genlaguerre -- Gauss-generalized Laguerre quadrature. + roots_hermite -- Gauss-Hermite (physicst's) quadrature. + roots_hermitenorm -- Gauss-Hermite (statistician's) quadrature. + roots_gegenbauer -- Gauss-Gegenbauer quadrature. + roots_sh_legendre -- Gauss-Legendre (shifted) quadrature. + roots_sh_chebyt -- Gauss-Chebyshev (first kind, shifted) quadrature. + roots_sh_chebyu -- Gauss-Chebyshev (second kind, shifted) quadrature. + roots_sh_jacobi -- Gauss-Jacobi (shifted) quadrature. + +The functions below, in turn, return the polynomial coefficients in +``orthopoly1d`` objects, which function similarly as `numpy.poly1d`. +The ``orthopoly1d`` class also has an attribute ``weights``, which returns +the roots, weights, and total weights for the appropriate form of Gaussian +quadrature. These are returned in an ``n x 3`` array with roots in the first +column, weights in the second column, and total weights in the final column. +Note that ``orthopoly1d`` objects are converted to `~numpy.poly1d` when doing +arithmetic, and lose information of the original orthogonal polynomial. + +.. autosummary:: + :toctree: generated/ + + legendre -- Legendre polynomial. + chebyt -- Chebyshev polynomial of the first kind. 
+ chebyu -- Chebyshev polynomial of the second kind. + chebyc -- Chebyshev polynomial of the first kind on :math:`[-2, 2]`. + chebys -- Chebyshev polynomial of the second kind on :math:`[-2, 2]`. + jacobi -- Jacobi polynomial. + laguerre -- Laguerre polynomial. + genlaguerre -- Generalized (associated) Laguerre polynomial. + hermite -- Physicist's Hermite polynomial. + hermitenorm -- Normalized (probabilist's) Hermite polynomial. + gegenbauer -- Gegenbauer (ultraspherical) polynomial. + sh_legendre -- Shifted Legendre polynomial. + sh_chebyt -- Shifted Chebyshev polynomial of the first kind. + sh_chebyu -- Shifted Chebyshev polynomial of the second kind. + sh_jacobi -- Shifted Jacobi polynomial. + +.. warning:: + + Computing values of high-order polynomials (around ``order > 20``) using + polynomial coefficients is numerically unstable. To evaluate polynomial + values, the ``eval_*`` functions should be used instead. + + +Hypergeometric functions +------------------------ + +.. autosummary:: + :toctree: generated/ + + hyp2f1 -- Gauss hypergeometric function 2F1(a, b; c; z). + hyp1f1 -- Confluent hypergeometric function 1F1(a, b; x). + hyperu -- Confluent hypergeometric function U(a, b, x) of the second kind. + hyp0f1 -- Confluent hypergeometric limit function 0F1. + + +Parabolic cylinder functions +---------------------------- + +.. autosummary:: + :toctree: generated/ + + pbdv -- Parabolic cylinder function D. + pbvv -- Parabolic cylinder function V. + pbwa -- Parabolic cylinder function W. + +The following functions do not accept NumPy arrays (they are not +universal functions): + +.. autosummary:: + :toctree: generated/ + + pbdv_seq -- Parabolic cylinder functions Dv(x) and derivatives. + pbvv_seq -- Parabolic cylinder functions Vv(x) and derivatives. + pbdn_seq -- Parabolic cylinder functions Dn(z) and derivatives. + +Mathieu and related functions +----------------------------- + +.. 
autosummary:: + :toctree: generated/ + + mathieu_a -- Characteristic value of even Mathieu functions. + mathieu_b -- Characteristic value of odd Mathieu functions. + +The following functions do not accept NumPy arrays (they are not +universal functions): + +.. autosummary:: + :toctree: generated/ + + mathieu_even_coef -- Fourier coefficients for even Mathieu and modified Mathieu functions. + mathieu_odd_coef -- Fourier coefficients for odd Mathieu and modified Mathieu functions. + +The following return both function and first derivative: + +.. autosummary:: + :toctree: generated/ + + mathieu_cem -- Even Mathieu function and its derivative. + mathieu_sem -- Odd Mathieu function and its derivative. + mathieu_modcem1 -- Even modified Mathieu function of the first kind and its derivative. + mathieu_modcem2 -- Even modified Mathieu function of the second kind and its derivative. + mathieu_modsem1 -- Odd modified Mathieu function of the first kind and its derivative. + mathieu_modsem2 -- Odd modified Mathieu function of the second kind and its derivative. + +Spheroidal wave functions +------------------------- + +.. autosummary:: + :toctree: generated/ + + pro_ang1 -- Prolate spheroidal angular function of the first kind and its derivative. + pro_rad1 -- Prolate spheroidal radial function of the first kind and its derivative. + pro_rad2 -- Prolate spheroidal radial function of the second kind and its derivative. + obl_ang1 -- Oblate spheroidal angular function of the first kind and its derivative. + obl_rad1 -- Oblate spheroidal radial function of the first kind and its derivative. + obl_rad2 -- Oblate spheroidal radial function of the second kind and its derivative. + pro_cv -- Characteristic value of prolate spheroidal function. + obl_cv -- Characteristic value of oblate spheroidal function. + pro_cv_seq -- Characteristic values for prolate spheroidal wave functions. + obl_cv_seq -- Characteristic values for oblate spheroidal wave functions. 
+ +The following functions require pre-computed characteristic value: + +.. autosummary:: + :toctree: generated/ + + pro_ang1_cv -- Prolate spheroidal angular function pro_ang1 for precomputed characteristic value. + pro_rad1_cv -- Prolate spheroidal radial function pro_rad1 for precomputed characteristic value. + pro_rad2_cv -- Prolate spheroidal radial function pro_rad2 for precomputed characteristic value. + obl_ang1_cv -- Oblate spheroidal angular function obl_ang1 for precomputed characteristic value. + obl_rad1_cv -- Oblate spheroidal radial function obl_rad1 for precomputed characteristic value. + obl_rad2_cv -- Oblate spheroidal radial function obl_rad2 for precomputed characteristic value. + +Kelvin functions +---------------- + +.. autosummary:: + :toctree: generated/ + + kelvin -- Kelvin functions as complex numbers. + kelvin_zeros -- Compute nt zeros of all Kelvin functions. + ber -- Kelvin function ber. + bei -- Kelvin function bei. + berp -- Derivative of the Kelvin function `ber`. + beip -- Derivative of the Kelvin function `bei`. + ker -- Kelvin function ker. + kei -- Kelvin function kei. + kerp -- Derivative of the Kelvin function ker. + keip -- Derivative of the Kelvin function kei. + +The following functions do not accept NumPy arrays (they are not +universal functions): + +.. autosummary:: + :toctree: generated/ + + ber_zeros -- Compute nt zeros of the Kelvin function ber(x). + bei_zeros -- Compute nt zeros of the Kelvin function bei(x). + berp_zeros -- Compute nt zeros of the Kelvin function ber'(x). + beip_zeros -- Compute nt zeros of the Kelvin function bei'(x). + ker_zeros -- Compute nt zeros of the Kelvin function ker(x). + kei_zeros -- Compute nt zeros of the Kelvin function kei(x). + kerp_zeros -- Compute nt zeros of the Kelvin function ker'(x). + keip_zeros -- Compute nt zeros of the Kelvin function kei'(x). + +Combinatorics +------------- + +.. 
autosummary:: + :toctree: generated/ + + comb -- The number of combinations of N things taken k at a time. + perm -- Permutations of N things taken k at a time, i.e., k-permutations of N. + stirling2 -- Stirling numbers of the second kind. + +Lambert W and related functions +------------------------------- + +.. autosummary:: + :toctree: generated/ + + lambertw -- Lambert W function. + wrightomega -- Wright Omega function. + +Other special functions +----------------------- + +.. autosummary:: + :toctree: generated/ + + agm -- Arithmetic, Geometric Mean. + bernoulli -- Bernoulli numbers B0..Bn (inclusive). + binom -- Binomial coefficient + diric -- Periodic sinc function, also called the Dirichlet function. + euler -- Euler numbers E0..En (inclusive). + expn -- Exponential integral E_n. + exp1 -- Exponential integral E_1 of complex argument z. + expi -- Exponential integral Ei. + factorial -- The factorial of a number or array of numbers. + factorial2 -- Double factorial. + factorialk -- Multifactorial of n of order k, n(!!...!). + shichi -- Hyperbolic sine and cosine integrals. + sici -- Sine and cosine integrals. + softmax -- Softmax function. + log_softmax -- Logarithm of softmax function. + spence -- Spence's function, also known as the dilogarithm. + zeta -- Riemann zeta function. + zetac -- Riemann zeta function minus 1. + +Convenience functions +--------------------- + +.. autosummary:: + :toctree: generated/ + + cbrt -- Cube root of `x`. + exp10 -- 10**x. + exp2 -- 2**x. + radian -- Convert from degrees to radians. + cosdg -- Cosine of the angle `x` given in degrees. + sindg -- Sine of angle given in degrees. + tandg -- Tangent of angle x given in degrees. + cotdg -- Cotangent of the angle `x` given in degrees. + log1p -- Calculates log(1+x) for use when `x` is near zero. + expm1 -- ``exp(x) - 1`` for use when `x` is near zero. + cosm1 -- ``cos(x) - 1`` for use when `x` is near zero. + powm1 -- ``x**y - 1`` for use when `y` is near zero or `x` is near 1. 
+ round -- Round to nearest integer. + xlogy -- Compute ``x*log(y)`` so that the result is 0 if ``x = 0``. + xlog1py -- Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``. + logsumexp -- Compute the log of the sum of exponentials of input elements. + exprel -- Relative error exponential, (exp(x)-1)/x, for use when `x` is near zero. + sinc -- Return the sinc function. + +""" # noqa: E501 + +import warnings + +from ._sf_error import SpecialFunctionWarning, SpecialFunctionError + +from . import _ufuncs +from ._ufuncs import * + +# Replace some function definitions from _ufuncs to add Array API support +from ._support_alternative_backends import ( + log_ndtr, ndtr, ndtri, erf, erfc, i0, i0e, i1, i1e, + gammaln, gammainc, gammaincc, logit, expit) + +from . import _basic +from ._basic import * + +from ._logsumexp import logsumexp, softmax, log_softmax + +from . import _orthogonal +from ._orthogonal import * + +from ._spfun_stats import multigammaln +from ._ellip_harm import ( + ellip_harm, + ellip_harm_2, + ellip_normal +) +from ._lambertw import lambertw +from ._spherical_bessel import ( + spherical_jn, + spherical_yn, + spherical_in, + spherical_kn +) + +# Deprecated namespaces, to be removed in v2.0.0 +from . import add_newdocs, basic, orthogonal, specfun, sf_error, spfun_stats + +# We replace some function definitions from _ufuncs with those from +# _support_alternative_backends above, but those are all listed in _ufuncs.__all__, +# so there is no need to consider _support_alternative_backends.__all__ here. 
+__all__ = _ufuncs.__all__ + _basic.__all__ + _orthogonal.__all__ +__all__ += [ + 'SpecialFunctionWarning', + 'SpecialFunctionError', + 'logsumexp', + 'softmax', + 'log_softmax', + 'multigammaln', + 'ellip_harm', + 'ellip_harm_2', + 'ellip_normal', + 'lambertw', + 'spherical_jn', + 'spherical_yn', + 'spherical_in', + 'spherical_kn', +] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester + +_depr_msg = ('\nThis function was deprecated in SciPy 1.12.0, and will be ' + 'removed in SciPy 1.14.0. Use scipy.special.{} instead.') + + +def btdtr(*args, **kwargs): # type: ignore [no-redef] + warnings.warn(_depr_msg.format('betainc'), category=DeprecationWarning, + stacklevel=2) + return _ufuncs.btdtr(*args, **kwargs) + + +btdtr.__doc__ = _ufuncs.btdtr.__doc__ # type: ignore [misc] + + +def btdtri(*args, **kwargs): # type: ignore [no-redef] + warnings.warn(_depr_msg.format('betaincinv'), category=DeprecationWarning, + stacklevel=2) + return _ufuncs.btdtri(*args, **kwargs) + + +btdtri.__doc__ = _ufuncs.btdtri.__doc__ # type: ignore [misc] + + +def _get_include(): + """This function is for development purposes only. + + This function could disappear or its behavior could change at any time. 
+ """ + import os + return os.path.dirname(__file__) diff --git a/venv/lib/python3.10/site-packages/scipy/special/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/special/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..379afdcf50b4fa4b526c41a3b29c6b3bca976c46 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/special/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/special/__pycache__/_add_newdocs.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/special/__pycache__/_add_newdocs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41da394df8165f8ffae25672f5bbdbc85186ec76 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/special/__pycache__/_add_newdocs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/special/__pycache__/_basic.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/special/__pycache__/_basic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd999de4829cdff8d2ecd8695d68ecc77b6b2eee Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/special/__pycache__/_basic.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/special/__pycache__/_ellip_harm.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/special/__pycache__/_ellip_harm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ae95411d4a5c92198dff38141429acee921ae6a Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/special/__pycache__/_ellip_harm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/special/__pycache__/_lambertw.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/special/__pycache__/_lambertw.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..dbabd0a021372b1b03d151408530d263c28196ae Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/special/__pycache__/_lambertw.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/special/__pycache__/_sf_error.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/special/__pycache__/_sf_error.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8fde4550557eb40fa92b084cd1a1c5bc0a22de2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/special/__pycache__/_sf_error.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/special/__pycache__/_spherical_bessel.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/special/__pycache__/_spherical_bessel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d337d1911ab6927ba56f35515a1e67ba372d990e Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/special/__pycache__/_spherical_bessel.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/special/__pycache__/_testutils.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/special/__pycache__/_testutils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..345b44a5db8999b468b5ab36bc59c6ddcbc15a00 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/special/__pycache__/_testutils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/special/__pycache__/add_newdocs.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/special/__pycache__/add_newdocs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92cbeb380a17f06639767a52c1cfca999b1d9a1e Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/special/__pycache__/add_newdocs.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/scipy/special/__pycache__/basic.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/special/__pycache__/basic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..38766e4e810fef6453929eabe0435536a2b58738 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/special/__pycache__/basic.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/special/__pycache__/sf_error.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/special/__pycache__/sf_error.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f72b07d83817c4ec52a4be5443c246c11f405c68 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/special/__pycache__/sf_error.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/special/__pycache__/specfun.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/special/__pycache__/specfun.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a976d8e624725e4941220b9504c8f3b781e0488 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/special/__pycache__/specfun.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/special/_add_newdocs.py b/venv/lib/python3.10/site-packages/scipy/special/_add_newdocs.py new file mode 100644 index 0000000000000000000000000000000000000000..f55867947b8e4a5bbd20a630e16ab1b8c90cf90a --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special/_add_newdocs.py @@ -0,0 +1,14580 @@ +# Docstrings for generated ufuncs +# +# The syntax is designed to look like the function add_newdoc is being +# called from numpy.lib, but in this file add_newdoc puts the +# docstrings in a dictionary. This dictionary is used in +# _generate_pyx.py to generate the docstrings for the ufuncs in +# scipy.special at the C level when the ufuncs are created at compile +# time. 
+ +docdict: dict[str, str] = {} + + +def get(name): + return docdict.get(name) + + +def add_newdoc(name, doc): + docdict[name] = doc + + +add_newdoc("_sf_error_test_function", + """ + Private function; do not use. + """) + + +add_newdoc("_cosine_cdf", + """ + _cosine_cdf(x) + + Cumulative distribution function (CDF) of the cosine distribution:: + + { 0, x < -pi + cdf(x) = { (pi + x + sin(x))/(2*pi), -pi <= x <= pi + { 1, x > pi + + Parameters + ---------- + x : array_like + `x` must contain real numbers. + + Returns + ------- + scalar or ndarray + The cosine distribution CDF evaluated at `x`. + + """) + +add_newdoc("_cosine_invcdf", + """ + _cosine_invcdf(p) + + Inverse of the cumulative distribution function (CDF) of the cosine + distribution. + + The CDF of the cosine distribution is:: + + cdf(x) = (pi + x + sin(x))/(2*pi) + + This function computes the inverse of cdf(x). + + Parameters + ---------- + p : array_like + `p` must contain real numbers in the interval ``0 <= p <= 1``. + `nan` is returned for values of `p` outside the interval [0, 1]. + + Returns + ------- + scalar or ndarray + The inverse of the cosine distribution CDF evaluated at `p`. + + """) + +add_newdoc("sph_harm", + r""" + sph_harm(m, n, theta, phi, out=None) + + Compute spherical harmonics. + + The spherical harmonics are defined as + + .. math:: + + Y^m_n(\theta,\phi) = \sqrt{\frac{2n+1}{4\pi} \frac{(n-m)!}{(n+m)!}} + e^{i m \theta} P^m_n(\cos(\phi)) + + where :math:`P_n^m` are the associated Legendre functions; see `lpmv`. + + Parameters + ---------- + m : array_like + Order of the harmonic (int); must have ``|m| <= n``. + n : array_like + Degree of the harmonic (int); must have ``n >= 0``. This is + often denoted by ``l`` (lower case L) in descriptions of + spherical harmonics. + theta : array_like + Azimuthal (longitudinal) coordinate; must be in ``[0, 2*pi]``. + phi : array_like + Polar (colatitudinal) coordinate; must be in ``[0, pi]``. 
+ out : ndarray, optional + Optional output array for the function values + + Returns + ------- + y_mn : complex scalar or ndarray + The harmonic :math:`Y^m_n` sampled at ``theta`` and ``phi``. + + Notes + ----- + There are different conventions for the meanings of the input + arguments ``theta`` and ``phi``. In SciPy ``theta`` is the + azimuthal angle and ``phi`` is the polar angle. It is common to + see the opposite convention, that is, ``theta`` as the polar angle + and ``phi`` as the azimuthal angle. + + Note that SciPy's spherical harmonics include the Condon-Shortley + phase [2]_ because it is part of `lpmv`. + + With SciPy's conventions, the first several spherical harmonics + are + + .. math:: + + Y_0^0(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{1}{\pi}} \\ + Y_1^{-1}(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{3}{2\pi}} + e^{-i\theta} \sin(\phi) \\ + Y_1^0(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{3}{\pi}} + \cos(\phi) \\ + Y_1^1(\theta, \phi) &= -\frac{1}{2} \sqrt{\frac{3}{2\pi}} + e^{i\theta} \sin(\phi). + + References + ---------- + .. [1] Digital Library of Mathematical Functions, 14.30. + https://dlmf.nist.gov/14.30 + .. [2] https://en.wikipedia.org/wiki/Spherical_harmonics#Condon.E2.80.93Shortley_phase + """) + +add_newdoc("_ellip_harm", + """ + Internal function, use `ellip_harm` instead. + """) + +add_newdoc("_ellip_norm", + """ + Internal function, use `ellip_norm` instead. + """) + +add_newdoc("_lambertw", + """ + Internal function, use `lambertw` instead. + """) + +add_newdoc("voigt_profile", + r""" + voigt_profile(x, sigma, gamma, out=None) + + Voigt profile. + + The Voigt profile is a convolution of a 1-D Normal distribution with + standard deviation ``sigma`` and a 1-D Cauchy distribution with half-width at + half-maximum ``gamma``. + + If ``sigma = 0``, PDF of Cauchy distribution is returned. + Conversely, if ``gamma = 0``, PDF of Normal distribution is returned. 
+ If ``sigma = gamma = 0``, the return value is ``Inf`` for ``x = 0``, + and ``0`` for all other ``x``. + + Parameters + ---------- + x : array_like + Real argument + sigma : array_like + The standard deviation of the Normal distribution part + gamma : array_like + The half-width at half-maximum of the Cauchy distribution part + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + The Voigt profile at the given arguments + + See Also + -------- + wofz : Faddeeva function + + Notes + ----- + It can be expressed in terms of Faddeeva function + + .. math:: V(x; \sigma, \gamma) = \frac{Re[w(z)]}{\sigma\sqrt{2\pi}}, + .. math:: z = \frac{x + i\gamma}{\sqrt{2}\sigma} + + where :math:`w(z)` is the Faddeeva function. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Voigt_profile + + Examples + -------- + Calculate the function at point 2 for ``sigma=1`` and ``gamma=1``. + + >>> from scipy.special import voigt_profile + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> voigt_profile(2, 1., 1.) + 0.09071519942627544 + + Calculate the function at several points by providing a NumPy array + for `x`. + + >>> values = np.array([-2., 0., 5]) + >>> voigt_profile(values, 1., 1.) + array([0.0907152 , 0.20870928, 0.01388492]) + + Plot the function for different parameter sets. + + >>> fig, ax = plt.subplots(figsize=(8, 8)) + >>> x = np.linspace(-10, 10, 500) + >>> parameters_list = [(1.5, 0., "solid"), (1.3, 0.5, "dashed"), + ... (0., 1.8, "dotted"), (1., 1., "dashdot")] + >>> for params in parameters_list: + ... sigma, gamma, linestyle = params + ... voigt = voigt_profile(x, sigma, gamma) + ... ax.plot(x, voigt, label=rf"$\sigma={sigma},\, \gamma={gamma}$", + ... ls=linestyle) + >>> ax.legend() + >>> plt.show() + + Verify visually that the Voigt profile indeed arises as the convolution + of a normal and a Cauchy distribution. 
+ + >>> from scipy.signal import convolve + >>> x, dx = np.linspace(-10, 10, 500, retstep=True) + >>> def gaussian(x, sigma): + ... return np.exp(-0.5 * x**2/sigma**2)/(sigma * np.sqrt(2*np.pi)) + >>> def cauchy(x, gamma): + ... return gamma/(np.pi * (np.square(x)+gamma**2)) + >>> sigma = 2 + >>> gamma = 1 + >>> gauss_profile = gaussian(x, sigma) + >>> cauchy_profile = cauchy(x, gamma) + >>> convolved = dx * convolve(cauchy_profile, gauss_profile, mode="same") + >>> voigt = voigt_profile(x, sigma, gamma) + >>> fig, ax = plt.subplots(figsize=(8, 8)) + >>> ax.plot(x, gauss_profile, label="Gauss: $G$", c='b') + >>> ax.plot(x, cauchy_profile, label="Cauchy: $C$", c='y', ls="dashed") + >>> xx = 0.5*(x[1:] + x[:-1]) # midpoints + >>> ax.plot(xx, convolved[1:], label="Convolution: $G * C$", ls='dashdot', + ... c='k') + >>> ax.plot(x, voigt, label="Voigt", ls='dotted', c='r') + >>> ax.legend() + >>> plt.show() + """) + +add_newdoc("wrightomega", + r""" + wrightomega(z, out=None) + + Wright Omega function. + + Defined as the solution to + + .. math:: + + \omega + \log(\omega) = z + + where :math:`\log` is the principal branch of the complex logarithm. + + Parameters + ---------- + z : array_like + Points at which to evaluate the Wright Omega function + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + omega : scalar or ndarray + Values of the Wright Omega function + + See Also + -------- + lambertw : The Lambert W function + + Notes + ----- + .. versionadded:: 0.19.0 + + The function can also be defined as + + .. math:: + + \omega(z) = W_{K(z)}(e^z) + + where :math:`K(z) = \lceil (\Im(z) - \pi)/(2\pi) \rceil` is the + unwinding number and :math:`W` is the Lambert W function. + + The implementation here is taken from [1]_. + + References + ---------- + .. [1] Lawrence, Corless, and Jeffrey, "Algorithm 917: Complex + Double-Precision Evaluation of the Wright :math:`\omega` + Function." 
ACM Transactions on Mathematical Software, + 2012. :doi:`10.1145/2168773.2168779`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import wrightomega, lambertw + + >>> wrightomega([-2, -1, 0, 1, 2]) + array([0.12002824, 0.27846454, 0.56714329, 1. , 1.5571456 ]) + + Complex input: + + >>> wrightomega(3 + 5j) + (1.5804428632097158+3.8213626783287937j) + + Verify that ``wrightomega(z)`` satisfies ``w + log(w) = z``: + + >>> w = -5 + 4j + >>> wrightomega(w + np.log(w)) + (-5+4j) + + Verify the connection to ``lambertw``: + + >>> z = 0.5 + 3j + >>> wrightomega(z) + (0.0966015889280649+1.4937828458191993j) + >>> lambertw(np.exp(z)) + (0.09660158892806493+1.4937828458191993j) + + >>> z = 0.5 + 4j + >>> wrightomega(z) + (-0.3362123489037213+2.282986001579032j) + >>> lambertw(np.exp(z), k=1) + (-0.33621234890372115+2.282986001579032j) + """) + + +add_newdoc("agm", + """ + agm(a, b, out=None) + + Compute the arithmetic-geometric mean of `a` and `b`. + + Start with a_0 = a and b_0 = b and iteratively compute:: + + a_{n+1} = (a_n + b_n)/2 + b_{n+1} = sqrt(a_n*b_n) + + a_n and b_n converge to the same limit as n increases; their common + limit is agm(a, b). + + Parameters + ---------- + a, b : array_like + Real values only. If the values are both negative, the result + is negative. If one value is negative and the other is positive, + `nan` is returned. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + The arithmetic-geometric mean of `a` and `b`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import agm + >>> a, b = 24.0, 6.0 + >>> agm(a, b) + 13.458171481725614 + + Compare that result to the iteration: + + >>> while a != b: + ... a, b = (a + b)/2, np.sqrt(a*b) + ... print("a = %19.16f b=%19.16f" % (a, b)) + ... 
+ a = 15.0000000000000000 b=12.0000000000000000 + a = 13.5000000000000000 b=13.4164078649987388 + a = 13.4582039324993694 b=13.4581390309909850 + a = 13.4581714817451772 b=13.4581714817060547 + a = 13.4581714817256159 b=13.4581714817256159 + + When array-like arguments are given, broadcasting applies: + + >>> a = np.array([[1.5], [3], [6]]) # a has shape (3, 1). + >>> b = np.array([6, 12, 24, 48]) # b has shape (4,). + >>> agm(a, b) + array([[ 3.36454287, 5.42363427, 9.05798751, 15.53650756], + [ 4.37037309, 6.72908574, 10.84726853, 18.11597502], + [ 6. , 8.74074619, 13.45817148, 21.69453707]]) + """) + +add_newdoc("airy", + r""" + airy(z, out=None) + + Airy functions and their derivatives. + + Parameters + ---------- + z : array_like + Real or complex argument. + out : tuple of ndarray, optional + Optional output arrays for the function values + + Returns + ------- + Ai, Aip, Bi, Bip : 4-tuple of scalar or ndarray + Airy functions Ai and Bi, and their derivatives Aip and Bip. + + See Also + -------- + airye : exponentially scaled Airy functions. + + Notes + ----- + The Airy functions Ai and Bi are two independent solutions of + + .. math:: y''(x) = x y(x). + + For real `z` in [-10, 10], the computation is carried out by calling + the Cephes [1]_ `airy` routine, which uses power series summation + for small `z` and rational minimax approximations for large `z`. + + Outside this range, the AMOS [2]_ `zairy` and `zbiry` routines are + employed. They are computed using power series for :math:`|z| < 1` and + the following relations to modified Bessel functions for larger `z` + (where :math:`t \equiv 2 z^{3/2}/3`): + + .. math:: + + Ai(z) = \frac{1}{\pi \sqrt{3}} K_{1/3}(t) + + Ai'(z) = -\frac{z}{\pi \sqrt{3}} K_{2/3}(t) + + Bi(z) = \sqrt{\frac{z}{3}} \left(I_{-1/3}(t) + I_{1/3}(t) \right) + + Bi'(z) = \frac{z}{\sqrt{3}} \left(I_{-2/3}(t) + I_{2/3}(t)\right) + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + .. 
[2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions + of a Complex Argument and Nonnegative Order", + http://netlib.org/amos/ + + Examples + -------- + Compute the Airy functions on the interval [-15, 5]. + + >>> import numpy as np + >>> from scipy import special + >>> x = np.linspace(-15, 5, 201) + >>> ai, aip, bi, bip = special.airy(x) + + Plot Ai(x) and Bi(x). + + >>> import matplotlib.pyplot as plt + >>> plt.plot(x, ai, 'r', label='Ai(x)') + >>> plt.plot(x, bi, 'b--', label='Bi(x)') + >>> plt.ylim(-0.5, 1.0) + >>> plt.grid() + >>> plt.legend(loc='upper left') + >>> plt.show() + + """) + +add_newdoc("airye", + """ + airye(z, out=None) + + Exponentially scaled Airy functions and their derivatives. + + Scaling:: + + eAi = Ai * exp(2.0/3.0*z*sqrt(z)) + eAip = Aip * exp(2.0/3.0*z*sqrt(z)) + eBi = Bi * exp(-abs(2.0/3.0*(z*sqrt(z)).real)) + eBip = Bip * exp(-abs(2.0/3.0*(z*sqrt(z)).real)) + + Parameters + ---------- + z : array_like + Real or complex argument. + out : tuple of ndarray, optional + Optional output arrays for the function values + + Returns + ------- + eAi, eAip, eBi, eBip : 4-tuple of scalar or ndarray + Exponentially scaled Airy functions eAi and eBi, and their derivatives + eAip and eBip + + See Also + -------- + airy + + Notes + ----- + Wrapper for the AMOS [1]_ routines `zairy` and `zbiry`. + + References + ---------- + .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions + of a Complex Argument and Nonnegative Order", + http://netlib.org/amos/ + + Examples + -------- + We can compute exponentially scaled Airy functions and their derivatives: + + >>> import numpy as np + >>> from scipy.special import airye + >>> import matplotlib.pyplot as plt + >>> z = np.linspace(0, 50, 500) + >>> eAi, eAip, eBi, eBip = airye(z) + >>> f, ax = plt.subplots(2, 1, sharex=True) + >>> for ind, data in enumerate([[eAi, eAip, ["eAi", "eAip"]], + ... [eBi, eBip, ["eBi", "eBip"]]]): + ... 
ax[ind].plot(z, data[0], "-r", z, data[1], "-b") + ... ax[ind].legend(data[2]) + ... ax[ind].grid(True) + >>> plt.show() + + We can compute these using usual non-scaled Airy functions by: + + >>> from scipy.special import airy + >>> Ai, Aip, Bi, Bip = airy(z) + >>> np.allclose(eAi, Ai * np.exp(2.0 / 3.0 * z * np.sqrt(z))) + True + >>> np.allclose(eAip, Aip * np.exp(2.0 / 3.0 * z * np.sqrt(z))) + True + >>> np.allclose(eBi, Bi * np.exp(-abs(np.real(2.0 / 3.0 * z * np.sqrt(z))))) + True + >>> np.allclose(eBip, Bip * np.exp(-abs(np.real(2.0 / 3.0 * z * np.sqrt(z))))) + True + + Comparing non-scaled and exponentially scaled ones, the usual non-scaled + function quickly underflows for large values, whereas the exponentially + scaled function does not. + + >>> airy(200) + (0.0, 0.0, nan, nan) + >>> airye(200) + (0.07501041684381093, -1.0609012305109042, 0.15003188417418148, 2.1215836725571093) + + """) + +add_newdoc("bdtr", + r""" + bdtr(k, n, p, out=None) + + Binomial distribution cumulative distribution function. + + Sum of the terms 0 through `floor(k)` of the Binomial probability density. + + .. math:: + \mathrm{bdtr}(k, n, p) = + \sum_{j=0}^{\lfloor k \rfloor} {{n}\choose{j}} p^j (1-p)^{n-j} + + Parameters + ---------- + k : array_like + Number of successes (double), rounded down to the nearest integer. + n : array_like + Number of events (int). + p : array_like + Probability of success in a single event (float). + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + y : scalar or ndarray + Probability of `floor(k)` or fewer successes in `n` independent events with + success probabilities of `p`. + + Notes + ----- + The terms are not summed directly; instead the regularized incomplete beta + function is employed, according to the formula, + + .. math:: + \mathrm{bdtr}(k, n, p) = + I_{1 - p}(n - \lfloor k \rfloor, \lfloor k \rfloor + 1). + + Wrapper for the Cephes [1]_ routine `bdtr`. + + References + ---------- + .. 
[1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + """) + +add_newdoc("bdtrc", + r""" + bdtrc(k, n, p, out=None) + + Binomial distribution survival function. + + Sum of the terms `floor(k) + 1` through `n` of the binomial probability + density, + + .. math:: + \mathrm{bdtrc}(k, n, p) = + \sum_{j=\lfloor k \rfloor +1}^n {{n}\choose{j}} p^j (1-p)^{n-j} + + Parameters + ---------- + k : array_like + Number of successes (double), rounded down to nearest integer. + n : array_like + Number of events (int) + p : array_like + Probability of success in a single event. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + y : scalar or ndarray + Probability of `floor(k) + 1` or more successes in `n` independent + events with success probabilities of `p`. + + See Also + -------- + bdtr + betainc + + Notes + ----- + The terms are not summed directly; instead the regularized incomplete beta + function is employed, according to the formula, + + .. math:: + \mathrm{bdtrc}(k, n, p) = I_{p}(\lfloor k \rfloor + 1, n - \lfloor k \rfloor). + + Wrapper for the Cephes [1]_ routine `bdtrc`. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + """) + +add_newdoc("bdtri", + r""" + bdtri(k, n, y, out=None) + + Inverse function to `bdtr` with respect to `p`. + + Finds the event probability `p` such that the sum of the terms 0 through + `k` of the binomial probability density is equal to the given cumulative + probability `y`. + + Parameters + ---------- + k : array_like + Number of successes (float), rounded down to the nearest integer. + n : array_like + Number of events (float) + y : array_like + Cumulative probability (probability of `k` or fewer successes in `n` + events). 
+ out : ndarray, optional + Optional output array for the function values + + Returns + ------- + p : scalar or ndarray + The event probability such that `bdtr(\lfloor k \rfloor, n, p) = y`. + + See Also + -------- + bdtr + betaincinv + + Notes + ----- + The computation is carried out using the inverse beta integral function + and the relation,:: + + 1 - p = betaincinv(n - k, k + 1, y). + + Wrapper for the Cephes [1]_ routine `bdtri`. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + """) + +add_newdoc("bdtrik", + """ + bdtrik(y, n, p, out=None) + + Inverse function to `bdtr` with respect to `k`. + + Finds the number of successes `k` such that the sum of the terms 0 through + `k` of the Binomial probability density for `n` events with probability + `p` is equal to the given cumulative probability `y`. + + Parameters + ---------- + y : array_like + Cumulative probability (probability of `k` or fewer successes in `n` + events). + n : array_like + Number of events (float). + p : array_like + Success probability (float). + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + k : scalar or ndarray + The number of successes `k` such that `bdtr(k, n, p) = y`. + + See Also + -------- + bdtr + + Notes + ----- + Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the + cumulative incomplete beta distribution. + + Computation of `k` involves a search for a value that produces the desired + value of `y`. The search relies on the monotonicity of `y` with `k`. + + Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`. + + References + ---------- + .. [1] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + .. 
[2] Barry Brown, James Lovato, and Kathy Russell, + CDFLIB: Library of Fortran Routines for Cumulative Distribution + Functions, Inverses, and Other Parameters. + + """) + +add_newdoc("bdtrin", + """ + bdtrin(k, y, p, out=None) + + Inverse function to `bdtr` with respect to `n`. + + Finds the number of events `n` such that the sum of the terms 0 through + `k` of the Binomial probability density for events with probability `p` is + equal to the given cumulative probability `y`. + + Parameters + ---------- + k : array_like + Number of successes (float). + y : array_like + Cumulative probability (probability of `k` or fewer successes in `n` + events). + p : array_like + Success probability (float). + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + n : scalar or ndarray + The number of events `n` such that `bdtr(k, n, p) = y`. + + See Also + -------- + bdtr + + Notes + ----- + Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the + cumulative incomplete beta distribution. + + Computation of `n` involves a search for a value that produces the desired + value of `y`. The search relies on the monotonicity of `y` with `n`. + + Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`. + + References + ---------- + .. [1] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + .. [2] Barry Brown, James Lovato, and Kathy Russell, + CDFLIB: Library of Fortran Routines for Cumulative Distribution + Functions, Inverses, and Other Parameters. + """) + +add_newdoc( + "binom", + r""" + binom(x, y, out=None) + + Binomial coefficient considered as a function of two real variables. + + For real arguments, the binomial coefficient is defined as + + .. 
math:: + + \binom{x}{y} = \frac{\Gamma(x + 1)}{\Gamma(y + 1)\Gamma(x - y + 1)} = + \frac{1}{(x + 1)\mathrm{B}(x - y + 1, y + 1)} + + Where :math:`\Gamma` is the Gamma function (`gamma`) and :math:`\mathrm{B}` + is the Beta function (`beta`) [1]_. + + Parameters + ---------- + x, y: array_like + Real arguments to :math:`\binom{x}{y}`. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + Value of binomial coefficient. + + See Also + -------- + comb : The number of combinations of N things taken k at a time. + + Notes + ----- + The Gamma function has poles at non-positive integers and tends to either + positive or negative infinity depending on the direction on the real line + from which a pole is approached. When considered as a function of two real + variables, :math:`\binom{x}{y}` is thus undefined when `x` is a negative + integer. `binom` returns ``nan`` when ``x`` is a negative integer. This + is the case even when ``x`` is a negative integer and ``y`` an integer, + contrary to the usual convention for defining :math:`\binom{n}{k}` when it + is considered as a function of two integer variables. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Binomial_coefficient + + Examples + -------- + The following examples illustrate the ways in which `binom` differs from + the function `comb`. + + >>> from scipy.special import binom, comb + + When ``exact=False`` and ``x`` and ``y`` are both positive, `comb` calls + `binom` internally. + + >>> x, y = 3, 2 + >>> (binom(x, y), comb(x, y), comb(x, y, exact=True)) + (3.0, 3.0, 3) + + For larger values, `comb` with ``exact=True`` no longer agrees + with `binom`. + + >>> x, y = 43, 23 + >>> (binom(x, y), comb(x, y), comb(x, y, exact=True)) + (960566918219.9999, 960566918219.9999, 960566918220) + + `binom` returns ``nan`` when ``x`` is a negative integer, but is otherwise + defined for negative arguments. 
`comb` returns 0 whenever one of ``x`` or + ``y`` is negative or ``x`` is less than ``y``. + + >>> x, y = -3, 2 + >>> (binom(x, y), comb(x, y), comb(x, y, exact=True)) + (nan, 0.0, 0) + + >>> x, y = -3.1, 2.2 + >>> (binom(x, y), comb(x, y), comb(x, y, exact=True)) + (18.714147876804432, 0.0, 0) + + >>> x, y = 2.2, 3.1 + >>> (binom(x, y), comb(x, y), comb(x, y, exact=True)) + (0.037399983365134115, 0.0, 0) + """ +) + +add_newdoc("btdtria", + r""" + btdtria(p, b, x, out=None) + + Inverse of `btdtr` with respect to `a`. + + This is the inverse of the beta cumulative distribution function, `btdtr`, + considered as a function of `a`, returning the value of `a` for which + `btdtr(a, b, x) = p`, or + + .. math:: + p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt + + Parameters + ---------- + p : array_like + Cumulative probability, in [0, 1]. + b : array_like + Shape parameter (`b` > 0). + x : array_like + The quantile, in [0, 1]. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + a : scalar or ndarray + The value of the shape parameter `a` such that `btdtr(a, b, x) = p`. + + See Also + -------- + btdtr : Cumulative distribution function of the beta distribution. + btdtri : Inverse with respect to `x`. + btdtrib : Inverse with respect to `b`. + + Notes + ----- + Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`. + + The cumulative distribution function `p` is computed using a routine by + DiDinato and Morris [2]_. Computation of `a` involves a search for a value + that produces the desired value of `p`. The search relies on the + monotonicity of `p` with `a`. + + References + ---------- + .. [1] Barry Brown, James Lovato, and Kathy Russell, + CDFLIB: Library of Fortran Routines for Cumulative Distribution + Functions, Inverses, and Other Parameters. + .. [2] DiDinato, A. R. and Morris, A. H., + Algorithm 708: Significant Digit Computation of the Incomplete Beta + Function Ratios. ACM Trans. 
Math. Softw. 18 (1993), 360-373. + + """) + + add_newdoc("btdtrib", + r""" + btdtrib(a, p, x, out=None) + + Inverse of `btdtr` with respect to `b`. + + This is the inverse of the beta cumulative distribution function, `btdtr`, + considered as a function of `b`, returning the value of `b` for which + `btdtr(a, b, x) = p`, or + + .. math:: + p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt + + Parameters + ---------- + a : array_like + Shape parameter (`a` > 0). + p : array_like + Cumulative probability, in [0, 1]. + x : array_like + The quantile, in [0, 1]. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + b : scalar or ndarray + The value of the shape parameter `b` such that `btdtr(a, b, x) = p`. + + See Also + -------- + btdtr : Cumulative distribution function of the beta distribution. + btdtri : Inverse with respect to `x`. + btdtria : Inverse with respect to `a`. + + Notes + ----- + Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`. + + The cumulative distribution function `p` is computed using a routine by + DiDinato and Morris [2]_. Computation of `b` involves a search for a value + that produces the desired value of `p`. The search relies on the + monotonicity of `p` with `b`. + + References + ---------- + .. [1] Barry Brown, James Lovato, and Kathy Russell, + CDFLIB: Library of Fortran Routines for Cumulative Distribution + Functions, Inverses, and Other Parameters. + .. [2] DiDinato, A. R. and Morris, A. H., + Algorithm 708: Significant Digit Computation of the Incomplete Beta + Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373. + + + """) + + add_newdoc("bei", + r""" + bei(x, out=None) + + Kelvin function bei. + + Defined as + + .. math:: + + \mathrm{bei}(x) = \Im[J_0(x e^{3 \pi i / 4})] + + where :math:`J_0` is the Bessel function of the first kind of + order zero (see `jv`). See [dlmf]_ for more details. + + Parameters + ---------- + x : array_like + Real argument. 
+ out : ndarray, optional + Optional output array for the function results. + + Returns + ------- + scalar or ndarray + Values of the Kelvin function. + + See Also + -------- + ber : the corresponding real part + beip : the derivative of bei + jv : Bessel function of the first kind + + References + ---------- + .. [dlmf] NIST, Digital Library of Mathematical Functions, + https://dlmf.nist.gov/10.61 + + Examples + -------- + It can be expressed using Bessel functions. + + >>> import numpy as np + >>> import scipy.special as sc + >>> x = np.array([1.0, 2.0, 3.0, 4.0]) + >>> sc.jv(0, x * np.exp(3 * np.pi * 1j / 4)).imag + array([0.24956604, 0.97229163, 1.93758679, 2.29269032]) + >>> sc.bei(x) + array([0.24956604, 0.97229163, 1.93758679, 2.29269032]) + + """) + + add_newdoc("beip", + r""" + beip(x, out=None) + + Derivative of the Kelvin function bei. + + Parameters + ---------- + x : array_like + Real argument. + out : ndarray, optional + Optional output array for the function results. + + Returns + ------- + scalar or ndarray + The values of the derivative of bei. + + See Also + -------- + bei + + References + ---------- + .. [dlmf] NIST, Digital Library of Mathematical Functions, + https://dlmf.nist.gov/10#PT5 + + """) + + add_newdoc("ber", + r""" + ber(x, out=None) + + Kelvin function ber. + + Defined as + + .. math:: + + \mathrm{ber}(x) = \Re[J_0(x e^{3 \pi i / 4})] + + where :math:`J_0` is the Bessel function of the first kind of + order zero (see `jv`). See [dlmf]_ for more details. + + Parameters + ---------- + x : array_like + Real argument. + out : ndarray, optional + Optional output array for the function results. + + Returns + ------- + scalar or ndarray + Values of the Kelvin function. + + See Also + -------- + bei : the corresponding imaginary part + berp : the derivative of ber + jv : Bessel function of the first kind + + References + ---------- + .. 
[dlmf] NIST, Digital Library of Mathematical Functions, + https://dlmf.nist.gov/10.61 + + Examples + -------- + It can be expressed using Bessel functions. + + >>> import numpy as np + >>> import scipy.special as sc + >>> x = np.array([1.0, 2.0, 3.0, 4.0]) + >>> sc.jv(0, x * np.exp(3 * np.pi * 1j / 4)).real + array([ 0.98438178, 0.75173418, -0.22138025, -2.56341656]) + >>> sc.ber(x) + array([ 0.98438178, 0.75173418, -0.22138025, -2.56341656]) + + """) + +add_newdoc("berp", + r""" + berp(x, out=None) + + Derivative of the Kelvin function ber. + + Parameters + ---------- + x : array_like + Real argument. + out : ndarray, optional + Optional output array for the function results. + + Returns + ------- + scalar or ndarray + The values of the derivative of ber. + + See Also + -------- + ber + + References + ---------- + .. [dlmf] NIST, Digital Library of Mathematical Functions, + https://dlmf.nist.gov/10#PT5 + + """) + +add_newdoc("besselpoly", + r""" + besselpoly(a, lmb, nu, out=None) + + Weighted integral of the Bessel function of the first kind. + + Computes + + .. math:: + + \int_0^1 x^\lambda J_\nu(2 a x) \, dx + + where :math:`J_\nu` is a Bessel function and :math:`\lambda=lmb`, + :math:`\nu=nu`. + + Parameters + ---------- + a : array_like + Scale factor inside the Bessel function. + lmb : array_like + Power of `x` + nu : array_like + Order of the Bessel function. + out : ndarray, optional + Optional output array for the function results. + + Returns + ------- + scalar or ndarray + Value of the integral. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + Evaluate the function for one parameter set. + + >>> from scipy.special import besselpoly + >>> besselpoly(1, 1, 1) + 0.24449718372863877 + + Evaluate the function for different scale factors. + + >>> import numpy as np + >>> factors = np.array([0., 3., 6.]) + >>> besselpoly(factors, 1, 1) + array([ 0. 
, -0.00549029, 0.00140174]) + + Plot the function for varying powers, orders and scales. + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> powers = np.linspace(0, 10, 100) + >>> orders = [1, 2, 3] + >>> scales = [1, 2] + >>> all_combinations = [(order, scale) for order in orders + ... for scale in scales] + >>> for order, scale in all_combinations: + ... ax.plot(powers, besselpoly(scale, powers, order), + ... label=rf"$\nu={order}, a={scale}$") + >>> ax.legend() + >>> ax.set_xlabel(r"$\lambda$") + >>> ax.set_ylabel(r"$\int_0^1 x^{\lambda} J_{\nu}(2ax)\,dx$") + >>> plt.show() + """) + +add_newdoc("beta", + r""" + beta(a, b, out=None) + + Beta function. + + This function is defined in [1]_ as + + .. math:: + + B(a, b) = \int_0^1 t^{a-1}(1-t)^{b-1}dt + = \frac{\Gamma(a)\Gamma(b)}{\Gamma(a+b)}, + + where :math:`\Gamma` is the gamma function. + + Parameters + ---------- + a, b : array_like + Real-valued arguments + out : ndarray, optional + Optional output array for the function result + + Returns + ------- + scalar or ndarray + Value of the beta function + + See Also + -------- + gamma : the gamma function + betainc : the regularized incomplete beta function + betaln : the natural logarithm of the absolute + value of the beta function + + References + ---------- + .. [1] NIST Digital Library of Mathematical Functions, + Eq. 5.12.1. 
https://dlmf.nist.gov/5.12 + + Examples + -------- + >>> import scipy.special as sc + + The beta function relates to the gamma function by the + definition given above: + + >>> sc.beta(2, 3) + 0.08333333333333333 + >>> sc.gamma(2)*sc.gamma(3)/sc.gamma(2 + 3) + 0.08333333333333333 + + As this relationship demonstrates, the beta function + is symmetric: + + >>> sc.beta(1.7, 2.4) + 0.16567527689031739 + >>> sc.beta(2.4, 1.7) + 0.16567527689031739 + + This function satisfies :math:`B(1, b) = 1/b`: + + >>> sc.beta(1, 4) + 0.25 + + """) + +add_newdoc( + "betainc", + r""" + betainc(a, b, x, out=None) + + Regularized incomplete beta function. + + Computes the regularized incomplete beta function, defined as [1]_: + + .. math:: + + I_x(a, b) = \frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)} \int_0^x + t^{a-1}(1-t)^{b-1}dt, + + for :math:`0 \leq x \leq 1`. + + This function is the cumulative distribution function for the beta + distribution; its range is [0, 1]. + + Parameters + ---------- + a, b : array_like + Positive, real-valued parameters + x : array_like + Real-valued such that :math:`0 \leq x \leq 1`, + the upper limit of integration + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + Value of the regularized incomplete beta function + + See Also + -------- + beta : beta function + betaincinv : inverse of the regularized incomplete beta function + betaincc : complement of the regularized incomplete beta function + scipy.stats.beta : beta distribution + + Notes + ----- + The term *regularized* in the name of this function refers to the + scaling of the function by the gamma function terms shown in the + formula. When not qualified as *regularized*, the name *incomplete + beta function* often refers to just the integral expression, + without the gamma terms. 
One can use the function `beta` from + `scipy.special` to get this "nonregularized" incomplete beta + function by multiplying the result of ``betainc(a, b, x)`` by + ``beta(a, b)``. + + References + ---------- + .. [1] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/8.17 + + Examples + -------- + + Let :math:`B(a, b)` be the `beta` function. + + >>> import scipy.special as sc + + The coefficient in terms of `gamma` is equal to + :math:`1/B(a, b)`. Also, when :math:`x=1` + the integral is equal to :math:`B(a, b)`. + Therefore, :math:`I_{x=1}(a, b) = 1` for any :math:`a, b`. + + >>> sc.betainc(0.2, 3.5, 1.0) + 1.0 + + It satisfies + :math:`I_x(a, b) = x^a F(a, 1-b, a+1, x)/ (aB(a, b))`, + where :math:`F` is the hypergeometric function `hyp2f1`: + + >>> a, b, x = 1.4, 3.1, 0.5 + >>> x**a * sc.hyp2f1(a, 1 - b, a + 1, x)/(a * sc.beta(a, b)) + 0.8148904036225295 + >>> sc.betainc(a, b, x) + 0.8148904036225296 + + This function satisfies the relationship + :math:`I_x(a, b) = 1 - I_{1-x}(b, a)`: + + >>> sc.betainc(2.2, 3.1, 0.4) + 0.49339638807619446 + >>> 1 - sc.betainc(3.1, 2.2, 1 - 0.4) + 0.49339638807619446 + + """) + + + add_newdoc( + "betaincc", + r""" + betaincc(a, b, x, out=None) + + Complement of the regularized incomplete beta function. + + Computes the complement of the regularized incomplete beta function, + defined as [1]_: + + .. math:: + + \bar{I}_x(a, b) = 1 - I_x(a, b) + = 1 - \frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)} \int_0^x + t^{a-1}(1-t)^{b-1}dt, + + for :math:`0 \leq x \leq 1`. 
+ + Parameters + ---------- + a, b : array_like + Positive, real-valued parameters + x : array_like + Real-valued such that :math:`0 \leq x \leq 1`, + the upper limit of integration + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + Value of the regularized incomplete beta function + + See Also + -------- + betainc : regularized incomplete beta function + betaincinv : inverse of the regularized incomplete beta function + betainccinv : + inverse of the complement of the regularized incomplete beta function + beta : beta function + scipy.stats.beta : beta distribution + + Notes + ----- + .. versionadded:: 1.11.0 + + References + ---------- + .. [1] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/8.17 + + Examples + -------- + >>> from scipy.special import betaincc, betainc + + The naive calculation ``1 - betainc(a, b, x)`` loses precision when + the values of ``betainc(a, b, x)`` are close to 1: + + >>> 1 - betainc(0.5, 8, [0.9, 0.99, 0.999]) + array([2.0574632e-09, 0.0000000e+00, 0.0000000e+00]) + + By using ``betaincc``, we get the correct values: + + >>> betaincc(0.5, 8, [0.9, 0.99, 0.999]) + array([2.05746321e-09, 1.97259354e-17, 1.96467954e-25]) + + """) + +add_newdoc( + "betaincinv", + r""" + betaincinv(a, b, y, out=None) + + Inverse of the regularized incomplete beta function. + + Computes :math:`x` such that: + + .. math:: + + y = I_x(a, b) = \frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)} + \int_0^x t^{a-1}(1-t)^{b-1}dt, + + where :math:`I_x` is the normalized incomplete beta function `betainc` + and :math:`\Gamma` is the `gamma` function [1]_. 
+ + Parameters + ---------- + a, b : array_like + Positive, real-valued parameters + y : array_like + Real-valued input + out : ndarray, optional + Optional output array for function values + + Returns + ------- + scalar or ndarray + Value of the inverse of the regularized incomplete beta function + + See Also + -------- + betainc : regularized incomplete beta function + gamma : gamma function + + References + ---------- + .. [1] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/8.17 + + Examples + -------- + >>> import scipy.special as sc + + This function is the inverse of `betainc` for fixed + values of :math:`a` and :math:`b`. + + >>> a, b = 1.2, 3.1 + >>> y = sc.betainc(a, b, 0.2) + >>> sc.betaincinv(a, b, y) + 0.2 + >>> + >>> a, b = 7.5, 0.4 + >>> x = sc.betaincinv(a, b, 0.5) + >>> sc.betainc(a, b, x) + 0.5 + + """) + + +add_newdoc( + "betainccinv", + r""" + betainccinv(a, b, y, out=None) + + Inverse of the complemented regularized incomplete beta function. + + Computes :math:`x` such that: + + .. math:: + + y = 1 - I_x(a, b) = 1 - \frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)} + \int_0^x t^{a-1}(1-t)^{b-1}dt, + + where :math:`I_x` is the normalized incomplete beta function `betainc` + and :math:`\Gamma` is the `gamma` function [1]_. + + Parameters + ---------- + a, b : array_like + Positive, real-valued parameters + y : array_like + Real-valued input + out : ndarray, optional + Optional output array for function values + + Returns + ------- + scalar or ndarray + Value of the inverse of the regularized incomplete beta function + + See Also + -------- + betainc : regularized incomplete beta function + betaincc : complement of the regularized incomplete beta function + + Notes + ----- + .. versionadded:: 1.11.0 + + References + ---------- + .. 
[1] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/8.17 + + Examples + -------- + >>> from scipy.special import betainccinv, betaincc + + This function is the inverse of `betaincc` for fixed + values of :math:`a` and :math:`b`. + + >>> a, b = 1.2, 3.1 + >>> y = betaincc(a, b, 0.2) + >>> betainccinv(a, b, y) + 0.2 + + >>> a, b = 7, 2.5 + >>> x = betainccinv(a, b, 0.875) + >>> betaincc(a, b, x) + 0.875 + + """) + +add_newdoc("betaln", + """ + betaln(a, b, out=None) + + Natural logarithm of absolute value of beta function. + + Computes ``ln(abs(beta(a, b)))``. + + Parameters + ---------- + a, b : array_like + Positive, real-valued parameters + out : ndarray, optional + Optional output array for function values + + Returns + ------- + scalar or ndarray + Value of the betaln function + + See Also + -------- + gamma : the gamma function + betainc : the regularized incomplete beta function + beta : the beta function + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import betaln, beta + + Verify that, for moderate values of ``a`` and ``b``, ``betaln(a, b)`` + is the same as ``log(beta(a, b))``: + + >>> betaln(3, 4) + -4.0943445622221 + + >>> np.log(beta(3, 4)) + -4.0943445622221 + + In the following ``beta(a, b)`` underflows to 0, so we can't compute + the logarithm of the actual value. + + >>> a = 400 + >>> b = 900 + >>> beta(a, b) + 0.0 + + We can compute the logarithm of ``beta(a, b)`` by using `betaln`: + + >>> betaln(a, b) + -804.3069951764146 + + """) + +add_newdoc("boxcox", + """ + boxcox(x, lmbda, out=None) + + Compute the Box-Cox transformation. + + The Box-Cox transformation is:: + + y = (x**lmbda - 1) / lmbda if lmbda != 0 + log(x) if lmbda == 0 + + Returns `nan` if ``x < 0``. + Returns `-inf` if ``x == 0`` and ``lmbda < 0``. + + Parameters + ---------- + x : array_like + Data to be transformed. + lmbda : array_like + Power parameter of the Box-Cox transform. 
+ out : ndarray, optional + Optional output array for the function values + + Returns + ------- + y : scalar or ndarray + Transformed data. + + Notes + ----- + + .. versionadded:: 0.14.0 + + Examples + -------- + >>> from scipy.special import boxcox + >>> boxcox([1, 4, 10], 2.5) + array([ 0. , 12.4 , 126.09110641]) + >>> boxcox(2, [0, 1, 2]) + array([ 0.69314718, 1. , 1.5 ]) + """) + +add_newdoc("boxcox1p", + """ + boxcox1p(x, lmbda, out=None) + + Compute the Box-Cox transformation of 1 + `x`. + + The Box-Cox transformation computed by `boxcox1p` is:: + + y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0 + log(1+x) if lmbda == 0 + + Returns `nan` if ``x < -1``. + Returns `-inf` if ``x == -1`` and ``lmbda < 0``. + + Parameters + ---------- + x : array_like + Data to be transformed. + lmbda : array_like + Power parameter of the Box-Cox transform. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + y : scalar or ndarray + Transformed data. + + Notes + ----- + + .. versionadded:: 0.14.0 + + Examples + -------- + >>> from scipy.special import boxcox1p + >>> boxcox1p(1e-4, [0, 0.5, 1]) + array([ 9.99950003e-05, 9.99975001e-05, 1.00000000e-04]) + >>> boxcox1p([0.01, 0.1], 0.25) + array([ 0.00996272, 0.09645476]) + """) + +add_newdoc("inv_boxcox", + """ + inv_boxcox(y, lmbda, out=None) + + Compute the inverse of the Box-Cox transformation. + + Find ``x`` such that:: + + y = (x**lmbda - 1) / lmbda if lmbda != 0 + log(x) if lmbda == 0 + + Parameters + ---------- + y : array_like + Data to be transformed. + lmbda : array_like + Power parameter of the Box-Cox transform. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + x : scalar or ndarray + Transformed data. + + Notes + ----- + + .. 
versionadded:: 0.16.0 + + Examples + -------- + >>> from scipy.special import boxcox, inv_boxcox + >>> y = boxcox([1, 4, 10], 2.5) + >>> inv_boxcox(y, 2.5) + array([1., 4., 10.]) + """) + +add_newdoc("inv_boxcox1p", + """ + inv_boxcox1p(y, lmbda, out=None) + + Compute the inverse of the Box-Cox transformation. + + Find ``x`` such that:: + + y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0 + log(1+x) if lmbda == 0 + + Parameters + ---------- + y : array_like + Data to be transformed. + lmbda : array_like + Power parameter of the Box-Cox transform. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + x : scalar or ndarray + Transformed data. + + Notes + ----- + + .. versionadded:: 0.16.0 + + Examples + -------- + >>> from scipy.special import boxcox1p, inv_boxcox1p + >>> y = boxcox1p([1, 4, 10], 2.5) + >>> inv_boxcox1p(y, 2.5) + array([1., 4., 10.]) + """) + +add_newdoc("btdtr", + r""" + btdtr(a, b, x, out=None) + + Cumulative distribution function of the beta distribution. + + Returns the integral from zero to `x` of the beta probability density + function, + + .. math:: + I = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt + + where :math:`\Gamma` is the gamma function. + + .. deprecated:: 1.12.0 + This function is deprecated and will be removed from SciPy 1.14.0. + Use `scipy.special.betainc` instead. + + Parameters + ---------- + a : array_like + Shape parameter (a > 0). + b : array_like + Shape parameter (b > 0). + x : array_like + Upper limit of integration, in [0, 1]. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + I : scalar or ndarray + Cumulative distribution function of the beta distribution with + parameters `a` and `b` at `x`. + + See Also + -------- + betainc + + Notes + ----- + This function is identical to the incomplete beta integral function + `betainc`. + + Wrapper for the Cephes [1]_ routine `btdtr`. + + References + ---------- + .. 
[1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + """) + +add_newdoc("btdtri", + r""" + btdtri(a, b, p, out=None) + + The `p`-th quantile of the beta distribution. + + This function is the inverse of the beta cumulative distribution function, + `btdtr`, returning the value of `x` for which `btdtr(a, b, x) = p`, or + + .. math:: + p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt + + .. deprecated:: 1.12.0 + This function is deprecated and will be removed from SciPy 1.14.0. + Use `scipy.special.betaincinv` instead. + + Parameters + ---------- + a : array_like + Shape parameter (`a` > 0). + b : array_like + Shape parameter (`b` > 0). + p : array_like + Cumulative probability, in [0, 1]. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + x : scalar or ndarray + The quantile corresponding to `p`. + + See Also + -------- + betaincinv + btdtr + + Notes + ----- + The value of `x` is found by interval halving or Newton iterations. + + Wrapper for the Cephes [1]_ routine `incbi`, which solves the equivalent + problem of finding the inverse of the incomplete beta integral. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + """) + +add_newdoc("cbrt", + """ + cbrt(x, out=None) + + Element-wise cube root of `x`. + + Parameters + ---------- + x : array_like + `x` must contain real numbers. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + The cube root of each value in `x`. + + Examples + -------- + >>> from scipy.special import cbrt + + >>> cbrt(8) + 2.0 + >>> cbrt([-8, -3, 0.125, 1.331]) + array([-2. , -1.44224957, 0.5 , 1.1 ]) + + """) + +add_newdoc("chdtr", + r""" + chdtr(v, x, out=None) + + Chi square cumulative distribution function. 
+ + Returns the area under the left tail (from 0 to `x`) of the Chi + square probability density function with `v` degrees of freedom: + + .. math:: + + \frac{1}{2^{v/2} \Gamma(v/2)} \int_0^x t^{v/2 - 1} e^{-t/2} dt + + Here :math:`\Gamma` is the Gamma function; see `gamma`. This + integral can be expressed in terms of the regularized lower + incomplete gamma function `gammainc` as + ``gammainc(v / 2, x / 2)``. [1]_ + + Parameters + ---------- + v : array_like + Degrees of freedom. + x : array_like + Upper bound of the integral. + out : ndarray, optional + Optional output array for the function results. + + Returns + ------- + scalar or ndarray + Values of the cumulative distribution function. + + See Also + -------- + chdtrc, chdtri, chdtriv, gammainc + + References + ---------- + .. [1] Chi-Square distribution, + https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm + + Examples + -------- + >>> import numpy as np + >>> import scipy.special as sc + + It can be expressed in terms of the regularized lower incomplete + gamma function. + + >>> v = 1 + >>> x = np.arange(4) + >>> sc.chdtr(v, x) + array([0. , 0.68268949, 0.84270079, 0.91673548]) + >>> sc.gammainc(v / 2, x / 2) + array([0. , 0.68268949, 0.84270079, 0.91673548]) + + """) + +add_newdoc("chdtrc", + r""" + chdtrc(v, x, out=None) + + Chi square survival function. + + Returns the area under the right hand tail (from `x` to infinity) + of the Chi square probability density function with `v` degrees of + freedom: + + .. math:: + + \frac{1}{2^{v/2} \Gamma(v/2)} \int_x^\infty t^{v/2 - 1} e^{-t/2} dt + + Here :math:`\Gamma` is the Gamma function; see `gamma`. This + integral can be expressed in terms of the regularized upper + incomplete gamma function `gammaincc` as + ``gammaincc(v / 2, x / 2)``. [1]_ + + Parameters + ---------- + v : array_like + Degrees of freedom. + x : array_like + Lower bound of the integral. + out : ndarray, optional + Optional output array for the function results. 
+ + Returns + ------- + scalar or ndarray + Values of the survival function. + + See Also + -------- + chdtr, chdtri, chdtriv, gammaincc + + References + ---------- + .. [1] Chi-Square distribution, + https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm + + Examples + -------- + >>> import numpy as np + >>> import scipy.special as sc + + It can be expressed in terms of the regularized upper incomplete + gamma function. + + >>> v = 1 + >>> x = np.arange(4) + >>> sc.chdtrc(v, x) + array([1. , 0.31731051, 0.15729921, 0.08326452]) + >>> sc.gammaincc(v / 2, x / 2) + array([1. , 0.31731051, 0.15729921, 0.08326452]) + + """) + +add_newdoc("chdtri", + """ + chdtri(v, p, out=None) + + Inverse to `chdtrc` with respect to `x`. + + Returns `x` such that ``chdtrc(v, x) == p``. + + Parameters + ---------- + v : array_like + Degrees of freedom. + p : array_like + Probability. + out : ndarray, optional + Optional output array for the function results. + + Returns + ------- + x : scalar or ndarray + Value so that the probability a Chi square random variable + with `v` degrees of freedom is greater than `x` equals `p`. + + See Also + -------- + chdtrc, chdtr, chdtriv + + References + ---------- + .. [1] Chi-Square distribution, + https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm + + Examples + -------- + >>> import scipy.special as sc + + It inverts `chdtrc`. + + >>> v, p = 1, 0.3 + >>> sc.chdtrc(v, sc.chdtri(v, p)) + 0.3 + >>> x = 1 + >>> sc.chdtri(v, sc.chdtrc(v, x)) + 1.0 + + """) + +add_newdoc("chdtriv", + """ + chdtriv(p, x, out=None) + + Inverse to `chdtr` with respect to `v`. + + Returns `v` such that ``chdtr(v, x) == p``. + + Parameters + ---------- + p : array_like + Probability that the Chi square random variable is less than + or equal to `x`. + x : array_like + Nonnegative input. + out : ndarray, optional + Optional output array for the function results. + + Returns + ------- + scalar or ndarray + Degrees of freedom. 
+ + See Also + -------- + chdtr, chdtrc, chdtri + + References + ---------- + .. [1] Chi-Square distribution, + https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm + + Examples + -------- + >>> import scipy.special as sc + + It inverts `chdtr`. + + >>> p, x = 0.5, 1 + >>> sc.chdtr(sc.chdtriv(p, x), x) + 0.5000000000202172 + >>> v = 1 + >>> sc.chdtriv(sc.chdtr(v, x), v) + 1.0000000000000013 + + """) + +add_newdoc("chndtr", + r""" + chndtr(x, df, nc, out=None) + + Non-central chi square cumulative distribution function + + The cumulative distribution function is given by: + + .. math:: + + P(\chi^{\prime 2} \vert \nu, \lambda) =\sum_{j=0}^{\infty} + e^{-\lambda /2} + \frac{(\lambda /2)^j}{j!} P(\chi^{\prime 2} \vert \nu + 2j), + + where :math:`\nu > 0` is the degrees of freedom (``df``) and + :math:`\lambda \geq 0` is the non-centrality parameter (``nc``). + + Parameters + ---------- + x : array_like + Upper bound of the integral; must satisfy ``x >= 0`` + df : array_like + Degrees of freedom; must satisfy ``df > 0`` + nc : array_like + Non-centrality parameter; must satisfy ``nc >= 0`` + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + x : scalar or ndarray + Value of the non-central chi square cumulative distribution function. + + See Also + -------- + chndtrix, chndtridf, chndtrinc + + """) + +add_newdoc("chndtrix", + """ + chndtrix(p, df, nc, out=None) + + Inverse to `chndtr` vs `x` + + Calculated using a search to find a value for `x` that produces the + desired value of `p`. 
+
+    Parameters
+    ----------
+    p : array_like
+        Probability; must satisfy ``0 <= p < 1``
+    df : array_like
+        Degrees of freedom; must satisfy ``df > 0``
+    nc : array_like
+        Non-centrality parameter; must satisfy ``nc >= 0``
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    x : scalar or ndarray
+        Value so that the probability a non-central Chi square random variable
+        with `df` degrees of freedom and non-centrality, `nc`, is greater than
+        `x` equals `p`.
+
+    See Also
+    --------
+    chndtr, chndtridf, chndtrinc
+
+    """)
+
+add_newdoc("chndtridf",
+    """
+    chndtridf(x, p, nc, out=None)
+
+    Inverse to `chndtr` vs `df`
+
+    Calculated using a search to find a value for `df` that produces the
+    desired value of `p`.
+
+    Parameters
+    ----------
+    x : array_like
+        Upper bound of the integral; must satisfy ``x >= 0``
+    p : array_like
+        Probability; must satisfy ``0 <= p < 1``
+    nc : array_like
+        Non-centrality parameter; must satisfy ``nc >= 0``
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    df : scalar or ndarray
+        Degrees of freedom
+
+    See Also
+    --------
+    chndtr, chndtrix, chndtrinc
+
+    """)
+
+add_newdoc("chndtrinc",
+    """
+    chndtrinc(x, df, p, out=None)
+
+    Inverse to `chndtr` vs `nc`
+
+    Calculated using a search to find a value for `nc` that produces the
+    desired value of `p`.
+
+    Parameters
+    ----------
+    x : array_like
+        Upper bound of the integral; must satisfy ``x >= 0``
+    df : array_like
+        Degrees of freedom; must satisfy ``df > 0``
+    p : array_like
+        Probability; must satisfy ``0 <= p < 1``
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    nc : scalar or ndarray
+        Non-centrality
+
+    See Also
+    --------
+    chndtr, chndtrix, chndtridf
+
+    """)
+
+add_newdoc("cosdg",
+    """
+    cosdg(x, out=None)
+
+    Cosine of the angle `x` given in degrees.
+
+    Parameters
+    ----------
+    x : array_like
+        Angle, given in degrees.
+ out : ndarray, optional + Optional output array for the function results. + + Returns + ------- + scalar or ndarray + Cosine of the input. + + See Also + -------- + sindg, tandg, cotdg + + Examples + -------- + >>> import numpy as np + >>> import scipy.special as sc + + It is more accurate than using cosine directly. + + >>> x = 90 + 180 * np.arange(3) + >>> sc.cosdg(x) + array([-0., 0., -0.]) + >>> np.cos(x * np.pi / 180) + array([ 6.1232340e-17, -1.8369702e-16, 3.0616170e-16]) + + """) + +add_newdoc("cosm1", + """ + cosm1(x, out=None) + + cos(x) - 1 for use when `x` is near zero. + + Parameters + ---------- + x : array_like + Real valued argument. + out : ndarray, optional + Optional output array for the function results. + + Returns + ------- + scalar or ndarray + Values of ``cos(x) - 1``. + + See Also + -------- + expm1, log1p + + Examples + -------- + >>> import numpy as np + >>> import scipy.special as sc + + It is more accurate than computing ``cos(x) - 1`` directly for + ``x`` around 0. + + >>> x = 1e-30 + >>> np.cos(x) - 1 + 0.0 + >>> sc.cosm1(x) + -5.0000000000000005e-61 + + """) + +add_newdoc("cotdg", + """ + cotdg(x, out=None) + + Cotangent of the angle `x` given in degrees. + + Parameters + ---------- + x : array_like + Angle, given in degrees. + out : ndarray, optional + Optional output array for the function results. + + Returns + ------- + scalar or ndarray + Cotangent at the input. + + See Also + -------- + sindg, cosdg, tandg + + Examples + -------- + >>> import numpy as np + >>> import scipy.special as sc + + It is more accurate than using cotangent directly. + + >>> x = 90 + 180 * np.arange(3) + >>> sc.cotdg(x) + array([0., 0., 0.]) + >>> 1 / np.tan(x * np.pi / 180) + array([6.1232340e-17, 1.8369702e-16, 3.0616170e-16]) + + """) + +add_newdoc("dawsn", + """ + dawsn(x, out=None) + + Dawson's integral. + + Computes:: + + exp(-x**2) * integral(exp(t**2), t=0..x). + + Parameters + ---------- + x : array_like + Function parameter. 
+ out : ndarray, optional + Optional output array for the function values + + Returns + ------- + y : scalar or ndarray + Value of the integral. + + See Also + -------- + wofz, erf, erfc, erfcx, erfi + + References + ---------- + .. [1] Steven G. Johnson, Faddeeva W function implementation. + http://ab-initio.mit.edu/Faddeeva + + Examples + -------- + >>> import numpy as np + >>> from scipy import special + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(-15, 15, num=1000) + >>> plt.plot(x, special.dawsn(x)) + >>> plt.xlabel('$x$') + >>> plt.ylabel('$dawsn(x)$') + >>> plt.show() + + """) + +add_newdoc("ellipe", + r""" + ellipe(m, out=None) + + Complete elliptic integral of the second kind + + This function is defined as + + .. math:: E(m) = \int_0^{\pi/2} [1 - m \sin(t)^2]^{1/2} dt + + Parameters + ---------- + m : array_like + Defines the parameter of the elliptic integral. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + E : scalar or ndarray + Value of the elliptic integral. + + See Also + -------- + ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1 + ellipk : Complete elliptic integral of the first kind + ellipkinc : Incomplete elliptic integral of the first kind + ellipeinc : Incomplete elliptic integral of the second kind + elliprd : Symmetric elliptic integral of the second kind. + elliprg : Completely-symmetric elliptic integral of the second kind. + + Notes + ----- + Wrapper for the Cephes [1]_ routine `ellpe`. + + For `m > 0` the computation uses the approximation, + + .. math:: E(m) \approx P(1-m) - (1-m) \log(1-m) Q(1-m), + + where :math:`P` and :math:`Q` are tenth-order polynomials. For + `m < 0`, the relation + + .. math:: E(m) = E(m/(m - 1)) \sqrt(1-m) + + is used. + + The parameterization in terms of :math:`m` follows that of section + 17.2 in [2]_. 
Other parameterizations in terms of the + complementary parameter :math:`1 - m`, modular angle + :math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also + used, so be careful that you choose the correct parameter. + + The Legendre E integral is related to Carlson's symmetric R_D or R_G + functions in multiple ways [3]_. For example, + + .. math:: E(m) = 2 R_G(0, 1-k^2, 1) . + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + .. [2] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + .. [3] NIST Digital Library of Mathematical + Functions. http://dlmf.nist.gov/, Release 1.0.28 of + 2020-09-15. See Sec. 19.25(i) https://dlmf.nist.gov/19.25#i + + Examples + -------- + This function is used in finding the circumference of an + ellipse with semi-major axis `a` and semi-minor axis `b`. + + >>> import numpy as np + >>> from scipy import special + + >>> a = 3.5 + >>> b = 2.1 + >>> e_sq = 1.0 - b**2/a**2 # eccentricity squared + + Then the circumference is found using the following: + + >>> C = 4*a*special.ellipe(e_sq) # circumference formula + >>> C + 17.868899204378693 + + When `a` and `b` are the same (meaning eccentricity is 0), + this reduces to the circumference of a circle. + + >>> 4*a*special.ellipe(0.0) # formula for ellipse with a = b + 21.991148575128552 + >>> 2*np.pi*a # formula for circle of radius a + 21.991148575128552 + + """) + +add_newdoc("ellipeinc", + r""" + ellipeinc(phi, m, out=None) + + Incomplete elliptic integral of the second kind + + This function is defined as + + .. math:: E(\phi, m) = \int_0^{\phi} [1 - m \sin(t)^2]^{1/2} dt + + Parameters + ---------- + phi : array_like + amplitude of the elliptic integral. + m : array_like + parameter of the elliptic integral. 
+ out : ndarray, optional + Optional output array for the function values + + Returns + ------- + E : scalar or ndarray + Value of the elliptic integral. + + See Also + -------- + ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1 + ellipk : Complete elliptic integral of the first kind + ellipkinc : Incomplete elliptic integral of the first kind + ellipe : Complete elliptic integral of the second kind + elliprd : Symmetric elliptic integral of the second kind. + elliprf : Completely-symmetric elliptic integral of the first kind. + elliprg : Completely-symmetric elliptic integral of the second kind. + + Notes + ----- + Wrapper for the Cephes [1]_ routine `ellie`. + + Computation uses arithmetic-geometric means algorithm. + + The parameterization in terms of :math:`m` follows that of section + 17.2 in [2]_. Other parameterizations in terms of the + complementary parameter :math:`1 - m`, modular angle + :math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also + used, so be careful that you choose the correct parameter. + + The Legendre E incomplete integral can be related to combinations + of Carlson's symmetric integrals R_D, R_F, and R_G in multiple + ways [3]_. For example, with :math:`c = \csc^2\phi`, + + .. math:: + E(\phi, m) = R_F(c-1, c-k^2, c) + - \frac{1}{3} k^2 R_D(c-1, c-k^2, c) . + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + .. [2] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + .. [3] NIST Digital Library of Mathematical + Functions. http://dlmf.nist.gov/, Release 1.0.28 of + 2020-09-15. See Sec. 19.25(i) https://dlmf.nist.gov/19.25#i + """) + +add_newdoc("ellipj", + """ + ellipj(u, m, out=None) + + Jacobian elliptic functions + + Calculates the Jacobian elliptic functions of parameter `m` between + 0 and 1, and real argument `u`. 
+ + Parameters + ---------- + m : array_like + Parameter. + u : array_like + Argument. + out : tuple of ndarray, optional + Optional output arrays for the function values + + Returns + ------- + sn, cn, dn, ph : 4-tuple of scalar or ndarray + The returned functions:: + + sn(u|m), cn(u|m), dn(u|m) + + The value `ph` is such that if `u = ellipkinc(ph, m)`, + then `sn(u|m) = sin(ph)` and `cn(u|m) = cos(ph)`. + + See Also + -------- + ellipk : Complete elliptic integral of the first kind + ellipkinc : Incomplete elliptic integral of the first kind + + Notes + ----- + Wrapper for the Cephes [1]_ routine `ellpj`. + + These functions are periodic, with quarter-period on the real axis + equal to the complete elliptic integral `ellipk(m)`. + + Relation to incomplete elliptic integral: If `u = ellipkinc(phi,m)`, then + `sn(u|m) = sin(phi)`, and `cn(u|m) = cos(phi)`. The `phi` is called + the amplitude of `u`. + + Computation is by means of the arithmetic-geometric mean algorithm, + except when `m` is within 1e-9 of 0 or 1. In the latter case with `m` + close to 1, the approximation applies only for `phi < pi/2`. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + """) + +add_newdoc("ellipkm1", + """ + ellipkm1(p, out=None) + + Complete elliptic integral of the first kind around `m` = 1 + + This function is defined as + + .. math:: K(p) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt + + where `m = 1 - p`. + + Parameters + ---------- + p : array_like + Defines the parameter of the elliptic integral as `m = 1 - p`. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + K : scalar or ndarray + Value of the elliptic integral. 
+ + See Also + -------- + ellipk : Complete elliptic integral of the first kind + ellipkinc : Incomplete elliptic integral of the first kind + ellipe : Complete elliptic integral of the second kind + ellipeinc : Incomplete elliptic integral of the second kind + elliprf : Completely-symmetric elliptic integral of the first kind. + + Notes + ----- + Wrapper for the Cephes [1]_ routine `ellpk`. + + For `p <= 1`, computation uses the approximation, + + .. math:: K(p) \\approx P(p) - \\log(p) Q(p), + + where :math:`P` and :math:`Q` are tenth-order polynomials. The + argument `p` is used internally rather than `m` so that the logarithmic + singularity at `m = 1` will be shifted to the origin; this preserves + maximum accuracy. For `p > 1`, the identity + + .. math:: K(p) = K(1/p)/\\sqrt(p) + + is used. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + """) + +add_newdoc("ellipk", + r""" + ellipk(m, out=None) + + Complete elliptic integral of the first kind. + + This function is defined as + + .. math:: K(m) = \int_0^{\pi/2} [1 - m \sin(t)^2]^{-1/2} dt + + Parameters + ---------- + m : array_like + The parameter of the elliptic integral. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + K : scalar or ndarray + Value of the elliptic integral. + + See Also + -------- + ellipkm1 : Complete elliptic integral of the first kind around m = 1 + ellipkinc : Incomplete elliptic integral of the first kind + ellipe : Complete elliptic integral of the second kind + ellipeinc : Incomplete elliptic integral of the second kind + elliprf : Completely-symmetric elliptic integral of the first kind. + + Notes + ----- + For more precision around point m = 1, use `ellipkm1`, which this + function calls. + + The parameterization in terms of :math:`m` follows that of section + 17.2 in [1]_. 
Other parameterizations in terms of the + complementary parameter :math:`1 - m`, modular angle + :math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also + used, so be careful that you choose the correct parameter. + + The Legendre K integral is related to Carlson's symmetric R_F + function by [2]_: + + .. math:: K(m) = R_F(0, 1-k^2, 1) . + + References + ---------- + .. [1] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + .. [2] NIST Digital Library of Mathematical + Functions. http://dlmf.nist.gov/, Release 1.0.28 of + 2020-09-15. See Sec. 19.25(i) https://dlmf.nist.gov/19.25#i + + """) + +add_newdoc("ellipkinc", + r""" + ellipkinc(phi, m, out=None) + + Incomplete elliptic integral of the first kind + + This function is defined as + + .. math:: K(\phi, m) = \int_0^{\phi} [1 - m \sin(t)^2]^{-1/2} dt + + This function is also called :math:`F(\phi, m)`. + + Parameters + ---------- + phi : array_like + amplitude of the elliptic integral + m : array_like + parameter of the elliptic integral + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + K : scalar or ndarray + Value of the elliptic integral + + See Also + -------- + ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1 + ellipk : Complete elliptic integral of the first kind + ellipe : Complete elliptic integral of the second kind + ellipeinc : Incomplete elliptic integral of the second kind + elliprf : Completely-symmetric elliptic integral of the first kind. + + Notes + ----- + Wrapper for the Cephes [1]_ routine `ellik`. The computation is + carried out using the arithmetic-geometric mean algorithm. + + The parameterization in terms of :math:`m` follows that of section + 17.2 in [2]_. 
Other parameterizations in terms of the + complementary parameter :math:`1 - m`, modular angle + :math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also + used, so be careful that you choose the correct parameter. + + The Legendre K incomplete integral (or F integral) is related to + Carlson's symmetric R_F function [3]_. + Setting :math:`c = \csc^2\phi`, + + .. math:: F(\phi, m) = R_F(c-1, c-k^2, c) . + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + .. [2] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + .. [3] NIST Digital Library of Mathematical + Functions. http://dlmf.nist.gov/, Release 1.0.28 of + 2020-09-15. See Sec. 19.25(i) https://dlmf.nist.gov/19.25#i + """) + +add_newdoc( + "elliprc", + r""" + elliprc(x, y, out=None) + + Degenerate symmetric elliptic integral. + + The function RC is defined as [1]_ + + .. math:: + + R_{\mathrm{C}}(x, y) = + \frac{1}{2} \int_0^{+\infty} (t + x)^{-1/2} (t + y)^{-1} dt + = R_{\mathrm{F}}(x, y, y) + + Parameters + ---------- + x, y : array_like + Real or complex input parameters. `x` can be any number in the + complex plane cut along the negative real axis. `y` must be non-zero. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + R : scalar or ndarray + Value of the integral. If `y` is real and negative, the Cauchy + principal value is returned. If both of `x` and `y` are real, the + return value is real. Otherwise, the return value is complex. + + See Also + -------- + elliprf : Completely-symmetric elliptic integral of the first kind. + elliprd : Symmetric elliptic integral of the second kind. + elliprg : Completely-symmetric elliptic integral of the second kind. + elliprj : Symmetric elliptic integral of the third kind. 
+ + Notes + ----- + RC is a degenerate case of the symmetric integral RF: ``elliprc(x, y) == + elliprf(x, y, y)``. It is an elementary function rather than an elliptic + integral. + + The code implements Carlson's algorithm based on the duplication theorems + and series expansion up to the 7th order. [2]_ + + .. versionadded:: 1.8.0 + + References + ---------- + .. [1] B. C. Carlson, ed., Chapter 19 in "Digital Library of Mathematical + Functions," NIST, US Dept. of Commerce. + https://dlmf.nist.gov/19.16.E6 + .. [2] B. C. Carlson, "Numerical computation of real or complex elliptic + integrals," Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995. + https://arxiv.org/abs/math/9409227 + https://doi.org/10.1007/BF02198293 + + Examples + -------- + Basic homogeneity property: + + >>> import numpy as np + >>> from scipy.special import elliprc + + >>> x = 1.2 + 3.4j + >>> y = 5. + >>> scale = 0.3 + 0.4j + >>> elliprc(scale*x, scale*y) + (0.5484493976710874-0.4169557678995833j) + + >>> elliprc(x, y)/np.sqrt(scale) + (0.5484493976710874-0.41695576789958333j) + + When the two arguments coincide, the integral is particularly + simple: + + >>> x = 1.2 + 3.4j + >>> elliprc(x, x) + (0.4299173120614631-0.3041729818745595j) + + >>> 1/np.sqrt(x) + (0.4299173120614631-0.30417298187455954j) + + Another simple case: the first argument vanishes: + + >>> y = 1.2 + 3.4j + >>> elliprc(0, y) + (0.6753125346116815-0.47779380263880866j) + + >>> np.pi/2/np.sqrt(y) + (0.6753125346116815-0.4777938026388088j) + + When `x` and `y` are both positive, we can express + :math:`R_C(x,y)` in terms of more elementary functions. For the + case :math:`0 \le x < y`, + + >>> x = 3.2 + >>> y = 6. + >>> elliprc(x, y) + 0.44942991498453444 + + >>> np.arctan(np.sqrt((y-x)/x))/np.sqrt(y-x) + 0.44942991498453433 + + And for the case :math:`0 \le y < x`, + + >>> x = 6. 
+ >>> y = 3.2 + >>> elliprc(x,y) + 0.4989837501576147 + + >>> np.log((np.sqrt(x)+np.sqrt(x-y))/np.sqrt(y))/np.sqrt(x-y) + 0.49898375015761476 + + """) + +add_newdoc( + "elliprd", + r""" + elliprd(x, y, z, out=None) + + Symmetric elliptic integral of the second kind. + + The function RD is defined as [1]_ + + .. math:: + + R_{\mathrm{D}}(x, y, z) = + \frac{3}{2} \int_0^{+\infty} [(t + x) (t + y)]^{-1/2} (t + z)^{-3/2} + dt + + Parameters + ---------- + x, y, z : array_like + Real or complex input parameters. `x` or `y` can be any number in the + complex plane cut along the negative real axis, but at most one of them + can be zero, while `z` must be non-zero. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + R : scalar or ndarray + Value of the integral. If all of `x`, `y`, and `z` are real, the + return value is real. Otherwise, the return value is complex. + + See Also + -------- + elliprc : Degenerate symmetric elliptic integral. + elliprf : Completely-symmetric elliptic integral of the first kind. + elliprg : Completely-symmetric elliptic integral of the second kind. + elliprj : Symmetric elliptic integral of the third kind. + + Notes + ----- + RD is a degenerate case of the elliptic integral RJ: ``elliprd(x, y, z) == + elliprj(x, y, z, z)``. + + The code implements Carlson's algorithm based on the duplication theorems + and series expansion up to the 7th order. [2]_ + + .. versionadded:: 1.8.0 + + References + ---------- + .. [1] B. C. Carlson, ed., Chapter 19 in "Digital Library of Mathematical + Functions," NIST, US Dept. of Commerce. + https://dlmf.nist.gov/19.16.E5 + .. [2] B. C. Carlson, "Numerical computation of real or complex elliptic + integrals," Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995. 
+ https://arxiv.org/abs/math/9409227 + https://doi.org/10.1007/BF02198293 + + Examples + -------- + Basic homogeneity property: + + >>> import numpy as np + >>> from scipy.special import elliprd + + >>> x = 1.2 + 3.4j + >>> y = 5. + >>> z = 6. + >>> scale = 0.3 + 0.4j + >>> elliprd(scale*x, scale*y, scale*z) + (-0.03703043835680379-0.24500934665683802j) + + >>> elliprd(x, y, z)*np.power(scale, -1.5) + (-0.0370304383568038-0.24500934665683805j) + + All three arguments coincide: + + >>> x = 1.2 + 3.4j + >>> elliprd(x, x, x) + (-0.03986825876151896-0.14051741840449586j) + + >>> np.power(x, -1.5) + (-0.03986825876151894-0.14051741840449583j) + + The so-called "second lemniscate constant": + + >>> elliprd(0, 2, 1)/3 + 0.5990701173677961 + + >>> from scipy.special import gamma + >>> gamma(0.75)**2/np.sqrt(2*np.pi) + 0.5990701173677959 + + """) + +add_newdoc( + "elliprf", + r""" + elliprf(x, y, z, out=None) + + Completely-symmetric elliptic integral of the first kind. + + The function RF is defined as [1]_ + + .. math:: + + R_{\mathrm{F}}(x, y, z) = + \frac{1}{2} \int_0^{+\infty} [(t + x) (t + y) (t + z)]^{-1/2} dt + + Parameters + ---------- + x, y, z : array_like + Real or complex input parameters. `x`, `y`, or `z` can be any number in + the complex plane cut along the negative real axis, but at most one of + them can be zero. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + R : scalar or ndarray + Value of the integral. If all of `x`, `y`, and `z` are real, the return + value is real. Otherwise, the return value is complex. + + See Also + -------- + elliprc : Degenerate symmetric integral. + elliprd : Symmetric elliptic integral of the second kind. + elliprg : Completely-symmetric elliptic integral of the second kind. + elliprj : Symmetric elliptic integral of the third kind. 
+ + Notes + ----- + The code implements Carlson's algorithm based on the duplication theorems + and series expansion up to the 7th order (cf.: + https://dlmf.nist.gov/19.36.i) and the AGM algorithm for the complete + integral. [2]_ + + .. versionadded:: 1.8.0 + + References + ---------- + .. [1] B. C. Carlson, ed., Chapter 19 in "Digital Library of Mathematical + Functions," NIST, US Dept. of Commerce. + https://dlmf.nist.gov/19.16.E1 + .. [2] B. C. Carlson, "Numerical computation of real or complex elliptic + integrals," Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995. + https://arxiv.org/abs/math/9409227 + https://doi.org/10.1007/BF02198293 + + Examples + -------- + Basic homogeneity property: + + >>> import numpy as np + >>> from scipy.special import elliprf + + >>> x = 1.2 + 3.4j + >>> y = 5. + >>> z = 6. + >>> scale = 0.3 + 0.4j + >>> elliprf(scale*x, scale*y, scale*z) + (0.5328051227278146-0.4008623567957094j) + + >>> elliprf(x, y, z)/np.sqrt(scale) + (0.5328051227278147-0.4008623567957095j) + + All three arguments coincide: + + >>> x = 1.2 + 3.4j + >>> elliprf(x, x, x) + (0.42991731206146316-0.30417298187455954j) + + >>> 1/np.sqrt(x) + (0.4299173120614631-0.30417298187455954j) + + The so-called "first lemniscate constant": + + >>> elliprf(0, 1, 2) + 1.3110287771460598 + + >>> from scipy.special import gamma + >>> gamma(0.25)**2/(4*np.sqrt(2*np.pi)) + 1.3110287771460598 + + """) + +add_newdoc( + "elliprg", + r""" + elliprg(x, y, z, out=None) + + Completely-symmetric elliptic integral of the second kind. + + The function RG is defined as [1]_ + + .. math:: + + R_{\mathrm{G}}(x, y, z) = + \frac{1}{4} \int_0^{+\infty} [(t + x) (t + y) (t + z)]^{-1/2} + \left(\frac{x}{t + x} + \frac{y}{t + y} + \frac{z}{t + z}\right) t + dt + + Parameters + ---------- + x, y, z : array_like + Real or complex input parameters. `x`, `y`, or `z` can be any number in + the complex plane cut along the negative real axis. 
+ out : ndarray, optional + Optional output array for the function values + + Returns + ------- + R : scalar or ndarray + Value of the integral. If all of `x`, `y`, and `z` are real, the return + value is real. Otherwise, the return value is complex. + + See Also + -------- + elliprc : Degenerate symmetric integral. + elliprd : Symmetric elliptic integral of the second kind. + elliprf : Completely-symmetric elliptic integral of the first kind. + elliprj : Symmetric elliptic integral of the third kind. + + Notes + ----- + The implementation uses the relation [1]_ + + .. math:: + + 2 R_{\mathrm{G}}(x, y, z) = + z R_{\mathrm{F}}(x, y, z) - + \frac{1}{3} (x - z) (y - z) R_{\mathrm{D}}(x, y, z) + + \sqrt{\frac{x y}{z}} + + and the symmetry of `x`, `y`, `z` when at least one non-zero parameter can + be chosen as the pivot. When one of the arguments is close to zero, the AGM + method is applied instead. Other special cases are computed following Ref. + [2]_ + + .. versionadded:: 1.8.0 + + References + ---------- + .. [1] B. C. Carlson, "Numerical computation of real or complex elliptic + integrals," Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995. + https://arxiv.org/abs/math/9409227 + https://doi.org/10.1007/BF02198293 + .. [2] B. C. Carlson, ed., Chapter 19 in "Digital Library of Mathematical + Functions," NIST, US Dept. of Commerce. + https://dlmf.nist.gov/19.16.E1 + https://dlmf.nist.gov/19.20.ii + + Examples + -------- + Basic homogeneity property: + + >>> import numpy as np + >>> from scipy.special import elliprg + + >>> x = 1.2 + 3.4j + >>> y = 5. + >>> z = 6. 
+ >>> scale = 0.3 + 0.4j + >>> elliprg(scale*x, scale*y, scale*z) + (1.195936862005246+0.8470988320464167j) + + >>> elliprg(x, y, z)*np.sqrt(scale) + (1.195936862005246+0.8470988320464165j) + + Simplifications: + + >>> elliprg(0, y, y) + 1.756203682760182 + + >>> 0.25*np.pi*np.sqrt(y) + 1.7562036827601817 + + >>> elliprg(0, 0, z) + 1.224744871391589 + + >>> 0.5*np.sqrt(z) + 1.224744871391589 + + The surface area of a triaxial ellipsoid with semiaxes ``a``, ``b``, and + ``c`` is given by + + .. math:: + + S = 4 \pi a b c R_{\mathrm{G}}(1 / a^2, 1 / b^2, 1 / c^2). + + >>> def ellipsoid_area(a, b, c): + ... r = 4.0 * np.pi * a * b * c + ... return r * elliprg(1.0 / (a * a), 1.0 / (b * b), 1.0 / (c * c)) + >>> print(ellipsoid_area(1, 3, 5)) + 108.62688289491807 + """) + +add_newdoc( + "elliprj", + r""" + elliprj(x, y, z, p, out=None) + + Symmetric elliptic integral of the third kind. + + The function RJ is defined as [1]_ + + .. math:: + + R_{\mathrm{J}}(x, y, z, p) = + \frac{3}{2} \int_0^{+\infty} [(t + x) (t + y) (t + z)]^{-1/2} + (t + p)^{-1} dt + + .. warning:: + This function should be considered experimental when the inputs are + unbalanced. Check correctness with another independent implementation. + + Parameters + ---------- + x, y, z, p : array_like + Real or complex input parameters. `x`, `y`, or `z` are numbers in + the complex plane cut along the negative real axis (subject to further + constraints, see Notes), and at most one of them can be zero. `p` must + be non-zero. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + R : scalar or ndarray + Value of the integral. If all of `x`, `y`, `z`, and `p` are real, the + return value is real. Otherwise, the return value is complex. + + If `p` is real and negative, while `x`, `y`, and `z` are real, + non-negative, and at most one of them is zero, the Cauchy principal + value is returned. [1]_ [2]_ + + See Also + -------- + elliprc : Degenerate symmetric integral. 
+ elliprd : Symmetric elliptic integral of the second kind. + elliprf : Completely-symmetric elliptic integral of the first kind. + elliprg : Completely-symmetric elliptic integral of the second kind. + + Notes + ----- + The code implements Carlson's algorithm based on the duplication theorems + and series expansion up to the 7th order. [3]_ The algorithm is slightly + different from its earlier incarnation as it appears in [1]_, in that the + call to `elliprc` (or ``atan``/``atanh``, see [4]_) is no longer needed in + the inner loop. Asymptotic approximations are used where arguments differ + widely in the order of magnitude. [5]_ + + The input values are subject to certain sufficient but not necessary + constraints when input arguments are complex. Notably, ``x``, ``y``, and + ``z`` must have non-negative real parts, unless two of them are + non-negative and complex-conjugates to each other while the other is a real + non-negative number. [1]_ If the inputs do not satisfy the sufficient + condition described in Ref. [1]_ they are rejected outright with the output + set to NaN. + + In the case where one of ``x``, ``y``, and ``z`` is equal to ``p``, the + function ``elliprd`` should be preferred because of its less restrictive + domain. + + .. versionadded:: 1.8.0 + + References + ---------- + .. [1] B. C. Carlson, "Numerical computation of real or complex elliptic + integrals," Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995. + https://arxiv.org/abs/math/9409227 + https://doi.org/10.1007/BF02198293 + .. [2] B. C. Carlson, ed., Chapter 19 in "Digital Library of Mathematical + Functions," NIST, US Dept. of Commerce. + https://dlmf.nist.gov/19.20.iii + .. [3] B. C. Carlson, J. FitzSimmons, "Reduction Theorems for Elliptic + Integrands with the Square Root of Two Quadratic Factors," J. + Comput. Appl. Math., vol. 118, nos. 1-2, pp. 71-85, 2000. + https://doi.org/10.1016/S0377-0427(00)00282-X + .. [4] F. 
Johansson, "Numerical Evaluation of Elliptic Functions, Elliptic + Integrals and Modular Forms," in J. Blumlein, C. Schneider, P. + Paule, eds., "Elliptic Integrals, Elliptic Functions and Modular + Forms in Quantum Field Theory," pp. 269-293, 2019 (Cham, + Switzerland: Springer Nature Switzerland) + https://arxiv.org/abs/1806.06725 + https://doi.org/10.1007/978-3-030-04480-0 + .. [5] B. C. Carlson, J. L. Gustafson, "Asymptotic Approximations for + Symmetric Elliptic Integrals," SIAM J. Math. Anls., vol. 25, no. 2, + pp. 288-303, 1994. + https://arxiv.org/abs/math/9310223 + https://doi.org/10.1137/S0036141092228477 + + Examples + -------- + Basic homogeneity property: + + >>> import numpy as np + >>> from scipy.special import elliprj + + >>> x = 1.2 + 3.4j + >>> y = 5. + >>> z = 6. + >>> p = 7. + >>> scale = 0.3 - 0.4j + >>> elliprj(scale*x, scale*y, scale*z, scale*p) + (0.10834905565679157+0.19694950747103812j) + + >>> elliprj(x, y, z, p)*np.power(scale, -1.5) + (0.10834905565679556+0.19694950747103854j) + + Reduction to simpler elliptic integral: + + >>> elliprj(x, y, z, z) + (0.08288462362195129-0.028376809745123258j) + + >>> from scipy.special import elliprd + >>> elliprd(x, y, z) + (0.08288462362195136-0.028376809745123296j) + + All arguments coincide: + + >>> elliprj(x, x, x, x) + (-0.03986825876151896-0.14051741840449586j) + + >>> np.power(x, -1.5) + (-0.03986825876151894-0.14051741840449583j) + + """) + +add_newdoc("entr", + r""" + entr(x, out=None) + + Elementwise function for computing entropy. + + .. math:: \text{entr}(x) = \begin{cases} - x \log(x) & x > 0 \\ 0 & x = 0 + \\ -\infty & \text{otherwise} \end{cases} + + Parameters + ---------- + x : ndarray + Input array. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + res : scalar or ndarray + The value of the elementwise entropy function at the given points `x`. + + See Also + -------- + kl_div, rel_entr, scipy.stats.entropy + + Notes + ----- + .. 
versionadded:: 0.15.0 + + This function is concave. + + The origin of this function is in convex programming; see [1]_. + Given a probability distribution :math:`p_1, \ldots, p_n`, + the definition of entropy in the context of *information theory* is + + .. math:: + + \sum_{i = 1}^n \mathrm{entr}(p_i). + + To compute the latter quantity, use `scipy.stats.entropy`. + + References + ---------- + .. [1] Boyd, Stephen and Lieven Vandenberghe. *Convex optimization*. + Cambridge University Press, 2004. + :doi:`https://doi.org/10.1017/CBO9780511804441` + + """) + +add_newdoc("erf", + """ + erf(z, out=None) + + Returns the error function of complex argument. + + It is defined as ``2/sqrt(pi)*integral(exp(-t**2), t=0..z)``. + + Parameters + ---------- + x : ndarray + Input array. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + res : scalar or ndarray + The values of the error function at the given points `x`. + + See Also + -------- + erfc, erfinv, erfcinv, wofz, erfcx, erfi + + Notes + ----- + The cumulative of the unit normal distribution is given by + ``Phi(z) = 1/2[1 + erf(z/sqrt(2))]``. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Error_function + .. [2] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, + 1972. http://www.math.sfu.ca/~cbm/aands/page_297.htm + .. [3] Steven G. Johnson, Faddeeva W function implementation. + http://ab-initio.mit.edu/Faddeeva + + Examples + -------- + >>> import numpy as np + >>> from scipy import special + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(-3, 3) + >>> plt.plot(x, special.erf(x)) + >>> plt.xlabel('$x$') + >>> plt.ylabel('$erf(x)$') + >>> plt.show() + + """) + +add_newdoc("erfc", + """ + erfc(x, out=None) + + Complementary error function, ``1 - erf(x)``. 
+ + Parameters + ---------- + x : array_like + Real or complex valued argument + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + Values of the complementary error function + + See Also + -------- + erf, erfi, erfcx, dawsn, wofz + + References + ---------- + .. [1] Steven G. Johnson, Faddeeva W function implementation. + http://ab-initio.mit.edu/Faddeeva + + Examples + -------- + >>> import numpy as np + >>> from scipy import special + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(-3, 3) + >>> plt.plot(x, special.erfc(x)) + >>> plt.xlabel('$x$') + >>> plt.ylabel('$erfc(x)$') + >>> plt.show() + + """) + +add_newdoc("erfi", + """ + erfi(z, out=None) + + Imaginary error function, ``-i erf(i z)``. + + Parameters + ---------- + z : array_like + Real or complex valued argument + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + Values of the imaginary error function + + See Also + -------- + erf, erfc, erfcx, dawsn, wofz + + Notes + ----- + + .. versionadded:: 0.12.0 + + References + ---------- + .. [1] Steven G. Johnson, Faddeeva W function implementation. + http://ab-initio.mit.edu/Faddeeva + + Examples + -------- + >>> import numpy as np + >>> from scipy import special + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(-3, 3) + >>> plt.plot(x, special.erfi(x)) + >>> plt.xlabel('$x$') + >>> plt.ylabel('$erfi(x)$') + >>> plt.show() + + """) + +add_newdoc("erfcx", + """ + erfcx(x, out=None) + + Scaled complementary error function, ``exp(x**2) * erfc(x)``. + + Parameters + ---------- + x : array_like + Real or complex valued argument + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + Values of the scaled complementary error function + + + See Also + -------- + erf, erfc, erfi, dawsn, wofz + + Notes + ----- + + .. 
versionadded:: 0.12.0 + + References + ---------- + .. [1] Steven G. Johnson, Faddeeva W function implementation. + http://ab-initio.mit.edu/Faddeeva + + Examples + -------- + >>> import numpy as np + >>> from scipy import special + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(-3, 3) + >>> plt.plot(x, special.erfcx(x)) + >>> plt.xlabel('$x$') + >>> plt.ylabel('$erfcx(x)$') + >>> plt.show() + + """) + +add_newdoc( + "erfinv", + """ + erfinv(y, out=None) + + Inverse of the error function. + + Computes the inverse of the error function. + + In the complex domain, there is no unique complex number w satisfying + erf(w)=z. This indicates a true inverse function would be multivalued. + When the domain restricts to the real, -1 < x < 1, there is a unique real + number satisfying erf(erfinv(x)) = x. + + Parameters + ---------- + y : ndarray + Argument at which to evaluate. Domain: [-1, 1] + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + erfinv : scalar or ndarray + The inverse of erf of y, element-wise + + See Also + -------- + erf : Error function of a complex argument + erfc : Complementary error function, ``1 - erf(x)`` + erfcinv : Inverse of the complementary error function + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.special import erfinv, erf + + >>> erfinv(0.5) + 0.4769362762044699 + + >>> y = np.linspace(-1.0, 1.0, num=9) + >>> x = erfinv(y) + >>> x + array([ -inf, -0.81341985, -0.47693628, -0.22531206, 0. , + 0.22531206, 0.47693628, 0.81341985, inf]) + + Verify that ``erf(erfinv(y))`` is ``y``. + + >>> erf(x) + array([-1. , -0.75, -0.5 , -0.25, 0. , 0.25, 0.5 , 0.75, 1. 
]) + + Plot the function: + + >>> y = np.linspace(-1, 1, 200) + >>> fig, ax = plt.subplots() + >>> ax.plot(y, erfinv(y)) + >>> ax.grid(True) + >>> ax.set_xlabel('y') + >>> ax.set_title('erfinv(y)') + >>> plt.show() + + """) + +add_newdoc( + "erfcinv", + """ + erfcinv(y, out=None) + + Inverse of the complementary error function. + + Computes the inverse of the complementary error function. + + In the complex domain, there is no unique complex number w satisfying + erfc(w)=z. This indicates a true inverse function would be multivalued. + When the domain restricts to the real, 0 < x < 2, there is a unique real + number satisfying erfc(erfcinv(x)) = erfcinv(erfc(x)). + + It is related to inverse of the error function by erfcinv(1-x) = erfinv(x) + + Parameters + ---------- + y : ndarray + Argument at which to evaluate. Domain: [0, 2] + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + erfcinv : scalar or ndarray + The inverse of erfc of y, element-wise + + See Also + -------- + erf : Error function of a complex argument + erfc : Complementary error function, ``1 - erf(x)`` + erfinv : Inverse of the error function + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.special import erfcinv + + >>> erfcinv(0.5) + 0.4769362762044699 + + >>> y = np.linspace(0.0, 2.0, num=11) + >>> erfcinv(y) + array([ inf, 0.9061938 , 0.59511608, 0.37080716, 0.17914345, + -0. , -0.17914345, -0.37080716, -0.59511608, -0.9061938 , + -inf]) + + Plot the function: + + >>> y = np.linspace(0, 2, 200) + >>> fig, ax = plt.subplots() + >>> ax.plot(y, erfcinv(y)) + >>> ax.grid(True) + >>> ax.set_xlabel('y') + >>> ax.set_title('erfcinv(y)') + >>> plt.show() + + """) + +add_newdoc("eval_jacobi", + r""" + eval_jacobi(n, alpha, beta, x, out=None) + + Evaluate Jacobi polynomial at a point. + + The Jacobi polynomials can be defined via the Gauss hypergeometric + function :math:`{}_2F_1` as + + .. 
math:: + + P_n^{(\alpha, \beta)}(x) = \frac{(\alpha + 1)_n}{\Gamma(n + 1)} + {}_2F_1(-n, 1 + \alpha + \beta + n; \alpha + 1; (1 - z)/2) + + where :math:`(\cdot)_n` is the Pochhammer symbol; see `poch`. When + :math:`n` is an integer the result is a polynomial of degree + :math:`n`. See 22.5.42 in [AS]_ for details. + + Parameters + ---------- + n : array_like + Degree of the polynomial. If not an integer the result is + determined via the relation to the Gauss hypergeometric + function. + alpha : array_like + Parameter + beta : array_like + Parameter + x : array_like + Points at which to evaluate the polynomial + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + P : scalar or ndarray + Values of the Jacobi polynomial + + See Also + -------- + roots_jacobi : roots and quadrature weights of Jacobi polynomials + jacobi : Jacobi polynomial object + hyp2f1 : Gauss hypergeometric function + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + """) + +add_newdoc("eval_sh_jacobi", + r""" + eval_sh_jacobi(n, p, q, x, out=None) + + Evaluate shifted Jacobi polynomial at a point. + + Defined by + + .. math:: + + G_n^{(p, q)}(x) + = \binom{2n + p - 1}{n}^{-1} P_n^{(p - q, q - 1)}(2x - 1), + + where :math:`P_n^{(\cdot, \cdot)}` is the n-th Jacobi + polynomial. See 22.5.2 in [AS]_ for details. + + Parameters + ---------- + n : int + Degree of the polynomial. If not an integer, the result is + determined via the relation to `binom` and `eval_jacobi`. + p : float + Parameter + q : float + Parameter + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + G : scalar or ndarray + Values of the shifted Jacobi polynomial. 
+ + See Also + -------- + roots_sh_jacobi : roots and quadrature weights of shifted Jacobi + polynomials + sh_jacobi : shifted Jacobi polynomial object + eval_jacobi : evaluate Jacobi polynomials + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + """) + +add_newdoc("eval_gegenbauer", + r""" + eval_gegenbauer(n, alpha, x, out=None) + + Evaluate Gegenbauer polynomial at a point. + + The Gegenbauer polynomials can be defined via the Gauss + hypergeometric function :math:`{}_2F_1` as + + .. math:: + + C_n^{(\alpha)} = \frac{(2\alpha)_n}{\Gamma(n + 1)} + {}_2F_1(-n, 2\alpha + n; \alpha + 1/2; (1 - z)/2). + + When :math:`n` is an integer the result is a polynomial of degree + :math:`n`. See 22.5.46 in [AS]_ for details. + + Parameters + ---------- + n : array_like + Degree of the polynomial. If not an integer, the result is + determined via the relation to the Gauss hypergeometric + function. + alpha : array_like + Parameter + x : array_like + Points at which to evaluate the Gegenbauer polynomial + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + C : scalar or ndarray + Values of the Gegenbauer polynomial + + See Also + -------- + roots_gegenbauer : roots and quadrature weights of Gegenbauer + polynomials + gegenbauer : Gegenbauer polynomial object + hyp2f1 : Gauss hypergeometric function + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + """) + +add_newdoc("eval_chebyt", + r""" + eval_chebyt(n, x, out=None) + + Evaluate Chebyshev polynomial of the first kind at a point. + + The Chebyshev polynomials of the first kind can be defined via the + Gauss hypergeometric function :math:`{}_2F_1` as + + .. 
math:: + + T_n(x) = {}_2F_1(n, -n; 1/2; (1 - x)/2). + + When :math:`n` is an integer the result is a polynomial of degree + :math:`n`. See 22.5.47 in [AS]_ for details. + + Parameters + ---------- + n : array_like + Degree of the polynomial. If not an integer, the result is + determined via the relation to the Gauss hypergeometric + function. + x : array_like + Points at which to evaluate the Chebyshev polynomial + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + T : scalar or ndarray + Values of the Chebyshev polynomial + + See Also + -------- + roots_chebyt : roots and quadrature weights of Chebyshev + polynomials of the first kind + chebyu : Chebychev polynomial object + eval_chebyu : evaluate Chebyshev polynomials of the second kind + hyp2f1 : Gauss hypergeometric function + numpy.polynomial.chebyshev.Chebyshev : Chebyshev series + + Notes + ----- + This routine is numerically stable for `x` in ``[-1, 1]`` at least + up to order ``10000``. + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + """) + +add_newdoc("eval_chebyu", + r""" + eval_chebyu(n, x, out=None) + + Evaluate Chebyshev polynomial of the second kind at a point. + + The Chebyshev polynomials of the second kind can be defined via + the Gauss hypergeometric function :math:`{}_2F_1` as + + .. math:: + + U_n(x) = (n + 1) {}_2F_1(-n, n + 2; 3/2; (1 - x)/2). + + When :math:`n` is an integer the result is a polynomial of degree + :math:`n`. See 22.5.48 in [AS]_ for details. + + Parameters + ---------- + n : array_like + Degree of the polynomial. If not an integer, the result is + determined via the relation to the Gauss hypergeometric + function. 
+ x : array_like + Points at which to evaluate the Chebyshev polynomial + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + U : scalar or ndarray + Values of the Chebyshev polynomial + + See Also + -------- + roots_chebyu : roots and quadrature weights of Chebyshev + polynomials of the second kind + chebyu : Chebyshev polynomial object + eval_chebyt : evaluate Chebyshev polynomials of the first kind + hyp2f1 : Gauss hypergeometric function + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + """) + +add_newdoc("eval_chebys", + r""" + eval_chebys(n, x, out=None) + + Evaluate Chebyshev polynomial of the second kind on [-2, 2] at a + point. + + These polynomials are defined as + + .. math:: + + S_n(x) = U_n(x/2) + + where :math:`U_n` is a Chebyshev polynomial of the second + kind. See 22.5.13 in [AS]_ for details. + + Parameters + ---------- + n : array_like + Degree of the polynomial. If not an integer, the result is + determined via the relation to `eval_chebyu`. + x : array_like + Points at which to evaluate the Chebyshev polynomial + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + S : scalar or ndarray + Values of the Chebyshev polynomial + + See Also + -------- + roots_chebys : roots and quadrature weights of Chebyshev + polynomials of the second kind on [-2, 2] + chebys : Chebyshev polynomial object + eval_chebyu : evaluate Chebyshev polynomials of the second kind + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + Examples + -------- + >>> import numpy as np + >>> import scipy.special as sc + + They are a scaled version of the Chebyshev polynomials of the + second kind. 
+ + >>> x = np.linspace(-2, 2, 6) + >>> sc.eval_chebys(3, x) + array([-4. , 0.672, 0.736, -0.736, -0.672, 4. ]) + >>> sc.eval_chebyu(3, x / 2) + array([-4. , 0.672, 0.736, -0.736, -0.672, 4. ]) + + """) + +add_newdoc("eval_chebyc", + r""" + eval_chebyc(n, x, out=None) + + Evaluate Chebyshev polynomial of the first kind on [-2, 2] at a + point. + + These polynomials are defined as + + .. math:: + + C_n(x) = 2 T_n(x/2) + + where :math:`T_n` is a Chebyshev polynomial of the first kind. See + 22.5.11 in [AS]_ for details. + + Parameters + ---------- + n : array_like + Degree of the polynomial. If not an integer, the result is + determined via the relation to `eval_chebyt`. + x : array_like + Points at which to evaluate the Chebyshev polynomial + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + C : scalar or ndarray + Values of the Chebyshev polynomial + + See Also + -------- + roots_chebyc : roots and quadrature weights of Chebyshev + polynomials of the first kind on [-2, 2] + chebyc : Chebyshev polynomial object + numpy.polynomial.chebyshev.Chebyshev : Chebyshev series + eval_chebyt : evaluate Chebycshev polynomials of the first kind + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + Examples + -------- + >>> import numpy as np + >>> import scipy.special as sc + + They are a scaled version of the Chebyshev polynomials of the + first kind. + + >>> x = np.linspace(-2, 2, 6) + >>> sc.eval_chebyc(3, x) + array([-2. , 1.872, 1.136, -1.136, -1.872, 2. ]) + >>> 2 * sc.eval_chebyt(3, x / 2) + array([-2. , 1.872, 1.136, -1.136, -1.872, 2. ]) + + """) + +add_newdoc("eval_sh_chebyt", + r""" + eval_sh_chebyt(n, x, out=None) + + Evaluate shifted Chebyshev polynomial of the first kind at a + point. + + These polynomials are defined as + + .. 
math:: + + T_n^*(x) = T_n(2x - 1) + + where :math:`T_n` is a Chebyshev polynomial of the first kind. See + 22.5.14 in [AS]_ for details. + + Parameters + ---------- + n : array_like + Degree of the polynomial. If not an integer, the result is + determined via the relation to `eval_chebyt`. + x : array_like + Points at which to evaluate the shifted Chebyshev polynomial + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + T : scalar or ndarray + Values of the shifted Chebyshev polynomial + + See Also + -------- + roots_sh_chebyt : roots and quadrature weights of shifted + Chebyshev polynomials of the first kind + sh_chebyt : shifted Chebyshev polynomial object + eval_chebyt : evaluate Chebyshev polynomials of the first kind + numpy.polynomial.chebyshev.Chebyshev : Chebyshev series + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + """) + +add_newdoc("eval_sh_chebyu", + r""" + eval_sh_chebyu(n, x, out=None) + + Evaluate shifted Chebyshev polynomial of the second kind at a + point. + + These polynomials are defined as + + .. math:: + + U_n^*(x) = U_n(2x - 1) + + where :math:`U_n` is a Chebyshev polynomial of the first kind. See + 22.5.15 in [AS]_ for details. + + Parameters + ---------- + n : array_like + Degree of the polynomial. If not an integer, the result is + determined via the relation to `eval_chebyu`. 
+ x : array_like + Points at which to evaluate the shifted Chebyshev polynomial + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + U : scalar or ndarray + Values of the shifted Chebyshev polynomial + + See Also + -------- + roots_sh_chebyu : roots and quadrature weights of shifted + Chebychev polynomials of the second kind + sh_chebyu : shifted Chebyshev polynomial object + eval_chebyu : evaluate Chebyshev polynomials of the second kind + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + """) + +add_newdoc("eval_legendre", + r""" + eval_legendre(n, x, out=None) + + Evaluate Legendre polynomial at a point. + + The Legendre polynomials can be defined via the Gauss + hypergeometric function :math:`{}_2F_1` as + + .. math:: + + P_n(x) = {}_2F_1(-n, n + 1; 1; (1 - x)/2). + + When :math:`n` is an integer the result is a polynomial of degree + :math:`n`. See 22.5.49 in [AS]_ for details. + + Parameters + ---------- + n : array_like + Degree of the polynomial. If not an integer, the result is + determined via the relation to the Gauss hypergeometric + function. + x : array_like + Points at which to evaluate the Legendre polynomial + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + P : scalar or ndarray + Values of the Legendre polynomial + + See Also + -------- + roots_legendre : roots and quadrature weights of Legendre + polynomials + legendre : Legendre polynomial object + hyp2f1 : Gauss hypergeometric function + numpy.polynomial.legendre.Legendre : Legendre series + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.special import eval_legendre + + Evaluate the zero-order Legendre polynomial at x = 0 + + >>> eval_legendre(0, 0) + 1.0 + + Evaluate the first-order Legendre polynomial between -1 and 1 + + >>> X = np.linspace(-1, 1, 5) # Domain of Legendre polynomials + >>> eval_legendre(1, X) + array([-1. , -0.5, 0. , 0.5, 1. ]) + + Evaluate Legendre polynomials of order 0 through 4 at x = 0 + + >>> N = range(0, 5) + >>> eval_legendre(N, 0) + array([ 1. , 0. , -0.5 , 0. , 0.375]) + + Plot Legendre polynomials of order 0 through 4 + + >>> X = np.linspace(-1, 1) + + >>> import matplotlib.pyplot as plt + >>> for n in range(0, 5): + ... y = eval_legendre(n, X) + ... plt.plot(X, y, label=r'$P_{}(x)$'.format(n)) + + >>> plt.title("Legendre Polynomials") + >>> plt.xlabel("x") + >>> plt.ylabel(r'$P_n(x)$') + >>> plt.legend(loc='lower right') + >>> plt.show() + + """) + +add_newdoc("eval_sh_legendre", + r""" + eval_sh_legendre(n, x, out=None) + + Evaluate shifted Legendre polynomial at a point. + + These polynomials are defined as + + .. math:: + + P_n^*(x) = P_n(2x - 1) + + where :math:`P_n` is a Legendre polynomial. See 2.2.11 in [AS]_ + for details. + + Parameters + ---------- + n : array_like + Degree of the polynomial. If not an integer, the value is + determined via the relation to `eval_legendre`. + x : array_like + Points at which to evaluate the shifted Legendre polynomial + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + P : scalar or ndarray + Values of the shifted Legendre polynomial + + See Also + -------- + roots_sh_legendre : roots and quadrature weights of shifted + Legendre polynomials + sh_legendre : shifted Legendre polynomial object + eval_legendre : evaluate Legendre polynomials + numpy.polynomial.legendre.Legendre : Legendre series + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. 
+ Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + """) + +add_newdoc("eval_genlaguerre", + r""" + eval_genlaguerre(n, alpha, x, out=None) + + Evaluate generalized Laguerre polynomial at a point. + + The generalized Laguerre polynomials can be defined via the + confluent hypergeometric function :math:`{}_1F_1` as + + .. math:: + + L_n^{(\alpha)}(x) = \binom{n + \alpha}{n} + {}_1F_1(-n, \alpha + 1, x). + + When :math:`n` is an integer the result is a polynomial of degree + :math:`n`. See 22.5.54 in [AS]_ for details. The Laguerre + polynomials are the special case where :math:`\alpha = 0`. + + Parameters + ---------- + n : array_like + Degree of the polynomial. If not an integer, the result is + determined via the relation to the confluent hypergeometric + function. + alpha : array_like + Parameter; must have ``alpha > -1`` + x : array_like + Points at which to evaluate the generalized Laguerre + polynomial + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + L : scalar or ndarray + Values of the generalized Laguerre polynomial + + See Also + -------- + roots_genlaguerre : roots and quadrature weights of generalized + Laguerre polynomials + genlaguerre : generalized Laguerre polynomial object + hyp1f1 : confluent hypergeometric function + eval_laguerre : evaluate Laguerre polynomials + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + """) + +add_newdoc("eval_laguerre", + r""" + eval_laguerre(n, x, out=None) + + Evaluate Laguerre polynomial at a point. + + The Laguerre polynomials can be defined via the confluent + hypergeometric function :math:`{}_1F_1` as + + .. math:: + + L_n(x) = {}_1F_1(-n, 1, x). + + See 22.5.16 and 22.5.54 in [AS]_ for details. 
When :math:`n` is an + integer the result is a polynomial of degree :math:`n`. + + Parameters + ---------- + n : array_like + Degree of the polynomial. If not an integer the result is + determined via the relation to the confluent hypergeometric + function. + x : array_like + Points at which to evaluate the Laguerre polynomial + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + L : scalar or ndarray + Values of the Laguerre polynomial + + See Also + -------- + roots_laguerre : roots and quadrature weights of Laguerre + polynomials + laguerre : Laguerre polynomial object + numpy.polynomial.laguerre.Laguerre : Laguerre series + eval_genlaguerre : evaluate generalized Laguerre polynomials + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + """) + +add_newdoc("eval_hermite", + r""" + eval_hermite(n, x, out=None) + + Evaluate physicist's Hermite polynomial at a point. + + Defined by + + .. math:: + + H_n(x) = (-1)^n e^{x^2} \frac{d^n}{dx^n} e^{-x^2}; + + :math:`H_n` is a polynomial of degree :math:`n`. See 22.11.7 in + [AS]_ for details. + + Parameters + ---------- + n : array_like + Degree of the polynomial + x : array_like + Points at which to evaluate the Hermite polynomial + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + H : scalar or ndarray + Values of the Hermite polynomial + + See Also + -------- + roots_hermite : roots and quadrature weights of physicist's + Hermite polynomials + hermite : physicist's Hermite polynomial object + numpy.polynomial.hermite.Hermite : Physicist's Hermite series + eval_hermitenorm : evaluate Probabilist's Hermite polynomials + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. 
New York: Dover, 1972. + + """) + +add_newdoc("eval_hermitenorm", + r""" + eval_hermitenorm(n, x, out=None) + + Evaluate probabilist's (normalized) Hermite polynomial at a + point. + + Defined by + + .. math:: + + He_n(x) = (-1)^n e^{x^2/2} \frac{d^n}{dx^n} e^{-x^2/2}; + + :math:`He_n` is a polynomial of degree :math:`n`. See 22.11.8 in + [AS]_ for details. + + Parameters + ---------- + n : array_like + Degree of the polynomial + x : array_like + Points at which to evaluate the Hermite polynomial + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + He : scalar or ndarray + Values of the Hermite polynomial + + See Also + -------- + roots_hermitenorm : roots and quadrature weights of probabilist's + Hermite polynomials + hermitenorm : probabilist's Hermite polynomial object + numpy.polynomial.hermite_e.HermiteE : Probabilist's Hermite series + eval_hermite : evaluate physicist's Hermite polynomials + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + """) + +add_newdoc("exp1", + r""" + exp1(z, out=None) + + Exponential integral E1. + + For complex :math:`z \ne 0` the exponential integral can be defined as + [1]_ + + .. math:: + + E_1(z) = \int_z^\infty \frac{e^{-t}}{t} dt, + + where the path of the integral does not cross the negative real + axis or pass through the origin. + + Parameters + ---------- + z: array_like + Real or complex argument. + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + Values of the exponential integral E1 + + See Also + -------- + expi : exponential integral :math:`Ei` + expn : generalization of :math:`E_1` + + Notes + ----- + For :math:`x > 0` it is related to the exponential integral + :math:`Ei` (see `expi`) via the relation + + .. math:: + + E_1(x) = -Ei(-x). 
+ + References + ---------- + .. [1] Digital Library of Mathematical Functions, 6.2.1 + https://dlmf.nist.gov/6.2#E1 + + Examples + -------- + >>> import numpy as np + >>> import scipy.special as sc + + It has a pole at 0. + + >>> sc.exp1(0) + inf + + It has a branch cut on the negative real axis. + + >>> sc.exp1(-1) + nan + >>> sc.exp1(complex(-1, 0)) + (-1.8951178163559368-3.141592653589793j) + >>> sc.exp1(complex(-1, -0.0)) + (-1.8951178163559368+3.141592653589793j) + + It approaches 0 along the positive real axis. + + >>> sc.exp1([1, 10, 100, 1000]) + array([2.19383934e-01, 4.15696893e-06, 3.68359776e-46, 0.00000000e+00]) + + It is related to `expi`. + + >>> x = np.array([1, 2, 3, 4]) + >>> sc.exp1(x) + array([0.21938393, 0.04890051, 0.01304838, 0.00377935]) + >>> -sc.expi(-x) + array([0.21938393, 0.04890051, 0.01304838, 0.00377935]) + + """) + + +add_newdoc( + "_scaled_exp1", + """ + _scaled_exp1(x, out=None): + + Compute the scaled exponential integral. + + This is a private function, subject to change or removal with no + deprecation. + + This function computes F(x), where F is the factor remaining in E_1(x) + when exp(-x)/x is factored out. That is,:: + + E_1(x) = exp(-x)/x * F(x) + + or + + F(x) = x * exp(x) * E_1(x) + + The function is defined for real x >= 0. For x < 0, nan is returned. + + F has the properties: + + * F(0) = 0 + * F(x) is increasing on [0, inf). + * The limit as x goes to infinity of F(x) is 1. + + Parameters + ---------- + x: array_like + The input values. Must be real. The implementation is limited to + double precision floating point, so other types will be cast to + to double precision. + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + Values of the scaled exponential integral. 
+ + See Also + -------- + exp1 : exponential integral E_1 + + Examples + -------- + >>> from scipy.special import _scaled_exp1 + >>> _scaled_exp1([0, 0.1, 1, 10, 100]) + + """ +) + + +add_newdoc("exp10", + """ + exp10(x, out=None) + + Compute ``10**x`` element-wise. + + Parameters + ---------- + x : array_like + `x` must contain real numbers. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + ``10**x``, computed element-wise. + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import exp10 + + >>> exp10(3) + 1000.0 + >>> x = np.array([[-1, -0.5, 0], [0.5, 1, 1.5]]) + >>> exp10(x) + array([[ 0.1 , 0.31622777, 1. ], + [ 3.16227766, 10. , 31.6227766 ]]) + + """) + +add_newdoc("exp2", + """ + exp2(x, out=None) + + Compute ``2**x`` element-wise. + + Parameters + ---------- + x : array_like + `x` must contain real numbers. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + ``2**x``, computed element-wise. + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import exp2 + + >>> exp2(3) + 8.0 + >>> x = np.array([[-1, -0.5, 0], [0.5, 1, 1.5]]) + >>> exp2(x) + array([[ 0.5 , 0.70710678, 1. ], + [ 1.41421356, 2. , 2.82842712]]) + """) + +add_newdoc("expi", + r""" + expi(x, out=None) + + Exponential integral Ei. + + For real :math:`x`, the exponential integral is defined as [1]_ + + .. math:: + + Ei(x) = \int_{-\infty}^x \frac{e^t}{t} dt. + + For :math:`x > 0` the integral is understood as a Cauchy principal + value. + + It is extended to the complex plane by analytic continuation of + the function on the interval :math:`(0, \infty)`. The complex + variant has a branch cut on the negative real axis. 
+ + Parameters + ---------- + x : array_like + Real or complex valued argument + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + Values of the exponential integral + + See Also + -------- + exp1 : Exponential integral :math:`E_1` + expn : Generalized exponential integral :math:`E_n` + + Notes + ----- + The exponential integrals :math:`E_1` and :math:`Ei` satisfy the + relation + + .. math:: + + E_1(x) = -Ei(-x) + + for :math:`x > 0`. + + References + ---------- + .. [1] Digital Library of Mathematical Functions, 6.2.5 + https://dlmf.nist.gov/6.2#E5 + + Examples + -------- + >>> import numpy as np + >>> import scipy.special as sc + + It is related to `exp1`. + + >>> x = np.array([1, 2, 3, 4]) + >>> -sc.expi(-x) + array([0.21938393, 0.04890051, 0.01304838, 0.00377935]) + >>> sc.exp1(x) + array([0.21938393, 0.04890051, 0.01304838, 0.00377935]) + + The complex variant has a branch cut on the negative real axis. + + >>> sc.expi(-1 + 1e-12j) + (-0.21938393439552062+3.1415926535894254j) + >>> sc.expi(-1 - 1e-12j) + (-0.21938393439552062-3.1415926535894254j) + + As the complex variant approaches the branch cut, the real parts + approach the value of the real variant. + + >>> sc.expi(-1) + -0.21938393439552062 + + The SciPy implementation returns the real variant for complex + values on the branch cut. + + >>> sc.expi(complex(-1, 0.0)) + (-0.21938393439552062-0j) + >>> sc.expi(complex(-1, -0.0)) + (-0.21938393439552062-0j) + + """) + +add_newdoc('expit', + """ + expit(x, out=None) + + Expit (a.k.a. logistic sigmoid) ufunc for ndarrays. + + The expit function, also known as the logistic sigmoid function, is + defined as ``expit(x) = 1/(1+exp(-x))``. It is the inverse of the + logit function. + + Parameters + ---------- + x : ndarray + The ndarray to apply expit to element-wise. 
+ out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + An ndarray of the same shape as x. Its entries + are `expit` of the corresponding entry of x. + + See Also + -------- + logit + + Notes + ----- + As a ufunc expit takes a number of optional + keyword arguments. For more information + see `ufuncs `_ + + .. versionadded:: 0.10.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import expit, logit + + >>> expit([-np.inf, -1.5, 0, 1.5, np.inf]) + array([ 0. , 0.18242552, 0.5 , 0.81757448, 1. ]) + + `logit` is the inverse of `expit`: + + >>> logit(expit([-2.5, 0, 3.1, 5.0])) + array([-2.5, 0. , 3.1, 5. ]) + + Plot expit(x) for x in [-6, 6]: + + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(-6, 6, 121) + >>> y = expit(x) + >>> plt.plot(x, y) + >>> plt.grid() + >>> plt.xlim(-6, 6) + >>> plt.xlabel('x') + >>> plt.title('expit(x)') + >>> plt.show() + + """) + +add_newdoc("expm1", + """ + expm1(x, out=None) + + Compute ``exp(x) - 1``. + + When `x` is near zero, ``exp(x)`` is near 1, so the numerical calculation + of ``exp(x) - 1`` can suffer from catastrophic loss of precision. + ``expm1(x)`` is implemented to avoid the loss of precision that occurs when + `x` is near zero. + + Parameters + ---------- + x : array_like + `x` must contain real numbers. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + ``exp(x) - 1`` computed element-wise. + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import expm1 + + >>> expm1(1.0) + 1.7182818284590451 + >>> expm1([-0.2, -0.1, 0, 0.1, 0.2]) + array([-0.18126925, -0.09516258, 0. , 0.10517092, 0.22140276]) + + The exact value of ``exp(7.5e-13) - 1`` is:: + + 7.5000000000028125000000007031250000001318...*10**-13. 
+ + Here is what ``expm1(7.5e-13)`` gives: + + >>> expm1(7.5e-13) + 7.5000000000028135e-13 + + Compare that to ``exp(7.5e-13) - 1``, where the subtraction results in + a "catastrophic" loss of precision: + + >>> np.exp(7.5e-13) - 1 + 7.5006667543675576e-13 + + """) + +add_newdoc("expn", + r""" + expn(n, x, out=None) + + Generalized exponential integral En. + + For integer :math:`n \geq 0` and real :math:`x \geq 0` the + generalized exponential integral is defined as [dlmf]_ + + .. math:: + + E_n(x) = x^{n - 1} \int_x^\infty \frac{e^{-t}}{t^n} dt. + + Parameters + ---------- + n : array_like + Non-negative integers + x : array_like + Real argument + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + Values of the generalized exponential integral + + See Also + -------- + exp1 : special case of :math:`E_n` for :math:`n = 1` + expi : related to :math:`E_n` when :math:`n = 1` + + References + ---------- + .. [dlmf] Digital Library of Mathematical Functions, 8.19.2 + https://dlmf.nist.gov/8.19#E2 + + Examples + -------- + >>> import numpy as np + >>> import scipy.special as sc + + Its domain is nonnegative n and x. + + >>> sc.expn(-1, 1.0), sc.expn(1, -1.0) + (nan, nan) + + It has a pole at ``x = 0`` for ``n = 1, 2``; for larger ``n`` it + is equal to ``1 / (n - 1)``. + + >>> sc.expn([0, 1, 2, 3, 4], 0) + array([ inf, inf, 1. , 0.5 , 0.33333333]) + + For n equal to 0 it reduces to ``exp(-x) / x``. + + >>> x = np.array([1, 2, 3, 4]) + >>> sc.expn(0, x) + array([0.36787944, 0.06766764, 0.01659569, 0.00457891]) + >>> np.exp(-x) / x + array([0.36787944, 0.06766764, 0.01659569, 0.00457891]) + + For n equal to 1 it reduces to `exp1`. + + >>> sc.expn(1, x) + array([0.21938393, 0.04890051, 0.01304838, 0.00377935]) + >>> sc.exp1(x) + array([0.21938393, 0.04890051, 0.01304838, 0.00377935]) + + """) + +add_newdoc("exprel", + r""" + exprel(x, out=None) + + Relative error exponential, ``(exp(x) - 1)/x``. 
+ + When `x` is near zero, ``exp(x)`` is near 1, so the numerical calculation + of ``exp(x) - 1`` can suffer from catastrophic loss of precision. + ``exprel(x)`` is implemented to avoid the loss of precision that occurs when + `x` is near zero. + + Parameters + ---------- + x : ndarray + Input array. `x` must contain real numbers. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + ``(exp(x) - 1)/x``, computed element-wise. + + See Also + -------- + expm1 + + Notes + ----- + .. versionadded:: 0.17.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import exprel + + >>> exprel(0.01) + 1.0050167084168056 + >>> exprel([-0.25, -0.1, 0, 0.1, 0.25]) + array([ 0.88479687, 0.95162582, 1. , 1.05170918, 1.13610167]) + + Compare ``exprel(5e-9)`` to the naive calculation. The exact value + is ``1.00000000250000000416...``. + + >>> exprel(5e-9) + 1.0000000025 + + >>> (np.exp(5e-9) - 1)/5e-9 + 0.99999999392252903 + """) + +add_newdoc("fdtr", + r""" + fdtr(dfn, dfd, x, out=None) + + F cumulative distribution function. + + Returns the value of the cumulative distribution function of the + F-distribution, also known as Snedecor's F-distribution or the + Fisher-Snedecor distribution. + + The F-distribution with parameters :math:`d_n` and :math:`d_d` is the + distribution of the random variable, + + .. math:: + X = \frac{U_n/d_n}{U_d/d_d}, + + where :math:`U_n` and :math:`U_d` are random variables distributed + :math:`\chi^2`, with :math:`d_n` and :math:`d_d` degrees of freedom, + respectively. + + Parameters + ---------- + dfn : array_like + First parameter (positive float). + dfd : array_like + Second parameter (positive float). + x : array_like + Argument (nonnegative float). + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + y : scalar or ndarray + The CDF of the F-distribution with parameters `dfn` and `dfd` at `x`. 
+ + See Also + -------- + fdtrc : F distribution survival function + fdtri : F distribution inverse cumulative distribution + scipy.stats.f : F distribution + + Notes + ----- + The regularized incomplete beta function is used, according to the + formula, + + .. math:: + F(d_n, d_d; x) = I_{xd_n/(d_d + xd_n)}(d_n/2, d_d/2). + + Wrapper for the Cephes [1]_ routine `fdtr`. The F distribution is also + available as `scipy.stats.f`. Calling `fdtr` directly can improve + performance compared to the ``cdf`` method of `scipy.stats.f` (see last + example below). + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + Calculate the function for ``dfn=1`` and ``dfd=2`` at ``x=1``. + + >>> import numpy as np + >>> from scipy.special import fdtr + >>> fdtr(1, 2, 1) + 0.5773502691896258 + + Calculate the function at several points by providing a NumPy array for + `x`. + + >>> x = np.array([0.5, 2., 3.]) + >>> fdtr(1, 2, x) + array([0.4472136 , 0.70710678, 0.77459667]) + + Plot the function for several parameter sets. + + >>> import matplotlib.pyplot as plt + >>> dfn_parameters = [1, 5, 10, 50] + >>> dfd_parameters = [1, 1, 2, 3] + >>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot'] + >>> parameters_list = list(zip(dfn_parameters, dfd_parameters, + ... linestyles)) + >>> x = np.linspace(0, 30, 1000) + >>> fig, ax = plt.subplots() + >>> for parameter_set in parameters_list: + ... dfn, dfd, style = parameter_set + ... fdtr_vals = fdtr(dfn, dfd, x) + ... ax.plot(x, fdtr_vals, label=rf"$d_n={dfn},\, d_d={dfd}$", + ... ls=style) + >>> ax.legend() + >>> ax.set_xlabel("$x$") + >>> ax.set_title("F distribution cumulative distribution function") + >>> plt.show() + + The F distribution is also available as `scipy.stats.f`. Using `fdtr` + directly can be much faster than calling the ``cdf`` method of + `scipy.stats.f`, especially for small arrays or individual values. 
+ To get the same results one must use the following parametrization: + ``stats.f(dfn, dfd).cdf(x)=fdtr(dfn, dfd, x)``. + + >>> from scipy.stats import f + >>> dfn, dfd = 1, 2 + >>> x = 1 + >>> fdtr_res = fdtr(dfn, dfd, x) # this will often be faster than below + >>> f_dist_res = f(dfn, dfd).cdf(x) + >>> fdtr_res == f_dist_res # test that results are equal + True + """) + +add_newdoc("fdtrc", + r""" + fdtrc(dfn, dfd, x, out=None) + + F survival function. + + Returns the complemented F-distribution function (the integral of the + density from `x` to infinity). + + Parameters + ---------- + dfn : array_like + First parameter (positive float). + dfd : array_like + Second parameter (positive float). + x : array_like + Argument (nonnegative float). + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + y : scalar or ndarray + The complemented F-distribution function with parameters `dfn` and + `dfd` at `x`. + + See Also + -------- + fdtr : F distribution cumulative distribution function + fdtri : F distribution inverse cumulative distribution function + scipy.stats.f : F distribution + + Notes + ----- + The regularized incomplete beta function is used, according to the + formula, + + .. math:: + F(d_n, d_d; x) = I_{d_d/(d_d + xd_n)}(d_d/2, d_n/2). + + Wrapper for the Cephes [1]_ routine `fdtrc`. The F distribution is also + available as `scipy.stats.f`. Calling `fdtrc` directly can improve + performance compared to the ``sf`` method of `scipy.stats.f` (see last + example below). + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + Calculate the function for ``dfn=1`` and ``dfd=2`` at ``x=1``. + + >>> import numpy as np + >>> from scipy.special import fdtrc + >>> fdtrc(1, 2, 1) + 0.42264973081037427 + + Calculate the function at several points by providing a NumPy array for + `x`. 
+ + >>> x = np.array([0.5, 2., 3.]) + >>> fdtrc(1, 2, x) + array([0.5527864 , 0.29289322, 0.22540333]) + + Plot the function for several parameter sets. + + >>> import matplotlib.pyplot as plt + >>> dfn_parameters = [1, 5, 10, 50] + >>> dfd_parameters = [1, 1, 2, 3] + >>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot'] + >>> parameters_list = list(zip(dfn_parameters, dfd_parameters, + ... linestyles)) + >>> x = np.linspace(0, 30, 1000) + >>> fig, ax = plt.subplots() + >>> for parameter_set in parameters_list: + ... dfn, dfd, style = parameter_set + ... fdtrc_vals = fdtrc(dfn, dfd, x) + ... ax.plot(x, fdtrc_vals, label=rf"$d_n={dfn},\, d_d={dfd}$", + ... ls=style) + >>> ax.legend() + >>> ax.set_xlabel("$x$") + >>> ax.set_title("F distribution survival function") + >>> plt.show() + + The F distribution is also available as `scipy.stats.f`. Using `fdtrc` + directly can be much faster than calling the ``sf`` method of + `scipy.stats.f`, especially for small arrays or individual values. + To get the same results one must use the following parametrization: + ``stats.f(dfn, dfd).sf(x)=fdtrc(dfn, dfd, x)``. + + >>> from scipy.stats import f + >>> dfn, dfd = 1, 2 + >>> x = 1 + >>> fdtrc_res = fdtrc(dfn, dfd, x) # this will often be faster than below + >>> f_dist_res = f(dfn, dfd).sf(x) + >>> f_dist_res == fdtrc_res # test that results are equal + True + """) + +add_newdoc("fdtri", + r""" + fdtri(dfn, dfd, p, out=None) + + The `p`-th quantile of the F-distribution. + + This function is the inverse of the F-distribution CDF, `fdtr`, returning + the `x` such that `fdtr(dfn, dfd, x) = p`. + + Parameters + ---------- + dfn : array_like + First parameter (positive float). + dfd : array_like + Second parameter (positive float). + p : array_like + Cumulative probability, in [0, 1]. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + x : scalar or ndarray + The quantile corresponding to `p`. 
+ + See Also + -------- + fdtr : F distribution cumulative distribution function + fdtrc : F distribution survival function + scipy.stats.f : F distribution + + Notes + ----- + The computation is carried out using the relation to the inverse + regularized beta function, :math:`I^{-1}_x(a, b)`. Let + :math:`z = I^{-1}_p(d_d/2, d_n/2).` Then, + + .. math:: + x = \frac{d_d (1 - z)}{d_n z}. + + If `p` is such that :math:`x < 0.5`, the following relation is used + instead for improved stability: let + :math:`z' = I^{-1}_{1 - p}(d_n/2, d_d/2).` Then, + + .. math:: + x = \frac{d_d z'}{d_n (1 - z')}. + + Wrapper for the Cephes [1]_ routine `fdtri`. + + The F distribution is also available as `scipy.stats.f`. Calling + `fdtri` directly can improve performance compared to the ``ppf`` + method of `scipy.stats.f` (see last example below). + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + `fdtri` represents the inverse of the F distribution CDF which is + available as `fdtr`. Here, we calculate the CDF for ``df1=1``, ``df2=2`` + at ``x=3``. `fdtri` then returns ``3`` given the same values for `df1`, + `df2` and the computed CDF value. + + >>> import numpy as np + >>> from scipy.special import fdtri, fdtr + >>> df1, df2 = 1, 2 + >>> x = 3 + >>> cdf_value = fdtr(df1, df2, x) + >>> fdtri(df1, df2, cdf_value) + 3.000000000000006 + + Calculate the function at several points by providing a NumPy array for + `x`. + + >>> x = np.array([0.1, 0.4, 0.7]) + >>> fdtri(1, 2, x) + array([0.02020202, 0.38095238, 1.92156863]) + + Plot the function for several parameter sets. + + >>> import matplotlib.pyplot as plt + >>> dfn_parameters = [50, 10, 1, 50] + >>> dfd_parameters = [0.5, 1, 1, 5] + >>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot'] + >>> parameters_list = list(zip(dfn_parameters, dfd_parameters, + ... 
linestyles)) + >>> x = np.linspace(0, 1, 1000) + >>> fig, ax = plt.subplots() + >>> for parameter_set in parameters_list: + ... dfn, dfd, style = parameter_set + ... fdtri_vals = fdtri(dfn, dfd, x) + ... ax.plot(x, fdtri_vals, label=rf"$d_n={dfn},\, d_d={dfd}$", + ... ls=style) + >>> ax.legend() + >>> ax.set_xlabel("$x$") + >>> title = "F distribution inverse cumulative distribution function" + >>> ax.set_title(title) + >>> ax.set_ylim(0, 30) + >>> plt.show() + + The F distribution is also available as `scipy.stats.f`. Using `fdtri` + directly can be much faster than calling the ``ppf`` method of + `scipy.stats.f`, especially for small arrays or individual values. + To get the same results one must use the following parametrization: + ``stats.f(dfn, dfd).ppf(x)=fdtri(dfn, dfd, x)``. + + >>> from scipy.stats import f + >>> dfn, dfd = 1, 2 + >>> x = 0.7 + >>> fdtri_res = fdtri(dfn, dfd, x) # this will often be faster than below + >>> f_dist_res = f(dfn, dfd).ppf(x) + >>> f_dist_res == fdtri_res # test that results are equal + True + """) + +add_newdoc("fdtridfd", + """ + fdtridfd(dfn, p, x, out=None) + + Inverse to `fdtr` vs dfd + + Finds the F density argument dfd such that ``fdtr(dfn, dfd, x) == p``. + + Parameters + ---------- + dfn : array_like + First parameter (positive float). + p : array_like + Cumulative probability, in [0, 1]. + x : array_like + Argument (nonnegative float). + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + dfd : scalar or ndarray + `dfd` such that ``fdtr(dfn, dfd, x) == p``. + + See Also + -------- + fdtr : F distribution cumulative distribution function + fdtrc : F distribution survival function + fdtri : F distribution quantile function + scipy.stats.f : F distribution + + Examples + -------- + Compute the F distribution cumulative distribution function for one + parameter set. 
+ + >>> from scipy.special import fdtridfd, fdtr + >>> dfn, dfd, x = 10, 5, 2 + >>> cdf_value = fdtr(dfn, dfd, x) + >>> cdf_value + 0.7700248806501017 + + Verify that `fdtridfd` recovers the original value for `dfd`: + + >>> fdtridfd(dfn, cdf_value, x) + 5.0 + """) + +''' +commented out as fdtridfn seems to have bugs and is not in functions.json +see: https://github.com/scipy/scipy/pull/15622#discussion_r811440983 + +add_newdoc( + "fdtridfn", + """ + fdtridfn(p, dfd, x, out=None) + + Inverse to `fdtr` vs dfn + + finds the F density argument dfn such that ``fdtr(dfn, dfd, x) == p``. + + + Parameters + ---------- + p : array_like + Cumulative probability, in [0, 1]. + dfd : array_like + Second parameter (positive float). + x : array_like + Argument (nonnegative float). + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + dfn : scalar or ndarray + `dfn` such that ``fdtr(dfn, dfd, x) == p``. + + See Also + -------- + fdtr, fdtrc, fdtri, fdtridfd + + + """) +''' + +add_newdoc("fresnel", + r""" + fresnel(z, out=None) + + Fresnel integrals. + + The Fresnel integrals are defined as + + .. math:: + + S(z) &= \int_0^z \sin(\pi t^2 /2) dt \\ + C(z) &= \int_0^z \cos(\pi t^2 /2) dt. + + See [dlmf]_ for details. + + Parameters + ---------- + z : array_like + Real or complex valued argument + out : 2-tuple of ndarrays, optional + Optional output arrays for the function results + + Returns + ------- + S, C : 2-tuple of scalar or ndarray + Values of the Fresnel integrals + + See Also + -------- + fresnel_zeros : zeros of the Fresnel integrals + + References + ---------- + .. [dlmf] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/7.2#iii + + Examples + -------- + >>> import numpy as np + >>> import scipy.special as sc + + As z goes to infinity along the real axis, S and C converge to 0.5. 
+ + >>> S, C = sc.fresnel([0.1, 1, 10, 100, np.inf]) + >>> S + array([0.00052359, 0.43825915, 0.46816998, 0.4968169 , 0.5 ]) + >>> C + array([0.09999753, 0.7798934 , 0.49989869, 0.4999999 , 0.5 ]) + + They are related to the error function `erf`. + + >>> z = np.array([1, 2, 3, 4]) + >>> zeta = 0.5 * np.sqrt(np.pi) * (1 - 1j) * z + >>> S, C = sc.fresnel(z) + >>> C + 1j*S + array([0.7798934 +0.43825915j, 0.48825341+0.34341568j, + 0.60572079+0.496313j , 0.49842603+0.42051575j]) + >>> 0.5 * (1 + 1j) * sc.erf(zeta) + array([0.7798934 +0.43825915j, 0.48825341+0.34341568j, + 0.60572079+0.496313j , 0.49842603+0.42051575j]) + + """) + +add_newdoc("gamma", + r""" + gamma(z, out=None) + + gamma function. + + The gamma function is defined as + + .. math:: + + \Gamma(z) = \int_0^\infty t^{z-1} e^{-t} dt + + for :math:`\Re(z) > 0` and is extended to the rest of the complex + plane by analytic continuation. See [dlmf]_ for more details. + + Parameters + ---------- + z : array_like + Real or complex valued argument + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + Values of the gamma function + + Notes + ----- + The gamma function is often referred to as the generalized + factorial since :math:`\Gamma(n + 1) = n!` for natural numbers + :math:`n`. More generally it satisfies the recurrence relation + :math:`\Gamma(z + 1) = z \cdot \Gamma(z)` for complex :math:`z`, + which, combined with the fact that :math:`\Gamma(1) = 1`, implies + the above identity for :math:`z = n`. + + References + ---------- + .. [dlmf] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/5.2#E1 + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import gamma, factorial + + >>> gamma([0, 0.5, 1, 5]) + array([ inf, 1.77245385, 1. , 24. 
]) + + >>> z = 2.5 + 1j + >>> gamma(z) + (0.77476210455108352+0.70763120437959293j) + >>> gamma(z+1), z*gamma(z) # Recurrence property + ((1.2292740569981171+2.5438401155000685j), + (1.2292740569981158+2.5438401155000658j)) + + >>> gamma(0.5)**2 # gamma(0.5) = sqrt(pi) + 3.1415926535897927 + + Plot gamma(x) for real x + + >>> x = np.linspace(-3.5, 5.5, 2251) + >>> y = gamma(x) + + >>> import matplotlib.pyplot as plt + >>> plt.plot(x, y, 'b', alpha=0.6, label='gamma(x)') + >>> k = np.arange(1, 7) + >>> plt.plot(k, factorial(k-1), 'k*', alpha=0.6, + ... label='(x-1)!, x = 1, 2, ...') + >>> plt.xlim(-3.5, 5.5) + >>> plt.ylim(-10, 25) + >>> plt.grid() + >>> plt.xlabel('x') + >>> plt.legend(loc='lower right') + >>> plt.show() + + """) + +add_newdoc("gammainc", + r""" + gammainc(a, x, out=None) + + Regularized lower incomplete gamma function. + + It is defined as + + .. math:: + + P(a, x) = \frac{1}{\Gamma(a)} \int_0^x t^{a - 1}e^{-t} dt + + for :math:`a > 0` and :math:`x \geq 0`. See [dlmf]_ for details. + + Parameters + ---------- + a : array_like + Positive parameter + x : array_like + Nonnegative argument + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + Values of the lower incomplete gamma function + + See Also + -------- + gammaincc : regularized upper incomplete gamma function + gammaincinv : inverse of the regularized lower incomplete gamma function + gammainccinv : inverse of the regularized upper incomplete gamma function + + Notes + ----- + The function satisfies the relation ``gammainc(a, x) + + gammaincc(a, x) = 1`` where `gammaincc` is the regularized upper + incomplete gamma function. + + The implementation largely follows that of [boost]_. + + References + ---------- + .. [dlmf] NIST Digital Library of Mathematical functions + https://dlmf.nist.gov/8.2#E4 + .. [boost] Maddock et. 
al., "Incomplete Gamma Functions", + https://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html + + Examples + -------- + >>> import scipy.special as sc + + It is the CDF of the gamma distribution, so it starts at 0 and + monotonically increases to 1. + + >>> sc.gammainc(0.5, [0, 1, 10, 100]) + array([0. , 0.84270079, 0.99999226, 1. ]) + + It is equal to one minus the upper incomplete gamma function. + + >>> a, x = 0.5, 0.4 + >>> sc.gammainc(a, x) + 0.6289066304773024 + >>> 1 - sc.gammaincc(a, x) + 0.6289066304773024 + + """) + +add_newdoc("gammaincc", + r""" + gammaincc(a, x, out=None) + + Regularized upper incomplete gamma function. + + It is defined as + + .. math:: + + Q(a, x) = \frac{1}{\Gamma(a)} \int_x^\infty t^{a - 1}e^{-t} dt + + for :math:`a > 0` and :math:`x \geq 0`. See [dlmf]_ for details. + + Parameters + ---------- + a : array_like + Positive parameter + x : array_like + Nonnegative argument + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + Values of the upper incomplete gamma function + + See Also + -------- + gammainc : regularized lower incomplete gamma function + gammaincinv : inverse of the regularized lower incomplete gamma function + gammainccinv : inverse of the regularized upper incomplete gamma function + + Notes + ----- + The function satisfies the relation ``gammainc(a, x) + + gammaincc(a, x) = 1`` where `gammainc` is the regularized lower + incomplete gamma function. + + The implementation largely follows that of [boost]_. + + References + ---------- + .. [dlmf] NIST Digital Library of Mathematical functions + https://dlmf.nist.gov/8.2#E4 + .. [boost] Maddock et. 
al., "Incomplete Gamma Functions", + https://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html + + Examples + -------- + >>> import scipy.special as sc + + It is the survival function of the gamma distribution, so it + starts at 1 and monotonically decreases to 0. + + >>> sc.gammaincc(0.5, [0, 1, 10, 100, 1000]) + array([1.00000000e+00, 1.57299207e-01, 7.74421643e-06, 2.08848758e-45, + 0.00000000e+00]) + + It is equal to one minus the lower incomplete gamma function. + + >>> a, x = 0.5, 0.4 + >>> sc.gammaincc(a, x) + 0.37109336952269756 + >>> 1 - sc.gammainc(a, x) + 0.37109336952269756 + + """) + +add_newdoc("gammainccinv", + """ + gammainccinv(a, y, out=None) + + Inverse of the regularized upper incomplete gamma function. + + Given an input :math:`y` between 0 and 1, returns :math:`x` such + that :math:`y = Q(a, x)`. Here :math:`Q` is the regularized upper + incomplete gamma function; see `gammaincc`. This is well-defined + because the upper incomplete gamma function is monotonic as can + be seen from its definition in [dlmf]_. + + Parameters + ---------- + a : array_like + Positive parameter + y : array_like + Argument between 0 and 1, inclusive + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + Values of the inverse of the upper incomplete gamma function + + See Also + -------- + gammaincc : regularized upper incomplete gamma function + gammainc : regularized lower incomplete gamma function + gammaincinv : inverse of the regularized lower incomplete gamma function + + References + ---------- + .. [dlmf] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/8.2#E4 + + Examples + -------- + >>> import scipy.special as sc + + It starts at infinity and monotonically decreases to 0. + + >>> sc.gammainccinv(0.5, [0, 0.1, 0.5, 1]) + array([ inf, 1.35277173, 0.22746821, 0. ]) + + It inverts the upper incomplete gamma function. 
+ + >>> a, x = 0.5, [0, 0.1, 0.5, 1] + >>> sc.gammaincc(a, sc.gammainccinv(a, x)) + array([0. , 0.1, 0.5, 1. ]) + + >>> a, x = 0.5, [0, 10, 50] + >>> sc.gammainccinv(a, sc.gammaincc(a, x)) + array([ 0., 10., 50.]) + + """) + +add_newdoc("gammaincinv", + """ + gammaincinv(a, y, out=None) + + Inverse to the regularized lower incomplete gamma function. + + Given an input :math:`y` between 0 and 1, returns :math:`x` such + that :math:`y = P(a, x)`. Here :math:`P` is the regularized lower + incomplete gamma function; see `gammainc`. This is well-defined + because the lower incomplete gamma function is monotonic as can be + seen from its definition in [dlmf]_. + + Parameters + ---------- + a : array_like + Positive parameter + y : array_like + Parameter between 0 and 1, inclusive + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + Values of the inverse of the lower incomplete gamma function + + See Also + -------- + gammainc : regularized lower incomplete gamma function + gammaincc : regularized upper incomplete gamma function + gammainccinv : inverse of the regularized upper incomplete gamma function + + References + ---------- + .. [dlmf] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/8.2#E4 + + Examples + -------- + >>> import scipy.special as sc + + It starts at 0 and monotonically increases to infinity. + + >>> sc.gammaincinv(0.5, [0, 0.1 ,0.5, 1]) + array([0. , 0.00789539, 0.22746821, inf]) + + It inverts the lower incomplete gamma function. + + >>> a, x = 0.5, [0, 0.1, 0.5, 1] + >>> sc.gammainc(a, sc.gammaincinv(a, x)) + array([0. , 0.1, 0.5, 1. ]) + + >>> a, x = 0.5, [0, 10, 25] + >>> sc.gammaincinv(a, sc.gammainc(a, x)) + array([ 0. , 10. , 25.00001465]) + + """) + +add_newdoc("gammaln", + r""" + gammaln(x, out=None) + + Logarithm of the absolute value of the gamma function. + + Defined as + + .. 
math:: + + \ln(\lvert\Gamma(x)\rvert) + + where :math:`\Gamma` is the gamma function. For more details on + the gamma function, see [dlmf]_. + + Parameters + ---------- + x : array_like + Real argument + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + Values of the log of the absolute value of gamma + + See Also + -------- + gammasgn : sign of the gamma function + loggamma : principal branch of the logarithm of the gamma function + + Notes + ----- + It is the same function as the Python standard library function + :func:`math.lgamma`. + + When used in conjunction with `gammasgn`, this function is useful + for working in logspace on the real axis without having to deal + with complex numbers via the relation ``exp(gammaln(x)) = + gammasgn(x) * gamma(x)``. + + For complex-valued log-gamma, use `loggamma` instead of `gammaln`. + + References + ---------- + .. [dlmf] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/5 + + Examples + -------- + >>> import numpy as np + >>> import scipy.special as sc + + It has two positive zeros. + + >>> sc.gammaln([1, 2]) + array([0., 0.]) + + It has poles at nonpositive integers. + + >>> sc.gammaln([0, -1, -2, -3, -4]) + array([inf, inf, inf, inf, inf]) + + It asymptotically approaches ``x * log(x)`` (Stirling's formula). + + >>> x = np.array([1e10, 1e20, 1e40, 1e80]) + >>> sc.gammaln(x) + array([2.20258509e+11, 4.50517019e+21, 9.11034037e+41, 1.83206807e+82]) + >>> x * np.log(x) + array([2.30258509e+11, 4.60517019e+21, 9.21034037e+41, 1.84206807e+82]) + + """) + +add_newdoc("gammasgn", + r""" + gammasgn(x, out=None) + + Sign of the gamma function. + + It is defined as + + .. math:: + + \text{gammasgn}(x) = + \begin{cases} + +1 & \Gamma(x) > 0 \\ + -1 & \Gamma(x) < 0 + \end{cases} + + where :math:`\Gamma` is the gamma function; see `gamma`. This + definition is complete since the gamma function is never zero; + see the discussion after [dlmf]_. 
+ + Parameters + ---------- + x : array_like + Real argument + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + Sign of the gamma function + + See Also + -------- + gamma : the gamma function + gammaln : log of the absolute value of the gamma function + loggamma : analytic continuation of the log of the gamma function + + Notes + ----- + The gamma function can be computed as ``gammasgn(x) * + np.exp(gammaln(x))``. + + References + ---------- + .. [dlmf] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/5.2#E1 + + Examples + -------- + >>> import numpy as np + >>> import scipy.special as sc + + It is 1 for `x > 0`. + + >>> sc.gammasgn([1, 2, 3, 4]) + array([1., 1., 1., 1.]) + + It alternates between -1 and 1 for negative integers. + + >>> sc.gammasgn([-0.5, -1.5, -2.5, -3.5]) + array([-1., 1., -1., 1.]) + + It can be used to compute the gamma function. + + >>> x = [1.5, 0.5, -0.5, -1.5] + >>> sc.gammasgn(x) * np.exp(sc.gammaln(x)) + array([ 0.88622693, 1.77245385, -3.5449077 , 2.3632718 ]) + >>> sc.gamma(x) + array([ 0.88622693, 1.77245385, -3.5449077 , 2.3632718 ]) + + """) + +add_newdoc("gdtr", + r""" + gdtr(a, b, x, out=None) + + Gamma distribution cumulative distribution function. + + Returns the integral from zero to `x` of the gamma probability density + function, + + .. math:: + + F = \int_0^x \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt, + + where :math:`\Gamma` is the gamma function. + + Parameters + ---------- + a : array_like + The rate parameter of the gamma distribution, sometimes denoted + :math:`\beta` (float). It is also the reciprocal of the scale + parameter :math:`\theta`. + b : array_like + The shape parameter of the gamma distribution, sometimes denoted + :math:`\alpha` (float). + x : array_like + The quantile (upper limit of integration; float). 
+ out : ndarray, optional + Optional output array for the function values + + Returns + ------- + F : scalar or ndarray + The CDF of the gamma distribution with parameters `a` and `b` + evaluated at `x`. + + See Also + -------- + gdtrc : 1 - CDF of the gamma distribution. + scipy.stats.gamma: Gamma distribution + + Notes + ----- + The evaluation is carried out using the relation to the incomplete gamma + integral (regularized gamma function). + + Wrapper for the Cephes [1]_ routine `gdtr`. Calling `gdtr` directly can + improve performance compared to the ``cdf`` method of `scipy.stats.gamma` + (see last example below). + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + Compute the function for ``a=1``, ``b=2`` at ``x=5``. + + >>> import numpy as np + >>> from scipy.special import gdtr + >>> import matplotlib.pyplot as plt + >>> gdtr(1., 2., 5.) + 0.9595723180054873 + + Compute the function for ``a=1`` and ``b=2`` at several points by + providing a NumPy array for `x`. + + >>> xvalues = np.array([1., 2., 3., 4]) + >>> gdtr(1., 1., xvalues) + array([0.63212056, 0.86466472, 0.95021293, 0.98168436]) + + `gdtr` can evaluate different parameter sets by providing arrays with + broadcasting compatible shapes for `a`, `b` and `x`. Here we compute the + function for three different `a` at four positions `x` and ``b=3``, + resulting in a 3x4 array. + + >>> a = np.array([[0.5], [1.5], [2.5]]) + >>> x = np.array([1., 2., 3., 4]) + >>> a.shape, x.shape + ((3, 1), (4,)) + + >>> gdtr(a, 3., x) + array([[0.01438768, 0.0803014 , 0.19115317, 0.32332358], + [0.19115317, 0.57680992, 0.82642193, 0.9380312 ], + [0.45618688, 0.87534798, 0.97974328, 0.9972306 ]]) + + Plot the function for four different parameter sets. 
+ + >>> a_parameters = [0.3, 1, 2, 6] + >>> b_parameters = [2, 10, 15, 20] + >>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot'] + >>> parameters_list = list(zip(a_parameters, b_parameters, linestyles)) + >>> x = np.linspace(0, 30, 1000) + >>> fig, ax = plt.subplots() + >>> for parameter_set in parameters_list: + ... a, b, style = parameter_set + ... gdtr_vals = gdtr(a, b, x) + ... ax.plot(x, gdtr_vals, label=fr"$a= {a},\, b={b}$", ls=style) + >>> ax.legend() + >>> ax.set_xlabel("$x$") + >>> ax.set_title("Gamma distribution cumulative distribution function") + >>> plt.show() + + The gamma distribution is also available as `scipy.stats.gamma`. Using + `gdtr` directly can be much faster than calling the ``cdf`` method of + `scipy.stats.gamma`, especially for small arrays or individual values. + To get the same results one must use the following parametrization: + ``stats.gamma(b, scale=1/a).cdf(x)=gdtr(a, b, x)``. + + >>> from scipy.stats import gamma + >>> a = 2. + >>> b = 3 + >>> x = 1. + >>> gdtr_result = gdtr(a, b, x) # this will often be faster than below + >>> gamma_dist_result = gamma(b, scale=1/a).cdf(x) + >>> gdtr_result == gamma_dist_result # test that results are equal + True + """) + +add_newdoc("gdtrc", + r""" + gdtrc(a, b, x, out=None) + + Gamma distribution survival function. + + Integral from `x` to infinity of the gamma probability density function, + + .. math:: + + F = \int_x^\infty \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt, + + where :math:`\Gamma` is the gamma function. + + Parameters + ---------- + a : array_like + The rate parameter of the gamma distribution, sometimes denoted + :math:`\beta` (float). It is also the reciprocal of the scale + parameter :math:`\theta`. + b : array_like + The shape parameter of the gamma distribution, sometimes denoted + :math:`\alpha` (float). + x : array_like + The quantile (lower limit of integration; float). 
+ out : ndarray, optional + Optional output array for the function values + + Returns + ------- + F : scalar or ndarray + The survival function of the gamma distribution with parameters `a` + and `b` evaluated at `x`. + + See Also + -------- + gdtr: Gamma distribution cumulative distribution function + scipy.stats.gamma: Gamma distribution + gdtrix + + Notes + ----- + The evaluation is carried out using the relation to the incomplete gamma + integral (regularized gamma function). + + Wrapper for the Cephes [1]_ routine `gdtrc`. Calling `gdtrc` directly can + improve performance compared to the ``sf`` method of `scipy.stats.gamma` + (see last example below). + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + Compute the function for ``a=1`` and ``b=2`` at ``x=5``. + + >>> import numpy as np + >>> from scipy.special import gdtrc + >>> import matplotlib.pyplot as plt + >>> gdtrc(1., 2., 5.) + 0.04042768199451279 + + Compute the function for ``a=1``, ``b=2`` at several points by providing + a NumPy array for `x`. + + >>> xvalues = np.array([1., 2., 3., 4]) + >>> gdtrc(1., 1., xvalues) + array([0.36787944, 0.13533528, 0.04978707, 0.01831564]) + + `gdtrc` can evaluate different parameter sets by providing arrays with + broadcasting compatible shapes for `a`, `b` and `x`. Here we compute the + function for three different `a` at four positions `x` and ``b=3``, + resulting in a 3x4 array. + + >>> a = np.array([[0.5], [1.5], [2.5]]) + >>> x = np.array([1., 2., 3., 4]) + >>> a.shape, x.shape + ((3, 1), (4,)) + + >>> gdtrc(a, 3., x) + array([[0.98561232, 0.9196986 , 0.80884683, 0.67667642], + [0.80884683, 0.42319008, 0.17357807, 0.0619688 ], + [0.54381312, 0.12465202, 0.02025672, 0.0027694 ]]) + + Plot the function for four different parameter sets. 
+ + >>> a_parameters = [0.3, 1, 2, 6] + >>> b_parameters = [2, 10, 15, 20] + >>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot'] + >>> parameters_list = list(zip(a_parameters, b_parameters, linestyles)) + >>> x = np.linspace(0, 30, 1000) + >>> fig, ax = plt.subplots() + >>> for parameter_set in parameters_list: + ... a, b, style = parameter_set + ... gdtrc_vals = gdtrc(a, b, x) + ... ax.plot(x, gdtrc_vals, label=fr"$a= {a},\, b={b}$", ls=style) + >>> ax.legend() + >>> ax.set_xlabel("$x$") + >>> ax.set_title("Gamma distribution survival function") + >>> plt.show() + + The gamma distribution is also available as `scipy.stats.gamma`. + Using `gdtrc` directly can be much faster than calling the ``sf`` method + of `scipy.stats.gamma`, especially for small arrays or individual + values. To get the same results one must use the following parametrization: + ``stats.gamma(b, scale=1/a).sf(x)=gdtrc(a, b, x)``. + + >>> from scipy.stats import gamma + >>> a = 2 + >>> b = 3 + >>> x = 1. + >>> gdtrc_result = gdtrc(a, b, x) # this will often be faster than below + >>> gamma_dist_result = gamma(b, scale=1/a).sf(x) + >>> gdtrc_result == gamma_dist_result # test that results are equal + True + """) + +add_newdoc("gdtria", + """ + gdtria(p, b, x, out=None) + + Inverse of `gdtr` vs a. + + Returns the inverse with respect to the parameter `a` of ``p = + gdtr(a, b, x)``, the cumulative distribution function of the gamma + distribution. + + Parameters + ---------- + p : array_like + Probability values. + b : array_like + `b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter + of the gamma distribution. + x : array_like + Nonnegative real values, from the domain of the gamma distribution. + out : ndarray, optional + If a fourth argument is given, it must be a numpy.ndarray whose size + matches the broadcast result of `a`, `b` and `x`. `out` is then the + array returned by the function. 
+ + Returns + ------- + a : scalar or ndarray + Values of the `a` parameter such that `p = gdtr(a, b, x)`. `1/a` + is the "scale" parameter of the gamma distribution. + + See Also + -------- + gdtr : CDF of the gamma distribution. + gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`. + gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`. + + Notes + ----- + Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`. + + The cumulative distribution function `p` is computed using a routine by + DiDinato and Morris [2]_. Computation of `a` involves a search for a value + that produces the desired value of `p`. The search relies on the + monotonicity of `p` with `a`. + + References + ---------- + .. [1] Barry Brown, James Lovato, and Kathy Russell, + CDFLIB: Library of Fortran Routines for Cumulative Distribution + Functions, Inverses, and Other Parameters. + .. [2] DiDinato, A. R. and Morris, A. H., + Computation of the incomplete gamma function ratios and their + inverse. ACM Trans. Math. Softw. 12 (1986), 377-393. + + Examples + -------- + First evaluate `gdtr`. + + >>> from scipy.special import gdtr, gdtria + >>> p = gdtr(1.2, 3.4, 5.6) + >>> print(p) + 0.94378087442 + + Verify the inverse. + + >>> gdtria(p, 3.4, 5.6) + 1.2 + """) + +add_newdoc("gdtrib", + """ + gdtrib(a, p, x, out=None) + + Inverse of `gdtr` vs b. + + Returns the inverse with respect to the parameter `b` of ``p = + gdtr(a, b, x)``, the cumulative distribution function of the gamma + distribution. + + Parameters + ---------- + a : array_like + `a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale" + parameter of the gamma distribution. + p : array_like + Probability values. + x : array_like + Nonnegative real values, from the domain of the gamma distribution. + out : ndarray, optional + If a fourth argument is given, it must be a numpy.ndarray whose size + matches the broadcast result of `a`, `b` and `x`. `out` is then the + array returned by the function. 
+ + Returns + ------- + b : scalar or ndarray + Values of the `b` parameter such that `p = gdtr(a, b, x)`. `b` is + the "shape" parameter of the gamma distribution. + + See Also + -------- + gdtr : CDF of the gamma distribution. + gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`. + gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`. + + Notes + ----- + Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`. + + The cumulative distribution function `p` is computed using a routine by + DiDinato and Morris [2]_. Computation of `b` involves a search for a value + that produces the desired value of `p`. The search relies on the + monotonicity of `p` with `b`. + + References + ---------- + .. [1] Barry Brown, James Lovato, and Kathy Russell, + CDFLIB: Library of Fortran Routines for Cumulative Distribution + Functions, Inverses, and Other Parameters. + .. [2] DiDinato, A. R. and Morris, A. H., + Computation of the incomplete gamma function ratios and their + inverse. ACM Trans. Math. Softw. 12 (1986), 377-393. + + Examples + -------- + First evaluate `gdtr`. + + >>> from scipy.special import gdtr, gdtrib + >>> p = gdtr(1.2, 3.4, 5.6) + >>> print(p) + 0.94378087442 + + Verify the inverse. + + >>> gdtrib(1.2, p, 5.6) + 3.3999999999723882 + """) + +add_newdoc("gdtrix", + """ + gdtrix(a, b, p, out=None) + + Inverse of `gdtr` vs x. + + Returns the inverse with respect to the parameter `x` of ``p = + gdtr(a, b, x)``, the cumulative distribution function of the gamma + distribution. This is also known as the pth quantile of the + distribution. + + Parameters + ---------- + a : array_like + `a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale" + parameter of the gamma distribution. + b : array_like + `b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter + of the gamma distribution. + p : array_like + Probability values. 
+    out : ndarray, optional
+        If a fourth argument is given, it must be a numpy.ndarray whose size
+        matches the broadcast result of `a`, `b` and `p`. `out` is then the
+        array returned by the function.
+
+    Returns
+    -------
+    x : scalar or ndarray
+        Values of the `x` parameter such that `p = gdtr(a, b, x)`.
+
+    See Also
+    --------
+    gdtr : CDF of the gamma distribution.
+    gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
+    gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
+
+    Notes
+    -----
+    Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
+
+    The cumulative distribution function `p` is computed using a routine by
+    DiDinato and Morris [2]_. Computation of `x` involves a search for a value
+    that produces the desired value of `p`. The search relies on the
+    monotonicity of `p` with `x`.
+
+    References
+    ----------
+    .. [1] Barry Brown, James Lovato, and Kathy Russell,
+           CDFLIB: Library of Fortran Routines for Cumulative Distribution
+           Functions, Inverses, and Other Parameters.
+    .. [2] DiDinato, A. R. and Morris, A. H.,
+           Computation of the incomplete gamma function ratios and their
+           inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
+
+    Examples
+    --------
+    First evaluate `gdtr`.
+
+    >>> from scipy.special import gdtr, gdtrix
+    >>> p = gdtr(1.2, 3.4, 5.6)
+    >>> print(p)
+    0.94378087442
+
+    Verify the inverse.
+
+    >>> gdtrix(1.2, 3.4, p)
+    5.5999999999999996
+    """)
+
+add_newdoc("hankel1",
+    r"""
+    hankel1(v, z, out=None)
+
+    Hankel function of the first kind
+
+    Parameters
+    ----------
+    v : array_like
+        Order (float).
+    z : array_like
+        Argument (float or complex).
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the Hankel function of the first kind.
+
+    See Also
+    --------
+    hankel1e
+        This function with leading exponential behavior stripped off.
+ + Notes + ----- + A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the + computation using the relation, + + .. math:: H^{(1)}_v(z) = + \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2)) + + where :math:`K_v` is the modified Bessel function of the second kind. + For negative orders, the relation + + .. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v) + + is used. + + References + ---------- + .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions + of a Complex Argument and Nonnegative Order", + http://netlib.org/amos/ + """) + +add_newdoc("hankel1e", + r""" + hankel1e(v, z, out=None) + + Exponentially scaled Hankel function of the first kind + + Defined as:: + + hankel1e(v, z) = hankel1(v, z) * exp(-1j * z) + + Parameters + ---------- + v : array_like + Order (float). + z : array_like + Argument (float or complex). + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + Values of the exponentially scaled Hankel function. + + Notes + ----- + A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the + computation using the relation, + + .. math:: H^{(1)}_v(z) = + \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2)) + + where :math:`K_v` is the modified Bessel function of the second kind. + For negative orders, the relation + + .. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v) + + is used. + + References + ---------- + .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions + of a Complex Argument and Nonnegative Order", + http://netlib.org/amos/ + """) + +add_newdoc("hankel2", + r""" + hankel2(v, z, out=None) + + Hankel function of the second kind + + Parameters + ---------- + v : array_like + Order (float). + z : array_like + Argument (float or complex). 
+ out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + Values of the Hankel function of the second kind. + + See Also + -------- + hankel2e : this function with leading exponential behavior stripped off. + + Notes + ----- + A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the + computation using the relation, + + .. math:: H^{(2)}_v(z) = + -\frac{2}{\imath\pi} \exp(\imath \pi v/2) K_v(z \exp(\imath\pi/2)) + + where :math:`K_v` is the modified Bessel function of the second kind. + For negative orders, the relation + + .. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v) + + is used. + + References + ---------- + .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions + of a Complex Argument and Nonnegative Order", + http://netlib.org/amos/ + """) + +add_newdoc("hankel2e", + r""" + hankel2e(v, z, out=None) + + Exponentially scaled Hankel function of the second kind + + Defined as:: + + hankel2e(v, z) = hankel2(v, z) * exp(1j * z) + + Parameters + ---------- + v : array_like + Order (float). + z : array_like + Argument (float or complex). + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + Values of the exponentially scaled Hankel function of the second kind. + + Notes + ----- + A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the + computation using the relation, + + .. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi} + \exp(\frac{\imath \pi v}{2}) K_v(z exp(\frac{\imath\pi}{2})) + + where :math:`K_v` is the modified Bessel function of the second kind. + For negative orders, the relation + + .. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v) + + is used. + + References + ---------- + .. [1] Donald E. 
Amos, "AMOS, A Portable Package for Bessel Functions + of a Complex Argument and Nonnegative Order", + http://netlib.org/amos/ + + """) + +add_newdoc("huber", + r""" + huber(delta, r, out=None) + + Huber loss function. + + .. math:: \text{huber}(\delta, r) = \begin{cases} \infty & \delta < 0 \\ + \frac{1}{2}r^2 & 0 \le \delta, | r | \le \delta \\ + \delta ( |r| - \frac{1}{2}\delta ) & \text{otherwise} \end{cases} + + Parameters + ---------- + delta : ndarray + Input array, indicating the quadratic vs. linear loss changepoint. + r : ndarray + Input array, possibly representing residuals. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + The computed Huber loss function values. + + See Also + -------- + pseudo_huber : smooth approximation of this function + + Notes + ----- + `huber` is useful as a loss function in robust statistics or machine + learning to reduce the influence of outliers as compared to the common + squared error loss, residuals with a magnitude higher than `delta` are + not squared [1]_. + + Typically, `r` represents residuals, the difference + between a model prediction and data. Then, for :math:`|r|\leq\delta`, + `huber` resembles the squared error and for :math:`|r|>\delta` the + absolute error. This way, the Huber loss often achieves + a fast convergence in model fitting for small residuals like the squared + error loss function and still reduces the influence of outliers + (:math:`|r|>\delta`) like the absolute error loss. As :math:`\delta` is + the cutoff between squared and absolute error regimes, it has + to be tuned carefully for each problem. `huber` is also + convex, making it suitable for gradient based optimization. + + .. versionadded:: 0.15.0 + + References + ---------- + .. [1] Peter Huber. "Robust Estimation of a Location Parameter", + 1964. Annals of Statistics. 53 (1): 73 - 101. + + Examples + -------- + Import all necessary modules. 
+ + >>> import numpy as np + >>> from scipy.special import huber + >>> import matplotlib.pyplot as plt + + Compute the function for ``delta=1`` at ``r=2`` + + >>> huber(1., 2.) + 1.5 + + Compute the function for different `delta` by providing a NumPy array or + list for `delta`. + + >>> huber([1., 3., 5.], 4.) + array([3.5, 7.5, 8. ]) + + Compute the function at different points by providing a NumPy array or + list for `r`. + + >>> huber(2., np.array([1., 1.5, 3.])) + array([0.5 , 1.125, 4. ]) + + The function can be calculated for different `delta` and `r` by + providing arrays for both with compatible shapes for broadcasting. + + >>> r = np.array([1., 2.5, 8., 10.]) + >>> deltas = np.array([[1.], [5.], [9.]]) + >>> print(r.shape, deltas.shape) + (4,) (3, 1) + + >>> huber(deltas, r) + array([[ 0.5 , 2. , 7.5 , 9.5 ], + [ 0.5 , 3.125, 27.5 , 37.5 ], + [ 0.5 , 3.125, 32. , 49.5 ]]) + + Plot the function for different `delta`. + + >>> x = np.linspace(-4, 4, 500) + >>> deltas = [1, 2, 3] + >>> linestyles = ["dashed", "dotted", "dashdot"] + >>> fig, ax = plt.subplots() + >>> combined_plot_parameters = list(zip(deltas, linestyles)) + >>> for delta, style in combined_plot_parameters: + ... ax.plot(x, huber(delta, x), label=fr"$\delta={delta}$", ls=style) + >>> ax.legend(loc="upper center") + >>> ax.set_xlabel("$x$") + >>> ax.set_title(r"Huber loss function $h_{\delta}(x)$") + >>> ax.set_xlim(-4, 4) + >>> ax.set_ylim(0, 8) + >>> plt.show() + """) + +add_newdoc("hyp0f1", + r""" + hyp0f1(v, z, out=None) + + Confluent hypergeometric limit function 0F1. + + Parameters + ---------- + v : array_like + Real-valued parameter + z : array_like + Real- or complex-valued argument + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + The confluent hypergeometric limit function + + Notes + ----- + This function is defined as: + + .. math:: _0F_1(v, z) = \sum_{k=0}^{\infty}\frac{z^k}{(v)_k k!}. 
+ + It's also the limit as :math:`q \to \infty` of :math:`_1F_1(q; v; z/q)`, + and satisfies the differential equation :math:`f''(z) + vf'(z) = + f(z)`. See [1]_ for more information. + + References + ---------- + .. [1] Wolfram MathWorld, "Confluent Hypergeometric Limit Function", + http://mathworld.wolfram.com/ConfluentHypergeometricLimitFunction.html + + Examples + -------- + >>> import numpy as np + >>> import scipy.special as sc + + It is one when `z` is zero. + + >>> sc.hyp0f1(1, 0) + 1.0 + + It is the limit of the confluent hypergeometric function as `q` + goes to infinity. + + >>> q = np.array([1, 10, 100, 1000]) + >>> v = 1 + >>> z = 1 + >>> sc.hyp1f1(q, v, z / q) + array([2.71828183, 2.31481985, 2.28303778, 2.27992985]) + >>> sc.hyp0f1(v, z) + 2.2795853023360673 + + It is related to Bessel functions. + + >>> n = 1 + >>> x = np.linspace(0, 1, 5) + >>> sc.jv(n, x) + array([0. , 0.12402598, 0.24226846, 0.3492436 , 0.44005059]) + >>> (0.5 * x)**n / sc.factorial(n) * sc.hyp0f1(n + 1, -0.25 * x**2) + array([0. , 0.12402598, 0.24226846, 0.3492436 , 0.44005059]) + + """) + +add_newdoc("hyp1f1", + r""" + hyp1f1(a, b, x, out=None) + + Confluent hypergeometric function 1F1. + + The confluent hypergeometric function is defined by the series + + .. math:: + + {}_1F_1(a; b; x) = \sum_{k = 0}^\infty \frac{(a)_k}{(b)_k k!} x^k. + + See [dlmf]_ for more details. Here :math:`(\cdot)_k` is the + Pochhammer symbol; see `poch`. + + Parameters + ---------- + a, b : array_like + Real parameters + x : array_like + Real or complex argument + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + Values of the confluent hypergeometric function + + See Also + -------- + hyperu : another confluent hypergeometric function + hyp0f1 : confluent hypergeometric limit function + hyp2f1 : Gaussian hypergeometric function + + References + ---------- + .. 
[dlmf] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/13.2#E2 + + Examples + -------- + >>> import numpy as np + >>> import scipy.special as sc + + It is one when `x` is zero: + + >>> sc.hyp1f1(0.5, 0.5, 0) + 1.0 + + It is singular when `b` is a nonpositive integer. + + >>> sc.hyp1f1(0.5, -1, 0) + inf + + It is a polynomial when `a` is a nonpositive integer. + + >>> a, b, x = -1, 0.5, np.array([1.0, 2.0, 3.0, 4.0]) + >>> sc.hyp1f1(a, b, x) + array([-1., -3., -5., -7.]) + >>> 1 + (a / b) * x + array([-1., -3., -5., -7.]) + + It reduces to the exponential function when `a = b`. + + >>> sc.hyp1f1(2, 2, [1, 2, 3, 4]) + array([ 2.71828183, 7.3890561 , 20.08553692, 54.59815003]) + >>> np.exp([1, 2, 3, 4]) + array([ 2.71828183, 7.3890561 , 20.08553692, 54.59815003]) + + """) + +add_newdoc("hyp2f1", + r""" + hyp2f1(a, b, c, z, out=None) + + Gauss hypergeometric function 2F1(a, b; c; z) + + Parameters + ---------- + a, b, c : array_like + Arguments, should be real-valued. + z : array_like + Argument, real or complex. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + hyp2f1 : scalar or ndarray + The values of the gaussian hypergeometric function. + + See Also + -------- + hyp0f1 : confluent hypergeometric limit function. + hyp1f1 : Kummer's (confluent hypergeometric) function. + + Notes + ----- + This function is defined for :math:`|z| < 1` as + + .. math:: + + \mathrm{hyp2f1}(a, b, c, z) = \sum_{n=0}^\infty + \frac{(a)_n (b)_n}{(c)_n}\frac{z^n}{n!}, + + and defined on the rest of the complex z-plane by analytic + continuation [1]_. + Here :math:`(\cdot)_n` is the Pochhammer symbol; see `poch`. When + :math:`n` is an integer the result is a polynomial of degree :math:`n`. + + The implementation for complex values of ``z`` is described in [2]_, + except for ``z`` in the region defined by + + .. 
math:: + + 0.9 <= \left|z\right| < 1.1, + \left|1 - z\right| >= 0.9, + \mathrm{real}(z) >= 0 + + in which the implementation follows [4]_. + + References + ---------- + .. [1] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/15.2 + .. [2] S. Zhang and J.M. Jin, "Computation of Special Functions", Wiley 1996 + .. [3] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + .. [4] J.L. Lopez and N.M. Temme, "New series expansions of the Gauss + hypergeometric function", Adv Comput Math 39, 349-365 (2013). + https://doi.org/10.1007/s10444-012-9283-y + + Examples + -------- + >>> import numpy as np + >>> import scipy.special as sc + + It has poles when `c` is a negative integer. + + >>> sc.hyp2f1(1, 1, -2, 1) + inf + + It is a polynomial when `a` or `b` is a negative integer. + + >>> a, b, c = -1, 1, 1.5 + >>> z = np.linspace(0, 1, 5) + >>> sc.hyp2f1(a, b, c, z) + array([1. , 0.83333333, 0.66666667, 0.5 , 0.33333333]) + >>> 1 + a * b * z / c + array([1. , 0.83333333, 0.66666667, 0.5 , 0.33333333]) + + It is symmetric in `a` and `b`. + + >>> a = np.linspace(0, 1, 5) + >>> b = np.linspace(0, 1, 5) + >>> sc.hyp2f1(a, b, 1, 0.5) + array([1. , 1.03997334, 1.1803406 , 1.47074441, 2. ]) + >>> sc.hyp2f1(b, a, 1, 0.5) + array([1. , 1.03997334, 1.1803406 , 1.47074441, 2. ]) + + It contains many other functions as special cases. + + >>> z = 0.5 + >>> sc.hyp2f1(1, 1, 2, z) + 1.3862943611198901 + >>> -np.log(1 - z) / z + 1.3862943611198906 + + >>> sc.hyp2f1(0.5, 1, 1.5, z**2) + 1.098612288668109 + >>> np.log((1 + z) / (1 - z)) / (2 * z) + 1.0986122886681098 + + >>> sc.hyp2f1(0.5, 1, 1.5, -z**2) + 0.9272952180016117 + >>> np.arctan(z) / z + 0.9272952180016122 + + """) + +add_newdoc("hyperu", + r""" + hyperu(a, b, x, out=None) + + Confluent hypergeometric function U + + It is defined as the solution to the equation + + .. math:: + + x \frac{d^2w}{dx^2} + (b - x) \frac{dw}{dx} - aw = 0 + + which satisfies the property + + .. 
math:: + + U(a, b, x) \sim x^{-a} + + as :math:`x \to \infty`. See [dlmf]_ for more details. + + Parameters + ---------- + a, b : array_like + Real-valued parameters + x : array_like + Real-valued argument + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + Values of `U` + + References + ---------- + .. [dlmf] NIST Digital Library of Mathematics Functions + https://dlmf.nist.gov/13.2#E6 + + Examples + -------- + >>> import numpy as np + >>> import scipy.special as sc + + It has a branch cut along the negative `x` axis. + + >>> x = np.linspace(-0.1, -10, 5) + >>> sc.hyperu(1, 1, x) + array([nan, nan, nan, nan, nan]) + + It approaches zero as `x` goes to infinity. + + >>> x = np.array([1, 10, 100]) + >>> sc.hyperu(1, 1, x) + array([0.59634736, 0.09156333, 0.00990194]) + + It satisfies Kummer's transformation. + + >>> a, b, x = 2, 1, 1 + >>> sc.hyperu(a, b, x) + 0.1926947246463881 + >>> x**(1 - b) * sc.hyperu(a - b + 1, 2 - b, x) + 0.1926947246463881 + + """) + +add_newdoc("i0", + r""" + i0(x, out=None) + + Modified Bessel function of order 0. + + Defined as, + + .. math:: + I_0(x) = \sum_{k=0}^\infty \frac{(x^2/4)^k}{(k!)^2} = J_0(\imath x), + + where :math:`J_0` is the Bessel function of the first kind of order 0. + + Parameters + ---------- + x : array_like + Argument (float) + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + I : scalar or ndarray + Value of the modified Bessel function of order 0 at `x`. + + See Also + -------- + iv: Modified Bessel function of any order + i0e: Exponentially scaled modified Bessel function of order 0 + + Notes + ----- + The range is partitioned into the two intervals [0, 8] and (8, infinity). + Chebyshev polynomial expansions are employed in each interval. + + This function is a wrapper for the Cephes [1]_ routine `i0`. + + References + ---------- + .. 
[1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + Calculate the function at one point: + + >>> from scipy.special import i0 + >>> i0(1.) + 1.2660658777520082 + + Calculate at several points: + + >>> import numpy as np + >>> i0(np.array([-2., 0., 3.5])) + array([2.2795853 , 1. , 7.37820343]) + + Plot the function from -10 to 10. + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> x = np.linspace(-10., 10., 1000) + >>> y = i0(x) + >>> ax.plot(x, y) + >>> plt.show() + + """) + +add_newdoc("i0e", + """ + i0e(x, out=None) + + Exponentially scaled modified Bessel function of order 0. + + Defined as:: + + i0e(x) = exp(-abs(x)) * i0(x). + + Parameters + ---------- + x : array_like + Argument (float) + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + I : scalar or ndarray + Value of the exponentially scaled modified Bessel function of order 0 + at `x`. + + See Also + -------- + iv: Modified Bessel function of the first kind + i0: Modified Bessel function of order 0 + + Notes + ----- + The range is partitioned into the two intervals [0, 8] and (8, infinity). + Chebyshev polynomial expansions are employed in each interval. The + polynomial expansions used are the same as those in `i0`, but + they are not multiplied by the dominant exponential factor. + + This function is a wrapper for the Cephes [1]_ routine `i0e`. `i0e` + is useful for large arguments `x`: for these, `i0` quickly overflows. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + In the following example `i0` returns infinity whereas `i0e` still returns + a finite number. + + >>> from scipy.special import i0, i0e + >>> i0(1000.), i0e(1000.) 
+ (inf, 0.012617240455891257) + + Calculate the function at several points by providing a NumPy array or + list for `x`: + + >>> import numpy as np + >>> i0e(np.array([-2., 0., 3.])) + array([0.30850832, 1. , 0.24300035]) + + Plot the function from -10 to 10. + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> x = np.linspace(-10., 10., 1000) + >>> y = i0e(x) + >>> ax.plot(x, y) + >>> plt.show() + """) + +add_newdoc("i1", + r""" + i1(x, out=None) + + Modified Bessel function of order 1. + + Defined as, + + .. math:: + I_1(x) = \frac{1}{2}x \sum_{k=0}^\infty \frac{(x^2/4)^k}{k! (k + 1)!} + = -\imath J_1(\imath x), + + where :math:`J_1` is the Bessel function of the first kind of order 1. + + Parameters + ---------- + x : array_like + Argument (float) + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + I : scalar or ndarray + Value of the modified Bessel function of order 1 at `x`. + + See Also + -------- + iv: Modified Bessel function of the first kind + i1e: Exponentially scaled modified Bessel function of order 1 + + Notes + ----- + The range is partitioned into the two intervals [0, 8] and (8, infinity). + Chebyshev polynomial expansions are employed in each interval. + + This function is a wrapper for the Cephes [1]_ routine `i1`. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + Calculate the function at one point: + + >>> from scipy.special import i1 + >>> i1(1.) + 0.5651591039924851 + + Calculate the function at several points: + + >>> import numpy as np + >>> i1(np.array([-2., 0., 6.])) + array([-1.59063685, 0. , 61.34193678]) + + Plot the function between -10 and 10. 
+ + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> x = np.linspace(-10., 10., 1000) + >>> y = i1(x) + >>> ax.plot(x, y) + >>> plt.show() + + """) + +add_newdoc("i1e", + """ + i1e(x, out=None) + + Exponentially scaled modified Bessel function of order 1. + + Defined as:: + + i1e(x) = exp(-abs(x)) * i1(x) + + Parameters + ---------- + x : array_like + Argument (float) + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + I : scalar or ndarray + Value of the exponentially scaled modified Bessel function of order 1 + at `x`. + + See Also + -------- + iv: Modified Bessel function of the first kind + i1: Modified Bessel function of order 1 + + Notes + ----- + The range is partitioned into the two intervals [0, 8] and (8, infinity). + Chebyshev polynomial expansions are employed in each interval. The + polynomial expansions used are the same as those in `i1`, but + they are not multiplied by the dominant exponential factor. + + This function is a wrapper for the Cephes [1]_ routine `i1e`. `i1e` + is useful for large arguments `x`: for these, `i1` quickly overflows. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + In the following example `i1` returns infinity whereas `i1e` still returns + a finite number. + + >>> from scipy.special import i1, i1e + >>> i1(1000.), i1e(1000.) + (inf, 0.01261093025692863) + + Calculate the function at several points by providing a NumPy array or + list for `x`: + + >>> import numpy as np + >>> i1e(np.array([-2., 0., 6.])) + array([-0.21526929, 0. , 0.15205146]) + + Plot the function between -10 and 10. + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> x = np.linspace(-10., 10., 1000) + >>> y = i1e(x) + >>> ax.plot(x, y) + >>> plt.show() + """) + +add_newdoc("_igam_fac", + """ + Internal function, do not use. 
+ """) + +add_newdoc("it2i0k0", + r""" + it2i0k0(x, out=None) + + Integrals related to modified Bessel functions of order 0. + + Computes the integrals + + .. math:: + + \int_0^x \frac{I_0(t) - 1}{t} dt \\ + \int_x^\infty \frac{K_0(t)}{t} dt. + + Parameters + ---------- + x : array_like + Values at which to evaluate the integrals. + out : tuple of ndarrays, optional + Optional output arrays for the function results. + + Returns + ------- + ii0 : scalar or ndarray + The integral for `i0` + ik0 : scalar or ndarray + The integral for `k0` + + References + ---------- + .. [1] S. Zhang and J.M. Jin, "Computation of Special Functions", + Wiley 1996 + + Examples + -------- + Evaluate the functions at one point. + + >>> from scipy.special import it2i0k0 + >>> int_i, int_k = it2i0k0(1.) + >>> int_i, int_k + (0.12897944249456852, 0.2085182909001295) + + Evaluate the functions at several points. + + >>> import numpy as np + >>> points = np.array([0.5, 1.5, 3.]) + >>> int_i, int_k = it2i0k0(points) + >>> int_i, int_k + (array([0.03149527, 0.30187149, 1.50012461]), + array([0.66575102, 0.0823715 , 0.00823631])) + + Plot the functions from 0 to 5. + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> x = np.linspace(0., 5., 1000) + >>> int_i, int_k = it2i0k0(x) + >>> ax.plot(x, int_i, label=r"$\int_0^x \frac{I_0(t)-1}{t}\,dt$") + >>> ax.plot(x, int_k, label=r"$\int_x^{\infty} \frac{K_0(t)}{t}\,dt$") + >>> ax.legend() + >>> ax.set_ylim(0, 10) + >>> plt.show() + """) + +add_newdoc("it2j0y0", + r""" + it2j0y0(x, out=None) + + Integrals related to Bessel functions of the first kind of order 0. + + Computes the integrals + + .. math:: + + \int_0^x \frac{1 - J_0(t)}{t} dt \\ + \int_x^\infty \frac{Y_0(t)}{t} dt. + + For more on :math:`J_0` and :math:`Y_0` see `j0` and `y0`. + + Parameters + ---------- + x : array_like + Values at which to evaluate the integrals. + out : tuple of ndarrays, optional + Optional output arrays for the function results. 
+ + Returns + ------- + ij0 : scalar or ndarray + The integral for `j0` + iy0 : scalar or ndarray + The integral for `y0` + + References + ---------- + .. [1] S. Zhang and J.M. Jin, "Computation of Special Functions", + Wiley 1996 + + Examples + -------- + Evaluate the functions at one point. + + >>> from scipy.special import it2j0y0 + >>> int_j, int_y = it2j0y0(1.) + >>> int_j, int_y + (0.12116524699506871, 0.39527290169929336) + + Evaluate the functions at several points. + + >>> import numpy as np + >>> points = np.array([0.5, 1.5, 3.]) + >>> int_j, int_y = it2j0y0(points) + >>> int_j, int_y + (array([0.03100699, 0.26227724, 0.85614669]), + array([ 0.26968854, 0.29769696, -0.02987272])) + + Plot the functions from 0 to 10. + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> x = np.linspace(0., 10., 1000) + >>> int_j, int_y = it2j0y0(x) + >>> ax.plot(x, int_j, label=r"$\int_0^x \frac{1-J_0(t)}{t}\,dt$") + >>> ax.plot(x, int_y, label=r"$\int_x^{\infty} \frac{Y_0(t)}{t}\,dt$") + >>> ax.legend() + >>> ax.set_ylim(-2.5, 2.5) + >>> plt.show() + """) + +add_newdoc("it2struve0", + r""" + it2struve0(x, out=None) + + Integral related to the Struve function of order 0. + + Returns the integral, + + .. math:: + \int_x^\infty \frac{H_0(t)}{t}\,dt + + where :math:`H_0` is the Struve function of order 0. + + Parameters + ---------- + x : array_like + Lower limit of integration. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + I : scalar or ndarray + The value of the integral. + + See Also + -------- + struve + + Notes + ----- + Wrapper for a Fortran routine created by Shanjie Zhang and Jianming + Jin [1]_. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + Examples + -------- + Evaluate the function at one point. 
+ + >>> import numpy as np + >>> from scipy.special import it2struve0 + >>> it2struve0(1.) + 0.9571973506383524 + + Evaluate the function at several points by supplying + an array for `x`. + + >>> points = np.array([1., 2., 3.5]) + >>> it2struve0(points) + array([0.95719735, 0.46909296, 0.10366042]) + + Plot the function from -10 to 10. + + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(-10., 10., 1000) + >>> it2struve0_values = it2struve0(x) + >>> fig, ax = plt.subplots() + >>> ax.plot(x, it2struve0_values) + >>> ax.set_xlabel(r'$x$') + >>> ax.set_ylabel(r'$\int_x^{\infty}\frac{H_0(t)}{t}\,dt$') + >>> plt.show() + """) + +add_newdoc( + "itairy", + r""" + itairy(x, out=None) + + Integrals of Airy functions + + Calculates the integrals of Airy functions from 0 to `x`. + + Parameters + ---------- + + x : array_like + Upper limit of integration (float). + out : tuple of ndarray, optional + Optional output arrays for the function values + + Returns + ------- + Apt : scalar or ndarray + Integral of Ai(t) from 0 to x. + Bpt : scalar or ndarray + Integral of Bi(t) from 0 to x. + Ant : scalar or ndarray + Integral of Ai(-t) from 0 to x. + Bnt : scalar or ndarray + Integral of Bi(-t) from 0 to x. + + Notes + ----- + + Wrapper for a Fortran routine created by Shanjie Zhang and Jianming + Jin [1]_. + + References + ---------- + + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + Examples + -------- + Compute the functions at ``x=1.``. + + >>> import numpy as np + >>> from scipy.special import itairy + >>> import matplotlib.pyplot as plt + >>> apt, bpt, ant, bnt = itairy(1.) + >>> apt, bpt, ant, bnt + (0.23631734191710949, + 0.8727691167380077, + 0.46567398346706845, + 0.3730050096342943) + + Compute the functions at several points by providing a NumPy array for `x`. 
+ + >>> x = np.array([1., 1.5, 2.5, 5]) + >>> apt, bpt, ant, bnt = itairy(x) + >>> apt, bpt, ant, bnt + (array([0.23631734, 0.28678675, 0.324638 , 0.33328759]), + array([ 0.87276912, 1.62470809, 5.20906691, 321.47831857]), + array([0.46567398, 0.72232876, 0.93187776, 0.7178822 ]), + array([ 0.37300501, 0.35038814, -0.02812939, 0.15873094])) + + Plot the functions from -10 to 10. + + >>> x = np.linspace(-10, 10, 500) + >>> apt, bpt, ant, bnt = itairy(x) + >>> fig, ax = plt.subplots(figsize=(6, 5)) + >>> ax.plot(x, apt, label=r"$\int_0^x\, Ai(t)\, dt$") + >>> ax.plot(x, bpt, ls="dashed", label=r"$\int_0^x\, Bi(t)\, dt$") + >>> ax.plot(x, ant, ls="dashdot", label=r"$\int_0^x\, Ai(-t)\, dt$") + >>> ax.plot(x, bnt, ls="dotted", label=r"$\int_0^x\, Bi(-t)\, dt$") + >>> ax.set_ylim(-2, 1.5) + >>> ax.legend(loc="lower right") + >>> plt.show() + """) + +add_newdoc("iti0k0", + r""" + iti0k0(x, out=None) + + Integrals of modified Bessel functions of order 0. + + Computes the integrals + + .. math:: + + \int_0^x I_0(t) dt \\ + \int_0^x K_0(t) dt. + + For more on :math:`I_0` and :math:`K_0` see `i0` and `k0`. + + Parameters + ---------- + x : array_like + Values at which to evaluate the integrals. + out : tuple of ndarrays, optional + Optional output arrays for the function results. + + Returns + ------- + ii0 : scalar or ndarray + The integral for `i0` + ik0 : scalar or ndarray + The integral for `k0` + + References + ---------- + .. [1] S. Zhang and J.M. Jin, "Computation of Special Functions", + Wiley 1996 + + Examples + -------- + Evaluate the functions at one point. + + >>> from scipy.special import iti0k0 + >>> int_i, int_k = iti0k0(1.) + >>> int_i, int_k + (1.0865210970235892, 1.2425098486237771) + + Evaluate the functions at several points. + + >>> import numpy as np + >>> points = np.array([0., 1.5, 3.]) + >>> int_i, int_k = iti0k0(points) + >>> int_i, int_k + (array([0. , 1.80606937, 6.16096149]), + array([0. 
, 1.39458246, 1.53994809])) + + Plot the functions from 0 to 5. + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> x = np.linspace(0., 5., 1000) + >>> int_i, int_k = iti0k0(x) + >>> ax.plot(x, int_i, label=r"$\int_0^x I_0(t)\,dt$") + >>> ax.plot(x, int_k, label=r"$\int_0^x K_0(t)\,dt$") + >>> ax.legend() + >>> plt.show() + """) + +add_newdoc("itj0y0", + r""" + itj0y0(x, out=None) + + Integrals of Bessel functions of the first kind of order 0. + + Computes the integrals + + .. math:: + + \int_0^x J_0(t) dt \\ + \int_0^x Y_0(t) dt. + + For more on :math:`J_0` and :math:`Y_0` see `j0` and `y0`. + + Parameters + ---------- + x : array_like + Values at which to evaluate the integrals. + out : tuple of ndarrays, optional + Optional output arrays for the function results. + + Returns + ------- + ij0 : scalar or ndarray + The integral of `j0` + iy0 : scalar or ndarray + The integral of `y0` + + References + ---------- + .. [1] S. Zhang and J.M. Jin, "Computation of Special Functions", + Wiley 1996 + + Examples + -------- + Evaluate the functions at one point. + + >>> from scipy.special import itj0y0 + >>> int_j, int_y = itj0y0(1.) + >>> int_j, int_y + (0.9197304100897596, -0.637069376607422) + + Evaluate the functions at several points. + + >>> import numpy as np + >>> points = np.array([0., 1.5, 3.]) + >>> int_j, int_y = itj0y0(points) + >>> int_j, int_y + (array([0. , 1.24144951, 1.38756725]), + array([ 0. , -0.51175903, 0.19765826])) + + Plot the functions from 0 to 10. + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> x = np.linspace(0., 10., 1000) + >>> int_j, int_y = itj0y0(x) + >>> ax.plot(x, int_j, label=r"$\int_0^x J_0(t)\,dt$") + >>> ax.plot(x, int_y, label=r"$\int_0^x Y_0(t)\,dt$") + >>> ax.legend() + >>> plt.show() + + """) + +add_newdoc("itmodstruve0", + r""" + itmodstruve0(x, out=None) + + Integral of the modified Struve function of order 0. + + .. 
math:: + I = \int_0^x L_0(t)\,dt + + Parameters + ---------- + x : array_like + Upper limit of integration (float). + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + I : scalar or ndarray + The integral of :math:`L_0` from 0 to `x`. + + See Also + -------- + modstruve: Modified Struve function which is integrated by this function + + Notes + ----- + Wrapper for a Fortran routine created by Shanjie Zhang and Jianming + Jin [1]_. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + Examples + -------- + Evaluate the function at one point. + + >>> import numpy as np + >>> from scipy.special import itmodstruve0 + >>> itmodstruve0(1.) + 0.3364726286440384 + + Evaluate the function at several points by supplying + an array for `x`. + + >>> points = np.array([1., 2., 3.5]) + >>> itmodstruve0(points) + array([0.33647263, 1.588285 , 7.60382578]) + + Plot the function from -10 to 10. + + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(-10., 10., 1000) + >>> itmodstruve0_values = itmodstruve0(x) + >>> fig, ax = plt.subplots() + >>> ax.plot(x, itmodstruve0_values) + >>> ax.set_xlabel(r'$x$') + >>> ax.set_ylabel(r'$\int_0^xL_0(t)\,dt$') + >>> plt.show() + """) + +add_newdoc("itstruve0", + r""" + itstruve0(x, out=None) + + Integral of the Struve function of order 0. + + .. math:: + I = \int_0^x H_0(t)\,dt + + Parameters + ---------- + x : array_like + Upper limit of integration (float). + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + I : scalar or ndarray + The integral of :math:`H_0` from 0 to `x`. + + See Also + -------- + struve: Function which is integrated by this function + + Notes + ----- + Wrapper for a Fortran routine created by Shanjie Zhang and Jianming + Jin [1]_. 
+ + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html + + Examples + -------- + Evaluate the function at one point. + + >>> import numpy as np + >>> from scipy.special import itstruve0 + >>> itstruve0(1.) + 0.30109042670805547 + + Evaluate the function at several points by supplying + an array for `x`. + + >>> points = np.array([1., 2., 3.5]) + >>> itstruve0(points) + array([0.30109043, 1.01870116, 1.96804581]) + + Plot the function from -20 to 20. + + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(-20., 20., 1000) + >>> istruve0_values = itstruve0(x) + >>> fig, ax = plt.subplots() + >>> ax.plot(x, istruve0_values) + >>> ax.set_xlabel(r'$x$') + >>> ax.set_ylabel(r'$\int_0^{x}H_0(t)\,dt$') + >>> plt.show() + """) + +add_newdoc("iv", + r""" + iv(v, z, out=None) + + Modified Bessel function of the first kind of real order. + + Parameters + ---------- + v : array_like + Order. If `z` is of real type and negative, `v` must be integer + valued. + z : array_like of float or complex + Argument. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + Values of the modified Bessel function. + + See Also + -------- + ive : This function with leading exponential behavior stripped off. + i0 : Faster version of this function for order 0. + i1 : Faster version of this function for order 1. + + Notes + ----- + For real `z` and :math:`v \in [-50, 50]`, the evaluation is carried out + using Temme's method [1]_. For larger orders, uniform asymptotic + expansions are applied. + + For complex `z` and positive `v`, the AMOS [2]_ `zbesi` routine is + called. 
It uses a power series for small `z`, the asymptotic expansion + for large `abs(z)`, the Miller algorithm normalized by the Wronskian + and a Neumann series for intermediate magnitudes, and the uniform + asymptotic expansions for :math:`I_v(z)` and :math:`J_v(z)` for large + orders. Backward recurrence is used to generate sequences or reduce + orders when necessary. + + The calculations above are done in the right half plane and continued + into the left half plane by the formula, + + .. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\pi v) I_v(z) + + (valid when the real part of `z` is positive). For negative `v`, the + formula + + .. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z) + + is used, where :math:`K_v(z)` is the modified Bessel function of the + second kind, evaluated using the AMOS routine `zbesk`. + + References + ---------- + .. [1] Temme, Journal of Computational Physics, vol 21, 343 (1976) + .. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions + of a Complex Argument and Nonnegative Order", + http://netlib.org/amos/ + + Examples + -------- + Evaluate the function of order 0 at one point. + + >>> from scipy.special import iv + >>> iv(0, 1.) + 1.2660658777520084 + + Evaluate the function at one point for different orders. + + >>> iv(0, 1.), iv(1, 1.), iv(1.5, 1.) + (1.2660658777520084, 0.565159103992485, 0.2935253263474798) + + The evaluation for different orders can be carried out in one call by + providing a list or NumPy array as argument for the `v` parameter: + + >>> iv([0, 1, 1.5], 1.) + array([1.26606588, 0.5651591 , 0.29352533]) + + Evaluate the function at several points for order 0 by providing an + array for `z`. + + >>> import numpy as np + >>> points = np.array([-2., 0., 3.]) + >>> iv(0, points) + array([2.2795853 , 1. , 4.88079259]) + + If `z` is an array, the order parameter `v` must be broadcastable to + the correct shape if different orders shall be computed in one call. 
+ To calculate the orders 0 and 1 for an 1D array: + + >>> orders = np.array([[0], [1]]) + >>> orders.shape + (2, 1) + + >>> iv(orders, points) + array([[ 2.2795853 , 1. , 4.88079259], + [-1.59063685, 0. , 3.95337022]]) + + Plot the functions of order 0 to 3 from -5 to 5. + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> x = np.linspace(-5., 5., 1000) + >>> for i in range(4): + ... ax.plot(x, iv(i, x), label=f'$I_{i!r}$') + >>> ax.legend() + >>> plt.show() + + """) + +add_newdoc("ive", + r""" + ive(v, z, out=None) + + Exponentially scaled modified Bessel function of the first kind. + + Defined as:: + + ive(v, z) = iv(v, z) * exp(-abs(z.real)) + + For imaginary numbers without a real part, returns the unscaled + Bessel function of the first kind `iv`. + + Parameters + ---------- + v : array_like of float + Order. + z : array_like of float or complex + Argument. + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + Values of the exponentially scaled modified Bessel function. + + See Also + -------- + iv: Modified Bessel function of the first kind + i0e: Faster implementation of this function for order 0 + i1e: Faster implementation of this function for order 1 + + Notes + ----- + For positive `v`, the AMOS [1]_ `zbesi` routine is called. It uses a + power series for small `z`, the asymptotic expansion for large + `abs(z)`, the Miller algorithm normalized by the Wronskian and a + Neumann series for intermediate magnitudes, and the uniform asymptotic + expansions for :math:`I_v(z)` and :math:`J_v(z)` for large orders. + Backward recurrence is used to generate sequences or reduce orders when + necessary. + + The calculations above are done in the right half plane and continued + into the left half plane by the formula, + + .. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\pi v) I_v(z) + + (valid when the real part of `z` is positive). For negative `v`, the + formula + + .. 
math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z) + + is used, where :math:`K_v(z)` is the modified Bessel function of the + second kind, evaluated using the AMOS routine `zbesk`. + + `ive` is useful for large arguments `z`: for these, `iv` easily overflows, + while `ive` does not due to the exponential scaling. + + References + ---------- + .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions + of a Complex Argument and Nonnegative Order", + http://netlib.org/amos/ + + Examples + -------- + In the following example `iv` returns infinity whereas `ive` still returns + a finite number. + + >>> from scipy.special import iv, ive + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> iv(3, 1000.), ive(3, 1000.) + (inf, 0.01256056218254712) + + Evaluate the function at one point for different orders by + providing a list or NumPy array as argument for the `v` parameter: + + >>> ive([0, 1, 1.5], 1.) + array([0.46575961, 0.20791042, 0.10798193]) + + Evaluate the function at several points for order 0 by providing an + array for `z`. + + >>> points = np.array([-2., 0., 3.]) + >>> ive(0, points) + array([0.30850832, 1. , 0.24300035]) + + Evaluate the function at several points for different orders by + providing arrays for both `v` for `z`. Both arrays have to be + broadcastable to the correct shape. To calculate the orders 0, 1 + and 2 for a 1D array of points: + + >>> ive([[0], [1], [2]], points) + array([[ 0.30850832, 1. , 0.24300035], + [-0.21526929, 0. , 0.19682671], + [ 0.09323903, 0. , 0.11178255]]) + + Plot the functions of order 0 to 3 from -5 to 5. + + >>> fig, ax = plt.subplots() + >>> x = np.linspace(-5., 5., 1000) + >>> for i in range(4): + ... ax.plot(x, ive(i, x), label=fr'$I_{i!r}(z)\cdot e^{{-|z|}}$') + >>> ax.legend() + >>> ax.set_xlabel(r"$z$") + >>> plt.show() + """) + +add_newdoc("j0", + r""" + j0(x, out=None) + + Bessel function of the first kind of order 0. 
+ + Parameters + ---------- + x : array_like + Argument (float). + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + J : scalar or ndarray + Value of the Bessel function of the first kind of order 0 at `x`. + + See Also + -------- + jv : Bessel function of real order and complex argument. + spherical_jn : spherical Bessel functions. + + Notes + ----- + The domain is divided into the intervals [0, 5] and (5, infinity). In the + first interval the following rational approximation is used: + + .. math:: + + J_0(x) \approx (w - r_1^2)(w - r_2^2) \frac{P_3(w)}{Q_8(w)}, + + where :math:`w = x^2` and :math:`r_1`, :math:`r_2` are the zeros of + :math:`J_0`, and :math:`P_3` and :math:`Q_8` are polynomials of degrees 3 + and 8, respectively. + + In the second interval, the Hankel asymptotic expansion is employed with + two rational functions of degree 6/6 and 7/7. + + This function is a wrapper for the Cephes [1]_ routine `j0`. + It should not be confused with the spherical Bessel functions (see + `spherical_jn`). + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + Calculate the function at one point: + + >>> from scipy.special import j0 + >>> j0(1.) + 0.7651976865579665 + + Calculate the function at several points: + + >>> import numpy as np + >>> j0(np.array([-2., 0., 4.])) + array([ 0.22389078, 1. , -0.39714981]) + + Plot the function from -20 to 20. + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> x = np.linspace(-20., 20., 1000) + >>> y = j0(x) + >>> ax.plot(x, y) + >>> plt.show() + + """) + +add_newdoc("j1", + """ + j1(x, out=None) + + Bessel function of the first kind of order 1. + + Parameters + ---------- + x : array_like + Argument (float). 
+ out : ndarray, optional + Optional output array for the function values + + Returns + ------- + J : scalar or ndarray + Value of the Bessel function of the first kind of order 1 at `x`. + + See Also + -------- + jv: Bessel function of the first kind + spherical_jn: spherical Bessel functions. + + Notes + ----- + The domain is divided into the intervals [0, 8] and (8, infinity). In the + first interval a 24 term Chebyshev expansion is used. In the second, the + asymptotic trigonometric representation is employed using two rational + functions of degree 5/5. + + This function is a wrapper for the Cephes [1]_ routine `j1`. + It should not be confused with the spherical Bessel functions (see + `spherical_jn`). + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + Calculate the function at one point: + + >>> from scipy.special import j1 + >>> j1(1.) + 0.44005058574493355 + + Calculate the function at several points: + + >>> import numpy as np + >>> j1(np.array([-2., 0., 4.])) + array([-0.57672481, 0. , -0.06604333]) + + Plot the function from -20 to 20. + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> x = np.linspace(-20., 20., 1000) + >>> y = j1(x) + >>> ax.plot(x, y) + >>> plt.show() + + """) + +add_newdoc("jn", + """ + jn(n, x, out=None) + + Bessel function of the first kind of integer order and real argument. + + Parameters + ---------- + n : array_like + order of the Bessel function + x : array_like + argument of the Bessel function + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + scalar or ndarray + The value of the bessel function + + See Also + -------- + jv + spherical_jn : spherical Bessel functions. + + Notes + ----- + `jn` is an alias of `jv`. + Not to be confused with the spherical Bessel functions (see + `spherical_jn`). 
+ + """) + +add_newdoc("jv", + r""" + jv(v, z, out=None) + + Bessel function of the first kind of real order and complex argument. + + Parameters + ---------- + v : array_like + Order (float). + z : array_like + Argument (float or complex). + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + J : scalar or ndarray + Value of the Bessel function, :math:`J_v(z)`. + + See Also + -------- + jve : :math:`J_v` with leading exponential behavior stripped off. + spherical_jn : spherical Bessel functions. + j0 : faster version of this function for order 0. + j1 : faster version of this function for order 1. + + Notes + ----- + For positive `v` values, the computation is carried out using the AMOS + [1]_ `zbesj` routine, which exploits the connection to the modified + Bessel function :math:`I_v`, + + .. math:: + J_v(z) = \exp(v\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0) + + J_v(z) = \exp(-v\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0) + + For negative `v` values the formula, + + .. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v) + + is used, where :math:`Y_v(z)` is the Bessel function of the second + kind, computed using the AMOS routine `zbesy`. Note that the second + term is exactly zero for integer `v`; to improve accuracy the second + term is explicitly omitted for `v` values such that `v = floor(v)`. + + Not to be confused with the spherical Bessel functions (see `spherical_jn`). + + References + ---------- + .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions + of a Complex Argument and Nonnegative Order", + http://netlib.org/amos/ + + Examples + -------- + Evaluate the function of order 0 at one point. + + >>> from scipy.special import jv + >>> jv(0, 1.) + 0.7651976865579666 + + Evaluate the function at one point for different orders. + + >>> jv(0, 1.), jv(1, 1.), jv(1.5, 1.) 
+ (0.7651976865579666, 0.44005058574493355, 0.24029783912342725) + + The evaluation for different orders can be carried out in one call by + providing a list or NumPy array as argument for the `v` parameter: + + >>> jv([0, 1, 1.5], 1.) + array([0.76519769, 0.44005059, 0.24029784]) + + Evaluate the function at several points for order 0 by providing an + array for `z`. + + >>> import numpy as np + >>> points = np.array([-2., 0., 3.]) + >>> jv(0, points) + array([ 0.22389078, 1. , -0.26005195]) + + If `z` is an array, the order parameter `v` must be broadcastable to + the correct shape if different orders shall be computed in one call. + To calculate the orders 0 and 1 for an 1D array: + + >>> orders = np.array([[0], [1]]) + >>> orders.shape + (2, 1) + + >>> jv(orders, points) + array([[ 0.22389078, 1. , -0.26005195], + [-0.57672481, 0. , 0.33905896]]) + + Plot the functions of order 0 to 3 from -10 to 10. + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> x = np.linspace(-10., 10., 1000) + >>> for i in range(4): + ... ax.plot(x, jv(i, x), label=f'$J_{i!r}$') + >>> ax.legend() + >>> plt.show() + + """) + +add_newdoc("jve", + r""" + jve(v, z, out=None) + + Exponentially scaled Bessel function of the first kind of order `v`. + + Defined as:: + + jve(v, z) = jv(v, z) * exp(-abs(z.imag)) + + Parameters + ---------- + v : array_like + Order (float). + z : array_like + Argument (float or complex). + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + J : scalar or ndarray + Value of the exponentially scaled Bessel function. + + See Also + -------- + jv: Unscaled Bessel function of the first kind + + Notes + ----- + For positive `v` values, the computation is carried out using the AMOS + [1]_ `zbesj` routine, which exploits the connection to the modified + Bessel function :math:`I_v`, + + .. 
math:: + J_v(z) = \exp(v\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0) + + J_v(z) = \exp(-v\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0) + + For negative `v` values the formula, + + .. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v) + + is used, where :math:`Y_v(z)` is the Bessel function of the second + kind, computed using the AMOS routine `zbesy`. Note that the second + term is exactly zero for integer `v`; to improve accuracy the second + term is explicitly omitted for `v` values such that `v = floor(v)`. + + Exponentially scaled Bessel functions are useful for large arguments `z`: + for these, the unscaled Bessel functions can easily under-or overflow. + + References + ---------- + .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions + of a Complex Argument and Nonnegative Order", + http://netlib.org/amos/ + + Examples + -------- + Compare the output of `jv` and `jve` for large complex arguments for `z` + by computing their values for order ``v=1`` at ``z=1000j``. We see that + `jv` overflows but `jve` returns a finite number: + + >>> import numpy as np + >>> from scipy.special import jv, jve + >>> v = 1 + >>> z = 1000j + >>> jv(v, z), jve(v, z) + ((inf+infj), (7.721967686709077e-19+0.012610930256928629j)) + + For real arguments for `z`, `jve` returns the same as `jv`. 
+ + >>> v, z = 1, 1000 + >>> jv(v, z), jve(v, z) + (0.004728311907089523, 0.004728311907089523) + + The function can be evaluated for several orders at the same time by + providing a list or NumPy array for `v`: + + >>> jve([1, 3, 5], 1j) + array([1.27304208e-17+2.07910415e-01j, -4.99352086e-19-8.15530777e-03j, + 6.11480940e-21+9.98657141e-05j]) + + In the same way, the function can be evaluated at several points in one + call by providing a list or NumPy array for `z`: + + >>> jve(1, np.array([1j, 2j, 3j])) + array([1.27308412e-17+0.20791042j, 1.31814423e-17+0.21526929j, + 1.20521602e-17+0.19682671j]) + + It is also possible to evaluate several orders at several points + at the same time by providing arrays for `v` and `z` with + compatible shapes for broadcasting. Compute `jve` for two different orders + `v` and three points `z` resulting in a 2x3 array. + + >>> v = np.array([[1], [3]]) + >>> z = np.array([1j, 2j, 3j]) + >>> v.shape, z.shape + ((2, 1), (3,)) + + >>> jve(v, z) + array([[1.27304208e-17+0.20791042j, 1.31810070e-17+0.21526929j, + 1.20517622e-17+0.19682671j], + [-4.99352086e-19-0.00815531j, -1.76289571e-18-0.02879122j, + -2.92578784e-18-0.04778332j]]) + """) + +add_newdoc("k0", + r""" + k0(x, out=None) + + Modified Bessel function of the second kind of order 0, :math:`K_0`. + + This function is also sometimes referred to as the modified Bessel + function of the third kind of order 0. + + Parameters + ---------- + x : array_like + Argument (float). + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + K : scalar or ndarray + Value of the modified Bessel function :math:`K_0` at `x`. + + See Also + -------- + kv: Modified Bessel function of the second kind of any order + k0e: Exponentially scaled modified Bessel function of the second kind + + Notes + ----- + The range is partitioned into the two intervals [0, 2] and (2, infinity). + Chebyshev polynomial expansions are employed in each interval. 
+ + This function is a wrapper for the Cephes [1]_ routine `k0`. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + Calculate the function at one point: + + >>> from scipy.special import k0 + >>> k0(1.) + 0.42102443824070823 + + Calculate the function at several points: + + >>> import numpy as np + >>> k0(np.array([0.5, 2., 3.])) + array([0.92441907, 0.11389387, 0.0347395 ]) + + Plot the function from 0 to 10. + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> x = np.linspace(0., 10., 1000) + >>> y = k0(x) + >>> ax.plot(x, y) + >>> plt.show() + + """) + +add_newdoc("k0e", + """ + k0e(x, out=None) + + Exponentially scaled modified Bessel function K of order 0 + + Defined as:: + + k0e(x) = exp(x) * k0(x). + + Parameters + ---------- + x : array_like + Argument (float) + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + K : scalar or ndarray + Value of the exponentially scaled modified Bessel function K of order + 0 at `x`. + + See Also + -------- + kv: Modified Bessel function of the second kind of any order + k0: Modified Bessel function of the second kind + + Notes + ----- + The range is partitioned into the two intervals [0, 2] and (2, infinity). + Chebyshev polynomial expansions are employed in each interval. + + This function is a wrapper for the Cephes [1]_ routine `k0e`. `k0e` is + useful for large arguments: for these, `k0` easily underflows. + + References + ---------- + .. 
[1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + In the following example `k0` returns 0 whereas `k0e` still returns a + useful finite number: + + >>> from scipy.special import k0, k0e + >>> k0(1000.), k0e(1000) + (0., 0.03962832160075422) + + Calculate the function at several points by providing a NumPy array or + list for `x`: + + >>> import numpy as np + >>> k0e(np.array([0.5, 2., 3.])) + array([1.52410939, 0.84156822, 0.6977616 ]) + + Plot the function from 0 to 10. + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> x = np.linspace(0., 10., 1000) + >>> y = k0e(x) + >>> ax.plot(x, y) + >>> plt.show() + """) + +add_newdoc("k1", + """ + k1(x, out=None) + + Modified Bessel function of the second kind of order 1, :math:`K_1(x)`. + + Parameters + ---------- + x : array_like + Argument (float) + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + K : scalar or ndarray + Value of the modified Bessel function K of order 1 at `x`. + + See Also + -------- + kv: Modified Bessel function of the second kind of any order + k1e: Exponentially scaled modified Bessel function K of order 1 + + Notes + ----- + The range is partitioned into the two intervals [0, 2] and (2, infinity). + Chebyshev polynomial expansions are employed in each interval. + + This function is a wrapper for the Cephes [1]_ routine `k1`. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + Calculate the function at one point: + + >>> from scipy.special import k1 + >>> k1(1.) + 0.6019072301972346 + + Calculate the function at several points: + + >>> import numpy as np + >>> k1(np.array([0.5, 2., 3.])) + array([1.65644112, 0.13986588, 0.04015643]) + + Plot the function from 0 to 10. 
+ + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> x = np.linspace(0., 10., 1000) + >>> y = k1(x) + >>> ax.plot(x, y) + >>> plt.show() + + """) + +add_newdoc("k1e", + """ + k1e(x, out=None) + + Exponentially scaled modified Bessel function K of order 1 + + Defined as:: + + k1e(x) = exp(x) * k1(x) + + Parameters + ---------- + x : array_like + Argument (float) + out : ndarray, optional + Optional output array for the function values + + Returns + ------- + K : scalar or ndarray + Value of the exponentially scaled modified Bessel function K of order + 1 at `x`. + + See Also + -------- + kv: Modified Bessel function of the second kind of any order + k1: Modified Bessel function of the second kind of order 1 + + Notes + ----- + The range is partitioned into the two intervals [0, 2] and (2, infinity). + Chebyshev polynomial expansions are employed in each interval. + + This function is a wrapper for the Cephes [1]_ routine `k1e`. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + In the following example `k1` returns 0 whereas `k1e` still returns a + useful floating point number. + + >>> from scipy.special import k1, k1e + >>> k1(1000.), k1e(1000.) + (0., 0.03964813081296021) + + Calculate the function at several points by providing a NumPy array or + list for `x`: + + >>> import numpy as np + >>> k1e(np.array([0.5, 2., 3.])) + array([2.73100971, 1.03347685, 0.80656348]) + + Plot the function from 0 to 10. + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> x = np.linspace(0., 10., 1000) + >>> y = k1e(x) + >>> ax.plot(x, y) + >>> plt.show() + """) + +add_newdoc("kei", + r""" + kei(x, out=None) + + Kelvin function kei. + + Defined as + + .. math:: + + \mathrm{kei}(x) = \Im[K_0(x e^{\pi i / 4})] + + where :math:`K_0` is the modified Bessel function of the second + kind (see `kv`). See [dlmf]_ for more details. 
+ + Parameters + ---------- + x : array_like + Real argument. + out : ndarray, optional + Optional output array for the function results. + + Returns + ------- + scalar or ndarray + Values of the Kelvin function. + + See Also + -------- + ker : the corresponding real part + keip : the derivative of kei + kv : modified Bessel function of the second kind + + References + ---------- + .. [dlmf] NIST, Digital Library of Mathematical Functions, + https://dlmf.nist.gov/10.61 + + Examples + -------- + It can be expressed using the modified Bessel function of the + second kind. + + >>> import numpy as np + >>> import scipy.special as sc + >>> x = np.array([1.0, 2.0, 3.0, 4.0]) + >>> sc.kv(0, x * np.exp(np.pi * 1j / 4)).imag + array([-0.49499464, -0.20240007, -0.05112188, 0.0021984 ]) + >>> sc.kei(x) + array([-0.49499464, -0.20240007, -0.05112188, 0.0021984 ]) + + """) + +add_newdoc("keip", + r""" + keip(x, out=None) + + Derivative of the Kelvin function kei. + + Parameters + ---------- + x : array_like + Real argument. + out : ndarray, optional + Optional output array for the function results. + + Returns + ------- + scalar or ndarray + The values of the derivative of kei. + + See Also + -------- + kei + + References + ---------- + .. [dlmf] NIST, Digital Library of Mathematical Functions, + https://dlmf.nist.gov/10#PT5 + + """) + +add_newdoc("kelvin", + """ + kelvin(x, out=None) + + Kelvin functions as complex numbers + + Parameters + ---------- + x : array_like + Argument + out : tuple of ndarray, optional + Optional output arrays for the function values + + Returns + ------- + Be, Ke, Bep, Kep : 4-tuple of scalar or ndarray + The tuple (Be, Ke, Bep, Kep) contains complex numbers + representing the real and imaginary Kelvin functions and their + derivatives evaluated at `x`. For example, kelvin(x)[0].real = + ber x and kelvin(x)[0].imag = bei x with similar relationships + for ker and kei. + """) + +add_newdoc("ker", + r""" + ker(x, out=None) + + Kelvin function ker. 
+ + Defined as + + .. math:: + + \mathrm{ker}(x) = \Re[K_0(x e^{\pi i / 4})] + + Where :math:`K_0` is the modified Bessel function of the second + kind (see `kv`). See [dlmf]_ for more details. + + Parameters + ---------- + x : array_like + Real argument. + out : ndarray, optional + Optional output array for the function results. + + Returns + ------- + scalar or ndarray + Values of the Kelvin function. + + See Also + -------- + kei : the corresponding imaginary part + kerp : the derivative of ker + kv : modified Bessel function of the second kind + + References + ---------- + .. [dlmf] NIST, Digital Library of Mathematical Functions, + https://dlmf.nist.gov/10.61 + + Examples + -------- + It can be expressed using the modified Bessel function of the + second kind. + + >>> import numpy as np + >>> import scipy.special as sc + >>> x = np.array([1.0, 2.0, 3.0, 4.0]) + >>> sc.kv(0, x * np.exp(np.pi * 1j / 4)).real + array([ 0.28670621, -0.04166451, -0.06702923, -0.03617885]) + >>> sc.ker(x) + array([ 0.28670621, -0.04166451, -0.06702923, -0.03617885]) + + """) + +add_newdoc("kerp", + r""" + kerp(x, out=None) + + Derivative of the Kelvin function ker. + + Parameters + ---------- + x : array_like + Real argument. + out : ndarray, optional + Optional output array for the function results. + + Returns + ------- + scalar or ndarray + Values of the derivative of ker. + + See Also + -------- + ker + + References + ---------- + .. [dlmf] NIST, Digital Library of Mathematical Functions, + https://dlmf.nist.gov/10#PT5 + + """) + +add_newdoc("kl_div", + r""" + kl_div(x, y, out=None) + + Elementwise function for computing Kullback-Leibler divergence. + + .. 
math:: + + \mathrm{kl\_div}(x, y) = + \begin{cases} + x \log(x / y) - x + y & x > 0, y > 0 \\ + y & x = 0, y \ge 0 \\ + \infty & \text{otherwise} + \end{cases} + + Parameters + ---------- + x, y : array_like + Real arguments + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + Values of the Kullback-Liebler divergence. + + See Also + -------- + entr, rel_entr, scipy.stats.entropy + + Notes + ----- + .. versionadded:: 0.15.0 + + This function is non-negative and is jointly convex in `x` and `y`. + + The origin of this function is in convex programming; see [1]_ for + details. This is why the function contains the extra :math:`-x + + y` terms over what might be expected from the Kullback-Leibler + divergence. For a version of the function without the extra terms, + see `rel_entr`. + + References + ---------- + .. [1] Boyd, Stephen and Lieven Vandenberghe. *Convex optimization*. + Cambridge University Press, 2004. + :doi:`https://doi.org/10.1017/CBO9780511804441` + + """) + +add_newdoc("kn", + r""" + kn(n, x, out=None) + + Modified Bessel function of the second kind of integer order `n` + + Returns the modified Bessel function of the second kind for integer order + `n` at real `z`. + + These are also sometimes called functions of the third kind, Basset + functions, or Macdonald functions. + + Parameters + ---------- + n : array_like of int + Order of Bessel functions (floats will truncate with a warning) + x : array_like of float + Argument at which to evaluate the Bessel functions + out : ndarray, optional + Optional output array for the function results. + + Returns + ------- + scalar or ndarray + Value of the Modified Bessel function of the second kind, + :math:`K_n(x)`. + + See Also + -------- + kv : Same function, but accepts real order and complex argument + kvp : Derivative of this function + + Notes + ----- + Wrapper for AMOS [1]_ routine `zbesk`. 
For a discussion of the + algorithm used, see [2]_ and the references therein. + + References + ---------- + .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions + of a Complex Argument and Nonnegative Order", + http://netlib.org/amos/ + .. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel + functions of a complex argument and nonnegative order", ACM + TOMS Vol. 12 Issue 3, Sept. 1986, p. 265 + + Examples + -------- + Plot the function of several orders for real input: + + >>> import numpy as np + >>> from scipy.special import kn + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(0, 5, 1000) + >>> for N in range(6): + ... plt.plot(x, kn(N, x), label='$K_{}(x)$'.format(N)) + >>> plt.ylim(0, 10) + >>> plt.legend() + >>> plt.title(r'Modified Bessel function of the second kind $K_n(x)$') + >>> plt.show() + + Calculate for a single value at multiple orders: + + >>> kn([4, 5, 6], 1) + array([ 44.23241585, 360.9605896 , 3653.83831186]) + """) + +add_newdoc("kolmogi", + """ + kolmogi(p, out=None) + + Inverse Survival Function of Kolmogorov distribution + + It is the inverse function to `kolmogorov`. + Returns y such that ``kolmogorov(y) == p``. + + Parameters + ---------- + p : float array_like + Probability + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + The value(s) of kolmogi(p) + + See Also + -------- + kolmogorov : The Survival Function for the distribution + scipy.stats.kstwobign : Provides the functionality as a continuous distribution + smirnov, smirnovi : Functions for the one-sided distribution + + Notes + ----- + `kolmogorov` is used by `stats.kstest` in the application of the + Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this + function is exposed in `scpy.special`, but the recommended way to achieve + the most accurate CDF/SF/PDF/PPF/ISF computations is to use the + `stats.kstwobign` distribution. 
+ + Examples + -------- + >>> from scipy.special import kolmogi + >>> kolmogi([0, 0.1, 0.25, 0.5, 0.75, 0.9, 1.0]) + array([ inf, 1.22384787, 1.01918472, 0.82757356, 0.67644769, + 0.57117327, 0. ]) + + """) + +add_newdoc("kolmogorov", + r""" + kolmogorov(y, out=None) + + Complementary cumulative distribution (Survival Function) function of + Kolmogorov distribution. + + Returns the complementary cumulative distribution function of + Kolmogorov's limiting distribution (``D_n*\sqrt(n)`` as n goes to infinity) + of a two-sided test for equality between an empirical and a theoretical + distribution. It is equal to the (limit as n->infinity of the) + probability that ``sqrt(n) * max absolute deviation > y``. + + Parameters + ---------- + y : float array_like + Absolute deviation between the Empirical CDF (ECDF) and the target CDF, + multiplied by sqrt(n). + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + The value(s) of kolmogorov(y) + + See Also + -------- + kolmogi : The Inverse Survival Function for the distribution + scipy.stats.kstwobign : Provides the functionality as a continuous distribution + smirnov, smirnovi : Functions for the one-sided distribution + + Notes + ----- + `kolmogorov` is used by `stats.kstest` in the application of the + Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this + function is exposed in `scpy.special`, but the recommended way to achieve + the most accurate CDF/SF/PDF/PPF/ISF computations is to use the + `stats.kstwobign` distribution. + + Examples + -------- + Show the probability of a gap at least as big as 0, 0.5 and 1.0. + + >>> import numpy as np + >>> from scipy.special import kolmogorov + >>> from scipy.stats import kstwobign + >>> kolmogorov([0, 0.5, 1.0]) + array([ 1. , 0.96394524, 0.26999967]) + + Compare a sample of size 1000 drawn from a Laplace(0, 1) distribution against + the target distribution, a Normal(0, 1) distribution. 
+ + >>> from scipy.stats import norm, laplace + >>> rng = np.random.default_rng() + >>> n = 1000 + >>> lap01 = laplace(0, 1) + >>> x = np.sort(lap01.rvs(n, random_state=rng)) + >>> np.mean(x), np.std(x) + (-0.05841730131499543, 1.3968109101997568) + + Construct the Empirical CDF and the K-S statistic Dn. + + >>> target = norm(0,1) # Normal mean 0, stddev 1 + >>> cdfs = target.cdf(x) + >>> ecdfs = np.arange(n+1, dtype=float)/n + >>> gaps = np.column_stack([cdfs - ecdfs[:n], ecdfs[1:] - cdfs]) + >>> Dn = np.max(gaps) + >>> Kn = np.sqrt(n) * Dn + >>> print('Dn=%f, sqrt(n)*Dn=%f' % (Dn, Kn)) + Dn=0.043363, sqrt(n)*Dn=1.371265 + >>> print(chr(10).join(['For a sample of size n drawn from a N(0, 1) distribution:', + ... ' the approximate Kolmogorov probability that sqrt(n)*Dn>=%f is %f' % + ... (Kn, kolmogorov(Kn)), + ... ' the approximate Kolmogorov probability that sqrt(n)*Dn<=%f is %f' % + ... (Kn, kstwobign.cdf(Kn))])) + For a sample of size n drawn from a N(0, 1) distribution: + the approximate Kolmogorov probability that sqrt(n)*Dn>=1.371265 is 0.046533 + the approximate Kolmogorov probability that sqrt(n)*Dn<=1.371265 is 0.953467 + + Plot the Empirical CDF against the target N(0, 1) CDF. + + >>> import matplotlib.pyplot as plt + >>> plt.step(np.concatenate([[-3], x]), ecdfs, where='post', label='Empirical CDF') + >>> x3 = np.linspace(-3, 3, 100) + >>> plt.plot(x3, target.cdf(x3), label='CDF for N(0, 1)') + >>> plt.ylim([0, 1]); plt.grid(True); plt.legend(); + >>> # Add vertical lines marking Dn+ and Dn- + >>> iminus, iplus = np.argmax(gaps, axis=0) + >>> plt.vlines([x[iminus]], ecdfs[iminus], cdfs[iminus], + ... color='r', linestyle='dashed', lw=4) + >>> plt.vlines([x[iplus]], cdfs[iplus], ecdfs[iplus+1], + ... color='r', linestyle='dashed', lw=4) + >>> plt.show() + """) + +add_newdoc("_kolmogc", + r""" + Internal function, do not use. + """) + +add_newdoc("_kolmogci", + r""" + Internal function, do not use. 
+ """) + +add_newdoc("_kolmogp", + r""" + Internal function, do not use. + """) + +add_newdoc("kv", + r""" + kv(v, z, out=None) + + Modified Bessel function of the second kind of real order `v` + + Returns the modified Bessel function of the second kind for real order + `v` at complex `z`. + + These are also sometimes called functions of the third kind, Basset + functions, or Macdonald functions. They are defined as those solutions + of the modified Bessel equation for which, + + .. math:: + K_v(x) \sim \sqrt{\pi/(2x)} \exp(-x) + + as :math:`x \to \infty` [3]_. + + Parameters + ---------- + v : array_like of float + Order of Bessel functions + z : array_like of complex + Argument at which to evaluate the Bessel functions + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + The results. Note that input must be of complex type to get complex + output, e.g. ``kv(3, -2+0j)`` instead of ``kv(3, -2)``. + + See Also + -------- + kve : This function with leading exponential behavior stripped off. + kvp : Derivative of this function + + Notes + ----- + Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the + algorithm used, see [2]_ and the references therein. + + References + ---------- + .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions + of a Complex Argument and Nonnegative Order", + http://netlib.org/amos/ + .. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel + functions of a complex argument and nonnegative order", ACM + TOMS Vol. 12 Issue 3, Sept. 1986, p. 265 + .. [3] NIST Digital Library of Mathematical Functions, + Eq. 10.25.E3. https://dlmf.nist.gov/10.25.E3 + + Examples + -------- + Plot the function of several orders for real input: + + >>> import numpy as np + >>> from scipy.special import kv + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(0, 5, 1000) + >>> for N in np.linspace(0, 6, 5): + ... 
plt.plot(x, kv(N, x), label='$K_{{{}}}(x)$'.format(N)) + >>> plt.ylim(0, 10) + >>> plt.legend() + >>> plt.title(r'Modified Bessel function of the second kind $K_\nu(x)$') + >>> plt.show() + + Calculate for a single value at multiple orders: + + >>> kv([4, 4.5, 5], 1+2j) + array([ 0.1992+2.3892j, 2.3493+3.6j , 7.2827+3.8104j]) + + """) + +add_newdoc("kve", + r""" + kve(v, z, out=None) + + Exponentially scaled modified Bessel function of the second kind. + + Returns the exponentially scaled, modified Bessel function of the + second kind (sometimes called the third kind) for real order `v` at + complex `z`:: + + kve(v, z) = kv(v, z) * exp(z) + + Parameters + ---------- + v : array_like of float + Order of Bessel functions + z : array_like of complex + Argument at which to evaluate the Bessel functions + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + The exponentially scaled modified Bessel function of the second kind. + + See Also + -------- + kv : This function without exponential scaling. + k0e : Faster version of this function for order 0. + k1e : Faster version of this function for order 1. + + Notes + ----- + Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the + algorithm used, see [2]_ and the references therein. + + References + ---------- + .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions + of a Complex Argument and Nonnegative Order", + http://netlib.org/amos/ + .. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel + functions of a complex argument and nonnegative order", ACM + TOMS Vol. 12 Issue 3, Sept. 1986, p. 265 + + Examples + -------- + In the following example `kv` returns 0 whereas `kve` still returns + a useful finite number. + + >>> import numpy as np + >>> from scipy.special import kv, kve + >>> import matplotlib.pyplot as plt + >>> kv(3, 1000.), kve(3, 1000.) 
+ (0.0, 0.03980696128440973) + + Evaluate the function at one point for different orders by + providing a list or NumPy array as argument for the `v` parameter: + + >>> kve([0, 1, 1.5], 1.) + array([1.14446308, 1.63615349, 2.50662827]) + + Evaluate the function at several points for order 0 by providing an + array for `z`. + + >>> points = np.array([1., 3., 10.]) + >>> kve(0, points) + array([1.14446308, 0.6977616 , 0.39163193]) + + Evaluate the function at several points for different orders by + providing arrays for both `v` for `z`. Both arrays have to be + broadcastable to the correct shape. To calculate the orders 0, 1 + and 2 for a 1D array of points: + + >>> kve([[0], [1], [2]], points) + array([[1.14446308, 0.6977616 , 0.39163193], + [1.63615349, 0.80656348, 0.41076657], + [4.41677005, 1.23547058, 0.47378525]]) + + Plot the functions of order 0 to 3 from 0 to 5. + + >>> fig, ax = plt.subplots() + >>> x = np.linspace(0., 5., 1000) + >>> for i in range(4): + ... ax.plot(x, kve(i, x), label=fr'$K_{i!r}(z)\cdot e^z$') + >>> ax.legend() + >>> ax.set_xlabel(r"$z$") + >>> ax.set_ylim(0, 4) + >>> ax.set_xlim(0, 5) + >>> plt.show() + """) + +add_newdoc("_lanczos_sum_expg_scaled", + """ + Internal function, do not use. + """) + +add_newdoc("_lgam1p", + """ + Internal function, do not use. + """) + +add_newdoc("log1p", + """ + log1p(x, out=None) + + Calculates log(1 + x) for use when `x` is near zero. + + Parameters + ---------- + x : array_like + Real or complex valued input. + out : ndarray, optional + Optional output array for the function results. + + Returns + ------- + scalar or ndarray + Values of ``log(1 + x)``. + + See Also + -------- + expm1, cosm1 + + Examples + -------- + >>> import numpy as np + >>> import scipy.special as sc + + It is more accurate than using ``log(1 + x)`` directly for ``x`` + near 0. Note that in the below example ``1 + 1e-17 == 1`` to + double precision. 
+ + >>> sc.log1p(1e-17) + 1e-17 + >>> np.log(1 + 1e-17) + 0.0 + + """) + +add_newdoc("_log1pmx", + """ + Internal function, do not use. + """) + +add_newdoc('log_expit', + """ + log_expit(x, out=None) + + Logarithm of the logistic sigmoid function. + + The SciPy implementation of the logistic sigmoid function is + `scipy.special.expit`, so this function is called ``log_expit``. + + The function is mathematically equivalent to ``log(expit(x))``, but + is formulated to avoid loss of precision for inputs with large + (positive or negative) magnitude. + + Parameters + ---------- + x : array_like + The values to apply ``log_expit`` to element-wise. + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + out : scalar or ndarray + The computed values, an ndarray of the same shape as ``x``. + + See Also + -------- + expit + + Notes + ----- + As a ufunc, ``log_expit`` takes a number of optional keyword arguments. + For more information see + `ufuncs `_ + + .. versionadded:: 1.8.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import log_expit, expit + + >>> log_expit([-3.0, 0.25, 2.5, 5.0]) + array([-3.04858735, -0.57593942, -0.07888973, -0.00671535]) + + Large negative values: + + >>> log_expit([-100, -500, -1000]) + array([ -100., -500., -1000.]) + + Note that ``expit(-1000)`` returns 0, so the naive implementation + ``log(expit(-1000))`` return ``-inf``. + + Large positive values: + + >>> log_expit([29, 120, 400]) + array([-2.54366565e-013, -7.66764807e-053, -1.91516960e-174]) + + Compare that to the naive implementation: + + >>> np.log(expit([29, 120, 400])) + array([-2.54463117e-13, 0.00000000e+00, 0.00000000e+00]) + + The first value is accurate to only 3 digits, and the larger inputs + lose all precision and return 0. + """) + +add_newdoc('logit', + """ + logit(x, out=None) + + Logit ufunc for ndarrays. + + The logit function is defined as logit(p) = log(p/(1-p)). 
+ Note that logit(0) = -inf, logit(1) = inf, and logit(p) + for p<0 or p>1 yields nan. + + Parameters + ---------- + x : ndarray + The ndarray to apply logit to element-wise. + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + An ndarray of the same shape as x. Its entries + are logit of the corresponding entry of x. + + See Also + -------- + expit + + Notes + ----- + As a ufunc logit takes a number of optional + keyword arguments. For more information + see `ufuncs `_ + + .. versionadded:: 0.10.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import logit, expit + + >>> logit([0, 0.25, 0.5, 0.75, 1]) + array([ -inf, -1.09861229, 0. , 1.09861229, inf]) + + `expit` is the inverse of `logit`: + + >>> expit(logit([0.1, 0.75, 0.999])) + array([ 0.1 , 0.75 , 0.999]) + + Plot logit(x) for x in [0, 1]: + + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(0, 1, 501) + >>> y = logit(x) + >>> plt.plot(x, y) + >>> plt.grid() + >>> plt.ylim(-6, 6) + >>> plt.xlabel('x') + >>> plt.title('logit(x)') + >>> plt.show() + + """) + +add_newdoc("lpmv", + r""" + lpmv(m, v, x, out=None) + + Associated Legendre function of integer order and real degree. + + Defined as + + .. math:: + + P_v^m = (-1)^m (1 - x^2)^{m/2} \frac{d^m}{dx^m} P_v(x) + + where + + .. math:: + + P_v = \sum_{k = 0}^\infty \frac{(-v)_k (v + 1)_k}{(k!)^2} + \left(\frac{1 - x}{2}\right)^k + + is the Legendre function of the first kind. Here :math:`(\cdot)_k` + is the Pochhammer symbol; see `poch`. + + Parameters + ---------- + m : array_like + Order (int or float). If passed a float not equal to an + integer the function returns NaN. + v : array_like + Degree (float). + x : array_like + Argument (float). Must have ``|x| <= 1``. + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + pmv : scalar or ndarray + Value of the associated Legendre function. 
+ + See Also + -------- + lpmn : Compute the associated Legendre function for all orders + ``0, ..., m`` and degrees ``0, ..., n``. + clpmn : Compute the associated Legendre function at complex + arguments. + + Notes + ----- + Note that this implementation includes the Condon-Shortley phase. + + References + ---------- + .. [1] Zhang, Jin, "Computation of Special Functions", John Wiley + and Sons, Inc, 1996. + + """) + +add_newdoc("mathieu_a", + """ + mathieu_a(m, q, out=None) + + Characteristic value of even Mathieu functions + + Parameters + ---------- + m : array_like + Order of the function + q : array_like + Parameter of the function + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + Characteristic value for the even solution, ``ce_m(z, q)``, of + Mathieu's equation. + + See Also + -------- + mathieu_b, mathieu_cem, mathieu_sem + + """) + +add_newdoc("mathieu_b", + """ + mathieu_b(m, q, out=None) + + Characteristic value of odd Mathieu functions + + Parameters + ---------- + m : array_like + Order of the function + q : array_like + Parameter of the function + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + Characteristic value for the odd solution, ``se_m(z, q)``, of Mathieu's + equation. + + See Also + -------- + mathieu_a, mathieu_cem, mathieu_sem + + """) + +add_newdoc("mathieu_cem", + """ + mathieu_cem(m, q, x, out=None) + + Even Mathieu function and its derivative + + Returns the even Mathieu function, ``ce_m(x, q)``, of order `m` and + parameter `q` evaluated at `x` (given in degrees). 
Also returns the + derivative with respect to `x` of ce_m(x, q) + + Parameters + ---------- + m : array_like + Order of the function + q : array_like + Parameter of the function + x : array_like + Argument of the function, *given in degrees, not radians* + out : tuple of ndarray, optional + Optional output arrays for the function results + + Returns + ------- + y : scalar or ndarray + Value of the function + yp : scalar or ndarray + Value of the derivative vs x + + See Also + -------- + mathieu_a, mathieu_b, mathieu_sem + + """) + +add_newdoc("mathieu_modcem1", + """ + mathieu_modcem1(m, q, x, out=None) + + Even modified Mathieu function of the first kind and its derivative + + Evaluates the even modified Mathieu function of the first kind, + ``Mc1m(x, q)``, and its derivative at `x` for order `m` and parameter + `q`. + + Parameters + ---------- + m : array_like + Order of the function + q : array_like + Parameter of the function + x : array_like + Argument of the function, *given in degrees, not radians* + out : tuple of ndarray, optional + Optional output arrays for the function results + + Returns + ------- + y : scalar or ndarray + Value of the function + yp : scalar or ndarray + Value of the derivative vs x + + See Also + -------- + mathieu_modsem1 + + """) + +add_newdoc("mathieu_modcem2", + """ + mathieu_modcem2(m, q, x, out=None) + + Even modified Mathieu function of the second kind and its derivative + + Evaluates the even modified Mathieu function of the second kind, + Mc2m(x, q), and its derivative at `x` (given in degrees) for order `m` + and parameter `q`. 
+ + Parameters + ---------- + m : array_like + Order of the function + q : array_like + Parameter of the function + x : array_like + Argument of the function, *given in degrees, not radians* + out : tuple of ndarray, optional + Optional output arrays for the function results + + Returns + ------- + y : scalar or ndarray + Value of the function + yp : scalar or ndarray + Value of the derivative vs x + + See Also + -------- + mathieu_modsem2 + + """) + +add_newdoc("mathieu_modsem1", + """ + mathieu_modsem1(m, q, x, out=None) + + Odd modified Mathieu function of the first kind and its derivative + + Evaluates the odd modified Mathieu function of the first kind, + Ms1m(x, q), and its derivative at `x` (given in degrees) for order `m` + and parameter `q`. + + Parameters + ---------- + m : array_like + Order of the function + q : array_like + Parameter of the function + x : array_like + Argument of the function, *given in degrees, not radians* + out : tuple of ndarray, optional + Optional output arrays for the function results + + Returns + ------- + y : scalar or ndarray + Value of the function + yp : scalar or ndarray + Value of the derivative vs x + + See Also + -------- + mathieu_modcem1 + + """) + +add_newdoc("mathieu_modsem2", + """ + mathieu_modsem2(m, q, x, out=None) + + Odd modified Mathieu function of the second kind and its derivative + + Evaluates the odd modified Mathieu function of the second kind, + Ms2m(x, q), and its derivative at `x` (given in degrees) for order `m` + and parameter q. 
+ + Parameters + ---------- + m : array_like + Order of the function + q : array_like + Parameter of the function + x : array_like + Argument of the function, *given in degrees, not radians* + out : tuple of ndarray, optional + Optional output arrays for the function results + + Returns + ------- + y : scalar or ndarray + Value of the function + yp : scalar or ndarray + Value of the derivative vs x + + See Also + -------- + mathieu_modcem2 + + """) + +add_newdoc( + "mathieu_sem", + """ + mathieu_sem(m, q, x, out=None) + + Odd Mathieu function and its derivative + + Returns the odd Mathieu function, se_m(x, q), of order `m` and + parameter `q` evaluated at `x` (given in degrees). Also returns the + derivative with respect to `x` of se_m(x, q). + + Parameters + ---------- + m : array_like + Order of the function + q : array_like + Parameter of the function + x : array_like + Argument of the function, *given in degrees, not radians*. + out : tuple of ndarray, optional + Optional output arrays for the function results + + Returns + ------- + y : scalar or ndarray + Value of the function + yp : scalar or ndarray + Value of the derivative vs x + + See Also + -------- + mathieu_a, mathieu_b, mathieu_cem + + """) + +add_newdoc("modfresnelm", + """ + modfresnelm(x, out=None) + + Modified Fresnel negative integrals + + Parameters + ---------- + x : array_like + Function argument + out : tuple of ndarray, optional + Optional output arrays for the function results + + Returns + ------- + fm : scalar or ndarray + Integral ``F_-(x)``: ``integral(exp(-1j*t*t), t=x..inf)`` + km : scalar or ndarray + Integral ``K_-(x)``: ``1/sqrt(pi)*exp(1j*(x*x+pi/4))*fp`` + + See Also + -------- + modfresnelp + + """) + +add_newdoc("modfresnelp", + """ + modfresnelp(x, out=None) + + Modified Fresnel positive integrals + + Parameters + ---------- + x : array_like + Function argument + out : tuple of ndarray, optional + Optional output arrays for the function results + + Returns + ------- + fp : 
scalar or ndarray + Integral ``F_+(x)``: ``integral(exp(1j*t*t), t=x..inf)`` + kp : scalar or ndarray + Integral ``K_+(x)``: ``1/sqrt(pi)*exp(-1j*(x*x+pi/4))*fp`` + + See Also + -------- + modfresnelm + + """) + +add_newdoc("modstruve", + r""" + modstruve(v, x, out=None) + + Modified Struve function. + + Return the value of the modified Struve function of order `v` at `x`. The + modified Struve function is defined as, + + .. math:: + L_v(x) = -\imath \exp(-\pi\imath v/2) H_v(\imath x), + + where :math:`H_v` is the Struve function. + + Parameters + ---------- + v : array_like + Order of the modified Struve function (float). + x : array_like + Argument of the Struve function (float; must be positive unless `v` is + an integer). + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + L : scalar or ndarray + Value of the modified Struve function of order `v` at `x`. + + See Also + -------- + struve + + Notes + ----- + Three methods discussed in [1]_ are used to evaluate the function: + + - power series + - expansion in Bessel functions (if :math:`|x| < |v| + 20`) + - asymptotic large-x expansion (if :math:`x \geq 0.7v + 12`) + + Rounding errors are estimated based on the largest terms in the sums, and + the result associated with the smallest error is returned. + + References + ---------- + .. [1] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/11 + + Examples + -------- + Calculate the modified Struve function of order 1 at 2. + + >>> import numpy as np + >>> from scipy.special import modstruve + >>> import matplotlib.pyplot as plt + >>> modstruve(1, 2.) + 1.102759787367716 + + Calculate the modified Struve function at 2 for orders 1, 2 and 3 by + providing a list for the order parameter `v`. + + >>> modstruve([1, 2, 3], 2.) + array([1.10275979, 0.41026079, 0.11247294]) + + Calculate the modified Struve function of order 1 for several points + by providing an array for `x`. 
+ + >>> points = np.array([2., 5., 8.]) + >>> modstruve(1, points) + array([ 1.10275979, 23.72821578, 399.24709139]) + + Compute the modified Struve function for several orders at several + points by providing arrays for `v` and `z`. The arrays have to be + broadcastable to the correct shapes. + + >>> orders = np.array([[1], [2], [3]]) + >>> points.shape, orders.shape + ((3,), (3, 1)) + + >>> modstruve(orders, points) + array([[1.10275979e+00, 2.37282158e+01, 3.99247091e+02], + [4.10260789e-01, 1.65535979e+01, 3.25973609e+02], + [1.12472937e-01, 9.42430454e+00, 2.33544042e+02]]) + + Plot the modified Struve functions of order 0 to 3 from -5 to 5. + + >>> fig, ax = plt.subplots() + >>> x = np.linspace(-5., 5., 1000) + >>> for i in range(4): + ... ax.plot(x, modstruve(i, x), label=f'$L_{i!r}$') + >>> ax.legend(ncol=2) + >>> ax.set_xlim(-5, 5) + >>> ax.set_title(r"Modified Struve functions $L_{\nu}$") + >>> plt.show() + """) + +add_newdoc("nbdtr", + r""" + nbdtr(k, n, p, out=None) + + Negative binomial cumulative distribution function. + + Returns the sum of the terms 0 through `k` of the negative binomial + distribution probability mass function, + + .. math:: + + F = \sum_{j=0}^k {{n + j - 1}\choose{j}} p^n (1 - p)^j. + + In a sequence of Bernoulli trials with individual success probabilities + `p`, this is the probability that `k` or fewer failures precede the nth + success. + + Parameters + ---------- + k : array_like + The maximum number of allowed failures (nonnegative int). + n : array_like + The target number of successes (positive int). + p : array_like + Probability of success in a single event (float). + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + F : scalar or ndarray + The probability of `k` or fewer failures before `n` successes in a + sequence of events with individual success probability `p`. 
+ + See Also + -------- + nbdtrc : Negative binomial survival function + nbdtrik : Negative binomial quantile function + scipy.stats.nbinom : Negative binomial distribution + + Notes + ----- + If floating point values are passed for `k` or `n`, they will be truncated + to integers. + + The terms are not summed directly; instead the regularized incomplete beta + function is employed, according to the formula, + + .. math:: + \mathrm{nbdtr}(k, n, p) = I_{p}(n, k + 1). + + Wrapper for the Cephes [1]_ routine `nbdtr`. + + The negative binomial distribution is also available as + `scipy.stats.nbinom`. Using `nbdtr` directly can improve performance + compared to the ``cdf`` method of `scipy.stats.nbinom` (see last example). + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + Compute the function for ``k=10`` and ``n=5`` at ``p=0.5``. + + >>> import numpy as np + >>> from scipy.special import nbdtr + >>> nbdtr(10, 5, 0.5) + 0.940765380859375 + + Compute the function for ``n=10`` and ``p=0.5`` at several points by + providing a NumPy array or list for `k`. + + >>> nbdtr([5, 10, 15], 10, 0.5) + array([0.15087891, 0.58809853, 0.88523853]) + + Plot the function for four different parameter sets. + + >>> import matplotlib.pyplot as plt + >>> k = np.arange(130) + >>> n_parameters = [20, 20, 20, 80] + >>> p_parameters = [0.2, 0.5, 0.8, 0.5] + >>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot'] + >>> parameters_list = list(zip(p_parameters, n_parameters, + ... linestyles)) + >>> fig, ax = plt.subplots(figsize=(8, 8)) + >>> for parameter_set in parameters_list: + ... p, n, style = parameter_set + ... nbdtr_vals = nbdtr(k, n, p) + ... ax.plot(k, nbdtr_vals, label=rf"$n={n},\, p={p}$", + ... 
ls=style) + >>> ax.legend() + >>> ax.set_xlabel("$k$") + >>> ax.set_title("Negative binomial cumulative distribution function") + >>> plt.show() + + The negative binomial distribution is also available as + `scipy.stats.nbinom`. Using `nbdtr` directly can be much faster than + calling the ``cdf`` method of `scipy.stats.nbinom`, especially for small + arrays or individual values. To get the same results one must use the + following parametrization: ``nbinom(n, p).cdf(k)=nbdtr(k, n, p)``. + + >>> from scipy.stats import nbinom + >>> k, n, p = 5, 3, 0.5 + >>> nbdtr_res = nbdtr(k, n, p) # this will often be faster than below + >>> stats_res = nbinom(n, p).cdf(k) + >>> stats_res, nbdtr_res # test that results are equal + (0.85546875, 0.85546875) + + `nbdtr` can evaluate different parameter sets by providing arrays with + shapes compatible for broadcasting for `k`, `n` and `p`. Here we compute + the function for three different `k` at four locations `p`, resulting in + a 3x4 array. + + >>> k = np.array([[5], [10], [15]]) + >>> p = np.array([0.3, 0.5, 0.7, 0.9]) + >>> k.shape, p.shape + ((3, 1), (4,)) + + >>> nbdtr(k, 5, p) + array([[0.15026833, 0.62304687, 0.95265101, 0.9998531 ], + [0.48450894, 0.94076538, 0.99932777, 0.99999999], + [0.76249222, 0.99409103, 0.99999445, 1. ]]) + """) + +add_newdoc("nbdtrc", + r""" + nbdtrc(k, n, p, out=None) + + Negative binomial survival function. + + Returns the sum of the terms `k + 1` to infinity of the negative binomial + distribution probability mass function, + + .. math:: + + F = \sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j. + + In a sequence of Bernoulli trials with individual success probabilities + `p`, this is the probability that more than `k` failures precede the nth + success. + + Parameters + ---------- + k : array_like + The maximum number of allowed failures (nonnegative int). + n : array_like + The target number of successes (positive int). 
+ p : array_like + Probability of success in a single event (float). + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + F : scalar or ndarray + The probability of `k + 1` or more failures before `n` successes in a + sequence of events with individual success probability `p`. + + See Also + -------- + nbdtr : Negative binomial cumulative distribution function + nbdtrik : Negative binomial percentile function + scipy.stats.nbinom : Negative binomial distribution + + Notes + ----- + If floating point values are passed for `k` or `n`, they will be truncated + to integers. + + The terms are not summed directly; instead the regularized incomplete beta + function is employed, according to the formula, + + .. math:: + \mathrm{nbdtrc}(k, n, p) = I_{1 - p}(k + 1, n). + + Wrapper for the Cephes [1]_ routine `nbdtrc`. + + The negative binomial distribution is also available as + `scipy.stats.nbinom`. Using `nbdtrc` directly can improve performance + compared to the ``sf`` method of `scipy.stats.nbinom` (see last example). + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + Compute the function for ``k=10`` and ``n=5`` at ``p=0.5``. + + >>> import numpy as np + >>> from scipy.special import nbdtrc + >>> nbdtrc(10, 5, 0.5) + 0.059234619140624986 + + Compute the function for ``n=10`` and ``p=0.5`` at several points by + providing a NumPy array or list for `k`. + + >>> nbdtrc([5, 10, 15], 10, 0.5) + array([0.84912109, 0.41190147, 0.11476147]) + + Plot the function for four different parameter sets. + + >>> import matplotlib.pyplot as plt + >>> k = np.arange(130) + >>> n_parameters = [20, 20, 20, 80] + >>> p_parameters = [0.2, 0.5, 0.8, 0.5] + >>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot'] + >>> parameters_list = list(zip(p_parameters, n_parameters, + ... 
linestyles)) + >>> fig, ax = plt.subplots(figsize=(8, 8)) + >>> for parameter_set in parameters_list: + ... p, n, style = parameter_set + ... nbdtrc_vals = nbdtrc(k, n, p) + ... ax.plot(k, nbdtrc_vals, label=rf"$n={n},\, p={p}$", + ... ls=style) + >>> ax.legend() + >>> ax.set_xlabel("$k$") + >>> ax.set_title("Negative binomial distribution survival function") + >>> plt.show() + + The negative binomial distribution is also available as + `scipy.stats.nbinom`. Using `nbdtrc` directly can be much faster than + calling the ``sf`` method of `scipy.stats.nbinom`, especially for small + arrays or individual values. To get the same results one must use the + following parametrization: ``nbinom(n, p).sf(k)=nbdtrc(k, n, p)``. + + >>> from scipy.stats import nbinom + >>> k, n, p = 3, 5, 0.5 + >>> nbdtr_res = nbdtrc(k, n, p) # this will often be faster than below + >>> stats_res = nbinom(n, p).sf(k) + >>> stats_res, nbdtr_res # test that results are equal + (0.6367187499999999, 0.6367187499999999) + + `nbdtrc` can evaluate different parameter sets by providing arrays with + shapes compatible for broadcasting for `k`, `n` and `p`. Here we compute + the function for three different `k` at four locations `p`, resulting in + a 3x4 array. + + >>> k = np.array([[5], [10], [15]]) + >>> p = np.array([0.3, 0.5, 0.7, 0.9]) + >>> k.shape, p.shape + ((3, 1), (4,)) + + >>> nbdtrc(k, 5, p) + array([[8.49731667e-01, 3.76953125e-01, 4.73489874e-02, 1.46902600e-04], + [5.15491059e-01, 5.92346191e-02, 6.72234070e-04, 9.29610100e-09], + [2.37507779e-01, 5.90896606e-03, 5.55025308e-06, 3.26346760e-13]]) + """) + +add_newdoc( + "nbdtri", + r""" + nbdtri(k, n, y, out=None) + + Returns the inverse with respect to the parameter `p` of + `y = nbdtr(k, n, p)`, the negative binomial cumulative distribution + function. + + Parameters + ---------- + k : array_like + The maximum number of allowed failures (nonnegative int). + n : array_like + The target number of successes (positive int). 
+ y : array_like + The probability of `k` or fewer failures before `n` successes (float). + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + p : scalar or ndarray + Probability of success in a single event (float) such that + `nbdtr(k, n, p) = y`. + + See Also + -------- + nbdtr : Cumulative distribution function of the negative binomial. + nbdtrc : Negative binomial survival function. + scipy.stats.nbinom : negative binomial distribution. + nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`. + nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`. + scipy.stats.nbinom : Negative binomial distribution + + Notes + ----- + Wrapper for the Cephes [1]_ routine `nbdtri`. + + The negative binomial distribution is also available as + `scipy.stats.nbinom`. Using `nbdtri` directly can improve performance + compared to the ``ppf`` method of `scipy.stats.nbinom`. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + `nbdtri` is the inverse of `nbdtr` with respect to `p`. + Up to floating point errors the following holds: + ``nbdtri(k, n, nbdtr(k, n, p))=p``. + + >>> import numpy as np + >>> from scipy.special import nbdtri, nbdtr + >>> k, n, y = 5, 10, 0.2 + >>> cdf_val = nbdtr(k, n, y) + >>> nbdtri(k, n, cdf_val) + 0.20000000000000004 + + Compute the function for ``k=10`` and ``n=5`` at several points by + providing a NumPy array or list for `y`. + + >>> y = np.array([0.1, 0.4, 0.8]) + >>> nbdtri(3, 5, y) + array([0.34462319, 0.51653095, 0.69677416]) + + Plot the function for three different parameter sets. 
+ + >>> import matplotlib.pyplot as plt + >>> n_parameters = [5, 20, 30, 30] + >>> k_parameters = [20, 20, 60, 80] + >>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot'] + >>> parameters_list = list(zip(n_parameters, k_parameters, linestyles)) + >>> cdf_vals = np.linspace(0, 1, 1000) + >>> fig, ax = plt.subplots(figsize=(8, 8)) + >>> for parameter_set in parameters_list: + ... n, k, style = parameter_set + ... nbdtri_vals = nbdtri(k, n, cdf_vals) + ... ax.plot(cdf_vals, nbdtri_vals, label=rf"$k={k},\ n={n}$", + ... ls=style) + >>> ax.legend() + >>> ax.set_ylabel("$p$") + >>> ax.set_xlabel("$CDF$") + >>> title = "nbdtri: inverse of negative binomial CDF with respect to $p$" + >>> ax.set_title(title) + >>> plt.show() + + `nbdtri` can evaluate different parameter sets by providing arrays with + shapes compatible for broadcasting for `k`, `n` and `p`. Here we compute + the function for three different `k` at four locations `p`, resulting in + a 3x4 array. + + >>> k = np.array([[5], [10], [15]]) + >>> y = np.array([0.3, 0.5, 0.7, 0.9]) + >>> k.shape, y.shape + ((3, 1), (4,)) + + >>> nbdtri(k, 5, y) + array([[0.37258157, 0.45169416, 0.53249956, 0.64578407], + [0.24588501, 0.30451981, 0.36778453, 0.46397088], + [0.18362101, 0.22966758, 0.28054743, 0.36066188]]) + """) + +add_newdoc("nbdtrik", + r""" + nbdtrik(y, n, p, out=None) + + Negative binomial percentile function. + + Returns the inverse with respect to the parameter `k` of + `y = nbdtr(k, n, p)`, the negative binomial cumulative distribution + function. + + Parameters + ---------- + y : array_like + The probability of `k` or fewer failures before `n` successes (float). + n : array_like + The target number of successes (positive int). + p : array_like + Probability of success in a single event (float). + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + k : scalar or ndarray + The maximum number of allowed failures such that `nbdtr(k, n, p) = y`. 
+ + See Also + -------- + nbdtr : Cumulative distribution function of the negative binomial. + nbdtrc : Survival function of the negative binomial. + nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`. + nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`. + scipy.stats.nbinom : Negative binomial distribution + + Notes + ----- + Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`. + + Formula 26.5.26 of [2]_, + + .. math:: + \sum_{j=k + 1}^\infty {{n + j - 1} + \choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n), + + is used to reduce calculation of the cumulative distribution function to + that of a regularized incomplete beta :math:`I`. + + Computation of `k` involves a search for a value that produces the desired + value of `y`. The search relies on the monotonicity of `y` with `k`. + + References + ---------- + .. [1] Barry Brown, James Lovato, and Kathy Russell, + CDFLIB: Library of Fortran Routines for Cumulative Distribution + Functions, Inverses, and Other Parameters. + .. [2] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + Examples + -------- + Compute the negative binomial cumulative distribution function for an + exemplary parameter set. + + >>> import numpy as np + >>> from scipy.special import nbdtr, nbdtrik + >>> k, n, p = 5, 2, 0.5 + >>> cdf_value = nbdtr(k, n, p) + >>> cdf_value + 0.9375 + + Verify that `nbdtrik` recovers the original value for `k`. + + >>> nbdtrik(cdf_value, n, p) + 5.0 + + Plot the function for different parameter sets. + + >>> import matplotlib.pyplot as plt + >>> p_parameters = [0.2, 0.5, 0.7, 0.5] + >>> n_parameters = [30, 30, 30, 80] + >>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot'] + >>> parameters_list = list(zip(p_parameters, n_parameters, linestyles)) + >>> cdf_vals = np.linspace(0, 1, 1000) + >>> fig, ax = plt.subplots(figsize=(8, 8)) + >>> for parameter_set in parameters_list: + ... 
p, n, style = parameter_set + ... nbdtrik_vals = nbdtrik(cdf_vals, n, p) + ... ax.plot(cdf_vals, nbdtrik_vals, label=rf"$n={n},\ p={p}$", + ... ls=style) + >>> ax.legend() + >>> ax.set_ylabel("$k$") + >>> ax.set_xlabel("$CDF$") + >>> ax.set_title("Negative binomial percentile function") + >>> plt.show() + + The negative binomial distribution is also available as + `scipy.stats.nbinom`. The percentile function method ``ppf`` + returns the result of `nbdtrik` rounded up to integers: + + >>> from scipy.stats import nbinom + >>> q, n, p = 0.6, 5, 0.5 + >>> nbinom.ppf(q, n, p), nbdtrik(q, n, p) + (5.0, 4.800428460273882) + + """) + +add_newdoc("nbdtrin", + r""" + nbdtrin(k, y, p, out=None) + + Inverse of `nbdtr` vs `n`. + + Returns the inverse with respect to the parameter `n` of + `y = nbdtr(k, n, p)`, the negative binomial cumulative distribution + function. + + Parameters + ---------- + k : array_like + The maximum number of allowed failures (nonnegative int). + y : array_like + The probability of `k` or fewer failures before `n` successes (float). + p : array_like + Probability of success in a single event (float). + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + n : scalar or ndarray + The number of successes `n` such that `nbdtr(k, n, p) = y`. + + See Also + -------- + nbdtr : Cumulative distribution function of the negative binomial. + nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`. + nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`. + + Notes + ----- + Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`. + + Formula 26.5.26 of [2]_, + + .. math:: + \sum_{j=k + 1}^\infty {{n + j - 1} + \choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n), + + is used to reduce calculation of the cumulative distribution function to + that of a regularized incomplete beta :math:`I`. + + Computation of `n` involves a search for a value that produces the desired + value of `y`. 
The search relies on the monotonicity of `y` with `n`. + + References + ---------- + .. [1] Barry Brown, James Lovato, and Kathy Russell, + CDFLIB: Library of Fortran Routines for Cumulative Distribution + Functions, Inverses, and Other Parameters. + .. [2] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + Examples + -------- + Compute the negative binomial cumulative distribution function for an + exemplary parameter set. + + >>> from scipy.special import nbdtr, nbdtrin + >>> k, n, p = 5, 2, 0.5 + >>> cdf_value = nbdtr(k, n, p) + >>> cdf_value + 0.9375 + + Verify that `nbdtrin` recovers the original value for `n` up to floating + point accuracy. + + >>> nbdtrin(k, cdf_value, p) + 1.999999999998137 + """) + +add_newdoc("ncfdtr", + r""" + ncfdtr(dfn, dfd, nc, f, out=None) + + Cumulative distribution function of the non-central F distribution. + + The non-central F describes the distribution of, + + .. math:: + Z = \frac{X/d_n}{Y/d_d} + + where :math:`X` and :math:`Y` are independently distributed, with + :math:`X` distributed non-central :math:`\chi^2` with noncentrality + parameter `nc` and :math:`d_n` degrees of freedom, and :math:`Y` + distributed :math:`\chi^2` with :math:`d_d` degrees of freedom. + + Parameters + ---------- + dfn : array_like + Degrees of freedom of the numerator sum of squares. Range (0, inf). + dfd : array_like + Degrees of freedom of the denominator sum of squares. Range (0, inf). + nc : array_like + Noncentrality parameter. Should be in range (0, 1e4). + f : array_like + Quantiles, i.e. the upper limit of integration. + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + cdf : scalar or ndarray + The calculated CDF. If all inputs are scalar, the return will be a + float. Otherwise it will be an array. 
+ + See Also + -------- + ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`. + ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`. + ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`. + ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`. + + Notes + ----- + Wrapper for the CDFLIB [1]_ Fortran routine `cdffnc`. + + The cumulative distribution function is computed using Formula 26.6.20 of + [2]_: + + .. math:: + F(d_n, d_d, n_c, f) = \sum_{j=0}^\infty e^{-n_c/2} + \frac{(n_c/2)^j}{j!} I_{x}(\frac{d_n}{2} + j, \frac{d_d}{2}), + + where :math:`I` is the regularized incomplete beta function, and + :math:`x = f d_n/(f d_n + d_d)`. + + The computation time required for this routine is proportional to the + noncentrality parameter `nc`. Very large values of this parameter can + consume immense computer resources. This is why the search range is + bounded by 10,000. + + References + ---------- + .. [1] Barry Brown, James Lovato, and Kathy Russell, + CDFLIB: Library of Fortran Routines for Cumulative Distribution + Functions, Inverses, and Other Parameters. + .. [2] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + Examples + -------- + >>> import numpy as np + >>> from scipy import special + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + + Plot the CDF of the non-central F distribution, for nc=0. Compare with the + F-distribution from scipy.stats: + + >>> x = np.linspace(-1, 8, num=500) + >>> dfn = 3 + >>> dfd = 2 + >>> ncf_stats = stats.f.cdf(x, dfn, dfd) + >>> ncf_special = special.ncfdtr(dfn, dfd, 0, x) + + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111) + >>> ax.plot(x, ncf_stats, 'b-', lw=3) + >>> ax.plot(x, ncf_special, 'r-') + >>> plt.show() + + """) + +add_newdoc("ncfdtri", + """ + ncfdtri(dfn, dfd, nc, p, out=None) + + Inverse with respect to `f` of the CDF of the non-central F distribution. 
+ + See `ncfdtr` for more details. + + Parameters + ---------- + dfn : array_like + Degrees of freedom of the numerator sum of squares. Range (0, inf). + dfd : array_like + Degrees of freedom of the denominator sum of squares. Range (0, inf). + nc : array_like + Noncentrality parameter. Should be in range (0, 1e4). + p : array_like + Value of the cumulative distribution function. Must be in the + range [0, 1]. + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + f : scalar or ndarray + Quantiles, i.e., the upper limit of integration. + + See Also + -------- + ncfdtr : CDF of the non-central F distribution. + ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`. + ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`. + ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`. + + Examples + -------- + >>> from scipy.special import ncfdtr, ncfdtri + + Compute the CDF for several values of `f`: + + >>> f = [0.5, 1, 1.5] + >>> p = ncfdtr(2, 3, 1.5, f) + >>> p + array([ 0.20782291, 0.36107392, 0.47345752]) + + Compute the inverse. We recover the values of `f`, as expected: + + >>> ncfdtri(2, 3, 1.5, p) + array([ 0.5, 1. , 1.5]) + + """) + +add_newdoc("ncfdtridfd", + """ + ncfdtridfd(dfn, p, nc, f, out=None) + + Calculate degrees of freedom (denominator) for the noncentral F-distribution. + + This is the inverse with respect to `dfd` of `ncfdtr`. + See `ncfdtr` for more details. + + Parameters + ---------- + dfn : array_like + Degrees of freedom of the numerator sum of squares. Range (0, inf). + p : array_like + Value of the cumulative distribution function. Must be in the + range [0, 1]. + nc : array_like + Noncentrality parameter. Should be in range (0, 1e4). + f : array_like + Quantiles, i.e., the upper limit of integration. + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + dfd : scalar or ndarray + Degrees of freedom of the denominator sum of squares. 
+ + See Also + -------- + ncfdtr : CDF of the non-central F distribution. + ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`. + ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`. + ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`. + + Notes + ----- + The value of the cumulative noncentral F distribution is not necessarily + monotone in either degrees of freedom. There thus may be two values that + provide a given CDF value. This routine assumes monotonicity and will + find an arbitrary one of the two values. + + Examples + -------- + >>> from scipy.special import ncfdtr, ncfdtridfd + + Compute the CDF for several values of `dfd`: + + >>> dfd = [1, 2, 3] + >>> p = ncfdtr(2, dfd, 0.25, 15) + >>> p + array([ 0.8097138 , 0.93020416, 0.96787852]) + + Compute the inverse. We recover the values of `dfd`, as expected: + + >>> ncfdtridfd(2, p, 0.25, 15) + array([ 1., 2., 3.]) + + """) + +add_newdoc("ncfdtridfn", + """ + ncfdtridfn(p, dfd, nc, f, out=None) + + Calculate degrees of freedom (numerator) for the noncentral F-distribution. + + This is the inverse with respect to `dfn` of `ncfdtr`. + See `ncfdtr` for more details. + + Parameters + ---------- + p : array_like + Value of the cumulative distribution function. Must be in the + range [0, 1]. + dfd : array_like + Degrees of freedom of the denominator sum of squares. Range (0, inf). + nc : array_like + Noncentrality parameter. Should be in range (0, 1e4). + f : float + Quantiles, i.e., the upper limit of integration. + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + dfn : scalar or ndarray + Degrees of freedom of the numerator sum of squares. + + See Also + -------- + ncfdtr : CDF of the non-central F distribution. + ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`. + ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`. + ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`. 
+ + Notes + ----- + The value of the cumulative noncentral F distribution is not necessarily + monotone in either degrees of freedom. There thus may be two values that + provide a given CDF value. This routine assumes monotonicity and will + find an arbitrary one of the two values. + + Examples + -------- + >>> from scipy.special import ncfdtr, ncfdtridfn + + Compute the CDF for several values of `dfn`: + + >>> dfn = [1, 2, 3] + >>> p = ncfdtr(dfn, 2, 0.25, 15) + >>> p + array([ 0.92562363, 0.93020416, 0.93188394]) + + Compute the inverse. We recover the values of `dfn`, as expected: + + >>> ncfdtridfn(p, 2, 0.25, 15) + array([ 1., 2., 3.]) + + """) + +add_newdoc("ncfdtrinc", + """ + ncfdtrinc(dfn, dfd, p, f, out=None) + + Calculate non-centrality parameter for non-central F distribution. + + This is the inverse with respect to `nc` of `ncfdtr`. + See `ncfdtr` for more details. + + Parameters + ---------- + dfn : array_like + Degrees of freedom of the numerator sum of squares. Range (0, inf). + dfd : array_like + Degrees of freedom of the denominator sum of squares. Range (0, inf). + p : array_like + Value of the cumulative distribution function. Must be in the + range [0, 1]. + f : array_like + Quantiles, i.e., the upper limit of integration. + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + nc : scalar or ndarray + Noncentrality parameter. + + See Also + -------- + ncfdtr : CDF of the non-central F distribution. + ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`. + ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`. + ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`. + + Examples + -------- + >>> from scipy.special import ncfdtr, ncfdtrinc + + Compute the CDF for several values of `nc`: + + >>> nc = [0.5, 1.5, 2.0] + >>> p = ncfdtr(2, 3, nc, 15) + >>> p + array([ 0.96309246, 0.94327955, 0.93304098]) + + Compute the inverse. 
We recover the values of `nc`, as expected: + + >>> ncfdtrinc(2, 3, p, 15) + array([ 0.5, 1.5, 2. ]) + + """) + +add_newdoc("nctdtr", + """ + nctdtr(df, nc, t, out=None) + + Cumulative distribution function of the non-central `t` distribution. + + Parameters + ---------- + df : array_like + Degrees of freedom of the distribution. Should be in range (0, inf). + nc : array_like + Noncentrality parameter. Should be in range (-1e6, 1e6). + t : array_like + Quantiles, i.e., the upper limit of integration. + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + cdf : scalar or ndarray + The calculated CDF. If all inputs are scalar, the return will be a + float. Otherwise, it will be an array. + + See Also + -------- + nctdtrit : Inverse CDF (iCDF) of the non-central t distribution. + nctdtridf : Calculate degrees of freedom, given CDF and iCDF values. + nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values. + + Examples + -------- + >>> import numpy as np + >>> from scipy import special + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + + Plot the CDF of the non-central t distribution, for nc=0. Compare with the + t-distribution from scipy.stats: + + >>> x = np.linspace(-5, 5, num=500) + >>> df = 3 + >>> nct_stats = stats.t.cdf(x, df) + >>> nct_special = special.nctdtr(df, 0, x) + + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111) + >>> ax.plot(x, nct_stats, 'b-', lw=3) + >>> ax.plot(x, nct_special, 'r-') + >>> plt.show() + + """) + +add_newdoc("nctdtridf", + """ + nctdtridf(p, nc, t, out=None) + + Calculate degrees of freedom for non-central t distribution. + + See `nctdtr` for more details. + + Parameters + ---------- + p : array_like + CDF values, in range (0, 1]. + nc : array_like + Noncentrality parameter. Should be in range (-1e6, 1e6). + t : array_like + Quantiles, i.e., the upper limit of integration. 
+ out : ndarray, optional + Optional output array for the function results + + Returns + ------- + df : scalar or ndarray + The degrees of freedom. If all inputs are scalar, the return will be a + float. Otherwise, it will be an array. + + See Also + -------- + nctdtr : CDF of the non-central `t` distribution. + nctdtrit : Inverse CDF (iCDF) of the non-central t distribution. + nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values. + + Examples + -------- + >>> from scipy.special import nctdtr, nctdtridf + + Compute the CDF for several values of `df`: + + >>> df = [1, 2, 3] + >>> p = nctdtr(df, 0.25, 1) + >>> p + array([0.67491974, 0.716464 , 0.73349456]) + + Compute the inverse. We recover the values of `df`, as expected: + + >>> nctdtridf(p, 0.25, 1) + array([1., 2., 3.]) + + """) + +add_newdoc("nctdtrinc", + """ + nctdtrinc(df, p, t, out=None) + + Calculate non-centrality parameter for non-central t distribution. + + See `nctdtr` for more details. + + Parameters + ---------- + df : array_like + Degrees of freedom of the distribution. Should be in range (0, inf). + p : array_like + CDF values, in range (0, 1]. + t : array_like + Quantiles, i.e., the upper limit of integration. + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + nc : scalar or ndarray + Noncentrality parameter + + See Also + -------- + nctdtr : CDF of the non-central `t` distribution. + nctdtrit : Inverse CDF (iCDF) of the non-central t distribution. + nctdtridf : Calculate degrees of freedom, given CDF and iCDF values. + + Examples + -------- + >>> from scipy.special import nctdtr, nctdtrinc + + Compute the CDF for several values of `nc`: + + >>> nc = [0.5, 1.5, 2.5] + >>> p = nctdtr(3, nc, 1.5) + >>> p + array([0.77569497, 0.45524533, 0.1668691 ]) + + Compute the inverse. 
We recover the values of `nc`, as expected: + + >>> nctdtrinc(3, p, 1.5) + array([0.5, 1.5, 2.5]) + + """) + +add_newdoc("nctdtrit", + """ + nctdtrit(df, nc, p, out=None) + + Inverse cumulative distribution function of the non-central t distribution. + + See `nctdtr` for more details. + + Parameters + ---------- + df : array_like + Degrees of freedom of the distribution. Should be in range (0, inf). + nc : array_like + Noncentrality parameter. Should be in range (-1e6, 1e6). + p : array_like + CDF values, in range (0, 1]. + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + t : scalar or ndarray + Quantiles + + See Also + -------- + nctdtr : CDF of the non-central `t` distribution. + nctdtridf : Calculate degrees of freedom, given CDF and iCDF values. + nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values. + + Examples + -------- + >>> from scipy.special import nctdtr, nctdtrit + + Compute the CDF for several values of `t`: + + >>> t = [0.5, 1, 1.5] + >>> p = nctdtr(3, 1, t) + >>> p + array([0.29811049, 0.46922687, 0.6257559 ]) + + Compute the inverse. We recover the values of `t`, as expected: + + >>> nctdtrit(3, 1, p) + array([0.5, 1. , 1.5]) + + """) + +add_newdoc("ndtr", + r""" + ndtr(x, out=None) + + Cumulative distribution of the standard normal distribution. + + Returns the area under the standard Gaussian probability + density function, integrated from minus infinity to `x` + + .. 
math:: + + \frac{1}{\sqrt{2\pi}} \int_{-\infty}^x \exp(-t^2/2) dt + + Parameters + ---------- + x : array_like, real or complex + Argument + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + The value of the normal CDF evaluated at `x` + + See Also + -------- + log_ndtr : Logarithm of ndtr + ndtri : Inverse of ndtr, standard normal percentile function + erf : Error function + erfc : 1 - erf + scipy.stats.norm : Normal distribution + + Examples + -------- + Evaluate `ndtr` at one point. + + >>> import numpy as np + >>> from scipy.special import ndtr + >>> ndtr(0.5) + 0.6914624612740131 + + Evaluate the function at several points by providing a NumPy array + or list for `x`. + + >>> ndtr([0, 0.5, 2]) + array([0.5 , 0.69146246, 0.97724987]) + + Plot the function. + + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(-5, 5, 100) + >>> fig, ax = plt.subplots() + >>> ax.plot(x, ndtr(x)) + >>> ax.set_title(r"Standard normal cumulative distribution function $\Phi$") + >>> plt.show() + """) + + +add_newdoc("nrdtrimn", + """ + nrdtrimn(p, std, x, out=None) + + Calculate mean of normal distribution given other params. + + Parameters + ---------- + p : array_like + CDF values, in range (0, 1]. + std : array_like + Standard deviation. + x : array_like + Quantiles, i.e. the upper limit of integration. + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + mn : scalar or ndarray + The mean of the normal distribution. 
+ + See Also + -------- + scipy.stats.norm : Normal distribution + ndtr : Standard normal cumulative probability distribution + ndtri : Inverse of standard normal CDF with respect to quantile + nrdtrisd : Inverse of normal distribution CDF with respect to + standard deviation + + Examples + -------- + `nrdtrimn` can be used to recover the mean of a normal distribution + if we know the CDF value `p` for a given quantile `x` and the + standard deviation `std`. First, we calculate + the normal distribution CDF for an exemplary parameter set. + + >>> from scipy.stats import norm + >>> mean = 3. + >>> std = 2. + >>> x = 6. + >>> p = norm.cdf(x, loc=mean, scale=std) + >>> p + 0.9331927987311419 + + Verify that `nrdtrimn` returns the original value for `mean`. + + >>> from scipy.special import nrdtrimn + >>> nrdtrimn(p, std, x) + 3.0000000000000004 + + """) + +add_newdoc("nrdtrisd", + """ + nrdtrisd(mn, p, x, out=None) + + Calculate standard deviation of normal distribution given other params. + + Parameters + ---------- + mn : scalar or ndarray + The mean of the normal distribution. + p : array_like + CDF values, in range (0, 1]. + x : array_like + Quantiles, i.e. the upper limit of integration. + + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + std : scalar or ndarray + Standard deviation. + + See Also + -------- + scipy.stats.norm : Normal distribution + ndtr : Standard normal cumulative probability distribution + ndtri : Inverse of standard normal CDF with respect to quantile + nrdtrimn : Inverse of normal distribution CDF with respect to + mean + + Examples + -------- + `nrdtrisd` can be used to recover the standard deviation of a normal + distribution if we know the CDF value `p` for a given quantile `x` and + the mean `mn`. First, we calculate the normal distribution CDF for an + exemplary parameter set. + + >>> from scipy.stats import norm + >>> mean = 3. + >>> std = 2. + >>> x = 6. 
+ >>> p = norm.cdf(x, loc=mean, scale=std) + >>> p + 0.9331927987311419 + + Verify that `nrdtrisd` returns the original value for `std`. + + >>> from scipy.special import nrdtrisd + >>> nrdtrisd(mean, p, x) + 2.0000000000000004 + + """) + +add_newdoc("log_ndtr", + """ + log_ndtr(x, out=None) + + Logarithm of Gaussian cumulative distribution function. + + Returns the log of the area under the standard Gaussian probability + density function, integrated from minus infinity to `x`:: + + log(1/sqrt(2*pi) * integral(exp(-t**2 / 2), t=-inf..x)) + + Parameters + ---------- + x : array_like, real or complex + Argument + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + The value of the log of the normal CDF evaluated at `x` + + See Also + -------- + erf + erfc + scipy.stats.norm + ndtr + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import log_ndtr, ndtr + + The benefit of ``log_ndtr(x)`` over the naive implementation + ``np.log(ndtr(x))`` is most evident with moderate to large positive + values of ``x``: + + >>> x = np.array([6, 7, 9, 12, 15, 25]) + >>> log_ndtr(x) + array([-9.86587646e-010, -1.27981254e-012, -1.12858841e-019, + -1.77648211e-033, -3.67096620e-051, -3.05669671e-138]) + + The results of the naive calculation for the moderate ``x`` values + have only 5 or 6 correct significant digits. For values of ``x`` + greater than approximately 8.3, the naive expression returns 0: + + >>> np.log(ndtr(x)) + array([-9.86587701e-10, -1.27986510e-12, 0.00000000e+00, + 0.00000000e+00, 0.00000000e+00, 0.00000000e+00]) + """) + +add_newdoc("ndtri", + """ + ndtri(y, out=None) + + Inverse of `ndtr` vs x + + Returns the argument x for which the area under the standard normal + probability density function (integrated from minus infinity to `x`) + is equal to y. 
+    y : array_like
+        Probability
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    x : scalar or ndarray
+        Value of x such that ``ndtr(x) == y``.
+ + Parameters + ---------- + m : array_like + Mode parameter m (nonnegative) + n : array_like + Mode parameter n (>= m) + c : array_like + Spheroidal parameter + x : array_like + Parameter x (``|x| < 1.0``) + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + s : scalar or ndarray + Value of the function + sp : scalar or ndarray + Value of the derivative vs x + + See Also + -------- + obl_ang1_cv + + """) + +add_newdoc("obl_ang1_cv", + """ + obl_ang1_cv(m, n, c, cv, x, out=None) + + Oblate spheroidal angular function obl_ang1 for precomputed characteristic value + + Computes the oblate spheroidal angular function of the first kind + and its derivative (with respect to `x`) for mode parameters m>=0 + and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires + pre-computed characteristic value. + + Parameters + ---------- + m : array_like + Mode parameter m (nonnegative) + n : array_like + Mode parameter n (>= m) + c : array_like + Spheroidal parameter + cv : array_like + Characteristic value + x : array_like + Parameter x (``|x| < 1.0``) + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + s : scalar or ndarray + Value of the function + sp : scalar or ndarray + Value of the derivative vs x + + See Also + -------- + obl_ang1 + + """) + +add_newdoc("obl_cv", + """ + obl_cv(m, n, c, out=None) + + Characteristic value of oblate spheroidal function + + Computes the characteristic value of oblate spheroidal wave + functions of order `m`, `n` (n>=m) and spheroidal parameter `c`. 
+ + Parameters + ---------- + m : array_like + Mode parameter m (nonnegative) + n : array_like + Mode parameter n (>= m) + c : array_like + Spheroidal parameter + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + cv : scalar or ndarray + Characteristic value + + """) + +add_newdoc("obl_rad1", + """ + obl_rad1(m, n, c, x, out=None) + + Oblate spheroidal radial function of the first kind and its derivative + + Computes the oblate spheroidal radial function of the first kind + and its derivative (with respect to `x`) for mode parameters m>=0 + and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. + + Parameters + ---------- + m : array_like + Mode parameter m (nonnegative) + n : array_like + Mode parameter n (>= m) + c : array_like + Spheroidal parameter + x : array_like + Parameter x (``|x| < 1.0``) + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + s : scalar or ndarray + Value of the function + sp : scalar or ndarray + Value of the derivative vs x + + See Also + -------- + obl_rad1_cv + + """) + +add_newdoc("obl_rad1_cv", + """ + obl_rad1_cv(m, n, c, cv, x, out=None) + + Oblate spheroidal radial function obl_rad1 for precomputed characteristic value + + Computes the oblate spheroidal radial function of the first kind + and its derivative (with respect to `x`) for mode parameters m>=0 + and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires + pre-computed characteristic value. 
+ + Parameters + ---------- + m : array_like + Mode parameter m (nonnegative) + n : array_like + Mode parameter n (>= m) + c : array_like + Spheroidal parameter + cv : array_like + Characteristic value + x : array_like + Parameter x (``|x| < 1.0``) + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + s : scalar or ndarray + Value of the function + sp : scalar or ndarray + Value of the derivative vs x + + See Also + -------- + obl_rad1 + + """) + +add_newdoc("obl_rad2", + """ + obl_rad2(m, n, c, x, out=None) + + Oblate spheroidal radial function of the second kind and its derivative. + + Computes the oblate spheroidal radial function of the second kind + and its derivative (with respect to `x`) for mode parameters m>=0 + and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. + + Parameters + ---------- + m : array_like + Mode parameter m (nonnegative) + n : array_like + Mode parameter n (>= m) + c : array_like + Spheroidal parameter + x : array_like + Parameter x (``|x| < 1.0``) + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + s : scalar or ndarray + Value of the function + sp : scalar or ndarray + Value of the derivative vs x + + See Also + -------- + obl_rad2_cv + + """) + +add_newdoc("obl_rad2_cv", + """ + obl_rad2_cv(m, n, c, cv, x, out=None) + + Oblate spheroidal radial function obl_rad2 for precomputed characteristic value + + Computes the oblate spheroidal radial function of the second kind + and its derivative (with respect to `x`) for mode parameters m>=0 + and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires + pre-computed characteristic value. 
+ + Parameters + ---------- + m : array_like + Mode parameter m (nonnegative) + n : array_like + Mode parameter n (>= m) + c : array_like + Spheroidal parameter + cv : array_like + Characteristic value + x : array_like + Parameter x (``|x| < 1.0``) + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + s : scalar or ndarray + Value of the function + sp : scalar or ndarray + Value of the derivative vs x + + See Also + -------- + obl_rad2 + """) + +add_newdoc("pbdv", + """ + pbdv(v, x, out=None) + + Parabolic cylinder function D + + Returns (d, dp) the parabolic cylinder function Dv(x) in d and the + derivative, Dv'(x) in dp. + + Parameters + ---------- + v : array_like + Real parameter + x : array_like + Real argument + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + d : scalar or ndarray + Value of the function + dp : scalar or ndarray + Value of the derivative vs x + """) + +add_newdoc("pbvv", + """ + pbvv(v, x, out=None) + + Parabolic cylinder function V + + Returns the parabolic cylinder function Vv(x) in v and the + derivative, Vv'(x) in vp. + + Parameters + ---------- + v : array_like + Real parameter + x : array_like + Real argument + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + v : scalar or ndarray + Value of the function + vp : scalar or ndarray + Value of the derivative vs x + """) + +add_newdoc("pbwa", + r""" + pbwa(a, x, out=None) + + Parabolic cylinder function W. + + The function is a particular solution to the differential equation + + .. math:: + + y'' + \left(\frac{1}{4}x^2 - a\right)y = 0, + + for a full definition see section 12.14 in [1]_. 
+    .. [1] Digital Library of Mathematical Functions, 12.14.
+           https://dlmf.nist.gov/12.14
+    Inverse to `pdtr` vs `k`.
+ + Parameters + ---------- + m : array_like + Shape parameter (nonnegative, real) + p : array_like + Probability + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + The number of occurrences `k` such that ``pdtr(k, m) = p`` + + See Also + -------- + pdtr : Poisson cumulative distribution function + pdtrc : Poisson survival function + pdtri : inverse of `pdtr` with respect to `m` + + """) + +add_newdoc("poch", + r""" + poch(z, m, out=None) + + Pochhammer symbol. + + The Pochhammer symbol (rising factorial) is defined as + + .. math:: + + (z)_m = \frac{\Gamma(z + m)}{\Gamma(z)} + + For positive integer `m` it reads + + .. math:: + + (z)_m = z (z + 1) ... (z + m - 1) + + See [dlmf]_ for more details. + + Parameters + ---------- + z, m : array_like + Real-valued arguments. + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + The value of the function. + + References + ---------- + .. [dlmf] Nist, Digital Library of Mathematical Functions + https://dlmf.nist.gov/5.2#iii + + Examples + -------- + >>> import scipy.special as sc + + It is 1 when m is 0. + + >>> sc.poch([1, 2, 3, 4], 0) + array([1., 1., 1., 1.]) + + For z equal to 1 it reduces to the factorial function. + + >>> sc.poch(1, 5) + 120.0 + >>> 1 * 2 * 3 * 4 * 5 + 120 + + It can be expressed in terms of the gamma function. + + >>> z, m = 3.7, 2.1 + >>> sc.poch(z, m) + 20.529581933776953 + >>> sc.gamma(z + m) / sc.gamma(z) + 20.52958193377696 + + """) + +add_newdoc("powm1", """ + powm1(x, y, out=None) + + Computes ``x**y - 1``. + + This function is useful when `y` is near 0, or when `x` is near 1. + + The function is implemented for real types only (unlike ``numpy.power``, + which accepts complex inputs). + + Parameters + ---------- + x : array_like + The base. Must be a real type (i.e. integer or float, not complex). + y : array_like + The exponent. Must be a real type (i.e. 
integer or float, not complex). + + Returns + ------- + array_like + Result of the calculation + + Notes + ----- + .. versionadded:: 1.10.0 + + The underlying code is implemented for single precision and double + precision floats only. Unlike `numpy.power`, integer inputs to + `powm1` are converted to floating point, and complex inputs are + not accepted. + + Note the following edge cases: + + * ``powm1(x, 0)`` returns 0 for any ``x``, including 0, ``inf`` + and ``nan``. + * ``powm1(1, y)`` returns 0 for any ``y``, including ``nan`` + and ``inf``. + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import powm1 + + >>> x = np.array([1.2, 10.0, 0.9999999975]) + >>> y = np.array([1e-9, 1e-11, 0.1875]) + >>> powm1(x, y) + array([ 1.82321557e-10, 2.30258509e-11, -4.68749998e-10]) + + It can be verified that the relative errors in those results + are less than 2.5e-16. + + Compare that to the result of ``x**y - 1``, where the + relative errors are all larger than 8e-8: + + >>> x**y - 1 + array([ 1.82321491e-10, 2.30258035e-11, -4.68750039e-10]) + + """) + + +add_newdoc("pro_ang1", + """ + pro_ang1(m, n, c, x, out=None) + + Prolate spheroidal angular function of the first kind and its derivative + + Computes the prolate spheroidal angular function of the first kind + and its derivative (with respect to `x`) for mode parameters m>=0 + and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. 
+ + Parameters + ---------- + m : array_like + Nonnegative mode parameter m + n : array_like + Mode parameter n (>= m) + c : array_like + Spheroidal parameter + x : array_like + Real parameter (``|x| < 1.0``) + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + s : scalar or ndarray + Value of the function + sp : scalar or ndarray + Value of the derivative vs x + """) + +add_newdoc("pro_ang1_cv", + """ + pro_ang1_cv(m, n, c, cv, x, out=None) + + Prolate spheroidal angular function pro_ang1 for precomputed characteristic value + + Computes the prolate spheroidal angular function of the first kind + and its derivative (with respect to `x`) for mode parameters m>=0 + and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires + pre-computed characteristic value. + + Parameters + ---------- + m : array_like + Nonnegative mode parameter m + n : array_like + Mode parameter n (>= m) + c : array_like + Spheroidal parameter + cv : array_like + Characteristic value + x : array_like + Real parameter (``|x| < 1.0``) + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + s : scalar or ndarray + Value of the function + sp : scalar or ndarray + Value of the derivative vs x + """) + +add_newdoc("pro_cv", + """ + pro_cv(m, n, c, out=None) + + Characteristic value of prolate spheroidal function + + Computes the characteristic value of prolate spheroidal wave + functions of order `m`, `n` (n>=m) and spheroidal parameter `c`. 
+ + Parameters + ---------- + m : array_like + Nonnegative mode parameter m + n : array_like + Mode parameter n (>= m) + c : array_like + Spheroidal parameter + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + cv : scalar or ndarray + Characteristic value + """) + +add_newdoc("pro_rad1", + """ + pro_rad1(m, n, c, x, out=None) + + Prolate spheroidal radial function of the first kind and its derivative + + Computes the prolate spheroidal radial function of the first kind + and its derivative (with respect to `x`) for mode parameters m>=0 + and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. + + Parameters + ---------- + m : array_like + Nonnegative mode parameter m + n : array_like + Mode parameter n (>= m) + c : array_like + Spheroidal parameter + x : array_like + Real parameter (``|x| < 1.0``) + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + s : scalar or ndarray + Value of the function + sp : scalar or ndarray + Value of the derivative vs x + """) + +add_newdoc("pro_rad1_cv", + """ + pro_rad1_cv(m, n, c, cv, x, out=None) + + Prolate spheroidal radial function pro_rad1 for precomputed characteristic value + + Computes the prolate spheroidal radial function of the first kind + and its derivative (with respect to `x`) for mode parameters m>=0 + and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires + pre-computed characteristic value. 
+    c : array_like
+        Spheroidal parameter
+    x : array_like
+ + Parameters + ---------- + m : array_like + Nonnegative mode parameter m + n : array_like + Mode parameter n (>= m) + c : array_like + Spheroidal parameter + cv : array_like + Characteristic value + x : array_like + Real parameter (``|x| < 1.0``) + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + s : scalar or ndarray + Value of the function + sp : scalar or ndarray + Value of the derivative vs x + """) + +add_newdoc("pseudo_huber", + r""" + pseudo_huber(delta, r, out=None) + + Pseudo-Huber loss function. + + .. math:: \mathrm{pseudo\_huber}(\delta, r) = + \delta^2 \left( \sqrt{ 1 + \left( \frac{r}{\delta} \right)^2 } - 1 \right) + + Parameters + ---------- + delta : array_like + Input array, indicating the soft quadratic vs. linear loss changepoint. + r : array_like + Input array, possibly representing residuals. + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + res : scalar or ndarray + The computed Pseudo-Huber loss function values. + + See Also + -------- + huber: Similar function which this function approximates + + Notes + ----- + Like `huber`, `pseudo_huber` often serves as a robust loss function + in statistics or machine learning to reduce the influence of outliers. + Unlike `huber`, `pseudo_huber` is smooth. + + Typically, `r` represents residuals, the difference + between a model prediction and data. Then, for :math:`|r|\leq\delta`, + `pseudo_huber` resembles the squared error and for :math:`|r|>\delta` the + absolute error. This way, the Pseudo-Huber loss often achieves + a fast convergence in model fitting for small residuals like the squared + error loss function and still reduces the influence of outliers + (:math:`|r|>\delta`) like the absolute error loss. As :math:`\delta` is + the cutoff between squared and absolute error regimes, it has + to be tuned carefully for each problem. 
`pseudo_huber` is also + convex, making it suitable for gradient based optimization. [1]_ [2]_ + + .. versionadded:: 0.15.0 + + References + ---------- + .. [1] Hartley, Zisserman, "Multiple View Geometry in Computer Vision". + 2003. Cambridge University Press. p. 619 + .. [2] Charbonnier et al. "Deterministic edge-preserving regularization + in computed imaging". 1997. IEEE Trans. Image Processing. + 6 (2): 298 - 311. + + Examples + -------- + Import all necessary modules. + + >>> import numpy as np + >>> from scipy.special import pseudo_huber, huber + >>> import matplotlib.pyplot as plt + + Calculate the function for ``delta=1`` at ``r=2``. + + >>> pseudo_huber(1., 2.) + 1.2360679774997898 + + Calculate the function at ``r=2`` for different `delta` by providing + a list or NumPy array for `delta`. + + >>> pseudo_huber([1., 2., 4.], 3.) + array([2.16227766, 3.21110255, 4. ]) + + Calculate the function for ``delta=1`` at several points by providing + a list or NumPy array for `r`. + + >>> pseudo_huber(2., np.array([1., 1.5, 3., 4.])) + array([0.47213595, 1. , 3.21110255, 4.94427191]) + + The function can be calculated for different `delta` and `r` by + providing arrays for both with compatible shapes for broadcasting. + + >>> r = np.array([1., 2.5, 8., 10.]) + >>> deltas = np.array([[1.], [5.], [9.]]) + >>> print(r.shape, deltas.shape) + (4,) (3, 1) + + >>> pseudo_huber(deltas, r) + array([[ 0.41421356, 1.6925824 , 7.06225775, 9.04987562], + [ 0.49509757, 2.95084972, 22.16990566, 30.90169944], + [ 0.49846624, 3.06693762, 27.37435121, 40.08261642]]) + + Plot the function for different `delta`. + + >>> x = np.linspace(-4, 4, 500) + >>> deltas = [1, 2, 3] + >>> linestyles = ["dashed", "dotted", "dashdot"] + >>> fig, ax = plt.subplots() + >>> combined_plot_parameters = list(zip(deltas, linestyles)) + >>> for delta, style in combined_plot_parameters: + ... ax.plot(x, pseudo_huber(delta, x), label=rf"$\delta={delta}$", + ... 
ls=style) + >>> ax.legend(loc="upper center") + >>> ax.set_xlabel("$x$") + >>> ax.set_title(r"Pseudo-Huber loss function $h_{\delta}(x)$") + >>> ax.set_xlim(-4, 4) + >>> ax.set_ylim(0, 8) + >>> plt.show() + + Finally, illustrate the difference between `huber` and `pseudo_huber` by + plotting them and their gradients with respect to `r`. The plot shows + that `pseudo_huber` is continuously differentiable while `huber` is not + at the points :math:`\pm\delta`. + + >>> def huber_grad(delta, x): + ... grad = np.copy(x) + ... linear_area = np.argwhere(np.abs(x) > delta) + ... grad[linear_area]=delta*np.sign(x[linear_area]) + ... return grad + >>> def pseudo_huber_grad(delta, x): + ... return x* (1+(x/delta)**2)**(-0.5) + >>> x=np.linspace(-3, 3, 500) + >>> delta = 1. + >>> fig, ax = plt.subplots(figsize=(7, 7)) + >>> ax.plot(x, huber(delta, x), label="Huber", ls="dashed") + >>> ax.plot(x, huber_grad(delta, x), label="Huber Gradient", ls="dashdot") + >>> ax.plot(x, pseudo_huber(delta, x), label="Pseudo-Huber", ls="dotted") + >>> ax.plot(x, pseudo_huber_grad(delta, x), label="Pseudo-Huber Gradient", + ... ls="solid") + >>> ax.legend(loc="upper center") + >>> plt.show() + """) + +add_newdoc("psi", + """ + psi(z, out=None) + + The digamma function. + + The logarithmic derivative of the gamma function evaluated at ``z``. + + Parameters + ---------- + z : array_like + Real or complex argument. + out : ndarray, optional + Array for the computed values of ``psi``. + + Returns + ------- + digamma : scalar or ndarray + Computed values of ``psi``. + + Notes + ----- + For large values not close to the negative real axis, ``psi`` is + computed using the asymptotic series (5.11.2) from [1]_. For small + arguments not close to the negative real axis, the recurrence + relation (5.5.2) from [1]_ is used until the argument is large + enough to use the asymptotic series. For values close to the + negative real axis, the reflection formula (5.5.4) from [1]_ is + used first. 
Note that ``psi`` has a family of zeros on the + negative real axis which occur between the poles at nonpositive + integers. Around the zeros the reflection formula suffers from + cancellation and the implementation loses precision. The sole + positive zero and the first negative zero, however, are handled + separately by precomputing series expansions using [2]_, so the + function should maintain full accuracy around the origin. + + References + ---------- + .. [1] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/5 + .. [2] Fredrik Johansson and others. + "mpmath: a Python library for arbitrary-precision floating-point arithmetic" + (Version 0.19) http://mpmath.org/ + + Examples + -------- + >>> from scipy.special import psi + >>> z = 3 + 4j + >>> psi(z) + (1.55035981733341+1.0105022091860445j) + + Verify psi(z) = psi(z + 1) - 1/z: + + >>> psi(z + 1) - 1/z + (1.55035981733341+1.0105022091860445j) + """) + +add_newdoc("radian", + """ + radian(d, m, s, out=None) + + Convert from degrees to radians. + + Returns the angle given in (d)egrees, (m)inutes, and (s)econds in + radians. + + Parameters + ---------- + d : array_like + Degrees, can be real-valued. + m : array_like + Minutes, can be real-valued. + s : array_like + Seconds, can be real-valued. + out : ndarray, optional + Optional output array for the function results. + + Returns + ------- + scalar or ndarray + Values of the inputs in radians. + + Examples + -------- + >>> import scipy.special as sc + + There are many ways to specify an angle. + + >>> sc.radian(90, 0, 0) + 1.5707963267948966 + >>> sc.radian(0, 60 * 90, 0) + 1.5707963267948966 + >>> sc.radian(0, 0, 60**2 * 90) + 1.5707963267948966 + + The inputs can be real-valued. + + >>> sc.radian(1.5, 0, 0) + 0.02617993877991494 + >>> sc.radian(1, 30, 0) + 0.02617993877991494 + + """) + +add_newdoc("rel_entr", + r""" + rel_entr(x, y, out=None) + + Elementwise function for computing relative entropy. + + .. 
math:: + + \mathrm{rel\_entr}(x, y) = + \begin{cases} + x \log(x / y) & x > 0, y > 0 \\ + 0 & x = 0, y \ge 0 \\ + \infty & \text{otherwise} + \end{cases} + + Parameters + ---------- + x, y : array_like + Input arrays + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + Relative entropy of the inputs + + See Also + -------- + entr, kl_div, scipy.stats.entropy + + Notes + ----- + .. versionadded:: 0.15.0 + + This function is jointly convex in x and y. + + The origin of this function is in convex programming; see + [1]_. Given two discrete probability distributions :math:`p_1, + \ldots, p_n` and :math:`q_1, \ldots, q_n`, the definition of relative + entropy in the context of *information theory* is + + .. math:: + + \sum_{i = 1}^n \mathrm{rel\_entr}(p_i, q_i). + + To compute the latter quantity, use `scipy.stats.entropy`. + + See [2]_ for details. + + References + ---------- + .. [1] Boyd, Stephen and Lieven Vandenberghe. *Convex optimization*. + Cambridge University Press, 2004. + :doi:`https://doi.org/10.1017/CBO9780511804441` + .. [2] Kullback-Leibler divergence, + https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence + + """) + +add_newdoc("rgamma", + r""" + rgamma(z, out=None) + + Reciprocal of the gamma function. + + Defined as :math:`1 / \Gamma(z)`, where :math:`\Gamma` is the + gamma function. For more on the gamma function see `gamma`. + + Parameters + ---------- + z : array_like + Real or complex valued input + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + Function results + + See Also + -------- + gamma, gammaln, loggamma + + Notes + ----- + The gamma function has no zeros and has simple poles at + nonpositive integers, so `rgamma` is an entire function with zeros + at the nonpositive integers. See the discussion in [dlmf]_ for + more details. + + References + ---------- + .. 
+ [dlmf] NIST, Digital Library of Mathematical Functions,
+ out : tuple of ndarray, optional + Optional output arrays for the function results + + Returns + ------- + si : scalar or ndarray + Hyperbolic sine integral at ``x`` + ci : scalar or ndarray + Hyperbolic cosine integral at ``x`` + + See Also + -------- + sici : Sine and cosine integrals. + exp1 : Exponential integral E1. + expi : Exponential integral Ei. + + Notes + ----- + For real arguments with ``x < 0``, ``chi`` is the real part of the + hyperbolic cosine integral. For such points ``chi(x)`` and ``chi(x + + 0j)`` differ by a factor of ``1j*pi``. + + For real arguments the function is computed by calling Cephes' + [2]_ *shichi* routine. For complex arguments the algorithm is based + on Mpmath's [3]_ *shi* and *chi* routines. + + References + ---------- + .. [1] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + (See Section 5.2.) + .. [2] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + .. [3] Fredrik Johansson and others. + "mpmath: a Python library for arbitrary-precision floating-point + arithmetic" (Version 0.19) http://mpmath.org/ + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.special import shichi, sici + + `shichi` accepts real or complex input: + + >>> shichi(0.5) + (0.5069967498196671, -0.05277684495649357) + >>> shichi(0.5 + 2.5j) + ((0.11772029666668238+1.831091777729851j), + (0.29912435887648825+1.7395351121166562j)) + + The hyperbolic sine and cosine integrals Shi(z) and Chi(z) are + related to the sine and cosine integrals Si(z) and Ci(z) by + + * Shi(z) = -i*Si(i*z) + * Chi(z) = Ci(-i*z) + i*pi/2 + + >>> z = 0.25 + 5j + >>> shi, chi = shichi(z) + >>> shi, -1j*sici(1j*z)[0] # Should be the same. + ((-0.04834719325101729+1.5469354086921228j), + (-0.04834719325101729+1.5469354086921228j)) + >>> chi, sici(-1j*z)[1] + 1j*np.pi/2 # Should be the same. 
+ ((-0.19568708973868087+1.556276312103824j), + (-0.19568708973868087+1.556276312103824j)) + + Plot the functions evaluated on the real axis: + + >>> xp = np.geomspace(1e-8, 4.0, 250) + >>> x = np.concatenate((-xp[::-1], xp)) + >>> shi, chi = shichi(x) + + >>> fig, ax = plt.subplots() + >>> ax.plot(x, shi, label='Shi(x)') + >>> ax.plot(x, chi, '--', label='Chi(x)') + >>> ax.set_xlabel('x') + >>> ax.set_title('Hyperbolic Sine and Cosine Integrals') + >>> ax.legend(shadow=True, framealpha=1, loc='lower right') + >>> ax.grid(True) + >>> plt.show() + + """) + +add_newdoc("sici", + r""" + sici(x, out=None) + + Sine and cosine integrals. + + The sine integral is + + .. math:: + + \int_0^x \frac{\sin{t}}{t}dt + + and the cosine integral is + + .. math:: + + \gamma + \log(x) + \int_0^x \frac{\cos{t} - 1}{t}dt + + where :math:`\gamma` is Euler's constant and :math:`\log` is the + principal branch of the logarithm [1]_. + + Parameters + ---------- + x : array_like + Real or complex points at which to compute the sine and cosine + integrals. + out : tuple of ndarray, optional + Optional output arrays for the function results + + Returns + ------- + si : scalar or ndarray + Sine integral at ``x`` + ci : scalar or ndarray + Cosine integral at ``x`` + + See Also + -------- + shichi : Hyperbolic sine and cosine integrals. + exp1 : Exponential integral E1. + expi : Exponential integral Ei. + + Notes + ----- + For real arguments with ``x < 0``, ``ci`` is the real part of the + cosine integral. For such points ``ci(x)`` and ``ci(x + 0j)`` + differ by a factor of ``1j*pi``. + + For real arguments the function is computed by calling Cephes' + [2]_ *sici* routine. For complex arguments the algorithm is based + on Mpmath's [3]_ *si* and *ci* routines. + + References + ---------- + .. [1] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + (See Section 5.2.) + .. 
[2] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + .. [3] Fredrik Johansson and others. + "mpmath: a Python library for arbitrary-precision floating-point + arithmetic" (Version 0.19) http://mpmath.org/ + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.special import sici, exp1 + + `sici` accepts real or complex input: + + >>> sici(2.5) + (1.7785201734438267, 0.2858711963653835) + >>> sici(2.5 + 3j) + ((4.505735874563953+0.06863305018999577j), + (0.0793644206906966-2.935510262937543j)) + + For z in the right half plane, the sine and cosine integrals are + related to the exponential integral E1 (implemented in SciPy as + `scipy.special.exp1`) by + + * Si(z) = (E1(i*z) - E1(-i*z))/2i + pi/2 + * Ci(z) = -(E1(i*z) + E1(-i*z))/2 + + See [1]_ (equations 5.2.21 and 5.2.23). + + We can verify these relations: + + >>> z = 2 - 3j + >>> sici(z) + ((4.54751388956229-1.3991965806460565j), + (1.408292501520851+2.9836177420296055j)) + + >>> (exp1(1j*z) - exp1(-1j*z))/2j + np.pi/2 # Same as sine integral + (4.54751388956229-1.3991965806460565j) + + >>> -(exp1(1j*z) + exp1(-1j*z))/2 # Same as cosine integral + (1.408292501520851+2.9836177420296055j) + + Plot the functions evaluated on the real axis; the dotted horizontal + lines are at pi/2 and -pi/2: + + >>> x = np.linspace(-16, 16, 150) + >>> si, ci = sici(x) + + >>> fig, ax = plt.subplots() + >>> ax.plot(x, si, label='Si(x)') + >>> ax.plot(x, ci, '--', label='Ci(x)') + >>> ax.legend(shadow=True, framealpha=1, loc='upper left') + >>> ax.set_xlabel('x') + >>> ax.set_title('Sine and Cosine Integrals') + >>> ax.axhline(np.pi/2, linestyle=':', alpha=0.5, color='k') + >>> ax.axhline(-np.pi/2, linestyle=':', alpha=0.5, color='k') + >>> ax.grid(True) + >>> plt.show() + + """) + +add_newdoc("sindg", + """ + sindg(x, out=None) + + Sine of the angle `x` given in degrees. + + Parameters + ---------- + x : array_like + Angle, given in degrees. 
+ out : ndarray, optional + Optional output array for the function results. + + Returns + ------- + scalar or ndarray + Sine at the input. + + See Also + -------- + cosdg, tandg, cotdg + + Examples + -------- + >>> import numpy as np + >>> import scipy.special as sc + + It is more accurate than using sine directly. + + >>> x = 180 * np.arange(3) + >>> sc.sindg(x) + array([ 0., -0., 0.]) + >>> np.sin(x * np.pi / 180) + array([ 0.0000000e+00, 1.2246468e-16, -2.4492936e-16]) + + """) + +add_newdoc("smirnov", + r""" + smirnov(n, d, out=None) + + Kolmogorov-Smirnov complementary cumulative distribution function + + Returns the exact Kolmogorov-Smirnov complementary cumulative + distribution function,(aka the Survival Function) of Dn+ (or Dn-) + for a one-sided test of equality between an empirical and a + theoretical distribution. It is equal to the probability that the + maximum difference between a theoretical distribution and an empirical + one based on `n` samples is greater than d. + + Parameters + ---------- + n : int + Number of samples + d : float array_like + Deviation between the Empirical CDF (ECDF) and the target CDF. + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + The value(s) of smirnov(n, d), Prob(Dn+ >= d) (Also Prob(Dn- >= d)) + + See Also + -------- + smirnovi : The Inverse Survival Function for the distribution + scipy.stats.ksone : Provides the functionality as a continuous distribution + kolmogorov, kolmogi : Functions for the two-sided distribution + + Notes + ----- + `smirnov` is used by `stats.kstest` in the application of the + Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this + function is exposed in `scpy.special`, but the recommended way to achieve + the most accurate CDF/SF/PDF/PPF/ISF computations is to use the + `stats.ksone` distribution. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.special import smirnov + >>> from scipy.stats import norm + + Show the probability of a gap at least as big as 0, 0.5 and 1.0 for a + sample of size 5. + + >>> smirnov(5, [0, 0.5, 1.0]) + array([ 1. , 0.056, 0. ]) + + Compare a sample of size 5 against N(0, 1), the standard normal + distribution with mean 0 and standard deviation 1. + + `x` is the sample. + + >>> x = np.array([-1.392, -0.135, 0.114, 0.190, 1.82]) + + >>> target = norm(0, 1) + >>> cdfs = target.cdf(x) + >>> cdfs + array([0.0819612 , 0.44630594, 0.5453811 , 0.57534543, 0.9656205 ]) + + Construct the empirical CDF and the K-S statistics (Dn+, Dn-, Dn). + + >>> n = len(x) + >>> ecdfs = np.arange(n+1, dtype=float)/n + >>> cols = np.column_stack([x, ecdfs[1:], cdfs, cdfs - ecdfs[:n], + ... ecdfs[1:] - cdfs]) + >>> with np.printoptions(precision=3): + ... print(cols) + [[-1.392 0.2 0.082 0.082 0.118] + [-0.135 0.4 0.446 0.246 -0.046] + [ 0.114 0.6 0.545 0.145 0.055] + [ 0.19 0.8 0.575 -0.025 0.225] + [ 1.82 1. 0.966 0.166 0.034]] + >>> gaps = cols[:, -2:] + >>> Dnpm = np.max(gaps, axis=0) + >>> print(f'Dn-={Dnpm[0]:f}, Dn+={Dnpm[1]:f}') + Dn-=0.246306, Dn+=0.224655 + >>> probs = smirnov(n, Dnpm) + >>> print(f'For a sample of size {n} drawn from N(0, 1):', + ... f' Smirnov n={n}: Prob(Dn- >= {Dnpm[0]:f}) = {probs[0]:.4f}', + ... f' Smirnov n={n}: Prob(Dn+ >= {Dnpm[1]:f}) = {probs[1]:.4f}', + ... sep='\n') + For a sample of size 5 drawn from N(0, 1): + Smirnov n=5: Prob(Dn- >= 0.246306) = 0.4711 + Smirnov n=5: Prob(Dn+ >= 0.224655) = 0.5245 + + Plot the empirical CDF and the standard normal CDF. + + >>> import matplotlib.pyplot as plt + >>> plt.step(np.concatenate(([-2.5], x, [2.5])), + ... np.concatenate((ecdfs, [1])), + ... where='post', label='Empirical CDF') + >>> xx = np.linspace(-2.5, 2.5, 100) + >>> plt.plot(xx, target.cdf(xx), '--', label='CDF for N(0, 1)') + + Add vertical lines marking Dn+ and Dn-. 
+ + >>> iminus, iplus = np.argmax(gaps, axis=0) + >>> plt.vlines([x[iminus]], ecdfs[iminus], cdfs[iminus], color='r', + ... alpha=0.5, lw=4) + >>> plt.vlines([x[iplus]], cdfs[iplus], ecdfs[iplus+1], color='m', + ... alpha=0.5, lw=4) + + >>> plt.grid(True) + >>> plt.legend(framealpha=1, shadow=True) + >>> plt.show() + """) + +add_newdoc("smirnovi", + """ + smirnovi(n, p, out=None) + + Inverse to `smirnov` + + Returns `d` such that ``smirnov(n, d) == p``, the critical value + corresponding to `p`. + + Parameters + ---------- + n : int + Number of samples + p : float array_like + Probability + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + The value(s) of smirnovi(n, p), the critical values. + + See Also + -------- + smirnov : The Survival Function (SF) for the distribution + scipy.stats.ksone : Provides the functionality as a continuous distribution + kolmogorov, kolmogi : Functions for the two-sided distribution + scipy.stats.kstwobign : Two-sided Kolmogorov-Smirnov distribution, large n + + Notes + ----- + `smirnov` is used by `stats.kstest` in the application of the + Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this + function is exposed in `scpy.special`, but the recommended way to achieve + the most accurate CDF/SF/PDF/PPF/ISF computations is to use the + `stats.ksone` distribution. + + Examples + -------- + >>> from scipy.special import smirnovi, smirnov + + >>> n = 24 + >>> deviations = [0.1, 0.2, 0.3] + + Use `smirnov` to compute the complementary CDF of the Smirnov + distribution for the given number of samples and deviations. + + >>> p = smirnov(n, deviations) + >>> p + array([0.58105083, 0.12826832, 0.01032231]) + + The inverse function ``smirnovi(n, p)`` returns ``deviations``. + + >>> smirnovi(n, p) + array([0.1, 0.2, 0.3]) + + """) + +add_newdoc("_smirnovc", + """ + _smirnovc(n, d) + Internal function, do not use. 
+ """) + +add_newdoc("_smirnovci", + """ + Internal function, do not use. + """) + +add_newdoc("_smirnovp", + """ + _smirnovp(n, p) + Internal function, do not use. + """) + +add_newdoc("spence", + r""" + spence(z, out=None) + + Spence's function, also known as the dilogarithm. + + It is defined to be + + .. math:: + \int_1^z \frac{\log(t)}{1 - t}dt + + for complex :math:`z`, where the contour of integration is taken + to avoid the branch cut of the logarithm. Spence's function is + analytic everywhere except the negative real axis where it has a + branch cut. + + Parameters + ---------- + z : array_like + Points at which to evaluate Spence's function + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + s : scalar or ndarray + Computed values of Spence's function + + Notes + ----- + There is a different convention which defines Spence's function by + the integral + + .. math:: + -\int_0^z \frac{\log(1 - t)}{t}dt; + + this is our ``spence(1 - z)``. + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import spence + >>> import matplotlib.pyplot as plt + + The function is defined for complex inputs: + + >>> spence([1-1j, 1.5+2j, 3j, -10-5j]) + array([-0.20561676+0.91596559j, -0.86766909-1.39560134j, + -0.59422064-2.49129918j, -1.14044398+6.80075924j]) + + For complex inputs on the branch cut, which is the negative real axis, + the function returns the limit for ``z`` with positive imaginary part. + For example, in the following, note the sign change of the imaginary + part of the output for ``z = -2`` and ``z = -2 - 1e-8j``: + + >>> spence([-2 + 1e-8j, -2, -2 - 1e-8j]) + array([2.32018041-3.45139229j, 2.32018042-3.4513923j , + 2.32018041+3.45139229j]) + + The function returns ``nan`` for real inputs on the branch cut: + + >>> spence(-1.5) + nan + + Verify some particular values: ``spence(0) = pi**2/6``, + ``spence(1) = 0`` and ``spence(2) = -pi**2/12``. 
+ + >>> spence([0, 1, 2]) + array([ 1.64493407, 0. , -0.82246703]) + >>> np.pi**2/6, -np.pi**2/12 + (1.6449340668482264, -0.8224670334241132) + + Verify the identity:: + + spence(z) + spence(1 - z) = pi**2/6 - log(z)*log(1 - z) + + >>> z = 3 + 4j + >>> spence(z) + spence(1 - z) + (-2.6523186143876067+1.8853470951513935j) + >>> np.pi**2/6 - np.log(z)*np.log(1 - z) + (-2.652318614387606+1.885347095151394j) + + Plot the function for positive real input. + + >>> fig, ax = plt.subplots() + >>> x = np.linspace(0, 6, 400) + >>> ax.plot(x, spence(x)) + >>> ax.grid() + >>> ax.set_xlabel('x') + >>> ax.set_title('spence(x)') + >>> plt.show() + """) + +add_newdoc( + "stdtr", + r""" + stdtr(df, t, out=None) + + Student t distribution cumulative distribution function + + Returns the integral: + + .. math:: + \frac{\Gamma((df+1)/2)}{\sqrt{\pi df} \Gamma(df/2)} + \int_{-\infty}^t (1+x^2/df)^{-(df+1)/2}\, dx + + Parameters + ---------- + df : array_like + Degrees of freedom + t : array_like + Upper bound of the integral + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + Value of the Student t CDF at t + + See Also + -------- + stdtridf : inverse of stdtr with respect to `df` + stdtrit : inverse of stdtr with respect to `t` + scipy.stats.t : student t distribution + + Notes + ----- + The student t distribution is also available as `scipy.stats.t`. + Calling `stdtr` directly can improve performance compared to the + ``cdf`` method of `scipy.stats.t` (see last example below). + + Examples + -------- + Calculate the function for ``df=3`` at ``t=1``. + + >>> import numpy as np + >>> from scipy.special import stdtr + >>> import matplotlib.pyplot as plt + >>> stdtr(3, 1) + 0.8044988905221148 + + Plot the function for three different degrees of freedom. 
+ + >>> x = np.linspace(-10, 10, 1000) + >>> fig, ax = plt.subplots() + >>> parameters = [(1, "solid"), (3, "dashed"), (10, "dotted")] + >>> for (df, linestyle) in parameters: + ... ax.plot(x, stdtr(df, x), ls=linestyle, label=f"$df={df}$") + >>> ax.legend() + >>> ax.set_title("Student t distribution cumulative distribution function") + >>> plt.show() + + The function can be computed for several degrees of freedom at the same + time by providing a NumPy array or list for `df`: + + >>> stdtr([1, 2, 3], 1) + array([0.75 , 0.78867513, 0.80449889]) + + It is possible to calculate the function at several points for several + different degrees of freedom simultaneously by providing arrays for `df` + and `t` with shapes compatible for broadcasting. Compute `stdtr` at + 4 points for 3 degrees of freedom resulting in an array of shape 3x4. + + >>> dfs = np.array([[1], [2], [3]]) + >>> t = np.array([2, 4, 6, 8]) + >>> dfs.shape, t.shape + ((3, 1), (4,)) + + >>> stdtr(dfs, t) + array([[0.85241638, 0.92202087, 0.94743154, 0.96041658], + [0.90824829, 0.97140452, 0.98666426, 0.99236596], + [0.93033702, 0.98599577, 0.99536364, 0.99796171]]) + + The t distribution is also available as `scipy.stats.t`. Calling `stdtr` + directly can be much faster than calling the ``cdf`` method of + `scipy.stats.t`. To get the same results, one must use the following + parametrization: ``scipy.stats.t(df).cdf(x) = stdtr(df, x)``. + + >>> from scipy.stats import t + >>> df, x = 3, 1 + >>> stdtr_result = stdtr(df, x) # this can be faster than below + >>> stats_result = t(df).cdf(x) + >>> stats_result == stdtr_result # test that results are equal + True + """) + +add_newdoc("stdtridf", + """ + stdtridf(p, t, out=None) + + Inverse of `stdtr` vs df + + Returns the argument df such that stdtr(df, t) is equal to `p`. 
+ + Parameters + ---------- + p : array_like + Probability + t : array_like + Upper bound of the integral + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + df : scalar or ndarray + Value of `df` such that ``stdtr(df, t) == p`` + + See Also + -------- + stdtr : Student t CDF + stdtrit : inverse of stdtr with respect to `t` + scipy.stats.t : Student t distribution + + Examples + -------- + Compute the student t cumulative distribution function for one + parameter set. + + >>> from scipy.special import stdtr, stdtridf + >>> df, x = 5, 2 + >>> cdf_value = stdtr(df, x) + >>> cdf_value + 0.9490302605850709 + + Verify that `stdtridf` recovers the original value for `df` given + the CDF value and `x`. + + >>> stdtridf(cdf_value, x) + 5.0 + """) + +add_newdoc("stdtrit", + """ + stdtrit(df, p, out=None) + + The `p`-th quantile of the student t distribution. + + This function is the inverse of the student t distribution cumulative + distribution function (CDF), returning `t` such that `stdtr(df, t) = p`. + + Returns the argument `t` such that stdtr(df, t) is equal to `p`. + + Parameters + ---------- + df : array_like + Degrees of freedom + p : array_like + Probability + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + t : scalar or ndarray + Value of `t` such that ``stdtr(df, t) == p`` + + See Also + -------- + stdtr : Student t CDF + stdtridf : inverse of stdtr with respect to `df` + scipy.stats.t : Student t distribution + + Notes + ----- + The student t distribution is also available as `scipy.stats.t`. Calling + `stdtrit` directly can improve performance compared to the ``ppf`` + method of `scipy.stats.t` (see last example below). + + Examples + -------- + `stdtrit` represents the inverse of the student t distribution CDF which + is available as `stdtr`. Here, we calculate the CDF for ``df`` at + ``x=1``. 
`stdtrit` then returns ``1`` up to floating point errors + given the same value for `df` and the computed CDF value. + + >>> import numpy as np + >>> from scipy.special import stdtr, stdtrit + >>> import matplotlib.pyplot as plt + >>> df = 3 + >>> x = 1 + >>> cdf_value = stdtr(df, x) + >>> stdtrit(df, cdf_value) + 0.9999999994418539 + + Plot the function for three different degrees of freedom. + + >>> x = np.linspace(0, 1, 1000) + >>> parameters = [(1, "solid"), (2, "dashed"), (5, "dotted")] + >>> fig, ax = plt.subplots() + >>> for (df, linestyle) in parameters: + ... ax.plot(x, stdtrit(df, x), ls=linestyle, label=f"$df={df}$") + >>> ax.legend() + >>> ax.set_ylim(-10, 10) + >>> ax.set_title("Student t distribution quantile function") + >>> plt.show() + + The function can be computed for several degrees of freedom at the same + time by providing a NumPy array or list for `df`: + + >>> stdtrit([1, 2, 3], 0.7) + array([0.72654253, 0.6172134 , 0.58438973]) + + It is possible to calculate the function at several points for several + different degrees of freedom simultaneously by providing arrays for `df` + and `p` with shapes compatible for broadcasting. Compute `stdtrit` at + 4 points for 3 degrees of freedom resulting in an array of shape 3x4. + + >>> dfs = np.array([[1], [2], [3]]) + >>> p = np.array([0.2, 0.4, 0.7, 0.8]) + >>> dfs.shape, p.shape + ((3, 1), (4,)) + + >>> stdtrit(dfs, p) + array([[-1.37638192, -0.3249197 , 0.72654253, 1.37638192], + [-1.06066017, -0.28867513, 0.6172134 , 1.06066017], + [-0.97847231, -0.27667066, 0.58438973, 0.97847231]]) + + The t distribution is also available as `scipy.stats.t`. Calling `stdtrit` + directly can be much faster than calling the ``ppf`` method of + `scipy.stats.t`. To get the same results, one must use the following + parametrization: ``scipy.stats.t(df).ppf(x) = stdtrit(df, x)``. 
+ + >>> from scipy.stats import t + >>> df, x = 3, 0.5 + >>> stdtrit_result = stdtrit(df, x) # this can be faster than below + >>> stats_result = t(df).ppf(x) + >>> stats_result == stdtrit_result # test that results are equal + True + """) + +add_newdoc("struve", + r""" + struve(v, x, out=None) + + Struve function. + + Return the value of the Struve function of order `v` at `x`. The Struve + function is defined as, + + .. math:: + H_v(x) = (z/2)^{v + 1} \sum_{n=0}^\infty + \frac{(-1)^n (z/2)^{2n}}{\Gamma(n + \frac{3}{2}) \Gamma(n + v + \frac{3}{2})}, + + where :math:`\Gamma` is the gamma function. + + Parameters + ---------- + v : array_like + Order of the Struve function (float). + x : array_like + Argument of the Struve function (float; must be positive unless `v` is + an integer). + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + H : scalar or ndarray + Value of the Struve function of order `v` at `x`. + + See Also + -------- + modstruve: Modified Struve function + + Notes + ----- + Three methods discussed in [1]_ are used to evaluate the Struve function: + + - power series + - expansion in Bessel functions (if :math:`|z| < |v| + 20`) + - asymptotic large-z expansion (if :math:`z \geq 0.7v + 12`) + + Rounding errors are estimated based on the largest terms in the sums, and + the result associated with the smallest error is returned. + + References + ---------- + .. [1] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/11 + + Examples + -------- + Calculate the Struve function of order 1 at 2. + + >>> import numpy as np + >>> from scipy.special import struve + >>> import matplotlib.pyplot as plt + >>> struve(1, 2.) + 0.6467637282835622 + + Calculate the Struve function at 2 for orders 1, 2 and 3 by providing + a list for the order parameter `v`. + + >>> struve([1, 2, 3], 2.) 
+ array([0.64676373, 0.28031806, 0.08363767]) + + Calculate the Struve function of order 1 for several points by providing + an array for `x`. + + >>> points = np.array([2., 5., 8.]) + >>> struve(1, points) + array([0.64676373, 0.80781195, 0.48811605]) + + Compute the Struve function for several orders at several points by + providing arrays for `v` and `z`. The arrays have to be broadcastable + to the correct shapes. + + >>> orders = np.array([[1], [2], [3]]) + >>> points.shape, orders.shape + ((3,), (3, 1)) + + >>> struve(orders, points) + array([[0.64676373, 0.80781195, 0.48811605], + [0.28031806, 1.56937455, 1.51769363], + [0.08363767, 1.50872065, 2.98697513]]) + + Plot the Struve functions of order 0 to 3 from -10 to 10. + + >>> fig, ax = plt.subplots() + >>> x = np.linspace(-10., 10., 1000) + >>> for i in range(4): + ... ax.plot(x, struve(i, x), label=f'$H_{i!r}$') + >>> ax.legend(ncol=2) + >>> ax.set_xlim(-10, 10) + >>> ax.set_title(r"Struve functions $H_{\nu}$") + >>> plt.show() + """) + +add_newdoc("tandg", + """ + tandg(x, out=None) + + Tangent of angle `x` given in degrees. + + Parameters + ---------- + x : array_like + Angle, given in degrees. + out : ndarray, optional + Optional output array for the function results. + + Returns + ------- + scalar or ndarray + Tangent at the input. + + See Also + -------- + sindg, cosdg, cotdg + + Examples + -------- + >>> import numpy as np + >>> import scipy.special as sc + + It is more accurate than using tangent directly. + + >>> x = 180 * np.arange(3) + >>> sc.tandg(x) + array([0., 0., 0.]) + >>> np.tan(x * np.pi / 180) + array([ 0.0000000e+00, -1.2246468e-16, -2.4492936e-16]) + + """) + +add_newdoc( + "tklmbda", + r""" + tklmbda(x, lmbda, out=None) + + Cumulative distribution function of the Tukey lambda distribution. 
+ + Parameters + ---------- + x, lmbda : array_like + Parameters + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + cdf : scalar or ndarray + Value of the Tukey lambda CDF + + See Also + -------- + scipy.stats.tukeylambda : Tukey lambda distribution + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.special import tklmbda, expit + + Compute the cumulative distribution function (CDF) of the Tukey lambda + distribution at several ``x`` values for `lmbda` = -1.5. + + >>> x = np.linspace(-2, 2, 9) + >>> x + array([-2. , -1.5, -1. , -0.5, 0. , 0.5, 1. , 1.5, 2. ]) + >>> tklmbda(x, -1.5) + array([0.34688734, 0.3786554 , 0.41528805, 0.45629737, 0.5 , + 0.54370263, 0.58471195, 0.6213446 , 0.65311266]) + + When `lmbda` is 0, the function is the logistic sigmoid function, + which is implemented in `scipy.special` as `expit`. + + >>> tklmbda(x, 0) + array([0.11920292, 0.18242552, 0.26894142, 0.37754067, 0.5 , + 0.62245933, 0.73105858, 0.81757448, 0.88079708]) + >>> expit(x) + array([0.11920292, 0.18242552, 0.26894142, 0.37754067, 0.5 , + 0.62245933, 0.73105858, 0.81757448, 0.88079708]) + + When `lmbda` is 1, the Tukey lambda distribution is uniform on the + interval [-1, 1], so the CDF increases linearly. + + >>> t = np.linspace(-1, 1, 9) + >>> tklmbda(t, 1) + array([0. , 0.125, 0.25 , 0.375, 0.5 , 0.625, 0.75 , 0.875, 1. ]) + + In the following, we generate plots for several values of `lmbda`. + + The first figure shows graphs for `lmbda` <= 0. + + >>> styles = ['-', '-.', '--', ':'] + >>> fig, ax = plt.subplots() + >>> x = np.linspace(-12, 12, 500) + >>> for k, lmbda in enumerate([-1.0, -0.5, 0.0]): + ... y = tklmbda(x, lmbda) + ... 
ax.plot(x, y, styles[k], label=rf'$\lambda$ = {lmbda:-4.1f}')
+
+    >>> ax.set_title(r'tklmbda(x, $\lambda$)')
+    >>> ax.set_xlabel('x')
+    >>> ax.legend(framealpha=1, shadow=True)
+    >>> ax.grid(True)
+
+    The second figure shows graphs for `lmbda` > 0.  The dots in the
+    graphs show the bounds of the support of the distribution.
+
+    >>> fig, ax = plt.subplots()
+    >>> x = np.linspace(-4.2, 4.2, 500)
+    >>> lmbdas = [0.25, 0.5, 1.0, 1.5]
+    >>> for k, lmbda in enumerate(lmbdas):
+    ...     y = tklmbda(x, lmbda)
+    ...     ax.plot(x, y, styles[k], label=fr'$\lambda$ = {lmbda}')
+
+    >>> ax.set_prop_cycle(None)
+    >>> for lmbda in lmbdas:
+    ...     ax.plot([-1/lmbda, 1/lmbda], [0, 1], '.', ms=8)
+
+    >>> ax.set_title(r'tklmbda(x, $\lambda$)')
+    >>> ax.set_xlabel('x')
+    >>> ax.legend(framealpha=1, shadow=True)
+    >>> ax.grid(True)
+
+    >>> plt.tight_layout()
+    >>> plt.show()
+
+    The CDF of the Tukey lambda distribution is also implemented as the
+    ``cdf`` method of `scipy.stats.tukeylambda`.  In the following,
+    ``tukeylambda.cdf(x, -0.5)`` and ``tklmbda(x, -0.5)`` compute the
+    same values:
+
+    >>> from scipy.stats import tukeylambda
+    >>> x = np.linspace(-2, 2, 9)
+
+    >>> tukeylambda.cdf(x, -0.5)
+    array([0.21995157, 0.27093858, 0.33541677, 0.41328161, 0.5       ,
+           0.58671839, 0.66458323, 0.72906142, 0.78004843])
+
+    >>> tklmbda(x, -0.5)
+    array([0.21995157, 0.27093858, 0.33541677, 0.41328161, 0.5       ,
+           0.58671839, 0.66458323, 0.72906142, 0.78004843])
+
+    The implementation in ``tukeylambda`` also provides location and scale
+    parameters, and other methods such as ``pdf()`` (the probability
+    density function) and ``ppf()`` (the inverse of the CDF), so for
+    working with the Tukey lambda distribution, ``tukeylambda`` is more
+    generally useful.  The primary advantage of ``tklmbda`` is that it is
+    significantly faster than ``tukeylambda.cdf``.
+ """) + +add_newdoc("wofz", + """ + wofz(z, out=None) + + Faddeeva function + + Returns the value of the Faddeeva function for complex argument:: + + exp(-z**2) * erfc(-i*z) + + Parameters + ---------- + z : array_like + complex argument + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + Value of the Faddeeva function + + See Also + -------- + dawsn, erf, erfc, erfcx, erfi + + References + ---------- + .. [1] Steven G. Johnson, Faddeeva W function implementation. + http://ab-initio.mit.edu/Faddeeva + + Examples + -------- + >>> import numpy as np + >>> from scipy import special + >>> import matplotlib.pyplot as plt + + >>> x = np.linspace(-3, 3) + >>> z = special.wofz(x) + + >>> plt.plot(x, z.real, label='wofz(x).real') + >>> plt.plot(x, z.imag, label='wofz(x).imag') + >>> plt.xlabel('$x$') + >>> plt.legend(framealpha=1, shadow=True) + >>> plt.grid(alpha=0.25) + >>> plt.show() + + """) + +add_newdoc("xlogy", + """ + xlogy(x, y, out=None) + + Compute ``x*log(y)`` so that the result is 0 if ``x = 0``. + + Parameters + ---------- + x : array_like + Multiplier + y : array_like + Argument + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + z : scalar or ndarray + Computed x*log(y) + + Notes + ----- + The log function used in the computation is the natural log. + + .. versionadded:: 0.13.0 + + Examples + -------- + We can use this function to calculate the binary logistic loss also + known as the binary cross entropy. This loss function is used for + binary classification problems and is defined as: + + .. math:: + L = 1/n * \\sum_{i=0}^n -(y_i*log(y\\_pred_i) + (1-y_i)*log(1-y\\_pred_i)) + + We can define the parameters `x` and `y` as y and y_pred respectively. + y is the array of the actual labels which over here can be either 0 or 1. + y_pred is the array of the predicted probabilities with respect to + the positive class (1). 
+ + >>> import numpy as np + >>> from scipy.special import xlogy + >>> y = np.array([0, 1, 0, 1, 1, 0]) + >>> y_pred = np.array([0.3, 0.8, 0.4, 0.7, 0.9, 0.2]) + >>> n = len(y) + >>> loss = -(xlogy(y, y_pred) + xlogy(1 - y, 1 - y_pred)).sum() + >>> loss /= n + >>> loss + 0.29597052165495025 + + A lower loss is usually better as it indicates that the predictions are + similar to the actual labels. In this example since our predicted + probabilities are close to the actual labels, we get an overall loss + that is reasonably low and appropriate. + + """) + +add_newdoc("xlog1py", + """ + xlog1py(x, y, out=None) + + Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``. + + Parameters + ---------- + x : array_like + Multiplier + y : array_like + Argument + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + z : scalar or ndarray + Computed x*log1p(y) + + Notes + ----- + + .. versionadded:: 0.13.0 + + Examples + -------- + This example shows how the function can be used to calculate the log of + the probability mass function for a geometric discrete random variable. + The probability mass function of the geometric distribution is defined + as follows: + + .. math:: f(k) = (1-p)^{k-1} p + + where :math:`p` is the probability of a single success + and :math:`1-p` is the probability of a single failure + and :math:`k` is the number of trials to get the first success. + + >>> import numpy as np + >>> from scipy.special import xlog1py + >>> p = 0.5 + >>> k = 100 + >>> _pmf = np.power(1 - p, k - 1) * p + >>> _pmf + 7.888609052210118e-31 + + If we take k as a relatively large number the value of the probability + mass function can become very low. In such cases taking the log of the + pmf would be more suitable as the log function can change the values + to a scale that is more appropriate to work with. 
+
+    >>> _log_pmf = xlog1py(k - 1, -p) + np.log(p)
+    >>> _log_pmf
+    -69.31471805599453
+
+    We can confirm that we get a value close to the original pmf value by
+    taking the exponential of the log pmf.
+
+    >>> _orig_pmf = np.exp(_log_pmf)
+    >>> np.isclose(_pmf, _orig_pmf)
+    True
+
+    """)
+
+add_newdoc("y0",
+    r"""
+    y0(x, out=None)
+
+    Bessel function of the second kind of order 0.
+
+    Parameters
+    ----------
+    x : array_like
+        Argument (float).
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    Y : scalar or ndarray
+        Value of the Bessel function of the second kind of order 0 at `x`.
+
+    See Also
+    --------
+    j0: Bessel function of the first kind of order 0
+    yv: Bessel function of the second kind
+
+    Notes
+    -----
+    The domain is divided into the intervals [0, 5] and (5, infinity). In the
+    first interval a rational approximation :math:`R(x)` is employed to
+    compute,
+
+    .. math::
+
+        Y_0(x) = R(x) + \frac{2 \log(x) J_0(x)}{\pi},
+
+    where :math:`J_0` is the Bessel function of the first kind of order 0.
+
+    In the second interval, the Hankel asymptotic expansion is employed with
+    two rational functions of degree 6/6 and 7/7.
+
+    This function is a wrapper for the Cephes [1]_ routine `y0`.
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+
+    Examples
+    --------
+    Calculate the function at one point:
+
+    >>> from scipy.special import y0
+    >>> y0(1.)
+    0.08825696421567697
+
+    Calculate at several points:
+
+    >>> import numpy as np
+    >>> y0(np.array([0.5, 2., 3.]))
+    array([-0.44451873,  0.51037567,  0.37685001])
+
+    Plot the function from 0 to 10.
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots()
+    >>> x = np.linspace(0., 10., 1000)
+    >>> y = y0(x)
+    >>> ax.plot(x, y)
+    >>> plt.show()
+
+    """)
+
+add_newdoc("y1",
+    """
+    y1(x, out=None)
+
+    Bessel function of the second kind of order 1.
+ + Parameters + ---------- + x : array_like + Argument (float). + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + Y : scalar or ndarray + Value of the Bessel function of the second kind of order 1 at `x`. + + See Also + -------- + j1: Bessel function of the first kind of order 1 + yn: Bessel function of the second kind + yv: Bessel function of the second kind + + Notes + ----- + The domain is divided into the intervals [0, 8] and (8, infinity). In the + first interval a 25 term Chebyshev expansion is used, and computing + :math:`J_1` (the Bessel function of the first kind) is required. In the + second, the asymptotic trigonometric representation is employed using two + rational functions of degree 5/5. + + This function is a wrapper for the Cephes [1]_ routine `y1`. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + Calculate the function at one point: + + >>> from scipy.special import y1 + >>> y1(1.) + -0.7812128213002888 + + Calculate at several points: + + >>> import numpy as np + >>> y1(np.array([0.5, 2., 3.])) + array([-1.47147239, -0.10703243, 0.32467442]) + + Plot the function from 0 to 10. + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> x = np.linspace(0., 10., 1000) + >>> y = y1(x) + >>> ax.plot(x, y) + >>> plt.show() + + """) + +add_newdoc("yn", + r""" + yn(n, x, out=None) + + Bessel function of the second kind of integer order and real argument. + + Parameters + ---------- + n : array_like + Order (integer). + x : array_like + Argument (float). + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + Y : scalar or ndarray + Value of the Bessel function, :math:`Y_n(x)`. + + See Also + -------- + yv : For real order and real or complex argument. 
+ y0: faster implementation of this function for order 0 + y1: faster implementation of this function for order 1 + + Notes + ----- + Wrapper for the Cephes [1]_ routine `yn`. + + The function is evaluated by forward recurrence on `n`, starting with + values computed by the Cephes routines `y0` and `y1`. If `n = 0` or 1, + the routine for `y0` or `y1` is called directly. + + References + ---------- + .. [1] Cephes Mathematical Functions Library, + http://www.netlib.org/cephes/ + + Examples + -------- + Evaluate the function of order 0 at one point. + + >>> from scipy.special import yn + >>> yn(0, 1.) + 0.08825696421567697 + + Evaluate the function at one point for different orders. + + >>> yn(0, 1.), yn(1, 1.), yn(2, 1.) + (0.08825696421567697, -0.7812128213002888, -1.6506826068162546) + + The evaluation for different orders can be carried out in one call by + providing a list or NumPy array as argument for the `v` parameter: + + >>> yn([0, 1, 2], 1.) + array([ 0.08825696, -0.78121282, -1.65068261]) + + Evaluate the function at several points for order 0 by providing an + array for `z`. + + >>> import numpy as np + >>> points = np.array([0.5, 3., 8.]) + >>> yn(0, points) + array([-0.44451873, 0.37685001, 0.22352149]) + + If `z` is an array, the order parameter `v` must be broadcastable to + the correct shape if different orders shall be computed in one call. + To calculate the orders 0 and 1 for an 1D array: + + >>> orders = np.array([[0], [1]]) + >>> orders.shape + (2, 1) + + >>> yn(orders, points) + array([[-0.44451873, 0.37685001, 0.22352149], + [-1.47147239, 0.32467442, -0.15806046]]) + + Plot the functions of order 0 to 3 from 0 to 10. + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> x = np.linspace(0., 10., 1000) + >>> for i in range(4): + ... 
ax.plot(x, yn(i, x), label=f'$Y_{i!r}$') + >>> ax.set_ylim(-3, 1) + >>> ax.legend() + >>> plt.show() + """) + +add_newdoc("yv", + r""" + yv(v, z, out=None) + + Bessel function of the second kind of real order and complex argument. + + Parameters + ---------- + v : array_like + Order (float). + z : array_like + Argument (float or complex). + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + Y : scalar or ndarray + Value of the Bessel function of the second kind, :math:`Y_v(x)`. + + See Also + -------- + yve : :math:`Y_v` with leading exponential behavior stripped off. + y0: faster implementation of this function for order 0 + y1: faster implementation of this function for order 1 + + Notes + ----- + For positive `v` values, the computation is carried out using the + AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel + Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`, + + .. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}). + + For negative `v` values the formula, + + .. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v) + + is used, where :math:`J_v(z)` is the Bessel function of the first kind, + computed using the AMOS routine `zbesj`. Note that the second term is + exactly zero for integer `v`; to improve accuracy the second term is + explicitly omitted for `v` values such that `v = floor(v)`. + + References + ---------- + .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions + of a Complex Argument and Nonnegative Order", + http://netlib.org/amos/ + + Examples + -------- + Evaluate the function of order 0 at one point. + + >>> from scipy.special import yv + >>> yv(0, 1.) + 0.088256964215677 + + Evaluate the function at one point for different orders. + + >>> yv(0, 1.), yv(1, 1.), yv(1.5, 1.) 
+ (0.088256964215677, -0.7812128213002889, -1.102495575160179) + + The evaluation for different orders can be carried out in one call by + providing a list or NumPy array as argument for the `v` parameter: + + >>> yv([0, 1, 1.5], 1.) + array([ 0.08825696, -0.78121282, -1.10249558]) + + Evaluate the function at several points for order 0 by providing an + array for `z`. + + >>> import numpy as np + >>> points = np.array([0.5, 3., 8.]) + >>> yv(0, points) + array([-0.44451873, 0.37685001, 0.22352149]) + + If `z` is an array, the order parameter `v` must be broadcastable to + the correct shape if different orders shall be computed in one call. + To calculate the orders 0 and 1 for an 1D array: + + >>> orders = np.array([[0], [1]]) + >>> orders.shape + (2, 1) + + >>> yv(orders, points) + array([[-0.44451873, 0.37685001, 0.22352149], + [-1.47147239, 0.32467442, -0.15806046]]) + + Plot the functions of order 0 to 3 from 0 to 10. + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> x = np.linspace(0., 10., 1000) + >>> for i in range(4): + ... ax.plot(x, yv(i, x), label=f'$Y_{i!r}$') + >>> ax.set_ylim(-3, 1) + >>> ax.legend() + >>> plt.show() + + """) + +add_newdoc("yve", + r""" + yve(v, z, out=None) + + Exponentially scaled Bessel function of the second kind of real order. + + Returns the exponentially scaled Bessel function of the second + kind of real order `v` at complex `z`:: + + yve(v, z) = yv(v, z) * exp(-abs(z.imag)) + + Parameters + ---------- + v : array_like + Order (float). + z : array_like + Argument (float or complex). + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + Y : scalar or ndarray + Value of the exponentially scaled Bessel function. + + See Also + -------- + yv: Unscaled Bessel function of the second kind of real order. 
+ + Notes + ----- + For positive `v` values, the computation is carried out using the + AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel + Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`, + + .. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}). + + For negative `v` values the formula, + + .. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v) + + is used, where :math:`J_v(z)` is the Bessel function of the first kind, + computed using the AMOS routine `zbesj`. Note that the second term is + exactly zero for integer `v`; to improve accuracy the second term is + explicitly omitted for `v` values such that `v = floor(v)`. + + Exponentially scaled Bessel functions are useful for large `z`: + for these, the unscaled Bessel functions can easily under-or overflow. + + References + ---------- + .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions + of a Complex Argument and Nonnegative Order", + http://netlib.org/amos/ + + Examples + -------- + Compare the output of `yv` and `yve` for large complex arguments for `z` + by computing their values for order ``v=1`` at ``z=1000j``. We see that + `yv` returns nan but `yve` returns a finite number: + + >>> import numpy as np + >>> from scipy.special import yv, yve + >>> v = 1 + >>> z = 1000j + >>> yv(v, z), yve(v, z) + ((nan+nanj), (-0.012610930256928629+7.721967686709076e-19j)) + + For real arguments for `z`, `yve` returns the same as `yv` up to + floating point errors. 
+ + >>> v, z = 1, 1000 + >>> yv(v, z), yve(v, z) + (-0.02478433129235178, -0.02478433129235179) + + The function can be evaluated for several orders at the same time by + providing a list or NumPy array for `v`: + + >>> yve([1, 2, 3], 1j) + array([-0.20791042+0.14096627j, 0.38053618-0.04993878j, + 0.00815531-1.66311097j]) + + In the same way, the function can be evaluated at several points in one + call by providing a list or NumPy array for `z`: + + >>> yve(1, np.array([1j, 2j, 3j])) + array([-0.20791042+0.14096627j, -0.21526929+0.01205044j, + -0.19682671+0.00127278j]) + + It is also possible to evaluate several orders at several points + at the same time by providing arrays for `v` and `z` with + broadcasting compatible shapes. Compute `yve` for two different orders + `v` and three points `z` resulting in a 2x3 array. + + >>> v = np.array([[1], [2]]) + >>> z = np.array([3j, 4j, 5j]) + >>> v.shape, z.shape + ((2, 1), (3,)) + + >>> yve(v, z) + array([[-1.96826713e-01+1.27277544e-03j, -1.78750840e-01+1.45558819e-04j, + -1.63972267e-01+1.73494110e-05j], + [1.94960056e-03-1.11782545e-01j, 2.02902325e-04-1.17626501e-01j, + 2.27727687e-05-1.17951906e-01j]]) + """) + +add_newdoc("_zeta", + """ + _zeta(x, q) + + Internal function, Hurwitz zeta. + + """) + +add_newdoc("zetac", + """ + zetac(x, out=None) + + Riemann zeta function minus 1. + + This function is defined as + + .. math:: \\zeta(x) = \\sum_{k=2}^{\\infty} 1 / k^x, + + where ``x > 1``. For ``x < 1`` the analytic continuation is + computed. For more information on the Riemann zeta function, see + [dlmf]_. + + Parameters + ---------- + x : array_like of float + Values at which to compute zeta(x) - 1 (must be real). + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + Values of zeta(x) - 1. + + See Also + -------- + zeta + + References + ---------- + .. 
[dlmf] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/25 + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import zetac, zeta + + Some special values: + + >>> zetac(2), np.pi**2/6 - 1 + (0.64493406684822641, 0.6449340668482264) + + >>> zetac(-1), -1.0/12 - 1 + (-1.0833333333333333, -1.0833333333333333) + + Compare ``zetac(x)`` to ``zeta(x) - 1`` for large `x`: + + >>> zetac(60), zeta(60) - 1 + (8.673617380119933e-19, 0.0) + """) + +add_newdoc("_riemann_zeta", + """ + Internal function, use `zeta` instead. + """) + +add_newdoc("_struve_asymp_large_z", + """ + _struve_asymp_large_z(v, z, is_h) + + Internal function for testing `struve` & `modstruve` + + Evaluates using asymptotic expansion + + Returns + ------- + v, err + """) + +add_newdoc("_struve_power_series", + """ + _struve_power_series(v, z, is_h) + + Internal function for testing `struve` & `modstruve` + + Evaluates using power series + + Returns + ------- + v, err + """) + +add_newdoc("_struve_bessel_series", + """ + _struve_bessel_series(v, z, is_h) + + Internal function for testing `struve` & `modstruve` + + Evaluates using Bessel function series + + Returns + ------- + v, err + """) + +add_newdoc("_spherical_jn", + """ + Internal function, use `spherical_jn` instead. + """) + +add_newdoc("_spherical_jn_d", + """ + Internal function, use `spherical_jn` instead. + """) + +add_newdoc("_spherical_yn", + """ + Internal function, use `spherical_yn` instead. + """) + +add_newdoc("_spherical_yn_d", + """ + Internal function, use `spherical_yn` instead. + """) + +add_newdoc("_spherical_in", + """ + Internal function, use `spherical_in` instead. + """) + +add_newdoc("_spherical_in_d", + """ + Internal function, use `spherical_in` instead. + """) + +add_newdoc("_spherical_kn", + """ + Internal function, use `spherical_kn` instead. + """) + +add_newdoc("_spherical_kn_d", + """ + Internal function, use `spherical_kn` instead. 
+ """) + +add_newdoc("loggamma", + r""" + loggamma(z, out=None) + + Principal branch of the logarithm of the gamma function. + + Defined to be :math:`\log(\Gamma(x))` for :math:`x > 0` and + extended to the complex plane by analytic continuation. The + function has a single branch cut on the negative real axis. + + .. versionadded:: 0.18.0 + + Parameters + ---------- + z : array_like + Values in the complex plane at which to compute ``loggamma`` + out : ndarray, optional + Output array for computed values of ``loggamma`` + + Returns + ------- + loggamma : scalar or ndarray + Values of ``loggamma`` at z. + + See Also + -------- + gammaln : logarithm of the absolute value of the gamma function + gammasgn : sign of the gamma function + + Notes + ----- + It is not generally true that :math:`\log\Gamma(z) = + \log(\Gamma(z))`, though the real parts of the functions do + agree. The benefit of not defining `loggamma` as + :math:`\log(\Gamma(z))` is that the latter function has a + complicated branch cut structure whereas `loggamma` is analytic + except for on the negative real axis. + + The identities + + .. math:: + \exp(\log\Gamma(z)) &= \Gamma(z) \\ + \log\Gamma(z + 1) &= \log(z) + \log\Gamma(z) + + make `loggamma` useful for working in complex logspace. + + On the real line `loggamma` is related to `gammaln` via + ``exp(loggamma(x + 0j)) = gammasgn(x)*exp(gammaln(x))``, up to + rounding error. + + The implementation here is based on [hare1997]_. + + References + ---------- + .. [hare1997] D.E.G. Hare, + *Computing the Principal Branch of log-Gamma*, + Journal of Algorithms, Volume 25, Issue 2, November 1997, pages 221-236. + """) + +add_newdoc("_sinpi", + """ + Internal function, do not use. + """) + +add_newdoc("_cospi", + """ + Internal function, do not use. + """) + +add_newdoc("owens_t", + """ + owens_t(h, a, out=None) + + Owen's T Function. 
+ + The function T(h, a) gives the probability of the event + (X > h and 0 < Y < a * X) where X and Y are independent + standard normal random variables. + + Parameters + ---------- + h: array_like + Input value. + a: array_like + Input value. + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + t: scalar or ndarray + Probability of the event (X > h and 0 < Y < a * X), + where X and Y are independent standard normal random variables. + + References + ---------- + .. [1] M. Patefield and D. Tandy, "Fast and accurate calculation of + Owen's T Function", Statistical Software vol. 5, pp. 1-25, 2000. + + Examples + -------- + >>> from scipy import special + >>> a = 3.5 + >>> h = 0.78 + >>> special.owens_t(h, a) + 0.10877216734852274 + """) + +add_newdoc("_factorial", + """ + Internal function, do not use. + """) + +add_newdoc("wright_bessel", + r""" + wright_bessel(a, b, x, out=None) + + Wright's generalized Bessel function. + + Wright's generalized Bessel function is an entire function and defined as + + .. math:: \Phi(a, b; x) = \sum_{k=0}^\infty \frac{x^k}{k! \Gamma(a k + b)} + + See Also [1]. + + Parameters + ---------- + a : array_like of float + a >= 0 + b : array_like of float + b >= 0 + x : array_like of float + x >= 0 + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + Value of the Wright's generalized Bessel function + + Notes + ----- + Due to the complexity of the function with its three parameters, only + non-negative arguments are implemented. + + References + ---------- + .. [1] Digital Library of Mathematical Functions, 10.46. + https://dlmf.nist.gov/10.46.E1 + + Examples + -------- + >>> from scipy.special import wright_bessel + >>> a, b, x = 1.5, 1.1, 2.5 + >>> wright_bessel(a, b-1, x) + 4.5314465939443025 + + Now, let us verify the relation + + .. 
math:: \Phi(a, b-1; x) = a x \Phi(a, b+a; x) + (b-1) \Phi(a, b; x) + + >>> a * x * wright_bessel(a, b+a, x) + (b-1) * wright_bessel(a, b, x) + 4.5314465939443025 + + + """) + + +add_newdoc("ndtri_exp", + r""" + ndtri_exp(y, out=None) + + Inverse of `log_ndtr` vs x. Allows for greater precision than + `ndtri` composed with `numpy.exp` for very small values of y and for + y close to 0. + + Parameters + ---------- + y : array_like of float + Function argument + out : ndarray, optional + Optional output array for the function results + + Returns + ------- + scalar or ndarray + Inverse of the log CDF of the standard normal distribution, evaluated + at y. + + See Also + -------- + log_ndtr : log of the standard normal cumulative distribution function + ndtr : standard normal cumulative distribution function + ndtri : standard normal percentile function + + Examples + -------- + >>> import numpy as np + >>> import scipy.special as sc + + `ndtri_exp` agrees with the naive implementation when the latter does + not suffer from underflow. + + >>> sc.ndtri_exp(-1) + -0.33747496376420244 + >>> sc.ndtri(np.exp(-1)) + -0.33747496376420244 + + For extreme values of y, the naive approach fails + + >>> sc.ndtri(np.exp(-800)) + -inf + >>> sc.ndtri(np.exp(-1e-20)) + inf + + whereas `ndtri_exp` is still able to compute the result to high precision. + + >>> sc.ndtri_exp(-800) + -39.88469483825668 + >>> sc.ndtri_exp(-1e-20) + 9.262340089798409 + """) + +add_newdoc("_stirling2_inexact", + r""" + Internal function, do not use. 
+ """) diff --git a/venv/lib/python3.10/site-packages/scipy/special/_basic.py b/venv/lib/python3.10/site-packages/scipy/special/_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..36fe74a5427d33044550a5c7aaa57b3436bd2c40 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special/_basic.py @@ -0,0 +1,3398 @@ +# +# Author: Travis Oliphant, 2002 +# + +import operator +import numpy as np +import math +import warnings +from collections import defaultdict +from heapq import heapify, heappop +from numpy import (pi, asarray, floor, isscalar, iscomplex, sqrt, where, mgrid, + sin, place, issubdtype, extract, inexact, nan, zeros, sinc) +from . import _ufuncs +from ._ufuncs import (mathieu_a, mathieu_b, iv, jv, gamma, + psi, hankel1, hankel2, yv, kv, poch, binom, + _stirling2_inexact) +from . import _specfun +from ._comb import _comb_int +from scipy._lib.deprecation import _NoValue, _deprecate_positional_args + + +__all__ = [ + 'ai_zeros', + 'assoc_laguerre', + 'bei_zeros', + 'beip_zeros', + 'ber_zeros', + 'bernoulli', + 'berp_zeros', + 'bi_zeros', + 'clpmn', + 'comb', + 'digamma', + 'diric', + 'erf_zeros', + 'euler', + 'factorial', + 'factorial2', + 'factorialk', + 'fresnel_zeros', + 'fresnelc_zeros', + 'fresnels_zeros', + 'h1vp', + 'h2vp', + 'ivp', + 'jn_zeros', + 'jnjnp_zeros', + 'jnp_zeros', + 'jnyn_zeros', + 'jvp', + 'kei_zeros', + 'keip_zeros', + 'kelvin_zeros', + 'ker_zeros', + 'kerp_zeros', + 'kvp', + 'lmbda', + 'lpmn', + 'lpn', + 'lqmn', + 'lqn', + 'mathieu_even_coef', + 'mathieu_odd_coef', + 'obl_cv_seq', + 'pbdn_seq', + 'pbdv_seq', + 'pbvv_seq', + 'perm', + 'polygamma', + 'pro_cv_seq', + 'riccati_jn', + 'riccati_yn', + 'sinc', + 'stirling2', + 'y0_zeros', + 'y1_zeros', + 'y1p_zeros', + 'yn_zeros', + 'ynp_zeros', + 'yvp', + 'zeta' +] + + +# mapping k to last n such that factorialk(n, k) < np.iinfo(np.int64).max +_FACTORIALK_LIMITS_64BITS = {1: 20, 2: 33, 3: 44, 4: 54, 5: 65, + 6: 74, 7: 84, 8: 93, 9: 101} +# mapping k to last n 
such that factorialk(n, k) < np.iinfo(np.int32).max +_FACTORIALK_LIMITS_32BITS = {1: 12, 2: 19, 3: 25, 4: 31, 5: 37, + 6: 43, 7: 47, 8: 51, 9: 56} + + +def _nonneg_int_or_fail(n, var_name, strict=True): + try: + if strict: + # Raises an exception if float + n = operator.index(n) + elif n == floor(n): + n = int(n) + else: + raise ValueError() + if n < 0: + raise ValueError() + except (ValueError, TypeError) as err: + raise err.__class__(f"{var_name} must be a non-negative integer") from err + return n + + +def diric(x, n): + """Periodic sinc function, also called the Dirichlet function. + + The Dirichlet function is defined as:: + + diric(x, n) = sin(x * n/2) / (n * sin(x / 2)), + + where `n` is a positive integer. + + Parameters + ---------- + x : array_like + Input data + n : int + Integer defining the periodicity. + + Returns + ------- + diric : ndarray + + Examples + -------- + >>> import numpy as np + >>> from scipy import special + >>> import matplotlib.pyplot as plt + + >>> x = np.linspace(-8*np.pi, 8*np.pi, num=201) + >>> plt.figure(figsize=(8, 8)); + >>> for idx, n in enumerate([2, 3, 4, 9]): + ... plt.subplot(2, 2, idx+1) + ... plt.plot(x, special.diric(x, n)) + ... plt.title('diric, n={}'.format(n)) + >>> plt.show() + + The following example demonstrates that `diric` gives the magnitudes + (modulo the sign and scaling) of the Fourier coefficients of a + rectangular pulse. + + Suppress output of values that are effectively 0: + + >>> np.set_printoptions(suppress=True) + + Create a signal `x` of length `m` with `k` ones: + + >>> m = 8 + >>> k = 3 + >>> x = np.zeros(m) + >>> x[:k] = 1 + + Use the FFT to compute the Fourier transform of `x`, and + inspect the magnitudes of the coefficients: + + >>> np.abs(np.fft.fft(x)) + array([ 3. , 2.41421356, 1. , 0.41421356, 1. , + 0.41421356, 1. , 2.41421356]) + + Now find the same values (up to sign) using `diric`. 
We multiply + by `k` to account for the different scaling conventions of + `numpy.fft.fft` and `diric`: + + >>> theta = np.linspace(0, 2*np.pi, m, endpoint=False) + >>> k * special.diric(theta, k) + array([ 3. , 2.41421356, 1. , -0.41421356, -1. , + -0.41421356, 1. , 2.41421356]) + """ + x, n = asarray(x), asarray(n) + n = asarray(n + (x-x)) + x = asarray(x + (n-n)) + if issubdtype(x.dtype, inexact): + ytype = x.dtype + else: + ytype = float + y = zeros(x.shape, ytype) + + # empirical minval for 32, 64 or 128 bit float computations + # where sin(x/2) < minval, result is fixed at +1 or -1 + if np.finfo(ytype).eps < 1e-18: + minval = 1e-11 + elif np.finfo(ytype).eps < 1e-15: + minval = 1e-7 + else: + minval = 1e-3 + + mask1 = (n <= 0) | (n != floor(n)) + place(y, mask1, nan) + + x = x / 2 + denom = sin(x) + mask2 = (1-mask1) & (abs(denom) < minval) + xsub = extract(mask2, x) + nsub = extract(mask2, n) + zsub = xsub / pi + place(y, mask2, pow(-1, np.round(zsub)*(nsub-1))) + + mask = (1-mask1) & (1-mask2) + xsub = extract(mask, x) + nsub = extract(mask, n) + dsub = extract(mask, denom) + place(y, mask, sin(nsub*xsub)/(nsub*dsub)) + return y + + +def jnjnp_zeros(nt): + """Compute zeros of integer-order Bessel functions Jn and Jn'. + + Results are arranged in order of the magnitudes of the zeros. + + Parameters + ---------- + nt : int + Number (<=1200) of zeros to compute + + Returns + ------- + zo[l-1] : ndarray + Value of the lth zero of Jn(x) and Jn'(x). Of length `nt`. + n[l-1] : ndarray + Order of the Jn(x) or Jn'(x) associated with lth zero. Of length `nt`. + m[l-1] : ndarray + Serial number of the zeros of Jn(x) or Jn'(x) associated + with lth zero. Of length `nt`. + t[l-1] : ndarray + 0 if lth zero in zo is zero of Jn(x), 1 if it is a zero of Jn'(x). Of + length `nt`. + + See Also + -------- + jn_zeros, jnp_zeros : to get separated arrays of zeros. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. 
"Computation of Special + Functions", John Wiley and Sons, 1996, chapter 5. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt > 1200): + raise ValueError("Number must be integer <= 1200.") + nt = int(nt) + n, m, t, zo = _specfun.jdzo(nt) + return zo[1:nt+1], n[:nt], m[:nt], t[:nt] + + +def jnyn_zeros(n, nt): + """Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x). + + Returns 4 arrays of length `nt`, corresponding to the first `nt` + zeros of Jn(x), Jn'(x), Yn(x), and Yn'(x), respectively. The zeros + are returned in ascending order. + + Parameters + ---------- + n : int + Order of the Bessel functions + nt : int + Number (<=1200) of zeros to compute + + Returns + ------- + Jn : ndarray + First `nt` zeros of Jn + Jnp : ndarray + First `nt` zeros of Jn' + Yn : ndarray + First `nt` zeros of Yn + Ynp : ndarray + First `nt` zeros of Yn' + + See Also + -------- + jn_zeros, jnp_zeros, yn_zeros, ynp_zeros + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 5. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + Examples + -------- + Compute the first three roots of :math:`J_1`, :math:`J_1'`, + :math:`Y_1` and :math:`Y_1'`. + + >>> from scipy.special import jnyn_zeros + >>> jn_roots, jnp_roots, yn_roots, ynp_roots = jnyn_zeros(1, 3) + >>> jn_roots, yn_roots + (array([ 3.83170597, 7.01558667, 10.17346814]), + array([2.19714133, 5.42968104, 8.59600587])) + + Plot :math:`J_1`, :math:`J_1'`, :math:`Y_1`, :math:`Y_1'` and their roots. 
+ + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.special import jnyn_zeros, jvp, jn, yvp, yn + >>> jn_roots, jnp_roots, yn_roots, ynp_roots = jnyn_zeros(1, 3) + >>> fig, ax = plt.subplots() + >>> xmax= 11 + >>> x = np.linspace(0, xmax) + >>> x[0] += 1e-15 + >>> ax.plot(x, jn(1, x), label=r"$J_1$", c='r') + >>> ax.plot(x, jvp(1, x, 1), label=r"$J_1'$", c='b') + >>> ax.plot(x, yn(1, x), label=r"$Y_1$", c='y') + >>> ax.plot(x, yvp(1, x, 1), label=r"$Y_1'$", c='c') + >>> zeros = np.zeros((3, )) + >>> ax.scatter(jn_roots, zeros, s=30, c='r', zorder=5, + ... label=r"$J_1$ roots") + >>> ax.scatter(jnp_roots, zeros, s=30, c='b', zorder=5, + ... label=r"$J_1'$ roots") + >>> ax.scatter(yn_roots, zeros, s=30, c='y', zorder=5, + ... label=r"$Y_1$ roots") + >>> ax.scatter(ynp_roots, zeros, s=30, c='c', zorder=5, + ... label=r"$Y_1'$ roots") + >>> ax.hlines(0, 0, xmax, color='k') + >>> ax.set_ylim(-0.6, 0.6) + >>> ax.set_xlim(0, xmax) + >>> ax.legend(ncol=2, bbox_to_anchor=(1., 0.75)) + >>> plt.tight_layout() + >>> plt.show() + """ + if not (isscalar(nt) and isscalar(n)): + raise ValueError("Arguments must be scalars.") + if (floor(n) != n) or (floor(nt) != nt): + raise ValueError("Arguments must be integers.") + if (nt <= 0): + raise ValueError("nt > 0") + return _specfun.jyzo(abs(n), nt) + + +def jn_zeros(n, nt): + r"""Compute zeros of integer-order Bessel functions Jn. + + Compute `nt` zeros of the Bessel functions :math:`J_n(x)` on the + interval :math:`(0, \infty)`. The zeros are returned in ascending + order. Note that this interval excludes the zero at :math:`x = 0` + that exists for :math:`n > 0`. + + Parameters + ---------- + n : int + Order of Bessel function + nt : int + Number of zeros to return + + Returns + ------- + ndarray + First `nt` zeros of the Bessel function. + + See Also + -------- + jv: Real-order Bessel functions of the first kind + jnp_zeros: Zeros of :math:`Jn'` + + References + ---------- + .. 
[1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 5. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + Examples + -------- + Compute the first four positive roots of :math:`J_3`. + + >>> from scipy.special import jn_zeros + >>> jn_zeros(3, 4) + array([ 6.3801619 , 9.76102313, 13.01520072, 16.22346616]) + + Plot :math:`J_3` and its first four positive roots. Note + that the root located at 0 is not returned by `jn_zeros`. + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.special import jn, jn_zeros + >>> j3_roots = jn_zeros(3, 4) + >>> xmax = 18 + >>> xmin = -1 + >>> x = np.linspace(xmin, xmax, 500) + >>> fig, ax = plt.subplots() + >>> ax.plot(x, jn(3, x), label=r'$J_3$') + >>> ax.scatter(j3_roots, np.zeros((4, )), s=30, c='r', + ... label=r"$J_3$_Zeros", zorder=5) + >>> ax.scatter(0, 0, s=30, c='k', + ... label=r"Root at 0", zorder=5) + >>> ax.hlines(0, 0, xmax, color='k') + >>> ax.set_xlim(xmin, xmax) + >>> plt.legend() + >>> plt.show() + """ + return jnyn_zeros(n, nt)[0] + + +def jnp_zeros(n, nt): + r"""Compute zeros of integer-order Bessel function derivatives Jn'. + + Compute `nt` zeros of the functions :math:`J_n'(x)` on the + interval :math:`(0, \infty)`. The zeros are returned in ascending + order. Note that this interval excludes the zero at :math:`x = 0` + that exists for :math:`n > 1`. + + Parameters + ---------- + n : int + Order of Bessel function + nt : int + Number of zeros to return + + Returns + ------- + ndarray + First `nt` zeros of the Bessel function. + + See Also + -------- + jvp: Derivatives of integer-order Bessel functions of the first kind + jv: Float-order Bessel functions of the first kind + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 5. 
+ https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + Examples + -------- + Compute the first four roots of :math:`J_2'`. + + >>> from scipy.special import jnp_zeros + >>> jnp_zeros(2, 4) + array([ 3.05423693, 6.70613319, 9.96946782, 13.17037086]) + + As `jnp_zeros` yields the roots of :math:`J_n'`, it can be used to + compute the locations of the peaks of :math:`J_n`. Plot + :math:`J_2`, :math:`J_2'` and the locations of the roots of :math:`J_2'`. + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.special import jn, jnp_zeros, jvp + >>> j2_roots = jnp_zeros(2, 4) + >>> xmax = 15 + >>> x = np.linspace(0, xmax, 500) + >>> fig, ax = plt.subplots() + >>> ax.plot(x, jn(2, x), label=r'$J_2$') + >>> ax.plot(x, jvp(2, x, 1), label=r"$J_2'$") + >>> ax.hlines(0, 0, xmax, color='k') + >>> ax.scatter(j2_roots, np.zeros((4, )), s=30, c='r', + ... label=r"Roots of $J_2'$", zorder=5) + >>> ax.set_ylim(-0.4, 0.8) + >>> ax.set_xlim(0, xmax) + >>> plt.legend() + >>> plt.show() + """ + return jnyn_zeros(n, nt)[1] + + +def yn_zeros(n, nt): + r"""Compute zeros of integer-order Bessel function Yn(x). + + Compute `nt` zeros of the functions :math:`Y_n(x)` on the interval + :math:`(0, \infty)`. The zeros are returned in ascending order. + + Parameters + ---------- + n : int + Order of Bessel function + nt : int + Number of zeros to return + + Returns + ------- + ndarray + First `nt` zeros of the Bessel function. + + See Also + -------- + yn: Bessel function of the second kind for integer order + yv: Bessel function of the second kind for real order + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 5. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + Examples + -------- + Compute the first four roots of :math:`Y_2`. 
+ + >>> from scipy.special import yn_zeros + >>> yn_zeros(2, 4) + array([ 3.38424177, 6.79380751, 10.02347798, 13.20998671]) + + Plot :math:`Y_2` and its first four roots. + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.special import yn, yn_zeros + >>> xmin = 2 + >>> xmax = 15 + >>> x = np.linspace(xmin, xmax, 500) + >>> fig, ax = plt.subplots() + >>> ax.hlines(0, xmin, xmax, color='k') + >>> ax.plot(x, yn(2, x), label=r'$Y_2$') + >>> ax.scatter(yn_zeros(2, 4), np.zeros((4, )), s=30, c='r', + ... label='Roots', zorder=5) + >>> ax.set_ylim(-0.4, 0.4) + >>> ax.set_xlim(xmin, xmax) + >>> plt.legend() + >>> plt.show() + """ + return jnyn_zeros(n, nt)[2] + + +def ynp_zeros(n, nt): + r"""Compute zeros of integer-order Bessel function derivatives Yn'(x). + + Compute `nt` zeros of the functions :math:`Y_n'(x)` on the + interval :math:`(0, \infty)`. The zeros are returned in ascending + order. + + Parameters + ---------- + n : int + Order of Bessel function + nt : int + Number of zeros to return + + Returns + ------- + ndarray + First `nt` zeros of the Bessel derivative function. + + + See Also + -------- + yvp + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 5. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + Examples + -------- + Compute the first four roots of the first derivative of the + Bessel function of second kind for order 0 :math:`Y_0'`. + + >>> from scipy.special import ynp_zeros + >>> ynp_zeros(0, 4) + array([ 2.19714133, 5.42968104, 8.59600587, 11.74915483]) + + Plot :math:`Y_0`, :math:`Y_0'` and confirm visually that the roots of + :math:`Y_0'` are located at local extrema of :math:`Y_0`. 
+ + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.special import yn, ynp_zeros, yvp + >>> zeros = ynp_zeros(0, 4) + >>> xmax = 13 + >>> x = np.linspace(0, xmax, 500) + >>> fig, ax = plt.subplots() + >>> ax.plot(x, yn(0, x), label=r'$Y_0$') + >>> ax.plot(x, yvp(0, x, 1), label=r"$Y_0'$") + >>> ax.scatter(zeros, np.zeros((4, )), s=30, c='r', + ... label=r"Roots of $Y_0'$", zorder=5) + >>> for root in zeros: + ... y0_extremum = yn(0, root) + ... lower = min(0, y0_extremum) + ... upper = max(0, y0_extremum) + ... ax.vlines(root, lower, upper, color='r') + >>> ax.hlines(0, 0, xmax, color='k') + >>> ax.set_ylim(-0.6, 0.6) + >>> ax.set_xlim(0, xmax) + >>> plt.legend() + >>> plt.show() + """ + return jnyn_zeros(n, nt)[3] + + +def y0_zeros(nt, complex=False): + """Compute nt zeros of Bessel function Y0(z), and derivative at each zero. + + The derivatives are given by Y0'(z0) = -Y1(z0) at each zero z0. + + Parameters + ---------- + nt : int + Number of zeros to return + complex : bool, default False + Set to False to return only the real zeros; set to True to return only + the complex zeros with negative real part and positive imaginary part. + Note that the complex conjugates of the latter are also zeros of the + function, but are not returned by this routine. + + Returns + ------- + z0n : ndarray + Location of nth zero of Y0(z) + y0pz0n : ndarray + Value of derivative Y0'(z0) for nth zero + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 5. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + Examples + -------- + Compute the first 4 real roots and the derivatives at the roots of + :math:`Y_0`: + + >>> import numpy as np + >>> from scipy.special import y0_zeros + >>> zeros, grads = y0_zeros(4) + >>> with np.printoptions(precision=5): + ... print(f"Roots: {zeros}") + ... 
print(f"Gradients: {grads}") + Roots: [ 0.89358+0.j 3.95768+0.j 7.08605+0.j 10.22235+0.j] + Gradients: [-0.87942+0.j 0.40254+0.j -0.3001 +0.j 0.2497 +0.j] + + Plot the real part of :math:`Y_0` and the first four computed roots. + + >>> import matplotlib.pyplot as plt + >>> from scipy.special import y0 + >>> xmin = 0 + >>> xmax = 11 + >>> x = np.linspace(xmin, xmax, 500) + >>> fig, ax = plt.subplots() + >>> ax.hlines(0, xmin, xmax, color='k') + >>> ax.plot(x, y0(x), label=r'$Y_0$') + >>> zeros, grads = y0_zeros(4) + >>> ax.scatter(zeros.real, np.zeros((4, )), s=30, c='r', + ... label=r'$Y_0$_zeros', zorder=5) + >>> ax.set_ylim(-0.5, 0.6) + >>> ax.set_xlim(xmin, xmax) + >>> plt.legend(ncol=2) + >>> plt.show() + + Compute the first 4 complex roots and the derivatives at the roots of + :math:`Y_0` by setting ``complex=True``: + + >>> y0_zeros(4, True) + (array([ -2.40301663+0.53988231j, -5.5198767 +0.54718001j, + -8.6536724 +0.54841207j, -11.79151203+0.54881912j]), + array([ 0.10074769-0.88196771j, -0.02924642+0.5871695j , + 0.01490806-0.46945875j, -0.00937368+0.40230454j])) + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("Arguments must be scalar positive integer.") + kf = 0 + kc = not complex + return _specfun.cyzo(nt, kf, kc) + + +def y1_zeros(nt, complex=False): + """Compute nt zeros of Bessel function Y1(z), and derivative at each zero. + + The derivatives are given by Y1'(z1) = Y0(z1) at each zero z1. + + Parameters + ---------- + nt : int + Number of zeros to return + complex : bool, default False + Set to False to return only the real zeros; set to True to return only + the complex zeros with negative real part and positive imaginary part. + Note that the complex conjugates of the latter are also zeros of the + function, but are not returned by this routine. + + Returns + ------- + z1n : ndarray + Location of nth zero of Y1(z) + y1pz1n : ndarray + Value of derivative Y1'(z1) for nth zero + + References + ---------- + .. 
[1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 5. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + Examples + -------- + Compute the first 4 real roots and the derivatives at the roots of + :math:`Y_1`: + + >>> import numpy as np + >>> from scipy.special import y1_zeros + >>> zeros, grads = y1_zeros(4) + >>> with np.printoptions(precision=5): + ... print(f"Roots: {zeros}") + ... print(f"Gradients: {grads}") + Roots: [ 2.19714+0.j 5.42968+0.j 8.59601+0.j 11.74915+0.j] + Gradients: [ 0.52079+0.j -0.34032+0.j 0.27146+0.j -0.23246+0.j] + + Extract the real parts: + + >>> realzeros = zeros.real + >>> realzeros + array([ 2.19714133, 5.42968104, 8.59600587, 11.74915483]) + + Plot :math:`Y_1` and the first four computed roots. + + >>> import matplotlib.pyplot as plt + >>> from scipy.special import y1 + >>> xmin = 0 + >>> xmax = 13 + >>> x = np.linspace(xmin, xmax, 500) + >>> zeros, grads = y1_zeros(4) + >>> fig, ax = plt.subplots() + >>> ax.hlines(0, xmin, xmax, color='k') + >>> ax.plot(x, y1(x), label=r'$Y_1$') + >>> ax.scatter(zeros.real, np.zeros((4, )), s=30, c='r', + ... label=r'$Y_1$_zeros', zorder=5) + >>> ax.set_ylim(-0.5, 0.5) + >>> ax.set_xlim(xmin, xmax) + >>> plt.legend() + >>> plt.show() + + Compute the first 4 complex roots and the derivatives at the roots of + :math:`Y_1` by setting ``complex=True``: + + >>> y1_zeros(4, True) + (array([ -0.50274327+0.78624371j, -3.83353519+0.56235654j, + -7.01590368+0.55339305j, -10.17357383+0.55127339j]), + array([-0.45952768+1.31710194j, 0.04830191-0.69251288j, + -0.02012695+0.51864253j, 0.011614 -0.43203296j])) + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("Arguments must be scalar positive integer.") + kf = 1 + kc = not complex + return _specfun.cyzo(nt, kf, kc) + + +def y1p_zeros(nt, complex=False): + """Compute nt zeros of Bessel derivative Y1'(z), and value at each zero. 
+ + The values are given by Y1(z1) at each z1 where Y1'(z1)=0. + + Parameters + ---------- + nt : int + Number of zeros to return + complex : bool, default False + Set to False to return only the real zeros; set to True to return only + the complex zeros with negative real part and positive imaginary part. + Note that the complex conjugates of the latter are also zeros of the + function, but are not returned by this routine. + + Returns + ------- + z1pn : ndarray + Location of nth zero of Y1'(z) + y1z1pn : ndarray + Value of derivative Y1(z1) for nth zero + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 5. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + Examples + -------- + Compute the first four roots of :math:`Y_1'` and the values of + :math:`Y_1` at these roots. + + >>> import numpy as np + >>> from scipy.special import y1p_zeros + >>> y1grad_roots, y1_values = y1p_zeros(4) + >>> with np.printoptions(precision=5): + ... print(f"Y1' Roots: {y1grad_roots.real}") + ... print(f"Y1 values: {y1_values.real}") + Y1' Roots: [ 3.68302 6.9415 10.1234 13.28576] + Y1 values: [ 0.41673 -0.30317 0.25091 -0.21897] + + `y1p_zeros` can be used to calculate the extremal points of :math:`Y_1` + directly. Here we plot :math:`Y_1` and the first four extrema. + + >>> import matplotlib.pyplot as plt + >>> from scipy.special import y1, yvp + >>> y1_roots, y1_values_at_roots = y1p_zeros(4) + >>> real_roots = y1_roots.real + >>> xmax = 15 + >>> x = np.linspace(0, xmax, 500) + >>> x[0] += 1e-15 + >>> fig, ax = plt.subplots() + >>> ax.plot(x, y1(x), label=r'$Y_1$') + >>> ax.plot(x, yvp(1, x, 1), label=r"$Y_1'$") + >>> ax.scatter(real_roots, np.zeros((4, )), s=30, c='r', + ... label=r"Roots of $Y_1'$", zorder=5) + >>> ax.scatter(real_roots, y1_values_at_roots.real, s=30, c='k', + ... 
label=r"Extrema of $Y_1$", zorder=5) + >>> ax.hlines(0, 0, xmax, color='k') + >>> ax.set_ylim(-0.5, 0.5) + >>> ax.set_xlim(0, xmax) + >>> ax.legend(ncol=2, bbox_to_anchor=(1., 0.75)) + >>> plt.tight_layout() + >>> plt.show() + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("Arguments must be scalar positive integer.") + kf = 2 + kc = not complex + return _specfun.cyzo(nt, kf, kc) + + +def _bessel_diff_formula(v, z, n, L, phase): + # from AMS55. + # L(v, z) = J(v, z), Y(v, z), H1(v, z), H2(v, z), phase = -1 + # L(v, z) = I(v, z) or exp(v*pi*i)K(v, z), phase = 1 + # For K, you can pull out the exp((v-k)*pi*i) into the caller + v = asarray(v) + p = 1.0 + s = L(v-n, z) + for i in range(1, n+1): + p = phase * (p * (n-i+1)) / i # = choose(k, i) + s += p*L(v-n + i*2, z) + return s / (2.**n) + + +def jvp(v, z, n=1): + """Compute derivatives of Bessel functions of the first kind. + + Compute the nth derivative of the Bessel function `Jv` with + respect to `z`. + + Parameters + ---------- + v : array_like or float + Order of Bessel function + z : complex + Argument at which to evaluate the derivative; can be real or + complex. + n : int, default 1 + Order of derivative. For 0 returns the Bessel function `jv` itself. + + Returns + ------- + scalar or ndarray + Values of the derivative of the Bessel function. + + Notes + ----- + The derivative is computed using the relation DLFM 10.6.7 [2]_. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 5. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + .. [2] NIST Digital Library of Mathematical Functions. + https://dlmf.nist.gov/10.6.E7 + + Examples + -------- + + Compute the Bessel function of the first kind of order 0 and + its first two derivatives at 1. 
+ + >>> from scipy.special import jvp + >>> jvp(0, 1, 0), jvp(0, 1, 1), jvp(0, 1, 2) + (0.7651976865579666, -0.44005058574493355, -0.3251471008130331) + + Compute the first derivative of the Bessel function of the first + kind for several orders at 1 by providing an array for `v`. + + >>> jvp([0, 1, 2], 1, 1) + array([-0.44005059, 0.3251471 , 0.21024362]) + + Compute the first derivative of the Bessel function of the first + kind of order 0 at several points by providing an array for `z`. + + >>> import numpy as np + >>> points = np.array([0., 1.5, 3.]) + >>> jvp(0, points, 1) + array([-0. , -0.55793651, -0.33905896]) + + Plot the Bessel function of the first kind of order 1 and its + first three derivatives. + + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(-10, 10, 1000) + >>> fig, ax = plt.subplots() + >>> ax.plot(x, jvp(1, x, 0), label=r"$J_1$") + >>> ax.plot(x, jvp(1, x, 1), label=r"$J_1'$") + >>> ax.plot(x, jvp(1, x, 2), label=r"$J_1''$") + >>> ax.plot(x, jvp(1, x, 3), label=r"$J_1'''$") + >>> plt.legend() + >>> plt.show() + """ + n = _nonneg_int_or_fail(n, 'n') + if n == 0: + return jv(v, z) + else: + return _bessel_diff_formula(v, z, n, jv, -1) + + +def yvp(v, z, n=1): + """Compute derivatives of Bessel functions of the second kind. + + Compute the nth derivative of the Bessel function `Yv` with + respect to `z`. + + Parameters + ---------- + v : array_like of float + Order of Bessel function + z : complex + Argument at which to evaluate the derivative + n : int, default 1 + Order of derivative. For 0 returns the BEssel function `yv` + + Returns + ------- + scalar or ndarray + nth derivative of the Bessel function. + + See Also + -------- + yv : Bessel functions of the second kind + + Notes + ----- + The derivative is computed using the relation DLFM 10.6.7 [2]_. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 5. 
+ https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + .. [2] NIST Digital Library of Mathematical Functions. + https://dlmf.nist.gov/10.6.E7 + + Examples + -------- + Compute the Bessel function of the second kind of order 0 and + its first two derivatives at 1. + + >>> from scipy.special import yvp + >>> yvp(0, 1, 0), yvp(0, 1, 1), yvp(0, 1, 2) + (0.088256964215677, 0.7812128213002889, -0.8694697855159659) + + Compute the first derivative of the Bessel function of the second + kind for several orders at 1 by providing an array for `v`. + + >>> yvp([0, 1, 2], 1, 1) + array([0.78121282, 0.86946979, 2.52015239]) + + Compute the first derivative of the Bessel function of the + second kind of order 0 at several points by providing an array for `z`. + + >>> import numpy as np + >>> points = np.array([0.5, 1.5, 3.]) + >>> yvp(0, points, 1) + array([ 1.47147239, 0.41230863, -0.32467442]) + + Plot the Bessel function of the second kind of order 1 and its + first three derivatives. + + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(0, 5, 1000) + >>> x[0] += 1e-15 + >>> fig, ax = plt.subplots() + >>> ax.plot(x, yvp(1, x, 0), label=r"$Y_1$") + >>> ax.plot(x, yvp(1, x, 1), label=r"$Y_1'$") + >>> ax.plot(x, yvp(1, x, 2), label=r"$Y_1''$") + >>> ax.plot(x, yvp(1, x, 3), label=r"$Y_1'''$") + >>> ax.set_ylim(-10, 10) + >>> plt.legend() + >>> plt.show() + """ + n = _nonneg_int_or_fail(n, 'n') + if n == 0: + return yv(v, z) + else: + return _bessel_diff_formula(v, z, n, yv, -1) + + +def kvp(v, z, n=1): + """Compute derivatives of real-order modified Bessel function Kv(z) + + Kv(z) is the modified Bessel function of the second kind. + Derivative is calculated with respect to `z`. + + Parameters + ---------- + v : array_like of float + Order of Bessel function + z : array_like of complex + Argument at which to evaluate the derivative + n : int, default 1 + Order of derivative. For 0 returns the Bessel function `kv` itself. 
+ + Returns + ------- + out : ndarray + The results + + See Also + -------- + kv + + Notes + ----- + The derivative is computed using the relation DLFM 10.29.5 [2]_. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 6. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + .. [2] NIST Digital Library of Mathematical Functions. + https://dlmf.nist.gov/10.29.E5 + + Examples + -------- + Compute the modified bessel function of the second kind of order 0 and + its first two derivatives at 1. + + >>> from scipy.special import kvp + >>> kvp(0, 1, 0), kvp(0, 1, 1), kvp(0, 1, 2) + (0.42102443824070834, -0.6019072301972346, 1.0229316684379428) + + Compute the first derivative of the modified Bessel function of the second + kind for several orders at 1 by providing an array for `v`. + + >>> kvp([0, 1, 2], 1, 1) + array([-0.60190723, -1.02293167, -3.85158503]) + + Compute the first derivative of the modified Bessel function of the + second kind of order 0 at several points by providing an array for `z`. + + >>> import numpy as np + >>> points = np.array([0.5, 1.5, 3.]) + >>> kvp(0, points, 1) + array([-1.65644112, -0.2773878 , -0.04015643]) + + Plot the modified bessel function of the second kind and its + first three derivatives. + + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(0, 5, 1000) + >>> fig, ax = plt.subplots() + >>> ax.plot(x, kvp(1, x, 0), label=r"$K_1$") + >>> ax.plot(x, kvp(1, x, 1), label=r"$K_1'$") + >>> ax.plot(x, kvp(1, x, 2), label=r"$K_1''$") + >>> ax.plot(x, kvp(1, x, 3), label=r"$K_1'''$") + >>> ax.set_ylim(-2.5, 2.5) + >>> plt.legend() + >>> plt.show() + """ + n = _nonneg_int_or_fail(n, 'n') + if n == 0: + return kv(v, z) + else: + return (-1)**n * _bessel_diff_formula(v, z, n, kv, 1) + + +def ivp(v, z, n=1): + """Compute derivatives of modified Bessel functions of the first kind. 
+ + Compute the nth derivative of the modified Bessel function `Iv` + with respect to `z`. + + Parameters + ---------- + v : array_like or float + Order of Bessel function + z : array_like + Argument at which to evaluate the derivative; can be real or + complex. + n : int, default 1 + Order of derivative. For 0, returns the Bessel function `iv` itself. + + Returns + ------- + scalar or ndarray + nth derivative of the modified Bessel function. + + See Also + -------- + iv + + Notes + ----- + The derivative is computed using the relation DLFM 10.29.5 [2]_. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 6. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + .. [2] NIST Digital Library of Mathematical Functions. + https://dlmf.nist.gov/10.29.E5 + + Examples + -------- + Compute the modified Bessel function of the first kind of order 0 and + its first two derivatives at 1. + + >>> from scipy.special import ivp + >>> ivp(0, 1, 0), ivp(0, 1, 1), ivp(0, 1, 2) + (1.2660658777520084, 0.565159103992485, 0.7009067737595233) + + Compute the first derivative of the modified Bessel function of the first + kind for several orders at 1 by providing an array for `v`. + + >>> ivp([0, 1, 2], 1, 1) + array([0.5651591 , 0.70090677, 0.29366376]) + + Compute the first derivative of the modified Bessel function of the + first kind of order 0 at several points by providing an array for `z`. + + >>> import numpy as np + >>> points = np.array([0., 1.5, 3.]) + >>> ivp(0, points, 1) + array([0. , 0.98166643, 3.95337022]) + + Plot the modified Bessel function of the first kind of order 1 and its + first three derivatives. 
+ + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(-5, 5, 1000) + >>> fig, ax = plt.subplots() + >>> ax.plot(x, ivp(1, x, 0), label=r"$I_1$") + >>> ax.plot(x, ivp(1, x, 1), label=r"$I_1'$") + >>> ax.plot(x, ivp(1, x, 2), label=r"$I_1''$") + >>> ax.plot(x, ivp(1, x, 3), label=r"$I_1'''$") + >>> plt.legend() + >>> plt.show() + """ + n = _nonneg_int_or_fail(n, 'n') + if n == 0: + return iv(v, z) + else: + return _bessel_diff_formula(v, z, n, iv, 1) + + +def h1vp(v, z, n=1): + """Compute derivatives of Hankel function H1v(z) with respect to `z`. + + Parameters + ---------- + v : array_like + Order of Hankel function + z : array_like + Argument at which to evaluate the derivative. Can be real or + complex. + n : int, default 1 + Order of derivative. For 0 returns the Hankel function `h1v` itself. + + Returns + ------- + scalar or ndarray + Values of the derivative of the Hankel function. + + See Also + -------- + hankel1 + + Notes + ----- + The derivative is computed using the relation DLFM 10.6.7 [2]_. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 5. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + .. [2] NIST Digital Library of Mathematical Functions. + https://dlmf.nist.gov/10.6.E7 + + Examples + -------- + Compute the Hankel function of the first kind of order 0 and + its first two derivatives at 1. + + >>> from scipy.special import h1vp + >>> h1vp(0, 1, 0), h1vp(0, 1, 1), h1vp(0, 1, 2) + ((0.7651976865579664+0.088256964215677j), + (-0.44005058574493355+0.7812128213002889j), + (-0.3251471008130329-0.8694697855159659j)) + + Compute the first derivative of the Hankel function of the first kind + for several orders at 1 by providing an array for `v`. 
+ + >>> h1vp([0, 1, 2], 1, 1) + array([-0.44005059+0.78121282j, 0.3251471 +0.86946979j, + 0.21024362+2.52015239j]) + + Compute the first derivative of the Hankel function of the first kind + of order 0 at several points by providing an array for `z`. + + >>> import numpy as np + >>> points = np.array([0.5, 1.5, 3.]) + >>> h1vp(0, points, 1) + array([-0.24226846+1.47147239j, -0.55793651+0.41230863j, + -0.33905896-0.32467442j]) + """ + n = _nonneg_int_or_fail(n, 'n') + if n == 0: + return hankel1(v, z) + else: + return _bessel_diff_formula(v, z, n, hankel1, -1) + + +def h2vp(v, z, n=1): + """Compute derivatives of Hankel function H2v(z) with respect to `z`. + + Parameters + ---------- + v : array_like + Order of Hankel function + z : array_like + Argument at which to evaluate the derivative. Can be real or + complex. + n : int, default 1 + Order of derivative. For 0 returns the Hankel function `h2v` itself. + + Returns + ------- + scalar or ndarray + Values of the derivative of the Hankel function. + + See Also + -------- + hankel2 + + Notes + ----- + The derivative is computed using the relation DLFM 10.6.7 [2]_. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 5. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + .. [2] NIST Digital Library of Mathematical Functions. + https://dlmf.nist.gov/10.6.E7 + + Examples + -------- + Compute the Hankel function of the second kind of order 0 and + its first two derivatives at 1. + + >>> from scipy.special import h2vp + >>> h2vp(0, 1, 0), h2vp(0, 1, 1), h2vp(0, 1, 2) + ((0.7651976865579664-0.088256964215677j), + (-0.44005058574493355-0.7812128213002889j), + (-0.3251471008130329+0.8694697855159659j)) + + Compute the first derivative of the Hankel function of the second kind + for several orders at 1 by providing an array for `v`. 
+ + >>> h2vp([0, 1, 2], 1, 1) + array([-0.44005059-0.78121282j, 0.3251471 -0.86946979j, + 0.21024362-2.52015239j]) + + Compute the first derivative of the Hankel function of the second kind + of order 0 at several points by providing an array for `z`. + + >>> import numpy as np + >>> points = np.array([0.5, 1.5, 3.]) + >>> h2vp(0, points, 1) + array([-0.24226846-1.47147239j, -0.55793651-0.41230863j, + -0.33905896+0.32467442j]) + """ + n = _nonneg_int_or_fail(n, 'n') + if n == 0: + return hankel2(v, z) + else: + return _bessel_diff_formula(v, z, n, hankel2, -1) + + +def riccati_jn(n, x): + r"""Compute Ricatti-Bessel function of the first kind and its derivative. + + The Ricatti-Bessel function of the first kind is defined as :math:`x + j_n(x)`, where :math:`j_n` is the spherical Bessel function of the first + kind of order :math:`n`. + + This function computes the value and first derivative of the + Ricatti-Bessel function for all orders up to and including `n`. + + Parameters + ---------- + n : int + Maximum order of function to compute + x : float + Argument at which to evaluate + + Returns + ------- + jn : ndarray + Value of j0(x), ..., jn(x) + jnp : ndarray + First derivative j0'(x), ..., jn'(x) + + Notes + ----- + The computation is carried out via backward recurrence, using the + relation DLMF 10.51.1 [2]_. + + Wrapper for a Fortran routine created by Shanjie Zhang and Jianming + Jin [1]_. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + .. [2] NIST Digital Library of Mathematical Functions. 
+ https://dlmf.nist.gov/10.51.E1 + + """ + if not (isscalar(n) and isscalar(x)): + raise ValueError("arguments must be scalars.") + n = _nonneg_int_or_fail(n, 'n', strict=False) + if (n == 0): + n1 = 1 + else: + n1 = n + nm, jn, jnp = _specfun.rctj(n1, x) + return jn[:(n+1)], jnp[:(n+1)] + + +def riccati_yn(n, x): + """Compute Ricatti-Bessel function of the second kind and its derivative. + + The Ricatti-Bessel function of the second kind is defined as :math:`x + y_n(x)`, where :math:`y_n` is the spherical Bessel function of the second + kind of order :math:`n`. + + This function computes the value and first derivative of the function for + all orders up to and including `n`. + + Parameters + ---------- + n : int + Maximum order of function to compute + x : float + Argument at which to evaluate + + Returns + ------- + yn : ndarray + Value of y0(x), ..., yn(x) + ynp : ndarray + First derivative y0'(x), ..., yn'(x) + + Notes + ----- + The computation is carried out via ascending recurrence, using the + relation DLMF 10.51.1 [2]_. + + Wrapper for a Fortran routine created by Shanjie Zhang and Jianming + Jin [1]_. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + .. [2] NIST Digital Library of Mathematical Functions. + https://dlmf.nist.gov/10.51.E1 + + """ + if not (isscalar(n) and isscalar(x)): + raise ValueError("arguments must be scalars.") + n = _nonneg_int_or_fail(n, 'n', strict=False) + if (n == 0): + n1 = 1 + else: + n1 = n + nm, jn, jnp = _specfun.rcty(n1, x) + return jn[:(n+1)], jnp[:(n+1)] + + +def erf_zeros(nt): + """Compute the first nt zero in the first quadrant, ordered by absolute value. + + Zeros in the other quadrants can be obtained by using the symmetries + erf(-z) = erf(z) and erf(conj(z)) = conj(erf(z)). 
+ + + Parameters + ---------- + nt : int + The number of zeros to compute + + Returns + ------- + The locations of the zeros of erf : ndarray (complex) + Complex values at which zeros of erf(z) + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + Examples + -------- + >>> from scipy import special + >>> special.erf_zeros(1) + array([1.45061616+1.880943j]) + + Check that erf is (close to) zero for the value returned by erf_zeros + + >>> special.erf(special.erf_zeros(1)) + array([4.95159469e-14-1.16407394e-16j]) + + """ + if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt): + raise ValueError("Argument must be positive scalar integer.") + return _specfun.cerzo(nt) + + +def fresnelc_zeros(nt): + """Compute nt complex zeros of cosine Fresnel integral C(z). + + Parameters + ---------- + nt : int + Number of zeros to compute + + Returns + ------- + fresnelc_zeros: ndarray + Zeros of the cosine Fresnel integral + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt): + raise ValueError("Argument must be positive scalar integer.") + return _specfun.fcszo(1, nt) + + +def fresnels_zeros(nt): + """Compute nt complex zeros of sine Fresnel integral S(z). + + Parameters + ---------- + nt : int + Number of zeros to compute + + Returns + ------- + fresnels_zeros: ndarray + Zeros of the sine Fresnel integral + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. 
+ https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt): + raise ValueError("Argument must be positive scalar integer.") + return _specfun.fcszo(2, nt) + + +def fresnel_zeros(nt): + """Compute nt complex zeros of sine and cosine Fresnel integrals S(z) and C(z). + + Parameters + ---------- + nt : int + Number of zeros to compute + + Returns + ------- + zeros_sine: ndarray + Zeros of the sine Fresnel integral + zeros_cosine : ndarray + Zeros of the cosine Fresnel integral + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt): + raise ValueError("Argument must be positive scalar integer.") + return _specfun.fcszo(2, nt), _specfun.fcszo(1, nt) + + +def assoc_laguerre(x, n, k=0.0): + """Compute the generalized (associated) Laguerre polynomial of degree n and order k. + + The polynomial :math:`L^{(k)}_n(x)` is orthogonal over ``[0, inf)``, + with weighting function ``exp(-x) * x**k`` with ``k > -1``. + + Parameters + ---------- + x : float or ndarray + Points where to evaluate the Laguerre polynomial + n : int + Degree of the Laguerre polynomial + k : int + Order of the Laguerre polynomial + + Returns + ------- + assoc_laguerre: float or ndarray + Associated laguerre polynomial values + + Notes + ----- + `assoc_laguerre` is a simple wrapper around `eval_genlaguerre`, with + reversed argument order ``(x, n, k=0.0) --> (n, k, x)``. + + """ + return _ufuncs.eval_genlaguerre(n, k, x) + + +digamma = psi + + +def polygamma(n, x): + r"""Polygamma functions. + + Defined as :math:`\psi^{(n)}(x)` where :math:`\psi` is the + `digamma` function. See [dlmf]_ for details. 
+ + Parameters + ---------- + n : array_like + The order of the derivative of the digamma function; must be + integral + x : array_like + Real valued input + + Returns + ------- + ndarray + Function results + + See Also + -------- + digamma + + References + ---------- + .. [dlmf] NIST, Digital Library of Mathematical Functions, + https://dlmf.nist.gov/5.15 + + Examples + -------- + >>> from scipy import special + >>> x = [2, 3, 25.5] + >>> special.polygamma(1, x) + array([ 0.64493407, 0.39493407, 0.03999467]) + >>> special.polygamma(0, x) == special.psi(x) + array([ True, True, True], dtype=bool) + + """ + n, x = asarray(n), asarray(x) + fac2 = (-1.0)**(n+1) * gamma(n+1.0) * zeta(n+1, x) + return where(n == 0, psi(x), fac2) + + +def mathieu_even_coef(m, q): + r"""Fourier coefficients for even Mathieu and modified Mathieu functions. + + The Fourier series of the even solutions of the Mathieu differential + equation are of the form + + .. math:: \mathrm{ce}_{2n}(z, q) = \sum_{k=0}^{\infty} A_{(2n)}^{(2k)} \cos 2kz + + .. math:: \mathrm{ce}_{2n+1}(z, q) = + \sum_{k=0}^{\infty} A_{(2n+1)}^{(2k+1)} \cos (2k+1)z + + This function returns the coefficients :math:`A_{(2n)}^{(2k)}` for even + input m=2n, and the coefficients :math:`A_{(2n+1)}^{(2k+1)}` for odd input + m=2n+1. + + Parameters + ---------- + m : int + Order of Mathieu functions. Must be non-negative. + q : float (>=0) + Parameter of Mathieu functions. Must be non-negative. + + Returns + ------- + Ak : ndarray + Even or odd Fourier coefficients, corresponding to even or odd m. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + .. 
[2] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/28.4#i + + """ + if not (isscalar(m) and isscalar(q)): + raise ValueError("m and q must be scalars.") + if (q < 0): + raise ValueError("q >=0") + if (m != floor(m)) or (m < 0): + raise ValueError("m must be an integer >=0.") + + if (q <= 1): + qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q + else: + qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q + km = int(qm + 0.5*m) + if km > 251: + warnings.warn("Too many predicted coefficients.", RuntimeWarning, stacklevel=2) + kd = 1 + m = int(floor(m)) + if m % 2: + kd = 2 + + a = mathieu_a(m, q) + fc = _specfun.fcoef(kd, m, q, a) + return fc[:km] + + +def mathieu_odd_coef(m, q): + r"""Fourier coefficients for even Mathieu and modified Mathieu functions. + + The Fourier series of the odd solutions of the Mathieu differential + equation are of the form + + .. math:: \mathrm{se}_{2n+1}(z, q) = + \sum_{k=0}^{\infty} B_{(2n+1)}^{(2k+1)} \sin (2k+1)z + + .. math:: \mathrm{se}_{2n+2}(z, q) = + \sum_{k=0}^{\infty} B_{(2n+2)}^{(2k+2)} \sin (2k+2)z + + This function returns the coefficients :math:`B_{(2n+2)}^{(2k+2)}` for even + input m=2n+2, and the coefficients :math:`B_{(2n+1)}^{(2k+1)}` for odd + input m=2n+1. + + Parameters + ---------- + m : int + Order of Mathieu functions. Must be non-negative. + q : float (>=0) + Parameter of Mathieu functions. Must be non-negative. + + Returns + ------- + Bk : ndarray + Even or odd Fourier coefficients, corresponding to even or odd m. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. 
+ https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if not (isscalar(m) and isscalar(q)): + raise ValueError("m and q must be scalars.") + if (q < 0): + raise ValueError("q >=0") + if (m != floor(m)) or (m <= 0): + raise ValueError("m must be an integer > 0") + + if (q <= 1): + qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q + else: + qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q + km = int(qm + 0.5*m) + if km > 251: + warnings.warn("Too many predicted coefficients.", RuntimeWarning, stacklevel=2) + kd = 4 + m = int(floor(m)) + if m % 2: + kd = 3 + + b = mathieu_b(m, q) + fc = _specfun.fcoef(kd, m, q, b) + return fc[:km] + + +def lpmn(m, n, z): + """Sequence of associated Legendre functions of the first kind. + + Computes the associated Legendre function of the first kind of order m and + degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``. + Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and + ``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``. + + This function takes a real argument ``z``. For complex arguments ``z`` + use clpmn instead. + + Parameters + ---------- + m : int + ``|m| <= n``; the order of the Legendre function. + n : int + where ``n >= 0``; the degree of the Legendre function. Often + called ``l`` (lower case L) in descriptions of the associated + Legendre function + z : float + Input value. + + Returns + ------- + Pmn_z : (m+1, n+1) array + Values for all orders 0..m and degrees 0..n + Pmn_d_z : (m+1, n+1) array + Derivatives for all orders 0..m and degrees 0..n + + See Also + -------- + clpmn: associated Legendre functions of the first kind for complex z + + Notes + ----- + In the interval (-1, 1), Ferrer's function of the first kind is + returned. The phase convention used for the intervals (1, inf) + and (-inf, -1) is such that the result is always real. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. 
"Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + .. [2] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/14.3 + + """ + if not isscalar(m) or (abs(m) > n): + raise ValueError("m must be <= n.") + if not isscalar(n) or (n < 0): + raise ValueError("n must be a non-negative integer.") + if not isscalar(z): + raise ValueError("z must be scalar.") + if iscomplex(z): + raise ValueError("Argument must be real. Use clpmn instead.") + if (m < 0): + mp = -m + mf, nf = mgrid[0:mp+1, 0:n+1] + with _ufuncs.errstate(all='ignore'): + if abs(z) < 1: + # Ferrer function; DLMF 14.9.3 + fixarr = where(mf > nf, 0.0, + (-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1)) + else: + # Match to clpmn; DLMF 14.9.13 + fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1)) + else: + mp = m + p, pd = _specfun.lpmn(mp, n, z) + if (m < 0): + p = p * fixarr + pd = pd * fixarr + return p, pd + + +def clpmn(m, n, z, type=3): + """Associated Legendre function of the first kind for complex arguments. + + Computes the associated Legendre function of the first kind of order m and + degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``. + Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and + ``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``. + + Parameters + ---------- + m : int + ``|m| <= n``; the order of the Legendre function. + n : int + where ``n >= 0``; the degree of the Legendre function. Often + called ``l`` (lower case L) in descriptions of the associated + Legendre function + z : float or complex + Input value. 
+ type : int, optional + takes values 2 or 3 + 2: cut on the real axis ``|x| > 1`` + 3: cut on the real axis ``-1 < x < 1`` (default) + + Returns + ------- + Pmn_z : (m+1, n+1) array + Values for all orders ``0..m`` and degrees ``0..n`` + Pmn_d_z : (m+1, n+1) array + Derivatives for all orders ``0..m`` and degrees ``0..n`` + + See Also + -------- + lpmn: associated Legendre functions of the first kind for real z + + Notes + ----- + By default, i.e. for ``type=3``, phase conventions are chosen according + to [1]_ such that the function is analytic. The cut lies on the interval + (-1, 1). Approaching the cut from above or below in general yields a phase + factor with respect to Ferrer's function of the first kind + (cf. `lpmn`). + + For ``type=2`` a cut at ``|x| > 1`` is chosen. Approaching the real values + on the interval (-1, 1) in the complex plane yields Ferrer's function + of the first kind. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + .. 
[2] NIST Digital Library of Mathematical Functions + https://dlmf.nist.gov/14.21 + + """ + if not isscalar(m) or (abs(m) > n): + raise ValueError("m must be <= n.") + if not isscalar(n) or (n < 0): + raise ValueError("n must be a non-negative integer.") + if not isscalar(z): + raise ValueError("z must be scalar.") + if not (type == 2 or type == 3): + raise ValueError("type must be either 2 or 3.") + if (m < 0): + mp = -m + mf, nf = mgrid[0:mp+1, 0:n+1] + with _ufuncs.errstate(all='ignore'): + if type == 2: + fixarr = where(mf > nf, 0.0, + (-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1)) + else: + fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1)) + else: + mp = m + p, pd = _specfun.clpmn(mp, n, z, type) + if (m < 0): + p = p * fixarr + pd = pd * fixarr + return p, pd + + +def lqmn(m, n, z): + """Sequence of associated Legendre functions of the second kind. + + Computes the associated Legendre function of the second kind of order m and + degree n, ``Qmn(z)`` = :math:`Q_n^m(z)`, and its derivative, ``Qmn'(z)``. + Returns two arrays of size ``(m+1, n+1)`` containing ``Qmn(z)`` and + ``Qmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``. + + Parameters + ---------- + m : int + ``|m| <= n``; the order of the Legendre function. + n : int + where ``n >= 0``; the degree of the Legendre function. Often + called ``l`` (lower case L) in descriptions of the associated + Legendre function + z : complex + Input value. + + Returns + ------- + Qmn_z : (m+1, n+1) array + Values for all orders 0..m and degrees 0..n + Qmn_d_z : (m+1, n+1) array + Derivatives for all orders 0..m and degrees 0..n + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. 
+ https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if not isscalar(m) or (m < 0): + raise ValueError("m must be a non-negative integer.") + if not isscalar(n) or (n < 0): + raise ValueError("n must be a non-negative integer.") + if not isscalar(z): + raise ValueError("z must be scalar.") + m = int(m) + n = int(n) + + # Ensure neither m nor n == 0 + mm = max(1, m) + nn = max(1, n) + + if iscomplex(z): + q, qd = _specfun.clqmn(mm, nn, z) + else: + q, qd = _specfun.lqmn(mm, nn, z) + return q[:(m+1), :(n+1)], qd[:(m+1), :(n+1)] + + +def bernoulli(n): + """Bernoulli numbers B0..Bn (inclusive). + + Parameters + ---------- + n : int + Indicated the number of terms in the Bernoulli series to generate. + + Returns + ------- + ndarray + The Bernoulli numbers ``[B(0), B(1), ..., B(n)]``. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + .. [2] "Bernoulli number", Wikipedia, https://en.wikipedia.org/wiki/Bernoulli_number + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import bernoulli, zeta + >>> bernoulli(4) + array([ 1. , -0.5 , 0.16666667, 0. , -0.03333333]) + + The Wikipedia article ([2]_) points out the relationship between the + Bernoulli numbers and the zeta function, ``B_n^+ = -n * zeta(1 - n)`` + for ``n > 0``: + + >>> n = np.arange(1, 5) + >>> -n * zeta(1 - n) + array([ 0.5 , 0.16666667, -0. , -0.03333333]) + + Note that, in the notation used in the wikipedia article, + `bernoulli` computes ``B_n^-`` (i.e. it used the convention that + ``B_1`` is -1/2). The relation given above is for ``B_n^+``, so the + sign of 0.5 does not match the output of ``bernoulli(4)``. 
+ + """ + if not isscalar(n) or (n < 0): + raise ValueError("n must be a non-negative integer.") + n = int(n) + if (n < 2): + n1 = 2 + else: + n1 = n + return _specfun.bernob(int(n1))[:(n+1)] + + +def euler(n): + """Euler numbers E(0), E(1), ..., E(n). + + The Euler numbers [1]_ are also known as the secant numbers. + + Because ``euler(n)`` returns floating point values, it does not give + exact values for large `n`. The first inexact value is E(22). + + Parameters + ---------- + n : int + The highest index of the Euler number to be returned. + + Returns + ------- + ndarray + The Euler numbers [E(0), E(1), ..., E(n)]. + The odd Euler numbers, which are all zero, are included. + + References + ---------- + .. [1] Sequence A122045, The On-Line Encyclopedia of Integer Sequences, + https://oeis.org/A122045 + .. [2] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import euler + >>> euler(6) + array([ 1., 0., -1., 0., 5., 0., -61.]) + + >>> euler(13).astype(np.int64) + array([ 1, 0, -1, 0, 5, 0, -61, + 0, 1385, 0, -50521, 0, 2702765, 0]) + + >>> euler(22)[-1] # Exact value of E(22) is -69348874393137901. + -69348874393137976.0 + + """ + if not isscalar(n) or (n < 0): + raise ValueError("n must be a non-negative integer.") + n = int(n) + if (n < 2): + n1 = 2 + else: + n1 = n + return _specfun.eulerb(n1)[:(n+1)] + + +def lpn(n, z): + """Legendre function of the first kind. + + Compute sequence of Legendre functions of the first kind (polynomials), + Pn(z) and derivatives for all degrees from 0 to n (inclusive). + + See also special.legendre for polynomial class. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. 
+ https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if not (isscalar(n) and isscalar(z)): + raise ValueError("arguments must be scalars.") + n = _nonneg_int_or_fail(n, 'n', strict=False) + if (n < 1): + n1 = 1 + else: + n1 = n + if iscomplex(z): + pn, pd = _specfun.clpn(n1, z) + else: + pn, pd = _specfun.lpn(n1, z) + return pn[:(n+1)], pd[:(n+1)] + + +def lqn(n, z): + """Legendre function of the second kind. + + Compute sequence of Legendre functions of the second kind, Qn(z) and + derivatives for all degrees from 0 to n (inclusive). + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if not (isscalar(n) and isscalar(z)): + raise ValueError("arguments must be scalars.") + n = _nonneg_int_or_fail(n, 'n', strict=False) + if (n < 1): + n1 = 1 + else: + n1 = n + if iscomplex(z): + qn, qd = _specfun.clqn(n1, z) + else: + qn, qd = _specfun.lqnb(n1, z) + return qn[:(n+1)], qd[:(n+1)] + + +def ai_zeros(nt): + """ + Compute `nt` zeros and values of the Airy function Ai and its derivative. + + Computes the first `nt` zeros, `a`, of the Airy function Ai(x); + first `nt` zeros, `ap`, of the derivative of the Airy function Ai'(x); + the corresponding values Ai(a'); + and the corresponding values Ai'(a). + + Parameters + ---------- + nt : int + Number of zeros to compute + + Returns + ------- + a : ndarray + First `nt` zeros of Ai(x) + ap : ndarray + First `nt` zeros of Ai'(x) + ai : ndarray + Values of Ai(x) evaluated at first `nt` zeros of Ai'(x) + aip : ndarray + Values of Ai'(x) evaluated at first `nt` zeros of Ai(x) + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. 
+ https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + Examples + -------- + >>> from scipy import special + >>> a, ap, ai, aip = special.ai_zeros(3) + >>> a + array([-2.33810741, -4.08794944, -5.52055983]) + >>> ap + array([-1.01879297, -3.24819758, -4.82009921]) + >>> ai + array([ 0.53565666, -0.41901548, 0.38040647]) + >>> aip + array([ 0.70121082, -0.80311137, 0.86520403]) + + """ + kf = 1 + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("nt must be a positive integer scalar.") + return _specfun.airyzo(nt, kf) + + +def bi_zeros(nt): + """ + Compute `nt` zeros and values of the Airy function Bi and its derivative. + + Computes the first `nt` zeros, b, of the Airy function Bi(x); + first `nt` zeros, b', of the derivative of the Airy function Bi'(x); + the corresponding values Bi(b'); + and the corresponding values Bi'(b). + + Parameters + ---------- + nt : int + Number of zeros to compute + + Returns + ------- + b : ndarray + First `nt` zeros of Bi(x) + bp : ndarray + First `nt` zeros of Bi'(x) + bi : ndarray + Values of Bi(x) evaluated at first `nt` zeros of Bi'(x) + bip : ndarray + Values of Bi'(x) evaluated at first `nt` zeros of Bi(x) + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. 
+ https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + Examples + -------- + >>> from scipy import special + >>> b, bp, bi, bip = special.bi_zeros(3) + >>> b + array([-1.17371322, -3.2710933 , -4.83073784]) + >>> bp + array([-2.29443968, -4.07315509, -5.51239573]) + >>> bi + array([-0.45494438, 0.39652284, -0.36796916]) + >>> bip + array([ 0.60195789, -0.76031014, 0.83699101]) + + """ + kf = 2 + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("nt must be a positive integer scalar.") + return _specfun.airyzo(nt, kf) + + +def lmbda(v, x): + r"""Jahnke-Emden Lambda function, Lambdav(x). + + This function is defined as [2]_, + + .. math:: \Lambda_v(x) = \Gamma(v+1) \frac{J_v(x)}{(x/2)^v}, + + where :math:`\Gamma` is the gamma function and :math:`J_v` is the + Bessel function of the first kind. + + Parameters + ---------- + v : float + Order of the Lambda function + x : float + Value at which to evaluate the function and derivatives + + Returns + ------- + vl : ndarray + Values of Lambda_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v. + dl : ndarray + Derivatives Lambda_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + .. [2] Jahnke, E. and Emde, F. "Tables of Functions with Formulae and + Curves" (4th ed.), Dover, 1945 + """ + if not (isscalar(v) and isscalar(x)): + raise ValueError("arguments must be scalars.") + if (v < 0): + raise ValueError("argument must be > 0.") + n = int(v) + v0 = v - n + if (n < 1): + n1 = 1 + else: + n1 = n + v1 = n1 + v0 + if (v != floor(v)): + vm, vl, dl = _specfun.lamv(v1, x) + else: + vm, vl, dl = _specfun.lamn(v1, x) + return vl[:(n+1)], dl[:(n+1)] + + +def pbdv_seq(v, x): + """Parabolic cylinder functions Dv(x) and derivatives. 
+ + Parameters + ---------- + v : float + Order of the parabolic cylinder function + x : float + Value at which to evaluate the function and derivatives + + Returns + ------- + dv : ndarray + Values of D_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v. + dp : ndarray + Derivatives D_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 13. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if not (isscalar(v) and isscalar(x)): + raise ValueError("arguments must be scalars.") + n = int(v) + v0 = v-n + if (n < 1): + n1 = 1 + else: + n1 = n + v1 = n1 + v0 + dv, dp, pdf, pdd = _specfun.pbdv(v1, x) + return dv[:n1+1], dp[:n1+1] + + +def pbvv_seq(v, x): + """Parabolic cylinder functions Vv(x) and derivatives. + + Parameters + ---------- + v : float + Order of the parabolic cylinder function + x : float + Value at which to evaluate the function and derivatives + + Returns + ------- + dv : ndarray + Values of V_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v. + dp : ndarray + Derivatives V_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 13. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if not (isscalar(v) and isscalar(x)): + raise ValueError("arguments must be scalars.") + n = int(v) + v0 = v-n + if (n <= 1): + n1 = 1 + else: + n1 = n + v1 = n1 + v0 + dv, dp, pdf, pdd = _specfun.pbvv(v1, x) + return dv[:n1+1], dp[:n1+1] + + +def pbdn_seq(n, z): + """Parabolic cylinder functions Dn(z) and derivatives. 
+ + Parameters + ---------- + n : int + Order of the parabolic cylinder function + z : complex + Value at which to evaluate the function and derivatives + + Returns + ------- + dv : ndarray + Values of D_i(z), for i=0, ..., i=n. + dp : ndarray + Derivatives D_i'(z), for i=0, ..., i=n. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996, chapter 13. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if not (isscalar(n) and isscalar(z)): + raise ValueError("arguments must be scalars.") + if (floor(n) != n): + raise ValueError("n must be an integer.") + if (abs(n) <= 1): + n1 = 1 + else: + n1 = n + cpb, cpd = _specfun.cpbdn(n1, z) + return cpb[:n1+1], cpd[:n1+1] + + +def ber_zeros(nt): + """Compute nt zeros of the Kelvin function ber. + + Parameters + ---------- + nt : int + Number of zeros to compute. Must be positive. + + Returns + ------- + ndarray + First `nt` zeros of the Kelvin function. + + See Also + -------- + ber + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("nt must be positive integer scalar.") + return _specfun.klvnzo(nt, 1) + + +def bei_zeros(nt): + """Compute nt zeros of the Kelvin function bei. + + Parameters + ---------- + nt : int + Number of zeros to compute. Must be positive. + + Returns + ------- + ndarray + First `nt` zeros of the Kelvin function. + + See Also + -------- + bei + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. 
+ https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("nt must be positive integer scalar.") + return _specfun.klvnzo(nt, 2) + + +def ker_zeros(nt): + """Compute nt zeros of the Kelvin function ker. + + Parameters + ---------- + nt : int + Number of zeros to compute. Must be positive. + + Returns + ------- + ndarray + First `nt` zeros of the Kelvin function. + + See Also + -------- + ker + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("nt must be positive integer scalar.") + return _specfun.klvnzo(nt, 3) + + +def kei_zeros(nt): + """Compute nt zeros of the Kelvin function kei. + + Parameters + ---------- + nt : int + Number of zeros to compute. Must be positive. + + Returns + ------- + ndarray + First `nt` zeros of the Kelvin function. + + See Also + -------- + kei + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("nt must be positive integer scalar.") + return _specfun.klvnzo(nt, 4) + + +def berp_zeros(nt): + """Compute nt zeros of the derivative of the Kelvin function ber. + + Parameters + ---------- + nt : int + Number of zeros to compute. Must be positive. + + Returns + ------- + ndarray + First `nt` zeros of the derivative of the Kelvin function. + + See Also + -------- + ber, berp + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. 
+ https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("nt must be positive integer scalar.") + return _specfun.klvnzo(nt, 5) + + +def beip_zeros(nt): + """Compute nt zeros of the derivative of the Kelvin function bei. + + Parameters + ---------- + nt : int + Number of zeros to compute. Must be positive. + + Returns + ------- + ndarray + First `nt` zeros of the derivative of the Kelvin function. + + See Also + -------- + bei, beip + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("nt must be positive integer scalar.") + return _specfun.klvnzo(nt, 6) + + +def kerp_zeros(nt): + """Compute nt zeros of the derivative of the Kelvin function ker. + + Parameters + ---------- + nt : int + Number of zeros to compute. Must be positive. + + Returns + ------- + ndarray + First `nt` zeros of the derivative of the Kelvin function. + + See Also + -------- + ker, kerp + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("nt must be positive integer scalar.") + return _specfun.klvnzo(nt, 7) + + +def keip_zeros(nt): + """Compute nt zeros of the derivative of the Kelvin function kei. + + Parameters + ---------- + nt : int + Number of zeros to compute. Must be positive. + + Returns + ------- + ndarray + First `nt` zeros of the derivative of the Kelvin function. + + See Also + -------- + kei, keip + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. 
"Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("nt must be positive integer scalar.") + return _specfun.klvnzo(nt, 8) + + +def kelvin_zeros(nt): + """Compute nt zeros of all Kelvin functions. + + Returned in a length-8 tuple of arrays of length nt. The tuple contains + the arrays of zeros of (ber, bei, ker, kei, ber', bei', ker', kei'). + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): + raise ValueError("nt must be positive integer scalar.") + return (_specfun.klvnzo(nt, 1), + _specfun.klvnzo(nt, 2), + _specfun.klvnzo(nt, 3), + _specfun.klvnzo(nt, 4), + _specfun.klvnzo(nt, 5), + _specfun.klvnzo(nt, 6), + _specfun.klvnzo(nt, 7), + _specfun.klvnzo(nt, 8)) + + +def pro_cv_seq(m, n, c): + """Characteristic values for prolate spheroidal wave functions. + + Compute a sequence of characteristic values for the prolate + spheroidal wave functions for mode m and n'=m..n and spheroidal + parameter c. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if not (isscalar(m) and isscalar(n) and isscalar(c)): + raise ValueError("Arguments must be scalars.") + if (n != floor(n)) or (m != floor(m)): + raise ValueError("Modes must be integers.") + if (n-m > 199): + raise ValueError("Difference between n and m is too large.") + maxL = n-m+1 + return _specfun.segv(m, n, c, 1)[1][:maxL] + + +def obl_cv_seq(m, n, c): + """Characteristic values for oblate spheroidal wave functions. 
+ + Compute a sequence of characteristic values for the oblate + spheroidal wave functions for mode m and n'=m..n and spheroidal + parameter c. + + References + ---------- + .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special + Functions", John Wiley and Sons, 1996. + https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html + + """ + if not (isscalar(m) and isscalar(n) and isscalar(c)): + raise ValueError("Arguments must be scalars.") + if (n != floor(n)) or (m != floor(m)): + raise ValueError("Modes must be integers.") + if (n-m > 199): + raise ValueError("Difference between n and m is too large.") + maxL = n-m+1 + return _specfun.segv(m, n, c, -1)[1][:maxL] + + +@_deprecate_positional_args(version="1.14") +def comb(N, k, *, exact=False, repetition=False, legacy=_NoValue): + """The number of combinations of N things taken k at a time. + + This is often expressed as "N choose k". + + Parameters + ---------- + N : int, ndarray + Number of things. + k : int, ndarray + Number of elements taken. + exact : bool, optional + For integers, if `exact` is False, then floating point precision is + used, otherwise the result is computed exactly. For non-integers, if + `exact` is True, is disregarded. + repetition : bool, optional + If `repetition` is True, then the number of combinations with + repetition is computed. + legacy : bool, optional + If `legacy` is True and `exact` is True, then non-integral arguments + are cast to ints; if `legacy` is False, the result for non-integral + arguments is unaffected by the value of `exact`. + + .. deprecated:: 1.9.0 + Using `legacy` is deprecated and will removed by + Scipy 1.14.0. If you want to keep the legacy behaviour, cast + your inputs directly, e.g. + ``comb(int(your_N), int(your_k), exact=True)``. + + Returns + ------- + val : int, float, ndarray + The total number of combinations. + + See Also + -------- + binom : Binomial coefficient considered as a function of two real + variables. 
+ + Notes + ----- + - Array arguments accepted only for exact=False case. + - If N < 0, or k < 0, then 0 is returned. + - If k > N and repetition=False, then 0 is returned. + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import comb + >>> k = np.array([3, 4]) + >>> n = np.array([10, 10]) + >>> comb(n, k, exact=False) + array([ 120., 210.]) + >>> comb(10, 3, exact=True) + 120 + >>> comb(10, 3, exact=True, repetition=True) + 220 + + """ + if legacy is not _NoValue: + warnings.warn( + "Using 'legacy' keyword is deprecated and will be removed by " + "Scipy 1.14.0. If you want to keep the legacy behaviour, cast " + "your inputs directly, e.g. " + "'comb(int(your_N), int(your_k), exact=True)'.", + DeprecationWarning, + stacklevel=2 + ) + if repetition: + return comb(N + k - 1, k, exact=exact, legacy=legacy) + if exact: + if int(N) == N and int(k) == k: + # _comb_int casts inputs to integers, which is safe & intended here + return _comb_int(N, k) + elif legacy: + # here at least one number is not an integer; legacy behavior uses + # lossy casts to int + return _comb_int(N, k) + # otherwise, we disregard `exact=True`; it makes no sense for + # non-integral arguments + return comb(N, k) + else: + k, N = asarray(k), asarray(N) + cond = (k <= N) & (N >= 0) & (k >= 0) + vals = binom(N, k) + if isinstance(vals, np.ndarray): + vals[~cond] = 0 + elif not cond: + vals = np.float64(0) + return vals + + +def perm(N, k, exact=False): + """Permutations of N things taken k at a time, i.e., k-permutations of N. + + It's also known as "partial permutations". + + Parameters + ---------- + N : int, ndarray + Number of things. + k : int, ndarray + Number of elements taken. + exact : bool, optional + If `exact` is False, then floating point precision is used, otherwise + exact long integer is computed. + + Returns + ------- + val : int, ndarray + The number of k-permutations of N. + + Notes + ----- + - Array arguments accepted only for exact=False case. 
+ - If k > N, N < 0, or k < 0, then a 0 is returned. + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import perm + >>> k = np.array([3, 4]) + >>> n = np.array([10, 10]) + >>> perm(n, k) + array([ 720., 5040.]) + >>> perm(10, 3, exact=True) + 720 + + """ + if exact: + if (k > N) or (N < 0) or (k < 0): + return 0 + val = 1 + for i in range(N - k + 1, N + 1): + val *= i + return val + else: + k, N = asarray(k), asarray(N) + cond = (k <= N) & (N >= 0) & (k >= 0) + vals = poch(N - k + 1, k) + if isinstance(vals, np.ndarray): + vals[~cond] = 0 + elif not cond: + vals = np.float64(0) + return vals + + +# https://stackoverflow.com/a/16327037 +def _range_prod(lo, hi, k=1): + """ + Product of a range of numbers spaced k apart (from hi). + + For k=1, this returns the product of + lo * (lo+1) * (lo+2) * ... * (hi-2) * (hi-1) * hi + = hi! / (lo-1)! + + For k>1, it correspond to taking only every k'th number when + counting down from hi - e.g. 18!!!! = _range_prod(1, 18, 4). + + Breaks into smaller products first for speed: + _range_prod(2, 9) = ((2*3)*(4*5))*((6*7)*(8*9)) + """ + if lo + k < hi: + mid = (hi + lo) // 2 + if k > 1: + # make sure mid is a multiple of k away from hi + mid = mid - ((mid - hi) % k) + return _range_prod(lo, mid, k) * _range_prod(mid + k, hi, k) + elif lo + k == hi: + return lo * hi + else: + return hi + + +def _factorialx_array_exact(n, k=1): + """ + Exact computation of factorial for an array. + + The factorials are computed in incremental fashion, by taking + the sorted unique values of n and multiplying the intervening + numbers between the different unique values. + + In other words, the factorial for the largest input is only + computed once, with each other result computed in the process. + + k > 1 corresponds to the multifactorial. 
+ """ + un = np.unique(n) + # numpy changed nan-sorting behaviour with 1.21, see numpy/numpy#18070; + # to unify the behaviour, we remove the nan's here; the respective + # values will be set separately at the end + un = un[~np.isnan(un)] + + # Convert to object array if np.int64 can't handle size + if np.isnan(n).any(): + dt = float + elif k in _FACTORIALK_LIMITS_64BITS.keys(): + if un[-1] > _FACTORIALK_LIMITS_64BITS[k]: + # e.g. k=1: 21! > np.iinfo(np.int64).max + dt = object + elif un[-1] > _FACTORIALK_LIMITS_32BITS[k]: + # e.g. k=3: 26!!! > np.iinfo(np.int32).max + dt = np.int64 + else: + dt = np.dtype("long") + else: + # for k >= 10, we always use object + dt = object + + out = np.empty_like(n, dtype=dt) + + # Handle invalid/trivial values + un = un[un > 1] + out[n < 2] = 1 + out[n < 0] = 0 + + # Calculate products of each range of numbers + # we can only multiply incrementally if the values are k apart; + # therefore we partition `un` into "lanes", i.e. its residues modulo k + for lane in range(0, k): + ul = un[(un % k) == lane] if k > 1 else un + if ul.size: + # after np.unique, un resp. ul are sorted, ul[0] is the smallest; + # cast to python ints to avoid overflow with np.int-types + val = _range_prod(1, int(ul[0]), k=k) + out[n == ul[0]] = val + for i in range(len(ul) - 1): + # by the filtering above, we have ensured that prev & current + # are a multiple of k apart + prev = ul[i] + current = ul[i + 1] + # we already multiplied all factors until prev; continue + # building the full factorial from the following (`prev + 1`); + # use int() for the same reason as above + val *= _range_prod(int(prev + 1), int(current), k=k) + out[n == current] = val + + if np.isnan(n).any(): + out = out.astype(np.float64) + out[np.isnan(n)] = np.nan + return out + + +def _factorialx_array_approx(n, k): + """ + Calculate approximation to multifactorial for array n and integer k. + + Ensure we only call _factorialx_approx_core where necessary/required. 
+ """ + result = zeros(n.shape) + # keep nans as nans + place(result, np.isnan(n), np.nan) + # only compute where n >= 0 (excludes nans), everything else is 0 + cond = (n >= 0) + n_to_compute = extract(cond, n) + place(result, cond, _factorialx_approx_core(n_to_compute, k=k)) + return result + + +def _factorialx_approx_core(n, k): + """ + Core approximation to multifactorial for array n and integer k. + """ + if k == 1: + # shortcut for k=1 + result = gamma(n + 1) + if isinstance(n, np.ndarray): + # gamma does not maintain 0-dim arrays + result = np.array(result) + return result + + n_mod_k = n % k + # scalar case separately, unified handling would be inefficient for arrays; + # don't use isscalar due to numpy/numpy#23574; 0-dim arrays treated below + if not isinstance(n, np.ndarray): + return ( + np.power(k, (n - n_mod_k) / k) + * gamma(n / k + 1) / gamma(n_mod_k / k + 1) + * max(n_mod_k, 1) + ) + + # factor that's independent of the residue class (see factorialk docstring) + result = np.power(k, n / k) * gamma(n / k + 1) + # factor dependent on residue r (for `r=0` it's 1, so we skip `r=0` + # below and thus also avoid evaluating `max(r, 1)`) + def corr(k, r): return np.power(k, -r / k) / gamma(r / k + 1) * r + for r in np.unique(n_mod_k): + if r == 0: + continue + result[n_mod_k == r] *= corr(k, r) + return result + + +def factorial(n, exact=False): + """ + The factorial of a number or array of numbers. + + The factorial of non-negative integer `n` is the product of all + positive integers less than or equal to `n`:: + + n! = n * (n - 1) * (n - 2) * ... * 1 + + Parameters + ---------- + n : int or array_like of ints + Input values. If ``n < 0``, the return value is 0. + exact : bool, optional + If True, calculate the answer exactly using long integer arithmetic. + If False, result is approximated in floating point rapidly using the + `gamma` function. + Default is False. 
+ + Returns + ------- + nf : float or int or ndarray + Factorial of `n`, as integer or float depending on `exact`. + + Notes + ----- + For arrays with ``exact=True``, the factorial is computed only once, for + the largest input, with each other result computed in the process. + The output dtype is increased to ``int64`` or ``object`` if necessary. + + With ``exact=False`` the factorial is approximated using the gamma + function: + + .. math:: n! = \\Gamma(n+1) + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import factorial + >>> arr = np.array([3, 4, 5]) + >>> factorial(arr, exact=False) + array([ 6., 24., 120.]) + >>> factorial(arr, exact=True) + array([ 6, 24, 120]) + >>> factorial(5, exact=True) + 120 + + """ + # don't use isscalar due to numpy/numpy#23574; 0-dim arrays treated below + if np.ndim(n) == 0 and not isinstance(n, np.ndarray): + # scalar cases + if n is None or np.isnan(n): + return np.nan + elif not (np.issubdtype(type(n), np.integer) + or np.issubdtype(type(n), np.floating)): + raise ValueError( + f"Unsupported datatype for factorial: {type(n)}\n" + "Permitted data types are integers and floating point numbers" + ) + elif n < 0: + return 0 + elif exact and np.issubdtype(type(n), np.integer): + return math.factorial(n) + elif exact: + msg = ("Non-integer values of `n` together with `exact=True` are " + "deprecated. 
Either ensure integer `n` or use `exact=False`.") + warnings.warn(msg, DeprecationWarning, stacklevel=2) + return _factorialx_approx_core(n, k=1) + + # arrays & array-likes + n = asarray(n) + if n.size == 0: + # return empty arrays unchanged + return n + if not (np.issubdtype(n.dtype, np.integer) + or np.issubdtype(n.dtype, np.floating)): + raise ValueError( + f"Unsupported datatype for factorial: {n.dtype}\n" + "Permitted data types are integers and floating point numbers" + ) + if exact and not np.issubdtype(n.dtype, np.integer): + msg = ("factorial with `exact=True` does not " + "support non-integral arrays") + raise ValueError(msg) + + if exact: + return _factorialx_array_exact(n, k=1) + return _factorialx_array_approx(n, k=1) + + +def factorial2(n, exact=False): + """Double factorial. + + This is the factorial with every second value skipped. E.g., ``7!! = 7 * 5 + * 3 * 1``. It can be approximated numerically as:: + + n!! = 2 ** (n / 2) * gamma(n / 2 + 1) * sqrt(2 / pi) n odd + = 2 ** (n / 2) * gamma(n / 2 + 1) n even + = 2 ** (n / 2) * (n / 2)! n even + + Parameters + ---------- + n : int or array_like + Calculate ``n!!``. If ``n < 0``, the return value is 0. + exact : bool, optional + The result can be approximated rapidly using the gamma-formula + above (default). If `exact` is set to True, calculate the + answer exactly using integer arithmetic. + + Returns + ------- + nff : float or int + Double factorial of `n`, as an int or a float depending on + `exact`. 
+ + Examples + -------- + >>> from scipy.special import factorial2 + >>> factorial2(7, exact=False) + array(105.00000000000001) + >>> factorial2(7, exact=True) + 105 + + """ + + # don't use isscalar due to numpy/numpy#23574; 0-dim arrays treated below + if np.ndim(n) == 0 and not isinstance(n, np.ndarray): + # scalar cases + if n is None or np.isnan(n): + return np.nan + elif not np.issubdtype(type(n), np.integer): + msg = "factorial2 does not support non-integral scalar arguments" + raise ValueError(msg) + elif n < 0: + return 0 + elif n in {0, 1}: + return 1 + # general integer case + if exact: + return _range_prod(1, n, k=2) + return _factorialx_approx_core(n, k=2) + # arrays & array-likes + n = asarray(n) + if n.size == 0: + # return empty arrays unchanged + return n + if not np.issubdtype(n.dtype, np.integer): + raise ValueError("factorial2 does not support non-integral arrays") + if exact: + return _factorialx_array_exact(n, k=2) + return _factorialx_array_approx(n, k=2) + + +def factorialk(n, k, exact=None): + """Multifactorial of n of order k, n(!!...!). + + This is the multifactorial of n skipping k values. For example, + + factorialk(17, 4) = 17!!!! = 17 * 13 * 9 * 5 * 1 + + In particular, for any integer ``n``, we have + + factorialk(n, 1) = factorial(n) + + factorialk(n, 2) = factorial2(n) + + Parameters + ---------- + n : int or array_like + Calculate multifactorial. If ``n < 0``, the return value is 0. + k : int + Order of multifactorial. + exact : bool, optional + If exact is set to True, calculate the answer exactly using + integer arithmetic, otherwise use an approximation (faster, + but yields floats instead of integers) + + .. warning:: + The default value for ``exact`` will be changed to + ``False`` in SciPy 1.15.0. + + Returns + ------- + val : int + Multifactorial of `n`. 
+ + Examples + -------- + >>> from scipy.special import factorialk + >>> factorialk(5, k=1, exact=True) + 120 + >>> factorialk(5, k=3, exact=True) + 10 + >>> factorialk([5, 7, 9], k=3, exact=True) + array([ 10, 28, 162]) + >>> factorialk([5, 7, 9], k=3, exact=False) + array([ 10., 28., 162.]) + + Notes + ----- + While less straight-forward than for the double-factorial, it's possible to + calculate a general approximation formula of n!(k) by studying ``n`` for a given + remainder ``r < k`` (thus ``n = m * k + r``, resp. ``r = n % k``), which can be + put together into something valid for all integer values ``n >= 0`` & ``k > 0``:: + + n!(k) = k ** ((n - r)/k) * gamma(n/k + 1) / gamma(r/k + 1) * max(r, 1) + + This is the basis of the approximation when ``exact=False``. Compare also [1]. + + References + ---------- + .. [1] Complex extension to multifactorial + https://en.wikipedia.org/wiki/Double_factorial#Alternative_extension_of_the_multifactorial + """ + if not np.issubdtype(type(k), np.integer) or k < 1: + raise ValueError(f"k must be a positive integer, received: {k}") + if exact is None: + msg = ( + "factorialk will default to `exact=False` starting from SciPy " + "1.15.0. To avoid behaviour changes due to this, explicitly " + "specify either `exact=False` (faster, returns floats), or the " + "past default `exact=True` (slower, lossless result as integer)." + ) + warnings.warn(msg, DeprecationWarning, stacklevel=2) + exact = True + + helpmsg = "" + if k in {1, 2}: + func = "factorial" if k == 1 else "factorial2" + helpmsg = f"\nYou can try to use {func} instead" + + # don't use isscalar due to numpy/numpy#23574; 0-dim arrays treated below + if np.ndim(n) == 0 and not isinstance(n, np.ndarray): + # scalar cases + if n is None or np.isnan(n): + return np.nan + elif not np.issubdtype(type(n), np.integer): + msg = "factorialk does not support non-integral scalar arguments!" 
+ raise ValueError(msg + helpmsg) + elif n < 0: + return 0 + elif n in {0, 1}: + return 1 + # general integer case + if exact: + return _range_prod(1, n, k=k) + return _factorialx_approx_core(n, k=k) + # arrays & array-likes + n = asarray(n) + if n.size == 0: + # return empty arrays unchanged + return n + if not np.issubdtype(n.dtype, np.integer): + msg = "factorialk does not support non-integral arrays!" + raise ValueError(msg + helpmsg) + if exact: + return _factorialx_array_exact(n, k=k) + return _factorialx_array_approx(n, k=k) + + +def stirling2(N, K, *, exact=False): + r"""Generate Stirling number(s) of the second kind. + + Stirling numbers of the second kind count the number of ways to + partition a set with N elements into K non-empty subsets. + + The values this function returns are calculated using a dynamic + program which avoids redundant computation across the subproblems + in the solution. For array-like input, this implementation also + avoids redundant computation across the different Stirling number + calculations. + + The numbers are sometimes denoted + + .. math:: + + {N \brace{K}} + + see [1]_ for details. This is often expressed-verbally-as + "N subset K". + + Parameters + ---------- + N : int, ndarray + Number of things. + K : int, ndarray + Number of non-empty subsets taken. + exact : bool, optional + Uses dynamic programming (DP) with floating point + numbers for smaller arrays and uses a second order approximation due to + Temme for larger entries of `N` and `K` that allows trading speed for + accuracy. See [2]_ for a description. Temme approximation is used for + values `n>50`. The max error from the DP has max relative error + `4.5*10^-16` for `n<=50` and the max error from the Temme approximation + has max relative error `5*10^-5` for `51 <= n < 70` and + `9*10^-6` for `70 <= n < 101`. Note that these max relative errors will + decrease further as `n` increases. 
+ + Returns + ------- + val : int, float, ndarray + The number of partitions. + + See Also + -------- + comb : The number of combinations of N things taken k at a time. + + Notes + ----- + - If N < 0, or K < 0, then 0 is returned. + - If K > N, then 0 is returned. + + The output type will always be `int` or ndarray of `object`. + The input must contain either numpy or python integers otherwise a + TypeError is raised. + + References + ---------- + .. [1] R. L. Graham, D. E. Knuth and O. Patashnik, "Concrete + Mathematics: A Foundation for Computer Science," Addison-Wesley + Publishing Company, Boston, 1989. Chapter 6, page 258. + + .. [2] Temme, Nico M. "Asymptotic estimates of Stirling numbers." + Studies in Applied Mathematics 89.3 (1993): 233-243. + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import stirling2 + >>> k = np.array([3, -1, 3]) + >>> n = np.array([10, 10, 9]) + >>> stirling2(n, k) + array([9330, 0, 3025], dtype=object) + + """ + output_is_scalar = np.isscalar(N) and np.isscalar(K) + # make a min-heap of unique (n,k) pairs + N, K = asarray(N), asarray(K) + if not np.issubdtype(N.dtype, np.integer): + raise TypeError("Argument `N` must contain only integers") + if not np.issubdtype(K.dtype, np.integer): + raise TypeError("Argument `K` must contain only integers") + if not exact: + # NOTE: here we allow np.uint via casting to double types prior to + # passing to private ufunc dispatcher. All dispatched functions + # take double type for (n,k) arguments and return double. 
+ return _stirling2_inexact(N.astype(float), K.astype(float)) + nk_pairs = list( + set([(n.take(0), k.take(0)) + for n, k in np.nditer([N, K], ['refs_ok'])]) + ) + heapify(nk_pairs) + # base mapping for small values + snsk_vals = defaultdict(int) + for pair in [(0, 0), (1, 1), (2, 1), (2, 2)]: + snsk_vals[pair] = 1 + # for each pair in the min-heap, calculate the value, store for later + n_old, n_row = 2, [0, 1, 1] + while nk_pairs: + n, k = heappop(nk_pairs) + if n < 2 or k > n or k <= 0: + continue + elif k == n or k == 1: + snsk_vals[(n, k)] = 1 + continue + elif n != n_old: + num_iters = n - n_old + while num_iters > 0: + n_row.append(1) + # traverse from back to remove second row + for j in range(len(n_row)-2, 1, -1): + n_row[j] = n_row[j]*j + n_row[j-1] + num_iters -= 1 + snsk_vals[(n, k)] = n_row[k] + else: + snsk_vals[(n, k)] = n_row[k] + n_old, n_row = n, n_row + out_types = [object, object, object] if exact else [float, float, float] + # for each pair in the map, fetch the value, and populate the array + it = np.nditer( + [N, K, None], + ['buffered', 'refs_ok'], + [['readonly'], ['readonly'], ['writeonly', 'allocate']], + op_dtypes=out_types, + ) + with it: + while not it.finished: + it[2] = snsk_vals[(int(it[0]), int(it[1]))] + it.iternext() + output = it.operands[2] + # If N and K were both scalars, convert output to scalar. + if output_is_scalar: + output = output.take(0) + return output + + +def zeta(x, q=None, out=None): + r""" + Riemann or Hurwitz zeta function. + + Parameters + ---------- + x : array_like of float + Input data, must be real + q : array_like of float, optional + Input data, must be real. Defaults to Riemann zeta. + out : ndarray, optional + Output array for the computed values. + + Returns + ------- + out : array_like + Values of zeta(x). + + See Also + -------- + zetac + + Notes + ----- + The two-argument version is the Hurwitz zeta function + + .. 
math:: + + \zeta(x, q) = \sum_{k=0}^{\infty} \frac{1}{(k + q)^x}; + + see [dlmf]_ for details. The Riemann zeta function corresponds to + the case when ``q = 1``. + + References + ---------- + .. [dlmf] NIST, Digital Library of Mathematical Functions, + https://dlmf.nist.gov/25.11#i + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import zeta, polygamma, factorial + + Some specific values: + + >>> zeta(2), np.pi**2/6 + (1.6449340668482266, 1.6449340668482264) + + >>> zeta(4), np.pi**4/90 + (1.0823232337111381, 1.082323233711138) + + Relation to the `polygamma` function: + + >>> m = 3 + >>> x = 1.25 + >>> polygamma(m, x) + array(2.782144009188397) + >>> (-1)**(m+1) * factorial(m) * zeta(m+1, x) + 2.7821440091883969 + + """ + if q is None: + return _ufuncs._riemann_zeta(x, out) + else: + return _ufuncs._zeta(x, q, out) diff --git a/venv/lib/python3.10/site-packages/scipy/special/_cdflib.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/special/_cdflib.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..54778394141893b6aea5c50db98ca4b34fde78ed Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/special/_cdflib.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/scipy/special/_comb.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/special/_comb.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..98d298488258d813fa2cb037c4a3875b0b947cff Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/special/_comb.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/scipy/special/_ellip_harm.py b/venv/lib/python3.10/site-packages/scipy/special/_ellip_harm.py new file mode 100644 index 0000000000000000000000000000000000000000..1b1ce34aa58054be13edfd5d87f2059e8a0d9224 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/scipy/special/_ellip_harm.py @@ -0,0 +1,214 @@ +import numpy as np + +from ._ufuncs import _ellip_harm +from ._ellip_harm_2 import _ellipsoid, _ellipsoid_norm + + +def ellip_harm(h2, k2, n, p, s, signm=1, signn=1): + r""" + Ellipsoidal harmonic functions E^p_n(l) + + These are also known as Lame functions of the first kind, and are + solutions to the Lame equation: + + .. math:: (s^2 - h^2)(s^2 - k^2)E''(s) + + s(2s^2 - h^2 - k^2)E'(s) + (a - q s^2)E(s) = 0 + + where :math:`q = (n+1)n` and :math:`a` is the eigenvalue (not + returned) corresponding to the solutions. + + Parameters + ---------- + h2 : float + ``h**2`` + k2 : float + ``k**2``; should be larger than ``h**2`` + n : int + Degree + s : float + Coordinate + p : int + Order, can range between [1,2n+1] + signm : {1, -1}, optional + Sign of prefactor of functions. Can be +/-1. See Notes. + signn : {1, -1}, optional + Sign of prefactor of functions. Can be +/-1. See Notes. + + Returns + ------- + E : float + the harmonic :math:`E^p_n(s)` + + See Also + -------- + ellip_harm_2, ellip_normal + + Notes + ----- + The geometric interpretation of the ellipsoidal functions is + explained in [2]_, [3]_, [4]_. The `signm` and `signn` arguments control the + sign of prefactors for functions according to their type:: + + K : +1 + L : signm + M : signn + N : signm*signn + + .. versionadded:: 0.15.0 + + References + ---------- + .. [1] Digital Library of Mathematical Functions 29.12 + https://dlmf.nist.gov/29.12 + .. [2] Bardhan and Knepley, "Computational science and + re-discovery: open-source implementations of + ellipsoidal harmonics for problems in potential theory", + Comput. Sci. Disc. 5, 014006 (2012) + :doi:`10.1088/1749-4699/5/1/014006`. + .. [3] David J.and Dechambre P, "Computation of Ellipsoidal + Gravity Field Harmonics for small solar system bodies" + pp. 30-36, 2000 + .. [4] George Dassios, "Ellipsoidal Harmonics: Theory and Applications" + pp. 
418, 2012 + + Examples + -------- + >>> from scipy.special import ellip_harm + >>> w = ellip_harm(5,8,1,1,2.5) + >>> w + 2.5 + + Check that the functions indeed are solutions to the Lame equation: + + >>> import numpy as np + >>> from scipy.interpolate import UnivariateSpline + >>> def eigenvalue(f, df, ddf): + ... r = (((s**2 - h**2) * (s**2 - k**2) * ddf + ... + s * (2*s**2 - h**2 - k**2) * df + ... - n * (n + 1)*s**2*f) / f) + ... return -r.mean(), r.std() + >>> s = np.linspace(0.1, 10, 200) + >>> k, h, n, p = 8.0, 2.2, 3, 2 + >>> E = ellip_harm(h**2, k**2, n, p, s) + >>> E_spl = UnivariateSpline(s, E) + >>> a, a_err = eigenvalue(E_spl(s), E_spl(s,1), E_spl(s,2)) + >>> a, a_err + (583.44366156701483, 6.4580890640310646e-11) + + """ # noqa: E501 + return _ellip_harm(h2, k2, n, p, s, signm, signn) + + +_ellip_harm_2_vec = np.vectorize(_ellipsoid, otypes='d') + + +def ellip_harm_2(h2, k2, n, p, s): + r""" + Ellipsoidal harmonic functions F^p_n(l) + + These are also known as Lame functions of the second kind, and are + solutions to the Lame equation: + + .. math:: (s^2 - h^2)(s^2 - k^2)F''(s) + + s(2s^2 - h^2 - k^2)F'(s) + (a - q s^2)F(s) = 0 + + where :math:`q = (n+1)n` and :math:`a` is the eigenvalue (not + returned) corresponding to the solutions. + + Parameters + ---------- + h2 : float + ``h**2`` + k2 : float + ``k**2``; should be larger than ``h**2`` + n : int + Degree. + p : int + Order, can range between [1,2n+1]. + s : float + Coordinate + + Returns + ------- + F : float + The harmonic :math:`F^p_n(s)` + + See Also + -------- + ellip_harm, ellip_normal + + Notes + ----- + Lame functions of the second kind are related to the functions of the first kind: + + .. math:: + + F^p_n(s)=(2n + 1)E^p_n(s)\int_{0}^{1/s} + \frac{du}{(E^p_n(1/u))^2\sqrt{(1-u^2k^2)(1-u^2h^2)}} + + .. 
versionadded:: 0.15.0 + + Examples + -------- + >>> from scipy.special import ellip_harm_2 + >>> w = ellip_harm_2(5,8,2,1,10) + >>> w + 0.00108056853382 + + """ + with np.errstate(all='ignore'): + return _ellip_harm_2_vec(h2, k2, n, p, s) + + +def _ellip_normal_vec(h2, k2, n, p): + return _ellipsoid_norm(h2, k2, n, p) + + +_ellip_normal_vec = np.vectorize(_ellip_normal_vec, otypes='d') + + +def ellip_normal(h2, k2, n, p): + r""" + Ellipsoidal harmonic normalization constants gamma^p_n + + The normalization constant is defined as + + .. math:: + + \gamma^p_n=8\int_{0}^{h}dx\int_{h}^{k}dy + \frac{(y^2-x^2)(E^p_n(y)E^p_n(x))^2}{\sqrt((k^2-y^2)(y^2-h^2)(h^2-x^2)(k^2-x^2)} + + Parameters + ---------- + h2 : float + ``h**2`` + k2 : float + ``k**2``; should be larger than ``h**2`` + n : int + Degree. + p : int + Order, can range between [1,2n+1]. + + Returns + ------- + gamma : float + The normalization constant :math:`\gamma^p_n` + + See Also + -------- + ellip_harm, ellip_harm_2 + + Notes + ----- + .. 
versionadded:: 0.15.0 + + Examples + -------- + >>> from scipy.special import ellip_normal + >>> w = ellip_normal(5,8,3,7) + >>> w + 1723.38796997 + + """ + with np.errstate(all='ignore'): + return _ellip_normal_vec(h2, k2, n, p) diff --git a/venv/lib/python3.10/site-packages/scipy/special/_ellip_harm_2.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/special/_ellip_harm_2.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..6908d3f2fd304536394a40d9635e2f41ddbb92f5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/special/_ellip_harm_2.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/scipy/special/_lambertw.py b/venv/lib/python3.10/site-packages/scipy/special/_lambertw.py new file mode 100644 index 0000000000000000000000000000000000000000..f758c7c21fdddc0ec1b84727d90c6de7f34a094e --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special/_lambertw.py @@ -0,0 +1,149 @@ +from ._ufuncs import _lambertw + +import numpy as np + + +def lambertw(z, k=0, tol=1e-8): + r""" + lambertw(z, k=0, tol=1e-8) + + Lambert W function. + + The Lambert W function `W(z)` is defined as the inverse function + of ``w * exp(w)``. In other words, the value of ``W(z)`` is + such that ``z = W(z) * exp(W(z))`` for any complex number + ``z``. + + The Lambert W function is a multivalued function with infinitely + many branches. Each branch gives a separate solution of the + equation ``z = w exp(w)``. Here, the branches are indexed by the + integer `k`. + + Parameters + ---------- + z : array_like + Input argument. + k : int, optional + Branch index. + tol : float, optional + Evaluation tolerance. + + Returns + ------- + w : array + `w` will have the same shape as `z`. 
+ + See Also + -------- + wrightomega : the Wright Omega function + + Notes + ----- + All branches are supported by `lambertw`: + + * ``lambertw(z)`` gives the principal solution (branch 0) + * ``lambertw(z, k)`` gives the solution on branch `k` + + The Lambert W function has two partially real branches: the + principal branch (`k = 0`) is real for real ``z > -1/e``, and the + ``k = -1`` branch is real for ``-1/e < z < 0``. All branches except + ``k = 0`` have a logarithmic singularity at ``z = 0``. + + **Possible issues** + + The evaluation can become inaccurate very close to the branch point + at ``-1/e``. In some corner cases, `lambertw` might currently + fail to converge, or can end up on the wrong branch. + + **Algorithm** + + Halley's iteration is used to invert ``w * exp(w)``, using a first-order + asymptotic approximation (O(log(w)) or `O(w)`) as the initial estimate. + + The definition, implementation and choice of branches is based on [2]_. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Lambert_W_function + .. [2] Corless et al, "On the Lambert W function", Adv. Comp. Math. 5 + (1996) 329-359. + https://cs.uwaterloo.ca/research/tr/1993/03/W.pdf + + Examples + -------- + The Lambert W function is the inverse of ``w exp(w)``: + + >>> import numpy as np + >>> from scipy.special import lambertw + >>> w = lambertw(1) + >>> w + (0.56714329040978384+0j) + >>> w * np.exp(w) + (1.0+0j) + + Any branch gives a valid inverse: + + >>> w = lambertw(1, k=3) + >>> w + (-2.8535817554090377+17.113535539412148j) + >>> w*np.exp(w) + (1.0000000000000002+1.609823385706477e-15j) + + **Applications to equation-solving** + + The Lambert W function may be used to solve various kinds of + equations. We give two examples here. + + First, the function can be used to solve implicit equations of the + form + + :math:`x = a + b e^{c x}` + + for :math:`x`. We assume :math:`c` is not zero. 
After a little + algebra, the equation may be written + + :math:`z e^z = -b c e^{a c}` + + where :math:`z = c (a - x)`. :math:`z` may then be expressed using + the Lambert W function + + :math:`z = W(-b c e^{a c})` + + giving + + :math:`x = a - W(-b c e^{a c})/c` + + For example, + + >>> a = 3 + >>> b = 2 + >>> c = -0.5 + + The solution to :math:`x = a + b e^{c x}` is: + + >>> x = a - lambertw(-b*c*np.exp(a*c))/c + >>> x + (3.3707498368978794+0j) + + Verify that it solves the equation: + + >>> a + b*np.exp(c*x) + (3.37074983689788+0j) + + The Lambert W function may also be used find the value of the infinite + power tower :math:`z^{z^{z^{\ldots}}}`: + + >>> def tower(z, n): + ... if n == 0: + ... return z + ... return z ** tower(z, n-1) + ... + >>> tower(0.5, 100) + 0.641185744504986 + >>> -lambertw(-np.log(0.5)) / np.log(0.5) + (0.64118574450498589+0j) + """ + # TODO: special expert should inspect this + # interception; better place to do it? + k = np.asarray(k, dtype=np.dtype("long")) + return _lambertw(z, k, tol) diff --git a/venv/lib/python3.10/site-packages/scipy/special/_logsumexp.py b/venv/lib/python3.10/site-packages/scipy/special/_logsumexp.py new file mode 100644 index 0000000000000000000000000000000000000000..9d15fa9aa08274208038084af901c978d15eff19 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special/_logsumexp.py @@ -0,0 +1,307 @@ +import numpy as np +from scipy._lib._util import _asarray_validated + +__all__ = ["logsumexp", "softmax", "log_softmax"] + + +def logsumexp(a, axis=None, b=None, keepdims=False, return_sign=False): + """Compute the log of the sum of exponentials of input elements. + + Parameters + ---------- + a : array_like + Input array. + axis : None or int or tuple of ints, optional + Axis or axes over which the sum is taken. By default `axis` is None, + and all elements are summed. + + .. 
versionadded:: 0.11.0 + b : array-like, optional + Scaling factor for exp(`a`) must be of the same shape as `a` or + broadcastable to `a`. These values may be negative in order to + implement subtraction. + + .. versionadded:: 0.12.0 + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in the + result as dimensions with size one. With this option, the result + will broadcast correctly against the original array. + + .. versionadded:: 0.15.0 + return_sign : bool, optional + If this is set to True, the result will be a pair containing sign + information; if False, results that are negative will be returned + as NaN. Default is False (no sign information). + + .. versionadded:: 0.16.0 + + Returns + ------- + res : ndarray + The result, ``np.log(np.sum(np.exp(a)))`` calculated in a numerically + more stable way. If `b` is given then ``np.log(np.sum(b*np.exp(a)))`` + is returned. If ``return_sign`` is True, ``res`` contains the log of + the absolute value of the argument. + sgn : ndarray + If ``return_sign`` is True, this will be an array of floating-point + numbers matching res containing +1, 0, -1 (for real-valued inputs) + or a complex phase (for complex inputs). This gives the sign of the + argument of the logarithm in ``res``. + If ``return_sign`` is False, only one result is returned. + + See Also + -------- + numpy.logaddexp, numpy.logaddexp2 + + Notes + ----- + NumPy has a logaddexp function which is very similar to `logsumexp`, but + only handles two arguments. `logaddexp.reduce` is similar to this + function, but may be less stable. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.special import logsumexp + >>> a = np.arange(10) + >>> logsumexp(a) + 9.4586297444267107 + >>> np.log(np.sum(np.exp(a))) + 9.4586297444267107 + + With weights + + >>> a = np.arange(10) + >>> b = np.arange(10, 0, -1) + >>> logsumexp(a, b=b) + 9.9170178533034665 + >>> np.log(np.sum(b*np.exp(a))) + 9.9170178533034647 + + Returning a sign flag + + >>> logsumexp([1,2],b=[1,-1],return_sign=True) + (1.5413248546129181, -1.0) + + Notice that `logsumexp` does not directly support masked arrays. To use it + on a masked array, convert the mask into zero weights: + + >>> a = np.ma.array([np.log(2), 2, np.log(3)], + ... mask=[False, True, False]) + >>> b = (~a.mask).astype(int) + >>> logsumexp(a.data, b=b), np.log(5) + 1.6094379124341005, 1.6094379124341005 + + """ + a = _asarray_validated(a, check_finite=False) + if b is not None: + a, b = np.broadcast_arrays(a, b) + if np.any(b == 0): + a = a + 0. # promote to at least float + a[b == 0] = -np.inf + + # Scale by real part for complex inputs, because this affects + # the magnitude of the exponential. + a_max = np.amax(a.real, axis=axis, keepdims=True) + + if a_max.ndim > 0: + a_max[~np.isfinite(a_max)] = 0 + elif not np.isfinite(a_max): + a_max = 0 + + if b is not None: + b = np.asarray(b) + tmp = b * np.exp(a - a_max) + else: + tmp = np.exp(a - a_max) + + # suppress warnings about log of zero + with np.errstate(divide='ignore'): + s = np.sum(tmp, axis=axis, keepdims=keepdims) + if return_sign: + # For complex, use the numpy>=2.0 convention for sign. + if np.issubdtype(s.dtype, np.complexfloating): + sgn = s / np.where(s == 0, 1, abs(s)) + else: + sgn = np.sign(s) + s = abs(s) + out = np.log(s) + + if not keepdims: + a_max = np.squeeze(a_max, axis=axis) + out += a_max + + if return_sign: + return out, sgn + else: + return out + + +def softmax(x, axis=None): + r"""Compute the softmax function. 
+ + The softmax function transforms each element of a collection by + computing the exponential of each element divided by the sum of the + exponentials of all the elements. That is, if `x` is a one-dimensional + numpy array:: + + softmax(x) = np.exp(x)/sum(np.exp(x)) + + Parameters + ---------- + x : array_like + Input array. + axis : int or tuple of ints, optional + Axis to compute values along. Default is None and softmax will be + computed over the entire array `x`. + + Returns + ------- + s : ndarray + An array the same shape as `x`. The result will sum to 1 along the + specified axis. + + Notes + ----- + The formula for the softmax function :math:`\sigma(x)` for a vector + :math:`x = \{x_0, x_1, ..., x_{n-1}\}` is + + .. math:: \sigma(x)_j = \frac{e^{x_j}}{\sum_k e^{x_k}} + + The `softmax` function is the gradient of `logsumexp`. + + The implementation uses shifting to avoid overflow. See [1]_ for more + details. + + .. versionadded:: 1.2.0 + + References + ---------- + .. [1] P. Blanchard, D.J. Higham, N.J. Higham, "Accurately computing the + log-sum-exp and softmax functions", IMA Journal of Numerical Analysis, + Vol.41(4), :doi:`10.1093/imanum/draa038`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import softmax + >>> np.set_printoptions(precision=5) + + >>> x = np.array([[1, 0.5, 0.2, 3], + ... [1, -1, 7, 3], + ... [2, 12, 13, 3]]) + ... + + Compute the softmax transformation over the entire array. + + >>> m = softmax(x) + >>> m + array([[ 4.48309e-06, 2.71913e-06, 2.01438e-06, 3.31258e-05], + [ 4.48309e-06, 6.06720e-07, 1.80861e-03, 3.31258e-05], + [ 1.21863e-05, 2.68421e-01, 7.29644e-01, 3.31258e-05]]) + + >>> m.sum() + 1.0 + + Compute the softmax transformation along the first axis (i.e., the + columns). 
+ + >>> m = softmax(x, axis=0) + + >>> m + array([[ 2.11942e-01, 1.01300e-05, 2.75394e-06, 3.33333e-01], + [ 2.11942e-01, 2.26030e-06, 2.47262e-03, 3.33333e-01], + [ 5.76117e-01, 9.99988e-01, 9.97525e-01, 3.33333e-01]]) + + >>> m.sum(axis=0) + array([ 1., 1., 1., 1.]) + + Compute the softmax transformation along the second axis (i.e., the rows). + + >>> m = softmax(x, axis=1) + >>> m + array([[ 1.05877e-01, 6.42177e-02, 4.75736e-02, 7.82332e-01], + [ 2.42746e-03, 3.28521e-04, 9.79307e-01, 1.79366e-02], + [ 1.22094e-05, 2.68929e-01, 7.31025e-01, 3.31885e-05]]) + + >>> m.sum(axis=1) + array([ 1., 1., 1.]) + + """ + x = _asarray_validated(x, check_finite=False) + x_max = np.amax(x, axis=axis, keepdims=True) + exp_x_shifted = np.exp(x - x_max) + return exp_x_shifted / np.sum(exp_x_shifted, axis=axis, keepdims=True) + + +def log_softmax(x, axis=None): + r"""Compute the logarithm of the softmax function. + + In principle:: + + log_softmax(x) = log(softmax(x)) + + but using a more accurate implementation. + + Parameters + ---------- + x : array_like + Input array. + axis : int or tuple of ints, optional + Axis to compute values along. Default is None and softmax will be + computed over the entire array `x`. + + Returns + ------- + s : ndarray or scalar + An array with the same shape as `x`. Exponential of the result will + sum to 1 along the specified axis. If `x` is a scalar, a scalar is + returned. + + Notes + ----- + `log_softmax` is more accurate than ``np.log(softmax(x))`` with inputs that + make `softmax` saturate (see examples below). + + .. versionadded:: 1.5.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import log_softmax + >>> from scipy.special import softmax + >>> np.set_printoptions(precision=5) + + >>> x = np.array([1000.0, 1.0]) + + >>> y = log_softmax(x) + >>> y + array([ 0., -999.]) + + >>> with np.errstate(divide='ignore'): + ... y = np.log(softmax(x)) + ... 
+ >>> y + array([ 0., -inf]) + + """ + + x = _asarray_validated(x, check_finite=False) + + x_max = np.amax(x, axis=axis, keepdims=True) + + if x_max.ndim > 0: + x_max[~np.isfinite(x_max)] = 0 + elif not np.isfinite(x_max): + x_max = 0 + + tmp = x - x_max + exp_tmp = np.exp(tmp) + + # suppress warnings about log of zero + with np.errstate(divide='ignore'): + s = np.sum(exp_tmp, axis=axis, keepdims=True) + out = np.log(s) + + out = tmp - out + return out diff --git a/venv/lib/python3.10/site-packages/scipy/special/_mptestutils.py b/venv/lib/python3.10/site-packages/scipy/special/_mptestutils.py new file mode 100644 index 0000000000000000000000000000000000000000..f7b88f6b244bc5ff95af04a241f1959030df2568 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special/_mptestutils.py @@ -0,0 +1,453 @@ +import os +import sys +import time +from itertools import zip_longest + +import numpy as np +from numpy.testing import assert_ +import pytest + +from scipy.special._testutils import assert_func_equal + +try: + import mpmath +except ImportError: + pass + + +# ------------------------------------------------------------------------------ +# Machinery for systematic tests with mpmath +# ------------------------------------------------------------------------------ + +class Arg: + """Generate a set of numbers on the real axis, concentrating on + 'interesting' regions and covering all orders of magnitude. + + """ + + def __init__(self, a=-np.inf, b=np.inf, inclusive_a=True, inclusive_b=True): + if a > b: + raise ValueError("a should be less than or equal to b") + if a == -np.inf: + a = -0.5*np.finfo(float).max + if b == np.inf: + b = 0.5*np.finfo(float).max + self.a, self.b = a, b + + self.inclusive_a, self.inclusive_b = inclusive_a, inclusive_b + + def _positive_values(self, a, b, n): + if a < 0: + raise ValueError("a should be positive") + + # Try to put half of the points into a linspace between a and + # 10 the other half in a logspace. 
+ if n % 2 == 0: + nlogpts = n//2 + nlinpts = nlogpts + else: + nlogpts = n//2 + nlinpts = nlogpts + 1 + + if a >= 10: + # Outside of linspace range; just return a logspace. + pts = np.logspace(np.log10(a), np.log10(b), n) + elif a > 0 and b < 10: + # Outside of logspace range; just return a linspace + pts = np.linspace(a, b, n) + elif a > 0: + # Linspace between a and 10 and a logspace between 10 and + # b. + linpts = np.linspace(a, 10, nlinpts, endpoint=False) + logpts = np.logspace(1, np.log10(b), nlogpts) + pts = np.hstack((linpts, logpts)) + elif a == 0 and b <= 10: + # Linspace between 0 and b and a logspace between 0 and + # the smallest positive point of the linspace + linpts = np.linspace(0, b, nlinpts) + if linpts.size > 1: + right = np.log10(linpts[1]) + else: + right = -30 + logpts = np.logspace(-30, right, nlogpts, endpoint=False) + pts = np.hstack((logpts, linpts)) + else: + # Linspace between 0 and 10, logspace between 0 and the + # smallest positive point of the linspace, and a logspace + # between 10 and b. 
+ if nlogpts % 2 == 0: + nlogpts1 = nlogpts//2 + nlogpts2 = nlogpts1 + else: + nlogpts1 = nlogpts//2 + nlogpts2 = nlogpts1 + 1 + linpts = np.linspace(0, 10, nlinpts, endpoint=False) + if linpts.size > 1: + right = np.log10(linpts[1]) + else: + right = -30 + logpts1 = np.logspace(-30, right, nlogpts1, endpoint=False) + logpts2 = np.logspace(1, np.log10(b), nlogpts2) + pts = np.hstack((logpts1, linpts, logpts2)) + + return np.sort(pts) + + def values(self, n): + """Return an array containing n numbers.""" + a, b = self.a, self.b + if a == b: + return np.zeros(n) + + if not self.inclusive_a: + n += 1 + if not self.inclusive_b: + n += 1 + + if n % 2 == 0: + n1 = n//2 + n2 = n1 + else: + n1 = n//2 + n2 = n1 + 1 + + if a >= 0: + pospts = self._positive_values(a, b, n) + negpts = [] + elif b <= 0: + pospts = [] + negpts = -self._positive_values(-b, -a, n) + else: + pospts = self._positive_values(0, b, n1) + negpts = -self._positive_values(0, -a, n2 + 1) + # Don't want to get zero twice + negpts = negpts[1:] + pts = np.hstack((negpts[::-1], pospts)) + + if not self.inclusive_a: + pts = pts[1:] + if not self.inclusive_b: + pts = pts[:-1] + return pts + + +class FixedArg: + def __init__(self, values): + self._values = np.asarray(values) + + def values(self, n): + return self._values + + +class ComplexArg: + def __init__(self, a=complex(-np.inf, -np.inf), b=complex(np.inf, np.inf)): + self.real = Arg(a.real, b.real) + self.imag = Arg(a.imag, b.imag) + + def values(self, n): + m = int(np.floor(np.sqrt(n))) + x = self.real.values(m) + y = self.imag.values(m + 1) + return (x[:,None] + 1j*y[None,:]).ravel() + + +class IntArg: + def __init__(self, a=-1000, b=1000): + self.a = a + self.b = b + + def values(self, n): + v1 = Arg(self.a, self.b).values(max(1 + n//2, n-5)).astype(int) + v2 = np.arange(-5, 5) + v = np.unique(np.r_[v1, v2]) + v = v[(v >= self.a) & (v < self.b)] + return v + + +def get_args(argspec, n): + if isinstance(argspec, np.ndarray): + args = argspec.copy() + else: 
+ nargs = len(argspec) + ms = np.asarray( + [1.5 if isinstance(spec, ComplexArg) else 1.0 for spec in argspec] + ) + ms = (n**(ms/sum(ms))).astype(int) + 1 + + args = [spec.values(m) for spec, m in zip(argspec, ms)] + args = np.array(np.broadcast_arrays(*np.ix_(*args))).reshape(nargs, -1).T + + return args + + +class MpmathData: + def __init__(self, scipy_func, mpmath_func, arg_spec, name=None, + dps=None, prec=None, n=None, rtol=1e-7, atol=1e-300, + ignore_inf_sign=False, distinguish_nan_and_inf=True, + nan_ok=True, param_filter=None): + + # mpmath tests are really slow (see gh-6989). Use a small number of + # points by default, increase back to 5000 (old default) if XSLOW is + # set + if n is None: + try: + is_xslow = int(os.environ.get('SCIPY_XSLOW', '0')) + except ValueError: + is_xslow = False + + n = 5000 if is_xslow else 500 + + self.scipy_func = scipy_func + self.mpmath_func = mpmath_func + self.arg_spec = arg_spec + self.dps = dps + self.prec = prec + self.n = n + self.rtol = rtol + self.atol = atol + self.ignore_inf_sign = ignore_inf_sign + self.nan_ok = nan_ok + if isinstance(self.arg_spec, np.ndarray): + self.is_complex = np.issubdtype(self.arg_spec.dtype, np.complexfloating) + else: + self.is_complex = any( + [isinstance(arg, ComplexArg) for arg in self.arg_spec] + ) + self.ignore_inf_sign = ignore_inf_sign + self.distinguish_nan_and_inf = distinguish_nan_and_inf + if not name or name == '': + name = getattr(scipy_func, '__name__', None) + if not name or name == '': + name = getattr(mpmath_func, '__name__', None) + self.name = name + self.param_filter = param_filter + + def check(self): + np.random.seed(1234) + + # Generate values for the arguments + argarr = get_args(self.arg_spec, self.n) + + # Check + old_dps, old_prec = mpmath.mp.dps, mpmath.mp.prec + try: + if self.dps is not None: + dps_list = [self.dps] + else: + dps_list = [20] + if self.prec is not None: + mpmath.mp.prec = self.prec + + # Proper casting of mpmath input and output types. 
Using + # native mpmath types as inputs gives improved precision + # in some cases. + if np.issubdtype(argarr.dtype, np.complexfloating): + pytype = mpc2complex + + def mptype(x): + return mpmath.mpc(complex(x)) + else: + def mptype(x): + return mpmath.mpf(float(x)) + + def pytype(x): + if abs(x.imag) > 1e-16*(1 + abs(x.real)): + return np.nan + else: + return mpf2float(x.real) + + # Try out different dps until one (or none) works + for j, dps in enumerate(dps_list): + mpmath.mp.dps = dps + + try: + assert_func_equal( + self.scipy_func, + lambda *a: pytype(self.mpmath_func(*map(mptype, a))), + argarr, + vectorized=False, + rtol=self.rtol, + atol=self.atol, + ignore_inf_sign=self.ignore_inf_sign, + distinguish_nan_and_inf=self.distinguish_nan_and_inf, + nan_ok=self.nan_ok, + param_filter=self.param_filter + ) + break + except AssertionError: + if j >= len(dps_list)-1: + # reraise the Exception + tp, value, tb = sys.exc_info() + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + finally: + mpmath.mp.dps, mpmath.mp.prec = old_dps, old_prec + + def __repr__(self): + if self.is_complex: + return f"" + else: + return f"" + + +def assert_mpmath_equal(*a, **kw): + d = MpmathData(*a, **kw) + d.check() + + +def nonfunctional_tooslow(func): + return pytest.mark.skip( + reason=" Test not yet functional (too slow), needs more work." + )(func) + + +# ------------------------------------------------------------------------------ +# Tools for dealing with mpmath quirks +# ------------------------------------------------------------------------------ + +def mpf2float(x): + """ + Convert an mpf to the nearest floating point number. 
Just using + float directly doesn't work because of results like this: + + with mp.workdps(50): + float(mpf("0.99999999999999999")) = 0.9999999999999999 + + """ + return float(mpmath.nstr(x, 17, min_fixed=0, max_fixed=0)) + + +def mpc2complex(x): + return complex(mpf2float(x.real), mpf2float(x.imag)) + + +def trace_args(func): + def tofloat(x): + if isinstance(x, mpmath.mpc): + return complex(x) + else: + return float(x) + + def wrap(*a, **kw): + sys.stderr.write(f"{tuple(map(tofloat, a))!r}: ") + sys.stderr.flush() + try: + r = func(*a, **kw) + sys.stderr.write("-> %r" % r) + finally: + sys.stderr.write("\n") + sys.stderr.flush() + return r + return wrap + + +try: + import signal + POSIX = ('setitimer' in dir(signal)) +except ImportError: + POSIX = False + + +class TimeoutError(Exception): + pass + + +def time_limited(timeout=0.5, return_val=np.nan, use_sigalrm=True): + """ + Decorator for setting a timeout for pure-Python functions. + + If the function does not return within `timeout` seconds, the + value `return_val` is returned instead. + + On POSIX this uses SIGALRM by default. On non-POSIX, settrace is + used. Do not use this with threads: the SIGALRM implementation + does probably not work well. The settrace implementation only + traces the current thread. + + The settrace implementation slows down execution speed. Slowdown + by a factor around 10 is probably typical. 
+ """ + if POSIX and use_sigalrm: + def sigalrm_handler(signum, frame): + raise TimeoutError() + + def deco(func): + def wrap(*a, **kw): + old_handler = signal.signal(signal.SIGALRM, sigalrm_handler) + signal.setitimer(signal.ITIMER_REAL, timeout) + try: + return func(*a, **kw) + except TimeoutError: + return return_val + finally: + signal.setitimer(signal.ITIMER_REAL, 0) + signal.signal(signal.SIGALRM, old_handler) + return wrap + else: + def deco(func): + def wrap(*a, **kw): + start_time = time.time() + + def trace(frame, event, arg): + if time.time() - start_time > timeout: + raise TimeoutError() + return trace + sys.settrace(trace) + try: + return func(*a, **kw) + except TimeoutError: + sys.settrace(None) + return return_val + finally: + sys.settrace(None) + return wrap + return deco + + +def exception_to_nan(func): + """Decorate function to return nan if it raises an exception""" + def wrap(*a, **kw): + try: + return func(*a, **kw) + except Exception: + return np.nan + return wrap + + +def inf_to_nan(func): + """Decorate function to return nan if it returns inf""" + def wrap(*a, **kw): + v = func(*a, **kw) + if not np.isfinite(v): + return np.nan + return v + return wrap + + +def mp_assert_allclose(res, std, atol=0, rtol=1e-17): + """ + Compare lists of mpmath.mpf's or mpmath.mpc's directly so that it + can be done to higher precision than double. 
+ """ + failures = [] + for k, (resval, stdval) in enumerate(zip_longest(res, std)): + if resval is None or stdval is None: + raise ValueError('Lengths of inputs res and std are not equal.') + if mpmath.fabs(resval - stdval) > atol + rtol*mpmath.fabs(stdval): + failures.append((k, resval, stdval)) + + nfail = len(failures) + if nfail > 0: + ndigits = int(abs(np.log10(rtol))) + msg = [""] + msg.append(f"Bad results ({nfail} out of {k + 1}) for the following points:") + for k, resval, stdval in failures: + resrep = mpmath.nstr(resval, ndigits, min_fixed=0, max_fixed=0) + stdrep = mpmath.nstr(stdval, ndigits, min_fixed=0, max_fixed=0) + if stdval == 0: + rdiff = "inf" + else: + rdiff = mpmath.fabs((resval - stdval)/stdval) + rdiff = mpmath.nstr(rdiff, 3) + msg.append(f"{k}: {resrep} != {stdrep} (rdiff {rdiff})") + assert_(False, "\n".join(msg)) diff --git a/venv/lib/python3.10/site-packages/scipy/special/_orthogonal.py b/venv/lib/python3.10/site-packages/scipy/special/_orthogonal.py new file mode 100644 index 0000000000000000000000000000000000000000..c4b49cd8c6d412d2f84cfe957e5b235fc3e4a008 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special/_orthogonal.py @@ -0,0 +1,2605 @@ +""" +A collection of functions to find the weights and abscissas for +Gaussian Quadrature. + +These calculations are done by finding the eigenvalues of a +tridiagonal matrix whose entries are dependent on the coefficients +in the recursion formula for the orthogonal polynomials with the +corresponding weighting function over the interval. + +Many recursion relations for orthogonal polynomials are given: + +.. math:: + + a1n f_{n+1} (x) = (a2n + a3n x ) f_n (x) - a4n f_{n-1} (x) + +The recursion relation of interest is + +.. math:: + + P_{n+1} (x) = (x - A_n) P_n (x) - B_n P_{n-1} (x) + +where :math:`P` has a different normalization than :math:`f`. + +The coefficients can be found as: + +.. 
math:: + + A_n = -a2n / a3n + \\qquad + B_n = ( a4n / a3n \\sqrt{h_n-1 / h_n})^2 + +where + +.. math:: + + h_n = \\int_a^b w(x) f_n(x)^2 + +assume: + +.. math:: + + P_0 (x) = 1 + \\qquad + P_{-1} (x) == 0 + +For the mathematical background, see [golub.welsch-1969-mathcomp]_ and +[abramowitz.stegun-1965]_. + +References +---------- +.. [golub.welsch-1969-mathcomp] + Golub, Gene H, and John H Welsch. 1969. Calculation of Gauss + Quadrature Rules. *Mathematics of Computation* 23, 221-230+s1--s10. + +.. [abramowitz.stegun-1965] + Abramowitz, Milton, and Irene A Stegun. (1965) *Handbook of + Mathematical Functions: with Formulas, Graphs, and Mathematical + Tables*. Gaithersburg, MD: National Bureau of Standards. + http://www.math.sfu.ca/~cbm/aands/ + +.. [townsend.trogdon.olver-2014] + Townsend, A. and Trogdon, T. and Olver, S. (2014) + *Fast computation of Gauss quadrature nodes and + weights on the whole real line*. :arXiv:`1410.5286`. + +.. [townsend.trogdon.olver-2015] + Townsend, A. and Trogdon, T. and Olver, S. (2015) + *Fast computation of Gauss quadrature nodes and + weights on the whole real line*. + IMA Journal of Numerical Analysis + :doi:`10.1093/imanum/drv002`. +""" +# +# Author: Travis Oliphant 2000 +# Updated Sep. 2003 (fixed bugs --- tested to be accurate) + +# SciPy imports. +import numpy as np +from numpy import (exp, inf, pi, sqrt, floor, sin, cos, around, + hstack, arccos, arange) +from scipy import linalg +from scipy.special import airy + +# Local imports. +# There is no .pyi file for _specfun +from . import _specfun # type: ignore +from . 
import _ufuncs +_gam = _ufuncs.gamma + +_polyfuns = ['legendre', 'chebyt', 'chebyu', 'chebyc', 'chebys', + 'jacobi', 'laguerre', 'genlaguerre', 'hermite', + 'hermitenorm', 'gegenbauer', 'sh_legendre', 'sh_chebyt', + 'sh_chebyu', 'sh_jacobi'] + +# Correspondence between new and old names of root functions +_rootfuns_map = {'roots_legendre': 'p_roots', + 'roots_chebyt': 't_roots', + 'roots_chebyu': 'u_roots', + 'roots_chebyc': 'c_roots', + 'roots_chebys': 's_roots', + 'roots_jacobi': 'j_roots', + 'roots_laguerre': 'l_roots', + 'roots_genlaguerre': 'la_roots', + 'roots_hermite': 'h_roots', + 'roots_hermitenorm': 'he_roots', + 'roots_gegenbauer': 'cg_roots', + 'roots_sh_legendre': 'ps_roots', + 'roots_sh_chebyt': 'ts_roots', + 'roots_sh_chebyu': 'us_roots', + 'roots_sh_jacobi': 'js_roots'} + +__all__ = _polyfuns + list(_rootfuns_map.keys()) + + +class orthopoly1d(np.poly1d): + + def __init__(self, roots, weights=None, hn=1.0, kn=1.0, wfunc=None, + limits=None, monic=False, eval_func=None): + equiv_weights = [weights[k] / wfunc(roots[k]) for + k in range(len(roots))] + mu = sqrt(hn) + if monic: + evf = eval_func + if evf: + knn = kn + def eval_func(x): + return evf(x) / knn + mu = mu / abs(kn) + kn = 1.0 + + # compute coefficients from roots, then scale + poly = np.poly1d(roots, r=True) + np.poly1d.__init__(self, poly.coeffs * float(kn)) + + self.weights = np.array(list(zip(roots, weights, equiv_weights))) + self.weight_func = wfunc + self.limits = limits + self.normcoef = mu + + # Note: eval_func will be discarded on arithmetic + self._eval_func = eval_func + + def __call__(self, v): + if self._eval_func and not isinstance(v, np.poly1d): + return self._eval_func(v) + else: + return np.poly1d.__call__(self, v) + + def _scale(self, p): + if p == 1.0: + return + self._coeffs *= p + + evf = self._eval_func + if evf: + self._eval_func = lambda x: evf(x) * p + self.normcoef *= p + + +def _gen_roots_and_weights(n, mu0, an_func, bn_func, f, df, symmetrize, mu): + """[x,w] = 
gen_roots_and_weights(n,an_func,sqrt_bn_func,mu) + + Returns the roots (x) of an nth order orthogonal polynomial, + and weights (w) to use in appropriate Gaussian quadrature with that + orthogonal polynomial. + + The polynomials have the recurrence relation + P_n+1(x) = (x - A_n) P_n(x) - B_n P_n-1(x) + + an_func(n) should return A_n + sqrt_bn_func(n) should return sqrt(B_n) + mu ( = h_0 ) is the integral of the weight over the orthogonal + interval + """ + k = np.arange(n, dtype='d') + c = np.zeros((2, n)) + c[0,1:] = bn_func(k[1:]) + c[1,:] = an_func(k) + x = linalg.eigvals_banded(c, overwrite_a_band=True) + + # improve roots by one application of Newton's method + y = f(n, x) + dy = df(n, x) + x -= y/dy + + # fm and dy may contain very large/small values, so we + # log-normalize them to maintain precision in the product fm*dy + fm = f(n-1, x) + log_fm = np.log(np.abs(fm)) + log_dy = np.log(np.abs(dy)) + fm /= np.exp((log_fm.max() + log_fm.min()) / 2.) + dy /= np.exp((log_dy.max() + log_dy.min()) / 2.) + w = 1.0 / (fm * dy) + + if symmetrize: + w = (w + w[::-1]) / 2 + x = (x - x[::-1]) / 2 + + w *= mu0 / w.sum() + + if mu: + return x, w, mu0 + else: + return x, w + +# Jacobi Polynomials 1 P^(alpha,beta)_n(x) + + +def roots_jacobi(n, alpha, beta, mu=False): + r"""Gauss-Jacobi quadrature. + + Compute the sample points and weights for Gauss-Jacobi + quadrature. The sample points are the roots of the nth degree + Jacobi polynomial, :math:`P^{\alpha, \beta}_n(x)`. These sample + points and weights correctly integrate polynomials of degree + :math:`2n - 1` or less over the interval :math:`[-1, 1]` with + weight function :math:`w(x) = (1 - x)^{\alpha} (1 + + x)^{\beta}`. See 22.2.1 in [AS]_ for details. + + Parameters + ---------- + n : int + quadrature order + alpha : float + alpha must be > -1 + beta : float + beta must be > -1 + mu : bool, optional + If True, return the sum of the weights, optional. 
+ + Returns + ------- + x : ndarray + Sample points + w : ndarray + Weights + mu : float + Sum of the weights + + See Also + -------- + scipy.integrate.quadrature + scipy.integrate.fixed_quad + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + """ + m = int(n) + if n < 1 or n != m: + raise ValueError("n must be a positive integer.") + if alpha <= -1 or beta <= -1: + raise ValueError("alpha and beta must be greater than -1.") + + if alpha == 0.0 and beta == 0.0: + return roots_legendre(m, mu) + if alpha == beta: + return roots_gegenbauer(m, alpha+0.5, mu) + + if (alpha + beta) <= 1000: + mu0 = 2.0**(alpha+beta+1) * _ufuncs.beta(alpha+1, beta+1) + else: + # Avoid overflows in pow and beta for very large parameters + mu0 = np.exp((alpha + beta + 1) * np.log(2.0) + + _ufuncs.betaln(alpha+1, beta+1)) + a = alpha + b = beta + if a + b == 0.0: + def an_func(k): + return np.where(k == 0, (b - a) / (2 + a + b), 0.0) + else: + def an_func(k): + return np.where( + k == 0, + (b - a) / (2 + a + b), + (b * b - a * a) / ((2.0 * k + a + b) * (2.0 * k + a + b + 2)) + ) + + def bn_func(k): + return ( + 2.0 / (2.0 * k + a + b) + * np.sqrt((k + a) * (k + b) / (2 * k + a + b + 1)) + * np.where(k == 1, 1.0, np.sqrt(k * (k + a + b) / (2.0 * k + a + b - 1))) + ) + + def f(n, x): + return _ufuncs.eval_jacobi(n, a, b, x) + def df(n, x): + return 0.5 * (n + a + b + 1) * _ufuncs.eval_jacobi(n - 1, a + 1, b + 1, x) + return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, False, mu) + + +def jacobi(n, alpha, beta, monic=False): + r"""Jacobi polynomial. + + Defined to be the solution of + + .. 
math:: + (1 - x^2)\frac{d^2}{dx^2}P_n^{(\alpha, \beta)} + + (\beta - \alpha - (\alpha + \beta + 2)x) + \frac{d}{dx}P_n^{(\alpha, \beta)} + + n(n + \alpha + \beta + 1)P_n^{(\alpha, \beta)} = 0 + + for :math:`\alpha, \beta > -1`; :math:`P_n^{(\alpha, \beta)}` is a + polynomial of degree :math:`n`. + + Parameters + ---------- + n : int + Degree of the polynomial. + alpha : float + Parameter, must be greater than -1. + beta : float + Parameter, must be greater than -1. + monic : bool, optional + If `True`, scale the leading coefficient to be 1. Default is + `False`. + + Returns + ------- + P : orthopoly1d + Jacobi polynomial. + + Notes + ----- + For fixed :math:`\alpha, \beta`, the polynomials + :math:`P_n^{(\alpha, \beta)}` are orthogonal over :math:`[-1, 1]` + with weight function :math:`(1 - x)^\alpha(1 + x)^\beta`. + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + Examples + -------- + The Jacobi polynomials satisfy the recurrence relation: + + .. math:: + P_n^{(\alpha, \beta-1)}(x) - P_n^{(\alpha-1, \beta)}(x) + = P_{n-1}^{(\alpha, \beta)}(x) + + This can be verified, for example, for :math:`\alpha = \beta = 2` + and :math:`n = 1` over the interval :math:`[-1, 1]`: + + >>> import numpy as np + >>> from scipy.special import jacobi + >>> x = np.arange(-1.0, 1.0, 0.01) + >>> np.allclose(jacobi(0, 2, 2)(x), + ... jacobi(1, 2, 1)(x) - jacobi(1, 1, 2)(x)) + True + + Plot of the Jacobi polynomial :math:`P_5^{(\alpha, -0.5)}` for + different values of :math:`\alpha`: + + >>> import matplotlib.pyplot as plt + >>> x = np.arange(-1.0, 1.0, 0.01) + >>> fig, ax = plt.subplots() + >>> ax.set_ylim(-2.0, 2.0) + >>> ax.set_title(r'Jacobi polynomials $P_5^{(\alpha, -0.5)}$') + >>> for alpha in np.arange(0, 4, 1): + ... 
ax.plot(x, jacobi(5, alpha, -0.5)(x), label=rf'$\alpha={alpha}$') + >>> plt.legend(loc='best') + >>> plt.show() + + """ + if n < 0: + raise ValueError("n must be nonnegative.") + + def wfunc(x): + return (1 - x) ** alpha * (1 + x) ** beta + if n == 0: + return orthopoly1d([], [], 1.0, 1.0, wfunc, (-1, 1), monic, + eval_func=np.ones_like) + x, w, mu = roots_jacobi(n, alpha, beta, mu=True) + ab1 = alpha + beta + 1.0 + hn = 2**ab1 / (2 * n + ab1) * _gam(n + alpha + 1) + hn *= _gam(n + beta + 1.0) / _gam(n + 1) / _gam(n + ab1) + kn = _gam(2 * n + ab1) / 2.0**n / _gam(n + 1) / _gam(n + ab1) + # here kn = coefficient on x^n term + p = orthopoly1d(x, w, hn, kn, wfunc, (-1, 1), monic, + lambda x: _ufuncs.eval_jacobi(n, alpha, beta, x)) + return p + +# Jacobi Polynomials shifted G_n(p,q,x) + + +def roots_sh_jacobi(n, p1, q1, mu=False): + """Gauss-Jacobi (shifted) quadrature. + + Compute the sample points and weights for Gauss-Jacobi (shifted) + quadrature. The sample points are the roots of the nth degree + shifted Jacobi polynomial, :math:`G^{p,q}_n(x)`. These sample + points and weights correctly integrate polynomials of degree + :math:`2n - 1` or less over the interval :math:`[0, 1]` with + weight function :math:`w(x) = (1 - x)^{p-q} x^{q-1}`. See 22.2.2 + in [AS]_ for details. + + Parameters + ---------- + n : int + quadrature order + p1 : float + (p1 - q1) must be > -1 + q1 : float + q1 must be > 0 + mu : bool, optional + If True, return the sum of the weights, optional. + + Returns + ------- + x : ndarray + Sample points + w : ndarray + Weights + mu : float + Sum of the weights + + See Also + -------- + scipy.integrate.quadrature + scipy.integrate.fixed_quad + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + """ + if (p1-q1) <= -1 or q1 <= 0: + message = "(p - q) must be greater than -1, and q must be greater than 0." 
+ raise ValueError(message) + x, w, m = roots_jacobi(n, p1-q1, q1-1, True) + x = (x + 1) / 2 + scale = 2.0**p1 + w /= scale + m /= scale + if mu: + return x, w, m + else: + return x, w + + +def sh_jacobi(n, p, q, monic=False): + r"""Shifted Jacobi polynomial. + + Defined by + + .. math:: + + G_n^{(p, q)}(x) + = \binom{2n + p - 1}{n}^{-1}P_n^{(p - q, q - 1)}(2x - 1), + + where :math:`P_n^{(\cdot, \cdot)}` is the nth Jacobi polynomial. + + Parameters + ---------- + n : int + Degree of the polynomial. + p : float + Parameter, must have :math:`p > q - 1`. + q : float + Parameter, must be greater than 0. + monic : bool, optional + If `True`, scale the leading coefficient to be 1. Default is + `False`. + + Returns + ------- + G : orthopoly1d + Shifted Jacobi polynomial. + + Notes + ----- + For fixed :math:`p, q`, the polynomials :math:`G_n^{(p, q)}` are + orthogonal over :math:`[0, 1]` with weight function :math:`(1 - + x)^{p - q}x^{q - 1}`. + + """ + if n < 0: + raise ValueError("n must be nonnegative.") + + def wfunc(x): + return (1.0 - x) ** (p - q) * x ** (q - 1.0) + if n == 0: + return orthopoly1d([], [], 1.0, 1.0, wfunc, (-1, 1), monic, + eval_func=np.ones_like) + n1 = n + x, w = roots_sh_jacobi(n1, p, q) + hn = _gam(n + 1) * _gam(n + q) * _gam(n + p) * _gam(n + p - q + 1) + hn /= (2 * n + p) * (_gam(2 * n + p)**2) + # kn = 1.0 in standard form so monic is redundant. Kept for compatibility. + kn = 1.0 + pp = orthopoly1d(x, w, hn, kn, wfunc=wfunc, limits=(0, 1), monic=monic, + eval_func=lambda x: _ufuncs.eval_sh_jacobi(n, p, q, x)) + return pp + +# Generalized Laguerre L^(alpha)_n(x) + + +def roots_genlaguerre(n, alpha, mu=False): + r"""Gauss-generalized Laguerre quadrature. + + Compute the sample points and weights for Gauss-generalized + Laguerre quadrature. The sample points are the roots of the nth + degree generalized Laguerre polynomial, :math:`L^{\alpha}_n(x)`. 
+ These sample points and weights correctly integrate polynomials of + degree :math:`2n - 1` or less over the interval :math:`[0, + \infty]` with weight function :math:`w(x) = x^{\alpha} + e^{-x}`. See 22.3.9 in [AS]_ for details. + + Parameters + ---------- + n : int + quadrature order + alpha : float + alpha must be > -1 + mu : bool, optional + If True, return the sum of the weights, optional. + + Returns + ------- + x : ndarray + Sample points + w : ndarray + Weights + mu : float + Sum of the weights + + See Also + -------- + scipy.integrate.quadrature + scipy.integrate.fixed_quad + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + """ + m = int(n) + if n < 1 or n != m: + raise ValueError("n must be a positive integer.") + if alpha < -1: + raise ValueError("alpha must be greater than -1.") + + mu0 = _ufuncs.gamma(alpha + 1) + + if m == 1: + x = np.array([alpha+1.0], 'd') + w = np.array([mu0], 'd') + if mu: + return x, w, mu0 + else: + return x, w + + def an_func(k): + return 2 * k + alpha + 1 + def bn_func(k): + return -np.sqrt(k * (k + alpha)) + def f(n, x): + return _ufuncs.eval_genlaguerre(n, alpha, x) + def df(n, x): + return (n * _ufuncs.eval_genlaguerre(n, alpha, x) + - (n + alpha) * _ufuncs.eval_genlaguerre(n - 1, alpha, x)) / x + return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, False, mu) + + +def genlaguerre(n, alpha, monic=False): + r"""Generalized (associated) Laguerre polynomial. + + Defined to be the solution of + + .. math:: + x\frac{d^2}{dx^2}L_n^{(\alpha)} + + (\alpha + 1 - x)\frac{d}{dx}L_n^{(\alpha)} + + nL_n^{(\alpha)} = 0, + + where :math:`\alpha > -1`; :math:`L_n^{(\alpha)}` is a polynomial + of degree :math:`n`. + + Parameters + ---------- + n : int + Degree of the polynomial. + alpha : float + Parameter, must be greater than -1. 
+ monic : bool, optional + If `True`, scale the leading coefficient to be 1. Default is + `False`. + + Returns + ------- + L : orthopoly1d + Generalized Laguerre polynomial. + + See Also + -------- + laguerre : Laguerre polynomial. + hyp1f1 : confluent hypergeometric function + + Notes + ----- + For fixed :math:`\alpha`, the polynomials :math:`L_n^{(\alpha)}` + are orthogonal over :math:`[0, \infty)` with weight function + :math:`e^{-x}x^\alpha`. + + The Laguerre polynomials are the special case where :math:`\alpha + = 0`. + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + Examples + -------- + The generalized Laguerre polynomials are closely related to the confluent + hypergeometric function :math:`{}_1F_1`: + + .. math:: + L_n^{(\alpha)} = \binom{n + \alpha}{n} {}_1F_1(-n, \alpha +1, x) + + This can be verified, for example, for :math:`n = \alpha = 3` over the + interval :math:`[-1, 1]`: + + >>> import numpy as np + >>> from scipy.special import binom + >>> from scipy.special import genlaguerre + >>> from scipy.special import hyp1f1 + >>> x = np.arange(-1.0, 1.0, 0.01) + >>> np.allclose(genlaguerre(3, 3)(x), binom(6, 3) * hyp1f1(-3, 4, x)) + True + + This is the plot of the generalized Laguerre polynomials + :math:`L_3^{(\alpha)}` for some values of :math:`\alpha`: + + >>> import matplotlib.pyplot as plt + >>> x = np.arange(-4.0, 12.0, 0.01) + >>> fig, ax = plt.subplots() + >>> ax.set_ylim(-5.0, 10.0) + >>> ax.set_title(r'Generalized Laguerre polynomials $L_3^{\alpha}$') + >>> for alpha in np.arange(0, 5): + ... 
ax.plot(x, genlaguerre(3, alpha)(x), label=rf'$L_3^{(alpha)}$') + >>> plt.legend(loc='best') + >>> plt.show() + + """ + if alpha <= -1: + raise ValueError("alpha must be > -1") + if n < 0: + raise ValueError("n must be nonnegative.") + + if n == 0: + n1 = n + 1 + else: + n1 = n + x, w = roots_genlaguerre(n1, alpha) + def wfunc(x): + return exp(-x) * x ** alpha + if n == 0: + x, w = [], [] + hn = _gam(n + alpha + 1) / _gam(n + 1) + kn = (-1)**n / _gam(n + 1) + p = orthopoly1d(x, w, hn, kn, wfunc, (0, inf), monic, + lambda x: _ufuncs.eval_genlaguerre(n, alpha, x)) + return p + +# Laguerre L_n(x) + + +def roots_laguerre(n, mu=False): + r"""Gauss-Laguerre quadrature. + + Compute the sample points and weights for Gauss-Laguerre + quadrature. The sample points are the roots of the nth degree + Laguerre polynomial, :math:`L_n(x)`. These sample points and + weights correctly integrate polynomials of degree :math:`2n - 1` + or less over the interval :math:`[0, \infty]` with weight function + :math:`w(x) = e^{-x}`. See 22.2.13 in [AS]_ for details. + + Parameters + ---------- + n : int + quadrature order + mu : bool, optional + If True, return the sum of the weights, optional. + + Returns + ------- + x : ndarray + Sample points + w : ndarray + Weights + mu : float + Sum of the weights + + See Also + -------- + scipy.integrate.quadrature + scipy.integrate.fixed_quad + numpy.polynomial.laguerre.laggauss + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + """ + return roots_genlaguerre(n, 0.0, mu=mu) + + +def laguerre(n, monic=False): + r"""Laguerre polynomial. + + Defined to be the solution of + + .. math:: + x\frac{d^2}{dx^2}L_n + (1 - x)\frac{d}{dx}L_n + nL_n = 0; + + :math:`L_n` is a polynomial of degree :math:`n`. + + Parameters + ---------- + n : int + Degree of the polynomial. 
+ monic : bool, optional + If `True`, scale the leading coefficient to be 1. Default is + `False`. + + Returns + ------- + L : orthopoly1d + Laguerre Polynomial. + + See Also + -------- + genlaguerre : Generalized (associated) Laguerre polynomial. + + Notes + ----- + The polynomials :math:`L_n` are orthogonal over :math:`[0, + \infty)` with weight function :math:`e^{-x}`. + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + Examples + -------- + The Laguerre polynomials :math:`L_n` are the special case + :math:`\alpha = 0` of the generalized Laguerre polynomials + :math:`L_n^{(\alpha)}`. + Let's verify it on the interval :math:`[-1, 1]`: + + >>> import numpy as np + >>> from scipy.special import genlaguerre + >>> from scipy.special import laguerre + >>> x = np.arange(-1.0, 1.0, 0.01) + >>> np.allclose(genlaguerre(3, 0)(x), laguerre(3)(x)) + True + + The polynomials :math:`L_n` also satisfy the recurrence relation: + + .. math:: + (n + 1)L_{n+1}(x) = (2n +1 -x)L_n(x) - nL_{n-1}(x) + + This can be easily checked on :math:`[0, 1]` for :math:`n = 3`: + + >>> x = np.arange(0.0, 1.0, 0.01) + >>> np.allclose(4 * laguerre(4)(x), + ... (7 - x) * laguerre(3)(x) - 3 * laguerre(2)(x)) + True + + This is the plot of the first few Laguerre polynomials :math:`L_n`: + + >>> import matplotlib.pyplot as plt + >>> x = np.arange(-1.0, 5.0, 0.01) + >>> fig, ax = plt.subplots() + >>> ax.set_ylim(-5.0, 5.0) + >>> ax.set_title(r'Laguerre polynomials $L_n$') + >>> for n in np.arange(0, 5): + ... 
ax.plot(x, laguerre(n)(x), label=rf'$L_{n}$') + >>> plt.legend(loc='best') + >>> plt.show() + + """ + if n < 0: + raise ValueError("n must be nonnegative.") + + if n == 0: + n1 = n + 1 + else: + n1 = n + x, w = roots_laguerre(n1) + if n == 0: + x, w = [], [] + hn = 1.0 + kn = (-1)**n / _gam(n + 1) + p = orthopoly1d(x, w, hn, kn, lambda x: exp(-x), (0, inf), monic, + lambda x: _ufuncs.eval_laguerre(n, x)) + return p + +# Hermite 1 H_n(x) + + +def roots_hermite(n, mu=False): + r"""Gauss-Hermite (physicist's) quadrature. + + Compute the sample points and weights for Gauss-Hermite + quadrature. The sample points are the roots of the nth degree + Hermite polynomial, :math:`H_n(x)`. These sample points and + weights correctly integrate polynomials of degree :math:`2n - 1` + or less over the interval :math:`[-\infty, \infty]` with weight + function :math:`w(x) = e^{-x^2}`. See 22.2.14 in [AS]_ for + details. + + Parameters + ---------- + n : int + quadrature order + mu : bool, optional + If True, return the sum of the weights, optional. + + Returns + ------- + x : ndarray + Sample points + w : ndarray + Weights + mu : float + Sum of the weights + + See Also + -------- + scipy.integrate.quadrature + scipy.integrate.fixed_quad + numpy.polynomial.hermite.hermgauss + roots_hermitenorm + + Notes + ----- + For small n up to 150 a modified version of the Golub-Welsch + algorithm is used. Nodes are computed from the eigenvalue + problem and improved by one step of a Newton iteration. + The weights are computed from the well-known analytical formula. + + For n larger than 150 an optimal asymptotic algorithm is applied + which computes nodes and weights in a numerically stable manner. + The algorithm has linear runtime making computation for very + large n (several thousand or more) feasible. + + References + ---------- + .. [townsend.trogdon.olver-2014] + Townsend, A. and Trogdon, T. and Olver, S. 
(2014) + *Fast computation of Gauss quadrature nodes and + weights on the whole real line*. :arXiv:`1410.5286`. + .. [townsend.trogdon.olver-2015] + Townsend, A. and Trogdon, T. and Olver, S. (2015) + *Fast computation of Gauss quadrature nodes and + weights on the whole real line*. + IMA Journal of Numerical Analysis + :doi:`10.1093/imanum/drv002`. + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + """ + m = int(n) + if n < 1 or n != m: + raise ValueError("n must be a positive integer.") + + mu0 = np.sqrt(np.pi) + if n <= 150: + def an_func(k): + return 0.0 * k + def bn_func(k): + return np.sqrt(k / 2.0) + f = _ufuncs.eval_hermite + def df(n, x): + return 2.0 * n * _ufuncs.eval_hermite(n - 1, x) + return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu) + else: + nodes, weights = _roots_hermite_asy(m) + if mu: + return nodes, weights, mu0 + else: + return nodes, weights + + +def _compute_tauk(n, k, maxit=5): + """Helper function for Tricomi initial guesses + + For details, see formula 3.1 in lemma 3.1 in the + original paper. + + Parameters + ---------- + n : int + Quadrature order + k : ndarray of type int + Index of roots :math:`\tau_k` to compute + maxit : int + Number of Newton maxit performed, the default + value of 5 is sufficient. + + Returns + ------- + tauk : ndarray + Roots of equation 3.1 + + See Also + -------- + initial_nodes_a + roots_hermite_asy + """ + a = n % 2 - 0.5 + c = (4.0*floor(n/2.0) - 4.0*k + 3.0)*pi / (4.0*floor(n/2.0) + 2.0*a + 2.0) + def f(x): + return x - sin(x) - c + def df(x): + return 1.0 - cos(x) + xi = 0.5*pi + for i in range(maxit): + xi = xi - f(xi)/df(xi) + return xi + + +def _initial_nodes_a(n, k): + r"""Tricomi initial guesses + + Computes an initial approximation to the square of the `k`-th + (positive) root :math:`x_k` of the Hermite polynomial :math:`H_n` + of order :math:`n`. 
The formula is the one from lemma 3.1 in the + original paper. The guesses are accurate except in the region + near :math:`\sqrt{2n + 1}`. + + Parameters + ---------- + n : int + Quadrature order + k : ndarray of type int + Index of roots to compute + + Returns + ------- + xksq : ndarray + Square of the approximate roots + + See Also + -------- + initial_nodes + roots_hermite_asy + """ + tauk = _compute_tauk(n, k) + sigk = cos(0.5*tauk)**2 + a = n % 2 - 0.5 + nu = 4.0*floor(n/2.0) + 2.0*a + 2.0 + # Initial approximation of Hermite roots (square) + xksq = nu*sigk - 1.0/(3.0*nu) * (5.0/(4.0*(1.0-sigk)**2) - 1.0/(1.0-sigk) - 0.25) + return xksq + + +def _initial_nodes_b(n, k): + r"""Gatteschi initial guesses + + Computes an initial approximation to the square of the kth + (positive) root :math:`x_k` of the Hermite polynomial :math:`H_n` + of order :math:`n`. The formula is the one from lemma 3.2 in the + original paper. The guesses are accurate in the region just + below :math:`\sqrt{2n + 1}`. + + Parameters + ---------- + n : int + Quadrature order + k : ndarray of type int + Index of roots to compute + + Returns + ------- + xksq : ndarray + Square of the approximate root + + See Also + -------- + initial_nodes + roots_hermite_asy + """ + a = n % 2 - 0.5 + nu = 4.0*floor(n/2.0) + 2.0*a + 2.0 + # Airy roots by approximation + ak = _specfun.airyzo(k.max(), 1)[0][::-1] + # Initial approximation of Hermite roots (square) + xksq = (nu + + 2.0**(2.0/3.0) * ak * nu**(1.0/3.0) + + 1.0/5.0 * 2.0**(4.0/3.0) * ak**2 * nu**(-1.0/3.0) + + (9.0/140.0 - 12.0/175.0 * ak**3) * nu**(-1.0) + + (16.0/1575.0 * ak + 92.0/7875.0 * ak**4) * 2.0**(2.0/3.0) * nu**(-5.0/3.0) + - (15152.0/3031875.0 * ak**5 + 1088.0/121275.0 * ak**2) + * 2.0**(1.0/3.0) * nu**(-7.0/3.0)) + return xksq + + +def _initial_nodes(n): + """Initial guesses for the Hermite roots + + Computes an initial approximation to the non-negative + roots :math:`x_k` of the Hermite polynomial :math:`H_n` + of order :math:`n`. 
The Tricomi and Gatteschi initial + guesses are used in the region where they are accurate. + + Parameters + ---------- + n : int + Quadrature order + + Returns + ------- + xk : ndarray + Approximate roots + + See Also + -------- + roots_hermite_asy + """ + # Turnover point + # linear polynomial fit to error of 10, 25, 40, ..., 1000 point rules + fit = 0.49082003*n - 4.37859653 + turnover = around(fit).astype(int) + # Compute all approximations + ia = arange(1, int(floor(n*0.5)+1)) + ib = ia[::-1] + xasq = _initial_nodes_a(n, ia[:turnover+1]) + xbsq = _initial_nodes_b(n, ib[turnover+1:]) + # Combine + iv = sqrt(hstack([xasq, xbsq])) + # Central node is always zero + if n % 2 == 1: + iv = hstack([0.0, iv]) + return iv + + +def _pbcf(n, theta): + r"""Asymptotic series expansion of parabolic cylinder function + + The implementation is based on sections 3.2 and 3.3 from the + original paper. Compared to the published version this code + adds one more term to the asymptotic series. The detailed + formulas can be found at [parabolic-asymptotics]_. The evaluation + is done in a transformed variable :math:`\theta := \arccos(t)` + where :math:`t := x / \mu` and :math:`\mu := \sqrt{2n + 1}`. + + Parameters + ---------- + n : int + Quadrature order + theta : ndarray + Transformed position variable + + Returns + ------- + U : ndarray + Value of the parabolic cylinder function :math:`U(a, \theta)`. + Ud : ndarray + Value of the derivative :math:`U^{\prime}(a, \theta)` of + the parabolic cylinder function. + + See Also + -------- + roots_hermite_asy + + References + ---------- + .. 
[parabolic-asymptotics] + https://dlmf.nist.gov/12.10#vii + """ + st = sin(theta) + ct = cos(theta) + # https://dlmf.nist.gov/12.10#vii + mu = 2.0*n + 1.0 + # https://dlmf.nist.gov/12.10#E23 + eta = 0.5*theta - 0.5*st*ct + # https://dlmf.nist.gov/12.10#E39 + zeta = -(3.0*eta/2.0) ** (2.0/3.0) + # https://dlmf.nist.gov/12.10#E40 + phi = (-zeta / st**2) ** (0.25) + # Coefficients + # https://dlmf.nist.gov/12.10#E43 + a0 = 1.0 + a1 = 0.10416666666666666667 + a2 = 0.08355034722222222222 + a3 = 0.12822657455632716049 + a4 = 0.29184902646414046425 + a5 = 0.88162726744375765242 + b0 = 1.0 + b1 = -0.14583333333333333333 + b2 = -0.09874131944444444444 + b3 = -0.14331205391589506173 + b4 = -0.31722720267841354810 + b5 = -0.94242914795712024914 + # Polynomials + # https://dlmf.nist.gov/12.10#E9 + # https://dlmf.nist.gov/12.10#E10 + ctp = ct ** arange(16).reshape((-1,1)) + u0 = 1.0 + u1 = (1.0*ctp[3,:] - 6.0*ct) / 24.0 + u2 = (-9.0*ctp[4,:] + 249.0*ctp[2,:] + 145.0) / 1152.0 + u3 = (-4042.0*ctp[9,:] + 18189.0*ctp[7,:] - 28287.0*ctp[5,:] + - 151995.0*ctp[3,:] - 259290.0*ct) / 414720.0 + u4 = (72756.0*ctp[10,:] - 321339.0*ctp[8,:] - 154982.0*ctp[6,:] + + 50938215.0*ctp[4,:] + 122602962.0*ctp[2,:] + 12773113.0) / 39813120.0 + u5 = (82393456.0*ctp[15,:] - 617950920.0*ctp[13,:] + 1994971575.0*ctp[11,:] + - 3630137104.0*ctp[9,:] + 4433574213.0*ctp[7,:] - 37370295816.0*ctp[5,:] + - 119582875013.0*ctp[3,:] - 34009066266.0*ct) / 6688604160.0 + v0 = 1.0 + v1 = (1.0*ctp[3,:] + 6.0*ct) / 24.0 + v2 = (15.0*ctp[4,:] - 327.0*ctp[2,:] - 143.0) / 1152.0 + v3 = (-4042.0*ctp[9,:] + 18189.0*ctp[7,:] - 36387.0*ctp[5,:] + + 238425.0*ctp[3,:] + 259290.0*ct) / 414720.0 + v4 = (-121260.0*ctp[10,:] + 551733.0*ctp[8,:] - 151958.0*ctp[6,:] + - 57484425.0*ctp[4,:] - 132752238.0*ctp[2,:] - 12118727) / 39813120.0 + v5 = (82393456.0*ctp[15,:] - 617950920.0*ctp[13,:] + 2025529095.0*ctp[11,:] + - 3750839308.0*ctp[9,:] + 3832454253.0*ctp[7,:] + 35213253348.0*ctp[5,:] + + 130919230435.0*ctp[3,:] + 
34009066266*ct) / 6688604160.0 + # Airy Evaluation (Bi and Bip unused) + Ai, Aip, Bi, Bip = airy(mu**(4.0/6.0) * zeta) + # Prefactor for U + P = 2.0*sqrt(pi) * mu**(1.0/6.0) * phi + # Terms for U + # https://dlmf.nist.gov/12.10#E42 + phip = phi ** arange(6, 31, 6).reshape((-1,1)) + A0 = b0*u0 + A1 = (b2*u0 + phip[0,:]*b1*u1 + phip[1,:]*b0*u2) / zeta**3 + A2 = (b4*u0 + phip[0,:]*b3*u1 + phip[1,:]*b2*u2 + phip[2,:]*b1*u3 + + phip[3,:]*b0*u4) / zeta**6 + B0 = -(a1*u0 + phip[0,:]*a0*u1) / zeta**2 + B1 = -(a3*u0 + phip[0,:]*a2*u1 + phip[1,:]*a1*u2 + phip[2,:]*a0*u3) / zeta**5 + B2 = -(a5*u0 + phip[0,:]*a4*u1 + phip[1,:]*a3*u2 + phip[2,:]*a2*u3 + + phip[3,:]*a1*u4 + phip[4,:]*a0*u5) / zeta**8 + # U + # https://dlmf.nist.gov/12.10#E35 + U = P * (Ai * (A0 + A1/mu**2.0 + A2/mu**4.0) + + Aip * (B0 + B1/mu**2.0 + B2/mu**4.0) / mu**(8.0/6.0)) + # Prefactor for derivative of U + Pd = sqrt(2.0*pi) * mu**(2.0/6.0) / phi + # Terms for derivative of U + # https://dlmf.nist.gov/12.10#E46 + C0 = -(b1*v0 + phip[0,:]*b0*v1) / zeta + C1 = -(b3*v0 + phip[0,:]*b2*v1 + phip[1,:]*b1*v2 + phip[2,:]*b0*v3) / zeta**4 + C2 = -(b5*v0 + phip[0,:]*b4*v1 + phip[1,:]*b3*v2 + phip[2,:]*b2*v3 + + phip[3,:]*b1*v4 + phip[4,:]*b0*v5) / zeta**7 + D0 = a0*v0 + D1 = (a2*v0 + phip[0,:]*a1*v1 + phip[1,:]*a0*v2) / zeta**3 + D2 = (a4*v0 + phip[0,:]*a3*v1 + phip[1,:]*a2*v2 + phip[2,:]*a1*v3 + + phip[3,:]*a0*v4) / zeta**6 + # Derivative of U + # https://dlmf.nist.gov/12.10#E36 + Ud = Pd * (Ai * (C0 + C1/mu**2.0 + C2/mu**4.0) / mu**(4.0/6.0) + + Aip * (D0 + D1/mu**2.0 + D2/mu**4.0)) + return U, Ud + + +def _newton(n, x_initial, maxit=5): + """Newton iteration for polishing the asymptotic approximation + to the zeros of the Hermite polynomials. + + Parameters + ---------- + n : int + Quadrature order + x_initial : ndarray + Initial guesses for the roots + maxit : int + Maximal number of Newton iterations. + The default 5 is sufficient, usually + only one or two steps are needed. 
+ + Returns + ------- + nodes : ndarray + Quadrature nodes + weights : ndarray + Quadrature weights + + See Also + -------- + roots_hermite_asy + """ + # Variable transformation + mu = sqrt(2.0*n + 1.0) + t = x_initial / mu + theta = arccos(t) + # Newton iteration + for i in range(maxit): + u, ud = _pbcf(n, theta) + dtheta = u / (sqrt(2.0) * mu * sin(theta) * ud) + theta = theta + dtheta + if max(abs(dtheta)) < 1e-14: + break + # Undo variable transformation + x = mu * cos(theta) + # Central node is always zero + if n % 2 == 1: + x[0] = 0.0 + # Compute weights + w = exp(-x**2) / (2.0*ud**2) + return x, w + + +def _roots_hermite_asy(n): + r"""Gauss-Hermite (physicist's) quadrature for large n. + + Computes the sample points and weights for Gauss-Hermite quadrature. + The sample points are the roots of the nth degree Hermite polynomial, + :math:`H_n(x)`. These sample points and weights correctly integrate + polynomials of degree :math:`2n - 1` or less over the interval + :math:`[-\infty, \infty]` with weight function :math:`f(x) = e^{-x^2}`. + + This method relies on asymptotic expansions which work best for n > 150. + The algorithm has linear runtime making computation for very large n + feasible. + + Parameters + ---------- + n : int + quadrature order + + Returns + ------- + nodes : ndarray + Quadrature nodes + weights : ndarray + Quadrature weights + + See Also + -------- + roots_hermite + + References + ---------- + .. [townsend.trogdon.olver-2014] + Townsend, A. and Trogdon, T. and Olver, S. (2014) + *Fast computation of Gauss quadrature nodes and + weights on the whole real line*. :arXiv:`1410.5286`. + + .. [townsend.trogdon.olver-2015] + Townsend, A. and Trogdon, T. and Olver, S. (2015) + *Fast computation of Gauss quadrature nodes and + weights on the whole real line*. + IMA Journal of Numerical Analysis + :doi:`10.1093/imanum/drv002`. 
+ """ + iv = _initial_nodes(n) + nodes, weights = _newton(n, iv) + # Combine with negative parts + if n % 2 == 0: + nodes = hstack([-nodes[::-1], nodes]) + weights = hstack([weights[::-1], weights]) + else: + nodes = hstack([-nodes[-1:0:-1], nodes]) + weights = hstack([weights[-1:0:-1], weights]) + # Scale weights + weights *= sqrt(pi) / sum(weights) + return nodes, weights + + +def hermite(n, monic=False): + r"""Physicist's Hermite polynomial. + + Defined by + + .. math:: + + H_n(x) = (-1)^ne^{x^2}\frac{d^n}{dx^n}e^{-x^2}; + + :math:`H_n` is a polynomial of degree :math:`n`. + + Parameters + ---------- + n : int + Degree of the polynomial. + monic : bool, optional + If `True`, scale the leading coefficient to be 1. Default is + `False`. + + Returns + ------- + H : orthopoly1d + Hermite polynomial. + + Notes + ----- + The polynomials :math:`H_n` are orthogonal over :math:`(-\infty, + \infty)` with weight function :math:`e^{-x^2}`. + + Examples + -------- + >>> from scipy import special + >>> import matplotlib.pyplot as plt + >>> import numpy as np + + >>> p_monic = special.hermite(3, monic=True) + >>> p_monic + poly1d([ 1. , 0. , -1.5, 0. ]) + >>> p_monic(1) + -0.49999999999999983 + >>> x = np.linspace(-3, 3, 400) + >>> y = p_monic(x) + >>> plt.plot(x, y) + >>> plt.title("Monic Hermite polynomial of degree 3") + >>> plt.xlabel("x") + >>> plt.ylabel("H_3(x)") + >>> plt.show() + + """ + if n < 0: + raise ValueError("n must be nonnegative.") + + if n == 0: + n1 = n + 1 + else: + n1 = n + x, w = roots_hermite(n1) + def wfunc(x): + return exp(-x * x) + if n == 0: + x, w = [], [] + hn = 2**n * _gam(n + 1) * sqrt(pi) + kn = 2**n + p = orthopoly1d(x, w, hn, kn, wfunc, (-inf, inf), monic, + lambda x: _ufuncs.eval_hermite(n, x)) + return p + +# Hermite 2 He_n(x) + + +def roots_hermitenorm(n, mu=False): + r"""Gauss-Hermite (statistician's) quadrature. + + Compute the sample points and weights for Gauss-Hermite + quadrature. 
The sample points are the roots of the nth degree + Hermite polynomial, :math:`He_n(x)`. These sample points and + weights correctly integrate polynomials of degree :math:`2n - 1` + or less over the interval :math:`[-\infty, \infty]` with weight + function :math:`w(x) = e^{-x^2/2}`. See 22.2.15 in [AS]_ for more + details. + + Parameters + ---------- + n : int + quadrature order + mu : bool, optional + If True, return the sum of the weights, optional. + + Returns + ------- + x : ndarray + Sample points + w : ndarray + Weights + mu : float + Sum of the weights + + See Also + -------- + scipy.integrate.quadrature + scipy.integrate.fixed_quad + numpy.polynomial.hermite_e.hermegauss + + Notes + ----- + For small n up to 150 a modified version of the Golub-Welsch + algorithm is used. Nodes are computed from the eigenvalue + problem and improved by one step of a Newton iteration. + The weights are computed from the well-known analytical formula. + + For n larger than 150 an optimal asymptotic algorithm is used + which computes nodes and weights in a numerical stable manner. + The algorithm has linear runtime making computation for very + large n (several thousand or more) feasible. + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. 
+ + """ + m = int(n) + if n < 1 or n != m: + raise ValueError("n must be a positive integer.") + + mu0 = np.sqrt(2.0*np.pi) + if n <= 150: + def an_func(k): + return 0.0 * k + def bn_func(k): + return np.sqrt(k) + f = _ufuncs.eval_hermitenorm + def df(n, x): + return n * _ufuncs.eval_hermitenorm(n - 1, x) + return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu) + else: + nodes, weights = _roots_hermite_asy(m) + # Transform + nodes *= sqrt(2) + weights *= sqrt(2) + if mu: + return nodes, weights, mu0 + else: + return nodes, weights + + +def hermitenorm(n, monic=False): + r"""Normalized (probabilist's) Hermite polynomial. + + Defined by + + .. math:: + + He_n(x) = (-1)^ne^{x^2/2}\frac{d^n}{dx^n}e^{-x^2/2}; + + :math:`He_n` is a polynomial of degree :math:`n`. + + Parameters + ---------- + n : int + Degree of the polynomial. + monic : bool, optional + If `True`, scale the leading coefficient to be 1. Default is + `False`. + + Returns + ------- + He : orthopoly1d + Hermite polynomial. + + Notes + ----- + + The polynomials :math:`He_n` are orthogonal over :math:`(-\infty, + \infty)` with weight function :math:`e^{-x^2/2}`. + + """ + if n < 0: + raise ValueError("n must be nonnegative.") + + if n == 0: + n1 = n + 1 + else: + n1 = n + x, w = roots_hermitenorm(n1) + def wfunc(x): + return exp(-x * x / 2.0) + if n == 0: + x, w = [], [] + hn = sqrt(2 * pi) * _gam(n + 1) + kn = 1.0 + p = orthopoly1d(x, w, hn, kn, wfunc=wfunc, limits=(-inf, inf), monic=monic, + eval_func=lambda x: _ufuncs.eval_hermitenorm(n, x)) + return p + +# The remainder of the polynomials can be derived from the ones above. + +# Ultraspherical (Gegenbauer) C^(alpha)_n(x) + + +def roots_gegenbauer(n, alpha, mu=False): + r"""Gauss-Gegenbauer quadrature. + + Compute the sample points and weights for Gauss-Gegenbauer + quadrature. The sample points are the roots of the nth degree + Gegenbauer polynomial, :math:`C^{\alpha}_n(x)`. 
These sample + points and weights correctly integrate polynomials of degree + :math:`2n - 1` or less over the interval :math:`[-1, 1]` with + weight function :math:`w(x) = (1 - x^2)^{\alpha - 1/2}`. See + 22.2.3 in [AS]_ for more details. + + Parameters + ---------- + n : int + quadrature order + alpha : float + alpha must be > -0.5 + mu : bool, optional + If True, return the sum of the weights, optional. + + Returns + ------- + x : ndarray + Sample points + w : ndarray + Weights + mu : float + Sum of the weights + + See Also + -------- + scipy.integrate.quadrature + scipy.integrate.fixed_quad + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + """ + m = int(n) + if n < 1 or n != m: + raise ValueError("n must be a positive integer.") + if alpha < -0.5: + raise ValueError("alpha must be greater than -0.5.") + elif alpha == 0.0: + # C(n,0,x) == 0 uniformly, however, as alpha->0, C(n,alpha,x)->T(n,x) + # strictly, we should just error out here, since the roots are not + # really defined, but we used to return something useful, so let's + # keep doing so. + return roots_chebyt(n, mu) + + if alpha <= 170: + mu0 = (np.sqrt(np.pi) * _ufuncs.gamma(alpha + 0.5)) \ + / _ufuncs.gamma(alpha + 1) + else: + # For large alpha we use a Taylor series expansion around inf, + # expressed as a 6th order polynomial of a^-1 and using Horner's + # method to minimize computation and maximize precision + inv_alpha = 1. 
/ alpha + coeffs = np.array([0.000207186, -0.00152206, -0.000640869, + 0.00488281, 0.0078125, -0.125, 1.]) + mu0 = coeffs[0] + for term in range(1, len(coeffs)): + mu0 = mu0 * inv_alpha + coeffs[term] + mu0 = mu0 * np.sqrt(np.pi / alpha) + def an_func(k): + return 0.0 * k + def bn_func(k): + return np.sqrt(k * (k + 2 * alpha - 1) / (4 * (k + alpha) * (k + alpha - 1))) + def f(n, x): + return _ufuncs.eval_gegenbauer(n, alpha, x) + def df(n, x): + return ( + -n * x * _ufuncs.eval_gegenbauer(n, alpha, x) + + (n + 2 * alpha - 1) * _ufuncs.eval_gegenbauer(n - 1, alpha, x) + ) / (1 - x ** 2) + return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu) + + +def gegenbauer(n, alpha, monic=False): + r"""Gegenbauer (ultraspherical) polynomial. + + Defined to be the solution of + + .. math:: + (1 - x^2)\frac{d^2}{dx^2}C_n^{(\alpha)} + - (2\alpha + 1)x\frac{d}{dx}C_n^{(\alpha)} + + n(n + 2\alpha)C_n^{(\alpha)} = 0 + + for :math:`\alpha > -1/2`; :math:`C_n^{(\alpha)}` is a polynomial + of degree :math:`n`. + + Parameters + ---------- + n : int + Degree of the polynomial. + alpha : float + Parameter, must be greater than -0.5. + monic : bool, optional + If `True`, scale the leading coefficient to be 1. Default is + `False`. + + Returns + ------- + C : orthopoly1d + Gegenbauer polynomial. + + Notes + ----- + The polynomials :math:`C_n^{(\alpha)}` are orthogonal over + :math:`[-1,1]` with weight function :math:`(1 - x^2)^{(\alpha - + 1/2)}`. + + Examples + -------- + >>> import numpy as np + >>> from scipy import special + >>> import matplotlib.pyplot as plt + + We can initialize a variable ``p`` as a Gegenbauer polynomial using the + `gegenbauer` function and evaluate at a point ``x = 1``. + + >>> p = special.gegenbauer(3, 0.5, monic=False) + >>> p + poly1d([ 2.5, 0. , -1.5, 0. 
]) + >>> p(1) + 1.0 + + To evaluate ``p`` at various points ``x`` in the interval ``(-3, 3)``, + simply pass an array ``x`` to ``p`` as follows: + + >>> x = np.linspace(-3, 3, 400) + >>> y = p(x) + + We can then visualize ``x, y`` using `matplotlib.pyplot`. + + >>> fig, ax = plt.subplots() + >>> ax.plot(x, y) + >>> ax.set_title("Gegenbauer (ultraspherical) polynomial of degree 3") + >>> ax.set_xlabel("x") + >>> ax.set_ylabel("G_3(x)") + >>> plt.show() + + """ + base = jacobi(n, alpha - 0.5, alpha - 0.5, monic=monic) + if monic: + return base + # Abrahmowitz and Stegan 22.5.20 + factor = (_gam(2*alpha + n) * _gam(alpha + 0.5) / + _gam(2*alpha) / _gam(alpha + 0.5 + n)) + base._scale(factor) + base.__dict__['_eval_func'] = lambda x: _ufuncs.eval_gegenbauer(float(n), + alpha, x) + return base + +# Chebyshev of the first kind: T_n(x) = +# n! sqrt(pi) / _gam(n+1./2)* P^(-1/2,-1/2)_n(x) +# Computed anew. + + +def roots_chebyt(n, mu=False): + r"""Gauss-Chebyshev (first kind) quadrature. + + Computes the sample points and weights for Gauss-Chebyshev + quadrature. The sample points are the roots of the nth degree + Chebyshev polynomial of the first kind, :math:`T_n(x)`. These + sample points and weights correctly integrate polynomials of + degree :math:`2n - 1` or less over the interval :math:`[-1, 1]` + with weight function :math:`w(x) = 1/\sqrt{1 - x^2}`. See 22.2.4 + in [AS]_ for more details. + + Parameters + ---------- + n : int + quadrature order + mu : bool, optional + If True, return the sum of the weights, optional. + + Returns + ------- + x : ndarray + Sample points + w : ndarray + Weights + mu : float + Sum of the weights + + See Also + -------- + scipy.integrate.quadrature + scipy.integrate.fixed_quad + numpy.polynomial.chebyshev.chebgauss + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. 
+ + """ + m = int(n) + if n < 1 or n != m: + raise ValueError('n must be a positive integer.') + x = _ufuncs._sinpi(np.arange(-m + 1, m, 2) / (2*m)) + w = np.full_like(x, pi/m) + if mu: + return x, w, pi + else: + return x, w + + +def chebyt(n, monic=False): + r"""Chebyshev polynomial of the first kind. + + Defined to be the solution of + + .. math:: + (1 - x^2)\frac{d^2}{dx^2}T_n - x\frac{d}{dx}T_n + n^2T_n = 0; + + :math:`T_n` is a polynomial of degree :math:`n`. + + Parameters + ---------- + n : int + Degree of the polynomial. + monic : bool, optional + If `True`, scale the leading coefficient to be 1. Default is + `False`. + + Returns + ------- + T : orthopoly1d + Chebyshev polynomial of the first kind. + + See Also + -------- + chebyu : Chebyshev polynomial of the second kind. + + Notes + ----- + The polynomials :math:`T_n` are orthogonal over :math:`[-1, 1]` + with weight function :math:`(1 - x^2)^{-1/2}`. + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + Examples + -------- + Chebyshev polynomials of the first kind of order :math:`n` can + be obtained as the determinant of specific :math:`n \times n` + matrices. As an example we can check how the points obtained from + the determinant of the following :math:`3 \times 3` matrix + lay exactly on :math:`T_3`: + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.linalg import det + >>> from scipy.special import chebyt + >>> x = np.arange(-1.0, 1.0, 0.01) + >>> fig, ax = plt.subplots() + >>> ax.set_ylim(-2.0, 2.0) + >>> ax.set_title(r'Chebyshev polynomial $T_3$') + >>> ax.plot(x, chebyt(3)(x), label=rf'$T_3$') + >>> for p in np.arange(-1.0, 1.0, 0.1): + ... ax.plot(p, + ... det(np.array([[p, 1, 0], [1, 2*p, 1], [0, 1, 2*p]])), + ... 
'rx') + >>> plt.legend(loc='best') + >>> plt.show() + + They are also related to the Jacobi Polynomials + :math:`P_n^{(-0.5, -0.5)}` through the relation: + + .. math:: + P_n^{(-0.5, -0.5)}(x) = \frac{1}{4^n} \binom{2n}{n} T_n(x) + + Let's verify it for :math:`n = 3`: + + >>> from scipy.special import binom + >>> from scipy.special import jacobi + >>> x = np.arange(-1.0, 1.0, 0.01) + >>> np.allclose(jacobi(3, -0.5, -0.5)(x), + ... 1/64 * binom(6, 3) * chebyt(3)(x)) + True + + We can plot the Chebyshev polynomials :math:`T_n` for some values + of :math:`n`: + + >>> x = np.arange(-1.5, 1.5, 0.01) + >>> fig, ax = plt.subplots() + >>> ax.set_ylim(-4.0, 4.0) + >>> ax.set_title(r'Chebyshev polynomials $T_n$') + >>> for n in np.arange(2,5): + ... ax.plot(x, chebyt(n)(x), label=rf'$T_n={n}$') + >>> plt.legend(loc='best') + >>> plt.show() + + """ + if n < 0: + raise ValueError("n must be nonnegative.") + + def wfunc(x): + return 1.0 / sqrt(1 - x * x) + if n == 0: + return orthopoly1d([], [], pi, 1.0, wfunc, (-1, 1), monic, + lambda x: _ufuncs.eval_chebyt(n, x)) + n1 = n + x, w, mu = roots_chebyt(n1, mu=True) + hn = pi / 2 + kn = 2**(n - 1) + p = orthopoly1d(x, w, hn, kn, wfunc, (-1, 1), monic, + lambda x: _ufuncs.eval_chebyt(n, x)) + return p + +# Chebyshev of the second kind +# U_n(x) = (n+1)! sqrt(pi) / (2*_gam(n+3./2)) * P^(1/2,1/2)_n(x) + + +def roots_chebyu(n, mu=False): + r"""Gauss-Chebyshev (second kind) quadrature. + + Computes the sample points and weights for Gauss-Chebyshev + quadrature. The sample points are the roots of the nth degree + Chebyshev polynomial of the second kind, :math:`U_n(x)`. These + sample points and weights correctly integrate polynomials of + degree :math:`2n - 1` or less over the interval :math:`[-1, 1]` + with weight function :math:`w(x) = \sqrt{1 - x^2}`. See 22.2.5 in + [AS]_ for details. + + Parameters + ---------- + n : int + quadrature order + mu : bool, optional + If True, return the sum of the weights, optional. 
+ + Returns + ------- + x : ndarray + Sample points + w : ndarray + Weights + mu : float + Sum of the weights + + See Also + -------- + scipy.integrate.quadrature + scipy.integrate.fixed_quad + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + """ + m = int(n) + if n < 1 or n != m: + raise ValueError('n must be a positive integer.') + t = np.arange(m, 0, -1) * pi / (m + 1) + x = np.cos(t) + w = pi * np.sin(t)**2 / (m + 1) + if mu: + return x, w, pi / 2 + else: + return x, w + + +def chebyu(n, monic=False): + r"""Chebyshev polynomial of the second kind. + + Defined to be the solution of + + .. math:: + (1 - x^2)\frac{d^2}{dx^2}U_n - 3x\frac{d}{dx}U_n + + n(n + 2)U_n = 0; + + :math:`U_n` is a polynomial of degree :math:`n`. + + Parameters + ---------- + n : int + Degree of the polynomial. + monic : bool, optional + If `True`, scale the leading coefficient to be 1. Default is + `False`. + + Returns + ------- + U : orthopoly1d + Chebyshev polynomial of the second kind. + + See Also + -------- + chebyt : Chebyshev polynomial of the first kind. + + Notes + ----- + The polynomials :math:`U_n` are orthogonal over :math:`[-1, 1]` + with weight function :math:`(1 - x^2)^{1/2}`. + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + Examples + -------- + Chebyshev polynomials of the second kind of order :math:`n` can + be obtained as the determinant of specific :math:`n \times n` + matrices. 
As an example we can check how the points obtained from + the determinant of the following :math:`3 \times 3` matrix + lay exactly on :math:`U_3`: + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.linalg import det + >>> from scipy.special import chebyu + >>> x = np.arange(-1.0, 1.0, 0.01) + >>> fig, ax = plt.subplots() + >>> ax.set_ylim(-2.0, 2.0) + >>> ax.set_title(r'Chebyshev polynomial $U_3$') + >>> ax.plot(x, chebyu(3)(x), label=rf'$U_3$') + >>> for p in np.arange(-1.0, 1.0, 0.1): + ... ax.plot(p, + ... det(np.array([[2*p, 1, 0], [1, 2*p, 1], [0, 1, 2*p]])), + ... 'rx') + >>> plt.legend(loc='best') + >>> plt.show() + + They satisfy the recurrence relation: + + .. math:: + U_{2n-1}(x) = 2 T_n(x)U_{n-1}(x) + + where the :math:`T_n` are the Chebyshev polynomial of the first kind. + Let's verify it for :math:`n = 2`: + + >>> from scipy.special import chebyt + >>> x = np.arange(-1.0, 1.0, 0.01) + >>> np.allclose(chebyu(3)(x), 2 * chebyt(2)(x) * chebyu(1)(x)) + True + + We can plot the Chebyshev polynomials :math:`U_n` for some values + of :math:`n`: + + >>> x = np.arange(-1.0, 1.0, 0.01) + >>> fig, ax = plt.subplots() + >>> ax.set_ylim(-1.5, 1.5) + >>> ax.set_title(r'Chebyshev polynomials $U_n$') + >>> for n in np.arange(1,5): + ... ax.plot(x, chebyu(n)(x), label=rf'$U_n={n}$') + >>> plt.legend(loc='best') + >>> plt.show() + + """ + base = jacobi(n, 0.5, 0.5, monic=monic) + if monic: + return base + factor = sqrt(pi) / 2.0 * _gam(n + 2) / _gam(n + 1.5) + base._scale(factor) + return base + +# Chebyshev of the first kind C_n(x) + + +def roots_chebyc(n, mu=False): + r"""Gauss-Chebyshev (first kind) quadrature. + + Compute the sample points and weights for Gauss-Chebyshev + quadrature. The sample points are the roots of the nth degree + Chebyshev polynomial of the first kind, :math:`C_n(x)`. 
These + sample points and weights correctly integrate polynomials of + degree :math:`2n - 1` or less over the interval :math:`[-2, 2]` + with weight function :math:`w(x) = 1 / \sqrt{1 - (x/2)^2}`. See + 22.2.6 in [AS]_ for more details. + + Parameters + ---------- + n : int + quadrature order + mu : bool, optional + If True, return the sum of the weights, optional. + + Returns + ------- + x : ndarray + Sample points + w : ndarray + Weights + mu : float + Sum of the weights + + See Also + -------- + scipy.integrate.quadrature + scipy.integrate.fixed_quad + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + """ + x, w, m = roots_chebyt(n, True) + x *= 2 + w *= 2 + m *= 2 + if mu: + return x, w, m + else: + return x, w + + +def chebyc(n, monic=False): + r"""Chebyshev polynomial of the first kind on :math:`[-2, 2]`. + + Defined as :math:`C_n(x) = 2T_n(x/2)`, where :math:`T_n` is the + nth Chebychev polynomial of the first kind. + + Parameters + ---------- + n : int + Degree of the polynomial. + monic : bool, optional + If `True`, scale the leading coefficient to be 1. Default is + `False`. + + Returns + ------- + C : orthopoly1d + Chebyshev polynomial of the first kind on :math:`[-2, 2]`. + + See Also + -------- + chebyt : Chebyshev polynomial of the first kind. + + Notes + ----- + The polynomials :math:`C_n(x)` are orthogonal over :math:`[-2, 2]` + with weight function :math:`1/\sqrt{1 - (x/2)^2}`. + + References + ---------- + .. [1] Abramowitz and Stegun, "Handbook of Mathematical Functions" + Section 22. National Bureau of Standards, 1972. 
+ + """ + if n < 0: + raise ValueError("n must be nonnegative.") + + if n == 0: + n1 = n + 1 + else: + n1 = n + x, w = roots_chebyc(n1) + if n == 0: + x, w = [], [] + hn = 4 * pi * ((n == 0) + 1) + kn = 1.0 + p = orthopoly1d(x, w, hn, kn, + wfunc=lambda x: 1.0 / sqrt(1 - x * x / 4.0), + limits=(-2, 2), monic=monic) + if not monic: + p._scale(2.0 / p(2)) + p.__dict__['_eval_func'] = lambda x: _ufuncs.eval_chebyc(n, x) + return p + +# Chebyshev of the second kind S_n(x) + + +def roots_chebys(n, mu=False): + r"""Gauss-Chebyshev (second kind) quadrature. + + Compute the sample points and weights for Gauss-Chebyshev + quadrature. The sample points are the roots of the nth degree + Chebyshev polynomial of the second kind, :math:`S_n(x)`. These + sample points and weights correctly integrate polynomials of + degree :math:`2n - 1` or less over the interval :math:`[-2, 2]` + with weight function :math:`w(x) = \sqrt{1 - (x/2)^2}`. See 22.2.7 + in [AS]_ for more details. + + Parameters + ---------- + n : int + quadrature order + mu : bool, optional + If True, return the sum of the weights, optional. + + Returns + ------- + x : ndarray + Sample points + w : ndarray + Weights + mu : float + Sum of the weights + + See Also + -------- + scipy.integrate.quadrature + scipy.integrate.fixed_quad + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + """ + x, w, m = roots_chebyu(n, True) + x *= 2 + w *= 2 + m *= 2 + if mu: + return x, w, m + else: + return x, w + + +def chebys(n, monic=False): + r"""Chebyshev polynomial of the second kind on :math:`[-2, 2]`. + + Defined as :math:`S_n(x) = U_n(x/2)` where :math:`U_n` is the + nth Chebychev polynomial of the second kind. + + Parameters + ---------- + n : int + Degree of the polynomial. + monic : bool, optional + If `True`, scale the leading coefficient to be 1. Default is + `False`. 
+ + Returns + ------- + S : orthopoly1d + Chebyshev polynomial of the second kind on :math:`[-2, 2]`. + + See Also + -------- + chebyu : Chebyshev polynomial of the second kind + + Notes + ----- + The polynomials :math:`S_n(x)` are orthogonal over :math:`[-2, 2]` + with weight function :math:`\sqrt{1 - (x/2)}^2`. + + References + ---------- + .. [1] Abramowitz and Stegun, "Handbook of Mathematical Functions" + Section 22. National Bureau of Standards, 1972. + + """ + if n < 0: + raise ValueError("n must be nonnegative.") + + if n == 0: + n1 = n + 1 + else: + n1 = n + x, w = roots_chebys(n1) + if n == 0: + x, w = [], [] + hn = pi + kn = 1.0 + p = orthopoly1d(x, w, hn, kn, + wfunc=lambda x: sqrt(1 - x * x / 4.0), + limits=(-2, 2), monic=monic) + if not monic: + factor = (n + 1.0) / p(2) + p._scale(factor) + p.__dict__['_eval_func'] = lambda x: _ufuncs.eval_chebys(n, x) + return p + +# Shifted Chebyshev of the first kind T^*_n(x) + + +def roots_sh_chebyt(n, mu=False): + r"""Gauss-Chebyshev (first kind, shifted) quadrature. + + Compute the sample points and weights for Gauss-Chebyshev + quadrature. The sample points are the roots of the nth degree + shifted Chebyshev polynomial of the first kind, :math:`T_n(x)`. + These sample points and weights correctly integrate polynomials of + degree :math:`2n - 1` or less over the interval :math:`[0, 1]` + with weight function :math:`w(x) = 1/\sqrt{x - x^2}`. See 22.2.8 + in [AS]_ for more details. + + Parameters + ---------- + n : int + quadrature order + mu : bool, optional + If True, return the sum of the weights, optional. + + Returns + ------- + x : ndarray + Sample points + w : ndarray + Weights + mu : float + Sum of the weights + + See Also + -------- + scipy.integrate.quadrature + scipy.integrate.fixed_quad + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. 
+ + """ + xw = roots_chebyt(n, mu) + return ((xw[0] + 1) / 2,) + xw[1:] + + +def sh_chebyt(n, monic=False): + r"""Shifted Chebyshev polynomial of the first kind. + + Defined as :math:`T^*_n(x) = T_n(2x - 1)` for :math:`T_n` the nth + Chebyshev polynomial of the first kind. + + Parameters + ---------- + n : int + Degree of the polynomial. + monic : bool, optional + If `True`, scale the leading coefficient to be 1. Default is + `False`. + + Returns + ------- + T : orthopoly1d + Shifted Chebyshev polynomial of the first kind. + + Notes + ----- + The polynomials :math:`T^*_n` are orthogonal over :math:`[0, 1]` + with weight function :math:`(x - x^2)^{-1/2}`. + + """ + base = sh_jacobi(n, 0.0, 0.5, monic=monic) + if monic: + return base + if n > 0: + factor = 4**n / 2.0 + else: + factor = 1.0 + base._scale(factor) + return base + + +# Shifted Chebyshev of the second kind U^*_n(x) +def roots_sh_chebyu(n, mu=False): + r"""Gauss-Chebyshev (second kind, shifted) quadrature. + + Computes the sample points and weights for Gauss-Chebyshev + quadrature. The sample points are the roots of the nth degree + shifted Chebyshev polynomial of the second kind, :math:`U_n(x)`. + These sample points and weights correctly integrate polynomials of + degree :math:`2n - 1` or less over the interval :math:`[0, 1]` + with weight function :math:`w(x) = \sqrt{x - x^2}`. See 22.2.9 in + [AS]_ for more details. + + Parameters + ---------- + n : int + quadrature order + mu : bool, optional + If True, return the sum of the weights, optional. + + Returns + ------- + x : ndarray + Sample points + w : ndarray + Weights + mu : float + Sum of the weights + + See Also + -------- + scipy.integrate.quadrature + scipy.integrate.fixed_quad + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. 
+ + """ + x, w, m = roots_chebyu(n, True) + x = (x + 1) / 2 + m_us = _ufuncs.beta(1.5, 1.5) + w *= m_us / m + if mu: + return x, w, m_us + else: + return x, w + + +def sh_chebyu(n, monic=False): + r"""Shifted Chebyshev polynomial of the second kind. + + Defined as :math:`U^*_n(x) = U_n(2x - 1)` for :math:`U_n` the nth + Chebyshev polynomial of the second kind. + + Parameters + ---------- + n : int + Degree of the polynomial. + monic : bool, optional + If `True`, scale the leading coefficient to be 1. Default is + `False`. + + Returns + ------- + U : orthopoly1d + Shifted Chebyshev polynomial of the second kind. + + Notes + ----- + The polynomials :math:`U^*_n` are orthogonal over :math:`[0, 1]` + with weight function :math:`(x - x^2)^{1/2}`. + + """ + base = sh_jacobi(n, 2.0, 1.5, monic=monic) + if monic: + return base + factor = 4**n + base._scale(factor) + return base + +# Legendre + + +def roots_legendre(n, mu=False): + r"""Gauss-Legendre quadrature. + + Compute the sample points and weights for Gauss-Legendre + quadrature [GL]_. The sample points are the roots of the nth degree + Legendre polynomial :math:`P_n(x)`. These sample points and + weights correctly integrate polynomials of degree :math:`2n - 1` + or less over the interval :math:`[-1, 1]` with weight function + :math:`w(x) = 1`. See 2.2.10 in [AS]_ for more details. + + Parameters + ---------- + n : int + quadrature order + mu : bool, optional + If True, return the sum of the weights, optional. + + Returns + ------- + x : ndarray + Sample points + w : ndarray + Weights + mu : float + Sum of the weights + + See Also + -------- + scipy.integrate.quadrature + scipy.integrate.fixed_quad + numpy.polynomial.legendre.leggauss + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + .. 
[GL] Gauss-Legendre quadrature, Wikipedia, + https://en.wikipedia.org/wiki/Gauss%E2%80%93Legendre_quadrature + + Examples + -------- + >>> import numpy as np + >>> from scipy.special import roots_legendre, eval_legendre + >>> roots, weights = roots_legendre(9) + + ``roots`` holds the roots, and ``weights`` holds the weights for + Gauss-Legendre quadrature. + + >>> roots + array([-0.96816024, -0.83603111, -0.61337143, -0.32425342, 0. , + 0.32425342, 0.61337143, 0.83603111, 0.96816024]) + >>> weights + array([0.08127439, 0.18064816, 0.2606107 , 0.31234708, 0.33023936, + 0.31234708, 0.2606107 , 0.18064816, 0.08127439]) + + Verify that we have the roots by evaluating the degree 9 Legendre + polynomial at ``roots``. All the values are approximately zero: + + >>> eval_legendre(9, roots) + array([-8.88178420e-16, -2.22044605e-16, 1.11022302e-16, 1.11022302e-16, + 0.00000000e+00, -5.55111512e-17, -1.94289029e-16, 1.38777878e-16, + -8.32667268e-17]) + + Here we'll show how the above values can be used to estimate the + integral from 1 to 2 of f(t) = t + 1/t with Gauss-Legendre + quadrature [GL]_. First define the function and the integration + limits. + + >>> def f(t): + ... return t + 1/t + ... + >>> a = 1 + >>> b = 2 + + We'll use ``integral(f(t), t=a, t=b)`` to denote the definite integral + of f from t=a to t=b. The sample points in ``roots`` are from the + interval [-1, 1], so we'll rewrite the integral with the simple change + of variable:: + + x = 2/(b - a) * t - (a + b)/(b - a) + + with inverse:: + + t = (b - a)/2 * x + (a + 2)/2 + + Then:: + + integral(f(t), a, b) = + (b - a)/2 * integral(f((b-a)/2*x + (a+b)/2), x=-1, x=1) + + We can approximate the latter integral with the values returned + by `roots_legendre`. + + Map the roots computed above from [-1, 1] to [a, b]. + + >>> t = (b - a)/2 * roots + (a + b)/2 + + Approximate the integral as the weighted sum of the function values. 
+ + >>> (b - a)/2 * f(t).dot(weights) + 2.1931471805599276 + + Compare that to the exact result, which is 3/2 + log(2): + + >>> 1.5 + np.log(2) + 2.1931471805599454 + + """ + m = int(n) + if n < 1 or n != m: + raise ValueError("n must be a positive integer.") + + mu0 = 2.0 + def an_func(k): + return 0.0 * k + def bn_func(k): + return k * np.sqrt(1.0 / (4 * k * k - 1)) + f = _ufuncs.eval_legendre + def df(n, x): + return (-n * x * _ufuncs.eval_legendre(n, x) + + n * _ufuncs.eval_legendre(n - 1, x)) / (1 - x ** 2) + return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu) + + +def legendre(n, monic=False): + r"""Legendre polynomial. + + Defined to be the solution of + + .. math:: + \frac{d}{dx}\left[(1 - x^2)\frac{d}{dx}P_n(x)\right] + + n(n + 1)P_n(x) = 0; + + :math:`P_n(x)` is a polynomial of degree :math:`n`. + + Parameters + ---------- + n : int + Degree of the polynomial. + monic : bool, optional + If `True`, scale the leading coefficient to be 1. Default is + `False`. + + Returns + ------- + P : orthopoly1d + Legendre polynomial. + + Notes + ----- + The polynomials :math:`P_n` are orthogonal over :math:`[-1, 1]` + with weight function 1. + + Examples + -------- + Generate the 3rd-order Legendre polynomial 1/2*(5x^3 + 0x^2 - 3x + 0): + + >>> from scipy.special import legendre + >>> legendre(3) + poly1d([ 2.5, 0. , -1.5, 0. ]) + + """ + if n < 0: + raise ValueError("n must be nonnegative.") + + if n == 0: + n1 = n + 1 + else: + n1 = n + x, w = roots_legendre(n1) + if n == 0: + x, w = [], [] + hn = 2.0 / (2 * n + 1) + kn = _gam(2 * n + 1) / _gam(n + 1)**2 / 2.0**n + p = orthopoly1d(x, w, hn, kn, wfunc=lambda x: 1.0, limits=(-1, 1), + monic=monic, + eval_func=lambda x: _ufuncs.eval_legendre(n, x)) + return p + +# Shifted Legendre P^*_n(x) + + +def roots_sh_legendre(n, mu=False): + r"""Gauss-Legendre (shifted) quadrature. + + Compute the sample points and weights for Gauss-Legendre + quadrature. 
The sample points are the roots of the nth degree + shifted Legendre polynomial :math:`P^*_n(x)`. These sample points + and weights correctly integrate polynomials of degree :math:`2n - + 1` or less over the interval :math:`[0, 1]` with weight function + :math:`w(x) = 1.0`. See 2.2.11 in [AS]_ for details. + + Parameters + ---------- + n : int + quadrature order + mu : bool, optional + If True, return the sum of the weights, optional. + + Returns + ------- + x : ndarray + Sample points + w : ndarray + Weights + mu : float + Sum of the weights + + See Also + -------- + scipy.integrate.quadrature + scipy.integrate.fixed_quad + + References + ---------- + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + """ + x, w = roots_legendre(n) + x = (x + 1) / 2 + w /= 2 + if mu: + return x, w, 1.0 + else: + return x, w + + +def sh_legendre(n, monic=False): + r"""Shifted Legendre polynomial. + + Defined as :math:`P^*_n(x) = P_n(2x - 1)` for :math:`P_n` the nth + Legendre polynomial. + + Parameters + ---------- + n : int + Degree of the polynomial. + monic : bool, optional + If `True`, scale the leading coefficient to be 1. Default is + `False`. + + Returns + ------- + P : orthopoly1d + Shifted Legendre polynomial. + + Notes + ----- + The polynomials :math:`P^*_n` are orthogonal over :math:`[0, 1]` + with weight function 1. 
+ + """ + if n < 0: + raise ValueError("n must be nonnegative.") + + def wfunc(x): + return 0.0 * x + 1.0 + if n == 0: + return orthopoly1d([], [], 1.0, 1.0, wfunc, (0, 1), monic, + lambda x: _ufuncs.eval_sh_legendre(n, x)) + x, w = roots_sh_legendre(n) + hn = 1.0 / (2 * n + 1.0) + kn = _gam(2 * n + 1) / _gam(n + 1)**2 + p = orthopoly1d(x, w, hn, kn, wfunc, limits=(0, 1), monic=monic, + eval_func=lambda x: _ufuncs.eval_sh_legendre(n, x)) + return p + + +# Make the old root function names an alias for the new ones +_modattrs = globals() +for newfun, oldfun in _rootfuns_map.items(): + _modattrs[oldfun] = _modattrs[newfun] + __all__.append(oldfun) diff --git a/venv/lib/python3.10/site-packages/scipy/special/_orthogonal.pyi b/venv/lib/python3.10/site-packages/scipy/special/_orthogonal.pyi new file mode 100644 index 0000000000000000000000000000000000000000..6f9e42f41882e74b1c25691a27318ab7b6421bd0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special/_orthogonal.pyi @@ -0,0 +1,331 @@ +from __future__ import annotations +from typing import ( + Any, + Callable, + Literal, + Optional, + overload, +) + +import numpy + +_IntegerType = int | numpy.integer +_FloatingType = float | numpy.floating +_PointsAndWeights = tuple[numpy.ndarray, numpy.ndarray] +_PointsAndWeightsAndMu = tuple[numpy.ndarray, numpy.ndarray, float] + +_ArrayLike0D = bool | int | float | complex | str | bytes | numpy.generic + +__all__ = [ + 'legendre', + 'chebyt', + 'chebyu', + 'chebyc', + 'chebys', + 'jacobi', + 'laguerre', + 'genlaguerre', + 'hermite', + 'hermitenorm', + 'gegenbauer', + 'sh_legendre', + 'sh_chebyt', + 'sh_chebyu', + 'sh_jacobi', + 'roots_legendre', + 'roots_chebyt', + 'roots_chebyu', + 'roots_chebyc', + 'roots_chebys', + 'roots_jacobi', + 'roots_laguerre', + 'roots_genlaguerre', + 'roots_hermite', + 'roots_hermitenorm', + 'roots_gegenbauer', + 'roots_sh_legendre', + 'roots_sh_chebyt', + 'roots_sh_chebyu', + 'roots_sh_jacobi', +] + +@overload +def roots_jacobi( + n: 
_IntegerType, + alpha: _FloatingType, + beta: _FloatingType, +) -> _PointsAndWeights: ... +@overload +def roots_jacobi( + n: _IntegerType, + alpha: _FloatingType, + beta: _FloatingType, + mu: Literal[False], +) -> _PointsAndWeights: ... +@overload +def roots_jacobi( + n: _IntegerType, + alpha: _FloatingType, + beta: _FloatingType, + mu: Literal[True], +) -> _PointsAndWeightsAndMu: ... + +@overload +def roots_sh_jacobi( + n: _IntegerType, + p1: _FloatingType, + q1: _FloatingType, +) -> _PointsAndWeights: ... +@overload +def roots_sh_jacobi( + n: _IntegerType, + p1: _FloatingType, + q1: _FloatingType, + mu: Literal[False], +) -> _PointsAndWeights: ... +@overload +def roots_sh_jacobi( + n: _IntegerType, + p1: _FloatingType, + q1: _FloatingType, + mu: Literal[True], +) -> _PointsAndWeightsAndMu: ... + +@overload +def roots_genlaguerre( + n: _IntegerType, + alpha: _FloatingType, +) -> _PointsAndWeights: ... +@overload +def roots_genlaguerre( + n: _IntegerType, + alpha: _FloatingType, + mu: Literal[False], +) -> _PointsAndWeights: ... +@overload +def roots_genlaguerre( + n: _IntegerType, + alpha: _FloatingType, + mu: Literal[True], +) -> _PointsAndWeightsAndMu: ... + +@overload +def roots_laguerre(n: _IntegerType) -> _PointsAndWeights: ... +@overload +def roots_laguerre( + n: _IntegerType, + mu: Literal[False], +) -> _PointsAndWeights: ... +@overload +def roots_laguerre( + n: _IntegerType, + mu: Literal[True], +) -> _PointsAndWeightsAndMu: ... + +@overload +def roots_hermite(n: _IntegerType) -> _PointsAndWeights: ... +@overload +def roots_hermite( + n: _IntegerType, + mu: Literal[False], +) -> _PointsAndWeights: ... +@overload +def roots_hermite( + n: _IntegerType, + mu: Literal[True], +) -> _PointsAndWeightsAndMu: ... + +@overload +def roots_hermitenorm(n: _IntegerType) -> _PointsAndWeights: ... +@overload +def roots_hermitenorm( + n: _IntegerType, + mu: Literal[False], +) -> _PointsAndWeights: ... 
+@overload +def roots_hermitenorm( + n: _IntegerType, + mu: Literal[True], +) -> _PointsAndWeightsAndMu: ... + +@overload +def roots_gegenbauer( + n: _IntegerType, + alpha: _FloatingType, +) -> _PointsAndWeights: ... +@overload +def roots_gegenbauer( + n: _IntegerType, + alpha: _FloatingType, + mu: Literal[False], +) -> _PointsAndWeights: ... +@overload +def roots_gegenbauer( + n: _IntegerType, + alpha: _FloatingType, + mu: Literal[True], +) -> _PointsAndWeightsAndMu: ... + +@overload +def roots_chebyt(n: _IntegerType) -> _PointsAndWeights: ... +@overload +def roots_chebyt( + n: _IntegerType, + mu: Literal[False], +) -> _PointsAndWeights: ... +@overload +def roots_chebyt( + n: _IntegerType, + mu: Literal[True], +) -> _PointsAndWeightsAndMu: ... + +@overload +def roots_chebyu(n: _IntegerType) -> _PointsAndWeights: ... +@overload +def roots_chebyu( + n: _IntegerType, + mu: Literal[False], +) -> _PointsAndWeights: ... +@overload +def roots_chebyu( + n: _IntegerType, + mu: Literal[True], +) -> _PointsAndWeightsAndMu: ... + +@overload +def roots_chebyc(n: _IntegerType) -> _PointsAndWeights: ... +@overload +def roots_chebyc( + n: _IntegerType, + mu: Literal[False], +) -> _PointsAndWeights: ... +@overload +def roots_chebyc( + n: _IntegerType, + mu: Literal[True], +) -> _PointsAndWeightsAndMu: ... + +@overload +def roots_chebys(n: _IntegerType) -> _PointsAndWeights: ... +@overload +def roots_chebys( + n: _IntegerType, + mu: Literal[False], +) -> _PointsAndWeights: ... +@overload +def roots_chebys( + n: _IntegerType, + mu: Literal[True], +) -> _PointsAndWeightsAndMu: ... + +@overload +def roots_sh_chebyt(n: _IntegerType) -> _PointsAndWeights: ... +@overload +def roots_sh_chebyt( + n: _IntegerType, + mu: Literal[False], +) -> _PointsAndWeights: ... +@overload +def roots_sh_chebyt( + n: _IntegerType, + mu: Literal[True], +) -> _PointsAndWeightsAndMu: ... + +@overload +def roots_sh_chebyu(n: _IntegerType) -> _PointsAndWeights: ... 
+@overload +def roots_sh_chebyu( + n: _IntegerType, + mu: Literal[False], +) -> _PointsAndWeights: ... +@overload +def roots_sh_chebyu( + n: _IntegerType, + mu: Literal[True], +) -> _PointsAndWeightsAndMu: ... + +@overload +def roots_legendre(n: _IntegerType) -> _PointsAndWeights: ... +@overload +def roots_legendre( + n: _IntegerType, + mu: Literal[False], +) -> _PointsAndWeights: ... +@overload +def roots_legendre( + n: _IntegerType, + mu: Literal[True], +) -> _PointsAndWeightsAndMu: ... + +@overload +def roots_sh_legendre(n: _IntegerType) -> _PointsAndWeights: ... +@overload +def roots_sh_legendre( + n: _IntegerType, + mu: Literal[False], +) -> _PointsAndWeights: ... +@overload +def roots_sh_legendre( + n: _IntegerType, + mu: Literal[True], +) -> _PointsAndWeightsAndMu: ... + +class orthopoly1d(numpy.poly1d): + def __init__( + self, + roots: numpy.typing.ArrayLike, + weights: numpy.typing.ArrayLike | None, + hn: float = ..., + kn: float = ..., + wfunc = Optional[Callable[[float], float]], # noqa: UP007 + limits = tuple[float, float] | None, + monic: bool = ..., + eval_func: numpy.ufunc = ..., + ) -> None: ... + @property + def limits(self) -> tuple[float, float]: ... + def weight_func(self, x: float) -> float: ... + @overload + def __call__(self, x: _ArrayLike0D) -> Any: ... + @overload + def __call__(self, x: numpy.poly1d) -> numpy.poly1d: ... # type: ignore[misc] + @overload + def __call__(self, x: numpy.typing.ArrayLike) -> numpy.ndarray: ... + +def legendre(n: _IntegerType, monic: bool = ...) -> orthopoly1d: ... +def chebyt(n: _IntegerType, monic: bool = ...) -> orthopoly1d: ... +def chebyu(n: _IntegerType, monic: bool = ...) -> orthopoly1d: ... +def chebyc(n: _IntegerType, monic: bool = ...) -> orthopoly1d: ... +def chebys(n: _IntegerType, monic: bool = ...) -> orthopoly1d: ... +def jacobi( + n: _IntegerType, + alpha: _FloatingType, + beta: _FloatingType, + monic: bool = ..., +) -> orthopoly1d: ... +def laguerre(n: _IntegerType, monic: bool = ...) 
-> orthopoly1d: ... +def genlaguerre( + n: _IntegerType, + alpha: _FloatingType, + monic: bool = ..., +) -> orthopoly1d: ... +def hermite(n: _IntegerType, monic: bool = ...) -> orthopoly1d: ... +def hermitenorm(n: _IntegerType, monic: bool = ...) -> orthopoly1d: ... +def gegenbauer( + n: _IntegerType, + alpha: _FloatingType, + monic: bool = ..., +) -> orthopoly1d: ... +def sh_legendre(n: _IntegerType, monic: bool = ...) -> orthopoly1d: ... +def sh_chebyt(n: _IntegerType, monic: bool = ...) -> orthopoly1d: ... +def sh_chebyu(n: _IntegerType, monic: bool = ...) -> orthopoly1d: ... +def sh_jacobi( + n: _IntegerType, + p: _FloatingType, + q: _FloatingType, + monic: bool = ..., +) -> orthopoly1d: ... + +# These functions are not public, but still need stubs because they +# get checked in the tests. +def _roots_hermite_asy(n: _IntegerType) -> _PointsAndWeights: ... diff --git a/venv/lib/python3.10/site-packages/scipy/special/_sf_error.py b/venv/lib/python3.10/site-packages/scipy/special/_sf_error.py new file mode 100644 index 0000000000000000000000000000000000000000..e1edc9800759dfda9e49bde1becc775a64bce958 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special/_sf_error.py @@ -0,0 +1,15 @@ +"""Warnings and Exceptions that can be raised by special functions.""" +import warnings + + +class SpecialFunctionWarning(Warning): + """Warning that can be emitted by special functions.""" + pass + + +warnings.simplefilter("always", category=SpecialFunctionWarning) + + +class SpecialFunctionError(Exception): + """Exception that can be raised by special functions.""" + pass diff --git a/venv/lib/python3.10/site-packages/scipy/special/_specfun.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/special/_specfun.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..5896c9194c56528dbf846b1e680be6ef96823c91 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/scipy/special/_specfun.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/scipy/special/_spfun_stats.py b/venv/lib/python3.10/site-packages/scipy/special/_spfun_stats.py new file mode 100644 index 0000000000000000000000000000000000000000..2525eceb47ec2b20b45ca693e19e741f4a666597 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special/_spfun_stats.py @@ -0,0 +1,106 @@ +# Last Change: Sat Mar 21 02:00 PM 2009 J + +# Copyright (c) 2001, 2002 Enthought, Inc. +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# a. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# b. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# c. Neither the name of the Enthought nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +# DAMAGE. + +"""Some more special functions which may be useful for multivariate statistical +analysis.""" + +import numpy as np +from scipy.special import gammaln as loggam + + +__all__ = ['multigammaln'] + + +def multigammaln(a, d): + r"""Returns the log of multivariate gamma, also sometimes called the + generalized gamma. + + Parameters + ---------- + a : ndarray + The multivariate gamma is computed for each item of `a`. + d : int + The dimension of the space of integration. + + Returns + ------- + res : ndarray + The values of the log multivariate gamma at the given points `a`. + + Notes + ----- + The formal definition of the multivariate gamma of dimension d for a real + `a` is + + .. math:: + + \Gamma_d(a) = \int_{A>0} e^{-tr(A)} |A|^{a - (d+1)/2} dA + + with the condition :math:`a > (d-1)/2`, and :math:`A > 0` being the set of + all the positive definite matrices of dimension `d`. Note that `a` is a + scalar: the integrand only is multivariate, the argument is not (the + function is defined over a subset of the real set). + + This can be proven to be equal to the much friendlier equation + + .. math:: + + \Gamma_d(a) = \pi^{d(d-1)/4} \prod_{i=1}^{d} \Gamma(a - (i-1)/2). + + References + ---------- + R. J. Muirhead, Aspects of multivariate statistical theory (Wiley Series in + probability and mathematical statistics). 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.special import multigammaln, gammaln + >>> a = 23.5 + >>> d = 10 + >>> multigammaln(a, d) + 454.1488605074416 + + Verify that the result agrees with the logarithm of the equation + shown above: + + >>> d*(d-1)/4*np.log(np.pi) + gammaln(a - 0.5*np.arange(0, d)).sum() + 454.1488605074416 + """ + a = np.asarray(a) + if not np.isscalar(d) or (np.floor(d) != d): + raise ValueError("d should be a positive integer (dimension)") + if np.any(a <= 0.5 * (d - 1)): + raise ValueError(f"condition a ({a:f}) > 0.5 * (d-1) ({0.5 * (d-1):f}) not met") + + res = (d * (d-1) * 0.25) * np.log(np.pi) + res += np.sum(loggam([(a - (j - 1.)/2) for j in range(1, d+1)]), axis=0) + return res diff --git a/venv/lib/python3.10/site-packages/scipy/special/_spherical_bessel.py b/venv/lib/python3.10/site-packages/scipy/special/_spherical_bessel.py new file mode 100644 index 0000000000000000000000000000000000000000..1f4feb3fa4a2dfaea75a8e8a37ae3b87565db1bc --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special/_spherical_bessel.py @@ -0,0 +1,354 @@ +import numpy as np +from ._ufuncs import (_spherical_jn, _spherical_yn, _spherical_in, + _spherical_kn, _spherical_jn_d, _spherical_yn_d, + _spherical_in_d, _spherical_kn_d) + +def spherical_jn(n, z, derivative=False): + r"""Spherical Bessel function of the first kind or its derivative. + + Defined as [1]_, + + .. math:: j_n(z) = \sqrt{\frac{\pi}{2z}} J_{n + 1/2}(z), + + where :math:`J_n` is the Bessel function of the first kind. + + Parameters + ---------- + n : int, array_like + Order of the Bessel function (n >= 0). + z : complex or float, array_like + Argument of the Bessel function. + derivative : bool, optional + If True, the value of the derivative (rather than the function + itself) is returned. + + Returns + ------- + jn : ndarray + + Notes + ----- + For real arguments greater than the order, the function is computed + using the ascending recurrence [2]_. 
For small real or complex + arguments, the definitional relation to the cylindrical Bessel function + of the first kind is used. + + The derivative is computed using the relations [3]_, + + .. math:: + j_n'(z) = j_{n-1}(z) - \frac{n + 1}{z} j_n(z). + + j_0'(z) = -j_1(z) + + + .. versionadded:: 0.18.0 + + References + ---------- + .. [1] https://dlmf.nist.gov/10.47.E3 + .. [2] https://dlmf.nist.gov/10.51.E1 + .. [3] https://dlmf.nist.gov/10.51.E2 + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + Examples + -------- + The spherical Bessel functions of the first kind :math:`j_n` accept + both real and complex second argument. They can return a complex type: + + >>> from scipy.special import spherical_jn + >>> spherical_jn(0, 3+5j) + (-9.878987731663194-8.021894345786002j) + >>> type(spherical_jn(0, 3+5j)) + + + We can verify the relation for the derivative from the Notes + for :math:`n=3` in the interval :math:`[1, 2]`: + + >>> import numpy as np + >>> x = np.arange(1.0, 2.0, 0.01) + >>> np.allclose(spherical_jn(3, x, True), + ... spherical_jn(2, x) - 4/x * spherical_jn(3, x)) + True + + The first few :math:`j_n` with real argument: + + >>> import matplotlib.pyplot as plt + >>> x = np.arange(0.0, 10.0, 0.01) + >>> fig, ax = plt.subplots() + >>> ax.set_ylim(-0.5, 1.5) + >>> ax.set_title(r'Spherical Bessel functions $j_n$') + >>> for n in np.arange(0, 4): + ... ax.plot(x, spherical_jn(n, x), label=rf'$j_{n}$') + >>> plt.legend(loc='best') + >>> plt.show() + + """ + n = np.asarray(n, dtype=np.dtype("long")) + if derivative: + return _spherical_jn_d(n, z) + else: + return _spherical_jn(n, z) + + +def spherical_yn(n, z, derivative=False): + r"""Spherical Bessel function of the second kind or its derivative. + + Defined as [1]_, + + .. math:: y_n(z) = \sqrt{\frac{\pi}{2z}} Y_{n + 1/2}(z), + + where :math:`Y_n` is the Bessel function of the second kind. 
+ + Parameters + ---------- + n : int, array_like + Order of the Bessel function (n >= 0). + z : complex or float, array_like + Argument of the Bessel function. + derivative : bool, optional + If True, the value of the derivative (rather than the function + itself) is returned. + + Returns + ------- + yn : ndarray + + Notes + ----- + For real arguments, the function is computed using the ascending + recurrence [2]_. For complex arguments, the definitional relation to + the cylindrical Bessel function of the second kind is used. + + The derivative is computed using the relations [3]_, + + .. math:: + y_n' = y_{n-1} - \frac{n + 1}{z} y_n. + + y_0' = -y_1 + + + .. versionadded:: 0.18.0 + + References + ---------- + .. [1] https://dlmf.nist.gov/10.47.E4 + .. [2] https://dlmf.nist.gov/10.51.E1 + .. [3] https://dlmf.nist.gov/10.51.E2 + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + Examples + -------- + The spherical Bessel functions of the second kind :math:`y_n` accept + both real and complex second argument. They can return a complex type: + + >>> from scipy.special import spherical_yn + >>> spherical_yn(0, 3+5j) + (8.022343088587197-9.880052589376795j) + >>> type(spherical_yn(0, 3+5j)) + + + We can verify the relation for the derivative from the Notes + for :math:`n=3` in the interval :math:`[1, 2]`: + + >>> import numpy as np + >>> x = np.arange(1.0, 2.0, 0.01) + >>> np.allclose(spherical_yn(3, x, True), + ... spherical_yn(2, x) - 4/x * spherical_yn(3, x)) + True + + The first few :math:`y_n` with real argument: + + >>> import matplotlib.pyplot as plt + >>> x = np.arange(0.0, 10.0, 0.01) + >>> fig, ax = plt.subplots() + >>> ax.set_ylim(-2.0, 1.0) + >>> ax.set_title(r'Spherical Bessel functions $y_n$') + >>> for n in np.arange(0, 4): + ... 
ax.plot(x, spherical_yn(n, x), label=rf'$y_{n}$') + >>> plt.legend(loc='best') + >>> plt.show() + + """ + n = np.asarray(n, dtype=np.dtype("long")) + if derivative: + return _spherical_yn_d(n, z) + else: + return _spherical_yn(n, z) + + +def spherical_in(n, z, derivative=False): + r"""Modified spherical Bessel function of the first kind or its derivative. + + Defined as [1]_, + + .. math:: i_n(z) = \sqrt{\frac{\pi}{2z}} I_{n + 1/2}(z), + + where :math:`I_n` is the modified Bessel function of the first kind. + + Parameters + ---------- + n : int, array_like + Order of the Bessel function (n >= 0). + z : complex or float, array_like + Argument of the Bessel function. + derivative : bool, optional + If True, the value of the derivative (rather than the function + itself) is returned. + + Returns + ------- + in : ndarray + + Notes + ----- + The function is computed using its definitional relation to the + modified cylindrical Bessel function of the first kind. + + The derivative is computed using the relations [2]_, + + .. math:: + i_n' = i_{n-1} - \frac{n + 1}{z} i_n. + + i_1' = i_0 + + + .. versionadded:: 0.18.0 + + References + ---------- + .. [1] https://dlmf.nist.gov/10.47.E7 + .. [2] https://dlmf.nist.gov/10.51.E5 + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + Examples + -------- + The modified spherical Bessel functions of the first kind :math:`i_n` + accept both real and complex second argument. + They can return a complex type: + + >>> from scipy.special import spherical_in + >>> spherical_in(0, 3+5j) + (-1.1689867793369182-1.2697305267234222j) + >>> type(spherical_in(0, 3+5j)) + + + We can verify the relation for the derivative from the Notes + for :math:`n=3` in the interval :math:`[1, 2]`: + + >>> import numpy as np + >>> x = np.arange(1.0, 2.0, 0.01) + >>> np.allclose(spherical_in(3, x, True), + ... 
spherical_in(2, x) - 4/x * spherical_in(3, x)) + True + + The first few :math:`i_n` with real argument: + + >>> import matplotlib.pyplot as plt + >>> x = np.arange(0.0, 6.0, 0.01) + >>> fig, ax = plt.subplots() + >>> ax.set_ylim(-0.5, 5.0) + >>> ax.set_title(r'Modified spherical Bessel functions $i_n$') + >>> for n in np.arange(0, 4): + ... ax.plot(x, spherical_in(n, x), label=rf'$i_{n}$') + >>> plt.legend(loc='best') + >>> plt.show() + + """ + n = np.asarray(n, dtype=np.dtype("long")) + if derivative: + return _spherical_in_d(n, z) + else: + return _spherical_in(n, z) + + +def spherical_kn(n, z, derivative=False): + r"""Modified spherical Bessel function of the second kind or its derivative. + + Defined as [1]_, + + .. math:: k_n(z) = \sqrt{\frac{\pi}{2z}} K_{n + 1/2}(z), + + where :math:`K_n` is the modified Bessel function of the second kind. + + Parameters + ---------- + n : int, array_like + Order of the Bessel function (n >= 0). + z : complex or float, array_like + Argument of the Bessel function. + derivative : bool, optional + If True, the value of the derivative (rather than the function + itself) is returned. + + Returns + ------- + kn : ndarray + + Notes + ----- + The function is computed using its definitional relation to the + modified cylindrical Bessel function of the second kind. + + The derivative is computed using the relations [2]_, + + .. math:: + k_n' = -k_{n-1} - \frac{n + 1}{z} k_n. + + k_0' = -k_1 + + + .. versionadded:: 0.18.0 + + References + ---------- + .. [1] https://dlmf.nist.gov/10.47.E9 + .. [2] https://dlmf.nist.gov/10.51.E5 + .. [AS] Milton Abramowitz and Irene A. Stegun, eds. + Handbook of Mathematical Functions with Formulas, + Graphs, and Mathematical Tables. New York: Dover, 1972. + + Examples + -------- + The modified spherical Bessel functions of the second kind :math:`k_n` + accept both real and complex second argument. 
+ They can return a complex type: + + >>> from scipy.special import spherical_kn + >>> spherical_kn(0, 3+5j) + (0.012985785614001561+0.003354691603137546j) + >>> type(spherical_kn(0, 3+5j)) + + + We can verify the relation for the derivative from the Notes + for :math:`n=3` in the interval :math:`[1, 2]`: + + >>> import numpy as np + >>> x = np.arange(1.0, 2.0, 0.01) + >>> np.allclose(spherical_kn(3, x, True), + ... - 4/x * spherical_kn(3, x) - spherical_kn(2, x)) + True + + The first few :math:`k_n` with real argument: + + >>> import matplotlib.pyplot as plt + >>> x = np.arange(0.0, 4.0, 0.01) + >>> fig, ax = plt.subplots() + >>> ax.set_ylim(0.0, 5.0) + >>> ax.set_title(r'Modified spherical Bessel functions $k_n$') + >>> for n in np.arange(0, 4): + ... ax.plot(x, spherical_kn(n, x), label=rf'$k_{n}$') + >>> plt.legend(loc='best') + >>> plt.show() + + """ + n = np.asarray(n, dtype=np.dtype("long")) + if derivative: + return _spherical_kn_d(n, z) + else: + return _spherical_kn(n, z) diff --git a/venv/lib/python3.10/site-packages/scipy/special/_support_alternative_backends.py b/venv/lib/python3.10/site-packages/scipy/special/_support_alternative_backends.py new file mode 100644 index 0000000000000000000000000000000000000000..f1b8ba28c2fe95947ae5a9391148aeb7df951159 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special/_support_alternative_backends.py @@ -0,0 +1,75 @@ +import os +import sys +import functools + +import numpy as np +from scipy._lib._array_api import array_namespace, is_cupy, is_torch, is_numpy +from . 
import _ufuncs +# These don't really need to be imported, but otherwise IDEs might not realize +# that these are defined in this file / report an error in __init__.py +from ._ufuncs import ( + log_ndtr, ndtr, ndtri, erf, erfc, i0, i0e, i1, i1e, # noqa: F401 + gammaln, gammainc, gammaincc, logit, expit) # noqa: F401 + +_SCIPY_ARRAY_API = os.environ.get("SCIPY_ARRAY_API", False) +array_api_compat_prefix = "scipy._lib.array_api_compat" + + +def get_array_special_func(f_name, xp, n_array_args): + if is_numpy(xp): + f = getattr(_ufuncs, f_name, None) + elif is_torch(xp): + f = getattr(xp.special, f_name, None) + elif is_cupy(xp): + import cupyx # type: ignore[import] + f = getattr(cupyx.scipy.special, f_name, None) + elif xp.__name__ == f"{array_api_compat_prefix}.jax": + f = getattr(xp.scipy.special, f_name, None) + else: + f_scipy = getattr(_ufuncs, f_name, None) + def f(*args, **kwargs): + array_args = args[:n_array_args] + other_args = args[n_array_args:] + array_args = [np.asarray(arg) for arg in array_args] + out = f_scipy(*array_args, *other_args, **kwargs) + return xp.asarray(out) + + return f + +# functools.wraps doesn't work because: +# 'numpy.ufunc' object has no attribute '__module__' +def support_alternative_backends(f_name, n_array_args): + func = getattr(_ufuncs, f_name) + + @functools.wraps(func) + def wrapped(*args, **kwargs): + xp = array_namespace(*args[:n_array_args]) + f = get_array_special_func(f_name, xp, n_array_args) + return f(*args, **kwargs) + + return wrapped + + +array_special_func_map = { + 'log_ndtr': 1, + 'ndtr': 1, + 'ndtri': 1, + 'erf': 1, + 'erfc': 1, + 'i0': 1, + 'i0e': 1, + 'i1': 1, + 'i1e': 1, + 'gammaln': 1, + 'gammainc': 2, + 'gammaincc': 2, + 'logit': 1, + 'expit': 1, +} + +for f_name, n_array_args in array_special_func_map.items(): + f = (support_alternative_backends(f_name, n_array_args) if _SCIPY_ARRAY_API + else getattr(_ufuncs, f_name)) + sys.modules[__name__].__dict__[f_name] = f + +__all__ = list(array_special_func_map) 
diff --git a/venv/lib/python3.10/site-packages/scipy/special/_test_internal.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/special/_test_internal.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..ef40abc66f4b5411f0eb395cd0cf8b96ba24e008 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/special/_test_internal.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/scipy/special/_test_internal.pyi b/venv/lib/python3.10/site-packages/scipy/special/_test_internal.pyi new file mode 100644 index 0000000000000000000000000000000000000000..0e209e366f0b37415159083434a053545bc78fae --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special/_test_internal.pyi @@ -0,0 +1,9 @@ +import numpy as np + +def have_fenv() -> bool: ... +def random_double(size: int) -> np.float64: ... +def test_add_round(size: int, mode: str): ... + +def _dd_exp(xhi: float, xlo: float) -> tuple[float, float]: ... +def _dd_log(xhi: float, xlo: float) -> tuple[float, float]: ... +def _dd_expm1(xhi: float, xlo: float) -> tuple[float, float]: ... 
diff --git a/venv/lib/python3.10/site-packages/scipy/special/_testutils.py b/venv/lib/python3.10/site-packages/scipy/special/_testutils.py new file mode 100644 index 0000000000000000000000000000000000000000..68c1eb3611143b3d6a4b7c02ca492d3b5d03bbcc --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special/_testutils.py @@ -0,0 +1,321 @@ +import os +import functools +import operator +from scipy._lib import _pep440 + +import numpy as np +from numpy.testing import assert_ +import pytest + +import scipy.special as sc + +__all__ = ['with_special_errors', 'assert_func_equal', 'FuncData'] + + +#------------------------------------------------------------------------------ +# Check if a module is present to be used in tests +#------------------------------------------------------------------------------ + +class MissingModule: + def __init__(self, name): + self.name = name + + +def check_version(module, min_ver): + if type(module) == MissingModule: + return pytest.mark.skip(reason=f"{module.name} is not installed") + return pytest.mark.skipif( + _pep440.parse(module.__version__) < _pep440.Version(min_ver), + reason=f"{module.__name__} version >= {min_ver} required" + ) + + +#------------------------------------------------------------------------------ +# Enable convergence and loss of precision warnings -- turn off one by one +#------------------------------------------------------------------------------ + +def with_special_errors(func): + """ + Enable special function errors (such as underflow, overflow, + loss of precision, etc.) 
+ """ + @functools.wraps(func) + def wrapper(*a, **kw): + with sc.errstate(all='raise'): + res = func(*a, **kw) + return res + return wrapper + + +#------------------------------------------------------------------------------ +# Comparing function values at many data points at once, with helpful +# error reports +#------------------------------------------------------------------------------ + +def assert_func_equal(func, results, points, rtol=None, atol=None, + param_filter=None, knownfailure=None, + vectorized=True, dtype=None, nan_ok=False, + ignore_inf_sign=False, distinguish_nan_and_inf=True): + if hasattr(points, 'next'): + # it's a generator + points = list(points) + + points = np.asarray(points) + if points.ndim == 1: + points = points[:,None] + nparams = points.shape[1] + + if hasattr(results, '__name__'): + # function + data = points + result_columns = None + result_func = results + else: + # dataset + data = np.c_[points, results] + result_columns = list(range(nparams, data.shape[1])) + result_func = None + + fdata = FuncData(func, data, list(range(nparams)), + result_columns=result_columns, result_func=result_func, + rtol=rtol, atol=atol, param_filter=param_filter, + knownfailure=knownfailure, nan_ok=nan_ok, vectorized=vectorized, + ignore_inf_sign=ignore_inf_sign, + distinguish_nan_and_inf=distinguish_nan_and_inf) + fdata.check() + + +class FuncData: + """ + Data set for checking a special function. + + Parameters + ---------- + func : function + Function to test + data : numpy array + columnar data to use for testing + param_columns : int or tuple of ints + Columns indices in which the parameters to `func` lie. + Can be imaginary integers to indicate that the parameter + should be cast to complex. + result_columns : int or tuple of ints, optional + Column indices for expected results from `func`. + result_func : callable, optional + Function to call to obtain results. + rtol : float, optional + Required relative tolerance. Default is 5*eps. 
+ atol : float, optional + Required absolute tolerance. Default is 5*tiny. + param_filter : function, or tuple of functions/Nones, optional + Filter functions to exclude some parameter ranges. + If omitted, no filtering is done. + knownfailure : str, optional + Known failure error message to raise when the test is run. + If omitted, no exception is raised. + nan_ok : bool, optional + If nan is always an accepted result. + vectorized : bool, optional + Whether all functions passed in are vectorized. + ignore_inf_sign : bool, optional + Whether to ignore signs of infinities. + (Doesn't matter for complex-valued functions.) + distinguish_nan_and_inf : bool, optional + If True, treat numbers which contain nans or infs as + equal. Sets ignore_inf_sign to be True. + + """ + + def __init__(self, func, data, param_columns, result_columns=None, + result_func=None, rtol=None, atol=None, param_filter=None, + knownfailure=None, dataname=None, nan_ok=False, vectorized=True, + ignore_inf_sign=False, distinguish_nan_and_inf=True): + self.func = func + self.data = data + self.dataname = dataname + if not hasattr(param_columns, '__len__'): + param_columns = (param_columns,) + self.param_columns = tuple(param_columns) + if result_columns is not None: + if not hasattr(result_columns, '__len__'): + result_columns = (result_columns,) + self.result_columns = tuple(result_columns) + if result_func is not None: + message = "Only result_func or result_columns should be provided" + raise ValueError(message) + elif result_func is not None: + self.result_columns = None + else: + raise ValueError("Either result_func or result_columns should be provided") + self.result_func = result_func + self.rtol = rtol + self.atol = atol + if not hasattr(param_filter, '__len__'): + param_filter = (param_filter,) + self.param_filter = param_filter + self.knownfailure = knownfailure + self.nan_ok = nan_ok + self.vectorized = vectorized + self.ignore_inf_sign = ignore_inf_sign + self.distinguish_nan_and_inf = 
distinguish_nan_and_inf + if not self.distinguish_nan_and_inf: + self.ignore_inf_sign = True + + def get_tolerances(self, dtype): + if not np.issubdtype(dtype, np.inexact): + dtype = np.dtype(float) + info = np.finfo(dtype) + rtol, atol = self.rtol, self.atol + if rtol is None: + rtol = 5*info.eps + if atol is None: + atol = 5*info.tiny + return rtol, atol + + def check(self, data=None, dtype=None, dtypes=None): + """Check the special function against the data.""" + __tracebackhide__ = operator.methodcaller( + 'errisinstance', AssertionError + ) + + if self.knownfailure: + pytest.xfail(reason=self.knownfailure) + + if data is None: + data = self.data + + if dtype is None: + dtype = data.dtype + else: + data = data.astype(dtype) + + rtol, atol = self.get_tolerances(dtype) + + # Apply given filter functions + if self.param_filter: + param_mask = np.ones((data.shape[0],), np.bool_) + for j, filter in zip(self.param_columns, self.param_filter): + if filter: + param_mask &= list(filter(data[:,j])) + data = data[param_mask] + + # Pick parameters from the correct columns + params = [] + for idx, j in enumerate(self.param_columns): + if np.iscomplexobj(j): + j = int(j.imag) + params.append(data[:,j].astype(complex)) + elif dtypes and idx < len(dtypes): + params.append(data[:, j].astype(dtypes[idx])) + else: + params.append(data[:,j]) + + # Helper for evaluating results + def eval_func_at_params(func, skip_mask=None): + if self.vectorized: + got = func(*params) + else: + got = [] + for j in range(len(params[0])): + if skip_mask is not None and skip_mask[j]: + got.append(np.nan) + continue + got.append(func(*tuple([params[i][j] for i in range(len(params))]))) + got = np.asarray(got) + if not isinstance(got, tuple): + got = (got,) + return got + + # Evaluate function to be tested + got = eval_func_at_params(self.func) + + # Grab the correct results + if self.result_columns is not None: + # Correct results passed in with the data + wanted = tuple([data[:,icol] for icol in 
self.result_columns]) + else: + # Function producing correct results passed in + skip_mask = None + if self.nan_ok and len(got) == 1: + # Don't spend time evaluating what doesn't need to be evaluated + skip_mask = np.isnan(got[0]) + wanted = eval_func_at_params(self.result_func, skip_mask=skip_mask) + + # Check the validity of each output returned + assert_(len(got) == len(wanted)) + + for output_num, (x, y) in enumerate(zip(got, wanted)): + if np.issubdtype(x.dtype, np.complexfloating) or self.ignore_inf_sign: + pinf_x = np.isinf(x) + pinf_y = np.isinf(y) + minf_x = np.isinf(x) + minf_y = np.isinf(y) + else: + pinf_x = np.isposinf(x) + pinf_y = np.isposinf(y) + minf_x = np.isneginf(x) + minf_y = np.isneginf(y) + nan_x = np.isnan(x) + nan_y = np.isnan(y) + + with np.errstate(all='ignore'): + abs_y = np.absolute(y) + abs_y[~np.isfinite(abs_y)] = 0 + diff = np.absolute(x - y) + diff[~np.isfinite(diff)] = 0 + + rdiff = diff / np.absolute(y) + rdiff[~np.isfinite(rdiff)] = 0 + + tol_mask = (diff <= atol + rtol*abs_y) + pinf_mask = (pinf_x == pinf_y) + minf_mask = (minf_x == minf_y) + + nan_mask = (nan_x == nan_y) + + bad_j = ~(tol_mask & pinf_mask & minf_mask & nan_mask) + + point_count = bad_j.size + if self.nan_ok: + bad_j &= ~nan_x + bad_j &= ~nan_y + point_count -= (nan_x | nan_y).sum() + + if not self.distinguish_nan_and_inf and not self.nan_ok: + # If nan's are okay we've already covered all these cases + inf_x = np.isinf(x) + inf_y = np.isinf(y) + both_nonfinite = (inf_x & nan_y) | (nan_x & inf_y) + bad_j &= ~both_nonfinite + point_count -= both_nonfinite.sum() + + if np.any(bad_j): + # Some bad results: inform what, where, and how bad + msg = [""] + msg.append("Max |adiff|: %g" % diff[bad_j].max()) + msg.append("Max |rdiff|: %g" % rdiff[bad_j].max()) + msg.append("Bad results (%d out of %d) for the following points " + "(in output %d):" + % (np.sum(bad_j), point_count, output_num,)) + for j in np.nonzero(bad_j)[0]: + j = int(j) + def fmt(x): + return '%30s' % 
np.array2string(x[j], precision=18) + a = " ".join(map(fmt, params)) + b = " ".join(map(fmt, got)) + c = " ".join(map(fmt, wanted)) + d = fmt(rdiff) + msg.append(f"{a} => {b} != {c} (rdiff {d})") + assert_(False, "\n".join(msg)) + + def __repr__(self): + """Pretty-printing, esp. for Nose output""" + if np.any(list(map(np.iscomplexobj, self.param_columns))): + is_complex = " (complex)" + else: + is_complex = "" + if self.dataname: + return "".format(self.func.__name__, is_complex, + os.path.basename(self.dataname)) + else: + return f"" diff --git a/venv/lib/python3.10/site-packages/scipy/special/_ufuncs.pyi b/venv/lib/python3.10/site-packages/scipy/special/_ufuncs.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ab1738021e174d176be0522c267bfe3e245a9c19 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special/_ufuncs.pyi @@ -0,0 +1,526 @@ +# This file is automatically generated by _generate_pyx.py. +# Do not edit manually! + +from typing import Any, Dict + +import numpy as np + +__all__ = [ + 'geterr', + 'seterr', + 'errstate', + 'agm', + 'airy', + 'airye', + 'bdtr', + 'bdtrc', + 'bdtri', + 'bdtrik', + 'bdtrin', + 'bei', + 'beip', + 'ber', + 'berp', + 'besselpoly', + 'beta', + 'betainc', + 'betaincc', + 'betainccinv', + 'betaincinv', + 'betaln', + 'binom', + 'boxcox', + 'boxcox1p', + 'btdtr', + 'btdtri', + 'btdtria', + 'btdtrib', + 'cbrt', + 'chdtr', + 'chdtrc', + 'chdtri', + 'chdtriv', + 'chndtr', + 'chndtridf', + 'chndtrinc', + 'chndtrix', + 'cosdg', + 'cosm1', + 'cotdg', + 'dawsn', + 'ellipe', + 'ellipeinc', + 'ellipj', + 'ellipk', + 'ellipkinc', + 'ellipkm1', + 'elliprc', + 'elliprd', + 'elliprf', + 'elliprg', + 'elliprj', + 'entr', + 'erf', + 'erfc', + 'erfcinv', + 'erfcx', + 'erfi', + 'erfinv', + 'eval_chebyc', + 'eval_chebys', + 'eval_chebyt', + 'eval_chebyu', + 'eval_gegenbauer', + 'eval_genlaguerre', + 'eval_hermite', + 'eval_hermitenorm', + 'eval_jacobi', + 'eval_laguerre', + 'eval_legendre', + 'eval_sh_chebyt', + 
'eval_sh_chebyu', + 'eval_sh_jacobi', + 'eval_sh_legendre', + 'exp1', + 'exp10', + 'exp2', + 'expi', + 'expit', + 'expm1', + 'expn', + 'exprel', + 'fdtr', + 'fdtrc', + 'fdtri', + 'fdtridfd', + 'fresnel', + 'gamma', + 'gammainc', + 'gammaincc', + 'gammainccinv', + 'gammaincinv', + 'gammaln', + 'gammasgn', + 'gdtr', + 'gdtrc', + 'gdtria', + 'gdtrib', + 'gdtrix', + 'hankel1', + 'hankel1e', + 'hankel2', + 'hankel2e', + 'huber', + 'hyp0f1', + 'hyp1f1', + 'hyp2f1', + 'hyperu', + 'i0', + 'i0e', + 'i1', + 'i1e', + 'inv_boxcox', + 'inv_boxcox1p', + 'it2i0k0', + 'it2j0y0', + 'it2struve0', + 'itairy', + 'iti0k0', + 'itj0y0', + 'itmodstruve0', + 'itstruve0', + 'iv', + 'ive', + 'j0', + 'j1', + 'jn', + 'jv', + 'jve', + 'k0', + 'k0e', + 'k1', + 'k1e', + 'kei', + 'keip', + 'kelvin', + 'ker', + 'kerp', + 'kl_div', + 'kn', + 'kolmogi', + 'kolmogorov', + 'kv', + 'kve', + 'log1p', + 'log_expit', + 'log_ndtr', + 'loggamma', + 'logit', + 'lpmv', + 'mathieu_a', + 'mathieu_b', + 'mathieu_cem', + 'mathieu_modcem1', + 'mathieu_modcem2', + 'mathieu_modsem1', + 'mathieu_modsem2', + 'mathieu_sem', + 'modfresnelm', + 'modfresnelp', + 'modstruve', + 'nbdtr', + 'nbdtrc', + 'nbdtri', + 'nbdtrik', + 'nbdtrin', + 'ncfdtr', + 'ncfdtri', + 'ncfdtridfd', + 'ncfdtridfn', + 'ncfdtrinc', + 'nctdtr', + 'nctdtridf', + 'nctdtrinc', + 'nctdtrit', + 'ndtr', + 'ndtri', + 'ndtri_exp', + 'nrdtrimn', + 'nrdtrisd', + 'obl_ang1', + 'obl_ang1_cv', + 'obl_cv', + 'obl_rad1', + 'obl_rad1_cv', + 'obl_rad2', + 'obl_rad2_cv', + 'owens_t', + 'pbdv', + 'pbvv', + 'pbwa', + 'pdtr', + 'pdtrc', + 'pdtri', + 'pdtrik', + 'poch', + 'powm1', + 'pro_ang1', + 'pro_ang1_cv', + 'pro_cv', + 'pro_rad1', + 'pro_rad1_cv', + 'pro_rad2', + 'pro_rad2_cv', + 'pseudo_huber', + 'psi', + 'radian', + 'rel_entr', + 'rgamma', + 'round', + 'shichi', + 'sici', + 'sindg', + 'smirnov', + 'smirnovi', + 'spence', + 'sph_harm', + 'stdtr', + 'stdtridf', + 'stdtrit', + 'struve', + 'tandg', + 'tklmbda', + 'voigt_profile', + 'wofz', + 'wright_bessel', + 
'wrightomega', + 'xlog1py', + 'xlogy', + 'y0', + 'y1', + 'yn', + 'yv', + 'yve', + 'zetac' +] + +def geterr() -> Dict[str, str]: ... +def seterr(**kwargs: str) -> Dict[str, str]: ... + +class errstate: + def __init__(self, **kargs: str) -> None: ... + def __enter__(self) -> None: ... + def __exit__( + self, + exc_type: Any, # Unused + exc_value: Any, # Unused + traceback: Any, # Unused + ) -> None: ... + +_cosine_cdf: np.ufunc +_cosine_invcdf: np.ufunc +_cospi: np.ufunc +_ellip_harm: np.ufunc +_factorial: np.ufunc +_igam_fac: np.ufunc +_kolmogc: np.ufunc +_kolmogci: np.ufunc +_kolmogp: np.ufunc +_lambertw: np.ufunc +_lanczos_sum_expg_scaled: np.ufunc +_lgam1p: np.ufunc +_log1pmx: np.ufunc +_riemann_zeta: np.ufunc +_scaled_exp1: np.ufunc +_sf_error_test_function: np.ufunc +_sinpi: np.ufunc +_smirnovc: np.ufunc +_smirnovci: np.ufunc +_smirnovp: np.ufunc +_spherical_in: np.ufunc +_spherical_in_d: np.ufunc +_spherical_jn: np.ufunc +_spherical_jn_d: np.ufunc +_spherical_kn: np.ufunc +_spherical_kn_d: np.ufunc +_spherical_yn: np.ufunc +_spherical_yn_d: np.ufunc +_stirling2_inexact: np.ufunc +_struve_asymp_large_z: np.ufunc +_struve_bessel_series: np.ufunc +_struve_power_series: np.ufunc +_zeta: np.ufunc +agm: np.ufunc +airy: np.ufunc +airye: np.ufunc +bdtr: np.ufunc +bdtrc: np.ufunc +bdtri: np.ufunc +bdtrik: np.ufunc +bdtrin: np.ufunc +bei: np.ufunc +beip: np.ufunc +ber: np.ufunc +berp: np.ufunc +besselpoly: np.ufunc +beta: np.ufunc +betainc: np.ufunc +betaincc: np.ufunc +betainccinv: np.ufunc +betaincinv: np.ufunc +betaln: np.ufunc +binom: np.ufunc +boxcox1p: np.ufunc +boxcox: np.ufunc +btdtr: np.ufunc +btdtri: np.ufunc +btdtria: np.ufunc +btdtrib: np.ufunc +cbrt: np.ufunc +chdtr: np.ufunc +chdtrc: np.ufunc +chdtri: np.ufunc +chdtriv: np.ufunc +chndtr: np.ufunc +chndtridf: np.ufunc +chndtrinc: np.ufunc +chndtrix: np.ufunc +cosdg: np.ufunc +cosm1: np.ufunc +cotdg: np.ufunc +dawsn: np.ufunc +ellipe: np.ufunc +ellipeinc: np.ufunc +ellipj: np.ufunc +ellipk: np.ufunc 
+ellipkinc: np.ufunc +ellipkm1: np.ufunc +elliprc: np.ufunc +elliprd: np.ufunc +elliprf: np.ufunc +elliprg: np.ufunc +elliprj: np.ufunc +entr: np.ufunc +erf: np.ufunc +erfc: np.ufunc +erfcinv: np.ufunc +erfcx: np.ufunc +erfi: np.ufunc +erfinv: np.ufunc +eval_chebyc: np.ufunc +eval_chebys: np.ufunc +eval_chebyt: np.ufunc +eval_chebyu: np.ufunc +eval_gegenbauer: np.ufunc +eval_genlaguerre: np.ufunc +eval_hermite: np.ufunc +eval_hermitenorm: np.ufunc +eval_jacobi: np.ufunc +eval_laguerre: np.ufunc +eval_legendre: np.ufunc +eval_sh_chebyt: np.ufunc +eval_sh_chebyu: np.ufunc +eval_sh_jacobi: np.ufunc +eval_sh_legendre: np.ufunc +exp10: np.ufunc +exp1: np.ufunc +exp2: np.ufunc +expi: np.ufunc +expit: np.ufunc +expm1: np.ufunc +expn: np.ufunc +exprel: np.ufunc +fdtr: np.ufunc +fdtrc: np.ufunc +fdtri: np.ufunc +fdtridfd: np.ufunc +fresnel: np.ufunc +gamma: np.ufunc +gammainc: np.ufunc +gammaincc: np.ufunc +gammainccinv: np.ufunc +gammaincinv: np.ufunc +gammaln: np.ufunc +gammasgn: np.ufunc +gdtr: np.ufunc +gdtrc: np.ufunc +gdtria: np.ufunc +gdtrib: np.ufunc +gdtrix: np.ufunc +hankel1: np.ufunc +hankel1e: np.ufunc +hankel2: np.ufunc +hankel2e: np.ufunc +huber: np.ufunc +hyp0f1: np.ufunc +hyp1f1: np.ufunc +hyp2f1: np.ufunc +hyperu: np.ufunc +i0: np.ufunc +i0e: np.ufunc +i1: np.ufunc +i1e: np.ufunc +inv_boxcox1p: np.ufunc +inv_boxcox: np.ufunc +it2i0k0: np.ufunc +it2j0y0: np.ufunc +it2struve0: np.ufunc +itairy: np.ufunc +iti0k0: np.ufunc +itj0y0: np.ufunc +itmodstruve0: np.ufunc +itstruve0: np.ufunc +iv: np.ufunc +ive: np.ufunc +j0: np.ufunc +j1: np.ufunc +jn: np.ufunc +jv: np.ufunc +jve: np.ufunc +k0: np.ufunc +k0e: np.ufunc +k1: np.ufunc +k1e: np.ufunc +kei: np.ufunc +keip: np.ufunc +kelvin: np.ufunc +ker: np.ufunc +kerp: np.ufunc +kl_div: np.ufunc +kn: np.ufunc +kolmogi: np.ufunc +kolmogorov: np.ufunc +kv: np.ufunc +kve: np.ufunc +log1p: np.ufunc +log_expit: np.ufunc +log_ndtr: np.ufunc +loggamma: np.ufunc +logit: np.ufunc +lpmv: np.ufunc +mathieu_a: np.ufunc +mathieu_b: 
np.ufunc +mathieu_cem: np.ufunc +mathieu_modcem1: np.ufunc +mathieu_modcem2: np.ufunc +mathieu_modsem1: np.ufunc +mathieu_modsem2: np.ufunc +mathieu_sem: np.ufunc +modfresnelm: np.ufunc +modfresnelp: np.ufunc +modstruve: np.ufunc +nbdtr: np.ufunc +nbdtrc: np.ufunc +nbdtri: np.ufunc +nbdtrik: np.ufunc +nbdtrin: np.ufunc +ncfdtr: np.ufunc +ncfdtri: np.ufunc +ncfdtridfd: np.ufunc +ncfdtridfn: np.ufunc +ncfdtrinc: np.ufunc +nctdtr: np.ufunc +nctdtridf: np.ufunc +nctdtrinc: np.ufunc +nctdtrit: np.ufunc +ndtr: np.ufunc +ndtri: np.ufunc +ndtri_exp: np.ufunc +nrdtrimn: np.ufunc +nrdtrisd: np.ufunc +obl_ang1: np.ufunc +obl_ang1_cv: np.ufunc +obl_cv: np.ufunc +obl_rad1: np.ufunc +obl_rad1_cv: np.ufunc +obl_rad2: np.ufunc +obl_rad2_cv: np.ufunc +owens_t: np.ufunc +pbdv: np.ufunc +pbvv: np.ufunc +pbwa: np.ufunc +pdtr: np.ufunc +pdtrc: np.ufunc +pdtri: np.ufunc +pdtrik: np.ufunc +poch: np.ufunc +powm1: np.ufunc +pro_ang1: np.ufunc +pro_ang1_cv: np.ufunc +pro_cv: np.ufunc +pro_rad1: np.ufunc +pro_rad1_cv: np.ufunc +pro_rad2: np.ufunc +pro_rad2_cv: np.ufunc +pseudo_huber: np.ufunc +psi: np.ufunc +radian: np.ufunc +rel_entr: np.ufunc +rgamma: np.ufunc +round: np.ufunc +shichi: np.ufunc +sici: np.ufunc +sindg: np.ufunc +smirnov: np.ufunc +smirnovi: np.ufunc +spence: np.ufunc +sph_harm: np.ufunc +stdtr: np.ufunc +stdtridf: np.ufunc +stdtrit: np.ufunc +struve: np.ufunc +tandg: np.ufunc +tklmbda: np.ufunc +voigt_profile: np.ufunc +wofz: np.ufunc +wright_bessel: np.ufunc +wrightomega: np.ufunc +xlog1py: np.ufunc +xlogy: np.ufunc +y0: np.ufunc +y1: np.ufunc +yn: np.ufunc +yv: np.ufunc +yve: np.ufunc +zetac: np.ufunc + diff --git a/venv/lib/python3.10/site-packages/scipy/special/_ufuncs.pyx b/venv/lib/python3.10/site-packages/scipy/special/_ufuncs.pyx new file mode 100644 index 0000000000000000000000000000000000000000..1e2e27941adbe6ae5a34eb807d157e2c144c39c9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special/_ufuncs.pyx @@ -0,0 +1,21968 @@ +# This file is automatically 
generated by _generate_pyx.py. +# Do not edit manually! + +from libc.math cimport NAN + +include "_ufuncs_extra_code_common.pxi" +include "_ufuncs_extra_code.pxi" +__all__ = ['agm', 'airy', 'airye', 'bdtr', 'bdtrc', 'bdtri', 'bdtrik', 'bdtrin', 'bei', 'beip', 'ber', 'berp', 'besselpoly', 'beta', 'betainc', 'betaincc', 'betainccinv', 'betaincinv', 'betaln', 'binom', 'boxcox', 'boxcox1p', 'btdtr', 'btdtri', 'btdtria', 'btdtrib', 'cbrt', 'chdtr', 'chdtrc', 'chdtri', 'chdtriv', 'chndtr', 'chndtridf', 'chndtrinc', 'chndtrix', 'cosdg', 'cosm1', 'cotdg', 'dawsn', 'ellipe', 'ellipeinc', 'ellipj', 'ellipk', 'ellipkinc', 'ellipkm1', 'elliprc', 'elliprd', 'elliprf', 'elliprg', 'elliprj', 'entr', 'erf', 'erfc', 'erfcinv', 'erfcx', 'erfi', 'erfinv', 'eval_chebyc', 'eval_chebys', 'eval_chebyt', 'eval_chebyu', 'eval_gegenbauer', 'eval_genlaguerre', 'eval_hermite', 'eval_hermitenorm', 'eval_jacobi', 'eval_laguerre', 'eval_legendre', 'eval_sh_chebyt', 'eval_sh_chebyu', 'eval_sh_jacobi', 'eval_sh_legendre', 'exp1', 'exp10', 'exp2', 'expi', 'expit', 'expm1', 'expn', 'exprel', 'fdtr', 'fdtrc', 'fdtri', 'fdtridfd', 'fresnel', 'gamma', 'gammainc', 'gammaincc', 'gammainccinv', 'gammaincinv', 'gammaln', 'gammasgn', 'gdtr', 'gdtrc', 'gdtria', 'gdtrib', 'gdtrix', 'hankel1', 'hankel1e', 'hankel2', 'hankel2e', 'huber', 'hyp0f1', 'hyp1f1', 'hyp2f1', 'hyperu', 'i0', 'i0e', 'i1', 'i1e', 'inv_boxcox', 'inv_boxcox1p', 'it2i0k0', 'it2j0y0', 'it2struve0', 'itairy', 'iti0k0', 'itj0y0', 'itmodstruve0', 'itstruve0', 'iv', 'ive', 'j0', 'j1', 'jv', 'jve', 'k0', 'k0e', 'k1', 'k1e', 'kei', 'keip', 'kelvin', 'ker', 'kerp', 'kl_div', 'kn', 'kolmogi', 'kolmogorov', 'kv', 'kve', 'log1p', 'log_expit', 'log_ndtr', 'loggamma', 'logit', 'lpmv', 'mathieu_a', 'mathieu_b', 'mathieu_cem', 'mathieu_modcem1', 'mathieu_modcem2', 'mathieu_modsem1', 'mathieu_modsem2', 'mathieu_sem', 'modfresnelm', 'modfresnelp', 'modstruve', 'nbdtr', 'nbdtrc', 'nbdtri', 'nbdtrik', 'nbdtrin', 'ncfdtr', 'ncfdtri', 'ncfdtridfd', 'ncfdtridfn', 
'ncfdtrinc', 'nctdtr', 'nctdtridf', 'nctdtrinc', 'nctdtrit', 'ndtr', 'ndtri', 'ndtri_exp', 'nrdtrimn', 'nrdtrisd', 'obl_ang1', 'obl_ang1_cv', 'obl_cv', 'obl_rad1', 'obl_rad1_cv', 'obl_rad2', 'obl_rad2_cv', 'owens_t', 'pbdv', 'pbvv', 'pbwa', 'pdtr', 'pdtrc', 'pdtri', 'pdtrik', 'poch', 'powm1', 'pro_ang1', 'pro_ang1_cv', 'pro_cv', 'pro_rad1', 'pro_rad1_cv', 'pro_rad2', 'pro_rad2_cv', 'pseudo_huber', 'psi', 'radian', 'rel_entr', 'rgamma', 'round', 'shichi', 'sici', 'sindg', 'smirnov', 'smirnovi', 'spence', 'sph_harm', 'stdtr', 'stdtridf', 'stdtrit', 'struve', 'tandg', 'tklmbda', 'voigt_profile', 'wofz', 'wright_bessel', 'wrightomega', 'xlog1py', 'xlogy', 'y0', 'y1', 'yn', 'yv', 'yve', 'zetac', 'geterr', 'seterr', 'errstate', 'jn'] +cdef void loop_D_DDDD__As_DDDD_D(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *ip3 = args[3] + cdef char *op0 = args[4] + cdef double complex ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0], (ip3)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + ip3 += steps[3] + op0 += steps[4] + sf_error.check_fpe(func_name) + +cdef void loop_D_DDDD__As_FFFF_F(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *ip3 = args[3] + cdef char *op0 = args[4] + cdef double complex ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0], (ip3)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + ip3 += steps[3] + op0 += steps[4] + sf_error.check_fpe(func_name) + +cdef void loop_D_DDD__As_DDD_D(char **args, np.npy_intp *dims, np.npy_intp 
*steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *op0 = args[3] + cdef double complex ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + op0 += steps[3] + sf_error.check_fpe(func_name) + +cdef void loop_D_DDD__As_FFF_F(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *op0 = args[3] + cdef double complex ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + op0 += steps[3] + sf_error.check_fpe(func_name) + +cdef void loop_D_DD__As_DD_D(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *op0 = args[2] + cdef double complex ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + op0 += steps[2] + sf_error.check_fpe(func_name) + +cdef void loop_D_DD__As_FF_F(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *op0 = args[2] + cdef double complex ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + op0 += steps[2] + sf_error.check_fpe(func_name) + +cdef void loop_D_D__As_D_D(char **args, np.npy_intp 
*dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *op0 = args[1] + cdef double complex ov0 + for i in range(n): + ov0 = (func)((ip0)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + op0 += steps[1] + sf_error.check_fpe(func_name) + +cdef void loop_D_D__As_F_F(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *op0 = args[1] + cdef double complex ov0 + for i in range(n): + ov0 = (func)((ip0)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + op0 += steps[1] + sf_error.check_fpe(func_name) + +cdef void loop_D_Dld__As_Dld_D(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *op0 = args[3] + cdef double complex ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + op0 += steps[3] + sf_error.check_fpe(func_name) + +cdef void loop_D_dD__As_dD_D(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *op0 = args[2] + cdef double complex ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + op0 += steps[2] + sf_error.check_fpe(func_name) + +cdef void loop_D_dD__As_fF_F(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = 
(data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *op0 = args[2] + cdef double complex ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + op0 += steps[2] + sf_error.check_fpe(func_name) + +cdef void loop_D_ddD__As_ddD_D(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *op0 = args[3] + cdef double complex ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + op0 += steps[3] + sf_error.check_fpe(func_name) + +cdef void loop_D_ddD__As_ffF_F(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *op0 = args[3] + cdef double complex ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + op0 += steps[3] + sf_error.check_fpe(func_name) + +cdef void loop_D_dddD__As_dddD_D(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *ip3 = args[3] + cdef char *op0 = args[4] + cdef double complex ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0], (ip3)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + ip3 += steps[3] + op0 += steps[4] + sf_error.check_fpe(func_name) + +cdef void loop_D_dddD__As_fffF_F(char **args, np.npy_intp 
*dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *ip3 = args[3] + cdef char *op0 = args[4] + cdef double complex ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0], (ip3)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + ip3 += steps[3] + op0 += steps[4] + sf_error.check_fpe(func_name) + +cdef void loop_D_dddd__As_dddd_D(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *ip3 = args[3] + cdef char *op0 = args[4] + cdef double complex ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0], (ip3)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + ip3 += steps[3] + op0 += steps[4] + sf_error.check_fpe(func_name) + +cdef void loop_D_dddd__As_ffff_F(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *ip3 = args[3] + cdef char *op0 = args[4] + cdef double complex ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0], (ip3)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + ip3 += steps[3] + op0 += steps[4] + sf_error.check_fpe(func_name) + +cdef void loop_D_iidd__As_lldd_D(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = 
args[2] + cdef char *ip3 = args[3] + cdef char *op0 = args[4] + cdef double complex ov0 + for i in range(n): + if (ip0)[0] == (ip0)[0] and (ip1)[0] == (ip1)[0]: + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0], (ip3)[0]) + else: + sf_error.error(func_name, sf_error.DOMAIN, "invalid input argument") + ov0 = NAN + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + ip3 += steps[3] + op0 += steps[4] + sf_error.check_fpe(func_name) + +cdef void loop_D_lD__As_lD_D(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *op0 = args[2] + cdef double complex ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + op0 += steps[2] + sf_error.check_fpe(func_name) + +cdef void loop_d_d__As_d_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *op0 = args[1] + cdef double ov0 + for i in range(n): + ov0 = (func)((ip0)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + op0 += steps[1] + sf_error.check_fpe(func_name) + +cdef void loop_d_d__As_f_f(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *op0 = args[1] + cdef double ov0 + for i in range(n): + ov0 = (func)((ip0)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + op0 += steps[1] + sf_error.check_fpe(func_name) + +cdef void loop_d_dd__As_dd_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char 
*ip1 = args[1] + cdef char *op0 = args[2] + cdef double ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + op0 += steps[2] + sf_error.check_fpe(func_name) + +cdef void loop_d_dd__As_ff_f(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *op0 = args[2] + cdef double ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + op0 += steps[2] + sf_error.check_fpe(func_name) + +cdef void loop_d_ddd__As_ddd_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *op0 = args[3] + cdef double ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + op0 += steps[3] + sf_error.check_fpe(func_name) + +cdef void loop_d_ddd__As_fff_f(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *op0 = args[3] + cdef double ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + op0 += steps[3] + sf_error.check_fpe(func_name) + +cdef void loop_d_dddd__As_dddd_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 
= args[1] + cdef char *ip2 = args[2] + cdef char *ip3 = args[3] + cdef char *op0 = args[4] + cdef double ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0], (ip3)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + ip3 += steps[3] + op0 += steps[4] + sf_error.check_fpe(func_name) + +cdef void loop_d_dddd__As_ffff_f(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *ip3 = args[3] + cdef char *op0 = args[4] + cdef double ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0], (ip3)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + ip3 += steps[3] + op0 += steps[4] + sf_error.check_fpe(func_name) + +cdef void loop_d_dddd_d_As_dddd_dd(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *ip3 = args[3] + cdef char *op0 = args[4] + cdef char *op1 = args[5] + cdef double ov0 + cdef double ov1 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0], (ip3)[0], &ov1) + (op0)[0] = ov0 + (op1)[0] = ov1 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + ip3 += steps[3] + op0 += steps[4] + op1 += steps[5] + sf_error.check_fpe(func_name) + +cdef void loop_d_dddd_d_As_ffff_ff(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *ip3 = args[3] + cdef char *op0 = args[4] + cdef char *op1 = args[5] + cdef double ov0 + cdef double ov1 + for 
i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0], (ip3)[0], &ov1) + (op0)[0] = ov0 + (op1)[0] = ov1 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + ip3 += steps[3] + op0 += steps[4] + op1 += steps[5] + sf_error.check_fpe(func_name) + +cdef void loop_d_ddddddd__As_ddddddd_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *ip3 = args[3] + cdef char *ip4 = args[4] + cdef char *ip5 = args[5] + cdef char *ip6 = args[6] + cdef char *op0 = args[7] + cdef double ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0], (ip3)[0], (ip4)[0], (ip5)[0], (ip6)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + ip3 += steps[3] + ip4 += steps[4] + ip5 += steps[5] + ip6 += steps[6] + op0 += steps[7] + sf_error.check_fpe(func_name) + +cdef void loop_d_ddddddd__As_fffffff_f(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *ip3 = args[3] + cdef char *ip4 = args[4] + cdef char *ip5 = args[5] + cdef char *ip6 = args[6] + cdef char *op0 = args[7] + cdef double ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0], (ip3)[0], (ip4)[0], (ip5)[0], (ip6)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + ip3 += steps[3] + ip4 += steps[4] + ip5 += steps[5] + ip6 += steps[6] + op0 += steps[7] + sf_error.check_fpe(func_name) + +cdef void loop_d_ddi_d_As_ddl_dd(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] 
+ cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *op0 = args[3] + cdef char *op1 = args[4] + cdef double ov0 + cdef double ov1 + for i in range(n): + if (ip2)[0] == (ip2)[0]: + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0], &ov1) + else: + sf_error.error(func_name, sf_error.DOMAIN, "invalid input argument") + ov0 = NAN + ov1 = NAN + (op0)[0] = ov0 + (op1)[0] = ov1 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + op0 += steps[3] + op1 += steps[4] + sf_error.check_fpe(func_name) + +cdef void loop_d_ddiiddd__As_ddllddd_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *ip3 = args[3] + cdef char *ip4 = args[4] + cdef char *ip5 = args[5] + cdef char *ip6 = args[6] + cdef char *op0 = args[7] + cdef double ov0 + for i in range(n): + if (ip2)[0] == (ip2)[0] and (ip3)[0] == (ip3)[0]: + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0], (ip3)[0], (ip4)[0], (ip5)[0], (ip6)[0]) + else: + sf_error.error(func_name, sf_error.DOMAIN, "invalid input argument") + ov0 = NAN + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + ip3 += steps[3] + ip4 += steps[4] + ip5 += steps[5] + ip6 += steps[6] + op0 += steps[7] + sf_error.check_fpe(func_name) + +cdef void loop_d_did__As_dld_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *op0 = args[3] + cdef double ov0 + for i in range(n): + if (ip1)[0] == (ip1)[0]: + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0]) + else: + sf_error.error(func_name, sf_error.DOMAIN, "invalid input argument") + ov0 = NAN + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + op0 
+= steps[3] + sf_error.check_fpe(func_name) + +cdef void loop_d_id__As_ld_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *op0 = args[2] + cdef double ov0 + for i in range(n): + if (ip0)[0] == (ip0)[0]: + ov0 = (func)((ip0)[0], (ip1)[0]) + else: + sf_error.error(func_name, sf_error.DOMAIN, "invalid input argument") + ov0 = NAN + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + op0 += steps[2] + sf_error.check_fpe(func_name) + +cdef void loop_d_iid__As_lld_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *op0 = args[3] + cdef double ov0 + for i in range(n): + if (ip0)[0] == (ip0)[0] and (ip1)[0] == (ip1)[0]: + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0]) + else: + sf_error.error(func_name, sf_error.DOMAIN, "invalid input argument") + ov0 = NAN + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + op0 += steps[3] + sf_error.check_fpe(func_name) + +cdef void loop_d_ld__As_ld_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *op0 = args[2] + cdef double ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + op0 += steps[2] + sf_error.check_fpe(func_name) + +cdef void loop_d_ldd__As_ldd_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 
= args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *op0 = args[3] + cdef double ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + op0 += steps[3] + sf_error.check_fpe(func_name) + +cdef void loop_d_lddd__As_lddd_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *ip3 = args[3] + cdef char *op0 = args[4] + cdef double ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0], (ip3)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + ip3 += steps[3] + op0 += steps[4] + sf_error.check_fpe(func_name) + +cdef void loop_f_f__As_f_f(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *op0 = args[1] + cdef float ov0 + for i in range(n): + ov0 = (func)((ip0)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + op0 += steps[1] + sf_error.check_fpe(func_name) + +cdef void loop_f_ff__As_ff_f(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *op0 = args[2] + cdef float ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + op0 += steps[2] + sf_error.check_fpe(func_name) + +cdef void loop_f_fff__As_fff_f(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + 
cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *op0 = args[3] + cdef float ov0 + for i in range(n): + ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + op0 += steps[3] + sf_error.check_fpe(func_name) + +cdef void loop_g_g__As_g_g(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *op0 = args[1] + cdef long double ov0 + for i in range(n): + ov0 = (func)((ip0)[0]) + (op0)[0] = ov0 + ip0 += steps[0] + op0 += steps[1] + sf_error.check_fpe(func_name) + +cdef void loop_i_D_DDDD_As_D_DDDD(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *op0 = args[1] + cdef char *op1 = args[2] + cdef char *op2 = args[3] + cdef char *op3 = args[4] + cdef double complex ov0 + cdef double complex ov1 + cdef double complex ov2 + cdef double complex ov3 + for i in range(n): + (func)((ip0)[0], &ov0, &ov1, &ov2, &ov3) + (op0)[0] = ov0 + (op1)[0] = ov1 + (op2)[0] = ov2 + (op3)[0] = ov3 + ip0 += steps[0] + op0 += steps[1] + op1 += steps[2] + op2 += steps[3] + op3 += steps[4] + sf_error.check_fpe(func_name) + +cdef void loop_i_D_DDDD_As_F_FFFF(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *op0 = args[1] + cdef char *op1 = args[2] + cdef char *op2 = args[3] + cdef char *op3 = args[4] + cdef double complex ov0 + cdef double complex ov1 + cdef double complex ov2 + cdef double complex ov3 + for i in range(n): + (func)((ip0)[0], &ov0, &ov1, &ov2, &ov3) + (op0)[0] = ov0 + (op1)[0] = ov1 + (op2)[0] = ov2 + (op3)[0] = ov3 + 
ip0 += steps[0] + op0 += steps[1] + op1 += steps[2] + op2 += steps[3] + op3 += steps[4] + sf_error.check_fpe(func_name) + +cdef void loop_i_D_DD_As_D_DD(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *op0 = args[1] + cdef char *op1 = args[2] + cdef double complex ov0 + cdef double complex ov1 + for i in range(n): + (func)((ip0)[0], &ov0, &ov1) + (op0)[0] = ov0 + (op1)[0] = ov1 + ip0 += steps[0] + op0 += steps[1] + op1 += steps[2] + sf_error.check_fpe(func_name) + +cdef void loop_i_D_DD_As_F_FF(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *op0 = args[1] + cdef char *op1 = args[2] + cdef double complex ov0 + cdef double complex ov1 + for i in range(n): + (func)((ip0)[0], &ov0, &ov1) + (op0)[0] = ov0 + (op1)[0] = ov1 + ip0 += steps[0] + op0 += steps[1] + op1 += steps[2] + sf_error.check_fpe(func_name) + +cdef void loop_i_d_DDDD_As_d_DDDD(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *op0 = args[1] + cdef char *op1 = args[2] + cdef char *op2 = args[3] + cdef char *op3 = args[4] + cdef double complex ov0 + cdef double complex ov1 + cdef double complex ov2 + cdef double complex ov3 + for i in range(n): + (func)((ip0)[0], &ov0, &ov1, &ov2, &ov3) + (op0)[0] = ov0 + (op1)[0] = ov1 + (op2)[0] = ov2 + (op3)[0] = ov3 + ip0 += steps[0] + op0 += steps[1] + op1 += steps[2] + op2 += steps[3] + op3 += steps[4] + sf_error.check_fpe(func_name) + +cdef void loop_i_d_DDDD_As_f_FFFF(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp 
i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *op0 = args[1] + cdef char *op1 = args[2] + cdef char *op2 = args[3] + cdef char *op3 = args[4] + cdef double complex ov0 + cdef double complex ov1 + cdef double complex ov2 + cdef double complex ov3 + for i in range(n): + (func)((ip0)[0], &ov0, &ov1, &ov2, &ov3) + (op0)[0] = ov0 + (op1)[0] = ov1 + (op2)[0] = ov2 + (op3)[0] = ov3 + ip0 += steps[0] + op0 += steps[1] + op1 += steps[2] + op2 += steps[3] + op3 += steps[4] + sf_error.check_fpe(func_name) + +cdef void loop_i_d_DD_As_d_DD(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *op0 = args[1] + cdef char *op1 = args[2] + cdef double complex ov0 + cdef double complex ov1 + for i in range(n): + (func)((ip0)[0], &ov0, &ov1) + (op0)[0] = ov0 + (op1)[0] = ov1 + ip0 += steps[0] + op0 += steps[1] + op1 += steps[2] + sf_error.check_fpe(func_name) + +cdef void loop_i_d_DD_As_f_FF(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *op0 = args[1] + cdef char *op1 = args[2] + cdef double complex ov0 + cdef double complex ov1 + for i in range(n): + (func)((ip0)[0], &ov0, &ov1) + (op0)[0] = ov0 + (op1)[0] = ov1 + ip0 += steps[0] + op0 += steps[1] + op1 += steps[2] + sf_error.check_fpe(func_name) + +cdef void loop_i_d_dd_As_d_dd(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *op0 = args[1] + cdef char *op1 = args[2] + cdef double ov0 + cdef double ov1 + for i in range(n): + (func)((ip0)[0], &ov0, &ov1) + (op0)[0] 
= ov0 + (op1)[0] = ov1 + ip0 += steps[0] + op0 += steps[1] + op1 += steps[2] + sf_error.check_fpe(func_name) + +cdef void loop_i_d_dd_As_f_ff(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *op0 = args[1] + cdef char *op1 = args[2] + cdef double ov0 + cdef double ov1 + for i in range(n): + (func)((ip0)[0], &ov0, &ov1) + (op0)[0] = ov0 + (op1)[0] = ov1 + ip0 += steps[0] + op0 += steps[1] + op1 += steps[2] + sf_error.check_fpe(func_name) + +cdef void loop_i_d_dddd_As_d_dddd(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *op0 = args[1] + cdef char *op1 = args[2] + cdef char *op2 = args[3] + cdef char *op3 = args[4] + cdef double ov0 + cdef double ov1 + cdef double ov2 + cdef double ov3 + for i in range(n): + (func)((ip0)[0], &ov0, &ov1, &ov2, &ov3) + (op0)[0] = ov0 + (op1)[0] = ov1 + (op2)[0] = ov2 + (op3)[0] = ov3 + ip0 += steps[0] + op0 += steps[1] + op1 += steps[2] + op2 += steps[3] + op3 += steps[4] + sf_error.check_fpe(func_name) + +cdef void loop_i_d_dddd_As_f_ffff(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *op0 = args[1] + cdef char *op1 = args[2] + cdef char *op2 = args[3] + cdef char *op3 = args[4] + cdef double ov0 + cdef double ov1 + cdef double ov2 + cdef double ov3 + for i in range(n): + (func)((ip0)[0], &ov0, &ov1, &ov2, &ov3) + (op0)[0] = ov0 + (op1)[0] = ov1 + (op2)[0] = ov2 + (op3)[0] = ov3 + ip0 += steps[0] + op0 += steps[1] + op1 += steps[2] + op2 += steps[3] + op3 += steps[4] + sf_error.check_fpe(func_name) + +cdef void 
loop_i_dd_dd_As_dd_dd(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *op0 = args[2] + cdef char *op1 = args[3] + cdef double ov0 + cdef double ov1 + for i in range(n): + (func)((ip0)[0], (ip1)[0], &ov0, &ov1) + (op0)[0] = ov0 + (op1)[0] = ov1 + ip0 += steps[0] + ip1 += steps[1] + op0 += steps[2] + op1 += steps[3] + sf_error.check_fpe(func_name) + +cdef void loop_i_dd_dd_As_ff_ff(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *op0 = args[2] + cdef char *op1 = args[3] + cdef double ov0 + cdef double ov1 + for i in range(n): + (func)((ip0)[0], (ip1)[0], &ov0, &ov1) + (op0)[0] = ov0 + (op1)[0] = ov1 + ip0 += steps[0] + ip1 += steps[1] + op0 += steps[2] + op1 += steps[3] + sf_error.check_fpe(func_name) + +cdef void loop_i_dd_dddd_As_dd_dddd(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *op0 = args[2] + cdef char *op1 = args[3] + cdef char *op2 = args[4] + cdef char *op3 = args[5] + cdef double ov0 + cdef double ov1 + cdef double ov2 + cdef double ov3 + for i in range(n): + (func)((ip0)[0], (ip1)[0], &ov0, &ov1, &ov2, &ov3) + (op0)[0] = ov0 + (op1)[0] = ov1 + (op2)[0] = ov2 + (op3)[0] = ov3 + ip0 += steps[0] + ip1 += steps[1] + op0 += steps[2] + op1 += steps[3] + op2 += steps[4] + op3 += steps[5] + sf_error.check_fpe(func_name) + +cdef void loop_i_dd_dddd_As_ff_ffff(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef 
void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *op0 = args[2] + cdef char *op1 = args[3] + cdef char *op2 = args[4] + cdef char *op3 = args[5] + cdef double ov0 + cdef double ov1 + cdef double ov2 + cdef double ov3 + for i in range(n): + (func)((ip0)[0], (ip1)[0], &ov0, &ov1, &ov2, &ov3) + (op0)[0] = ov0 + (op1)[0] = ov1 + (op2)[0] = ov2 + (op3)[0] = ov3 + ip0 += steps[0] + ip1 += steps[1] + op0 += steps[2] + op1 += steps[3] + op2 += steps[4] + op3 += steps[5] + sf_error.check_fpe(func_name) + +cdef void loop_i_ddd_dd_As_ddd_dd(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *op0 = args[3] + cdef char *op1 = args[4] + cdef double ov0 + cdef double ov1 + for i in range(n): + (func)((ip0)[0], (ip1)[0], (ip2)[0], &ov0, &ov1) + (op0)[0] = ov0 + (op1)[0] = ov1 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + op0 += steps[3] + op1 += steps[4] + sf_error.check_fpe(func_name) + +cdef void loop_i_ddd_dd_As_fff_ff(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *op0 = args[3] + cdef char *op1 = args[4] + cdef double ov0 + cdef double ov1 + for i in range(n): + (func)((ip0)[0], (ip1)[0], (ip2)[0], &ov0, &ov1) + (op0)[0] = ov0 + (op1)[0] = ov1 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + op0 += steps[3] + op1 += steps[4] + sf_error.check_fpe(func_name) + +cdef void loop_i_ddddd_dd_As_ddddd_dd(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef 
char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *ip3 = args[3] + cdef char *ip4 = args[4] + cdef char *op0 = args[5] + cdef char *op1 = args[6] + cdef double ov0 + cdef double ov1 + for i in range(n): + (func)((ip0)[0], (ip1)[0], (ip2)[0], (ip3)[0], (ip4)[0], &ov0, &ov1) + (op0)[0] = ov0 + (op1)[0] = ov1 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + ip3 += steps[3] + ip4 += steps[4] + op0 += steps[5] + op1 += steps[6] + sf_error.check_fpe(func_name) + +cdef void loop_i_ddddd_dd_As_fffff_ff(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *ip1 = args[1] + cdef char *ip2 = args[2] + cdef char *ip3 = args[3] + cdef char *ip4 = args[4] + cdef char *op0 = args[5] + cdef char *op1 = args[6] + cdef double ov0 + cdef double ov1 + for i in range(n): + (func)((ip0)[0], (ip1)[0], (ip2)[0], (ip3)[0], (ip4)[0], &ov0, &ov1) + (op0)[0] = ov0 + (op1)[0] = ov1 + ip0 += steps[0] + ip1 += steps[1] + ip2 += steps[2] + ip3 += steps[3] + ip4 += steps[4] + op0 += steps[5] + op1 += steps[6] + sf_error.check_fpe(func_name) + +cdef void loop_i_i__As_l_l(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil: + cdef np.npy_intp i, n = dims[0] + cdef void *func = (data)[0] + cdef char *func_name = (data)[1] + cdef char *ip0 = args[0] + cdef char *op0 = args[1] + cdef int ov0 + for i in range(n): + if (ip0)[0] == (ip0)[0]: + ov0 = (func)((ip0)[0]) + else: + sf_error.error(func_name, sf_error.DOMAIN, "invalid input argument") + ov0 = 0xbad0bad0 + (op0)[0] = ov0 + ip0 += steps[0] + op0 += steps[1] + sf_error.check_fpe(func_name) + +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cosine_cdf "cosine_cdf"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cosine_invcdf 
"cosine_invcdf"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cospi "cospi"(double) noexcept nogil +from ._ellip_harm cimport ellip_harmonic as _func_ellip_harmonic +ctypedef double _proto_ellip_harmonic_t(double, double, int, int, double, double, double) noexcept nogil +cdef _proto_ellip_harmonic_t *_proto_ellip_harmonic_t_var = &_func_ellip_harmonic +from ._legacy cimport ellip_harmonic_unsafe as _func_ellip_harmonic_unsafe +ctypedef double _proto_ellip_harmonic_unsafe_t(double, double, double, double, double, double, double) noexcept nogil +cdef _proto_ellip_harmonic_unsafe_t *_proto_ellip_harmonic_unsafe_t_var = &_func_ellip_harmonic_unsafe +from ._factorial cimport _factorial as _func__factorial +ctypedef double _proto__factorial_t(double) noexcept nogil +cdef _proto__factorial_t *_proto__factorial_t_var = &_func__factorial +cdef extern from r"_ufuncs_defs.h": + cdef double _func_igam_fac "igam_fac"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_kolmogc "kolmogc"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_kolmogci "kolmogci"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_kolmogp "kolmogp"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_lanczos_sum_expg_scaled "lanczos_sum_expg_scaled"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_lgam1p "lgam1p"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_log1pmx "log1pmx"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_riemann_zeta "riemann_zeta"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_scaled_exp1 "scaled_exp1"(double) noexcept nogil +from .sf_error cimport _sf_error_test_function as _func__sf_error_test_function +ctypedef int _proto__sf_error_test_function_t(int) noexcept nogil +cdef 
_proto__sf_error_test_function_t *_proto__sf_error_test_function_t_var = &_func__sf_error_test_function +cdef extern from r"_ufuncs_defs.h": + cdef double _func_sinpi "sinpi"(double) noexcept nogil +from ._legacy cimport smirnovc_unsafe as _func_smirnovc_unsafe +ctypedef double _proto_smirnovc_unsafe_t(double, double) noexcept nogil +cdef _proto_smirnovc_unsafe_t *_proto_smirnovc_unsafe_t_var = &_func_smirnovc_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef double _func_smirnovc "smirnovc"(int, double) noexcept nogil +from ._legacy cimport smirnovci_unsafe as _func_smirnovci_unsafe +ctypedef double _proto_smirnovci_unsafe_t(double, double) noexcept nogil +cdef _proto_smirnovci_unsafe_t *_proto_smirnovci_unsafe_t_var = &_func_smirnovci_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef double _func_smirnovci "smirnovci"(int, double) noexcept nogil +from ._legacy cimport smirnovp_unsafe as _func_smirnovp_unsafe +ctypedef double _proto_smirnovp_unsafe_t(double, double) noexcept nogil +cdef _proto_smirnovp_unsafe_t *_proto_smirnovp_unsafe_t_var = &_func_smirnovp_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef double _func_smirnovp "smirnovp"(int, double) noexcept nogil +from ._spherical_bessel cimport spherical_in_complex as _func_spherical_in_complex +ctypedef double complex _proto_spherical_in_complex_t(long, double complex) noexcept nogil +cdef _proto_spherical_in_complex_t *_proto_spherical_in_complex_t_var = &_func_spherical_in_complex +from ._spherical_bessel cimport spherical_in_real as _func_spherical_in_real +ctypedef double _proto_spherical_in_real_t(long, double) noexcept nogil +cdef _proto_spherical_in_real_t *_proto_spherical_in_real_t_var = &_func_spherical_in_real +from ._spherical_bessel cimport spherical_in_d_complex as _func_spherical_in_d_complex +ctypedef double complex _proto_spherical_in_d_complex_t(long, double complex) noexcept nogil +cdef _proto_spherical_in_d_complex_t *_proto_spherical_in_d_complex_t_var = &_func_spherical_in_d_complex 
+from ._spherical_bessel cimport spherical_in_d_real as _func_spherical_in_d_real +ctypedef double _proto_spherical_in_d_real_t(long, double) noexcept nogil +cdef _proto_spherical_in_d_real_t *_proto_spherical_in_d_real_t_var = &_func_spherical_in_d_real +from ._spherical_bessel cimport spherical_jn_complex as _func_spherical_jn_complex +ctypedef double complex _proto_spherical_jn_complex_t(long, double complex) noexcept nogil +cdef _proto_spherical_jn_complex_t *_proto_spherical_jn_complex_t_var = &_func_spherical_jn_complex +from ._spherical_bessel cimport spherical_jn_real as _func_spherical_jn_real +ctypedef double _proto_spherical_jn_real_t(long, double) noexcept nogil +cdef _proto_spherical_jn_real_t *_proto_spherical_jn_real_t_var = &_func_spherical_jn_real +from ._spherical_bessel cimport spherical_jn_d_complex as _func_spherical_jn_d_complex +ctypedef double complex _proto_spherical_jn_d_complex_t(long, double complex) noexcept nogil +cdef _proto_spherical_jn_d_complex_t *_proto_spherical_jn_d_complex_t_var = &_func_spherical_jn_d_complex +from ._spherical_bessel cimport spherical_jn_d_real as _func_spherical_jn_d_real +ctypedef double _proto_spherical_jn_d_real_t(long, double) noexcept nogil +cdef _proto_spherical_jn_d_real_t *_proto_spherical_jn_d_real_t_var = &_func_spherical_jn_d_real +from ._spherical_bessel cimport spherical_kn_complex as _func_spherical_kn_complex +ctypedef double complex _proto_spherical_kn_complex_t(long, double complex) noexcept nogil +cdef _proto_spherical_kn_complex_t *_proto_spherical_kn_complex_t_var = &_func_spherical_kn_complex +from ._spherical_bessel cimport spherical_kn_real as _func_spherical_kn_real +ctypedef double _proto_spherical_kn_real_t(long, double) noexcept nogil +cdef _proto_spherical_kn_real_t *_proto_spherical_kn_real_t_var = &_func_spherical_kn_real +from ._spherical_bessel cimport spherical_kn_d_complex as _func_spherical_kn_d_complex +ctypedef double complex _proto_spherical_kn_d_complex_t(long, double 
complex) noexcept nogil +cdef _proto_spherical_kn_d_complex_t *_proto_spherical_kn_d_complex_t_var = &_func_spherical_kn_d_complex +from ._spherical_bessel cimport spherical_kn_d_real as _func_spherical_kn_d_real +ctypedef double _proto_spherical_kn_d_real_t(long, double) noexcept nogil +cdef _proto_spherical_kn_d_real_t *_proto_spherical_kn_d_real_t_var = &_func_spherical_kn_d_real +from ._spherical_bessel cimport spherical_yn_complex as _func_spherical_yn_complex +ctypedef double complex _proto_spherical_yn_complex_t(long, double complex) noexcept nogil +cdef _proto_spherical_yn_complex_t *_proto_spherical_yn_complex_t_var = &_func_spherical_yn_complex +from ._spherical_bessel cimport spherical_yn_real as _func_spherical_yn_real +ctypedef double _proto_spherical_yn_real_t(long, double) noexcept nogil +cdef _proto_spherical_yn_real_t *_proto_spherical_yn_real_t_var = &_func_spherical_yn_real +from ._spherical_bessel cimport spherical_yn_d_complex as _func_spherical_yn_d_complex +ctypedef double complex _proto_spherical_yn_d_complex_t(long, double complex) noexcept nogil +cdef _proto_spherical_yn_d_complex_t *_proto_spherical_yn_d_complex_t_var = &_func_spherical_yn_d_complex +from ._spherical_bessel cimport spherical_yn_d_real as _func_spherical_yn_d_real +ctypedef double _proto_spherical_yn_d_real_t(long, double) noexcept nogil +cdef _proto_spherical_yn_d_real_t *_proto_spherical_yn_d_real_t_var = &_func_spherical_yn_d_real +cdef extern from r"_ufuncs_defs.h": + cdef double _func_struve_asymp_large_z "struve_asymp_large_z"(double, double, int, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_struve_bessel_series "struve_bessel_series"(double, double, int, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_struve_power_series "struve_power_series"(double, double, int, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_zeta "zeta"(double, double) noexcept nogil +from 
._agm cimport agm as _func_agm +ctypedef double _proto_agm_t(double, double) noexcept nogil +cdef _proto_agm_t *_proto_agm_t_var = &_func_agm +cdef extern from r"_ufuncs_defs.h": + cdef int _func_airy_wrap "airy_wrap"(double, double *, double *, double *, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef int _func_cairy_wrap "cairy_wrap"(double complex, double complex *, double complex *, double complex *, double complex *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef int _func_cairy_wrap_e "cairy_wrap_e"(double complex, double complex *, double complex *, double complex *, double complex *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef int _func_cairy_wrap_e_real "cairy_wrap_e_real"(double, double *, double *, double *, double *) noexcept nogil +from ._legacy cimport bdtr_unsafe as _func_bdtr_unsafe +ctypedef double _proto_bdtr_unsafe_t(double, double, double) noexcept nogil +cdef _proto_bdtr_unsafe_t *_proto_bdtr_unsafe_t_var = &_func_bdtr_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef double _func_bdtr "bdtr"(double, int, double) noexcept nogil +from ._legacy cimport bdtrc_unsafe as _func_bdtrc_unsafe +ctypedef double _proto_bdtrc_unsafe_t(double, double, double) noexcept nogil +cdef _proto_bdtrc_unsafe_t *_proto_bdtrc_unsafe_t_var = &_func_bdtrc_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef double _func_bdtrc "bdtrc"(double, int, double) noexcept nogil +from ._legacy cimport bdtri_unsafe as _func_bdtri_unsafe +ctypedef double _proto_bdtri_unsafe_t(double, double, double) noexcept nogil +cdef _proto_bdtri_unsafe_t *_proto_bdtri_unsafe_t_var = &_func_bdtri_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef double _func_bdtri "bdtri"(double, int, double) noexcept nogil +from ._cdflib_wrappers cimport bdtrik as _func_bdtrik +ctypedef double _proto_bdtrik_t(double, double, double) noexcept nogil +cdef _proto_bdtrik_t *_proto_bdtrik_t_var = &_func_bdtrik +from ._cdflib_wrappers cimport bdtrin as _func_bdtrin 
+ctypedef double _proto_bdtrin_t(double, double, double) noexcept nogil +cdef _proto_bdtrin_t *_proto_bdtrin_t_var = &_func_bdtrin +cdef extern from r"_ufuncs_defs.h": + cdef double _func_bei_wrap "bei_wrap"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_beip_wrap "beip_wrap"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_ber_wrap "ber_wrap"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_berp_wrap "berp_wrap"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_besselpoly "besselpoly"(double, double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_beta "beta"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_lbeta "lbeta"(double, double) noexcept nogil +from ._boxcox cimport boxcox as _func_boxcox +ctypedef double _proto_boxcox_t(double, double) noexcept nogil +cdef _proto_boxcox_t *_proto_boxcox_t_var = &_func_boxcox +from ._boxcox cimport boxcox1p as _func_boxcox1p +ctypedef double _proto_boxcox1p_t(double, double) noexcept nogil +cdef _proto_boxcox1p_t *_proto_boxcox1p_t_var = &_func_boxcox1p +cdef extern from r"_ufuncs_defs.h": + cdef double _func_btdtr "btdtr"(double, double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_incbi "incbi"(double, double, double) noexcept nogil +from ._cdflib_wrappers cimport btdtria as _func_btdtria +ctypedef double _proto_btdtria_t(double, double, double) noexcept nogil +cdef _proto_btdtria_t *_proto_btdtria_t_var = &_func_btdtria +from ._cdflib_wrappers cimport btdtrib as _func_btdtrib +ctypedef double _proto_btdtrib_t(double, double, double) noexcept nogil +cdef _proto_btdtrib_t *_proto_btdtrib_t_var = &_func_btdtrib +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cbrt "cbrt"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_chdtr "chdtr"(double, double) noexcept 
nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_chdtrc "chdtrc"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_chdtri "chdtri"(double, double) noexcept nogil +from ._cdflib_wrappers cimport chdtriv as _func_chdtriv +ctypedef double _proto_chdtriv_t(double, double) noexcept nogil +cdef _proto_chdtriv_t *_proto_chdtriv_t_var = &_func_chdtriv +from ._cdflib_wrappers cimport chndtr as _func_chndtr +ctypedef double _proto_chndtr_t(double, double, double) noexcept nogil +cdef _proto_chndtr_t *_proto_chndtr_t_var = &_func_chndtr +from ._cdflib_wrappers cimport chndtridf as _func_chndtridf +ctypedef double _proto_chndtridf_t(double, double, double) noexcept nogil +cdef _proto_chndtridf_t *_proto_chndtridf_t_var = &_func_chndtridf +from ._cdflib_wrappers cimport chndtrinc as _func_chndtrinc +ctypedef double _proto_chndtrinc_t(double, double, double) noexcept nogil +cdef _proto_chndtrinc_t *_proto_chndtrinc_t_var = &_func_chndtrinc +from ._cdflib_wrappers cimport chndtrix as _func_chndtrix +ctypedef double _proto_chndtrix_t(double, double, double) noexcept nogil +cdef _proto_chndtrix_t *_proto_chndtrix_t_var = &_func_chndtrix +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cosdg "cosdg"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cosm1 "cosm1"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cotdg "cotdg"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_ellpe "ellpe"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_ellie "ellie"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef int _func_ellpj "ellpj"(double, double, double *, double *, double *, double *) noexcept nogil +from ._ellipk cimport ellipk as _func_ellipk +ctypedef double _proto_ellipk_t(double) noexcept nogil +cdef _proto_ellipk_t *_proto_ellipk_t_var = &_func_ellipk +cdef extern from 
r"_ufuncs_defs.h": + cdef double _func_ellik "ellik"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_ellpk "ellpk"(double) noexcept nogil +from ._convex_analysis cimport entr as _func_entr +ctypedef double _proto_entr_t(double) noexcept nogil +cdef _proto_entr_t *_proto_entr_t_var = &_func_entr +cdef extern from r"_ufuncs_defs.h": + cdef double _func_erf "erf"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_erfc "erfc"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_erfcinv "erfcinv"(double) noexcept nogil +from .orthogonal_eval cimport eval_chebyc as _func_eval_chebyc +ctypedef double complex _proto_eval_chebyc_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_chebyc_double_complex__t *_proto_eval_chebyc_double_complex__t_var = &_func_eval_chebyc[double_complex] +from .orthogonal_eval cimport eval_chebyc as _func_eval_chebyc +ctypedef double _proto_eval_chebyc_double__t(double, double) noexcept nogil +cdef _proto_eval_chebyc_double__t *_proto_eval_chebyc_double__t_var = &_func_eval_chebyc[double] +from .orthogonal_eval cimport eval_chebyc_l as _func_eval_chebyc_l +ctypedef double _proto_eval_chebyc_l_t(long, double) noexcept nogil +cdef _proto_eval_chebyc_l_t *_proto_eval_chebyc_l_t_var = &_func_eval_chebyc_l +from .orthogonal_eval cimport eval_chebys as _func_eval_chebys +ctypedef double complex _proto_eval_chebys_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_chebys_double_complex__t *_proto_eval_chebys_double_complex__t_var = &_func_eval_chebys[double_complex] +from .orthogonal_eval cimport eval_chebys as _func_eval_chebys +ctypedef double _proto_eval_chebys_double__t(double, double) noexcept nogil +cdef _proto_eval_chebys_double__t *_proto_eval_chebys_double__t_var = &_func_eval_chebys[double] +from .orthogonal_eval cimport eval_chebys_l as _func_eval_chebys_l +ctypedef double _proto_eval_chebys_l_t(long, 
double) noexcept nogil +cdef _proto_eval_chebys_l_t *_proto_eval_chebys_l_t_var = &_func_eval_chebys_l +from .orthogonal_eval cimport eval_chebyt as _func_eval_chebyt +ctypedef double complex _proto_eval_chebyt_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_chebyt_double_complex__t *_proto_eval_chebyt_double_complex__t_var = &_func_eval_chebyt[double_complex] +from .orthogonal_eval cimport eval_chebyt as _func_eval_chebyt +ctypedef double _proto_eval_chebyt_double__t(double, double) noexcept nogil +cdef _proto_eval_chebyt_double__t *_proto_eval_chebyt_double__t_var = &_func_eval_chebyt[double] +from .orthogonal_eval cimport eval_chebyt_l as _func_eval_chebyt_l +ctypedef double _proto_eval_chebyt_l_t(long, double) noexcept nogil +cdef _proto_eval_chebyt_l_t *_proto_eval_chebyt_l_t_var = &_func_eval_chebyt_l +from .orthogonal_eval cimport eval_chebyu as _func_eval_chebyu +ctypedef double complex _proto_eval_chebyu_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_chebyu_double_complex__t *_proto_eval_chebyu_double_complex__t_var = &_func_eval_chebyu[double_complex] +from .orthogonal_eval cimport eval_chebyu as _func_eval_chebyu +ctypedef double _proto_eval_chebyu_double__t(double, double) noexcept nogil +cdef _proto_eval_chebyu_double__t *_proto_eval_chebyu_double__t_var = &_func_eval_chebyu[double] +from .orthogonal_eval cimport eval_chebyu_l as _func_eval_chebyu_l +ctypedef double _proto_eval_chebyu_l_t(long, double) noexcept nogil +cdef _proto_eval_chebyu_l_t *_proto_eval_chebyu_l_t_var = &_func_eval_chebyu_l +from .orthogonal_eval cimport eval_gegenbauer as _func_eval_gegenbauer +ctypedef double complex _proto_eval_gegenbauer_double_complex__t(double, double, double complex) noexcept nogil +cdef _proto_eval_gegenbauer_double_complex__t *_proto_eval_gegenbauer_double_complex__t_var = &_func_eval_gegenbauer[double_complex] +from .orthogonal_eval cimport eval_gegenbauer as _func_eval_gegenbauer +ctypedef double 
_proto_eval_gegenbauer_double__t(double, double, double) noexcept nogil +cdef _proto_eval_gegenbauer_double__t *_proto_eval_gegenbauer_double__t_var = &_func_eval_gegenbauer[double] +from .orthogonal_eval cimport eval_gegenbauer_l as _func_eval_gegenbauer_l +ctypedef double _proto_eval_gegenbauer_l_t(long, double, double) noexcept nogil +cdef _proto_eval_gegenbauer_l_t *_proto_eval_gegenbauer_l_t_var = &_func_eval_gegenbauer_l +from .orthogonal_eval cimport eval_genlaguerre as _func_eval_genlaguerre +ctypedef double complex _proto_eval_genlaguerre_double_complex__t(double, double, double complex) noexcept nogil +cdef _proto_eval_genlaguerre_double_complex__t *_proto_eval_genlaguerre_double_complex__t_var = &_func_eval_genlaguerre[double_complex] +from .orthogonal_eval cimport eval_genlaguerre as _func_eval_genlaguerre +ctypedef double _proto_eval_genlaguerre_double__t(double, double, double) noexcept nogil +cdef _proto_eval_genlaguerre_double__t *_proto_eval_genlaguerre_double__t_var = &_func_eval_genlaguerre[double] +from .orthogonal_eval cimport eval_genlaguerre_l as _func_eval_genlaguerre_l +ctypedef double _proto_eval_genlaguerre_l_t(long, double, double) noexcept nogil +cdef _proto_eval_genlaguerre_l_t *_proto_eval_genlaguerre_l_t_var = &_func_eval_genlaguerre_l +from .orthogonal_eval cimport eval_hermite as _func_eval_hermite +ctypedef double _proto_eval_hermite_t(long, double) noexcept nogil +cdef _proto_eval_hermite_t *_proto_eval_hermite_t_var = &_func_eval_hermite +from .orthogonal_eval cimport eval_hermitenorm as _func_eval_hermitenorm +ctypedef double _proto_eval_hermitenorm_t(long, double) noexcept nogil +cdef _proto_eval_hermitenorm_t *_proto_eval_hermitenorm_t_var = &_func_eval_hermitenorm +from .orthogonal_eval cimport eval_jacobi as _func_eval_jacobi +ctypedef double complex _proto_eval_jacobi_double_complex__t(double, double, double, double complex) noexcept nogil +cdef _proto_eval_jacobi_double_complex__t *_proto_eval_jacobi_double_complex__t_var 
= &_func_eval_jacobi[double_complex] +from .orthogonal_eval cimport eval_jacobi as _func_eval_jacobi +ctypedef double _proto_eval_jacobi_double__t(double, double, double, double) noexcept nogil +cdef _proto_eval_jacobi_double__t *_proto_eval_jacobi_double__t_var = &_func_eval_jacobi[double] +from .orthogonal_eval cimport eval_jacobi_l as _func_eval_jacobi_l +ctypedef double _proto_eval_jacobi_l_t(long, double, double, double) noexcept nogil +cdef _proto_eval_jacobi_l_t *_proto_eval_jacobi_l_t_var = &_func_eval_jacobi_l +from .orthogonal_eval cimport eval_laguerre as _func_eval_laguerre +ctypedef double complex _proto_eval_laguerre_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_laguerre_double_complex__t *_proto_eval_laguerre_double_complex__t_var = &_func_eval_laguerre[double_complex] +from .orthogonal_eval cimport eval_laguerre as _func_eval_laguerre +ctypedef double _proto_eval_laguerre_double__t(double, double) noexcept nogil +cdef _proto_eval_laguerre_double__t *_proto_eval_laguerre_double__t_var = &_func_eval_laguerre[double] +from .orthogonal_eval cimport eval_laguerre_l as _func_eval_laguerre_l +ctypedef double _proto_eval_laguerre_l_t(long, double) noexcept nogil +cdef _proto_eval_laguerre_l_t *_proto_eval_laguerre_l_t_var = &_func_eval_laguerre_l +from .orthogonal_eval cimport eval_legendre as _func_eval_legendre +ctypedef double complex _proto_eval_legendre_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_legendre_double_complex__t *_proto_eval_legendre_double_complex__t_var = &_func_eval_legendre[double_complex] +from .orthogonal_eval cimport eval_legendre as _func_eval_legendre +ctypedef double _proto_eval_legendre_double__t(double, double) noexcept nogil +cdef _proto_eval_legendre_double__t *_proto_eval_legendre_double__t_var = &_func_eval_legendre[double] +from .orthogonal_eval cimport eval_legendre_l as _func_eval_legendre_l +ctypedef double _proto_eval_legendre_l_t(long, double) noexcept nogil 
+cdef _proto_eval_legendre_l_t *_proto_eval_legendre_l_t_var = &_func_eval_legendre_l +from .orthogonal_eval cimport eval_sh_chebyt as _func_eval_sh_chebyt +ctypedef double complex _proto_eval_sh_chebyt_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_sh_chebyt_double_complex__t *_proto_eval_sh_chebyt_double_complex__t_var = &_func_eval_sh_chebyt[double_complex] +from .orthogonal_eval cimport eval_sh_chebyt as _func_eval_sh_chebyt +ctypedef double _proto_eval_sh_chebyt_double__t(double, double) noexcept nogil +cdef _proto_eval_sh_chebyt_double__t *_proto_eval_sh_chebyt_double__t_var = &_func_eval_sh_chebyt[double] +from .orthogonal_eval cimport eval_sh_chebyt_l as _func_eval_sh_chebyt_l +ctypedef double _proto_eval_sh_chebyt_l_t(long, double) noexcept nogil +cdef _proto_eval_sh_chebyt_l_t *_proto_eval_sh_chebyt_l_t_var = &_func_eval_sh_chebyt_l +from .orthogonal_eval cimport eval_sh_chebyu as _func_eval_sh_chebyu +ctypedef double complex _proto_eval_sh_chebyu_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_sh_chebyu_double_complex__t *_proto_eval_sh_chebyu_double_complex__t_var = &_func_eval_sh_chebyu[double_complex] +from .orthogonal_eval cimport eval_sh_chebyu as _func_eval_sh_chebyu +ctypedef double _proto_eval_sh_chebyu_double__t(double, double) noexcept nogil +cdef _proto_eval_sh_chebyu_double__t *_proto_eval_sh_chebyu_double__t_var = &_func_eval_sh_chebyu[double] +from .orthogonal_eval cimport eval_sh_chebyu_l as _func_eval_sh_chebyu_l +ctypedef double _proto_eval_sh_chebyu_l_t(long, double) noexcept nogil +cdef _proto_eval_sh_chebyu_l_t *_proto_eval_sh_chebyu_l_t_var = &_func_eval_sh_chebyu_l +from .orthogonal_eval cimport eval_sh_jacobi as _func_eval_sh_jacobi +ctypedef double complex _proto_eval_sh_jacobi_double_complex__t(double, double, double, double complex) noexcept nogil +cdef _proto_eval_sh_jacobi_double_complex__t *_proto_eval_sh_jacobi_double_complex__t_var = 
&_func_eval_sh_jacobi[double_complex] +from .orthogonal_eval cimport eval_sh_jacobi as _func_eval_sh_jacobi +ctypedef double _proto_eval_sh_jacobi_double__t(double, double, double, double) noexcept nogil +cdef _proto_eval_sh_jacobi_double__t *_proto_eval_sh_jacobi_double__t_var = &_func_eval_sh_jacobi[double] +from .orthogonal_eval cimport eval_sh_jacobi_l as _func_eval_sh_jacobi_l +ctypedef double _proto_eval_sh_jacobi_l_t(long, double, double, double) noexcept nogil +cdef _proto_eval_sh_jacobi_l_t *_proto_eval_sh_jacobi_l_t_var = &_func_eval_sh_jacobi_l +from .orthogonal_eval cimport eval_sh_legendre as _func_eval_sh_legendre +ctypedef double complex _proto_eval_sh_legendre_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_sh_legendre_double_complex__t *_proto_eval_sh_legendre_double_complex__t_var = &_func_eval_sh_legendre[double_complex] +from .orthogonal_eval cimport eval_sh_legendre as _func_eval_sh_legendre +ctypedef double _proto_eval_sh_legendre_double__t(double, double) noexcept nogil +cdef _proto_eval_sh_legendre_double__t *_proto_eval_sh_legendre_double__t_var = &_func_eval_sh_legendre[double] +from .orthogonal_eval cimport eval_sh_legendre_l as _func_eval_sh_legendre_l +ctypedef double _proto_eval_sh_legendre_l_t(long, double) noexcept nogil +cdef _proto_eval_sh_legendre_l_t *_proto_eval_sh_legendre_l_t_var = &_func_eval_sh_legendre_l +cdef extern from r"_ufuncs_defs.h": + cdef double complex _func_cexp1_wrap "cexp1_wrap"(double complex) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_exp1_wrap "exp1_wrap"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_exp10 "exp10"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_exp2 "exp2"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double complex _func_cexpi_wrap "cexpi_wrap"(double complex) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_expi_wrap 
"expi_wrap"(double) noexcept nogil +from ._cunity cimport cexpm1 as _func_cexpm1 +ctypedef double complex _proto_cexpm1_t(double complex) noexcept nogil +cdef _proto_cexpm1_t *_proto_cexpm1_t_var = &_func_cexpm1 +cdef extern from r"_ufuncs_defs.h": + cdef double _func_expm1 "expm1"(double) noexcept nogil +from ._legacy cimport expn_unsafe as _func_expn_unsafe +ctypedef double _proto_expn_unsafe_t(double, double) noexcept nogil +cdef _proto_expn_unsafe_t *_proto_expn_unsafe_t_var = &_func_expn_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef double _func_expn "expn"(int, double) noexcept nogil +from ._exprel cimport exprel as _func_exprel +ctypedef double _proto_exprel_t(double) noexcept nogil +cdef _proto_exprel_t *_proto_exprel_t_var = &_func_exprel +cdef extern from r"_ufuncs_defs.h": + cdef double _func_fdtr "fdtr"(double, double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_fdtrc "fdtrc"(double, double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_fdtri "fdtri"(double, double, double) noexcept nogil +from ._cdflib_wrappers cimport fdtridfd as _func_fdtridfd +ctypedef double _proto_fdtridfd_t(double, double, double) noexcept nogil +cdef _proto_fdtridfd_t *_proto_fdtridfd_t_var = &_func_fdtridfd +cdef extern from r"_ufuncs_defs.h": + cdef int _func_fresnl "fresnl"(double, double *, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef int _func_cfresnl_wrap "cfresnl_wrap"(double complex, double complex *, double complex *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_Gamma "Gamma"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_igam "igam"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_igamc "igamc"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_igamci "igamci"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef 
double _func_igami "igami"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_lgam "lgam"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_gammasgn "gammasgn"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_gdtr "gdtr"(double, double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_gdtrc "gdtrc"(double, double, double) noexcept nogil +from ._cdflib_wrappers cimport gdtria as _func_gdtria +ctypedef double _proto_gdtria_t(double, double, double) noexcept nogil +cdef _proto_gdtria_t *_proto_gdtria_t_var = &_func_gdtria +from ._cdflib_wrappers cimport gdtrib as _func_gdtrib +ctypedef double _proto_gdtrib_t(double, double, double) noexcept nogil +cdef _proto_gdtrib_t *_proto_gdtrib_t_var = &_func_gdtrib +from ._cdflib_wrappers cimport gdtrix as _func_gdtrix +ctypedef double _proto_gdtrix_t(double, double, double) noexcept nogil +cdef _proto_gdtrix_t *_proto_gdtrix_t_var = &_func_gdtrix +cdef extern from r"_ufuncs_defs.h": + cdef double complex _func_cbesh_wrap1 "cbesh_wrap1"(double, double complex) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double complex _func_cbesh_wrap1_e "cbesh_wrap1_e"(double, double complex) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double complex _func_cbesh_wrap2 "cbesh_wrap2"(double, double complex) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double complex _func_cbesh_wrap2_e "cbesh_wrap2_e"(double, double complex) noexcept nogil +from ._convex_analysis cimport huber as _func_huber +ctypedef double _proto_huber_t(double, double) noexcept nogil +cdef _proto_huber_t *_proto_huber_t_var = &_func_huber +from ._hyp0f1 cimport _hyp0f1_cmplx as _func__hyp0f1_cmplx +ctypedef double complex _proto__hyp0f1_cmplx_t(double, double complex) noexcept nogil +cdef _proto__hyp0f1_cmplx_t *_proto__hyp0f1_cmplx_t_var = &_func__hyp0f1_cmplx +from ._hyp0f1 cimport _hyp0f1_real as 
_func__hyp0f1_real +ctypedef double _proto__hyp0f1_real_t(double, double) noexcept nogil +cdef _proto__hyp0f1_real_t *_proto__hyp0f1_real_t_var = &_func__hyp0f1_real +cdef extern from r"_ufuncs_defs.h": + cdef double complex _func_chyp1f1_wrap "chyp1f1_wrap"(double, double, double complex) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_hyp2f1 "hyp2f1"(double, double, double, double) noexcept nogil +from ._hyp2f1 cimport hyp2f1_complex as _func_hyp2f1_complex +ctypedef double complex _proto_hyp2f1_complex_t(double, double, double, double complex) noexcept nogil +cdef _proto_hyp2f1_complex_t *_proto_hyp2f1_complex_t_var = &_func_hyp2f1_complex +from ._hypergeometric cimport hyperu as _func_hyperu +ctypedef double _proto_hyperu_t(double, double, double) noexcept nogil +cdef _proto_hyperu_t *_proto_hyperu_t_var = &_func_hyperu +cdef extern from r"_ufuncs_defs.h": + cdef double _func_i0 "i0"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_i0e "i0e"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_i1 "i1"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_i1e "i1e"(double) noexcept nogil +from ._boxcox cimport inv_boxcox as _func_inv_boxcox +ctypedef double _proto_inv_boxcox_t(double, double) noexcept nogil +cdef _proto_inv_boxcox_t *_proto_inv_boxcox_t_var = &_func_inv_boxcox +from ._boxcox cimport inv_boxcox1p as _func_inv_boxcox1p +ctypedef double _proto_inv_boxcox1p_t(double, double) noexcept nogil +cdef _proto_inv_boxcox1p_t *_proto_inv_boxcox1p_t_var = &_func_inv_boxcox1p +cdef extern from r"_ufuncs_defs.h": + cdef int _func_it2i0k0_wrap "it2i0k0_wrap"(double, double *, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef int _func_it2j0y0_wrap "it2j0y0_wrap"(double, double *, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_it2struve0_wrap "it2struve0_wrap"(double) noexcept nogil +cdef extern from 
r"_ufuncs_defs.h": + cdef int _func_itairy_wrap "itairy_wrap"(double, double *, double *, double *, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef int _func_it1i0k0_wrap "it1i0k0_wrap"(double, double *, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef int _func_it1j0y0_wrap "it1j0y0_wrap"(double, double *, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_itmodstruve0_wrap "itmodstruve0_wrap"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_itstruve0_wrap "itstruve0_wrap"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double complex _func_cbesi_wrap "cbesi_wrap"(double, double complex) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_iv "iv"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double complex _func_cbesi_wrap_e "cbesi_wrap_e"(double, double complex) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cbesi_wrap_e_real "cbesi_wrap_e_real"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_j0 "j0"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_j1 "j1"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double complex _func_cbesj_wrap "cbesj_wrap"(double, double complex) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cbesj_wrap_real "cbesj_wrap_real"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double complex _func_cbesj_wrap_e "cbesj_wrap_e"(double, double complex) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cbesj_wrap_e_real "cbesj_wrap_e_real"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_k0 "k0"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_k0e "k0e"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double 
_func_k1 "k1"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_k1e "k1e"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_kei_wrap "kei_wrap"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_keip_wrap "keip_wrap"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef int _func_kelvin_wrap "kelvin_wrap"(double, double complex *, double complex *, double complex *, double complex *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_ker_wrap "ker_wrap"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_kerp_wrap "kerp_wrap"(double) noexcept nogil +from ._convex_analysis cimport kl_div as _func_kl_div +ctypedef double _proto_kl_div_t(double, double) noexcept nogil +cdef _proto_kl_div_t *_proto_kl_div_t_var = &_func_kl_div +from ._legacy cimport kn_unsafe as _func_kn_unsafe +ctypedef double _proto_kn_unsafe_t(double, double) noexcept nogil +cdef _proto_kn_unsafe_t *_proto_kn_unsafe_t_var = &_func_kn_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cbesk_wrap_real_int "cbesk_wrap_real_int"(int, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_kolmogi "kolmogi"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_kolmogorov "kolmogorov"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double complex _func_cbesk_wrap "cbesk_wrap"(double, double complex) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cbesk_wrap_real "cbesk_wrap_real"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double complex _func_cbesk_wrap_e "cbesk_wrap_e"(double, double complex) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cbesk_wrap_e_real "cbesk_wrap_e_real"(double, double) noexcept nogil +from ._cunity cimport clog1p as _func_clog1p +ctypedef double complex 
_proto_clog1p_t(double complex) noexcept nogil +cdef _proto_clog1p_t *_proto_clog1p_t_var = &_func_clog1p +cdef extern from r"_ufuncs_defs.h": + cdef double _func_log1p "log1p"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_pmv_wrap "pmv_wrap"(double, double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cem_cva_wrap "cem_cva_wrap"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_sem_cva_wrap "sem_cva_wrap"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef int _func_cem_wrap "cem_wrap"(double, double, double, double *, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef int _func_mcm1_wrap "mcm1_wrap"(double, double, double, double *, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef int _func_mcm2_wrap "mcm2_wrap"(double, double, double, double *, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef int _func_msm1_wrap "msm1_wrap"(double, double, double, double *, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef int _func_msm2_wrap "msm2_wrap"(double, double, double, double *, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef int _func_sem_wrap "sem_wrap"(double, double, double, double *, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef int _func_modified_fresnel_minus_wrap "modified_fresnel_minus_wrap"(double, double complex *, double complex *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef int _func_modified_fresnel_plus_wrap "modified_fresnel_plus_wrap"(double, double complex *, double complex *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_struve_l "struve_l"(double, double) noexcept nogil +from ._legacy cimport nbdtr_unsafe as _func_nbdtr_unsafe +ctypedef double _proto_nbdtr_unsafe_t(double, double, double) noexcept nogil +cdef _proto_nbdtr_unsafe_t *_proto_nbdtr_unsafe_t_var = 
&_func_nbdtr_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef double _func_nbdtr "nbdtr"(int, int, double) noexcept nogil +from ._legacy cimport nbdtrc_unsafe as _func_nbdtrc_unsafe +ctypedef double _proto_nbdtrc_unsafe_t(double, double, double) noexcept nogil +cdef _proto_nbdtrc_unsafe_t *_proto_nbdtrc_unsafe_t_var = &_func_nbdtrc_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef double _func_nbdtrc "nbdtrc"(int, int, double) noexcept nogil +from ._legacy cimport nbdtri_unsafe as _func_nbdtri_unsafe +ctypedef double _proto_nbdtri_unsafe_t(double, double, double) noexcept nogil +cdef _proto_nbdtri_unsafe_t *_proto_nbdtri_unsafe_t_var = &_func_nbdtri_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef double _func_nbdtri "nbdtri"(int, int, double) noexcept nogil +from ._cdflib_wrappers cimport nbdtrik as _func_nbdtrik +ctypedef double _proto_nbdtrik_t(double, double, double) noexcept nogil +cdef _proto_nbdtrik_t *_proto_nbdtrik_t_var = &_func_nbdtrik +from ._cdflib_wrappers cimport nbdtrin as _func_nbdtrin +ctypedef double _proto_nbdtrin_t(double, double, double) noexcept nogil +cdef _proto_nbdtrin_t *_proto_nbdtrin_t_var = &_func_nbdtrin +from ._cdflib_wrappers cimport ncfdtr as _func_ncfdtr +ctypedef double _proto_ncfdtr_t(double, double, double, double) noexcept nogil +cdef _proto_ncfdtr_t *_proto_ncfdtr_t_var = &_func_ncfdtr +from ._cdflib_wrappers cimport ncfdtri as _func_ncfdtri +ctypedef double _proto_ncfdtri_t(double, double, double, double) noexcept nogil +cdef _proto_ncfdtri_t *_proto_ncfdtri_t_var = &_func_ncfdtri +from ._cdflib_wrappers cimport ncfdtridfd as _func_ncfdtridfd +ctypedef double _proto_ncfdtridfd_t(double, double, double, double) noexcept nogil +cdef _proto_ncfdtridfd_t *_proto_ncfdtridfd_t_var = &_func_ncfdtridfd +from ._cdflib_wrappers cimport ncfdtridfn as _func_ncfdtridfn +ctypedef double _proto_ncfdtridfn_t(double, double, double, double) noexcept nogil +cdef _proto_ncfdtridfn_t *_proto_ncfdtridfn_t_var = &_func_ncfdtridfn +from 
._cdflib_wrappers cimport ncfdtrinc as _func_ncfdtrinc +ctypedef double _proto_ncfdtrinc_t(double, double, double, double) noexcept nogil +cdef _proto_ncfdtrinc_t *_proto_ncfdtrinc_t_var = &_func_ncfdtrinc +from ._cdflib_wrappers cimport nctdtr as _func_nctdtr +ctypedef double _proto_nctdtr_t(double, double, double) noexcept nogil +cdef _proto_nctdtr_t *_proto_nctdtr_t_var = &_func_nctdtr +from ._cdflib_wrappers cimport nctdtridf as _func_nctdtridf +ctypedef double _proto_nctdtridf_t(double, double, double) noexcept nogil +cdef _proto_nctdtridf_t *_proto_nctdtridf_t_var = &_func_nctdtridf +from ._cdflib_wrappers cimport nctdtrinc as _func_nctdtrinc +ctypedef double _proto_nctdtrinc_t(double, double, double) noexcept nogil +cdef _proto_nctdtrinc_t *_proto_nctdtrinc_t_var = &_func_nctdtrinc +from ._cdflib_wrappers cimport nctdtrit as _func_nctdtrit +ctypedef double _proto_nctdtrit_t(double, double, double) noexcept nogil +cdef _proto_nctdtrit_t *_proto_nctdtrit_t_var = &_func_nctdtrit +cdef extern from r"_ufuncs_defs.h": + cdef double _func_ndtr "ndtr"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_ndtri "ndtri"(double) noexcept nogil +from ._ndtri_exp cimport ndtri_exp as _func_ndtri_exp +ctypedef double _proto_ndtri_exp_t(double) noexcept nogil +cdef _proto_ndtri_exp_t *_proto_ndtri_exp_t_var = &_func_ndtri_exp +from ._cdflib_wrappers cimport nrdtrimn as _func_nrdtrimn +ctypedef double _proto_nrdtrimn_t(double, double, double) noexcept nogil +cdef _proto_nrdtrimn_t *_proto_nrdtrimn_t_var = &_func_nrdtrimn +from ._cdflib_wrappers cimport nrdtrisd as _func_nrdtrisd +ctypedef double _proto_nrdtrisd_t(double, double, double) noexcept nogil +cdef _proto_nrdtrisd_t *_proto_nrdtrisd_t_var = &_func_nrdtrisd +cdef extern from r"_ufuncs_defs.h": + cdef double _func_oblate_aswfa_nocv_wrap "oblate_aswfa_nocv_wrap"(double, double, double, double, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef int _func_oblate_aswfa_wrap 
"oblate_aswfa_wrap"(double, double, double, double, double, double *, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_oblate_segv_wrap "oblate_segv_wrap"(double, double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_oblate_radial1_nocv_wrap "oblate_radial1_nocv_wrap"(double, double, double, double, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef int _func_oblate_radial1_wrap "oblate_radial1_wrap"(double, double, double, double, double, double *, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_oblate_radial2_nocv_wrap "oblate_radial2_nocv_wrap"(double, double, double, double, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef int _func_oblate_radial2_wrap "oblate_radial2_wrap"(double, double, double, double, double, double *, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_owens_t "owens_t"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef int _func_pbdv_wrap "pbdv_wrap"(double, double, double *, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef int _func_pbvv_wrap "pbvv_wrap"(double, double, double *, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef int _func_pbwa_wrap "pbwa_wrap"(double, double, double *, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_pdtr "pdtr"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_pdtrc "pdtrc"(double, double) noexcept nogil +from ._legacy cimport pdtri_unsafe as _func_pdtri_unsafe +ctypedef double _proto_pdtri_unsafe_t(double, double) noexcept nogil +cdef _proto_pdtri_unsafe_t *_proto_pdtri_unsafe_t_var = &_func_pdtri_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef double _func_pdtri "pdtri"(int, double) noexcept nogil +from ._cdflib_wrappers cimport pdtrik as _func_pdtrik +ctypedef double _proto_pdtrik_t(double, double) 
noexcept nogil +cdef _proto_pdtrik_t *_proto_pdtrik_t_var = &_func_pdtrik +cdef extern from r"_ufuncs_defs.h": + cdef double _func_poch "poch"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_prolate_aswfa_nocv_wrap "prolate_aswfa_nocv_wrap"(double, double, double, double, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef int _func_prolate_aswfa_wrap "prolate_aswfa_wrap"(double, double, double, double, double, double *, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_prolate_segv_wrap "prolate_segv_wrap"(double, double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_prolate_radial1_nocv_wrap "prolate_radial1_nocv_wrap"(double, double, double, double, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef int _func_prolate_radial1_wrap "prolate_radial1_wrap"(double, double, double, double, double, double *, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_prolate_radial2_nocv_wrap "prolate_radial2_nocv_wrap"(double, double, double, double, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef int _func_prolate_radial2_wrap "prolate_radial2_wrap"(double, double, double, double, double, double *, double *) noexcept nogil +from ._convex_analysis cimport pseudo_huber as _func_pseudo_huber +ctypedef double _proto_pseudo_huber_t(double, double) noexcept nogil +cdef _proto_pseudo_huber_t *_proto_pseudo_huber_t_var = &_func_pseudo_huber +cdef extern from r"_ufuncs_defs.h": + cdef double _func_radian "radian"(double, double, double) noexcept nogil +from ._convex_analysis cimport rel_entr as _func_rel_entr +ctypedef double _proto_rel_entr_t(double, double) noexcept nogil +cdef _proto_rel_entr_t *_proto_rel_entr_t_var = &_func_rel_entr +cdef extern from r"_ufuncs_defs.h": + cdef double _func_rgamma "rgamma"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_round 
"round"(double) noexcept nogil +from ._sici cimport cshichi as _func_cshichi +ctypedef int _proto_cshichi_t(double complex, double complex *, double complex *) noexcept nogil +cdef _proto_cshichi_t *_proto_cshichi_t_var = &_func_cshichi +cdef extern from r"_ufuncs_defs.h": + cdef int _func_shichi "shichi"(double, double *, double *) noexcept nogil +from ._sici cimport csici as _func_csici +ctypedef int _proto_csici_t(double complex, double complex *, double complex *) noexcept nogil +cdef _proto_csici_t *_proto_csici_t_var = &_func_csici +cdef extern from r"_ufuncs_defs.h": + cdef int _func_sici "sici"(double, double *, double *) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_sindg "sindg"(double) noexcept nogil +from ._legacy cimport smirnov_unsafe as _func_smirnov_unsafe +ctypedef double _proto_smirnov_unsafe_t(double, double) noexcept nogil +cdef _proto_smirnov_unsafe_t *_proto_smirnov_unsafe_t_var = &_func_smirnov_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef double _func_smirnov "smirnov"(int, double) noexcept nogil +from ._legacy cimport smirnovi_unsafe as _func_smirnovi_unsafe +ctypedef double _proto_smirnovi_unsafe_t(double, double) noexcept nogil +cdef _proto_smirnovi_unsafe_t *_proto_smirnovi_unsafe_t_var = &_func_smirnovi_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef double _func_smirnovi "smirnovi"(int, double) noexcept nogil +from ._spence cimport cspence as _func_cspence +ctypedef double complex _proto_cspence_t(double complex) noexcept nogil +cdef _proto_cspence_t *_proto_cspence_t_var = &_func_cspence +cdef extern from r"_ufuncs_defs.h": + cdef double _func_spence "spence"(double) noexcept nogil +from ._legacy cimport sph_harmonic_unsafe as _func_sph_harmonic_unsafe +ctypedef double complex _proto_sph_harmonic_unsafe_t(double, double, double, double) noexcept nogil +cdef _proto_sph_harmonic_unsafe_t *_proto_sph_harmonic_unsafe_t_var = &_func_sph_harmonic_unsafe +from .sph_harm cimport sph_harmonic as 
_func_sph_harmonic +ctypedef double complex _proto_sph_harmonic_t(int, int, double, double) noexcept nogil +cdef _proto_sph_harmonic_t *_proto_sph_harmonic_t_var = &_func_sph_harmonic +from ._cdflib_wrappers cimport stdtr as _func_stdtr +ctypedef double _proto_stdtr_t(double, double) noexcept nogil +cdef _proto_stdtr_t *_proto_stdtr_t_var = &_func_stdtr +from ._cdflib_wrappers cimport stdtridf as _func_stdtridf +ctypedef double _proto_stdtridf_t(double, double) noexcept nogil +cdef _proto_stdtridf_t *_proto_stdtridf_t_var = &_func_stdtridf +from ._cdflib_wrappers cimport stdtrit as _func_stdtrit +ctypedef double _proto_stdtrit_t(double, double) noexcept nogil +cdef _proto_stdtrit_t *_proto_stdtrit_t_var = &_func_stdtrit +cdef extern from r"_ufuncs_defs.h": + cdef double _func_struve_h "struve_h"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_tandg "tandg"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_tukeylambdacdf "tukeylambdacdf"(double, double) noexcept nogil +from ._wright_bessel cimport wright_bessel_scalar as _func_wright_bessel_scalar +ctypedef double _proto_wright_bessel_scalar_t(double, double, double) noexcept nogil +cdef _proto_wright_bessel_scalar_t *_proto_wright_bessel_scalar_t_var = &_func_wright_bessel_scalar +from ._xlogy cimport xlog1py as _func_xlog1py +ctypedef double _proto_xlog1py_double__t(double, double) noexcept nogil +cdef _proto_xlog1py_double__t *_proto_xlog1py_double__t_var = &_func_xlog1py[double] +from ._xlogy cimport xlog1py as _func_xlog1py +ctypedef double complex _proto_xlog1py_double_complex__t(double complex, double complex) noexcept nogil +cdef _proto_xlog1py_double_complex__t *_proto_xlog1py_double_complex__t_var = &_func_xlog1py[double_complex] +from ._xlogy cimport xlogy as _func_xlogy +ctypedef double _proto_xlogy_double__t(double, double) noexcept nogil +cdef _proto_xlogy_double__t *_proto_xlogy_double__t_var = &_func_xlogy[double] +from ._xlogy 
cimport xlogy as _func_xlogy +ctypedef double complex _proto_xlogy_double_complex__t(double complex, double complex) noexcept nogil +cdef _proto_xlogy_double_complex__t *_proto_xlogy_double_complex__t_var = &_func_xlogy[double_complex] +cdef extern from r"_ufuncs_defs.h": + cdef double _func_y0 "y0"(double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_y1 "y1"(double) noexcept nogil +from ._legacy cimport yn_unsafe as _func_yn_unsafe +ctypedef double _proto_yn_unsafe_t(double, double) noexcept nogil +cdef _proto_yn_unsafe_t *_proto_yn_unsafe_t_var = &_func_yn_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef double _func_yn "yn"(int, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double complex _func_cbesy_wrap "cbesy_wrap"(double, double complex) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cbesy_wrap_real "cbesy_wrap_real"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double complex _func_cbesy_wrap_e "cbesy_wrap_e"(double, double complex) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_cbesy_wrap_e_real "cbesy_wrap_e_real"(double, double) noexcept nogil +cdef extern from r"_ufuncs_defs.h": + cdef double _func_zetac "zetac"(double) noexcept nogil +cdef np.PyUFuncGenericFunction ufunc__cosine_cdf_loops[2] +cdef void *ufunc__cosine_cdf_ptr[4] +cdef void *ufunc__cosine_cdf_data[2] +cdef char ufunc__cosine_cdf_types[4] +cdef char *ufunc__cosine_cdf_doc = ( + "_cosine_cdf(x)\n" + "\n" + "Cumulative distribution function (CDF) of the cosine distribution::\n" + "\n" + " { 0, x < -pi\n" + " cdf(x) = { (pi + x + sin(x))/(2*pi), -pi <= x <= pi\n" + " { 1, x > pi\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " `x` must contain real numbers.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The cosine distribution CDF evaluated at `x`.") +ufunc__cosine_cdf_loops[0] = loop_d_d__As_f_f +ufunc__cosine_cdf_loops[1] = 
loop_d_d__As_d_d +ufunc__cosine_cdf_types[0] = NPY_FLOAT +ufunc__cosine_cdf_types[1] = NPY_FLOAT +ufunc__cosine_cdf_types[2] = NPY_DOUBLE +ufunc__cosine_cdf_types[3] = NPY_DOUBLE +ufunc__cosine_cdf_ptr[2*0] = _func_cosine_cdf +ufunc__cosine_cdf_ptr[2*0+1] = ("_cosine_cdf") +ufunc__cosine_cdf_ptr[2*1] = _func_cosine_cdf +ufunc__cosine_cdf_ptr[2*1+1] = ("_cosine_cdf") +ufunc__cosine_cdf_data[0] = &ufunc__cosine_cdf_ptr[2*0] +ufunc__cosine_cdf_data[1] = &ufunc__cosine_cdf_ptr[2*1] +_cosine_cdf = np.PyUFunc_FromFuncAndData(ufunc__cosine_cdf_loops, ufunc__cosine_cdf_data, ufunc__cosine_cdf_types, 2, 1, 1, 0, "_cosine_cdf", ufunc__cosine_cdf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__cosine_invcdf_loops[2] +cdef void *ufunc__cosine_invcdf_ptr[4] +cdef void *ufunc__cosine_invcdf_data[2] +cdef char ufunc__cosine_invcdf_types[4] +cdef char *ufunc__cosine_invcdf_doc = ( + "_cosine_invcdf(p)\n" + "\n" + "Inverse of the cumulative distribution function (CDF) of the cosine\n" + "distribution.\n" + "\n" + "The CDF of the cosine distribution is::\n" + "\n" + " cdf(x) = (pi + x + sin(x))/(2*pi)\n" + "\n" + "This function computes the inverse of cdf(x).\n" + "\n" + "Parameters\n" + "----------\n" + "p : array_like\n" + " `p` must contain real numbers in the interval ``0 <= p <= 1``.\n" + " `nan` is returned for values of `p` outside the interval [0, 1].\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The inverse of the cosine distribution CDF evaluated at `p`.") +ufunc__cosine_invcdf_loops[0] = loop_d_d__As_f_f +ufunc__cosine_invcdf_loops[1] = loop_d_d__As_d_d +ufunc__cosine_invcdf_types[0] = NPY_FLOAT +ufunc__cosine_invcdf_types[1] = NPY_FLOAT +ufunc__cosine_invcdf_types[2] = NPY_DOUBLE +ufunc__cosine_invcdf_types[3] = NPY_DOUBLE +ufunc__cosine_invcdf_ptr[2*0] = _func_cosine_invcdf +ufunc__cosine_invcdf_ptr[2*0+1] = ("_cosine_invcdf") +ufunc__cosine_invcdf_ptr[2*1] = _func_cosine_invcdf +ufunc__cosine_invcdf_ptr[2*1+1] = ("_cosine_invcdf") 
+ufunc__cosine_invcdf_data[0] = &ufunc__cosine_invcdf_ptr[2*0] +ufunc__cosine_invcdf_data[1] = &ufunc__cosine_invcdf_ptr[2*1] +_cosine_invcdf = np.PyUFunc_FromFuncAndData(ufunc__cosine_invcdf_loops, ufunc__cosine_invcdf_data, ufunc__cosine_invcdf_types, 2, 1, 1, 0, "_cosine_invcdf", ufunc__cosine_invcdf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__cospi_loops[4] +cdef void *ufunc__cospi_ptr[8] +cdef void *ufunc__cospi_data[4] +cdef char ufunc__cospi_types[8] +cdef char *ufunc__cospi_doc = ( + "Internal function, do not use.") +ufunc__cospi_loops[0] = loop_d_d__As_f_f +ufunc__cospi_loops[1] = loop_d_d__As_d_d +ufunc__cospi_loops[2] = loop_D_D__As_F_F +ufunc__cospi_loops[3] = loop_D_D__As_D_D +ufunc__cospi_types[0] = NPY_FLOAT +ufunc__cospi_types[1] = NPY_FLOAT +ufunc__cospi_types[2] = NPY_DOUBLE +ufunc__cospi_types[3] = NPY_DOUBLE +ufunc__cospi_types[4] = NPY_CFLOAT +ufunc__cospi_types[5] = NPY_CFLOAT +ufunc__cospi_types[6] = NPY_CDOUBLE +ufunc__cospi_types[7] = NPY_CDOUBLE +ufunc__cospi_ptr[2*0] = _func_cospi +ufunc__cospi_ptr[2*0+1] = ("_cospi") +ufunc__cospi_ptr[2*1] = _func_cospi +ufunc__cospi_ptr[2*1+1] = ("_cospi") +ufunc__cospi_ptr[2*2] = scipy.special._ufuncs_cxx._export_ccospi +ufunc__cospi_ptr[2*2+1] = ("_cospi") +ufunc__cospi_ptr[2*3] = scipy.special._ufuncs_cxx._export_ccospi +ufunc__cospi_ptr[2*3+1] = ("_cospi") +ufunc__cospi_data[0] = &ufunc__cospi_ptr[2*0] +ufunc__cospi_data[1] = &ufunc__cospi_ptr[2*1] +ufunc__cospi_data[2] = &ufunc__cospi_ptr[2*2] +ufunc__cospi_data[3] = &ufunc__cospi_ptr[2*3] +_cospi = np.PyUFunc_FromFuncAndData(ufunc__cospi_loops, ufunc__cospi_data, ufunc__cospi_types, 4, 1, 1, 0, "_cospi", ufunc__cospi_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__ellip_harm_loops[3] +cdef void *ufunc__ellip_harm_ptr[6] +cdef void *ufunc__ellip_harm_data[3] +cdef char ufunc__ellip_harm_types[24] +cdef char *ufunc__ellip_harm_doc = ( + "Internal function, use `ellip_harm` instead.") +ufunc__ellip_harm_loops[0] = loop_d_ddddddd__As_fffffff_f 
+ufunc__ellip_harm_loops[1] = loop_d_ddiiddd__As_ddllddd_d +ufunc__ellip_harm_loops[2] = loop_d_ddddddd__As_ddddddd_d +ufunc__ellip_harm_types[0] = NPY_FLOAT +ufunc__ellip_harm_types[1] = NPY_FLOAT +ufunc__ellip_harm_types[2] = NPY_FLOAT +ufunc__ellip_harm_types[3] = NPY_FLOAT +ufunc__ellip_harm_types[4] = NPY_FLOAT +ufunc__ellip_harm_types[5] = NPY_FLOAT +ufunc__ellip_harm_types[6] = NPY_FLOAT +ufunc__ellip_harm_types[7] = NPY_FLOAT +ufunc__ellip_harm_types[8] = NPY_DOUBLE +ufunc__ellip_harm_types[9] = NPY_DOUBLE +ufunc__ellip_harm_types[10] = NPY_LONG +ufunc__ellip_harm_types[11] = NPY_LONG +ufunc__ellip_harm_types[12] = NPY_DOUBLE +ufunc__ellip_harm_types[13] = NPY_DOUBLE +ufunc__ellip_harm_types[14] = NPY_DOUBLE +ufunc__ellip_harm_types[15] = NPY_DOUBLE +ufunc__ellip_harm_types[16] = NPY_DOUBLE +ufunc__ellip_harm_types[17] = NPY_DOUBLE +ufunc__ellip_harm_types[18] = NPY_DOUBLE +ufunc__ellip_harm_types[19] = NPY_DOUBLE +ufunc__ellip_harm_types[20] = NPY_DOUBLE +ufunc__ellip_harm_types[21] = NPY_DOUBLE +ufunc__ellip_harm_types[22] = NPY_DOUBLE +ufunc__ellip_harm_types[23] = NPY_DOUBLE +ufunc__ellip_harm_ptr[2*0] = _func_ellip_harmonic_unsafe +ufunc__ellip_harm_ptr[2*0+1] = ("_ellip_harm") +ufunc__ellip_harm_ptr[2*1] = _func_ellip_harmonic +ufunc__ellip_harm_ptr[2*1+1] = ("_ellip_harm") +ufunc__ellip_harm_ptr[2*2] = _func_ellip_harmonic_unsafe +ufunc__ellip_harm_ptr[2*2+1] = ("_ellip_harm") +ufunc__ellip_harm_data[0] = &ufunc__ellip_harm_ptr[2*0] +ufunc__ellip_harm_data[1] = &ufunc__ellip_harm_ptr[2*1] +ufunc__ellip_harm_data[2] = &ufunc__ellip_harm_ptr[2*2] +_ellip_harm = np.PyUFunc_FromFuncAndData(ufunc__ellip_harm_loops, ufunc__ellip_harm_data, ufunc__ellip_harm_types, 3, 7, 1, 0, "_ellip_harm", ufunc__ellip_harm_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__factorial_loops[2] +cdef void *ufunc__factorial_ptr[4] +cdef void *ufunc__factorial_data[2] +cdef char ufunc__factorial_types[4] +cdef char *ufunc__factorial_doc = ( + "Internal function, do not use.") 
+ufunc__factorial_loops[0] = loop_d_d__As_f_f +ufunc__factorial_loops[1] = loop_d_d__As_d_d +ufunc__factorial_types[0] = NPY_FLOAT +ufunc__factorial_types[1] = NPY_FLOAT +ufunc__factorial_types[2] = NPY_DOUBLE +ufunc__factorial_types[3] = NPY_DOUBLE +ufunc__factorial_ptr[2*0] = _func__factorial +ufunc__factorial_ptr[2*0+1] = ("_factorial") +ufunc__factorial_ptr[2*1] = _func__factorial +ufunc__factorial_ptr[2*1+1] = ("_factorial") +ufunc__factorial_data[0] = &ufunc__factorial_ptr[2*0] +ufunc__factorial_data[1] = &ufunc__factorial_ptr[2*1] +_factorial = np.PyUFunc_FromFuncAndData(ufunc__factorial_loops, ufunc__factorial_data, ufunc__factorial_types, 2, 1, 1, 0, "_factorial", ufunc__factorial_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__igam_fac_loops[2] +cdef void *ufunc__igam_fac_ptr[4] +cdef void *ufunc__igam_fac_data[2] +cdef char ufunc__igam_fac_types[6] +cdef char *ufunc__igam_fac_doc = ( + "Internal function, do not use.") +ufunc__igam_fac_loops[0] = loop_d_dd__As_ff_f +ufunc__igam_fac_loops[1] = loop_d_dd__As_dd_d +ufunc__igam_fac_types[0] = NPY_FLOAT +ufunc__igam_fac_types[1] = NPY_FLOAT +ufunc__igam_fac_types[2] = NPY_FLOAT +ufunc__igam_fac_types[3] = NPY_DOUBLE +ufunc__igam_fac_types[4] = NPY_DOUBLE +ufunc__igam_fac_types[5] = NPY_DOUBLE +ufunc__igam_fac_ptr[2*0] = _func_igam_fac +ufunc__igam_fac_ptr[2*0+1] = ("_igam_fac") +ufunc__igam_fac_ptr[2*1] = _func_igam_fac +ufunc__igam_fac_ptr[2*1+1] = ("_igam_fac") +ufunc__igam_fac_data[0] = &ufunc__igam_fac_ptr[2*0] +ufunc__igam_fac_data[1] = &ufunc__igam_fac_ptr[2*1] +_igam_fac = np.PyUFunc_FromFuncAndData(ufunc__igam_fac_loops, ufunc__igam_fac_data, ufunc__igam_fac_types, 2, 2, 1, 0, "_igam_fac", ufunc__igam_fac_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__kolmogc_loops[2] +cdef void *ufunc__kolmogc_ptr[4] +cdef void *ufunc__kolmogc_data[2] +cdef char ufunc__kolmogc_types[4] +cdef char *ufunc__kolmogc_doc = ( + "Internal function, do not use.") +ufunc__kolmogc_loops[0] = loop_d_d__As_f_f 
+ufunc__kolmogc_loops[1] = loop_d_d__As_d_d +ufunc__kolmogc_types[0] = NPY_FLOAT +ufunc__kolmogc_types[1] = NPY_FLOAT +ufunc__kolmogc_types[2] = NPY_DOUBLE +ufunc__kolmogc_types[3] = NPY_DOUBLE +ufunc__kolmogc_ptr[2*0] = _func_kolmogc +ufunc__kolmogc_ptr[2*0+1] = ("_kolmogc") +ufunc__kolmogc_ptr[2*1] = _func_kolmogc +ufunc__kolmogc_ptr[2*1+1] = ("_kolmogc") +ufunc__kolmogc_data[0] = &ufunc__kolmogc_ptr[2*0] +ufunc__kolmogc_data[1] = &ufunc__kolmogc_ptr[2*1] +_kolmogc = np.PyUFunc_FromFuncAndData(ufunc__kolmogc_loops, ufunc__kolmogc_data, ufunc__kolmogc_types, 2, 1, 1, 0, "_kolmogc", ufunc__kolmogc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__kolmogci_loops[2] +cdef void *ufunc__kolmogci_ptr[4] +cdef void *ufunc__kolmogci_data[2] +cdef char ufunc__kolmogci_types[4] +cdef char *ufunc__kolmogci_doc = ( + "Internal function, do not use.") +ufunc__kolmogci_loops[0] = loop_d_d__As_f_f +ufunc__kolmogci_loops[1] = loop_d_d__As_d_d +ufunc__kolmogci_types[0] = NPY_FLOAT +ufunc__kolmogci_types[1] = NPY_FLOAT +ufunc__kolmogci_types[2] = NPY_DOUBLE +ufunc__kolmogci_types[3] = NPY_DOUBLE +ufunc__kolmogci_ptr[2*0] = _func_kolmogci +ufunc__kolmogci_ptr[2*0+1] = ("_kolmogci") +ufunc__kolmogci_ptr[2*1] = _func_kolmogci +ufunc__kolmogci_ptr[2*1+1] = ("_kolmogci") +ufunc__kolmogci_data[0] = &ufunc__kolmogci_ptr[2*0] +ufunc__kolmogci_data[1] = &ufunc__kolmogci_ptr[2*1] +_kolmogci = np.PyUFunc_FromFuncAndData(ufunc__kolmogci_loops, ufunc__kolmogci_data, ufunc__kolmogci_types, 2, 1, 1, 0, "_kolmogci", ufunc__kolmogci_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__kolmogp_loops[2] +cdef void *ufunc__kolmogp_ptr[4] +cdef void *ufunc__kolmogp_data[2] +cdef char ufunc__kolmogp_types[4] +cdef char *ufunc__kolmogp_doc = ( + "Internal function, do not use.") +ufunc__kolmogp_loops[0] = loop_d_d__As_f_f +ufunc__kolmogp_loops[1] = loop_d_d__As_d_d +ufunc__kolmogp_types[0] = NPY_FLOAT +ufunc__kolmogp_types[1] = NPY_FLOAT +ufunc__kolmogp_types[2] = NPY_DOUBLE +ufunc__kolmogp_types[3] = 
NPY_DOUBLE +ufunc__kolmogp_ptr[2*0] = _func_kolmogp +ufunc__kolmogp_ptr[2*0+1] = ("_kolmogp") +ufunc__kolmogp_ptr[2*1] = _func_kolmogp +ufunc__kolmogp_ptr[2*1+1] = ("_kolmogp") +ufunc__kolmogp_data[0] = &ufunc__kolmogp_ptr[2*0] +ufunc__kolmogp_data[1] = &ufunc__kolmogp_ptr[2*1] +_kolmogp = np.PyUFunc_FromFuncAndData(ufunc__kolmogp_loops, ufunc__kolmogp_data, ufunc__kolmogp_types, 2, 1, 1, 0, "_kolmogp", ufunc__kolmogp_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__lambertw_loops[1] +cdef void *ufunc__lambertw_ptr[2] +cdef void *ufunc__lambertw_data[1] +cdef char ufunc__lambertw_types[4] +cdef char *ufunc__lambertw_doc = ( + "Internal function, use `lambertw` instead.") +ufunc__lambertw_loops[0] = loop_D_Dld__As_Dld_D +ufunc__lambertw_types[0] = NPY_CDOUBLE +ufunc__lambertw_types[1] = NPY_LONG +ufunc__lambertw_types[2] = NPY_DOUBLE +ufunc__lambertw_types[3] = NPY_CDOUBLE +ufunc__lambertw_ptr[2*0] = scipy.special._ufuncs_cxx._export_lambertw_scalar +ufunc__lambertw_ptr[2*0+1] = ("_lambertw") +ufunc__lambertw_data[0] = &ufunc__lambertw_ptr[2*0] +_lambertw = np.PyUFunc_FromFuncAndData(ufunc__lambertw_loops, ufunc__lambertw_data, ufunc__lambertw_types, 1, 3, 1, 0, "_lambertw", ufunc__lambertw_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__lanczos_sum_expg_scaled_loops[2] +cdef void *ufunc__lanczos_sum_expg_scaled_ptr[4] +cdef void *ufunc__lanczos_sum_expg_scaled_data[2] +cdef char ufunc__lanczos_sum_expg_scaled_types[4] +cdef char *ufunc__lanczos_sum_expg_scaled_doc = ( + "Internal function, do not use.") +ufunc__lanczos_sum_expg_scaled_loops[0] = loop_d_d__As_f_f +ufunc__lanczos_sum_expg_scaled_loops[1] = loop_d_d__As_d_d +ufunc__lanczos_sum_expg_scaled_types[0] = NPY_FLOAT +ufunc__lanczos_sum_expg_scaled_types[1] = NPY_FLOAT +ufunc__lanczos_sum_expg_scaled_types[2] = NPY_DOUBLE +ufunc__lanczos_sum_expg_scaled_types[3] = NPY_DOUBLE +ufunc__lanczos_sum_expg_scaled_ptr[2*0] = _func_lanczos_sum_expg_scaled +ufunc__lanczos_sum_expg_scaled_ptr[2*0+1] = 
("_lanczos_sum_expg_scaled") +ufunc__lanczos_sum_expg_scaled_ptr[2*1] = _func_lanczos_sum_expg_scaled +ufunc__lanczos_sum_expg_scaled_ptr[2*1+1] = ("_lanczos_sum_expg_scaled") +ufunc__lanczos_sum_expg_scaled_data[0] = &ufunc__lanczos_sum_expg_scaled_ptr[2*0] +ufunc__lanczos_sum_expg_scaled_data[1] = &ufunc__lanczos_sum_expg_scaled_ptr[2*1] +_lanczos_sum_expg_scaled = np.PyUFunc_FromFuncAndData(ufunc__lanczos_sum_expg_scaled_loops, ufunc__lanczos_sum_expg_scaled_data, ufunc__lanczos_sum_expg_scaled_types, 2, 1, 1, 0, "_lanczos_sum_expg_scaled", ufunc__lanczos_sum_expg_scaled_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__lgam1p_loops[2] +cdef void *ufunc__lgam1p_ptr[4] +cdef void *ufunc__lgam1p_data[2] +cdef char ufunc__lgam1p_types[4] +cdef char *ufunc__lgam1p_doc = ( + "Internal function, do not use.") +ufunc__lgam1p_loops[0] = loop_d_d__As_f_f +ufunc__lgam1p_loops[1] = loop_d_d__As_d_d +ufunc__lgam1p_types[0] = NPY_FLOAT +ufunc__lgam1p_types[1] = NPY_FLOAT +ufunc__lgam1p_types[2] = NPY_DOUBLE +ufunc__lgam1p_types[3] = NPY_DOUBLE +ufunc__lgam1p_ptr[2*0] = _func_lgam1p +ufunc__lgam1p_ptr[2*0+1] = ("_lgam1p") +ufunc__lgam1p_ptr[2*1] = _func_lgam1p +ufunc__lgam1p_ptr[2*1+1] = ("_lgam1p") +ufunc__lgam1p_data[0] = &ufunc__lgam1p_ptr[2*0] +ufunc__lgam1p_data[1] = &ufunc__lgam1p_ptr[2*1] +_lgam1p = np.PyUFunc_FromFuncAndData(ufunc__lgam1p_loops, ufunc__lgam1p_data, ufunc__lgam1p_types, 2, 1, 1, 0, "_lgam1p", ufunc__lgam1p_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__log1pmx_loops[2] +cdef void *ufunc__log1pmx_ptr[4] +cdef void *ufunc__log1pmx_data[2] +cdef char ufunc__log1pmx_types[4] +cdef char *ufunc__log1pmx_doc = ( + "Internal function, do not use.") +ufunc__log1pmx_loops[0] = loop_d_d__As_f_f +ufunc__log1pmx_loops[1] = loop_d_d__As_d_d +ufunc__log1pmx_types[0] = NPY_FLOAT +ufunc__log1pmx_types[1] = NPY_FLOAT +ufunc__log1pmx_types[2] = NPY_DOUBLE +ufunc__log1pmx_types[3] = NPY_DOUBLE +ufunc__log1pmx_ptr[2*0] = _func_log1pmx +ufunc__log1pmx_ptr[2*0+1] = 
("_log1pmx") +ufunc__log1pmx_ptr[2*1] = _func_log1pmx +ufunc__log1pmx_ptr[2*1+1] = ("_log1pmx") +ufunc__log1pmx_data[0] = &ufunc__log1pmx_ptr[2*0] +ufunc__log1pmx_data[1] = &ufunc__log1pmx_ptr[2*1] +_log1pmx = np.PyUFunc_FromFuncAndData(ufunc__log1pmx_loops, ufunc__log1pmx_data, ufunc__log1pmx_types, 2, 1, 1, 0, "_log1pmx", ufunc__log1pmx_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__riemann_zeta_loops[2] +cdef void *ufunc__riemann_zeta_ptr[4] +cdef void *ufunc__riemann_zeta_data[2] +cdef char ufunc__riemann_zeta_types[4] +cdef char *ufunc__riemann_zeta_doc = ( + "Internal function, use `zeta` instead.") +ufunc__riemann_zeta_loops[0] = loop_d_d__As_f_f +ufunc__riemann_zeta_loops[1] = loop_d_d__As_d_d +ufunc__riemann_zeta_types[0] = NPY_FLOAT +ufunc__riemann_zeta_types[1] = NPY_FLOAT +ufunc__riemann_zeta_types[2] = NPY_DOUBLE +ufunc__riemann_zeta_types[3] = NPY_DOUBLE +ufunc__riemann_zeta_ptr[2*0] = _func_riemann_zeta +ufunc__riemann_zeta_ptr[2*0+1] = ("_riemann_zeta") +ufunc__riemann_zeta_ptr[2*1] = _func_riemann_zeta +ufunc__riemann_zeta_ptr[2*1+1] = ("_riemann_zeta") +ufunc__riemann_zeta_data[0] = &ufunc__riemann_zeta_ptr[2*0] +ufunc__riemann_zeta_data[1] = &ufunc__riemann_zeta_ptr[2*1] +_riemann_zeta = np.PyUFunc_FromFuncAndData(ufunc__riemann_zeta_loops, ufunc__riemann_zeta_data, ufunc__riemann_zeta_types, 2, 1, 1, 0, "_riemann_zeta", ufunc__riemann_zeta_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__scaled_exp1_loops[2] +cdef void *ufunc__scaled_exp1_ptr[4] +cdef void *ufunc__scaled_exp1_data[2] +cdef char ufunc__scaled_exp1_types[4] +cdef char *ufunc__scaled_exp1_doc = ( + "_scaled_exp1(x, out=None):\n" + "\n" + "Compute the scaled exponential integral.\n" + "\n" + "This is a private function, subject to change or removal with no\n" + "deprecation.\n" + "\n" + "This function computes F(x), where F is the factor remaining in E_1(x)\n" + "when exp(-x)/x is factored out. 
That is,::\n" + "\n" + " E_1(x) = exp(-x)/x * F(x)\n" + "\n" + "or\n" + "\n" + " F(x) = x * exp(x) * E_1(x)\n" + "\n" + "The function is defined for real x >= 0. For x < 0, nan is returned.\n" + "\n" + "F has the properties:\n" + "\n" + "* F(0) = 0\n" + "* F(x) is increasing on [0, inf).\n" + "* The limit as x goes to infinity of F(x) is 1.\n" + "\n" + "Parameters\n" + "----------\n" + "x: array_like\n" + " The input values. Must be real. The implementation is limited to\n" + " double precision floating point, so other types will be cast to\n" + " to double precision.\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the scaled exponential integral.\n" + "\n" + "See Also\n" + "--------\n" + "exp1 : exponential integral E_1\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import _scaled_exp1\n" + ">>> _scaled_exp1([0, 0.1, 1, 10, 100])") +ufunc__scaled_exp1_loops[0] = loop_d_d__As_f_f +ufunc__scaled_exp1_loops[1] = loop_d_d__As_d_d +ufunc__scaled_exp1_types[0] = NPY_FLOAT +ufunc__scaled_exp1_types[1] = NPY_FLOAT +ufunc__scaled_exp1_types[2] = NPY_DOUBLE +ufunc__scaled_exp1_types[3] = NPY_DOUBLE +ufunc__scaled_exp1_ptr[2*0] = _func_scaled_exp1 +ufunc__scaled_exp1_ptr[2*0+1] = ("_scaled_exp1") +ufunc__scaled_exp1_ptr[2*1] = _func_scaled_exp1 +ufunc__scaled_exp1_ptr[2*1+1] = ("_scaled_exp1") +ufunc__scaled_exp1_data[0] = &ufunc__scaled_exp1_ptr[2*0] +ufunc__scaled_exp1_data[1] = &ufunc__scaled_exp1_ptr[2*1] +_scaled_exp1 = np.PyUFunc_FromFuncAndData(ufunc__scaled_exp1_loops, ufunc__scaled_exp1_data, ufunc__scaled_exp1_types, 2, 1, 1, 0, "_scaled_exp1", ufunc__scaled_exp1_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__sf_error_test_function_loops[1] +cdef void *ufunc__sf_error_test_function_ptr[2] +cdef void *ufunc__sf_error_test_function_data[1] +cdef char ufunc__sf_error_test_function_types[2] +cdef char *ufunc__sf_error_test_function_doc = 
( + "Private function; do not use.") +ufunc__sf_error_test_function_loops[0] = loop_i_i__As_l_l +ufunc__sf_error_test_function_types[0] = NPY_LONG +ufunc__sf_error_test_function_types[1] = NPY_LONG +ufunc__sf_error_test_function_ptr[2*0] = _func__sf_error_test_function +ufunc__sf_error_test_function_ptr[2*0+1] = ("_sf_error_test_function") +ufunc__sf_error_test_function_data[0] = &ufunc__sf_error_test_function_ptr[2*0] +_sf_error_test_function = np.PyUFunc_FromFuncAndData(ufunc__sf_error_test_function_loops, ufunc__sf_error_test_function_data, ufunc__sf_error_test_function_types, 1, 1, 1, 0, "_sf_error_test_function", ufunc__sf_error_test_function_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__sinpi_loops[4] +cdef void *ufunc__sinpi_ptr[8] +cdef void *ufunc__sinpi_data[4] +cdef char ufunc__sinpi_types[8] +cdef char *ufunc__sinpi_doc = ( + "Internal function, do not use.") +ufunc__sinpi_loops[0] = loop_d_d__As_f_f +ufunc__sinpi_loops[1] = loop_d_d__As_d_d +ufunc__sinpi_loops[2] = loop_D_D__As_F_F +ufunc__sinpi_loops[3] = loop_D_D__As_D_D +ufunc__sinpi_types[0] = NPY_FLOAT +ufunc__sinpi_types[1] = NPY_FLOAT +ufunc__sinpi_types[2] = NPY_DOUBLE +ufunc__sinpi_types[3] = NPY_DOUBLE +ufunc__sinpi_types[4] = NPY_CFLOAT +ufunc__sinpi_types[5] = NPY_CFLOAT +ufunc__sinpi_types[6] = NPY_CDOUBLE +ufunc__sinpi_types[7] = NPY_CDOUBLE +ufunc__sinpi_ptr[2*0] = _func_sinpi +ufunc__sinpi_ptr[2*0+1] = ("_sinpi") +ufunc__sinpi_ptr[2*1] = _func_sinpi +ufunc__sinpi_ptr[2*1+1] = ("_sinpi") +ufunc__sinpi_ptr[2*2] = scipy.special._ufuncs_cxx._export_csinpi +ufunc__sinpi_ptr[2*2+1] = ("_sinpi") +ufunc__sinpi_ptr[2*3] = scipy.special._ufuncs_cxx._export_csinpi +ufunc__sinpi_ptr[2*3+1] = ("_sinpi") +ufunc__sinpi_data[0] = &ufunc__sinpi_ptr[2*0] +ufunc__sinpi_data[1] = &ufunc__sinpi_ptr[2*1] +ufunc__sinpi_data[2] = &ufunc__sinpi_ptr[2*2] +ufunc__sinpi_data[3] = &ufunc__sinpi_ptr[2*3] +_sinpi = np.PyUFunc_FromFuncAndData(ufunc__sinpi_loops, ufunc__sinpi_data, ufunc__sinpi_types, 4, 1, 1, 0, 
"_sinpi", ufunc__sinpi_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__smirnovc_loops[3] +cdef void *ufunc__smirnovc_ptr[6] +cdef void *ufunc__smirnovc_data[3] +cdef char ufunc__smirnovc_types[9] +cdef char *ufunc__smirnovc_doc = ( + "_smirnovc(n, d)\n" + " Internal function, do not use.") +ufunc__smirnovc_loops[0] = loop_d_id__As_ld_d +ufunc__smirnovc_loops[1] = loop_d_dd__As_ff_f +ufunc__smirnovc_loops[2] = loop_d_dd__As_dd_d +ufunc__smirnovc_types[0] = NPY_LONG +ufunc__smirnovc_types[1] = NPY_DOUBLE +ufunc__smirnovc_types[2] = NPY_DOUBLE +ufunc__smirnovc_types[3] = NPY_FLOAT +ufunc__smirnovc_types[4] = NPY_FLOAT +ufunc__smirnovc_types[5] = NPY_FLOAT +ufunc__smirnovc_types[6] = NPY_DOUBLE +ufunc__smirnovc_types[7] = NPY_DOUBLE +ufunc__smirnovc_types[8] = NPY_DOUBLE +ufunc__smirnovc_ptr[2*0] = _func_smirnovc +ufunc__smirnovc_ptr[2*0+1] = ("_smirnovc") +ufunc__smirnovc_ptr[2*1] = _func_smirnovc_unsafe +ufunc__smirnovc_ptr[2*1+1] = ("_smirnovc") +ufunc__smirnovc_ptr[2*2] = _func_smirnovc_unsafe +ufunc__smirnovc_ptr[2*2+1] = ("_smirnovc") +ufunc__smirnovc_data[0] = &ufunc__smirnovc_ptr[2*0] +ufunc__smirnovc_data[1] = &ufunc__smirnovc_ptr[2*1] +ufunc__smirnovc_data[2] = &ufunc__smirnovc_ptr[2*2] +_smirnovc = np.PyUFunc_FromFuncAndData(ufunc__smirnovc_loops, ufunc__smirnovc_data, ufunc__smirnovc_types, 3, 2, 1, 0, "_smirnovc", ufunc__smirnovc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__smirnovci_loops[3] +cdef void *ufunc__smirnovci_ptr[6] +cdef void *ufunc__smirnovci_data[3] +cdef char ufunc__smirnovci_types[9] +cdef char *ufunc__smirnovci_doc = ( + "Internal function, do not use.") +ufunc__smirnovci_loops[0] = loop_d_id__As_ld_d +ufunc__smirnovci_loops[1] = loop_d_dd__As_ff_f +ufunc__smirnovci_loops[2] = loop_d_dd__As_dd_d +ufunc__smirnovci_types[0] = NPY_LONG +ufunc__smirnovci_types[1] = NPY_DOUBLE +ufunc__smirnovci_types[2] = NPY_DOUBLE +ufunc__smirnovci_types[3] = NPY_FLOAT +ufunc__smirnovci_types[4] = NPY_FLOAT +ufunc__smirnovci_types[5] = NPY_FLOAT 
+ufunc__smirnovci_types[6] = NPY_DOUBLE +ufunc__smirnovci_types[7] = NPY_DOUBLE +ufunc__smirnovci_types[8] = NPY_DOUBLE +ufunc__smirnovci_ptr[2*0] = _func_smirnovci +ufunc__smirnovci_ptr[2*0+1] = ("_smirnovci") +ufunc__smirnovci_ptr[2*1] = _func_smirnovci_unsafe +ufunc__smirnovci_ptr[2*1+1] = ("_smirnovci") +ufunc__smirnovci_ptr[2*2] = _func_smirnovci_unsafe +ufunc__smirnovci_ptr[2*2+1] = ("_smirnovci") +ufunc__smirnovci_data[0] = &ufunc__smirnovci_ptr[2*0] +ufunc__smirnovci_data[1] = &ufunc__smirnovci_ptr[2*1] +ufunc__smirnovci_data[2] = &ufunc__smirnovci_ptr[2*2] +_smirnovci = np.PyUFunc_FromFuncAndData(ufunc__smirnovci_loops, ufunc__smirnovci_data, ufunc__smirnovci_types, 3, 2, 1, 0, "_smirnovci", ufunc__smirnovci_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__smirnovp_loops[3] +cdef void *ufunc__smirnovp_ptr[6] +cdef void *ufunc__smirnovp_data[3] +cdef char ufunc__smirnovp_types[9] +cdef char *ufunc__smirnovp_doc = ( + "_smirnovp(n, p)\n" + " Internal function, do not use.") +ufunc__smirnovp_loops[0] = loop_d_id__As_ld_d +ufunc__smirnovp_loops[1] = loop_d_dd__As_ff_f +ufunc__smirnovp_loops[2] = loop_d_dd__As_dd_d +ufunc__smirnovp_types[0] = NPY_LONG +ufunc__smirnovp_types[1] = NPY_DOUBLE +ufunc__smirnovp_types[2] = NPY_DOUBLE +ufunc__smirnovp_types[3] = NPY_FLOAT +ufunc__smirnovp_types[4] = NPY_FLOAT +ufunc__smirnovp_types[5] = NPY_FLOAT +ufunc__smirnovp_types[6] = NPY_DOUBLE +ufunc__smirnovp_types[7] = NPY_DOUBLE +ufunc__smirnovp_types[8] = NPY_DOUBLE +ufunc__smirnovp_ptr[2*0] = _func_smirnovp +ufunc__smirnovp_ptr[2*0+1] = ("_smirnovp") +ufunc__smirnovp_ptr[2*1] = _func_smirnovp_unsafe +ufunc__smirnovp_ptr[2*1+1] = ("_smirnovp") +ufunc__smirnovp_ptr[2*2] = _func_smirnovp_unsafe +ufunc__smirnovp_ptr[2*2+1] = ("_smirnovp") +ufunc__smirnovp_data[0] = &ufunc__smirnovp_ptr[2*0] +ufunc__smirnovp_data[1] = &ufunc__smirnovp_ptr[2*1] +ufunc__smirnovp_data[2] = &ufunc__smirnovp_ptr[2*2] +_smirnovp = np.PyUFunc_FromFuncAndData(ufunc__smirnovp_loops, 
ufunc__smirnovp_data, ufunc__smirnovp_types, 3, 2, 1, 0, "_smirnovp", ufunc__smirnovp_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__spherical_in_loops[2] +cdef void *ufunc__spherical_in_ptr[4] +cdef void *ufunc__spherical_in_data[2] +cdef char ufunc__spherical_in_types[6] +cdef char *ufunc__spherical_in_doc = ( + "Internal function, use `spherical_in` instead.") +ufunc__spherical_in_loops[0] = loop_d_ld__As_ld_d +ufunc__spherical_in_loops[1] = loop_D_lD__As_lD_D +ufunc__spherical_in_types[0] = NPY_LONG +ufunc__spherical_in_types[1] = NPY_DOUBLE +ufunc__spherical_in_types[2] = NPY_DOUBLE +ufunc__spherical_in_types[3] = NPY_LONG +ufunc__spherical_in_types[4] = NPY_CDOUBLE +ufunc__spherical_in_types[5] = NPY_CDOUBLE +ufunc__spherical_in_ptr[2*0] = _func_spherical_in_real +ufunc__spherical_in_ptr[2*0+1] = ("_spherical_in") +ufunc__spherical_in_ptr[2*1] = _func_spherical_in_complex +ufunc__spherical_in_ptr[2*1+1] = ("_spherical_in") +ufunc__spherical_in_data[0] = &ufunc__spherical_in_ptr[2*0] +ufunc__spherical_in_data[1] = &ufunc__spherical_in_ptr[2*1] +_spherical_in = np.PyUFunc_FromFuncAndData(ufunc__spherical_in_loops, ufunc__spherical_in_data, ufunc__spherical_in_types, 2, 2, 1, 0, "_spherical_in", ufunc__spherical_in_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__spherical_in_d_loops[2] +cdef void *ufunc__spherical_in_d_ptr[4] +cdef void *ufunc__spherical_in_d_data[2] +cdef char ufunc__spherical_in_d_types[6] +cdef char *ufunc__spherical_in_d_doc = ( + "Internal function, use `spherical_in` instead.") +ufunc__spherical_in_d_loops[0] = loop_d_ld__As_ld_d +ufunc__spherical_in_d_loops[1] = loop_D_lD__As_lD_D +ufunc__spherical_in_d_types[0] = NPY_LONG +ufunc__spherical_in_d_types[1] = NPY_DOUBLE +ufunc__spherical_in_d_types[2] = NPY_DOUBLE +ufunc__spherical_in_d_types[3] = NPY_LONG +ufunc__spherical_in_d_types[4] = NPY_CDOUBLE +ufunc__spherical_in_d_types[5] = NPY_CDOUBLE +ufunc__spherical_in_d_ptr[2*0] = _func_spherical_in_d_real +ufunc__spherical_in_d_ptr[2*0+1] 
= ("_spherical_in_d") +ufunc__spherical_in_d_ptr[2*1] = _func_spherical_in_d_complex +ufunc__spherical_in_d_ptr[2*1+1] = ("_spherical_in_d") +ufunc__spherical_in_d_data[0] = &ufunc__spherical_in_d_ptr[2*0] +ufunc__spherical_in_d_data[1] = &ufunc__spherical_in_d_ptr[2*1] +_spherical_in_d = np.PyUFunc_FromFuncAndData(ufunc__spherical_in_d_loops, ufunc__spherical_in_d_data, ufunc__spherical_in_d_types, 2, 2, 1, 0, "_spherical_in_d", ufunc__spherical_in_d_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__spherical_jn_loops[2] +cdef void *ufunc__spherical_jn_ptr[4] +cdef void *ufunc__spherical_jn_data[2] +cdef char ufunc__spherical_jn_types[6] +cdef char *ufunc__spherical_jn_doc = ( + "Internal function, use `spherical_jn` instead.") +ufunc__spherical_jn_loops[0] = loop_d_ld__As_ld_d +ufunc__spherical_jn_loops[1] = loop_D_lD__As_lD_D +ufunc__spherical_jn_types[0] = NPY_LONG +ufunc__spherical_jn_types[1] = NPY_DOUBLE +ufunc__spherical_jn_types[2] = NPY_DOUBLE +ufunc__spherical_jn_types[3] = NPY_LONG +ufunc__spherical_jn_types[4] = NPY_CDOUBLE +ufunc__spherical_jn_types[5] = NPY_CDOUBLE +ufunc__spherical_jn_ptr[2*0] = _func_spherical_jn_real +ufunc__spherical_jn_ptr[2*0+1] = ("_spherical_jn") +ufunc__spherical_jn_ptr[2*1] = _func_spherical_jn_complex +ufunc__spherical_jn_ptr[2*1+1] = ("_spherical_jn") +ufunc__spherical_jn_data[0] = &ufunc__spherical_jn_ptr[2*0] +ufunc__spherical_jn_data[1] = &ufunc__spherical_jn_ptr[2*1] +_spherical_jn = np.PyUFunc_FromFuncAndData(ufunc__spherical_jn_loops, ufunc__spherical_jn_data, ufunc__spherical_jn_types, 2, 2, 1, 0, "_spherical_jn", ufunc__spherical_jn_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__spherical_jn_d_loops[2] +cdef void *ufunc__spherical_jn_d_ptr[4] +cdef void *ufunc__spherical_jn_d_data[2] +cdef char ufunc__spherical_jn_d_types[6] +cdef char *ufunc__spherical_jn_d_doc = ( + "Internal function, use `spherical_jn` instead.") +ufunc__spherical_jn_d_loops[0] = loop_d_ld__As_ld_d +ufunc__spherical_jn_d_loops[1] = 
loop_D_lD__As_lD_D +ufunc__spherical_jn_d_types[0] = NPY_LONG +ufunc__spherical_jn_d_types[1] = NPY_DOUBLE +ufunc__spherical_jn_d_types[2] = NPY_DOUBLE +ufunc__spherical_jn_d_types[3] = NPY_LONG +ufunc__spherical_jn_d_types[4] = NPY_CDOUBLE +ufunc__spherical_jn_d_types[5] = NPY_CDOUBLE +ufunc__spherical_jn_d_ptr[2*0] = _func_spherical_jn_d_real +ufunc__spherical_jn_d_ptr[2*0+1] = ("_spherical_jn_d") +ufunc__spherical_jn_d_ptr[2*1] = _func_spherical_jn_d_complex +ufunc__spherical_jn_d_ptr[2*1+1] = ("_spherical_jn_d") +ufunc__spherical_jn_d_data[0] = &ufunc__spherical_jn_d_ptr[2*0] +ufunc__spherical_jn_d_data[1] = &ufunc__spherical_jn_d_ptr[2*1] +_spherical_jn_d = np.PyUFunc_FromFuncAndData(ufunc__spherical_jn_d_loops, ufunc__spherical_jn_d_data, ufunc__spherical_jn_d_types, 2, 2, 1, 0, "_spherical_jn_d", ufunc__spherical_jn_d_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__spherical_kn_loops[2] +cdef void *ufunc__spherical_kn_ptr[4] +cdef void *ufunc__spherical_kn_data[2] +cdef char ufunc__spherical_kn_types[6] +cdef char *ufunc__spherical_kn_doc = ( + "Internal function, use `spherical_kn` instead.") +ufunc__spherical_kn_loops[0] = loop_d_ld__As_ld_d +ufunc__spherical_kn_loops[1] = loop_D_lD__As_lD_D +ufunc__spherical_kn_types[0] = NPY_LONG +ufunc__spherical_kn_types[1] = NPY_DOUBLE +ufunc__spherical_kn_types[2] = NPY_DOUBLE +ufunc__spherical_kn_types[3] = NPY_LONG +ufunc__spherical_kn_types[4] = NPY_CDOUBLE +ufunc__spherical_kn_types[5] = NPY_CDOUBLE +ufunc__spherical_kn_ptr[2*0] = _func_spherical_kn_real +ufunc__spherical_kn_ptr[2*0+1] = ("_spherical_kn") +ufunc__spherical_kn_ptr[2*1] = _func_spherical_kn_complex +ufunc__spherical_kn_ptr[2*1+1] = ("_spherical_kn") +ufunc__spherical_kn_data[0] = &ufunc__spherical_kn_ptr[2*0] +ufunc__spherical_kn_data[1] = &ufunc__spherical_kn_ptr[2*1] +_spherical_kn = np.PyUFunc_FromFuncAndData(ufunc__spherical_kn_loops, ufunc__spherical_kn_data, ufunc__spherical_kn_types, 2, 2, 1, 0, "_spherical_kn", ufunc__spherical_kn_doc, 0) 
+ +cdef np.PyUFuncGenericFunction ufunc__spherical_kn_d_loops[2] +cdef void *ufunc__spherical_kn_d_ptr[4] +cdef void *ufunc__spherical_kn_d_data[2] +cdef char ufunc__spherical_kn_d_types[6] +cdef char *ufunc__spherical_kn_d_doc = ( + "Internal function, use `spherical_kn` instead.") +ufunc__spherical_kn_d_loops[0] = loop_d_ld__As_ld_d +ufunc__spherical_kn_d_loops[1] = loop_D_lD__As_lD_D +ufunc__spherical_kn_d_types[0] = NPY_LONG +ufunc__spherical_kn_d_types[1] = NPY_DOUBLE +ufunc__spherical_kn_d_types[2] = NPY_DOUBLE +ufunc__spherical_kn_d_types[3] = NPY_LONG +ufunc__spherical_kn_d_types[4] = NPY_CDOUBLE +ufunc__spherical_kn_d_types[5] = NPY_CDOUBLE +ufunc__spherical_kn_d_ptr[2*0] = _func_spherical_kn_d_real +ufunc__spherical_kn_d_ptr[2*0+1] = ("_spherical_kn_d") +ufunc__spherical_kn_d_ptr[2*1] = _func_spherical_kn_d_complex +ufunc__spherical_kn_d_ptr[2*1+1] = ("_spherical_kn_d") +ufunc__spherical_kn_d_data[0] = &ufunc__spherical_kn_d_ptr[2*0] +ufunc__spherical_kn_d_data[1] = &ufunc__spherical_kn_d_ptr[2*1] +_spherical_kn_d = np.PyUFunc_FromFuncAndData(ufunc__spherical_kn_d_loops, ufunc__spherical_kn_d_data, ufunc__spherical_kn_d_types, 2, 2, 1, 0, "_spherical_kn_d", ufunc__spherical_kn_d_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__spherical_yn_loops[2] +cdef void *ufunc__spherical_yn_ptr[4] +cdef void *ufunc__spherical_yn_data[2] +cdef char ufunc__spherical_yn_types[6] +cdef char *ufunc__spherical_yn_doc = ( + "Internal function, use `spherical_yn` instead.") +ufunc__spherical_yn_loops[0] = loop_d_ld__As_ld_d +ufunc__spherical_yn_loops[1] = loop_D_lD__As_lD_D +ufunc__spherical_yn_types[0] = NPY_LONG +ufunc__spherical_yn_types[1] = NPY_DOUBLE +ufunc__spherical_yn_types[2] = NPY_DOUBLE +ufunc__spherical_yn_types[3] = NPY_LONG +ufunc__spherical_yn_types[4] = NPY_CDOUBLE +ufunc__spherical_yn_types[5] = NPY_CDOUBLE +ufunc__spherical_yn_ptr[2*0] = _func_spherical_yn_real +ufunc__spherical_yn_ptr[2*0+1] = ("_spherical_yn") +ufunc__spherical_yn_ptr[2*1] = 
_func_spherical_yn_complex +ufunc__spherical_yn_ptr[2*1+1] = ("_spherical_yn") +ufunc__spherical_yn_data[0] = &ufunc__spherical_yn_ptr[2*0] +ufunc__spherical_yn_data[1] = &ufunc__spherical_yn_ptr[2*1] +_spherical_yn = np.PyUFunc_FromFuncAndData(ufunc__spherical_yn_loops, ufunc__spherical_yn_data, ufunc__spherical_yn_types, 2, 2, 1, 0, "_spherical_yn", ufunc__spherical_yn_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__spherical_yn_d_loops[2] +cdef void *ufunc__spherical_yn_d_ptr[4] +cdef void *ufunc__spherical_yn_d_data[2] +cdef char ufunc__spherical_yn_d_types[6] +cdef char *ufunc__spherical_yn_d_doc = ( + "Internal function, use `spherical_yn` instead.") +ufunc__spherical_yn_d_loops[0] = loop_d_ld__As_ld_d +ufunc__spherical_yn_d_loops[1] = loop_D_lD__As_lD_D +ufunc__spherical_yn_d_types[0] = NPY_LONG +ufunc__spherical_yn_d_types[1] = NPY_DOUBLE +ufunc__spherical_yn_d_types[2] = NPY_DOUBLE +ufunc__spherical_yn_d_types[3] = NPY_LONG +ufunc__spherical_yn_d_types[4] = NPY_CDOUBLE +ufunc__spherical_yn_d_types[5] = NPY_CDOUBLE +ufunc__spherical_yn_d_ptr[2*0] = _func_spherical_yn_d_real +ufunc__spherical_yn_d_ptr[2*0+1] = ("_spherical_yn_d") +ufunc__spherical_yn_d_ptr[2*1] = _func_spherical_yn_d_complex +ufunc__spherical_yn_d_ptr[2*1+1] = ("_spherical_yn_d") +ufunc__spherical_yn_d_data[0] = &ufunc__spherical_yn_d_ptr[2*0] +ufunc__spherical_yn_d_data[1] = &ufunc__spherical_yn_d_ptr[2*1] +_spherical_yn_d = np.PyUFunc_FromFuncAndData(ufunc__spherical_yn_d_loops, ufunc__spherical_yn_d_data, ufunc__spherical_yn_d_types, 2, 2, 1, 0, "_spherical_yn_d", ufunc__spherical_yn_d_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__stirling2_inexact_loops[2] +cdef void *ufunc__stirling2_inexact_ptr[4] +cdef void *ufunc__stirling2_inexact_data[2] +cdef char ufunc__stirling2_inexact_types[6] +cdef char *ufunc__stirling2_inexact_doc = ( + "Internal function, do not use.") +ufunc__stirling2_inexact_loops[0] = loop_d_dd__As_ff_f +ufunc__stirling2_inexact_loops[1] = loop_d_dd__As_dd_d 
+ufunc__stirling2_inexact_types[0] = NPY_FLOAT +ufunc__stirling2_inexact_types[1] = NPY_FLOAT +ufunc__stirling2_inexact_types[2] = NPY_FLOAT +ufunc__stirling2_inexact_types[3] = NPY_DOUBLE +ufunc__stirling2_inexact_types[4] = NPY_DOUBLE +ufunc__stirling2_inexact_types[5] = NPY_DOUBLE +ufunc__stirling2_inexact_ptr[2*0] = scipy.special._ufuncs_cxx._export__stirling2_inexact +ufunc__stirling2_inexact_ptr[2*0+1] = ("_stirling2_inexact") +ufunc__stirling2_inexact_ptr[2*1] = scipy.special._ufuncs_cxx._export__stirling2_inexact +ufunc__stirling2_inexact_ptr[2*1+1] = ("_stirling2_inexact") +ufunc__stirling2_inexact_data[0] = &ufunc__stirling2_inexact_ptr[2*0] +ufunc__stirling2_inexact_data[1] = &ufunc__stirling2_inexact_ptr[2*1] +_stirling2_inexact = np.PyUFunc_FromFuncAndData(ufunc__stirling2_inexact_loops, ufunc__stirling2_inexact_data, ufunc__stirling2_inexact_types, 2, 2, 1, 0, "_stirling2_inexact", ufunc__stirling2_inexact_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__struve_asymp_large_z_loops[1] +cdef void *ufunc__struve_asymp_large_z_ptr[2] +cdef void *ufunc__struve_asymp_large_z_data[1] +cdef char ufunc__struve_asymp_large_z_types[5] +cdef char *ufunc__struve_asymp_large_z_doc = ( + "_struve_asymp_large_z(v, z, is_h)\n" + "\n" + "Internal function for testing `struve` & `modstruve`\n" + "\n" + "Evaluates using asymptotic expansion\n" + "\n" + "Returns\n" + "-------\n" + "v, err") +ufunc__struve_asymp_large_z_loops[0] = loop_d_ddi_d_As_ddl_dd +ufunc__struve_asymp_large_z_types[0] = NPY_DOUBLE +ufunc__struve_asymp_large_z_types[1] = NPY_DOUBLE +ufunc__struve_asymp_large_z_types[2] = NPY_LONG +ufunc__struve_asymp_large_z_types[3] = NPY_DOUBLE +ufunc__struve_asymp_large_z_types[4] = NPY_DOUBLE +ufunc__struve_asymp_large_z_ptr[2*0] = _func_struve_asymp_large_z +ufunc__struve_asymp_large_z_ptr[2*0+1] = ("_struve_asymp_large_z") +ufunc__struve_asymp_large_z_data[0] = &ufunc__struve_asymp_large_z_ptr[2*0] +_struve_asymp_large_z = 
np.PyUFunc_FromFuncAndData(ufunc__struve_asymp_large_z_loops, ufunc__struve_asymp_large_z_data, ufunc__struve_asymp_large_z_types, 1, 3, 2, 0, "_struve_asymp_large_z", ufunc__struve_asymp_large_z_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__struve_bessel_series_loops[1] +cdef void *ufunc__struve_bessel_series_ptr[2] +cdef void *ufunc__struve_bessel_series_data[1] +cdef char ufunc__struve_bessel_series_types[5] +cdef char *ufunc__struve_bessel_series_doc = ( + "_struve_bessel_series(v, z, is_h)\n" + "\n" + "Internal function for testing `struve` & `modstruve`\n" + "\n" + "Evaluates using Bessel function series\n" + "\n" + "Returns\n" + "-------\n" + "v, err") +ufunc__struve_bessel_series_loops[0] = loop_d_ddi_d_As_ddl_dd +ufunc__struve_bessel_series_types[0] = NPY_DOUBLE +ufunc__struve_bessel_series_types[1] = NPY_DOUBLE +ufunc__struve_bessel_series_types[2] = NPY_LONG +ufunc__struve_bessel_series_types[3] = NPY_DOUBLE +ufunc__struve_bessel_series_types[4] = NPY_DOUBLE +ufunc__struve_bessel_series_ptr[2*0] = _func_struve_bessel_series +ufunc__struve_bessel_series_ptr[2*0+1] = ("_struve_bessel_series") +ufunc__struve_bessel_series_data[0] = &ufunc__struve_bessel_series_ptr[2*0] +_struve_bessel_series = np.PyUFunc_FromFuncAndData(ufunc__struve_bessel_series_loops, ufunc__struve_bessel_series_data, ufunc__struve_bessel_series_types, 1, 3, 2, 0, "_struve_bessel_series", ufunc__struve_bessel_series_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__struve_power_series_loops[1] +cdef void *ufunc__struve_power_series_ptr[2] +cdef void *ufunc__struve_power_series_data[1] +cdef char ufunc__struve_power_series_types[5] +cdef char *ufunc__struve_power_series_doc = ( + "_struve_power_series(v, z, is_h)\n" + "\n" + "Internal function for testing `struve` & `modstruve`\n" + "\n" + "Evaluates using power series\n" + "\n" + "Returns\n" + "-------\n" + "v, err") +ufunc__struve_power_series_loops[0] = loop_d_ddi_d_As_ddl_dd +ufunc__struve_power_series_types[0] = NPY_DOUBLE 
+ufunc__struve_power_series_types[1] = NPY_DOUBLE +ufunc__struve_power_series_types[2] = NPY_LONG +ufunc__struve_power_series_types[3] = NPY_DOUBLE +ufunc__struve_power_series_types[4] = NPY_DOUBLE +ufunc__struve_power_series_ptr[2*0] = _func_struve_power_series +ufunc__struve_power_series_ptr[2*0+1] = ("_struve_power_series") +ufunc__struve_power_series_data[0] = &ufunc__struve_power_series_ptr[2*0] +_struve_power_series = np.PyUFunc_FromFuncAndData(ufunc__struve_power_series_loops, ufunc__struve_power_series_data, ufunc__struve_power_series_types, 1, 3, 2, 0, "_struve_power_series", ufunc__struve_power_series_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc__zeta_loops[2] +cdef void *ufunc__zeta_ptr[4] +cdef void *ufunc__zeta_data[2] +cdef char ufunc__zeta_types[6] +cdef char *ufunc__zeta_doc = ( + "_zeta(x, q)\n" + "\n" + "Internal function, Hurwitz zeta.") +ufunc__zeta_loops[0] = loop_d_dd__As_ff_f +ufunc__zeta_loops[1] = loop_d_dd__As_dd_d +ufunc__zeta_types[0] = NPY_FLOAT +ufunc__zeta_types[1] = NPY_FLOAT +ufunc__zeta_types[2] = NPY_FLOAT +ufunc__zeta_types[3] = NPY_DOUBLE +ufunc__zeta_types[4] = NPY_DOUBLE +ufunc__zeta_types[5] = NPY_DOUBLE +ufunc__zeta_ptr[2*0] = _func_zeta +ufunc__zeta_ptr[2*0+1] = ("_zeta") +ufunc__zeta_ptr[2*1] = _func_zeta +ufunc__zeta_ptr[2*1+1] = ("_zeta") +ufunc__zeta_data[0] = &ufunc__zeta_ptr[2*0] +ufunc__zeta_data[1] = &ufunc__zeta_ptr[2*1] +_zeta = np.PyUFunc_FromFuncAndData(ufunc__zeta_loops, ufunc__zeta_data, ufunc__zeta_types, 2, 2, 1, 0, "_zeta", ufunc__zeta_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_agm_loops[2] +cdef void *ufunc_agm_ptr[4] +cdef void *ufunc_agm_data[2] +cdef char ufunc_agm_types[6] +cdef char *ufunc_agm_doc = ( + "agm(a, b, out=None)\n" + "\n" + "Compute the arithmetic-geometric mean of `a` and `b`.\n" + "\n" + "Start with a_0 = a and b_0 = b and iteratively compute::\n" + "\n" + " a_{n+1} = (a_n + b_n)/2\n" + " b_{n+1} = sqrt(a_n*b_n)\n" + "\n" + "a_n and b_n converge to the same limit as n increases; 
their common\n" + "limit is agm(a, b).\n" + "\n" + "Parameters\n" + "----------\n" + "a, b : array_like\n" + " Real values only. If the values are both negative, the result\n" + " is negative. If one value is negative and the other is positive,\n" + " `nan` is returned.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The arithmetic-geometric mean of `a` and `b`.\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy.special import agm\n" + ">>> a, b = 24.0, 6.0\n" + ">>> agm(a, b)\n" + "13.458171481725614\n" + "\n" + "Compare that result to the iteration:\n" + "\n" + ">>> while a != b:\n" + "... a, b = (a + b)/2, np.sqrt(a*b)\n" + "... print(\"a = %19.16f b=%19.16f\" % (a, b))\n" + "...\n" + "a = 15.0000000000000000 b=12.0000000000000000\n" + "a = 13.5000000000000000 b=13.4164078649987388\n" + "a = 13.4582039324993694 b=13.4581390309909850\n" + "a = 13.4581714817451772 b=13.4581714817060547\n" + "a = 13.4581714817256159 b=13.4581714817256159\n" + "\n" + "When array-like arguments are given, broadcasting applies:\n" + "\n" + ">>> a = np.array([[1.5], [3], [6]]) # a has shape (3, 1).\n" + ">>> b = np.array([6, 12, 24, 48]) # b has shape (4,).\n" + ">>> agm(a, b)\n" + "array([[ 3.36454287, 5.42363427, 9.05798751, 15.53650756],\n" + " [ 4.37037309, 6.72908574, 10.84726853, 18.11597502],\n" + " [ 6. 
, 8.74074619, 13.45817148, 21.69453707]])") +ufunc_agm_loops[0] = loop_d_dd__As_ff_f +ufunc_agm_loops[1] = loop_d_dd__As_dd_d +ufunc_agm_types[0] = NPY_FLOAT +ufunc_agm_types[1] = NPY_FLOAT +ufunc_agm_types[2] = NPY_FLOAT +ufunc_agm_types[3] = NPY_DOUBLE +ufunc_agm_types[4] = NPY_DOUBLE +ufunc_agm_types[5] = NPY_DOUBLE +ufunc_agm_ptr[2*0] = _func_agm +ufunc_agm_ptr[2*0+1] = ("agm") +ufunc_agm_ptr[2*1] = _func_agm +ufunc_agm_ptr[2*1+1] = ("agm") +ufunc_agm_data[0] = &ufunc_agm_ptr[2*0] +ufunc_agm_data[1] = &ufunc_agm_ptr[2*1] +agm = np.PyUFunc_FromFuncAndData(ufunc_agm_loops, ufunc_agm_data, ufunc_agm_types, 2, 2, 1, 0, "agm", ufunc_agm_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_airy_loops[4] +cdef void *ufunc_airy_ptr[8] +cdef void *ufunc_airy_data[4] +cdef char ufunc_airy_types[20] +cdef char *ufunc_airy_doc = ( + "airy(z, out=None)\n" + "\n" + "Airy functions and their derivatives.\n" + "\n" + "Parameters\n" + "----------\n" + "z : array_like\n" + " Real or complex argument.\n" + "out : tuple of ndarray, optional\n" + " Optional output arrays for the function values\n" + "\n" + "Returns\n" + "-------\n" + "Ai, Aip, Bi, Bip : 4-tuple of scalar or ndarray\n" + " Airy functions Ai and Bi, and their derivatives Aip and Bip.\n" + "\n" + "See Also\n" + "--------\n" + "airye : exponentially scaled Airy functions.\n" + "\n" + "Notes\n" + "-----\n" + "The Airy functions Ai and Bi are two independent solutions of\n" + "\n" + ".. math:: y''(x) = x y(x).\n" + "\n" + "For real `z` in [-10, 10], the computation is carried out by calling\n" + "the Cephes [1]_ `airy` routine, which uses power series summation\n" + "for small `z` and rational minimax approximations for large `z`.\n" + "\n" + "Outside this range, the AMOS [2]_ `zairy` and `zbiry` routines are\n" + "employed. They are computed using power series for :math:`|z| < 1` and\n" + "the following relations to modified Bessel functions for larger `z`\n" + "(where :math:`t \\equiv 2 z^{3/2}/3`):\n" + "\n" + ".. 
math::\n" + "\n" + " Ai(z) = \\frac{1}{\\pi \\sqrt{3}} K_{1/3}(t)\n" + "\n" + " Ai'(z) = -\\frac{z}{\\pi \\sqrt{3}} K_{2/3}(t)\n" + "\n" + " Bi(z) = \\sqrt{\\frac{z}{3}} \\left(I_{-1/3}(t) + I_{1/3}(t) \\right)\n" + "\n" + " Bi'(z) = \\frac{z}{\\sqrt{3}} \\left(I_{-2/3}(t) + I_{2/3}(t)\\right)\n" + "\n" + "References\n" + "----------\n" + ".. [1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + ".. [2] Donald E. Amos, \"AMOS, A Portable Package for Bessel Functions\n" + " of a Complex Argument and Nonnegative Order\",\n" + " http://netlib.org/amos/\n" + "\n" + "Examples\n" + "--------\n" + "Compute the Airy functions on the interval [-15, 5].\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy import special\n" + ">>> x = np.linspace(-15, 5, 201)\n" + ">>> ai, aip, bi, bip = special.airy(x)\n" + "\n" + "Plot Ai(x) and Bi(x).\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> plt.plot(x, ai, 'r', label='Ai(x)')\n" + ">>> plt.plot(x, bi, 'b--', label='Bi(x)')\n" + ">>> plt.ylim(-0.5, 1.0)\n" + ">>> plt.grid()\n" + ">>> plt.legend(loc='upper left')\n" + ">>> plt.show()") +ufunc_airy_loops[0] = loop_i_d_dddd_As_f_ffff +ufunc_airy_loops[1] = loop_i_d_dddd_As_d_dddd +ufunc_airy_loops[2] = loop_i_D_DDDD_As_F_FFFF +ufunc_airy_loops[3] = loop_i_D_DDDD_As_D_DDDD +ufunc_airy_types[0] = NPY_FLOAT +ufunc_airy_types[1] = NPY_FLOAT +ufunc_airy_types[2] = NPY_FLOAT +ufunc_airy_types[3] = NPY_FLOAT +ufunc_airy_types[4] = NPY_FLOAT +ufunc_airy_types[5] = NPY_DOUBLE +ufunc_airy_types[6] = NPY_DOUBLE +ufunc_airy_types[7] = NPY_DOUBLE +ufunc_airy_types[8] = NPY_DOUBLE +ufunc_airy_types[9] = NPY_DOUBLE +ufunc_airy_types[10] = NPY_CFLOAT +ufunc_airy_types[11] = NPY_CFLOAT +ufunc_airy_types[12] = NPY_CFLOAT +ufunc_airy_types[13] = NPY_CFLOAT +ufunc_airy_types[14] = NPY_CFLOAT +ufunc_airy_types[15] = NPY_CDOUBLE +ufunc_airy_types[16] = NPY_CDOUBLE +ufunc_airy_types[17] = NPY_CDOUBLE +ufunc_airy_types[18] = NPY_CDOUBLE +ufunc_airy_types[19] = 
NPY_CDOUBLE +ufunc_airy_ptr[2*0] = _func_airy_wrap +ufunc_airy_ptr[2*0+1] = ("airy") +ufunc_airy_ptr[2*1] = _func_airy_wrap +ufunc_airy_ptr[2*1+1] = ("airy") +ufunc_airy_ptr[2*2] = _func_cairy_wrap +ufunc_airy_ptr[2*2+1] = ("airy") +ufunc_airy_ptr[2*3] = _func_cairy_wrap +ufunc_airy_ptr[2*3+1] = ("airy") +ufunc_airy_data[0] = &ufunc_airy_ptr[2*0] +ufunc_airy_data[1] = &ufunc_airy_ptr[2*1] +ufunc_airy_data[2] = &ufunc_airy_ptr[2*2] +ufunc_airy_data[3] = &ufunc_airy_ptr[2*3] +airy = np.PyUFunc_FromFuncAndData(ufunc_airy_loops, ufunc_airy_data, ufunc_airy_types, 4, 1, 4, 0, "airy", ufunc_airy_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_airye_loops[4] +cdef void *ufunc_airye_ptr[8] +cdef void *ufunc_airye_data[4] +cdef char ufunc_airye_types[20] +cdef char *ufunc_airye_doc = ( + "airye(z, out=None)\n" + "\n" + "Exponentially scaled Airy functions and their derivatives.\n" + "\n" + "Scaling::\n" + "\n" + " eAi = Ai * exp(2.0/3.0*z*sqrt(z))\n" + " eAip = Aip * exp(2.0/3.0*z*sqrt(z))\n" + " eBi = Bi * exp(-abs(2.0/3.0*(z*sqrt(z)).real))\n" + " eBip = Bip * exp(-abs(2.0/3.0*(z*sqrt(z)).real))\n" + "\n" + "Parameters\n" + "----------\n" + "z : array_like\n" + " Real or complex argument.\n" + "out : tuple of ndarray, optional\n" + " Optional output arrays for the function values\n" + "\n" + "Returns\n" + "-------\n" + "eAi, eAip, eBi, eBip : 4-tuple of scalar or ndarray\n" + " Exponentially scaled Airy functions eAi and eBi, and their derivatives\n" + " eAip and eBip\n" + "\n" + "See Also\n" + "--------\n" + "airy\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for the AMOS [1]_ routines `zairy` and `zbiry`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Donald E. 
Amos, \"AMOS, A Portable Package for Bessel Functions\n" + " of a Complex Argument and Nonnegative Order\",\n" + " http://netlib.org/amos/\n" + "\n" + "Examples\n" + "--------\n" + "We can compute exponentially scaled Airy functions and their derivatives:\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import airye\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> z = np.linspace(0, 50, 500)\n" + ">>> eAi, eAip, eBi, eBip = airye(z)\n" + ">>> f, ax = plt.subplots(2, 1, sharex=True)\n" + ">>> for ind, data in enumerate([[eAi, eAip, [\"eAi\", \"eAip\"]],\n" + "... [eBi, eBip, [\"eBi\", \"eBip\"]]]):\n" + "... ax[ind].plot(z, data[0], \"-r\", z, data[1], \"-b\")\n" + "... ax[ind].legend(data[2])\n" + "... ax[ind].grid(True)\n" + ">>> plt.show()\n" + "\n" + "We can compute these using usual non-scaled Airy functions by:\n" + "\n" + ">>> from scipy.special import airy\n" + ">>> Ai, Aip, Bi, Bip = airy(z)\n" + ">>> np.allclose(eAi, Ai * np.exp(2.0 / 3.0 * z * np.sqrt(z)))\n" + "True\n" + ">>> np.allclose(eAip, Aip * np.exp(2.0 / 3.0 * z * np.sqrt(z)))\n" + "True\n" + ">>> np.allclose(eBi, Bi * np.exp(-abs(np.real(2.0 / 3.0 * z * np.sqrt(z)))))\n" + "True\n" + ">>> np.allclose(eBip, Bip * np.exp(-abs(np.real(2.0 / 3.0 * z * np.sqrt(z)))))\n" + "True\n" + "\n" + "Comparing non-scaled and exponentially scaled ones, the usual non-scaled\n" + "function quickly underflows for large values, whereas the exponentially\n" + "scaled function does not.\n" + "\n" + ">>> airy(200)\n" + "(0.0, 0.0, nan, nan)\n" + ">>> airye(200)\n" + "(0.07501041684381093, -1.0609012305109042, 0.15003188417418148, 2.1215836725571093)") +ufunc_airye_loops[0] = loop_i_d_dddd_As_f_ffff +ufunc_airye_loops[1] = loop_i_d_dddd_As_d_dddd +ufunc_airye_loops[2] = loop_i_D_DDDD_As_F_FFFF +ufunc_airye_loops[3] = loop_i_D_DDDD_As_D_DDDD +ufunc_airye_types[0] = NPY_FLOAT +ufunc_airye_types[1] = NPY_FLOAT +ufunc_airye_types[2] = NPY_FLOAT +ufunc_airye_types[3] = NPY_FLOAT +ufunc_airye_types[4] = 
NPY_FLOAT +ufunc_airye_types[5] = NPY_DOUBLE +ufunc_airye_types[6] = NPY_DOUBLE +ufunc_airye_types[7] = NPY_DOUBLE +ufunc_airye_types[8] = NPY_DOUBLE +ufunc_airye_types[9] = NPY_DOUBLE +ufunc_airye_types[10] = NPY_CFLOAT +ufunc_airye_types[11] = NPY_CFLOAT +ufunc_airye_types[12] = NPY_CFLOAT +ufunc_airye_types[13] = NPY_CFLOAT +ufunc_airye_types[14] = NPY_CFLOAT +ufunc_airye_types[15] = NPY_CDOUBLE +ufunc_airye_types[16] = NPY_CDOUBLE +ufunc_airye_types[17] = NPY_CDOUBLE +ufunc_airye_types[18] = NPY_CDOUBLE +ufunc_airye_types[19] = NPY_CDOUBLE +ufunc_airye_ptr[2*0] = _func_cairy_wrap_e_real +ufunc_airye_ptr[2*0+1] = ("airye") +ufunc_airye_ptr[2*1] = _func_cairy_wrap_e_real +ufunc_airye_ptr[2*1+1] = ("airye") +ufunc_airye_ptr[2*2] = _func_cairy_wrap_e +ufunc_airye_ptr[2*2+1] = ("airye") +ufunc_airye_ptr[2*3] = _func_cairy_wrap_e +ufunc_airye_ptr[2*3+1] = ("airye") +ufunc_airye_data[0] = &ufunc_airye_ptr[2*0] +ufunc_airye_data[1] = &ufunc_airye_ptr[2*1] +ufunc_airye_data[2] = &ufunc_airye_ptr[2*2] +ufunc_airye_data[3] = &ufunc_airye_ptr[2*3] +airye = np.PyUFunc_FromFuncAndData(ufunc_airye_loops, ufunc_airye_data, ufunc_airye_types, 4, 1, 4, 0, "airye", ufunc_airye_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_bdtr_loops[3] +cdef void *ufunc_bdtr_ptr[6] +cdef void *ufunc_bdtr_data[3] +cdef char ufunc_bdtr_types[12] +cdef char *ufunc_bdtr_doc = ( + "bdtr(k, n, p, out=None)\n" + "\n" + "Binomial distribution cumulative distribution function.\n" + "\n" + "Sum of the terms 0 through `floor(k)` of the Binomial probability density.\n" + "\n" + ".. 
math::\n" + " \\mathrm{bdtr}(k, n, p) =\n" + " \\sum_{j=0}^{\\lfloor k \\rfloor} {{n}\\choose{j}} p^j (1-p)^{n-j}\n" + "\n" + "Parameters\n" + "----------\n" + "k : array_like\n" + " Number of successes (double), rounded down to the nearest integer.\n" + "n : array_like\n" + " Number of events (int).\n" + "p : array_like\n" + " Probability of success in a single event (float).\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "y : scalar or ndarray\n" + " Probability of `floor(k)` or fewer successes in `n` independent events with\n" + " success probabilities of `p`.\n" + "\n" + "Notes\n" + "-----\n" + "The terms are not summed directly; instead the regularized incomplete beta\n" + "function is employed, according to the formula,\n" + "\n" + ".. math::\n" + " \\mathrm{bdtr}(k, n, p) =\n" + " I_{1 - p}(n - \\lfloor k \\rfloor, \\lfloor k \\rfloor + 1).\n" + "\n" + "Wrapper for the Cephes [1]_ routine `bdtr`.\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/") +ufunc_bdtr_loops[0] = loop_d_ddd__As_fff_f +ufunc_bdtr_loops[1] = loop_d_did__As_dld_d +ufunc_bdtr_loops[2] = loop_d_ddd__As_ddd_d +ufunc_bdtr_types[0] = NPY_FLOAT +ufunc_bdtr_types[1] = NPY_FLOAT +ufunc_bdtr_types[2] = NPY_FLOAT +ufunc_bdtr_types[3] = NPY_FLOAT +ufunc_bdtr_types[4] = NPY_DOUBLE +ufunc_bdtr_types[5] = NPY_LONG +ufunc_bdtr_types[6] = NPY_DOUBLE +ufunc_bdtr_types[7] = NPY_DOUBLE +ufunc_bdtr_types[8] = NPY_DOUBLE +ufunc_bdtr_types[9] = NPY_DOUBLE +ufunc_bdtr_types[10] = NPY_DOUBLE +ufunc_bdtr_types[11] = NPY_DOUBLE +ufunc_bdtr_ptr[2*0] = _func_bdtr_unsafe +ufunc_bdtr_ptr[2*0+1] = ("bdtr") +ufunc_bdtr_ptr[2*1] = _func_bdtr +ufunc_bdtr_ptr[2*1+1] = ("bdtr") +ufunc_bdtr_ptr[2*2] = _func_bdtr_unsafe +ufunc_bdtr_ptr[2*2+1] = ("bdtr") +ufunc_bdtr_data[0] = &ufunc_bdtr_ptr[2*0] +ufunc_bdtr_data[1] = &ufunc_bdtr_ptr[2*1] +ufunc_bdtr_data[2] = &ufunc_bdtr_ptr[2*2] +bdtr = np.PyUFunc_FromFuncAndData(ufunc_bdtr_loops, ufunc_bdtr_data, ufunc_bdtr_types, 3, 3, 1, 0, "bdtr", ufunc_bdtr_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_bdtrc_loops[3] +cdef void *ufunc_bdtrc_ptr[6] +cdef void *ufunc_bdtrc_data[3] +cdef char ufunc_bdtrc_types[12] +cdef char *ufunc_bdtrc_doc = ( + "bdtrc(k, n, p, out=None)\n" + "\n" + "Binomial distribution survival function.\n" + "\n" + "Sum of the terms `floor(k) + 1` through `n` of the binomial probability\n" + "density,\n" + "\n" + ".. 
math::\n" + " \\mathrm{bdtrc}(k, n, p) =\n" + " \\sum_{j=\\lfloor k \\rfloor +1}^n {{n}\\choose{j}} p^j (1-p)^{n-j}\n" + "\n" + "Parameters\n" + "----------\n" + "k : array_like\n" + " Number of successes (double), rounded down to nearest integer.\n" + "n : array_like\n" + " Number of events (int)\n" + "p : array_like\n" + " Probability of success in a single event.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "y : scalar or ndarray\n" + " Probability of `floor(k) + 1` or more successes in `n` independent\n" + " events with success probabilities of `p`.\n" + "\n" + "See Also\n" + "--------\n" + "bdtr\n" + "betainc\n" + "\n" + "Notes\n" + "-----\n" + "The terms are not summed directly; instead the regularized incomplete beta\n" + "function is employed, according to the formula,\n" + "\n" + ".. math::\n" + " \\mathrm{bdtrc}(k, n, p) = I_{p}(\\lfloor k \\rfloor + 1, n - \\lfloor k \\rfloor).\n" + "\n" + "Wrapper for the Cephes [1]_ routine `bdtrc`.\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/") +ufunc_bdtrc_loops[0] = loop_d_ddd__As_fff_f +ufunc_bdtrc_loops[1] = loop_d_did__As_dld_d +ufunc_bdtrc_loops[2] = loop_d_ddd__As_ddd_d +ufunc_bdtrc_types[0] = NPY_FLOAT +ufunc_bdtrc_types[1] = NPY_FLOAT +ufunc_bdtrc_types[2] = NPY_FLOAT +ufunc_bdtrc_types[3] = NPY_FLOAT +ufunc_bdtrc_types[4] = NPY_DOUBLE +ufunc_bdtrc_types[5] = NPY_LONG +ufunc_bdtrc_types[6] = NPY_DOUBLE +ufunc_bdtrc_types[7] = NPY_DOUBLE +ufunc_bdtrc_types[8] = NPY_DOUBLE +ufunc_bdtrc_types[9] = NPY_DOUBLE +ufunc_bdtrc_types[10] = NPY_DOUBLE +ufunc_bdtrc_types[11] = NPY_DOUBLE +ufunc_bdtrc_ptr[2*0] = _func_bdtrc_unsafe +ufunc_bdtrc_ptr[2*0+1] = ("bdtrc") +ufunc_bdtrc_ptr[2*1] = _func_bdtrc +ufunc_bdtrc_ptr[2*1+1] = ("bdtrc") +ufunc_bdtrc_ptr[2*2] = _func_bdtrc_unsafe +ufunc_bdtrc_ptr[2*2+1] = ("bdtrc") +ufunc_bdtrc_data[0] = &ufunc_bdtrc_ptr[2*0] +ufunc_bdtrc_data[1] = &ufunc_bdtrc_ptr[2*1] +ufunc_bdtrc_data[2] = &ufunc_bdtrc_ptr[2*2] +bdtrc = np.PyUFunc_FromFuncAndData(ufunc_bdtrc_loops, ufunc_bdtrc_data, ufunc_bdtrc_types, 3, 3, 1, 0, "bdtrc", ufunc_bdtrc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_bdtri_loops[3] +cdef void *ufunc_bdtri_ptr[6] +cdef void *ufunc_bdtri_data[3] +cdef char ufunc_bdtri_types[12] +cdef char *ufunc_bdtri_doc = ( + "bdtri(k, n, y, out=None)\n" + "\n" + "Inverse function to `bdtr` with respect to `p`.\n" + "\n" + "Finds the event probability `p` such that the sum of the terms 0 through\n" + "`k` of the binomial probability density is equal to the given cumulative\n" + "probability `y`.\n" + "\n" + "Parameters\n" + "----------\n" + "k : array_like\n" + " Number of successes (float), rounded down to the nearest integer.\n" + "n : array_like\n" + " Number of events (float)\n" + "y : array_like\n" + " Cumulative probability (probability of `k` or fewer successes in `n`\n" + " events).\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" 
+ "-------\n" + "p : scalar or ndarray\n" + " The event probability such that `bdtr(\\lfloor k \\rfloor, n, p) = y`.\n" + "\n" + "See Also\n" + "--------\n" + "bdtr\n" + "betaincinv\n" + "\n" + "Notes\n" + "-----\n" + "The computation is carried out using the inverse beta integral function\n" + "and the relation,::\n" + "\n" + " 1 - p = betaincinv(n - k, k + 1, y).\n" + "\n" + "Wrapper for the Cephes [1]_ routine `bdtri`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/") +ufunc_bdtri_loops[0] = loop_d_ddd__As_fff_f +ufunc_bdtri_loops[1] = loop_d_did__As_dld_d +ufunc_bdtri_loops[2] = loop_d_ddd__As_ddd_d +ufunc_bdtri_types[0] = NPY_FLOAT +ufunc_bdtri_types[1] = NPY_FLOAT +ufunc_bdtri_types[2] = NPY_FLOAT +ufunc_bdtri_types[3] = NPY_FLOAT +ufunc_bdtri_types[4] = NPY_DOUBLE +ufunc_bdtri_types[5] = NPY_LONG +ufunc_bdtri_types[6] = NPY_DOUBLE +ufunc_bdtri_types[7] = NPY_DOUBLE +ufunc_bdtri_types[8] = NPY_DOUBLE +ufunc_bdtri_types[9] = NPY_DOUBLE +ufunc_bdtri_types[10] = NPY_DOUBLE +ufunc_bdtri_types[11] = NPY_DOUBLE +ufunc_bdtri_ptr[2*0] = _func_bdtri_unsafe +ufunc_bdtri_ptr[2*0+1] = ("bdtri") +ufunc_bdtri_ptr[2*1] = _func_bdtri +ufunc_bdtri_ptr[2*1+1] = ("bdtri") +ufunc_bdtri_ptr[2*2] = _func_bdtri_unsafe +ufunc_bdtri_ptr[2*2+1] = ("bdtri") +ufunc_bdtri_data[0] = &ufunc_bdtri_ptr[2*0] +ufunc_bdtri_data[1] = &ufunc_bdtri_ptr[2*1] +ufunc_bdtri_data[2] = &ufunc_bdtri_ptr[2*2] +bdtri = np.PyUFunc_FromFuncAndData(ufunc_bdtri_loops, ufunc_bdtri_data, ufunc_bdtri_types, 3, 3, 1, 0, "bdtri", ufunc_bdtri_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_bdtrik_loops[2] +cdef void *ufunc_bdtrik_ptr[4] +cdef void *ufunc_bdtrik_data[2] +cdef char ufunc_bdtrik_types[8] +cdef char *ufunc_bdtrik_doc = ( + "bdtrik(y, n, p, out=None)\n" + "\n" + "Inverse function to `bdtr` with respect to `k`.\n" + "\n" + "Finds the number of successes `k` such that the sum of the terms 0 through\n" + "`k` of the Binomial 
probability density for `n` events with probability\n" + "`p` is equal to the given cumulative probability `y`.\n" + "\n" + "Parameters\n" + "----------\n" + "y : array_like\n" + " Cumulative probability (probability of `k` or fewer successes in `n`\n" + " events).\n" + "n : array_like\n" + " Number of events (float).\n" + "p : array_like\n" + " Success probability (float).\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "k : scalar or ndarray\n" + " The number of successes `k` such that `bdtr(k, n, p) = y`.\n" + "\n" + "See Also\n" + "--------\n" + "bdtr\n" + "\n" + "Notes\n" + "-----\n" + "Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the\n" + "cumulative incomplete beta distribution.\n" + "\n" + "Computation of `k` involves a search for a value that produces the desired\n" + "value of `y`. The search relies on the monotonicity of `y` with `k`.\n" + "\n" + "Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.\n" + ".. 
[2] Barry Brown, James Lovato, and Kathy Russell,\n" + " CDFLIB: Library of Fortran Routines for Cumulative Distribution\n" + " Functions, Inverses, and Other Parameters.") +ufunc_bdtrik_loops[0] = loop_d_ddd__As_fff_f +ufunc_bdtrik_loops[1] = loop_d_ddd__As_ddd_d +ufunc_bdtrik_types[0] = NPY_FLOAT +ufunc_bdtrik_types[1] = NPY_FLOAT +ufunc_bdtrik_types[2] = NPY_FLOAT +ufunc_bdtrik_types[3] = NPY_FLOAT +ufunc_bdtrik_types[4] = NPY_DOUBLE +ufunc_bdtrik_types[5] = NPY_DOUBLE +ufunc_bdtrik_types[6] = NPY_DOUBLE +ufunc_bdtrik_types[7] = NPY_DOUBLE +ufunc_bdtrik_ptr[2*0] = _func_bdtrik +ufunc_bdtrik_ptr[2*0+1] = ("bdtrik") +ufunc_bdtrik_ptr[2*1] = _func_bdtrik +ufunc_bdtrik_ptr[2*1+1] = ("bdtrik") +ufunc_bdtrik_data[0] = &ufunc_bdtrik_ptr[2*0] +ufunc_bdtrik_data[1] = &ufunc_bdtrik_ptr[2*1] +bdtrik = np.PyUFunc_FromFuncAndData(ufunc_bdtrik_loops, ufunc_bdtrik_data, ufunc_bdtrik_types, 2, 3, 1, 0, "bdtrik", ufunc_bdtrik_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_bdtrin_loops[2] +cdef void *ufunc_bdtrin_ptr[4] +cdef void *ufunc_bdtrin_data[2] +cdef char ufunc_bdtrin_types[8] +cdef char *ufunc_bdtrin_doc = ( + "bdtrin(k, y, p, out=None)\n" + "\n" + "Inverse function to `bdtr` with respect to `n`.\n" + "\n" + "Finds the number of events `n` such that the sum of the terms 0 through\n" + "`k` of the Binomial probability density for events with probability `p` is\n" + "equal to the given cumulative probability `y`.\n" + "\n" + "Parameters\n" + "----------\n" + "k : array_like\n" + " Number of successes (float).\n" + "y : array_like\n" + " Cumulative probability (probability of `k` or fewer successes in `n`\n" + " events).\n" + "p : array_like\n" + " Success probability (float).\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "n : scalar or ndarray\n" + " The number of events `n` such that `bdtr(k, n, p) = y`.\n" + "\n" + "See Also\n" + "--------\n" + "bdtr\n" + "\n" + "Notes\n" + "-----\n" + "Formula 
26.5.24 of [1]_ is used to reduce the binomial distribution to the\n" + "cumulative incomplete beta distribution.\n" + "\n" + "Computation of `n` involves a search for a value that produces the desired\n" + "value of `y`. The search relies on the monotonicity of `y` with `n`.\n" + "\n" + "Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.\n" + ".. [2] Barry Brown, James Lovato, and Kathy Russell,\n" + " CDFLIB: Library of Fortran Routines for Cumulative Distribution\n" + " Functions, Inverses, and Other Parameters.") +ufunc_bdtrin_loops[0] = loop_d_ddd__As_fff_f +ufunc_bdtrin_loops[1] = loop_d_ddd__As_ddd_d +ufunc_bdtrin_types[0] = NPY_FLOAT +ufunc_bdtrin_types[1] = NPY_FLOAT +ufunc_bdtrin_types[2] = NPY_FLOAT +ufunc_bdtrin_types[3] = NPY_FLOAT +ufunc_bdtrin_types[4] = NPY_DOUBLE +ufunc_bdtrin_types[5] = NPY_DOUBLE +ufunc_bdtrin_types[6] = NPY_DOUBLE +ufunc_bdtrin_types[7] = NPY_DOUBLE +ufunc_bdtrin_ptr[2*0] = _func_bdtrin +ufunc_bdtrin_ptr[2*0+1] = ("bdtrin") +ufunc_bdtrin_ptr[2*1] = _func_bdtrin +ufunc_bdtrin_ptr[2*1+1] = ("bdtrin") +ufunc_bdtrin_data[0] = &ufunc_bdtrin_ptr[2*0] +ufunc_bdtrin_data[1] = &ufunc_bdtrin_ptr[2*1] +bdtrin = np.PyUFunc_FromFuncAndData(ufunc_bdtrin_loops, ufunc_bdtrin_data, ufunc_bdtrin_types, 2, 3, 1, 0, "bdtrin", ufunc_bdtrin_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_bei_loops[2] +cdef void *ufunc_bei_ptr[4] +cdef void *ufunc_bei_data[2] +cdef char ufunc_bei_types[4] +cdef char *ufunc_bei_doc = ( + "bei(x, out=None)\n" + "\n" + "Kelvin function bei.\n" + "\n" + "Defined as\n" + "\n" + ".. math::\n" + "\n" + " \\mathrm{bei}(x) = \\Im[J_0(x e^{3 \\pi i / 4})]\n" + "\n" + "where :math:`J_0` is the Bessel function of the first kind of\n" + "order zero (see `jv`). 
See [dlmf]_ for more details.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real argument.\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the Kelvin function.\n" + "\n" + "See Also\n" + "--------\n" + "ber : the corresponding real part\n" + "beip : the derivative of bei\n" + "jv : Bessel function of the first kind\n" + "\n" + "References\n" + "----------\n" + ".. [dlmf] NIST, Digital Library of Mathematical Functions,\n" + " https://dlmf.nist.gov/10.61\n" + "\n" + "Examples\n" + "--------\n" + "It can be expressed using Bessel functions.\n" + "\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + ">>> x = np.array([1.0, 2.0, 3.0, 4.0])\n" + ">>> sc.jv(0, x * np.exp(3 * np.pi * 1j / 4)).imag\n" + "array([0.24956604, 0.97229163, 1.93758679, 2.29269032])\n" + ">>> sc.bei(x)\n" + "array([0.24956604, 0.97229163, 1.93758679, 2.29269032])") +ufunc_bei_loops[0] = loop_d_d__As_f_f +ufunc_bei_loops[1] = loop_d_d__As_d_d +ufunc_bei_types[0] = NPY_FLOAT +ufunc_bei_types[1] = NPY_FLOAT +ufunc_bei_types[2] = NPY_DOUBLE +ufunc_bei_types[3] = NPY_DOUBLE +ufunc_bei_ptr[2*0] = _func_bei_wrap +ufunc_bei_ptr[2*0+1] = ("bei") +ufunc_bei_ptr[2*1] = _func_bei_wrap +ufunc_bei_ptr[2*1+1] = ("bei") +ufunc_bei_data[0] = &ufunc_bei_ptr[2*0] +ufunc_bei_data[1] = &ufunc_bei_ptr[2*1] +bei = np.PyUFunc_FromFuncAndData(ufunc_bei_loops, ufunc_bei_data, ufunc_bei_types, 2, 1, 1, 0, "bei", ufunc_bei_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_beip_loops[2] +cdef void *ufunc_beip_ptr[4] +cdef void *ufunc_beip_data[2] +cdef char ufunc_beip_types[4] +cdef char *ufunc_beip_doc = ( + "beip(x, out=None)\n" + "\n" + "Derivative of the Kelvin function bei.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real argument.\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + 
"-------\n" + "scalar or ndarray\n" + " The values of the derivative of bei.\n" + "\n" + "See Also\n" + "--------\n" + "bei\n" + "\n" + "References\n" + "----------\n" + ".. [dlmf] NIST, Digital Library of Mathematical Functions,\n" + " https://dlmf.nist.gov/10#PT5") +ufunc_beip_loops[0] = loop_d_d__As_f_f +ufunc_beip_loops[1] = loop_d_d__As_d_d +ufunc_beip_types[0] = NPY_FLOAT +ufunc_beip_types[1] = NPY_FLOAT +ufunc_beip_types[2] = NPY_DOUBLE +ufunc_beip_types[3] = NPY_DOUBLE +ufunc_beip_ptr[2*0] = _func_beip_wrap +ufunc_beip_ptr[2*0+1] = ("beip") +ufunc_beip_ptr[2*1] = _func_beip_wrap +ufunc_beip_ptr[2*1+1] = ("beip") +ufunc_beip_data[0] = &ufunc_beip_ptr[2*0] +ufunc_beip_data[1] = &ufunc_beip_ptr[2*1] +beip = np.PyUFunc_FromFuncAndData(ufunc_beip_loops, ufunc_beip_data, ufunc_beip_types, 2, 1, 1, 0, "beip", ufunc_beip_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_ber_loops[2] +cdef void *ufunc_ber_ptr[4] +cdef void *ufunc_ber_data[2] +cdef char ufunc_ber_types[4] +cdef char *ufunc_ber_doc = ( + "ber(x, out=None)\n" + "\n" + "Kelvin function ber.\n" + "\n" + "Defined as\n" + "\n" + ".. math::\n" + "\n" + " \\mathrm{ber}(x) = \\Re[J_0(x e^{3 \\pi i / 4})]\n" + "\n" + "where :math:`J_0` is the Bessel function of the first kind of\n" + "order zero (see `jv`). See [dlmf]_ for more details.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real argument.\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the Kelvin function.\n" + "\n" + "See Also\n" + "--------\n" + "bei : the corresponding real part\n" + "berp : the derivative of bei\n" + "jv : Bessel function of the first kind\n" + "\n" + "References\n" + "----------\n" + ".. 
[dlmf] NIST, Digital Library of Mathematical Functions,\n" + " https://dlmf.nist.gov/10.61\n" + "\n" + "Examples\n" + "--------\n" + "It can be expressed using Bessel functions.\n" + "\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + ">>> x = np.array([1.0, 2.0, 3.0, 4.0])\n" + ">>> sc.jv(0, x * np.exp(3 * np.pi * 1j / 4)).real\n" + "array([ 0.98438178, 0.75173418, -0.22138025, -2.56341656])\n" + ">>> sc.ber(x)\n" + "array([ 0.98438178, 0.75173418, -0.22138025, -2.56341656])") +ufunc_ber_loops[0] = loop_d_d__As_f_f +ufunc_ber_loops[1] = loop_d_d__As_d_d +ufunc_ber_types[0] = NPY_FLOAT +ufunc_ber_types[1] = NPY_FLOAT +ufunc_ber_types[2] = NPY_DOUBLE +ufunc_ber_types[3] = NPY_DOUBLE +ufunc_ber_ptr[2*0] = _func_ber_wrap +ufunc_ber_ptr[2*0+1] = ("ber") +ufunc_ber_ptr[2*1] = _func_ber_wrap +ufunc_ber_ptr[2*1+1] = ("ber") +ufunc_ber_data[0] = &ufunc_ber_ptr[2*0] +ufunc_ber_data[1] = &ufunc_ber_ptr[2*1] +ber = np.PyUFunc_FromFuncAndData(ufunc_ber_loops, ufunc_ber_data, ufunc_ber_types, 2, 1, 1, 0, "ber", ufunc_ber_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_berp_loops[2] +cdef void *ufunc_berp_ptr[4] +cdef void *ufunc_berp_data[2] +cdef char ufunc_berp_types[4] +cdef char *ufunc_berp_doc = ( + "berp(x, out=None)\n" + "\n" + "Derivative of the Kelvin function ber.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real argument.\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The values of the derivative of ber.\n" + "\n" + "See Also\n" + "--------\n" + "ber\n" + "\n" + "References\n" + "----------\n" + ".. 
[dlmf] NIST, Digital Library of Mathematical Functions,\n" + " https://dlmf.nist.gov/10#PT5") +ufunc_berp_loops[0] = loop_d_d__As_f_f +ufunc_berp_loops[1] = loop_d_d__As_d_d +ufunc_berp_types[0] = NPY_FLOAT +ufunc_berp_types[1] = NPY_FLOAT +ufunc_berp_types[2] = NPY_DOUBLE +ufunc_berp_types[3] = NPY_DOUBLE +ufunc_berp_ptr[2*0] = _func_berp_wrap +ufunc_berp_ptr[2*0+1] = ("berp") +ufunc_berp_ptr[2*1] = _func_berp_wrap +ufunc_berp_ptr[2*1+1] = ("berp") +ufunc_berp_data[0] = &ufunc_berp_ptr[2*0] +ufunc_berp_data[1] = &ufunc_berp_ptr[2*1] +berp = np.PyUFunc_FromFuncAndData(ufunc_berp_loops, ufunc_berp_data, ufunc_berp_types, 2, 1, 1, 0, "berp", ufunc_berp_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_besselpoly_loops[2] +cdef void *ufunc_besselpoly_ptr[4] +cdef void *ufunc_besselpoly_data[2] +cdef char ufunc_besselpoly_types[8] +cdef char *ufunc_besselpoly_doc = ( + "besselpoly(a, lmb, nu, out=None)\n" + "\n" + "Weighted integral of the Bessel function of the first kind.\n" + "\n" + "Computes\n" + "\n" + ".. math::\n" + "\n" + " \\int_0^1 x^\\lambda J_\\nu(2 a x) \\, dx\n" + "\n" + "where :math:`J_\\nu` is a Bessel function and :math:`\\lambda=lmb`,\n" + ":math:`\\nu=nu`.\n" + "\n" + "Parameters\n" + "----------\n" + "a : array_like\n" + " Scale factor inside the Bessel function.\n" + "lmb : array_like\n" + " Power of `x`\n" + "nu : array_like\n" + " Order of the Bessel function.\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Value of the integral.\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "Evaluate the function for one parameter set.\n" + "\n" + ">>> from scipy.special import besselpoly\n" + ">>> besselpoly(1, 1, 1)\n" + "0.24449718372863877\n" + "\n" + "Evaluate the function for different scale factors.\n" + "\n" + ">>> import numpy as np\n" + ">>> factors = np.array([0., 3., 6.])\n" + ">>> besselpoly(factors, 1, 1)\n" + "array([ 0. , -0.00549029, 0.00140174])\n" + "\n" + "Plot the function for varying powers, orders and scales.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> powers = np.linspace(0, 10, 100)\n" + ">>> orders = [1, 2, 3]\n" + ">>> scales = [1, 2]\n" + ">>> all_combinations = [(order, scale) for order in orders\n" + "... for scale in scales]\n" + ">>> for order, scale in all_combinations:\n" + "... ax.plot(powers, besselpoly(scale, powers, order),\n" + "... label=rf\"$\\nu={order}, a={scale}$\")\n" + ">>> ax.legend()\n" + ">>> ax.set_xlabel(r\"$\\lambda$\")\n" + ">>> ax.set_ylabel(r\"$\\int_0^1 x^{\\lambda} J_{\\nu}(2ax)\\,dx$\")\n" + ">>> plt.show()") +ufunc_besselpoly_loops[0] = loop_d_ddd__As_fff_f +ufunc_besselpoly_loops[1] = loop_d_ddd__As_ddd_d +ufunc_besselpoly_types[0] = NPY_FLOAT +ufunc_besselpoly_types[1] = NPY_FLOAT +ufunc_besselpoly_types[2] = NPY_FLOAT +ufunc_besselpoly_types[3] = NPY_FLOAT +ufunc_besselpoly_types[4] = NPY_DOUBLE +ufunc_besselpoly_types[5] = NPY_DOUBLE +ufunc_besselpoly_types[6] = NPY_DOUBLE +ufunc_besselpoly_types[7] = NPY_DOUBLE +ufunc_besselpoly_ptr[2*0] = _func_besselpoly +ufunc_besselpoly_ptr[2*0+1] = ("besselpoly") +ufunc_besselpoly_ptr[2*1] = _func_besselpoly +ufunc_besselpoly_ptr[2*1+1] = ("besselpoly") +ufunc_besselpoly_data[0] = &ufunc_besselpoly_ptr[2*0] +ufunc_besselpoly_data[1] = &ufunc_besselpoly_ptr[2*1] +besselpoly = np.PyUFunc_FromFuncAndData(ufunc_besselpoly_loops, ufunc_besselpoly_data, ufunc_besselpoly_types, 2, 3, 1, 
0, "besselpoly", ufunc_besselpoly_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_beta_loops[2] +cdef void *ufunc_beta_ptr[4] +cdef void *ufunc_beta_data[2] +cdef char ufunc_beta_types[6] +cdef char *ufunc_beta_doc = ( + "beta(a, b, out=None)\n" + "\n" + "Beta function.\n" + "\n" + "This function is defined in [1]_ as\n" + "\n" + ".. math::\n" + "\n" + " B(a, b) = \\int_0^1 t^{a-1}(1-t)^{b-1}dt\n" + " = \\frac{\\Gamma(a)\\Gamma(b)}{\\Gamma(a+b)},\n" + "\n" + "where :math:`\\Gamma` is the gamma function.\n" + "\n" + "Parameters\n" + "----------\n" + "a, b : array_like\n" + " Real-valued arguments\n" + "out : ndarray, optional\n" + " Optional output array for the function result\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Value of the beta function\n" + "\n" + "See Also\n" + "--------\n" + "gamma : the gamma function\n" + "betainc : the regularized incomplete beta function\n" + "betaln : the natural logarithm of the absolute\n" + " value of the beta function\n" + "\n" + "References\n" + "----------\n" + ".. [1] NIST Digital Library of Mathematical Functions,\n" + " Eq. 5.12.1. 
https://dlmf.nist.gov/5.12\n" + "\n" + "Examples\n" + "--------\n" + ">>> import scipy.special as sc\n" + "\n" + "The beta function relates to the gamma function by the\n" + "definition given above:\n" + "\n" + ">>> sc.beta(2, 3)\n" + "0.08333333333333333\n" + ">>> sc.gamma(2)*sc.gamma(3)/sc.gamma(2 + 3)\n" + "0.08333333333333333\n" + "\n" + "As this relationship demonstrates, the beta function\n" + "is symmetric:\n" + "\n" + ">>> sc.beta(1.7, 2.4)\n" + "0.16567527689031739\n" + ">>> sc.beta(2.4, 1.7)\n" + "0.16567527689031739\n" + "\n" + "This function satisfies :math:`B(1, b) = 1/b`:\n" + "\n" + ">>> sc.beta(1, 4)\n" + "0.25") +ufunc_beta_loops[0] = loop_d_dd__As_ff_f +ufunc_beta_loops[1] = loop_d_dd__As_dd_d +ufunc_beta_types[0] = NPY_FLOAT +ufunc_beta_types[1] = NPY_FLOAT +ufunc_beta_types[2] = NPY_FLOAT +ufunc_beta_types[3] = NPY_DOUBLE +ufunc_beta_types[4] = NPY_DOUBLE +ufunc_beta_types[5] = NPY_DOUBLE +ufunc_beta_ptr[2*0] = _func_beta +ufunc_beta_ptr[2*0+1] = ("beta") +ufunc_beta_ptr[2*1] = _func_beta +ufunc_beta_ptr[2*1+1] = ("beta") +ufunc_beta_data[0] = &ufunc_beta_ptr[2*0] +ufunc_beta_data[1] = &ufunc_beta_ptr[2*1] +beta = np.PyUFunc_FromFuncAndData(ufunc_beta_loops, ufunc_beta_data, ufunc_beta_types, 2, 2, 1, 0, "beta", ufunc_beta_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_betainc_loops[2] +cdef void *ufunc_betainc_ptr[4] +cdef void *ufunc_betainc_data[2] +cdef char ufunc_betainc_types[8] +cdef char *ufunc_betainc_doc = ( + "betainc(a, b, x, out=None)\n" + "\n" + "Regularized incomplete beta function.\n" + "\n" + "Computes the regularized incomplete beta function, defined as [1]_:\n" + "\n" + ".. 
math::\n" + "\n" + " I_x(a, b) = \\frac{\\Gamma(a+b)}{\\Gamma(a)\\Gamma(b)} \\int_0^x\n" + " t^{a-1}(1-t)^{b-1}dt,\n" + "\n" + "for :math:`0 \\leq x \\leq 1`.\n" + "\n" + "This function is the cumulative distribution function for the beta\n" + "distribution; its range is [0, 1].\n" + "\n" + "Parameters\n" + "----------\n" + "a, b : array_like\n" + " Positive, real-valued parameters\n" + "x : array_like\n" + " Real-valued such that :math:`0 \\leq x \\leq 1`,\n" + " the upper limit of integration\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Value of the regularized incomplete beta function\n" + "\n" + "See Also\n" + "--------\n" + "beta : beta function\n" + "betaincinv : inverse of the regularized incomplete beta function\n" + "betaincc : complement of the regularized incomplete beta function\n" + "scipy.stats.beta : beta distribution\n" + "\n" + "Notes\n" + "-----\n" + "The term *regularized* in the name of this function refers to the\n" + "scaling of the function by the gamma function terms shown in the\n" + "formula. When not qualified as *regularized*, the name *incomplete\n" + "beta function* often refers to just the integral expression,\n" + "without the gamma terms. One can use the function `beta` from\n" + "`scipy.special` to get this \"nonregularized\" incomplete beta\n" + "function by multiplying the result of ``betainc(a, b, x)`` by\n" + "``beta(a, b)``.\n" + "\n" + "References\n" + "----------\n" + ".. [1] NIST Digital Library of Mathematical Functions\n" + " https://dlmf.nist.gov/8.17\n" + "\n" + "Examples\n" + "--------\n" + "\n" + "Let :math:`B(a, b)` be the `beta` function.\n" + "\n" + ">>> import scipy.special as sc\n" + "\n" + "The coefficient in terms of `gamma` is equal to\n" + ":math:`1/B(a, b)`. 
Also, when :math:`x=1`\n" + "the integral is equal to :math:`B(a, b)`.\n" + "Therefore, :math:`I_{x=1}(a, b) = 1` for any :math:`a, b`.\n" + "\n" + ">>> sc.betainc(0.2, 3.5, 1.0)\n" + "1.0\n" + "\n" + "It satisfies\n" + ":math:`I_x(a, b) = x^a F(a, 1-b, a+1, x)/ (aB(a, b))`,\n" + "where :math:`F` is the hypergeometric function `hyp2f1`:\n" + "\n" + ">>> a, b, x = 1.4, 3.1, 0.5\n" + ">>> x**a * sc.hyp2f1(a, 1 - b, a + 1, x)/(a * sc.beta(a, b))\n" + "0.8148904036225295\n" + ">>> sc.betainc(a, b, x)\n" + "0.8148904036225296\n" + "\n" + "This functions satisfies the relationship\n" + ":math:`I_x(a, b) = 1 - I_{1-x}(b, a)`:\n" + "\n" + ">>> sc.betainc(2.2, 3.1, 0.4)\n" + "0.49339638807619446\n" + ">>> 1 - sc.betainc(3.1, 2.2, 1 - 0.4)\n" + "0.49339638807619446") +ufunc_betainc_loops[0] = loop_f_fff__As_fff_f +ufunc_betainc_loops[1] = loop_d_ddd__As_ddd_d +ufunc_betainc_types[0] = NPY_FLOAT +ufunc_betainc_types[1] = NPY_FLOAT +ufunc_betainc_types[2] = NPY_FLOAT +ufunc_betainc_types[3] = NPY_FLOAT +ufunc_betainc_types[4] = NPY_DOUBLE +ufunc_betainc_types[5] = NPY_DOUBLE +ufunc_betainc_types[6] = NPY_DOUBLE +ufunc_betainc_types[7] = NPY_DOUBLE +ufunc_betainc_ptr[2*0] = scipy.special._ufuncs_cxx._export_ibeta_float +ufunc_betainc_ptr[2*0+1] = ("betainc") +ufunc_betainc_ptr[2*1] = scipy.special._ufuncs_cxx._export_ibeta_double +ufunc_betainc_ptr[2*1+1] = ("betainc") +ufunc_betainc_data[0] = &ufunc_betainc_ptr[2*0] +ufunc_betainc_data[1] = &ufunc_betainc_ptr[2*1] +betainc = np.PyUFunc_FromFuncAndData(ufunc_betainc_loops, ufunc_betainc_data, ufunc_betainc_types, 2, 3, 1, 0, "betainc", ufunc_betainc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_betaincc_loops[2] +cdef void *ufunc_betaincc_ptr[4] +cdef void *ufunc_betaincc_data[2] +cdef char ufunc_betaincc_types[8] +cdef char *ufunc_betaincc_doc = ( + "betaincc(a, b, x, out=None)\n" + "\n" + "Complement of the regularized incomplete beta function.\n" + "\n" + "Computes the complement of the regularized incomplete beta 
function,\n" + "defined as [1]_:\n" + "\n" + ".. math::\n" + "\n" + " \\bar{I}_x(a, b) = 1 - I_x(a, b)\n" + " = 1 - \\frac{\\Gamma(a+b)}{\\Gamma(a)\\Gamma(b)} \\int_0^x\n" + " t^{a-1}(1-t)^{b-1}dt,\n" + "\n" + "for :math:`0 \\leq x \\leq 1`.\n" + "\n" + "Parameters\n" + "----------\n" + "a, b : array_like\n" + " Positive, real-valued parameters\n" + "x : array_like\n" + " Real-valued such that :math:`0 \\leq x \\leq 1`,\n" + " the upper limit of integration\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Value of the regularized incomplete beta function\n" + "\n" + "See Also\n" + "--------\n" + "betainc : regularized incomplete beta function\n" + "betaincinv : inverse of the regularized incomplete beta function\n" + "betainccinv :\n" + " inverse of the complement of the regularized incomplete beta function\n" + "beta : beta function\n" + "scipy.stats.beta : beta distribution\n" + "\n" + "Notes\n" + "-----\n" + ".. versionadded:: 1.11.0\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] NIST Digital Library of Mathematical Functions\n" + " https://dlmf.nist.gov/8.17\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import betaincc, betainc\n" + "\n" + "The naive calculation ``1 - betainc(a, b, x)`` loses precision when\n" + "the values of ``betainc(a, b, x)`` are close to 1:\n" + "\n" + ">>> 1 - betainc(0.5, 8, [0.9, 0.99, 0.999])\n" + "array([2.0574632e-09, 0.0000000e+00, 0.0000000e+00])\n" + "\n" + "By using ``betaincc``, we get the correct values:\n" + "\n" + ">>> betaincc(0.5, 8, [0.9, 0.99, 0.999])\n" + "array([2.05746321e-09, 1.97259354e-17, 1.96467954e-25])") +ufunc_betaincc_loops[0] = loop_f_fff__As_fff_f +ufunc_betaincc_loops[1] = loop_d_ddd__As_ddd_d +ufunc_betaincc_types[0] = NPY_FLOAT +ufunc_betaincc_types[1] = NPY_FLOAT +ufunc_betaincc_types[2] = NPY_FLOAT +ufunc_betaincc_types[3] = NPY_FLOAT +ufunc_betaincc_types[4] = NPY_DOUBLE +ufunc_betaincc_types[5] = NPY_DOUBLE +ufunc_betaincc_types[6] = NPY_DOUBLE +ufunc_betaincc_types[7] = NPY_DOUBLE +ufunc_betaincc_ptr[2*0] = scipy.special._ufuncs_cxx._export_ibetac_float +ufunc_betaincc_ptr[2*0+1] = ("betaincc") +ufunc_betaincc_ptr[2*1] = scipy.special._ufuncs_cxx._export_ibetac_double +ufunc_betaincc_ptr[2*1+1] = ("betaincc") +ufunc_betaincc_data[0] = &ufunc_betaincc_ptr[2*0] +ufunc_betaincc_data[1] = &ufunc_betaincc_ptr[2*1] +betaincc = np.PyUFunc_FromFuncAndData(ufunc_betaincc_loops, ufunc_betaincc_data, ufunc_betaincc_types, 2, 3, 1, 0, "betaincc", ufunc_betaincc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_betainccinv_loops[2] +cdef void *ufunc_betainccinv_ptr[4] +cdef void *ufunc_betainccinv_data[2] +cdef char ufunc_betainccinv_types[8] +cdef char *ufunc_betainccinv_doc = ( + "betainccinv(a, b, y, out=None)\n" + "\n" + "Inverse of the complemented regularized incomplete beta function.\n" + "\n" + "Computes :math:`x` such that:\n" + "\n" + ".. 
math::\n" + "\n" + " y = 1 - I_x(a, b) = 1 - \\frac{\\Gamma(a+b)}{\\Gamma(a)\\Gamma(b)}\n" + " \\int_0^x t^{a-1}(1-t)^{b-1}dt,\n" + "\n" + "where :math:`I_x` is the normalized incomplete beta function `betainc`\n" + "and :math:`\\Gamma` is the `gamma` function [1]_.\n" + "\n" + "Parameters\n" + "----------\n" + "a, b : array_like\n" + " Positive, real-valued parameters\n" + "y : array_like\n" + " Real-valued input\n" + "out : ndarray, optional\n" + " Optional output array for function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Value of the inverse of the regularized incomplete beta function\n" + "\n" + "See Also\n" + "--------\n" + "betainc : regularized incomplete beta function\n" + "betaincc : complement of the regularized incomplete beta function\n" + "\n" + "Notes\n" + "-----\n" + ".. versionadded:: 1.11.0\n" + "\n" + "References\n" + "----------\n" + ".. [1] NIST Digital Library of Mathematical Functions\n" + " https://dlmf.nist.gov/8.17\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import betainccinv, betaincc\n" + "\n" + "This function is the inverse of `betaincc` for fixed\n" + "values of :math:`a` and :math:`b`.\n" + "\n" + ">>> a, b = 1.2, 3.1\n" + ">>> y = betaincc(a, b, 0.2)\n" + ">>> betainccinv(a, b, y)\n" + "0.2\n" + "\n" + ">>> a, b = 7, 2.5\n" + ">>> x = betainccinv(a, b, 0.875)\n" + ">>> betaincc(a, b, x)\n" + "0.875") +ufunc_betainccinv_loops[0] = loop_f_fff__As_fff_f +ufunc_betainccinv_loops[1] = loop_d_ddd__As_ddd_d +ufunc_betainccinv_types[0] = NPY_FLOAT +ufunc_betainccinv_types[1] = NPY_FLOAT +ufunc_betainccinv_types[2] = NPY_FLOAT +ufunc_betainccinv_types[3] = NPY_FLOAT +ufunc_betainccinv_types[4] = NPY_DOUBLE +ufunc_betainccinv_types[5] = NPY_DOUBLE +ufunc_betainccinv_types[6] = NPY_DOUBLE +ufunc_betainccinv_types[7] = NPY_DOUBLE +ufunc_betainccinv_ptr[2*0] = scipy.special._ufuncs_cxx._export_ibetac_inv_float +ufunc_betainccinv_ptr[2*0+1] = ("betainccinv") +ufunc_betainccinv_ptr[2*1] = 
scipy.special._ufuncs_cxx._export_ibetac_inv_double +ufunc_betainccinv_ptr[2*1+1] = ("betainccinv") +ufunc_betainccinv_data[0] = &ufunc_betainccinv_ptr[2*0] +ufunc_betainccinv_data[1] = &ufunc_betainccinv_ptr[2*1] +betainccinv = np.PyUFunc_FromFuncAndData(ufunc_betainccinv_loops, ufunc_betainccinv_data, ufunc_betainccinv_types, 2, 3, 1, 0, "betainccinv", ufunc_betainccinv_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_betaincinv_loops[2] +cdef void *ufunc_betaincinv_ptr[4] +cdef void *ufunc_betaincinv_data[2] +cdef char ufunc_betaincinv_types[8] +cdef char *ufunc_betaincinv_doc = ( + "betaincinv(a, b, y, out=None)\n" + "\n" + "Inverse of the regularized incomplete beta function.\n" + "\n" + "Computes :math:`x` such that:\n" + "\n" + ".. math::\n" + "\n" + " y = I_x(a, b) = \\frac{\\Gamma(a+b)}{\\Gamma(a)\\Gamma(b)}\n" + " \\int_0^x t^{a-1}(1-t)^{b-1}dt,\n" + "\n" + "where :math:`I_x` is the normalized incomplete beta function `betainc`\n" + "and :math:`\\Gamma` is the `gamma` function [1]_.\n" + "\n" + "Parameters\n" + "----------\n" + "a, b : array_like\n" + " Positive, real-valued parameters\n" + "y : array_like\n" + " Real-valued input\n" + "out : ndarray, optional\n" + " Optional output array for function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Value of the inverse of the regularized incomplete beta function\n" + "\n" + "See Also\n" + "--------\n" + "betainc : regularized incomplete beta function\n" + "gamma : gamma function\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] NIST Digital Library of Mathematical Functions\n" + " https://dlmf.nist.gov/8.17\n" + "\n" + "Examples\n" + "--------\n" + ">>> import scipy.special as sc\n" + "\n" + "This function is the inverse of `betainc` for fixed\n" + "values of :math:`a` and :math:`b`.\n" + "\n" + ">>> a, b = 1.2, 3.1\n" + ">>> y = sc.betainc(a, b, 0.2)\n" + ">>> sc.betaincinv(a, b, y)\n" + "0.2\n" + ">>>\n" + ">>> a, b = 7.5, 0.4\n" + ">>> x = sc.betaincinv(a, b, 0.5)\n" + ">>> sc.betainc(a, b, x)\n" + "0.5") +ufunc_betaincinv_loops[0] = loop_f_fff__As_fff_f +ufunc_betaincinv_loops[1] = loop_d_ddd__As_ddd_d +ufunc_betaincinv_types[0] = NPY_FLOAT +ufunc_betaincinv_types[1] = NPY_FLOAT +ufunc_betaincinv_types[2] = NPY_FLOAT +ufunc_betaincinv_types[3] = NPY_FLOAT +ufunc_betaincinv_types[4] = NPY_DOUBLE +ufunc_betaincinv_types[5] = NPY_DOUBLE +ufunc_betaincinv_types[6] = NPY_DOUBLE +ufunc_betaincinv_types[7] = NPY_DOUBLE +ufunc_betaincinv_ptr[2*0] = scipy.special._ufuncs_cxx._export_ibeta_inv_float +ufunc_betaincinv_ptr[2*0+1] = ("betaincinv") +ufunc_betaincinv_ptr[2*1] = scipy.special._ufuncs_cxx._export_ibeta_inv_double +ufunc_betaincinv_ptr[2*1+1] = ("betaincinv") +ufunc_betaincinv_data[0] = &ufunc_betaincinv_ptr[2*0] +ufunc_betaincinv_data[1] = &ufunc_betaincinv_ptr[2*1] +betaincinv = np.PyUFunc_FromFuncAndData(ufunc_betaincinv_loops, ufunc_betaincinv_data, ufunc_betaincinv_types, 2, 3, 1, 0, "betaincinv", ufunc_betaincinv_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_betaln_loops[2] +cdef void *ufunc_betaln_ptr[4] +cdef void *ufunc_betaln_data[2] +cdef char ufunc_betaln_types[6] +cdef char *ufunc_betaln_doc = ( + "betaln(a, b, out=None)\n" + "\n" + "Natural logarithm of absolute value of beta function.\n" + "\n" + "Computes ``ln(abs(beta(a, b)))``.\n" + "\n" + "Parameters\n" + "----------\n" + "a, b : array_like\n" + " Positive, real-valued parameters\n" + "out : ndarray, optional\n" + " Optional output array for function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or 
ndarray\n" + " Value of the betaln function\n" + "\n" + "See Also\n" + "--------\n" + "gamma : the gamma function\n" + "betainc : the regularized incomplete beta function\n" + "beta : the beta function\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy.special import betaln, beta\n" + "\n" + "Verify that, for moderate values of ``a`` and ``b``, ``betaln(a, b)``\n" + "is the same as ``log(beta(a, b))``:\n" + "\n" + ">>> betaln(3, 4)\n" + "-4.0943445622221\n" + "\n" + ">>> np.log(beta(3, 4))\n" + "-4.0943445622221\n" + "\n" + "In the following ``beta(a, b)`` underflows to 0, so we can't compute\n" + "the logarithm of the actual value.\n" + "\n" + ">>> a = 400\n" + ">>> b = 900\n" + ">>> beta(a, b)\n" + "0.0\n" + "\n" + "We can compute the logarithm of ``beta(a, b)`` by using `betaln`:\n" + "\n" + ">>> betaln(a, b)\n" + "-804.3069951764146") +ufunc_betaln_loops[0] = loop_d_dd__As_ff_f +ufunc_betaln_loops[1] = loop_d_dd__As_dd_d +ufunc_betaln_types[0] = NPY_FLOAT +ufunc_betaln_types[1] = NPY_FLOAT +ufunc_betaln_types[2] = NPY_FLOAT +ufunc_betaln_types[3] = NPY_DOUBLE +ufunc_betaln_types[4] = NPY_DOUBLE +ufunc_betaln_types[5] = NPY_DOUBLE +ufunc_betaln_ptr[2*0] = _func_lbeta +ufunc_betaln_ptr[2*0+1] = ("betaln") +ufunc_betaln_ptr[2*1] = _func_lbeta +ufunc_betaln_ptr[2*1+1] = ("betaln") +ufunc_betaln_data[0] = &ufunc_betaln_ptr[2*0] +ufunc_betaln_data[1] = &ufunc_betaln_ptr[2*1] +betaln = np.PyUFunc_FromFuncAndData(ufunc_betaln_loops, ufunc_betaln_data, ufunc_betaln_types, 2, 2, 1, 0, "betaln", ufunc_betaln_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_binom_loops[2] +cdef void *ufunc_binom_ptr[4] +cdef void *ufunc_binom_data[2] +cdef char ufunc_binom_types[6] +cdef char *ufunc_binom_doc = ( + "binom(x, y, out=None)\n" + "\n" + "Binomial coefficient considered as a function of two real variables.\n" + "\n" + "For real arguments, the binomial coefficient is defined as\n" + "\n" + ".. 
math::\n" + "\n" + " \\binom{x}{y} = \\frac{\\Gamma(x + 1)}{\\Gamma(y + 1)\\Gamma(x - y + 1)} =\n" + " \\frac{1}{(x + 1)\\mathrm{B}(x - y + 1, y + 1)}\n" + "\n" + "Where :math:`\\Gamma` is the Gamma function (`gamma`) and :math:`\\mathrm{B}`\n" + "is the Beta function (`beta`) [1]_.\n" + "\n" + "Parameters\n" + "----------\n" + "x, y: array_like\n" + " Real arguments to :math:`\\binom{x}{y}`.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Value of binomial coefficient.\n" + "\n" + "See Also\n" + "--------\n" + "comb : The number of combinations of N things taken k at a time.\n" + "\n" + "Notes\n" + "-----\n" + "The Gamma function has poles at non-positive integers and tends to either\n" + "positive or negative infinity depending on the direction on the real line\n" + "from which a pole is approached. When considered as a function of two real\n" + "variables, :math:`\\binom{x}{y}` is thus undefined when `x` is a negative\n" + "integer. `binom` returns ``nan`` when ``x`` is a negative integer. This\n" + "is the case even when ``x`` is a negative integer and ``y`` an integer,\n" + "contrary to the usual convention for defining :math:`\\binom{n}{k}` when it\n" + "is considered as a function of two integer variables.\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] https://en.wikipedia.org/wiki/Binomial_coefficient\n" + "\n" + "Examples\n" + "--------\n" + "The following examples illustrate the ways in which `binom` differs from\n" + "the function `comb`.\n" + "\n" + ">>> from scipy.special import binom, comb\n" + "\n" + "When ``exact=False`` and ``x`` and ``y`` are both positive, `comb` calls\n" + "`binom` internally.\n" + "\n" + ">>> x, y = 3, 2\n" + ">>> (binom(x, y), comb(x, y), comb(x, y, exact=True))\n" + "(3.0, 3.0, 3)\n" + "\n" + "For larger values, `comb` with ``exact=True`` no longer agrees\n" + "with `binom`.\n" + "\n" + ">>> x, y = 43, 23\n" + ">>> (binom(x, y), comb(x, y), comb(x, y, exact=True))\n" + "(960566918219.9999, 960566918219.9999, 960566918220)\n" + "\n" + "`binom` returns ``nan`` when ``x`` is a negative integer, but is otherwise\n" + "defined for negative arguments. `comb` returns 0 whenever one of ``x`` or\n" + "``y`` is negative or ``x`` is less than ``y``.\n" + "\n" + ">>> x, y = -3, 2\n" + ">>> (binom(x, y), comb(x, y), comb(x, y, exact=True))\n" + "(nan, 0.0, 0)\n" + "\n" + ">>> x, y = -3.1, 2.2\n" + ">>> (binom(x, y), comb(x, y), comb(x, y, exact=True))\n" + "(18.714147876804432, 0.0, 0)\n" + "\n" + ">>> x, y = 2.2, 3.1\n" + ">>> (binom(x, y), comb(x, y), comb(x, y, exact=True))\n" + "(0.037399983365134115, 0.0, 0)") +ufunc_binom_loops[0] = loop_d_dd__As_ff_f +ufunc_binom_loops[1] = loop_d_dd__As_dd_d +ufunc_binom_types[0] = NPY_FLOAT +ufunc_binom_types[1] = NPY_FLOAT +ufunc_binom_types[2] = NPY_FLOAT +ufunc_binom_types[3] = NPY_DOUBLE +ufunc_binom_types[4] = NPY_DOUBLE +ufunc_binom_types[5] = NPY_DOUBLE +ufunc_binom_ptr[2*0] = scipy.special._ufuncs_cxx._export_binom +ufunc_binom_ptr[2*0+1] = ("binom") +ufunc_binom_ptr[2*1] = scipy.special._ufuncs_cxx._export_binom +ufunc_binom_ptr[2*1+1] = ("binom") +ufunc_binom_data[0] = &ufunc_binom_ptr[2*0] +ufunc_binom_data[1] = &ufunc_binom_ptr[2*1] +binom = np.PyUFunc_FromFuncAndData(ufunc_binom_loops, ufunc_binom_data, ufunc_binom_types, 2, 2, 1, 0, 
"binom", ufunc_binom_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_boxcox_loops[2] +cdef void *ufunc_boxcox_ptr[4] +cdef void *ufunc_boxcox_data[2] +cdef char ufunc_boxcox_types[6] +cdef char *ufunc_boxcox_doc = ( + "boxcox(x, lmbda, out=None)\n" + "\n" + "Compute the Box-Cox transformation.\n" + "\n" + "The Box-Cox transformation is::\n" + "\n" + " y = (x**lmbda - 1) / lmbda if lmbda != 0\n" + " log(x) if lmbda == 0\n" + "\n" + "Returns `nan` if ``x < 0``.\n" + "Returns `-inf` if ``x == 0`` and ``lmbda < 0``.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Data to be transformed.\n" + "lmbda : array_like\n" + " Power parameter of the Box-Cox transform.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "y : scalar or ndarray\n" + " Transformed data.\n" + "\n" + "Notes\n" + "-----\n" + "\n" + ".. versionadded:: 0.14.0\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import boxcox\n" + ">>> boxcox([1, 4, 10], 2.5)\n" + "array([ 0. , 12.4 , 126.09110641])\n" + ">>> boxcox(2, [0, 1, 2])\n" + "array([ 0.69314718, 1. 
, 1.5 ])") +ufunc_boxcox_loops[0] = loop_d_dd__As_ff_f +ufunc_boxcox_loops[1] = loop_d_dd__As_dd_d +ufunc_boxcox_types[0] = NPY_FLOAT +ufunc_boxcox_types[1] = NPY_FLOAT +ufunc_boxcox_types[2] = NPY_FLOAT +ufunc_boxcox_types[3] = NPY_DOUBLE +ufunc_boxcox_types[4] = NPY_DOUBLE +ufunc_boxcox_types[5] = NPY_DOUBLE +ufunc_boxcox_ptr[2*0] = _func_boxcox +ufunc_boxcox_ptr[2*0+1] = ("boxcox") +ufunc_boxcox_ptr[2*1] = _func_boxcox +ufunc_boxcox_ptr[2*1+1] = ("boxcox") +ufunc_boxcox_data[0] = &ufunc_boxcox_ptr[2*0] +ufunc_boxcox_data[1] = &ufunc_boxcox_ptr[2*1] +boxcox = np.PyUFunc_FromFuncAndData(ufunc_boxcox_loops, ufunc_boxcox_data, ufunc_boxcox_types, 2, 2, 1, 0, "boxcox", ufunc_boxcox_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_boxcox1p_loops[2] +cdef void *ufunc_boxcox1p_ptr[4] +cdef void *ufunc_boxcox1p_data[2] +cdef char ufunc_boxcox1p_types[6] +cdef char *ufunc_boxcox1p_doc = ( + "boxcox1p(x, lmbda, out=None)\n" + "\n" + "Compute the Box-Cox transformation of 1 + `x`.\n" + "\n" + "The Box-Cox transformation computed by `boxcox1p` is::\n" + "\n" + " y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0\n" + " log(1+x) if lmbda == 0\n" + "\n" + "Returns `nan` if ``x < -1``.\n" + "Returns `-inf` if ``x == -1`` and ``lmbda < 0``.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Data to be transformed.\n" + "lmbda : array_like\n" + " Power parameter of the Box-Cox transform.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "y : scalar or ndarray\n" + " Transformed data.\n" + "\n" + "Notes\n" + "-----\n" + "\n" + ".. 
versionadded:: 0.14.0\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import boxcox1p\n" + ">>> boxcox1p(1e-4, [0, 0.5, 1])\n" + "array([ 9.99950003e-05, 9.99975001e-05, 1.00000000e-04])\n" + ">>> boxcox1p([0.01, 0.1], 0.25)\n" + "array([ 0.00996272, 0.09645476])") +ufunc_boxcox1p_loops[0] = loop_d_dd__As_ff_f +ufunc_boxcox1p_loops[1] = loop_d_dd__As_dd_d +ufunc_boxcox1p_types[0] = NPY_FLOAT +ufunc_boxcox1p_types[1] = NPY_FLOAT +ufunc_boxcox1p_types[2] = NPY_FLOAT +ufunc_boxcox1p_types[3] = NPY_DOUBLE +ufunc_boxcox1p_types[4] = NPY_DOUBLE +ufunc_boxcox1p_types[5] = NPY_DOUBLE +ufunc_boxcox1p_ptr[2*0] = _func_boxcox1p +ufunc_boxcox1p_ptr[2*0+1] = ("boxcox1p") +ufunc_boxcox1p_ptr[2*1] = _func_boxcox1p +ufunc_boxcox1p_ptr[2*1+1] = ("boxcox1p") +ufunc_boxcox1p_data[0] = &ufunc_boxcox1p_ptr[2*0] +ufunc_boxcox1p_data[1] = &ufunc_boxcox1p_ptr[2*1] +boxcox1p = np.PyUFunc_FromFuncAndData(ufunc_boxcox1p_loops, ufunc_boxcox1p_data, ufunc_boxcox1p_types, 2, 2, 1, 0, "boxcox1p", ufunc_boxcox1p_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_btdtr_loops[2] +cdef void *ufunc_btdtr_ptr[4] +cdef void *ufunc_btdtr_data[2] +cdef char ufunc_btdtr_types[8] +cdef char *ufunc_btdtr_doc = ( + "btdtr(a, b, x, out=None)\n" + "\n" + "Cumulative distribution function of the beta distribution.\n" + "\n" + "Returns the integral from zero to `x` of the beta probability density\n" + "function,\n" + "\n" + ".. math::\n" + " I = \\int_0^x \\frac{\\Gamma(a + b)}{\\Gamma(a)\\Gamma(b)} t^{a-1} (1-t)^{b-1}\\,dt\n" + "\n" + "where :math:`\\Gamma` is the gamma function.\n" + "\n" + ".. 
deprecated:: 1.12.0\n" + " This function is deprecated and will be removed from SciPy 1.14.0.\n" + " Use `scipy.special.betainc` instead.\n" + "\n" + "Parameters\n" + "----------\n" + "a : array_like\n" + " Shape parameter (a > 0).\n" + "b : array_like\n" + " Shape parameter (b > 0).\n" + "x : array_like\n" + " Upper limit of integration, in [0, 1].\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "I : scalar or ndarray\n" + " Cumulative distribution function of the beta distribution with\n" + " parameters `a` and `b` at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "betainc\n" + "\n" + "Notes\n" + "-----\n" + "This function is identical to the incomplete beta integral function\n" + "`betainc`.\n" + "\n" + "Wrapper for the Cephes [1]_ routine `btdtr`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/") +ufunc_btdtr_loops[0] = loop_d_ddd__As_fff_f +ufunc_btdtr_loops[1] = loop_d_ddd__As_ddd_d +ufunc_btdtr_types[0] = NPY_FLOAT +ufunc_btdtr_types[1] = NPY_FLOAT +ufunc_btdtr_types[2] = NPY_FLOAT +ufunc_btdtr_types[3] = NPY_FLOAT +ufunc_btdtr_types[4] = NPY_DOUBLE +ufunc_btdtr_types[5] = NPY_DOUBLE +ufunc_btdtr_types[6] = NPY_DOUBLE +ufunc_btdtr_types[7] = NPY_DOUBLE +ufunc_btdtr_ptr[2*0] = _func_btdtr +ufunc_btdtr_ptr[2*0+1] = ("btdtr") +ufunc_btdtr_ptr[2*1] = _func_btdtr +ufunc_btdtr_ptr[2*1+1] = ("btdtr") +ufunc_btdtr_data[0] = &ufunc_btdtr_ptr[2*0] +ufunc_btdtr_data[1] = &ufunc_btdtr_ptr[2*1] +btdtr = np.PyUFunc_FromFuncAndData(ufunc_btdtr_loops, ufunc_btdtr_data, ufunc_btdtr_types, 2, 3, 1, 0, "btdtr", ufunc_btdtr_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_btdtri_loops[2] +cdef void *ufunc_btdtri_ptr[4] +cdef void *ufunc_btdtri_data[2] +cdef char ufunc_btdtri_types[8] +cdef char *ufunc_btdtri_doc = ( + "btdtri(a, b, p, out=None)\n" + "\n" + "The `p`-th quantile of the beta distribution.\n" + "\n" + "This 
function is the inverse of the beta cumulative distribution function,\n" + "`btdtr`, returning the value of `x` for which `btdtr(a, b, x) = p`, or\n" + "\n" + ".. math::\n" + " p = \\int_0^x \\frac{\\Gamma(a + b)}{\\Gamma(a)\\Gamma(b)} t^{a-1} (1-t)^{b-1}\\,dt\n" + "\n" + ".. deprecated:: 1.12.0\n" + " This function is deprecated and will be removed from SciPy 1.14.0.\n" + " Use `scipy.special.betaincinv` instead.\n" + "\n" + "Parameters\n" + "----------\n" + "a : array_like\n" + " Shape parameter (`a` > 0).\n" + "b : array_like\n" + " Shape parameter (`b` > 0).\n" + "p : array_like\n" + " Cumulative probability, in [0, 1].\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "x : scalar or ndarray\n" + " The quantile corresponding to `p`.\n" + "\n" + "See Also\n" + "--------\n" + "betaincinv\n" + "btdtr\n" + "\n" + "Notes\n" + "-----\n" + "The value of `x` is found by interval halving or Newton iterations.\n" + "\n" + "Wrapper for the Cephes [1]_ routine `incbi`, which solves the equivalent\n" + "problem of finding the inverse of the incomplete beta integral.\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/") +ufunc_btdtri_loops[0] = loop_d_ddd__As_fff_f +ufunc_btdtri_loops[1] = loop_d_ddd__As_ddd_d +ufunc_btdtri_types[0] = NPY_FLOAT +ufunc_btdtri_types[1] = NPY_FLOAT +ufunc_btdtri_types[2] = NPY_FLOAT +ufunc_btdtri_types[3] = NPY_FLOAT +ufunc_btdtri_types[4] = NPY_DOUBLE +ufunc_btdtri_types[5] = NPY_DOUBLE +ufunc_btdtri_types[6] = NPY_DOUBLE +ufunc_btdtri_types[7] = NPY_DOUBLE +ufunc_btdtri_ptr[2*0] = _func_incbi +ufunc_btdtri_ptr[2*0+1] = ("btdtri") +ufunc_btdtri_ptr[2*1] = _func_incbi +ufunc_btdtri_ptr[2*1+1] = ("btdtri") +ufunc_btdtri_data[0] = &ufunc_btdtri_ptr[2*0] +ufunc_btdtri_data[1] = &ufunc_btdtri_ptr[2*1] +btdtri = np.PyUFunc_FromFuncAndData(ufunc_btdtri_loops, ufunc_btdtri_data, ufunc_btdtri_types, 2, 3, 1, 0, "btdtri", ufunc_btdtri_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_btdtria_loops[2] +cdef void *ufunc_btdtria_ptr[4] +cdef void *ufunc_btdtria_data[2] +cdef char ufunc_btdtria_types[8] +cdef char *ufunc_btdtria_doc = ( + "btdtria(p, b, x, out=None)\n" + "\n" + "Inverse of `btdtr` with respect to `a`.\n" + "\n" + "This is the inverse of the beta cumulative distribution function, `btdtr`,\n" + "considered as a function of `a`, returning the value of `a` for which\n" + "`btdtr(a, b, x) = p`, or\n" + "\n" + ".. 
math::\n" + " p = \\int_0^x \\frac{\\Gamma(a + b)}{\\Gamma(a)\\Gamma(b)} t^{a-1} (1-t)^{b-1}\\,dt\n" + "\n" + "Parameters\n" + "----------\n" + "p : array_like\n" + " Cumulative probability, in [0, 1].\n" + "b : array_like\n" + " Shape parameter (`b` > 0).\n" + "x : array_like\n" + " The quantile, in [0, 1].\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "a : scalar or ndarray\n" + " The value of the shape parameter `a` such that `btdtr(a, b, x) = p`.\n" + "\n" + "See Also\n" + "--------\n" + "btdtr : Cumulative distribution function of the beta distribution.\n" + "btdtri : Inverse with respect to `x`.\n" + "btdtrib : Inverse with respect to `b`.\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.\n" + "\n" + "The cumulative distribution function `p` is computed using a routine by\n" + "DiDinato and Morris [2]_. Computation of `a` involves a search for a value\n" + "that produces the desired value of `p`. The search relies on the\n" + "monotonicity of `p` with `a`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Barry Brown, James Lovato, and Kathy Russell,\n" + " CDFLIB: Library of Fortran Routines for Cumulative Distribution\n" + " Functions, Inverses, and Other Parameters.\n" + ".. [2] DiDinato, A. R. and Morris, A. H.,\n" + " Algorithm 708: Significant Digit Computation of the Incomplete Beta\n" + " Function Ratios. ACM Trans. Math. Softw. 
18 (1993), 360-373.") +ufunc_btdtria_loops[0] = loop_d_ddd__As_fff_f +ufunc_btdtria_loops[1] = loop_d_ddd__As_ddd_d +ufunc_btdtria_types[0] = NPY_FLOAT +ufunc_btdtria_types[1] = NPY_FLOAT +ufunc_btdtria_types[2] = NPY_FLOAT +ufunc_btdtria_types[3] = NPY_FLOAT +ufunc_btdtria_types[4] = NPY_DOUBLE +ufunc_btdtria_types[5] = NPY_DOUBLE +ufunc_btdtria_types[6] = NPY_DOUBLE +ufunc_btdtria_types[7] = NPY_DOUBLE +ufunc_btdtria_ptr[2*0] = _func_btdtria +ufunc_btdtria_ptr[2*0+1] = ("btdtria") +ufunc_btdtria_ptr[2*1] = _func_btdtria +ufunc_btdtria_ptr[2*1+1] = ("btdtria") +ufunc_btdtria_data[0] = &ufunc_btdtria_ptr[2*0] +ufunc_btdtria_data[1] = &ufunc_btdtria_ptr[2*1] +btdtria = np.PyUFunc_FromFuncAndData(ufunc_btdtria_loops, ufunc_btdtria_data, ufunc_btdtria_types, 2, 3, 1, 0, "btdtria", ufunc_btdtria_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_btdtrib_loops[2] +cdef void *ufunc_btdtrib_ptr[4] +cdef void *ufunc_btdtrib_data[2] +cdef char ufunc_btdtrib_types[8] +cdef char *ufunc_btdtrib_doc = ( + "btdtria(a, p, x, out=None)\n" + "\n" + "Inverse of `btdtr` with respect to `b`.\n" + "\n" + "This is the inverse of the beta cumulative distribution function, `btdtr`,\n" + "considered as a function of `b`, returning the value of `b` for which\n" + "`btdtr(a, b, x) = p`, or\n" + "\n" + ".. 
math::\n" + " p = \\int_0^x \\frac{\\Gamma(a + b)}{\\Gamma(a)\\Gamma(b)} t^{a-1} (1-t)^{b-1}\\,dt\n" + "\n" + "Parameters\n" + "----------\n" + "a : array_like\n" + " Shape parameter (`a` > 0).\n" + "p : array_like\n" + " Cumulative probability, in [0, 1].\n" + "x : array_like\n" + " The quantile, in [0, 1].\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "b : scalar or ndarray\n" + " The value of the shape parameter `b` such that `btdtr(a, b, x) = p`.\n" + "\n" + "See Also\n" + "--------\n" + "btdtr : Cumulative distribution function of the beta distribution.\n" + "btdtri : Inverse with respect to `x`.\n" + "btdtria : Inverse with respect to `a`.\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.\n" + "\n" + "The cumulative distribution function `p` is computed using a routine by\n" + "DiDinato and Morris [2]_. Computation of `b` involves a search for a value\n" + "that produces the desired value of `p`. The search relies on the\n" + "monotonicity of `p` with `b`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Barry Brown, James Lovato, and Kathy Russell,\n" + " CDFLIB: Library of Fortran Routines for Cumulative Distribution\n" + " Functions, Inverses, and Other Parameters.\n" + ".. [2] DiDinato, A. R. and Morris, A. H.,\n" + " Algorithm 708: Significant Digit Computation of the Incomplete Beta\n" + " Function Ratios. ACM Trans. Math. Softw. 
18 (1993), 360-373.") +ufunc_btdtrib_loops[0] = loop_d_ddd__As_fff_f +ufunc_btdtrib_loops[1] = loop_d_ddd__As_ddd_d +ufunc_btdtrib_types[0] = NPY_FLOAT +ufunc_btdtrib_types[1] = NPY_FLOAT +ufunc_btdtrib_types[2] = NPY_FLOAT +ufunc_btdtrib_types[3] = NPY_FLOAT +ufunc_btdtrib_types[4] = NPY_DOUBLE +ufunc_btdtrib_types[5] = NPY_DOUBLE +ufunc_btdtrib_types[6] = NPY_DOUBLE +ufunc_btdtrib_types[7] = NPY_DOUBLE +ufunc_btdtrib_ptr[2*0] = _func_btdtrib +ufunc_btdtrib_ptr[2*0+1] = ("btdtrib") +ufunc_btdtrib_ptr[2*1] = _func_btdtrib +ufunc_btdtrib_ptr[2*1+1] = ("btdtrib") +ufunc_btdtrib_data[0] = &ufunc_btdtrib_ptr[2*0] +ufunc_btdtrib_data[1] = &ufunc_btdtrib_ptr[2*1] +btdtrib = np.PyUFunc_FromFuncAndData(ufunc_btdtrib_loops, ufunc_btdtrib_data, ufunc_btdtrib_types, 2, 3, 1, 0, "btdtrib", ufunc_btdtrib_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_cbrt_loops[2] +cdef void *ufunc_cbrt_ptr[4] +cdef void *ufunc_cbrt_data[2] +cdef char ufunc_cbrt_types[4] +cdef char *ufunc_cbrt_doc = ( + "cbrt(x, out=None)\n" + "\n" + "Element-wise cube root of `x`.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " `x` must contain real numbers.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The cube root of each value in `x`.\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import cbrt\n" + "\n" + ">>> cbrt(8)\n" + "2.0\n" + ">>> cbrt([-8, -3, 0.125, 1.331])\n" + "array([-2. 
, -1.44224957, 0.5 , 1.1 ])") +ufunc_cbrt_loops[0] = loop_d_d__As_f_f +ufunc_cbrt_loops[1] = loop_d_d__As_d_d +ufunc_cbrt_types[0] = NPY_FLOAT +ufunc_cbrt_types[1] = NPY_FLOAT +ufunc_cbrt_types[2] = NPY_DOUBLE +ufunc_cbrt_types[3] = NPY_DOUBLE +ufunc_cbrt_ptr[2*0] = _func_cbrt +ufunc_cbrt_ptr[2*0+1] = ("cbrt") +ufunc_cbrt_ptr[2*1] = _func_cbrt +ufunc_cbrt_ptr[2*1+1] = ("cbrt") +ufunc_cbrt_data[0] = &ufunc_cbrt_ptr[2*0] +ufunc_cbrt_data[1] = &ufunc_cbrt_ptr[2*1] +cbrt = np.PyUFunc_FromFuncAndData(ufunc_cbrt_loops, ufunc_cbrt_data, ufunc_cbrt_types, 2, 1, 1, 0, "cbrt", ufunc_cbrt_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_chdtr_loops[2] +cdef void *ufunc_chdtr_ptr[4] +cdef void *ufunc_chdtr_data[2] +cdef char ufunc_chdtr_types[6] +cdef char *ufunc_chdtr_doc = ( + "chdtr(v, x, out=None)\n" + "\n" + "Chi square cumulative distribution function.\n" + "\n" + "Returns the area under the left tail (from 0 to `x`) of the Chi\n" + "square probability density function with `v` degrees of freedom:\n" + "\n" + ".. math::\n" + "\n" + " \\frac{1}{2^{v/2} \\Gamma(v/2)} \\int_0^x t^{v/2 - 1} e^{-t/2} dt\n" + "\n" + "Here :math:`\\Gamma` is the Gamma function; see `gamma`. This\n" + "integral can be expressed in terms of the regularized lower\n" + "incomplete gamma function `gammainc` as\n" + "``gammainc(v / 2, x / 2)``. [1]_\n" + "\n" + "Parameters\n" + "----------\n" + "v : array_like\n" + " Degrees of freedom.\n" + "x : array_like\n" + " Upper bound of the integral.\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the cumulative distribution function.\n" + "\n" + "See Also\n" + "--------\n" + "chdtrc, chdtri, chdtriv, gammainc\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Chi-Square distribution,\n" + " https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "It can be expressed in terms of the regularized lower incomplete\n" + "gamma function.\n" + "\n" + ">>> v = 1\n" + ">>> x = np.arange(4)\n" + ">>> sc.chdtr(v, x)\n" + "array([0. , 0.68268949, 0.84270079, 0.91673548])\n" + ">>> sc.gammainc(v / 2, x / 2)\n" + "array([0. , 0.68268949, 0.84270079, 0.91673548])") +ufunc_chdtr_loops[0] = loop_d_dd__As_ff_f +ufunc_chdtr_loops[1] = loop_d_dd__As_dd_d +ufunc_chdtr_types[0] = NPY_FLOAT +ufunc_chdtr_types[1] = NPY_FLOAT +ufunc_chdtr_types[2] = NPY_FLOAT +ufunc_chdtr_types[3] = NPY_DOUBLE +ufunc_chdtr_types[4] = NPY_DOUBLE +ufunc_chdtr_types[5] = NPY_DOUBLE +ufunc_chdtr_ptr[2*0] = _func_chdtr +ufunc_chdtr_ptr[2*0+1] = ("chdtr") +ufunc_chdtr_ptr[2*1] = _func_chdtr +ufunc_chdtr_ptr[2*1+1] = ("chdtr") +ufunc_chdtr_data[0] = &ufunc_chdtr_ptr[2*0] +ufunc_chdtr_data[1] = &ufunc_chdtr_ptr[2*1] +chdtr = np.PyUFunc_FromFuncAndData(ufunc_chdtr_loops, ufunc_chdtr_data, ufunc_chdtr_types, 2, 2, 1, 0, "chdtr", ufunc_chdtr_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_chdtrc_loops[2] +cdef void *ufunc_chdtrc_ptr[4] +cdef void *ufunc_chdtrc_data[2] +cdef char ufunc_chdtrc_types[6] +cdef char *ufunc_chdtrc_doc = ( + "chdtrc(v, x, out=None)\n" + "\n" + "Chi square survival function.\n" + "\n" + "Returns the area under the right hand tail (from `x` to infinity)\n" + "of the Chi square probability density function with `v` degrees of\n" + "freedom:\n" + "\n" + ".. math::\n" + "\n" + " \\frac{1}{2^{v/2} \\Gamma(v/2)} \\int_x^\\infty t^{v/2 - 1} e^{-t/2} dt\n" + "\n" + "Here :math:`\\Gamma` is the Gamma function; see `gamma`. This\n" + "integral can be expressed in terms of the regularized upper\n" + "incomplete gamma function `gammaincc` as\n" + "``gammaincc(v / 2, x / 2)``. 
[1]_\n" + "\n" + "Parameters\n" + "----------\n" + "v : array_like\n" + " Degrees of freedom.\n" + "x : array_like\n" + " Lower bound of the integral.\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the survival function.\n" + "\n" + "See Also\n" + "--------\n" + "chdtr, chdtri, chdtriv, gammaincc\n" + "\n" + "References\n" + "----------\n" + ".. [1] Chi-Square distribution,\n" + " https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "It can be expressed in terms of the regularized upper incomplete\n" + "gamma function.\n" + "\n" + ">>> v = 1\n" + ">>> x = np.arange(4)\n" + ">>> sc.chdtrc(v, x)\n" + "array([1. , 0.31731051, 0.15729921, 0.08326452])\n" + ">>> sc.gammaincc(v / 2, x / 2)\n" + "array([1. , 0.31731051, 0.15729921, 0.08326452])") +ufunc_chdtrc_loops[0] = loop_d_dd__As_ff_f +ufunc_chdtrc_loops[1] = loop_d_dd__As_dd_d +ufunc_chdtrc_types[0] = NPY_FLOAT +ufunc_chdtrc_types[1] = NPY_FLOAT +ufunc_chdtrc_types[2] = NPY_FLOAT +ufunc_chdtrc_types[3] = NPY_DOUBLE +ufunc_chdtrc_types[4] = NPY_DOUBLE +ufunc_chdtrc_types[5] = NPY_DOUBLE +ufunc_chdtrc_ptr[2*0] = _func_chdtrc +ufunc_chdtrc_ptr[2*0+1] = ("chdtrc") +ufunc_chdtrc_ptr[2*1] = _func_chdtrc +ufunc_chdtrc_ptr[2*1+1] = ("chdtrc") +ufunc_chdtrc_data[0] = &ufunc_chdtrc_ptr[2*0] +ufunc_chdtrc_data[1] = &ufunc_chdtrc_ptr[2*1] +chdtrc = np.PyUFunc_FromFuncAndData(ufunc_chdtrc_loops, ufunc_chdtrc_data, ufunc_chdtrc_types, 2, 2, 1, 0, "chdtrc", ufunc_chdtrc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_chdtri_loops[2] +cdef void *ufunc_chdtri_ptr[4] +cdef void *ufunc_chdtri_data[2] +cdef char ufunc_chdtri_types[6] +cdef char *ufunc_chdtri_doc = ( + "chdtri(v, p, out=None)\n" + "\n" + "Inverse to `chdtrc` with respect to `x`.\n" + "\n" + "Returns `x` such that ``chdtrc(v, x) == 
p``.\n" + "\n" + "Parameters\n" + "----------\n" + "v : array_like\n" + " Degrees of freedom.\n" + "p : array_like\n" + " Probability.\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "x : scalar or ndarray\n" + " Value so that the probability a Chi square random variable\n" + " with `v` degrees of freedom is greater than `x` equals `p`.\n" + "\n" + "See Also\n" + "--------\n" + "chdtrc, chdtr, chdtriv\n" + "\n" + "References\n" + "----------\n" + ".. [1] Chi-Square distribution,\n" + " https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm\n" + "\n" + "Examples\n" + "--------\n" + ">>> import scipy.special as sc\n" + "\n" + "It inverts `chdtrc`.\n" + "\n" + ">>> v, p = 1, 0.3\n" + ">>> sc.chdtrc(v, sc.chdtri(v, p))\n" + "0.3\n" + ">>> x = 1\n" + ">>> sc.chdtri(v, sc.chdtrc(v, x))\n" + "1.0") +ufunc_chdtri_loops[0] = loop_d_dd__As_ff_f +ufunc_chdtri_loops[1] = loop_d_dd__As_dd_d +ufunc_chdtri_types[0] = NPY_FLOAT +ufunc_chdtri_types[1] = NPY_FLOAT +ufunc_chdtri_types[2] = NPY_FLOAT +ufunc_chdtri_types[3] = NPY_DOUBLE +ufunc_chdtri_types[4] = NPY_DOUBLE +ufunc_chdtri_types[5] = NPY_DOUBLE +ufunc_chdtri_ptr[2*0] = _func_chdtri +ufunc_chdtri_ptr[2*0+1] = ("chdtri") +ufunc_chdtri_ptr[2*1] = _func_chdtri +ufunc_chdtri_ptr[2*1+1] = ("chdtri") +ufunc_chdtri_data[0] = &ufunc_chdtri_ptr[2*0] +ufunc_chdtri_data[1] = &ufunc_chdtri_ptr[2*1] +chdtri = np.PyUFunc_FromFuncAndData(ufunc_chdtri_loops, ufunc_chdtri_data, ufunc_chdtri_types, 2, 2, 1, 0, "chdtri", ufunc_chdtri_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_chdtriv_loops[2] +cdef void *ufunc_chdtriv_ptr[4] +cdef void *ufunc_chdtriv_data[2] +cdef char ufunc_chdtriv_types[6] +cdef char *ufunc_chdtriv_doc = ( + "chdtriv(p, x, out=None)\n" + "\n" + "Inverse to `chdtr` with respect to `v`.\n" + "\n" + "Returns `v` such that ``chdtr(v, x) == p``.\n" + "\n" + "Parameters\n" + "----------\n" + "p : array_like\n" + " Probability that the Chi 
square random variable is less than\n" + " or equal to `x`.\n" + "x : array_like\n" + " Nonnegative input.\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Degrees of freedom.\n" + "\n" + "See Also\n" + "--------\n" + "chdtr, chdtrc, chdtri\n" + "\n" + "References\n" + "----------\n" + ".. [1] Chi-Square distribution,\n" + " https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm\n" + "\n" + "Examples\n" + "--------\n" + ">>> import scipy.special as sc\n" + "\n" + "It inverts `chdtr`.\n" + "\n" + ">>> p, x = 0.5, 1\n" + ">>> sc.chdtr(sc.chdtriv(p, x), x)\n" + "0.5000000000202172\n" + ">>> v = 1\n" + ">>> sc.chdtriv(sc.chdtr(v, x), v)\n" + "1.0000000000000013") +ufunc_chdtriv_loops[0] = loop_d_dd__As_ff_f +ufunc_chdtriv_loops[1] = loop_d_dd__As_dd_d +ufunc_chdtriv_types[0] = NPY_FLOAT +ufunc_chdtriv_types[1] = NPY_FLOAT +ufunc_chdtriv_types[2] = NPY_FLOAT +ufunc_chdtriv_types[3] = NPY_DOUBLE +ufunc_chdtriv_types[4] = NPY_DOUBLE +ufunc_chdtriv_types[5] = NPY_DOUBLE +ufunc_chdtriv_ptr[2*0] = _func_chdtriv +ufunc_chdtriv_ptr[2*0+1] = ("chdtriv") +ufunc_chdtriv_ptr[2*1] = _func_chdtriv +ufunc_chdtriv_ptr[2*1+1] = ("chdtriv") +ufunc_chdtriv_data[0] = &ufunc_chdtriv_ptr[2*0] +ufunc_chdtriv_data[1] = &ufunc_chdtriv_ptr[2*1] +chdtriv = np.PyUFunc_FromFuncAndData(ufunc_chdtriv_loops, ufunc_chdtriv_data, ufunc_chdtriv_types, 2, 2, 1, 0, "chdtriv", ufunc_chdtriv_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_chndtr_loops[2] +cdef void *ufunc_chndtr_ptr[4] +cdef void *ufunc_chndtr_data[2] +cdef char ufunc_chndtr_types[8] +cdef char *ufunc_chndtr_doc = ( + "chndtr(x, df, nc, out=None)\n" + "\n" + "Non-central chi square cumulative distribution function\n" + "\n" + "The cumulative distribution function is given by:\n" + "\n" + ".. 
math::\n" + "\n" + " P(\\chi^{\\prime 2} \\vert \\nu, \\lambda) =\\sum_{j=0}^{\\infty}\n" + " e^{-\\lambda /2}\n" + " \\frac{(\\lambda /2)^j}{j!} P(\\chi^{\\prime 2} \\vert \\nu + 2j),\n" + "\n" + "where :math:`\\nu > 0` is the degrees of freedom (``df``) and\n" + ":math:`\\lambda \\geq 0` is the non-centrality parameter (``nc``).\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Upper bound of the integral; must satisfy ``x >= 0``\n" + "df : array_like\n" + " Degrees of freedom; must satisfy ``df > 0``\n" + "nc : array_like\n" + " Non-centrality parameter; must satisfy ``nc >= 0``\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "x : scalar or ndarray\n" + " Value of the non-central chi square cumulative distribution function.\n" + "\n" + "See Also\n" + "--------\n" + "chndtrix, chndtridf, chndtrinc") +ufunc_chndtr_loops[0] = loop_d_ddd__As_fff_f +ufunc_chndtr_loops[1] = loop_d_ddd__As_ddd_d +ufunc_chndtr_types[0] = NPY_FLOAT +ufunc_chndtr_types[1] = NPY_FLOAT +ufunc_chndtr_types[2] = NPY_FLOAT +ufunc_chndtr_types[3] = NPY_FLOAT +ufunc_chndtr_types[4] = NPY_DOUBLE +ufunc_chndtr_types[5] = NPY_DOUBLE +ufunc_chndtr_types[6] = NPY_DOUBLE +ufunc_chndtr_types[7] = NPY_DOUBLE +ufunc_chndtr_ptr[2*0] = _func_chndtr +ufunc_chndtr_ptr[2*0+1] = ("chndtr") +ufunc_chndtr_ptr[2*1] = _func_chndtr +ufunc_chndtr_ptr[2*1+1] = ("chndtr") +ufunc_chndtr_data[0] = &ufunc_chndtr_ptr[2*0] +ufunc_chndtr_data[1] = &ufunc_chndtr_ptr[2*1] +chndtr = np.PyUFunc_FromFuncAndData(ufunc_chndtr_loops, ufunc_chndtr_data, ufunc_chndtr_types, 2, 3, 1, 0, "chndtr", ufunc_chndtr_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_chndtridf_loops[2] +cdef void *ufunc_chndtridf_ptr[4] +cdef void *ufunc_chndtridf_data[2] +cdef char ufunc_chndtridf_types[8] +cdef char *ufunc_chndtridf_doc = ( + "chndtridf(x, p, nc, out=None)\n" + "\n" + "Inverse to `chndtr` vs `df`\n" + "\n" + "Calculated using a search to find a value 
for `df` that produces the\n" + "desired value of `p`.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Upper bound of the integral; must satisfy ``x >= 0``\n" + "p : array_like\n" + " Probability; must satisfy ``0 <= p < 1``\n" + "nc : array_like\n" + " Non-centrality parameter; must satisfy ``nc >= 0``\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "df : scalar or ndarray\n" + " Degrees of freedom\n" + "\n" + "See Also\n" + "--------\n" + "chndtr, chndtrix, chndtrinc") +ufunc_chndtridf_loops[0] = loop_d_ddd__As_fff_f +ufunc_chndtridf_loops[1] = loop_d_ddd__As_ddd_d +ufunc_chndtridf_types[0] = NPY_FLOAT +ufunc_chndtridf_types[1] = NPY_FLOAT +ufunc_chndtridf_types[2] = NPY_FLOAT +ufunc_chndtridf_types[3] = NPY_FLOAT +ufunc_chndtridf_types[4] = NPY_DOUBLE +ufunc_chndtridf_types[5] = NPY_DOUBLE +ufunc_chndtridf_types[6] = NPY_DOUBLE +ufunc_chndtridf_types[7] = NPY_DOUBLE +ufunc_chndtridf_ptr[2*0] = _func_chndtridf +ufunc_chndtridf_ptr[2*0+1] = ("chndtridf") +ufunc_chndtridf_ptr[2*1] = _func_chndtridf +ufunc_chndtridf_ptr[2*1+1] = ("chndtridf") +ufunc_chndtridf_data[0] = &ufunc_chndtridf_ptr[2*0] +ufunc_chndtridf_data[1] = &ufunc_chndtridf_ptr[2*1] +chndtridf = np.PyUFunc_FromFuncAndData(ufunc_chndtridf_loops, ufunc_chndtridf_data, ufunc_chndtridf_types, 2, 3, 1, 0, "chndtridf", ufunc_chndtridf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_chndtrinc_loops[2] +cdef void *ufunc_chndtrinc_ptr[4] +cdef void *ufunc_chndtrinc_data[2] +cdef char ufunc_chndtrinc_types[8] +cdef char *ufunc_chndtrinc_doc = ( + "chndtrinc(x, df, p, out=None)\n" + "\n" + "Inverse to `chndtr` vs `nc`\n" + "\n" + "Calculated using a search to find a value for `df` that produces the\n" + "desired value of `p`.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Upper bound of the integral; must satisfy ``x >= 0``\n" + "df : array_like\n" + " Degrees of freedom; must satisfy ``df > 
0``\n" + "p : array_like\n" + " Probability; must satisfy ``0 <= p < 1``\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "nc : scalar or ndarray\n" + " Non-centrality\n" + "\n" + "See Also\n" + "--------\n" + "chndtr, chndtrix, chndtrinc") +ufunc_chndtrinc_loops[0] = loop_d_ddd__As_fff_f +ufunc_chndtrinc_loops[1] = loop_d_ddd__As_ddd_d +ufunc_chndtrinc_types[0] = NPY_FLOAT +ufunc_chndtrinc_types[1] = NPY_FLOAT +ufunc_chndtrinc_types[2] = NPY_FLOAT +ufunc_chndtrinc_types[3] = NPY_FLOAT +ufunc_chndtrinc_types[4] = NPY_DOUBLE +ufunc_chndtrinc_types[5] = NPY_DOUBLE +ufunc_chndtrinc_types[6] = NPY_DOUBLE +ufunc_chndtrinc_types[7] = NPY_DOUBLE +ufunc_chndtrinc_ptr[2*0] = _func_chndtrinc +ufunc_chndtrinc_ptr[2*0+1] = ("chndtrinc") +ufunc_chndtrinc_ptr[2*1] = _func_chndtrinc +ufunc_chndtrinc_ptr[2*1+1] = ("chndtrinc") +ufunc_chndtrinc_data[0] = &ufunc_chndtrinc_ptr[2*0] +ufunc_chndtrinc_data[1] = &ufunc_chndtrinc_ptr[2*1] +chndtrinc = np.PyUFunc_FromFuncAndData(ufunc_chndtrinc_loops, ufunc_chndtrinc_data, ufunc_chndtrinc_types, 2, 3, 1, 0, "chndtrinc", ufunc_chndtrinc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_chndtrix_loops[2] +cdef void *ufunc_chndtrix_ptr[4] +cdef void *ufunc_chndtrix_data[2] +cdef char ufunc_chndtrix_types[8] +cdef char *ufunc_chndtrix_doc = ( + "chndtrix(p, df, nc, out=None)\n" + "\n" + "Inverse to `chndtr` vs `x`\n" + "\n" + "Calculated using a search to find a value for `x` that produces the\n" + "desired value of `p`.\n" + "\n" + "Parameters\n" + "----------\n" + "p : array_like\n" + " Probability; must satisfy ``0 <= p < 1``\n" + "df : array_like\n" + " Degrees of freedom; must satisfy ``df > 0``\n" + "nc : array_like\n" + " Non-centrality parameter; must satisfy ``nc >= 0``\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "x : scalar or ndarray\n" + " Value so that the probability a 
non-central Chi square random variable\n" + " with `df` degrees of freedom and non-centrality, `nc`, is greater than\n" + " `x` equals `p`.\n" + "\n" + "See Also\n" + "--------\n" + "chndtr, chndtridf, chndtrinc") +ufunc_chndtrix_loops[0] = loop_d_ddd__As_fff_f +ufunc_chndtrix_loops[1] = loop_d_ddd__As_ddd_d +ufunc_chndtrix_types[0] = NPY_FLOAT +ufunc_chndtrix_types[1] = NPY_FLOAT +ufunc_chndtrix_types[2] = NPY_FLOAT +ufunc_chndtrix_types[3] = NPY_FLOAT +ufunc_chndtrix_types[4] = NPY_DOUBLE +ufunc_chndtrix_types[5] = NPY_DOUBLE +ufunc_chndtrix_types[6] = NPY_DOUBLE +ufunc_chndtrix_types[7] = NPY_DOUBLE +ufunc_chndtrix_ptr[2*0] = _func_chndtrix +ufunc_chndtrix_ptr[2*0+1] = ("chndtrix") +ufunc_chndtrix_ptr[2*1] = _func_chndtrix +ufunc_chndtrix_ptr[2*1+1] = ("chndtrix") +ufunc_chndtrix_data[0] = &ufunc_chndtrix_ptr[2*0] +ufunc_chndtrix_data[1] = &ufunc_chndtrix_ptr[2*1] +chndtrix = np.PyUFunc_FromFuncAndData(ufunc_chndtrix_loops, ufunc_chndtrix_data, ufunc_chndtrix_types, 2, 3, 1, 0, "chndtrix", ufunc_chndtrix_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_cosdg_loops[2] +cdef void *ufunc_cosdg_ptr[4] +cdef void *ufunc_cosdg_data[2] +cdef char ufunc_cosdg_types[4] +cdef char *ufunc_cosdg_doc = ( + "cosdg(x, out=None)\n" + "\n" + "Cosine of the angle `x` given in degrees.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Angle, given in degrees.\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Cosine of the input.\n" + "\n" + "See Also\n" + "--------\n" + "sindg, tandg, cotdg\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "It is more accurate than using cosine directly.\n" + "\n" + ">>> x = 90 + 180 * np.arange(3)\n" + ">>> sc.cosdg(x)\n" + "array([-0., 0., -0.])\n" + ">>> np.cos(x * np.pi / 180)\n" + "array([ 6.1232340e-17, -1.8369702e-16, 3.0616170e-16])") +ufunc_cosdg_loops[0] = 
loop_d_d__As_f_f +ufunc_cosdg_loops[1] = loop_d_d__As_d_d +ufunc_cosdg_types[0] = NPY_FLOAT +ufunc_cosdg_types[1] = NPY_FLOAT +ufunc_cosdg_types[2] = NPY_DOUBLE +ufunc_cosdg_types[3] = NPY_DOUBLE +ufunc_cosdg_ptr[2*0] = _func_cosdg +ufunc_cosdg_ptr[2*0+1] = ("cosdg") +ufunc_cosdg_ptr[2*1] = _func_cosdg +ufunc_cosdg_ptr[2*1+1] = ("cosdg") +ufunc_cosdg_data[0] = &ufunc_cosdg_ptr[2*0] +ufunc_cosdg_data[1] = &ufunc_cosdg_ptr[2*1] +cosdg = np.PyUFunc_FromFuncAndData(ufunc_cosdg_loops, ufunc_cosdg_data, ufunc_cosdg_types, 2, 1, 1, 0, "cosdg", ufunc_cosdg_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_cosm1_loops[2] +cdef void *ufunc_cosm1_ptr[4] +cdef void *ufunc_cosm1_data[2] +cdef char ufunc_cosm1_types[4] +cdef char *ufunc_cosm1_doc = ( + "cosm1(x, out=None)\n" + "\n" + "cos(x) - 1 for use when `x` is near zero.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real valued argument.\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of ``cos(x) - 1``.\n" + "\n" + "See Also\n" + "--------\n" + "expm1, log1p\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "It is more accurate than computing ``cos(x) - 1`` directly for\n" + "``x`` around 0.\n" + "\n" + ">>> x = 1e-30\n" + ">>> np.cos(x) - 1\n" + "0.0\n" + ">>> sc.cosm1(x)\n" + "-5.0000000000000005e-61") +ufunc_cosm1_loops[0] = loop_d_d__As_f_f +ufunc_cosm1_loops[1] = loop_d_d__As_d_d +ufunc_cosm1_types[0] = NPY_FLOAT +ufunc_cosm1_types[1] = NPY_FLOAT +ufunc_cosm1_types[2] = NPY_DOUBLE +ufunc_cosm1_types[3] = NPY_DOUBLE +ufunc_cosm1_ptr[2*0] = _func_cosm1 +ufunc_cosm1_ptr[2*0+1] = ("cosm1") +ufunc_cosm1_ptr[2*1] = _func_cosm1 +ufunc_cosm1_ptr[2*1+1] = ("cosm1") +ufunc_cosm1_data[0] = &ufunc_cosm1_ptr[2*0] +ufunc_cosm1_data[1] = &ufunc_cosm1_ptr[2*1] +cosm1 = np.PyUFunc_FromFuncAndData(ufunc_cosm1_loops, ufunc_cosm1_data, 
ufunc_cosm1_types, 2, 1, 1, 0, "cosm1", ufunc_cosm1_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_cotdg_loops[2] +cdef void *ufunc_cotdg_ptr[4] +cdef void *ufunc_cotdg_data[2] +cdef char ufunc_cotdg_types[4] +cdef char *ufunc_cotdg_doc = ( + "cotdg(x, out=None)\n" + "\n" + "Cotangent of the angle `x` given in degrees.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Angle, given in degrees.\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Cotangent at the input.\n" + "\n" + "See Also\n" + "--------\n" + "sindg, cosdg, tandg\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "It is more accurate than using cotangent directly.\n" + "\n" + ">>> x = 90 + 180 * np.arange(3)\n" + ">>> sc.cotdg(x)\n" + "array([0., 0., 0.])\n" + ">>> 1 / np.tan(x * np.pi / 180)\n" + "array([6.1232340e-17, 1.8369702e-16, 3.0616170e-16])") +ufunc_cotdg_loops[0] = loop_d_d__As_f_f +ufunc_cotdg_loops[1] = loop_d_d__As_d_d +ufunc_cotdg_types[0] = NPY_FLOAT +ufunc_cotdg_types[1] = NPY_FLOAT +ufunc_cotdg_types[2] = NPY_DOUBLE +ufunc_cotdg_types[3] = NPY_DOUBLE +ufunc_cotdg_ptr[2*0] = _func_cotdg +ufunc_cotdg_ptr[2*0+1] = ("cotdg") +ufunc_cotdg_ptr[2*1] = _func_cotdg +ufunc_cotdg_ptr[2*1+1] = ("cotdg") +ufunc_cotdg_data[0] = &ufunc_cotdg_ptr[2*0] +ufunc_cotdg_data[1] = &ufunc_cotdg_ptr[2*1] +cotdg = np.PyUFunc_FromFuncAndData(ufunc_cotdg_loops, ufunc_cotdg_data, ufunc_cotdg_types, 2, 1, 1, 0, "cotdg", ufunc_cotdg_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_dawsn_loops[4] +cdef void *ufunc_dawsn_ptr[8] +cdef void *ufunc_dawsn_data[4] +cdef char ufunc_dawsn_types[8] +cdef char *ufunc_dawsn_doc = ( + "dawsn(x, out=None)\n" + "\n" + "Dawson's integral.\n" + "\n" + "Computes::\n" + "\n" + " exp(-x**2) * integral(exp(t**2), t=0..x).\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Function 
parameter.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "y : scalar or ndarray\n" + " Value of the integral.\n" + "\n" + "See Also\n" + "--------\n" + "wofz, erf, erfc, erfcx, erfi\n" + "\n" + "References\n" + "----------\n" + ".. [1] Steven G. Johnson, Faddeeva W function implementation.\n" + " http://ab-initio.mit.edu/Faddeeva\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy import special\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> x = np.linspace(-15, 15, num=1000)\n" + ">>> plt.plot(x, special.dawsn(x))\n" + ">>> plt.xlabel('$x$')\n" + ">>> plt.ylabel('$dawsn(x)$')\n" + ">>> plt.show()") +ufunc_dawsn_loops[0] = loop_d_d__As_f_f +ufunc_dawsn_loops[1] = loop_d_d__As_d_d +ufunc_dawsn_loops[2] = loop_D_D__As_F_F +ufunc_dawsn_loops[3] = loop_D_D__As_D_D +ufunc_dawsn_types[0] = NPY_FLOAT +ufunc_dawsn_types[1] = NPY_FLOAT +ufunc_dawsn_types[2] = NPY_DOUBLE +ufunc_dawsn_types[3] = NPY_DOUBLE +ufunc_dawsn_types[4] = NPY_CFLOAT +ufunc_dawsn_types[5] = NPY_CFLOAT +ufunc_dawsn_types[6] = NPY_CDOUBLE +ufunc_dawsn_types[7] = NPY_CDOUBLE +ufunc_dawsn_ptr[2*0] = scipy.special._ufuncs_cxx._export_faddeeva_dawsn +ufunc_dawsn_ptr[2*0+1] = ("dawsn") +ufunc_dawsn_ptr[2*1] = scipy.special._ufuncs_cxx._export_faddeeva_dawsn +ufunc_dawsn_ptr[2*1+1] = ("dawsn") +ufunc_dawsn_ptr[2*2] = scipy.special._ufuncs_cxx._export_faddeeva_dawsn_complex +ufunc_dawsn_ptr[2*2+1] = ("dawsn") +ufunc_dawsn_ptr[2*3] = scipy.special._ufuncs_cxx._export_faddeeva_dawsn_complex +ufunc_dawsn_ptr[2*3+1] = ("dawsn") +ufunc_dawsn_data[0] = &ufunc_dawsn_ptr[2*0] +ufunc_dawsn_data[1] = &ufunc_dawsn_ptr[2*1] +ufunc_dawsn_data[2] = &ufunc_dawsn_ptr[2*2] +ufunc_dawsn_data[3] = &ufunc_dawsn_ptr[2*3] +dawsn = np.PyUFunc_FromFuncAndData(ufunc_dawsn_loops, ufunc_dawsn_data, ufunc_dawsn_types, 4, 1, 1, 0, "dawsn", ufunc_dawsn_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_ellipe_loops[2] +cdef void 
*ufunc_ellipe_ptr[4] +cdef void *ufunc_ellipe_data[2] +cdef char ufunc_ellipe_types[4] +cdef char *ufunc_ellipe_doc = ( + "ellipe(m, out=None)\n" + "\n" + "Complete elliptic integral of the second kind\n" + "\n" + "This function is defined as\n" + "\n" + ".. math:: E(m) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{1/2} dt\n" + "\n" + "Parameters\n" + "----------\n" + "m : array_like\n" + " Defines the parameter of the elliptic integral.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "E : scalar or ndarray\n" + " Value of the elliptic integral.\n" + "\n" + "See Also\n" + "--------\n" + "ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1\n" + "ellipk : Complete elliptic integral of the first kind\n" + "ellipkinc : Incomplete elliptic integral of the first kind\n" + "ellipeinc : Incomplete elliptic integral of the second kind\n" + "elliprd : Symmetric elliptic integral of the second kind.\n" + "elliprg : Completely-symmetric elliptic integral of the second kind.\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for the Cephes [1]_ routine `ellpe`.\n" + "\n" + "For `m > 0` the computation uses the approximation,\n" + "\n" + ".. math:: E(m) \\approx P(1-m) - (1-m) \\log(1-m) Q(1-m),\n" + "\n" + "where :math:`P` and :math:`Q` are tenth-order polynomials. For\n" + "`m < 0`, the relation\n" + "\n" + ".. math:: E(m) = E(m/(m - 1)) \\sqrt(1-m)\n" + "\n" + "is used.\n" + "\n" + "The parameterization in terms of :math:`m` follows that of section\n" + "17.2 in [2]_. Other parameterizations in terms of the\n" + "complementary parameter :math:`1 - m`, modular angle\n" + ":math:`\\sin^2(\\alpha) = m`, or modulus :math:`k^2 = m` are also\n" + "used, so be careful that you choose the correct parameter.\n" + "\n" + "The Legendre E integral is related to Carlson's symmetric R_D or R_G\n" + "functions in multiple ways [3]_. For example,\n" + "\n" + ".. 
math:: E(m) = 2 R_G(0, 1-k^2, 1) .\n" + "\n" + "References\n" + "----------\n" + ".. [1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + ".. [2] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.\n" + ".. [3] NIST Digital Library of Mathematical\n" + " Functions. http://dlmf.nist.gov/, Release 1.0.28 of\n" + " 2020-09-15. See Sec. 19.25(i) https://dlmf.nist.gov/19.25#i\n" + "\n" + "Examples\n" + "--------\n" + "This function is used in finding the circumference of an\n" + "ellipse with semi-major axis `a` and semi-minor axis `b`.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy import special\n" + "\n" + ">>> a = 3.5\n" + ">>> b = 2.1\n" + ">>> e_sq = 1.0 - b**2/a**2 # eccentricity squared\n" + "\n" + "Then the circumference is found using the following:\n" + "\n" + ">>> C = 4*a*special.ellipe(e_sq) # circumference formula\n" + ">>> C\n" + "17.868899204378693\n" + "\n" + "When `a` and `b` are the same (meaning eccentricity is 0),\n" + "this reduces to the circumference of a circle.\n" + "\n" + ">>> 4*a*special.ellipe(0.0) # formula for ellipse with a = b\n" + "21.991148575128552\n" + ">>> 2*np.pi*a # formula for circle of radius a\n" + "21.991148575128552") +ufunc_ellipe_loops[0] = loop_d_d__As_f_f +ufunc_ellipe_loops[1] = loop_d_d__As_d_d +ufunc_ellipe_types[0] = NPY_FLOAT +ufunc_ellipe_types[1] = NPY_FLOAT +ufunc_ellipe_types[2] = NPY_DOUBLE +ufunc_ellipe_types[3] = NPY_DOUBLE +ufunc_ellipe_ptr[2*0] = _func_ellpe +ufunc_ellipe_ptr[2*0+1] = ("ellipe") +ufunc_ellipe_ptr[2*1] = _func_ellpe +ufunc_ellipe_ptr[2*1+1] = ("ellipe") +ufunc_ellipe_data[0] = &ufunc_ellipe_ptr[2*0] +ufunc_ellipe_data[1] = &ufunc_ellipe_ptr[2*1] +ellipe = np.PyUFunc_FromFuncAndData(ufunc_ellipe_loops, ufunc_ellipe_data, ufunc_ellipe_types, 2, 1, 1, 0, "ellipe", ufunc_ellipe_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_ellipeinc_loops[2] 
+cdef void *ufunc_ellipeinc_ptr[4] +cdef void *ufunc_ellipeinc_data[2] +cdef char ufunc_ellipeinc_types[6] +cdef char *ufunc_ellipeinc_doc = ( + "ellipeinc(phi, m, out=None)\n" + "\n" + "Incomplete elliptic integral of the second kind\n" + "\n" + "This function is defined as\n" + "\n" + ".. math:: E(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{1/2} dt\n" + "\n" + "Parameters\n" + "----------\n" + "phi : array_like\n" + " amplitude of the elliptic integral.\n" + "m : array_like\n" + " parameter of the elliptic integral.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "E : scalar or ndarray\n" + " Value of the elliptic integral.\n" + "\n" + "See Also\n" + "--------\n" + "ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1\n" + "ellipk : Complete elliptic integral of the first kind\n" + "ellipkinc : Incomplete elliptic integral of the first kind\n" + "ellipe : Complete elliptic integral of the second kind\n" + "elliprd : Symmetric elliptic integral of the second kind.\n" + "elliprf : Completely-symmetric elliptic integral of the first kind.\n" + "elliprg : Completely-symmetric elliptic integral of the second kind.\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for the Cephes [1]_ routine `ellie`.\n" + "\n" + "Computation uses arithmetic-geometric means algorithm.\n" + "\n" + "The parameterization in terms of :math:`m` follows that of section\n" + "17.2 in [2]_. Other parameterizations in terms of the\n" + "complementary parameter :math:`1 - m`, modular angle\n" + ":math:`\\sin^2(\\alpha) = m`, or modulus :math:`k^2 = m` are also\n" + "used, so be careful that you choose the correct parameter.\n" + "\n" + "The Legendre E incomplete integral can be related to combinations\n" + "of Carlson's symmetric integrals R_D, R_F, and R_G in multiple\n" + "ways [3]_. For example, with :math:`c = \\csc^2\\phi`,\n" + "\n" + ".. 
math::\n" + " E(\\phi, m) = R_F(c-1, c-k^2, c)\n" + " - \\frac{1}{3} k^2 R_D(c-1, c-k^2, c) .\n" + "\n" + "References\n" + "----------\n" + ".. [1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + ".. [2] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.\n" + ".. [3] NIST Digital Library of Mathematical\n" + " Functions. http://dlmf.nist.gov/, Release 1.0.28 of\n" + " 2020-09-15. See Sec. 19.25(i) https://dlmf.nist.gov/19.25#i") +ufunc_ellipeinc_loops[0] = loop_d_dd__As_ff_f +ufunc_ellipeinc_loops[1] = loop_d_dd__As_dd_d +ufunc_ellipeinc_types[0] = NPY_FLOAT +ufunc_ellipeinc_types[1] = NPY_FLOAT +ufunc_ellipeinc_types[2] = NPY_FLOAT +ufunc_ellipeinc_types[3] = NPY_DOUBLE +ufunc_ellipeinc_types[4] = NPY_DOUBLE +ufunc_ellipeinc_types[5] = NPY_DOUBLE +ufunc_ellipeinc_ptr[2*0] = _func_ellie +ufunc_ellipeinc_ptr[2*0+1] = ("ellipeinc") +ufunc_ellipeinc_ptr[2*1] = _func_ellie +ufunc_ellipeinc_ptr[2*1+1] = ("ellipeinc") +ufunc_ellipeinc_data[0] = &ufunc_ellipeinc_ptr[2*0] +ufunc_ellipeinc_data[1] = &ufunc_ellipeinc_ptr[2*1] +ellipeinc = np.PyUFunc_FromFuncAndData(ufunc_ellipeinc_loops, ufunc_ellipeinc_data, ufunc_ellipeinc_types, 2, 2, 1, 0, "ellipeinc", ufunc_ellipeinc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_ellipj_loops[2] +cdef void *ufunc_ellipj_ptr[4] +cdef void *ufunc_ellipj_data[2] +cdef char ufunc_ellipj_types[12] +cdef char *ufunc_ellipj_doc = ( + "ellipj(u, m, out=None)\n" + "\n" + "Jacobian elliptic functions\n" + "\n" + "Calculates the Jacobian elliptic functions of parameter `m` between\n" + "0 and 1, and real argument `u`.\n" + "\n" + "Parameters\n" + "----------\n" + "m : array_like\n" + " Parameter.\n" + "u : array_like\n" + " Argument.\n" + "out : tuple of ndarray, optional\n" + " Optional output arrays for the function values\n" + "\n" + "Returns\n" + "-------\n" + "sn, cn, dn, ph : 4-tuple of scalar or 
ndarray\n" + " The returned functions::\n" + "\n" + " sn(u|m), cn(u|m), dn(u|m)\n" + "\n" + " The value `ph` is such that if `u = ellipkinc(ph, m)`,\n" + " then `sn(u|m) = sin(ph)` and `cn(u|m) = cos(ph)`.\n" + "\n" + "See Also\n" + "--------\n" + "ellipk : Complete elliptic integral of the first kind\n" + "ellipkinc : Incomplete elliptic integral of the first kind\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for the Cephes [1]_ routine `ellpj`.\n" + "\n" + "These functions are periodic, with quarter-period on the real axis\n" + "equal to the complete elliptic integral `ellipk(m)`.\n" + "\n" + "Relation to incomplete elliptic integral: If `u = ellipkinc(phi,m)`, then\n" + "`sn(u|m) = sin(phi)`, and `cn(u|m) = cos(phi)`. The `phi` is called\n" + "the amplitude of `u`.\n" + "\n" + "Computation is by means of the arithmetic-geometric mean algorithm,\n" + "except when `m` is within 1e-9 of 0 or 1. In the latter case with `m`\n" + "close to 1, the approximation applies only for `phi < pi/2`.\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/") +ufunc_ellipj_loops[0] = loop_i_dd_dddd_As_ff_ffff +ufunc_ellipj_loops[1] = loop_i_dd_dddd_As_dd_dddd +ufunc_ellipj_types[0] = NPY_FLOAT +ufunc_ellipj_types[1] = NPY_FLOAT +ufunc_ellipj_types[2] = NPY_FLOAT +ufunc_ellipj_types[3] = NPY_FLOAT +ufunc_ellipj_types[4] = NPY_FLOAT +ufunc_ellipj_types[5] = NPY_FLOAT +ufunc_ellipj_types[6] = NPY_DOUBLE +ufunc_ellipj_types[7] = NPY_DOUBLE +ufunc_ellipj_types[8] = NPY_DOUBLE +ufunc_ellipj_types[9] = NPY_DOUBLE +ufunc_ellipj_types[10] = NPY_DOUBLE +ufunc_ellipj_types[11] = NPY_DOUBLE +ufunc_ellipj_ptr[2*0] = _func_ellpj +ufunc_ellipj_ptr[2*0+1] = ("ellipj") +ufunc_ellipj_ptr[2*1] = _func_ellpj +ufunc_ellipj_ptr[2*1+1] = ("ellipj") +ufunc_ellipj_data[0] = &ufunc_ellipj_ptr[2*0] +ufunc_ellipj_data[1] = &ufunc_ellipj_ptr[2*1] +ellipj = np.PyUFunc_FromFuncAndData(ufunc_ellipj_loops, ufunc_ellipj_data, ufunc_ellipj_types, 2, 2, 4, 0, "ellipj", ufunc_ellipj_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_ellipk_loops[2] +cdef void *ufunc_ellipk_ptr[4] +cdef void *ufunc_ellipk_data[2] +cdef char ufunc_ellipk_types[4] +cdef char *ufunc_ellipk_doc = ( + "ellipk(m, out=None)\n" + "\n" + "Complete elliptic integral of the first kind.\n" + "\n" + "This function is defined as\n" + "\n" + ".. 
math:: K(m) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt\n" + "\n" + "Parameters\n" + "----------\n" + "m : array_like\n" + " The parameter of the elliptic integral.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "K : scalar or ndarray\n" + " Value of the elliptic integral.\n" + "\n" + "See Also\n" + "--------\n" + "ellipkm1 : Complete elliptic integral of the first kind around m = 1\n" + "ellipkinc : Incomplete elliptic integral of the first kind\n" + "ellipe : Complete elliptic integral of the second kind\n" + "ellipeinc : Incomplete elliptic integral of the second kind\n" + "elliprf : Completely-symmetric elliptic integral of the first kind.\n" + "\n" + "Notes\n" + "-----\n" + "For more precision around point m = 1, use `ellipkm1`, which this\n" + "function calls.\n" + "\n" + "The parameterization in terms of :math:`m` follows that of section\n" + "17.2 in [1]_. Other parameterizations in terms of the\n" + "complementary parameter :math:`1 - m`, modular angle\n" + ":math:`\\sin^2(\\alpha) = m`, or modulus :math:`k^2 = m` are also\n" + "used, so be careful that you choose the correct parameter.\n" + "\n" + "The Legendre K integral is related to Carlson's symmetric R_F\n" + "function by [2]_:\n" + "\n" + ".. math:: K(m) = R_F(0, 1-k^2, 1) .\n" + "\n" + "References\n" + "----------\n" + ".. [1] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.\n" + ".. [2] NIST Digital Library of Mathematical\n" + " Functions. http://dlmf.nist.gov/, Release 1.0.28 of\n" + " 2020-09-15. See Sec. 
19.25(i) https://dlmf.nist.gov/19.25#i") +ufunc_ellipk_loops[0] = loop_d_d__As_f_f +ufunc_ellipk_loops[1] = loop_d_d__As_d_d +ufunc_ellipk_types[0] = NPY_FLOAT +ufunc_ellipk_types[1] = NPY_FLOAT +ufunc_ellipk_types[2] = NPY_DOUBLE +ufunc_ellipk_types[3] = NPY_DOUBLE +ufunc_ellipk_ptr[2*0] = _func_ellipk +ufunc_ellipk_ptr[2*0+1] = ("ellipk") +ufunc_ellipk_ptr[2*1] = _func_ellipk +ufunc_ellipk_ptr[2*1+1] = ("ellipk") +ufunc_ellipk_data[0] = &ufunc_ellipk_ptr[2*0] +ufunc_ellipk_data[1] = &ufunc_ellipk_ptr[2*1] +ellipk = np.PyUFunc_FromFuncAndData(ufunc_ellipk_loops, ufunc_ellipk_data, ufunc_ellipk_types, 2, 1, 1, 0, "ellipk", ufunc_ellipk_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_ellipkinc_loops[2] +cdef void *ufunc_ellipkinc_ptr[4] +cdef void *ufunc_ellipkinc_data[2] +cdef char ufunc_ellipkinc_types[6] +cdef char *ufunc_ellipkinc_doc = ( + "ellipkinc(phi, m, out=None)\n" + "\n" + "Incomplete elliptic integral of the first kind\n" + "\n" + "This function is defined as\n" + "\n" + ".. math:: K(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{-1/2} dt\n" + "\n" + "This function is also called :math:`F(\\phi, m)`.\n" + "\n" + "Parameters\n" + "----------\n" + "phi : array_like\n" + " amplitude of the elliptic integral\n" + "m : array_like\n" + " parameter of the elliptic integral\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "K : scalar or ndarray\n" + " Value of the elliptic integral\n" + "\n" + "See Also\n" + "--------\n" + "ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1\n" + "ellipk : Complete elliptic integral of the first kind\n" + "ellipe : Complete elliptic integral of the second kind\n" + "ellipeinc : Incomplete elliptic integral of the second kind\n" + "elliprf : Completely-symmetric elliptic integral of the first kind.\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for the Cephes [1]_ routine `ellik`. 
The computation is\n" + "carried out using the arithmetic-geometric mean algorithm.\n" + "\n" + "The parameterization in terms of :math:`m` follows that of section\n" + "17.2 in [2]_. Other parameterizations in terms of the\n" + "complementary parameter :math:`1 - m`, modular angle\n" + ":math:`\\sin^2(\\alpha) = m`, or modulus :math:`k^2 = m` are also\n" + "used, so be careful that you choose the correct parameter.\n" + "\n" + "The Legendre K incomplete integral (or F integral) is related to\n" + "Carlson's symmetric R_F function [3]_.\n" + "Setting :math:`c = \\csc^2\\phi`,\n" + "\n" + ".. math:: F(\\phi, m) = R_F(c-1, c-k^2, c) .\n" + "\n" + "References\n" + "----------\n" + ".. [1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + ".. [2] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.\n" + ".. [3] NIST Digital Library of Mathematical\n" + " Functions. http://dlmf.nist.gov/, Release 1.0.28 of\n" + " 2020-09-15. See Sec. 
19.25(i) https://dlmf.nist.gov/19.25#i") +ufunc_ellipkinc_loops[0] = loop_d_dd__As_ff_f +ufunc_ellipkinc_loops[1] = loop_d_dd__As_dd_d +ufunc_ellipkinc_types[0] = NPY_FLOAT +ufunc_ellipkinc_types[1] = NPY_FLOAT +ufunc_ellipkinc_types[2] = NPY_FLOAT +ufunc_ellipkinc_types[3] = NPY_DOUBLE +ufunc_ellipkinc_types[4] = NPY_DOUBLE +ufunc_ellipkinc_types[5] = NPY_DOUBLE +ufunc_ellipkinc_ptr[2*0] = _func_ellik +ufunc_ellipkinc_ptr[2*0+1] = ("ellipkinc") +ufunc_ellipkinc_ptr[2*1] = _func_ellik +ufunc_ellipkinc_ptr[2*1+1] = ("ellipkinc") +ufunc_ellipkinc_data[0] = &ufunc_ellipkinc_ptr[2*0] +ufunc_ellipkinc_data[1] = &ufunc_ellipkinc_ptr[2*1] +ellipkinc = np.PyUFunc_FromFuncAndData(ufunc_ellipkinc_loops, ufunc_ellipkinc_data, ufunc_ellipkinc_types, 2, 2, 1, 0, "ellipkinc", ufunc_ellipkinc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_ellipkm1_loops[2] +cdef void *ufunc_ellipkm1_ptr[4] +cdef void *ufunc_ellipkm1_data[2] +cdef char ufunc_ellipkm1_types[4] +cdef char *ufunc_ellipkm1_doc = ( + "ellipkm1(p, out=None)\n" + "\n" + "Complete elliptic integral of the first kind around `m` = 1\n" + "\n" + "This function is defined as\n" + "\n" + ".. 
math:: K(p) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt\n" + "\n" + "where `m = 1 - p`.\n" + "\n" + "Parameters\n" + "----------\n" + "p : array_like\n" + " Defines the parameter of the elliptic integral as `m = 1 - p`.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "K : scalar or ndarray\n" + " Value of the elliptic integral.\n" + "\n" + "See Also\n" + "--------\n" + "ellipk : Complete elliptic integral of the first kind\n" + "ellipkinc : Incomplete elliptic integral of the first kind\n" + "ellipe : Complete elliptic integral of the second kind\n" + "ellipeinc : Incomplete elliptic integral of the second kind\n" + "elliprf : Completely-symmetric elliptic integral of the first kind.\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for the Cephes [1]_ routine `ellpk`.\n" + "\n" + "For `p <= 1`, computation uses the approximation,\n" + "\n" + ".. math:: K(p) \\approx P(p) - \\log(p) Q(p),\n" + "\n" + "where :math:`P` and :math:`Q` are tenth-order polynomials. The\n" + "argument `p` is used internally rather than `m` so that the logarithmic\n" + "singularity at `m = 1` will be shifted to the origin; this preserves\n" + "maximum accuracy. For `p > 1`, the identity\n" + "\n" + ".. math:: K(p) = K(1/p)/\\sqrt(p)\n" + "\n" + "is used.\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/") +ufunc_ellipkm1_loops[0] = loop_d_d__As_f_f +ufunc_ellipkm1_loops[1] = loop_d_d__As_d_d +ufunc_ellipkm1_types[0] = NPY_FLOAT +ufunc_ellipkm1_types[1] = NPY_FLOAT +ufunc_ellipkm1_types[2] = NPY_DOUBLE +ufunc_ellipkm1_types[3] = NPY_DOUBLE +ufunc_ellipkm1_ptr[2*0] = _func_ellpk +ufunc_ellipkm1_ptr[2*0+1] = ("ellipkm1") +ufunc_ellipkm1_ptr[2*1] = _func_ellpk +ufunc_ellipkm1_ptr[2*1+1] = ("ellipkm1") +ufunc_ellipkm1_data[0] = &ufunc_ellipkm1_ptr[2*0] +ufunc_ellipkm1_data[1] = &ufunc_ellipkm1_ptr[2*1] +ellipkm1 = np.PyUFunc_FromFuncAndData(ufunc_ellipkm1_loops, ufunc_ellipkm1_data, ufunc_ellipkm1_types, 2, 1, 1, 0, "ellipkm1", ufunc_ellipkm1_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_elliprc_loops[4] +cdef void *ufunc_elliprc_ptr[8] +cdef void *ufunc_elliprc_data[4] +cdef char ufunc_elliprc_types[12] +cdef char *ufunc_elliprc_doc = ( + "elliprc(x, y, out=None)\n" + "\n" + "Degenerate symmetric elliptic integral.\n" + "\n" + "The function RC is defined as [1]_\n" + "\n" + ".. math::\n" + "\n" + " R_{\\mathrm{C}}(x, y) =\n" + " \\frac{1}{2} \\int_0^{+\\infty} (t + x)^{-1/2} (t + y)^{-1} dt\n" + " = R_{\\mathrm{F}}(x, y, y)\n" + "\n" + "Parameters\n" + "----------\n" + "x, y : array_like\n" + " Real or complex input parameters. `x` can be any number in the\n" + " complex plane cut along the negative real axis. `y` must be non-zero.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "R : scalar or ndarray\n" + " Value of the integral. If `y` is real and negative, the Cauchy\n" + " principal value is returned. If both of `x` and `y` are real, the\n" + " return value is real. 
Otherwise, the return value is complex.\n" + "\n" + "See Also\n" + "--------\n" + "elliprf : Completely-symmetric elliptic integral of the first kind.\n" + "elliprd : Symmetric elliptic integral of the second kind.\n" + "elliprg : Completely-symmetric elliptic integral of the second kind.\n" + "elliprj : Symmetric elliptic integral of the third kind.\n" + "\n" + "Notes\n" + "-----\n" + "RC is a degenerate case of the symmetric integral RF: ``elliprc(x, y) ==\n" + "elliprf(x, y, y)``. It is an elementary function rather than an elliptic\n" + "integral.\n" + "\n" + "The code implements Carlson's algorithm based on the duplication theorems\n" + "and series expansion up to the 7th order. [2]_\n" + "\n" + ".. versionadded:: 1.8.0\n" + "\n" + "References\n" + "----------\n" + ".. [1] B. C. Carlson, ed., Chapter 19 in \"Digital Library of Mathematical\n" + " Functions,\" NIST, US Dept. of Commerce.\n" + " https://dlmf.nist.gov/19.16.E6\n" + ".. [2] B. C. Carlson, \"Numerical computation of real or complex elliptic\n" + " integrals,\" Numer. Algorithm, vol. 10, no. 1, pp. 
13-26, 1995.\n" + " https://arxiv.org/abs/math/9409227\n" + " https://doi.org/10.1007/BF02198293\n" + "\n" + "Examples\n" + "--------\n" + "Basic homogeneity property:\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import elliprc\n" + "\n" + ">>> x = 1.2 + 3.4j\n" + ">>> y = 5.\n" + ">>> scale = 0.3 + 0.4j\n" + ">>> elliprc(scale*x, scale*y)\n" + "(0.5484493976710874-0.4169557678995833j)\n" + "\n" + ">>> elliprc(x, y)/np.sqrt(scale)\n" + "(0.5484493976710874-0.41695576789958333j)\n" + "\n" + "When the two arguments coincide, the integral is particularly\n" + "simple:\n" + "\n" + ">>> x = 1.2 + 3.4j\n" + ">>> elliprc(x, x)\n" + "(0.4299173120614631-0.3041729818745595j)\n" + "\n" + ">>> 1/np.sqrt(x)\n" + "(0.4299173120614631-0.30417298187455954j)\n" + "\n" + "Another simple case: the first argument vanishes:\n" + "\n" + ">>> y = 1.2 + 3.4j\n" + ">>> elliprc(0, y)\n" + "(0.6753125346116815-0.47779380263880866j)\n" + "\n" + ">>> np.pi/2/np.sqrt(y)\n" + "(0.6753125346116815-0.4777938026388088j)\n" + "\n" + "When `x` and `y` are both positive, we can express\n" + ":math:`R_C(x,y)` in terms of more elementary functions. 
For the\n" + "case :math:`0 \\le x < y`,\n" + "\n" + ">>> x = 3.2\n" + ">>> y = 6.\n" + ">>> elliprc(x, y)\n" + "0.44942991498453444\n" + "\n" + ">>> np.arctan(np.sqrt((y-x)/x))/np.sqrt(y-x)\n" + "0.44942991498453433\n" + "\n" + "And for the case :math:`0 \\le y < x`,\n" + "\n" + ">>> x = 6.\n" + ">>> y = 3.2\n" + ">>> elliprc(x,y)\n" + "0.4989837501576147\n" + "\n" + ">>> np.log((np.sqrt(x)+np.sqrt(x-y))/np.sqrt(y))/np.sqrt(x-y)\n" + "0.49898375015761476") +ufunc_elliprc_loops[0] = loop_d_dd__As_ff_f +ufunc_elliprc_loops[1] = loop_d_dd__As_dd_d +ufunc_elliprc_loops[2] = loop_D_DD__As_FF_F +ufunc_elliprc_loops[3] = loop_D_DD__As_DD_D +ufunc_elliprc_types[0] = NPY_FLOAT +ufunc_elliprc_types[1] = NPY_FLOAT +ufunc_elliprc_types[2] = NPY_FLOAT +ufunc_elliprc_types[3] = NPY_DOUBLE +ufunc_elliprc_types[4] = NPY_DOUBLE +ufunc_elliprc_types[5] = NPY_DOUBLE +ufunc_elliprc_types[6] = NPY_CFLOAT +ufunc_elliprc_types[7] = NPY_CFLOAT +ufunc_elliprc_types[8] = NPY_CFLOAT +ufunc_elliprc_types[9] = NPY_CDOUBLE +ufunc_elliprc_types[10] = NPY_CDOUBLE +ufunc_elliprc_types[11] = NPY_CDOUBLE +ufunc_elliprc_ptr[2*0] = scipy.special._ufuncs_cxx._export_fellint_RC +ufunc_elliprc_ptr[2*0+1] = ("elliprc") +ufunc_elliprc_ptr[2*1] = scipy.special._ufuncs_cxx._export_fellint_RC +ufunc_elliprc_ptr[2*1+1] = ("elliprc") +ufunc_elliprc_ptr[2*2] = scipy.special._ufuncs_cxx._export_cellint_RC +ufunc_elliprc_ptr[2*2+1] = ("elliprc") +ufunc_elliprc_ptr[2*3] = scipy.special._ufuncs_cxx._export_cellint_RC +ufunc_elliprc_ptr[2*3+1] = ("elliprc") +ufunc_elliprc_data[0] = &ufunc_elliprc_ptr[2*0] +ufunc_elliprc_data[1] = &ufunc_elliprc_ptr[2*1] +ufunc_elliprc_data[2] = &ufunc_elliprc_ptr[2*2] +ufunc_elliprc_data[3] = &ufunc_elliprc_ptr[2*3] +elliprc = np.PyUFunc_FromFuncAndData(ufunc_elliprc_loops, ufunc_elliprc_data, ufunc_elliprc_types, 4, 2, 1, 0, "elliprc", ufunc_elliprc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_elliprd_loops[4] +cdef void *ufunc_elliprd_ptr[8] +cdef void *ufunc_elliprd_data[4] 
+cdef char ufunc_elliprd_types[16] +cdef char *ufunc_elliprd_doc = ( + "elliprd(x, y, z, out=None)\n" + "\n" + "Symmetric elliptic integral of the second kind.\n" + "\n" + "The function RD is defined as [1]_\n" + "\n" + ".. math::\n" + "\n" + " R_{\\mathrm{D}}(x, y, z) =\n" + " \\frac{3}{2} \\int_0^{+\\infty} [(t + x) (t + y)]^{-1/2} (t + z)^{-3/2}\n" + " dt\n" + "\n" + "Parameters\n" + "----------\n" + "x, y, z : array_like\n" + " Real or complex input parameters. `x` or `y` can be any number in the\n" + " complex plane cut along the negative real axis, but at most one of them\n" + " can be zero, while `z` must be non-zero.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "R : scalar or ndarray\n" + " Value of the integral. If all of `x`, `y`, and `z` are real, the\n" + " return value is real. Otherwise, the return value is complex.\n" + "\n" + "See Also\n" + "--------\n" + "elliprc : Degenerate symmetric elliptic integral.\n" + "elliprf : Completely-symmetric elliptic integral of the first kind.\n" + "elliprg : Completely-symmetric elliptic integral of the second kind.\n" + "elliprj : Symmetric elliptic integral of the third kind.\n" + "\n" + "Notes\n" + "-----\n" + "RD is a degenerate case of the elliptic integral RJ: ``elliprd(x, y, z) ==\n" + "elliprj(x, y, z, z)``.\n" + "\n" + "The code implements Carlson's algorithm based on the duplication theorems\n" + "and series expansion up to the 7th order. [2]_\n" + "\n" + ".. versionadded:: 1.8.0\n" + "\n" + "References\n" + "----------\n" + ".. [1] B. C. Carlson, ed., Chapter 19 in \"Digital Library of Mathematical\n" + " Functions,\" NIST, US Dept. of Commerce.\n" + " https://dlmf.nist.gov/19.16.E5\n" + ".. [2] B. C. Carlson, \"Numerical computation of real or complex elliptic\n" + " integrals,\" Numer. Algorithm, vol. 10, no. 1, pp. 
13-26, 1995.\n" + " https://arxiv.org/abs/math/9409227\n" + " https://doi.org/10.1007/BF02198293\n" + "\n" + "Examples\n" + "--------\n" + "Basic homogeneity property:\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import elliprd\n" + "\n" + ">>> x = 1.2 + 3.4j\n" + ">>> y = 5.\n" + ">>> z = 6.\n" + ">>> scale = 0.3 + 0.4j\n" + ">>> elliprd(scale*x, scale*y, scale*z)\n" + "(-0.03703043835680379-0.24500934665683802j)\n" + "\n" + ">>> elliprd(x, y, z)*np.power(scale, -1.5)\n" + "(-0.0370304383568038-0.24500934665683805j)\n" + "\n" + "All three arguments coincide:\n" + "\n" + ">>> x = 1.2 + 3.4j\n" + ">>> elliprd(x, x, x)\n" + "(-0.03986825876151896-0.14051741840449586j)\n" + "\n" + ">>> np.power(x, -1.5)\n" + "(-0.03986825876151894-0.14051741840449583j)\n" + "\n" + "The so-called \"second lemniscate constant\":\n" + "\n" + ">>> elliprd(0, 2, 1)/3\n" + "0.5990701173677961\n" + "\n" + ">>> from scipy.special import gamma\n" + ">>> gamma(0.75)**2/np.sqrt(2*np.pi)\n" + "0.5990701173677959") +ufunc_elliprd_loops[0] = loop_d_ddd__As_fff_f +ufunc_elliprd_loops[1] = loop_d_ddd__As_ddd_d +ufunc_elliprd_loops[2] = loop_D_DDD__As_FFF_F +ufunc_elliprd_loops[3] = loop_D_DDD__As_DDD_D +ufunc_elliprd_types[0] = NPY_FLOAT +ufunc_elliprd_types[1] = NPY_FLOAT +ufunc_elliprd_types[2] = NPY_FLOAT +ufunc_elliprd_types[3] = NPY_FLOAT +ufunc_elliprd_types[4] = NPY_DOUBLE +ufunc_elliprd_types[5] = NPY_DOUBLE +ufunc_elliprd_types[6] = NPY_DOUBLE +ufunc_elliprd_types[7] = NPY_DOUBLE +ufunc_elliprd_types[8] = NPY_CFLOAT +ufunc_elliprd_types[9] = NPY_CFLOAT +ufunc_elliprd_types[10] = NPY_CFLOAT +ufunc_elliprd_types[11] = NPY_CFLOAT +ufunc_elliprd_types[12] = NPY_CDOUBLE +ufunc_elliprd_types[13] = NPY_CDOUBLE +ufunc_elliprd_types[14] = NPY_CDOUBLE +ufunc_elliprd_types[15] = NPY_CDOUBLE +ufunc_elliprd_ptr[2*0] = scipy.special._ufuncs_cxx._export_fellint_RD +ufunc_elliprd_ptr[2*0+1] = ("elliprd") +ufunc_elliprd_ptr[2*1] = scipy.special._ufuncs_cxx._export_fellint_RD 
+ufunc_elliprd_ptr[2*1+1] = ("elliprd") +ufunc_elliprd_ptr[2*2] = scipy.special._ufuncs_cxx._export_cellint_RD +ufunc_elliprd_ptr[2*2+1] = ("elliprd") +ufunc_elliprd_ptr[2*3] = scipy.special._ufuncs_cxx._export_cellint_RD +ufunc_elliprd_ptr[2*3+1] = ("elliprd") +ufunc_elliprd_data[0] = &ufunc_elliprd_ptr[2*0] +ufunc_elliprd_data[1] = &ufunc_elliprd_ptr[2*1] +ufunc_elliprd_data[2] = &ufunc_elliprd_ptr[2*2] +ufunc_elliprd_data[3] = &ufunc_elliprd_ptr[2*3] +elliprd = np.PyUFunc_FromFuncAndData(ufunc_elliprd_loops, ufunc_elliprd_data, ufunc_elliprd_types, 4, 3, 1, 0, "elliprd", ufunc_elliprd_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_elliprf_loops[4] +cdef void *ufunc_elliprf_ptr[8] +cdef void *ufunc_elliprf_data[4] +cdef char ufunc_elliprf_types[16] +cdef char *ufunc_elliprf_doc = ( + "elliprf(x, y, z, out=None)\n" + "\n" + "Completely-symmetric elliptic integral of the first kind.\n" + "\n" + "The function RF is defined as [1]_\n" + "\n" + ".. math::\n" + "\n" + " R_{\\mathrm{F}}(x, y, z) =\n" + " \\frac{1}{2} \\int_0^{+\\infty} [(t + x) (t + y) (t + z)]^{-1/2} dt\n" + "\n" + "Parameters\n" + "----------\n" + "x, y, z : array_like\n" + " Real or complex input parameters. `x`, `y`, or `z` can be any number in\n" + " the complex plane cut along the negative real axis, but at most one of\n" + " them can be zero.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "R : scalar or ndarray\n" + " Value of the integral. If all of `x`, `y`, and `z` are real, the return\n" + " value is real. 
Otherwise, the return value is complex.\n" + "\n" + "See Also\n" + "--------\n" + "elliprc : Degenerate symmetric integral.\n" + "elliprd : Symmetric elliptic integral of the second kind.\n" + "elliprg : Completely-symmetric elliptic integral of the second kind.\n" + "elliprj : Symmetric elliptic integral of the third kind.\n" + "\n" + "Notes\n" + "-----\n" + "The code implements Carlson's algorithm based on the duplication theorems\n" + "and series expansion up to the 7th order (cf.:\n" + "https://dlmf.nist.gov/19.36.i) and the AGM algorithm for the complete\n" + "integral. [2]_\n" + "\n" + ".. versionadded:: 1.8.0\n" + "\n" + "References\n" + "----------\n" + ".. [1] B. C. Carlson, ed., Chapter 19 in \"Digital Library of Mathematical\n" + " Functions,\" NIST, US Dept. of Commerce.\n" + " https://dlmf.nist.gov/19.16.E1\n" + ".. [2] B. C. Carlson, \"Numerical computation of real or complex elliptic\n" + " integrals,\" Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995.\n" + " https://arxiv.org/abs/math/9409227\n" + " https://doi.org/10.1007/BF02198293\n" + "\n" + "Examples\n" + "--------\n" + "Basic homogeneity property:\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import elliprf\n" + "\n" + ">>> x = 1.2 + 3.4j\n" + ">>> y = 5.\n" + ">>> z = 6.\n" + ">>> scale = 0.3 + 0.4j\n" + ">>> elliprf(scale*x, scale*y, scale*z)\n" + "(0.5328051227278146-0.4008623567957094j)\n" + "\n" + ">>> elliprf(x, y, z)/np.sqrt(scale)\n" + "(0.5328051227278147-0.4008623567957095j)\n" + "\n" + "All three arguments coincide:\n" + "\n" + ">>> x = 1.2 + 3.4j\n" + ">>> elliprf(x, x, x)\n" + "(0.42991731206146316-0.30417298187455954j)\n" + "\n" + ">>> 1/np.sqrt(x)\n" + "(0.4299173120614631-0.30417298187455954j)\n" + "\n" + "The so-called \"first lemniscate constant\":\n" + "\n" + ">>> elliprf(0, 1, 2)\n" + "1.3110287771460598\n" + "\n" + ">>> from scipy.special import gamma\n" + ">>> gamma(0.25)**2/(4*np.sqrt(2*np.pi))\n" + "1.3110287771460598") +ufunc_elliprf_loops[0] = 
loop_d_ddd__As_fff_f +ufunc_elliprf_loops[1] = loop_d_ddd__As_ddd_d +ufunc_elliprf_loops[2] = loop_D_DDD__As_FFF_F +ufunc_elliprf_loops[3] = loop_D_DDD__As_DDD_D +ufunc_elliprf_types[0] = NPY_FLOAT +ufunc_elliprf_types[1] = NPY_FLOAT +ufunc_elliprf_types[2] = NPY_FLOAT +ufunc_elliprf_types[3] = NPY_FLOAT +ufunc_elliprf_types[4] = NPY_DOUBLE +ufunc_elliprf_types[5] = NPY_DOUBLE +ufunc_elliprf_types[6] = NPY_DOUBLE +ufunc_elliprf_types[7] = NPY_DOUBLE +ufunc_elliprf_types[8] = NPY_CFLOAT +ufunc_elliprf_types[9] = NPY_CFLOAT +ufunc_elliprf_types[10] = NPY_CFLOAT +ufunc_elliprf_types[11] = NPY_CFLOAT +ufunc_elliprf_types[12] = NPY_CDOUBLE +ufunc_elliprf_types[13] = NPY_CDOUBLE +ufunc_elliprf_types[14] = NPY_CDOUBLE +ufunc_elliprf_types[15] = NPY_CDOUBLE +ufunc_elliprf_ptr[2*0] = scipy.special._ufuncs_cxx._export_fellint_RF +ufunc_elliprf_ptr[2*0+1] = ("elliprf") +ufunc_elliprf_ptr[2*1] = scipy.special._ufuncs_cxx._export_fellint_RF +ufunc_elliprf_ptr[2*1+1] = ("elliprf") +ufunc_elliprf_ptr[2*2] = scipy.special._ufuncs_cxx._export_cellint_RF +ufunc_elliprf_ptr[2*2+1] = ("elliprf") +ufunc_elliprf_ptr[2*3] = scipy.special._ufuncs_cxx._export_cellint_RF +ufunc_elliprf_ptr[2*3+1] = ("elliprf") +ufunc_elliprf_data[0] = &ufunc_elliprf_ptr[2*0] +ufunc_elliprf_data[1] = &ufunc_elliprf_ptr[2*1] +ufunc_elliprf_data[2] = &ufunc_elliprf_ptr[2*2] +ufunc_elliprf_data[3] = &ufunc_elliprf_ptr[2*3] +elliprf = np.PyUFunc_FromFuncAndData(ufunc_elliprf_loops, ufunc_elliprf_data, ufunc_elliprf_types, 4, 3, 1, 0, "elliprf", ufunc_elliprf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_elliprg_loops[4] +cdef void *ufunc_elliprg_ptr[8] +cdef void *ufunc_elliprg_data[4] +cdef char ufunc_elliprg_types[16] +cdef char *ufunc_elliprg_doc = ( + "elliprg(x, y, z, out=None)\n" + "\n" + "Completely-symmetric elliptic integral of the second kind.\n" + "\n" + "The function RG is defined as [1]_\n" + "\n" + ".. 
math::\n" + "\n" + " R_{\\mathrm{G}}(x, y, z) =\n" + " \\frac{1}{4} \\int_0^{+\\infty} [(t + x) (t + y) (t + z)]^{-1/2}\n" + " \\left(\\frac{x}{t + x} + \\frac{y}{t + y} + \\frac{z}{t + z}\\right) t\n" + " dt\n" + "\n" + "Parameters\n" + "----------\n" + "x, y, z : array_like\n" + " Real or complex input parameters. `x`, `y`, or `z` can be any number in\n" + " the complex plane cut along the negative real axis.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "R : scalar or ndarray\n" + " Value of the integral. If all of `x`, `y`, and `z` are real, the return\n" + " value is real. Otherwise, the return value is complex.\n" + "\n" + "See Also\n" + "--------\n" + "elliprc : Degenerate symmetric integral.\n" + "elliprd : Symmetric elliptic integral of the second kind.\n" + "elliprf : Completely-symmetric elliptic integral of the first kind.\n" + "elliprj : Symmetric elliptic integral of the third kind.\n" + "\n" + "Notes\n" + "-----\n" + "The implementation uses the relation [1]_\n" + "\n" + ".. math::\n" + "\n" + " 2 R_{\\mathrm{G}}(x, y, z) =\n" + " z R_{\\mathrm{F}}(x, y, z) -\n" + " \\frac{1}{3} (x - z) (y - z) R_{\\mathrm{D}}(x, y, z) +\n" + " \\sqrt{\\frac{x y}{z}}\n" + "\n" + "and the symmetry of `x`, `y`, `z` when at least one non-zero parameter can\n" + "be chosen as the pivot. When one of the arguments is close to zero, the AGM\n" + "method is applied instead. Other special cases are computed following Ref.\n" + "[2]_\n" + "\n" + ".. versionadded:: 1.8.0\n" + "\n" + "References\n" + "----------\n" + ".. [1] B. C. Carlson, \"Numerical computation of real or complex elliptic\n" + " integrals,\" Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995.\n" + " https://arxiv.org/abs/math/9409227\n" + " https://doi.org/10.1007/BF02198293\n" + ".. [2] B. C. Carlson, ed., Chapter 19 in \"Digital Library of Mathematical\n" + " Functions,\" NIST, US Dept. 
of Commerce.\n" + " https://dlmf.nist.gov/19.16.E1\n" + " https://dlmf.nist.gov/19.20.ii\n" + "\n" + "Examples\n" + "--------\n" + "Basic homogeneity property:\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import elliprg\n" + "\n" + ">>> x = 1.2 + 3.4j\n" + ">>> y = 5.\n" + ">>> z = 6.\n" + ">>> scale = 0.3 + 0.4j\n" + ">>> elliprg(scale*x, scale*y, scale*z)\n" + "(1.195936862005246+0.8470988320464167j)\n" + "\n" + ">>> elliprg(x, y, z)*np.sqrt(scale)\n" + "(1.195936862005246+0.8470988320464165j)\n" + "\n" + "Simplifications:\n" + "\n" + ">>> elliprg(0, y, y)\n" + "1.756203682760182\n" + "\n" + ">>> 0.25*np.pi*np.sqrt(y)\n" + "1.7562036827601817\n" + "\n" + ">>> elliprg(0, 0, z)\n" + "1.224744871391589\n" + "\n" + ">>> 0.5*np.sqrt(z)\n" + "1.224744871391589\n" + "\n" + "The surface area of a triaxial ellipsoid with semiaxes ``a``, ``b``, and\n" + "``c`` is given by\n" + "\n" + ".. math::\n" + "\n" + " S = 4 \\pi a b c R_{\\mathrm{G}}(1 / a^2, 1 / b^2, 1 / c^2).\n" + "\n" + ">>> def ellipsoid_area(a, b, c):\n" + "... r = 4.0 * np.pi * a * b * c\n" + "... 
return r * elliprg(1.0 / (a * a), 1.0 / (b * b), 1.0 / (c * c))\n" + ">>> print(ellipsoid_area(1, 3, 5))\n" + "108.62688289491807") +ufunc_elliprg_loops[0] = loop_d_ddd__As_fff_f +ufunc_elliprg_loops[1] = loop_d_ddd__As_ddd_d +ufunc_elliprg_loops[2] = loop_D_DDD__As_FFF_F +ufunc_elliprg_loops[3] = loop_D_DDD__As_DDD_D +ufunc_elliprg_types[0] = NPY_FLOAT +ufunc_elliprg_types[1] = NPY_FLOAT +ufunc_elliprg_types[2] = NPY_FLOAT +ufunc_elliprg_types[3] = NPY_FLOAT +ufunc_elliprg_types[4] = NPY_DOUBLE +ufunc_elliprg_types[5] = NPY_DOUBLE +ufunc_elliprg_types[6] = NPY_DOUBLE +ufunc_elliprg_types[7] = NPY_DOUBLE +ufunc_elliprg_types[8] = NPY_CFLOAT +ufunc_elliprg_types[9] = NPY_CFLOAT +ufunc_elliprg_types[10] = NPY_CFLOAT +ufunc_elliprg_types[11] = NPY_CFLOAT +ufunc_elliprg_types[12] = NPY_CDOUBLE +ufunc_elliprg_types[13] = NPY_CDOUBLE +ufunc_elliprg_types[14] = NPY_CDOUBLE +ufunc_elliprg_types[15] = NPY_CDOUBLE +ufunc_elliprg_ptr[2*0] = scipy.special._ufuncs_cxx._export_fellint_RG +ufunc_elliprg_ptr[2*0+1] = ("elliprg") +ufunc_elliprg_ptr[2*1] = scipy.special._ufuncs_cxx._export_fellint_RG +ufunc_elliprg_ptr[2*1+1] = ("elliprg") +ufunc_elliprg_ptr[2*2] = scipy.special._ufuncs_cxx._export_cellint_RG +ufunc_elliprg_ptr[2*2+1] = ("elliprg") +ufunc_elliprg_ptr[2*3] = scipy.special._ufuncs_cxx._export_cellint_RG +ufunc_elliprg_ptr[2*3+1] = ("elliprg") +ufunc_elliprg_data[0] = &ufunc_elliprg_ptr[2*0] +ufunc_elliprg_data[1] = &ufunc_elliprg_ptr[2*1] +ufunc_elliprg_data[2] = &ufunc_elliprg_ptr[2*2] +ufunc_elliprg_data[3] = &ufunc_elliprg_ptr[2*3] +elliprg = np.PyUFunc_FromFuncAndData(ufunc_elliprg_loops, ufunc_elliprg_data, ufunc_elliprg_types, 4, 3, 1, 0, "elliprg", ufunc_elliprg_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_elliprj_loops[4] +cdef void *ufunc_elliprj_ptr[8] +cdef void *ufunc_elliprj_data[4] +cdef char ufunc_elliprj_types[20] +cdef char *ufunc_elliprj_doc = ( + "elliprj(x, y, z, p, out=None)\n" + "\n" + "Symmetric elliptic integral of the third kind.\n" + "\n" 
+ "The function RJ is defined as [1]_\n" + "\n" + ".. math::\n" + "\n" + " R_{\\mathrm{J}}(x, y, z, p) =\n" + " \\frac{3}{2} \\int_0^{+\\infty} [(t + x) (t + y) (t + z)]^{-1/2}\n" + " (t + p)^{-1} dt\n" + "\n" + ".. warning::\n" + " This function should be considered experimental when the inputs are\n" + " unbalanced. Check correctness with another independent implementation.\n" + "\n" + "Parameters\n" + "----------\n" + "x, y, z, p : array_like\n" + " Real or complex input parameters. `x`, `y`, or `z` are numbers in\n" + " the complex plane cut along the negative real axis (subject to further\n" + " constraints, see Notes), and at most one of them can be zero. `p` must\n" + " be non-zero.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "R : scalar or ndarray\n" + " Value of the integral. If all of `x`, `y`, `z`, and `p` are real, the\n" + " return value is real. Otherwise, the return value is complex.\n" + "\n" + " If `p` is real and negative, while `x`, `y`, and `z` are real,\n" + " non-negative, and at most one of them is zero, the Cauchy principal\n" + " value is returned. [1]_ [2]_\n" + "\n" + "See Also\n" + "--------\n" + "elliprc : Degenerate symmetric integral.\n" + "elliprd : Symmetric elliptic integral of the second kind.\n" + "elliprf : Completely-symmetric elliptic integral of the first kind.\n" + "elliprg : Completely-symmetric elliptic integral of the second kind.\n" + "\n" + "Notes\n" + "-----\n" + "The code implements Carlson's algorithm based on the duplication theorems\n" + "and series expansion up to the 7th order. [3]_ The algorithm is slightly\n" + "different from its earlier incarnation as it appears in [1]_, in that the\n" + "call to `elliprc` (or ``atan``/``atanh``, see [4]_) is no longer needed in\n" + "the inner loop. Asymptotic approximations are used where arguments differ\n" + "widely in the order of magnitude. 
[5]_\n" + "\n" + "The input values are subject to certain sufficient but not necessary\n" + "constraints when input arguments are complex. Notably, ``x``, ``y``, and\n" + "``z`` must have non-negative real parts, unless two of them are\n" + "non-negative and complex-conjugates to each other while the other is a real\n" + "non-negative number. [1]_ If the inputs do not satisfy the sufficient\n" + "condition described in Ref. [1]_ they are rejected outright with the output\n" + "set to NaN.\n" + "\n" + "In the case where one of ``x``, ``y``, and ``z`` is equal to ``p``, the\n" + "function ``elliprd`` should be preferred because of its less restrictive\n" + "domain.\n" + "\n" + ".. versionadded:: 1.8.0\n" + "\n" + "References\n" + "----------\n" + ".. [1] B. C. Carlson, \"Numerical computation of real or complex elliptic\n" + " integrals,\" Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995.\n" + " https://arxiv.org/abs/math/9409227\n" + " https://doi.org/10.1007/BF02198293\n" + ".. [2] B. C. Carlson, ed., Chapter 19 in \"Digital Library of Mathematical\n" + " Functions,\" NIST, US Dept. of Commerce.\n" + " https://dlmf.nist.gov/19.20.iii\n" + ".. [3] B. C. Carlson, J. FitzSimmons, \"Reduction Theorems for Elliptic\n" + " Integrands with the Square Root of Two Quadratic Factors,\" J.\n" + " Comput. Appl. Math., vol. 118, nos. 1-2, pp. 71-85, 2000.\n" + " https://doi.org/10.1016/S0377-0427(00)00282-X\n" + ".. [4] F. Johansson, \"Numerical Evaluation of Elliptic Functions, Elliptic\n" + " Integrals and Modular Forms,\" in J. Blumlein, C. Schneider, P.\n" + " Paule, eds., \"Elliptic Integrals, Elliptic Functions and Modular\n" + " Forms in Quantum Field Theory,\" pp. 269-293, 2019 (Cham,\n" + " Switzerland: Springer Nature Switzerland)\n" + " https://arxiv.org/abs/1806.06725\n" + " https://doi.org/10.1007/978-3-030-04480-0\n" + ".. [5] B. C. Carlson, J. L. Gustafson, \"Asymptotic Approximations for\n" + " Symmetric Elliptic Integrals,\" SIAM J. Math. Anls., vol. 25, no. 
2,\n" + " pp. 288-303, 1994.\n" + " https://arxiv.org/abs/math/9310223\n" + " https://doi.org/10.1137/S0036141092228477\n" + "\n" + "Examples\n" + "--------\n" + "Basic homogeneity property:\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import elliprj\n" + "\n" + ">>> x = 1.2 + 3.4j\n" + ">>> y = 5.\n" + ">>> z = 6.\n" + ">>> p = 7.\n" + ">>> scale = 0.3 - 0.4j\n" + ">>> elliprj(scale*x, scale*y, scale*z, scale*p)\n" + "(0.10834905565679157+0.19694950747103812j)\n" + "\n" + ">>> elliprj(x, y, z, p)*np.power(scale, -1.5)\n" + "(0.10834905565679556+0.19694950747103854j)\n" + "\n" + "Reduction to simpler elliptic integral:\n" + "\n" + ">>> elliprj(x, y, z, z)\n" + "(0.08288462362195129-0.028376809745123258j)\n" + "\n" + ">>> from scipy.special import elliprd\n" + ">>> elliprd(x, y, z)\n" + "(0.08288462362195136-0.028376809745123296j)\n" + "\n" + "All arguments coincide:\n" + "\n" + ">>> elliprj(x, x, x, x)\n" + "(-0.03986825876151896-0.14051741840449586j)\n" + "\n" + ">>> np.power(x, -1.5)\n" + "(-0.03986825876151894-0.14051741840449583j)") +ufunc_elliprj_loops[0] = loop_d_dddd__As_ffff_f +ufunc_elliprj_loops[1] = loop_d_dddd__As_dddd_d +ufunc_elliprj_loops[2] = loop_D_DDDD__As_FFFF_F +ufunc_elliprj_loops[3] = loop_D_DDDD__As_DDDD_D +ufunc_elliprj_types[0] = NPY_FLOAT +ufunc_elliprj_types[1] = NPY_FLOAT +ufunc_elliprj_types[2] = NPY_FLOAT +ufunc_elliprj_types[3] = NPY_FLOAT +ufunc_elliprj_types[4] = NPY_FLOAT +ufunc_elliprj_types[5] = NPY_DOUBLE +ufunc_elliprj_types[6] = NPY_DOUBLE +ufunc_elliprj_types[7] = NPY_DOUBLE +ufunc_elliprj_types[8] = NPY_DOUBLE +ufunc_elliprj_types[9] = NPY_DOUBLE +ufunc_elliprj_types[10] = NPY_CFLOAT +ufunc_elliprj_types[11] = NPY_CFLOAT +ufunc_elliprj_types[12] = NPY_CFLOAT +ufunc_elliprj_types[13] = NPY_CFLOAT +ufunc_elliprj_types[14] = NPY_CFLOAT +ufunc_elliprj_types[15] = NPY_CDOUBLE +ufunc_elliprj_types[16] = NPY_CDOUBLE +ufunc_elliprj_types[17] = NPY_CDOUBLE +ufunc_elliprj_types[18] = NPY_CDOUBLE 
+ufunc_elliprj_types[19] = NPY_CDOUBLE +ufunc_elliprj_ptr[2*0] = scipy.special._ufuncs_cxx._export_fellint_RJ +ufunc_elliprj_ptr[2*0+1] = ("elliprj") +ufunc_elliprj_ptr[2*1] = scipy.special._ufuncs_cxx._export_fellint_RJ +ufunc_elliprj_ptr[2*1+1] = ("elliprj") +ufunc_elliprj_ptr[2*2] = scipy.special._ufuncs_cxx._export_cellint_RJ +ufunc_elliprj_ptr[2*2+1] = ("elliprj") +ufunc_elliprj_ptr[2*3] = scipy.special._ufuncs_cxx._export_cellint_RJ +ufunc_elliprj_ptr[2*3+1] = ("elliprj") +ufunc_elliprj_data[0] = &ufunc_elliprj_ptr[2*0] +ufunc_elliprj_data[1] = &ufunc_elliprj_ptr[2*1] +ufunc_elliprj_data[2] = &ufunc_elliprj_ptr[2*2] +ufunc_elliprj_data[3] = &ufunc_elliprj_ptr[2*3] +elliprj = np.PyUFunc_FromFuncAndData(ufunc_elliprj_loops, ufunc_elliprj_data, ufunc_elliprj_types, 4, 4, 1, 0, "elliprj", ufunc_elliprj_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_entr_loops[2] +cdef void *ufunc_entr_ptr[4] +cdef void *ufunc_entr_data[2] +cdef char ufunc_entr_types[4] +cdef char *ufunc_entr_doc = ( + "entr(x, out=None)\n" + "\n" + "Elementwise function for computing entropy.\n" + "\n" + ".. math:: \\text{entr}(x) = \\begin{cases} - x \\log(x) & x > 0 \\\\ 0 & x = 0\n" + " \\\\ -\\infty & \\text{otherwise} \\end{cases}\n" + "\n" + "Parameters\n" + "----------\n" + "x : ndarray\n" + " Input array.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "res : scalar or ndarray\n" + " The value of the elementwise entropy function at the given points `x`.\n" + "\n" + "See Also\n" + "--------\n" + "kl_div, rel_entr, scipy.stats.entropy\n" + "\n" + "Notes\n" + "-----\n" + ".. versionadded:: 0.15.0\n" + "\n" + "This function is concave.\n" + "\n" + "The origin of this function is in convex programming; see [1]_.\n" + "Given a probability distribution :math:`p_1, \\ldots, p_n`,\n" + "the definition of entropy in the context of *information theory* is\n" + "\n" + ".. 
math::\n" + "\n" + " \\sum_{i = 1}^n \\mathrm{entr}(p_i).\n" + "\n" + "To compute the latter quantity, use `scipy.stats.entropy`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Boyd, Stephen and Lieven Vandenberghe. *Convex optimization*.\n" + " Cambridge University Press, 2004.\n" + " :doi:`https://doi.org/10.1017/CBO9780511804441`") +ufunc_entr_loops[0] = loop_d_d__As_f_f +ufunc_entr_loops[1] = loop_d_d__As_d_d +ufunc_entr_types[0] = NPY_FLOAT +ufunc_entr_types[1] = NPY_FLOAT +ufunc_entr_types[2] = NPY_DOUBLE +ufunc_entr_types[3] = NPY_DOUBLE +ufunc_entr_ptr[2*0] = _func_entr +ufunc_entr_ptr[2*0+1] = ("entr") +ufunc_entr_ptr[2*1] = _func_entr +ufunc_entr_ptr[2*1+1] = ("entr") +ufunc_entr_data[0] = &ufunc_entr_ptr[2*0] +ufunc_entr_data[1] = &ufunc_entr_ptr[2*1] +entr = np.PyUFunc_FromFuncAndData(ufunc_entr_loops, ufunc_entr_data, ufunc_entr_types, 2, 1, 1, 0, "entr", ufunc_entr_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_erf_loops[4] +cdef void *ufunc_erf_ptr[8] +cdef void *ufunc_erf_data[4] +cdef char ufunc_erf_types[8] +cdef char *ufunc_erf_doc = ( + "erf(z, out=None)\n" + "\n" + "Returns the error function of complex argument.\n" + "\n" + "It is defined as ``2/sqrt(pi)*integral(exp(-t**2), t=0..z)``.\n" + "\n" + "Parameters\n" + "----------\n" + "x : ndarray\n" + " Input array.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "res : scalar or ndarray\n" + " The values of the error function at the given points `x`.\n" + "\n" + "See Also\n" + "--------\n" + "erfc, erfinv, erfcinv, wofz, erfcx, erfi\n" + "\n" + "Notes\n" + "-----\n" + "The cumulative of the unit normal distribution is given by\n" + "``Phi(z) = 1/2[1 + erf(z/sqrt(2))]``.\n" + "\n" + "References\n" + "----------\n" + ".. [1] https://en.wikipedia.org/wiki/Error_function\n" + ".. [2] Milton Abramowitz and Irene A. 
Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover,\n" + " 1972. http://www.math.sfu.ca/~cbm/aands/page_297.htm\n" + ".. [3] Steven G. Johnson, Faddeeva W function implementation.\n" + " http://ab-initio.mit.edu/Faddeeva\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy import special\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> x = np.linspace(-3, 3)\n" + ">>> plt.plot(x, special.erf(x))\n" + ">>> plt.xlabel('$x$')\n" + ">>> plt.ylabel('$erf(x)$')\n" + ">>> plt.show()") +ufunc_erf_loops[0] = loop_d_d__As_f_f +ufunc_erf_loops[1] = loop_d_d__As_d_d +ufunc_erf_loops[2] = loop_D_D__As_F_F +ufunc_erf_loops[3] = loop_D_D__As_D_D +ufunc_erf_types[0] = NPY_FLOAT +ufunc_erf_types[1] = NPY_FLOAT +ufunc_erf_types[2] = NPY_DOUBLE +ufunc_erf_types[3] = NPY_DOUBLE +ufunc_erf_types[4] = NPY_CFLOAT +ufunc_erf_types[5] = NPY_CFLOAT +ufunc_erf_types[6] = NPY_CDOUBLE +ufunc_erf_types[7] = NPY_CDOUBLE +ufunc_erf_ptr[2*0] = _func_erf +ufunc_erf_ptr[2*0+1] = ("erf") +ufunc_erf_ptr[2*1] = _func_erf +ufunc_erf_ptr[2*1+1] = ("erf") +ufunc_erf_ptr[2*2] = scipy.special._ufuncs_cxx._export_faddeeva_erf +ufunc_erf_ptr[2*2+1] = ("erf") +ufunc_erf_ptr[2*3] = scipy.special._ufuncs_cxx._export_faddeeva_erf +ufunc_erf_ptr[2*3+1] = ("erf") +ufunc_erf_data[0] = &ufunc_erf_ptr[2*0] +ufunc_erf_data[1] = &ufunc_erf_ptr[2*1] +ufunc_erf_data[2] = &ufunc_erf_ptr[2*2] +ufunc_erf_data[3] = &ufunc_erf_ptr[2*3] +erf = np.PyUFunc_FromFuncAndData(ufunc_erf_loops, ufunc_erf_data, ufunc_erf_types, 4, 1, 1, 0, "erf", ufunc_erf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_erfc_loops[4] +cdef void *ufunc_erfc_ptr[8] +cdef void *ufunc_erfc_data[4] +cdef char ufunc_erfc_types[8] +cdef char *ufunc_erfc_doc = ( + "erfc(x, out=None)\n" + "\n" + "Complementary error function, ``1 - erf(x)``.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real or complex valued argument\n" + "out : 
ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the complementary error function\n" + "\n" + "See Also\n" + "--------\n" + "erf, erfi, erfcx, dawsn, wofz\n" + "\n" + "References\n" + "----------\n" + ".. [1] Steven G. Johnson, Faddeeva W function implementation.\n" + " http://ab-initio.mit.edu/Faddeeva\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy import special\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> x = np.linspace(-3, 3)\n" + ">>> plt.plot(x, special.erfc(x))\n" + ">>> plt.xlabel('$x$')\n" + ">>> plt.ylabel('$erfc(x)$')\n" + ">>> plt.show()") +ufunc_erfc_loops[0] = loop_d_d__As_f_f +ufunc_erfc_loops[1] = loop_d_d__As_d_d +ufunc_erfc_loops[2] = loop_D_D__As_F_F +ufunc_erfc_loops[3] = loop_D_D__As_D_D +ufunc_erfc_types[0] = NPY_FLOAT +ufunc_erfc_types[1] = NPY_FLOAT +ufunc_erfc_types[2] = NPY_DOUBLE +ufunc_erfc_types[3] = NPY_DOUBLE +ufunc_erfc_types[4] = NPY_CFLOAT +ufunc_erfc_types[5] = NPY_CFLOAT +ufunc_erfc_types[6] = NPY_CDOUBLE +ufunc_erfc_types[7] = NPY_CDOUBLE +ufunc_erfc_ptr[2*0] = _func_erfc +ufunc_erfc_ptr[2*0+1] = ("erfc") +ufunc_erfc_ptr[2*1] = _func_erfc +ufunc_erfc_ptr[2*1+1] = ("erfc") +ufunc_erfc_ptr[2*2] = scipy.special._ufuncs_cxx._export_faddeeva_erfc_complex +ufunc_erfc_ptr[2*2+1] = ("erfc") +ufunc_erfc_ptr[2*3] = scipy.special._ufuncs_cxx._export_faddeeva_erfc_complex +ufunc_erfc_ptr[2*3+1] = ("erfc") +ufunc_erfc_data[0] = &ufunc_erfc_ptr[2*0] +ufunc_erfc_data[1] = &ufunc_erfc_ptr[2*1] +ufunc_erfc_data[2] = &ufunc_erfc_ptr[2*2] +ufunc_erfc_data[3] = &ufunc_erfc_ptr[2*3] +erfc = np.PyUFunc_FromFuncAndData(ufunc_erfc_loops, ufunc_erfc_data, ufunc_erfc_types, 4, 1, 1, 0, "erfc", ufunc_erfc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_erfcinv_loops[2] +cdef void *ufunc_erfcinv_ptr[4] +cdef void *ufunc_erfcinv_data[2] +cdef char ufunc_erfcinv_types[4] +cdef char *ufunc_erfcinv_doc = ( + 
"erfcinv(y, out=None)\n" + "\n" + "Inverse of the complementary error function.\n" + "\n" + "Computes the inverse of the complementary error function.\n" + "\n" + "In the complex domain, there is no unique complex number w satisfying\n" + "erfc(w)=z. This indicates a true inverse function would be multivalued.\n" + "When the domain restricts to the real, 0 < x < 2, there is a unique real\n" + "number satisfying erfc(erfcinv(x)) = erfcinv(erfc(x)).\n" + "\n" + "It is related to inverse of the error function by erfcinv(1-x) = erfinv(x)\n" + "\n" + "Parameters\n" + "----------\n" + "y : ndarray\n" + " Argument at which to evaluate. Domain: [0, 2]\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "erfcinv : scalar or ndarray\n" + " The inverse of erfc of y, element-wise\n" + "\n" + "See Also\n" + "--------\n" + "erf : Error function of a complex argument\n" + "erfc : Complementary error function, ``1 - erf(x)``\n" + "erfinv : Inverse of the error function\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> from scipy.special import erfcinv\n" + "\n" + ">>> erfcinv(0.5)\n" + "0.4769362762044699\n" + "\n" + ">>> y = np.linspace(0.0, 2.0, num=11)\n" + ">>> erfcinv(y)\n" + "array([ inf, 0.9061938 , 0.59511608, 0.37080716, 0.17914345,\n" + " -0. 
, -0.17914345, -0.37080716, -0.59511608, -0.9061938 ,\n" + " -inf])\n" + "\n" + "Plot the function:\n" + "\n" + ">>> y = np.linspace(0, 2, 200)\n" + ">>> fig, ax = plt.subplots()\n" + ">>> ax.plot(y, erfcinv(y))\n" + ">>> ax.grid(True)\n" + ">>> ax.set_xlabel('y')\n" + ">>> ax.set_title('erfcinv(y)')\n" + ">>> plt.show()") +ufunc_erfcinv_loops[0] = loop_d_d__As_f_f +ufunc_erfcinv_loops[1] = loop_d_d__As_d_d +ufunc_erfcinv_types[0] = NPY_FLOAT +ufunc_erfcinv_types[1] = NPY_FLOAT +ufunc_erfcinv_types[2] = NPY_DOUBLE +ufunc_erfcinv_types[3] = NPY_DOUBLE +ufunc_erfcinv_ptr[2*0] = _func_erfcinv +ufunc_erfcinv_ptr[2*0+1] = ("erfcinv") +ufunc_erfcinv_ptr[2*1] = _func_erfcinv +ufunc_erfcinv_ptr[2*1+1] = ("erfcinv") +ufunc_erfcinv_data[0] = &ufunc_erfcinv_ptr[2*0] +ufunc_erfcinv_data[1] = &ufunc_erfcinv_ptr[2*1] +erfcinv = np.PyUFunc_FromFuncAndData(ufunc_erfcinv_loops, ufunc_erfcinv_data, ufunc_erfcinv_types, 2, 1, 1, 0, "erfcinv", ufunc_erfcinv_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_erfcx_loops[4] +cdef void *ufunc_erfcx_ptr[8] +cdef void *ufunc_erfcx_data[4] +cdef char ufunc_erfcx_types[8] +cdef char *ufunc_erfcx_doc = ( + "erfcx(x, out=None)\n" + "\n" + "Scaled complementary error function, ``exp(x**2) * erfc(x)``.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real or complex valued argument\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the scaled complementary error function\n" + "\n" + "\n" + "See Also\n" + "--------\n" + "erf, erfc, erfi, dawsn, wofz\n" + "\n" + "Notes\n" + "-----\n" + "\n" + ".. versionadded:: 0.12.0\n" + "\n" + "References\n" + "----------\n" + ".. [1] Steven G. 
Johnson, Faddeeva W function implementation.\n" + " http://ab-initio.mit.edu/Faddeeva\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy import special\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> x = np.linspace(-3, 3)\n" + ">>> plt.plot(x, special.erfcx(x))\n" + ">>> plt.xlabel('$x$')\n" + ">>> plt.ylabel('$erfcx(x)$')\n" + ">>> plt.show()") +ufunc_erfcx_loops[0] = loop_d_d__As_f_f +ufunc_erfcx_loops[1] = loop_d_d__As_d_d +ufunc_erfcx_loops[2] = loop_D_D__As_F_F +ufunc_erfcx_loops[3] = loop_D_D__As_D_D +ufunc_erfcx_types[0] = NPY_FLOAT +ufunc_erfcx_types[1] = NPY_FLOAT +ufunc_erfcx_types[2] = NPY_DOUBLE +ufunc_erfcx_types[3] = NPY_DOUBLE +ufunc_erfcx_types[4] = NPY_CFLOAT +ufunc_erfcx_types[5] = NPY_CFLOAT +ufunc_erfcx_types[6] = NPY_CDOUBLE +ufunc_erfcx_types[7] = NPY_CDOUBLE +ufunc_erfcx_ptr[2*0] = scipy.special._ufuncs_cxx._export_faddeeva_erfcx +ufunc_erfcx_ptr[2*0+1] = ("erfcx") +ufunc_erfcx_ptr[2*1] = scipy.special._ufuncs_cxx._export_faddeeva_erfcx +ufunc_erfcx_ptr[2*1+1] = ("erfcx") +ufunc_erfcx_ptr[2*2] = scipy.special._ufuncs_cxx._export_faddeeva_erfcx_complex +ufunc_erfcx_ptr[2*2+1] = ("erfcx") +ufunc_erfcx_ptr[2*3] = scipy.special._ufuncs_cxx._export_faddeeva_erfcx_complex +ufunc_erfcx_ptr[2*3+1] = ("erfcx") +ufunc_erfcx_data[0] = &ufunc_erfcx_ptr[2*0] +ufunc_erfcx_data[1] = &ufunc_erfcx_ptr[2*1] +ufunc_erfcx_data[2] = &ufunc_erfcx_ptr[2*2] +ufunc_erfcx_data[3] = &ufunc_erfcx_ptr[2*3] +erfcx = np.PyUFunc_FromFuncAndData(ufunc_erfcx_loops, ufunc_erfcx_data, ufunc_erfcx_types, 4, 1, 1, 0, "erfcx", ufunc_erfcx_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_erfi_loops[4] +cdef void *ufunc_erfi_ptr[8] +cdef void *ufunc_erfi_data[4] +cdef char ufunc_erfi_types[8] +cdef char *ufunc_erfi_doc = ( + "erfi(z, out=None)\n" + "\n" + "Imaginary error function, ``-i erf(i z)``.\n" + "\n" + "Parameters\n" + "----------\n" + "z : array_like\n" + " Real or complex valued argument\n" + "out : ndarray, optional\n" + " Optional 
output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the imaginary error function\n" + "\n" + "See Also\n" + "--------\n" + "erf, erfc, erfcx, dawsn, wofz\n" + "\n" + "Notes\n" + "-----\n" + "\n" + ".. versionadded:: 0.12.0\n" + "\n" + "References\n" + "----------\n" + ".. [1] Steven G. Johnson, Faddeeva W function implementation.\n" + " http://ab-initio.mit.edu/Faddeeva\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy import special\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> x = np.linspace(-3, 3)\n" + ">>> plt.plot(x, special.erfi(x))\n" + ">>> plt.xlabel('$x$')\n" + ">>> plt.ylabel('$erfi(x)$')\n" + ">>> plt.show()") +ufunc_erfi_loops[0] = loop_d_d__As_f_f +ufunc_erfi_loops[1] = loop_d_d__As_d_d +ufunc_erfi_loops[2] = loop_D_D__As_F_F +ufunc_erfi_loops[3] = loop_D_D__As_D_D +ufunc_erfi_types[0] = NPY_FLOAT +ufunc_erfi_types[1] = NPY_FLOAT +ufunc_erfi_types[2] = NPY_DOUBLE +ufunc_erfi_types[3] = NPY_DOUBLE +ufunc_erfi_types[4] = NPY_CFLOAT +ufunc_erfi_types[5] = NPY_CFLOAT +ufunc_erfi_types[6] = NPY_CDOUBLE +ufunc_erfi_types[7] = NPY_CDOUBLE +ufunc_erfi_ptr[2*0] = scipy.special._ufuncs_cxx._export_faddeeva_erfi +ufunc_erfi_ptr[2*0+1] = ("erfi") +ufunc_erfi_ptr[2*1] = scipy.special._ufuncs_cxx._export_faddeeva_erfi +ufunc_erfi_ptr[2*1+1] = ("erfi") +ufunc_erfi_ptr[2*2] = scipy.special._ufuncs_cxx._export_faddeeva_erfi_complex +ufunc_erfi_ptr[2*2+1] = ("erfi") +ufunc_erfi_ptr[2*3] = scipy.special._ufuncs_cxx._export_faddeeva_erfi_complex +ufunc_erfi_ptr[2*3+1] = ("erfi") +ufunc_erfi_data[0] = &ufunc_erfi_ptr[2*0] +ufunc_erfi_data[1] = &ufunc_erfi_ptr[2*1] +ufunc_erfi_data[2] = &ufunc_erfi_ptr[2*2] +ufunc_erfi_data[3] = &ufunc_erfi_ptr[2*3] +erfi = np.PyUFunc_FromFuncAndData(ufunc_erfi_loops, ufunc_erfi_data, ufunc_erfi_types, 4, 1, 1, 0, "erfi", ufunc_erfi_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_erfinv_loops[2] +cdef void *ufunc_erfinv_ptr[4] +cdef 
void *ufunc_erfinv_data[2] +cdef char ufunc_erfinv_types[4] +cdef char *ufunc_erfinv_doc = ( + "erfinv(y, out=None)\n" + "\n" + "Inverse of the error function.\n" + "\n" + "Computes the inverse of the error function.\n" + "\n" + "In the complex domain, there is no unique complex number w satisfying\n" + "erf(w)=z. This indicates a true inverse function would be multivalued.\n" + "When the domain restricts to the real, -1 < x < 1, there is a unique real\n" + "number satisfying erf(erfinv(x)) = x.\n" + "\n" + "Parameters\n" + "----------\n" + "y : ndarray\n" + " Argument at which to evaluate. Domain: [-1, 1]\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "erfinv : scalar or ndarray\n" + " The inverse of erf of y, element-wise\n" + "\n" + "See Also\n" + "--------\n" + "erf : Error function of a complex argument\n" + "erfc : Complementary error function, ``1 - erf(x)``\n" + "erfcinv : Inverse of the complementary error function\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> from scipy.special import erfinv, erf\n" + "\n" + ">>> erfinv(0.5)\n" + "0.4769362762044699\n" + "\n" + ">>> y = np.linspace(-1.0, 1.0, num=9)\n" + ">>> x = erfinv(y)\n" + ">>> x\n" + "array([ -inf, -0.81341985, -0.47693628, -0.22531206, 0. ,\n" + " 0.22531206, 0.47693628, 0.81341985, inf])\n" + "\n" + "Verify that ``erf(erfinv(y))`` is ``y``.\n" + "\n" + ">>> erf(x)\n" + "array([-1. , -0.75, -0.5 , -0.25, 0. , 0.25, 0.5 , 0.75, 1. 
])\n" + "\n" + "Plot the function:\n" + "\n" + ">>> y = np.linspace(-1, 1, 200)\n" + ">>> fig, ax = plt.subplots()\n" + ">>> ax.plot(y, erfinv(y))\n" + ">>> ax.grid(True)\n" + ">>> ax.set_xlabel('y')\n" + ">>> ax.set_title('erfinv(y)')\n" + ">>> plt.show()") +ufunc_erfinv_loops[0] = loop_f_f__As_f_f +ufunc_erfinv_loops[1] = loop_d_d__As_d_d +ufunc_erfinv_types[0] = NPY_FLOAT +ufunc_erfinv_types[1] = NPY_FLOAT +ufunc_erfinv_types[2] = NPY_DOUBLE +ufunc_erfinv_types[3] = NPY_DOUBLE +ufunc_erfinv_ptr[2*0] = scipy.special._ufuncs_cxx._export_erfinv_float +ufunc_erfinv_ptr[2*0+1] = ("erfinv") +ufunc_erfinv_ptr[2*1] = scipy.special._ufuncs_cxx._export_erfinv_double +ufunc_erfinv_ptr[2*1+1] = ("erfinv") +ufunc_erfinv_data[0] = &ufunc_erfinv_ptr[2*0] +ufunc_erfinv_data[1] = &ufunc_erfinv_ptr[2*1] +erfinv = np.PyUFunc_FromFuncAndData(ufunc_erfinv_loops, ufunc_erfinv_data, ufunc_erfinv_types, 2, 1, 1, 0, "erfinv", ufunc_erfinv_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_eval_chebyc_loops[5] +cdef void *ufunc_eval_chebyc_ptr[10] +cdef void *ufunc_eval_chebyc_data[5] +cdef char ufunc_eval_chebyc_types[15] +cdef char *ufunc_eval_chebyc_doc = ( + "eval_chebyc(n, x, out=None)\n" + "\n" + "Evaluate Chebyshev polynomial of the first kind on [-2, 2] at a\n" + "point.\n" + "\n" + "These polynomials are defined as\n" + "\n" + ".. math::\n" + "\n" + " C_n(x) = 2 T_n(x/2)\n" + "\n" + "where :math:`T_n` is a Chebyshev polynomial of the first kind. See\n" + "22.5.11 in [AS]_ for details.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like\n" + " Degree of the polynomial. 
If not an integer, the result is\n" + " determined via the relation to `eval_chebyt`.\n" + "x : array_like\n" + " Points at which to evaluate the Chebyshev polynomial\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "C : scalar or ndarray\n" + " Values of the Chebyshev polynomial\n" + "\n" + "See Also\n" + "--------\n" + "roots_chebyc : roots and quadrature weights of Chebyshev\n" + " polynomials of the first kind on [-2, 2]\n" + "chebyc : Chebyshev polynomial object\n" + "numpy.polynomial.chebyshev.Chebyshev : Chebyshev series\n" + "eval_chebyt : evaluate Chebycshev polynomials of the first kind\n" + "\n" + "References\n" + "----------\n" + ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "They are a scaled version of the Chebyshev polynomials of the\n" + "first kind.\n" + "\n" + ">>> x = np.linspace(-2, 2, 6)\n" + ">>> sc.eval_chebyc(3, x)\n" + "array([-2. , 1.872, 1.136, -1.136, -1.872, 2. ])\n" + ">>> 2 * sc.eval_chebyt(3, x / 2)\n" + "array([-2. , 1.872, 1.136, -1.136, -1.872, 2. 
])") +ufunc_eval_chebyc_loops[0] = loop_d_ld__As_ld_d +ufunc_eval_chebyc_loops[1] = loop_d_dd__As_ff_f +ufunc_eval_chebyc_loops[2] = loop_D_dD__As_fF_F +ufunc_eval_chebyc_loops[3] = loop_d_dd__As_dd_d +ufunc_eval_chebyc_loops[4] = loop_D_dD__As_dD_D +ufunc_eval_chebyc_types[0] = NPY_LONG +ufunc_eval_chebyc_types[1] = NPY_DOUBLE +ufunc_eval_chebyc_types[2] = NPY_DOUBLE +ufunc_eval_chebyc_types[3] = NPY_FLOAT +ufunc_eval_chebyc_types[4] = NPY_FLOAT +ufunc_eval_chebyc_types[5] = NPY_FLOAT +ufunc_eval_chebyc_types[6] = NPY_FLOAT +ufunc_eval_chebyc_types[7] = NPY_CFLOAT +ufunc_eval_chebyc_types[8] = NPY_CFLOAT +ufunc_eval_chebyc_types[9] = NPY_DOUBLE +ufunc_eval_chebyc_types[10] = NPY_DOUBLE +ufunc_eval_chebyc_types[11] = NPY_DOUBLE +ufunc_eval_chebyc_types[12] = NPY_DOUBLE +ufunc_eval_chebyc_types[13] = NPY_CDOUBLE +ufunc_eval_chebyc_types[14] = NPY_CDOUBLE +ufunc_eval_chebyc_ptr[2*0] = _func_eval_chebyc_l +ufunc_eval_chebyc_ptr[2*0+1] = ("eval_chebyc") +ufunc_eval_chebyc_ptr[2*1] = _func_eval_chebyc[double] +ufunc_eval_chebyc_ptr[2*1+1] = ("eval_chebyc") +ufunc_eval_chebyc_ptr[2*2] = _func_eval_chebyc[double_complex] +ufunc_eval_chebyc_ptr[2*2+1] = ("eval_chebyc") +ufunc_eval_chebyc_ptr[2*3] = _func_eval_chebyc[double] +ufunc_eval_chebyc_ptr[2*3+1] = ("eval_chebyc") +ufunc_eval_chebyc_ptr[2*4] = _func_eval_chebyc[double_complex] +ufunc_eval_chebyc_ptr[2*4+1] = ("eval_chebyc") +ufunc_eval_chebyc_data[0] = &ufunc_eval_chebyc_ptr[2*0] +ufunc_eval_chebyc_data[1] = &ufunc_eval_chebyc_ptr[2*1] +ufunc_eval_chebyc_data[2] = &ufunc_eval_chebyc_ptr[2*2] +ufunc_eval_chebyc_data[3] = &ufunc_eval_chebyc_ptr[2*3] +ufunc_eval_chebyc_data[4] = &ufunc_eval_chebyc_ptr[2*4] +eval_chebyc = np.PyUFunc_FromFuncAndData(ufunc_eval_chebyc_loops, ufunc_eval_chebyc_data, ufunc_eval_chebyc_types, 5, 2, 1, 0, "eval_chebyc", ufunc_eval_chebyc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_eval_chebys_loops[5] +cdef void *ufunc_eval_chebys_ptr[10] +cdef void *ufunc_eval_chebys_data[5] +cdef char 
ufunc_eval_chebys_types[15] +cdef char *ufunc_eval_chebys_doc = ( + "eval_chebys(n, x, out=None)\n" + "\n" + "Evaluate Chebyshev polynomial of the second kind on [-2, 2] at a\n" + "point.\n" + "\n" + "These polynomials are defined as\n" + "\n" + ".. math::\n" + "\n" + " S_n(x) = U_n(x/2)\n" + "\n" + "where :math:`U_n` is a Chebyshev polynomial of the second\n" + "kind. See 22.5.13 in [AS]_ for details.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like\n" + " Degree of the polynomial. If not an integer, the result is\n" + " determined via the relation to `eval_chebyu`.\n" + "x : array_like\n" + " Points at which to evaluate the Chebyshev polynomial\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "S : scalar or ndarray\n" + " Values of the Chebyshev polynomial\n" + "\n" + "See Also\n" + "--------\n" + "roots_chebys : roots and quadrature weights of Chebyshev\n" + " polynomials of the second kind on [-2, 2]\n" + "chebys : Chebyshev polynomial object\n" + "eval_chebyu : evaluate Chebyshev polynomials of the second kind\n" + "\n" + "References\n" + "----------\n" + ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "They are a scaled version of the Chebyshev polynomials of the\n" + "second kind.\n" + "\n" + ">>> x = np.linspace(-2, 2, 6)\n" + ">>> sc.eval_chebys(3, x)\n" + "array([-4. , 0.672, 0.736, -0.736, -0.672, 4. ])\n" + ">>> sc.eval_chebyu(3, x / 2)\n" + "array([-4. , 0.672, 0.736, -0.736, -0.672, 4. 
])") +ufunc_eval_chebys_loops[0] = loop_d_ld__As_ld_d +ufunc_eval_chebys_loops[1] = loop_d_dd__As_ff_f +ufunc_eval_chebys_loops[2] = loop_D_dD__As_fF_F +ufunc_eval_chebys_loops[3] = loop_d_dd__As_dd_d +ufunc_eval_chebys_loops[4] = loop_D_dD__As_dD_D +ufunc_eval_chebys_types[0] = NPY_LONG +ufunc_eval_chebys_types[1] = NPY_DOUBLE +ufunc_eval_chebys_types[2] = NPY_DOUBLE +ufunc_eval_chebys_types[3] = NPY_FLOAT +ufunc_eval_chebys_types[4] = NPY_FLOAT +ufunc_eval_chebys_types[5] = NPY_FLOAT +ufunc_eval_chebys_types[6] = NPY_FLOAT +ufunc_eval_chebys_types[7] = NPY_CFLOAT +ufunc_eval_chebys_types[8] = NPY_CFLOAT +ufunc_eval_chebys_types[9] = NPY_DOUBLE +ufunc_eval_chebys_types[10] = NPY_DOUBLE +ufunc_eval_chebys_types[11] = NPY_DOUBLE +ufunc_eval_chebys_types[12] = NPY_DOUBLE +ufunc_eval_chebys_types[13] = NPY_CDOUBLE +ufunc_eval_chebys_types[14] = NPY_CDOUBLE +ufunc_eval_chebys_ptr[2*0] = _func_eval_chebys_l +ufunc_eval_chebys_ptr[2*0+1] = ("eval_chebys") +ufunc_eval_chebys_ptr[2*1] = _func_eval_chebys[double] +ufunc_eval_chebys_ptr[2*1+1] = ("eval_chebys") +ufunc_eval_chebys_ptr[2*2] = _func_eval_chebys[double_complex] +ufunc_eval_chebys_ptr[2*2+1] = ("eval_chebys") +ufunc_eval_chebys_ptr[2*3] = _func_eval_chebys[double] +ufunc_eval_chebys_ptr[2*3+1] = ("eval_chebys") +ufunc_eval_chebys_ptr[2*4] = _func_eval_chebys[double_complex] +ufunc_eval_chebys_ptr[2*4+1] = ("eval_chebys") +ufunc_eval_chebys_data[0] = &ufunc_eval_chebys_ptr[2*0] +ufunc_eval_chebys_data[1] = &ufunc_eval_chebys_ptr[2*1] +ufunc_eval_chebys_data[2] = &ufunc_eval_chebys_ptr[2*2] +ufunc_eval_chebys_data[3] = &ufunc_eval_chebys_ptr[2*3] +ufunc_eval_chebys_data[4] = &ufunc_eval_chebys_ptr[2*4] +eval_chebys = np.PyUFunc_FromFuncAndData(ufunc_eval_chebys_loops, ufunc_eval_chebys_data, ufunc_eval_chebys_types, 5, 2, 1, 0, "eval_chebys", ufunc_eval_chebys_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_eval_chebyt_loops[5] +cdef void *ufunc_eval_chebyt_ptr[10] +cdef void *ufunc_eval_chebyt_data[5] +cdef char 
ufunc_eval_chebyt_types[15] +cdef char *ufunc_eval_chebyt_doc = ( + "eval_chebyt(n, x, out=None)\n" + "\n" + "Evaluate Chebyshev polynomial of the first kind at a point.\n" + "\n" + "The Chebyshev polynomials of the first kind can be defined via the\n" + "Gauss hypergeometric function :math:`{}_2F_1` as\n" + "\n" + ".. math::\n" + "\n" + " T_n(x) = {}_2F_1(n, -n; 1/2; (1 - x)/2).\n" + "\n" + "When :math:`n` is an integer the result is a polynomial of degree\n" + ":math:`n`. See 22.5.47 in [AS]_ for details.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like\n" + " Degree of the polynomial. If not an integer, the result is\n" + " determined via the relation to the Gauss hypergeometric\n" + " function.\n" + "x : array_like\n" + " Points at which to evaluate the Chebyshev polynomial\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "T : scalar or ndarray\n" + " Values of the Chebyshev polynomial\n" + "\n" + "See Also\n" + "--------\n" + "roots_chebyt : roots and quadrature weights of Chebyshev\n" + " polynomials of the first kind\n" + "chebyu : Chebychev polynomial object\n" + "eval_chebyu : evaluate Chebyshev polynomials of the second kind\n" + "hyp2f1 : Gauss hypergeometric function\n" + "numpy.polynomial.chebyshev.Chebyshev : Chebyshev series\n" + "\n" + "Notes\n" + "-----\n" + "This routine is numerically stable for `x` in ``[-1, 1]`` at least\n" + "up to order ``10000``.\n" + "\n" + "References\n" + "----------\n" + ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. 
New York: Dover, 1972.") +ufunc_eval_chebyt_loops[0] = loop_d_ld__As_ld_d +ufunc_eval_chebyt_loops[1] = loop_d_dd__As_ff_f +ufunc_eval_chebyt_loops[2] = loop_D_dD__As_fF_F +ufunc_eval_chebyt_loops[3] = loop_d_dd__As_dd_d +ufunc_eval_chebyt_loops[4] = loop_D_dD__As_dD_D +ufunc_eval_chebyt_types[0] = NPY_LONG +ufunc_eval_chebyt_types[1] = NPY_DOUBLE +ufunc_eval_chebyt_types[2] = NPY_DOUBLE +ufunc_eval_chebyt_types[3] = NPY_FLOAT +ufunc_eval_chebyt_types[4] = NPY_FLOAT +ufunc_eval_chebyt_types[5] = NPY_FLOAT +ufunc_eval_chebyt_types[6] = NPY_FLOAT +ufunc_eval_chebyt_types[7] = NPY_CFLOAT +ufunc_eval_chebyt_types[8] = NPY_CFLOAT +ufunc_eval_chebyt_types[9] = NPY_DOUBLE +ufunc_eval_chebyt_types[10] = NPY_DOUBLE +ufunc_eval_chebyt_types[11] = NPY_DOUBLE +ufunc_eval_chebyt_types[12] = NPY_DOUBLE +ufunc_eval_chebyt_types[13] = NPY_CDOUBLE +ufunc_eval_chebyt_types[14] = NPY_CDOUBLE +ufunc_eval_chebyt_ptr[2*0] = _func_eval_chebyt_l +ufunc_eval_chebyt_ptr[2*0+1] = ("eval_chebyt") +ufunc_eval_chebyt_ptr[2*1] = _func_eval_chebyt[double] +ufunc_eval_chebyt_ptr[2*1+1] = ("eval_chebyt") +ufunc_eval_chebyt_ptr[2*2] = _func_eval_chebyt[double_complex] +ufunc_eval_chebyt_ptr[2*2+1] = ("eval_chebyt") +ufunc_eval_chebyt_ptr[2*3] = _func_eval_chebyt[double] +ufunc_eval_chebyt_ptr[2*3+1] = ("eval_chebyt") +ufunc_eval_chebyt_ptr[2*4] = _func_eval_chebyt[double_complex] +ufunc_eval_chebyt_ptr[2*4+1] = ("eval_chebyt") +ufunc_eval_chebyt_data[0] = &ufunc_eval_chebyt_ptr[2*0] +ufunc_eval_chebyt_data[1] = &ufunc_eval_chebyt_ptr[2*1] +ufunc_eval_chebyt_data[2] = &ufunc_eval_chebyt_ptr[2*2] +ufunc_eval_chebyt_data[3] = &ufunc_eval_chebyt_ptr[2*3] +ufunc_eval_chebyt_data[4] = &ufunc_eval_chebyt_ptr[2*4] +eval_chebyt = np.PyUFunc_FromFuncAndData(ufunc_eval_chebyt_loops, ufunc_eval_chebyt_data, ufunc_eval_chebyt_types, 5, 2, 1, 0, "eval_chebyt", ufunc_eval_chebyt_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_eval_chebyu_loops[5] +cdef void *ufunc_eval_chebyu_ptr[10] +cdef void 
*ufunc_eval_chebyu_data[5] +cdef char ufunc_eval_chebyu_types[15] +cdef char *ufunc_eval_chebyu_doc = ( + "eval_chebyu(n, x, out=None)\n" + "\n" + "Evaluate Chebyshev polynomial of the second kind at a point.\n" + "\n" + "The Chebyshev polynomials of the second kind can be defined via\n" + "the Gauss hypergeometric function :math:`{}_2F_1` as\n" + "\n" + ".. math::\n" + "\n" + " U_n(x) = (n + 1) {}_2F_1(-n, n + 2; 3/2; (1 - x)/2).\n" + "\n" + "When :math:`n` is an integer the result is a polynomial of degree\n" + ":math:`n`. See 22.5.48 in [AS]_ for details.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like\n" + " Degree of the polynomial. If not an integer, the result is\n" + " determined via the relation to the Gauss hypergeometric\n" + " function.\n" + "x : array_like\n" + " Points at which to evaluate the Chebyshev polynomial\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "U : scalar or ndarray\n" + " Values of the Chebyshev polynomial\n" + "\n" + "See Also\n" + "--------\n" + "roots_chebyu : roots and quadrature weights of Chebyshev\n" + " polynomials of the second kind\n" + "chebyu : Chebyshev polynomial object\n" + "eval_chebyt : evaluate Chebyshev polynomials of the first kind\n" + "hyp2f1 : Gauss hypergeometric function\n" + "\n" + "References\n" + "----------\n" + ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. 
New York: Dover, 1972.") +ufunc_eval_chebyu_loops[0] = loop_d_ld__As_ld_d +ufunc_eval_chebyu_loops[1] = loop_d_dd__As_ff_f +ufunc_eval_chebyu_loops[2] = loop_D_dD__As_fF_F +ufunc_eval_chebyu_loops[3] = loop_d_dd__As_dd_d +ufunc_eval_chebyu_loops[4] = loop_D_dD__As_dD_D +ufunc_eval_chebyu_types[0] = NPY_LONG +ufunc_eval_chebyu_types[1] = NPY_DOUBLE +ufunc_eval_chebyu_types[2] = NPY_DOUBLE +ufunc_eval_chebyu_types[3] = NPY_FLOAT +ufunc_eval_chebyu_types[4] = NPY_FLOAT +ufunc_eval_chebyu_types[5] = NPY_FLOAT +ufunc_eval_chebyu_types[6] = NPY_FLOAT +ufunc_eval_chebyu_types[7] = NPY_CFLOAT +ufunc_eval_chebyu_types[8] = NPY_CFLOAT +ufunc_eval_chebyu_types[9] = NPY_DOUBLE +ufunc_eval_chebyu_types[10] = NPY_DOUBLE +ufunc_eval_chebyu_types[11] = NPY_DOUBLE +ufunc_eval_chebyu_types[12] = NPY_DOUBLE +ufunc_eval_chebyu_types[13] = NPY_CDOUBLE +ufunc_eval_chebyu_types[14] = NPY_CDOUBLE +ufunc_eval_chebyu_ptr[2*0] = _func_eval_chebyu_l +ufunc_eval_chebyu_ptr[2*0+1] = ("eval_chebyu") +ufunc_eval_chebyu_ptr[2*1] = _func_eval_chebyu[double] +ufunc_eval_chebyu_ptr[2*1+1] = ("eval_chebyu") +ufunc_eval_chebyu_ptr[2*2] = _func_eval_chebyu[double_complex] +ufunc_eval_chebyu_ptr[2*2+1] = ("eval_chebyu") +ufunc_eval_chebyu_ptr[2*3] = _func_eval_chebyu[double] +ufunc_eval_chebyu_ptr[2*3+1] = ("eval_chebyu") +ufunc_eval_chebyu_ptr[2*4] = _func_eval_chebyu[double_complex] +ufunc_eval_chebyu_ptr[2*4+1] = ("eval_chebyu") +ufunc_eval_chebyu_data[0] = &ufunc_eval_chebyu_ptr[2*0] +ufunc_eval_chebyu_data[1] = &ufunc_eval_chebyu_ptr[2*1] +ufunc_eval_chebyu_data[2] = &ufunc_eval_chebyu_ptr[2*2] +ufunc_eval_chebyu_data[3] = &ufunc_eval_chebyu_ptr[2*3] +ufunc_eval_chebyu_data[4] = &ufunc_eval_chebyu_ptr[2*4] +eval_chebyu = np.PyUFunc_FromFuncAndData(ufunc_eval_chebyu_loops, ufunc_eval_chebyu_data, ufunc_eval_chebyu_types, 5, 2, 1, 0, "eval_chebyu", ufunc_eval_chebyu_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_eval_gegenbauer_loops[5] +cdef void *ufunc_eval_gegenbauer_ptr[10] +cdef void 
*ufunc_eval_gegenbauer_data[5] +cdef char ufunc_eval_gegenbauer_types[20] +cdef char *ufunc_eval_gegenbauer_doc = ( + "eval_gegenbauer(n, alpha, x, out=None)\n" + "\n" + "Evaluate Gegenbauer polynomial at a point.\n" + "\n" + "The Gegenbauer polynomials can be defined via the Gauss\n" + "hypergeometric function :math:`{}_2F_1` as\n" + "\n" + ".. math::\n" + "\n" + " C_n^{(\\alpha)} = \\frac{(2\\alpha)_n}{\\Gamma(n + 1)}\n" + " {}_2F_1(-n, 2\\alpha + n; \\alpha + 1/2; (1 - z)/2).\n" + "\n" + "When :math:`n` is an integer the result is a polynomial of degree\n" + ":math:`n`. See 22.5.46 in [AS]_ for details.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like\n" + " Degree of the polynomial. If not an integer, the result is\n" + " determined via the relation to the Gauss hypergeometric\n" + " function.\n" + "alpha : array_like\n" + " Parameter\n" + "x : array_like\n" + " Points at which to evaluate the Gegenbauer polynomial\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "C : scalar or ndarray\n" + " Values of the Gegenbauer polynomial\n" + "\n" + "See Also\n" + "--------\n" + "roots_gegenbauer : roots and quadrature weights of Gegenbauer\n" + " polynomials\n" + "gegenbauer : Gegenbauer polynomial object\n" + "hyp2f1 : Gauss hypergeometric function\n" + "\n" + "References\n" + "----------\n" + ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. 
New York: Dover, 1972.") +ufunc_eval_gegenbauer_loops[0] = loop_d_ldd__As_ldd_d +ufunc_eval_gegenbauer_loops[1] = loop_d_ddd__As_fff_f +ufunc_eval_gegenbauer_loops[2] = loop_D_ddD__As_ffF_F +ufunc_eval_gegenbauer_loops[3] = loop_d_ddd__As_ddd_d +ufunc_eval_gegenbauer_loops[4] = loop_D_ddD__As_ddD_D +ufunc_eval_gegenbauer_types[0] = NPY_LONG +ufunc_eval_gegenbauer_types[1] = NPY_DOUBLE +ufunc_eval_gegenbauer_types[2] = NPY_DOUBLE +ufunc_eval_gegenbauer_types[3] = NPY_DOUBLE +ufunc_eval_gegenbauer_types[4] = NPY_FLOAT +ufunc_eval_gegenbauer_types[5] = NPY_FLOAT +ufunc_eval_gegenbauer_types[6] = NPY_FLOAT +ufunc_eval_gegenbauer_types[7] = NPY_FLOAT +ufunc_eval_gegenbauer_types[8] = NPY_FLOAT +ufunc_eval_gegenbauer_types[9] = NPY_FLOAT +ufunc_eval_gegenbauer_types[10] = NPY_CFLOAT +ufunc_eval_gegenbauer_types[11] = NPY_CFLOAT +ufunc_eval_gegenbauer_types[12] = NPY_DOUBLE +ufunc_eval_gegenbauer_types[13] = NPY_DOUBLE +ufunc_eval_gegenbauer_types[14] = NPY_DOUBLE +ufunc_eval_gegenbauer_types[15] = NPY_DOUBLE +ufunc_eval_gegenbauer_types[16] = NPY_DOUBLE +ufunc_eval_gegenbauer_types[17] = NPY_DOUBLE +ufunc_eval_gegenbauer_types[18] = NPY_CDOUBLE +ufunc_eval_gegenbauer_types[19] = NPY_CDOUBLE +ufunc_eval_gegenbauer_ptr[2*0] = _func_eval_gegenbauer_l +ufunc_eval_gegenbauer_ptr[2*0+1] = ("eval_gegenbauer") +ufunc_eval_gegenbauer_ptr[2*1] = _func_eval_gegenbauer[double] +ufunc_eval_gegenbauer_ptr[2*1+1] = ("eval_gegenbauer") +ufunc_eval_gegenbauer_ptr[2*2] = _func_eval_gegenbauer[double_complex] +ufunc_eval_gegenbauer_ptr[2*2+1] = ("eval_gegenbauer") +ufunc_eval_gegenbauer_ptr[2*3] = _func_eval_gegenbauer[double] +ufunc_eval_gegenbauer_ptr[2*3+1] = ("eval_gegenbauer") +ufunc_eval_gegenbauer_ptr[2*4] = _func_eval_gegenbauer[double_complex] +ufunc_eval_gegenbauer_ptr[2*4+1] = ("eval_gegenbauer") +ufunc_eval_gegenbauer_data[0] = &ufunc_eval_gegenbauer_ptr[2*0] +ufunc_eval_gegenbauer_data[1] = &ufunc_eval_gegenbauer_ptr[2*1] +ufunc_eval_gegenbauer_data[2] = 
&ufunc_eval_gegenbauer_ptr[2*2] +ufunc_eval_gegenbauer_data[3] = &ufunc_eval_gegenbauer_ptr[2*3] +ufunc_eval_gegenbauer_data[4] = &ufunc_eval_gegenbauer_ptr[2*4] +eval_gegenbauer = np.PyUFunc_FromFuncAndData(ufunc_eval_gegenbauer_loops, ufunc_eval_gegenbauer_data, ufunc_eval_gegenbauer_types, 5, 3, 1, 0, "eval_gegenbauer", ufunc_eval_gegenbauer_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_eval_genlaguerre_loops[5] +cdef void *ufunc_eval_genlaguerre_ptr[10] +cdef void *ufunc_eval_genlaguerre_data[5] +cdef char ufunc_eval_genlaguerre_types[20] +cdef char *ufunc_eval_genlaguerre_doc = ( + "eval_genlaguerre(n, alpha, x, out=None)\n" + "\n" + "Evaluate generalized Laguerre polynomial at a point.\n" + "\n" + "The generalized Laguerre polynomials can be defined via the\n" + "confluent hypergeometric function :math:`{}_1F_1` as\n" + "\n" + ".. math::\n" + "\n" + " L_n^{(\\alpha)}(x) = \\binom{n + \\alpha}{n}\n" + " {}_1F_1(-n, \\alpha + 1, x).\n" + "\n" + "When :math:`n` is an integer the result is a polynomial of degree\n" + ":math:`n`. See 22.5.54 in [AS]_ for details. The Laguerre\n" + "polynomials are the special case where :math:`\\alpha = 0`.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like\n" + " Degree of the polynomial. 
If not an integer, the result is\n" + " determined via the relation to the confluent hypergeometric\n" + " function.\n" + "alpha : array_like\n" + " Parameter; must have ``alpha > -1``\n" + "x : array_like\n" + " Points at which to evaluate the generalized Laguerre\n" + " polynomial\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "L : scalar or ndarray\n" + " Values of the generalized Laguerre polynomial\n" + "\n" + "See Also\n" + "--------\n" + "roots_genlaguerre : roots and quadrature weights of generalized\n" + " Laguerre polynomials\n" + "genlaguerre : generalized Laguerre polynomial object\n" + "hyp1f1 : confluent hypergeometric function\n" + "eval_laguerre : evaluate Laguerre polynomials\n" + "\n" + "References\n" + "----------\n" + ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.") +ufunc_eval_genlaguerre_loops[0] = loop_d_ldd__As_ldd_d +ufunc_eval_genlaguerre_loops[1] = loop_d_ddd__As_fff_f +ufunc_eval_genlaguerre_loops[2] = loop_D_ddD__As_ffF_F +ufunc_eval_genlaguerre_loops[3] = loop_d_ddd__As_ddd_d +ufunc_eval_genlaguerre_loops[4] = loop_D_ddD__As_ddD_D +ufunc_eval_genlaguerre_types[0] = NPY_LONG +ufunc_eval_genlaguerre_types[1] = NPY_DOUBLE +ufunc_eval_genlaguerre_types[2] = NPY_DOUBLE +ufunc_eval_genlaguerre_types[3] = NPY_DOUBLE +ufunc_eval_genlaguerre_types[4] = NPY_FLOAT +ufunc_eval_genlaguerre_types[5] = NPY_FLOAT +ufunc_eval_genlaguerre_types[6] = NPY_FLOAT +ufunc_eval_genlaguerre_types[7] = NPY_FLOAT +ufunc_eval_genlaguerre_types[8] = NPY_FLOAT +ufunc_eval_genlaguerre_types[9] = NPY_FLOAT +ufunc_eval_genlaguerre_types[10] = NPY_CFLOAT +ufunc_eval_genlaguerre_types[11] = NPY_CFLOAT +ufunc_eval_genlaguerre_types[12] = NPY_DOUBLE +ufunc_eval_genlaguerre_types[13] = NPY_DOUBLE +ufunc_eval_genlaguerre_types[14] = NPY_DOUBLE +ufunc_eval_genlaguerre_types[15] 
= NPY_DOUBLE +ufunc_eval_genlaguerre_types[16] = NPY_DOUBLE +ufunc_eval_genlaguerre_types[17] = NPY_DOUBLE +ufunc_eval_genlaguerre_types[18] = NPY_CDOUBLE +ufunc_eval_genlaguerre_types[19] = NPY_CDOUBLE +ufunc_eval_genlaguerre_ptr[2*0] = _func_eval_genlaguerre_l +ufunc_eval_genlaguerre_ptr[2*0+1] = ("eval_genlaguerre") +ufunc_eval_genlaguerre_ptr[2*1] = _func_eval_genlaguerre[double] +ufunc_eval_genlaguerre_ptr[2*1+1] = ("eval_genlaguerre") +ufunc_eval_genlaguerre_ptr[2*2] = _func_eval_genlaguerre[double_complex] +ufunc_eval_genlaguerre_ptr[2*2+1] = ("eval_genlaguerre") +ufunc_eval_genlaguerre_ptr[2*3] = _func_eval_genlaguerre[double] +ufunc_eval_genlaguerre_ptr[2*3+1] = ("eval_genlaguerre") +ufunc_eval_genlaguerre_ptr[2*4] = _func_eval_genlaguerre[double_complex] +ufunc_eval_genlaguerre_ptr[2*4+1] = ("eval_genlaguerre") +ufunc_eval_genlaguerre_data[0] = &ufunc_eval_genlaguerre_ptr[2*0] +ufunc_eval_genlaguerre_data[1] = &ufunc_eval_genlaguerre_ptr[2*1] +ufunc_eval_genlaguerre_data[2] = &ufunc_eval_genlaguerre_ptr[2*2] +ufunc_eval_genlaguerre_data[3] = &ufunc_eval_genlaguerre_ptr[2*3] +ufunc_eval_genlaguerre_data[4] = &ufunc_eval_genlaguerre_ptr[2*4] +eval_genlaguerre = np.PyUFunc_FromFuncAndData(ufunc_eval_genlaguerre_loops, ufunc_eval_genlaguerre_data, ufunc_eval_genlaguerre_types, 5, 3, 1, 0, "eval_genlaguerre", ufunc_eval_genlaguerre_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_eval_hermite_loops[1] +cdef void *ufunc_eval_hermite_ptr[2] +cdef void *ufunc_eval_hermite_data[1] +cdef char ufunc_eval_hermite_types[3] +cdef char *ufunc_eval_hermite_doc = ( + "eval_hermite(n, x, out=None)\n" + "\n" + "Evaluate physicist's Hermite polynomial at a point.\n" + "\n" + "Defined by\n" + "\n" + ".. math::\n" + "\n" + " H_n(x) = (-1)^n e^{x^2} \\frac{d^n}{dx^n} e^{-x^2};\n" + "\n" + ":math:`H_n` is a polynomial of degree :math:`n`. 
See 22.11.7 in\n" + "[AS]_ for details.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like\n" + " Degree of the polynomial\n" + "x : array_like\n" + " Points at which to evaluate the Hermite polynomial\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "H : scalar or ndarray\n" + " Values of the Hermite polynomial\n" + "\n" + "See Also\n" + "--------\n" + "roots_hermite : roots and quadrature weights of physicist's\n" + " Hermite polynomials\n" + "hermite : physicist's Hermite polynomial object\n" + "numpy.polynomial.hermite.Hermite : Physicist's Hermite series\n" + "eval_hermitenorm : evaluate Probabilist's Hermite polynomials\n" + "\n" + "References\n" + "----------\n" + ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.") +ufunc_eval_hermite_loops[0] = loop_d_ld__As_ld_d +ufunc_eval_hermite_types[0] = NPY_LONG +ufunc_eval_hermite_types[1] = NPY_DOUBLE +ufunc_eval_hermite_types[2] = NPY_DOUBLE +ufunc_eval_hermite_ptr[2*0] = _func_eval_hermite +ufunc_eval_hermite_ptr[2*0+1] = ("eval_hermite") +ufunc_eval_hermite_data[0] = &ufunc_eval_hermite_ptr[2*0] +eval_hermite = np.PyUFunc_FromFuncAndData(ufunc_eval_hermite_loops, ufunc_eval_hermite_data, ufunc_eval_hermite_types, 1, 2, 1, 0, "eval_hermite", ufunc_eval_hermite_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_eval_hermitenorm_loops[1] +cdef void *ufunc_eval_hermitenorm_ptr[2] +cdef void *ufunc_eval_hermitenorm_data[1] +cdef char ufunc_eval_hermitenorm_types[3] +cdef char *ufunc_eval_hermitenorm_doc = ( + "eval_hermitenorm(n, x, out=None)\n" + "\n" + "Evaluate probabilist's (normalized) Hermite polynomial at a\n" + "point.\n" + "\n" + "Defined by\n" + "\n" + ".. math::\n" + "\n" + " He_n(x) = (-1)^n e^{x^2/2} \\frac{d^n}{dx^n} e^{-x^2/2};\n" + "\n" + ":math:`He_n` is a polynomial of degree :math:`n`. 
See 22.11.8 in\n" + "[AS]_ for details.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like\n" + " Degree of the polynomial\n" + "x : array_like\n" + " Points at which to evaluate the Hermite polynomial\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "He : scalar or ndarray\n" + " Values of the Hermite polynomial\n" + "\n" + "See Also\n" + "--------\n" + "roots_hermitenorm : roots and quadrature weights of probabilist's\n" + " Hermite polynomials\n" + "hermitenorm : probabilist's Hermite polynomial object\n" + "numpy.polynomial.hermite_e.HermiteE : Probabilist's Hermite series\n" + "eval_hermite : evaluate physicist's Hermite polynomials\n" + "\n" + "References\n" + "----------\n" + ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.") +ufunc_eval_hermitenorm_loops[0] = loop_d_ld__As_ld_d +ufunc_eval_hermitenorm_types[0] = NPY_LONG +ufunc_eval_hermitenorm_types[1] = NPY_DOUBLE +ufunc_eval_hermitenorm_types[2] = NPY_DOUBLE +ufunc_eval_hermitenorm_ptr[2*0] = _func_eval_hermitenorm +ufunc_eval_hermitenorm_ptr[2*0+1] = ("eval_hermitenorm") +ufunc_eval_hermitenorm_data[0] = &ufunc_eval_hermitenorm_ptr[2*0] +eval_hermitenorm = np.PyUFunc_FromFuncAndData(ufunc_eval_hermitenorm_loops, ufunc_eval_hermitenorm_data, ufunc_eval_hermitenorm_types, 1, 2, 1, 0, "eval_hermitenorm", ufunc_eval_hermitenorm_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_eval_jacobi_loops[5] +cdef void *ufunc_eval_jacobi_ptr[10] +cdef void *ufunc_eval_jacobi_data[5] +cdef char ufunc_eval_jacobi_types[25] +cdef char *ufunc_eval_jacobi_doc = ( + "eval_jacobi(n, alpha, beta, x, out=None)\n" + "\n" + "Evaluate Jacobi polynomial at a point.\n" + "\n" + "The Jacobi polynomials can be defined via the Gauss hypergeometric\n" + "function :math:`{}_2F_1` as\n" + "\n" + ".. 
math::\n" + "\n" + " P_n^{(\\alpha, \\beta)}(x) = \\frac{(\\alpha + 1)_n}{\\Gamma(n + 1)}\n" + " {}_2F_1(-n, 1 + \\alpha + \\beta + n; \\alpha + 1; (1 - z)/2)\n" + "\n" + "where :math:`(\\cdot)_n` is the Pochhammer symbol; see `poch`. When\n" + ":math:`n` is an integer the result is a polynomial of degree\n" + ":math:`n`. See 22.5.42 in [AS]_ for details.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like\n" + " Degree of the polynomial. If not an integer the result is\n" + " determined via the relation to the Gauss hypergeometric\n" + " function.\n" + "alpha : array_like\n" + " Parameter\n" + "beta : array_like\n" + " Parameter\n" + "x : array_like\n" + " Points at which to evaluate the polynomial\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "P : scalar or ndarray\n" + " Values of the Jacobi polynomial\n" + "\n" + "See Also\n" + "--------\n" + "roots_jacobi : roots and quadrature weights of Jacobi polynomials\n" + "jacobi : Jacobi polynomial object\n" + "hyp2f1 : Gauss hypergeometric function\n" + "\n" + "References\n" + "----------\n" + ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. 
New York: Dover, 1972.") +ufunc_eval_jacobi_loops[0] = loop_d_lddd__As_lddd_d +ufunc_eval_jacobi_loops[1] = loop_d_dddd__As_ffff_f +ufunc_eval_jacobi_loops[2] = loop_D_dddD__As_fffF_F +ufunc_eval_jacobi_loops[3] = loop_d_dddd__As_dddd_d +ufunc_eval_jacobi_loops[4] = loop_D_dddD__As_dddD_D +ufunc_eval_jacobi_types[0] = NPY_LONG +ufunc_eval_jacobi_types[1] = NPY_DOUBLE +ufunc_eval_jacobi_types[2] = NPY_DOUBLE +ufunc_eval_jacobi_types[3] = NPY_DOUBLE +ufunc_eval_jacobi_types[4] = NPY_DOUBLE +ufunc_eval_jacobi_types[5] = NPY_FLOAT +ufunc_eval_jacobi_types[6] = NPY_FLOAT +ufunc_eval_jacobi_types[7] = NPY_FLOAT +ufunc_eval_jacobi_types[8] = NPY_FLOAT +ufunc_eval_jacobi_types[9] = NPY_FLOAT +ufunc_eval_jacobi_types[10] = NPY_FLOAT +ufunc_eval_jacobi_types[11] = NPY_FLOAT +ufunc_eval_jacobi_types[12] = NPY_FLOAT +ufunc_eval_jacobi_types[13] = NPY_CFLOAT +ufunc_eval_jacobi_types[14] = NPY_CFLOAT +ufunc_eval_jacobi_types[15] = NPY_DOUBLE +ufunc_eval_jacobi_types[16] = NPY_DOUBLE +ufunc_eval_jacobi_types[17] = NPY_DOUBLE +ufunc_eval_jacobi_types[18] = NPY_DOUBLE +ufunc_eval_jacobi_types[19] = NPY_DOUBLE +ufunc_eval_jacobi_types[20] = NPY_DOUBLE +ufunc_eval_jacobi_types[21] = NPY_DOUBLE +ufunc_eval_jacobi_types[22] = NPY_DOUBLE +ufunc_eval_jacobi_types[23] = NPY_CDOUBLE +ufunc_eval_jacobi_types[24] = NPY_CDOUBLE +ufunc_eval_jacobi_ptr[2*0] = _func_eval_jacobi_l +ufunc_eval_jacobi_ptr[2*0+1] = ("eval_jacobi") +ufunc_eval_jacobi_ptr[2*1] = _func_eval_jacobi[double] +ufunc_eval_jacobi_ptr[2*1+1] = ("eval_jacobi") +ufunc_eval_jacobi_ptr[2*2] = _func_eval_jacobi[double_complex] +ufunc_eval_jacobi_ptr[2*2+1] = ("eval_jacobi") +ufunc_eval_jacobi_ptr[2*3] = _func_eval_jacobi[double] +ufunc_eval_jacobi_ptr[2*3+1] = ("eval_jacobi") +ufunc_eval_jacobi_ptr[2*4] = _func_eval_jacobi[double_complex] +ufunc_eval_jacobi_ptr[2*4+1] = ("eval_jacobi") +ufunc_eval_jacobi_data[0] = &ufunc_eval_jacobi_ptr[2*0] +ufunc_eval_jacobi_data[1] = &ufunc_eval_jacobi_ptr[2*1] +ufunc_eval_jacobi_data[2] = 
&ufunc_eval_jacobi_ptr[2*2] +ufunc_eval_jacobi_data[3] = &ufunc_eval_jacobi_ptr[2*3] +ufunc_eval_jacobi_data[4] = &ufunc_eval_jacobi_ptr[2*4] +eval_jacobi = np.PyUFunc_FromFuncAndData(ufunc_eval_jacobi_loops, ufunc_eval_jacobi_data, ufunc_eval_jacobi_types, 5, 4, 1, 0, "eval_jacobi", ufunc_eval_jacobi_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_eval_laguerre_loops[5] +cdef void *ufunc_eval_laguerre_ptr[10] +cdef void *ufunc_eval_laguerre_data[5] +cdef char ufunc_eval_laguerre_types[15] +cdef char *ufunc_eval_laguerre_doc = ( + "eval_laguerre(n, x, out=None)\n" + "\n" + "Evaluate Laguerre polynomial at a point.\n" + "\n" + "The Laguerre polynomials can be defined via the confluent\n" + "hypergeometric function :math:`{}_1F_1` as\n" + "\n" + ".. math::\n" + "\n" + " L_n(x) = {}_1F_1(-n, 1, x).\n" + "\n" + "See 22.5.16 and 22.5.54 in [AS]_ for details. When :math:`n` is an\n" + "integer the result is a polynomial of degree :math:`n`.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like\n" + " Degree of the polynomial. If not an integer the result is\n" + " determined via the relation to the confluent hypergeometric\n" + " function.\n" + "x : array_like\n" + " Points at which to evaluate the Laguerre polynomial\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "L : scalar or ndarray\n" + " Values of the Laguerre polynomial\n" + "\n" + "See Also\n" + "--------\n" + "roots_laguerre : roots and quadrature weights of Laguerre\n" + " polynomials\n" + "laguerre : Laguerre polynomial object\n" + "numpy.polynomial.laguerre.Laguerre : Laguerre series\n" + "eval_genlaguerre : evaluate generalized Laguerre polynomials\n" + "\n" + "References\n" + "----------\n" + ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. 
New York: Dover, 1972.") +ufunc_eval_laguerre_loops[0] = loop_d_ld__As_ld_d +ufunc_eval_laguerre_loops[1] = loop_d_dd__As_ff_f +ufunc_eval_laguerre_loops[2] = loop_D_dD__As_fF_F +ufunc_eval_laguerre_loops[3] = loop_d_dd__As_dd_d +ufunc_eval_laguerre_loops[4] = loop_D_dD__As_dD_D +ufunc_eval_laguerre_types[0] = NPY_LONG +ufunc_eval_laguerre_types[1] = NPY_DOUBLE +ufunc_eval_laguerre_types[2] = NPY_DOUBLE +ufunc_eval_laguerre_types[3] = NPY_FLOAT +ufunc_eval_laguerre_types[4] = NPY_FLOAT +ufunc_eval_laguerre_types[5] = NPY_FLOAT +ufunc_eval_laguerre_types[6] = NPY_FLOAT +ufunc_eval_laguerre_types[7] = NPY_CFLOAT +ufunc_eval_laguerre_types[8] = NPY_CFLOAT +ufunc_eval_laguerre_types[9] = NPY_DOUBLE +ufunc_eval_laguerre_types[10] = NPY_DOUBLE +ufunc_eval_laguerre_types[11] = NPY_DOUBLE +ufunc_eval_laguerre_types[12] = NPY_DOUBLE +ufunc_eval_laguerre_types[13] = NPY_CDOUBLE +ufunc_eval_laguerre_types[14] = NPY_CDOUBLE +ufunc_eval_laguerre_ptr[2*0] = _func_eval_laguerre_l +ufunc_eval_laguerre_ptr[2*0+1] = ("eval_laguerre") +ufunc_eval_laguerre_ptr[2*1] = _func_eval_laguerre[double] +ufunc_eval_laguerre_ptr[2*1+1] = ("eval_laguerre") +ufunc_eval_laguerre_ptr[2*2] = _func_eval_laguerre[double_complex] +ufunc_eval_laguerre_ptr[2*2+1] = ("eval_laguerre") +ufunc_eval_laguerre_ptr[2*3] = _func_eval_laguerre[double] +ufunc_eval_laguerre_ptr[2*3+1] = ("eval_laguerre") +ufunc_eval_laguerre_ptr[2*4] = _func_eval_laguerre[double_complex] +ufunc_eval_laguerre_ptr[2*4+1] = ("eval_laguerre") +ufunc_eval_laguerre_data[0] = &ufunc_eval_laguerre_ptr[2*0] +ufunc_eval_laguerre_data[1] = &ufunc_eval_laguerre_ptr[2*1] +ufunc_eval_laguerre_data[2] = &ufunc_eval_laguerre_ptr[2*2] +ufunc_eval_laguerre_data[3] = &ufunc_eval_laguerre_ptr[2*3] +ufunc_eval_laguerre_data[4] = &ufunc_eval_laguerre_ptr[2*4] +eval_laguerre = np.PyUFunc_FromFuncAndData(ufunc_eval_laguerre_loops, ufunc_eval_laguerre_data, ufunc_eval_laguerre_types, 5, 2, 1, 0, "eval_laguerre", ufunc_eval_laguerre_doc, 0) + +cdef 
np.PyUFuncGenericFunction ufunc_eval_legendre_loops[5] +cdef void *ufunc_eval_legendre_ptr[10] +cdef void *ufunc_eval_legendre_data[5] +cdef char ufunc_eval_legendre_types[15] +cdef char *ufunc_eval_legendre_doc = ( + "eval_legendre(n, x, out=None)\n" + "\n" + "Evaluate Legendre polynomial at a point.\n" + "\n" + "The Legendre polynomials can be defined via the Gauss\n" + "hypergeometric function :math:`{}_2F_1` as\n" + "\n" + ".. math::\n" + "\n" + " P_n(x) = {}_2F_1(-n, n + 1; 1; (1 - x)/2).\n" + "\n" + "When :math:`n` is an integer the result is a polynomial of degree\n" + ":math:`n`. See 22.5.49 in [AS]_ for details.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like\n" + " Degree of the polynomial. If not an integer, the result is\n" + " determined via the relation to the Gauss hypergeometric\n" + " function.\n" + "x : array_like\n" + " Points at which to evaluate the Legendre polynomial\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "P : scalar or ndarray\n" + " Values of the Legendre polynomial\n" + "\n" + "See Also\n" + "--------\n" + "roots_legendre : roots and quadrature weights of Legendre\n" + " polynomials\n" + "legendre : Legendre polynomial object\n" + "hyp2f1 : Gauss hypergeometric function\n" + "numpy.polynomial.legendre.Legendre : Legendre series\n" + "\n" + "References\n" + "----------\n" + ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. 
New York: Dover, 1972.\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy.special import eval_legendre\n" + "\n" + "Evaluate the zero-order Legendre polynomial at x = 0\n" + "\n" + ">>> eval_legendre(0, 0)\n" + "1.0\n" + "\n" + "Evaluate the first-order Legendre polynomial between -1 and 1\n" + "\n" + ">>> X = np.linspace(-1, 1, 5) # Domain of Legendre polynomials\n" + ">>> eval_legendre(1, X)\n" + "array([-1. , -0.5, 0. , 0.5, 1. ])\n" + "\n" + "Evaluate Legendre polynomials of order 0 through 4 at x = 0\n" + "\n" + ">>> N = range(0, 5)\n" + ">>> eval_legendre(N, 0)\n" + "array([ 1. , 0. , -0.5 , 0. , 0.375])\n" + "\n" + "Plot Legendre polynomials of order 0 through 4\n" + "\n" + ">>> X = np.linspace(-1, 1)\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> for n in range(0, 5):\n" + "... y = eval_legendre(n, X)\n" + "... plt.plot(X, y, label=r'$P_{}(x)$'.format(n))\n" + "\n" + ">>> plt.title(\"Legendre Polynomials\")\n" + ">>> plt.xlabel(\"x\")\n" + ">>> plt.ylabel(r'$P_n(x)$')\n" + ">>> plt.legend(loc='lower right')\n" + ">>> plt.show()") +ufunc_eval_legendre_loops[0] = loop_d_ld__As_ld_d +ufunc_eval_legendre_loops[1] = loop_d_dd__As_ff_f +ufunc_eval_legendre_loops[2] = loop_D_dD__As_fF_F +ufunc_eval_legendre_loops[3] = loop_d_dd__As_dd_d +ufunc_eval_legendre_loops[4] = loop_D_dD__As_dD_D +ufunc_eval_legendre_types[0] = NPY_LONG +ufunc_eval_legendre_types[1] = NPY_DOUBLE +ufunc_eval_legendre_types[2] = NPY_DOUBLE +ufunc_eval_legendre_types[3] = NPY_FLOAT +ufunc_eval_legendre_types[4] = NPY_FLOAT +ufunc_eval_legendre_types[5] = NPY_FLOAT +ufunc_eval_legendre_types[6] = NPY_FLOAT +ufunc_eval_legendre_types[7] = NPY_CFLOAT +ufunc_eval_legendre_types[8] = NPY_CFLOAT +ufunc_eval_legendre_types[9] = NPY_DOUBLE +ufunc_eval_legendre_types[10] = NPY_DOUBLE +ufunc_eval_legendre_types[11] = NPY_DOUBLE +ufunc_eval_legendre_types[12] = NPY_DOUBLE +ufunc_eval_legendre_types[13] = NPY_CDOUBLE +ufunc_eval_legendre_types[14] = 
NPY_CDOUBLE +ufunc_eval_legendre_ptr[2*0] = _func_eval_legendre_l +ufunc_eval_legendre_ptr[2*0+1] = ("eval_legendre") +ufunc_eval_legendre_ptr[2*1] = _func_eval_legendre[double] +ufunc_eval_legendre_ptr[2*1+1] = ("eval_legendre") +ufunc_eval_legendre_ptr[2*2] = _func_eval_legendre[double_complex] +ufunc_eval_legendre_ptr[2*2+1] = ("eval_legendre") +ufunc_eval_legendre_ptr[2*3] = _func_eval_legendre[double] +ufunc_eval_legendre_ptr[2*3+1] = ("eval_legendre") +ufunc_eval_legendre_ptr[2*4] = _func_eval_legendre[double_complex] +ufunc_eval_legendre_ptr[2*4+1] = ("eval_legendre") +ufunc_eval_legendre_data[0] = &ufunc_eval_legendre_ptr[2*0] +ufunc_eval_legendre_data[1] = &ufunc_eval_legendre_ptr[2*1] +ufunc_eval_legendre_data[2] = &ufunc_eval_legendre_ptr[2*2] +ufunc_eval_legendre_data[3] = &ufunc_eval_legendre_ptr[2*3] +ufunc_eval_legendre_data[4] = &ufunc_eval_legendre_ptr[2*4] +eval_legendre = np.PyUFunc_FromFuncAndData(ufunc_eval_legendre_loops, ufunc_eval_legendre_data, ufunc_eval_legendre_types, 5, 2, 1, 0, "eval_legendre", ufunc_eval_legendre_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_eval_sh_chebyt_loops[5] +cdef void *ufunc_eval_sh_chebyt_ptr[10] +cdef void *ufunc_eval_sh_chebyt_data[5] +cdef char ufunc_eval_sh_chebyt_types[15] +cdef char *ufunc_eval_sh_chebyt_doc = ( + "eval_sh_chebyt(n, x, out=None)\n" + "\n" + "Evaluate shifted Chebyshev polynomial of the first kind at a\n" + "point.\n" + "\n" + "These polynomials are defined as\n" + "\n" + ".. math::\n" + "\n" + " T_n^*(x) = T_n(2x - 1)\n" + "\n" + "where :math:`T_n` is a Chebyshev polynomial of the first kind. See\n" + "22.5.14 in [AS]_ for details.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like\n" + " Degree of the polynomial. 
If not an integer, the result is\n" + " determined via the relation to `eval_chebyt`.\n" + "x : array_like\n" + " Points at which to evaluate the shifted Chebyshev polynomial\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "T : scalar or ndarray\n" + " Values of the shifted Chebyshev polynomial\n" + "\n" + "See Also\n" + "--------\n" + "roots_sh_chebyt : roots and quadrature weights of shifted\n" + " Chebyshev polynomials of the first kind\n" + "sh_chebyt : shifted Chebyshev polynomial object\n" + "eval_chebyt : evaluate Chebyshev polynomials of the first kind\n" + "numpy.polynomial.chebyshev.Chebyshev : Chebyshev series\n" + "\n" + "References\n" + "----------\n" + ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.") +ufunc_eval_sh_chebyt_loops[0] = loop_d_ld__As_ld_d +ufunc_eval_sh_chebyt_loops[1] = loop_d_dd__As_ff_f +ufunc_eval_sh_chebyt_loops[2] = loop_D_dD__As_fF_F +ufunc_eval_sh_chebyt_loops[3] = loop_d_dd__As_dd_d +ufunc_eval_sh_chebyt_loops[4] = loop_D_dD__As_dD_D +ufunc_eval_sh_chebyt_types[0] = NPY_LONG +ufunc_eval_sh_chebyt_types[1] = NPY_DOUBLE +ufunc_eval_sh_chebyt_types[2] = NPY_DOUBLE +ufunc_eval_sh_chebyt_types[3] = NPY_FLOAT +ufunc_eval_sh_chebyt_types[4] = NPY_FLOAT +ufunc_eval_sh_chebyt_types[5] = NPY_FLOAT +ufunc_eval_sh_chebyt_types[6] = NPY_FLOAT +ufunc_eval_sh_chebyt_types[7] = NPY_CFLOAT +ufunc_eval_sh_chebyt_types[8] = NPY_CFLOAT +ufunc_eval_sh_chebyt_types[9] = NPY_DOUBLE +ufunc_eval_sh_chebyt_types[10] = NPY_DOUBLE +ufunc_eval_sh_chebyt_types[11] = NPY_DOUBLE +ufunc_eval_sh_chebyt_types[12] = NPY_DOUBLE +ufunc_eval_sh_chebyt_types[13] = NPY_CDOUBLE +ufunc_eval_sh_chebyt_types[14] = NPY_CDOUBLE +ufunc_eval_sh_chebyt_ptr[2*0] = _func_eval_sh_chebyt_l +ufunc_eval_sh_chebyt_ptr[2*0+1] = ("eval_sh_chebyt") +ufunc_eval_sh_chebyt_ptr[2*1] = 
_func_eval_sh_chebyt[double] +ufunc_eval_sh_chebyt_ptr[2*1+1] = ("eval_sh_chebyt") +ufunc_eval_sh_chebyt_ptr[2*2] = _func_eval_sh_chebyt[double_complex] +ufunc_eval_sh_chebyt_ptr[2*2+1] = ("eval_sh_chebyt") +ufunc_eval_sh_chebyt_ptr[2*3] = _func_eval_sh_chebyt[double] +ufunc_eval_sh_chebyt_ptr[2*3+1] = ("eval_sh_chebyt") +ufunc_eval_sh_chebyt_ptr[2*4] = _func_eval_sh_chebyt[double_complex] +ufunc_eval_sh_chebyt_ptr[2*4+1] = ("eval_sh_chebyt") +ufunc_eval_sh_chebyt_data[0] = &ufunc_eval_sh_chebyt_ptr[2*0] +ufunc_eval_sh_chebyt_data[1] = &ufunc_eval_sh_chebyt_ptr[2*1] +ufunc_eval_sh_chebyt_data[2] = &ufunc_eval_sh_chebyt_ptr[2*2] +ufunc_eval_sh_chebyt_data[3] = &ufunc_eval_sh_chebyt_ptr[2*3] +ufunc_eval_sh_chebyt_data[4] = &ufunc_eval_sh_chebyt_ptr[2*4] +eval_sh_chebyt = np.PyUFunc_FromFuncAndData(ufunc_eval_sh_chebyt_loops, ufunc_eval_sh_chebyt_data, ufunc_eval_sh_chebyt_types, 5, 2, 1, 0, "eval_sh_chebyt", ufunc_eval_sh_chebyt_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_eval_sh_chebyu_loops[5] +cdef void *ufunc_eval_sh_chebyu_ptr[10] +cdef void *ufunc_eval_sh_chebyu_data[5] +cdef char ufunc_eval_sh_chebyu_types[15] +cdef char *ufunc_eval_sh_chebyu_doc = ( + "eval_sh_chebyu(n, x, out=None)\n" + "\n" + "Evaluate shifted Chebyshev polynomial of the second kind at a\n" + "point.\n" + "\n" + "These polynomials are defined as\n" + "\n" + ".. math::\n" + "\n" + " U_n^*(x) = U_n(2x - 1)\n" + "\n" + "where :math:`U_n` is a Chebyshev polynomial of the first kind. See\n" + "22.5.15 in [AS]_ for details.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like\n" + " Degree of the polynomial. 
If not an integer, the result is\n" + " determined via the relation to `eval_chebyu`.\n" + "x : array_like\n" + " Points at which to evaluate the shifted Chebyshev polynomial\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "U : scalar or ndarray\n" + " Values of the shifted Chebyshev polynomial\n" + "\n" + "See Also\n" + "--------\n" + "roots_sh_chebyu : roots and quadrature weights of shifted\n" + " Chebychev polynomials of the second kind\n" + "sh_chebyu : shifted Chebyshev polynomial object\n" + "eval_chebyu : evaluate Chebyshev polynomials of the second kind\n" + "\n" + "References\n" + "----------\n" + ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.") +ufunc_eval_sh_chebyu_loops[0] = loop_d_ld__As_ld_d +ufunc_eval_sh_chebyu_loops[1] = loop_d_dd__As_ff_f +ufunc_eval_sh_chebyu_loops[2] = loop_D_dD__As_fF_F +ufunc_eval_sh_chebyu_loops[3] = loop_d_dd__As_dd_d +ufunc_eval_sh_chebyu_loops[4] = loop_D_dD__As_dD_D +ufunc_eval_sh_chebyu_types[0] = NPY_LONG +ufunc_eval_sh_chebyu_types[1] = NPY_DOUBLE +ufunc_eval_sh_chebyu_types[2] = NPY_DOUBLE +ufunc_eval_sh_chebyu_types[3] = NPY_FLOAT +ufunc_eval_sh_chebyu_types[4] = NPY_FLOAT +ufunc_eval_sh_chebyu_types[5] = NPY_FLOAT +ufunc_eval_sh_chebyu_types[6] = NPY_FLOAT +ufunc_eval_sh_chebyu_types[7] = NPY_CFLOAT +ufunc_eval_sh_chebyu_types[8] = NPY_CFLOAT +ufunc_eval_sh_chebyu_types[9] = NPY_DOUBLE +ufunc_eval_sh_chebyu_types[10] = NPY_DOUBLE +ufunc_eval_sh_chebyu_types[11] = NPY_DOUBLE +ufunc_eval_sh_chebyu_types[12] = NPY_DOUBLE +ufunc_eval_sh_chebyu_types[13] = NPY_CDOUBLE +ufunc_eval_sh_chebyu_types[14] = NPY_CDOUBLE +ufunc_eval_sh_chebyu_ptr[2*0] = _func_eval_sh_chebyu_l +ufunc_eval_sh_chebyu_ptr[2*0+1] = ("eval_sh_chebyu") +ufunc_eval_sh_chebyu_ptr[2*1] = _func_eval_sh_chebyu[double] +ufunc_eval_sh_chebyu_ptr[2*1+1] = 
("eval_sh_chebyu") +ufunc_eval_sh_chebyu_ptr[2*2] = _func_eval_sh_chebyu[double_complex] +ufunc_eval_sh_chebyu_ptr[2*2+1] = ("eval_sh_chebyu") +ufunc_eval_sh_chebyu_ptr[2*3] = _func_eval_sh_chebyu[double] +ufunc_eval_sh_chebyu_ptr[2*3+1] = ("eval_sh_chebyu") +ufunc_eval_sh_chebyu_ptr[2*4] = _func_eval_sh_chebyu[double_complex] +ufunc_eval_sh_chebyu_ptr[2*4+1] = ("eval_sh_chebyu") +ufunc_eval_sh_chebyu_data[0] = &ufunc_eval_sh_chebyu_ptr[2*0] +ufunc_eval_sh_chebyu_data[1] = &ufunc_eval_sh_chebyu_ptr[2*1] +ufunc_eval_sh_chebyu_data[2] = &ufunc_eval_sh_chebyu_ptr[2*2] +ufunc_eval_sh_chebyu_data[3] = &ufunc_eval_sh_chebyu_ptr[2*3] +ufunc_eval_sh_chebyu_data[4] = &ufunc_eval_sh_chebyu_ptr[2*4] +eval_sh_chebyu = np.PyUFunc_FromFuncAndData(ufunc_eval_sh_chebyu_loops, ufunc_eval_sh_chebyu_data, ufunc_eval_sh_chebyu_types, 5, 2, 1, 0, "eval_sh_chebyu", ufunc_eval_sh_chebyu_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_eval_sh_jacobi_loops[5] +cdef void *ufunc_eval_sh_jacobi_ptr[10] +cdef void *ufunc_eval_sh_jacobi_data[5] +cdef char ufunc_eval_sh_jacobi_types[25] +cdef char *ufunc_eval_sh_jacobi_doc = ( + "eval_sh_jacobi(n, p, q, x, out=None)\n" + "\n" + "Evaluate shifted Jacobi polynomial at a point.\n" + "\n" + "Defined by\n" + "\n" + ".. math::\n" + "\n" + " G_n^{(p, q)}(x)\n" + " = \\binom{2n + p - 1}{n}^{-1} P_n^{(p - q, q - 1)}(2x - 1),\n" + "\n" + "where :math:`P_n^{(\\cdot, \\cdot)}` is the n-th Jacobi\n" + "polynomial. See 22.5.2 in [AS]_ for details.\n" + "\n" + "Parameters\n" + "----------\n" + "n : int\n" + " Degree of the polynomial. 
If not an integer, the result is\n" + " determined via the relation to `binom` and `eval_jacobi`.\n" + "p : float\n" + " Parameter\n" + "q : float\n" + " Parameter\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "G : scalar or ndarray\n" + " Values of the shifted Jacobi polynomial.\n" + "\n" + "See Also\n" + "--------\n" + "roots_sh_jacobi : roots and quadrature weights of shifted Jacobi\n" + " polynomials\n" + "sh_jacobi : shifted Jacobi polynomial object\n" + "eval_jacobi : evaluate Jacobi polynomials\n" + "\n" + "References\n" + "----------\n" + ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.") +ufunc_eval_sh_jacobi_loops[0] = loop_d_lddd__As_lddd_d +ufunc_eval_sh_jacobi_loops[1] = loop_d_dddd__As_ffff_f +ufunc_eval_sh_jacobi_loops[2] = loop_D_dddD__As_fffF_F +ufunc_eval_sh_jacobi_loops[3] = loop_d_dddd__As_dddd_d +ufunc_eval_sh_jacobi_loops[4] = loop_D_dddD__As_dddD_D +ufunc_eval_sh_jacobi_types[0] = NPY_LONG +ufunc_eval_sh_jacobi_types[1] = NPY_DOUBLE +ufunc_eval_sh_jacobi_types[2] = NPY_DOUBLE +ufunc_eval_sh_jacobi_types[3] = NPY_DOUBLE +ufunc_eval_sh_jacobi_types[4] = NPY_DOUBLE +ufunc_eval_sh_jacobi_types[5] = NPY_FLOAT +ufunc_eval_sh_jacobi_types[6] = NPY_FLOAT +ufunc_eval_sh_jacobi_types[7] = NPY_FLOAT +ufunc_eval_sh_jacobi_types[8] = NPY_FLOAT +ufunc_eval_sh_jacobi_types[9] = NPY_FLOAT +ufunc_eval_sh_jacobi_types[10] = NPY_FLOAT +ufunc_eval_sh_jacobi_types[11] = NPY_FLOAT +ufunc_eval_sh_jacobi_types[12] = NPY_FLOAT +ufunc_eval_sh_jacobi_types[13] = NPY_CFLOAT +ufunc_eval_sh_jacobi_types[14] = NPY_CFLOAT +ufunc_eval_sh_jacobi_types[15] = NPY_DOUBLE +ufunc_eval_sh_jacobi_types[16] = NPY_DOUBLE +ufunc_eval_sh_jacobi_types[17] = NPY_DOUBLE +ufunc_eval_sh_jacobi_types[18] = NPY_DOUBLE +ufunc_eval_sh_jacobi_types[19] = NPY_DOUBLE +ufunc_eval_sh_jacobi_types[20] = 
NPY_DOUBLE +ufunc_eval_sh_jacobi_types[21] = NPY_DOUBLE +ufunc_eval_sh_jacobi_types[22] = NPY_DOUBLE +ufunc_eval_sh_jacobi_types[23] = NPY_CDOUBLE +ufunc_eval_sh_jacobi_types[24] = NPY_CDOUBLE +ufunc_eval_sh_jacobi_ptr[2*0] = _func_eval_sh_jacobi_l +ufunc_eval_sh_jacobi_ptr[2*0+1] = ("eval_sh_jacobi") +ufunc_eval_sh_jacobi_ptr[2*1] = _func_eval_sh_jacobi[double] +ufunc_eval_sh_jacobi_ptr[2*1+1] = ("eval_sh_jacobi") +ufunc_eval_sh_jacobi_ptr[2*2] = _func_eval_sh_jacobi[double_complex] +ufunc_eval_sh_jacobi_ptr[2*2+1] = ("eval_sh_jacobi") +ufunc_eval_sh_jacobi_ptr[2*3] = _func_eval_sh_jacobi[double] +ufunc_eval_sh_jacobi_ptr[2*3+1] = ("eval_sh_jacobi") +ufunc_eval_sh_jacobi_ptr[2*4] = _func_eval_sh_jacobi[double_complex] +ufunc_eval_sh_jacobi_ptr[2*4+1] = ("eval_sh_jacobi") +ufunc_eval_sh_jacobi_data[0] = &ufunc_eval_sh_jacobi_ptr[2*0] +ufunc_eval_sh_jacobi_data[1] = &ufunc_eval_sh_jacobi_ptr[2*1] +ufunc_eval_sh_jacobi_data[2] = &ufunc_eval_sh_jacobi_ptr[2*2] +ufunc_eval_sh_jacobi_data[3] = &ufunc_eval_sh_jacobi_ptr[2*3] +ufunc_eval_sh_jacobi_data[4] = &ufunc_eval_sh_jacobi_ptr[2*4] +eval_sh_jacobi = np.PyUFunc_FromFuncAndData(ufunc_eval_sh_jacobi_loops, ufunc_eval_sh_jacobi_data, ufunc_eval_sh_jacobi_types, 5, 4, 1, 0, "eval_sh_jacobi", ufunc_eval_sh_jacobi_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_eval_sh_legendre_loops[5] +cdef void *ufunc_eval_sh_legendre_ptr[10] +cdef void *ufunc_eval_sh_legendre_data[5] +cdef char ufunc_eval_sh_legendre_types[15] +cdef char *ufunc_eval_sh_legendre_doc = ( + "eval_sh_legendre(n, x, out=None)\n" + "\n" + "Evaluate shifted Legendre polynomial at a point.\n" + "\n" + "These polynomials are defined as\n" + "\n" + ".. math::\n" + "\n" + " P_n^*(x) = P_n(2x - 1)\n" + "\n" + "where :math:`P_n` is a Legendre polynomial. See 2.2.11 in [AS]_\n" + "for details.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like\n" + " Degree of the polynomial. 
If not an integer, the value is\n" + " determined via the relation to `eval_legendre`.\n" + "x : array_like\n" + " Points at which to evaluate the shifted Legendre polynomial\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "P : scalar or ndarray\n" + " Values of the shifted Legendre polynomial\n" + "\n" + "See Also\n" + "--------\n" + "roots_sh_legendre : roots and quadrature weights of shifted\n" + " Legendre polynomials\n" + "sh_legendre : shifted Legendre polynomial object\n" + "eval_legendre : evaluate Legendre polynomials\n" + "numpy.polynomial.legendre.Legendre : Legendre series\n" + "\n" + "References\n" + "----------\n" + ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.") +ufunc_eval_sh_legendre_loops[0] = loop_d_ld__As_ld_d +ufunc_eval_sh_legendre_loops[1] = loop_d_dd__As_ff_f +ufunc_eval_sh_legendre_loops[2] = loop_D_dD__As_fF_F +ufunc_eval_sh_legendre_loops[3] = loop_d_dd__As_dd_d +ufunc_eval_sh_legendre_loops[4] = loop_D_dD__As_dD_D +ufunc_eval_sh_legendre_types[0] = NPY_LONG +ufunc_eval_sh_legendre_types[1] = NPY_DOUBLE +ufunc_eval_sh_legendre_types[2] = NPY_DOUBLE +ufunc_eval_sh_legendre_types[3] = NPY_FLOAT +ufunc_eval_sh_legendre_types[4] = NPY_FLOAT +ufunc_eval_sh_legendre_types[5] = NPY_FLOAT +ufunc_eval_sh_legendre_types[6] = NPY_FLOAT +ufunc_eval_sh_legendre_types[7] = NPY_CFLOAT +ufunc_eval_sh_legendre_types[8] = NPY_CFLOAT +ufunc_eval_sh_legendre_types[9] = NPY_DOUBLE +ufunc_eval_sh_legendre_types[10] = NPY_DOUBLE +ufunc_eval_sh_legendre_types[11] = NPY_DOUBLE +ufunc_eval_sh_legendre_types[12] = NPY_DOUBLE +ufunc_eval_sh_legendre_types[13] = NPY_CDOUBLE +ufunc_eval_sh_legendre_types[14] = NPY_CDOUBLE +ufunc_eval_sh_legendre_ptr[2*0] = _func_eval_sh_legendre_l +ufunc_eval_sh_legendre_ptr[2*0+1] = ("eval_sh_legendre") +ufunc_eval_sh_legendre_ptr[2*1] 
= _func_eval_sh_legendre[double] +ufunc_eval_sh_legendre_ptr[2*1+1] = ("eval_sh_legendre") +ufunc_eval_sh_legendre_ptr[2*2] = _func_eval_sh_legendre[double_complex] +ufunc_eval_sh_legendre_ptr[2*2+1] = ("eval_sh_legendre") +ufunc_eval_sh_legendre_ptr[2*3] = _func_eval_sh_legendre[double] +ufunc_eval_sh_legendre_ptr[2*3+1] = ("eval_sh_legendre") +ufunc_eval_sh_legendre_ptr[2*4] = _func_eval_sh_legendre[double_complex] +ufunc_eval_sh_legendre_ptr[2*4+1] = ("eval_sh_legendre") +ufunc_eval_sh_legendre_data[0] = &ufunc_eval_sh_legendre_ptr[2*0] +ufunc_eval_sh_legendre_data[1] = &ufunc_eval_sh_legendre_ptr[2*1] +ufunc_eval_sh_legendre_data[2] = &ufunc_eval_sh_legendre_ptr[2*2] +ufunc_eval_sh_legendre_data[3] = &ufunc_eval_sh_legendre_ptr[2*3] +ufunc_eval_sh_legendre_data[4] = &ufunc_eval_sh_legendre_ptr[2*4] +eval_sh_legendre = np.PyUFunc_FromFuncAndData(ufunc_eval_sh_legendre_loops, ufunc_eval_sh_legendre_data, ufunc_eval_sh_legendre_types, 5, 2, 1, 0, "eval_sh_legendre", ufunc_eval_sh_legendre_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_exp1_loops[4] +cdef void *ufunc_exp1_ptr[8] +cdef void *ufunc_exp1_data[4] +cdef char ufunc_exp1_types[8] +cdef char *ufunc_exp1_doc = ( + "exp1(z, out=None)\n" + "\n" + "Exponential integral E1.\n" + "\n" + "For complex :math:`z \\ne 0` the exponential integral can be defined as\n" + "[1]_\n" + "\n" + ".. 
math::\n" + "\n" + " E_1(z) = \\int_z^\\infty \\frac{e^{-t}}{t} dt,\n" + "\n" + "where the path of the integral does not cross the negative real\n" + "axis or pass through the origin.\n" + "\n" + "Parameters\n" + "----------\n" + "z: array_like\n" + " Real or complex argument.\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the exponential integral E1\n" + "\n" + "See Also\n" + "--------\n" + "expi : exponential integral :math:`Ei`\n" + "expn : generalization of :math:`E_1`\n" + "\n" + "Notes\n" + "-----\n" + "For :math:`x > 0` it is related to the exponential integral\n" + ":math:`Ei` (see `expi`) via the relation\n" + "\n" + ".. math::\n" + "\n" + " E_1(x) = -Ei(-x).\n" + "\n" + "References\n" + "----------\n" + ".. [1] Digital Library of Mathematical Functions, 6.2.1\n" + " https://dlmf.nist.gov/6.2#E1\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "It has a pole at 0.\n" + "\n" + ">>> sc.exp1(0)\n" + "inf\n" + "\n" + "It has a branch cut on the negative real axis.\n" + "\n" + ">>> sc.exp1(-1)\n" + "nan\n" + ">>> sc.exp1(complex(-1, 0))\n" + "(-1.8951178163559368-3.141592653589793j)\n" + ">>> sc.exp1(complex(-1, -0.0))\n" + "(-1.8951178163559368+3.141592653589793j)\n" + "\n" + "It approaches 0 along the positive real axis.\n" + "\n" + ">>> sc.exp1([1, 10, 100, 1000])\n" + "array([2.19383934e-01, 4.15696893e-06, 3.68359776e-46, 0.00000000e+00])\n" + "\n" + "It is related to `expi`.\n" + "\n" + ">>> x = np.array([1, 2, 3, 4])\n" + ">>> sc.exp1(x)\n" + "array([0.21938393, 0.04890051, 0.01304838, 0.00377935])\n" + ">>> -sc.expi(-x)\n" + "array([0.21938393, 0.04890051, 0.01304838, 0.00377935])") +ufunc_exp1_loops[0] = loop_d_d__As_f_f +ufunc_exp1_loops[1] = loop_d_d__As_d_d +ufunc_exp1_loops[2] = loop_D_D__As_F_F +ufunc_exp1_loops[3] = loop_D_D__As_D_D +ufunc_exp1_types[0] = NPY_FLOAT 
+ufunc_exp1_types[1] = NPY_FLOAT +ufunc_exp1_types[2] = NPY_DOUBLE +ufunc_exp1_types[3] = NPY_DOUBLE +ufunc_exp1_types[4] = NPY_CFLOAT +ufunc_exp1_types[5] = NPY_CFLOAT +ufunc_exp1_types[6] = NPY_CDOUBLE +ufunc_exp1_types[7] = NPY_CDOUBLE +ufunc_exp1_ptr[2*0] = _func_exp1_wrap +ufunc_exp1_ptr[2*0+1] = ("exp1") +ufunc_exp1_ptr[2*1] = _func_exp1_wrap +ufunc_exp1_ptr[2*1+1] = ("exp1") +ufunc_exp1_ptr[2*2] = _func_cexp1_wrap +ufunc_exp1_ptr[2*2+1] = ("exp1") +ufunc_exp1_ptr[2*3] = _func_cexp1_wrap +ufunc_exp1_ptr[2*3+1] = ("exp1") +ufunc_exp1_data[0] = &ufunc_exp1_ptr[2*0] +ufunc_exp1_data[1] = &ufunc_exp1_ptr[2*1] +ufunc_exp1_data[2] = &ufunc_exp1_ptr[2*2] +ufunc_exp1_data[3] = &ufunc_exp1_ptr[2*3] +exp1 = np.PyUFunc_FromFuncAndData(ufunc_exp1_loops, ufunc_exp1_data, ufunc_exp1_types, 4, 1, 1, 0, "exp1", ufunc_exp1_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_exp10_loops[2] +cdef void *ufunc_exp10_ptr[4] +cdef void *ufunc_exp10_data[2] +cdef char ufunc_exp10_types[4] +cdef char *ufunc_exp10_doc = ( + "exp10(x, out=None)\n" + "\n" + "Compute ``10**x`` element-wise.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " `x` must contain real numbers.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " ``10**x``, computed element-wise.\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy.special import exp10\n" + "\n" + ">>> exp10(3)\n" + "1000.0\n" + ">>> x = np.array([[-1, -0.5, 0], [0.5, 1, 1.5]])\n" + ">>> exp10(x)\n" + "array([[ 0.1 , 0.31622777, 1. ],\n" + " [ 3.16227766, 10. 
, 31.6227766 ]])") +ufunc_exp10_loops[0] = loop_d_d__As_f_f +ufunc_exp10_loops[1] = loop_d_d__As_d_d +ufunc_exp10_types[0] = NPY_FLOAT +ufunc_exp10_types[1] = NPY_FLOAT +ufunc_exp10_types[2] = NPY_DOUBLE +ufunc_exp10_types[3] = NPY_DOUBLE +ufunc_exp10_ptr[2*0] = _func_exp10 +ufunc_exp10_ptr[2*0+1] = ("exp10") +ufunc_exp10_ptr[2*1] = _func_exp10 +ufunc_exp10_ptr[2*1+1] = ("exp10") +ufunc_exp10_data[0] = &ufunc_exp10_ptr[2*0] +ufunc_exp10_data[1] = &ufunc_exp10_ptr[2*1] +exp10 = np.PyUFunc_FromFuncAndData(ufunc_exp10_loops, ufunc_exp10_data, ufunc_exp10_types, 2, 1, 1, 0, "exp10", ufunc_exp10_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_exp2_loops[2] +cdef void *ufunc_exp2_ptr[4] +cdef void *ufunc_exp2_data[2] +cdef char ufunc_exp2_types[4] +cdef char *ufunc_exp2_doc = ( + "exp2(x, out=None)\n" + "\n" + "Compute ``2**x`` element-wise.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " `x` must contain real numbers.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " ``2**x``, computed element-wise.\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy.special import exp2\n" + "\n" + ">>> exp2(3)\n" + "8.0\n" + ">>> x = np.array([[-1, -0.5, 0], [0.5, 1, 1.5]])\n" + ">>> exp2(x)\n" + "array([[ 0.5 , 0.70710678, 1. ],\n" + " [ 1.41421356, 2. 
, 2.82842712]])") +ufunc_exp2_loops[0] = loop_d_d__As_f_f +ufunc_exp2_loops[1] = loop_d_d__As_d_d +ufunc_exp2_types[0] = NPY_FLOAT +ufunc_exp2_types[1] = NPY_FLOAT +ufunc_exp2_types[2] = NPY_DOUBLE +ufunc_exp2_types[3] = NPY_DOUBLE +ufunc_exp2_ptr[2*0] = _func_exp2 +ufunc_exp2_ptr[2*0+1] = ("exp2") +ufunc_exp2_ptr[2*1] = _func_exp2 +ufunc_exp2_ptr[2*1+1] = ("exp2") +ufunc_exp2_data[0] = &ufunc_exp2_ptr[2*0] +ufunc_exp2_data[1] = &ufunc_exp2_ptr[2*1] +exp2 = np.PyUFunc_FromFuncAndData(ufunc_exp2_loops, ufunc_exp2_data, ufunc_exp2_types, 2, 1, 1, 0, "exp2", ufunc_exp2_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_expi_loops[4] +cdef void *ufunc_expi_ptr[8] +cdef void *ufunc_expi_data[4] +cdef char ufunc_expi_types[8] +cdef char *ufunc_expi_doc = ( + "expi(x, out=None)\n" + "\n" + "Exponential integral Ei.\n" + "\n" + "For real :math:`x`, the exponential integral is defined as [1]_\n" + "\n" + ".. math::\n" + "\n" + " Ei(x) = \\int_{-\\infty}^x \\frac{e^t}{t} dt.\n" + "\n" + "For :math:`x > 0` the integral is understood as a Cauchy principal\n" + "value.\n" + "\n" + "It is extended to the complex plane by analytic continuation of\n" + "the function on the interval :math:`(0, \\infty)`. The complex\n" + "variant has a branch cut on the negative real axis.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real or complex valued argument\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the exponential integral\n" + "\n" + "See Also\n" + "--------\n" + "exp1 : Exponential integral :math:`E_1`\n" + "expn : Generalized exponential integral :math:`E_n`\n" + "\n" + "Notes\n" + "-----\n" + "The exponential integrals :math:`E_1` and :math:`Ei` satisfy the\n" + "relation\n" + "\n" + ".. math::\n" + "\n" + " E_1(x) = -Ei(-x)\n" + "\n" + "for :math:`x > 0`.\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Digital Library of Mathematical Functions, 6.2.5\n" + " https://dlmf.nist.gov/6.2#E5\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "It is related to `exp1`.\n" + "\n" + ">>> x = np.array([1, 2, 3, 4])\n" + ">>> -sc.expi(-x)\n" + "array([0.21938393, 0.04890051, 0.01304838, 0.00377935])\n" + ">>> sc.exp1(x)\n" + "array([0.21938393, 0.04890051, 0.01304838, 0.00377935])\n" + "\n" + "The complex variant has a branch cut on the negative real axis.\n" + "\n" + ">>> sc.expi(-1 + 1e-12j)\n" + "(-0.21938393439552062+3.1415926535894254j)\n" + ">>> sc.expi(-1 - 1e-12j)\n" + "(-0.21938393439552062-3.1415926535894254j)\n" + "\n" + "As the complex variant approaches the branch cut, the real parts\n" + "approach the value of the real variant.\n" + "\n" + ">>> sc.expi(-1)\n" + "-0.21938393439552062\n" + "\n" + "The SciPy implementation returns the real variant for complex\n" + "values on the branch cut.\n" + "\n" + ">>> sc.expi(complex(-1, 0.0))\n" + "(-0.21938393439552062-0j)\n" + ">>> sc.expi(complex(-1, -0.0))\n" + "(-0.21938393439552062-0j)") +ufunc_expi_loops[0] = loop_d_d__As_f_f +ufunc_expi_loops[1] = loop_d_d__As_d_d +ufunc_expi_loops[2] = loop_D_D__As_F_F +ufunc_expi_loops[3] = loop_D_D__As_D_D +ufunc_expi_types[0] = NPY_FLOAT +ufunc_expi_types[1] = NPY_FLOAT +ufunc_expi_types[2] = NPY_DOUBLE +ufunc_expi_types[3] = NPY_DOUBLE +ufunc_expi_types[4] = NPY_CFLOAT +ufunc_expi_types[5] = NPY_CFLOAT +ufunc_expi_types[6] = NPY_CDOUBLE +ufunc_expi_types[7] = NPY_CDOUBLE +ufunc_expi_ptr[2*0] = _func_expi_wrap +ufunc_expi_ptr[2*0+1] = ("expi") +ufunc_expi_ptr[2*1] = _func_expi_wrap +ufunc_expi_ptr[2*1+1] = ("expi") +ufunc_expi_ptr[2*2] = _func_cexpi_wrap +ufunc_expi_ptr[2*2+1] = ("expi") +ufunc_expi_ptr[2*3] = _func_cexpi_wrap +ufunc_expi_ptr[2*3+1] = ("expi") +ufunc_expi_data[0] = &ufunc_expi_ptr[2*0] +ufunc_expi_data[1] = &ufunc_expi_ptr[2*1] +ufunc_expi_data[2] = &ufunc_expi_ptr[2*2] +ufunc_expi_data[3] = 
&ufunc_expi_ptr[2*3] +expi = np.PyUFunc_FromFuncAndData(ufunc_expi_loops, ufunc_expi_data, ufunc_expi_types, 4, 1, 1, 0, "expi", ufunc_expi_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_expit_loops[3] +cdef void *ufunc_expit_ptr[6] +cdef void *ufunc_expit_data[3] +cdef char ufunc_expit_types[6] +cdef char *ufunc_expit_doc = ( + "expit(x, out=None)\n" + "\n" + "Expit (a.k.a. logistic sigmoid) ufunc for ndarrays.\n" + "\n" + "The expit function, also known as the logistic sigmoid function, is\n" + "defined as ``expit(x) = 1/(1+exp(-x))``. It is the inverse of the\n" + "logit function.\n" + "\n" + "Parameters\n" + "----------\n" + "x : ndarray\n" + " The ndarray to apply expit to element-wise.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " An ndarray of the same shape as x. Its entries\n" + " are `expit` of the corresponding entry of x.\n" + "\n" + "See Also\n" + "--------\n" + "logit\n" + "\n" + "Notes\n" + "-----\n" + "As a ufunc expit takes a number of optional\n" + "keyword arguments. For more information\n" + "see `ufuncs `_\n" + "\n" + ".. versionadded:: 0.10.0\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy.special import expit, logit\n" + "\n" + ">>> expit([-np.inf, -1.5, 0, 1.5, np.inf])\n" + "array([ 0. , 0.18242552, 0.5 , 0.81757448, 1. ])\n" + "\n" + "`logit` is the inverse of `expit`:\n" + "\n" + ">>> logit(expit([-2.5, 0, 3.1, 5.0]))\n" + "array([-2.5, 0. , 3.1, 5. 
])\n" + "\n" + "Plot expit(x) for x in [-6, 6]:\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> x = np.linspace(-6, 6, 121)\n" + ">>> y = expit(x)\n" + ">>> plt.plot(x, y)\n" + ">>> plt.grid()\n" + ">>> plt.xlim(-6, 6)\n" + ">>> plt.xlabel('x')\n" + ">>> plt.title('expit(x)')\n" + ">>> plt.show()") +ufunc_expit_loops[0] = loop_f_f__As_f_f +ufunc_expit_loops[1] = loop_d_d__As_d_d +ufunc_expit_loops[2] = loop_g_g__As_g_g +ufunc_expit_types[0] = NPY_FLOAT +ufunc_expit_types[1] = NPY_FLOAT +ufunc_expit_types[2] = NPY_DOUBLE +ufunc_expit_types[3] = NPY_DOUBLE +ufunc_expit_types[4] = NPY_LONGDOUBLE +ufunc_expit_types[5] = NPY_LONGDOUBLE +ufunc_expit_ptr[2*0] = scipy.special._ufuncs_cxx._export_expitf +ufunc_expit_ptr[2*0+1] = ("expit") +ufunc_expit_ptr[2*1] = scipy.special._ufuncs_cxx._export_expit +ufunc_expit_ptr[2*1+1] = ("expit") +ufunc_expit_ptr[2*2] = scipy.special._ufuncs_cxx._export_expitl +ufunc_expit_ptr[2*2+1] = ("expit") +ufunc_expit_data[0] = &ufunc_expit_ptr[2*0] +ufunc_expit_data[1] = &ufunc_expit_ptr[2*1] +ufunc_expit_data[2] = &ufunc_expit_ptr[2*2] +expit = np.PyUFunc_FromFuncAndData(ufunc_expit_loops, ufunc_expit_data, ufunc_expit_types, 3, 1, 1, 0, "expit", ufunc_expit_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_expm1_loops[4] +cdef void *ufunc_expm1_ptr[8] +cdef void *ufunc_expm1_data[4] +cdef char ufunc_expm1_types[8] +cdef char *ufunc_expm1_doc = ( + "expm1(x, out=None)\n" + "\n" + "Compute ``exp(x) - 1``.\n" + "\n" + "When `x` is near zero, ``exp(x)`` is near 1, so the numerical calculation\n" + "of ``exp(x) - 1`` can suffer from catastrophic loss of precision.\n" + "``expm1(x)`` is implemented to avoid the loss of precision that occurs when\n" + "`x` is near zero.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " `x` must contain real numbers.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " ``exp(x) - 1`` computed 
element-wise.\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy.special import expm1\n" + "\n" + ">>> expm1(1.0)\n" + "1.7182818284590451\n" + ">>> expm1([-0.2, -0.1, 0, 0.1, 0.2])\n" + "array([-0.18126925, -0.09516258, 0. , 0.10517092, 0.22140276])\n" + "\n" + "The exact value of ``exp(7.5e-13) - 1`` is::\n" + "\n" + " 7.5000000000028125000000007031250000001318...*10**-13.\n" + "\n" + "Here is what ``expm1(7.5e-13)`` gives:\n" + "\n" + ">>> expm1(7.5e-13)\n" + "7.5000000000028135e-13\n" + "\n" + "Compare that to ``exp(7.5e-13) - 1``, where the subtraction results in\n" + "a \"catastrophic\" loss of precision:\n" + "\n" + ">>> np.exp(7.5e-13) - 1\n" + "7.5006667543675576e-13") +ufunc_expm1_loops[0] = loop_d_d__As_f_f +ufunc_expm1_loops[1] = loop_d_d__As_d_d +ufunc_expm1_loops[2] = loop_D_D__As_F_F +ufunc_expm1_loops[3] = loop_D_D__As_D_D +ufunc_expm1_types[0] = NPY_FLOAT +ufunc_expm1_types[1] = NPY_FLOAT +ufunc_expm1_types[2] = NPY_DOUBLE +ufunc_expm1_types[3] = NPY_DOUBLE +ufunc_expm1_types[4] = NPY_CFLOAT +ufunc_expm1_types[5] = NPY_CFLOAT +ufunc_expm1_types[6] = NPY_CDOUBLE +ufunc_expm1_types[7] = NPY_CDOUBLE +ufunc_expm1_ptr[2*0] = _func_expm1 +ufunc_expm1_ptr[2*0+1] = ("expm1") +ufunc_expm1_ptr[2*1] = _func_expm1 +ufunc_expm1_ptr[2*1+1] = ("expm1") +ufunc_expm1_ptr[2*2] = _func_cexpm1 +ufunc_expm1_ptr[2*2+1] = ("expm1") +ufunc_expm1_ptr[2*3] = _func_cexpm1 +ufunc_expm1_ptr[2*3+1] = ("expm1") +ufunc_expm1_data[0] = &ufunc_expm1_ptr[2*0] +ufunc_expm1_data[1] = &ufunc_expm1_ptr[2*1] +ufunc_expm1_data[2] = &ufunc_expm1_ptr[2*2] +ufunc_expm1_data[3] = &ufunc_expm1_ptr[2*3] +expm1 = np.PyUFunc_FromFuncAndData(ufunc_expm1_loops, ufunc_expm1_data, ufunc_expm1_types, 4, 1, 1, 0, "expm1", ufunc_expm1_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_expn_loops[3] +cdef void *ufunc_expn_ptr[6] +cdef void *ufunc_expn_data[3] +cdef char ufunc_expn_types[9] +cdef char *ufunc_expn_doc = ( + "expn(n, x, out=None)\n" + "\n" + "Generalized 
exponential integral En.\n" + "\n" + "For integer :math:`n \\geq 0` and real :math:`x \\geq 0` the\n" + "generalized exponential integral is defined as [dlmf]_\n" + "\n" + ".. math::\n" + "\n" + " E_n(x) = x^{n - 1} \\int_x^\\infty \\frac{e^{-t}}{t^n} dt.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like\n" + " Non-negative integers\n" + "x : array_like\n" + " Real argument\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the generalized exponential integral\n" + "\n" + "See Also\n" + "--------\n" + "exp1 : special case of :math:`E_n` for :math:`n = 1`\n" + "expi : related to :math:`E_n` when :math:`n = 1`\n" + "\n" + "References\n" + "----------\n" + ".. [dlmf] Digital Library of Mathematical Functions, 8.19.2\n" + " https://dlmf.nist.gov/8.19#E2\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "Its domain is nonnegative n and x.\n" + "\n" + ">>> sc.expn(-1, 1.0), sc.expn(1, -1.0)\n" + "(nan, nan)\n" + "\n" + "It has a pole at ``x = 0`` for ``n = 1, 2``; for larger ``n`` it\n" + "is equal to ``1 / (n - 1)``.\n" + "\n" + ">>> sc.expn([0, 1, 2, 3, 4], 0)\n" + "array([ inf, inf, 1. 
, 0.5 , 0.33333333])\n" + "\n" + "For n equal to 0 it reduces to ``exp(-x) / x``.\n" + "\n" + ">>> x = np.array([1, 2, 3, 4])\n" + ">>> sc.expn(0, x)\n" + "array([0.36787944, 0.06766764, 0.01659569, 0.00457891])\n" + ">>> np.exp(-x) / x\n" + "array([0.36787944, 0.06766764, 0.01659569, 0.00457891])\n" + "\n" + "For n equal to 1 it reduces to `exp1`.\n" + "\n" + ">>> sc.expn(1, x)\n" + "array([0.21938393, 0.04890051, 0.01304838, 0.00377935])\n" + ">>> sc.exp1(x)\n" + "array([0.21938393, 0.04890051, 0.01304838, 0.00377935])") +ufunc_expn_loops[0] = loop_d_id__As_ld_d +ufunc_expn_loops[1] = loop_d_dd__As_ff_f +ufunc_expn_loops[2] = loop_d_dd__As_dd_d +ufunc_expn_types[0] = NPY_LONG +ufunc_expn_types[1] = NPY_DOUBLE +ufunc_expn_types[2] = NPY_DOUBLE +ufunc_expn_types[3] = NPY_FLOAT +ufunc_expn_types[4] = NPY_FLOAT +ufunc_expn_types[5] = NPY_FLOAT +ufunc_expn_types[6] = NPY_DOUBLE +ufunc_expn_types[7] = NPY_DOUBLE +ufunc_expn_types[8] = NPY_DOUBLE +ufunc_expn_ptr[2*0] = _func_expn +ufunc_expn_ptr[2*0+1] = ("expn") +ufunc_expn_ptr[2*1] = _func_expn_unsafe +ufunc_expn_ptr[2*1+1] = ("expn") +ufunc_expn_ptr[2*2] = _func_expn_unsafe +ufunc_expn_ptr[2*2+1] = ("expn") +ufunc_expn_data[0] = &ufunc_expn_ptr[2*0] +ufunc_expn_data[1] = &ufunc_expn_ptr[2*1] +ufunc_expn_data[2] = &ufunc_expn_ptr[2*2] +expn = np.PyUFunc_FromFuncAndData(ufunc_expn_loops, ufunc_expn_data, ufunc_expn_types, 3, 2, 1, 0, "expn", ufunc_expn_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_exprel_loops[2] +cdef void *ufunc_exprel_ptr[4] +cdef void *ufunc_exprel_data[2] +cdef char ufunc_exprel_types[4] +cdef char *ufunc_exprel_doc = ( + "exprel(x, out=None)\n" + "\n" + "Relative error exponential, ``(exp(x) - 1)/x``.\n" + "\n" + "When `x` is near zero, ``exp(x)`` is near 1, so the numerical calculation\n" + "of ``exp(x) - 1`` can suffer from catastrophic loss of precision.\n" + "``exprel(x)`` is implemented to avoid the loss of precision that occurs when\n" + "`x` is near zero.\n" + "\n" + "Parameters\n" + 
"----------\n" + "x : ndarray\n" + " Input array. `x` must contain real numbers.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " ``(exp(x) - 1)/x``, computed element-wise.\n" + "\n" + "See Also\n" + "--------\n" + "expm1\n" + "\n" + "Notes\n" + "-----\n" + ".. versionadded:: 0.17.0\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy.special import exprel\n" + "\n" + ">>> exprel(0.01)\n" + "1.0050167084168056\n" + ">>> exprel([-0.25, -0.1, 0, 0.1, 0.25])\n" + "array([ 0.88479687, 0.95162582, 1. , 1.05170918, 1.13610167])\n" + "\n" + "Compare ``exprel(5e-9)`` to the naive calculation. The exact value\n" + "is ``1.00000000250000000416...``.\n" + "\n" + ">>> exprel(5e-9)\n" + "1.0000000025\n" + "\n" + ">>> (np.exp(5e-9) - 1)/5e-9\n" + "0.99999999392252903") +ufunc_exprel_loops[0] = loop_d_d__As_f_f +ufunc_exprel_loops[1] = loop_d_d__As_d_d +ufunc_exprel_types[0] = NPY_FLOAT +ufunc_exprel_types[1] = NPY_FLOAT +ufunc_exprel_types[2] = NPY_DOUBLE +ufunc_exprel_types[3] = NPY_DOUBLE +ufunc_exprel_ptr[2*0] = _func_exprel +ufunc_exprel_ptr[2*0+1] = ("exprel") +ufunc_exprel_ptr[2*1] = _func_exprel +ufunc_exprel_ptr[2*1+1] = ("exprel") +ufunc_exprel_data[0] = &ufunc_exprel_ptr[2*0] +ufunc_exprel_data[1] = &ufunc_exprel_ptr[2*1] +exprel = np.PyUFunc_FromFuncAndData(ufunc_exprel_loops, ufunc_exprel_data, ufunc_exprel_types, 2, 1, 1, 0, "exprel", ufunc_exprel_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_fdtr_loops[2] +cdef void *ufunc_fdtr_ptr[4] +cdef void *ufunc_fdtr_data[2] +cdef char ufunc_fdtr_types[8] +cdef char *ufunc_fdtr_doc = ( + "fdtr(dfn, dfd, x, out=None)\n" + "\n" + "F cumulative distribution function.\n" + "\n" + "Returns the value of the cumulative distribution function of the\n" + "F-distribution, also known as Snedecor's F-distribution or the\n" + "Fisher-Snedecor distribution.\n" + "\n" + "The F-distribution with parameters 
:math:`d_n` and :math:`d_d` is the\n" + "distribution of the random variable,\n" + "\n" + ".. math::\n" + " X = \\frac{U_n/d_n}{U_d/d_d},\n" + "\n" + "where :math:`U_n` and :math:`U_d` are random variables distributed\n" + ":math:`\\chi^2`, with :math:`d_n` and :math:`d_d` degrees of freedom,\n" + "respectively.\n" + "\n" + "Parameters\n" + "----------\n" + "dfn : array_like\n" + " First parameter (positive float).\n" + "dfd : array_like\n" + " Second parameter (positive float).\n" + "x : array_like\n" + " Argument (nonnegative float).\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "y : scalar or ndarray\n" + " The CDF of the F-distribution with parameters `dfn` and `dfd` at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "fdtrc : F distribution survival function\n" + "fdtri : F distribution inverse cumulative distribution\n" + "scipy.stats.f : F distribution\n" + "\n" + "Notes\n" + "-----\n" + "The regularized incomplete beta function is used, according to the\n" + "formula,\n" + "\n" + ".. math::\n" + " F(d_n, d_d; x) = I_{xd_n/(d_d + xd_n)}(d_n/2, d_d/2).\n" + "\n" + "Wrapper for the Cephes [1]_ routine `fdtr`. The F distribution is also\n" + "available as `scipy.stats.f`. Calling `fdtr` directly can improve\n" + "performance compared to the ``cdf`` method of `scipy.stats.f` (see last\n" + "example below).\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "Calculate the function for ``dfn=1`` and ``dfd=2`` at ``x=1``.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import fdtr\n" + ">>> fdtr(1, 2, 1)\n" + "0.5773502691896258\n" + "\n" + "Calculate the function at several points by providing a NumPy array for\n" + "`x`.\n" + "\n" + ">>> x = np.array([0.5, 2., 3.])\n" + ">>> fdtr(1, 2, x)\n" + "array([0.4472136 , 0.70710678, 0.77459667])\n" + "\n" + "Plot the function for several parameter sets.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> dfn_parameters = [1, 5, 10, 50]\n" + ">>> dfd_parameters = [1, 1, 2, 3]\n" + ">>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']\n" + ">>> parameters_list = list(zip(dfn_parameters, dfd_parameters,\n" + "... linestyles))\n" + ">>> x = np.linspace(0, 30, 1000)\n" + ">>> fig, ax = plt.subplots()\n" + ">>> for parameter_set in parameters_list:\n" + "... dfn, dfd, style = parameter_set\n" + "... fdtr_vals = fdtr(dfn, dfd, x)\n" + "... ax.plot(x, fdtr_vals, label=rf\"$d_n={dfn},\\, d_d={dfd}$\",\n" + "... ls=style)\n" + ">>> ax.legend()\n" + ">>> ax.set_xlabel(\"$x$\")\n" + ">>> ax.set_title(\"F distribution cumulative distribution function\")\n" + ">>> plt.show()\n" + "\n" + "The F distribution is also available as `scipy.stats.f`. 
Using `fdtr`\n" + "directly can be much faster than calling the ``cdf`` method of\n" + "`scipy.stats.f`, especially for small arrays or individual values.\n" + "To get the same results one must use the following parametrization:\n" + "``stats.f(dfn, dfd).cdf(x)=fdtr(dfn, dfd, x)``.\n" + "\n" + ">>> from scipy.stats import f\n" + ">>> dfn, dfd = 1, 2\n" + ">>> x = 1\n" + ">>> fdtr_res = fdtr(dfn, dfd, x) # this will often be faster than below\n" + ">>> f_dist_res = f(dfn, dfd).cdf(x)\n" + ">>> fdtr_res == f_dist_res # test that results are equal\n" + "True") +ufunc_fdtr_loops[0] = loop_d_ddd__As_fff_f +ufunc_fdtr_loops[1] = loop_d_ddd__As_ddd_d +ufunc_fdtr_types[0] = NPY_FLOAT +ufunc_fdtr_types[1] = NPY_FLOAT +ufunc_fdtr_types[2] = NPY_FLOAT +ufunc_fdtr_types[3] = NPY_FLOAT +ufunc_fdtr_types[4] = NPY_DOUBLE +ufunc_fdtr_types[5] = NPY_DOUBLE +ufunc_fdtr_types[6] = NPY_DOUBLE +ufunc_fdtr_types[7] = NPY_DOUBLE +ufunc_fdtr_ptr[2*0] = _func_fdtr +ufunc_fdtr_ptr[2*0+1] = ("fdtr") +ufunc_fdtr_ptr[2*1] = _func_fdtr +ufunc_fdtr_ptr[2*1+1] = ("fdtr") +ufunc_fdtr_data[0] = &ufunc_fdtr_ptr[2*0] +ufunc_fdtr_data[1] = &ufunc_fdtr_ptr[2*1] +fdtr = np.PyUFunc_FromFuncAndData(ufunc_fdtr_loops, ufunc_fdtr_data, ufunc_fdtr_types, 2, 3, 1, 0, "fdtr", ufunc_fdtr_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_fdtrc_loops[2] +cdef void *ufunc_fdtrc_ptr[4] +cdef void *ufunc_fdtrc_data[2] +cdef char ufunc_fdtrc_types[8] +cdef char *ufunc_fdtrc_doc = ( + "fdtrc(dfn, dfd, x, out=None)\n" + "\n" + "F survival function.\n" + "\n" + "Returns the complemented F-distribution function (the integral of the\n" + "density from `x` to infinity).\n" + "\n" + "Parameters\n" + "----------\n" + "dfn : array_like\n" + " First parameter (positive float).\n" + "dfd : array_like\n" + " Second parameter (positive float).\n" + "x : array_like\n" + " Argument (nonnegative float).\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "y : scalar 
or ndarray\n" + " The complemented F-distribution function with parameters `dfn` and\n" + " `dfd` at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "fdtr : F distribution cumulative distribution function\n" + "fdtri : F distribution inverse cumulative distribution function\n" + "scipy.stats.f : F distribution\n" + "\n" + "Notes\n" + "-----\n" + "The regularized incomplete beta function is used, according to the\n" + "formula,\n" + "\n" + ".. math::\n" + " F(d_n, d_d; x) = I_{d_d/(d_d + xd_n)}(d_d/2, d_n/2).\n" + "\n" + "Wrapper for the Cephes [1]_ routine `fdtrc`. The F distribution is also\n" + "available as `scipy.stats.f`. Calling `fdtrc` directly can improve\n" + "performance compared to the ``sf`` method of `scipy.stats.f` (see last\n" + "example below).\n" + "\n" + "References\n" + "----------\n" + ".. [1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "Calculate the function for ``dfn=1`` and ``dfd=2`` at ``x=1``.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import fdtrc\n" + ">>> fdtrc(1, 2, 1)\n" + "0.42264973081037427\n" + "\n" + "Calculate the function at several points by providing a NumPy array for\n" + "`x`.\n" + "\n" + ">>> x = np.array([0.5, 2., 3.])\n" + ">>> fdtrc(1, 2, x)\n" + "array([0.5527864 , 0.29289322, 0.22540333])\n" + "\n" + "Plot the function for several parameter sets.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> dfn_parameters = [1, 5, 10, 50]\n" + ">>> dfd_parameters = [1, 1, 2, 3]\n" + ">>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']\n" + ">>> parameters_list = list(zip(dfn_parameters, dfd_parameters,\n" + "... linestyles))\n" + ">>> x = np.linspace(0, 30, 1000)\n" + ">>> fig, ax = plt.subplots()\n" + ">>> for parameter_set in parameters_list:\n" + "... dfn, dfd, style = parameter_set\n" + "... fdtrc_vals = fdtrc(dfn, dfd, x)\n" + "... ax.plot(x, fdtrc_vals, label=rf\"$d_n={dfn},\\, d_d={dfd}$\",\n" + "... 
ls=style)\n" + ">>> ax.legend()\n" + ">>> ax.set_xlabel(\"$x$\")\n" + ">>> ax.set_title(\"F distribution survival function\")\n" + ">>> plt.show()\n" + "\n" + "The F distribution is also available as `scipy.stats.f`. Using `fdtrc`\n" + "directly can be much faster than calling the ``sf`` method of\n" + "`scipy.stats.f`, especially for small arrays or individual values.\n" + "To get the same results one must use the following parametrization:\n" + "``stats.f(dfn, dfd).sf(x)=fdtrc(dfn, dfd, x)``.\n" + "\n" + ">>> from scipy.stats import f\n" + ">>> dfn, dfd = 1, 2\n" + ">>> x = 1\n" + ">>> fdtrc_res = fdtrc(dfn, dfd, x) # this will often be faster than below\n" + ">>> f_dist_res = f(dfn, dfd).sf(x)\n" + ">>> f_dist_res == fdtrc_res # test that results are equal\n" + "True") +ufunc_fdtrc_loops[0] = loop_d_ddd__As_fff_f +ufunc_fdtrc_loops[1] = loop_d_ddd__As_ddd_d +ufunc_fdtrc_types[0] = NPY_FLOAT +ufunc_fdtrc_types[1] = NPY_FLOAT +ufunc_fdtrc_types[2] = NPY_FLOAT +ufunc_fdtrc_types[3] = NPY_FLOAT +ufunc_fdtrc_types[4] = NPY_DOUBLE +ufunc_fdtrc_types[5] = NPY_DOUBLE +ufunc_fdtrc_types[6] = NPY_DOUBLE +ufunc_fdtrc_types[7] = NPY_DOUBLE +ufunc_fdtrc_ptr[2*0] = _func_fdtrc +ufunc_fdtrc_ptr[2*0+1] = ("fdtrc") +ufunc_fdtrc_ptr[2*1] = _func_fdtrc +ufunc_fdtrc_ptr[2*1+1] = ("fdtrc") +ufunc_fdtrc_data[0] = &ufunc_fdtrc_ptr[2*0] +ufunc_fdtrc_data[1] = &ufunc_fdtrc_ptr[2*1] +fdtrc = np.PyUFunc_FromFuncAndData(ufunc_fdtrc_loops, ufunc_fdtrc_data, ufunc_fdtrc_types, 2, 3, 1, 0, "fdtrc", ufunc_fdtrc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_fdtri_loops[2] +cdef void *ufunc_fdtri_ptr[4] +cdef void *ufunc_fdtri_data[2] +cdef char ufunc_fdtri_types[8] +cdef char *ufunc_fdtri_doc = ( + "fdtri(dfn, dfd, p, out=None)\n" + "\n" + "The `p`-th quantile of the F-distribution.\n" + "\n" + "This function is the inverse of the F-distribution CDF, `fdtr`, returning\n" + "the `x` such that `fdtr(dfn, dfd, x) = p`.\n" + "\n" + "Parameters\n" + "----------\n" + "dfn : array_like\n" + " First 
parameter (positive float).\n" + "dfd : array_like\n" + " Second parameter (positive float).\n" + "p : array_like\n" + " Cumulative probability, in [0, 1].\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "x : scalar or ndarray\n" + " The quantile corresponding to `p`.\n" + "\n" + "See Also\n" + "--------\n" + "fdtr : F distribution cumulative distribution function\n" + "fdtrc : F distribution survival function\n" + "scipy.stats.f : F distribution\n" + "\n" + "Notes\n" + "-----\n" + "The computation is carried out using the relation to the inverse\n" + "regularized beta function, :math:`I^{-1}_x(a, b)`. Let\n" + ":math:`z = I^{-1}_p(d_d/2, d_n/2).` Then,\n" + "\n" + ".. math::\n" + " x = \\frac{d_d (1 - z)}{d_n z}.\n" + "\n" + "If `p` is such that :math:`x < 0.5`, the following relation is used\n" + "instead for improved stability: let\n" + ":math:`z' = I^{-1}_{1 - p}(d_n/2, d_d/2).` Then,\n" + "\n" + ".. math::\n" + " x = \\frac{d_d z'}{d_n (1 - z')}.\n" + "\n" + "Wrapper for the Cephes [1]_ routine `fdtri`.\n" + "\n" + "The F distribution is also available as `scipy.stats.f`. Calling\n" + "`fdtri` directly can improve performance compared to the ``ppf``\n" + "method of `scipy.stats.f` (see last example below).\n" + "\n" + "References\n" + "----------\n" + ".. [1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "`fdtri` represents the inverse of the F distribution CDF which is\n" + "available as `fdtr`. Here, we calculate the CDF for ``df1=1``, ``df2=2``\n" + "at ``x=3``. 
`fdtri` then returns ``3`` given the same values for `df1`,\n" + "`df2` and the computed CDF value.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import fdtri, fdtr\n" + ">>> df1, df2 = 1, 2\n" + ">>> x = 3\n" + ">>> cdf_value = fdtr(df1, df2, x)\n" + ">>> fdtri(df1, df2, cdf_value)\n" + "3.000000000000006\n" + "\n" + "Calculate the function at several points by providing a NumPy array for\n" + "`x`.\n" + "\n" + ">>> x = np.array([0.1, 0.4, 0.7])\n" + ">>> fdtri(1, 2, x)\n" + "array([0.02020202, 0.38095238, 1.92156863])\n" + "\n" + "Plot the function for several parameter sets.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> dfn_parameters = [50, 10, 1, 50]\n" + ">>> dfd_parameters = [0.5, 1, 1, 5]\n" + ">>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']\n" + ">>> parameters_list = list(zip(dfn_parameters, dfd_parameters,\n" + "... linestyles))\n" + ">>> x = np.linspace(0, 1, 1000)\n" + ">>> fig, ax = plt.subplots()\n" + ">>> for parameter_set in parameters_list:\n" + "... dfn, dfd, style = parameter_set\n" + "... fdtri_vals = fdtri(dfn, dfd, x)\n" + "... ax.plot(x, fdtri_vals, label=rf\"$d_n={dfn},\\, d_d={dfd}$\",\n" + "... ls=style)\n" + ">>> ax.legend()\n" + ">>> ax.set_xlabel(\"$x$\")\n" + ">>> title = \"F distribution inverse cumulative distribution function\"\n" + ">>> ax.set_title(title)\n" + ">>> ax.set_ylim(0, 30)\n" + ">>> plt.show()\n" + "\n" + "The F distribution is also available as `scipy.stats.f`. 
Using `fdtri`\n" + "directly can be much faster than calling the ``ppf`` method of\n" + "`scipy.stats.f`, especially for small arrays or individual values.\n" + "To get the same results one must use the following parametrization:\n" + "``stats.f(dfn, dfd).ppf(x)=fdtri(dfn, dfd, x)``.\n" + "\n" + ">>> from scipy.stats import f\n" + ">>> dfn, dfd = 1, 2\n" + ">>> x = 0.7\n" + ">>> fdtri_res = fdtri(dfn, dfd, x) # this will often be faster than below\n" + ">>> f_dist_res = f(dfn, dfd).ppf(x)\n" + ">>> f_dist_res == fdtri_res # test that results are equal\n" + "True") +ufunc_fdtri_loops[0] = loop_d_ddd__As_fff_f +ufunc_fdtri_loops[1] = loop_d_ddd__As_ddd_d +ufunc_fdtri_types[0] = NPY_FLOAT +ufunc_fdtri_types[1] = NPY_FLOAT +ufunc_fdtri_types[2] = NPY_FLOAT +ufunc_fdtri_types[3] = NPY_FLOAT +ufunc_fdtri_types[4] = NPY_DOUBLE +ufunc_fdtri_types[5] = NPY_DOUBLE +ufunc_fdtri_types[6] = NPY_DOUBLE +ufunc_fdtri_types[7] = NPY_DOUBLE +ufunc_fdtri_ptr[2*0] = _func_fdtri +ufunc_fdtri_ptr[2*0+1] = ("fdtri") +ufunc_fdtri_ptr[2*1] = _func_fdtri +ufunc_fdtri_ptr[2*1+1] = ("fdtri") +ufunc_fdtri_data[0] = &ufunc_fdtri_ptr[2*0] +ufunc_fdtri_data[1] = &ufunc_fdtri_ptr[2*1] +fdtri = np.PyUFunc_FromFuncAndData(ufunc_fdtri_loops, ufunc_fdtri_data, ufunc_fdtri_types, 2, 3, 1, 0, "fdtri", ufunc_fdtri_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_fdtridfd_loops[2] +cdef void *ufunc_fdtridfd_ptr[4] +cdef void *ufunc_fdtridfd_data[2] +cdef char ufunc_fdtridfd_types[8] +cdef char *ufunc_fdtridfd_doc = ( + "fdtridfd(dfn, p, x, out=None)\n" + "\n" + "Inverse to `fdtr` vs dfd\n" + "\n" + "Finds the F density argument dfd such that ``fdtr(dfn, dfd, x) == p``.\n" + "\n" + "Parameters\n" + "----------\n" + "dfn : array_like\n" + " First parameter (positive float).\n" + "p : array_like\n" + " Cumulative probability, in [0, 1].\n" + "x : array_like\n" + " Argument (nonnegative float).\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + 
"-------\n" + "dfd : scalar or ndarray\n" + " `dfd` such that ``fdtr(dfn, dfd, x) == p``.\n" + "\n" + "See Also\n" + "--------\n" + "fdtr : F distribution cumulative distribution function\n" + "fdtrc : F distribution survival function\n" + "fdtri : F distribution quantile function\n" + "scipy.stats.f : F distribution\n" + "\n" + "Examples\n" + "--------\n" + "Compute the F distribution cumulative distribution function for one\n" + "parameter set.\n" + "\n" + ">>> from scipy.special import fdtridfd, fdtr\n" + ">>> dfn, dfd, x = 10, 5, 2\n" + ">>> cdf_value = fdtr(dfn, dfd, x)\n" + ">>> cdf_value\n" + "0.7700248806501017\n" + "\n" + "Verify that `fdtridfd` recovers the original value for `dfd`:\n" + "\n" + ">>> fdtridfd(dfn, cdf_value, x)\n" + "5.0") +ufunc_fdtridfd_loops[0] = loop_d_ddd__As_fff_f +ufunc_fdtridfd_loops[1] = loop_d_ddd__As_ddd_d +ufunc_fdtridfd_types[0] = NPY_FLOAT +ufunc_fdtridfd_types[1] = NPY_FLOAT +ufunc_fdtridfd_types[2] = NPY_FLOAT +ufunc_fdtridfd_types[3] = NPY_FLOAT +ufunc_fdtridfd_types[4] = NPY_DOUBLE +ufunc_fdtridfd_types[5] = NPY_DOUBLE +ufunc_fdtridfd_types[6] = NPY_DOUBLE +ufunc_fdtridfd_types[7] = NPY_DOUBLE +ufunc_fdtridfd_ptr[2*0] = _func_fdtridfd +ufunc_fdtridfd_ptr[2*0+1] = ("fdtridfd") +ufunc_fdtridfd_ptr[2*1] = _func_fdtridfd +ufunc_fdtridfd_ptr[2*1+1] = ("fdtridfd") +ufunc_fdtridfd_data[0] = &ufunc_fdtridfd_ptr[2*0] +ufunc_fdtridfd_data[1] = &ufunc_fdtridfd_ptr[2*1] +fdtridfd = np.PyUFunc_FromFuncAndData(ufunc_fdtridfd_loops, ufunc_fdtridfd_data, ufunc_fdtridfd_types, 2, 3, 1, 0, "fdtridfd", ufunc_fdtridfd_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_fresnel_loops[4] +cdef void *ufunc_fresnel_ptr[8] +cdef void *ufunc_fresnel_data[4] +cdef char ufunc_fresnel_types[12] +cdef char *ufunc_fresnel_doc = ( + "fresnel(z, out=None)\n" + "\n" + "Fresnel integrals.\n" + "\n" + "The Fresnel integrals are defined as\n" + "\n" + ".. 
math::\n" + "\n" + " S(z) &= \\int_0^z \\sin(\\pi t^2 /2) dt \\\\\n" + " C(z) &= \\int_0^z \\cos(\\pi t^2 /2) dt.\n" + "\n" + "See [dlmf]_ for details.\n" + "\n" + "Parameters\n" + "----------\n" + "z : array_like\n" + " Real or complex valued argument\n" + "out : 2-tuple of ndarrays, optional\n" + " Optional output arrays for the function results\n" + "\n" + "Returns\n" + "-------\n" + "S, C : 2-tuple of scalar or ndarray\n" + " Values of the Fresnel integrals\n" + "\n" + "See Also\n" + "--------\n" + "fresnel_zeros : zeros of the Fresnel integrals\n" + "\n" + "References\n" + "----------\n" + ".. [dlmf] NIST Digital Library of Mathematical Functions\n" + " https://dlmf.nist.gov/7.2#iii\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "As z goes to infinity along the real axis, S and C converge to 0.5.\n" + "\n" + ">>> S, C = sc.fresnel([0.1, 1, 10, 100, np.inf])\n" + ">>> S\n" + "array([0.00052359, 0.43825915, 0.46816998, 0.4968169 , 0.5 ])\n" + ">>> C\n" + "array([0.09999753, 0.7798934 , 0.49989869, 0.4999999 , 0.5 ])\n" + "\n" + "They are related to the error function `erf`.\n" + "\n" + ">>> z = np.array([1, 2, 3, 4])\n" + ">>> zeta = 0.5 * np.sqrt(np.pi) * (1 - 1j) * z\n" + ">>> S, C = sc.fresnel(z)\n" + ">>> C + 1j*S\n" + "array([0.7798934 +0.43825915j, 0.48825341+0.34341568j,\n" + " 0.60572079+0.496313j , 0.49842603+0.42051575j])\n" + ">>> 0.5 * (1 + 1j) * sc.erf(zeta)\n" + "array([0.7798934 +0.43825915j, 0.48825341+0.34341568j,\n" + " 0.60572079+0.496313j , 0.49842603+0.42051575j])") +ufunc_fresnel_loops[0] = loop_i_d_dd_As_f_ff +ufunc_fresnel_loops[1] = loop_i_d_dd_As_d_dd +ufunc_fresnel_loops[2] = loop_i_D_DD_As_F_FF +ufunc_fresnel_loops[3] = loop_i_D_DD_As_D_DD +ufunc_fresnel_types[0] = NPY_FLOAT +ufunc_fresnel_types[1] = NPY_FLOAT +ufunc_fresnel_types[2] = NPY_FLOAT +ufunc_fresnel_types[3] = NPY_DOUBLE +ufunc_fresnel_types[4] = NPY_DOUBLE +ufunc_fresnel_types[5] = NPY_DOUBLE 
+ufunc_fresnel_types[6] = NPY_CFLOAT +ufunc_fresnel_types[7] = NPY_CFLOAT +ufunc_fresnel_types[8] = NPY_CFLOAT +ufunc_fresnel_types[9] = NPY_CDOUBLE +ufunc_fresnel_types[10] = NPY_CDOUBLE +ufunc_fresnel_types[11] = NPY_CDOUBLE +ufunc_fresnel_ptr[2*0] = _func_fresnl +ufunc_fresnel_ptr[2*0+1] = ("fresnel") +ufunc_fresnel_ptr[2*1] = _func_fresnl +ufunc_fresnel_ptr[2*1+1] = ("fresnel") +ufunc_fresnel_ptr[2*2] = _func_cfresnl_wrap +ufunc_fresnel_ptr[2*2+1] = ("fresnel") +ufunc_fresnel_ptr[2*3] = _func_cfresnl_wrap +ufunc_fresnel_ptr[2*3+1] = ("fresnel") +ufunc_fresnel_data[0] = &ufunc_fresnel_ptr[2*0] +ufunc_fresnel_data[1] = &ufunc_fresnel_ptr[2*1] +ufunc_fresnel_data[2] = &ufunc_fresnel_ptr[2*2] +ufunc_fresnel_data[3] = &ufunc_fresnel_ptr[2*3] +fresnel = np.PyUFunc_FromFuncAndData(ufunc_fresnel_loops, ufunc_fresnel_data, ufunc_fresnel_types, 4, 1, 2, 0, "fresnel", ufunc_fresnel_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_gamma_loops[4] +cdef void *ufunc_gamma_ptr[8] +cdef void *ufunc_gamma_data[4] +cdef char ufunc_gamma_types[8] +cdef char *ufunc_gamma_doc = ( + "gamma(z, out=None)\n" + "\n" + "gamma function.\n" + "\n" + "The gamma function is defined as\n" + "\n" + ".. math::\n" + "\n" + " \\Gamma(z) = \\int_0^\\infty t^{z-1} e^{-t} dt\n" + "\n" + "for :math:`\\Re(z) > 0` and is extended to the rest of the complex\n" + "plane by analytic continuation. See [dlmf]_ for more details.\n" + "\n" + "Parameters\n" + "----------\n" + "z : array_like\n" + " Real or complex valued argument\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the gamma function\n" + "\n" + "Notes\n" + "-----\n" + "The gamma function is often referred to as the generalized\n" + "factorial since :math:`\\Gamma(n + 1) = n!` for natural numbers\n" + ":math:`n`. 
More generally it satisfies the recurrence relation\n" + ":math:`\\Gamma(z + 1) = z \\cdot \\Gamma(z)` for complex :math:`z`,\n" + "which, combined with the fact that :math:`\\Gamma(1) = 1`, implies\n" + "the above identity for :math:`z = n`.\n" + "\n" + "References\n" + "----------\n" + ".. [dlmf] NIST Digital Library of Mathematical Functions\n" + " https://dlmf.nist.gov/5.2#E1\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy.special import gamma, factorial\n" + "\n" + ">>> gamma([0, 0.5, 1, 5])\n" + "array([ inf, 1.77245385, 1. , 24. ])\n" + "\n" + ">>> z = 2.5 + 1j\n" + ">>> gamma(z)\n" + "(0.77476210455108352+0.70763120437959293j)\n" + ">>> gamma(z+1), z*gamma(z) # Recurrence property\n" + "((1.2292740569981171+2.5438401155000685j),\n" + " (1.2292740569981158+2.5438401155000658j))\n" + "\n" + ">>> gamma(0.5)**2 # gamma(0.5) = sqrt(pi)\n" + "3.1415926535897927\n" + "\n" + "Plot gamma(x) for real x\n" + "\n" + ">>> x = np.linspace(-3.5, 5.5, 2251)\n" + ">>> y = gamma(x)\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> plt.plot(x, y, 'b', alpha=0.6, label='gamma(x)')\n" + ">>> k = np.arange(1, 7)\n" + ">>> plt.plot(k, factorial(k-1), 'k*', alpha=0.6,\n" + "... 
label='(x-1)!, x = 1, 2, ...')\n" + ">>> plt.xlim(-3.5, 5.5)\n" + ">>> plt.ylim(-10, 25)\n" + ">>> plt.grid()\n" + ">>> plt.xlabel('x')\n" + ">>> plt.legend(loc='lower right')\n" + ">>> plt.show()") +ufunc_gamma_loops[0] = loop_d_d__As_f_f +ufunc_gamma_loops[1] = loop_d_d__As_d_d +ufunc_gamma_loops[2] = loop_D_D__As_F_F +ufunc_gamma_loops[3] = loop_D_D__As_D_D +ufunc_gamma_types[0] = NPY_FLOAT +ufunc_gamma_types[1] = NPY_FLOAT +ufunc_gamma_types[2] = NPY_DOUBLE +ufunc_gamma_types[3] = NPY_DOUBLE +ufunc_gamma_types[4] = NPY_CFLOAT +ufunc_gamma_types[5] = NPY_CFLOAT +ufunc_gamma_types[6] = NPY_CDOUBLE +ufunc_gamma_types[7] = NPY_CDOUBLE +ufunc_gamma_ptr[2*0] = _func_Gamma +ufunc_gamma_ptr[2*0+1] = ("gamma") +ufunc_gamma_ptr[2*1] = _func_Gamma +ufunc_gamma_ptr[2*1+1] = ("gamma") +ufunc_gamma_ptr[2*2] = scipy.special._ufuncs_cxx._export_cgamma +ufunc_gamma_ptr[2*2+1] = ("gamma") +ufunc_gamma_ptr[2*3] = scipy.special._ufuncs_cxx._export_cgamma +ufunc_gamma_ptr[2*3+1] = ("gamma") +ufunc_gamma_data[0] = &ufunc_gamma_ptr[2*0] +ufunc_gamma_data[1] = &ufunc_gamma_ptr[2*1] +ufunc_gamma_data[2] = &ufunc_gamma_ptr[2*2] +ufunc_gamma_data[3] = &ufunc_gamma_ptr[2*3] +gamma = np.PyUFunc_FromFuncAndData(ufunc_gamma_loops, ufunc_gamma_data, ufunc_gamma_types, 4, 1, 1, 0, "gamma", ufunc_gamma_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_gammainc_loops[2] +cdef void *ufunc_gammainc_ptr[4] +cdef void *ufunc_gammainc_data[2] +cdef char ufunc_gammainc_types[6] +cdef char *ufunc_gammainc_doc = ( + "gammainc(a, x, out=None)\n" + "\n" + "Regularized lower incomplete gamma function.\n" + "\n" + "It is defined as\n" + "\n" + ".. math::\n" + "\n" + " P(a, x) = \\frac{1}{\\Gamma(a)} \\int_0^x t^{a - 1}e^{-t} dt\n" + "\n" + "for :math:`a > 0` and :math:`x \\geq 0`. 
See [dlmf]_ for details.\n" + "\n" + "Parameters\n" + "----------\n" + "a : array_like\n" + " Positive parameter\n" + "x : array_like\n" + " Nonnegative argument\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the lower incomplete gamma function\n" + "\n" + "See Also\n" + "--------\n" + "gammaincc : regularized upper incomplete gamma function\n" + "gammaincinv : inverse of the regularized lower incomplete gamma function\n" + "gammainccinv : inverse of the regularized upper incomplete gamma function\n" + "\n" + "Notes\n" + "-----\n" + "The function satisfies the relation ``gammainc(a, x) +\n" + "gammaincc(a, x) = 1`` where `gammaincc` is the regularized upper\n" + "incomplete gamma function.\n" + "\n" + "The implementation largely follows that of [boost]_.\n" + "\n" + "References\n" + "----------\n" + ".. [dlmf] NIST Digital Library of Mathematical functions\n" + " https://dlmf.nist.gov/8.2#E4\n" + ".. [boost] Maddock et. al., \"Incomplete Gamma Functions\",\n" + " https://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html\n" + "\n" + "Examples\n" + "--------\n" + ">>> import scipy.special as sc\n" + "\n" + "It is the CDF of the gamma distribution, so it starts at 0 and\n" + "monotonically increases to 1.\n" + "\n" + ">>> sc.gammainc(0.5, [0, 1, 10, 100])\n" + "array([0. , 0.84270079, 0.99999226, 1. 
])\n" + "\n" + "It is equal to one minus the upper incomplete gamma function.\n" + "\n" + ">>> a, x = 0.5, 0.4\n" + ">>> sc.gammainc(a, x)\n" + "0.6289066304773024\n" + ">>> 1 - sc.gammaincc(a, x)\n" + "0.6289066304773024") +ufunc_gammainc_loops[0] = loop_d_dd__As_ff_f +ufunc_gammainc_loops[1] = loop_d_dd__As_dd_d +ufunc_gammainc_types[0] = NPY_FLOAT +ufunc_gammainc_types[1] = NPY_FLOAT +ufunc_gammainc_types[2] = NPY_FLOAT +ufunc_gammainc_types[3] = NPY_DOUBLE +ufunc_gammainc_types[4] = NPY_DOUBLE +ufunc_gammainc_types[5] = NPY_DOUBLE +ufunc_gammainc_ptr[2*0] = _func_igam +ufunc_gammainc_ptr[2*0+1] = ("gammainc") +ufunc_gammainc_ptr[2*1] = _func_igam +ufunc_gammainc_ptr[2*1+1] = ("gammainc") +ufunc_gammainc_data[0] = &ufunc_gammainc_ptr[2*0] +ufunc_gammainc_data[1] = &ufunc_gammainc_ptr[2*1] +gammainc = np.PyUFunc_FromFuncAndData(ufunc_gammainc_loops, ufunc_gammainc_data, ufunc_gammainc_types, 2, 2, 1, 0, "gammainc", ufunc_gammainc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_gammaincc_loops[2] +cdef void *ufunc_gammaincc_ptr[4] +cdef void *ufunc_gammaincc_data[2] +cdef char ufunc_gammaincc_types[6] +cdef char *ufunc_gammaincc_doc = ( + "gammaincc(a, x, out=None)\n" + "\n" + "Regularized upper incomplete gamma function.\n" + "\n" + "It is defined as\n" + "\n" + ".. math::\n" + "\n" + " Q(a, x) = \\frac{1}{\\Gamma(a)} \\int_x^\\infty t^{a - 1}e^{-t} dt\n" + "\n" + "for :math:`a > 0` and :math:`x \\geq 0`. 
See [dlmf]_ for details.\n" + "\n" + "Parameters\n" + "----------\n" + "a : array_like\n" + " Positive parameter\n" + "x : array_like\n" + " Nonnegative argument\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the upper incomplete gamma function\n" + "\n" + "See Also\n" + "--------\n" + "gammainc : regularized lower incomplete gamma function\n" + "gammaincinv : inverse of the regularized lower incomplete gamma function\n" + "gammainccinv : inverse of the regularized upper incomplete gamma function\n" + "\n" + "Notes\n" + "-----\n" + "The function satisfies the relation ``gammainc(a, x) +\n" + "gammaincc(a, x) = 1`` where `gammainc` is the regularized lower\n" + "incomplete gamma function.\n" + "\n" + "The implementation largely follows that of [boost]_.\n" + "\n" + "References\n" + "----------\n" + ".. [dlmf] NIST Digital Library of Mathematical functions\n" + " https://dlmf.nist.gov/8.2#E4\n" + ".. [boost] Maddock et. 
al., \"Incomplete Gamma Functions\",\n" + " https://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html\n" + "\n" + "Examples\n" + "--------\n" + ">>> import scipy.special as sc\n" + "\n" + "It is the survival function of the gamma distribution, so it\n" + "starts at 1 and monotonically decreases to 0.\n" + "\n" + ">>> sc.gammaincc(0.5, [0, 1, 10, 100, 1000])\n" + "array([1.00000000e+00, 1.57299207e-01, 7.74421643e-06, 2.08848758e-45,\n" + " 0.00000000e+00])\n" + "\n" + "It is equal to one minus the lower incomplete gamma function.\n" + "\n" + ">>> a, x = 0.5, 0.4\n" + ">>> sc.gammaincc(a, x)\n" + "0.37109336952269756\n" + ">>> 1 - sc.gammainc(a, x)\n" + "0.37109336952269756") +ufunc_gammaincc_loops[0] = loop_d_dd__As_ff_f +ufunc_gammaincc_loops[1] = loop_d_dd__As_dd_d +ufunc_gammaincc_types[0] = NPY_FLOAT +ufunc_gammaincc_types[1] = NPY_FLOAT +ufunc_gammaincc_types[2] = NPY_FLOAT +ufunc_gammaincc_types[3] = NPY_DOUBLE +ufunc_gammaincc_types[4] = NPY_DOUBLE +ufunc_gammaincc_types[5] = NPY_DOUBLE +ufunc_gammaincc_ptr[2*0] = _func_igamc +ufunc_gammaincc_ptr[2*0+1] = ("gammaincc") +ufunc_gammaincc_ptr[2*1] = _func_igamc +ufunc_gammaincc_ptr[2*1+1] = ("gammaincc") +ufunc_gammaincc_data[0] = &ufunc_gammaincc_ptr[2*0] +ufunc_gammaincc_data[1] = &ufunc_gammaincc_ptr[2*1] +gammaincc = np.PyUFunc_FromFuncAndData(ufunc_gammaincc_loops, ufunc_gammaincc_data, ufunc_gammaincc_types, 2, 2, 1, 0, "gammaincc", ufunc_gammaincc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_gammainccinv_loops[2] +cdef void *ufunc_gammainccinv_ptr[4] +cdef void *ufunc_gammainccinv_data[2] +cdef char ufunc_gammainccinv_types[6] +cdef char *ufunc_gammainccinv_doc = ( + "gammainccinv(a, y, out=None)\n" + "\n" + "Inverse of the regularized upper incomplete gamma function.\n" + "\n" + "Given an input :math:`y` between 0 and 1, returns :math:`x` such\n" + "that :math:`y = Q(a, x)`. Here :math:`Q` is the regularized upper\n" + "incomplete gamma function; see `gammaincc`. 
This is well-defined\n" + "because the upper incomplete gamma function is monotonic as can\n" + "be seen from its definition in [dlmf]_.\n" + "\n" + "Parameters\n" + "----------\n" + "a : array_like\n" + " Positive parameter\n" + "y : array_like\n" + " Argument between 0 and 1, inclusive\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the inverse of the upper incomplete gamma function\n" + "\n" + "See Also\n" + "--------\n" + "gammaincc : regularized upper incomplete gamma function\n" + "gammainc : regularized lower incomplete gamma function\n" + "gammaincinv : inverse of the regularized lower incomplete gamma function\n" + "\n" + "References\n" + "----------\n" + ".. [dlmf] NIST Digital Library of Mathematical Functions\n" + " https://dlmf.nist.gov/8.2#E4\n" + "\n" + "Examples\n" + "--------\n" + ">>> import scipy.special as sc\n" + "\n" + "It starts at infinity and monotonically decreases to 0.\n" + "\n" + ">>> sc.gammainccinv(0.5, [0, 0.1, 0.5, 1])\n" + "array([ inf, 1.35277173, 0.22746821, 0. ])\n" + "\n" + "It inverts the upper incomplete gamma function.\n" + "\n" + ">>> a, x = 0.5, [0, 0.1, 0.5, 1]\n" + ">>> sc.gammaincc(a, sc.gammainccinv(a, x))\n" + "array([0. , 0.1, 0.5, 1. 
])\n" + "\n" + ">>> a, x = 0.5, [0, 10, 50]\n" + ">>> sc.gammainccinv(a, sc.gammaincc(a, x))\n" + "array([ 0., 10., 50.])") +ufunc_gammainccinv_loops[0] = loop_d_dd__As_ff_f +ufunc_gammainccinv_loops[1] = loop_d_dd__As_dd_d +ufunc_gammainccinv_types[0] = NPY_FLOAT +ufunc_gammainccinv_types[1] = NPY_FLOAT +ufunc_gammainccinv_types[2] = NPY_FLOAT +ufunc_gammainccinv_types[3] = NPY_DOUBLE +ufunc_gammainccinv_types[4] = NPY_DOUBLE +ufunc_gammainccinv_types[5] = NPY_DOUBLE +ufunc_gammainccinv_ptr[2*0] = _func_igamci +ufunc_gammainccinv_ptr[2*0+1] = ("gammainccinv") +ufunc_gammainccinv_ptr[2*1] = _func_igamci +ufunc_gammainccinv_ptr[2*1+1] = ("gammainccinv") +ufunc_gammainccinv_data[0] = &ufunc_gammainccinv_ptr[2*0] +ufunc_gammainccinv_data[1] = &ufunc_gammainccinv_ptr[2*1] +gammainccinv = np.PyUFunc_FromFuncAndData(ufunc_gammainccinv_loops, ufunc_gammainccinv_data, ufunc_gammainccinv_types, 2, 2, 1, 0, "gammainccinv", ufunc_gammainccinv_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_gammaincinv_loops[2] +cdef void *ufunc_gammaincinv_ptr[4] +cdef void *ufunc_gammaincinv_data[2] +cdef char ufunc_gammaincinv_types[6] +cdef char *ufunc_gammaincinv_doc = ( + "gammaincinv(a, y, out=None)\n" + "\n" + "Inverse to the regularized lower incomplete gamma function.\n" + "\n" + "Given an input :math:`y` between 0 and 1, returns :math:`x` such\n" + "that :math:`y = P(a, x)`. Here :math:`P` is the regularized lower\n" + "incomplete gamma function; see `gammainc`. 
This is well-defined\n" + "because the lower incomplete gamma function is monotonic as can be\n" + "seen from its definition in [dlmf]_.\n" + "\n" + "Parameters\n" + "----------\n" + "a : array_like\n" + " Positive parameter\n" + "y : array_like\n" + " Parameter between 0 and 1, inclusive\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the inverse of the lower incomplete gamma function\n" + "\n" + "See Also\n" + "--------\n" + "gammainc : regularized lower incomplete gamma function\n" + "gammaincc : regularized upper incomplete gamma function\n" + "gammainccinv : inverse of the regularized upper incomplete gamma function\n" + "\n" + "References\n" + "----------\n" + ".. [dlmf] NIST Digital Library of Mathematical Functions\n" + " https://dlmf.nist.gov/8.2#E4\n" + "\n" + "Examples\n" + "--------\n" + ">>> import scipy.special as sc\n" + "\n" + "It starts at 0 and monotonically increases to infinity.\n" + "\n" + ">>> sc.gammaincinv(0.5, [0, 0.1 ,0.5, 1])\n" + "array([0. , 0.00789539, 0.22746821, inf])\n" + "\n" + "It inverts the lower incomplete gamma function.\n" + "\n" + ">>> a, x = 0.5, [0, 0.1, 0.5, 1]\n" + ">>> sc.gammainc(a, sc.gammaincinv(a, x))\n" + "array([0. , 0.1, 0.5, 1. ])\n" + "\n" + ">>> a, x = 0.5, [0, 10, 25]\n" + ">>> sc.gammaincinv(a, sc.gammainc(a, x))\n" + "array([ 0. , 10. 
, 25.00001465])") +ufunc_gammaincinv_loops[0] = loop_d_dd__As_ff_f +ufunc_gammaincinv_loops[1] = loop_d_dd__As_dd_d +ufunc_gammaincinv_types[0] = NPY_FLOAT +ufunc_gammaincinv_types[1] = NPY_FLOAT +ufunc_gammaincinv_types[2] = NPY_FLOAT +ufunc_gammaincinv_types[3] = NPY_DOUBLE +ufunc_gammaincinv_types[4] = NPY_DOUBLE +ufunc_gammaincinv_types[5] = NPY_DOUBLE +ufunc_gammaincinv_ptr[2*0] = _func_igami +ufunc_gammaincinv_ptr[2*0+1] = ("gammaincinv") +ufunc_gammaincinv_ptr[2*1] = _func_igami +ufunc_gammaincinv_ptr[2*1+1] = ("gammaincinv") +ufunc_gammaincinv_data[0] = &ufunc_gammaincinv_ptr[2*0] +ufunc_gammaincinv_data[1] = &ufunc_gammaincinv_ptr[2*1] +gammaincinv = np.PyUFunc_FromFuncAndData(ufunc_gammaincinv_loops, ufunc_gammaincinv_data, ufunc_gammaincinv_types, 2, 2, 1, 0, "gammaincinv", ufunc_gammaincinv_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_gammaln_loops[2] +cdef void *ufunc_gammaln_ptr[4] +cdef void *ufunc_gammaln_data[2] +cdef char ufunc_gammaln_types[4] +cdef char *ufunc_gammaln_doc = ( + "gammaln(x, out=None)\n" + "\n" + "Logarithm of the absolute value of the gamma function.\n" + "\n" + "Defined as\n" + "\n" + ".. math::\n" + "\n" + " \\ln(\\lvert\\Gamma(x)\\rvert)\n" + "\n" + "where :math:`\\Gamma` is the gamma function. 
For more details on\n" + "the gamma function, see [dlmf]_.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real argument\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the log of the absolute value of gamma\n" + "\n" + "See Also\n" + "--------\n" + "gammasgn : sign of the gamma function\n" + "loggamma : principal branch of the logarithm of the gamma function\n" + "\n" + "Notes\n" + "-----\n" + "It is the same function as the Python standard library function\n" + ":func:`math.lgamma`.\n" + "\n" + "When used in conjunction with `gammasgn`, this function is useful\n" + "for working in logspace on the real axis without having to deal\n" + "with complex numbers via the relation ``exp(gammaln(x)) =\n" + "gammasgn(x) * gamma(x)``.\n" + "\n" + "For complex-valued log-gamma, use `loggamma` instead of `gammaln`.\n" + "\n" + "References\n" + "----------\n" + ".. [dlmf] NIST Digital Library of Mathematical Functions\n" + " https://dlmf.nist.gov/5\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "It has two positive zeros.\n" + "\n" + ">>> sc.gammaln([1, 2])\n" + "array([0., 0.])\n" + "\n" + "It has poles at nonpositive integers.\n" + "\n" + ">>> sc.gammaln([0, -1, -2, -3, -4])\n" + "array([inf, inf, inf, inf, inf])\n" + "\n" + "It asymptotically approaches ``x * log(x)`` (Stirling's formula).\n" + "\n" + ">>> x = np.array([1e10, 1e20, 1e40, 1e80])\n" + ">>> sc.gammaln(x)\n" + "array([2.20258509e+11, 4.50517019e+21, 9.11034037e+41, 1.83206807e+82])\n" + ">>> x * np.log(x)\n" + "array([2.30258509e+11, 4.60517019e+21, 9.21034037e+41, 1.84206807e+82])") +ufunc_gammaln_loops[0] = loop_d_d__As_f_f +ufunc_gammaln_loops[1] = loop_d_d__As_d_d +ufunc_gammaln_types[0] = NPY_FLOAT +ufunc_gammaln_types[1] = NPY_FLOAT +ufunc_gammaln_types[2] = NPY_DOUBLE +ufunc_gammaln_types[3] = NPY_DOUBLE 
+ufunc_gammaln_ptr[2*0] = _func_lgam +ufunc_gammaln_ptr[2*0+1] = ("gammaln") +ufunc_gammaln_ptr[2*1] = _func_lgam +ufunc_gammaln_ptr[2*1+1] = ("gammaln") +ufunc_gammaln_data[0] = &ufunc_gammaln_ptr[2*0] +ufunc_gammaln_data[1] = &ufunc_gammaln_ptr[2*1] +gammaln = np.PyUFunc_FromFuncAndData(ufunc_gammaln_loops, ufunc_gammaln_data, ufunc_gammaln_types, 2, 1, 1, 0, "gammaln", ufunc_gammaln_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_gammasgn_loops[2] +cdef void *ufunc_gammasgn_ptr[4] +cdef void *ufunc_gammasgn_data[2] +cdef char ufunc_gammasgn_types[4] +cdef char *ufunc_gammasgn_doc = ( + "gammasgn(x, out=None)\n" + "\n" + "Sign of the gamma function.\n" + "\n" + "It is defined as\n" + "\n" + ".. math::\n" + "\n" + " \\text{gammasgn}(x) =\n" + " \\begin{cases}\n" + " +1 & \\Gamma(x) > 0 \\\\\n" + " -1 & \\Gamma(x) < 0\n" + " \\end{cases}\n" + "\n" + "where :math:`\\Gamma` is the gamma function; see `gamma`. This\n" + "definition is complete since the gamma function is never zero;\n" + "see the discussion after [dlmf]_.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real argument\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Sign of the gamma function\n" + "\n" + "See Also\n" + "--------\n" + "gamma : the gamma function\n" + "gammaln : log of the absolute value of the gamma function\n" + "loggamma : analytic continuation of the log of the gamma function\n" + "\n" + "Notes\n" + "-----\n" + "The gamma function can be computed as ``gammasgn(x) *\n" + "np.exp(gammaln(x))``.\n" + "\n" + "References\n" + "----------\n" + ".. 
[dlmf] NIST Digital Library of Mathematical Functions\n" + " https://dlmf.nist.gov/5.2#E1\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "It is 1 for `x > 0`.\n" + "\n" + ">>> sc.gammasgn([1, 2, 3, 4])\n" + "array([1., 1., 1., 1.])\n" + "\n" + "It alternates between -1 and 1 for negative integers.\n" + "\n" + ">>> sc.gammasgn([-0.5, -1.5, -2.5, -3.5])\n" + "array([-1., 1., -1., 1.])\n" + "\n" + "It can be used to compute the gamma function.\n" + "\n" + ">>> x = [1.5, 0.5, -0.5, -1.5]\n" + ">>> sc.gammasgn(x) * np.exp(sc.gammaln(x))\n" + "array([ 0.88622693, 1.77245385, -3.5449077 , 2.3632718 ])\n" + ">>> sc.gamma(x)\n" + "array([ 0.88622693, 1.77245385, -3.5449077 , 2.3632718 ])") +ufunc_gammasgn_loops[0] = loop_d_d__As_f_f +ufunc_gammasgn_loops[1] = loop_d_d__As_d_d +ufunc_gammasgn_types[0] = NPY_FLOAT +ufunc_gammasgn_types[1] = NPY_FLOAT +ufunc_gammasgn_types[2] = NPY_DOUBLE +ufunc_gammasgn_types[3] = NPY_DOUBLE +ufunc_gammasgn_ptr[2*0] = _func_gammasgn +ufunc_gammasgn_ptr[2*0+1] = ("gammasgn") +ufunc_gammasgn_ptr[2*1] = _func_gammasgn +ufunc_gammasgn_ptr[2*1+1] = ("gammasgn") +ufunc_gammasgn_data[0] = &ufunc_gammasgn_ptr[2*0] +ufunc_gammasgn_data[1] = &ufunc_gammasgn_ptr[2*1] +gammasgn = np.PyUFunc_FromFuncAndData(ufunc_gammasgn_loops, ufunc_gammasgn_data, ufunc_gammasgn_types, 2, 1, 1, 0, "gammasgn", ufunc_gammasgn_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_gdtr_loops[2] +cdef void *ufunc_gdtr_ptr[4] +cdef void *ufunc_gdtr_data[2] +cdef char ufunc_gdtr_types[8] +cdef char *ufunc_gdtr_doc = ( + "gdtr(a, b, x, out=None)\n" + "\n" + "Gamma distribution cumulative distribution function.\n" + "\n" + "Returns the integral from zero to `x` of the gamma probability density\n" + "function,\n" + "\n" + ".. 
math::\n" + "\n" + " F = \\int_0^x \\frac{a^b}{\\Gamma(b)} t^{b-1} e^{-at}\\,dt,\n" + "\n" + "where :math:`\\Gamma` is the gamma function.\n" + "\n" + "Parameters\n" + "----------\n" + "a : array_like\n" + " The rate parameter of the gamma distribution, sometimes denoted\n" + " :math:`\\beta` (float). It is also the reciprocal of the scale\n" + " parameter :math:`\\theta`.\n" + "b : array_like\n" + " The shape parameter of the gamma distribution, sometimes denoted\n" + " :math:`\\alpha` (float).\n" + "x : array_like\n" + " The quantile (upper limit of integration; float).\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "F : scalar or ndarray\n" + " The CDF of the gamma distribution with parameters `a` and `b`\n" + " evaluated at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "gdtrc : 1 - CDF of the gamma distribution.\n" + "scipy.stats.gamma: Gamma distribution\n" + "\n" + "Notes\n" + "-----\n" + "The evaluation is carried out using the relation to the incomplete gamma\n" + "integral (regularized gamma function).\n" + "\n" + "Wrapper for the Cephes [1]_ routine `gdtr`. Calling `gdtr` directly can\n" + "improve performance compared to the ``cdf`` method of `scipy.stats.gamma`\n" + "(see last example below).\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "Compute the function for ``a=1``, ``b=2`` at ``x=5``.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import gdtr\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> gdtr(1., 2., 5.)\n" + "0.9595723180054873\n" + "\n" + "Compute the function for ``a=1`` and ``b=2`` at several points by\n" + "providing a NumPy array for `x`.\n" + "\n" + ">>> xvalues = np.array([1., 2., 3., 4])\n" + ">>> gdtr(1., 1., xvalues)\n" + "array([0.63212056, 0.86466472, 0.95021293, 0.98168436])\n" + "\n" + "`gdtr` can evaluate different parameter sets by providing arrays with\n" + "broadcasting compatible shapes for `a`, `b` and `x`. Here we compute the\n" + "function for three different `a` at four positions `x` and ``b=3``,\n" + "resulting in a 3x4 array.\n" + "\n" + ">>> a = np.array([[0.5], [1.5], [2.5]])\n" + ">>> x = np.array([1., 2., 3., 4])\n" + ">>> a.shape, x.shape\n" + "((3, 1), (4,))\n" + "\n" + ">>> gdtr(a, 3., x)\n" + "array([[0.01438768, 0.0803014 , 0.19115317, 0.32332358],\n" + " [0.19115317, 0.57680992, 0.82642193, 0.9380312 ],\n" + " [0.45618688, 0.87534798, 0.97974328, 0.9972306 ]])\n" + "\n" + "Plot the function for four different parameter sets.\n" + "\n" + ">>> a_parameters = [0.3, 1, 2, 6]\n" + ">>> b_parameters = [2, 10, 15, 20]\n" + ">>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']\n" + ">>> parameters_list = list(zip(a_parameters, b_parameters, linestyles))\n" + ">>> x = np.linspace(0, 30, 1000)\n" + ">>> fig, ax = plt.subplots()\n" + ">>> for parameter_set in parameters_list:\n" + "... a, b, style = parameter_set\n" + "... gdtr_vals = gdtr(a, b, x)\n" + "... 
ax.plot(x, gdtr_vals, label=fr\"$a= {a},\\, b={b}$\", ls=style)\n" + ">>> ax.legend()\n" + ">>> ax.set_xlabel(\"$x$\")\n" + ">>> ax.set_title(\"Gamma distribution cumulative distribution function\")\n" + ">>> plt.show()\n" + "\n" + "The gamma distribution is also available as `scipy.stats.gamma`. Using\n" + "`gdtr` directly can be much faster than calling the ``cdf`` method of\n" + "`scipy.stats.gamma`, especially for small arrays or individual values.\n" + "To get the same results one must use the following parametrization:\n" + "``stats.gamma(b, scale=1/a).cdf(x)=gdtr(a, b, x)``.\n" + "\n" + ">>> from scipy.stats import gamma\n" + ">>> a = 2.\n" + ">>> b = 3\n" + ">>> x = 1.\n" + ">>> gdtr_result = gdtr(a, b, x) # this will often be faster than below\n" + ">>> gamma_dist_result = gamma(b, scale=1/a).cdf(x)\n" + ">>> gdtr_result == gamma_dist_result # test that results are equal\n" + "True") +ufunc_gdtr_loops[0] = loop_d_ddd__As_fff_f +ufunc_gdtr_loops[1] = loop_d_ddd__As_ddd_d +ufunc_gdtr_types[0] = NPY_FLOAT +ufunc_gdtr_types[1] = NPY_FLOAT +ufunc_gdtr_types[2] = NPY_FLOAT +ufunc_gdtr_types[3] = NPY_FLOAT +ufunc_gdtr_types[4] = NPY_DOUBLE +ufunc_gdtr_types[5] = NPY_DOUBLE +ufunc_gdtr_types[6] = NPY_DOUBLE +ufunc_gdtr_types[7] = NPY_DOUBLE +ufunc_gdtr_ptr[2*0] = _func_gdtr +ufunc_gdtr_ptr[2*0+1] = ("gdtr") +ufunc_gdtr_ptr[2*1] = _func_gdtr +ufunc_gdtr_ptr[2*1+1] = ("gdtr") +ufunc_gdtr_data[0] = &ufunc_gdtr_ptr[2*0] +ufunc_gdtr_data[1] = &ufunc_gdtr_ptr[2*1] +gdtr = np.PyUFunc_FromFuncAndData(ufunc_gdtr_loops, ufunc_gdtr_data, ufunc_gdtr_types, 2, 3, 1, 0, "gdtr", ufunc_gdtr_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_gdtrc_loops[2] +cdef void *ufunc_gdtrc_ptr[4] +cdef void *ufunc_gdtrc_data[2] +cdef char ufunc_gdtrc_types[8] +cdef char *ufunc_gdtrc_doc = ( + "gdtrc(a, b, x, out=None)\n" + "\n" + "Gamma distribution survival function.\n" + "\n" + "Integral from `x` to infinity of the gamma probability density function,\n" + "\n" + ".. 
math::\n" + "\n" + " F = \\int_x^\\infty \\frac{a^b}{\\Gamma(b)} t^{b-1} e^{-at}\\,dt,\n" + "\n" + "where :math:`\\Gamma` is the gamma function.\n" + "\n" + "Parameters\n" + "----------\n" + "a : array_like\n" + " The rate parameter of the gamma distribution, sometimes denoted\n" + " :math:`\\beta` (float). It is also the reciprocal of the scale\n" + " parameter :math:`\\theta`.\n" + "b : array_like\n" + " The shape parameter of the gamma distribution, sometimes denoted\n" + " :math:`\\alpha` (float).\n" + "x : array_like\n" + " The quantile (lower limit of integration; float).\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "F : scalar or ndarray\n" + " The survival function of the gamma distribution with parameters `a`\n" + " and `b` evaluated at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "gdtr: Gamma distribution cumulative distribution function\n" + "scipy.stats.gamma: Gamma distribution\n" + "gdtrix\n" + "\n" + "Notes\n" + "-----\n" + "The evaluation is carried out using the relation to the incomplete gamma\n" + "integral (regularized gamma function).\n" + "\n" + "Wrapper for the Cephes [1]_ routine `gdtrc`. Calling `gdtrc` directly can\n" + "improve performance compared to the ``sf`` method of `scipy.stats.gamma`\n" + "(see last example below).\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "Compute the function for ``a=1`` and ``b=2`` at ``x=5``.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import gdtrc\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> gdtrc(1., 2., 5.)\n" + "0.04042768199451279\n" + "\n" + "Compute the function for ``a=1``, ``b=2`` at several points by providing\n" + "a NumPy array for `x`.\n" + "\n" + ">>> xvalues = np.array([1., 2., 3., 4])\n" + ">>> gdtrc(1., 1., xvalues)\n" + "array([0.36787944, 0.13533528, 0.04978707, 0.01831564])\n" + "\n" + "`gdtrc` can evaluate different parameter sets by providing arrays with\n" + "broadcasting compatible shapes for `a`, `b` and `x`. Here we compute the\n" + "function for three different `a` at four positions `x` and ``b=3``,\n" + "resulting in a 3x4 array.\n" + "\n" + ">>> a = np.array([[0.5], [1.5], [2.5]])\n" + ">>> x = np.array([1., 2., 3., 4])\n" + ">>> a.shape, x.shape\n" + "((3, 1), (4,))\n" + "\n" + ">>> gdtrc(a, 3., x)\n" + "array([[0.98561232, 0.9196986 , 0.80884683, 0.67667642],\n" + " [0.80884683, 0.42319008, 0.17357807, 0.0619688 ],\n" + " [0.54381312, 0.12465202, 0.02025672, 0.0027694 ]])\n" + "\n" + "Plot the function for four different parameter sets.\n" + "\n" + ">>> a_parameters = [0.3, 1, 2, 6]\n" + ">>> b_parameters = [2, 10, 15, 20]\n" + ">>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']\n" + ">>> parameters_list = list(zip(a_parameters, b_parameters, linestyles))\n" + ">>> x = np.linspace(0, 30, 1000)\n" + ">>> fig, ax = plt.subplots()\n" + ">>> for parameter_set in parameters_list:\n" + "... a, b, style = parameter_set\n" + "... gdtrc_vals = gdtrc(a, b, x)\n" + "... 
ax.plot(x, gdtrc_vals, label=fr\"$a= {a},\\, b={b}$\", ls=style)\n" + ">>> ax.legend()\n" + ">>> ax.set_xlabel(\"$x$\")\n" + ">>> ax.set_title(\"Gamma distribution survival function\")\n" + ">>> plt.show()\n" + "\n" + "The gamma distribution is also available as `scipy.stats.gamma`.\n" + "Using `gdtrc` directly can be much faster than calling the ``sf`` method\n" + "of `scipy.stats.gamma`, especially for small arrays or individual\n" + "values. To get the same results one must use the following parametrization:\n" + "``stats.gamma(b, scale=1/a).sf(x)=gdtrc(a, b, x)``.\n" + "\n" + ">>> from scipy.stats import gamma\n" + ">>> a = 2\n" + ">>> b = 3\n" + ">>> x = 1.\n" + ">>> gdtrc_result = gdtrc(a, b, x) # this will often be faster than below\n" + ">>> gamma_dist_result = gamma(b, scale=1/a).sf(x)\n" + ">>> gdtrc_result == gamma_dist_result # test that results are equal\n" + "True") +ufunc_gdtrc_loops[0] = loop_d_ddd__As_fff_f +ufunc_gdtrc_loops[1] = loop_d_ddd__As_ddd_d +ufunc_gdtrc_types[0] = NPY_FLOAT +ufunc_gdtrc_types[1] = NPY_FLOAT +ufunc_gdtrc_types[2] = NPY_FLOAT +ufunc_gdtrc_types[3] = NPY_FLOAT +ufunc_gdtrc_types[4] = NPY_DOUBLE +ufunc_gdtrc_types[5] = NPY_DOUBLE +ufunc_gdtrc_types[6] = NPY_DOUBLE +ufunc_gdtrc_types[7] = NPY_DOUBLE +ufunc_gdtrc_ptr[2*0] = _func_gdtrc +ufunc_gdtrc_ptr[2*0+1] = ("gdtrc") +ufunc_gdtrc_ptr[2*1] = _func_gdtrc +ufunc_gdtrc_ptr[2*1+1] = ("gdtrc") +ufunc_gdtrc_data[0] = &ufunc_gdtrc_ptr[2*0] +ufunc_gdtrc_data[1] = &ufunc_gdtrc_ptr[2*1] +gdtrc = np.PyUFunc_FromFuncAndData(ufunc_gdtrc_loops, ufunc_gdtrc_data, ufunc_gdtrc_types, 2, 3, 1, 0, "gdtrc", ufunc_gdtrc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_gdtria_loops[2] +cdef void *ufunc_gdtria_ptr[4] +cdef void *ufunc_gdtria_data[2] +cdef char ufunc_gdtria_types[8] +cdef char *ufunc_gdtria_doc = ( + "gdtria(p, b, x, out=None)\n" + "\n" + "Inverse of `gdtr` vs a.\n" + "\n" + "Returns the inverse with respect to the parameter `a` of ``p =\n" + "gdtr(a, b, x)``, the cumulative 
distribution function of the gamma\n" + "distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "p : array_like\n" + " Probability values.\n" + "b : array_like\n" + " `b` parameter values of `gdtr(a, b, x)`. `b` is the \"shape\" parameter\n" + " of the gamma distribution.\n" + "x : array_like\n" + " Nonnegative real values, from the domain of the gamma distribution.\n" + "out : ndarray, optional\n" + " If a fourth argument is given, it must be a numpy.ndarray whose size\n" + " matches the broadcast result of `a`, `b` and `x`. `out` is then the\n" + " array returned by the function.\n" + "\n" + "Returns\n" + "-------\n" + "a : scalar or ndarray\n" + " Values of the `a` parameter such that `p = gdtr(a, b, x)`. `1/a`\n" + " is the \"scale\" parameter of the gamma distribution.\n" + "\n" + "See Also\n" + "--------\n" + "gdtr : CDF of the gamma distribution.\n" + "gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.\n" + "gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.\n" + "\n" + "The cumulative distribution function `p` is computed using a routine by\n" + "DiDinato and Morris [2]_. Computation of `a` involves a search for a value\n" + "that produces the desired value of `p`. The search relies on the\n" + "monotonicity of `p` with `a`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Barry Brown, James Lovato, and Kathy Russell,\n" + " CDFLIB: Library of Fortran Routines for Cumulative Distribution\n" + " Functions, Inverses, and Other Parameters.\n" + ".. [2] DiDinato, A. R. and Morris, A. H.,\n" + " Computation of the incomplete gamma function ratios and their\n" + " inverse. ACM Trans. Math. Softw. 
12 (1986), 377-393.\n" + "\n" + "Examples\n" + "--------\n" + "First evaluate `gdtr`.\n" + "\n" + ">>> from scipy.special import gdtr, gdtria\n" + ">>> p = gdtr(1.2, 3.4, 5.6)\n" + ">>> print(p)\n" + "0.94378087442\n" + "\n" + "Verify the inverse.\n" + "\n" + ">>> gdtria(p, 3.4, 5.6)\n" + "1.2") +ufunc_gdtria_loops[0] = loop_d_ddd__As_fff_f +ufunc_gdtria_loops[1] = loop_d_ddd__As_ddd_d +ufunc_gdtria_types[0] = NPY_FLOAT +ufunc_gdtria_types[1] = NPY_FLOAT +ufunc_gdtria_types[2] = NPY_FLOAT +ufunc_gdtria_types[3] = NPY_FLOAT +ufunc_gdtria_types[4] = NPY_DOUBLE +ufunc_gdtria_types[5] = NPY_DOUBLE +ufunc_gdtria_types[6] = NPY_DOUBLE +ufunc_gdtria_types[7] = NPY_DOUBLE +ufunc_gdtria_ptr[2*0] = _func_gdtria +ufunc_gdtria_ptr[2*0+1] = ("gdtria") +ufunc_gdtria_ptr[2*1] = _func_gdtria +ufunc_gdtria_ptr[2*1+1] = ("gdtria") +ufunc_gdtria_data[0] = &ufunc_gdtria_ptr[2*0] +ufunc_gdtria_data[1] = &ufunc_gdtria_ptr[2*1] +gdtria = np.PyUFunc_FromFuncAndData(ufunc_gdtria_loops, ufunc_gdtria_data, ufunc_gdtria_types, 2, 3, 1, 0, "gdtria", ufunc_gdtria_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_gdtrib_loops[2] +cdef void *ufunc_gdtrib_ptr[4] +cdef void *ufunc_gdtrib_data[2] +cdef char ufunc_gdtrib_types[8] +cdef char *ufunc_gdtrib_doc = ( + "gdtrib(a, p, x, out=None)\n" + "\n" + "Inverse of `gdtr` vs b.\n" + "\n" + "Returns the inverse with respect to the parameter `b` of ``p =\n" + "gdtr(a, b, x)``, the cumulative distribution function of the gamma\n" + "distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "a : array_like\n" + " `a` parameter values of `gdtr(a, b, x)`. `1/a` is the \"scale\"\n" + " parameter of the gamma distribution.\n" + "p : array_like\n" + " Probability values.\n" + "x : array_like\n" + " Nonnegative real values, from the domain of the gamma distribution.\n" + "out : ndarray, optional\n" + " If a fourth argument is given, it must be a numpy.ndarray whose size\n" + " matches the broadcast result of `a`, `b` and `x`. 
`out` is then the\n" + " array returned by the function.\n" + "\n" + "Returns\n" + "-------\n" + "b : scalar or ndarray\n" + " Values of the `b` parameter such that `p = gdtr(a, b, x)`. `b` is\n" + " the \"shape\" parameter of the gamma distribution.\n" + "\n" + "See Also\n" + "--------\n" + "gdtr : CDF of the gamma distribution.\n" + "gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.\n" + "gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.\n" + "\n" + "The cumulative distribution function `p` is computed using a routine by\n" + "DiDinato and Morris [2]_. Computation of `b` involves a search for a value\n" + "that produces the desired value of `p`. The search relies on the\n" + "monotonicity of `p` with `b`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Barry Brown, James Lovato, and Kathy Russell,\n" + " CDFLIB: Library of Fortran Routines for Cumulative Distribution\n" + " Functions, Inverses, and Other Parameters.\n" + ".. [2] DiDinato, A. R. and Morris, A. H.,\n" + " Computation of the incomplete gamma function ratios and their\n" + " inverse. ACM Trans. Math. Softw. 
12 (1986), 377-393.\n" + "\n" + "Examples\n" + "--------\n" + "First evaluate `gdtr`.\n" + "\n" + ">>> from scipy.special import gdtr, gdtrib\n" + ">>> p = gdtr(1.2, 3.4, 5.6)\n" + ">>> print(p)\n" + "0.94378087442\n" + "\n" + "Verify the inverse.\n" + "\n" + ">>> gdtrib(1.2, p, 5.6)\n" + "3.3999999999723882") +ufunc_gdtrib_loops[0] = loop_d_ddd__As_fff_f +ufunc_gdtrib_loops[1] = loop_d_ddd__As_ddd_d +ufunc_gdtrib_types[0] = NPY_FLOAT +ufunc_gdtrib_types[1] = NPY_FLOAT +ufunc_gdtrib_types[2] = NPY_FLOAT +ufunc_gdtrib_types[3] = NPY_FLOAT +ufunc_gdtrib_types[4] = NPY_DOUBLE +ufunc_gdtrib_types[5] = NPY_DOUBLE +ufunc_gdtrib_types[6] = NPY_DOUBLE +ufunc_gdtrib_types[7] = NPY_DOUBLE +ufunc_gdtrib_ptr[2*0] = _func_gdtrib +ufunc_gdtrib_ptr[2*0+1] = ("gdtrib") +ufunc_gdtrib_ptr[2*1] = _func_gdtrib +ufunc_gdtrib_ptr[2*1+1] = ("gdtrib") +ufunc_gdtrib_data[0] = &ufunc_gdtrib_ptr[2*0] +ufunc_gdtrib_data[1] = &ufunc_gdtrib_ptr[2*1] +gdtrib = np.PyUFunc_FromFuncAndData(ufunc_gdtrib_loops, ufunc_gdtrib_data, ufunc_gdtrib_types, 2, 3, 1, 0, "gdtrib", ufunc_gdtrib_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_gdtrix_loops[2] +cdef void *ufunc_gdtrix_ptr[4] +cdef void *ufunc_gdtrix_data[2] +cdef char ufunc_gdtrix_types[8] +cdef char *ufunc_gdtrix_doc = ( + "gdtrix(a, b, p, out=None)\n" + "\n" + "Inverse of `gdtr` vs x.\n" + "\n" + "Returns the inverse with respect to the parameter `x` of ``p =\n" + "gdtr(a, b, x)``, the cumulative distribution function of the gamma\n" + "distribution. This is also known as the pth quantile of the\n" + "distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "a : array_like\n" + " `a` parameter values of `gdtr(a, b, x)`. `1/a` is the \"scale\"\n" + " parameter of the gamma distribution.\n" + "b : array_like\n" + " `b` parameter values of `gdtr(a, b, x)`. 
`b` is the \"shape\" parameter\n" + " of the gamma distribution.\n" + "p : array_like\n" + " Probability values.\n" + "out : ndarray, optional\n" + " If a fourth argument is given, it must be a numpy.ndarray whose size\n" + " matches the broadcast result of `a`, `b` and `x`. `out` is then the\n" + " array returned by the function.\n" + "\n" + "Returns\n" + "-------\n" + "x : scalar or ndarray\n" + " Values of the `x` parameter such that `p = gdtr(a, b, x)`.\n" + "\n" + "See Also\n" + "--------\n" + "gdtr : CDF of the gamma distribution.\n" + "gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.\n" + "gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.\n" + "\n" + "The cumulative distribution function `p` is computed using a routine by\n" + "DiDinato and Morris [2]_. Computation of `x` involves a search for a value\n" + "that produces the desired value of `p`. The search relies on the\n" + "monotonicity of `p` with `x`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Barry Brown, James Lovato, and Kathy Russell,\n" + " CDFLIB: Library of Fortran Routines for Cumulative Distribution\n" + " Functions, Inverses, and Other Parameters.\n" + ".. [2] DiDinato, A. R. and Morris, A. H.,\n" + " Computation of the incomplete gamma function ratios and their\n" + " inverse. ACM Trans. Math. Softw. 
12 (1986), 377-393.\n" + "\n" + "Examples\n" + "--------\n" + "First evaluate `gdtr`.\n" + "\n" + ">>> from scipy.special import gdtr, gdtrix\n" + ">>> p = gdtr(1.2, 3.4, 5.6)\n" + ">>> print(p)\n" + "0.94378087442\n" + "\n" + "Verify the inverse.\n" + "\n" + ">>> gdtrix(1.2, 3.4, p)\n" + "5.5999999999999996") +ufunc_gdtrix_loops[0] = loop_d_ddd__As_fff_f +ufunc_gdtrix_loops[1] = loop_d_ddd__As_ddd_d +ufunc_gdtrix_types[0] = NPY_FLOAT +ufunc_gdtrix_types[1] = NPY_FLOAT +ufunc_gdtrix_types[2] = NPY_FLOAT +ufunc_gdtrix_types[3] = NPY_FLOAT +ufunc_gdtrix_types[4] = NPY_DOUBLE +ufunc_gdtrix_types[5] = NPY_DOUBLE +ufunc_gdtrix_types[6] = NPY_DOUBLE +ufunc_gdtrix_types[7] = NPY_DOUBLE +ufunc_gdtrix_ptr[2*0] = _func_gdtrix +ufunc_gdtrix_ptr[2*0+1] = ("gdtrix") +ufunc_gdtrix_ptr[2*1] = _func_gdtrix +ufunc_gdtrix_ptr[2*1+1] = ("gdtrix") +ufunc_gdtrix_data[0] = &ufunc_gdtrix_ptr[2*0] +ufunc_gdtrix_data[1] = &ufunc_gdtrix_ptr[2*1] +gdtrix = np.PyUFunc_FromFuncAndData(ufunc_gdtrix_loops, ufunc_gdtrix_data, ufunc_gdtrix_types, 2, 3, 1, 0, "gdtrix", ufunc_gdtrix_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_hankel1_loops[2] +cdef void *ufunc_hankel1_ptr[4] +cdef void *ufunc_hankel1_data[2] +cdef char ufunc_hankel1_types[6] +cdef char *ufunc_hankel1_doc = ( + "hankel1(v, z, out=None)\n" + "\n" + "Hankel function of the first kind\n" + "\n" + "Parameters\n" + "----------\n" + "v : array_like\n" + " Order (float).\n" + "z : array_like\n" + " Argument (float or complex).\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the Hankel function of the first kind.\n" + "\n" + "See Also\n" + "--------\n" + "hankel1e : ndarray\n" + " This function with leading exponential behavior stripped off.\n" + "\n" + "Notes\n" + "-----\n" + "A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the\n" + "computation using the relation,\n" + "\n" + ".. 
math:: H^{(1)}_v(z) =\n" + " \\frac{2}{\\imath\\pi} \\exp(-\\imath \\pi v/2) K_v(z \\exp(-\\imath\\pi/2))\n" + "\n" + "where :math:`K_v` is the modified Bessel function of the second kind.\n" + "For negative orders, the relation\n" + "\n" + ".. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \\exp(\\imath\\pi v)\n" + "\n" + "is used.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Donald E. Amos, \"AMOS, A Portable Package for Bessel Functions\n" + " of a Complex Argument and Nonnegative Order\",\n" + " http://netlib.org/amos/") +ufunc_hankel1_loops[0] = loop_D_dD__As_fF_F +ufunc_hankel1_loops[1] = loop_D_dD__As_dD_D +ufunc_hankel1_types[0] = NPY_FLOAT +ufunc_hankel1_types[1] = NPY_CFLOAT +ufunc_hankel1_types[2] = NPY_CFLOAT +ufunc_hankel1_types[3] = NPY_DOUBLE +ufunc_hankel1_types[4] = NPY_CDOUBLE +ufunc_hankel1_types[5] = NPY_CDOUBLE +ufunc_hankel1_ptr[2*0] = _func_cbesh_wrap1 +ufunc_hankel1_ptr[2*0+1] = ("hankel1") +ufunc_hankel1_ptr[2*1] = _func_cbesh_wrap1 +ufunc_hankel1_ptr[2*1+1] = ("hankel1") +ufunc_hankel1_data[0] = &ufunc_hankel1_ptr[2*0] +ufunc_hankel1_data[1] = &ufunc_hankel1_ptr[2*1] +hankel1 = np.PyUFunc_FromFuncAndData(ufunc_hankel1_loops, ufunc_hankel1_data, ufunc_hankel1_types, 2, 2, 1, 0, "hankel1", ufunc_hankel1_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_hankel1e_loops[2] +cdef void *ufunc_hankel1e_ptr[4] +cdef void *ufunc_hankel1e_data[2] +cdef char ufunc_hankel1e_types[6] +cdef char *ufunc_hankel1e_doc = ( + "hankel1e(v, z, out=None)\n" + "\n" + "Exponentially scaled Hankel function of the first kind\n" + "\n" + "Defined as::\n" + "\n" + " hankel1e(v, z) = hankel1(v, z) * exp(-1j * z)\n" + "\n" + "Parameters\n" + "----------\n" + "v : array_like\n" + " Order (float).\n" + "z : array_like\n" + " Argument (float or complex).\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the exponentially scaled Hankel function.\n" + "\n" + "Notes\n" + 
"-----\n" + "A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the\n" + "computation using the relation,\n" + "\n" + ".. math:: H^{(1)}_v(z) =\n" + " \\frac{2}{\\imath\\pi} \\exp(-\\imath \\pi v/2) K_v(z \\exp(-\\imath\\pi/2))\n" + "\n" + "where :math:`K_v` is the modified Bessel function of the second kind.\n" + "For negative orders, the relation\n" + "\n" + ".. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \\exp(\\imath\\pi v)\n" + "\n" + "is used.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Donald E. Amos, \"AMOS, A Portable Package for Bessel Functions\n" + " of a Complex Argument and Nonnegative Order\",\n" + " http://netlib.org/amos/") +ufunc_hankel1e_loops[0] = loop_D_dD__As_fF_F +ufunc_hankel1e_loops[1] = loop_D_dD__As_dD_D +ufunc_hankel1e_types[0] = NPY_FLOAT +ufunc_hankel1e_types[1] = NPY_CFLOAT +ufunc_hankel1e_types[2] = NPY_CFLOAT +ufunc_hankel1e_types[3] = NPY_DOUBLE +ufunc_hankel1e_types[4] = NPY_CDOUBLE +ufunc_hankel1e_types[5] = NPY_CDOUBLE +ufunc_hankel1e_ptr[2*0] = _func_cbesh_wrap1_e +ufunc_hankel1e_ptr[2*0+1] = ("hankel1e") +ufunc_hankel1e_ptr[2*1] = _func_cbesh_wrap1_e +ufunc_hankel1e_ptr[2*1+1] = ("hankel1e") +ufunc_hankel1e_data[0] = &ufunc_hankel1e_ptr[2*0] +ufunc_hankel1e_data[1] = &ufunc_hankel1e_ptr[2*1] +hankel1e = np.PyUFunc_FromFuncAndData(ufunc_hankel1e_loops, ufunc_hankel1e_data, ufunc_hankel1e_types, 2, 2, 1, 0, "hankel1e", ufunc_hankel1e_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_hankel2_loops[2] +cdef void *ufunc_hankel2_ptr[4] +cdef void *ufunc_hankel2_data[2] +cdef char ufunc_hankel2_types[6] +cdef char *ufunc_hankel2_doc = ( + "hankel2(v, z, out=None)\n" + "\n" + "Hankel function of the second kind\n" + "\n" + "Parameters\n" + "----------\n" + "v : array_like\n" + " Order (float).\n" + "z : array_like\n" + " Argument (float or complex).\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the Hankel 
function of the second kind.\n" + "\n" + "See Also\n" + "--------\n" + "hankel2e : this function with leading exponential behavior stripped off.\n" + "\n" + "Notes\n" + "-----\n" + "A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the\n" + "computation using the relation,\n" + "\n" + ".. math:: H^{(2)}_v(z) =\n" + " -\\frac{2}{\\imath\\pi} \\exp(\\imath \\pi v/2) K_v(z \\exp(\\imath\\pi/2))\n" + "\n" + "where :math:`K_v` is the modified Bessel function of the second kind.\n" + "For negative orders, the relation\n" + "\n" + ".. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \\exp(-\\imath\\pi v)\n" + "\n" + "is used.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Donald E. Amos, \"AMOS, A Portable Package for Bessel Functions\n" + " of a Complex Argument and Nonnegative Order\",\n" + " http://netlib.org/amos/") +ufunc_hankel2_loops[0] = loop_D_dD__As_fF_F +ufunc_hankel2_loops[1] = loop_D_dD__As_dD_D +ufunc_hankel2_types[0] = NPY_FLOAT +ufunc_hankel2_types[1] = NPY_CFLOAT +ufunc_hankel2_types[2] = NPY_CFLOAT +ufunc_hankel2_types[3] = NPY_DOUBLE +ufunc_hankel2_types[4] = NPY_CDOUBLE +ufunc_hankel2_types[5] = NPY_CDOUBLE +ufunc_hankel2_ptr[2*0] = _func_cbesh_wrap2 +ufunc_hankel2_ptr[2*0+1] = ("hankel2") +ufunc_hankel2_ptr[2*1] = _func_cbesh_wrap2 +ufunc_hankel2_ptr[2*1+1] = ("hankel2") +ufunc_hankel2_data[0] = &ufunc_hankel2_ptr[2*0] +ufunc_hankel2_data[1] = &ufunc_hankel2_ptr[2*1] +hankel2 = np.PyUFunc_FromFuncAndData(ufunc_hankel2_loops, ufunc_hankel2_data, ufunc_hankel2_types, 2, 2, 1, 0, "hankel2", ufunc_hankel2_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_hankel2e_loops[2] +cdef void *ufunc_hankel2e_ptr[4] +cdef void *ufunc_hankel2e_data[2] +cdef char ufunc_hankel2e_types[6] +cdef char *ufunc_hankel2e_doc = ( + "hankel2e(v, z, out=None)\n" + "\n" + "Exponentially scaled Hankel function of the second kind\n" + "\n" + "Defined as::\n" + "\n" + " hankel2e(v, z) = hankel2(v, z) * exp(1j * z)\n" + "\n" + "Parameters\n" + "----------\n" + "v : 
array_like\n" + " Order (float).\n" + "z : array_like\n" + " Argument (float or complex).\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the exponentially scaled Hankel function of the second kind.\n" + "\n" + "Notes\n" + "-----\n" + "A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the\n" + "computation using the relation,\n" + "\n" + ".. math:: H^{(2)}_v(z) = -\\frac{2}{\\imath\\pi}\n" + " \\exp(\\frac{\\imath \\pi v}{2}) K_v(z exp(\\frac{\\imath\\pi}{2}))\n" + "\n" + "where :math:`K_v` is the modified Bessel function of the second kind.\n" + "For negative orders, the relation\n" + "\n" + ".. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \\exp(-\\imath\\pi v)\n" + "\n" + "is used.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Donald E. Amos, \"AMOS, A Portable Package for Bessel Functions\n" + " of a Complex Argument and Nonnegative Order\",\n" + " http://netlib.org/amos/") +ufunc_hankel2e_loops[0] = loop_D_dD__As_fF_F +ufunc_hankel2e_loops[1] = loop_D_dD__As_dD_D +ufunc_hankel2e_types[0] = NPY_FLOAT +ufunc_hankel2e_types[1] = NPY_CFLOAT +ufunc_hankel2e_types[2] = NPY_CFLOAT +ufunc_hankel2e_types[3] = NPY_DOUBLE +ufunc_hankel2e_types[4] = NPY_CDOUBLE +ufunc_hankel2e_types[5] = NPY_CDOUBLE +ufunc_hankel2e_ptr[2*0] = _func_cbesh_wrap2_e +ufunc_hankel2e_ptr[2*0+1] = ("hankel2e") +ufunc_hankel2e_ptr[2*1] = _func_cbesh_wrap2_e +ufunc_hankel2e_ptr[2*1+1] = ("hankel2e") +ufunc_hankel2e_data[0] = &ufunc_hankel2e_ptr[2*0] +ufunc_hankel2e_data[1] = &ufunc_hankel2e_ptr[2*1] +hankel2e = np.PyUFunc_FromFuncAndData(ufunc_hankel2e_loops, ufunc_hankel2e_data, ufunc_hankel2e_types, 2, 2, 1, 0, "hankel2e", ufunc_hankel2e_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_huber_loops[2] +cdef void *ufunc_huber_ptr[4] +cdef void *ufunc_huber_data[2] +cdef char ufunc_huber_types[6] +cdef char *ufunc_huber_doc = ( + "huber(delta, r, out=None)\n" + "\n" + "Huber 
loss function.\n" + "\n" + ".. math:: \\text{huber}(\\delta, r) = \\begin{cases} \\infty & \\delta < 0 \\\\\n" + " \\frac{1}{2}r^2 & 0 \\le \\delta, | r | \\le \\delta \\\\\n" + " \\delta ( |r| - \\frac{1}{2}\\delta ) & \\text{otherwise} \\end{cases}\n" + "\n" + "Parameters\n" + "----------\n" + "delta : ndarray\n" + " Input array, indicating the quadratic vs. linear loss changepoint.\n" + "r : ndarray\n" + " Input array, possibly representing residuals.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The computed Huber loss function values.\n" + "\n" + "See Also\n" + "--------\n" + "pseudo_huber : smooth approximation of this function\n" + "\n" + "Notes\n" + "-----\n" + "`huber` is useful as a loss function in robust statistics or machine\n" + "learning to reduce the influence of outliers as compared to the common\n" + "squared error loss, residuals with a magnitude higher than `delta` are\n" + "not squared [1]_.\n" + "\n" + "Typically, `r` represents residuals, the difference\n" + "between a model prediction and data. Then, for :math:`|r|\\leq\\delta`,\n" + "`huber` resembles the squared error and for :math:`|r|>\\delta` the\n" + "absolute error. This way, the Huber loss often achieves\n" + "a fast convergence in model fitting for small residuals like the squared\n" + "error loss function and still reduces the influence of outliers\n" + "(:math:`|r|>\\delta`) like the absolute error loss. As :math:`\\delta` is\n" + "the cutoff between squared and absolute error regimes, it has\n" + "to be tuned carefully for each problem. `huber` is also\n" + "convex, making it suitable for gradient based optimization.\n" + "\n" + ".. versionadded:: 0.15.0\n" + "\n" + "References\n" + "----------\n" + ".. [1] Peter Huber. \"Robust Estimation of a Location Parameter\",\n" + " 1964. Annals of Statistics. 
53 (1): 73 - 101.\n" + "\n" + "Examples\n" + "--------\n" + "Import all necessary modules.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import huber\n" + ">>> import matplotlib.pyplot as plt\n" + "\n" + "Compute the function for ``delta=1`` at ``r=2``\n" + "\n" + ">>> huber(1., 2.)\n" + "1.5\n" + "\n" + "Compute the function for different `delta` by providing a NumPy array or\n" + "list for `delta`.\n" + "\n" + ">>> huber([1., 3., 5.], 4.)\n" + "array([3.5, 7.5, 8. ])\n" + "\n" + "Compute the function at different points by providing a NumPy array or\n" + "list for `r`.\n" + "\n" + ">>> huber(2., np.array([1., 1.5, 3.]))\n" + "array([0.5 , 1.125, 4. ])\n" + "\n" + "The function can be calculated for different `delta` and `r` by\n" + "providing arrays for both with compatible shapes for broadcasting.\n" + "\n" + ">>> r = np.array([1., 2.5, 8., 10.])\n" + ">>> deltas = np.array([[1.], [5.], [9.]])\n" + ">>> print(r.shape, deltas.shape)\n" + "(4,) (3, 1)\n" + "\n" + ">>> huber(deltas, r)\n" + "array([[ 0.5 , 2. , 7.5 , 9.5 ],\n" + " [ 0.5 , 3.125, 27.5 , 37.5 ],\n" + " [ 0.5 , 3.125, 32. , 49.5 ]])\n" + "\n" + "Plot the function for different `delta`.\n" + "\n" + ">>> x = np.linspace(-4, 4, 500)\n" + ">>> deltas = [1, 2, 3]\n" + ">>> linestyles = [\"dashed\", \"dotted\", \"dashdot\"]\n" + ">>> fig, ax = plt.subplots()\n" + ">>> combined_plot_parameters = list(zip(deltas, linestyles))\n" + ">>> for delta, style in combined_plot_parameters:\n" + "... 
ax.plot(x, huber(delta, x), label=fr\"$\\delta={delta}$\", ls=style)\n" + ">>> ax.legend(loc=\"upper center\")\n" + ">>> ax.set_xlabel(\"$x$\")\n" + ">>> ax.set_title(r\"Huber loss function $h_{\\delta}(x)$\")\n" + ">>> ax.set_xlim(-4, 4)\n" + ">>> ax.set_ylim(0, 8)\n" + ">>> plt.show()") +ufunc_huber_loops[0] = loop_d_dd__As_ff_f +ufunc_huber_loops[1] = loop_d_dd__As_dd_d +ufunc_huber_types[0] = NPY_FLOAT +ufunc_huber_types[1] = NPY_FLOAT +ufunc_huber_types[2] = NPY_FLOAT +ufunc_huber_types[3] = NPY_DOUBLE +ufunc_huber_types[4] = NPY_DOUBLE +ufunc_huber_types[5] = NPY_DOUBLE +ufunc_huber_ptr[2*0] = _func_huber +ufunc_huber_ptr[2*0+1] = ("huber") +ufunc_huber_ptr[2*1] = _func_huber +ufunc_huber_ptr[2*1+1] = ("huber") +ufunc_huber_data[0] = &ufunc_huber_ptr[2*0] +ufunc_huber_data[1] = &ufunc_huber_ptr[2*1] +huber = np.PyUFunc_FromFuncAndData(ufunc_huber_loops, ufunc_huber_data, ufunc_huber_types, 2, 2, 1, 0, "huber", ufunc_huber_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_hyp0f1_loops[4] +cdef void *ufunc_hyp0f1_ptr[8] +cdef void *ufunc_hyp0f1_data[4] +cdef char ufunc_hyp0f1_types[12] +cdef char *ufunc_hyp0f1_doc = ( + "hyp0f1(v, z, out=None)\n" + "\n" + "Confluent hypergeometric limit function 0F1.\n" + "\n" + "Parameters\n" + "----------\n" + "v : array_like\n" + " Real-valued parameter\n" + "z : array_like\n" + " Real- or complex-valued argument\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The confluent hypergeometric limit function\n" + "\n" + "Notes\n" + "-----\n" + "This function is defined as:\n" + "\n" + ".. math:: _0F_1(v, z) = \\sum_{k=0}^{\\infty}\\frac{z^k}{(v)_k k!}.\n" + "\n" + "It's also the limit as :math:`q \\to \\infty` of :math:`_1F_1(q; v; z/q)`,\n" + "and satisfies the differential equation :math:`f''(z) + vf'(z) =\n" + "f(z)`. See [1]_ for more information.\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Wolfram MathWorld, \"Confluent Hypergeometric Limit Function\",\n" + " http://mathworld.wolfram.com/ConfluentHypergeometricLimitFunction.html\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "It is one when `z` is zero.\n" + "\n" + ">>> sc.hyp0f1(1, 0)\n" + "1.0\n" + "\n" + "It is the limit of the confluent hypergeometric function as `q`\n" + "goes to infinity.\n" + "\n" + ">>> q = np.array([1, 10, 100, 1000])\n" + ">>> v = 1\n" + ">>> z = 1\n" + ">>> sc.hyp1f1(q, v, z / q)\n" + "array([2.71828183, 2.31481985, 2.28303778, 2.27992985])\n" + ">>> sc.hyp0f1(v, z)\n" + "2.2795853023360673\n" + "\n" + "It is related to Bessel functions.\n" + "\n" + ">>> n = 1\n" + ">>> x = np.linspace(0, 1, 5)\n" + ">>> sc.jv(n, x)\n" + "array([0. , 0.12402598, 0.24226846, 0.3492436 , 0.44005059])\n" + ">>> (0.5 * x)**n / sc.factorial(n) * sc.hyp0f1(n + 1, -0.25 * x**2)\n" + "array([0. , 0.12402598, 0.24226846, 0.3492436 , 0.44005059])") +ufunc_hyp0f1_loops[0] = loop_d_dd__As_ff_f +ufunc_hyp0f1_loops[1] = loop_D_dD__As_fF_F +ufunc_hyp0f1_loops[2] = loop_d_dd__As_dd_d +ufunc_hyp0f1_loops[3] = loop_D_dD__As_dD_D +ufunc_hyp0f1_types[0] = NPY_FLOAT +ufunc_hyp0f1_types[1] = NPY_FLOAT +ufunc_hyp0f1_types[2] = NPY_FLOAT +ufunc_hyp0f1_types[3] = NPY_FLOAT +ufunc_hyp0f1_types[4] = NPY_CFLOAT +ufunc_hyp0f1_types[5] = NPY_CFLOAT +ufunc_hyp0f1_types[6] = NPY_DOUBLE +ufunc_hyp0f1_types[7] = NPY_DOUBLE +ufunc_hyp0f1_types[8] = NPY_DOUBLE +ufunc_hyp0f1_types[9] = NPY_DOUBLE +ufunc_hyp0f1_types[10] = NPY_CDOUBLE +ufunc_hyp0f1_types[11] = NPY_CDOUBLE +ufunc_hyp0f1_ptr[2*0] = _func__hyp0f1_real +ufunc_hyp0f1_ptr[2*0+1] = ("hyp0f1") +ufunc_hyp0f1_ptr[2*1] = _func__hyp0f1_cmplx +ufunc_hyp0f1_ptr[2*1+1] = ("hyp0f1") +ufunc_hyp0f1_ptr[2*2] = _func__hyp0f1_real +ufunc_hyp0f1_ptr[2*2+1] = ("hyp0f1") +ufunc_hyp0f1_ptr[2*3] = _func__hyp0f1_cmplx +ufunc_hyp0f1_ptr[2*3+1] = ("hyp0f1") +ufunc_hyp0f1_data[0] = &ufunc_hyp0f1_ptr[2*0] 
+ufunc_hyp0f1_data[1] = &ufunc_hyp0f1_ptr[2*1] +ufunc_hyp0f1_data[2] = &ufunc_hyp0f1_ptr[2*2] +ufunc_hyp0f1_data[3] = &ufunc_hyp0f1_ptr[2*3] +hyp0f1 = np.PyUFunc_FromFuncAndData(ufunc_hyp0f1_loops, ufunc_hyp0f1_data, ufunc_hyp0f1_types, 4, 2, 1, 0, "hyp0f1", ufunc_hyp0f1_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_hyp1f1_loops[4] +cdef void *ufunc_hyp1f1_ptr[8] +cdef void *ufunc_hyp1f1_data[4] +cdef char ufunc_hyp1f1_types[16] +cdef char *ufunc_hyp1f1_doc = ( + "hyp1f1(a, b, x, out=None)\n" + "\n" + "Confluent hypergeometric function 1F1.\n" + "\n" + "The confluent hypergeometric function is defined by the series\n" + "\n" + ".. math::\n" + "\n" + " {}_1F_1(a; b; x) = \\sum_{k = 0}^\\infty \\frac{(a)_k}{(b)_k k!} x^k.\n" + "\n" + "See [dlmf]_ for more details. Here :math:`(\\cdot)_k` is the\n" + "Pochhammer symbol; see `poch`.\n" + "\n" + "Parameters\n" + "----------\n" + "a, b : array_like\n" + " Real parameters\n" + "x : array_like\n" + " Real or complex argument\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the confluent hypergeometric function\n" + "\n" + "See Also\n" + "--------\n" + "hyperu : another confluent hypergeometric function\n" + "hyp0f1 : confluent hypergeometric limit function\n" + "hyp2f1 : Gaussian hypergeometric function\n" + "\n" + "References\n" + "----------\n" + ".. 
[dlmf] NIST Digital Library of Mathematical Functions\n" + " https://dlmf.nist.gov/13.2#E2\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "It is one when `x` is zero:\n" + "\n" + ">>> sc.hyp1f1(0.5, 0.5, 0)\n" + "1.0\n" + "\n" + "It is singular when `b` is a nonpositive integer.\n" + "\n" + ">>> sc.hyp1f1(0.5, -1, 0)\n" + "inf\n" + "\n" + "It is a polynomial when `a` is a nonpositive integer.\n" + "\n" + ">>> a, b, x = -1, 0.5, np.array([1.0, 2.0, 3.0, 4.0])\n" + ">>> sc.hyp1f1(a, b, x)\n" + "array([-1., -3., -5., -7.])\n" + ">>> 1 + (a / b) * x\n" + "array([-1., -3., -5., -7.])\n" + "\n" + "It reduces to the exponential function when `a = b`.\n" + "\n" + ">>> sc.hyp1f1(2, 2, [1, 2, 3, 4])\n" + "array([ 2.71828183, 7.3890561 , 20.08553692, 54.59815003])\n" + ">>> np.exp([1, 2, 3, 4])\n" + "array([ 2.71828183, 7.3890561 , 20.08553692, 54.59815003])") +ufunc_hyp1f1_loops[0] = loop_d_ddd__As_fff_f +ufunc_hyp1f1_loops[1] = loop_D_ddD__As_ffF_F +ufunc_hyp1f1_loops[2] = loop_d_ddd__As_ddd_d +ufunc_hyp1f1_loops[3] = loop_D_ddD__As_ddD_D +ufunc_hyp1f1_types[0] = NPY_FLOAT +ufunc_hyp1f1_types[1] = NPY_FLOAT +ufunc_hyp1f1_types[2] = NPY_FLOAT +ufunc_hyp1f1_types[3] = NPY_FLOAT +ufunc_hyp1f1_types[4] = NPY_FLOAT +ufunc_hyp1f1_types[5] = NPY_FLOAT +ufunc_hyp1f1_types[6] = NPY_CFLOAT +ufunc_hyp1f1_types[7] = NPY_CFLOAT +ufunc_hyp1f1_types[8] = NPY_DOUBLE +ufunc_hyp1f1_types[9] = NPY_DOUBLE +ufunc_hyp1f1_types[10] = NPY_DOUBLE +ufunc_hyp1f1_types[11] = NPY_DOUBLE +ufunc_hyp1f1_types[12] = NPY_DOUBLE +ufunc_hyp1f1_types[13] = NPY_DOUBLE +ufunc_hyp1f1_types[14] = NPY_CDOUBLE +ufunc_hyp1f1_types[15] = NPY_CDOUBLE +ufunc_hyp1f1_ptr[2*0] = scipy.special._ufuncs_cxx._export_hyp1f1_double +ufunc_hyp1f1_ptr[2*0+1] = ("hyp1f1") +ufunc_hyp1f1_ptr[2*1] = _func_chyp1f1_wrap +ufunc_hyp1f1_ptr[2*1+1] = ("hyp1f1") +ufunc_hyp1f1_ptr[2*2] = scipy.special._ufuncs_cxx._export_hyp1f1_double +ufunc_hyp1f1_ptr[2*2+1] = ("hyp1f1") 
+ufunc_hyp1f1_ptr[2*3] = _func_chyp1f1_wrap +ufunc_hyp1f1_ptr[2*3+1] = ("hyp1f1") +ufunc_hyp1f1_data[0] = &ufunc_hyp1f1_ptr[2*0] +ufunc_hyp1f1_data[1] = &ufunc_hyp1f1_ptr[2*1] +ufunc_hyp1f1_data[2] = &ufunc_hyp1f1_ptr[2*2] +ufunc_hyp1f1_data[3] = &ufunc_hyp1f1_ptr[2*3] +hyp1f1 = np.PyUFunc_FromFuncAndData(ufunc_hyp1f1_loops, ufunc_hyp1f1_data, ufunc_hyp1f1_types, 4, 3, 1, 0, "hyp1f1", ufunc_hyp1f1_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_hyp2f1_loops[4] +cdef void *ufunc_hyp2f1_ptr[8] +cdef void *ufunc_hyp2f1_data[4] +cdef char ufunc_hyp2f1_types[20] +cdef char *ufunc_hyp2f1_doc = ( + "hyp2f1(a, b, c, z, out=None)\n" + "\n" + "Gauss hypergeometric function 2F1(a, b; c; z)\n" + "\n" + "Parameters\n" + "----------\n" + "a, b, c : array_like\n" + " Arguments, should be real-valued.\n" + "z : array_like\n" + " Argument, real or complex.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "hyp2f1 : scalar or ndarray\n" + " The values of the gaussian hypergeometric function.\n" + "\n" + "See Also\n" + "--------\n" + "hyp0f1 : confluent hypergeometric limit function.\n" + "hyp1f1 : Kummer's (confluent hypergeometric) function.\n" + "\n" + "Notes\n" + "-----\n" + "This function is defined for :math:`|z| < 1` as\n" + "\n" + ".. math::\n" + "\n" + " \\mathrm{hyp2f1}(a, b, c, z) = \\sum_{n=0}^\\infty\n" + " \\frac{(a)_n (b)_n}{(c)_n}\\frac{z^n}{n!},\n" + "\n" + "and defined on the rest of the complex z-plane by analytic\n" + "continuation [1]_.\n" + "Here :math:`(\\cdot)_n` is the Pochhammer symbol; see `poch`. When\n" + ":math:`n` is an integer the result is a polynomial of degree :math:`n`.\n" + "\n" + "The implementation for complex values of ``z`` is described in [2]_,\n" + "except for ``z`` in the region defined by\n" + "\n" + ".. 
math::\n" + "\n" + " 0.9 <= \\left|z\\right| < 1.1,\n" + " \\left|1 - z\\right| >= 0.9,\n" + " \\mathrm{real}(z) >= 0\n" + "\n" + "in which the implementation follows [4]_.\n" + "\n" + "References\n" + "----------\n" + ".. [1] NIST Digital Library of Mathematical Functions\n" + " https://dlmf.nist.gov/15.2\n" + ".. [2] S. Zhang and J.M. Jin, \"Computation of Special Functions\", Wiley 1996\n" + ".. [3] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + ".. [4] J.L. Lopez and N.M. Temme, \"New series expansions of the Gauss\n" + " hypergeometric function\", Adv Comput Math 39, 349-365 (2013).\n" + " https://doi.org/10.1007/s10444-012-9283-y\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "It has poles when `c` is a negative integer.\n" + "\n" + ">>> sc.hyp2f1(1, 1, -2, 1)\n" + "inf\n" + "\n" + "It is a polynomial when `a` or `b` is a negative integer.\n" + "\n" + ">>> a, b, c = -1, 1, 1.5\n" + ">>> z = np.linspace(0, 1, 5)\n" + ">>> sc.hyp2f1(a, b, c, z)\n" + "array([1. , 0.83333333, 0.66666667, 0.5 , 0.33333333])\n" + ">>> 1 + a * b * z / c\n" + "array([1. , 0.83333333, 0.66666667, 0.5 , 0.33333333])\n" + "\n" + "It is symmetric in `a` and `b`.\n" + "\n" + ">>> a = np.linspace(0, 1, 5)\n" + ">>> b = np.linspace(0, 1, 5)\n" + ">>> sc.hyp2f1(a, b, 1, 0.5)\n" + "array([1. , 1.03997334, 1.1803406 , 1.47074441, 2. ])\n" + ">>> sc.hyp2f1(b, a, 1, 0.5)\n" + "array([1. , 1.03997334, 1.1803406 , 1.47074441, 2. 
])\n" + "\n" + "It contains many other functions as special cases.\n" + "\n" + ">>> z = 0.5\n" + ">>> sc.hyp2f1(1, 1, 2, z)\n" + "1.3862943611198901\n" + ">>> -np.log(1 - z) / z\n" + "1.3862943611198906\n" + "\n" + ">>> sc.hyp2f1(0.5, 1, 1.5, z**2)\n" + "1.098612288668109\n" + ">>> np.log((1 + z) / (1 - z)) / (2 * z)\n" + "1.0986122886681098\n" + "\n" + ">>> sc.hyp2f1(0.5, 1, 1.5, -z**2)\n" + "0.9272952180016117\n" + ">>> np.arctan(z) / z\n" + "0.9272952180016122") +ufunc_hyp2f1_loops[0] = loop_d_dddd__As_ffff_f +ufunc_hyp2f1_loops[1] = loop_D_dddD__As_fffF_F +ufunc_hyp2f1_loops[2] = loop_d_dddd__As_dddd_d +ufunc_hyp2f1_loops[3] = loop_D_dddD__As_dddD_D +ufunc_hyp2f1_types[0] = NPY_FLOAT +ufunc_hyp2f1_types[1] = NPY_FLOAT +ufunc_hyp2f1_types[2] = NPY_FLOAT +ufunc_hyp2f1_types[3] = NPY_FLOAT +ufunc_hyp2f1_types[4] = NPY_FLOAT +ufunc_hyp2f1_types[5] = NPY_FLOAT +ufunc_hyp2f1_types[6] = NPY_FLOAT +ufunc_hyp2f1_types[7] = NPY_FLOAT +ufunc_hyp2f1_types[8] = NPY_CFLOAT +ufunc_hyp2f1_types[9] = NPY_CFLOAT +ufunc_hyp2f1_types[10] = NPY_DOUBLE +ufunc_hyp2f1_types[11] = NPY_DOUBLE +ufunc_hyp2f1_types[12] = NPY_DOUBLE +ufunc_hyp2f1_types[13] = NPY_DOUBLE +ufunc_hyp2f1_types[14] = NPY_DOUBLE +ufunc_hyp2f1_types[15] = NPY_DOUBLE +ufunc_hyp2f1_types[16] = NPY_DOUBLE +ufunc_hyp2f1_types[17] = NPY_DOUBLE +ufunc_hyp2f1_types[18] = NPY_CDOUBLE +ufunc_hyp2f1_types[19] = NPY_CDOUBLE +ufunc_hyp2f1_ptr[2*0] = _func_hyp2f1 +ufunc_hyp2f1_ptr[2*0+1] = ("hyp2f1") +ufunc_hyp2f1_ptr[2*1] = _func_hyp2f1_complex +ufunc_hyp2f1_ptr[2*1+1] = ("hyp2f1") +ufunc_hyp2f1_ptr[2*2] = _func_hyp2f1 +ufunc_hyp2f1_ptr[2*2+1] = ("hyp2f1") +ufunc_hyp2f1_ptr[2*3] = _func_hyp2f1_complex +ufunc_hyp2f1_ptr[2*3+1] = ("hyp2f1") +ufunc_hyp2f1_data[0] = &ufunc_hyp2f1_ptr[2*0] +ufunc_hyp2f1_data[1] = &ufunc_hyp2f1_ptr[2*1] +ufunc_hyp2f1_data[2] = &ufunc_hyp2f1_ptr[2*2] +ufunc_hyp2f1_data[3] = &ufunc_hyp2f1_ptr[2*3] +hyp2f1 = np.PyUFunc_FromFuncAndData(ufunc_hyp2f1_loops, ufunc_hyp2f1_data, ufunc_hyp2f1_types, 4, 4, 1, 
0, "hyp2f1", ufunc_hyp2f1_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_hyperu_loops[2] +cdef void *ufunc_hyperu_ptr[4] +cdef void *ufunc_hyperu_data[2] +cdef char ufunc_hyperu_types[8] +cdef char *ufunc_hyperu_doc = ( + "hyperu(a, b, x, out=None)\n" + "\n" + "Confluent hypergeometric function U\n" + "\n" + "It is defined as the solution to the equation\n" + "\n" + ".. math::\n" + "\n" + " x \\frac{d^2w}{dx^2} + (b - x) \\frac{dw}{dx} - aw = 0\n" + "\n" + "which satisfies the property\n" + "\n" + ".. math::\n" + "\n" + " U(a, b, x) \\sim x^{-a}\n" + "\n" + "as :math:`x \\to \\infty`. See [dlmf]_ for more details.\n" + "\n" + "Parameters\n" + "----------\n" + "a, b : array_like\n" + " Real-valued parameters\n" + "x : array_like\n" + " Real-valued argument\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of `U`\n" + "\n" + "References\n" + "----------\n" + ".. [dlmf] NIST Digital Library of Mathematics Functions\n" + " https://dlmf.nist.gov/13.2#E6\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "It has a branch cut along the negative `x` axis.\n" + "\n" + ">>> x = np.linspace(-0.1, -10, 5)\n" + ">>> sc.hyperu(1, 1, x)\n" + "array([nan, nan, nan, nan, nan])\n" + "\n" + "It approaches zero as `x` goes to infinity.\n" + "\n" + ">>> x = np.array([1, 10, 100])\n" + ">>> sc.hyperu(1, 1, x)\n" + "array([0.59634736, 0.09156333, 0.00990194])\n" + "\n" + "It satisfies Kummer's transformation.\n" + "\n" + ">>> a, b, x = 2, 1, 1\n" + ">>> sc.hyperu(a, b, x)\n" + "0.1926947246463881\n" + ">>> x**(1 - b) * sc.hyperu(a - b + 1, 2 - b, x)\n" + "0.1926947246463881") +ufunc_hyperu_loops[0] = loop_d_ddd__As_fff_f +ufunc_hyperu_loops[1] = loop_d_ddd__As_ddd_d +ufunc_hyperu_types[0] = NPY_FLOAT +ufunc_hyperu_types[1] = NPY_FLOAT +ufunc_hyperu_types[2] = NPY_FLOAT +ufunc_hyperu_types[3] = NPY_FLOAT 
+ufunc_hyperu_types[4] = NPY_DOUBLE +ufunc_hyperu_types[5] = NPY_DOUBLE +ufunc_hyperu_types[6] = NPY_DOUBLE +ufunc_hyperu_types[7] = NPY_DOUBLE +ufunc_hyperu_ptr[2*0] = _func_hyperu +ufunc_hyperu_ptr[2*0+1] = ("hyperu") +ufunc_hyperu_ptr[2*1] = _func_hyperu +ufunc_hyperu_ptr[2*1+1] = ("hyperu") +ufunc_hyperu_data[0] = &ufunc_hyperu_ptr[2*0] +ufunc_hyperu_data[1] = &ufunc_hyperu_ptr[2*1] +hyperu = np.PyUFunc_FromFuncAndData(ufunc_hyperu_loops, ufunc_hyperu_data, ufunc_hyperu_types, 2, 3, 1, 0, "hyperu", ufunc_hyperu_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_i0_loops[2] +cdef void *ufunc_i0_ptr[4] +cdef void *ufunc_i0_data[2] +cdef char ufunc_i0_types[4] +cdef char *ufunc_i0_doc = ( + "i0(x, out=None)\n" + "\n" + "Modified Bessel function of order 0.\n" + "\n" + "Defined as,\n" + "\n" + ".. math::\n" + " I_0(x) = \\sum_{k=0}^\\infty \\frac{(x^2/4)^k}{(k!)^2} = J_0(\\imath x),\n" + "\n" + "where :math:`J_0` is the Bessel function of the first kind of order 0.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Argument (float)\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "I : scalar or ndarray\n" + " Value of the modified Bessel function of order 0 at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "iv: Modified Bessel function of any order\n" + "i0e: Exponentially scaled modified Bessel function of order 0\n" + "\n" + "Notes\n" + "-----\n" + "The range is partitioned into the two intervals [0, 8] and (8, infinity).\n" + "Chebyshev polynomial expansions are employed in each interval.\n" + "\n" + "This function is a wrapper for the Cephes [1]_ routine `i0`.\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "Calculate the function at one point:\n" + "\n" + ">>> from scipy.special import i0\n" + ">>> i0(1.)\n" + "1.2660658777520082\n" + "\n" + "Calculate at several points:\n" + "\n" + ">>> import numpy as np\n" + ">>> i0(np.array([-2., 0., 3.5]))\n" + "array([2.2795853 , 1. , 7.37820343])\n" + "\n" + "Plot the function from -10 to 10.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(-10., 10., 1000)\n" + ">>> y = i0(x)\n" + ">>> ax.plot(x, y)\n" + ">>> plt.show()") +ufunc_i0_loops[0] = loop_d_d__As_f_f +ufunc_i0_loops[1] = loop_d_d__As_d_d +ufunc_i0_types[0] = NPY_FLOAT +ufunc_i0_types[1] = NPY_FLOAT +ufunc_i0_types[2] = NPY_DOUBLE +ufunc_i0_types[3] = NPY_DOUBLE +ufunc_i0_ptr[2*0] = _func_i0 +ufunc_i0_ptr[2*0+1] = ("i0") +ufunc_i0_ptr[2*1] = _func_i0 +ufunc_i0_ptr[2*1+1] = ("i0") +ufunc_i0_data[0] = &ufunc_i0_ptr[2*0] +ufunc_i0_data[1] = &ufunc_i0_ptr[2*1] +i0 = np.PyUFunc_FromFuncAndData(ufunc_i0_loops, ufunc_i0_data, ufunc_i0_types, 2, 1, 1, 0, "i0", ufunc_i0_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_i0e_loops[2] +cdef void *ufunc_i0e_ptr[4] +cdef void *ufunc_i0e_data[2] +cdef char ufunc_i0e_types[4] +cdef char *ufunc_i0e_doc = ( + "i0e(x, out=None)\n" + "\n" + "Exponentially scaled modified Bessel function of order 0.\n" + "\n" + "Defined as::\n" + "\n" + " i0e(x) = exp(-abs(x)) * i0(x).\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Argument (float)\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "I : scalar or ndarray\n" + " Value of the exponentially scaled modified Bessel function of order 0\n" + " at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "iv: Modified Bessel function of the first kind\n" + "i0: Modified Bessel function of order 0\n" + "\n" + "Notes\n" + "-----\n" + "The 
range is partitioned into the two intervals [0, 8] and (8, infinity).\n" + "Chebyshev polynomial expansions are employed in each interval. The\n" + "polynomial expansions used are the same as those in `i0`, but\n" + "they are not multiplied by the dominant exponential factor.\n" + "\n" + "This function is a wrapper for the Cephes [1]_ routine `i0e`. `i0e`\n" + "is useful for large arguments `x`: for these, `i0` quickly overflows.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "In the following example `i0` returns infinity whereas `i0e` still returns\n" + "a finite number.\n" + "\n" + ">>> from scipy.special import i0, i0e\n" + ">>> i0(1000.), i0e(1000.)\n" + "(inf, 0.012617240455891257)\n" + "\n" + "Calculate the function at several points by providing a NumPy array or\n" + "list for `x`:\n" + "\n" + ">>> import numpy as np\n" + ">>> i0e(np.array([-2., 0., 3.]))\n" + "array([0.30850832, 1. 
, 0.24300035])\n" + "\n" + "Plot the function from -10 to 10.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(-10., 10., 1000)\n" + ">>> y = i0e(x)\n" + ">>> ax.plot(x, y)\n" + ">>> plt.show()") +ufunc_i0e_loops[0] = loop_d_d__As_f_f +ufunc_i0e_loops[1] = loop_d_d__As_d_d +ufunc_i0e_types[0] = NPY_FLOAT +ufunc_i0e_types[1] = NPY_FLOAT +ufunc_i0e_types[2] = NPY_DOUBLE +ufunc_i0e_types[3] = NPY_DOUBLE +ufunc_i0e_ptr[2*0] = _func_i0e +ufunc_i0e_ptr[2*0+1] = ("i0e") +ufunc_i0e_ptr[2*1] = _func_i0e +ufunc_i0e_ptr[2*1+1] = ("i0e") +ufunc_i0e_data[0] = &ufunc_i0e_ptr[2*0] +ufunc_i0e_data[1] = &ufunc_i0e_ptr[2*1] +i0e = np.PyUFunc_FromFuncAndData(ufunc_i0e_loops, ufunc_i0e_data, ufunc_i0e_types, 2, 1, 1, 0, "i0e", ufunc_i0e_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_i1_loops[2] +cdef void *ufunc_i1_ptr[4] +cdef void *ufunc_i1_data[2] +cdef char ufunc_i1_types[4] +cdef char *ufunc_i1_doc = ( + "i1(x, out=None)\n" + "\n" + "Modified Bessel function of order 1.\n" + "\n" + "Defined as,\n" + "\n" + ".. math::\n" + " I_1(x) = \\frac{1}{2}x \\sum_{k=0}^\\infty \\frac{(x^2/4)^k}{k! 
(k + 1)!}\n" + " = -\\imath J_1(\\imath x),\n" + "\n" + "where :math:`J_1` is the Bessel function of the first kind of order 1.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Argument (float)\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "I : scalar or ndarray\n" + " Value of the modified Bessel function of order 1 at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "iv: Modified Bessel function of the first kind\n" + "i1e: Exponentially scaled modified Bessel function of order 1\n" + "\n" + "Notes\n" + "-----\n" + "The range is partitioned into the two intervals [0, 8] and (8, infinity).\n" + "Chebyshev polynomial expansions are employed in each interval.\n" + "\n" + "This function is a wrapper for the Cephes [1]_ routine `i1`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "Calculate the function at one point:\n" + "\n" + ">>> from scipy.special import i1\n" + ">>> i1(1.)\n" + "0.5651591039924851\n" + "\n" + "Calculate the function at several points:\n" + "\n" + ">>> import numpy as np\n" + ">>> i1(np.array([-2., 0., 6.]))\n" + "array([-1.59063685, 0. 
, 61.34193678])\n" + "\n" + "Plot the function between -10 and 10.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(-10., 10., 1000)\n" + ">>> y = i1(x)\n" + ">>> ax.plot(x, y)\n" + ">>> plt.show()") +ufunc_i1_loops[0] = loop_d_d__As_f_f +ufunc_i1_loops[1] = loop_d_d__As_d_d +ufunc_i1_types[0] = NPY_FLOAT +ufunc_i1_types[1] = NPY_FLOAT +ufunc_i1_types[2] = NPY_DOUBLE +ufunc_i1_types[3] = NPY_DOUBLE +ufunc_i1_ptr[2*0] = _func_i1 +ufunc_i1_ptr[2*0+1] = ("i1") +ufunc_i1_ptr[2*1] = _func_i1 +ufunc_i1_ptr[2*1+1] = ("i1") +ufunc_i1_data[0] = &ufunc_i1_ptr[2*0] +ufunc_i1_data[1] = &ufunc_i1_ptr[2*1] +i1 = np.PyUFunc_FromFuncAndData(ufunc_i1_loops, ufunc_i1_data, ufunc_i1_types, 2, 1, 1, 0, "i1", ufunc_i1_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_i1e_loops[2] +cdef void *ufunc_i1e_ptr[4] +cdef void *ufunc_i1e_data[2] +cdef char ufunc_i1e_types[4] +cdef char *ufunc_i1e_doc = ( + "i1e(x, out=None)\n" + "\n" + "Exponentially scaled modified Bessel function of order 1.\n" + "\n" + "Defined as::\n" + "\n" + " i1e(x) = exp(-abs(x)) * i1(x)\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Argument (float)\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "I : scalar or ndarray\n" + " Value of the exponentially scaled modified Bessel function of order 1\n" + " at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "iv: Modified Bessel function of the first kind\n" + "i1: Modified Bessel function of order 1\n" + "\n" + "Notes\n" + "-----\n" + "The range is partitioned into the two intervals [0, 8] and (8, infinity).\n" + "Chebyshev polynomial expansions are employed in each interval. The\n" + "polynomial expansions used are the same as those in `i1`, but\n" + "they are not multiplied by the dominant exponential factor.\n" + "\n" + "This function is a wrapper for the Cephes [1]_ routine `i1e`. 
`i1e`\n" + "is useful for large arguments `x`: for these, `i1` quickly overflows.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "In the following example `i1` returns infinity whereas `i1e` still returns\n" + "a finite number.\n" + "\n" + ">>> from scipy.special import i1, i1e\n" + ">>> i1(1000.), i1e(1000.)\n" + "(inf, 0.01261093025692863)\n" + "\n" + "Calculate the function at several points by providing a NumPy array or\n" + "list for `x`:\n" + "\n" + ">>> import numpy as np\n" + ">>> i1e(np.array([-2., 0., 6.]))\n" + "array([-0.21526929, 0. , 0.15205146])\n" + "\n" + "Plot the function between -10 and 10.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(-10., 10., 1000)\n" + ">>> y = i1e(x)\n" + ">>> ax.plot(x, y)\n" + ">>> plt.show()") +ufunc_i1e_loops[0] = loop_d_d__As_f_f +ufunc_i1e_loops[1] = loop_d_d__As_d_d +ufunc_i1e_types[0] = NPY_FLOAT +ufunc_i1e_types[1] = NPY_FLOAT +ufunc_i1e_types[2] = NPY_DOUBLE +ufunc_i1e_types[3] = NPY_DOUBLE +ufunc_i1e_ptr[2*0] = _func_i1e +ufunc_i1e_ptr[2*0+1] = ("i1e") +ufunc_i1e_ptr[2*1] = _func_i1e +ufunc_i1e_ptr[2*1+1] = ("i1e") +ufunc_i1e_data[0] = &ufunc_i1e_ptr[2*0] +ufunc_i1e_data[1] = &ufunc_i1e_ptr[2*1] +i1e = np.PyUFunc_FromFuncAndData(ufunc_i1e_loops, ufunc_i1e_data, ufunc_i1e_types, 2, 1, 1, 0, "i1e", ufunc_i1e_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_inv_boxcox_loops[2] +cdef void *ufunc_inv_boxcox_ptr[4] +cdef void *ufunc_inv_boxcox_data[2] +cdef char ufunc_inv_boxcox_types[6] +cdef char *ufunc_inv_boxcox_doc = ( + "inv_boxcox(y, lmbda, out=None)\n" + "\n" + "Compute the inverse of the Box-Cox transformation.\n" + "\n" + "Find ``x`` such that::\n" + "\n" + " y = (x**lmbda - 1) / lmbda if lmbda != 0\n" + " log(x) if lmbda == 0\n" + "\n" + "Parameters\n" + "----------\n" + "y : array_like\n" + " Data to be transformed.\n" + 
"lmbda : array_like\n" + " Power parameter of the Box-Cox transform.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "x : scalar or ndarray\n" + " Transformed data.\n" + "\n" + "Notes\n" + "-----\n" + "\n" + ".. versionadded:: 0.16.0\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import boxcox, inv_boxcox\n" + ">>> y = boxcox([1, 4, 10], 2.5)\n" + ">>> inv_boxcox(y, 2.5)\n" + "array([1., 4., 10.])") +ufunc_inv_boxcox_loops[0] = loop_d_dd__As_ff_f +ufunc_inv_boxcox_loops[1] = loop_d_dd__As_dd_d +ufunc_inv_boxcox_types[0] = NPY_FLOAT +ufunc_inv_boxcox_types[1] = NPY_FLOAT +ufunc_inv_boxcox_types[2] = NPY_FLOAT +ufunc_inv_boxcox_types[3] = NPY_DOUBLE +ufunc_inv_boxcox_types[4] = NPY_DOUBLE +ufunc_inv_boxcox_types[5] = NPY_DOUBLE +ufunc_inv_boxcox_ptr[2*0] = _func_inv_boxcox +ufunc_inv_boxcox_ptr[2*0+1] = ("inv_boxcox") +ufunc_inv_boxcox_ptr[2*1] = _func_inv_boxcox +ufunc_inv_boxcox_ptr[2*1+1] = ("inv_boxcox") +ufunc_inv_boxcox_data[0] = &ufunc_inv_boxcox_ptr[2*0] +ufunc_inv_boxcox_data[1] = &ufunc_inv_boxcox_ptr[2*1] +inv_boxcox = np.PyUFunc_FromFuncAndData(ufunc_inv_boxcox_loops, ufunc_inv_boxcox_data, ufunc_inv_boxcox_types, 2, 2, 1, 0, "inv_boxcox", ufunc_inv_boxcox_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_inv_boxcox1p_loops[2] +cdef void *ufunc_inv_boxcox1p_ptr[4] +cdef void *ufunc_inv_boxcox1p_data[2] +cdef char ufunc_inv_boxcox1p_types[6] +cdef char *ufunc_inv_boxcox1p_doc = ( + "inv_boxcox1p(y, lmbda, out=None)\n" + "\n" + "Compute the inverse of the Box-Cox transformation.\n" + "\n" + "Find ``x`` such that::\n" + "\n" + " y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0\n" + " log(1+x) if lmbda == 0\n" + "\n" + "Parameters\n" + "----------\n" + "y : array_like\n" + " Data to be transformed.\n" + "lmbda : array_like\n" + " Power parameter of the Box-Cox transform.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + 
"Returns\n" + "-------\n" + "x : scalar or ndarray\n" + " Transformed data.\n" + "\n" + "Notes\n" + "-----\n" + "\n" + ".. versionadded:: 0.16.0\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import boxcox1p, inv_boxcox1p\n" + ">>> y = boxcox1p([1, 4, 10], 2.5)\n" + ">>> inv_boxcox1p(y, 2.5)\n" + "array([1., 4., 10.])") +ufunc_inv_boxcox1p_loops[0] = loop_d_dd__As_ff_f +ufunc_inv_boxcox1p_loops[1] = loop_d_dd__As_dd_d +ufunc_inv_boxcox1p_types[0] = NPY_FLOAT +ufunc_inv_boxcox1p_types[1] = NPY_FLOAT +ufunc_inv_boxcox1p_types[2] = NPY_FLOAT +ufunc_inv_boxcox1p_types[3] = NPY_DOUBLE +ufunc_inv_boxcox1p_types[4] = NPY_DOUBLE +ufunc_inv_boxcox1p_types[5] = NPY_DOUBLE +ufunc_inv_boxcox1p_ptr[2*0] = _func_inv_boxcox1p +ufunc_inv_boxcox1p_ptr[2*0+1] = ("inv_boxcox1p") +ufunc_inv_boxcox1p_ptr[2*1] = _func_inv_boxcox1p +ufunc_inv_boxcox1p_ptr[2*1+1] = ("inv_boxcox1p") +ufunc_inv_boxcox1p_data[0] = &ufunc_inv_boxcox1p_ptr[2*0] +ufunc_inv_boxcox1p_data[1] = &ufunc_inv_boxcox1p_ptr[2*1] +inv_boxcox1p = np.PyUFunc_FromFuncAndData(ufunc_inv_boxcox1p_loops, ufunc_inv_boxcox1p_data, ufunc_inv_boxcox1p_types, 2, 2, 1, 0, "inv_boxcox1p", ufunc_inv_boxcox1p_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_it2i0k0_loops[2] +cdef void *ufunc_it2i0k0_ptr[4] +cdef void *ufunc_it2i0k0_data[2] +cdef char ufunc_it2i0k0_types[6] +cdef char *ufunc_it2i0k0_doc = ( + "it2i0k0(x, out=None)\n" + "\n" + "Integrals related to modified Bessel functions of order 0.\n" + "\n" + "Computes the integrals\n" + "\n" + ".. 
math::\n" + "\n" + " \\int_0^x \\frac{I_0(t) - 1}{t} dt \\\\\n" + " \\int_x^\\infty \\frac{K_0(t)}{t} dt.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Values at which to evaluate the integrals.\n" + "out : tuple of ndarrays, optional\n" + " Optional output arrays for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "ii0 : scalar or ndarray\n" + " The integral for `i0`\n" + "ik0 : scalar or ndarray\n" + " The integral for `k0`\n" + "\n" + "References\n" + "----------\n" + ".. [1] S. Zhang and J.M. Jin, \"Computation of Special Functions\",\n" + " Wiley 1996\n" + "\n" + "Examples\n" + "--------\n" + "Evaluate the functions at one point.\n" + "\n" + ">>> from scipy.special import it2i0k0\n" + ">>> int_i, int_k = it2i0k0(1.)\n" + ">>> int_i, int_k\n" + "(0.12897944249456852, 0.2085182909001295)\n" + "\n" + "Evaluate the functions at several points.\n" + "\n" + ">>> import numpy as np\n" + ">>> points = np.array([0.5, 1.5, 3.])\n" + ">>> int_i, int_k = it2i0k0(points)\n" + ">>> int_i, int_k\n" + "(array([0.03149527, 0.30187149, 1.50012461]),\n" + " array([0.66575102, 0.0823715 , 0.00823631]))\n" + "\n" + "Plot the functions from 0 to 5.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(0., 5., 1000)\n" + ">>> int_i, int_k = it2i0k0(x)\n" + ">>> ax.plot(x, int_i, label=r\"$\\int_0^x \\frac{I_0(t)-1}{t}\\,dt$\")\n" + ">>> ax.plot(x, int_k, label=r\"$\\int_x^{\\infty} \\frac{K_0(t)}{t}\\,dt$\")\n" + ">>> ax.legend()\n" + ">>> ax.set_ylim(0, 10)\n" + ">>> plt.show()") +ufunc_it2i0k0_loops[0] = loop_i_d_dd_As_f_ff +ufunc_it2i0k0_loops[1] = loop_i_d_dd_As_d_dd +ufunc_it2i0k0_types[0] = NPY_FLOAT +ufunc_it2i0k0_types[1] = NPY_FLOAT +ufunc_it2i0k0_types[2] = NPY_FLOAT +ufunc_it2i0k0_types[3] = NPY_DOUBLE +ufunc_it2i0k0_types[4] = NPY_DOUBLE +ufunc_it2i0k0_types[5] = NPY_DOUBLE +ufunc_it2i0k0_ptr[2*0] = _func_it2i0k0_wrap +ufunc_it2i0k0_ptr[2*0+1] = ("it2i0k0") 
+ufunc_it2i0k0_ptr[2*1] = _func_it2i0k0_wrap +ufunc_it2i0k0_ptr[2*1+1] = ("it2i0k0") +ufunc_it2i0k0_data[0] = &ufunc_it2i0k0_ptr[2*0] +ufunc_it2i0k0_data[1] = &ufunc_it2i0k0_ptr[2*1] +it2i0k0 = np.PyUFunc_FromFuncAndData(ufunc_it2i0k0_loops, ufunc_it2i0k0_data, ufunc_it2i0k0_types, 2, 1, 2, 0, "it2i0k0", ufunc_it2i0k0_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_it2j0y0_loops[2] +cdef void *ufunc_it2j0y0_ptr[4] +cdef void *ufunc_it2j0y0_data[2] +cdef char ufunc_it2j0y0_types[6] +cdef char *ufunc_it2j0y0_doc = ( + "it2j0y0(x, out=None)\n" + "\n" + "Integrals related to Bessel functions of the first kind of order 0.\n" + "\n" + "Computes the integrals\n" + "\n" + ".. math::\n" + "\n" + " \\int_0^x \\frac{1 - J_0(t)}{t} dt \\\\\n" + " \\int_x^\\infty \\frac{Y_0(t)}{t} dt.\n" + "\n" + "For more on :math:`J_0` and :math:`Y_0` see `j0` and `y0`.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Values at which to evaluate the integrals.\n" + "out : tuple of ndarrays, optional\n" + " Optional output arrays for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "ij0 : scalar or ndarray\n" + " The integral for `j0`\n" + "iy0 : scalar or ndarray\n" + " The integral for `y0`\n" + "\n" + "References\n" + "----------\n" + ".. [1] S. Zhang and J.M. 
Jin, \"Computation of Special Functions\",\n" + " Wiley 1996\n" + "\n" + "Examples\n" + "--------\n" + "Evaluate the functions at one point.\n" + "\n" + ">>> from scipy.special import it2j0y0\n" + ">>> int_j, int_y = it2j0y0(1.)\n" + ">>> int_j, int_y\n" + "(0.12116524699506871, 0.39527290169929336)\n" + "\n" + "Evaluate the functions at several points.\n" + "\n" + ">>> import numpy as np\n" + ">>> points = np.array([0.5, 1.5, 3.])\n" + ">>> int_j, int_y = it2j0y0(points)\n" + ">>> int_j, int_y\n" + "(array([0.03100699, 0.26227724, 0.85614669]),\n" + " array([ 0.26968854, 0.29769696, -0.02987272]))\n" + "\n" + "Plot the functions from 0 to 10.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(0., 10., 1000)\n" + ">>> int_j, int_y = it2j0y0(x)\n" + ">>> ax.plot(x, int_j, label=r\"$\\int_0^x \\frac{1-J_0(t)}{t}\\,dt$\")\n" + ">>> ax.plot(x, int_y, label=r\"$\\int_x^{\\infty} \\frac{Y_0(t)}{t}\\,dt$\")\n" + ">>> ax.legend()\n" + ">>> ax.set_ylim(-2.5, 2.5)\n" + ">>> plt.show()") +ufunc_it2j0y0_loops[0] = loop_i_d_dd_As_f_ff +ufunc_it2j0y0_loops[1] = loop_i_d_dd_As_d_dd +ufunc_it2j0y0_types[0] = NPY_FLOAT +ufunc_it2j0y0_types[1] = NPY_FLOAT +ufunc_it2j0y0_types[2] = NPY_FLOAT +ufunc_it2j0y0_types[3] = NPY_DOUBLE +ufunc_it2j0y0_types[4] = NPY_DOUBLE +ufunc_it2j0y0_types[5] = NPY_DOUBLE +ufunc_it2j0y0_ptr[2*0] = _func_it2j0y0_wrap +ufunc_it2j0y0_ptr[2*0+1] = ("it2j0y0") +ufunc_it2j0y0_ptr[2*1] = _func_it2j0y0_wrap +ufunc_it2j0y0_ptr[2*1+1] = ("it2j0y0") +ufunc_it2j0y0_data[0] = &ufunc_it2j0y0_ptr[2*0] +ufunc_it2j0y0_data[1] = &ufunc_it2j0y0_ptr[2*1] +it2j0y0 = np.PyUFunc_FromFuncAndData(ufunc_it2j0y0_loops, ufunc_it2j0y0_data, ufunc_it2j0y0_types, 2, 1, 2, 0, "it2j0y0", ufunc_it2j0y0_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_it2struve0_loops[2] +cdef void *ufunc_it2struve0_ptr[4] +cdef void *ufunc_it2struve0_data[2] +cdef char ufunc_it2struve0_types[4] +cdef char *ufunc_it2struve0_doc = ( + "it2struve0(x, 
out=None)\n" + "\n" + "Integral related to the Struve function of order 0.\n" + "\n" + "Returns the integral,\n" + "\n" + ".. math::\n" + " \\int_x^\\infty \\frac{H_0(t)}{t}\\,dt\n" + "\n" + "where :math:`H_0` is the Struve function of order 0.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Lower limit of integration.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "I : scalar or ndarray\n" + " The value of the integral.\n" + "\n" + "See Also\n" + "--------\n" + "struve\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for a Fortran routine created by Shanjie Zhang and Jianming\n" + "Jin [1]_.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n" + " Functions\", John Wiley and Sons, 1996.\n" + " https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html\n" + "\n" + "Examples\n" + "--------\n" + "Evaluate the function at one point.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import it2struve0\n" + ">>> it2struve0(1.)\n" + "0.9571973506383524\n" + "\n" + "Evaluate the function at several points by supplying\n" + "an array for `x`.\n" + "\n" + ">>> points = np.array([1., 2., 3.5])\n" + ">>> it2struve0(points)\n" + "array([0.95719735, 0.46909296, 0.10366042])\n" + "\n" + "Plot the function from -10 to 10.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> x = np.linspace(-10., 10., 1000)\n" + ">>> it2struve0_values = it2struve0(x)\n" + ">>> fig, ax = plt.subplots()\n" + ">>> ax.plot(x, it2struve0_values)\n" + ">>> ax.set_xlabel(r'$x$')\n" + ">>> ax.set_ylabel(r'$\\int_x^{\\infty}\\frac{H_0(t)}{t}\\,dt$')\n" + ">>> plt.show()") +ufunc_it2struve0_loops[0] = loop_d_d__As_f_f +ufunc_it2struve0_loops[1] = loop_d_d__As_d_d +ufunc_it2struve0_types[0] = NPY_FLOAT +ufunc_it2struve0_types[1] = NPY_FLOAT +ufunc_it2struve0_types[2] = NPY_DOUBLE +ufunc_it2struve0_types[3] = 
NPY_DOUBLE +ufunc_it2struve0_ptr[2*0] = _func_it2struve0_wrap +ufunc_it2struve0_ptr[2*0+1] = ("it2struve0") +ufunc_it2struve0_ptr[2*1] = _func_it2struve0_wrap +ufunc_it2struve0_ptr[2*1+1] = ("it2struve0") +ufunc_it2struve0_data[0] = &ufunc_it2struve0_ptr[2*0] +ufunc_it2struve0_data[1] = &ufunc_it2struve0_ptr[2*1] +it2struve0 = np.PyUFunc_FromFuncAndData(ufunc_it2struve0_loops, ufunc_it2struve0_data, ufunc_it2struve0_types, 2, 1, 1, 0, "it2struve0", ufunc_it2struve0_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_itairy_loops[2] +cdef void *ufunc_itairy_ptr[4] +cdef void *ufunc_itairy_data[2] +cdef char ufunc_itairy_types[10] +cdef char *ufunc_itairy_doc = ( + "itairy(x, out=None)\n" + "\n" + "Integrals of Airy functions\n" + "\n" + "Calculates the integrals of Airy functions from 0 to `x`.\n" + "\n" + "Parameters\n" + "----------\n" + "\n" + "x : array_like\n" + " Upper limit of integration (float).\n" + "out : tuple of ndarray, optional\n" + " Optional output arrays for the function values\n" + "\n" + "Returns\n" + "-------\n" + "Apt : scalar or ndarray\n" + " Integral of Ai(t) from 0 to x.\n" + "Bpt : scalar or ndarray\n" + " Integral of Bi(t) from 0 to x.\n" + "Ant : scalar or ndarray\n" + " Integral of Ai(-t) from 0 to x.\n" + "Bnt : scalar or ndarray\n" + " Integral of Bi(-t) from 0 to x.\n" + "\n" + "Notes\n" + "-----\n" + "\n" + "Wrapper for a Fortran routine created by Shanjie Zhang and Jianming\n" + "Jin [1]_.\n" + "\n" + "References\n" + "----------\n" + "\n" + ".. [1] Zhang, Shanjie and Jin, Jianming. 
\"Computation of Special\n" + " Functions\", John Wiley and Sons, 1996.\n" + " https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html\n" + "\n" + "Examples\n" + "--------\n" + "Compute the functions at ``x=1.``.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import itairy\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> apt, bpt, ant, bnt = itairy(1.)\n" + ">>> apt, bpt, ant, bnt\n" + "(0.23631734191710949,\n" + " 0.8727691167380077,\n" + " 0.46567398346706845,\n" + " 0.3730050096342943)\n" + "\n" + "Compute the functions at several points by providing a NumPy array for `x`.\n" + "\n" + ">>> x = np.array([1., 1.5, 2.5, 5])\n" + ">>> apt, bpt, ant, bnt = itairy(x)\n" + ">>> apt, bpt, ant, bnt\n" + "(array([0.23631734, 0.28678675, 0.324638 , 0.33328759]),\n" + " array([ 0.87276912, 1.62470809, 5.20906691, 321.47831857]),\n" + " array([0.46567398, 0.72232876, 0.93187776, 0.7178822 ]),\n" + " array([ 0.37300501, 0.35038814, -0.02812939, 0.15873094]))\n" + "\n" + "Plot the functions from -10 to 10.\n" + "\n" + ">>> x = np.linspace(-10, 10, 500)\n" + ">>> apt, bpt, ant, bnt = itairy(x)\n" + ">>> fig, ax = plt.subplots(figsize=(6, 5))\n" + ">>> ax.plot(x, apt, label=r\"$\\int_0^x\\, Ai(t)\\, dt$\")\n" + ">>> ax.plot(x, bpt, ls=\"dashed\", label=r\"$\\int_0^x\\, Bi(t)\\, dt$\")\n" + ">>> ax.plot(x, ant, ls=\"dashdot\", label=r\"$\\int_0^x\\, Ai(-t)\\, dt$\")\n" + ">>> ax.plot(x, bnt, ls=\"dotted\", label=r\"$\\int_0^x\\, Bi(-t)\\, dt$\")\n" + ">>> ax.set_ylim(-2, 1.5)\n" + ">>> ax.legend(loc=\"lower right\")\n" + ">>> plt.show()") +ufunc_itairy_loops[0] = loop_i_d_dddd_As_f_ffff +ufunc_itairy_loops[1] = loop_i_d_dddd_As_d_dddd +ufunc_itairy_types[0] = NPY_FLOAT +ufunc_itairy_types[1] = NPY_FLOAT +ufunc_itairy_types[2] = NPY_FLOAT +ufunc_itairy_types[3] = NPY_FLOAT +ufunc_itairy_types[4] = NPY_FLOAT +ufunc_itairy_types[5] = NPY_DOUBLE +ufunc_itairy_types[6] = NPY_DOUBLE +ufunc_itairy_types[7] = NPY_DOUBLE 
+ufunc_itairy_types[8] = NPY_DOUBLE +ufunc_itairy_types[9] = NPY_DOUBLE +ufunc_itairy_ptr[2*0] = _func_itairy_wrap +ufunc_itairy_ptr[2*0+1] = ("itairy") +ufunc_itairy_ptr[2*1] = _func_itairy_wrap +ufunc_itairy_ptr[2*1+1] = ("itairy") +ufunc_itairy_data[0] = &ufunc_itairy_ptr[2*0] +ufunc_itairy_data[1] = &ufunc_itairy_ptr[2*1] +itairy = np.PyUFunc_FromFuncAndData(ufunc_itairy_loops, ufunc_itairy_data, ufunc_itairy_types, 2, 1, 4, 0, "itairy", ufunc_itairy_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_iti0k0_loops[2] +cdef void *ufunc_iti0k0_ptr[4] +cdef void *ufunc_iti0k0_data[2] +cdef char ufunc_iti0k0_types[6] +cdef char *ufunc_iti0k0_doc = ( + "iti0k0(x, out=None)\n" + "\n" + "Integrals of modified Bessel functions of order 0.\n" + "\n" + "Computes the integrals\n" + "\n" + ".. math::\n" + "\n" + " \\int_0^x I_0(t) dt \\\\\n" + " \\int_0^x K_0(t) dt.\n" + "\n" + "For more on :math:`I_0` and :math:`K_0` see `i0` and `k0`.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Values at which to evaluate the integrals.\n" + "out : tuple of ndarrays, optional\n" + " Optional output arrays for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "ii0 : scalar or ndarray\n" + " The integral for `i0`\n" + "ik0 : scalar or ndarray\n" + " The integral for `k0`\n" + "\n" + "References\n" + "----------\n" + ".. [1] S. Zhang and J.M. Jin, \"Computation of Special Functions\",\n" + " Wiley 1996\n" + "\n" + "Examples\n" + "--------\n" + "Evaluate the functions at one point.\n" + "\n" + ">>> from scipy.special import iti0k0\n" + ">>> int_i, int_k = iti0k0(1.)\n" + ">>> int_i, int_k\n" + "(1.0865210970235892, 1.2425098486237771)\n" + "\n" + "Evaluate the functions at several points.\n" + "\n" + ">>> import numpy as np\n" + ">>> points = np.array([0., 1.5, 3.])\n" + ">>> int_i, int_k = iti0k0(points)\n" + ">>> int_i, int_k\n" + "(array([0. , 1.80606937, 6.16096149]),\n" + " array([0. 
, 1.39458246, 1.53994809]))\n" + "\n" + "Plot the functions from 0 to 5.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(0., 5., 1000)\n" + ">>> int_i, int_k = iti0k0(x)\n" + ">>> ax.plot(x, int_i, label=r\"$\\int_0^x I_0(t)\\,dt$\")\n" + ">>> ax.plot(x, int_k, label=r\"$\\int_0^x K_0(t)\\,dt$\")\n" + ">>> ax.legend()\n" + ">>> plt.show()") +ufunc_iti0k0_loops[0] = loop_i_d_dd_As_f_ff +ufunc_iti0k0_loops[1] = loop_i_d_dd_As_d_dd +ufunc_iti0k0_types[0] = NPY_FLOAT +ufunc_iti0k0_types[1] = NPY_FLOAT +ufunc_iti0k0_types[2] = NPY_FLOAT +ufunc_iti0k0_types[3] = NPY_DOUBLE +ufunc_iti0k0_types[4] = NPY_DOUBLE +ufunc_iti0k0_types[5] = NPY_DOUBLE +ufunc_iti0k0_ptr[2*0] = _func_it1i0k0_wrap +ufunc_iti0k0_ptr[2*0+1] = ("iti0k0") +ufunc_iti0k0_ptr[2*1] = _func_it1i0k0_wrap +ufunc_iti0k0_ptr[2*1+1] = ("iti0k0") +ufunc_iti0k0_data[0] = &ufunc_iti0k0_ptr[2*0] +ufunc_iti0k0_data[1] = &ufunc_iti0k0_ptr[2*1] +iti0k0 = np.PyUFunc_FromFuncAndData(ufunc_iti0k0_loops, ufunc_iti0k0_data, ufunc_iti0k0_types, 2, 1, 2, 0, "iti0k0", ufunc_iti0k0_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_itj0y0_loops[2] +cdef void *ufunc_itj0y0_ptr[4] +cdef void *ufunc_itj0y0_data[2] +cdef char ufunc_itj0y0_types[6] +cdef char *ufunc_itj0y0_doc = ( + "itj0y0(x, out=None)\n" + "\n" + "Integrals of Bessel functions of the first kind of order 0.\n" + "\n" + "Computes the integrals\n" + "\n" + ".. math::\n" + "\n" + " \\int_0^x J_0(t) dt \\\\\n" + " \\int_0^x Y_0(t) dt.\n" + "\n" + "For more on :math:`J_0` and :math:`Y_0` see `j0` and `y0`.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Values at which to evaluate the integrals.\n" + "out : tuple of ndarrays, optional\n" + " Optional output arrays for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "ij0 : scalar or ndarray\n" + " The integral of `j0`\n" + "iy0 : scalar or ndarray\n" + " The integral of `y0`\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] S. Zhang and J.M. Jin, \"Computation of Special Functions\",\n" + " Wiley 1996\n" + "\n" + "Examples\n" + "--------\n" + "Evaluate the functions at one point.\n" + "\n" + ">>> from scipy.special import itj0y0\n" + ">>> int_j, int_y = itj0y0(1.)\n" + ">>> int_j, int_y\n" + "(0.9197304100897596, -0.637069376607422)\n" + "\n" + "Evaluate the functions at several points.\n" + "\n" + ">>> import numpy as np\n" + ">>> points = np.array([0., 1.5, 3.])\n" + ">>> int_j, int_y = itj0y0(points)\n" + ">>> int_j, int_y\n" + "(array([0. , 1.24144951, 1.38756725]),\n" + " array([ 0. , -0.51175903, 0.19765826]))\n" + "\n" + "Plot the functions from 0 to 10.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(0., 10., 1000)\n" + ">>> int_j, int_y = itj0y0(x)\n" + ">>> ax.plot(x, int_j, label=r\"$\\int_0^x J_0(t)\\,dt$\")\n" + ">>> ax.plot(x, int_y, label=r\"$\\int_0^x Y_0(t)\\,dt$\")\n" + ">>> ax.legend()\n" + ">>> plt.show()") +ufunc_itj0y0_loops[0] = loop_i_d_dd_As_f_ff +ufunc_itj0y0_loops[1] = loop_i_d_dd_As_d_dd +ufunc_itj0y0_types[0] = NPY_FLOAT +ufunc_itj0y0_types[1] = NPY_FLOAT +ufunc_itj0y0_types[2] = NPY_FLOAT +ufunc_itj0y0_types[3] = NPY_DOUBLE +ufunc_itj0y0_types[4] = NPY_DOUBLE +ufunc_itj0y0_types[5] = NPY_DOUBLE +ufunc_itj0y0_ptr[2*0] = _func_it1j0y0_wrap +ufunc_itj0y0_ptr[2*0+1] = ("itj0y0") +ufunc_itj0y0_ptr[2*1] = _func_it1j0y0_wrap +ufunc_itj0y0_ptr[2*1+1] = ("itj0y0") +ufunc_itj0y0_data[0] = &ufunc_itj0y0_ptr[2*0] +ufunc_itj0y0_data[1] = &ufunc_itj0y0_ptr[2*1] +itj0y0 = np.PyUFunc_FromFuncAndData(ufunc_itj0y0_loops, ufunc_itj0y0_data, ufunc_itj0y0_types, 2, 1, 2, 0, "itj0y0", ufunc_itj0y0_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_itmodstruve0_loops[2] +cdef void *ufunc_itmodstruve0_ptr[4] +cdef void *ufunc_itmodstruve0_data[2] +cdef char ufunc_itmodstruve0_types[4] +cdef char *ufunc_itmodstruve0_doc = ( + "itmodstruve0(x, out=None)\n" + "\n" + "Integral of the modified Struve function of order 
0.\n" + "\n" + ".. math::\n" + " I = \\int_0^x L_0(t)\\,dt\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Upper limit of integration (float).\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "I : scalar or ndarray\n" + " The integral of :math:`L_0` from 0 to `x`.\n" + "\n" + "See Also\n" + "--------\n" + "modstruve: Modified Struve function which is integrated by this function\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for a Fortran routine created by Shanjie Zhang and Jianming\n" + "Jin [1]_.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n" + " Functions\", John Wiley and Sons, 1996.\n" + " https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html\n" + "\n" + "Examples\n" + "--------\n" + "Evaluate the function at one point.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import itmodstruve0\n" + ">>> itmodstruve0(1.)\n" + "0.3364726286440384\n" + "\n" + "Evaluate the function at several points by supplying\n" + "an array for `x`.\n" + "\n" + ">>> points = np.array([1., 2., 3.5])\n" + ">>> itmodstruve0(points)\n" + "array([0.33647263, 1.588285 , 7.60382578])\n" + "\n" + "Plot the function from -10 to 10.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> x = np.linspace(-10., 10., 1000)\n" + ">>> itmodstruve0_values = itmodstruve0(x)\n" + ">>> fig, ax = plt.subplots()\n" + ">>> ax.plot(x, itmodstruve0_values)\n" + ">>> ax.set_xlabel(r'$x$')\n" + ">>> ax.set_ylabel(r'$\\int_0^xL_0(t)\\,dt$')\n" + ">>> plt.show()") +ufunc_itmodstruve0_loops[0] = loop_d_d__As_f_f +ufunc_itmodstruve0_loops[1] = loop_d_d__As_d_d +ufunc_itmodstruve0_types[0] = NPY_FLOAT +ufunc_itmodstruve0_types[1] = NPY_FLOAT +ufunc_itmodstruve0_types[2] = NPY_DOUBLE +ufunc_itmodstruve0_types[3] = NPY_DOUBLE +ufunc_itmodstruve0_ptr[2*0] = _func_itmodstruve0_wrap 
+ufunc_itmodstruve0_ptr[2*0+1] = ("itmodstruve0") +ufunc_itmodstruve0_ptr[2*1] = _func_itmodstruve0_wrap +ufunc_itmodstruve0_ptr[2*1+1] = ("itmodstruve0") +ufunc_itmodstruve0_data[0] = &ufunc_itmodstruve0_ptr[2*0] +ufunc_itmodstruve0_data[1] = &ufunc_itmodstruve0_ptr[2*1] +itmodstruve0 = np.PyUFunc_FromFuncAndData(ufunc_itmodstruve0_loops, ufunc_itmodstruve0_data, ufunc_itmodstruve0_types, 2, 1, 1, 0, "itmodstruve0", ufunc_itmodstruve0_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_itstruve0_loops[2] +cdef void *ufunc_itstruve0_ptr[4] +cdef void *ufunc_itstruve0_data[2] +cdef char ufunc_itstruve0_types[4] +cdef char *ufunc_itstruve0_doc = ( + "itstruve0(x, out=None)\n" + "\n" + "Integral of the Struve function of order 0.\n" + "\n" + ".. math::\n" + " I = \\int_0^x H_0(t)\\,dt\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Upper limit of integration (float).\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "I : scalar or ndarray\n" + " The integral of :math:`H_0` from 0 to `x`.\n" + "\n" + "See Also\n" + "--------\n" + "struve: Function which is integrated by this function\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for a Fortran routine created by Shanjie Zhang and Jianming\n" + "Jin [1]_.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Zhang, Shanjie and Jin, Jianming. 
\"Computation of Special\n" + " Functions\", John Wiley and Sons, 1996.\n" + " https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html\n" + "\n" + "Examples\n" + "--------\n" + "Evaluate the function at one point.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import itstruve0\n" + ">>> itstruve0(1.)\n" + "0.30109042670805547\n" + "\n" + "Evaluate the function at several points by supplying\n" + "an array for `x`.\n" + "\n" + ">>> points = np.array([1., 2., 3.5])\n" + ">>> itstruve0(points)\n" + "array([0.30109043, 1.01870116, 1.96804581])\n" + "\n" + "Plot the function from -20 to 20.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> x = np.linspace(-20., 20., 1000)\n" + ">>> istruve0_values = itstruve0(x)\n" + ">>> fig, ax = plt.subplots()\n" + ">>> ax.plot(x, istruve0_values)\n" + ">>> ax.set_xlabel(r'$x$')\n" + ">>> ax.set_ylabel(r'$\\int_0^{x}H_0(t)\\,dt$')\n" + ">>> plt.show()") +ufunc_itstruve0_loops[0] = loop_d_d__As_f_f +ufunc_itstruve0_loops[1] = loop_d_d__As_d_d +ufunc_itstruve0_types[0] = NPY_FLOAT +ufunc_itstruve0_types[1] = NPY_FLOAT +ufunc_itstruve0_types[2] = NPY_DOUBLE +ufunc_itstruve0_types[3] = NPY_DOUBLE +ufunc_itstruve0_ptr[2*0] = _func_itstruve0_wrap +ufunc_itstruve0_ptr[2*0+1] = ("itstruve0") +ufunc_itstruve0_ptr[2*1] = _func_itstruve0_wrap +ufunc_itstruve0_ptr[2*1+1] = ("itstruve0") +ufunc_itstruve0_data[0] = &ufunc_itstruve0_ptr[2*0] +ufunc_itstruve0_data[1] = &ufunc_itstruve0_ptr[2*1] +itstruve0 = np.PyUFunc_FromFuncAndData(ufunc_itstruve0_loops, ufunc_itstruve0_data, ufunc_itstruve0_types, 2, 1, 1, 0, "itstruve0", ufunc_itstruve0_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_iv_loops[4] +cdef void *ufunc_iv_ptr[8] +cdef void *ufunc_iv_data[4] +cdef char ufunc_iv_types[12] +cdef char *ufunc_iv_doc = ( + "iv(v, z, out=None)\n" + "\n" + "Modified Bessel function of the first kind of real order.\n" + "\n" + "Parameters\n" + "----------\n" + "v : array_like\n" + " Order. 
If `z` is of real type and negative, `v` must be integer\n" + " valued.\n" + "z : array_like of float or complex\n" + " Argument.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the modified Bessel function.\n" + "\n" + "See Also\n" + "--------\n" + "ive : This function with leading exponential behavior stripped off.\n" + "i0 : Faster version of this function for order 0.\n" + "i1 : Faster version of this function for order 1.\n" + "\n" + "Notes\n" + "-----\n" + "For real `z` and :math:`v \\in [-50, 50]`, the evaluation is carried out\n" + "using Temme's method [1]_. For larger orders, uniform asymptotic\n" + "expansions are applied.\n" + "\n" + "For complex `z` and positive `v`, the AMOS [2]_ `zbesi` routine is\n" + "called. It uses a power series for small `z`, the asymptotic expansion\n" + "for large `abs(z)`, the Miller algorithm normalized by the Wronskian\n" + "and a Neumann series for intermediate magnitudes, and the uniform\n" + "asymptotic expansions for :math:`I_v(z)` and :math:`J_v(z)` for large\n" + "orders. Backward recurrence is used to generate sequences or reduce\n" + "orders when necessary.\n" + "\n" + "The calculations above are done in the right half plane and continued\n" + "into the left half plane by the formula,\n" + "\n" + ".. math:: I_v(z \\exp(\\pm\\imath\\pi)) = \\exp(\\pm\\pi v) I_v(z)\n" + "\n" + "(valid when the real part of `z` is positive). For negative `v`, the\n" + "formula\n" + "\n" + ".. math:: I_{-v}(z) = I_v(z) + \\frac{2}{\\pi} \\sin(\\pi v) K_v(z)\n" + "\n" + "is used, where :math:`K_v(z)` is the modified Bessel function of the\n" + "second kind, evaluated using the AMOS routine `zbesk`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Temme, Journal of Computational Physics, vol 21, 343 (1976)\n" + ".. [2] Donald E. 
Amos, \"AMOS, A Portable Package for Bessel Functions\n" + " of a Complex Argument and Nonnegative Order\",\n" + " http://netlib.org/amos/\n" + "\n" + "Examples\n" + "--------\n" + "Evaluate the function of order 0 at one point.\n" + "\n" + ">>> from scipy.special import iv\n" + ">>> iv(0, 1.)\n" + "1.2660658777520084\n" + "\n" + "Evaluate the function at one point for different orders.\n" + "\n" + ">>> iv(0, 1.), iv(1, 1.), iv(1.5, 1.)\n" + "(1.2660658777520084, 0.565159103992485, 0.2935253263474798)\n" + "\n" + "The evaluation for different orders can be carried out in one call by\n" + "providing a list or NumPy array as argument for the `v` parameter:\n" + "\n" + ">>> iv([0, 1, 1.5], 1.)\n" + "array([1.26606588, 0.5651591 , 0.29352533])\n" + "\n" + "Evaluate the function at several points for order 0 by providing an\n" + "array for `z`.\n" + "\n" + ">>> import numpy as np\n" + ">>> points = np.array([-2., 0., 3.])\n" + ">>> iv(0, points)\n" + "array([2.2795853 , 1. , 4.88079259])\n" + "\n" + "If `z` is an array, the order parameter `v` must be broadcastable to\n" + "the correct shape if different orders shall be computed in one call.\n" + "To calculate the orders 0 and 1 for an 1D array:\n" + "\n" + ">>> orders = np.array([[0], [1]])\n" + ">>> orders.shape\n" + "(2, 1)\n" + "\n" + ">>> iv(orders, points)\n" + "array([[ 2.2795853 , 1. , 4.88079259],\n" + " [-1.59063685, 0. , 3.95337022]])\n" + "\n" + "Plot the functions of order 0 to 3 from -5 to 5.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(-5., 5., 1000)\n" + ">>> for i in range(4):\n" + "... 
ax.plot(x, iv(i, x), label=f'$I_{i!r}$')\n" + ">>> ax.legend()\n" + ">>> plt.show()") +ufunc_iv_loops[0] = loop_d_dd__As_ff_f +ufunc_iv_loops[1] = loop_D_dD__As_fF_F +ufunc_iv_loops[2] = loop_d_dd__As_dd_d +ufunc_iv_loops[3] = loop_D_dD__As_dD_D +ufunc_iv_types[0] = NPY_FLOAT +ufunc_iv_types[1] = NPY_FLOAT +ufunc_iv_types[2] = NPY_FLOAT +ufunc_iv_types[3] = NPY_FLOAT +ufunc_iv_types[4] = NPY_CFLOAT +ufunc_iv_types[5] = NPY_CFLOAT +ufunc_iv_types[6] = NPY_DOUBLE +ufunc_iv_types[7] = NPY_DOUBLE +ufunc_iv_types[8] = NPY_DOUBLE +ufunc_iv_types[9] = NPY_DOUBLE +ufunc_iv_types[10] = NPY_CDOUBLE +ufunc_iv_types[11] = NPY_CDOUBLE +ufunc_iv_ptr[2*0] = _func_iv +ufunc_iv_ptr[2*0+1] = ("iv") +ufunc_iv_ptr[2*1] = _func_cbesi_wrap +ufunc_iv_ptr[2*1+1] = ("iv") +ufunc_iv_ptr[2*2] = _func_iv +ufunc_iv_ptr[2*2+1] = ("iv") +ufunc_iv_ptr[2*3] = _func_cbesi_wrap +ufunc_iv_ptr[2*3+1] = ("iv") +ufunc_iv_data[0] = &ufunc_iv_ptr[2*0] +ufunc_iv_data[1] = &ufunc_iv_ptr[2*1] +ufunc_iv_data[2] = &ufunc_iv_ptr[2*2] +ufunc_iv_data[3] = &ufunc_iv_ptr[2*3] +iv = np.PyUFunc_FromFuncAndData(ufunc_iv_loops, ufunc_iv_data, ufunc_iv_types, 4, 2, 1, 0, "iv", ufunc_iv_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_ive_loops[4] +cdef void *ufunc_ive_ptr[8] +cdef void *ufunc_ive_data[4] +cdef char ufunc_ive_types[12] +cdef char *ufunc_ive_doc = ( + "ive(v, z, out=None)\n" + "\n" + "Exponentially scaled modified Bessel function of the first kind.\n" + "\n" + "Defined as::\n" + "\n" + " ive(v, z) = iv(v, z) * exp(-abs(z.real))\n" + "\n" + "For imaginary numbers without a real part, returns the unscaled\n" + "Bessel function of the first kind `iv`.\n" + "\n" + "Parameters\n" + "----------\n" + "v : array_like of float\n" + " Order.\n" + "z : array_like of float or complex\n" + " Argument.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the exponentially scaled modified Bessel function.\n" + "\n" + 
"See Also\n" + "--------\n" + "iv: Modified Bessel function of the first kind\n" + "i0e: Faster implementation of this function for order 0\n" + "i1e: Faster implementation of this function for order 1\n" + "\n" + "Notes\n" + "-----\n" + "For positive `v`, the AMOS [1]_ `zbesi` routine is called. It uses a\n" + "power series for small `z`, the asymptotic expansion for large\n" + "`abs(z)`, the Miller algorithm normalized by the Wronskian and a\n" + "Neumann series for intermediate magnitudes, and the uniform asymptotic\n" + "expansions for :math:`I_v(z)` and :math:`J_v(z)` for large orders.\n" + "Backward recurrence is used to generate sequences or reduce orders when\n" + "necessary.\n" + "\n" + "The calculations above are done in the right half plane and continued\n" + "into the left half plane by the formula,\n" + "\n" + ".. math:: I_v(z \\exp(\\pm\\imath\\pi)) = \\exp(\\pm\\pi v) I_v(z)\n" + "\n" + "(valid when the real part of `z` is positive). For negative `v`, the\n" + "formula\n" + "\n" + ".. math:: I_{-v}(z) = I_v(z) + \\frac{2}{\\pi} \\sin(\\pi v) K_v(z)\n" + "\n" + "is used, where :math:`K_v(z)` is the modified Bessel function of the\n" + "second kind, evaluated using the AMOS routine `zbesk`.\n" + "\n" + "`ive` is useful for large arguments `z`: for these, `iv` easily overflows,\n" + "while `ive` does not due to the exponential scaling.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Donald E. 
Amos, \"AMOS, A Portable Package for Bessel Functions\n" + " of a Complex Argument and Nonnegative Order\",\n" + " http://netlib.org/amos/\n" + "\n" + "Examples\n" + "--------\n" + "In the following example `iv` returns infinity whereas `ive` still returns\n" + "a finite number.\n" + "\n" + ">>> from scipy.special import iv, ive\n" + ">>> import numpy as np\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> iv(3, 1000.), ive(3, 1000.)\n" + "(inf, 0.01256056218254712)\n" + "\n" + "Evaluate the function at one point for different orders by\n" + "providing a list or NumPy array as argument for the `v` parameter:\n" + "\n" + ">>> ive([0, 1, 1.5], 1.)\n" + "array([0.46575961, 0.20791042, 0.10798193])\n" + "\n" + "Evaluate the function at several points for order 0 by providing an\n" + "array for `z`.\n" + "\n" + ">>> points = np.array([-2., 0., 3.])\n" + ">>> ive(0, points)\n" + "array([0.30850832, 1. , 0.24300035])\n" + "\n" + "Evaluate the function at several points for different orders by\n" + "providing arrays for both `v` for `z`. Both arrays have to be\n" + "broadcastable to the correct shape. To calculate the orders 0, 1\n" + "and 2 for a 1D array of points:\n" + "\n" + ">>> ive([[0], [1], [2]], points)\n" + "array([[ 0.30850832, 1. , 0.24300035],\n" + " [-0.21526929, 0. , 0.19682671],\n" + " [ 0.09323903, 0. , 0.11178255]])\n" + "\n" + "Plot the functions of order 0 to 3 from -5 to 5.\n" + "\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(-5., 5., 1000)\n" + ">>> for i in range(4):\n" + "... 
ax.plot(x, ive(i, x), label=fr'$I_{i!r}(z)\\cdot e^{{-|z|}}$')\n" + ">>> ax.legend()\n" + ">>> ax.set_xlabel(r\"$z$\")\n" + ">>> plt.show()") +ufunc_ive_loops[0] = loop_d_dd__As_ff_f +ufunc_ive_loops[1] = loop_D_dD__As_fF_F +ufunc_ive_loops[2] = loop_d_dd__As_dd_d +ufunc_ive_loops[3] = loop_D_dD__As_dD_D +ufunc_ive_types[0] = NPY_FLOAT +ufunc_ive_types[1] = NPY_FLOAT +ufunc_ive_types[2] = NPY_FLOAT +ufunc_ive_types[3] = NPY_FLOAT +ufunc_ive_types[4] = NPY_CFLOAT +ufunc_ive_types[5] = NPY_CFLOAT +ufunc_ive_types[6] = NPY_DOUBLE +ufunc_ive_types[7] = NPY_DOUBLE +ufunc_ive_types[8] = NPY_DOUBLE +ufunc_ive_types[9] = NPY_DOUBLE +ufunc_ive_types[10] = NPY_CDOUBLE +ufunc_ive_types[11] = NPY_CDOUBLE +ufunc_ive_ptr[2*0] = _func_cbesi_wrap_e_real +ufunc_ive_ptr[2*0+1] = ("ive") +ufunc_ive_ptr[2*1] = _func_cbesi_wrap_e +ufunc_ive_ptr[2*1+1] = ("ive") +ufunc_ive_ptr[2*2] = _func_cbesi_wrap_e_real +ufunc_ive_ptr[2*2+1] = ("ive") +ufunc_ive_ptr[2*3] = _func_cbesi_wrap_e +ufunc_ive_ptr[2*3+1] = ("ive") +ufunc_ive_data[0] = &ufunc_ive_ptr[2*0] +ufunc_ive_data[1] = &ufunc_ive_ptr[2*1] +ufunc_ive_data[2] = &ufunc_ive_ptr[2*2] +ufunc_ive_data[3] = &ufunc_ive_ptr[2*3] +ive = np.PyUFunc_FromFuncAndData(ufunc_ive_loops, ufunc_ive_data, ufunc_ive_types, 4, 2, 1, 0, "ive", ufunc_ive_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_j0_loops[2] +cdef void *ufunc_j0_ptr[4] +cdef void *ufunc_j0_data[2] +cdef char ufunc_j0_types[4] +cdef char *ufunc_j0_doc = ( + "j0(x, out=None)\n" + "\n" + "Bessel function of the first kind of order 0.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Argument (float).\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "J : scalar or ndarray\n" + " Value of the Bessel function of the first kind of order 0 at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "jv : Bessel function of real order and complex argument.\n" + "spherical_jn : spherical Bessel functions.\n" + "\n" 
+ "Notes\n" + "-----\n" + "The domain is divided into the intervals [0, 5] and (5, infinity). In the\n" + "first interval the following rational approximation is used:\n" + "\n" + ".. math::\n" + "\n" + " J_0(x) \\approx (w - r_1^2)(w - r_2^2) \\frac{P_3(w)}{Q_8(w)},\n" + "\n" + "where :math:`w = x^2` and :math:`r_1`, :math:`r_2` are the zeros of\n" + ":math:`J_0`, and :math:`P_3` and :math:`Q_8` are polynomials of degrees 3\n" + "and 8, respectively.\n" + "\n" + "In the second interval, the Hankel asymptotic expansion is employed with\n" + "two rational functions of degree 6/6 and 7/7.\n" + "\n" + "This function is a wrapper for the Cephes [1]_ routine `j0`.\n" + "It should not be confused with the spherical Bessel functions (see\n" + "`spherical_jn`).\n" + "\n" + "References\n" + "----------\n" + ".. [1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "Calculate the function at one point:\n" + "\n" + ">>> from scipy.special import j0\n" + ">>> j0(1.)\n" + "0.7651976865579665\n" + "\n" + "Calculate the function at several points:\n" + "\n" + ">>> import numpy as np\n" + ">>> j0(np.array([-2., 0., 4.]))\n" + "array([ 0.22389078, 1. 
, -0.39714981])\n" + "\n" + "Plot the function from -20 to 20.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(-20., 20., 1000)\n" + ">>> y = j0(x)\n" + ">>> ax.plot(x, y)\n" + ">>> plt.show()") +ufunc_j0_loops[0] = loop_d_d__As_f_f +ufunc_j0_loops[1] = loop_d_d__As_d_d +ufunc_j0_types[0] = NPY_FLOAT +ufunc_j0_types[1] = NPY_FLOAT +ufunc_j0_types[2] = NPY_DOUBLE +ufunc_j0_types[3] = NPY_DOUBLE +ufunc_j0_ptr[2*0] = _func_j0 +ufunc_j0_ptr[2*0+1] = ("j0") +ufunc_j0_ptr[2*1] = _func_j0 +ufunc_j0_ptr[2*1+1] = ("j0") +ufunc_j0_data[0] = &ufunc_j0_ptr[2*0] +ufunc_j0_data[1] = &ufunc_j0_ptr[2*1] +j0 = np.PyUFunc_FromFuncAndData(ufunc_j0_loops, ufunc_j0_data, ufunc_j0_types, 2, 1, 1, 0, "j0", ufunc_j0_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_j1_loops[2] +cdef void *ufunc_j1_ptr[4] +cdef void *ufunc_j1_data[2] +cdef char ufunc_j1_types[4] +cdef char *ufunc_j1_doc = ( + "j1(x, out=None)\n" + "\n" + "Bessel function of the first kind of order 1.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Argument (float).\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "J : scalar or ndarray\n" + " Value of the Bessel function of the first kind of order 1 at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "jv: Bessel function of the first kind\n" + "spherical_jn: spherical Bessel functions.\n" + "\n" + "Notes\n" + "-----\n" + "The domain is divided into the intervals [0, 8] and (8, infinity). In the\n" + "first interval a 24 term Chebyshev expansion is used. In the second, the\n" + "asymptotic trigonometric representation is employed using two rational\n" + "functions of degree 5/5.\n" + "\n" + "This function is a wrapper for the Cephes [1]_ routine `j1`.\n" + "It should not be confused with the spherical Bessel functions (see\n" + "`spherical_jn`).\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "Calculate the function at one point:\n" + "\n" + ">>> from scipy.special import j1\n" + ">>> j1(1.)\n" + "0.44005058574493355\n" + "\n" + "Calculate the function at several points:\n" + "\n" + ">>> import numpy as np\n" + ">>> j1(np.array([-2., 0., 4.]))\n" + "array([-0.57672481, 0. , -0.06604333])\n" + "\n" + "Plot the function from -20 to 20.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(-20., 20., 1000)\n" + ">>> y = j1(x)\n" + ">>> ax.plot(x, y)\n" + ">>> plt.show()") +ufunc_j1_loops[0] = loop_d_d__As_f_f +ufunc_j1_loops[1] = loop_d_d__As_d_d +ufunc_j1_types[0] = NPY_FLOAT +ufunc_j1_types[1] = NPY_FLOAT +ufunc_j1_types[2] = NPY_DOUBLE +ufunc_j1_types[3] = NPY_DOUBLE +ufunc_j1_ptr[2*0] = _func_j1 +ufunc_j1_ptr[2*0+1] = ("j1") +ufunc_j1_ptr[2*1] = _func_j1 +ufunc_j1_ptr[2*1+1] = ("j1") +ufunc_j1_data[0] = &ufunc_j1_ptr[2*0] +ufunc_j1_data[1] = &ufunc_j1_ptr[2*1] +j1 = np.PyUFunc_FromFuncAndData(ufunc_j1_loops, ufunc_j1_data, ufunc_j1_types, 2, 1, 1, 0, "j1", ufunc_j1_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_jv_loops[4] +cdef void *ufunc_jv_ptr[8] +cdef void *ufunc_jv_data[4] +cdef char ufunc_jv_types[12] +cdef char *ufunc_jv_doc = ( + "jv(v, z, out=None)\n" + "\n" + "Bessel function of the first kind of real order and complex argument.\n" + "\n" + "Parameters\n" + "----------\n" + "v : array_like\n" + " Order (float).\n" + "z : array_like\n" + " Argument (float or complex).\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "J : scalar or ndarray\n" + " Value of the Bessel function, :math:`J_v(z)`.\n" + "\n" + "See Also\n" + "--------\n" + "jve : :math:`J_v` with leading exponential behavior stripped off.\n" + "spherical_jn : spherical Bessel functions.\n" + "j0 : faster version of this function for 
order 0.\n" + "j1 : faster version of this function for order 1.\n" + "\n" + "Notes\n" + "-----\n" + "For positive `v` values, the computation is carried out using the AMOS\n" + "[1]_ `zbesj` routine, which exploits the connection to the modified\n" + "Bessel function :math:`I_v`,\n" + "\n" + ".. math::\n" + " J_v(z) = \\exp(v\\pi\\imath/2) I_v(-\\imath z)\\qquad (\\Im z > 0)\n" + "\n" + " J_v(z) = \\exp(-v\\pi\\imath/2) I_v(\\imath z)\\qquad (\\Im z < 0)\n" + "\n" + "For negative `v` values the formula,\n" + "\n" + ".. math:: J_{-v}(z) = J_v(z) \\cos(\\pi v) - Y_v(z) \\sin(\\pi v)\n" + "\n" + "is used, where :math:`Y_v(z)` is the Bessel function of the second\n" + "kind, computed using the AMOS routine `zbesy`. Note that the second\n" + "term is exactly zero for integer `v`; to improve accuracy the second\n" + "term is explicitly omitted for `v` values such that `v = floor(v)`.\n" + "\n" + "Not to be confused with the spherical Bessel functions (see `spherical_jn`).\n" + "\n" + "References\n" + "----------\n" + ".. [1] Donald E. 
Amos, \"AMOS, A Portable Package for Bessel Functions\n" + " of a Complex Argument and Nonnegative Order\",\n" + " http://netlib.org/amos/\n" + "\n" + "Examples\n" + "--------\n" + "Evaluate the function of order 0 at one point.\n" + "\n" + ">>> from scipy.special import jv\n" + ">>> jv(0, 1.)\n" + "0.7651976865579666\n" + "\n" + "Evaluate the function at one point for different orders.\n" + "\n" + ">>> jv(0, 1.), jv(1, 1.), jv(1.5, 1.)\n" + "(0.7651976865579666, 0.44005058574493355, 0.24029783912342725)\n" + "\n" + "The evaluation for different orders can be carried out in one call by\n" + "providing a list or NumPy array as argument for the `v` parameter:\n" + "\n" + ">>> jv([0, 1, 1.5], 1.)\n" + "array([0.76519769, 0.44005059, 0.24029784])\n" + "\n" + "Evaluate the function at several points for order 0 by providing an\n" + "array for `z`.\n" + "\n" + ">>> import numpy as np\n" + ">>> points = np.array([-2., 0., 3.])\n" + ">>> jv(0, points)\n" + "array([ 0.22389078, 1. , -0.26005195])\n" + "\n" + "If `z` is an array, the order parameter `v` must be broadcastable to\n" + "the correct shape if different orders shall be computed in one call.\n" + "To calculate the orders 0 and 1 for an 1D array:\n" + "\n" + ">>> orders = np.array([[0], [1]])\n" + ">>> orders.shape\n" + "(2, 1)\n" + "\n" + ">>> jv(orders, points)\n" + "array([[ 0.22389078, 1. , -0.26005195],\n" + " [-0.57672481, 0. , 0.33905896]])\n" + "\n" + "Plot the functions of order 0 to 3 from -10 to 10.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(-10., 10., 1000)\n" + ">>> for i in range(4):\n" + "... 
ax.plot(x, jv(i, x), label=f'$J_{i!r}$')\n" + ">>> ax.legend()\n" + ">>> plt.show()") +ufunc_jv_loops[0] = loop_d_dd__As_ff_f +ufunc_jv_loops[1] = loop_D_dD__As_fF_F +ufunc_jv_loops[2] = loop_d_dd__As_dd_d +ufunc_jv_loops[3] = loop_D_dD__As_dD_D +ufunc_jv_types[0] = NPY_FLOAT +ufunc_jv_types[1] = NPY_FLOAT +ufunc_jv_types[2] = NPY_FLOAT +ufunc_jv_types[3] = NPY_FLOAT +ufunc_jv_types[4] = NPY_CFLOAT +ufunc_jv_types[5] = NPY_CFLOAT +ufunc_jv_types[6] = NPY_DOUBLE +ufunc_jv_types[7] = NPY_DOUBLE +ufunc_jv_types[8] = NPY_DOUBLE +ufunc_jv_types[9] = NPY_DOUBLE +ufunc_jv_types[10] = NPY_CDOUBLE +ufunc_jv_types[11] = NPY_CDOUBLE +ufunc_jv_ptr[2*0] = _func_cbesj_wrap_real +ufunc_jv_ptr[2*0+1] = ("jv") +ufunc_jv_ptr[2*1] = _func_cbesj_wrap +ufunc_jv_ptr[2*1+1] = ("jv") +ufunc_jv_ptr[2*2] = _func_cbesj_wrap_real +ufunc_jv_ptr[2*2+1] = ("jv") +ufunc_jv_ptr[2*3] = _func_cbesj_wrap +ufunc_jv_ptr[2*3+1] = ("jv") +ufunc_jv_data[0] = &ufunc_jv_ptr[2*0] +ufunc_jv_data[1] = &ufunc_jv_ptr[2*1] +ufunc_jv_data[2] = &ufunc_jv_ptr[2*2] +ufunc_jv_data[3] = &ufunc_jv_ptr[2*3] +jv = np.PyUFunc_FromFuncAndData(ufunc_jv_loops, ufunc_jv_data, ufunc_jv_types, 4, 2, 1, 0, "jv", ufunc_jv_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_jve_loops[4] +cdef void *ufunc_jve_ptr[8] +cdef void *ufunc_jve_data[4] +cdef char ufunc_jve_types[12] +cdef char *ufunc_jve_doc = ( + "jve(v, z, out=None)\n" + "\n" + "Exponentially scaled Bessel function of the first kind of order `v`.\n" + "\n" + "Defined as::\n" + "\n" + " jve(v, z) = jv(v, z) * exp(-abs(z.imag))\n" + "\n" + "Parameters\n" + "----------\n" + "v : array_like\n" + " Order (float).\n" + "z : array_like\n" + " Argument (float or complex).\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "J : scalar or ndarray\n" + " Value of the exponentially scaled Bessel function.\n" + "\n" + "See Also\n" + "--------\n" + "jv: Unscaled Bessel function of the first kind\n" + "\n" + "Notes\n" 
+ "-----\n" + "For positive `v` values, the computation is carried out using the AMOS\n" + "[1]_ `zbesj` routine, which exploits the connection to the modified\n" + "Bessel function :math:`I_v`,\n" + "\n" + ".. math::\n" + " J_v(z) = \\exp(v\\pi\\imath/2) I_v(-\\imath z)\\qquad (\\Im z > 0)\n" + "\n" + " J_v(z) = \\exp(-v\\pi\\imath/2) I_v(\\imath z)\\qquad (\\Im z < 0)\n" + "\n" + "For negative `v` values the formula,\n" + "\n" + ".. math:: J_{-v}(z) = J_v(z) \\cos(\\pi v) - Y_v(z) \\sin(\\pi v)\n" + "\n" + "is used, where :math:`Y_v(z)` is the Bessel function of the second\n" + "kind, computed using the AMOS routine `zbesy`. Note that the second\n" + "term is exactly zero for integer `v`; to improve accuracy the second\n" + "term is explicitly omitted for `v` values such that `v = floor(v)`.\n" + "\n" + "Exponentially scaled Bessel functions are useful for large arguments `z`:\n" + "for these, the unscaled Bessel functions can easily under-or overflow.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Donald E. Amos, \"AMOS, A Portable Package for Bessel Functions\n" + " of a Complex Argument and Nonnegative Order\",\n" + " http://netlib.org/amos/\n" + "\n" + "Examples\n" + "--------\n" + "Compare the output of `jv` and `jve` for large complex arguments for `z`\n" + "by computing their values for order ``v=1`` at ``z=1000j``. 
We see that\n" + "`jv` overflows but `jve` returns a finite number:\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import jv, jve\n" + ">>> v = 1\n" + ">>> z = 1000j\n" + ">>> jv(v, z), jve(v, z)\n" + "((inf+infj), (7.721967686709077e-19+0.012610930256928629j))\n" + "\n" + "For real arguments for `z`, `jve` returns the same as `jv`.\n" + "\n" + ">>> v, z = 1, 1000\n" + ">>> jv(v, z), jve(v, z)\n" + "(0.004728311907089523, 0.004728311907089523)\n" + "\n" + "The function can be evaluated for several orders at the same time by\n" + "providing a list or NumPy array for `v`:\n" + "\n" + ">>> jve([1, 3, 5], 1j)\n" + "array([1.27304208e-17+2.07910415e-01j, -4.99352086e-19-8.15530777e-03j,\n" + " 6.11480940e-21+9.98657141e-05j])\n" + "\n" + "In the same way, the function can be evaluated at several points in one\n" + "call by providing a list or NumPy array for `z`:\n" + "\n" + ">>> jve(1, np.array([1j, 2j, 3j]))\n" + "array([1.27308412e-17+0.20791042j, 1.31814423e-17+0.21526929j,\n" + " 1.20521602e-17+0.19682671j])\n" + "\n" + "It is also possible to evaluate several orders at several points\n" + "at the same time by providing arrays for `v` and `z` with\n" + "compatible shapes for broadcasting. 
Compute `jve` for two different orders\n" + "`v` and three points `z` resulting in a 2x3 array.\n" + "\n" + ">>> v = np.array([[1], [3]])\n" + ">>> z = np.array([1j, 2j, 3j])\n" + ">>> v.shape, z.shape\n" + "((2, 1), (3,))\n" + "\n" + ">>> jve(v, z)\n" + "array([[1.27304208e-17+0.20791042j, 1.31810070e-17+0.21526929j,\n" + " 1.20517622e-17+0.19682671j],\n" + " [-4.99352086e-19-0.00815531j, -1.76289571e-18-0.02879122j,\n" + " -2.92578784e-18-0.04778332j]])") +ufunc_jve_loops[0] = loop_d_dd__As_ff_f +ufunc_jve_loops[1] = loop_D_dD__As_fF_F +ufunc_jve_loops[2] = loop_d_dd__As_dd_d +ufunc_jve_loops[3] = loop_D_dD__As_dD_D +ufunc_jve_types[0] = NPY_FLOAT +ufunc_jve_types[1] = NPY_FLOAT +ufunc_jve_types[2] = NPY_FLOAT +ufunc_jve_types[3] = NPY_FLOAT +ufunc_jve_types[4] = NPY_CFLOAT +ufunc_jve_types[5] = NPY_CFLOAT +ufunc_jve_types[6] = NPY_DOUBLE +ufunc_jve_types[7] = NPY_DOUBLE +ufunc_jve_types[8] = NPY_DOUBLE +ufunc_jve_types[9] = NPY_DOUBLE +ufunc_jve_types[10] = NPY_CDOUBLE +ufunc_jve_types[11] = NPY_CDOUBLE +ufunc_jve_ptr[2*0] = _func_cbesj_wrap_e_real +ufunc_jve_ptr[2*0+1] = ("jve") +ufunc_jve_ptr[2*1] = _func_cbesj_wrap_e +ufunc_jve_ptr[2*1+1] = ("jve") +ufunc_jve_ptr[2*2] = _func_cbesj_wrap_e_real +ufunc_jve_ptr[2*2+1] = ("jve") +ufunc_jve_ptr[2*3] = _func_cbesj_wrap_e +ufunc_jve_ptr[2*3+1] = ("jve") +ufunc_jve_data[0] = &ufunc_jve_ptr[2*0] +ufunc_jve_data[1] = &ufunc_jve_ptr[2*1] +ufunc_jve_data[2] = &ufunc_jve_ptr[2*2] +ufunc_jve_data[3] = &ufunc_jve_ptr[2*3] +jve = np.PyUFunc_FromFuncAndData(ufunc_jve_loops, ufunc_jve_data, ufunc_jve_types, 4, 2, 1, 0, "jve", ufunc_jve_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_k0_loops[2] +cdef void *ufunc_k0_ptr[4] +cdef void *ufunc_k0_data[2] +cdef char ufunc_k0_types[4] +cdef char *ufunc_k0_doc = ( + "k0(x, out=None)\n" + "\n" + "Modified Bessel function of the second kind of order 0, :math:`K_0`.\n" + "\n" + "This function is also sometimes referred to as the modified Bessel\n" + "function of the third kind of order 
0.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Argument (float).\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "K : scalar or ndarray\n" + " Value of the modified Bessel function :math:`K_0` at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "kv: Modified Bessel function of the second kind of any order\n" + "k0e: Exponentially scaled modified Bessel function of the second kind\n" + "\n" + "Notes\n" + "-----\n" + "The range is partitioned into the two intervals [0, 2] and (2, infinity).\n" + "Chebyshev polynomial expansions are employed in each interval.\n" + "\n" + "This function is a wrapper for the Cephes [1]_ routine `k0`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "Calculate the function at one point:\n" + "\n" + ">>> from scipy.special import k0\n" + ">>> k0(1.)\n" + "0.42102443824070823\n" + "\n" + "Calculate the function at several points:\n" + "\n" + ">>> import numpy as np\n" + ">>> k0(np.array([0.5, 2., 3.]))\n" + "array([0.92441907, 0.11389387, 0.0347395 ])\n" + "\n" + "Plot the function from 0 to 10.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(0., 10., 1000)\n" + ">>> y = k0(x)\n" + ">>> ax.plot(x, y)\n" + ">>> plt.show()") +ufunc_k0_loops[0] = loop_d_d__As_f_f +ufunc_k0_loops[1] = loop_d_d__As_d_d +ufunc_k0_types[0] = NPY_FLOAT +ufunc_k0_types[1] = NPY_FLOAT +ufunc_k0_types[2] = NPY_DOUBLE +ufunc_k0_types[3] = NPY_DOUBLE +ufunc_k0_ptr[2*0] = _func_k0 +ufunc_k0_ptr[2*0+1] = ("k0") +ufunc_k0_ptr[2*1] = _func_k0 +ufunc_k0_ptr[2*1+1] = ("k0") +ufunc_k0_data[0] = &ufunc_k0_ptr[2*0] +ufunc_k0_data[1] = &ufunc_k0_ptr[2*1] +k0 = np.PyUFunc_FromFuncAndData(ufunc_k0_loops, ufunc_k0_data, ufunc_k0_types, 2, 1, 1, 0, "k0", ufunc_k0_doc, 0) + +cdef np.PyUFuncGenericFunction 
ufunc_k0e_loops[2] +cdef void *ufunc_k0e_ptr[4] +cdef void *ufunc_k0e_data[2] +cdef char ufunc_k0e_types[4] +cdef char *ufunc_k0e_doc = ( + "k0e(x, out=None)\n" + "\n" + "Exponentially scaled modified Bessel function K of order 0\n" + "\n" + "Defined as::\n" + "\n" + " k0e(x) = exp(x) * k0(x).\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Argument (float)\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "K : scalar or ndarray\n" + " Value of the exponentially scaled modified Bessel function K of order\n" + " 0 at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "kv: Modified Bessel function of the second kind of any order\n" + "k0: Modified Bessel function of the second kind\n" + "\n" + "Notes\n" + "-----\n" + "The range is partitioned into the two intervals [0, 2] and (2, infinity).\n" + "Chebyshev polynomial expansions are employed in each interval.\n" + "\n" + "This function is a wrapper for the Cephes [1]_ routine `k0e`. `k0e` is\n" + "useful for large arguments: for these, `k0` easily underflows.\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "In the following example `k0` returns 0 whereas `k0e` still returns a\n" + "useful finite number:\n" + "\n" + ">>> from scipy.special import k0, k0e\n" + ">>> k0(1000.), k0e(1000)\n" + "(0., 0.03962832160075422)\n" + "\n" + "Calculate the function at several points by providing a NumPy array or\n" + "list for `x`:\n" + "\n" + ">>> import numpy as np\n" + ">>> k0e(np.array([0.5, 2., 3.]))\n" + "array([1.52410939, 0.84156822, 0.6977616 ])\n" + "\n" + "Plot the function from 0 to 10.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(0., 10., 1000)\n" + ">>> y = k0e(x)\n" + ">>> ax.plot(x, y)\n" + ">>> plt.show()") +ufunc_k0e_loops[0] = loop_d_d__As_f_f +ufunc_k0e_loops[1] = loop_d_d__As_d_d +ufunc_k0e_types[0] = NPY_FLOAT +ufunc_k0e_types[1] = NPY_FLOAT +ufunc_k0e_types[2] = NPY_DOUBLE +ufunc_k0e_types[3] = NPY_DOUBLE +ufunc_k0e_ptr[2*0] = _func_k0e +ufunc_k0e_ptr[2*0+1] = ("k0e") +ufunc_k0e_ptr[2*1] = _func_k0e +ufunc_k0e_ptr[2*1+1] = ("k0e") +ufunc_k0e_data[0] = &ufunc_k0e_ptr[2*0] +ufunc_k0e_data[1] = &ufunc_k0e_ptr[2*1] +k0e = np.PyUFunc_FromFuncAndData(ufunc_k0e_loops, ufunc_k0e_data, ufunc_k0e_types, 2, 1, 1, 0, "k0e", ufunc_k0e_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_k1_loops[2] +cdef void *ufunc_k1_ptr[4] +cdef void *ufunc_k1_data[2] +cdef char ufunc_k1_types[4] +cdef char *ufunc_k1_doc = ( + "k1(x, out=None)\n" + "\n" + "Modified Bessel function of the second kind of order 1, :math:`K_1(x)`.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Argument (float)\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "K : scalar or ndarray\n" + " Value of the modified Bessel function K of order 1 at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "kv: Modified Bessel function of the second 
kind of any order\n" + "k1e: Exponentially scaled modified Bessel function K of order 1\n" + "\n" + "Notes\n" + "-----\n" + "The range is partitioned into the two intervals [0, 2] and (2, infinity).\n" + "Chebyshev polynomial expansions are employed in each interval.\n" + "\n" + "This function is a wrapper for the Cephes [1]_ routine `k1`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "Calculate the function at one point:\n" + "\n" + ">>> from scipy.special import k1\n" + ">>> k1(1.)\n" + "0.6019072301972346\n" + "\n" + "Calculate the function at several points:\n" + "\n" + ">>> import numpy as np\n" + ">>> k1(np.array([0.5, 2., 3.]))\n" + "array([1.65644112, 0.13986588, 0.04015643])\n" + "\n" + "Plot the function from 0 to 10.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(0., 10., 1000)\n" + ">>> y = k1(x)\n" + ">>> ax.plot(x, y)\n" + ">>> plt.show()") +ufunc_k1_loops[0] = loop_d_d__As_f_f +ufunc_k1_loops[1] = loop_d_d__As_d_d +ufunc_k1_types[0] = NPY_FLOAT +ufunc_k1_types[1] = NPY_FLOAT +ufunc_k1_types[2] = NPY_DOUBLE +ufunc_k1_types[3] = NPY_DOUBLE +ufunc_k1_ptr[2*0] = _func_k1 +ufunc_k1_ptr[2*0+1] = ("k1") +ufunc_k1_ptr[2*1] = _func_k1 +ufunc_k1_ptr[2*1+1] = ("k1") +ufunc_k1_data[0] = &ufunc_k1_ptr[2*0] +ufunc_k1_data[1] = &ufunc_k1_ptr[2*1] +k1 = np.PyUFunc_FromFuncAndData(ufunc_k1_loops, ufunc_k1_data, ufunc_k1_types, 2, 1, 1, 0, "k1", ufunc_k1_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_k1e_loops[2] +cdef void *ufunc_k1e_ptr[4] +cdef void *ufunc_k1e_data[2] +cdef char ufunc_k1e_types[4] +cdef char *ufunc_k1e_doc = ( + "k1e(x, out=None)\n" + "\n" + "Exponentially scaled modified Bessel function K of order 1\n" + "\n" + "Defined as::\n" + "\n" + " k1e(x) = exp(x) * k1(x)\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Argument (float)\n" + "out : 
ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "K : scalar or ndarray\n" + " Value of the exponentially scaled modified Bessel function K of order\n" + " 1 at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "kv: Modified Bessel function of the second kind of any order\n" + "k1: Modified Bessel function of the second kind of order 1\n" + "\n" + "Notes\n" + "-----\n" + "The range is partitioned into the two intervals [0, 2] and (2, infinity).\n" + "Chebyshev polynomial expansions are employed in each interval.\n" + "\n" + "This function is a wrapper for the Cephes [1]_ routine `k1e`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "In the following example `k1` returns 0 whereas `k1e` still returns a\n" + "useful floating point number.\n" + "\n" + ">>> from scipy.special import k1, k1e\n" + ">>> k1(1000.), k1e(1000.)\n" + "(0., 0.03964813081296021)\n" + "\n" + "Calculate the function at several points by providing a NumPy array or\n" + "list for `x`:\n" + "\n" + ">>> import numpy as np\n" + ">>> k1e(np.array([0.5, 2., 3.]))\n" + "array([2.73100971, 1.03347685, 0.80656348])\n" + "\n" + "Plot the function from 0 to 10.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(0., 10., 1000)\n" + ">>> y = k1e(x)\n" + ">>> ax.plot(x, y)\n" + ">>> plt.show()") +ufunc_k1e_loops[0] = loop_d_d__As_f_f +ufunc_k1e_loops[1] = loop_d_d__As_d_d +ufunc_k1e_types[0] = NPY_FLOAT +ufunc_k1e_types[1] = NPY_FLOAT +ufunc_k1e_types[2] = NPY_DOUBLE +ufunc_k1e_types[3] = NPY_DOUBLE +ufunc_k1e_ptr[2*0] = _func_k1e +ufunc_k1e_ptr[2*0+1] = ("k1e") +ufunc_k1e_ptr[2*1] = _func_k1e +ufunc_k1e_ptr[2*1+1] = ("k1e") +ufunc_k1e_data[0] = &ufunc_k1e_ptr[2*0] +ufunc_k1e_data[1] = &ufunc_k1e_ptr[2*1] +k1e = np.PyUFunc_FromFuncAndData(ufunc_k1e_loops, ufunc_k1e_data, 
ufunc_k1e_types, 2, 1, 1, 0, "k1e", ufunc_k1e_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_kei_loops[2] +cdef void *ufunc_kei_ptr[4] +cdef void *ufunc_kei_data[2] +cdef char ufunc_kei_types[4] +cdef char *ufunc_kei_doc = ( + "kei(x, out=None)\n" + "\n" + "Kelvin function kei.\n" + "\n" + "Defined as\n" + "\n" + ".. math::\n" + "\n" + " \\mathrm{kei}(x) = \\Im[K_0(x e^{\\pi i / 4})]\n" + "\n" + "where :math:`K_0` is the modified Bessel function of the second\n" + "kind (see `kv`). See [dlmf]_ for more details.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real argument.\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the Kelvin function.\n" + "\n" + "See Also\n" + "--------\n" + "ker : the corresponding real part\n" + "keip : the derivative of kei\n" + "kv : modified Bessel function of the second kind\n" + "\n" + "References\n" + "----------\n" + ".. [dlmf] NIST, Digital Library of Mathematical Functions,\n" + " https://dlmf.nist.gov/10.61\n" + "\n" + "Examples\n" + "--------\n" + "It can be expressed using the modified Bessel function of the\n" + "second kind.\n" + "\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + ">>> x = np.array([1.0, 2.0, 3.0, 4.0])\n" + ">>> sc.kv(0, x * np.exp(np.pi * 1j / 4)).imag\n" + "array([-0.49499464, -0.20240007, -0.05112188, 0.0021984 ])\n" + ">>> sc.kei(x)\n" + "array([-0.49499464, -0.20240007, -0.05112188, 0.0021984 ])") +ufunc_kei_loops[0] = loop_d_d__As_f_f +ufunc_kei_loops[1] = loop_d_d__As_d_d +ufunc_kei_types[0] = NPY_FLOAT +ufunc_kei_types[1] = NPY_FLOAT +ufunc_kei_types[2] = NPY_DOUBLE +ufunc_kei_types[3] = NPY_DOUBLE +ufunc_kei_ptr[2*0] = _func_kei_wrap +ufunc_kei_ptr[2*0+1] = ("kei") +ufunc_kei_ptr[2*1] = _func_kei_wrap +ufunc_kei_ptr[2*1+1] = ("kei") +ufunc_kei_data[0] = &ufunc_kei_ptr[2*0] +ufunc_kei_data[1] = &ufunc_kei_ptr[2*1] +kei = 
np.PyUFunc_FromFuncAndData(ufunc_kei_loops, ufunc_kei_data, ufunc_kei_types, 2, 1, 1, 0, "kei", ufunc_kei_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_keip_loops[2] +cdef void *ufunc_keip_ptr[4] +cdef void *ufunc_keip_data[2] +cdef char ufunc_keip_types[4] +cdef char *ufunc_keip_doc = ( + "keip(x, out=None)\n" + "\n" + "Derivative of the Kelvin function kei.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real argument.\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The values of the derivative of kei.\n" + "\n" + "See Also\n" + "--------\n" + "kei\n" + "\n" + "References\n" + "----------\n" + ".. [dlmf] NIST, Digital Library of Mathematical Functions,\n" + " https://dlmf.nist.gov/10#PT5") +ufunc_keip_loops[0] = loop_d_d__As_f_f +ufunc_keip_loops[1] = loop_d_d__As_d_d +ufunc_keip_types[0] = NPY_FLOAT +ufunc_keip_types[1] = NPY_FLOAT +ufunc_keip_types[2] = NPY_DOUBLE +ufunc_keip_types[3] = NPY_DOUBLE +ufunc_keip_ptr[2*0] = _func_keip_wrap +ufunc_keip_ptr[2*0+1] = ("keip") +ufunc_keip_ptr[2*1] = _func_keip_wrap +ufunc_keip_ptr[2*1+1] = ("keip") +ufunc_keip_data[0] = &ufunc_keip_ptr[2*0] +ufunc_keip_data[1] = &ufunc_keip_ptr[2*1] +keip = np.PyUFunc_FromFuncAndData(ufunc_keip_loops, ufunc_keip_data, ufunc_keip_types, 2, 1, 1, 0, "keip", ufunc_keip_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_kelvin_loops[2] +cdef void *ufunc_kelvin_ptr[4] +cdef void *ufunc_kelvin_data[2] +cdef char ufunc_kelvin_types[10] +cdef char *ufunc_kelvin_doc = ( + "kelvin(x, out=None)\n" + "\n" + "Kelvin functions as complex numbers\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Argument\n" + "out : tuple of ndarray, optional\n" + " Optional output arrays for the function values\n" + "\n" + "Returns\n" + "-------\n" + "Be, Ke, Bep, Kep : 4-tuple of scalar or ndarray\n" + " The tuple (Be, Ke, Bep, Kep) contains complex numbers\n" + " 
representing the real and imaginary Kelvin functions and their\n" + " derivatives evaluated at `x`. For example, kelvin(x)[0].real =\n" + " ber x and kelvin(x)[0].imag = bei x with similar relationships\n" + " for ker and kei.") +ufunc_kelvin_loops[0] = loop_i_d_DDDD_As_f_FFFF +ufunc_kelvin_loops[1] = loop_i_d_DDDD_As_d_DDDD +ufunc_kelvin_types[0] = NPY_FLOAT +ufunc_kelvin_types[1] = NPY_CFLOAT +ufunc_kelvin_types[2] = NPY_CFLOAT +ufunc_kelvin_types[3] = NPY_CFLOAT +ufunc_kelvin_types[4] = NPY_CFLOAT +ufunc_kelvin_types[5] = NPY_DOUBLE +ufunc_kelvin_types[6] = NPY_CDOUBLE +ufunc_kelvin_types[7] = NPY_CDOUBLE +ufunc_kelvin_types[8] = NPY_CDOUBLE +ufunc_kelvin_types[9] = NPY_CDOUBLE +ufunc_kelvin_ptr[2*0] = _func_kelvin_wrap +ufunc_kelvin_ptr[2*0+1] = ("kelvin") +ufunc_kelvin_ptr[2*1] = _func_kelvin_wrap +ufunc_kelvin_ptr[2*1+1] = ("kelvin") +ufunc_kelvin_data[0] = &ufunc_kelvin_ptr[2*0] +ufunc_kelvin_data[1] = &ufunc_kelvin_ptr[2*1] +kelvin = np.PyUFunc_FromFuncAndData(ufunc_kelvin_loops, ufunc_kelvin_data, ufunc_kelvin_types, 2, 1, 4, 0, "kelvin", ufunc_kelvin_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_ker_loops[2] +cdef void *ufunc_ker_ptr[4] +cdef void *ufunc_ker_data[2] +cdef char ufunc_ker_types[4] +cdef char *ufunc_ker_doc = ( + "ker(x, out=None)\n" + "\n" + "Kelvin function ker.\n" + "\n" + "Defined as\n" + "\n" + ".. math::\n" + "\n" + " \\mathrm{ker}(x) = \\Re[K_0(x e^{\\pi i / 4})]\n" + "\n" + "Where :math:`K_0` is the modified Bessel function of the second\n" + "kind (see `kv`). 
See [dlmf]_ for more details.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real argument.\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the Kelvin function.\n" + "\n" + "See Also\n" + "--------\n" + "kei : the corresponding imaginary part\n" + "kerp : the derivative of ker\n" + "kv : modified Bessel function of the second kind\n" + "\n" + "References\n" + "----------\n" + ".. [dlmf] NIST, Digital Library of Mathematical Functions,\n" + " https://dlmf.nist.gov/10.61\n" + "\n" + "Examples\n" + "--------\n" + "It can be expressed using the modified Bessel function of the\n" + "second kind.\n" + "\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + ">>> x = np.array([1.0, 2.0, 3.0, 4.0])\n" + ">>> sc.kv(0, x * np.exp(np.pi * 1j / 4)).real\n" + "array([ 0.28670621, -0.04166451, -0.06702923, -0.03617885])\n" + ">>> sc.ker(x)\n" + "array([ 0.28670621, -0.04166451, -0.06702923, -0.03617885])") +ufunc_ker_loops[0] = loop_d_d__As_f_f +ufunc_ker_loops[1] = loop_d_d__As_d_d +ufunc_ker_types[0] = NPY_FLOAT +ufunc_ker_types[1] = NPY_FLOAT +ufunc_ker_types[2] = NPY_DOUBLE +ufunc_ker_types[3] = NPY_DOUBLE +ufunc_ker_ptr[2*0] = _func_ker_wrap +ufunc_ker_ptr[2*0+1] = ("ker") +ufunc_ker_ptr[2*1] = _func_ker_wrap +ufunc_ker_ptr[2*1+1] = ("ker") +ufunc_ker_data[0] = &ufunc_ker_ptr[2*0] +ufunc_ker_data[1] = &ufunc_ker_ptr[2*1] +ker = np.PyUFunc_FromFuncAndData(ufunc_ker_loops, ufunc_ker_data, ufunc_ker_types, 2, 1, 1, 0, "ker", ufunc_ker_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_kerp_loops[2] +cdef void *ufunc_kerp_ptr[4] +cdef void *ufunc_kerp_data[2] +cdef char ufunc_kerp_types[4] +cdef char *ufunc_kerp_doc = ( + "kerp(x, out=None)\n" + "\n" + "Derivative of the Kelvin function ker.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real argument.\n" + "out : ndarray, optional\n" + " Optional output array 
for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the derivative of ker.\n" + "\n" + "See Also\n" + "--------\n" + "ker\n" + "\n" + "References\n" + "----------\n" + ".. [dlmf] NIST, Digital Library of Mathematical Functions,\n" + " https://dlmf.nist.gov/10#PT5") +ufunc_kerp_loops[0] = loop_d_d__As_f_f +ufunc_kerp_loops[1] = loop_d_d__As_d_d +ufunc_kerp_types[0] = NPY_FLOAT +ufunc_kerp_types[1] = NPY_FLOAT +ufunc_kerp_types[2] = NPY_DOUBLE +ufunc_kerp_types[3] = NPY_DOUBLE +ufunc_kerp_ptr[2*0] = _func_kerp_wrap +ufunc_kerp_ptr[2*0+1] = ("kerp") +ufunc_kerp_ptr[2*1] = _func_kerp_wrap +ufunc_kerp_ptr[2*1+1] = ("kerp") +ufunc_kerp_data[0] = &ufunc_kerp_ptr[2*0] +ufunc_kerp_data[1] = &ufunc_kerp_ptr[2*1] +kerp = np.PyUFunc_FromFuncAndData(ufunc_kerp_loops, ufunc_kerp_data, ufunc_kerp_types, 2, 1, 1, 0, "kerp", ufunc_kerp_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_kl_div_loops[2] +cdef void *ufunc_kl_div_ptr[4] +cdef void *ufunc_kl_div_data[2] +cdef char ufunc_kl_div_types[6] +cdef char *ufunc_kl_div_doc = ( + "kl_div(x, y, out=None)\n" + "\n" + "Elementwise function for computing Kullback-Leibler divergence.\n" + "\n" + ".. math::\n" + "\n" + " \\mathrm{kl\\_div}(x, y) =\n" + " \\begin{cases}\n" + " x \\log(x / y) - x + y & x > 0, y > 0 \\\\\n" + " y & x = 0, y \\ge 0 \\\\\n" + " \\infty & \\text{otherwise}\n" + " \\end{cases}\n" + "\n" + "Parameters\n" + "----------\n" + "x, y : array_like\n" + " Real arguments\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the Kullback-Liebler divergence.\n" + "\n" + "See Also\n" + "--------\n" + "entr, rel_entr, scipy.stats.entropy\n" + "\n" + "Notes\n" + "-----\n" + ".. versionadded:: 0.15.0\n" + "\n" + "This function is non-negative and is jointly convex in `x` and `y`.\n" + "\n" + "The origin of this function is in convex programming; see [1]_ for\n" + "details. 
This is why the function contains the extra :math:`-x\n" + "+ y` terms over what might be expected from the Kullback-Leibler\n" + "divergence. For a version of the function without the extra terms,\n" + "see `rel_entr`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Boyd, Stephen and Lieven Vandenberghe. *Convex optimization*.\n" + " Cambridge University Press, 2004.\n" + " :doi:`https://doi.org/10.1017/CBO9780511804441`") +ufunc_kl_div_loops[0] = loop_d_dd__As_ff_f +ufunc_kl_div_loops[1] = loop_d_dd__As_dd_d +ufunc_kl_div_types[0] = NPY_FLOAT +ufunc_kl_div_types[1] = NPY_FLOAT +ufunc_kl_div_types[2] = NPY_FLOAT +ufunc_kl_div_types[3] = NPY_DOUBLE +ufunc_kl_div_types[4] = NPY_DOUBLE +ufunc_kl_div_types[5] = NPY_DOUBLE +ufunc_kl_div_ptr[2*0] = _func_kl_div +ufunc_kl_div_ptr[2*0+1] = ("kl_div") +ufunc_kl_div_ptr[2*1] = _func_kl_div +ufunc_kl_div_ptr[2*1+1] = ("kl_div") +ufunc_kl_div_data[0] = &ufunc_kl_div_ptr[2*0] +ufunc_kl_div_data[1] = &ufunc_kl_div_ptr[2*1] +kl_div = np.PyUFunc_FromFuncAndData(ufunc_kl_div_loops, ufunc_kl_div_data, ufunc_kl_div_types, 2, 2, 1, 0, "kl_div", ufunc_kl_div_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_kn_loops[3] +cdef void *ufunc_kn_ptr[6] +cdef void *ufunc_kn_data[3] +cdef char ufunc_kn_types[9] +cdef char *ufunc_kn_doc = ( + "kn(n, x, out=None)\n" + "\n" + "Modified Bessel function of the second kind of integer order `n`\n" + "\n" + "Returns the modified Bessel function of the second kind for integer order\n" + "`n` at real `z`.\n" + "\n" + "These are also sometimes called functions of the third kind, Basset\n" + "functions, or Macdonald functions.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like of int\n" + " Order of Bessel functions (floats will truncate with a warning)\n" + "x : array_like of float\n" + " Argument at which to evaluate the Bessel functions\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " 
Value of the Modified Bessel function of the second kind,\n" + " :math:`K_n(x)`.\n" + "\n" + "See Also\n" + "--------\n" + "kv : Same function, but accepts real order and complex argument\n" + "kvp : Derivative of this function\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the\n" + "algorithm used, see [2]_ and the references therein.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Donald E. Amos, \"AMOS, A Portable Package for Bessel Functions\n" + " of a Complex Argument and Nonnegative Order\",\n" + " http://netlib.org/amos/\n" + ".. [2] Donald E. Amos, \"Algorithm 644: A portable package for Bessel\n" + " functions of a complex argument and nonnegative order\", ACM\n" + " TOMS Vol. 12 Issue 3, Sept. 1986, p. 265\n" + "\n" + "Examples\n" + "--------\n" + "Plot the function of several orders for real input:\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import kn\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> x = np.linspace(0, 5, 1000)\n" + ">>> for N in range(6):\n" + "... 
plt.plot(x, kn(N, x), label='$K_{}(x)$'.format(N))\n" + ">>> plt.ylim(0, 10)\n" + ">>> plt.legend()\n" + ">>> plt.title(r'Modified Bessel function of the second kind $K_n(x)$')\n" + ">>> plt.show()\n" + "\n" + "Calculate for a single value at multiple orders:\n" + "\n" + ">>> kn([4, 5, 6], 1)\n" + "array([ 44.23241585, 360.9605896 , 3653.83831186])") +ufunc_kn_loops[0] = loop_d_id__As_ld_d +ufunc_kn_loops[1] = loop_d_dd__As_ff_f +ufunc_kn_loops[2] = loop_d_dd__As_dd_d +ufunc_kn_types[0] = NPY_LONG +ufunc_kn_types[1] = NPY_DOUBLE +ufunc_kn_types[2] = NPY_DOUBLE +ufunc_kn_types[3] = NPY_FLOAT +ufunc_kn_types[4] = NPY_FLOAT +ufunc_kn_types[5] = NPY_FLOAT +ufunc_kn_types[6] = NPY_DOUBLE +ufunc_kn_types[7] = NPY_DOUBLE +ufunc_kn_types[8] = NPY_DOUBLE +ufunc_kn_ptr[2*0] = _func_cbesk_wrap_real_int +ufunc_kn_ptr[2*0+1] = ("kn") +ufunc_kn_ptr[2*1] = _func_kn_unsafe +ufunc_kn_ptr[2*1+1] = ("kn") +ufunc_kn_ptr[2*2] = _func_kn_unsafe +ufunc_kn_ptr[2*2+1] = ("kn") +ufunc_kn_data[0] = &ufunc_kn_ptr[2*0] +ufunc_kn_data[1] = &ufunc_kn_ptr[2*1] +ufunc_kn_data[2] = &ufunc_kn_ptr[2*2] +kn = np.PyUFunc_FromFuncAndData(ufunc_kn_loops, ufunc_kn_data, ufunc_kn_types, 3, 2, 1, 0, "kn", ufunc_kn_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_kolmogi_loops[2] +cdef void *ufunc_kolmogi_ptr[4] +cdef void *ufunc_kolmogi_data[2] +cdef char ufunc_kolmogi_types[4] +cdef char *ufunc_kolmogi_doc = ( + "kolmogi(p, out=None)\n" + "\n" + "Inverse Survival Function of Kolmogorov distribution\n" + "\n" + "It is the inverse function to `kolmogorov`.\n" + "Returns y such that ``kolmogorov(y) == p``.\n" + "\n" + "Parameters\n" + "----------\n" + "p : float array_like\n" + " Probability\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The value(s) of kolmogi(p)\n" + "\n" + "See Also\n" + "--------\n" + "kolmogorov : The Survival Function for the distribution\n" + "scipy.stats.kstwobign : Provides the 
functionality as a continuous distribution\n" + "smirnov, smirnovi : Functions for the one-sided distribution\n" + "\n" + "Notes\n" + "-----\n" + "`kolmogorov` is used by `stats.kstest` in the application of the\n" + "Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this\n" + "function is exposed in `scpy.special`, but the recommended way to achieve\n" + "the most accurate CDF/SF/PDF/PPF/ISF computations is to use the\n" + "`stats.kstwobign` distribution.\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import kolmogi\n" + ">>> kolmogi([0, 0.1, 0.25, 0.5, 0.75, 0.9, 1.0])\n" + "array([ inf, 1.22384787, 1.01918472, 0.82757356, 0.67644769,\n" + " 0.57117327, 0. ])") +ufunc_kolmogi_loops[0] = loop_d_d__As_f_f +ufunc_kolmogi_loops[1] = loop_d_d__As_d_d +ufunc_kolmogi_types[0] = NPY_FLOAT +ufunc_kolmogi_types[1] = NPY_FLOAT +ufunc_kolmogi_types[2] = NPY_DOUBLE +ufunc_kolmogi_types[3] = NPY_DOUBLE +ufunc_kolmogi_ptr[2*0] = _func_kolmogi +ufunc_kolmogi_ptr[2*0+1] = ("kolmogi") +ufunc_kolmogi_ptr[2*1] = _func_kolmogi +ufunc_kolmogi_ptr[2*1+1] = ("kolmogi") +ufunc_kolmogi_data[0] = &ufunc_kolmogi_ptr[2*0] +ufunc_kolmogi_data[1] = &ufunc_kolmogi_ptr[2*1] +kolmogi = np.PyUFunc_FromFuncAndData(ufunc_kolmogi_loops, ufunc_kolmogi_data, ufunc_kolmogi_types, 2, 1, 1, 0, "kolmogi", ufunc_kolmogi_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_kolmogorov_loops[2] +cdef void *ufunc_kolmogorov_ptr[4] +cdef void *ufunc_kolmogorov_data[2] +cdef char ufunc_kolmogorov_types[4] +cdef char *ufunc_kolmogorov_doc = ( + "kolmogorov(y, out=None)\n" + "\n" + "Complementary cumulative distribution (Survival Function) function of\n" + "Kolmogorov distribution.\n" + "\n" + "Returns the complementary cumulative distribution function of\n" + "Kolmogorov's limiting distribution (``D_n*\\sqrt(n)`` as n goes to infinity)\n" + "of a two-sided test for equality between an empirical and a theoretical\n" + "distribution. 
It is equal to the (limit as n->infinity of the)\n" + "probability that ``sqrt(n) * max absolute deviation > y``.\n" + "\n" + "Parameters\n" + "----------\n" + "y : float array_like\n" + " Absolute deviation between the Empirical CDF (ECDF) and the target CDF,\n" + " multiplied by sqrt(n).\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The value(s) of kolmogorov(y)\n" + "\n" + "See Also\n" + "--------\n" + "kolmogi : The Inverse Survival Function for the distribution\n" + "scipy.stats.kstwobign : Provides the functionality as a continuous distribution\n" + "smirnov, smirnovi : Functions for the one-sided distribution\n" + "\n" + "Notes\n" + "-----\n" + "`kolmogorov` is used by `stats.kstest` in the application of the\n" + "Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this\n" + "function is exposed in `scpy.special`, but the recommended way to achieve\n" + "the most accurate CDF/SF/PDF/PPF/ISF computations is to use the\n" + "`stats.kstwobign` distribution.\n" + "\n" + "Examples\n" + "--------\n" + "Show the probability of a gap at least as big as 0, 0.5 and 1.0.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import kolmogorov\n" + ">>> from scipy.stats import kstwobign\n" + ">>> kolmogorov([0, 0.5, 1.0])\n" + "array([ 1. 
, 0.96394524, 0.26999967])\n" + "\n" + "Compare a sample of size 1000 drawn from a Laplace(0, 1) distribution against\n" + "the target distribution, a Normal(0, 1) distribution.\n" + "\n" + ">>> from scipy.stats import norm, laplace\n" + ">>> rng = np.random.default_rng()\n" + ">>> n = 1000\n" + ">>> lap01 = laplace(0, 1)\n" + ">>> x = np.sort(lap01.rvs(n, random_state=rng))\n" + ">>> np.mean(x), np.std(x)\n" + "(-0.05841730131499543, 1.3968109101997568)\n" + "\n" + "Construct the Empirical CDF and the K-S statistic Dn.\n" + "\n" + ">>> target = norm(0,1) # Normal mean 0, stddev 1\n" + ">>> cdfs = target.cdf(x)\n" + ">>> ecdfs = np.arange(n+1, dtype=float)/n\n" + ">>> gaps = np.column_stack([cdfs - ecdfs[:n], ecdfs[1:] - cdfs])\n" + ">>> Dn = np.max(gaps)\n" + ">>> Kn = np.sqrt(n) * Dn\n" + ">>> print('Dn=%f, sqrt(n)*Dn=%f' % (Dn, Kn))\n" + "Dn=0.043363, sqrt(n)*Dn=1.371265\n" + ">>> print(chr(10).join(['For a sample of size n drawn from a N(0, 1) distribution:',\n" + "... ' the approximate Kolmogorov probability that sqrt(n)*Dn>=%f is %f' %\n" + "... (Kn, kolmogorov(Kn)),\n" + "... ' the approximate Kolmogorov probability that sqrt(n)*Dn<=%f is %f' %\n" + "... (Kn, kstwobign.cdf(Kn))]))\n" + "For a sample of size n drawn from a N(0, 1) distribution:\n" + " the approximate Kolmogorov probability that sqrt(n)*Dn>=1.371265 is 0.046533\n" + " the approximate Kolmogorov probability that sqrt(n)*Dn<=1.371265 is 0.953467\n" + "\n" + "Plot the Empirical CDF against the target N(0, 1) CDF.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> plt.step(np.concatenate([[-3], x]), ecdfs, where='post', label='Empirical CDF')\n" + ">>> x3 = np.linspace(-3, 3, 100)\n" + ">>> plt.plot(x3, target.cdf(x3), label='CDF for N(0, 1)')\n" + ">>> plt.ylim([0, 1]); plt.grid(True); plt.legend();\n" + ">>> # Add vertical lines marking Dn+ and Dn-\n" + ">>> iminus, iplus = np.argmax(gaps, axis=0)\n" + ">>> plt.vlines([x[iminus]], ecdfs[iminus], cdfs[iminus],\n" + "... 
color='r', linestyle='dashed', lw=4)\n" + ">>> plt.vlines([x[iplus]], cdfs[iplus], ecdfs[iplus+1],\n" + "... color='r', linestyle='dashed', lw=4)\n" + ">>> plt.show()") +ufunc_kolmogorov_loops[0] = loop_d_d__As_f_f +ufunc_kolmogorov_loops[1] = loop_d_d__As_d_d +ufunc_kolmogorov_types[0] = NPY_FLOAT +ufunc_kolmogorov_types[1] = NPY_FLOAT +ufunc_kolmogorov_types[2] = NPY_DOUBLE +ufunc_kolmogorov_types[3] = NPY_DOUBLE +ufunc_kolmogorov_ptr[2*0] = _func_kolmogorov +ufunc_kolmogorov_ptr[2*0+1] = ("kolmogorov") +ufunc_kolmogorov_ptr[2*1] = _func_kolmogorov +ufunc_kolmogorov_ptr[2*1+1] = ("kolmogorov") +ufunc_kolmogorov_data[0] = &ufunc_kolmogorov_ptr[2*0] +ufunc_kolmogorov_data[1] = &ufunc_kolmogorov_ptr[2*1] +kolmogorov = np.PyUFunc_FromFuncAndData(ufunc_kolmogorov_loops, ufunc_kolmogorov_data, ufunc_kolmogorov_types, 2, 1, 1, 0, "kolmogorov", ufunc_kolmogorov_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_kv_loops[4] +cdef void *ufunc_kv_ptr[8] +cdef void *ufunc_kv_data[4] +cdef char ufunc_kv_types[12] +cdef char *ufunc_kv_doc = ( + "kv(v, z, out=None)\n" + "\n" + "Modified Bessel function of the second kind of real order `v`\n" + "\n" + "Returns the modified Bessel function of the second kind for real order\n" + "`v` at complex `z`.\n" + "\n" + "These are also sometimes called functions of the third kind, Basset\n" + "functions, or Macdonald functions. They are defined as those solutions\n" + "of the modified Bessel equation for which,\n" + "\n" + ".. math::\n" + " K_v(x) \\sim \\sqrt{\\pi/(2x)} \\exp(-x)\n" + "\n" + "as :math:`x \\to \\infty` [3]_.\n" + "\n" + "Parameters\n" + "----------\n" + "v : array_like of float\n" + " Order of Bessel functions\n" + "z : array_like of complex\n" + " Argument at which to evaluate the Bessel functions\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The results. 
Note that input must be of complex type to get complex\n" + " output, e.g. ``kv(3, -2+0j)`` instead of ``kv(3, -2)``.\n" + "\n" + "See Also\n" + "--------\n" + "kve : This function with leading exponential behavior stripped off.\n" + "kvp : Derivative of this function\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the\n" + "algorithm used, see [2]_ and the references therein.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Donald E. Amos, \"AMOS, A Portable Package for Bessel Functions\n" + " of a Complex Argument and Nonnegative Order\",\n" + " http://netlib.org/amos/\n" + ".. [2] Donald E. Amos, \"Algorithm 644: A portable package for Bessel\n" + " functions of a complex argument and nonnegative order\", ACM\n" + " TOMS Vol. 12 Issue 3, Sept. 1986, p. 265\n" + ".. [3] NIST Digital Library of Mathematical Functions,\n" + " Eq. 10.25.E3. https://dlmf.nist.gov/10.25.E3\n" + "\n" + "Examples\n" + "--------\n" + "Plot the function of several orders for real input:\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import kv\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> x = np.linspace(0, 5, 1000)\n" + ">>> for N in np.linspace(0, 6, 5):\n" + "... 
plt.plot(x, kv(N, x), label='$K_{{{}}}(x)$'.format(N))\n" + ">>> plt.ylim(0, 10)\n" + ">>> plt.legend()\n" + ">>> plt.title(r'Modified Bessel function of the second kind $K_\\nu(x)$')\n" + ">>> plt.show()\n" + "\n" + "Calculate for a single value at multiple orders:\n" + "\n" + ">>> kv([4, 4.5, 5], 1+2j)\n" + "array([ 0.1992+2.3892j, 2.3493+3.6j , 7.2827+3.8104j])") +ufunc_kv_loops[0] = loop_d_dd__As_ff_f +ufunc_kv_loops[1] = loop_D_dD__As_fF_F +ufunc_kv_loops[2] = loop_d_dd__As_dd_d +ufunc_kv_loops[3] = loop_D_dD__As_dD_D +ufunc_kv_types[0] = NPY_FLOAT +ufunc_kv_types[1] = NPY_FLOAT +ufunc_kv_types[2] = NPY_FLOAT +ufunc_kv_types[3] = NPY_FLOAT +ufunc_kv_types[4] = NPY_CFLOAT +ufunc_kv_types[5] = NPY_CFLOAT +ufunc_kv_types[6] = NPY_DOUBLE +ufunc_kv_types[7] = NPY_DOUBLE +ufunc_kv_types[8] = NPY_DOUBLE +ufunc_kv_types[9] = NPY_DOUBLE +ufunc_kv_types[10] = NPY_CDOUBLE +ufunc_kv_types[11] = NPY_CDOUBLE +ufunc_kv_ptr[2*0] = _func_cbesk_wrap_real +ufunc_kv_ptr[2*0+1] = ("kv") +ufunc_kv_ptr[2*1] = _func_cbesk_wrap +ufunc_kv_ptr[2*1+1] = ("kv") +ufunc_kv_ptr[2*2] = _func_cbesk_wrap_real +ufunc_kv_ptr[2*2+1] = ("kv") +ufunc_kv_ptr[2*3] = _func_cbesk_wrap +ufunc_kv_ptr[2*3+1] = ("kv") +ufunc_kv_data[0] = &ufunc_kv_ptr[2*0] +ufunc_kv_data[1] = &ufunc_kv_ptr[2*1] +ufunc_kv_data[2] = &ufunc_kv_ptr[2*2] +ufunc_kv_data[3] = &ufunc_kv_ptr[2*3] +kv = np.PyUFunc_FromFuncAndData(ufunc_kv_loops, ufunc_kv_data, ufunc_kv_types, 4, 2, 1, 0, "kv", ufunc_kv_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_kve_loops[4] +cdef void *ufunc_kve_ptr[8] +cdef void *ufunc_kve_data[4] +cdef char ufunc_kve_types[12] +cdef char *ufunc_kve_doc = ( + "kve(v, z, out=None)\n" + "\n" + "Exponentially scaled modified Bessel function of the second kind.\n" + "\n" + "Returns the exponentially scaled, modified Bessel function of the\n" + "second kind (sometimes called the third kind) for real order `v` at\n" + "complex `z`::\n" + "\n" + " kve(v, z) = kv(v, z) * exp(z)\n" + "\n" + "Parameters\n" + 
"----------\n" + "v : array_like of float\n" + " Order of Bessel functions\n" + "z : array_like of complex\n" + " Argument at which to evaluate the Bessel functions\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The exponentially scaled modified Bessel function of the second kind.\n" + "\n" + "See Also\n" + "--------\n" + "kv : This function without exponential scaling.\n" + "k0e : Faster version of this function for order 0.\n" + "k1e : Faster version of this function for order 1.\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the\n" + "algorithm used, see [2]_ and the references therein.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Donald E. Amos, \"AMOS, A Portable Package for Bessel Functions\n" + " of a Complex Argument and Nonnegative Order\",\n" + " http://netlib.org/amos/\n" + ".. [2] Donald E. Amos, \"Algorithm 644: A portable package for Bessel\n" + " functions of a complex argument and nonnegative order\", ACM\n" + " TOMS Vol. 12 Issue 3, Sept. 1986, p. 
265\n" + "\n" + "Examples\n" + "--------\n" + "In the following example `kv` returns 0 whereas `kve` still returns\n" + "a useful finite number.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import kv, kve\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> kv(3, 1000.), kve(3, 1000.)\n" + "(0.0, 0.03980696128440973)\n" + "\n" + "Evaluate the function at one point for different orders by\n" + "providing a list or NumPy array as argument for the `v` parameter:\n" + "\n" + ">>> kve([0, 1, 1.5], 1.)\n" + "array([1.14446308, 1.63615349, 2.50662827])\n" + "\n" + "Evaluate the function at several points for order 0 by providing an\n" + "array for `z`.\n" + "\n" + ">>> points = np.array([1., 3., 10.])\n" + ">>> kve(0, points)\n" + "array([1.14446308, 0.6977616 , 0.39163193])\n" + "\n" + "Evaluate the function at several points for different orders by\n" + "providing arrays for both `v` for `z`. Both arrays have to be\n" + "broadcastable to the correct shape. To calculate the orders 0, 1\n" + "and 2 for a 1D array of points:\n" + "\n" + ">>> kve([[0], [1], [2]], points)\n" + "array([[1.14446308, 0.6977616 , 0.39163193],\n" + " [1.63615349, 0.80656348, 0.41076657],\n" + " [4.41677005, 1.23547058, 0.47378525]])\n" + "\n" + "Plot the functions of order 0 to 3 from 0 to 5.\n" + "\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(0., 5., 1000)\n" + ">>> for i in range(4):\n" + "... 
ax.plot(x, kve(i, x), label=fr'$K_{i!r}(z)\\cdot e^z$')\n" + ">>> ax.legend()\n" + ">>> ax.set_xlabel(r\"$z$\")\n" + ">>> ax.set_ylim(0, 4)\n" + ">>> ax.set_xlim(0, 5)\n" + ">>> plt.show()") +ufunc_kve_loops[0] = loop_d_dd__As_ff_f +ufunc_kve_loops[1] = loop_D_dD__As_fF_F +ufunc_kve_loops[2] = loop_d_dd__As_dd_d +ufunc_kve_loops[3] = loop_D_dD__As_dD_D +ufunc_kve_types[0] = NPY_FLOAT +ufunc_kve_types[1] = NPY_FLOAT +ufunc_kve_types[2] = NPY_FLOAT +ufunc_kve_types[3] = NPY_FLOAT +ufunc_kve_types[4] = NPY_CFLOAT +ufunc_kve_types[5] = NPY_CFLOAT +ufunc_kve_types[6] = NPY_DOUBLE +ufunc_kve_types[7] = NPY_DOUBLE +ufunc_kve_types[8] = NPY_DOUBLE +ufunc_kve_types[9] = NPY_DOUBLE +ufunc_kve_types[10] = NPY_CDOUBLE +ufunc_kve_types[11] = NPY_CDOUBLE +ufunc_kve_ptr[2*0] = _func_cbesk_wrap_e_real +ufunc_kve_ptr[2*0+1] = ("kve") +ufunc_kve_ptr[2*1] = _func_cbesk_wrap_e +ufunc_kve_ptr[2*1+1] = ("kve") +ufunc_kve_ptr[2*2] = _func_cbesk_wrap_e_real +ufunc_kve_ptr[2*2+1] = ("kve") +ufunc_kve_ptr[2*3] = _func_cbesk_wrap_e +ufunc_kve_ptr[2*3+1] = ("kve") +ufunc_kve_data[0] = &ufunc_kve_ptr[2*0] +ufunc_kve_data[1] = &ufunc_kve_ptr[2*1] +ufunc_kve_data[2] = &ufunc_kve_ptr[2*2] +ufunc_kve_data[3] = &ufunc_kve_ptr[2*3] +kve = np.PyUFunc_FromFuncAndData(ufunc_kve_loops, ufunc_kve_data, ufunc_kve_types, 4, 2, 1, 0, "kve", ufunc_kve_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_log1p_loops[4] +cdef void *ufunc_log1p_ptr[8] +cdef void *ufunc_log1p_data[4] +cdef char ufunc_log1p_types[8] +cdef char *ufunc_log1p_doc = ( + "log1p(x, out=None)\n" + "\n" + "Calculates log(1 + x) for use when `x` is near zero.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real or complex valued input.\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of ``log(1 + x)``.\n" + "\n" + "See Also\n" + "--------\n" + "expm1, cosm1\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy 
as np\n" + ">>> import scipy.special as sc\n" + "\n" + "It is more accurate than using ``log(1 + x)`` directly for ``x``\n" + "near 0. Note that in the below example ``1 + 1e-17 == 1`` to\n" + "double precision.\n" + "\n" + ">>> sc.log1p(1e-17)\n" + "1e-17\n" + ">>> np.log(1 + 1e-17)\n" + "0.0") +ufunc_log1p_loops[0] = loop_d_d__As_f_f +ufunc_log1p_loops[1] = loop_d_d__As_d_d +ufunc_log1p_loops[2] = loop_D_D__As_F_F +ufunc_log1p_loops[3] = loop_D_D__As_D_D +ufunc_log1p_types[0] = NPY_FLOAT +ufunc_log1p_types[1] = NPY_FLOAT +ufunc_log1p_types[2] = NPY_DOUBLE +ufunc_log1p_types[3] = NPY_DOUBLE +ufunc_log1p_types[4] = NPY_CFLOAT +ufunc_log1p_types[5] = NPY_CFLOAT +ufunc_log1p_types[6] = NPY_CDOUBLE +ufunc_log1p_types[7] = NPY_CDOUBLE +ufunc_log1p_ptr[2*0] = _func_log1p +ufunc_log1p_ptr[2*0+1] = ("log1p") +ufunc_log1p_ptr[2*1] = _func_log1p +ufunc_log1p_ptr[2*1+1] = ("log1p") +ufunc_log1p_ptr[2*2] = _func_clog1p +ufunc_log1p_ptr[2*2+1] = ("log1p") +ufunc_log1p_ptr[2*3] = _func_clog1p +ufunc_log1p_ptr[2*3+1] = ("log1p") +ufunc_log1p_data[0] = &ufunc_log1p_ptr[2*0] +ufunc_log1p_data[1] = &ufunc_log1p_ptr[2*1] +ufunc_log1p_data[2] = &ufunc_log1p_ptr[2*2] +ufunc_log1p_data[3] = &ufunc_log1p_ptr[2*3] +log1p = np.PyUFunc_FromFuncAndData(ufunc_log1p_loops, ufunc_log1p_data, ufunc_log1p_types, 4, 1, 1, 0, "log1p", ufunc_log1p_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_log_expit_loops[3] +cdef void *ufunc_log_expit_ptr[6] +cdef void *ufunc_log_expit_data[3] +cdef char ufunc_log_expit_types[6] +cdef char *ufunc_log_expit_doc = ( + "log_expit(x, out=None)\n" + "\n" + "Logarithm of the logistic sigmoid function.\n" + "\n" + "The SciPy implementation of the logistic sigmoid function is\n" + "`scipy.special.expit`, so this function is called ``log_expit``.\n" + "\n" + "The function is mathematically equivalent to ``log(expit(x))``, but\n" + "is formulated to avoid loss of precision for inputs with large\n" + "(positive or negative) magnitude.\n" + "\n" + "Parameters\n" + 
"----------\n" + "x : array_like\n" + " The values to apply ``log_expit`` to element-wise.\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "out : scalar or ndarray\n" + " The computed values, an ndarray of the same shape as ``x``.\n" + "\n" + "See Also\n" + "--------\n" + "expit\n" + "\n" + "Notes\n" + "-----\n" + "As a ufunc, ``log_expit`` takes a number of optional keyword arguments.\n" + "For more information see\n" + "`ufuncs `_\n" + "\n" + ".. versionadded:: 1.8.0\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy.special import log_expit, expit\n" + "\n" + ">>> log_expit([-3.0, 0.25, 2.5, 5.0])\n" + "array([-3.04858735, -0.57593942, -0.07888973, -0.00671535])\n" + "\n" + "Large negative values:\n" + "\n" + ">>> log_expit([-100, -500, -1000])\n" + "array([ -100., -500., -1000.])\n" + "\n" + "Note that ``expit(-1000)`` returns 0, so the naive implementation\n" + "``log(expit(-1000))`` return ``-inf``.\n" + "\n" + "Large positive values:\n" + "\n" + ">>> log_expit([29, 120, 400])\n" + "array([-2.54366565e-013, -7.66764807e-053, -1.91516960e-174])\n" + "\n" + "Compare that to the naive implementation:\n" + "\n" + ">>> np.log(expit([29, 120, 400]))\n" + "array([-2.54463117e-13, 0.00000000e+00, 0.00000000e+00])\n" + "\n" + "The first value is accurate to only 3 digits, and the larger inputs\n" + "lose all precision and return 0.") +ufunc_log_expit_loops[0] = loop_f_f__As_f_f +ufunc_log_expit_loops[1] = loop_d_d__As_d_d +ufunc_log_expit_loops[2] = loop_g_g__As_g_g +ufunc_log_expit_types[0] = NPY_FLOAT +ufunc_log_expit_types[1] = NPY_FLOAT +ufunc_log_expit_types[2] = NPY_DOUBLE +ufunc_log_expit_types[3] = NPY_DOUBLE +ufunc_log_expit_types[4] = NPY_LONGDOUBLE +ufunc_log_expit_types[5] = NPY_LONGDOUBLE +ufunc_log_expit_ptr[2*0] = scipy.special._ufuncs_cxx._export_log_expitf +ufunc_log_expit_ptr[2*0+1] = ("log_expit") +ufunc_log_expit_ptr[2*1] = 
scipy.special._ufuncs_cxx._export_log_expit +ufunc_log_expit_ptr[2*1+1] = ("log_expit") +ufunc_log_expit_ptr[2*2] = scipy.special._ufuncs_cxx._export_log_expitl +ufunc_log_expit_ptr[2*2+1] = ("log_expit") +ufunc_log_expit_data[0] = &ufunc_log_expit_ptr[2*0] +ufunc_log_expit_data[1] = &ufunc_log_expit_ptr[2*1] +ufunc_log_expit_data[2] = &ufunc_log_expit_ptr[2*2] +log_expit = np.PyUFunc_FromFuncAndData(ufunc_log_expit_loops, ufunc_log_expit_data, ufunc_log_expit_types, 3, 1, 1, 0, "log_expit", ufunc_log_expit_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_log_ndtr_loops[4] +cdef void *ufunc_log_ndtr_ptr[8] +cdef void *ufunc_log_ndtr_data[4] +cdef char ufunc_log_ndtr_types[8] +cdef char *ufunc_log_ndtr_doc = ( + "log_ndtr(x, out=None)\n" + "\n" + "Logarithm of Gaussian cumulative distribution function.\n" + "\n" + "Returns the log of the area under the standard Gaussian probability\n" + "density function, integrated from minus infinity to `x`::\n" + "\n" + " log(1/sqrt(2*pi) * integral(exp(-t**2 / 2), t=-inf..x))\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like, real or complex\n" + " Argument\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The value of the log of the normal CDF evaluated at `x`\n" + "\n" + "See Also\n" + "--------\n" + "erf\n" + "erfc\n" + "scipy.stats.norm\n" + "ndtr\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy.special import log_ndtr, ndtr\n" + "\n" + "The benefit of ``log_ndtr(x)`` over the naive implementation\n" + "``np.log(ndtr(x))`` is most evident with moderate to large positive\n" + "values of ``x``:\n" + "\n" + ">>> x = np.array([6, 7, 9, 12, 15, 25])\n" + ">>> log_ndtr(x)\n" + "array([-9.86587646e-010, -1.27981254e-012, -1.12858841e-019,\n" + " -1.77648211e-033, -3.67096620e-051, -3.05669671e-138])\n" + "\n" + "The results of the naive calculation for the moderate ``x`` values\n" + 
"have only 5 or 6 correct significant digits. For values of ``x``\n" + "greater than approximately 8.3, the naive expression returns 0:\n" + "\n" + ">>> np.log(ndtr(x))\n" + "array([-9.86587701e-10, -1.27986510e-12, 0.00000000e+00,\n" + " 0.00000000e+00, 0.00000000e+00, 0.00000000e+00])") +ufunc_log_ndtr_loops[0] = loop_d_d__As_f_f +ufunc_log_ndtr_loops[1] = loop_d_d__As_d_d +ufunc_log_ndtr_loops[2] = loop_D_D__As_F_F +ufunc_log_ndtr_loops[3] = loop_D_D__As_D_D +ufunc_log_ndtr_types[0] = NPY_FLOAT +ufunc_log_ndtr_types[1] = NPY_FLOAT +ufunc_log_ndtr_types[2] = NPY_DOUBLE +ufunc_log_ndtr_types[3] = NPY_DOUBLE +ufunc_log_ndtr_types[4] = NPY_CFLOAT +ufunc_log_ndtr_types[5] = NPY_CFLOAT +ufunc_log_ndtr_types[6] = NPY_CDOUBLE +ufunc_log_ndtr_types[7] = NPY_CDOUBLE +ufunc_log_ndtr_ptr[2*0] = scipy.special._ufuncs_cxx._export_faddeeva_log_ndtr +ufunc_log_ndtr_ptr[2*0+1] = ("log_ndtr") +ufunc_log_ndtr_ptr[2*1] = scipy.special._ufuncs_cxx._export_faddeeva_log_ndtr +ufunc_log_ndtr_ptr[2*1+1] = ("log_ndtr") +ufunc_log_ndtr_ptr[2*2] = scipy.special._ufuncs_cxx._export_faddeeva_log_ndtr_complex +ufunc_log_ndtr_ptr[2*2+1] = ("log_ndtr") +ufunc_log_ndtr_ptr[2*3] = scipy.special._ufuncs_cxx._export_faddeeva_log_ndtr_complex +ufunc_log_ndtr_ptr[2*3+1] = ("log_ndtr") +ufunc_log_ndtr_data[0] = &ufunc_log_ndtr_ptr[2*0] +ufunc_log_ndtr_data[1] = &ufunc_log_ndtr_ptr[2*1] +ufunc_log_ndtr_data[2] = &ufunc_log_ndtr_ptr[2*2] +ufunc_log_ndtr_data[3] = &ufunc_log_ndtr_ptr[2*3] +log_ndtr = np.PyUFunc_FromFuncAndData(ufunc_log_ndtr_loops, ufunc_log_ndtr_data, ufunc_log_ndtr_types, 4, 1, 1, 0, "log_ndtr", ufunc_log_ndtr_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_loggamma_loops[4] +cdef void *ufunc_loggamma_ptr[8] +cdef void *ufunc_loggamma_data[4] +cdef char ufunc_loggamma_types[8] +cdef char *ufunc_loggamma_doc = ( + "loggamma(z, out=None)\n" + "\n" + "Principal branch of the logarithm of the gamma function.\n" + "\n" + "Defined to be :math:`\\log(\\Gamma(x))` for :math:`x > 0` and\n" + 
"extended to the complex plane by analytic continuation. The\n" + "function has a single branch cut on the negative real axis.\n" + "\n" + ".. versionadded:: 0.18.0\n" + "\n" + "Parameters\n" + "----------\n" + "z : array_like\n" + " Values in the complex plane at which to compute ``loggamma``\n" + "out : ndarray, optional\n" + " Output array for computed values of ``loggamma``\n" + "\n" + "Returns\n" + "-------\n" + "loggamma : scalar or ndarray\n" + " Values of ``loggamma`` at z.\n" + "\n" + "See Also\n" + "--------\n" + "gammaln : logarithm of the absolute value of the gamma function\n" + "gammasgn : sign of the gamma function\n" + "\n" + "Notes\n" + "-----\n" + "It is not generally true that :math:`\\log\\Gamma(z) =\n" + "\\log(\\Gamma(z))`, though the real parts of the functions do\n" + "agree. The benefit of not defining `loggamma` as\n" + ":math:`\\log(\\Gamma(z))` is that the latter function has a\n" + "complicated branch cut structure whereas `loggamma` is analytic\n" + "except for on the negative real axis.\n" + "\n" + "The identities\n" + "\n" + ".. math::\n" + " \\exp(\\log\\Gamma(z)) &= \\Gamma(z) \\\\\n" + " \\log\\Gamma(z + 1) &= \\log(z) + \\log\\Gamma(z)\n" + "\n" + "make `loggamma` useful for working in complex logspace.\n" + "\n" + "On the real line `loggamma` is related to `gammaln` via\n" + "``exp(loggamma(x + 0j)) = gammasgn(x)*exp(gammaln(x))``, up to\n" + "rounding error.\n" + "\n" + "The implementation here is based on [hare1997]_.\n" + "\n" + "References\n" + "----------\n" + ".. [hare1997] D.E.G. 
Hare,\n" + " *Computing the Principal Branch of log-Gamma*,\n" + " Journal of Algorithms, Volume 25, Issue 2, November 1997, pages 221-236.") +ufunc_loggamma_loops[0] = loop_d_d__As_f_f +ufunc_loggamma_loops[1] = loop_d_d__As_d_d +ufunc_loggamma_loops[2] = loop_D_D__As_F_F +ufunc_loggamma_loops[3] = loop_D_D__As_D_D +ufunc_loggamma_types[0] = NPY_FLOAT +ufunc_loggamma_types[1] = NPY_FLOAT +ufunc_loggamma_types[2] = NPY_DOUBLE +ufunc_loggamma_types[3] = NPY_DOUBLE +ufunc_loggamma_types[4] = NPY_CFLOAT +ufunc_loggamma_types[5] = NPY_CFLOAT +ufunc_loggamma_types[6] = NPY_CDOUBLE +ufunc_loggamma_types[7] = NPY_CDOUBLE +ufunc_loggamma_ptr[2*0] = scipy.special._ufuncs_cxx._export_loggamma_real +ufunc_loggamma_ptr[2*0+1] = ("loggamma") +ufunc_loggamma_ptr[2*1] = scipy.special._ufuncs_cxx._export_loggamma_real +ufunc_loggamma_ptr[2*1+1] = ("loggamma") +ufunc_loggamma_ptr[2*2] = scipy.special._ufuncs_cxx._export_loggamma +ufunc_loggamma_ptr[2*2+1] = ("loggamma") +ufunc_loggamma_ptr[2*3] = scipy.special._ufuncs_cxx._export_loggamma +ufunc_loggamma_ptr[2*3+1] = ("loggamma") +ufunc_loggamma_data[0] = &ufunc_loggamma_ptr[2*0] +ufunc_loggamma_data[1] = &ufunc_loggamma_ptr[2*1] +ufunc_loggamma_data[2] = &ufunc_loggamma_ptr[2*2] +ufunc_loggamma_data[3] = &ufunc_loggamma_ptr[2*3] +loggamma = np.PyUFunc_FromFuncAndData(ufunc_loggamma_loops, ufunc_loggamma_data, ufunc_loggamma_types, 4, 1, 1, 0, "loggamma", ufunc_loggamma_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_logit_loops[3] +cdef void *ufunc_logit_ptr[6] +cdef void *ufunc_logit_data[3] +cdef char ufunc_logit_types[6] +cdef char *ufunc_logit_doc = ( + "logit(x, out=None)\n" + "\n" + "Logit ufunc for ndarrays.\n" + "\n" + "The logit function is defined as logit(p) = log(p/(1-p)).\n" + "Note that logit(0) = -inf, logit(1) = inf, and logit(p)\n" + "for p<0 or p>1 yields nan.\n" + "\n" + "Parameters\n" + "----------\n" + "x : ndarray\n" + " The ndarray to apply logit to element-wise.\n" + "out : ndarray, optional\n" + " Optional 
output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " An ndarray of the same shape as x. Its entries\n" + " are logit of the corresponding entry of x.\n" + "\n" + "See Also\n" + "--------\n" + "expit\n" + "\n" + "Notes\n" + "-----\n" + "As a ufunc logit takes a number of optional\n" + "keyword arguments. For more information\n" + "see `ufuncs `_\n" + "\n" + ".. versionadded:: 0.10.0\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy.special import logit, expit\n" + "\n" + ">>> logit([0, 0.25, 0.5, 0.75, 1])\n" + "array([ -inf, -1.09861229, 0. , 1.09861229, inf])\n" + "\n" + "`expit` is the inverse of `logit`:\n" + "\n" + ">>> expit(logit([0.1, 0.75, 0.999]))\n" + "array([ 0.1 , 0.75 , 0.999])\n" + "\n" + "Plot logit(x) for x in [0, 1]:\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> x = np.linspace(0, 1, 501)\n" + ">>> y = logit(x)\n" + ">>> plt.plot(x, y)\n" + ">>> plt.grid()\n" + ">>> plt.ylim(-6, 6)\n" + ">>> plt.xlabel('x')\n" + ">>> plt.title('logit(x)')\n" + ">>> plt.show()") +ufunc_logit_loops[0] = loop_f_f__As_f_f +ufunc_logit_loops[1] = loop_d_d__As_d_d +ufunc_logit_loops[2] = loop_g_g__As_g_g +ufunc_logit_types[0] = NPY_FLOAT +ufunc_logit_types[1] = NPY_FLOAT +ufunc_logit_types[2] = NPY_DOUBLE +ufunc_logit_types[3] = NPY_DOUBLE +ufunc_logit_types[4] = NPY_LONGDOUBLE +ufunc_logit_types[5] = NPY_LONGDOUBLE +ufunc_logit_ptr[2*0] = scipy.special._ufuncs_cxx._export_logitf +ufunc_logit_ptr[2*0+1] = ("logit") +ufunc_logit_ptr[2*1] = scipy.special._ufuncs_cxx._export_logit +ufunc_logit_ptr[2*1+1] = ("logit") +ufunc_logit_ptr[2*2] = scipy.special._ufuncs_cxx._export_logitl +ufunc_logit_ptr[2*2+1] = ("logit") +ufunc_logit_data[0] = &ufunc_logit_ptr[2*0] +ufunc_logit_data[1] = &ufunc_logit_ptr[2*1] +ufunc_logit_data[2] = &ufunc_logit_ptr[2*2] +logit = np.PyUFunc_FromFuncAndData(ufunc_logit_loops, ufunc_logit_data, ufunc_logit_types, 3, 1, 1, 0, "logit", ufunc_logit_doc, 
0) + +cdef np.PyUFuncGenericFunction ufunc_lpmv_loops[2] +cdef void *ufunc_lpmv_ptr[4] +cdef void *ufunc_lpmv_data[2] +cdef char ufunc_lpmv_types[8] +cdef char *ufunc_lpmv_doc = ( + "lpmv(m, v, x, out=None)\n" + "\n" + "Associated Legendre function of integer order and real degree.\n" + "\n" + "Defined as\n" + "\n" + ".. math::\n" + "\n" + " P_v^m = (-1)^m (1 - x^2)^{m/2} \\frac{d^m}{dx^m} P_v(x)\n" + "\n" + "where\n" + "\n" + ".. math::\n" + "\n" + " P_v = \\sum_{k = 0}^\\infty \\frac{(-v)_k (v + 1)_k}{(k!)^2}\n" + " \\left(\\frac{1 - x}{2}\\right)^k\n" + "\n" + "is the Legendre function of the first kind. Here :math:`(\\cdot)_k`\n" + "is the Pochhammer symbol; see `poch`.\n" + "\n" + "Parameters\n" + "----------\n" + "m : array_like\n" + " Order (int or float). If passed a float not equal to an\n" + " integer the function returns NaN.\n" + "v : array_like\n" + " Degree (float).\n" + "x : array_like\n" + " Argument (float). Must have ``|x| <= 1``.\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "pmv : scalar or ndarray\n" + " Value of the associated Legendre function.\n" + "\n" + "See Also\n" + "--------\n" + "lpmn : Compute the associated Legendre function for all orders\n" + " ``0, ..., m`` and degrees ``0, ..., n``.\n" + "clpmn : Compute the associated Legendre function at complex\n" + " arguments.\n" + "\n" + "Notes\n" + "-----\n" + "Note that this implementation includes the Condon-Shortley phase.\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Zhang, Jin, \"Computation of Special Functions\", John Wiley\n" + " and Sons, Inc, 1996.") +ufunc_lpmv_loops[0] = loop_d_ddd__As_fff_f +ufunc_lpmv_loops[1] = loop_d_ddd__As_ddd_d +ufunc_lpmv_types[0] = NPY_FLOAT +ufunc_lpmv_types[1] = NPY_FLOAT +ufunc_lpmv_types[2] = NPY_FLOAT +ufunc_lpmv_types[3] = NPY_FLOAT +ufunc_lpmv_types[4] = NPY_DOUBLE +ufunc_lpmv_types[5] = NPY_DOUBLE +ufunc_lpmv_types[6] = NPY_DOUBLE +ufunc_lpmv_types[7] = NPY_DOUBLE +ufunc_lpmv_ptr[2*0] = _func_pmv_wrap +ufunc_lpmv_ptr[2*0+1] = ("lpmv") +ufunc_lpmv_ptr[2*1] = _func_pmv_wrap +ufunc_lpmv_ptr[2*1+1] = ("lpmv") +ufunc_lpmv_data[0] = &ufunc_lpmv_ptr[2*0] +ufunc_lpmv_data[1] = &ufunc_lpmv_ptr[2*1] +lpmv = np.PyUFunc_FromFuncAndData(ufunc_lpmv_loops, ufunc_lpmv_data, ufunc_lpmv_types, 2, 3, 1, 0, "lpmv", ufunc_lpmv_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_mathieu_a_loops[2] +cdef void *ufunc_mathieu_a_ptr[4] +cdef void *ufunc_mathieu_a_data[2] +cdef char ufunc_mathieu_a_types[6] +cdef char *ufunc_mathieu_a_doc = ( + "mathieu_a(m, q, out=None)\n" + "\n" + "Characteristic value of even Mathieu functions\n" + "\n" + "Parameters\n" + "----------\n" + "m : array_like\n" + " Order of the function\n" + "q : array_like\n" + " Parameter of the function\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Characteristic value for the even solution, ``ce_m(z, q)``, of\n" + " Mathieu's equation.\n" + "\n" + "See Also\n" + "--------\n" + "mathieu_b, mathieu_cem, mathieu_sem") +ufunc_mathieu_a_loops[0] = loop_d_dd__As_ff_f +ufunc_mathieu_a_loops[1] = loop_d_dd__As_dd_d +ufunc_mathieu_a_types[0] = NPY_FLOAT +ufunc_mathieu_a_types[1] = NPY_FLOAT +ufunc_mathieu_a_types[2] = NPY_FLOAT +ufunc_mathieu_a_types[3] = NPY_DOUBLE +ufunc_mathieu_a_types[4] = NPY_DOUBLE +ufunc_mathieu_a_types[5] = NPY_DOUBLE +ufunc_mathieu_a_ptr[2*0] = _func_cem_cva_wrap +ufunc_mathieu_a_ptr[2*0+1] = ("mathieu_a") 
+ufunc_mathieu_a_ptr[2*1] = _func_cem_cva_wrap +ufunc_mathieu_a_ptr[2*1+1] = ("mathieu_a") +ufunc_mathieu_a_data[0] = &ufunc_mathieu_a_ptr[2*0] +ufunc_mathieu_a_data[1] = &ufunc_mathieu_a_ptr[2*1] +mathieu_a = np.PyUFunc_FromFuncAndData(ufunc_mathieu_a_loops, ufunc_mathieu_a_data, ufunc_mathieu_a_types, 2, 2, 1, 0, "mathieu_a", ufunc_mathieu_a_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_mathieu_b_loops[2] +cdef void *ufunc_mathieu_b_ptr[4] +cdef void *ufunc_mathieu_b_data[2] +cdef char ufunc_mathieu_b_types[6] +cdef char *ufunc_mathieu_b_doc = ( + "mathieu_b(m, q, out=None)\n" + "\n" + "Characteristic value of odd Mathieu functions\n" + "\n" + "Parameters\n" + "----------\n" + "m : array_like\n" + " Order of the function\n" + "q : array_like\n" + " Parameter of the function\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Characteristic value for the odd solution, ``se_m(z, q)``, of Mathieu's\n" + " equation.\n" + "\n" + "See Also\n" + "--------\n" + "mathieu_a, mathieu_cem, mathieu_sem") +ufunc_mathieu_b_loops[0] = loop_d_dd__As_ff_f +ufunc_mathieu_b_loops[1] = loop_d_dd__As_dd_d +ufunc_mathieu_b_types[0] = NPY_FLOAT +ufunc_mathieu_b_types[1] = NPY_FLOAT +ufunc_mathieu_b_types[2] = NPY_FLOAT +ufunc_mathieu_b_types[3] = NPY_DOUBLE +ufunc_mathieu_b_types[4] = NPY_DOUBLE +ufunc_mathieu_b_types[5] = NPY_DOUBLE +ufunc_mathieu_b_ptr[2*0] = _func_sem_cva_wrap +ufunc_mathieu_b_ptr[2*0+1] = ("mathieu_b") +ufunc_mathieu_b_ptr[2*1] = _func_sem_cva_wrap +ufunc_mathieu_b_ptr[2*1+1] = ("mathieu_b") +ufunc_mathieu_b_data[0] = &ufunc_mathieu_b_ptr[2*0] +ufunc_mathieu_b_data[1] = &ufunc_mathieu_b_ptr[2*1] +mathieu_b = np.PyUFunc_FromFuncAndData(ufunc_mathieu_b_loops, ufunc_mathieu_b_data, ufunc_mathieu_b_types, 2, 2, 1, 0, "mathieu_b", ufunc_mathieu_b_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_mathieu_cem_loops[2] +cdef void *ufunc_mathieu_cem_ptr[4] +cdef void 
*ufunc_mathieu_cem_data[2] +cdef char ufunc_mathieu_cem_types[10] +cdef char *ufunc_mathieu_cem_doc = ( + "mathieu_cem(m, q, x, out=None)\n" + "\n" + "Even Mathieu function and its derivative\n" + "\n" + "Returns the even Mathieu function, ``ce_m(x, q)``, of order `m` and\n" + "parameter `q` evaluated at `x` (given in degrees). Also returns the\n" + "derivative with respect to `x` of ce_m(x, q)\n" + "\n" + "Parameters\n" + "----------\n" + "m : array_like\n" + " Order of the function\n" + "q : array_like\n" + " Parameter of the function\n" + "x : array_like\n" + " Argument of the function, *given in degrees, not radians*\n" + "out : tuple of ndarray, optional\n" + " Optional output arrays for the function results\n" + "\n" + "Returns\n" + "-------\n" + "y : scalar or ndarray\n" + " Value of the function\n" + "yp : scalar or ndarray\n" + " Value of the derivative vs x\n" + "\n" + "See Also\n" + "--------\n" + "mathieu_a, mathieu_b, mathieu_sem") +ufunc_mathieu_cem_loops[0] = loop_i_ddd_dd_As_fff_ff +ufunc_mathieu_cem_loops[1] = loop_i_ddd_dd_As_ddd_dd +ufunc_mathieu_cem_types[0] = NPY_FLOAT +ufunc_mathieu_cem_types[1] = NPY_FLOAT +ufunc_mathieu_cem_types[2] = NPY_FLOAT +ufunc_mathieu_cem_types[3] = NPY_FLOAT +ufunc_mathieu_cem_types[4] = NPY_FLOAT +ufunc_mathieu_cem_types[5] = NPY_DOUBLE +ufunc_mathieu_cem_types[6] = NPY_DOUBLE +ufunc_mathieu_cem_types[7] = NPY_DOUBLE +ufunc_mathieu_cem_types[8] = NPY_DOUBLE +ufunc_mathieu_cem_types[9] = NPY_DOUBLE +ufunc_mathieu_cem_ptr[2*0] = _func_cem_wrap +ufunc_mathieu_cem_ptr[2*0+1] = ("mathieu_cem") +ufunc_mathieu_cem_ptr[2*1] = _func_cem_wrap +ufunc_mathieu_cem_ptr[2*1+1] = ("mathieu_cem") +ufunc_mathieu_cem_data[0] = &ufunc_mathieu_cem_ptr[2*0] +ufunc_mathieu_cem_data[1] = &ufunc_mathieu_cem_ptr[2*1] +mathieu_cem = np.PyUFunc_FromFuncAndData(ufunc_mathieu_cem_loops, ufunc_mathieu_cem_data, ufunc_mathieu_cem_types, 2, 3, 2, 0, "mathieu_cem", ufunc_mathieu_cem_doc, 0) + +cdef np.PyUFuncGenericFunction 
ufunc_mathieu_modcem1_loops[2] +cdef void *ufunc_mathieu_modcem1_ptr[4] +cdef void *ufunc_mathieu_modcem1_data[2] +cdef char ufunc_mathieu_modcem1_types[10] +cdef char *ufunc_mathieu_modcem1_doc = ( + "mathieu_modcem1(m, q, x, out=None)\n" + "\n" + "Even modified Mathieu function of the first kind and its derivative\n" + "\n" + "Evaluates the even modified Mathieu function of the first kind,\n" + "``Mc1m(x, q)``, and its derivative at `x` for order `m` and parameter\n" + "`q`.\n" + "\n" + "Parameters\n" + "----------\n" + "m : array_like\n" + " Order of the function\n" + "q : array_like\n" + " Parameter of the function\n" + "x : array_like\n" + " Argument of the function, *given in degrees, not radians*\n" + "out : tuple of ndarray, optional\n" + " Optional output arrays for the function results\n" + "\n" + "Returns\n" + "-------\n" + "y : scalar or ndarray\n" + " Value of the function\n" + "yp : scalar or ndarray\n" + " Value of the derivative vs x\n" + "\n" + "See Also\n" + "--------\n" + "mathieu_modsem1") +ufunc_mathieu_modcem1_loops[0] = loop_i_ddd_dd_As_fff_ff +ufunc_mathieu_modcem1_loops[1] = loop_i_ddd_dd_As_ddd_dd +ufunc_mathieu_modcem1_types[0] = NPY_FLOAT +ufunc_mathieu_modcem1_types[1] = NPY_FLOAT +ufunc_mathieu_modcem1_types[2] = NPY_FLOAT +ufunc_mathieu_modcem1_types[3] = NPY_FLOAT +ufunc_mathieu_modcem1_types[4] = NPY_FLOAT +ufunc_mathieu_modcem1_types[5] = NPY_DOUBLE +ufunc_mathieu_modcem1_types[6] = NPY_DOUBLE +ufunc_mathieu_modcem1_types[7] = NPY_DOUBLE +ufunc_mathieu_modcem1_types[8] = NPY_DOUBLE +ufunc_mathieu_modcem1_types[9] = NPY_DOUBLE +ufunc_mathieu_modcem1_ptr[2*0] = _func_mcm1_wrap +ufunc_mathieu_modcem1_ptr[2*0+1] = ("mathieu_modcem1") +ufunc_mathieu_modcem1_ptr[2*1] = _func_mcm1_wrap +ufunc_mathieu_modcem1_ptr[2*1+1] = ("mathieu_modcem1") +ufunc_mathieu_modcem1_data[0] = &ufunc_mathieu_modcem1_ptr[2*0] +ufunc_mathieu_modcem1_data[1] = &ufunc_mathieu_modcem1_ptr[2*1] +mathieu_modcem1 = 
np.PyUFunc_FromFuncAndData(ufunc_mathieu_modcem1_loops, ufunc_mathieu_modcem1_data, ufunc_mathieu_modcem1_types, 2, 3, 2, 0, "mathieu_modcem1", ufunc_mathieu_modcem1_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_mathieu_modcem2_loops[2] +cdef void *ufunc_mathieu_modcem2_ptr[4] +cdef void *ufunc_mathieu_modcem2_data[2] +cdef char ufunc_mathieu_modcem2_types[10] +cdef char *ufunc_mathieu_modcem2_doc = ( + "mathieu_modcem2(m, q, x, out=None)\n" + "\n" + "Even modified Mathieu function of the second kind and its derivative\n" + "\n" + "Evaluates the even modified Mathieu function of the second kind,\n" + "Mc2m(x, q), and its derivative at `x` (given in degrees) for order `m`\n" + "and parameter `q`.\n" + "\n" + "Parameters\n" + "----------\n" + "m : array_like\n" + " Order of the function\n" + "q : array_like\n" + " Parameter of the function\n" + "x : array_like\n" + " Argument of the function, *given in degrees, not radians*\n" + "out : tuple of ndarray, optional\n" + " Optional output arrays for the function results\n" + "\n" + "Returns\n" + "-------\n" + "y : scalar or ndarray\n" + " Value of the function\n" + "yp : scalar or ndarray\n" + " Value of the derivative vs x\n" + "\n" + "See Also\n" + "--------\n" + "mathieu_modsem2") +ufunc_mathieu_modcem2_loops[0] = loop_i_ddd_dd_As_fff_ff +ufunc_mathieu_modcem2_loops[1] = loop_i_ddd_dd_As_ddd_dd +ufunc_mathieu_modcem2_types[0] = NPY_FLOAT +ufunc_mathieu_modcem2_types[1] = NPY_FLOAT +ufunc_mathieu_modcem2_types[2] = NPY_FLOAT +ufunc_mathieu_modcem2_types[3] = NPY_FLOAT +ufunc_mathieu_modcem2_types[4] = NPY_FLOAT +ufunc_mathieu_modcem2_types[5] = NPY_DOUBLE +ufunc_mathieu_modcem2_types[6] = NPY_DOUBLE +ufunc_mathieu_modcem2_types[7] = NPY_DOUBLE +ufunc_mathieu_modcem2_types[8] = NPY_DOUBLE +ufunc_mathieu_modcem2_types[9] = NPY_DOUBLE +ufunc_mathieu_modcem2_ptr[2*0] = _func_mcm2_wrap +ufunc_mathieu_modcem2_ptr[2*0+1] = ("mathieu_modcem2") +ufunc_mathieu_modcem2_ptr[2*1] = _func_mcm2_wrap 
+ufunc_mathieu_modcem2_ptr[2*1+1] = ("mathieu_modcem2") +ufunc_mathieu_modcem2_data[0] = &ufunc_mathieu_modcem2_ptr[2*0] +ufunc_mathieu_modcem2_data[1] = &ufunc_mathieu_modcem2_ptr[2*1] +mathieu_modcem2 = np.PyUFunc_FromFuncAndData(ufunc_mathieu_modcem2_loops, ufunc_mathieu_modcem2_data, ufunc_mathieu_modcem2_types, 2, 3, 2, 0, "mathieu_modcem2", ufunc_mathieu_modcem2_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_mathieu_modsem1_loops[2] +cdef void *ufunc_mathieu_modsem1_ptr[4] +cdef void *ufunc_mathieu_modsem1_data[2] +cdef char ufunc_mathieu_modsem1_types[10] +cdef char *ufunc_mathieu_modsem1_doc = ( + "mathieu_modsem1(m, q, x, out=None)\n" + "\n" + "Odd modified Mathieu function of the first kind and its derivative\n" + "\n" + "Evaluates the odd modified Mathieu function of the first kind,\n" + "Ms1m(x, q), and its derivative at `x` (given in degrees) for order `m`\n" + "and parameter `q`.\n" + "\n" + "Parameters\n" + "----------\n" + "m : array_like\n" + " Order of the function\n" + "q : array_like\n" + " Parameter of the function\n" + "x : array_like\n" + " Argument of the function, *given in degrees, not radians*\n" + "out : tuple of ndarray, optional\n" + " Optional output arrays for the function results\n" + "\n" + "Returns\n" + "-------\n" + "y : scalar or ndarray\n" + " Value of the function\n" + "yp : scalar or ndarray\n" + " Value of the derivative vs x\n" + "\n" + "See Also\n" + "--------\n" + "mathieu_modcem1") +ufunc_mathieu_modsem1_loops[0] = loop_i_ddd_dd_As_fff_ff +ufunc_mathieu_modsem1_loops[1] = loop_i_ddd_dd_As_ddd_dd +ufunc_mathieu_modsem1_types[0] = NPY_FLOAT +ufunc_mathieu_modsem1_types[1] = NPY_FLOAT +ufunc_mathieu_modsem1_types[2] = NPY_FLOAT +ufunc_mathieu_modsem1_types[3] = NPY_FLOAT +ufunc_mathieu_modsem1_types[4] = NPY_FLOAT +ufunc_mathieu_modsem1_types[5] = NPY_DOUBLE +ufunc_mathieu_modsem1_types[6] = NPY_DOUBLE +ufunc_mathieu_modsem1_types[7] = NPY_DOUBLE +ufunc_mathieu_modsem1_types[8] = NPY_DOUBLE +ufunc_mathieu_modsem1_types[9] 
= NPY_DOUBLE +ufunc_mathieu_modsem1_ptr[2*0] = _func_msm1_wrap +ufunc_mathieu_modsem1_ptr[2*0+1] = ("mathieu_modsem1") +ufunc_mathieu_modsem1_ptr[2*1] = _func_msm1_wrap +ufunc_mathieu_modsem1_ptr[2*1+1] = ("mathieu_modsem1") +ufunc_mathieu_modsem1_data[0] = &ufunc_mathieu_modsem1_ptr[2*0] +ufunc_mathieu_modsem1_data[1] = &ufunc_mathieu_modsem1_ptr[2*1] +mathieu_modsem1 = np.PyUFunc_FromFuncAndData(ufunc_mathieu_modsem1_loops, ufunc_mathieu_modsem1_data, ufunc_mathieu_modsem1_types, 2, 3, 2, 0, "mathieu_modsem1", ufunc_mathieu_modsem1_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_mathieu_modsem2_loops[2] +cdef void *ufunc_mathieu_modsem2_ptr[4] +cdef void *ufunc_mathieu_modsem2_data[2] +cdef char ufunc_mathieu_modsem2_types[10] +cdef char *ufunc_mathieu_modsem2_doc = ( + "mathieu_modsem2(m, q, x, out=None)\n" + "\n" + "Odd modified Mathieu function of the second kind and its derivative\n" + "\n" + "Evaluates the odd modified Mathieu function of the second kind,\n" + "Ms2m(x, q), and its derivative at `x` (given in degrees) for order `m`\n" + "and parameter q.\n" + "\n" + "Parameters\n" + "----------\n" + "m : array_like\n" + " Order of the function\n" + "q : array_like\n" + " Parameter of the function\n" + "x : array_like\n" + " Argument of the function, *given in degrees, not radians*\n" + "out : tuple of ndarray, optional\n" + " Optional output arrays for the function results\n" + "\n" + "Returns\n" + "-------\n" + "y : scalar or ndarray\n" + " Value of the function\n" + "yp : scalar or ndarray\n" + " Value of the derivative vs x\n" + "\n" + "See Also\n" + "--------\n" + "mathieu_modcem2") +ufunc_mathieu_modsem2_loops[0] = loop_i_ddd_dd_As_fff_ff +ufunc_mathieu_modsem2_loops[1] = loop_i_ddd_dd_As_ddd_dd +ufunc_mathieu_modsem2_types[0] = NPY_FLOAT +ufunc_mathieu_modsem2_types[1] = NPY_FLOAT +ufunc_mathieu_modsem2_types[2] = NPY_FLOAT +ufunc_mathieu_modsem2_types[3] = NPY_FLOAT +ufunc_mathieu_modsem2_types[4] = NPY_FLOAT +ufunc_mathieu_modsem2_types[5] = 
NPY_DOUBLE +ufunc_mathieu_modsem2_types[6] = NPY_DOUBLE +ufunc_mathieu_modsem2_types[7] = NPY_DOUBLE +ufunc_mathieu_modsem2_types[8] = NPY_DOUBLE +ufunc_mathieu_modsem2_types[9] = NPY_DOUBLE +ufunc_mathieu_modsem2_ptr[2*0] = _func_msm2_wrap +ufunc_mathieu_modsem2_ptr[2*0+1] = ("mathieu_modsem2") +ufunc_mathieu_modsem2_ptr[2*1] = _func_msm2_wrap +ufunc_mathieu_modsem2_ptr[2*1+1] = ("mathieu_modsem2") +ufunc_mathieu_modsem2_data[0] = &ufunc_mathieu_modsem2_ptr[2*0] +ufunc_mathieu_modsem2_data[1] = &ufunc_mathieu_modsem2_ptr[2*1] +mathieu_modsem2 = np.PyUFunc_FromFuncAndData(ufunc_mathieu_modsem2_loops, ufunc_mathieu_modsem2_data, ufunc_mathieu_modsem2_types, 2, 3, 2, 0, "mathieu_modsem2", ufunc_mathieu_modsem2_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_mathieu_sem_loops[2] +cdef void *ufunc_mathieu_sem_ptr[4] +cdef void *ufunc_mathieu_sem_data[2] +cdef char ufunc_mathieu_sem_types[10] +cdef char *ufunc_mathieu_sem_doc = ( + "mathieu_sem(m, q, x, out=None)\n" + "\n" + "Odd Mathieu function and its derivative\n" + "\n" + "Returns the odd Mathieu function, se_m(x, q), of order `m` and\n" + "parameter `q` evaluated at `x` (given in degrees). 
Also returns the\n" + "derivative with respect to `x` of se_m(x, q).\n" + "\n" + "Parameters\n" + "----------\n" + "m : array_like\n" + " Order of the function\n" + "q : array_like\n" + " Parameter of the function\n" + "x : array_like\n" + " Argument of the function, *given in degrees, not radians*.\n" + "out : tuple of ndarray, optional\n" + " Optional output arrays for the function results\n" + "\n" + "Returns\n" + "-------\n" + "y : scalar or ndarray\n" + " Value of the function\n" + "yp : scalar or ndarray\n" + " Value of the derivative vs x\n" + "\n" + "See Also\n" + "--------\n" + "mathieu_a, mathieu_b, mathieu_cem") +ufunc_mathieu_sem_loops[0] = loop_i_ddd_dd_As_fff_ff +ufunc_mathieu_sem_loops[1] = loop_i_ddd_dd_As_ddd_dd +ufunc_mathieu_sem_types[0] = NPY_FLOAT +ufunc_mathieu_sem_types[1] = NPY_FLOAT +ufunc_mathieu_sem_types[2] = NPY_FLOAT +ufunc_mathieu_sem_types[3] = NPY_FLOAT +ufunc_mathieu_sem_types[4] = NPY_FLOAT +ufunc_mathieu_sem_types[5] = NPY_DOUBLE +ufunc_mathieu_sem_types[6] = NPY_DOUBLE +ufunc_mathieu_sem_types[7] = NPY_DOUBLE +ufunc_mathieu_sem_types[8] = NPY_DOUBLE +ufunc_mathieu_sem_types[9] = NPY_DOUBLE +ufunc_mathieu_sem_ptr[2*0] = _func_sem_wrap +ufunc_mathieu_sem_ptr[2*0+1] = ("mathieu_sem") +ufunc_mathieu_sem_ptr[2*1] = _func_sem_wrap +ufunc_mathieu_sem_ptr[2*1+1] = ("mathieu_sem") +ufunc_mathieu_sem_data[0] = &ufunc_mathieu_sem_ptr[2*0] +ufunc_mathieu_sem_data[1] = &ufunc_mathieu_sem_ptr[2*1] +mathieu_sem = np.PyUFunc_FromFuncAndData(ufunc_mathieu_sem_loops, ufunc_mathieu_sem_data, ufunc_mathieu_sem_types, 2, 3, 2, 0, "mathieu_sem", ufunc_mathieu_sem_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_modfresnelm_loops[2] +cdef void *ufunc_modfresnelm_ptr[4] +cdef void *ufunc_modfresnelm_data[2] +cdef char ufunc_modfresnelm_types[6] +cdef char *ufunc_modfresnelm_doc = ( + "modfresnelm(x, out=None)\n" + "\n" + "Modified Fresnel negative integrals\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Function argument\n" + "out 
: tuple of ndarray, optional\n" + " Optional output arrays for the function results\n" + "\n" + "Returns\n" + "-------\n" + "fm : scalar or ndarray\n" + " Integral ``F_-(x)``: ``integral(exp(-1j*t*t), t=x..inf)``\n" + "km : scalar or ndarray\n" + " Integral ``K_-(x)``: ``1/sqrt(pi)*exp(1j*(x*x+pi/4))*fp``\n" + "\n" + "See Also\n" + "--------\n" + "modfresnelp") +ufunc_modfresnelm_loops[0] = loop_i_d_DD_As_f_FF +ufunc_modfresnelm_loops[1] = loop_i_d_DD_As_d_DD +ufunc_modfresnelm_types[0] = NPY_FLOAT +ufunc_modfresnelm_types[1] = NPY_CFLOAT +ufunc_modfresnelm_types[2] = NPY_CFLOAT +ufunc_modfresnelm_types[3] = NPY_DOUBLE +ufunc_modfresnelm_types[4] = NPY_CDOUBLE +ufunc_modfresnelm_types[5] = NPY_CDOUBLE +ufunc_modfresnelm_ptr[2*0] = _func_modified_fresnel_minus_wrap +ufunc_modfresnelm_ptr[2*0+1] = ("modfresnelm") +ufunc_modfresnelm_ptr[2*1] = _func_modified_fresnel_minus_wrap +ufunc_modfresnelm_ptr[2*1+1] = ("modfresnelm") +ufunc_modfresnelm_data[0] = &ufunc_modfresnelm_ptr[2*0] +ufunc_modfresnelm_data[1] = &ufunc_modfresnelm_ptr[2*1] +modfresnelm = np.PyUFunc_FromFuncAndData(ufunc_modfresnelm_loops, ufunc_modfresnelm_data, ufunc_modfresnelm_types, 2, 1, 2, 0, "modfresnelm", ufunc_modfresnelm_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_modfresnelp_loops[2] +cdef void *ufunc_modfresnelp_ptr[4] +cdef void *ufunc_modfresnelp_data[2] +cdef char ufunc_modfresnelp_types[6] +cdef char *ufunc_modfresnelp_doc = ( + "modfresnelp(x, out=None)\n" + "\n" + "Modified Fresnel positive integrals\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Function argument\n" + "out : tuple of ndarray, optional\n" + " Optional output arrays for the function results\n" + "\n" + "Returns\n" + "-------\n" + "fp : scalar or ndarray\n" + " Integral ``F_+(x)``: ``integral(exp(1j*t*t), t=x..inf)``\n" + "kp : scalar or ndarray\n" + " Integral ``K_+(x)``: ``1/sqrt(pi)*exp(-1j*(x*x+pi/4))*fp``\n" + "\n" + "See Also\n" + "--------\n" + "modfresnelm") +ufunc_modfresnelp_loops[0] = 
loop_i_d_DD_As_f_FF +ufunc_modfresnelp_loops[1] = loop_i_d_DD_As_d_DD +ufunc_modfresnelp_types[0] = NPY_FLOAT +ufunc_modfresnelp_types[1] = NPY_CFLOAT +ufunc_modfresnelp_types[2] = NPY_CFLOAT +ufunc_modfresnelp_types[3] = NPY_DOUBLE +ufunc_modfresnelp_types[4] = NPY_CDOUBLE +ufunc_modfresnelp_types[5] = NPY_CDOUBLE +ufunc_modfresnelp_ptr[2*0] = _func_modified_fresnel_plus_wrap +ufunc_modfresnelp_ptr[2*0+1] = ("modfresnelp") +ufunc_modfresnelp_ptr[2*1] = _func_modified_fresnel_plus_wrap +ufunc_modfresnelp_ptr[2*1+1] = ("modfresnelp") +ufunc_modfresnelp_data[0] = &ufunc_modfresnelp_ptr[2*0] +ufunc_modfresnelp_data[1] = &ufunc_modfresnelp_ptr[2*1] +modfresnelp = np.PyUFunc_FromFuncAndData(ufunc_modfresnelp_loops, ufunc_modfresnelp_data, ufunc_modfresnelp_types, 2, 1, 2, 0, "modfresnelp", ufunc_modfresnelp_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_modstruve_loops[2] +cdef void *ufunc_modstruve_ptr[4] +cdef void *ufunc_modstruve_data[2] +cdef char ufunc_modstruve_types[6] +cdef char *ufunc_modstruve_doc = ( + "modstruve(v, x, out=None)\n" + "\n" + "Modified Struve function.\n" + "\n" + "Return the value of the modified Struve function of order `v` at `x`. The\n" + "modified Struve function is defined as,\n" + "\n" + ".. 
math::\n" + " L_v(x) = -\\imath \\exp(-\\pi\\imath v/2) H_v(\\imath x),\n" + "\n" + "where :math:`H_v` is the Struve function.\n" + "\n" + "Parameters\n" + "----------\n" + "v : array_like\n" + " Order of the modified Struve function (float).\n" + "x : array_like\n" + " Argument of the Struve function (float; must be positive unless `v` is\n" + " an integer).\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "L : scalar or ndarray\n" + " Value of the modified Struve function of order `v` at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "struve\n" + "\n" + "Notes\n" + "-----\n" + "Three methods discussed in [1]_ are used to evaluate the function:\n" + "\n" + "- power series\n" + "- expansion in Bessel functions (if :math:`|x| < |v| + 20`)\n" + "- asymptotic large-x expansion (if :math:`x \\geq 0.7v + 12`)\n" + "\n" + "Rounding errors are estimated based on the largest terms in the sums, and\n" + "the result associated with the smallest error is returned.\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] NIST Digital Library of Mathematical Functions\n" + " https://dlmf.nist.gov/11\n" + "\n" + "Examples\n" + "--------\n" + "Calculate the modified Struve function of order 1 at 2.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import modstruve\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> modstruve(1, 2.)\n" + "1.102759787367716\n" + "\n" + "Calculate the modified Struve function at 2 for orders 1, 2 and 3 by\n" + "providing a list for the order parameter `v`.\n" + "\n" + ">>> modstruve([1, 2, 3], 2.)\n" + "array([1.10275979, 0.41026079, 0.11247294])\n" + "\n" + "Calculate the modified Struve function of order 1 for several points\n" + "by providing an array for `x`.\n" + "\n" + ">>> points = np.array([2., 5., 8.])\n" + ">>> modstruve(1, points)\n" + "array([ 1.10275979, 23.72821578, 399.24709139])\n" + "\n" + "Compute the modified Struve function for several orders at several\n" + "points by providing arrays for `v` and `z`. The arrays have to be\n" + "broadcastable to the correct shapes.\n" + "\n" + ">>> orders = np.array([[1], [2], [3]])\n" + ">>> points.shape, orders.shape\n" + "((3,), (3, 1))\n" + "\n" + ">>> modstruve(orders, points)\n" + "array([[1.10275979e+00, 2.37282158e+01, 3.99247091e+02],\n" + " [4.10260789e-01, 1.65535979e+01, 3.25973609e+02],\n" + " [1.12472937e-01, 9.42430454e+00, 2.33544042e+02]])\n" + "\n" + "Plot the modified Struve functions of order 0 to 3 from -5 to 5.\n" + "\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(-5., 5., 1000)\n" + ">>> for i in range(4):\n" + "... 
ax.plot(x, modstruve(i, x), label=f'$L_{i!r}$')\n" + ">>> ax.legend(ncol=2)\n" + ">>> ax.set_xlim(-5, 5)\n" + ">>> ax.set_title(r\"Modified Struve functions $L_{\\nu}$\")\n" + ">>> plt.show()") +ufunc_modstruve_loops[0] = loop_d_dd__As_ff_f +ufunc_modstruve_loops[1] = loop_d_dd__As_dd_d +ufunc_modstruve_types[0] = NPY_FLOAT +ufunc_modstruve_types[1] = NPY_FLOAT +ufunc_modstruve_types[2] = NPY_FLOAT +ufunc_modstruve_types[3] = NPY_DOUBLE +ufunc_modstruve_types[4] = NPY_DOUBLE +ufunc_modstruve_types[5] = NPY_DOUBLE +ufunc_modstruve_ptr[2*0] = _func_struve_l +ufunc_modstruve_ptr[2*0+1] = ("modstruve") +ufunc_modstruve_ptr[2*1] = _func_struve_l +ufunc_modstruve_ptr[2*1+1] = ("modstruve") +ufunc_modstruve_data[0] = &ufunc_modstruve_ptr[2*0] +ufunc_modstruve_data[1] = &ufunc_modstruve_ptr[2*1] +modstruve = np.PyUFunc_FromFuncAndData(ufunc_modstruve_loops, ufunc_modstruve_data, ufunc_modstruve_types, 2, 2, 1, 0, "modstruve", ufunc_modstruve_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_nbdtr_loops[3] +cdef void *ufunc_nbdtr_ptr[6] +cdef void *ufunc_nbdtr_data[3] +cdef char ufunc_nbdtr_types[12] +cdef char *ufunc_nbdtr_doc = ( + "nbdtr(k, n, p, out=None)\n" + "\n" + "Negative binomial cumulative distribution function.\n" + "\n" + "Returns the sum of the terms 0 through `k` of the negative binomial\n" + "distribution probability mass function,\n" + "\n" + ".. 
math::\n" + "\n" + " F = \\sum_{j=0}^k {{n + j - 1}\\choose{j}} p^n (1 - p)^j.\n" + "\n" + "In a sequence of Bernoulli trials with individual success probabilities\n" + "`p`, this is the probability that `k` or fewer failures precede the nth\n" + "success.\n" + "\n" + "Parameters\n" + "----------\n" + "k : array_like\n" + " The maximum number of allowed failures (nonnegative int).\n" + "n : array_like\n" + " The target number of successes (positive int).\n" + "p : array_like\n" + " Probability of success in a single event (float).\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "F : scalar or ndarray\n" + " The probability of `k` or fewer failures before `n` successes in a\n" + " sequence of events with individual success probability `p`.\n" + "\n" + "See Also\n" + "--------\n" + "nbdtrc : Negative binomial survival function\n" + "nbdtrik : Negative binomial quantile function\n" + "scipy.stats.nbinom : Negative binomial distribution\n" + "\n" + "Notes\n" + "-----\n" + "If floating point values are passed for `k` or `n`, they will be truncated\n" + "to integers.\n" + "\n" + "The terms are not summed directly; instead the regularized incomplete beta\n" + "function is employed, according to the formula,\n" + "\n" + ".. math::\n" + " \\mathrm{nbdtr}(k, n, p) = I_{p}(n, k + 1).\n" + "\n" + "Wrapper for the Cephes [1]_ routine `nbdtr`.\n" + "\n" + "The negative binomial distribution is also available as\n" + "`scipy.stats.nbinom`. Using `nbdtr` directly can improve performance\n" + "compared to the ``cdf`` method of `scipy.stats.nbinom` (see last example).\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "Compute the function for ``k=10`` and ``n=5`` at ``p=0.5``.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import nbdtr\n" + ">>> nbdtr(10, 5, 0.5)\n" + "0.940765380859375\n" + "\n" + "Compute the function for ``n=10`` and ``p=0.5`` at several points by\n" + "providing a NumPy array or list for `k`.\n" + "\n" + ">>> nbdtr([5, 10, 15], 10, 0.5)\n" + "array([0.15087891, 0.58809853, 0.88523853])\n" + "\n" + "Plot the function for four different parameter sets.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> k = np.arange(130)\n" + ">>> n_parameters = [20, 20, 20, 80]\n" + ">>> p_parameters = [0.2, 0.5, 0.8, 0.5]\n" + ">>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']\n" + ">>> parameters_list = list(zip(p_parameters, n_parameters,\n" + "... linestyles))\n" + ">>> fig, ax = plt.subplots(figsize=(8, 8))\n" + ">>> for parameter_set in parameters_list:\n" + "... p, n, style = parameter_set\n" + "... nbdtr_vals = nbdtr(k, n, p)\n" + "... ax.plot(k, nbdtr_vals, label=rf\"$n={n},\\, p={p}$\",\n" + "... ls=style)\n" + ">>> ax.legend()\n" + ">>> ax.set_xlabel(\"$k$\")\n" + ">>> ax.set_title(\"Negative binomial cumulative distribution function\")\n" + ">>> plt.show()\n" + "\n" + "The negative binomial distribution is also available as\n" + "`scipy.stats.nbinom`. Using `nbdtr` directly can be much faster than\n" + "calling the ``cdf`` method of `scipy.stats.nbinom`, especially for small\n" + "arrays or individual values. 
To get the same results one must use the\n" + "following parametrization: ``nbinom(n, p).cdf(k)=nbdtr(k, n, p)``.\n" + "\n" + ">>> from scipy.stats import nbinom\n" + ">>> k, n, p = 5, 3, 0.5\n" + ">>> nbdtr_res = nbdtr(k, n, p) # this will often be faster than below\n" + ">>> stats_res = nbinom(n, p).cdf(k)\n" + ">>> stats_res, nbdtr_res # test that results are equal\n" + "(0.85546875, 0.85546875)\n" + "\n" + "`nbdtr` can evaluate different parameter sets by providing arrays with\n" + "shapes compatible for broadcasting for `k`, `n` and `p`. Here we compute\n" + "the function for three different `k` at four locations `p`, resulting in\n" + "a 3x4 array.\n" + "\n" + ">>> k = np.array([[5], [10], [15]])\n" + ">>> p = np.array([0.3, 0.5, 0.7, 0.9])\n" + ">>> k.shape, p.shape\n" + "((3, 1), (4,))\n" + "\n" + ">>> nbdtr(k, 5, p)\n" + "array([[0.15026833, 0.62304687, 0.95265101, 0.9998531 ],\n" + " [0.48450894, 0.94076538, 0.99932777, 0.99999999],\n" + " [0.76249222, 0.99409103, 0.99999445, 1. 
]])") +ufunc_nbdtr_loops[0] = loop_d_iid__As_lld_d +ufunc_nbdtr_loops[1] = loop_d_ddd__As_fff_f +ufunc_nbdtr_loops[2] = loop_d_ddd__As_ddd_d +ufunc_nbdtr_types[0] = NPY_LONG +ufunc_nbdtr_types[1] = NPY_LONG +ufunc_nbdtr_types[2] = NPY_DOUBLE +ufunc_nbdtr_types[3] = NPY_DOUBLE +ufunc_nbdtr_types[4] = NPY_FLOAT +ufunc_nbdtr_types[5] = NPY_FLOAT +ufunc_nbdtr_types[6] = NPY_FLOAT +ufunc_nbdtr_types[7] = NPY_FLOAT +ufunc_nbdtr_types[8] = NPY_DOUBLE +ufunc_nbdtr_types[9] = NPY_DOUBLE +ufunc_nbdtr_types[10] = NPY_DOUBLE +ufunc_nbdtr_types[11] = NPY_DOUBLE +ufunc_nbdtr_ptr[2*0] = _func_nbdtr +ufunc_nbdtr_ptr[2*0+1] = ("nbdtr") +ufunc_nbdtr_ptr[2*1] = _func_nbdtr_unsafe +ufunc_nbdtr_ptr[2*1+1] = ("nbdtr") +ufunc_nbdtr_ptr[2*2] = _func_nbdtr_unsafe +ufunc_nbdtr_ptr[2*2+1] = ("nbdtr") +ufunc_nbdtr_data[0] = &ufunc_nbdtr_ptr[2*0] +ufunc_nbdtr_data[1] = &ufunc_nbdtr_ptr[2*1] +ufunc_nbdtr_data[2] = &ufunc_nbdtr_ptr[2*2] +nbdtr = np.PyUFunc_FromFuncAndData(ufunc_nbdtr_loops, ufunc_nbdtr_data, ufunc_nbdtr_types, 3, 3, 1, 0, "nbdtr", ufunc_nbdtr_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_nbdtrc_loops[3] +cdef void *ufunc_nbdtrc_ptr[6] +cdef void *ufunc_nbdtrc_data[3] +cdef char ufunc_nbdtrc_types[12] +cdef char *ufunc_nbdtrc_doc = ( + "nbdtrc(k, n, p, out=None)\n" + "\n" + "Negative binomial survival function.\n" + "\n" + "Returns the sum of the terms `k + 1` to infinity of the negative binomial\n" + "distribution probability mass function,\n" + "\n" + ".. 
math::\n" + "\n" + " F = \\sum_{j=k + 1}^\\infty {{n + j - 1}\\choose{j}} p^n (1 - p)^j.\n" + "\n" + "In a sequence of Bernoulli trials with individual success probabilities\n" + "`p`, this is the probability that more than `k` failures precede the nth\n" + "success.\n" + "\n" + "Parameters\n" + "----------\n" + "k : array_like\n" + " The maximum number of allowed failures (nonnegative int).\n" + "n : array_like\n" + " The target number of successes (positive int).\n" + "p : array_like\n" + " Probability of success in a single event (float).\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "F : scalar or ndarray\n" + " The probability of `k + 1` or more failures before `n` successes in a\n" + " sequence of events with individual success probability `p`.\n" + "\n" + "See Also\n" + "--------\n" + "nbdtr : Negative binomial cumulative distribution function\n" + "nbdtrik : Negative binomial percentile function\n" + "scipy.stats.nbinom : Negative binomial distribution\n" + "\n" + "Notes\n" + "-----\n" + "If floating point values are passed for `k` or `n`, they will be truncated\n" + "to integers.\n" + "\n" + "The terms are not summed directly; instead the regularized incomplete beta\n" + "function is employed, according to the formula,\n" + "\n" + ".. math::\n" + " \\mathrm{nbdtrc}(k, n, p) = I_{1 - p}(k + 1, n).\n" + "\n" + "Wrapper for the Cephes [1]_ routine `nbdtrc`.\n" + "\n" + "The negative binomial distribution is also available as\n" + "`scipy.stats.nbinom`. Using `nbdtrc` directly can improve performance\n" + "compared to the ``sf`` method of `scipy.stats.nbinom` (see last example).\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "Compute the function for ``k=10`` and ``n=5`` at ``p=0.5``.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import nbdtrc\n" + ">>> nbdtrc(10, 5, 0.5)\n" + "0.059234619140624986\n" + "\n" + "Compute the function for ``n=10`` and ``p=0.5`` at several points by\n" + "providing a NumPy array or list for `k`.\n" + "\n" + ">>> nbdtrc([5, 10, 15], 10, 0.5)\n" + "array([0.84912109, 0.41190147, 0.11476147])\n" + "\n" + "Plot the function for four different parameter sets.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> k = np.arange(130)\n" + ">>> n_parameters = [20, 20, 20, 80]\n" + ">>> p_parameters = [0.2, 0.5, 0.8, 0.5]\n" + ">>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']\n" + ">>> parameters_list = list(zip(p_parameters, n_parameters,\n" + "... linestyles))\n" + ">>> fig, ax = plt.subplots(figsize=(8, 8))\n" + ">>> for parameter_set in parameters_list:\n" + "... p, n, style = parameter_set\n" + "... nbdtrc_vals = nbdtrc(k, n, p)\n" + "... ax.plot(k, nbdtrc_vals, label=rf\"$n={n},\\, p={p}$\",\n" + "... ls=style)\n" + ">>> ax.legend()\n" + ">>> ax.set_xlabel(\"$k$\")\n" + ">>> ax.set_title(\"Negative binomial distribution survival function\")\n" + ">>> plt.show()\n" + "\n" + "The negative binomial distribution is also available as\n" + "`scipy.stats.nbinom`. Using `nbdtrc` directly can be much faster than\n" + "calling the ``sf`` method of `scipy.stats.nbinom`, especially for small\n" + "arrays or individual values. 
To get the same results one must use the\n" + "following parametrization: ``nbinom(n, p).sf(k)=nbdtrc(k, n, p)``.\n" + "\n" + ">>> from scipy.stats import nbinom\n" + ">>> k, n, p = 3, 5, 0.5\n" + ">>> nbdtr_res = nbdtrc(k, n, p) # this will often be faster than below\n" + ">>> stats_res = nbinom(n, p).sf(k)\n" + ">>> stats_res, nbdtr_res # test that results are equal\n" + "(0.6367187499999999, 0.6367187499999999)\n" + "\n" + "`nbdtrc` can evaluate different parameter sets by providing arrays with\n" + "shapes compatible for broadcasting for `k`, `n` and `p`. Here we compute\n" + "the function for three different `k` at four locations `p`, resulting in\n" + "a 3x4 array.\n" + "\n" + ">>> k = np.array([[5], [10], [15]])\n" + ">>> p = np.array([0.3, 0.5, 0.7, 0.9])\n" + ">>> k.shape, p.shape\n" + "((3, 1), (4,))\n" + "\n" + ">>> nbdtrc(k, 5, p)\n" + "array([[8.49731667e-01, 3.76953125e-01, 4.73489874e-02, 1.46902600e-04],\n" + " [5.15491059e-01, 5.92346191e-02, 6.72234070e-04, 9.29610100e-09],\n" + " [2.37507779e-01, 5.90896606e-03, 5.55025308e-06, 3.26346760e-13]])") +ufunc_nbdtrc_loops[0] = loop_d_iid__As_lld_d +ufunc_nbdtrc_loops[1] = loop_d_ddd__As_fff_f +ufunc_nbdtrc_loops[2] = loop_d_ddd__As_ddd_d +ufunc_nbdtrc_types[0] = NPY_LONG +ufunc_nbdtrc_types[1] = NPY_LONG +ufunc_nbdtrc_types[2] = NPY_DOUBLE +ufunc_nbdtrc_types[3] = NPY_DOUBLE +ufunc_nbdtrc_types[4] = NPY_FLOAT +ufunc_nbdtrc_types[5] = NPY_FLOAT +ufunc_nbdtrc_types[6] = NPY_FLOAT +ufunc_nbdtrc_types[7] = NPY_FLOAT +ufunc_nbdtrc_types[8] = NPY_DOUBLE +ufunc_nbdtrc_types[9] = NPY_DOUBLE +ufunc_nbdtrc_types[10] = NPY_DOUBLE +ufunc_nbdtrc_types[11] = NPY_DOUBLE +ufunc_nbdtrc_ptr[2*0] = _func_nbdtrc +ufunc_nbdtrc_ptr[2*0+1] = ("nbdtrc") +ufunc_nbdtrc_ptr[2*1] = _func_nbdtrc_unsafe +ufunc_nbdtrc_ptr[2*1+1] = ("nbdtrc") +ufunc_nbdtrc_ptr[2*2] = _func_nbdtrc_unsafe +ufunc_nbdtrc_ptr[2*2+1] = ("nbdtrc") +ufunc_nbdtrc_data[0] = &ufunc_nbdtrc_ptr[2*0] +ufunc_nbdtrc_data[1] = &ufunc_nbdtrc_ptr[2*1] 
+ufunc_nbdtrc_data[2] = &ufunc_nbdtrc_ptr[2*2] +nbdtrc = np.PyUFunc_FromFuncAndData(ufunc_nbdtrc_loops, ufunc_nbdtrc_data, ufunc_nbdtrc_types, 3, 3, 1, 0, "nbdtrc", ufunc_nbdtrc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_nbdtri_loops[3] +cdef void *ufunc_nbdtri_ptr[6] +cdef void *ufunc_nbdtri_data[3] +cdef char ufunc_nbdtri_types[12] +cdef char *ufunc_nbdtri_doc = ( + "nbdtri(k, n, y, out=None)\n" + "\n" + "Returns the inverse with respect to the parameter `p` of\n" + "`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution\n" + "function.\n" + "\n" + "Parameters\n" + "----------\n" + "k : array_like\n" + " The maximum number of allowed failures (nonnegative int).\n" + "n : array_like\n" + " The target number of successes (positive int).\n" + "y : array_like\n" + " The probability of `k` or fewer failures before `n` successes (float).\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "p : scalar or ndarray\n" + " Probability of success in a single event (float) such that\n" + " `nbdtr(k, n, p) = y`.\n" + "\n" + "See Also\n" + "--------\n" + "nbdtr : Cumulative distribution function of the negative binomial.\n" + "nbdtrc : Negative binomial survival function.\n" + "scipy.stats.nbinom : negative binomial distribution.\n" + "nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.\n" + "nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.\n" + "scipy.stats.nbinom : Negative binomial distribution\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for the Cephes [1]_ routine `nbdtri`.\n" + "\n" + "The negative binomial distribution is also available as\n" + "`scipy.stats.nbinom`. Using `nbdtri` directly can improve performance\n" + "compared to the ``ppf`` method of `scipy.stats.nbinom`.\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "`nbdtri` is the inverse of `nbdtr` with respect to `p`.\n" + "Up to floating point errors the following holds:\n" + "``nbdtri(k, n, nbdtr(k, n, p))=p``.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import nbdtri, nbdtr\n" + ">>> k, n, y = 5, 10, 0.2\n" + ">>> cdf_val = nbdtr(k, n, y)\n" + ">>> nbdtri(k, n, cdf_val)\n" + "0.20000000000000004\n" + "\n" + "Compute the function for ``k=10`` and ``n=5`` at several points by\n" + "providing a NumPy array or list for `y`.\n" + "\n" + ">>> y = np.array([0.1, 0.4, 0.8])\n" + ">>> nbdtri(3, 5, y)\n" + "array([0.34462319, 0.51653095, 0.69677416])\n" + "\n" + "Plot the function for three different parameter sets.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> n_parameters = [5, 20, 30, 30]\n" + ">>> k_parameters = [20, 20, 60, 80]\n" + ">>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']\n" + ">>> parameters_list = list(zip(n_parameters, k_parameters, linestyles))\n" + ">>> cdf_vals = np.linspace(0, 1, 1000)\n" + ">>> fig, ax = plt.subplots(figsize=(8, 8))\n" + ">>> for parameter_set in parameters_list:\n" + "... n, k, style = parameter_set\n" + "... nbdtri_vals = nbdtri(k, n, cdf_vals)\n" + "... ax.plot(cdf_vals, nbdtri_vals, label=rf\"$k={k},\\ n={n}$\",\n" + "... ls=style)\n" + ">>> ax.legend()\n" + ">>> ax.set_ylabel(\"$p$\")\n" + ">>> ax.set_xlabel(\"$CDF$\")\n" + ">>> title = \"nbdtri: inverse of negative binomial CDF with respect to $p$\"\n" + ">>> ax.set_title(title)\n" + ">>> plt.show()\n" + "\n" + "`nbdtri` can evaluate different parameter sets by providing arrays with\n" + "shapes compatible for broadcasting for `k`, `n` and `p`. 
Here we compute\n" + "the function for three different `k` at four locations `p`, resulting in\n" + "a 3x4 array.\n" + "\n" + ">>> k = np.array([[5], [10], [15]])\n" + ">>> y = np.array([0.3, 0.5, 0.7, 0.9])\n" + ">>> k.shape, y.shape\n" + "((3, 1), (4,))\n" + "\n" + ">>> nbdtri(k, 5, y)\n" + "array([[0.37258157, 0.45169416, 0.53249956, 0.64578407],\n" + " [0.24588501, 0.30451981, 0.36778453, 0.46397088],\n" + " [0.18362101, 0.22966758, 0.28054743, 0.36066188]])") +ufunc_nbdtri_loops[0] = loop_d_iid__As_lld_d +ufunc_nbdtri_loops[1] = loop_d_ddd__As_fff_f +ufunc_nbdtri_loops[2] = loop_d_ddd__As_ddd_d +ufunc_nbdtri_types[0] = NPY_LONG +ufunc_nbdtri_types[1] = NPY_LONG +ufunc_nbdtri_types[2] = NPY_DOUBLE +ufunc_nbdtri_types[3] = NPY_DOUBLE +ufunc_nbdtri_types[4] = NPY_FLOAT +ufunc_nbdtri_types[5] = NPY_FLOAT +ufunc_nbdtri_types[6] = NPY_FLOAT +ufunc_nbdtri_types[7] = NPY_FLOAT +ufunc_nbdtri_types[8] = NPY_DOUBLE +ufunc_nbdtri_types[9] = NPY_DOUBLE +ufunc_nbdtri_types[10] = NPY_DOUBLE +ufunc_nbdtri_types[11] = NPY_DOUBLE +ufunc_nbdtri_ptr[2*0] = _func_nbdtri +ufunc_nbdtri_ptr[2*0+1] = ("nbdtri") +ufunc_nbdtri_ptr[2*1] = _func_nbdtri_unsafe +ufunc_nbdtri_ptr[2*1+1] = ("nbdtri") +ufunc_nbdtri_ptr[2*2] = _func_nbdtri_unsafe +ufunc_nbdtri_ptr[2*2+1] = ("nbdtri") +ufunc_nbdtri_data[0] = &ufunc_nbdtri_ptr[2*0] +ufunc_nbdtri_data[1] = &ufunc_nbdtri_ptr[2*1] +ufunc_nbdtri_data[2] = &ufunc_nbdtri_ptr[2*2] +nbdtri = np.PyUFunc_FromFuncAndData(ufunc_nbdtri_loops, ufunc_nbdtri_data, ufunc_nbdtri_types, 3, 3, 1, 0, "nbdtri", ufunc_nbdtri_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_nbdtrik_loops[2] +cdef void *ufunc_nbdtrik_ptr[4] +cdef void *ufunc_nbdtrik_data[2] +cdef char ufunc_nbdtrik_types[8] +cdef char *ufunc_nbdtrik_doc = ( + "nbdtrik(y, n, p, out=None)\n" + "\n" + "Negative binomial percentile function.\n" + "\n" + "Returns the inverse with respect to the parameter `k` of\n" + "`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution\n" + "function.\n" + "\n" 
+ "Parameters\n" + "----------\n" + "y : array_like\n" + " The probability of `k` or fewer failures before `n` successes (float).\n" + "n : array_like\n" + " The target number of successes (positive int).\n" + "p : array_like\n" + " Probability of success in a single event (float).\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "k : scalar or ndarray\n" + " The maximum number of allowed failures such that `nbdtr(k, n, p) = y`.\n" + "\n" + "See Also\n" + "--------\n" + "nbdtr : Cumulative distribution function of the negative binomial.\n" + "nbdtrc : Survival function of the negative binomial.\n" + "nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.\n" + "nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.\n" + "scipy.stats.nbinom : Negative binomial distribution\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.\n" + "\n" + "Formula 26.5.26 of [2]_,\n" + "\n" + ".. math::\n" + " \\sum_{j=k + 1}^\\infty {{n + j - 1}\n" + " \\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),\n" + "\n" + "is used to reduce calculation of the cumulative distribution function to\n" + "that of a regularized incomplete beta :math:`I`.\n" + "\n" + "Computation of `k` involves a search for a value that produces the desired\n" + "value of `y`. The search relies on the monotonicity of `y` with `k`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Barry Brown, James Lovato, and Kathy Russell,\n" + " CDFLIB: Library of Fortran Routines for Cumulative Distribution\n" + " Functions, Inverses, and Other Parameters.\n" + ".. [2] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. 
New York: Dover, 1972.\n" + "\n" + "Examples\n" + "--------\n" + "Compute the negative binomial cumulative distribution function for an\n" + "exemplary parameter set.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import nbdtr, nbdtrik\n" + ">>> k, n, p = 5, 2, 0.5\n" + ">>> cdf_value = nbdtr(k, n, p)\n" + ">>> cdf_value\n" + "0.9375\n" + "\n" + "Verify that `nbdtrik` recovers the original value for `k`.\n" + "\n" + ">>> nbdtrik(cdf_value, n, p)\n" + "5.0\n" + "\n" + "Plot the function for different parameter sets.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> p_parameters = [0.2, 0.5, 0.7, 0.5]\n" + ">>> n_parameters = [30, 30, 30, 80]\n" + ">>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']\n" + ">>> parameters_list = list(zip(p_parameters, n_parameters, linestyles))\n" + ">>> cdf_vals = np.linspace(0, 1, 1000)\n" + ">>> fig, ax = plt.subplots(figsize=(8, 8))\n" + ">>> for parameter_set in parameters_list:\n" + "... p, n, style = parameter_set\n" + "... nbdtrik_vals = nbdtrik(cdf_vals, n, p)\n" + "... ax.plot(cdf_vals, nbdtrik_vals, label=rf\"$n={n},\\ p={p}$\",\n" + "... ls=style)\n" + ">>> ax.legend()\n" + ">>> ax.set_ylabel(\"$k$\")\n" + ">>> ax.set_xlabel(\"$CDF$\")\n" + ">>> ax.set_title(\"Negative binomial percentile function\")\n" + ">>> plt.show()\n" + "\n" + "The negative binomial distribution is also available as\n" + "`scipy.stats.nbinom`. 
The percentile function method ``ppf``\n" + "returns the result of `nbdtrik` rounded up to integers:\n" + "\n" + ">>> from scipy.stats import nbinom\n" + ">>> q, n, p = 0.6, 5, 0.5\n" + ">>> nbinom.ppf(q, n, p), nbdtrik(q, n, p)\n" + "(5.0, 4.800428460273882)") +ufunc_nbdtrik_loops[0] = loop_d_ddd__As_fff_f +ufunc_nbdtrik_loops[1] = loop_d_ddd__As_ddd_d +ufunc_nbdtrik_types[0] = NPY_FLOAT +ufunc_nbdtrik_types[1] = NPY_FLOAT +ufunc_nbdtrik_types[2] = NPY_FLOAT +ufunc_nbdtrik_types[3] = NPY_FLOAT +ufunc_nbdtrik_types[4] = NPY_DOUBLE +ufunc_nbdtrik_types[5] = NPY_DOUBLE +ufunc_nbdtrik_types[6] = NPY_DOUBLE +ufunc_nbdtrik_types[7] = NPY_DOUBLE +ufunc_nbdtrik_ptr[2*0] = _func_nbdtrik +ufunc_nbdtrik_ptr[2*0+1] = ("nbdtrik") +ufunc_nbdtrik_ptr[2*1] = _func_nbdtrik +ufunc_nbdtrik_ptr[2*1+1] = ("nbdtrik") +ufunc_nbdtrik_data[0] = &ufunc_nbdtrik_ptr[2*0] +ufunc_nbdtrik_data[1] = &ufunc_nbdtrik_ptr[2*1] +nbdtrik = np.PyUFunc_FromFuncAndData(ufunc_nbdtrik_loops, ufunc_nbdtrik_data, ufunc_nbdtrik_types, 2, 3, 1, 0, "nbdtrik", ufunc_nbdtrik_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_nbdtrin_loops[2] +cdef void *ufunc_nbdtrin_ptr[4] +cdef void *ufunc_nbdtrin_data[2] +cdef char ufunc_nbdtrin_types[8] +cdef char *ufunc_nbdtrin_doc = ( + "nbdtrin(k, y, p, out=None)\n" + "\n" + "Inverse of `nbdtr` vs `n`.\n" + "\n" + "Returns the inverse with respect to the parameter `n` of\n" + "`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution\n" + "function.\n" + "\n" + "Parameters\n" + "----------\n" + "k : array_like\n" + " The maximum number of allowed failures (nonnegative int).\n" + "y : array_like\n" + " The probability of `k` or fewer failures before `n` successes (float).\n" + "p : array_like\n" + " Probability of success in a single event (float).\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "n : scalar or ndarray\n" + " The number of successes `n` such that `nbdtr(k, n, p) = y`.\n" + 
"\n" + "See Also\n" + "--------\n" + "nbdtr : Cumulative distribution function of the negative binomial.\n" + "nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.\n" + "nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.\n" + "\n" + "Formula 26.5.26 of [2]_,\n" + "\n" + ".. math::\n" + " \\sum_{j=k + 1}^\\infty {{n + j - 1}\n" + " \\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),\n" + "\n" + "is used to reduce calculation of the cumulative distribution function to\n" + "that of a regularized incomplete beta :math:`I`.\n" + "\n" + "Computation of `n` involves a search for a value that produces the desired\n" + "value of `y`. The search relies on the monotonicity of `y` with `n`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Barry Brown, James Lovato, and Kathy Russell,\n" + " CDFLIB: Library of Fortran Routines for Cumulative Distribution\n" + " Functions, Inverses, and Other Parameters.\n" + ".. [2] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. 
New York: Dover, 1972.\n" + "\n" + "Examples\n" + "--------\n" + "Compute the negative binomial cumulative distribution function for an\n" + "exemplary parameter set.\n" + "\n" + ">>> from scipy.special import nbdtr, nbdtrin\n" + ">>> k, n, p = 5, 2, 0.5\n" + ">>> cdf_value = nbdtr(k, n, p)\n" + ">>> cdf_value\n" + "0.9375\n" + "\n" + "Verify that `nbdtrin` recovers the original value for `n` up to floating\n" + "point accuracy.\n" + "\n" + ">>> nbdtrin(k, cdf_value, p)\n" + "1.999999999998137") +ufunc_nbdtrin_loops[0] = loop_d_ddd__As_fff_f +ufunc_nbdtrin_loops[1] = loop_d_ddd__As_ddd_d +ufunc_nbdtrin_types[0] = NPY_FLOAT +ufunc_nbdtrin_types[1] = NPY_FLOAT +ufunc_nbdtrin_types[2] = NPY_FLOAT +ufunc_nbdtrin_types[3] = NPY_FLOAT +ufunc_nbdtrin_types[4] = NPY_DOUBLE +ufunc_nbdtrin_types[5] = NPY_DOUBLE +ufunc_nbdtrin_types[6] = NPY_DOUBLE +ufunc_nbdtrin_types[7] = NPY_DOUBLE +ufunc_nbdtrin_ptr[2*0] = _func_nbdtrin +ufunc_nbdtrin_ptr[2*0+1] = ("nbdtrin") +ufunc_nbdtrin_ptr[2*1] = _func_nbdtrin +ufunc_nbdtrin_ptr[2*1+1] = ("nbdtrin") +ufunc_nbdtrin_data[0] = &ufunc_nbdtrin_ptr[2*0] +ufunc_nbdtrin_data[1] = &ufunc_nbdtrin_ptr[2*1] +nbdtrin = np.PyUFunc_FromFuncAndData(ufunc_nbdtrin_loops, ufunc_nbdtrin_data, ufunc_nbdtrin_types, 2, 3, 1, 0, "nbdtrin", ufunc_nbdtrin_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_ncfdtr_loops[2] +cdef void *ufunc_ncfdtr_ptr[4] +cdef void *ufunc_ncfdtr_data[2] +cdef char ufunc_ncfdtr_types[10] +cdef char *ufunc_ncfdtr_doc = ( + "ncfdtr(dfn, dfd, nc, f, out=None)\n" + "\n" + "Cumulative distribution function of the non-central F distribution.\n" + "\n" + "The non-central F describes the distribution of,\n" + "\n" + ".. 
math::\n" + " Z = \\frac{X/d_n}{Y/d_d}\n" + "\n" + "where :math:`X` and :math:`Y` are independently distributed, with\n" + ":math:`X` distributed non-central :math:`\\chi^2` with noncentrality\n" + "parameter `nc` and :math:`d_n` degrees of freedom, and :math:`Y`\n" + "distributed :math:`\\chi^2` with :math:`d_d` degrees of freedom.\n" + "\n" + "Parameters\n" + "----------\n" + "dfn : array_like\n" + " Degrees of freedom of the numerator sum of squares. Range (0, inf).\n" + "dfd : array_like\n" + " Degrees of freedom of the denominator sum of squares. Range (0, inf).\n" + "nc : array_like\n" + " Noncentrality parameter. Should be in range (0, 1e4).\n" + "f : array_like\n" + " Quantiles, i.e. the upper limit of integration.\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "cdf : scalar or ndarray\n" + " The calculated CDF. If all inputs are scalar, the return will be a\n" + " float. Otherwise it will be an array.\n" + "\n" + "See Also\n" + "--------\n" + "ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.\n" + "ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.\n" + "ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.\n" + "ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.\n" + "\n" + "Notes\n" + "-----\n" + "Wrapper for the CDFLIB [1]_ Fortran routine `cdffnc`.\n" + "\n" + "The cumulative distribution function is computed using Formula 26.6.20 of\n" + "[2]_:\n" + "\n" + ".. math::\n" + " F(d_n, d_d, n_c, f) = \\sum_{j=0}^\\infty e^{-n_c/2}\n" + " \\frac{(n_c/2)^j}{j!} I_{x}(\\frac{d_n}{2} + j, \\frac{d_d}{2}),\n" + "\n" + "where :math:`I` is the regularized incomplete beta function, and\n" + ":math:`x = f d_n/(f d_n + d_d)`.\n" + "\n" + "The computation time required for this routine is proportional to the\n" + "noncentrality parameter `nc`. Very large values of this parameter can\n" + "consume immense computer resources. 
This is why the search range is\n" + "bounded by 10,000.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Barry Brown, James Lovato, and Kathy Russell,\n" + " CDFLIB: Library of Fortran Routines for Cumulative Distribution\n" + " Functions, Inverses, and Other Parameters.\n" + ".. [2] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy import special\n" + ">>> from scipy import stats\n" + ">>> import matplotlib.pyplot as plt\n" + "\n" + "Plot the CDF of the non-central F distribution, for nc=0. Compare with the\n" + "F-distribution from scipy.stats:\n" + "\n" + ">>> x = np.linspace(-1, 8, num=500)\n" + ">>> dfn = 3\n" + ">>> dfd = 2\n" + ">>> ncf_stats = stats.f.cdf(x, dfn, dfd)\n" + ">>> ncf_special = special.ncfdtr(dfn, dfd, 0, x)\n" + "\n" + ">>> fig = plt.figure()\n" + ">>> ax = fig.add_subplot(111)\n" + ">>> ax.plot(x, ncf_stats, 'b-', lw=3)\n" + ">>> ax.plot(x, ncf_special, 'r-')\n" + ">>> plt.show()") +ufunc_ncfdtr_loops[0] = loop_d_dddd__As_ffff_f +ufunc_ncfdtr_loops[1] = loop_d_dddd__As_dddd_d +ufunc_ncfdtr_types[0] = NPY_FLOAT +ufunc_ncfdtr_types[1] = NPY_FLOAT +ufunc_ncfdtr_types[2] = NPY_FLOAT +ufunc_ncfdtr_types[3] = NPY_FLOAT +ufunc_ncfdtr_types[4] = NPY_FLOAT +ufunc_ncfdtr_types[5] = NPY_DOUBLE +ufunc_ncfdtr_types[6] = NPY_DOUBLE +ufunc_ncfdtr_types[7] = NPY_DOUBLE +ufunc_ncfdtr_types[8] = NPY_DOUBLE +ufunc_ncfdtr_types[9] = NPY_DOUBLE +ufunc_ncfdtr_ptr[2*0] = _func_ncfdtr +ufunc_ncfdtr_ptr[2*0+1] = ("ncfdtr") +ufunc_ncfdtr_ptr[2*1] = _func_ncfdtr +ufunc_ncfdtr_ptr[2*1+1] = ("ncfdtr") +ufunc_ncfdtr_data[0] = &ufunc_ncfdtr_ptr[2*0] +ufunc_ncfdtr_data[1] = &ufunc_ncfdtr_ptr[2*1] +ncfdtr = np.PyUFunc_FromFuncAndData(ufunc_ncfdtr_loops, ufunc_ncfdtr_data, ufunc_ncfdtr_types, 2, 4, 1, 0, "ncfdtr", ufunc_ncfdtr_doc, 0) + +cdef np.PyUFuncGenericFunction 
ufunc_ncfdtri_loops[2] +cdef void *ufunc_ncfdtri_ptr[4] +cdef void *ufunc_ncfdtri_data[2] +cdef char ufunc_ncfdtri_types[10] +cdef char *ufunc_ncfdtri_doc = ( + "ncfdtri(dfn, dfd, nc, p, out=None)\n" + "\n" + "Inverse with respect to `f` of the CDF of the non-central F distribution.\n" + "\n" + "See `ncfdtr` for more details.\n" + "\n" + "Parameters\n" + "----------\n" + "dfn : array_like\n" + " Degrees of freedom of the numerator sum of squares. Range (0, inf).\n" + "dfd : array_like\n" + " Degrees of freedom of the denominator sum of squares. Range (0, inf).\n" + "nc : array_like\n" + " Noncentrality parameter. Should be in range (0, 1e4).\n" + "p : array_like\n" + " Value of the cumulative distribution function. Must be in the\n" + " range [0, 1].\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "f : scalar or ndarray\n" + " Quantiles, i.e., the upper limit of integration.\n" + "\n" + "See Also\n" + "--------\n" + "ncfdtr : CDF of the non-central F distribution.\n" + "ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.\n" + "ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.\n" + "ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import ncfdtr, ncfdtri\n" + "\n" + "Compute the CDF for several values of `f`:\n" + "\n" + ">>> f = [0.5, 1, 1.5]\n" + ">>> p = ncfdtr(2, 3, 1.5, f)\n" + ">>> p\n" + "array([ 0.20782291, 0.36107392, 0.47345752])\n" + "\n" + "Compute the inverse. We recover the values of `f`, as expected:\n" + "\n" + ">>> ncfdtri(2, 3, 1.5, p)\n" + "array([ 0.5, 1. 
, 1.5])") +ufunc_ncfdtri_loops[0] = loop_d_dddd__As_ffff_f +ufunc_ncfdtri_loops[1] = loop_d_dddd__As_dddd_d +ufunc_ncfdtri_types[0] = NPY_FLOAT +ufunc_ncfdtri_types[1] = NPY_FLOAT +ufunc_ncfdtri_types[2] = NPY_FLOAT +ufunc_ncfdtri_types[3] = NPY_FLOAT +ufunc_ncfdtri_types[4] = NPY_FLOAT +ufunc_ncfdtri_types[5] = NPY_DOUBLE +ufunc_ncfdtri_types[6] = NPY_DOUBLE +ufunc_ncfdtri_types[7] = NPY_DOUBLE +ufunc_ncfdtri_types[8] = NPY_DOUBLE +ufunc_ncfdtri_types[9] = NPY_DOUBLE +ufunc_ncfdtri_ptr[2*0] = _func_ncfdtri +ufunc_ncfdtri_ptr[2*0+1] = ("ncfdtri") +ufunc_ncfdtri_ptr[2*1] = _func_ncfdtri +ufunc_ncfdtri_ptr[2*1+1] = ("ncfdtri") +ufunc_ncfdtri_data[0] = &ufunc_ncfdtri_ptr[2*0] +ufunc_ncfdtri_data[1] = &ufunc_ncfdtri_ptr[2*1] +ncfdtri = np.PyUFunc_FromFuncAndData(ufunc_ncfdtri_loops, ufunc_ncfdtri_data, ufunc_ncfdtri_types, 2, 4, 1, 0, "ncfdtri", ufunc_ncfdtri_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_ncfdtridfd_loops[2] +cdef void *ufunc_ncfdtridfd_ptr[4] +cdef void *ufunc_ncfdtridfd_data[2] +cdef char ufunc_ncfdtridfd_types[10] +cdef char *ufunc_ncfdtridfd_doc = ( + "ncfdtridfd(dfn, p, nc, f, out=None)\n" + "\n" + "Calculate degrees of freedom (denominator) for the noncentral F-distribution.\n" + "\n" + "This is the inverse with respect to `dfd` of `ncfdtr`.\n" + "See `ncfdtr` for more details.\n" + "\n" + "Parameters\n" + "----------\n" + "dfn : array_like\n" + " Degrees of freedom of the numerator sum of squares. Range (0, inf).\n" + "p : array_like\n" + " Value of the cumulative distribution function. Must be in the\n" + " range [0, 1].\n" + "nc : array_like\n" + " Noncentrality parameter. 
Should be in range (0, 1e4).\n" + "f : array_like\n" + " Quantiles, i.e., the upper limit of integration.\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "dfd : scalar or ndarray\n" + " Degrees of freedom of the denominator sum of squares.\n" + "\n" + "See Also\n" + "--------\n" + "ncfdtr : CDF of the non-central F distribution.\n" + "ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.\n" + "ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.\n" + "ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.\n" + "\n" + "Notes\n" + "-----\n" + "The value of the cumulative noncentral F distribution is not necessarily\n" + "monotone in either degrees of freedom. There thus may be two values that\n" + "provide a given CDF value. This routine assumes monotonicity and will\n" + "find an arbitrary one of the two values.\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import ncfdtr, ncfdtridfd\n" + "\n" + "Compute the CDF for several values of `dfd`:\n" + "\n" + ">>> dfd = [1, 2, 3]\n" + ">>> p = ncfdtr(2, dfd, 0.25, 15)\n" + ">>> p\n" + "array([ 0.8097138 , 0.93020416, 0.96787852])\n" + "\n" + "Compute the inverse. 
We recover the values of `dfd`, as expected:\n" + "\n" + ">>> ncfdtridfd(2, p, 0.25, 15)\n" + "array([ 1., 2., 3.])") +ufunc_ncfdtridfd_loops[0] = loop_d_dddd__As_ffff_f +ufunc_ncfdtridfd_loops[1] = loop_d_dddd__As_dddd_d +ufunc_ncfdtridfd_types[0] = NPY_FLOAT +ufunc_ncfdtridfd_types[1] = NPY_FLOAT +ufunc_ncfdtridfd_types[2] = NPY_FLOAT +ufunc_ncfdtridfd_types[3] = NPY_FLOAT +ufunc_ncfdtridfd_types[4] = NPY_FLOAT +ufunc_ncfdtridfd_types[5] = NPY_DOUBLE +ufunc_ncfdtridfd_types[6] = NPY_DOUBLE +ufunc_ncfdtridfd_types[7] = NPY_DOUBLE +ufunc_ncfdtridfd_types[8] = NPY_DOUBLE +ufunc_ncfdtridfd_types[9] = NPY_DOUBLE +ufunc_ncfdtridfd_ptr[2*0] = _func_ncfdtridfd +ufunc_ncfdtridfd_ptr[2*0+1] = ("ncfdtridfd") +ufunc_ncfdtridfd_ptr[2*1] = _func_ncfdtridfd +ufunc_ncfdtridfd_ptr[2*1+1] = ("ncfdtridfd") +ufunc_ncfdtridfd_data[0] = &ufunc_ncfdtridfd_ptr[2*0] +ufunc_ncfdtridfd_data[1] = &ufunc_ncfdtridfd_ptr[2*1] +ncfdtridfd = np.PyUFunc_FromFuncAndData(ufunc_ncfdtridfd_loops, ufunc_ncfdtridfd_data, ufunc_ncfdtridfd_types, 2, 4, 1, 0, "ncfdtridfd", ufunc_ncfdtridfd_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_ncfdtridfn_loops[2] +cdef void *ufunc_ncfdtridfn_ptr[4] +cdef void *ufunc_ncfdtridfn_data[2] +cdef char ufunc_ncfdtridfn_types[10] +cdef char *ufunc_ncfdtridfn_doc = ( + "ncfdtridfn(p, dfd, nc, f, out=None)\n" + "\n" + "Calculate degrees of freedom (numerator) for the noncentral F-distribution.\n" + "\n" + "This is the inverse with respect to `dfn` of `ncfdtr`.\n" + "See `ncfdtr` for more details.\n" + "\n" + "Parameters\n" + "----------\n" + "p : array_like\n" + " Value of the cumulative distribution function. Must be in the\n" + " range [0, 1].\n" + "dfd : array_like\n" + " Degrees of freedom of the denominator sum of squares. Range (0, inf).\n" + "nc : array_like\n" + " Noncentrality parameter. 
Should be in range (0, 1e4).\n" + "f : float\n" + " Quantiles, i.e., the upper limit of integration.\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "dfn : scalar or ndarray\n" + " Degrees of freedom of the numerator sum of squares.\n" + "\n" + "See Also\n" + "--------\n" + "ncfdtr : CDF of the non-central F distribution.\n" + "ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.\n" + "ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.\n" + "ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.\n" + "\n" + "Notes\n" + "-----\n" + "The value of the cumulative noncentral F distribution is not necessarily\n" + "monotone in either degrees of freedom. There thus may be two values that\n" + "provide a given CDF value. This routine assumes monotonicity and will\n" + "find an arbitrary one of the two values.\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import ncfdtr, ncfdtridfn\n" + "\n" + "Compute the CDF for several values of `dfn`:\n" + "\n" + ">>> dfn = [1, 2, 3]\n" + ">>> p = ncfdtr(dfn, 2, 0.25, 15)\n" + ">>> p\n" + "array([ 0.92562363, 0.93020416, 0.93188394])\n" + "\n" + "Compute the inverse. 
We recover the values of `dfn`, as expected:\n" + "\n" + ">>> ncfdtridfn(p, 2, 0.25, 15)\n" + "array([ 1., 2., 3.])") +ufunc_ncfdtridfn_loops[0] = loop_d_dddd__As_ffff_f +ufunc_ncfdtridfn_loops[1] = loop_d_dddd__As_dddd_d +ufunc_ncfdtridfn_types[0] = NPY_FLOAT +ufunc_ncfdtridfn_types[1] = NPY_FLOAT +ufunc_ncfdtridfn_types[2] = NPY_FLOAT +ufunc_ncfdtridfn_types[3] = NPY_FLOAT +ufunc_ncfdtridfn_types[4] = NPY_FLOAT +ufunc_ncfdtridfn_types[5] = NPY_DOUBLE +ufunc_ncfdtridfn_types[6] = NPY_DOUBLE +ufunc_ncfdtridfn_types[7] = NPY_DOUBLE +ufunc_ncfdtridfn_types[8] = NPY_DOUBLE +ufunc_ncfdtridfn_types[9] = NPY_DOUBLE +ufunc_ncfdtridfn_ptr[2*0] = _func_ncfdtridfn +ufunc_ncfdtridfn_ptr[2*0+1] = ("ncfdtridfn") +ufunc_ncfdtridfn_ptr[2*1] = _func_ncfdtridfn +ufunc_ncfdtridfn_ptr[2*1+1] = ("ncfdtridfn") +ufunc_ncfdtridfn_data[0] = &ufunc_ncfdtridfn_ptr[2*0] +ufunc_ncfdtridfn_data[1] = &ufunc_ncfdtridfn_ptr[2*1] +ncfdtridfn = np.PyUFunc_FromFuncAndData(ufunc_ncfdtridfn_loops, ufunc_ncfdtridfn_data, ufunc_ncfdtridfn_types, 2, 4, 1, 0, "ncfdtridfn", ufunc_ncfdtridfn_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_ncfdtrinc_loops[2] +cdef void *ufunc_ncfdtrinc_ptr[4] +cdef void *ufunc_ncfdtrinc_data[2] +cdef char ufunc_ncfdtrinc_types[10] +cdef char *ufunc_ncfdtrinc_doc = ( + "ncfdtrinc(dfn, dfd, p, f, out=None)\n" + "\n" + "Calculate non-centrality parameter for non-central F distribution.\n" + "\n" + "This is the inverse with respect to `nc` of `ncfdtr`.\n" + "See `ncfdtr` for more details.\n" + "\n" + "Parameters\n" + "----------\n" + "dfn : array_like\n" + " Degrees of freedom of the numerator sum of squares. Range (0, inf).\n" + "dfd : array_like\n" + " Degrees of freedom of the denominator sum of squares. Range (0, inf).\n" + "p : array_like\n" + " Value of the cumulative distribution function. 
Must be in the\n" + " range [0, 1].\n" + "f : array_like\n" + " Quantiles, i.e., the upper limit of integration.\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "nc : scalar or ndarray\n" + " Noncentrality parameter.\n" + "\n" + "See Also\n" + "--------\n" + "ncfdtr : CDF of the non-central F distribution.\n" + "ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.\n" + "ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.\n" + "ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import ncfdtr, ncfdtrinc\n" + "\n" + "Compute the CDF for several values of `nc`:\n" + "\n" + ">>> nc = [0.5, 1.5, 2.0]\n" + ">>> p = ncfdtr(2, 3, nc, 15)\n" + ">>> p\n" + "array([ 0.96309246, 0.94327955, 0.93304098])\n" + "\n" + "Compute the inverse. We recover the values of `nc`, as expected:\n" + "\n" + ">>> ncfdtrinc(2, 3, p, 15)\n" + "array([ 0.5, 1.5, 2. 
])") +ufunc_ncfdtrinc_loops[0] = loop_d_dddd__As_ffff_f +ufunc_ncfdtrinc_loops[1] = loop_d_dddd__As_dddd_d +ufunc_ncfdtrinc_types[0] = NPY_FLOAT +ufunc_ncfdtrinc_types[1] = NPY_FLOAT +ufunc_ncfdtrinc_types[2] = NPY_FLOAT +ufunc_ncfdtrinc_types[3] = NPY_FLOAT +ufunc_ncfdtrinc_types[4] = NPY_FLOAT +ufunc_ncfdtrinc_types[5] = NPY_DOUBLE +ufunc_ncfdtrinc_types[6] = NPY_DOUBLE +ufunc_ncfdtrinc_types[7] = NPY_DOUBLE +ufunc_ncfdtrinc_types[8] = NPY_DOUBLE +ufunc_ncfdtrinc_types[9] = NPY_DOUBLE +ufunc_ncfdtrinc_ptr[2*0] = _func_ncfdtrinc +ufunc_ncfdtrinc_ptr[2*0+1] = ("ncfdtrinc") +ufunc_ncfdtrinc_ptr[2*1] = _func_ncfdtrinc +ufunc_ncfdtrinc_ptr[2*1+1] = ("ncfdtrinc") +ufunc_ncfdtrinc_data[0] = &ufunc_ncfdtrinc_ptr[2*0] +ufunc_ncfdtrinc_data[1] = &ufunc_ncfdtrinc_ptr[2*1] +ncfdtrinc = np.PyUFunc_FromFuncAndData(ufunc_ncfdtrinc_loops, ufunc_ncfdtrinc_data, ufunc_ncfdtrinc_types, 2, 4, 1, 0, "ncfdtrinc", ufunc_ncfdtrinc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_nctdtr_loops[2] +cdef void *ufunc_nctdtr_ptr[4] +cdef void *ufunc_nctdtr_data[2] +cdef char ufunc_nctdtr_types[8] +cdef char *ufunc_nctdtr_doc = ( + "nctdtr(df, nc, t, out=None)\n" + "\n" + "Cumulative distribution function of the non-central `t` distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "df : array_like\n" + " Degrees of freedom of the distribution. Should be in range (0, inf).\n" + "nc : array_like\n" + " Noncentrality parameter. Should be in range (-1e6, 1e6).\n" + "t : array_like\n" + " Quantiles, i.e., the upper limit of integration.\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "cdf : scalar or ndarray\n" + " The calculated CDF. If all inputs are scalar, the return will be a\n" + " float. 
Otherwise, it will be an array.\n" + "\n" + "See Also\n" + "--------\n" + "nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.\n" + "nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.\n" + "nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values.\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy import special\n" + ">>> from scipy import stats\n" + ">>> import matplotlib.pyplot as plt\n" + "\n" + "Plot the CDF of the non-central t distribution, for nc=0. Compare with the\n" + "t-distribution from scipy.stats:\n" + "\n" + ">>> x = np.linspace(-5, 5, num=500)\n" + ">>> df = 3\n" + ">>> nct_stats = stats.t.cdf(x, df)\n" + ">>> nct_special = special.nctdtr(df, 0, x)\n" + "\n" + ">>> fig = plt.figure()\n" + ">>> ax = fig.add_subplot(111)\n" + ">>> ax.plot(x, nct_stats, 'b-', lw=3)\n" + ">>> ax.plot(x, nct_special, 'r-')\n" + ">>> plt.show()") +ufunc_nctdtr_loops[0] = loop_d_ddd__As_fff_f +ufunc_nctdtr_loops[1] = loop_d_ddd__As_ddd_d +ufunc_nctdtr_types[0] = NPY_FLOAT +ufunc_nctdtr_types[1] = NPY_FLOAT +ufunc_nctdtr_types[2] = NPY_FLOAT +ufunc_nctdtr_types[3] = NPY_FLOAT +ufunc_nctdtr_types[4] = NPY_DOUBLE +ufunc_nctdtr_types[5] = NPY_DOUBLE +ufunc_nctdtr_types[6] = NPY_DOUBLE +ufunc_nctdtr_types[7] = NPY_DOUBLE +ufunc_nctdtr_ptr[2*0] = _func_nctdtr +ufunc_nctdtr_ptr[2*0+1] = ("nctdtr") +ufunc_nctdtr_ptr[2*1] = _func_nctdtr +ufunc_nctdtr_ptr[2*1+1] = ("nctdtr") +ufunc_nctdtr_data[0] = &ufunc_nctdtr_ptr[2*0] +ufunc_nctdtr_data[1] = &ufunc_nctdtr_ptr[2*1] +nctdtr = np.PyUFunc_FromFuncAndData(ufunc_nctdtr_loops, ufunc_nctdtr_data, ufunc_nctdtr_types, 2, 3, 1, 0, "nctdtr", ufunc_nctdtr_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_nctdtridf_loops[2] +cdef void *ufunc_nctdtridf_ptr[4] +cdef void *ufunc_nctdtridf_data[2] +cdef char ufunc_nctdtridf_types[8] +cdef char *ufunc_nctdtridf_doc = ( + "nctdtridf(p, nc, t, out=None)\n" + "\n" + "Calculate degrees of freedom for non-central t 
distribution.\n" + "\n" + "See `nctdtr` for more details.\n" + "\n" + "Parameters\n" + "----------\n" + "p : array_like\n" + " CDF values, in range (0, 1].\n" + "nc : array_like\n" + " Noncentrality parameter. Should be in range (-1e6, 1e6).\n" + "t : array_like\n" + " Quantiles, i.e., the upper limit of integration.\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "df : scalar or ndarray\n" + " The degrees of freedom. If all inputs are scalar, the return will be a\n" + " float. Otherwise, it will be an array.\n" + "\n" + "See Also\n" + "--------\n" + "nctdtr : CDF of the non-central `t` distribution.\n" + "nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.\n" + "nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values.\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import nctdtr, nctdtridf\n" + "\n" + "Compute the CDF for several values of `df`:\n" + "\n" + ">>> df = [1, 2, 3]\n" + ">>> p = nctdtr(df, 0.25, 1)\n" + ">>> p\n" + "array([0.67491974, 0.716464 , 0.73349456])\n" + "\n" + "Compute the inverse. 
We recover the values of `df`, as expected:\n" + "\n" + ">>> nctdtridf(p, 0.25, 1)\n" + "array([1., 2., 3.])") +ufunc_nctdtridf_loops[0] = loop_d_ddd__As_fff_f +ufunc_nctdtridf_loops[1] = loop_d_ddd__As_ddd_d +ufunc_nctdtridf_types[0] = NPY_FLOAT +ufunc_nctdtridf_types[1] = NPY_FLOAT +ufunc_nctdtridf_types[2] = NPY_FLOAT +ufunc_nctdtridf_types[3] = NPY_FLOAT +ufunc_nctdtridf_types[4] = NPY_DOUBLE +ufunc_nctdtridf_types[5] = NPY_DOUBLE +ufunc_nctdtridf_types[6] = NPY_DOUBLE +ufunc_nctdtridf_types[7] = NPY_DOUBLE +ufunc_nctdtridf_ptr[2*0] = _func_nctdtridf +ufunc_nctdtridf_ptr[2*0+1] = ("nctdtridf") +ufunc_nctdtridf_ptr[2*1] = _func_nctdtridf +ufunc_nctdtridf_ptr[2*1+1] = ("nctdtridf") +ufunc_nctdtridf_data[0] = &ufunc_nctdtridf_ptr[2*0] +ufunc_nctdtridf_data[1] = &ufunc_nctdtridf_ptr[2*1] +nctdtridf = np.PyUFunc_FromFuncAndData(ufunc_nctdtridf_loops, ufunc_nctdtridf_data, ufunc_nctdtridf_types, 2, 3, 1, 0, "nctdtridf", ufunc_nctdtridf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_nctdtrinc_loops[2] +cdef void *ufunc_nctdtrinc_ptr[4] +cdef void *ufunc_nctdtrinc_data[2] +cdef char ufunc_nctdtrinc_types[8] +cdef char *ufunc_nctdtrinc_doc = ( + "nctdtrinc(df, p, t, out=None)\n" + "\n" + "Calculate non-centrality parameter for non-central t distribution.\n" + "\n" + "See `nctdtr` for more details.\n" + "\n" + "Parameters\n" + "----------\n" + "df : array_like\n" + " Degrees of freedom of the distribution. 
Should be in range (0, inf).\n" + "p : array_like\n" + " CDF values, in range (0, 1].\n" + "t : array_like\n" + " Quantiles, i.e., the upper limit of integration.\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "nc : scalar or ndarray\n" + " Noncentrality parameter\n" + "\n" + "See Also\n" + "--------\n" + "nctdtr : CDF of the non-central `t` distribution.\n" + "nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.\n" + "nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import nctdtr, nctdtrinc\n" + "\n" + "Compute the CDF for several values of `nc`:\n" + "\n" + ">>> nc = [0.5, 1.5, 2.5]\n" + ">>> p = nctdtr(3, nc, 1.5)\n" + ">>> p\n" + "array([0.77569497, 0.45524533, 0.1668691 ])\n" + "\n" + "Compute the inverse. We recover the values of `nc`, as expected:\n" + "\n" + ">>> nctdtrinc(3, p, 1.5)\n" + "array([0.5, 1.5, 2.5])") +ufunc_nctdtrinc_loops[0] = loop_d_ddd__As_fff_f +ufunc_nctdtrinc_loops[1] = loop_d_ddd__As_ddd_d +ufunc_nctdtrinc_types[0] = NPY_FLOAT +ufunc_nctdtrinc_types[1] = NPY_FLOAT +ufunc_nctdtrinc_types[2] = NPY_FLOAT +ufunc_nctdtrinc_types[3] = NPY_FLOAT +ufunc_nctdtrinc_types[4] = NPY_DOUBLE +ufunc_nctdtrinc_types[5] = NPY_DOUBLE +ufunc_nctdtrinc_types[6] = NPY_DOUBLE +ufunc_nctdtrinc_types[7] = NPY_DOUBLE +ufunc_nctdtrinc_ptr[2*0] = _func_nctdtrinc +ufunc_nctdtrinc_ptr[2*0+1] = ("nctdtrinc") +ufunc_nctdtrinc_ptr[2*1] = _func_nctdtrinc +ufunc_nctdtrinc_ptr[2*1+1] = ("nctdtrinc") +ufunc_nctdtrinc_data[0] = &ufunc_nctdtrinc_ptr[2*0] +ufunc_nctdtrinc_data[1] = &ufunc_nctdtrinc_ptr[2*1] +nctdtrinc = np.PyUFunc_FromFuncAndData(ufunc_nctdtrinc_loops, ufunc_nctdtrinc_data, ufunc_nctdtrinc_types, 2, 3, 1, 0, "nctdtrinc", ufunc_nctdtrinc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_nctdtrit_loops[2] +cdef void *ufunc_nctdtrit_ptr[4] +cdef void *ufunc_nctdtrit_data[2] +cdef char 
ufunc_nctdtrit_types[8] +cdef char *ufunc_nctdtrit_doc = ( + "nctdtrit(df, nc, p, out=None)\n" + "\n" + "Inverse cumulative distribution function of the non-central t distribution.\n" + "\n" + "See `nctdtr` for more details.\n" + "\n" + "Parameters\n" + "----------\n" + "df : array_like\n" + " Degrees of freedom of the distribution. Should be in range (0, inf).\n" + "nc : array_like\n" + " Noncentrality parameter. Should be in range (-1e6, 1e6).\n" + "p : array_like\n" + " CDF values, in range (0, 1].\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "t : scalar or ndarray\n" + " Quantiles\n" + "\n" + "See Also\n" + "--------\n" + "nctdtr : CDF of the non-central `t` distribution.\n" + "nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.\n" + "nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values.\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import nctdtr, nctdtrit\n" + "\n" + "Compute the CDF for several values of `t`:\n" + "\n" + ">>> t = [0.5, 1, 1.5]\n" + ">>> p = nctdtr(3, 1, t)\n" + ">>> p\n" + "array([0.29811049, 0.46922687, 0.6257559 ])\n" + "\n" + "Compute the inverse. We recover the values of `t`, as expected:\n" + "\n" + ">>> nctdtrit(3, 1, p)\n" + "array([0.5, 1. 
, 1.5])") +ufunc_nctdtrit_loops[0] = loop_d_ddd__As_fff_f +ufunc_nctdtrit_loops[1] = loop_d_ddd__As_ddd_d +ufunc_nctdtrit_types[0] = NPY_FLOAT +ufunc_nctdtrit_types[1] = NPY_FLOAT +ufunc_nctdtrit_types[2] = NPY_FLOAT +ufunc_nctdtrit_types[3] = NPY_FLOAT +ufunc_nctdtrit_types[4] = NPY_DOUBLE +ufunc_nctdtrit_types[5] = NPY_DOUBLE +ufunc_nctdtrit_types[6] = NPY_DOUBLE +ufunc_nctdtrit_types[7] = NPY_DOUBLE +ufunc_nctdtrit_ptr[2*0] = _func_nctdtrit +ufunc_nctdtrit_ptr[2*0+1] = ("nctdtrit") +ufunc_nctdtrit_ptr[2*1] = _func_nctdtrit +ufunc_nctdtrit_ptr[2*1+1] = ("nctdtrit") +ufunc_nctdtrit_data[0] = &ufunc_nctdtrit_ptr[2*0] +ufunc_nctdtrit_data[1] = &ufunc_nctdtrit_ptr[2*1] +nctdtrit = np.PyUFunc_FromFuncAndData(ufunc_nctdtrit_loops, ufunc_nctdtrit_data, ufunc_nctdtrit_types, 2, 3, 1, 0, "nctdtrit", ufunc_nctdtrit_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_ndtr_loops[4] +cdef void *ufunc_ndtr_ptr[8] +cdef void *ufunc_ndtr_data[4] +cdef char ufunc_ndtr_types[8] +cdef char *ufunc_ndtr_doc = ( + "ndtr(x, out=None)\n" + "\n" + "Cumulative distribution of the standard normal distribution.\n" + "\n" + "Returns the area under the standard Gaussian probability\n" + "density function, integrated from minus infinity to `x`\n" + "\n" + ".. 
math::\n" + "\n" + " \\frac{1}{\\sqrt{2\\pi}} \\int_{-\\infty}^x \\exp(-t^2/2) dt\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like, real or complex\n" + " Argument\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The value of the normal CDF evaluated at `x`\n" + "\n" + "See Also\n" + "--------\n" + "log_ndtr : Logarithm of ndtr\n" + "ndtri : Inverse of ndtr, standard normal percentile function\n" + "erf : Error function\n" + "erfc : 1 - erf\n" + "scipy.stats.norm : Normal distribution\n" + "\n" + "Examples\n" + "--------\n" + "Evaluate `ndtr` at one point.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import ndtr\n" + ">>> ndtr(0.5)\n" + "0.6914624612740131\n" + "\n" + "Evaluate the function at several points by providing a NumPy array\n" + "or list for `x`.\n" + "\n" + ">>> ndtr([0, 0.5, 2])\n" + "array([0.5 , 0.69146246, 0.97724987])\n" + "\n" + "Plot the function.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> x = np.linspace(-5, 5, 100)\n" + ">>> fig, ax = plt.subplots()\n" + ">>> ax.plot(x, ndtr(x))\n" + ">>> ax.set_title(r\"Standard normal cumulative distribution function $\\Phi$\")\n" + ">>> plt.show()") +ufunc_ndtr_loops[0] = loop_d_d__As_f_f +ufunc_ndtr_loops[1] = loop_d_d__As_d_d +ufunc_ndtr_loops[2] = loop_D_D__As_F_F +ufunc_ndtr_loops[3] = loop_D_D__As_D_D +ufunc_ndtr_types[0] = NPY_FLOAT +ufunc_ndtr_types[1] = NPY_FLOAT +ufunc_ndtr_types[2] = NPY_DOUBLE +ufunc_ndtr_types[3] = NPY_DOUBLE +ufunc_ndtr_types[4] = NPY_CFLOAT +ufunc_ndtr_types[5] = NPY_CFLOAT +ufunc_ndtr_types[6] = NPY_CDOUBLE +ufunc_ndtr_types[7] = NPY_CDOUBLE +ufunc_ndtr_ptr[2*0] = _func_ndtr +ufunc_ndtr_ptr[2*0+1] = ("ndtr") +ufunc_ndtr_ptr[2*1] = _func_ndtr +ufunc_ndtr_ptr[2*1+1] = ("ndtr") +ufunc_ndtr_ptr[2*2] = scipy.special._ufuncs_cxx._export_faddeeva_ndtr +ufunc_ndtr_ptr[2*2+1] = ("ndtr") +ufunc_ndtr_ptr[2*3] = 
scipy.special._ufuncs_cxx._export_faddeeva_ndtr +ufunc_ndtr_ptr[2*3+1] = ("ndtr") +ufunc_ndtr_data[0] = &ufunc_ndtr_ptr[2*0] +ufunc_ndtr_data[1] = &ufunc_ndtr_ptr[2*1] +ufunc_ndtr_data[2] = &ufunc_ndtr_ptr[2*2] +ufunc_ndtr_data[3] = &ufunc_ndtr_ptr[2*3] +ndtr = np.PyUFunc_FromFuncAndData(ufunc_ndtr_loops, ufunc_ndtr_data, ufunc_ndtr_types, 4, 1, 1, 0, "ndtr", ufunc_ndtr_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_ndtri_loops[2] +cdef void *ufunc_ndtri_ptr[4] +cdef void *ufunc_ndtri_data[2] +cdef char ufunc_ndtri_types[4] +cdef char *ufunc_ndtri_doc = ( + "ndtri(y, out=None)\n" + "\n" + "Inverse of `ndtr` vs x\n" + "\n" + "Returns the argument x for which the area under the standard normal\n" + "probability density function (integrated from minus infinity to `x`)\n" + "is equal to y.\n" + "\n" + "Parameters\n" + "----------\n" + "p : array_like\n" + " Probability\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "x : scalar or ndarray\n" + " Value of x such that ``ndtr(x) == p``.\n" + "\n" + "See Also\n" + "--------\n" + "ndtr : Standard normal cumulative probability distribution\n" + "ndtri_exp : Inverse of log_ndtr\n" + "\n" + "Examples\n" + "--------\n" + "`ndtri` is the percentile function of the standard normal distribution.\n" + "This means it returns the inverse of the cumulative density `ndtr`. First,\n" + "let us compute a cumulative density value.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import ndtri, ndtr\n" + ">>> cdf_val = ndtr(2)\n" + ">>> cdf_val\n" + "0.9772498680518208\n" + "\n" + "Verify that `ndtri` yields the original value for `x` up to floating point\n" + "errors.\n" + "\n" + ">>> ndtri(cdf_val)\n" + "2.0000000000000004\n" + "\n" + "Plot the function. 
For that purpose, we provide a NumPy array as argument.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> x = np.linspace(0.01, 1, 200)\n" + ">>> fig, ax = plt.subplots()\n" + ">>> ax.plot(x, ndtri(x))\n" + ">>> ax.set_title(\"Standard normal percentile function\")\n" + ">>> plt.show()") +ufunc_ndtri_loops[0] = loop_d_d__As_f_f +ufunc_ndtri_loops[1] = loop_d_d__As_d_d +ufunc_ndtri_types[0] = NPY_FLOAT +ufunc_ndtri_types[1] = NPY_FLOAT +ufunc_ndtri_types[2] = NPY_DOUBLE +ufunc_ndtri_types[3] = NPY_DOUBLE +ufunc_ndtri_ptr[2*0] = _func_ndtri +ufunc_ndtri_ptr[2*0+1] = ("ndtri") +ufunc_ndtri_ptr[2*1] = _func_ndtri +ufunc_ndtri_ptr[2*1+1] = ("ndtri") +ufunc_ndtri_data[0] = &ufunc_ndtri_ptr[2*0] +ufunc_ndtri_data[1] = &ufunc_ndtri_ptr[2*1] +ndtri = np.PyUFunc_FromFuncAndData(ufunc_ndtri_loops, ufunc_ndtri_data, ufunc_ndtri_types, 2, 1, 1, 0, "ndtri", ufunc_ndtri_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_ndtri_exp_loops[2] +cdef void *ufunc_ndtri_exp_ptr[4] +cdef void *ufunc_ndtri_exp_data[2] +cdef char ufunc_ndtri_exp_types[4] +cdef char *ufunc_ndtri_exp_doc = ( + "ndtri_exp(y, out=None)\n" + "\n" + "Inverse of `log_ndtr` vs x. 
Allows for greater precision than\n" + "`ndtri` composed with `numpy.exp` for very small values of y and for\n" + "y close to 0.\n" + "\n" + "Parameters\n" + "----------\n" + "y : array_like of float\n" + " Function argument\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Inverse of the log CDF of the standard normal distribution, evaluated\n" + " at y.\n" + "\n" + "See Also\n" + "--------\n" + "log_ndtr : log of the standard normal cumulative distribution function\n" + "ndtr : standard normal cumulative distribution function\n" + "ndtri : standard normal percentile function\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "`ndtri_exp` agrees with the naive implementation when the latter does\n" + "not suffer from underflow.\n" + "\n" + ">>> sc.ndtri_exp(-1)\n" + "-0.33747496376420244\n" + ">>> sc.ndtri(np.exp(-1))\n" + "-0.33747496376420244\n" + "\n" + "For extreme values of y, the naive approach fails\n" + "\n" + ">>> sc.ndtri(np.exp(-800))\n" + "-inf\n" + ">>> sc.ndtri(np.exp(-1e-20))\n" + "inf\n" + "\n" + "whereas `ndtri_exp` is still able to compute the result to high precision.\n" + "\n" + ">>> sc.ndtri_exp(-800)\n" + "-39.88469483825668\n" + ">>> sc.ndtri_exp(-1e-20)\n" + "9.262340089798409") +ufunc_ndtri_exp_loops[0] = loop_d_d__As_f_f +ufunc_ndtri_exp_loops[1] = loop_d_d__As_d_d +ufunc_ndtri_exp_types[0] = NPY_FLOAT +ufunc_ndtri_exp_types[1] = NPY_FLOAT +ufunc_ndtri_exp_types[2] = NPY_DOUBLE +ufunc_ndtri_exp_types[3] = NPY_DOUBLE +ufunc_ndtri_exp_ptr[2*0] = _func_ndtri_exp +ufunc_ndtri_exp_ptr[2*0+1] = ("ndtri_exp") +ufunc_ndtri_exp_ptr[2*1] = _func_ndtri_exp +ufunc_ndtri_exp_ptr[2*1+1] = ("ndtri_exp") +ufunc_ndtri_exp_data[0] = &ufunc_ndtri_exp_ptr[2*0] +ufunc_ndtri_exp_data[1] = &ufunc_ndtri_exp_ptr[2*1] +ndtri_exp = np.PyUFunc_FromFuncAndData(ufunc_ndtri_exp_loops, ufunc_ndtri_exp_data, 
ufunc_ndtri_exp_types, 2, 1, 1, 0, "ndtri_exp", ufunc_ndtri_exp_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_nrdtrimn_loops[2] +cdef void *ufunc_nrdtrimn_ptr[4] +cdef void *ufunc_nrdtrimn_data[2] +cdef char ufunc_nrdtrimn_types[8] +cdef char *ufunc_nrdtrimn_doc = ( + "nrdtrimn(p, std, x, out=None)\n" + "\n" + "Calculate mean of normal distribution given other params.\n" + "\n" + "Parameters\n" + "----------\n" + "p : array_like\n" + " CDF values, in range (0, 1].\n" + "std : array_like\n" + " Standard deviation.\n" + "x : array_like\n" + " Quantiles, i.e. the upper limit of integration.\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "mn : scalar or ndarray\n" + " The mean of the normal distribution.\n" + "\n" + "See Also\n" + "--------\n" + "scipy.stats.norm : Normal distribution\n" + "ndtr : Standard normal cumulative probability distribution\n" + "ndtri : Inverse of standard normal CDF with respect to quantile\n" + "nrdtrisd : Inverse of normal distribution CDF with respect to\n" + " standard deviation\n" + "\n" + "Examples\n" + "--------\n" + "`nrdtrimn` can be used to recover the mean of a normal distribution\n" + "if we know the CDF value `p` for a given quantile `x` and the\n" + "standard deviation `std`. 
First, we calculate\n" + "the normal distribution CDF for an exemplary parameter set.\n" + "\n" + ">>> from scipy.stats import norm\n" + ">>> mean = 3.\n" + ">>> std = 2.\n" + ">>> x = 6.\n" + ">>> p = norm.cdf(x, loc=mean, scale=std)\n" + ">>> p\n" + "0.9331927987311419\n" + "\n" + "Verify that `nrdtrimn` returns the original value for `mean`.\n" + "\n" + ">>> from scipy.special import nrdtrimn\n" + ">>> nrdtrimn(p, std, x)\n" + "3.0000000000000004") +ufunc_nrdtrimn_loops[0] = loop_d_ddd__As_fff_f +ufunc_nrdtrimn_loops[1] = loop_d_ddd__As_ddd_d +ufunc_nrdtrimn_types[0] = NPY_FLOAT +ufunc_nrdtrimn_types[1] = NPY_FLOAT +ufunc_nrdtrimn_types[2] = NPY_FLOAT +ufunc_nrdtrimn_types[3] = NPY_FLOAT +ufunc_nrdtrimn_types[4] = NPY_DOUBLE +ufunc_nrdtrimn_types[5] = NPY_DOUBLE +ufunc_nrdtrimn_types[6] = NPY_DOUBLE +ufunc_nrdtrimn_types[7] = NPY_DOUBLE +ufunc_nrdtrimn_ptr[2*0] = _func_nrdtrimn +ufunc_nrdtrimn_ptr[2*0+1] = ("nrdtrimn") +ufunc_nrdtrimn_ptr[2*1] = _func_nrdtrimn +ufunc_nrdtrimn_ptr[2*1+1] = ("nrdtrimn") +ufunc_nrdtrimn_data[0] = &ufunc_nrdtrimn_ptr[2*0] +ufunc_nrdtrimn_data[1] = &ufunc_nrdtrimn_ptr[2*1] +nrdtrimn = np.PyUFunc_FromFuncAndData(ufunc_nrdtrimn_loops, ufunc_nrdtrimn_data, ufunc_nrdtrimn_types, 2, 3, 1, 0, "nrdtrimn", ufunc_nrdtrimn_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_nrdtrisd_loops[2] +cdef void *ufunc_nrdtrisd_ptr[4] +cdef void *ufunc_nrdtrisd_data[2] +cdef char ufunc_nrdtrisd_types[8] +cdef char *ufunc_nrdtrisd_doc = ( + "nrdtrisd(mn, p, x, out=None)\n" + "\n" + "Calculate standard deviation of normal distribution given other params.\n" + "\n" + "Parameters\n" + "----------\n" + "mn : scalar or ndarray\n" + " The mean of the normal distribution.\n" + "p : array_like\n" + " CDF values, in range (0, 1].\n" + "x : array_like\n" + " Quantiles, i.e. 
the upper limit of integration.\n" + "\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "std : scalar or ndarray\n" + " Standard deviation.\n" + "\n" + "See Also\n" + "--------\n" + "scipy.stats.norm : Normal distribution\n" + "ndtr : Standard normal cumulative probability distribution\n" + "ndtri : Inverse of standard normal CDF with respect to quantile\n" + "nrdtrimn : Inverse of normal distribution CDF with respect to\n" + " mean\n" + "\n" + "Examples\n" + "--------\n" + "`nrdtrisd` can be used to recover the standard deviation of a normal\n" + "distribution if we know the CDF value `p` for a given quantile `x` and\n" + "the mean `mn`. First, we calculate the normal distribution CDF for an\n" + "exemplary parameter set.\n" + "\n" + ">>> from scipy.stats import norm\n" + ">>> mean = 3.\n" + ">>> std = 2.\n" + ">>> x = 6.\n" + ">>> p = norm.cdf(x, loc=mean, scale=std)\n" + ">>> p\n" + "0.9331927987311419\n" + "\n" + "Verify that `nrdtrisd` returns the original value for `std`.\n" + "\n" + ">>> from scipy.special import nrdtrisd\n" + ">>> nrdtrisd(mean, p, x)\n" + "2.0000000000000004") +ufunc_nrdtrisd_loops[0] = loop_d_ddd__As_fff_f +ufunc_nrdtrisd_loops[1] = loop_d_ddd__As_ddd_d +ufunc_nrdtrisd_types[0] = NPY_FLOAT +ufunc_nrdtrisd_types[1] = NPY_FLOAT +ufunc_nrdtrisd_types[2] = NPY_FLOAT +ufunc_nrdtrisd_types[3] = NPY_FLOAT +ufunc_nrdtrisd_types[4] = NPY_DOUBLE +ufunc_nrdtrisd_types[5] = NPY_DOUBLE +ufunc_nrdtrisd_types[6] = NPY_DOUBLE +ufunc_nrdtrisd_types[7] = NPY_DOUBLE +ufunc_nrdtrisd_ptr[2*0] = _func_nrdtrisd +ufunc_nrdtrisd_ptr[2*0+1] = ("nrdtrisd") +ufunc_nrdtrisd_ptr[2*1] = _func_nrdtrisd +ufunc_nrdtrisd_ptr[2*1+1] = ("nrdtrisd") +ufunc_nrdtrisd_data[0] = &ufunc_nrdtrisd_ptr[2*0] +ufunc_nrdtrisd_data[1] = &ufunc_nrdtrisd_ptr[2*1] +nrdtrisd = np.PyUFunc_FromFuncAndData(ufunc_nrdtrisd_loops, ufunc_nrdtrisd_data, ufunc_nrdtrisd_types, 2, 3, 1, 0, "nrdtrisd", ufunc_nrdtrisd_doc, 0) + 
+cdef np.PyUFuncGenericFunction ufunc_obl_ang1_loops[2] +cdef void *ufunc_obl_ang1_ptr[4] +cdef void *ufunc_obl_ang1_data[2] +cdef char ufunc_obl_ang1_types[12] +cdef char *ufunc_obl_ang1_doc = ( + "obl_ang1(m, n, c, x, out=None)\n" + "\n" + "Oblate spheroidal angular function of the first kind and its derivative\n" + "\n" + "Computes the oblate spheroidal angular function of the first kind\n" + "and its derivative (with respect to `x`) for mode parameters m>=0\n" + "and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.\n" + "\n" + "Parameters\n" + "----------\n" + "m : array_like\n" + " Mode parameter m (nonnegative)\n" + "n : array_like\n" + " Mode parameter n (>= m)\n" + "c : array_like\n" + " Spheroidal parameter\n" + "x : array_like\n" + " Parameter x (``|x| < 1.0``)\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "s : scalar or ndarray\n" + " Value of the function\n" + "sp : scalar or ndarray\n" + " Value of the derivative vs x\n" + "\n" + "See Also\n" + "--------\n" + "obl_ang1_cv") +ufunc_obl_ang1_loops[0] = loop_d_dddd_d_As_ffff_ff +ufunc_obl_ang1_loops[1] = loop_d_dddd_d_As_dddd_dd +ufunc_obl_ang1_types[0] = NPY_FLOAT +ufunc_obl_ang1_types[1] = NPY_FLOAT +ufunc_obl_ang1_types[2] = NPY_FLOAT +ufunc_obl_ang1_types[3] = NPY_FLOAT +ufunc_obl_ang1_types[4] = NPY_FLOAT +ufunc_obl_ang1_types[5] = NPY_FLOAT +ufunc_obl_ang1_types[6] = NPY_DOUBLE +ufunc_obl_ang1_types[7] = NPY_DOUBLE +ufunc_obl_ang1_types[8] = NPY_DOUBLE +ufunc_obl_ang1_types[9] = NPY_DOUBLE +ufunc_obl_ang1_types[10] = NPY_DOUBLE +ufunc_obl_ang1_types[11] = NPY_DOUBLE +ufunc_obl_ang1_ptr[2*0] = _func_oblate_aswfa_nocv_wrap +ufunc_obl_ang1_ptr[2*0+1] = ("obl_ang1") +ufunc_obl_ang1_ptr[2*1] = _func_oblate_aswfa_nocv_wrap +ufunc_obl_ang1_ptr[2*1+1] = ("obl_ang1") +ufunc_obl_ang1_data[0] = &ufunc_obl_ang1_ptr[2*0] +ufunc_obl_ang1_data[1] = &ufunc_obl_ang1_ptr[2*1] +obl_ang1 = np.PyUFunc_FromFuncAndData(ufunc_obl_ang1_loops, 
ufunc_obl_ang1_data, ufunc_obl_ang1_types, 2, 4, 2, 0, "obl_ang1", ufunc_obl_ang1_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_obl_ang1_cv_loops[2] +cdef void *ufunc_obl_ang1_cv_ptr[4] +cdef void *ufunc_obl_ang1_cv_data[2] +cdef char ufunc_obl_ang1_cv_types[14] +cdef char *ufunc_obl_ang1_cv_doc = ( + "obl_ang1_cv(m, n, c, cv, x, out=None)\n" + "\n" + "Oblate spheroidal angular function obl_ang1 for precomputed characteristic value\n" + "\n" + "Computes the oblate spheroidal angular function of the first kind\n" + "and its derivative (with respect to `x`) for mode parameters m>=0\n" + "and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires\n" + "pre-computed characteristic value.\n" + "\n" + "Parameters\n" + "----------\n" + "m : array_like\n" + " Mode parameter m (nonnegative)\n" + "n : array_like\n" + " Mode parameter n (>= m)\n" + "c : array_like\n" + " Spheroidal parameter\n" + "cv : array_like\n" + " Characteristic value\n" + "x : array_like\n" + " Parameter x (``|x| < 1.0``)\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "s : scalar or ndarray\n" + " Value of the function\n" + "sp : scalar or ndarray\n" + " Value of the derivative vs x\n" + "\n" + "See Also\n" + "--------\n" + "obl_ang1") +ufunc_obl_ang1_cv_loops[0] = loop_i_ddddd_dd_As_fffff_ff +ufunc_obl_ang1_cv_loops[1] = loop_i_ddddd_dd_As_ddddd_dd +ufunc_obl_ang1_cv_types[0] = NPY_FLOAT +ufunc_obl_ang1_cv_types[1] = NPY_FLOAT +ufunc_obl_ang1_cv_types[2] = NPY_FLOAT +ufunc_obl_ang1_cv_types[3] = NPY_FLOAT +ufunc_obl_ang1_cv_types[4] = NPY_FLOAT +ufunc_obl_ang1_cv_types[5] = NPY_FLOAT +ufunc_obl_ang1_cv_types[6] = NPY_FLOAT +ufunc_obl_ang1_cv_types[7] = NPY_DOUBLE +ufunc_obl_ang1_cv_types[8] = NPY_DOUBLE +ufunc_obl_ang1_cv_types[9] = NPY_DOUBLE +ufunc_obl_ang1_cv_types[10] = NPY_DOUBLE +ufunc_obl_ang1_cv_types[11] = NPY_DOUBLE +ufunc_obl_ang1_cv_types[12] = NPY_DOUBLE +ufunc_obl_ang1_cv_types[13] = NPY_DOUBLE 
+ufunc_obl_ang1_cv_ptr[2*0] = _func_oblate_aswfa_wrap +ufunc_obl_ang1_cv_ptr[2*0+1] = ("obl_ang1_cv") +ufunc_obl_ang1_cv_ptr[2*1] = _func_oblate_aswfa_wrap +ufunc_obl_ang1_cv_ptr[2*1+1] = ("obl_ang1_cv") +ufunc_obl_ang1_cv_data[0] = &ufunc_obl_ang1_cv_ptr[2*0] +ufunc_obl_ang1_cv_data[1] = &ufunc_obl_ang1_cv_ptr[2*1] +obl_ang1_cv = np.PyUFunc_FromFuncAndData(ufunc_obl_ang1_cv_loops, ufunc_obl_ang1_cv_data, ufunc_obl_ang1_cv_types, 2, 5, 2, 0, "obl_ang1_cv", ufunc_obl_ang1_cv_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_obl_cv_loops[2] +cdef void *ufunc_obl_cv_ptr[4] +cdef void *ufunc_obl_cv_data[2] +cdef char ufunc_obl_cv_types[8] +cdef char *ufunc_obl_cv_doc = ( + "obl_cv(m, n, c, out=None)\n" + "\n" + "Characteristic value of oblate spheroidal function\n" + "\n" + "Computes the characteristic value of oblate spheroidal wave\n" + "functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.\n" + "\n" + "Parameters\n" + "----------\n" + "m : array_like\n" + " Mode parameter m (nonnegative)\n" + "n : array_like\n" + " Mode parameter n (>= m)\n" + "c : array_like\n" + " Spheroidal parameter\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "cv : scalar or ndarray\n" + " Characteristic value") +ufunc_obl_cv_loops[0] = loop_d_ddd__As_fff_f +ufunc_obl_cv_loops[1] = loop_d_ddd__As_ddd_d +ufunc_obl_cv_types[0] = NPY_FLOAT +ufunc_obl_cv_types[1] = NPY_FLOAT +ufunc_obl_cv_types[2] = NPY_FLOAT +ufunc_obl_cv_types[3] = NPY_FLOAT +ufunc_obl_cv_types[4] = NPY_DOUBLE +ufunc_obl_cv_types[5] = NPY_DOUBLE +ufunc_obl_cv_types[6] = NPY_DOUBLE +ufunc_obl_cv_types[7] = NPY_DOUBLE +ufunc_obl_cv_ptr[2*0] = _func_oblate_segv_wrap +ufunc_obl_cv_ptr[2*0+1] = ("obl_cv") +ufunc_obl_cv_ptr[2*1] = _func_oblate_segv_wrap +ufunc_obl_cv_ptr[2*1+1] = ("obl_cv") +ufunc_obl_cv_data[0] = &ufunc_obl_cv_ptr[2*0] +ufunc_obl_cv_data[1] = &ufunc_obl_cv_ptr[2*1] +obl_cv = np.PyUFunc_FromFuncAndData(ufunc_obl_cv_loops, 
ufunc_obl_cv_data, ufunc_obl_cv_types, 2, 3, 1, 0, "obl_cv", ufunc_obl_cv_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_obl_rad1_loops[2] +cdef void *ufunc_obl_rad1_ptr[4] +cdef void *ufunc_obl_rad1_data[2] +cdef char ufunc_obl_rad1_types[12] +cdef char *ufunc_obl_rad1_doc = ( + "obl_rad1(m, n, c, x, out=None)\n" + "\n" + "Oblate spheroidal radial function of the first kind and its derivative\n" + "\n" + "Computes the oblate spheroidal radial function of the first kind\n" + "and its derivative (with respect to `x`) for mode parameters m>=0\n" + "and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.\n" + "\n" + "Parameters\n" + "----------\n" + "m : array_like\n" + " Mode parameter m (nonnegative)\n" + "n : array_like\n" + " Mode parameter n (>= m)\n" + "c : array_like\n" + " Spheroidal parameter\n" + "x : array_like\n" + " Parameter x (``|x| < 1.0``)\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "s : scalar or ndarray\n" + " Value of the function\n" + "sp : scalar or ndarray\n" + " Value of the derivative vs x\n" + "\n" + "See Also\n" + "--------\n" + "obl_rad1_cv") +ufunc_obl_rad1_loops[0] = loop_d_dddd_d_As_ffff_ff +ufunc_obl_rad1_loops[1] = loop_d_dddd_d_As_dddd_dd +ufunc_obl_rad1_types[0] = NPY_FLOAT +ufunc_obl_rad1_types[1] = NPY_FLOAT +ufunc_obl_rad1_types[2] = NPY_FLOAT +ufunc_obl_rad1_types[3] = NPY_FLOAT +ufunc_obl_rad1_types[4] = NPY_FLOAT +ufunc_obl_rad1_types[5] = NPY_FLOAT +ufunc_obl_rad1_types[6] = NPY_DOUBLE +ufunc_obl_rad1_types[7] = NPY_DOUBLE +ufunc_obl_rad1_types[8] = NPY_DOUBLE +ufunc_obl_rad1_types[9] = NPY_DOUBLE +ufunc_obl_rad1_types[10] = NPY_DOUBLE +ufunc_obl_rad1_types[11] = NPY_DOUBLE +ufunc_obl_rad1_ptr[2*0] = _func_oblate_radial1_nocv_wrap +ufunc_obl_rad1_ptr[2*0+1] = ("obl_rad1") +ufunc_obl_rad1_ptr[2*1] = _func_oblate_radial1_nocv_wrap +ufunc_obl_rad1_ptr[2*1+1] = ("obl_rad1") +ufunc_obl_rad1_data[0] = &ufunc_obl_rad1_ptr[2*0] +ufunc_obl_rad1_data[1] = 
&ufunc_obl_rad1_ptr[2*1] +obl_rad1 = np.PyUFunc_FromFuncAndData(ufunc_obl_rad1_loops, ufunc_obl_rad1_data, ufunc_obl_rad1_types, 2, 4, 2, 0, "obl_rad1", ufunc_obl_rad1_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_obl_rad1_cv_loops[2] +cdef void *ufunc_obl_rad1_cv_ptr[4] +cdef void *ufunc_obl_rad1_cv_data[2] +cdef char ufunc_obl_rad1_cv_types[14] +cdef char *ufunc_obl_rad1_cv_doc = ( + "obl_rad1_cv(m, n, c, cv, x, out=None)\n" + "\n" + "Oblate spheroidal radial function obl_rad1 for precomputed characteristic value\n" + "\n" + "Computes the oblate spheroidal radial function of the first kind\n" + "and its derivative (with respect to `x`) for mode parameters m>=0\n" + "and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires\n" + "pre-computed characteristic value.\n" + "\n" + "Parameters\n" + "----------\n" + "m : array_like\n" + " Mode parameter m (nonnegative)\n" + "n : array_like\n" + " Mode parameter n (>= m)\n" + "c : array_like\n" + " Spheroidal parameter\n" + "cv : array_like\n" + " Characteristic value\n" + "x : array_like\n" + " Parameter x (``|x| < 1.0``)\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "s : scalar or ndarray\n" + " Value of the function\n" + "sp : scalar or ndarray\n" + " Value of the derivative vs x\n" + "\n" + "See Also\n" + "--------\n" + "obl_rad1") +ufunc_obl_rad1_cv_loops[0] = loop_i_ddddd_dd_As_fffff_ff +ufunc_obl_rad1_cv_loops[1] = loop_i_ddddd_dd_As_ddddd_dd +ufunc_obl_rad1_cv_types[0] = NPY_FLOAT +ufunc_obl_rad1_cv_types[1] = NPY_FLOAT +ufunc_obl_rad1_cv_types[2] = NPY_FLOAT +ufunc_obl_rad1_cv_types[3] = NPY_FLOAT +ufunc_obl_rad1_cv_types[4] = NPY_FLOAT +ufunc_obl_rad1_cv_types[5] = NPY_FLOAT +ufunc_obl_rad1_cv_types[6] = NPY_FLOAT +ufunc_obl_rad1_cv_types[7] = NPY_DOUBLE +ufunc_obl_rad1_cv_types[8] = NPY_DOUBLE +ufunc_obl_rad1_cv_types[9] = NPY_DOUBLE +ufunc_obl_rad1_cv_types[10] = NPY_DOUBLE +ufunc_obl_rad1_cv_types[11] = NPY_DOUBLE 
+ufunc_obl_rad1_cv_types[12] = NPY_DOUBLE +ufunc_obl_rad1_cv_types[13] = NPY_DOUBLE +ufunc_obl_rad1_cv_ptr[2*0] = _func_oblate_radial1_wrap +ufunc_obl_rad1_cv_ptr[2*0+1] = ("obl_rad1_cv") +ufunc_obl_rad1_cv_ptr[2*1] = _func_oblate_radial1_wrap +ufunc_obl_rad1_cv_ptr[2*1+1] = ("obl_rad1_cv") +ufunc_obl_rad1_cv_data[0] = &ufunc_obl_rad1_cv_ptr[2*0] +ufunc_obl_rad1_cv_data[1] = &ufunc_obl_rad1_cv_ptr[2*1] +obl_rad1_cv = np.PyUFunc_FromFuncAndData(ufunc_obl_rad1_cv_loops, ufunc_obl_rad1_cv_data, ufunc_obl_rad1_cv_types, 2, 5, 2, 0, "obl_rad1_cv", ufunc_obl_rad1_cv_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_obl_rad2_loops[2] +cdef void *ufunc_obl_rad2_ptr[4] +cdef void *ufunc_obl_rad2_data[2] +cdef char ufunc_obl_rad2_types[12] +cdef char *ufunc_obl_rad2_doc = ( + "obl_rad2(m, n, c, x, out=None)\n" + "\n" + "Oblate spheroidal radial function of the second kind and its derivative.\n" + "\n" + "Computes the oblate spheroidal radial function of the second kind\n" + "and its derivative (with respect to `x`) for mode parameters m>=0\n" + "and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.\n" + "\n" + "Parameters\n" + "----------\n" + "m : array_like\n" + " Mode parameter m (nonnegative)\n" + "n : array_like\n" + " Mode parameter n (>= m)\n" + "c : array_like\n" + " Spheroidal parameter\n" + "x : array_like\n" + " Parameter x (``|x| < 1.0``)\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "s : scalar or ndarray\n" + " Value of the function\n" + "sp : scalar or ndarray\n" + " Value of the derivative vs x\n" + "\n" + "See Also\n" + "--------\n" + "obl_rad2_cv") +ufunc_obl_rad2_loops[0] = loop_d_dddd_d_As_ffff_ff +ufunc_obl_rad2_loops[1] = loop_d_dddd_d_As_dddd_dd +ufunc_obl_rad2_types[0] = NPY_FLOAT +ufunc_obl_rad2_types[1] = NPY_FLOAT +ufunc_obl_rad2_types[2] = NPY_FLOAT +ufunc_obl_rad2_types[3] = NPY_FLOAT +ufunc_obl_rad2_types[4] = NPY_FLOAT +ufunc_obl_rad2_types[5] = NPY_FLOAT 
+ufunc_obl_rad2_types[6] = NPY_DOUBLE +ufunc_obl_rad2_types[7] = NPY_DOUBLE +ufunc_obl_rad2_types[8] = NPY_DOUBLE +ufunc_obl_rad2_types[9] = NPY_DOUBLE +ufunc_obl_rad2_types[10] = NPY_DOUBLE +ufunc_obl_rad2_types[11] = NPY_DOUBLE +ufunc_obl_rad2_ptr[2*0] = _func_oblate_radial2_nocv_wrap +ufunc_obl_rad2_ptr[2*0+1] = ("obl_rad2") +ufunc_obl_rad2_ptr[2*1] = _func_oblate_radial2_nocv_wrap +ufunc_obl_rad2_ptr[2*1+1] = ("obl_rad2") +ufunc_obl_rad2_data[0] = &ufunc_obl_rad2_ptr[2*0] +ufunc_obl_rad2_data[1] = &ufunc_obl_rad2_ptr[2*1] +obl_rad2 = np.PyUFunc_FromFuncAndData(ufunc_obl_rad2_loops, ufunc_obl_rad2_data, ufunc_obl_rad2_types, 2, 4, 2, 0, "obl_rad2", ufunc_obl_rad2_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_obl_rad2_cv_loops[2] +cdef void *ufunc_obl_rad2_cv_ptr[4] +cdef void *ufunc_obl_rad2_cv_data[2] +cdef char ufunc_obl_rad2_cv_types[14] +cdef char *ufunc_obl_rad2_cv_doc = ( + "obl_rad2_cv(m, n, c, cv, x, out=None)\n" + "\n" + "Oblate spheroidal radial function obl_rad2 for precomputed characteristic value\n" + "\n" + "Computes the oblate spheroidal radial function of the second kind\n" + "and its derivative (with respect to `x`) for mode parameters m>=0\n" + "and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. 
Requires\n" + "pre-computed characteristic value.\n" + "\n" + "Parameters\n" + "----------\n" + "m : array_like\n" + " Mode parameter m (nonnegative)\n" + "n : array_like\n" + " Mode parameter n (>= m)\n" + "c : array_like\n" + " Spheroidal parameter\n" + "cv : array_like\n" + " Characteristic value\n" + "x : array_like\n" + " Parameter x (``|x| < 1.0``)\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "s : scalar or ndarray\n" + " Value of the function\n" + "sp : scalar or ndarray\n" + " Value of the derivative vs x\n" + "\n" + "See Also\n" + "--------\n" + "obl_rad2") +ufunc_obl_rad2_cv_loops[0] = loop_i_ddddd_dd_As_fffff_ff +ufunc_obl_rad2_cv_loops[1] = loop_i_ddddd_dd_As_ddddd_dd +ufunc_obl_rad2_cv_types[0] = NPY_FLOAT +ufunc_obl_rad2_cv_types[1] = NPY_FLOAT +ufunc_obl_rad2_cv_types[2] = NPY_FLOAT +ufunc_obl_rad2_cv_types[3] = NPY_FLOAT +ufunc_obl_rad2_cv_types[4] = NPY_FLOAT +ufunc_obl_rad2_cv_types[5] = NPY_FLOAT +ufunc_obl_rad2_cv_types[6] = NPY_FLOAT +ufunc_obl_rad2_cv_types[7] = NPY_DOUBLE +ufunc_obl_rad2_cv_types[8] = NPY_DOUBLE +ufunc_obl_rad2_cv_types[9] = NPY_DOUBLE +ufunc_obl_rad2_cv_types[10] = NPY_DOUBLE +ufunc_obl_rad2_cv_types[11] = NPY_DOUBLE +ufunc_obl_rad2_cv_types[12] = NPY_DOUBLE +ufunc_obl_rad2_cv_types[13] = NPY_DOUBLE +ufunc_obl_rad2_cv_ptr[2*0] = _func_oblate_radial2_wrap +ufunc_obl_rad2_cv_ptr[2*0+1] = ("obl_rad2_cv") +ufunc_obl_rad2_cv_ptr[2*1] = _func_oblate_radial2_wrap +ufunc_obl_rad2_cv_ptr[2*1+1] = ("obl_rad2_cv") +ufunc_obl_rad2_cv_data[0] = &ufunc_obl_rad2_cv_ptr[2*0] +ufunc_obl_rad2_cv_data[1] = &ufunc_obl_rad2_cv_ptr[2*1] +obl_rad2_cv = np.PyUFunc_FromFuncAndData(ufunc_obl_rad2_cv_loops, ufunc_obl_rad2_cv_data, ufunc_obl_rad2_cv_types, 2, 5, 2, 0, "obl_rad2_cv", ufunc_obl_rad2_cv_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_owens_t_loops[2] +cdef void *ufunc_owens_t_ptr[4] +cdef void *ufunc_owens_t_data[2] +cdef char ufunc_owens_t_types[6] +cdef char 
*ufunc_owens_t_doc = ( + "owens_t(h, a, out=None)\n" + "\n" + "Owen's T Function.\n" + "\n" + "The function T(h, a) gives the probability of the event\n" + "(X > h and 0 < Y < a * X) where X and Y are independent\n" + "standard normal random variables.\n" + "\n" + "Parameters\n" + "----------\n" + "h: array_like\n" + " Input value.\n" + "a: array_like\n" + " Input value.\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "t: scalar or ndarray\n" + " Probability of the event (X > h and 0 < Y < a * X),\n" + " where X and Y are independent standard normal random variables.\n" + "\n" + "References\n" + "----------\n" + ".. [1] M. Patefield and D. Tandy, \"Fast and accurate calculation of\n" + " Owen's T Function\", Statistical Software vol. 5, pp. 1-25, 2000.\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy import special\n" + ">>> a = 3.5\n" + ">>> h = 0.78\n" + ">>> special.owens_t(h, a)\n" + "0.10877216734852274") +ufunc_owens_t_loops[0] = loop_d_dd__As_ff_f +ufunc_owens_t_loops[1] = loop_d_dd__As_dd_d +ufunc_owens_t_types[0] = NPY_FLOAT +ufunc_owens_t_types[1] = NPY_FLOAT +ufunc_owens_t_types[2] = NPY_FLOAT +ufunc_owens_t_types[3] = NPY_DOUBLE +ufunc_owens_t_types[4] = NPY_DOUBLE +ufunc_owens_t_types[5] = NPY_DOUBLE +ufunc_owens_t_ptr[2*0] = _func_owens_t +ufunc_owens_t_ptr[2*0+1] = ("owens_t") +ufunc_owens_t_ptr[2*1] = _func_owens_t +ufunc_owens_t_ptr[2*1+1] = ("owens_t") +ufunc_owens_t_data[0] = &ufunc_owens_t_ptr[2*0] +ufunc_owens_t_data[1] = &ufunc_owens_t_ptr[2*1] +owens_t = np.PyUFunc_FromFuncAndData(ufunc_owens_t_loops, ufunc_owens_t_data, ufunc_owens_t_types, 2, 2, 1, 0, "owens_t", ufunc_owens_t_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_pbdv_loops[2] +cdef void *ufunc_pbdv_ptr[4] +cdef void *ufunc_pbdv_data[2] +cdef char ufunc_pbdv_types[8] +cdef char *ufunc_pbdv_doc = ( + "pbdv(v, x, out=None)\n" + "\n" + "Parabolic cylinder function D\n" + "\n" + "Returns (d, dp) the 
parabolic cylinder function Dv(x) in d and the\n" + "derivative, Dv'(x) in dp.\n" + "\n" + "Parameters\n" + "----------\n" + "v : array_like\n" + " Real parameter\n" + "x : array_like\n" + " Real argument\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "d : scalar or ndarray\n" + " Value of the function\n" + "dp : scalar or ndarray\n" + " Value of the derivative vs x") +ufunc_pbdv_loops[0] = loop_i_dd_dd_As_ff_ff +ufunc_pbdv_loops[1] = loop_i_dd_dd_As_dd_dd +ufunc_pbdv_types[0] = NPY_FLOAT +ufunc_pbdv_types[1] = NPY_FLOAT +ufunc_pbdv_types[2] = NPY_FLOAT +ufunc_pbdv_types[3] = NPY_FLOAT +ufunc_pbdv_types[4] = NPY_DOUBLE +ufunc_pbdv_types[5] = NPY_DOUBLE +ufunc_pbdv_types[6] = NPY_DOUBLE +ufunc_pbdv_types[7] = NPY_DOUBLE +ufunc_pbdv_ptr[2*0] = _func_pbdv_wrap +ufunc_pbdv_ptr[2*0+1] = ("pbdv") +ufunc_pbdv_ptr[2*1] = _func_pbdv_wrap +ufunc_pbdv_ptr[2*1+1] = ("pbdv") +ufunc_pbdv_data[0] = &ufunc_pbdv_ptr[2*0] +ufunc_pbdv_data[1] = &ufunc_pbdv_ptr[2*1] +pbdv = np.PyUFunc_FromFuncAndData(ufunc_pbdv_loops, ufunc_pbdv_data, ufunc_pbdv_types, 2, 2, 2, 0, "pbdv", ufunc_pbdv_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_pbvv_loops[2] +cdef void *ufunc_pbvv_ptr[4] +cdef void *ufunc_pbvv_data[2] +cdef char ufunc_pbvv_types[8] +cdef char *ufunc_pbvv_doc = ( + "pbvv(v, x, out=None)\n" + "\n" + "Parabolic cylinder function V\n" + "\n" + "Returns the parabolic cylinder function Vv(x) in v and the\n" + "derivative, Vv'(x) in vp.\n" + "\n" + "Parameters\n" + "----------\n" + "v : array_like\n" + " Real parameter\n" + "x : array_like\n" + " Real argument\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "v : scalar or ndarray\n" + " Value of the function\n" + "vp : scalar or ndarray\n" + " Value of the derivative vs x") +ufunc_pbvv_loops[0] = loop_i_dd_dd_As_ff_ff +ufunc_pbvv_loops[1] = loop_i_dd_dd_As_dd_dd +ufunc_pbvv_types[0] = 
NPY_FLOAT +ufunc_pbvv_types[1] = NPY_FLOAT +ufunc_pbvv_types[2] = NPY_FLOAT +ufunc_pbvv_types[3] = NPY_FLOAT +ufunc_pbvv_types[4] = NPY_DOUBLE +ufunc_pbvv_types[5] = NPY_DOUBLE +ufunc_pbvv_types[6] = NPY_DOUBLE +ufunc_pbvv_types[7] = NPY_DOUBLE +ufunc_pbvv_ptr[2*0] = _func_pbvv_wrap +ufunc_pbvv_ptr[2*0+1] = ("pbvv") +ufunc_pbvv_ptr[2*1] = _func_pbvv_wrap +ufunc_pbvv_ptr[2*1+1] = ("pbvv") +ufunc_pbvv_data[0] = &ufunc_pbvv_ptr[2*0] +ufunc_pbvv_data[1] = &ufunc_pbvv_ptr[2*1] +pbvv = np.PyUFunc_FromFuncAndData(ufunc_pbvv_loops, ufunc_pbvv_data, ufunc_pbvv_types, 2, 2, 2, 0, "pbvv", ufunc_pbvv_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_pbwa_loops[2] +cdef void *ufunc_pbwa_ptr[4] +cdef void *ufunc_pbwa_data[2] +cdef char ufunc_pbwa_types[8] +cdef char *ufunc_pbwa_doc = ( + "pbwa(a, x, out=None)\n" + "\n" + "Parabolic cylinder function W.\n" + "\n" + "The function is a particular solution to the differential equation\n" + "\n" + ".. math::\n" + "\n" + " y'' + \\left(\\frac{1}{4}x^2 - a\\right)y = 0,\n" + "\n" + "for a full definition see section 12.14 in [1]_.\n" + "\n" + "Parameters\n" + "----------\n" + "a : array_like\n" + " Real parameter\n" + "x : array_like\n" + " Real argument\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "w : scalar or ndarray\n" + " Value of the function\n" + "wp : scalar or ndarray\n" + " Value of the derivative in x\n" + "\n" + "Notes\n" + "-----\n" + "The function is a wrapper for a Fortran routine by Zhang and Jin\n" + "[2]_. The implementation is accurate only for ``|a|, |x| < 5`` and\n" + "returns NaN outside that range.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Digital Library of Mathematical Functions, 14.30.\n" + " https://dlmf.nist.gov/14.30\n" + ".. [2] Zhang, Shanjie and Jin, Jianming. 
\"Computation of Special\n" + " Functions\", John Wiley and Sons, 1996.\n" + " https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html") +ufunc_pbwa_loops[0] = loop_i_dd_dd_As_ff_ff +ufunc_pbwa_loops[1] = loop_i_dd_dd_As_dd_dd +ufunc_pbwa_types[0] = NPY_FLOAT +ufunc_pbwa_types[1] = NPY_FLOAT +ufunc_pbwa_types[2] = NPY_FLOAT +ufunc_pbwa_types[3] = NPY_FLOAT +ufunc_pbwa_types[4] = NPY_DOUBLE +ufunc_pbwa_types[5] = NPY_DOUBLE +ufunc_pbwa_types[6] = NPY_DOUBLE +ufunc_pbwa_types[7] = NPY_DOUBLE +ufunc_pbwa_ptr[2*0] = _func_pbwa_wrap +ufunc_pbwa_ptr[2*0+1] = ("pbwa") +ufunc_pbwa_ptr[2*1] = _func_pbwa_wrap +ufunc_pbwa_ptr[2*1+1] = ("pbwa") +ufunc_pbwa_data[0] = &ufunc_pbwa_ptr[2*0] +ufunc_pbwa_data[1] = &ufunc_pbwa_ptr[2*1] +pbwa = np.PyUFunc_FromFuncAndData(ufunc_pbwa_loops, ufunc_pbwa_data, ufunc_pbwa_types, 2, 2, 2, 0, "pbwa", ufunc_pbwa_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_pdtr_loops[2] +cdef void *ufunc_pdtr_ptr[4] +cdef void *ufunc_pdtr_data[2] +cdef char ufunc_pdtr_types[6] +cdef char *ufunc_pdtr_doc = ( + "pdtr(k, m, out=None)\n" + "\n" + "Poisson cumulative distribution function.\n" + "\n" + "Defined as the probability that a Poisson-distributed random\n" + "variable with event rate :math:`m` is less than or equal to\n" + ":math:`k`. More concretely, this works out to be [1]_\n" + "\n" + ".. 
math::\n" + "\n" + " \\exp(-m) \\sum_{j = 0}^{\\lfloor{k}\\rfloor} \\frac{m^j}{j!}.\n" + "\n" + "Parameters\n" + "----------\n" + "k : array_like\n" + " Number of occurrences (nonnegative, real)\n" + "m : array_like\n" + " Shape parameter (nonnegative, real)\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the Poisson cumulative distribution function\n" + "\n" + "See Also\n" + "--------\n" + "pdtrc : Poisson survival function\n" + "pdtrik : inverse of `pdtr` with respect to `k`\n" + "pdtri : inverse of `pdtr` with respect to `m`\n" + "\n" + "References\n" + "----------\n" + ".. [1] https://en.wikipedia.org/wiki/Poisson_distribution\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "It is a cumulative distribution function, so it converges to 1\n" + "monotonically as `k` goes to infinity.\n" + "\n" + ">>> sc.pdtr([1, 10, 100, np.inf], 1)\n" + "array([0.73575888, 0.99999999, 1. , 1. 
])\n" + "\n" + "It is discontinuous at integers and constant between integers.\n" + "\n" + ">>> sc.pdtr([1, 1.5, 1.9, 2], 1)\n" + "array([0.73575888, 0.73575888, 0.73575888, 0.9196986 ])") +ufunc_pdtr_loops[0] = loop_d_dd__As_ff_f +ufunc_pdtr_loops[1] = loop_d_dd__As_dd_d +ufunc_pdtr_types[0] = NPY_FLOAT +ufunc_pdtr_types[1] = NPY_FLOAT +ufunc_pdtr_types[2] = NPY_FLOAT +ufunc_pdtr_types[3] = NPY_DOUBLE +ufunc_pdtr_types[4] = NPY_DOUBLE +ufunc_pdtr_types[5] = NPY_DOUBLE +ufunc_pdtr_ptr[2*0] = _func_pdtr +ufunc_pdtr_ptr[2*0+1] = ("pdtr") +ufunc_pdtr_ptr[2*1] = _func_pdtr +ufunc_pdtr_ptr[2*1+1] = ("pdtr") +ufunc_pdtr_data[0] = &ufunc_pdtr_ptr[2*0] +ufunc_pdtr_data[1] = &ufunc_pdtr_ptr[2*1] +pdtr = np.PyUFunc_FromFuncAndData(ufunc_pdtr_loops, ufunc_pdtr_data, ufunc_pdtr_types, 2, 2, 1, 0, "pdtr", ufunc_pdtr_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_pdtrc_loops[2] +cdef void *ufunc_pdtrc_ptr[4] +cdef void *ufunc_pdtrc_data[2] +cdef char ufunc_pdtrc_types[6] +cdef char *ufunc_pdtrc_doc = ( + "pdtrc(k, m, out=None)\n" + "\n" + "Poisson survival function\n" + "\n" + "Returns the sum of the terms from k+1 to infinity of the Poisson\n" + "distribution: sum(exp(-m) * m**j / j!, j=k+1..inf) = gammainc(\n" + "k+1, m). 
Arguments must both be non-negative doubles.\n" + "\n" + "Parameters\n" + "----------\n" + "k : array_like\n" + " Number of occurrences (nonnegative, real)\n" + "m : array_like\n" + " Shape parameter (nonnegative, real)\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the Poisson survival function\n" + "\n" + "See Also\n" + "--------\n" + "pdtr : Poisson cumulative distribution function\n" + "pdtrik : inverse of `pdtr` with respect to `k`\n" + "pdtri : inverse of `pdtr` with respect to `m`") +ufunc_pdtrc_loops[0] = loop_d_dd__As_ff_f +ufunc_pdtrc_loops[1] = loop_d_dd__As_dd_d +ufunc_pdtrc_types[0] = NPY_FLOAT +ufunc_pdtrc_types[1] = NPY_FLOAT +ufunc_pdtrc_types[2] = NPY_FLOAT +ufunc_pdtrc_types[3] = NPY_DOUBLE +ufunc_pdtrc_types[4] = NPY_DOUBLE +ufunc_pdtrc_types[5] = NPY_DOUBLE +ufunc_pdtrc_ptr[2*0] = _func_pdtrc +ufunc_pdtrc_ptr[2*0+1] = ("pdtrc") +ufunc_pdtrc_ptr[2*1] = _func_pdtrc +ufunc_pdtrc_ptr[2*1+1] = ("pdtrc") +ufunc_pdtrc_data[0] = &ufunc_pdtrc_ptr[2*0] +ufunc_pdtrc_data[1] = &ufunc_pdtrc_ptr[2*1] +pdtrc = np.PyUFunc_FromFuncAndData(ufunc_pdtrc_loops, ufunc_pdtrc_data, ufunc_pdtrc_types, 2, 2, 1, 0, "pdtrc", ufunc_pdtrc_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_pdtri_loops[3] +cdef void *ufunc_pdtri_ptr[6] +cdef void *ufunc_pdtri_data[3] +cdef char ufunc_pdtri_types[9] +cdef char *ufunc_pdtri_doc = ( + "pdtri(k, y, out=None)\n" + "\n" + "Inverse to `pdtr` vs m\n" + "\n" + "Returns the Poisson variable `m` such that the sum from 0 to `k` of\n" + "the Poisson density is equal to the given probability `y`:\n" + "calculated by ``gammaincinv(k + 1, y)``. 
`k` must be a nonnegative\n" + "integer and `y` between 0 and 1.\n" + "\n" + "Parameters\n" + "----------\n" + "k : array_like\n" + " Number of occurrences (nonnegative, real)\n" + "y : array_like\n" + " Probability\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the shape parameter `m` such that ``pdtr(k, m) = p``\n" + "\n" + "See Also\n" + "--------\n" + "pdtr : Poisson cumulative distribution function\n" + "pdtrc : Poisson survival function\n" + "pdtrik : inverse of `pdtr` with respect to `k`") +ufunc_pdtri_loops[0] = loop_d_id__As_ld_d +ufunc_pdtri_loops[1] = loop_d_dd__As_ff_f +ufunc_pdtri_loops[2] = loop_d_dd__As_dd_d +ufunc_pdtri_types[0] = NPY_LONG +ufunc_pdtri_types[1] = NPY_DOUBLE +ufunc_pdtri_types[2] = NPY_DOUBLE +ufunc_pdtri_types[3] = NPY_FLOAT +ufunc_pdtri_types[4] = NPY_FLOAT +ufunc_pdtri_types[5] = NPY_FLOAT +ufunc_pdtri_types[6] = NPY_DOUBLE +ufunc_pdtri_types[7] = NPY_DOUBLE +ufunc_pdtri_types[8] = NPY_DOUBLE +ufunc_pdtri_ptr[2*0] = _func_pdtri +ufunc_pdtri_ptr[2*0+1] = ("pdtri") +ufunc_pdtri_ptr[2*1] = _func_pdtri_unsafe +ufunc_pdtri_ptr[2*1+1] = ("pdtri") +ufunc_pdtri_ptr[2*2] = _func_pdtri_unsafe +ufunc_pdtri_ptr[2*2+1] = ("pdtri") +ufunc_pdtri_data[0] = &ufunc_pdtri_ptr[2*0] +ufunc_pdtri_data[1] = &ufunc_pdtri_ptr[2*1] +ufunc_pdtri_data[2] = &ufunc_pdtri_ptr[2*2] +pdtri = np.PyUFunc_FromFuncAndData(ufunc_pdtri_loops, ufunc_pdtri_data, ufunc_pdtri_types, 3, 2, 1, 0, "pdtri", ufunc_pdtri_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_pdtrik_loops[2] +cdef void *ufunc_pdtrik_ptr[4] +cdef void *ufunc_pdtrik_data[2] +cdef char ufunc_pdtrik_types[6] +cdef char *ufunc_pdtrik_doc = ( + "pdtrik(p, m, out=None)\n" + "\n" + "Inverse to `pdtr` vs `m`.\n" + "\n" + "Parameters\n" + "----------\n" + "m : array_like\n" + " Shape parameter (nonnegative, real)\n" + "p : array_like\n" + " Probability\n" + "out : ndarray, optional\n" + " Optional 
output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The number of occurrences `k` such that ``pdtr(k, m) = p``\n" + "\n" + "See Also\n" + "--------\n" + "pdtr : Poisson cumulative distribution function\n" + "pdtrc : Poisson survival function\n" + "pdtri : inverse of `pdtr` with respect to `m`") +ufunc_pdtrik_loops[0] = loop_d_dd__As_ff_f +ufunc_pdtrik_loops[1] = loop_d_dd__As_dd_d +ufunc_pdtrik_types[0] = NPY_FLOAT +ufunc_pdtrik_types[1] = NPY_FLOAT +ufunc_pdtrik_types[2] = NPY_FLOAT +ufunc_pdtrik_types[3] = NPY_DOUBLE +ufunc_pdtrik_types[4] = NPY_DOUBLE +ufunc_pdtrik_types[5] = NPY_DOUBLE +ufunc_pdtrik_ptr[2*0] = _func_pdtrik +ufunc_pdtrik_ptr[2*0+1] = ("pdtrik") +ufunc_pdtrik_ptr[2*1] = _func_pdtrik +ufunc_pdtrik_ptr[2*1+1] = ("pdtrik") +ufunc_pdtrik_data[0] = &ufunc_pdtrik_ptr[2*0] +ufunc_pdtrik_data[1] = &ufunc_pdtrik_ptr[2*1] +pdtrik = np.PyUFunc_FromFuncAndData(ufunc_pdtrik_loops, ufunc_pdtrik_data, ufunc_pdtrik_types, 2, 2, 1, 0, "pdtrik", ufunc_pdtrik_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_poch_loops[2] +cdef void *ufunc_poch_ptr[4] +cdef void *ufunc_poch_data[2] +cdef char ufunc_poch_types[6] +cdef char *ufunc_poch_doc = ( + "poch(z, m, out=None)\n" + "\n" + "Pochhammer symbol.\n" + "\n" + "The Pochhammer symbol (rising factorial) is defined as\n" + "\n" + ".. math::\n" + "\n" + " (z)_m = \\frac{\\Gamma(z + m)}{\\Gamma(z)}\n" + "\n" + "For positive integer `m` it reads\n" + "\n" + ".. math::\n" + "\n" + " (z)_m = z (z + 1) ... (z + m - 1)\n" + "\n" + "See [dlmf]_ for more details.\n" + "\n" + "Parameters\n" + "----------\n" + "z, m : array_like\n" + " Real-valued arguments.\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The value of the function.\n" + "\n" + "References\n" + "----------\n" + ".. 
[dlmf] Nist, Digital Library of Mathematical Functions\n" + " https://dlmf.nist.gov/5.2#iii\n" + "\n" + "Examples\n" + "--------\n" + ">>> import scipy.special as sc\n" + "\n" + "It is 1 when m is 0.\n" + "\n" + ">>> sc.poch([1, 2, 3, 4], 0)\n" + "array([1., 1., 1., 1.])\n" + "\n" + "For z equal to 1 it reduces to the factorial function.\n" + "\n" + ">>> sc.poch(1, 5)\n" + "120.0\n" + ">>> 1 * 2 * 3 * 4 * 5\n" + "120\n" + "\n" + "It can be expressed in terms of the gamma function.\n" + "\n" + ">>> z, m = 3.7, 2.1\n" + ">>> sc.poch(z, m)\n" + "20.529581933776953\n" + ">>> sc.gamma(z + m) / sc.gamma(z)\n" + "20.52958193377696") +ufunc_poch_loops[0] = loop_d_dd__As_ff_f +ufunc_poch_loops[1] = loop_d_dd__As_dd_d +ufunc_poch_types[0] = NPY_FLOAT +ufunc_poch_types[1] = NPY_FLOAT +ufunc_poch_types[2] = NPY_FLOAT +ufunc_poch_types[3] = NPY_DOUBLE +ufunc_poch_types[4] = NPY_DOUBLE +ufunc_poch_types[5] = NPY_DOUBLE +ufunc_poch_ptr[2*0] = _func_poch +ufunc_poch_ptr[2*0+1] = ("poch") +ufunc_poch_ptr[2*1] = _func_poch +ufunc_poch_ptr[2*1+1] = ("poch") +ufunc_poch_data[0] = &ufunc_poch_ptr[2*0] +ufunc_poch_data[1] = &ufunc_poch_ptr[2*1] +poch = np.PyUFunc_FromFuncAndData(ufunc_poch_loops, ufunc_poch_data, ufunc_poch_types, 2, 2, 1, 0, "poch", ufunc_poch_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_powm1_loops[2] +cdef void *ufunc_powm1_ptr[4] +cdef void *ufunc_powm1_data[2] +cdef char ufunc_powm1_types[6] +cdef char *ufunc_powm1_doc = ( + "powm1(x, y, out=None)\n" + "\n" + "Computes ``x**y - 1``.\n" + "\n" + "This function is useful when `y` is near 0, or when `x` is near 1.\n" + "\n" + "The function is implemented for real types only (unlike ``numpy.power``,\n" + "which accepts complex inputs).\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " The base. Must be a real type (i.e. integer or float, not complex).\n" + "y : array_like\n" + " The exponent. Must be a real type (i.e. 
integer or float, not complex).\n" + "\n" + "Returns\n" + "-------\n" + "array_like\n" + " Result of the calculation\n" + "\n" + "Notes\n" + "-----\n" + ".. versionadded:: 1.10.0\n" + "\n" + "The underlying code is implemented for single precision and double\n" + "precision floats only. Unlike `numpy.power`, integer inputs to\n" + "`powm1` are converted to floating point, and complex inputs are\n" + "not accepted.\n" + "\n" + "Note the following edge cases:\n" + "\n" + "* ``powm1(x, 0)`` returns 0 for any ``x``, including 0, ``inf``\n" + " and ``nan``.\n" + "* ``powm1(1, y)`` returns 0 for any ``y``, including ``nan``\n" + " and ``inf``.\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy.special import powm1\n" + "\n" + ">>> x = np.array([1.2, 10.0, 0.9999999975])\n" + ">>> y = np.array([1e-9, 1e-11, 0.1875])\n" + ">>> powm1(x, y)\n" + "array([ 1.82321557e-10, 2.30258509e-11, -4.68749998e-10])\n" + "\n" + "It can be verified that the relative errors in those results\n" + "are less than 2.5e-16.\n" + "\n" + "Compare that to the result of ``x**y - 1``, where the\n" + "relative errors are all larger than 8e-8:\n" + "\n" + ">>> x**y - 1\n" + "array([ 1.82321491e-10, 2.30258035e-11, -4.68750039e-10])") +ufunc_powm1_loops[0] = loop_f_ff__As_ff_f +ufunc_powm1_loops[1] = loop_d_dd__As_dd_d +ufunc_powm1_types[0] = NPY_FLOAT +ufunc_powm1_types[1] = NPY_FLOAT +ufunc_powm1_types[2] = NPY_FLOAT +ufunc_powm1_types[3] = NPY_DOUBLE +ufunc_powm1_types[4] = NPY_DOUBLE +ufunc_powm1_types[5] = NPY_DOUBLE +ufunc_powm1_ptr[2*0] = scipy.special._ufuncs_cxx._export_powm1_float +ufunc_powm1_ptr[2*0+1] = ("powm1") +ufunc_powm1_ptr[2*1] = scipy.special._ufuncs_cxx._export_powm1_double +ufunc_powm1_ptr[2*1+1] = ("powm1") +ufunc_powm1_data[0] = &ufunc_powm1_ptr[2*0] +ufunc_powm1_data[1] = &ufunc_powm1_ptr[2*1] +powm1 = np.PyUFunc_FromFuncAndData(ufunc_powm1_loops, ufunc_powm1_data, ufunc_powm1_types, 2, 2, 1, 0, "powm1", ufunc_powm1_doc, 0) + +cdef 
np.PyUFuncGenericFunction ufunc_pro_ang1_loops[2] +cdef void *ufunc_pro_ang1_ptr[4] +cdef void *ufunc_pro_ang1_data[2] +cdef char ufunc_pro_ang1_types[12] +cdef char *ufunc_pro_ang1_doc = ( + "pro_ang1(m, n, c, x, out=None)\n" + "\n" + "Prolate spheroidal angular function of the first kind and its derivative\n" + "\n" + "Computes the prolate spheroidal angular function of the first kind\n" + "and its derivative (with respect to `x`) for mode parameters m>=0\n" + "and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.\n" + "\n" + "Parameters\n" + "----------\n" + "m : array_like\n" + " Nonnegative mode parameter m\n" + "n : array_like\n" + " Mode parameter n (>= m)\n" + "c : array_like\n" + " Spheroidal parameter\n" + "x : array_like\n" + " Real parameter (``|x| < 1.0``)\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "s : scalar or ndarray\n" + " Value of the function\n" + "sp : scalar or ndarray\n" + " Value of the derivative vs x") +ufunc_pro_ang1_loops[0] = loop_d_dddd_d_As_ffff_ff +ufunc_pro_ang1_loops[1] = loop_d_dddd_d_As_dddd_dd +ufunc_pro_ang1_types[0] = NPY_FLOAT +ufunc_pro_ang1_types[1] = NPY_FLOAT +ufunc_pro_ang1_types[2] = NPY_FLOAT +ufunc_pro_ang1_types[3] = NPY_FLOAT +ufunc_pro_ang1_types[4] = NPY_FLOAT +ufunc_pro_ang1_types[5] = NPY_FLOAT +ufunc_pro_ang1_types[6] = NPY_DOUBLE +ufunc_pro_ang1_types[7] = NPY_DOUBLE +ufunc_pro_ang1_types[8] = NPY_DOUBLE +ufunc_pro_ang1_types[9] = NPY_DOUBLE +ufunc_pro_ang1_types[10] = NPY_DOUBLE +ufunc_pro_ang1_types[11] = NPY_DOUBLE +ufunc_pro_ang1_ptr[2*0] = _func_prolate_aswfa_nocv_wrap +ufunc_pro_ang1_ptr[2*0+1] = ("pro_ang1") +ufunc_pro_ang1_ptr[2*1] = _func_prolate_aswfa_nocv_wrap +ufunc_pro_ang1_ptr[2*1+1] = ("pro_ang1") +ufunc_pro_ang1_data[0] = &ufunc_pro_ang1_ptr[2*0] +ufunc_pro_ang1_data[1] = &ufunc_pro_ang1_ptr[2*1] +pro_ang1 = np.PyUFunc_FromFuncAndData(ufunc_pro_ang1_loops, ufunc_pro_ang1_data, ufunc_pro_ang1_types, 2, 4, 2, 0, 
"pro_ang1", ufunc_pro_ang1_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_pro_ang1_cv_loops[2] +cdef void *ufunc_pro_ang1_cv_ptr[4] +cdef void *ufunc_pro_ang1_cv_data[2] +cdef char ufunc_pro_ang1_cv_types[14] +cdef char *ufunc_pro_ang1_cv_doc = ( + "pro_ang1_cv(m, n, c, cv, x, out=None)\n" + "\n" + "Prolate spheroidal angular function pro_ang1 for precomputed characteristic value\n" + "\n" + "Computes the prolate spheroidal angular function of the first kind\n" + "and its derivative (with respect to `x`) for mode parameters m>=0\n" + "and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires\n" + "pre-computed characteristic value.\n" + "\n" + "Parameters\n" + "----------\n" + "m : array_like\n" + " Nonnegative mode parameter m\n" + "n : array_like\n" + " Mode parameter n (>= m)\n" + "c : array_like\n" + " Spheroidal parameter\n" + "cv : array_like\n" + " Characteristic value\n" + "x : array_like\n" + " Real parameter (``|x| < 1.0``)\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "s : scalar or ndarray\n" + " Value of the function\n" + "sp : scalar or ndarray\n" + " Value of the derivative vs x") +ufunc_pro_ang1_cv_loops[0] = loop_i_ddddd_dd_As_fffff_ff +ufunc_pro_ang1_cv_loops[1] = loop_i_ddddd_dd_As_ddddd_dd +ufunc_pro_ang1_cv_types[0] = NPY_FLOAT +ufunc_pro_ang1_cv_types[1] = NPY_FLOAT +ufunc_pro_ang1_cv_types[2] = NPY_FLOAT +ufunc_pro_ang1_cv_types[3] = NPY_FLOAT +ufunc_pro_ang1_cv_types[4] = NPY_FLOAT +ufunc_pro_ang1_cv_types[5] = NPY_FLOAT +ufunc_pro_ang1_cv_types[6] = NPY_FLOAT +ufunc_pro_ang1_cv_types[7] = NPY_DOUBLE +ufunc_pro_ang1_cv_types[8] = NPY_DOUBLE +ufunc_pro_ang1_cv_types[9] = NPY_DOUBLE +ufunc_pro_ang1_cv_types[10] = NPY_DOUBLE +ufunc_pro_ang1_cv_types[11] = NPY_DOUBLE +ufunc_pro_ang1_cv_types[12] = NPY_DOUBLE +ufunc_pro_ang1_cv_types[13] = NPY_DOUBLE +ufunc_pro_ang1_cv_ptr[2*0] = _func_prolate_aswfa_wrap +ufunc_pro_ang1_cv_ptr[2*0+1] = ("pro_ang1_cv") 
+ufunc_pro_ang1_cv_ptr[2*1] = _func_prolate_aswfa_wrap +ufunc_pro_ang1_cv_ptr[2*1+1] = ("pro_ang1_cv") +ufunc_pro_ang1_cv_data[0] = &ufunc_pro_ang1_cv_ptr[2*0] +ufunc_pro_ang1_cv_data[1] = &ufunc_pro_ang1_cv_ptr[2*1] +pro_ang1_cv = np.PyUFunc_FromFuncAndData(ufunc_pro_ang1_cv_loops, ufunc_pro_ang1_cv_data, ufunc_pro_ang1_cv_types, 2, 5, 2, 0, "pro_ang1_cv", ufunc_pro_ang1_cv_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_pro_cv_loops[2] +cdef void *ufunc_pro_cv_ptr[4] +cdef void *ufunc_pro_cv_data[2] +cdef char ufunc_pro_cv_types[8] +cdef char *ufunc_pro_cv_doc = ( + "pro_cv(m, n, c, out=None)\n" + "\n" + "Characteristic value of prolate spheroidal function\n" + "\n" + "Computes the characteristic value of prolate spheroidal wave\n" + "functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.\n" + "\n" + "Parameters\n" + "----------\n" + "m : array_like\n" + " Nonnegative mode parameter m\n" + "n : array_like\n" + " Mode parameter n (>= m)\n" + "c : array_like\n" + " Spheroidal parameter\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "cv : scalar or ndarray\n" + " Characteristic value") +ufunc_pro_cv_loops[0] = loop_d_ddd__As_fff_f +ufunc_pro_cv_loops[1] = loop_d_ddd__As_ddd_d +ufunc_pro_cv_types[0] = NPY_FLOAT +ufunc_pro_cv_types[1] = NPY_FLOAT +ufunc_pro_cv_types[2] = NPY_FLOAT +ufunc_pro_cv_types[3] = NPY_FLOAT +ufunc_pro_cv_types[4] = NPY_DOUBLE +ufunc_pro_cv_types[5] = NPY_DOUBLE +ufunc_pro_cv_types[6] = NPY_DOUBLE +ufunc_pro_cv_types[7] = NPY_DOUBLE +ufunc_pro_cv_ptr[2*0] = _func_prolate_segv_wrap +ufunc_pro_cv_ptr[2*0+1] = ("pro_cv") +ufunc_pro_cv_ptr[2*1] = _func_prolate_segv_wrap +ufunc_pro_cv_ptr[2*1+1] = ("pro_cv") +ufunc_pro_cv_data[0] = &ufunc_pro_cv_ptr[2*0] +ufunc_pro_cv_data[1] = &ufunc_pro_cv_ptr[2*1] +pro_cv = np.PyUFunc_FromFuncAndData(ufunc_pro_cv_loops, ufunc_pro_cv_data, ufunc_pro_cv_types, 2, 3, 1, 0, "pro_cv", ufunc_pro_cv_doc, 0) + +cdef 
np.PyUFuncGenericFunction ufunc_pro_rad1_loops[2] +cdef void *ufunc_pro_rad1_ptr[4] +cdef void *ufunc_pro_rad1_data[2] +cdef char ufunc_pro_rad1_types[12] +cdef char *ufunc_pro_rad1_doc = ( + "pro_rad1(m, n, c, x, out=None)\n" + "\n" + "Prolate spheroidal radial function of the first kind and its derivative\n" + "\n" + "Computes the prolate spheroidal radial function of the first kind\n" + "and its derivative (with respect to `x`) for mode parameters m>=0\n" + "and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.\n" + "\n" + "Parameters\n" + "----------\n" + "m : array_like\n" + " Nonnegative mode parameter m\n" + "n : array_like\n" + " Mode parameter n (>= m)\n" + "c : array_like\n" + " Spheroidal parameter\n" + "x : array_like\n" + " Real parameter (``|x| < 1.0``)\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "s : scalar or ndarray\n" + " Value of the function\n" + "sp : scalar or ndarray\n" + " Value of the derivative vs x") +ufunc_pro_rad1_loops[0] = loop_d_dddd_d_As_ffff_ff +ufunc_pro_rad1_loops[1] = loop_d_dddd_d_As_dddd_dd +ufunc_pro_rad1_types[0] = NPY_FLOAT +ufunc_pro_rad1_types[1] = NPY_FLOAT +ufunc_pro_rad1_types[2] = NPY_FLOAT +ufunc_pro_rad1_types[3] = NPY_FLOAT +ufunc_pro_rad1_types[4] = NPY_FLOAT +ufunc_pro_rad1_types[5] = NPY_FLOAT +ufunc_pro_rad1_types[6] = NPY_DOUBLE +ufunc_pro_rad1_types[7] = NPY_DOUBLE +ufunc_pro_rad1_types[8] = NPY_DOUBLE +ufunc_pro_rad1_types[9] = NPY_DOUBLE +ufunc_pro_rad1_types[10] = NPY_DOUBLE +ufunc_pro_rad1_types[11] = NPY_DOUBLE +ufunc_pro_rad1_ptr[2*0] = _func_prolate_radial1_nocv_wrap +ufunc_pro_rad1_ptr[2*0+1] = ("pro_rad1") +ufunc_pro_rad1_ptr[2*1] = _func_prolate_radial1_nocv_wrap +ufunc_pro_rad1_ptr[2*1+1] = ("pro_rad1") +ufunc_pro_rad1_data[0] = &ufunc_pro_rad1_ptr[2*0] +ufunc_pro_rad1_data[1] = &ufunc_pro_rad1_ptr[2*1] +pro_rad1 = np.PyUFunc_FromFuncAndData(ufunc_pro_rad1_loops, ufunc_pro_rad1_data, ufunc_pro_rad1_types, 2, 4, 2, 0, 
"pro_rad1", ufunc_pro_rad1_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_pro_rad1_cv_loops[2] +cdef void *ufunc_pro_rad1_cv_ptr[4] +cdef void *ufunc_pro_rad1_cv_data[2] +cdef char ufunc_pro_rad1_cv_types[14] +cdef char *ufunc_pro_rad1_cv_doc = ( + "pro_rad1_cv(m, n, c, cv, x, out=None)\n" + "\n" + "Prolate spheroidal radial function pro_rad1 for precomputed characteristic value\n" + "\n" + "Computes the prolate spheroidal radial function of the first kind\n" + "and its derivative (with respect to `x`) for mode parameters m>=0\n" + "and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires\n" + "pre-computed characteristic value.\n" + "\n" + "Parameters\n" + "----------\n" + "m : array_like\n" + " Nonnegative mode parameter m\n" + "n : array_like\n" + " Mode parameter n (>= m)\n" + "c : array_like\n" + " Spheroidal parameter\n" + "cv : array_like\n" + " Characteristic value\n" + "x : array_like\n" + " Real parameter (``|x| < 1.0``)\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "s : scalar or ndarray\n" + " Value of the function\n" + "sp : scalar or ndarray\n" + " Value of the derivative vs x") +ufunc_pro_rad1_cv_loops[0] = loop_i_ddddd_dd_As_fffff_ff +ufunc_pro_rad1_cv_loops[1] = loop_i_ddddd_dd_As_ddddd_dd +ufunc_pro_rad1_cv_types[0] = NPY_FLOAT +ufunc_pro_rad1_cv_types[1] = NPY_FLOAT +ufunc_pro_rad1_cv_types[2] = NPY_FLOAT +ufunc_pro_rad1_cv_types[3] = NPY_FLOAT +ufunc_pro_rad1_cv_types[4] = NPY_FLOAT +ufunc_pro_rad1_cv_types[5] = NPY_FLOAT +ufunc_pro_rad1_cv_types[6] = NPY_FLOAT +ufunc_pro_rad1_cv_types[7] = NPY_DOUBLE +ufunc_pro_rad1_cv_types[8] = NPY_DOUBLE +ufunc_pro_rad1_cv_types[9] = NPY_DOUBLE +ufunc_pro_rad1_cv_types[10] = NPY_DOUBLE +ufunc_pro_rad1_cv_types[11] = NPY_DOUBLE +ufunc_pro_rad1_cv_types[12] = NPY_DOUBLE +ufunc_pro_rad1_cv_types[13] = NPY_DOUBLE +ufunc_pro_rad1_cv_ptr[2*0] = _func_prolate_radial1_wrap +ufunc_pro_rad1_cv_ptr[2*0+1] = ("pro_rad1_cv") 
+ufunc_pro_rad1_cv_ptr[2*1] = _func_prolate_radial1_wrap +ufunc_pro_rad1_cv_ptr[2*1+1] = ("pro_rad1_cv") +ufunc_pro_rad1_cv_data[0] = &ufunc_pro_rad1_cv_ptr[2*0] +ufunc_pro_rad1_cv_data[1] = &ufunc_pro_rad1_cv_ptr[2*1] +pro_rad1_cv = np.PyUFunc_FromFuncAndData(ufunc_pro_rad1_cv_loops, ufunc_pro_rad1_cv_data, ufunc_pro_rad1_cv_types, 2, 5, 2, 0, "pro_rad1_cv", ufunc_pro_rad1_cv_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_pro_rad2_loops[2] +cdef void *ufunc_pro_rad2_ptr[4] +cdef void *ufunc_pro_rad2_data[2] +cdef char ufunc_pro_rad2_types[12] +cdef char *ufunc_pro_rad2_doc = ( + "pro_rad2(m, n, c, x, out=None)\n" + "\n" + "Prolate spheroidal radial function of the second kind and its derivative\n" + "\n" + "Computes the prolate spheroidal radial function of the second kind\n" + "and its derivative (with respect to `x`) for mode parameters m>=0\n" + "and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.\n" + "\n" + "Parameters\n" + "----------\n" + "m : array_like\n" + " Nonnegative mode parameter m\n" + "n : array_like\n" + " Mode parameter n (>= m)\n" + "c : array_like\n" + " Spheroidal parameter\n" + "cv : array_like\n" + " Characteristic value\n" + "x : array_like\n" + " Real parameter (``|x| < 1.0``)\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "s : scalar or ndarray\n" + " Value of the function\n" + "sp : scalar or ndarray\n" + " Value of the derivative vs x") +ufunc_pro_rad2_loops[0] = loop_d_dddd_d_As_ffff_ff +ufunc_pro_rad2_loops[1] = loop_d_dddd_d_As_dddd_dd +ufunc_pro_rad2_types[0] = NPY_FLOAT +ufunc_pro_rad2_types[1] = NPY_FLOAT +ufunc_pro_rad2_types[2] = NPY_FLOAT +ufunc_pro_rad2_types[3] = NPY_FLOAT +ufunc_pro_rad2_types[4] = NPY_FLOAT +ufunc_pro_rad2_types[5] = NPY_FLOAT +ufunc_pro_rad2_types[6] = NPY_DOUBLE +ufunc_pro_rad2_types[7] = NPY_DOUBLE +ufunc_pro_rad2_types[8] = NPY_DOUBLE +ufunc_pro_rad2_types[9] = NPY_DOUBLE +ufunc_pro_rad2_types[10] = NPY_DOUBLE 
+ufunc_pro_rad2_types[11] = NPY_DOUBLE +ufunc_pro_rad2_ptr[2*0] = _func_prolate_radial2_nocv_wrap +ufunc_pro_rad2_ptr[2*0+1] = ("pro_rad2") +ufunc_pro_rad2_ptr[2*1] = _func_prolate_radial2_nocv_wrap +ufunc_pro_rad2_ptr[2*1+1] = ("pro_rad2") +ufunc_pro_rad2_data[0] = &ufunc_pro_rad2_ptr[2*0] +ufunc_pro_rad2_data[1] = &ufunc_pro_rad2_ptr[2*1] +pro_rad2 = np.PyUFunc_FromFuncAndData(ufunc_pro_rad2_loops, ufunc_pro_rad2_data, ufunc_pro_rad2_types, 2, 4, 2, 0, "pro_rad2", ufunc_pro_rad2_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_pro_rad2_cv_loops[2] +cdef void *ufunc_pro_rad2_cv_ptr[4] +cdef void *ufunc_pro_rad2_cv_data[2] +cdef char ufunc_pro_rad2_cv_types[14] +cdef char *ufunc_pro_rad2_cv_doc = ( + "pro_rad2_cv(m, n, c, cv, x, out=None)\n" + "\n" + "Prolate spheroidal radial function pro_rad2 for precomputed characteristic value\n" + "\n" + "Computes the prolate spheroidal radial function of the second kind\n" + "and its derivative (with respect to `x`) for mode parameters m>=0\n" + "and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. 
Requires\n" + "pre-computed characteristic value.\n" + "\n" + "Parameters\n" + "----------\n" + "m : array_like\n" + " Nonnegative mode parameter m\n" + "n : array_like\n" + " Mode parameter n (>= m)\n" + "c : array_like\n" + " Spheroidal parameter\n" + "cv : array_like\n" + " Characteristic value\n" + "x : array_like\n" + " Real parameter (``|x| < 1.0``)\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "s : scalar or ndarray\n" + " Value of the function\n" + "sp : scalar or ndarray\n" + " Value of the derivative vs x") +ufunc_pro_rad2_cv_loops[0] = loop_i_ddddd_dd_As_fffff_ff +ufunc_pro_rad2_cv_loops[1] = loop_i_ddddd_dd_As_ddddd_dd +ufunc_pro_rad2_cv_types[0] = NPY_FLOAT +ufunc_pro_rad2_cv_types[1] = NPY_FLOAT +ufunc_pro_rad2_cv_types[2] = NPY_FLOAT +ufunc_pro_rad2_cv_types[3] = NPY_FLOAT +ufunc_pro_rad2_cv_types[4] = NPY_FLOAT +ufunc_pro_rad2_cv_types[5] = NPY_FLOAT +ufunc_pro_rad2_cv_types[6] = NPY_FLOAT +ufunc_pro_rad2_cv_types[7] = NPY_DOUBLE +ufunc_pro_rad2_cv_types[8] = NPY_DOUBLE +ufunc_pro_rad2_cv_types[9] = NPY_DOUBLE +ufunc_pro_rad2_cv_types[10] = NPY_DOUBLE +ufunc_pro_rad2_cv_types[11] = NPY_DOUBLE +ufunc_pro_rad2_cv_types[12] = NPY_DOUBLE +ufunc_pro_rad2_cv_types[13] = NPY_DOUBLE +ufunc_pro_rad2_cv_ptr[2*0] = _func_prolate_radial2_wrap +ufunc_pro_rad2_cv_ptr[2*0+1] = ("pro_rad2_cv") +ufunc_pro_rad2_cv_ptr[2*1] = _func_prolate_radial2_wrap +ufunc_pro_rad2_cv_ptr[2*1+1] = ("pro_rad2_cv") +ufunc_pro_rad2_cv_data[0] = &ufunc_pro_rad2_cv_ptr[2*0] +ufunc_pro_rad2_cv_data[1] = &ufunc_pro_rad2_cv_ptr[2*1] +pro_rad2_cv = np.PyUFunc_FromFuncAndData(ufunc_pro_rad2_cv_loops, ufunc_pro_rad2_cv_data, ufunc_pro_rad2_cv_types, 2, 5, 2, 0, "pro_rad2_cv", ufunc_pro_rad2_cv_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_pseudo_huber_loops[2] +cdef void *ufunc_pseudo_huber_ptr[4] +cdef void *ufunc_pseudo_huber_data[2] +cdef char ufunc_pseudo_huber_types[6] +cdef char *ufunc_pseudo_huber_doc = ( + 
"pseudo_huber(delta, r, out=None)\n" + "\n" + "Pseudo-Huber loss function.\n" + "\n" + ".. math:: \\mathrm{pseudo\\_huber}(\\delta, r) =\n" + " \\delta^2 \\left( \\sqrt{ 1 + \\left( \\frac{r}{\\delta} \\right)^2 } - 1 \\right)\n" + "\n" + "Parameters\n" + "----------\n" + "delta : array_like\n" + " Input array, indicating the soft quadratic vs. linear loss changepoint.\n" + "r : array_like\n" + " Input array, possibly representing residuals.\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "res : scalar or ndarray\n" + " The computed Pseudo-Huber loss function values.\n" + "\n" + "See Also\n" + "--------\n" + "huber: Similar function which this function approximates\n" + "\n" + "Notes\n" + "-----\n" + "Like `huber`, `pseudo_huber` often serves as a robust loss function\n" + "in statistics or machine learning to reduce the influence of outliers.\n" + "Unlike `huber`, `pseudo_huber` is smooth.\n" + "\n" + "Typically, `r` represents residuals, the difference\n" + "between a model prediction and data. Then, for :math:`|r|\\leq\\delta`,\n" + "`pseudo_huber` resembles the squared error and for :math:`|r|>\\delta` the\n" + "absolute error. This way, the Pseudo-Huber loss often achieves\n" + "a fast convergence in model fitting for small residuals like the squared\n" + "error loss function and still reduces the influence of outliers\n" + "(:math:`|r|>\\delta`) like the absolute error loss. As :math:`\\delta` is\n" + "the cutoff between squared and absolute error regimes, it has\n" + "to be tuned carefully for each problem. `pseudo_huber` is also\n" + "convex, making it suitable for gradient based optimization. [1]_ [2]_\n" + "\n" + ".. versionadded:: 0.15.0\n" + "\n" + "References\n" + "----------\n" + ".. [1] Hartley, Zisserman, \"Multiple View Geometry in Computer Vision\".\n" + " 2003. Cambridge University Press. p. 619\n" + ".. [2] Charbonnier et al. 
\"Deterministic edge-preserving regularization\n" + " in computed imaging\". 1997. IEEE Trans. Image Processing.\n" + " 6 (2): 298 - 311.\n" + "\n" + "Examples\n" + "--------\n" + "Import all necessary modules.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import pseudo_huber, huber\n" + ">>> import matplotlib.pyplot as plt\n" + "\n" + "Calculate the function for ``delta=1`` at ``r=2``.\n" + "\n" + ">>> pseudo_huber(1., 2.)\n" + "1.2360679774997898\n" + "\n" + "Calculate the function at ``r=2`` for different `delta` by providing\n" + "a list or NumPy array for `delta`.\n" + "\n" + ">>> pseudo_huber([1., 2., 4.], 3.)\n" + "array([2.16227766, 3.21110255, 4. ])\n" + "\n" + "Calculate the function for ``delta=1`` at several points by providing\n" + "a list or NumPy array for `r`.\n" + "\n" + ">>> pseudo_huber(2., np.array([1., 1.5, 3., 4.]))\n" + "array([0.47213595, 1. , 3.21110255, 4.94427191])\n" + "\n" + "The function can be calculated for different `delta` and `r` by\n" + "providing arrays for both with compatible shapes for broadcasting.\n" + "\n" + ">>> r = np.array([1., 2.5, 8., 10.])\n" + ">>> deltas = np.array([[1.], [5.], [9.]])\n" + ">>> print(r.shape, deltas.shape)\n" + "(4,) (3, 1)\n" + "\n" + ">>> pseudo_huber(deltas, r)\n" + "array([[ 0.41421356, 1.6925824 , 7.06225775, 9.04987562],\n" + " [ 0.49509757, 2.95084972, 22.16990566, 30.90169944],\n" + " [ 0.49846624, 3.06693762, 27.37435121, 40.08261642]])\n" + "\n" + "Plot the function for different `delta`.\n" + "\n" + ">>> x = np.linspace(-4, 4, 500)\n" + ">>> deltas = [1, 2, 3]\n" + ">>> linestyles = [\"dashed\", \"dotted\", \"dashdot\"]\n" + ">>> fig, ax = plt.subplots()\n" + ">>> combined_plot_parameters = list(zip(deltas, linestyles))\n" + ">>> for delta, style in combined_plot_parameters:\n" + "... ax.plot(x, pseudo_huber(delta, x), label=rf\"$\\delta={delta}$\",\n" + "... 
ls=style)\n" + ">>> ax.legend(loc=\"upper center\")\n" + ">>> ax.set_xlabel(\"$x$\")\n" + ">>> ax.set_title(r\"Pseudo-Huber loss function $h_{\\delta}(x)$\")\n" + ">>> ax.set_xlim(-4, 4)\n" + ">>> ax.set_ylim(0, 8)\n" + ">>> plt.show()\n" + "\n" + "Finally, illustrate the difference between `huber` and `pseudo_huber` by\n" + "plotting them and their gradients with respect to `r`. The plot shows\n" + "that `pseudo_huber` is continuously differentiable while `huber` is not\n" + "at the points :math:`\\pm\\delta`.\n" + "\n" + ">>> def huber_grad(delta, x):\n" + "... grad = np.copy(x)\n" + "... linear_area = np.argwhere(np.abs(x) > delta)\n" + "... grad[linear_area]=delta*np.sign(x[linear_area])\n" + "... return grad\n" + ">>> def pseudo_huber_grad(delta, x):\n" + "... return x* (1+(x/delta)**2)**(-0.5)\n" + ">>> x=np.linspace(-3, 3, 500)\n" + ">>> delta = 1.\n" + ">>> fig, ax = plt.subplots(figsize=(7, 7))\n" + ">>> ax.plot(x, huber(delta, x), label=\"Huber\", ls=\"dashed\")\n" + ">>> ax.plot(x, huber_grad(delta, x), label=\"Huber Gradient\", ls=\"dashdot\")\n" + ">>> ax.plot(x, pseudo_huber(delta, x), label=\"Pseudo-Huber\", ls=\"dotted\")\n" + ">>> ax.plot(x, pseudo_huber_grad(delta, x), label=\"Pseudo-Huber Gradient\",\n" + "... 
ls=\"solid\")\n" + ">>> ax.legend(loc=\"upper center\")\n" + ">>> plt.show()") +ufunc_pseudo_huber_loops[0] = loop_d_dd__As_ff_f +ufunc_pseudo_huber_loops[1] = loop_d_dd__As_dd_d +ufunc_pseudo_huber_types[0] = NPY_FLOAT +ufunc_pseudo_huber_types[1] = NPY_FLOAT +ufunc_pseudo_huber_types[2] = NPY_FLOAT +ufunc_pseudo_huber_types[3] = NPY_DOUBLE +ufunc_pseudo_huber_types[4] = NPY_DOUBLE +ufunc_pseudo_huber_types[5] = NPY_DOUBLE +ufunc_pseudo_huber_ptr[2*0] = _func_pseudo_huber +ufunc_pseudo_huber_ptr[2*0+1] = ("pseudo_huber") +ufunc_pseudo_huber_ptr[2*1] = _func_pseudo_huber +ufunc_pseudo_huber_ptr[2*1+1] = ("pseudo_huber") +ufunc_pseudo_huber_data[0] = &ufunc_pseudo_huber_ptr[2*0] +ufunc_pseudo_huber_data[1] = &ufunc_pseudo_huber_ptr[2*1] +pseudo_huber = np.PyUFunc_FromFuncAndData(ufunc_pseudo_huber_loops, ufunc_pseudo_huber_data, ufunc_pseudo_huber_types, 2, 2, 1, 0, "pseudo_huber", ufunc_pseudo_huber_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_psi_loops[4] +cdef void *ufunc_psi_ptr[8] +cdef void *ufunc_psi_data[4] +cdef char ufunc_psi_types[8] +cdef char *ufunc_psi_doc = ( + "psi(z, out=None)\n" + "\n" + "The digamma function.\n" + "\n" + "The logarithmic derivative of the gamma function evaluated at ``z``.\n" + "\n" + "Parameters\n" + "----------\n" + "z : array_like\n" + " Real or complex argument.\n" + "out : ndarray, optional\n" + " Array for the computed values of ``psi``.\n" + "\n" + "Returns\n" + "-------\n" + "digamma : scalar or ndarray\n" + " Computed values of ``psi``.\n" + "\n" + "Notes\n" + "-----\n" + "For large values not close to the negative real axis, ``psi`` is\n" + "computed using the asymptotic series (5.11.2) from [1]_. For small\n" + "arguments not close to the negative real axis, the recurrence\n" + "relation (5.5.2) from [1]_ is used until the argument is large\n" + "enough to use the asymptotic series. For values close to the\n" + "negative real axis, the reflection formula (5.5.4) from [1]_ is\n" + "used first. 
Note that ``psi`` has a family of zeros on the\n" + "negative real axis which occur between the poles at nonpositive\n" + "integers. Around the zeros the reflection formula suffers from\n" + "cancellation and the implementation loses precision. The sole\n" + "positive zero and the first negative zero, however, are handled\n" + "separately by precomputing series expansions using [2]_, so the\n" + "function should maintain full accuracy around the origin.\n" + "\n" + "References\n" + "----------\n" + ".. [1] NIST Digital Library of Mathematical Functions\n" + " https://dlmf.nist.gov/5\n" + ".. [2] Fredrik Johansson and others.\n" + " \"mpmath: a Python library for arbitrary-precision floating-point arithmetic\"\n" + " (Version 0.19) http://mpmath.org/\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import psi\n" + ">>> z = 3 + 4j\n" + ">>> psi(z)\n" + "(1.55035981733341+1.0105022091860445j)\n" + "\n" + "Verify psi(z) = psi(z + 1) - 1/z:\n" + "\n" + ">>> psi(z + 1) - 1/z\n" + "(1.55035981733341+1.0105022091860445j)") +ufunc_psi_loops[0] = loop_d_d__As_f_f +ufunc_psi_loops[1] = loop_d_d__As_d_d +ufunc_psi_loops[2] = loop_D_D__As_F_F +ufunc_psi_loops[3] = loop_D_D__As_D_D +ufunc_psi_types[0] = NPY_FLOAT +ufunc_psi_types[1] = NPY_FLOAT +ufunc_psi_types[2] = NPY_DOUBLE +ufunc_psi_types[3] = NPY_DOUBLE +ufunc_psi_types[4] = NPY_CFLOAT +ufunc_psi_types[5] = NPY_CFLOAT +ufunc_psi_types[6] = NPY_CDOUBLE +ufunc_psi_types[7] = NPY_CDOUBLE +ufunc_psi_ptr[2*0] = scipy.special._ufuncs_cxx._export_digamma +ufunc_psi_ptr[2*0+1] = ("psi") +ufunc_psi_ptr[2*1] = scipy.special._ufuncs_cxx._export_digamma +ufunc_psi_ptr[2*1+1] = ("psi") +ufunc_psi_ptr[2*2] = scipy.special._ufuncs_cxx._export_cdigamma +ufunc_psi_ptr[2*2+1] = ("psi") +ufunc_psi_ptr[2*3] = scipy.special._ufuncs_cxx._export_cdigamma +ufunc_psi_ptr[2*3+1] = ("psi") +ufunc_psi_data[0] = &ufunc_psi_ptr[2*0] +ufunc_psi_data[1] = &ufunc_psi_ptr[2*1] +ufunc_psi_data[2] = &ufunc_psi_ptr[2*2] +ufunc_psi_data[3] = 
&ufunc_psi_ptr[2*3] +psi = np.PyUFunc_FromFuncAndData(ufunc_psi_loops, ufunc_psi_data, ufunc_psi_types, 4, 1, 1, 0, "psi", ufunc_psi_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_radian_loops[2] +cdef void *ufunc_radian_ptr[4] +cdef void *ufunc_radian_data[2] +cdef char ufunc_radian_types[8] +cdef char *ufunc_radian_doc = ( + "radian(d, m, s, out=None)\n" + "\n" + "Convert from degrees to radians.\n" + "\n" + "Returns the angle given in (d)egrees, (m)inutes, and (s)econds in\n" + "radians.\n" + "\n" + "Parameters\n" + "----------\n" + "d : array_like\n" + " Degrees, can be real-valued.\n" + "m : array_like\n" + " Minutes, can be real-valued.\n" + "s : array_like\n" + " Seconds, can be real-valued.\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of the inputs in radians.\n" + "\n" + "Examples\n" + "--------\n" + ">>> import scipy.special as sc\n" + "\n" + "There are many ways to specify an angle.\n" + "\n" + ">>> sc.radian(90, 0, 0)\n" + "1.5707963267948966\n" + ">>> sc.radian(0, 60 * 90, 0)\n" + "1.5707963267948966\n" + ">>> sc.radian(0, 0, 60**2 * 90)\n" + "1.5707963267948966\n" + "\n" + "The inputs can be real-valued.\n" + "\n" + ">>> sc.radian(1.5, 0, 0)\n" + "0.02617993877991494\n" + ">>> sc.radian(1, 30, 0)\n" + "0.02617993877991494") +ufunc_radian_loops[0] = loop_d_ddd__As_fff_f +ufunc_radian_loops[1] = loop_d_ddd__As_ddd_d +ufunc_radian_types[0] = NPY_FLOAT +ufunc_radian_types[1] = NPY_FLOAT +ufunc_radian_types[2] = NPY_FLOAT +ufunc_radian_types[3] = NPY_FLOAT +ufunc_radian_types[4] = NPY_DOUBLE +ufunc_radian_types[5] = NPY_DOUBLE +ufunc_radian_types[6] = NPY_DOUBLE +ufunc_radian_types[7] = NPY_DOUBLE +ufunc_radian_ptr[2*0] = _func_radian +ufunc_radian_ptr[2*0+1] = ("radian") +ufunc_radian_ptr[2*1] = _func_radian +ufunc_radian_ptr[2*1+1] = ("radian") +ufunc_radian_data[0] = &ufunc_radian_ptr[2*0] +ufunc_radian_data[1] = &ufunc_radian_ptr[2*1] 
+radian = np.PyUFunc_FromFuncAndData(ufunc_radian_loops, ufunc_radian_data, ufunc_radian_types, 2, 3, 1, 0, "radian", ufunc_radian_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_rel_entr_loops[2] +cdef void *ufunc_rel_entr_ptr[4] +cdef void *ufunc_rel_entr_data[2] +cdef char ufunc_rel_entr_types[6] +cdef char *ufunc_rel_entr_doc = ( + "rel_entr(x, y, out=None)\n" + "\n" + "Elementwise function for computing relative entropy.\n" + "\n" + ".. math::\n" + "\n" + " \\mathrm{rel\\_entr}(x, y) =\n" + " \\begin{cases}\n" + " x \\log(x / y) & x > 0, y > 0 \\\\\n" + " 0 & x = 0, y \\ge 0 \\\\\n" + " \\infty & \\text{otherwise}\n" + " \\end{cases}\n" + "\n" + "Parameters\n" + "----------\n" + "x, y : array_like\n" + " Input arrays\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Relative entropy of the inputs\n" + "\n" + "See Also\n" + "--------\n" + "entr, kl_div, scipy.stats.entropy\n" + "\n" + "Notes\n" + "-----\n" + ".. versionadded:: 0.15.0\n" + "\n" + "This function is jointly convex in x and y.\n" + "\n" + "The origin of this function is in convex programming; see\n" + "[1]_. Given two discrete probability distributions :math:`p_1,\n" + "\\ldots, p_n` and :math:`q_1, \\ldots, q_n`, the definition of relative\n" + "entropy in the context of *information theory* is\n" + "\n" + ".. math::\n" + "\n" + " \\sum_{i = 1}^n \\mathrm{rel\\_entr}(p_i, q_i).\n" + "\n" + "To compute the latter quantity, use `scipy.stats.entropy`.\n" + "\n" + "See [2]_ for details.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Boyd, Stephen and Lieven Vandenberghe. *Convex optimization*.\n" + " Cambridge University Press, 2004.\n" + " :doi:`https://doi.org/10.1017/CBO9780511804441`\n" + ".. 
[2] Kullback-Leibler divergence,\n" + " https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence") +ufunc_rel_entr_loops[0] = loop_d_dd__As_ff_f +ufunc_rel_entr_loops[1] = loop_d_dd__As_dd_d +ufunc_rel_entr_types[0] = NPY_FLOAT +ufunc_rel_entr_types[1] = NPY_FLOAT +ufunc_rel_entr_types[2] = NPY_FLOAT +ufunc_rel_entr_types[3] = NPY_DOUBLE +ufunc_rel_entr_types[4] = NPY_DOUBLE +ufunc_rel_entr_types[5] = NPY_DOUBLE +ufunc_rel_entr_ptr[2*0] = _func_rel_entr +ufunc_rel_entr_ptr[2*0+1] = ("rel_entr") +ufunc_rel_entr_ptr[2*1] = _func_rel_entr +ufunc_rel_entr_ptr[2*1+1] = ("rel_entr") +ufunc_rel_entr_data[0] = &ufunc_rel_entr_ptr[2*0] +ufunc_rel_entr_data[1] = &ufunc_rel_entr_ptr[2*1] +rel_entr = np.PyUFunc_FromFuncAndData(ufunc_rel_entr_loops, ufunc_rel_entr_data, ufunc_rel_entr_types, 2, 2, 1, 0, "rel_entr", ufunc_rel_entr_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_rgamma_loops[4] +cdef void *ufunc_rgamma_ptr[8] +cdef void *ufunc_rgamma_data[4] +cdef char ufunc_rgamma_types[8] +cdef char *ufunc_rgamma_doc = ( + "rgamma(z, out=None)\n" + "\n" + "Reciprocal of the gamma function.\n" + "\n" + "Defined as :math:`1 / \\Gamma(z)`, where :math:`\\Gamma` is the\n" + "gamma function. For more on the gamma function see `gamma`.\n" + "\n" + "Parameters\n" + "----------\n" + "z : array_like\n" + " Real or complex valued input\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Function results\n" + "\n" + "See Also\n" + "--------\n" + "gamma, gammaln, loggamma\n" + "\n" + "Notes\n" + "-----\n" + "The gamma function has no zeros and has simple poles at\n" + "nonpositive integers, so `rgamma` is an entire function with zeros\n" + "at the nonpositive integers. See the discussion in [dlmf]_ for\n" + "more details.\n" + "\n" + "References\n" + "----------\n" + ".. 
[dlmf] Nist, Digital Library of Mathematical functions,\n" + " https://dlmf.nist.gov/5.2#i\n" + "\n" + "Examples\n" + "--------\n" + ">>> import scipy.special as sc\n" + "\n" + "It is the reciprocal of the gamma function.\n" + "\n" + ">>> sc.rgamma([1, 2, 3, 4])\n" + "array([1. , 1. , 0.5 , 0.16666667])\n" + ">>> 1 / sc.gamma([1, 2, 3, 4])\n" + "array([1. , 1. , 0.5 , 0.16666667])\n" + "\n" + "It is zero at nonpositive integers.\n" + "\n" + ">>> sc.rgamma([0, -1, -2, -3])\n" + "array([0., 0., 0., 0.])\n" + "\n" + "It rapidly underflows to zero along the positive real axis.\n" + "\n" + ">>> sc.rgamma([10, 100, 179])\n" + "array([2.75573192e-006, 1.07151029e-156, 0.00000000e+000])") +ufunc_rgamma_loops[0] = loop_d_d__As_f_f +ufunc_rgamma_loops[1] = loop_d_d__As_d_d +ufunc_rgamma_loops[2] = loop_D_D__As_F_F +ufunc_rgamma_loops[3] = loop_D_D__As_D_D +ufunc_rgamma_types[0] = NPY_FLOAT +ufunc_rgamma_types[1] = NPY_FLOAT +ufunc_rgamma_types[2] = NPY_DOUBLE +ufunc_rgamma_types[3] = NPY_DOUBLE +ufunc_rgamma_types[4] = NPY_CFLOAT +ufunc_rgamma_types[5] = NPY_CFLOAT +ufunc_rgamma_types[6] = NPY_CDOUBLE +ufunc_rgamma_types[7] = NPY_CDOUBLE +ufunc_rgamma_ptr[2*0] = _func_rgamma +ufunc_rgamma_ptr[2*0+1] = ("rgamma") +ufunc_rgamma_ptr[2*1] = _func_rgamma +ufunc_rgamma_ptr[2*1+1] = ("rgamma") +ufunc_rgamma_ptr[2*2] = scipy.special._ufuncs_cxx._export_crgamma +ufunc_rgamma_ptr[2*2+1] = ("rgamma") +ufunc_rgamma_ptr[2*3] = scipy.special._ufuncs_cxx._export_crgamma +ufunc_rgamma_ptr[2*3+1] = ("rgamma") +ufunc_rgamma_data[0] = &ufunc_rgamma_ptr[2*0] +ufunc_rgamma_data[1] = &ufunc_rgamma_ptr[2*1] +ufunc_rgamma_data[2] = &ufunc_rgamma_ptr[2*2] +ufunc_rgamma_data[3] = &ufunc_rgamma_ptr[2*3] +rgamma = np.PyUFunc_FromFuncAndData(ufunc_rgamma_loops, ufunc_rgamma_data, ufunc_rgamma_types, 4, 1, 1, 0, "rgamma", ufunc_rgamma_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_round_loops[2] +cdef void *ufunc_round_ptr[4] +cdef void *ufunc_round_data[2] +cdef char ufunc_round_types[4] +cdef char 
*ufunc_round_doc = ( + "round(x, out=None)\n" + "\n" + "Round to the nearest integer.\n" + "\n" + "Returns the nearest integer to `x`. If `x` ends in 0.5 exactly,\n" + "the nearest even integer is chosen.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real valued input.\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The nearest integers to the elements of `x`. The result is of\n" + " floating type, not integer type.\n" + "\n" + "Examples\n" + "--------\n" + ">>> import scipy.special as sc\n" + "\n" + "It rounds to even.\n" + "\n" + ">>> sc.round([0.5, 1.5])\n" + "array([0., 2.])") +ufunc_round_loops[0] = loop_d_d__As_f_f +ufunc_round_loops[1] = loop_d_d__As_d_d +ufunc_round_types[0] = NPY_FLOAT +ufunc_round_types[1] = NPY_FLOAT +ufunc_round_types[2] = NPY_DOUBLE +ufunc_round_types[3] = NPY_DOUBLE +ufunc_round_ptr[2*0] = _func_round +ufunc_round_ptr[2*0+1] = ("round") +ufunc_round_ptr[2*1] = _func_round +ufunc_round_ptr[2*1+1] = ("round") +ufunc_round_data[0] = &ufunc_round_ptr[2*0] +ufunc_round_data[1] = &ufunc_round_ptr[2*1] +round = np.PyUFunc_FromFuncAndData(ufunc_round_loops, ufunc_round_data, ufunc_round_types, 2, 1, 1, 0, "round", ufunc_round_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_shichi_loops[4] +cdef void *ufunc_shichi_ptr[8] +cdef void *ufunc_shichi_data[4] +cdef char ufunc_shichi_types[12] +cdef char *ufunc_shichi_doc = ( + "shichi(x, out=None)\n" + "\n" + "Hyperbolic sine and cosine integrals.\n" + "\n" + "The hyperbolic sine integral is\n" + "\n" + ".. math::\n" + "\n" + " \\int_0^x \\frac{\\sinh{t}}{t}dt\n" + "\n" + "and the hyperbolic cosine integral is\n" + "\n" + ".. 
math::\n" + "\n" + " \\gamma + \\log(x) + \\int_0^x \\frac{\\cosh{t} - 1}{t} dt\n" + "\n" + "where :math:`\\gamma` is Euler's constant and :math:`\\log` is the\n" + "principal branch of the logarithm [1]_.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real or complex points at which to compute the hyperbolic sine\n" + " and cosine integrals.\n" + "out : tuple of ndarray, optional\n" + " Optional output arrays for the function results\n" + "\n" + "Returns\n" + "-------\n" + "si : scalar or ndarray\n" + " Hyperbolic sine integral at ``x``\n" + "ci : scalar or ndarray\n" + " Hyperbolic cosine integral at ``x``\n" + "\n" + "See Also\n" + "--------\n" + "sici : Sine and cosine integrals.\n" + "exp1 : Exponential integral E1.\n" + "expi : Exponential integral Ei.\n" + "\n" + "Notes\n" + "-----\n" + "For real arguments with ``x < 0``, ``chi`` is the real part of the\n" + "hyperbolic cosine integral. For such points ``chi(x)`` and ``chi(x\n" + "+ 0j)`` differ by a factor of ``1j*pi``.\n" + "\n" + "For real arguments the function is computed by calling Cephes'\n" + "[2]_ *shichi* routine. For complex arguments the algorithm is based\n" + "on Mpmath's [3]_ *shi* and *chi* routines.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.\n" + " (See Section 5.2.)\n" + ".. [2] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + ".. 
[3] Fredrik Johansson and others.\n" + " \"mpmath: a Python library for arbitrary-precision floating-point\n" + " arithmetic\" (Version 0.19) http://mpmath.org/\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> from scipy.special import shichi, sici\n" + "\n" + "`shichi` accepts real or complex input:\n" + "\n" + ">>> shichi(0.5)\n" + "(0.5069967498196671, -0.05277684495649357)\n" + ">>> shichi(0.5 + 2.5j)\n" + "((0.11772029666668238+1.831091777729851j),\n" + " (0.29912435887648825+1.7395351121166562j))\n" + "\n" + "The hyperbolic sine and cosine integrals Shi(z) and Chi(z) are\n" + "related to the sine and cosine integrals Si(z) and Ci(z) by\n" + "\n" + "* Shi(z) = -i*Si(i*z)\n" + "* Chi(z) = Ci(-i*z) + i*pi/2\n" + "\n" + ">>> z = 0.25 + 5j\n" + ">>> shi, chi = shichi(z)\n" + ">>> shi, -1j*sici(1j*z)[0] # Should be the same.\n" + "((-0.04834719325101729+1.5469354086921228j),\n" + " (-0.04834719325101729+1.5469354086921228j))\n" + ">>> chi, sici(-1j*z)[1] + 1j*np.pi/2 # Should be the same.\n" + "((-0.19568708973868087+1.556276312103824j),\n" + " (-0.19568708973868087+1.556276312103824j))\n" + "\n" + "Plot the functions evaluated on the real axis:\n" + "\n" + ">>> xp = np.geomspace(1e-8, 4.0, 250)\n" + ">>> x = np.concatenate((-xp[::-1], xp))\n" + ">>> shi, chi = shichi(x)\n" + "\n" + ">>> fig, ax = plt.subplots()\n" + ">>> ax.plot(x, shi, label='Shi(x)')\n" + ">>> ax.plot(x, chi, '--', label='Chi(x)')\n" + ">>> ax.set_xlabel('x')\n" + ">>> ax.set_title('Hyperbolic Sine and Cosine Integrals')\n" + ">>> ax.legend(shadow=True, framealpha=1, loc='lower right')\n" + ">>> ax.grid(True)\n" + ">>> plt.show()") +ufunc_shichi_loops[0] = loop_i_d_dd_As_f_ff +ufunc_shichi_loops[1] = loop_i_d_dd_As_d_dd +ufunc_shichi_loops[2] = loop_i_D_DD_As_F_FF +ufunc_shichi_loops[3] = loop_i_D_DD_As_D_DD +ufunc_shichi_types[0] = NPY_FLOAT +ufunc_shichi_types[1] = NPY_FLOAT +ufunc_shichi_types[2] = NPY_FLOAT 
+ufunc_shichi_types[3] = NPY_DOUBLE +ufunc_shichi_types[4] = NPY_DOUBLE +ufunc_shichi_types[5] = NPY_DOUBLE +ufunc_shichi_types[6] = NPY_CFLOAT +ufunc_shichi_types[7] = NPY_CFLOAT +ufunc_shichi_types[8] = NPY_CFLOAT +ufunc_shichi_types[9] = NPY_CDOUBLE +ufunc_shichi_types[10] = NPY_CDOUBLE +ufunc_shichi_types[11] = NPY_CDOUBLE +ufunc_shichi_ptr[2*0] = _func_shichi +ufunc_shichi_ptr[2*0+1] = ("shichi") +ufunc_shichi_ptr[2*1] = _func_shichi +ufunc_shichi_ptr[2*1+1] = ("shichi") +ufunc_shichi_ptr[2*2] = _func_cshichi +ufunc_shichi_ptr[2*2+1] = ("shichi") +ufunc_shichi_ptr[2*3] = _func_cshichi +ufunc_shichi_ptr[2*3+1] = ("shichi") +ufunc_shichi_data[0] = &ufunc_shichi_ptr[2*0] +ufunc_shichi_data[1] = &ufunc_shichi_ptr[2*1] +ufunc_shichi_data[2] = &ufunc_shichi_ptr[2*2] +ufunc_shichi_data[3] = &ufunc_shichi_ptr[2*3] +shichi = np.PyUFunc_FromFuncAndData(ufunc_shichi_loops, ufunc_shichi_data, ufunc_shichi_types, 4, 1, 2, 0, "shichi", ufunc_shichi_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_sici_loops[4] +cdef void *ufunc_sici_ptr[8] +cdef void *ufunc_sici_data[4] +cdef char ufunc_sici_types[12] +cdef char *ufunc_sici_doc = ( + "sici(x, out=None)\n" + "\n" + "Sine and cosine integrals.\n" + "\n" + "The sine integral is\n" + "\n" + ".. math::\n" + "\n" + " \\int_0^x \\frac{\\sin{t}}{t}dt\n" + "\n" + "and the cosine integral is\n" + "\n" + ".. 
math::\n" + "\n" + " \\gamma + \\log(x) + \\int_0^x \\frac{\\cos{t} - 1}{t}dt\n" + "\n" + "where :math:`\\gamma` is Euler's constant and :math:`\\log` is the\n" + "principal branch of the logarithm [1]_.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real or complex points at which to compute the sine and cosine\n" + " integrals.\n" + "out : tuple of ndarray, optional\n" + " Optional output arrays for the function results\n" + "\n" + "Returns\n" + "-------\n" + "si : scalar or ndarray\n" + " Sine integral at ``x``\n" + "ci : scalar or ndarray\n" + " Cosine integral at ``x``\n" + "\n" + "See Also\n" + "--------\n" + "shichi : Hyperbolic sine and cosine integrals.\n" + "exp1 : Exponential integral E1.\n" + "expi : Exponential integral Ei.\n" + "\n" + "Notes\n" + "-----\n" + "For real arguments with ``x < 0``, ``ci`` is the real part of the\n" + "cosine integral. For such points ``ci(x)`` and ``ci(x + 0j)``\n" + "differ by a factor of ``1j*pi``.\n" + "\n" + "For real arguments the function is computed by calling Cephes'\n" + "[2]_ *sici* routine. For complex arguments the algorithm is based\n" + "on Mpmath's [3]_ *si* and *ci* routines.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Milton Abramowitz and Irene A. Stegun, eds.\n" + " Handbook of Mathematical Functions with Formulas,\n" + " Graphs, and Mathematical Tables. New York: Dover, 1972.\n" + " (See Section 5.2.)\n" + ".. [2] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + ".. 
[3] Fredrik Johansson and others.\n" + " \"mpmath: a Python library for arbitrary-precision floating-point\n" + " arithmetic\" (Version 0.19) http://mpmath.org/\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> from scipy.special import sici, exp1\n" + "\n" + "`sici` accepts real or complex input:\n" + "\n" + ">>> sici(2.5)\n" + "(1.7785201734438267, 0.2858711963653835)\n" + ">>> sici(2.5 + 3j)\n" + "((4.505735874563953+0.06863305018999577j),\n" + "(0.0793644206906966-2.935510262937543j))\n" + "\n" + "For z in the right half plane, the sine and cosine integrals are\n" + "related to the exponential integral E1 (implemented in SciPy as\n" + "`scipy.special.exp1`) by\n" + "\n" + "* Si(z) = (E1(i*z) - E1(-i*z))/2i + pi/2\n" + "* Ci(z) = -(E1(i*z) + E1(-i*z))/2\n" + "\n" + "See [1]_ (equations 5.2.21 and 5.2.23).\n" + "\n" + "We can verify these relations:\n" + "\n" + ">>> z = 2 - 3j\n" + ">>> sici(z)\n" + "((4.54751388956229-1.3991965806460565j),\n" + "(1.408292501520851+2.9836177420296055j))\n" + "\n" + ">>> (exp1(1j*z) - exp1(-1j*z))/2j + np.pi/2 # Same as sine integral\n" + "(4.54751388956229-1.3991965806460565j)\n" + "\n" + ">>> -(exp1(1j*z) + exp1(-1j*z))/2 # Same as cosine integral\n" + "(1.408292501520851+2.9836177420296055j)\n" + "\n" + "Plot the functions evaluated on the real axis; the dotted horizontal\n" + "lines are at pi/2 and -pi/2:\n" + "\n" + ">>> x = np.linspace(-16, 16, 150)\n" + ">>> si, ci = sici(x)\n" + "\n" + ">>> fig, ax = plt.subplots()\n" + ">>> ax.plot(x, si, label='Si(x)')\n" + ">>> ax.plot(x, ci, '--', label='Ci(x)')\n" + ">>> ax.legend(shadow=True, framealpha=1, loc='upper left')\n" + ">>> ax.set_xlabel('x')\n" + ">>> ax.set_title('Sine and Cosine Integrals')\n" + ">>> ax.axhline(np.pi/2, linestyle=':', alpha=0.5, color='k')\n" + ">>> ax.axhline(-np.pi/2, linestyle=':', alpha=0.5, color='k')\n" + ">>> ax.grid(True)\n" + ">>> plt.show()") +ufunc_sici_loops[0] = 
loop_i_d_dd_As_f_ff +ufunc_sici_loops[1] = loop_i_d_dd_As_d_dd +ufunc_sici_loops[2] = loop_i_D_DD_As_F_FF +ufunc_sici_loops[3] = loop_i_D_DD_As_D_DD +ufunc_sici_types[0] = NPY_FLOAT +ufunc_sici_types[1] = NPY_FLOAT +ufunc_sici_types[2] = NPY_FLOAT +ufunc_sici_types[3] = NPY_DOUBLE +ufunc_sici_types[4] = NPY_DOUBLE +ufunc_sici_types[5] = NPY_DOUBLE +ufunc_sici_types[6] = NPY_CFLOAT +ufunc_sici_types[7] = NPY_CFLOAT +ufunc_sici_types[8] = NPY_CFLOAT +ufunc_sici_types[9] = NPY_CDOUBLE +ufunc_sici_types[10] = NPY_CDOUBLE +ufunc_sici_types[11] = NPY_CDOUBLE +ufunc_sici_ptr[2*0] = _func_sici +ufunc_sici_ptr[2*0+1] = ("sici") +ufunc_sici_ptr[2*1] = _func_sici +ufunc_sici_ptr[2*1+1] = ("sici") +ufunc_sici_ptr[2*2] = _func_csici +ufunc_sici_ptr[2*2+1] = ("sici") +ufunc_sici_ptr[2*3] = _func_csici +ufunc_sici_ptr[2*3+1] = ("sici") +ufunc_sici_data[0] = &ufunc_sici_ptr[2*0] +ufunc_sici_data[1] = &ufunc_sici_ptr[2*1] +ufunc_sici_data[2] = &ufunc_sici_ptr[2*2] +ufunc_sici_data[3] = &ufunc_sici_ptr[2*3] +sici = np.PyUFunc_FromFuncAndData(ufunc_sici_loops, ufunc_sici_data, ufunc_sici_types, 4, 1, 2, 0, "sici", ufunc_sici_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_sindg_loops[2] +cdef void *ufunc_sindg_ptr[4] +cdef void *ufunc_sindg_data[2] +cdef char ufunc_sindg_types[4] +cdef char *ufunc_sindg_doc = ( + "sindg(x, out=None)\n" + "\n" + "Sine of the angle `x` given in degrees.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Angle, given in degrees.\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Sine at the input.\n" + "\n" + "See Also\n" + "--------\n" + "cosdg, tandg, cotdg\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "It is more accurate than using sine directly.\n" + "\n" + ">>> x = 180 * np.arange(3)\n" + ">>> sc.sindg(x)\n" + "array([ 0., -0., 0.])\n" + ">>> np.sin(x * np.pi / 
180)\n" + "array([ 0.0000000e+00, 1.2246468e-16, -2.4492936e-16])") +ufunc_sindg_loops[0] = loop_d_d__As_f_f +ufunc_sindg_loops[1] = loop_d_d__As_d_d +ufunc_sindg_types[0] = NPY_FLOAT +ufunc_sindg_types[1] = NPY_FLOAT +ufunc_sindg_types[2] = NPY_DOUBLE +ufunc_sindg_types[3] = NPY_DOUBLE +ufunc_sindg_ptr[2*0] = _func_sindg +ufunc_sindg_ptr[2*0+1] = ("sindg") +ufunc_sindg_ptr[2*1] = _func_sindg +ufunc_sindg_ptr[2*1+1] = ("sindg") +ufunc_sindg_data[0] = &ufunc_sindg_ptr[2*0] +ufunc_sindg_data[1] = &ufunc_sindg_ptr[2*1] +sindg = np.PyUFunc_FromFuncAndData(ufunc_sindg_loops, ufunc_sindg_data, ufunc_sindg_types, 2, 1, 1, 0, "sindg", ufunc_sindg_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_smirnov_loops[3] +cdef void *ufunc_smirnov_ptr[6] +cdef void *ufunc_smirnov_data[3] +cdef char ufunc_smirnov_types[9] +cdef char *ufunc_smirnov_doc = ( + "smirnov(n, d, out=None)\n" + "\n" + "Kolmogorov-Smirnov complementary cumulative distribution function\n" + "\n" + "Returns the exact Kolmogorov-Smirnov complementary cumulative\n" + "distribution function,(aka the Survival Function) of Dn+ (or Dn-)\n" + "for a one-sided test of equality between an empirical and a\n" + "theoretical distribution. 
It is equal to the probability that the\n" + "maximum difference between a theoretical distribution and an empirical\n" + "one based on `n` samples is greater than d.\n" + "\n" + "Parameters\n" + "----------\n" + "n : int\n" + " Number of samples\n" + "d : float array_like\n" + " Deviation between the Empirical CDF (ECDF) and the target CDF.\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The value(s) of smirnov(n, d), Prob(Dn+ >= d) (Also Prob(Dn- >= d))\n" + "\n" + "See Also\n" + "--------\n" + "smirnovi : The Inverse Survival Function for the distribution\n" + "scipy.stats.ksone : Provides the functionality as a continuous distribution\n" + "kolmogorov, kolmogi : Functions for the two-sided distribution\n" + "\n" + "Notes\n" + "-----\n" + "`smirnov` is used by `stats.kstest` in the application of the\n" + "Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this\n" + "function is exposed in `scpy.special`, but the recommended way to achieve\n" + "the most accurate CDF/SF/PDF/PPF/ISF computations is to use the\n" + "`stats.ksone` distribution.\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy.special import smirnov\n" + ">>> from scipy.stats import norm\n" + "\n" + "Show the probability of a gap at least as big as 0, 0.5 and 1.0 for a\n" + "sample of size 5.\n" + "\n" + ">>> smirnov(5, [0, 0.5, 1.0])\n" + "array([ 1. , 0.056, 0. 
])\n" + "\n" + "Compare a sample of size 5 against N(0, 1), the standard normal\n" + "distribution with mean 0 and standard deviation 1.\n" + "\n" + "`x` is the sample.\n" + "\n" + ">>> x = np.array([-1.392, -0.135, 0.114, 0.190, 1.82])\n" + "\n" + ">>> target = norm(0, 1)\n" + ">>> cdfs = target.cdf(x)\n" + ">>> cdfs\n" + "array([0.0819612 , 0.44630594, 0.5453811 , 0.57534543, 0.9656205 ])\n" + "\n" + "Construct the empirical CDF and the K-S statistics (Dn+, Dn-, Dn).\n" + "\n" + ">>> n = len(x)\n" + ">>> ecdfs = np.arange(n+1, dtype=float)/n\n" + ">>> cols = np.column_stack([x, ecdfs[1:], cdfs, cdfs - ecdfs[:n],\n" + "... ecdfs[1:] - cdfs])\n" + ">>> with np.printoptions(precision=3):\n" + "... print(cols)\n" + "[[-1.392 0.2 0.082 0.082 0.118]\n" + " [-0.135 0.4 0.446 0.246 -0.046]\n" + " [ 0.114 0.6 0.545 0.145 0.055]\n" + " [ 0.19 0.8 0.575 -0.025 0.225]\n" + " [ 1.82 1. 0.966 0.166 0.034]]\n" + ">>> gaps = cols[:, -2:]\n" + ">>> Dnpm = np.max(gaps, axis=0)\n" + ">>> print(f'Dn-={Dnpm[0]:f}, Dn+={Dnpm[1]:f}')\n" + "Dn-=0.246306, Dn+=0.224655\n" + ">>> probs = smirnov(n, Dnpm)\n" + ">>> print(f'For a sample of size {n} drawn from N(0, 1):',\n" + "... f' Smirnov n={n}: Prob(Dn- >= {Dnpm[0]:f}) = {probs[0]:.4f}',\n" + "... f' Smirnov n={n}: Prob(Dn+ >= {Dnpm[1]:f}) = {probs[1]:.4f}',\n" + "... sep='\\n')\n" + "For a sample of size 5 drawn from N(0, 1):\n" + " Smirnov n=5: Prob(Dn- >= 0.246306) = 0.4711\n" + " Smirnov n=5: Prob(Dn+ >= 0.224655) = 0.5245\n" + "\n" + "Plot the empirical CDF and the standard normal CDF.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> plt.step(np.concatenate(([-2.5], x, [2.5])),\n" + "... np.concatenate((ecdfs, [1])),\n" + "... 
where='post', label='Empirical CDF')\n" + ">>> xx = np.linspace(-2.5, 2.5, 100)\n" + ">>> plt.plot(xx, target.cdf(xx), '--', label='CDF for N(0, 1)')\n" + "\n" + "Add vertical lines marking Dn+ and Dn-.\n" + "\n" + ">>> iminus, iplus = np.argmax(gaps, axis=0)\n" + ">>> plt.vlines([x[iminus]], ecdfs[iminus], cdfs[iminus], color='r',\n" + "... alpha=0.5, lw=4)\n" + ">>> plt.vlines([x[iplus]], cdfs[iplus], ecdfs[iplus+1], color='m',\n" + "... alpha=0.5, lw=4)\n" + "\n" + ">>> plt.grid(True)\n" + ">>> plt.legend(framealpha=1, shadow=True)\n" + ">>> plt.show()") +ufunc_smirnov_loops[0] = loop_d_id__As_ld_d +ufunc_smirnov_loops[1] = loop_d_dd__As_ff_f +ufunc_smirnov_loops[2] = loop_d_dd__As_dd_d +ufunc_smirnov_types[0] = NPY_LONG +ufunc_smirnov_types[1] = NPY_DOUBLE +ufunc_smirnov_types[2] = NPY_DOUBLE +ufunc_smirnov_types[3] = NPY_FLOAT +ufunc_smirnov_types[4] = NPY_FLOAT +ufunc_smirnov_types[5] = NPY_FLOAT +ufunc_smirnov_types[6] = NPY_DOUBLE +ufunc_smirnov_types[7] = NPY_DOUBLE +ufunc_smirnov_types[8] = NPY_DOUBLE +ufunc_smirnov_ptr[2*0] = _func_smirnov +ufunc_smirnov_ptr[2*0+1] = ("smirnov") +ufunc_smirnov_ptr[2*1] = _func_smirnov_unsafe +ufunc_smirnov_ptr[2*1+1] = ("smirnov") +ufunc_smirnov_ptr[2*2] = _func_smirnov_unsafe +ufunc_smirnov_ptr[2*2+1] = ("smirnov") +ufunc_smirnov_data[0] = &ufunc_smirnov_ptr[2*0] +ufunc_smirnov_data[1] = &ufunc_smirnov_ptr[2*1] +ufunc_smirnov_data[2] = &ufunc_smirnov_ptr[2*2] +smirnov = np.PyUFunc_FromFuncAndData(ufunc_smirnov_loops, ufunc_smirnov_data, ufunc_smirnov_types, 3, 2, 1, 0, "smirnov", ufunc_smirnov_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_smirnovi_loops[3] +cdef void *ufunc_smirnovi_ptr[6] +cdef void *ufunc_smirnovi_data[3] +cdef char ufunc_smirnovi_types[9] +cdef char *ufunc_smirnovi_doc = ( + "smirnovi(n, p, out=None)\n" + "\n" + "Inverse to `smirnov`\n" + "\n" + "Returns `d` such that ``smirnov(n, d) == p``, the critical value\n" + "corresponding to `p`.\n" + "\n" + "Parameters\n" + "----------\n" + "n : int\n" + " 
Number of samples\n" + "p : float array_like\n" + " Probability\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The value(s) of smirnovi(n, p), the critical values.\n" + "\n" + "See Also\n" + "--------\n" + "smirnov : The Survival Function (SF) for the distribution\n" + "scipy.stats.ksone : Provides the functionality as a continuous distribution\n" + "kolmogorov, kolmogi : Functions for the two-sided distribution\n" + "scipy.stats.kstwobign : Two-sided Kolmogorov-Smirnov distribution, large n\n" + "\n" + "Notes\n" + "-----\n" + "`smirnov` is used by `stats.kstest` in the application of the\n" + "Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this\n" + "function is exposed in `scpy.special`, but the recommended way to achieve\n" + "the most accurate CDF/SF/PDF/PPF/ISF computations is to use the\n" + "`stats.ksone` distribution.\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import smirnovi, smirnov\n" + "\n" + ">>> n = 24\n" + ">>> deviations = [0.1, 0.2, 0.3]\n" + "\n" + "Use `smirnov` to compute the complementary CDF of the Smirnov\n" + "distribution for the given number of samples and deviations.\n" + "\n" + ">>> p = smirnov(n, deviations)\n" + ">>> p\n" + "array([0.58105083, 0.12826832, 0.01032231])\n" + "\n" + "The inverse function ``smirnovi(n, p)`` returns ``deviations``.\n" + "\n" + ">>> smirnovi(n, p)\n" + "array([0.1, 0.2, 0.3])") +ufunc_smirnovi_loops[0] = loop_d_id__As_ld_d +ufunc_smirnovi_loops[1] = loop_d_dd__As_ff_f +ufunc_smirnovi_loops[2] = loop_d_dd__As_dd_d +ufunc_smirnovi_types[0] = NPY_LONG +ufunc_smirnovi_types[1] = NPY_DOUBLE +ufunc_smirnovi_types[2] = NPY_DOUBLE +ufunc_smirnovi_types[3] = NPY_FLOAT +ufunc_smirnovi_types[4] = NPY_FLOAT +ufunc_smirnovi_types[5] = NPY_FLOAT +ufunc_smirnovi_types[6] = NPY_DOUBLE +ufunc_smirnovi_types[7] = NPY_DOUBLE +ufunc_smirnovi_types[8] = NPY_DOUBLE +ufunc_smirnovi_ptr[2*0] 
= _func_smirnovi +ufunc_smirnovi_ptr[2*0+1] = ("smirnovi") +ufunc_smirnovi_ptr[2*1] = _func_smirnovi_unsafe +ufunc_smirnovi_ptr[2*1+1] = ("smirnovi") +ufunc_smirnovi_ptr[2*2] = _func_smirnovi_unsafe +ufunc_smirnovi_ptr[2*2+1] = ("smirnovi") +ufunc_smirnovi_data[0] = &ufunc_smirnovi_ptr[2*0] +ufunc_smirnovi_data[1] = &ufunc_smirnovi_ptr[2*1] +ufunc_smirnovi_data[2] = &ufunc_smirnovi_ptr[2*2] +smirnovi = np.PyUFunc_FromFuncAndData(ufunc_smirnovi_loops, ufunc_smirnovi_data, ufunc_smirnovi_types, 3, 2, 1, 0, "smirnovi", ufunc_smirnovi_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_spence_loops[4] +cdef void *ufunc_spence_ptr[8] +cdef void *ufunc_spence_data[4] +cdef char ufunc_spence_types[8] +cdef char *ufunc_spence_doc = ( + "spence(z, out=None)\n" + "\n" + "Spence's function, also known as the dilogarithm.\n" + "\n" + "It is defined to be\n" + "\n" + ".. math::\n" + " \\int_1^z \\frac{\\log(t)}{1 - t}dt\n" + "\n" + "for complex :math:`z`, where the contour of integration is taken\n" + "to avoid the branch cut of the logarithm. Spence's function is\n" + "analytic everywhere except the negative real axis where it has a\n" + "branch cut.\n" + "\n" + "Parameters\n" + "----------\n" + "z : array_like\n" + " Points at which to evaluate Spence's function\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "s : scalar or ndarray\n" + " Computed values of Spence's function\n" + "\n" + "Notes\n" + "-----\n" + "There is a different convention which defines Spence's function by\n" + "the integral\n" + "\n" + ".. 
math::\n" + " -\\int_0^z \\frac{\\log(1 - t)}{t}dt;\n" + "\n" + "this is our ``spence(1 - z)``.\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy.special import spence\n" + ">>> import matplotlib.pyplot as plt\n" + "\n" + "The function is defined for complex inputs:\n" + "\n" + ">>> spence([1-1j, 1.5+2j, 3j, -10-5j])\n" + "array([-0.20561676+0.91596559j, -0.86766909-1.39560134j,\n" + " -0.59422064-2.49129918j, -1.14044398+6.80075924j])\n" + "\n" + "For complex inputs on the branch cut, which is the negative real axis,\n" + "the function returns the limit for ``z`` with positive imaginary part.\n" + "For example, in the following, note the sign change of the imaginary\n" + "part of the output for ``z = -2`` and ``z = -2 - 1e-8j``:\n" + "\n" + ">>> spence([-2 + 1e-8j, -2, -2 - 1e-8j])\n" + "array([2.32018041-3.45139229j, 2.32018042-3.4513923j ,\n" + " 2.32018041+3.45139229j])\n" + "\n" + "The function returns ``nan`` for real inputs on the branch cut:\n" + "\n" + ">>> spence(-1.5)\n" + "nan\n" + "\n" + "Verify some particular values: ``spence(0) = pi**2/6``,\n" + "``spence(1) = 0`` and ``spence(2) = -pi**2/12``.\n" + "\n" + ">>> spence([0, 1, 2])\n" + "array([ 1.64493407, 0. 
, -0.82246703])\n" + ">>> np.pi**2/6, -np.pi**2/12\n" + "(1.6449340668482264, -0.8224670334241132)\n" + "\n" + "Verify the identity::\n" + "\n" + " spence(z) + spence(1 - z) = pi**2/6 - log(z)*log(1 - z)\n" + "\n" + ">>> z = 3 + 4j\n" + ">>> spence(z) + spence(1 - z)\n" + "(-2.6523186143876067+1.8853470951513935j)\n" + ">>> np.pi**2/6 - np.log(z)*np.log(1 - z)\n" + "(-2.652318614387606+1.885347095151394j)\n" + "\n" + "Plot the function for positive real input.\n" + "\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(0, 6, 400)\n" + ">>> ax.plot(x, spence(x))\n" + ">>> ax.grid()\n" + ">>> ax.set_xlabel('x')\n" + ">>> ax.set_title('spence(x)')\n" + ">>> plt.show()") +ufunc_spence_loops[0] = loop_d_d__As_f_f +ufunc_spence_loops[1] = loop_d_d__As_d_d +ufunc_spence_loops[2] = loop_D_D__As_F_F +ufunc_spence_loops[3] = loop_D_D__As_D_D +ufunc_spence_types[0] = NPY_FLOAT +ufunc_spence_types[1] = NPY_FLOAT +ufunc_spence_types[2] = NPY_DOUBLE +ufunc_spence_types[3] = NPY_DOUBLE +ufunc_spence_types[4] = NPY_CFLOAT +ufunc_spence_types[5] = NPY_CFLOAT +ufunc_spence_types[6] = NPY_CDOUBLE +ufunc_spence_types[7] = NPY_CDOUBLE +ufunc_spence_ptr[2*0] = _func_spence +ufunc_spence_ptr[2*0+1] = ("spence") +ufunc_spence_ptr[2*1] = _func_spence +ufunc_spence_ptr[2*1+1] = ("spence") +ufunc_spence_ptr[2*2] = _func_cspence +ufunc_spence_ptr[2*2+1] = ("spence") +ufunc_spence_ptr[2*3] = _func_cspence +ufunc_spence_ptr[2*3+1] = ("spence") +ufunc_spence_data[0] = &ufunc_spence_ptr[2*0] +ufunc_spence_data[1] = &ufunc_spence_ptr[2*1] +ufunc_spence_data[2] = &ufunc_spence_ptr[2*2] +ufunc_spence_data[3] = &ufunc_spence_ptr[2*3] +spence = np.PyUFunc_FromFuncAndData(ufunc_spence_loops, ufunc_spence_data, ufunc_spence_types, 4, 1, 1, 0, "spence", ufunc_spence_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_sph_harm_loops[3] +cdef void *ufunc_sph_harm_ptr[6] +cdef void *ufunc_sph_harm_data[3] +cdef char ufunc_sph_harm_types[15] +cdef char *ufunc_sph_harm_doc = ( + "sph_harm(m, n, theta, 
phi, out=None)\n" + "\n" + "Compute spherical harmonics.\n" + "\n" + "The spherical harmonics are defined as\n" + "\n" + ".. math::\n" + "\n" + " Y^m_n(\\theta,\\phi) = \\sqrt{\\frac{2n+1}{4\\pi} \\frac{(n-m)!}{(n+m)!}}\n" + " e^{i m \\theta} P^m_n(\\cos(\\phi))\n" + "\n" + "where :math:`P_n^m` are the associated Legendre functions; see `lpmv`.\n" + "\n" + "Parameters\n" + "----------\n" + "m : array_like\n" + " Order of the harmonic (int); must have ``|m| <= n``.\n" + "n : array_like\n" + " Degree of the harmonic (int); must have ``n >= 0``. This is\n" + " often denoted by ``l`` (lower case L) in descriptions of\n" + " spherical harmonics.\n" + "theta : array_like\n" + " Azimuthal (longitudinal) coordinate; must be in ``[0, 2*pi]``.\n" + "phi : array_like\n" + " Polar (colatitudinal) coordinate; must be in ``[0, pi]``.\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "y_mn : complex scalar or ndarray\n" + " The harmonic :math:`Y^m_n` sampled at ``theta`` and ``phi``.\n" + "\n" + "Notes\n" + "-----\n" + "There are different conventions for the meanings of the input\n" + "arguments ``theta`` and ``phi``. In SciPy ``theta`` is the\n" + "azimuthal angle and ``phi`` is the polar angle. It is common to\n" + "see the opposite convention, that is, ``theta`` as the polar angle\n" + "and ``phi`` as the azimuthal angle.\n" + "\n" + "Note that SciPy's spherical harmonics include the Condon-Shortley\n" + "phase [2]_ because it is part of `lpmv`.\n" + "\n" + "With SciPy's conventions, the first several spherical harmonics\n" + "are\n" + "\n" + ".. 
math::\n" + "\n" + " Y_0^0(\\theta, \\phi) &= \\frac{1}{2} \\sqrt{\\frac{1}{\\pi}} \\\\\n" + " Y_1^{-1}(\\theta, \\phi) &= \\frac{1}{2} \\sqrt{\\frac{3}{2\\pi}}\n" + " e^{-i\\theta} \\sin(\\phi) \\\\\n" + " Y_1^0(\\theta, \\phi) &= \\frac{1}{2} \\sqrt{\\frac{3}{\\pi}}\n" + " \\cos(\\phi) \\\\\n" + " Y_1^1(\\theta, \\phi) &= -\\frac{1}{2} \\sqrt{\\frac{3}{2\\pi}}\n" + " e^{i\\theta} \\sin(\\phi).\n" + "\n" + "References\n" + "----------\n" + ".. [1] Digital Library of Mathematical Functions, 14.30.\n" + " https://dlmf.nist.gov/14.30\n" + ".. [2] https://en.wikipedia.org/wiki/Spherical_harmonics#Condon.E2.80.93Shortley_phase") +ufunc_sph_harm_loops[0] = loop_D_iidd__As_lldd_D +ufunc_sph_harm_loops[1] = loop_D_dddd__As_ffff_F +ufunc_sph_harm_loops[2] = loop_D_dddd__As_dddd_D +ufunc_sph_harm_types[0] = NPY_LONG +ufunc_sph_harm_types[1] = NPY_LONG +ufunc_sph_harm_types[2] = NPY_DOUBLE +ufunc_sph_harm_types[3] = NPY_DOUBLE +ufunc_sph_harm_types[4] = NPY_CDOUBLE +ufunc_sph_harm_types[5] = NPY_FLOAT +ufunc_sph_harm_types[6] = NPY_FLOAT +ufunc_sph_harm_types[7] = NPY_FLOAT +ufunc_sph_harm_types[8] = NPY_FLOAT +ufunc_sph_harm_types[9] = NPY_CFLOAT +ufunc_sph_harm_types[10] = NPY_DOUBLE +ufunc_sph_harm_types[11] = NPY_DOUBLE +ufunc_sph_harm_types[12] = NPY_DOUBLE +ufunc_sph_harm_types[13] = NPY_DOUBLE +ufunc_sph_harm_types[14] = NPY_CDOUBLE +ufunc_sph_harm_ptr[2*0] = _func_sph_harmonic +ufunc_sph_harm_ptr[2*0+1] = ("sph_harm") +ufunc_sph_harm_ptr[2*1] = _func_sph_harmonic_unsafe +ufunc_sph_harm_ptr[2*1+1] = ("sph_harm") +ufunc_sph_harm_ptr[2*2] = _func_sph_harmonic_unsafe +ufunc_sph_harm_ptr[2*2+1] = ("sph_harm") +ufunc_sph_harm_data[0] = &ufunc_sph_harm_ptr[2*0] +ufunc_sph_harm_data[1] = &ufunc_sph_harm_ptr[2*1] +ufunc_sph_harm_data[2] = &ufunc_sph_harm_ptr[2*2] +sph_harm = np.PyUFunc_FromFuncAndData(ufunc_sph_harm_loops, ufunc_sph_harm_data, ufunc_sph_harm_types, 3, 4, 1, 0, "sph_harm", ufunc_sph_harm_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_stdtr_loops[2] +cdef void 
*ufunc_stdtr_ptr[4] +cdef void *ufunc_stdtr_data[2] +cdef char ufunc_stdtr_types[6] +cdef char *ufunc_stdtr_doc = ( + "stdtr(df, t, out=None)\n" + "\n" + "Student t distribution cumulative distribution function\n" + "\n" + "Returns the integral:\n" + "\n" + ".. math::\n" + " \\frac{\\Gamma((df+1)/2)}{\\sqrt{\\pi df} \\Gamma(df/2)}\n" + " \\int_{-\\infty}^t (1+x^2/df)^{-(df+1)/2}\\, dx\n" + "\n" + "Parameters\n" + "----------\n" + "df : array_like\n" + " Degrees of freedom\n" + "t : array_like\n" + " Upper bound of the integral\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Value of the Student t CDF at t\n" + "\n" + "See Also\n" + "--------\n" + "stdtridf : inverse of stdtr with respect to `df`\n" + "stdtrit : inverse of stdtr with respect to `t`\n" + "scipy.stats.t : student t distribution\n" + "\n" + "Notes\n" + "-----\n" + "The student t distribution is also available as `scipy.stats.t`.\n" + "Calling `stdtr` directly can improve performance compared to the\n" + "``cdf`` method of `scipy.stats.t` (see last example below).\n" + "\n" + "Examples\n" + "--------\n" + "Calculate the function for ``df=3`` at ``t=1``.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import stdtr\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> stdtr(3, 1)\n" + "0.8044988905221148\n" + "\n" + "Plot the function for three different degrees of freedom.\n" + "\n" + ">>> x = np.linspace(-10, 10, 1000)\n" + ">>> fig, ax = plt.subplots()\n" + ">>> parameters = [(1, \"solid\"), (3, \"dashed\"), (10, \"dotted\")]\n" + ">>> for (df, linestyle) in parameters:\n" + "... 
ax.plot(x, stdtr(df, x), ls=linestyle, label=f\"$df={df}$\")\n" + ">>> ax.legend()\n" + ">>> ax.set_title(\"Student t distribution cumulative distribution function\")\n" + ">>> plt.show()\n" + "\n" + "The function can be computed for several degrees of freedom at the same\n" + "time by providing a NumPy array or list for `df`:\n" + "\n" + ">>> stdtr([1, 2, 3], 1)\n" + "array([0.75 , 0.78867513, 0.80449889])\n" + "\n" + "It is possible to calculate the function at several points for several\n" + "different degrees of freedom simultaneously by providing arrays for `df`\n" + "and `t` with shapes compatible for broadcasting. Compute `stdtr` at\n" + "4 points for 3 degrees of freedom resulting in an array of shape 3x4.\n" + "\n" + ">>> dfs = np.array([[1], [2], [3]])\n" + ">>> t = np.array([2, 4, 6, 8])\n" + ">>> dfs.shape, t.shape\n" + "((3, 1), (4,))\n" + "\n" + ">>> stdtr(dfs, t)\n" + "array([[0.85241638, 0.92202087, 0.94743154, 0.96041658],\n" + " [0.90824829, 0.97140452, 0.98666426, 0.99236596],\n" + " [0.93033702, 0.98599577, 0.99536364, 0.99796171]])\n" + "\n" + "The t distribution is also available as `scipy.stats.t`. Calling `stdtr`\n" + "directly can be much faster than calling the ``cdf`` method of\n" + "`scipy.stats.t`. 
To get the same results, one must use the following\n" + "parametrization: ``scipy.stats.t(df).cdf(x) = stdtr(df, x)``.\n" + "\n" + ">>> from scipy.stats import t\n" + ">>> df, x = 3, 1\n" + ">>> stdtr_result = stdtr(df, x) # this can be faster than below\n" + ">>> stats_result = t(df).cdf(x)\n" + ">>> stats_result == stdtr_result # test that results are equal\n" + "True") +ufunc_stdtr_loops[0] = loop_d_dd__As_ff_f +ufunc_stdtr_loops[1] = loop_d_dd__As_dd_d +ufunc_stdtr_types[0] = NPY_FLOAT +ufunc_stdtr_types[1] = NPY_FLOAT +ufunc_stdtr_types[2] = NPY_FLOAT +ufunc_stdtr_types[3] = NPY_DOUBLE +ufunc_stdtr_types[4] = NPY_DOUBLE +ufunc_stdtr_types[5] = NPY_DOUBLE +ufunc_stdtr_ptr[2*0] = _func_stdtr +ufunc_stdtr_ptr[2*0+1] = ("stdtr") +ufunc_stdtr_ptr[2*1] = _func_stdtr +ufunc_stdtr_ptr[2*1+1] = ("stdtr") +ufunc_stdtr_data[0] = &ufunc_stdtr_ptr[2*0] +ufunc_stdtr_data[1] = &ufunc_stdtr_ptr[2*1] +stdtr = np.PyUFunc_FromFuncAndData(ufunc_stdtr_loops, ufunc_stdtr_data, ufunc_stdtr_types, 2, 2, 1, 0, "stdtr", ufunc_stdtr_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_stdtridf_loops[2] +cdef void *ufunc_stdtridf_ptr[4] +cdef void *ufunc_stdtridf_data[2] +cdef char ufunc_stdtridf_types[6] +cdef char *ufunc_stdtridf_doc = ( + "stdtridf(p, t, out=None)\n" + "\n" + "Inverse of `stdtr` vs df\n" + "\n" + "Returns the argument df such that stdtr(df, t) is equal to `p`.\n" + "\n" + "Parameters\n" + "----------\n" + "p : array_like\n" + " Probability\n" + "t : array_like\n" + " Upper bound of the integral\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "df : scalar or ndarray\n" + " Value of `df` such that ``stdtr(df, t) == p``\n" + "\n" + "See Also\n" + "--------\n" + "stdtr : Student t CDF\n" + "stdtrit : inverse of stdtr with respect to `t`\n" + "scipy.stats.t : Student t distribution\n" + "\n" + "Examples\n" + "--------\n" + "Compute the student t cumulative distribution function for one\n" + "parameter 
set.\n" + "\n" + ">>> from scipy.special import stdtr, stdtridf\n" + ">>> df, x = 5, 2\n" + ">>> cdf_value = stdtr(df, x)\n" + ">>> cdf_value\n" + "0.9490302605850709\n" + "\n" + "Verify that `stdtridf` recovers the original value for `df` given\n" + "the CDF value and `x`.\n" + "\n" + ">>> stdtridf(cdf_value, x)\n" + "5.0") +ufunc_stdtridf_loops[0] = loop_d_dd__As_ff_f +ufunc_stdtridf_loops[1] = loop_d_dd__As_dd_d +ufunc_stdtridf_types[0] = NPY_FLOAT +ufunc_stdtridf_types[1] = NPY_FLOAT +ufunc_stdtridf_types[2] = NPY_FLOAT +ufunc_stdtridf_types[3] = NPY_DOUBLE +ufunc_stdtridf_types[4] = NPY_DOUBLE +ufunc_stdtridf_types[5] = NPY_DOUBLE +ufunc_stdtridf_ptr[2*0] = _func_stdtridf +ufunc_stdtridf_ptr[2*0+1] = ("stdtridf") +ufunc_stdtridf_ptr[2*1] = _func_stdtridf +ufunc_stdtridf_ptr[2*1+1] = ("stdtridf") +ufunc_stdtridf_data[0] = &ufunc_stdtridf_ptr[2*0] +ufunc_stdtridf_data[1] = &ufunc_stdtridf_ptr[2*1] +stdtridf = np.PyUFunc_FromFuncAndData(ufunc_stdtridf_loops, ufunc_stdtridf_data, ufunc_stdtridf_types, 2, 2, 1, 0, "stdtridf", ufunc_stdtridf_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_stdtrit_loops[2] +cdef void *ufunc_stdtrit_ptr[4] +cdef void *ufunc_stdtrit_data[2] +cdef char ufunc_stdtrit_types[6] +cdef char *ufunc_stdtrit_doc = ( + "stdtrit(df, p, out=None)\n" + "\n" + "The `p`-th quantile of the student t distribution.\n" + "\n" + "This function is the inverse of the student t distribution cumulative\n" + "distribution function (CDF), returning `t` such that `stdtr(df, t) = p`.\n" + "\n" + "Returns the argument `t` such that stdtr(df, t) is equal to `p`.\n" + "\n" + "Parameters\n" + "----------\n" + "df : array_like\n" + " Degrees of freedom\n" + "p : array_like\n" + " Probability\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "t : scalar or ndarray\n" + " Value of `t` such that ``stdtr(df, t) == p``\n" + "\n" + "See Also\n" + "--------\n" + "stdtr : Student t CDF\n" + "stdtridf : 
inverse of stdtr with respect to `df`\n" + "scipy.stats.t : Student t distribution\n" + "\n" + "Notes\n" + "-----\n" + "The student t distribution is also available as `scipy.stats.t`. Calling\n" + "`stdtrit` directly can improve performance compared to the ``ppf``\n" + "method of `scipy.stats.t` (see last example below).\n" + "\n" + "Examples\n" + "--------\n" + "`stdtrit` represents the inverse of the student t distribution CDF which\n" + "is available as `stdtr`. Here, we calculate the CDF for ``df`` at\n" + "``x=1``. `stdtrit` then returns ``1`` up to floating point errors\n" + "given the same value for `df` and the computed CDF value.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import stdtr, stdtrit\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> df = 3\n" + ">>> x = 1\n" + ">>> cdf_value = stdtr(df, x)\n" + ">>> stdtrit(df, cdf_value)\n" + "0.9999999994418539\n" + "\n" + "Plot the function for three different degrees of freedom.\n" + "\n" + ">>> x = np.linspace(0, 1, 1000)\n" + ">>> parameters = [(1, \"solid\"), (2, \"dashed\"), (5, \"dotted\")]\n" + ">>> fig, ax = plt.subplots()\n" + ">>> for (df, linestyle) in parameters:\n" + "... ax.plot(x, stdtrit(df, x), ls=linestyle, label=f\"$df={df}$\")\n" + ">>> ax.legend()\n" + ">>> ax.set_ylim(-10, 10)\n" + ">>> ax.set_title(\"Student t distribution quantile function\")\n" + ">>> plt.show()\n" + "\n" + "The function can be computed for several degrees of freedom at the same\n" + "time by providing a NumPy array or list for `df`:\n" + "\n" + ">>> stdtrit([1, 2, 3], 0.7)\n" + "array([0.72654253, 0.6172134 , 0.58438973])\n" + "\n" + "It is possible to calculate the function at several points for several\n" + "different degrees of freedom simultaneously by providing arrays for `df`\n" + "and `p` with shapes compatible for broadcasting. 
Compute `stdtrit` at\n" + "4 points for 3 degrees of freedom resulting in an array of shape 3x4.\n" + "\n" + ">>> dfs = np.array([[1], [2], [3]])\n" + ">>> p = np.array([0.2, 0.4, 0.7, 0.8])\n" + ">>> dfs.shape, p.shape\n" + "((3, 1), (4,))\n" + "\n" + ">>> stdtrit(dfs, p)\n" + "array([[-1.37638192, -0.3249197 , 0.72654253, 1.37638192],\n" + " [-1.06066017, -0.28867513, 0.6172134 , 1.06066017],\n" + " [-0.97847231, -0.27667066, 0.58438973, 0.97847231]])\n" + "\n" + "The t distribution is also available as `scipy.stats.t`. Calling `stdtrit`\n" + "directly can be much faster than calling the ``ppf`` method of\n" + "`scipy.stats.t`. To get the same results, one must use the following\n" + "parametrization: ``scipy.stats.t(df).ppf(x) = stdtrit(df, x)``.\n" + "\n" + ">>> from scipy.stats import t\n" + ">>> df, x = 3, 0.5\n" + ">>> stdtrit_result = stdtrit(df, x) # this can be faster than below\n" + ">>> stats_result = t(df).ppf(x)\n" + ">>> stats_result == stdtrit_result # test that results are equal\n" + "True") +ufunc_stdtrit_loops[0] = loop_d_dd__As_ff_f +ufunc_stdtrit_loops[1] = loop_d_dd__As_dd_d +ufunc_stdtrit_types[0] = NPY_FLOAT +ufunc_stdtrit_types[1] = NPY_FLOAT +ufunc_stdtrit_types[2] = NPY_FLOAT +ufunc_stdtrit_types[3] = NPY_DOUBLE +ufunc_stdtrit_types[4] = NPY_DOUBLE +ufunc_stdtrit_types[5] = NPY_DOUBLE +ufunc_stdtrit_ptr[2*0] = _func_stdtrit +ufunc_stdtrit_ptr[2*0+1] = ("stdtrit") +ufunc_stdtrit_ptr[2*1] = _func_stdtrit +ufunc_stdtrit_ptr[2*1+1] = ("stdtrit") +ufunc_stdtrit_data[0] = &ufunc_stdtrit_ptr[2*0] +ufunc_stdtrit_data[1] = &ufunc_stdtrit_ptr[2*1] +stdtrit = np.PyUFunc_FromFuncAndData(ufunc_stdtrit_loops, ufunc_stdtrit_data, ufunc_stdtrit_types, 2, 2, 1, 0, "stdtrit", ufunc_stdtrit_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_struve_loops[2] +cdef void *ufunc_struve_ptr[4] +cdef void *ufunc_struve_data[2] +cdef char ufunc_struve_types[6] +cdef char *ufunc_struve_doc = ( + "struve(v, x, out=None)\n" + "\n" + "Struve function.\n" + "\n" + "Return 
the value of the Struve function of order `v` at `x`. The Struve\n" + "function is defined as,\n" + "\n" + ".. math::\n" + " H_v(x) = (z/2)^{v + 1} \\sum_{n=0}^\\infty\n" + " \\frac{(-1)^n (z/2)^{2n}}{\\Gamma(n + \\frac{3}{2}) \\Gamma(n + v + \\frac{3}{2})},\n" + "\n" + "where :math:`\\Gamma` is the gamma function.\n" + "\n" + "Parameters\n" + "----------\n" + "v : array_like\n" + " Order of the Struve function (float).\n" + "x : array_like\n" + " Argument of the Struve function (float; must be positive unless `v` is\n" + " an integer).\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "H : scalar or ndarray\n" + " Value of the Struve function of order `v` at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "modstruve: Modified Struve function\n" + "\n" + "Notes\n" + "-----\n" + "Three methods discussed in [1]_ are used to evaluate the Struve function:\n" + "\n" + "- power series\n" + "- expansion in Bessel functions (if :math:`|z| < |v| + 20`)\n" + "- asymptotic large-z expansion (if :math:`z \\geq 0.7v + 12`)\n" + "\n" + "Rounding errors are estimated based on the largest terms in the sums, and\n" + "the result associated with the smallest error is returned.\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] NIST Digital Library of Mathematical Functions\n" + " https://dlmf.nist.gov/11\n" + "\n" + "Examples\n" + "--------\n" + "Calculate the Struve function of order 1 at 2.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import struve\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> struve(1, 2.)\n" + "0.6467637282835622\n" + "\n" + "Calculate the Struve function at 2 for orders 1, 2 and 3 by providing\n" + "a list for the order parameter `v`.\n" + "\n" + ">>> struve([1, 2, 3], 2.)\n" + "array([0.64676373, 0.28031806, 0.08363767])\n" + "\n" + "Calculate the Struve function of order 1 for several points by providing\n" + "an array for `x`.\n" + "\n" + ">>> points = np.array([2., 5., 8.])\n" + ">>> struve(1, points)\n" + "array([0.64676373, 0.80781195, 0.48811605])\n" + "\n" + "Compute the Struve function for several orders at several points by\n" + "providing arrays for `v` and `z`. The arrays have to be broadcastable\n" + "to the correct shapes.\n" + "\n" + ">>> orders = np.array([[1], [2], [3]])\n" + ">>> points.shape, orders.shape\n" + "((3,), (3, 1))\n" + "\n" + ">>> struve(orders, points)\n" + "array([[0.64676373, 0.80781195, 0.48811605],\n" + " [0.28031806, 1.56937455, 1.51769363],\n" + " [0.08363767, 1.50872065, 2.98697513]])\n" + "\n" + "Plot the Struve functions of order 0 to 3 from -10 to 10.\n" + "\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(-10., 10., 1000)\n" + ">>> for i in range(4):\n" + "... 
ax.plot(x, struve(i, x), label=f'$H_{i!r}$')\n" + ">>> ax.legend(ncol=2)\n" + ">>> ax.set_xlim(-10, 10)\n" + ">>> ax.set_title(r\"Struve functions $H_{\\nu}$\")\n" + ">>> plt.show()") +ufunc_struve_loops[0] = loop_d_dd__As_ff_f +ufunc_struve_loops[1] = loop_d_dd__As_dd_d +ufunc_struve_types[0] = NPY_FLOAT +ufunc_struve_types[1] = NPY_FLOAT +ufunc_struve_types[2] = NPY_FLOAT +ufunc_struve_types[3] = NPY_DOUBLE +ufunc_struve_types[4] = NPY_DOUBLE +ufunc_struve_types[5] = NPY_DOUBLE +ufunc_struve_ptr[2*0] = _func_struve_h +ufunc_struve_ptr[2*0+1] = ("struve") +ufunc_struve_ptr[2*1] = _func_struve_h +ufunc_struve_ptr[2*1+1] = ("struve") +ufunc_struve_data[0] = &ufunc_struve_ptr[2*0] +ufunc_struve_data[1] = &ufunc_struve_ptr[2*1] +struve = np.PyUFunc_FromFuncAndData(ufunc_struve_loops, ufunc_struve_data, ufunc_struve_types, 2, 2, 1, 0, "struve", ufunc_struve_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_tandg_loops[2] +cdef void *ufunc_tandg_ptr[4] +cdef void *ufunc_tandg_data[2] +cdef char ufunc_tandg_types[4] +cdef char *ufunc_tandg_doc = ( + "tandg(x, out=None)\n" + "\n" + "Tangent of angle `x` given in degrees.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Angle, given in degrees.\n" + "out : ndarray, optional\n" + " Optional output array for the function results.\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Tangent at the input.\n" + "\n" + "See Also\n" + "--------\n" + "sindg, cosdg, cotdg\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import scipy.special as sc\n" + "\n" + "It is more accurate than using tangent directly.\n" + "\n" + ">>> x = 180 * np.arange(3)\n" + ">>> sc.tandg(x)\n" + "array([0., 0., 0.])\n" + ">>> np.tan(x * np.pi / 180)\n" + "array([ 0.0000000e+00, -1.2246468e-16, -2.4492936e-16])") +ufunc_tandg_loops[0] = loop_d_d__As_f_f +ufunc_tandg_loops[1] = loop_d_d__As_d_d +ufunc_tandg_types[0] = NPY_FLOAT +ufunc_tandg_types[1] = NPY_FLOAT +ufunc_tandg_types[2] = NPY_DOUBLE 
+ufunc_tandg_types[3] = NPY_DOUBLE +ufunc_tandg_ptr[2*0] = _func_tandg +ufunc_tandg_ptr[2*0+1] = ("tandg") +ufunc_tandg_ptr[2*1] = _func_tandg +ufunc_tandg_ptr[2*1+1] = ("tandg") +ufunc_tandg_data[0] = &ufunc_tandg_ptr[2*0] +ufunc_tandg_data[1] = &ufunc_tandg_ptr[2*1] +tandg = np.PyUFunc_FromFuncAndData(ufunc_tandg_loops, ufunc_tandg_data, ufunc_tandg_types, 2, 1, 1, 0, "tandg", ufunc_tandg_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_tklmbda_loops[2] +cdef void *ufunc_tklmbda_ptr[4] +cdef void *ufunc_tklmbda_data[2] +cdef char ufunc_tklmbda_types[6] +cdef char *ufunc_tklmbda_doc = ( + "tklmbda(x, lmbda, out=None)\n" + "\n" + "Cumulative distribution function of the Tukey lambda distribution.\n" + "\n" + "Parameters\n" + "----------\n" + "x, lmbda : array_like\n" + " Parameters\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "cdf : scalar or ndarray\n" + " Value of the Tukey lambda CDF\n" + "\n" + "See Also\n" + "--------\n" + "scipy.stats.tukeylambda : Tukey lambda distribution\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> from scipy.special import tklmbda, expit\n" + "\n" + "Compute the cumulative distribution function (CDF) of the Tukey lambda\n" + "distribution at several ``x`` values for `lmbda` = -1.5.\n" + "\n" + ">>> x = np.linspace(-2, 2, 9)\n" + ">>> x\n" + "array([-2. , -1.5, -1. , -0.5, 0. , 0.5, 1. , 1.5, 2. 
])\n" + ">>> tklmbda(x, -1.5)\n" + "array([0.34688734, 0.3786554 , 0.41528805, 0.45629737, 0.5 ,\n" + " 0.54370263, 0.58471195, 0.6213446 , 0.65311266])\n" + "\n" + "When `lmbda` is 0, the function is the logistic sigmoid function,\n" + "which is implemented in `scipy.special` as `expit`.\n" + "\n" + ">>> tklmbda(x, 0)\n" + "array([0.11920292, 0.18242552, 0.26894142, 0.37754067, 0.5 ,\n" + " 0.62245933, 0.73105858, 0.81757448, 0.88079708])\n" + ">>> expit(x)\n" + "array([0.11920292, 0.18242552, 0.26894142, 0.37754067, 0.5 ,\n" + " 0.62245933, 0.73105858, 0.81757448, 0.88079708])\n" + "\n" + "When `lmbda` is 1, the Tukey lambda distribution is uniform on the\n" + "interval [-1, 1], so the CDF increases linearly.\n" + "\n" + ">>> t = np.linspace(-1, 1, 9)\n" + ">>> tklmbda(t, 1)\n" + "array([0. , 0.125, 0.25 , 0.375, 0.5 , 0.625, 0.75 , 0.875, 1. ])\n" + "\n" + "In the following, we generate plots for several values of `lmbda`.\n" + "\n" + "The first figure shows graphs for `lmbda` <= 0.\n" + "\n" + ">>> styles = ['-', '-.', '--', ':']\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(-12, 12, 500)\n" + ">>> for k, lmbda in enumerate([-1.0, -0.5, 0.0]):\n" + "... y = tklmbda(x, lmbda)\n" + "... ax.plot(x, y, styles[k], label=rf'$\\lambda$ = {lmbda:-4.1f}')\n" + "\n" + ">>> ax.set_title(r'tklmbda(x, $\\lambda$)')\n" + ">>> ax.set_label('x')\n" + ">>> ax.legend(framealpha=1, shadow=True)\n" + ">>> ax.grid(True)\n" + "\n" + "The second figure shows graphs for `lmbda` > 0. The dots in the\n" + "graphs show the bounds of the support of the distribution.\n" + "\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(-4.2, 4.2, 500)\n" + ">>> lmbdas = [0.25, 0.5, 1.0, 1.5]\n" + ">>> for k, lmbda in enumerate(lmbdas):\n" + "... y = tklmbda(x, lmbda)\n" + "... ax.plot(x, y, styles[k], label=fr'$\\lambda$ = {lmbda}')\n" + "\n" + ">>> ax.set_prop_cycle(None)\n" + ">>> for lmbda in lmbdas:\n" + "... 
ax.plot([-1/lmbda, 1/lmbda], [0, 1], '.', ms=8)\n" + "\n" + ">>> ax.set_title(r'tklmbda(x, $\\lambda$)')\n" + ">>> ax.set_xlabel('x')\n" + ">>> ax.legend(framealpha=1, shadow=True)\n" + ">>> ax.grid(True)\n" + "\n" + ">>> plt.tight_layout()\n" + ">>> plt.show()\n" + "\n" + "The CDF of the Tukey lambda distribution is also implemented as the\n" + "``cdf`` method of `scipy.stats.tukeylambda`. In the following,\n" + "``tukeylambda.cdf(x, -0.5)`` and ``tklmbda(x, -0.5)`` compute the\n" + "same values:\n" + "\n" + ">>> from scipy.stats import tukeylambda\n" + ">>> x = np.linspace(-2, 2, 9)\n" + "\n" + ">>> tukeylambda.cdf(x, -0.5)\n" + "array([0.21995157, 0.27093858, 0.33541677, 0.41328161, 0.5 ,\n" + " 0.58671839, 0.66458323, 0.72906142, 0.78004843])\n" + "\n" + ">>> tklmbda(x, -0.5)\n" + "array([0.21995157, 0.27093858, 0.33541677, 0.41328161, 0.5 ,\n" + " 0.58671839, 0.66458323, 0.72906142, 0.78004843])\n" + "\n" + "The implementation in ``tukeylambda`` also provides location and scale\n" + "parameters, and other methods such as ``pdf()`` (the probability\n" + "density function) and ``ppf()`` (the inverse of the CDF), so for\n" + "working with the Tukey lambda distribution, ``tukeylambda`` is more\n" + "generally useful. 
The primary advantage of ``tklmbda`` is that it is\n" + "significantly faster than ``tukeylambda.cdf``.") +ufunc_tklmbda_loops[0] = loop_d_dd__As_ff_f +ufunc_tklmbda_loops[1] = loop_d_dd__As_dd_d +ufunc_tklmbda_types[0] = NPY_FLOAT +ufunc_tklmbda_types[1] = NPY_FLOAT +ufunc_tklmbda_types[2] = NPY_FLOAT +ufunc_tklmbda_types[3] = NPY_DOUBLE +ufunc_tklmbda_types[4] = NPY_DOUBLE +ufunc_tklmbda_types[5] = NPY_DOUBLE +ufunc_tklmbda_ptr[2*0] = _func_tukeylambdacdf +ufunc_tklmbda_ptr[2*0+1] = ("tklmbda") +ufunc_tklmbda_ptr[2*1] = _func_tukeylambdacdf +ufunc_tklmbda_ptr[2*1+1] = ("tklmbda") +ufunc_tklmbda_data[0] = &ufunc_tklmbda_ptr[2*0] +ufunc_tklmbda_data[1] = &ufunc_tklmbda_ptr[2*1] +tklmbda = np.PyUFunc_FromFuncAndData(ufunc_tklmbda_loops, ufunc_tklmbda_data, ufunc_tklmbda_types, 2, 2, 1, 0, "tklmbda", ufunc_tklmbda_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_voigt_profile_loops[2] +cdef void *ufunc_voigt_profile_ptr[4] +cdef void *ufunc_voigt_profile_data[2] +cdef char ufunc_voigt_profile_types[8] +cdef char *ufunc_voigt_profile_doc = ( + "voigt_profile(x, sigma, gamma, out=None)\n" + "\n" + "Voigt profile.\n" + "\n" + "The Voigt profile is a convolution of a 1-D Normal distribution with\n" + "standard deviation ``sigma`` and a 1-D Cauchy distribution with half-width at\n" + "half-maximum ``gamma``.\n" + "\n" + "If ``sigma = 0``, PDF of Cauchy distribution is returned.\n" + "Conversely, if ``gamma = 0``, PDF of Normal distribution is returned.\n" + "If ``sigma = gamma = 0``, the return value is ``Inf`` for ``x = 0``,\n" + "and ``0`` for all other ``x``.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Real argument\n" + "sigma : array_like\n" + " The standard deviation of the Normal distribution part\n" + "gamma : array_like\n" + " The half-width at half-maximum of the Cauchy distribution part\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " The 
Voigt profile at the given arguments\n" + "\n" + "See Also\n" + "--------\n" + "wofz : Faddeeva function\n" + "\n" + "Notes\n" + "-----\n" + "It can be expressed in terms of Faddeeva function\n" + "\n" + ".. math:: V(x; \\sigma, \\gamma) = \\frac{Re[w(z)]}{\\sigma\\sqrt{2\\pi}},\n" + ".. math:: z = \\frac{x + i\\gamma}{\\sqrt{2}\\sigma}\n" + "\n" + "where :math:`w(z)` is the Faddeeva function.\n" + "\n" + "References\n" + "----------\n" + ".. [1] https://en.wikipedia.org/wiki/Voigt_profile\n" + "\n" + "Examples\n" + "--------\n" + "Calculate the function at point 2 for ``sigma=1`` and ``gamma=1``.\n" + "\n" + ">>> from scipy.special import voigt_profile\n" + ">>> import numpy as np\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> voigt_profile(2, 1., 1.)\n" + "0.09071519942627544\n" + "\n" + "Calculate the function at several points by providing a NumPy array\n" + "for `x`.\n" + "\n" + ">>> values = np.array([-2., 0., 5])\n" + ">>> voigt_profile(values, 1., 1.)\n" + "array([0.0907152 , 0.20870928, 0.01388492])\n" + "\n" + "Plot the function for different parameter sets.\n" + "\n" + ">>> fig, ax = plt.subplots(figsize=(8, 8))\n" + ">>> x = np.linspace(-10, 10, 500)\n" + ">>> parameters_list = [(1.5, 0., \"solid\"), (1.3, 0.5, \"dashed\"),\n" + "... (0., 1.8, \"dotted\"), (1., 1., \"dashdot\")]\n" + ">>> for params in parameters_list:\n" + "... sigma, gamma, linestyle = params\n" + "... voigt = voigt_profile(x, sigma, gamma)\n" + "... ax.plot(x, voigt, label=rf\"$\\sigma={sigma},\\, \\gamma={gamma}$\",\n" + "... ls=linestyle)\n" + ">>> ax.legend()\n" + ">>> plt.show()\n" + "\n" + "Verify visually that the Voigt profile indeed arises as the convolution\n" + "of a normal and a Cauchy distribution.\n" + "\n" + ">>> from scipy.signal import convolve\n" + ">>> x, dx = np.linspace(-10, 10, 500, retstep=True)\n" + ">>> def gaussian(x, sigma):\n" + "... return np.exp(-0.5 * x**2/sigma**2)/(sigma * np.sqrt(2*np.pi))\n" + ">>> def cauchy(x, gamma):\n" + "... 
return gamma/(np.pi * (np.square(x)+gamma**2))\n" + ">>> sigma = 2\n" + ">>> gamma = 1\n" + ">>> gauss_profile = gaussian(x, sigma)\n" + ">>> cauchy_profile = cauchy(x, gamma)\n" + ">>> convolved = dx * convolve(cauchy_profile, gauss_profile, mode=\"same\")\n" + ">>> voigt = voigt_profile(x, sigma, gamma)\n" + ">>> fig, ax = plt.subplots(figsize=(8, 8))\n" + ">>> ax.plot(x, gauss_profile, label=\"Gauss: $G$\", c='b')\n" + ">>> ax.plot(x, cauchy_profile, label=\"Cauchy: $C$\", c='y', ls=\"dashed\")\n" + ">>> xx = 0.5*(x[1:] + x[:-1]) # midpoints\n" + ">>> ax.plot(xx, convolved[1:], label=\"Convolution: $G * C$\", ls='dashdot',\n" + "... c='k')\n" + ">>> ax.plot(x, voigt, label=\"Voigt\", ls='dotted', c='r')\n" + ">>> ax.legend()\n" + ">>> plt.show()") +ufunc_voigt_profile_loops[0] = loop_d_ddd__As_fff_f +ufunc_voigt_profile_loops[1] = loop_d_ddd__As_ddd_d +ufunc_voigt_profile_types[0] = NPY_FLOAT +ufunc_voigt_profile_types[1] = NPY_FLOAT +ufunc_voigt_profile_types[2] = NPY_FLOAT +ufunc_voigt_profile_types[3] = NPY_FLOAT +ufunc_voigt_profile_types[4] = NPY_DOUBLE +ufunc_voigt_profile_types[5] = NPY_DOUBLE +ufunc_voigt_profile_types[6] = NPY_DOUBLE +ufunc_voigt_profile_types[7] = NPY_DOUBLE +ufunc_voigt_profile_ptr[2*0] = scipy.special._ufuncs_cxx._export_faddeeva_voigt_profile +ufunc_voigt_profile_ptr[2*0+1] = ("voigt_profile") +ufunc_voigt_profile_ptr[2*1] = scipy.special._ufuncs_cxx._export_faddeeva_voigt_profile +ufunc_voigt_profile_ptr[2*1+1] = ("voigt_profile") +ufunc_voigt_profile_data[0] = &ufunc_voigt_profile_ptr[2*0] +ufunc_voigt_profile_data[1] = &ufunc_voigt_profile_ptr[2*1] +voigt_profile = np.PyUFunc_FromFuncAndData(ufunc_voigt_profile_loops, ufunc_voigt_profile_data, ufunc_voigt_profile_types, 2, 3, 1, 0, "voigt_profile", ufunc_voigt_profile_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_wofz_loops[2] +cdef void *ufunc_wofz_ptr[4] +cdef void *ufunc_wofz_data[2] +cdef char ufunc_wofz_types[4] +cdef char *ufunc_wofz_doc = ( + "wofz(z, out=None)\n" + "\n" 
+ "Faddeeva function\n" + "\n" + "Returns the value of the Faddeeva function for complex argument::\n" + "\n" + " exp(-z**2) * erfc(-i*z)\n" + "\n" + "Parameters\n" + "----------\n" + "z : array_like\n" + " complex argument\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Value of the Faddeeva function\n" + "\n" + "See Also\n" + "--------\n" + "dawsn, erf, erfc, erfcx, erfi\n" + "\n" + "References\n" + "----------\n" + ".. [1] Steven G. Johnson, Faddeeva W function implementation.\n" + " http://ab-initio.mit.edu/Faddeeva\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy import special\n" + ">>> import matplotlib.pyplot as plt\n" + "\n" + ">>> x = np.linspace(-3, 3)\n" + ">>> z = special.wofz(x)\n" + "\n" + ">>> plt.plot(x, z.real, label='wofz(x).real')\n" + ">>> plt.plot(x, z.imag, label='wofz(x).imag')\n" + ">>> plt.xlabel('$x$')\n" + ">>> plt.legend(framealpha=1, shadow=True)\n" + ">>> plt.grid(alpha=0.25)\n" + ">>> plt.show()") +ufunc_wofz_loops[0] = loop_D_D__As_F_F +ufunc_wofz_loops[1] = loop_D_D__As_D_D +ufunc_wofz_types[0] = NPY_CFLOAT +ufunc_wofz_types[1] = NPY_CFLOAT +ufunc_wofz_types[2] = NPY_CDOUBLE +ufunc_wofz_types[3] = NPY_CDOUBLE +ufunc_wofz_ptr[2*0] = scipy.special._ufuncs_cxx._export_faddeeva_w +ufunc_wofz_ptr[2*0+1] = ("wofz") +ufunc_wofz_ptr[2*1] = scipy.special._ufuncs_cxx._export_faddeeva_w +ufunc_wofz_ptr[2*1+1] = ("wofz") +ufunc_wofz_data[0] = &ufunc_wofz_ptr[2*0] +ufunc_wofz_data[1] = &ufunc_wofz_ptr[2*1] +wofz = np.PyUFunc_FromFuncAndData(ufunc_wofz_loops, ufunc_wofz_data, ufunc_wofz_types, 2, 1, 1, 0, "wofz", ufunc_wofz_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_wright_bessel_loops[2] +cdef void *ufunc_wright_bessel_ptr[4] +cdef void *ufunc_wright_bessel_data[2] +cdef char ufunc_wright_bessel_types[8] +cdef char *ufunc_wright_bessel_doc = ( + "wright_bessel(a, b, x, out=None)\n" + "\n" + "Wright's 
generalized Bessel function.\n" + "\n" + "Wright's generalized Bessel function is an entire function and defined as\n" + "\n" + ".. math:: \\Phi(a, b; x) = \\sum_{k=0}^\\infty \\frac{x^k}{k! \\Gamma(a k + b)}\n" + "\n" + "See Also [1].\n" + "\n" + "Parameters\n" + "----------\n" + "a : array_like of float\n" + " a >= 0\n" + "b : array_like of float\n" + " b >= 0\n" + "x : array_like of float\n" + " x >= 0\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Value of the Wright's generalized Bessel function\n" + "\n" + "Notes\n" + "-----\n" + "Due to the complexity of the function with its three parameters, only\n" + "non-negative arguments are implemented.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Digital Library of Mathematical Functions, 10.46.\n" + " https://dlmf.nist.gov/10.46.E1\n" + "\n" + "Examples\n" + "--------\n" + ">>> from scipy.special import wright_bessel\n" + ">>> a, b, x = 1.5, 1.1, 2.5\n" + ">>> wright_bessel(a, b-1, x)\n" + "4.5314465939443025\n" + "\n" + "Now, let us verify the relation\n" + "\n" + ".. 
math:: \\Phi(a, b-1; x) = a x \\Phi(a, b+a; x) + (b-1) \\Phi(a, b; x)\n" + "\n" + ">>> a * x * wright_bessel(a, b+a, x) + (b-1) * wright_bessel(a, b, x)\n" + "4.5314465939443025") +ufunc_wright_bessel_loops[0] = loop_d_ddd__As_fff_f +ufunc_wright_bessel_loops[1] = loop_d_ddd__As_ddd_d +ufunc_wright_bessel_types[0] = NPY_FLOAT +ufunc_wright_bessel_types[1] = NPY_FLOAT +ufunc_wright_bessel_types[2] = NPY_FLOAT +ufunc_wright_bessel_types[3] = NPY_FLOAT +ufunc_wright_bessel_types[4] = NPY_DOUBLE +ufunc_wright_bessel_types[5] = NPY_DOUBLE +ufunc_wright_bessel_types[6] = NPY_DOUBLE +ufunc_wright_bessel_types[7] = NPY_DOUBLE +ufunc_wright_bessel_ptr[2*0] = _func_wright_bessel_scalar +ufunc_wright_bessel_ptr[2*0+1] = ("wright_bessel") +ufunc_wright_bessel_ptr[2*1] = _func_wright_bessel_scalar +ufunc_wright_bessel_ptr[2*1+1] = ("wright_bessel") +ufunc_wright_bessel_data[0] = &ufunc_wright_bessel_ptr[2*0] +ufunc_wright_bessel_data[1] = &ufunc_wright_bessel_ptr[2*1] +wright_bessel = np.PyUFunc_FromFuncAndData(ufunc_wright_bessel_loops, ufunc_wright_bessel_data, ufunc_wright_bessel_types, 2, 3, 1, 0, "wright_bessel", ufunc_wright_bessel_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_wrightomega_loops[4] +cdef void *ufunc_wrightomega_ptr[8] +cdef void *ufunc_wrightomega_data[4] +cdef char ufunc_wrightomega_types[8] +cdef char *ufunc_wrightomega_doc = ( + "wrightomega(z, out=None)\n" + "\n" + "Wright Omega function.\n" + "\n" + "Defined as the solution to\n" + "\n" + ".. 
math::\n" + "\n" + " \\omega + \\log(\\omega) = z\n" + "\n" + "where :math:`\\log` is the principal branch of the complex logarithm.\n" + "\n" + "Parameters\n" + "----------\n" + "z : array_like\n" + " Points at which to evaluate the Wright Omega function\n" + "out : ndarray, optional\n" + " Optional output array for the function values\n" + "\n" + "Returns\n" + "-------\n" + "omega : scalar or ndarray\n" + " Values of the Wright Omega function\n" + "\n" + "See Also\n" + "--------\n" + "lambertw : The Lambert W function\n" + "\n" + "Notes\n" + "-----\n" + ".. versionadded:: 0.19.0\n" + "\n" + "The function can also be defined as\n" + "\n" + ".. math::\n" + "\n" + " \\omega(z) = W_{K(z)}(e^z)\n" + "\n" + "where :math:`K(z) = \\lceil (\\Im(z) - \\pi)/(2\\pi) \\rceil` is the\n" + "unwinding number and :math:`W` is the Lambert W function.\n" + "\n" + "The implementation here is taken from [1]_.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Lawrence, Corless, and Jeffrey, \"Algorithm 917: Complex\n" + " Double-Precision Evaluation of the Wright :math:`\\omega`\n" + " Function.\" ACM Transactions on Mathematical Software,\n" + " 2012. :doi:`10.1145/2168773.2168779`.\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy.special import wrightomega, lambertw\n" + "\n" + ">>> wrightomega([-2, -1, 0, 1, 2])\n" + "array([0.12002824, 0.27846454, 0.56714329, 1. 
, 1.5571456 ])\n" + "\n" + "Complex input:\n" + "\n" + ">>> wrightomega(3 + 5j)\n" + "(1.5804428632097158+3.8213626783287937j)\n" + "\n" + "Verify that ``wrightomega(z)`` satisfies ``w + log(w) = z``:\n" + "\n" + ">>> w = -5 + 4j\n" + ">>> wrightomega(w + np.log(w))\n" + "(-5+4j)\n" + "\n" + "Verify the connection to ``lambertw``:\n" + "\n" + ">>> z = 0.5 + 3j\n" + ">>> wrightomega(z)\n" + "(0.0966015889280649+1.4937828458191993j)\n" + ">>> lambertw(np.exp(z))\n" + "(0.09660158892806493+1.4937828458191993j)\n" + "\n" + ">>> z = 0.5 + 4j\n" + ">>> wrightomega(z)\n" + "(-0.3362123489037213+2.282986001579032j)\n" + ">>> lambertw(np.exp(z), k=1)\n" + "(-0.33621234890372115+2.282986001579032j)") +ufunc_wrightomega_loops[0] = loop_d_d__As_f_f +ufunc_wrightomega_loops[1] = loop_d_d__As_d_d +ufunc_wrightomega_loops[2] = loop_D_D__As_F_F +ufunc_wrightomega_loops[3] = loop_D_D__As_D_D +ufunc_wrightomega_types[0] = NPY_FLOAT +ufunc_wrightomega_types[1] = NPY_FLOAT +ufunc_wrightomega_types[2] = NPY_DOUBLE +ufunc_wrightomega_types[3] = NPY_DOUBLE +ufunc_wrightomega_types[4] = NPY_CFLOAT +ufunc_wrightomega_types[5] = NPY_CFLOAT +ufunc_wrightomega_types[6] = NPY_CDOUBLE +ufunc_wrightomega_types[7] = NPY_CDOUBLE +ufunc_wrightomega_ptr[2*0] = scipy.special._ufuncs_cxx._export_wrightomega_real +ufunc_wrightomega_ptr[2*0+1] = ("wrightomega") +ufunc_wrightomega_ptr[2*1] = scipy.special._ufuncs_cxx._export_wrightomega_real +ufunc_wrightomega_ptr[2*1+1] = ("wrightomega") +ufunc_wrightomega_ptr[2*2] = scipy.special._ufuncs_cxx._export_wrightomega +ufunc_wrightomega_ptr[2*2+1] = ("wrightomega") +ufunc_wrightomega_ptr[2*3] = scipy.special._ufuncs_cxx._export_wrightomega +ufunc_wrightomega_ptr[2*3+1] = ("wrightomega") +ufunc_wrightomega_data[0] = &ufunc_wrightomega_ptr[2*0] +ufunc_wrightomega_data[1] = &ufunc_wrightomega_ptr[2*1] +ufunc_wrightomega_data[2] = &ufunc_wrightomega_ptr[2*2] +ufunc_wrightomega_data[3] = &ufunc_wrightomega_ptr[2*3] +wrightomega = 
np.PyUFunc_FromFuncAndData(ufunc_wrightomega_loops, ufunc_wrightomega_data, ufunc_wrightomega_types, 4, 1, 1, 0, "wrightomega", ufunc_wrightomega_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_xlog1py_loops[4] +cdef void *ufunc_xlog1py_ptr[8] +cdef void *ufunc_xlog1py_data[4] +cdef char ufunc_xlog1py_types[12] +cdef char *ufunc_xlog1py_doc = ( + "xlog1py(x, y, out=None)\n" + "\n" + "Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Multiplier\n" + "y : array_like\n" + " Argument\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "z : scalar or ndarray\n" + " Computed x*log1p(y)\n" + "\n" + "Notes\n" + "-----\n" + "\n" + ".. versionadded:: 0.13.0\n" + "\n" + "Examples\n" + "--------\n" + "This example shows how the function can be used to calculate the log of\n" + "the probability mass function for a geometric discrete random variable.\n" + "The probability mass function of the geometric distribution is defined\n" + "as follows:\n" + "\n" + ".. math:: f(k) = (1-p)^{k-1} p\n" + "\n" + "where :math:`p` is the probability of a single success\n" + "and :math:`1-p` is the probability of a single failure\n" + "and :math:`k` is the number of trials to get the first success.\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import xlog1py\n" + ">>> p = 0.5\n" + ">>> k = 100\n" + ">>> _pmf = np.power(1 - p, k - 1) * p\n" + ">>> _pmf\n" + "7.888609052210118e-31\n" + "\n" + "If we take k as a relatively large number the value of the probability\n" + "mass function can become very low. 
In such cases taking the log of the\n" + "pmf would be more suitable as the log function can change the values\n" + "to a scale that is more appropriate to work with.\n" + "\n" + ">>> _log_pmf = xlog1py(k - 1, -p) + np.log(p)\n" + ">>> _log_pmf\n" + "-69.31471805599453\n" + "\n" + "We can confirm that we get a value close to the original pmf value by\n" + "taking the exponential of the log pmf.\n" + "\n" + ">>> _orig_pmf = np.exp(_log_pmf)\n" + ">>> np.isclose(_pmf, _orig_pmf)\n" + "True") +ufunc_xlog1py_loops[0] = loop_d_dd__As_ff_f +ufunc_xlog1py_loops[1] = loop_d_dd__As_dd_d +ufunc_xlog1py_loops[2] = loop_D_DD__As_FF_F +ufunc_xlog1py_loops[3] = loop_D_DD__As_DD_D +ufunc_xlog1py_types[0] = NPY_FLOAT +ufunc_xlog1py_types[1] = NPY_FLOAT +ufunc_xlog1py_types[2] = NPY_FLOAT +ufunc_xlog1py_types[3] = NPY_DOUBLE +ufunc_xlog1py_types[4] = NPY_DOUBLE +ufunc_xlog1py_types[5] = NPY_DOUBLE +ufunc_xlog1py_types[6] = NPY_CFLOAT +ufunc_xlog1py_types[7] = NPY_CFLOAT +ufunc_xlog1py_types[8] = NPY_CFLOAT +ufunc_xlog1py_types[9] = NPY_CDOUBLE +ufunc_xlog1py_types[10] = NPY_CDOUBLE +ufunc_xlog1py_types[11] = NPY_CDOUBLE +ufunc_xlog1py_ptr[2*0] = _func_xlog1py[double] +ufunc_xlog1py_ptr[2*0+1] = ("xlog1py") +ufunc_xlog1py_ptr[2*1] = _func_xlog1py[double] +ufunc_xlog1py_ptr[2*1+1] = ("xlog1py") +ufunc_xlog1py_ptr[2*2] = _func_xlog1py[double_complex] +ufunc_xlog1py_ptr[2*2+1] = ("xlog1py") +ufunc_xlog1py_ptr[2*3] = _func_xlog1py[double_complex] +ufunc_xlog1py_ptr[2*3+1] = ("xlog1py") +ufunc_xlog1py_data[0] = &ufunc_xlog1py_ptr[2*0] +ufunc_xlog1py_data[1] = &ufunc_xlog1py_ptr[2*1] +ufunc_xlog1py_data[2] = &ufunc_xlog1py_ptr[2*2] +ufunc_xlog1py_data[3] = &ufunc_xlog1py_ptr[2*3] +xlog1py = np.PyUFunc_FromFuncAndData(ufunc_xlog1py_loops, ufunc_xlog1py_data, ufunc_xlog1py_types, 4, 2, 1, 0, "xlog1py", ufunc_xlog1py_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_xlogy_loops[4] +cdef void *ufunc_xlogy_ptr[8] +cdef void *ufunc_xlogy_data[4] +cdef char ufunc_xlogy_types[12] +cdef char 
*ufunc_xlogy_doc = ( + "xlogy(x, y, out=None)\n" + "\n" + "Compute ``x*log(y)`` so that the result is 0 if ``x = 0``.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Multiplier\n" + "y : array_like\n" + " Argument\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "z : scalar or ndarray\n" + " Computed x*log(y)\n" + "\n" + "Notes\n" + "-----\n" + "The log function used in the computation is the natural log.\n" + "\n" + ".. versionadded:: 0.13.0\n" + "\n" + "Examples\n" + "--------\n" + "We can use this function to calculate the binary logistic loss also\n" + "known as the binary cross entropy. This loss function is used for\n" + "binary classification problems and is defined as:\n" + "\n" + ".. math::\n" + " L = 1/n * \\sum_{i=0}^n -(y_i*log(y\\_pred_i) + (1-y_i)*log(1-y\\_pred_i))\n" + "\n" + "We can define the parameters `x` and `y` as y and y_pred respectively.\n" + "y is the array of the actual labels which over here can be either 0 or 1.\n" + "y_pred is the array of the predicted probabilities with respect to\n" + "the positive class (1).\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import xlogy\n" + ">>> y = np.array([0, 1, 0, 1, 1, 0])\n" + ">>> y_pred = np.array([0.3, 0.8, 0.4, 0.7, 0.9, 0.2])\n" + ">>> n = len(y)\n" + ">>> loss = -(xlogy(y, y_pred) + xlogy(1 - y, 1 - y_pred)).sum()\n" + ">>> loss /= n\n" + ">>> loss\n" + "0.29597052165495025\n" + "\n" + "A lower loss is usually better as it indicates that the predictions are\n" + "similar to the actual labels. 
In this example since our predicted\n" + "probabilities are close to the actual labels, we get an overall loss\n" + "that is reasonably low and appropriate.") +ufunc_xlogy_loops[0] = loop_d_dd__As_ff_f +ufunc_xlogy_loops[1] = loop_d_dd__As_dd_d +ufunc_xlogy_loops[2] = loop_D_DD__As_FF_F +ufunc_xlogy_loops[3] = loop_D_DD__As_DD_D +ufunc_xlogy_types[0] = NPY_FLOAT +ufunc_xlogy_types[1] = NPY_FLOAT +ufunc_xlogy_types[2] = NPY_FLOAT +ufunc_xlogy_types[3] = NPY_DOUBLE +ufunc_xlogy_types[4] = NPY_DOUBLE +ufunc_xlogy_types[5] = NPY_DOUBLE +ufunc_xlogy_types[6] = NPY_CFLOAT +ufunc_xlogy_types[7] = NPY_CFLOAT +ufunc_xlogy_types[8] = NPY_CFLOAT +ufunc_xlogy_types[9] = NPY_CDOUBLE +ufunc_xlogy_types[10] = NPY_CDOUBLE +ufunc_xlogy_types[11] = NPY_CDOUBLE +ufunc_xlogy_ptr[2*0] = _func_xlogy[double] +ufunc_xlogy_ptr[2*0+1] = ("xlogy") +ufunc_xlogy_ptr[2*1] = _func_xlogy[double] +ufunc_xlogy_ptr[2*1+1] = ("xlogy") +ufunc_xlogy_ptr[2*2] = _func_xlogy[double_complex] +ufunc_xlogy_ptr[2*2+1] = ("xlogy") +ufunc_xlogy_ptr[2*3] = _func_xlogy[double_complex] +ufunc_xlogy_ptr[2*3+1] = ("xlogy") +ufunc_xlogy_data[0] = &ufunc_xlogy_ptr[2*0] +ufunc_xlogy_data[1] = &ufunc_xlogy_ptr[2*1] +ufunc_xlogy_data[2] = &ufunc_xlogy_ptr[2*2] +ufunc_xlogy_data[3] = &ufunc_xlogy_ptr[2*3] +xlogy = np.PyUFunc_FromFuncAndData(ufunc_xlogy_loops, ufunc_xlogy_data, ufunc_xlogy_types, 4, 2, 1, 0, "xlogy", ufunc_xlogy_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_y0_loops[2] +cdef void *ufunc_y0_ptr[4] +cdef void *ufunc_y0_data[2] +cdef char ufunc_y0_types[4] +cdef char *ufunc_y0_doc = ( + "y0(x, out=None)\n" + "\n" + "Bessel function of the second kind of order 0.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Argument (float).\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "Y : scalar or ndarray\n" + " Value of the Bessel function of the second kind of order 0 at `x`.\n" + "\n" + "See Also\n" + "--------\n" + 
"j0: Bessel function of the first kind of order 0\n" + "yv: Bessel function of the first kind\n" + "\n" + "Notes\n" + "-----\n" + "The domain is divided into the intervals [0, 5] and (5, infinity). In the\n" + "first interval a rational approximation :math:`R(x)` is employed to\n" + "compute,\n" + "\n" + ".. math::\n" + "\n" + " Y_0(x) = R(x) + \\frac{2 \\log(x) J_0(x)}{\\pi},\n" + "\n" + "where :math:`J_0` is the Bessel function of the first kind of order 0.\n" + "\n" + "In the second interval, the Hankel asymptotic expansion is employed with\n" + "two rational functions of degree 6/6 and 7/7.\n" + "\n" + "This function is a wrapper for the Cephes [1]_ routine `y0`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "Calculate the function at one point:\n" + "\n" + ">>> from scipy.special import y0\n" + ">>> y0(1.)\n" + "0.08825696421567697\n" + "\n" + "Calculate at several points:\n" + "\n" + ">>> import numpy as np\n" + ">>> y0(np.array([0.5, 2., 3.]))\n" + "array([-0.44451873, 0.51037567, 0.37685001])\n" + "\n" + "Plot the function from 0 to 10.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(0., 10., 1000)\n" + ">>> y = y0(x)\n" + ">>> ax.plot(x, y)\n" + ">>> plt.show()") +ufunc_y0_loops[0] = loop_d_d__As_f_f +ufunc_y0_loops[1] = loop_d_d__As_d_d +ufunc_y0_types[0] = NPY_FLOAT +ufunc_y0_types[1] = NPY_FLOAT +ufunc_y0_types[2] = NPY_DOUBLE +ufunc_y0_types[3] = NPY_DOUBLE +ufunc_y0_ptr[2*0] = _func_y0 +ufunc_y0_ptr[2*0+1] = ("y0") +ufunc_y0_ptr[2*1] = _func_y0 +ufunc_y0_ptr[2*1+1] = ("y0") +ufunc_y0_data[0] = &ufunc_y0_ptr[2*0] +ufunc_y0_data[1] = &ufunc_y0_ptr[2*1] +y0 = np.PyUFunc_FromFuncAndData(ufunc_y0_loops, ufunc_y0_data, ufunc_y0_types, 2, 1, 1, 0, "y0", ufunc_y0_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_y1_loops[2] +cdef void *ufunc_y1_ptr[4] +cdef void 
*ufunc_y1_data[2] +cdef char ufunc_y1_types[4] +cdef char *ufunc_y1_doc = ( + "y1(x, out=None)\n" + "\n" + "Bessel function of the second kind of order 1.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like\n" + " Argument (float).\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "Y : scalar or ndarray\n" + " Value of the Bessel function of the second kind of order 1 at `x`.\n" + "\n" + "See Also\n" + "--------\n" + "j1: Bessel function of the first kind of order 1\n" + "yn: Bessel function of the second kind\n" + "yv: Bessel function of the second kind\n" + "\n" + "Notes\n" + "-----\n" + "The domain is divided into the intervals [0, 8] and (8, infinity). In the\n" + "first interval a 25 term Chebyshev expansion is used, and computing\n" + ":math:`J_1` (the Bessel function of the first kind) is required. In the\n" + "second, the asymptotic trigonometric representation is employed using two\n" + "rational functions of degree 5/5.\n" + "\n" + "This function is a wrapper for the Cephes [1]_ routine `y1`.\n" + "\n" + "References\n" + "----------\n" + ".. 
[1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "Calculate the function at one point:\n" + "\n" + ">>> from scipy.special import y1\n" + ">>> y1(1.)\n" + "-0.7812128213002888\n" + "\n" + "Calculate at several points:\n" + "\n" + ">>> import numpy as np\n" + ">>> y1(np.array([0.5, 2., 3.]))\n" + "array([-1.47147239, -0.10703243, 0.32467442])\n" + "\n" + "Plot the function from 0 to 10.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(0., 10., 1000)\n" + ">>> y = y1(x)\n" + ">>> ax.plot(x, y)\n" + ">>> plt.show()") +ufunc_y1_loops[0] = loop_d_d__As_f_f +ufunc_y1_loops[1] = loop_d_d__As_d_d +ufunc_y1_types[0] = NPY_FLOAT +ufunc_y1_types[1] = NPY_FLOAT +ufunc_y1_types[2] = NPY_DOUBLE +ufunc_y1_types[3] = NPY_DOUBLE +ufunc_y1_ptr[2*0] = _func_y1 +ufunc_y1_ptr[2*0+1] = ("y1") +ufunc_y1_ptr[2*1] = _func_y1 +ufunc_y1_ptr[2*1+1] = ("y1") +ufunc_y1_data[0] = &ufunc_y1_ptr[2*0] +ufunc_y1_data[1] = &ufunc_y1_ptr[2*1] +y1 = np.PyUFunc_FromFuncAndData(ufunc_y1_loops, ufunc_y1_data, ufunc_y1_types, 2, 1, 1, 0, "y1", ufunc_y1_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_yn_loops[3] +cdef void *ufunc_yn_ptr[6] +cdef void *ufunc_yn_data[3] +cdef char ufunc_yn_types[9] +cdef char *ufunc_yn_doc = ( + "yn(n, x, out=None)\n" + "\n" + "Bessel function of the second kind of integer order and real argument.\n" + "\n" + "Parameters\n" + "----------\n" + "n : array_like\n" + " Order (integer).\n" + "x : array_like\n" + " Argument (float).\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "Y : scalar or ndarray\n" + " Value of the Bessel function, :math:`Y_n(x)`.\n" + "\n" + "See Also\n" + "--------\n" + "yv : For real order and real or complex argument.\n" + "y0: faster implementation of this function for order 0\n" + "y1: faster implementation of this function for order 1\n" + "\n" 
+ "Notes\n" + "-----\n" + "Wrapper for the Cephes [1]_ routine `yn`.\n" + "\n" + "The function is evaluated by forward recurrence on `n`, starting with\n" + "values computed by the Cephes routines `y0` and `y1`. If `n = 0` or 1,\n" + "the routine for `y0` or `y1` is called directly.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Cephes Mathematical Functions Library,\n" + " http://www.netlib.org/cephes/\n" + "\n" + "Examples\n" + "--------\n" + "Evaluate the function of order 0 at one point.\n" + "\n" + ">>> from scipy.special import yn\n" + ">>> yn(0, 1.)\n" + "0.08825696421567697\n" + "\n" + "Evaluate the function at one point for different orders.\n" + "\n" + ">>> yn(0, 1.), yn(1, 1.), yn(2, 1.)\n" + "(0.08825696421567697, -0.7812128213002888, -1.6506826068162546)\n" + "\n" + "The evaluation for different orders can be carried out in one call by\n" + "providing a list or NumPy array as argument for the `v` parameter:\n" + "\n" + ">>> yn([0, 1, 2], 1.)\n" + "array([ 0.08825696, -0.78121282, -1.65068261])\n" + "\n" + "Evaluate the function at several points for order 0 by providing an\n" + "array for `z`.\n" + "\n" + ">>> import numpy as np\n" + ">>> points = np.array([0.5, 3., 8.])\n" + ">>> yn(0, points)\n" + "array([-0.44451873, 0.37685001, 0.22352149])\n" + "\n" + "If `z` is an array, the order parameter `v` must be broadcastable to\n" + "the correct shape if different orders shall be computed in one call.\n" + "To calculate the orders 0 and 1 for an 1D array:\n" + "\n" + ">>> orders = np.array([[0], [1]])\n" + ">>> orders.shape\n" + "(2, 1)\n" + "\n" + ">>> yn(orders, points)\n" + "array([[-0.44451873, 0.37685001, 0.22352149],\n" + " [-1.47147239, 0.32467442, -0.15806046]])\n" + "\n" + "Plot the functions of order 0 to 3 from 0 to 10.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(0., 10., 1000)\n" + ">>> for i in range(4):\n" + "... 
ax.plot(x, yn(i, x), label=f'$Y_{i!r}$')\n" + ">>> ax.set_ylim(-3, 1)\n" + ">>> ax.legend()\n" + ">>> plt.show()") +ufunc_yn_loops[0] = loop_d_id__As_ld_d +ufunc_yn_loops[1] = loop_d_dd__As_ff_f +ufunc_yn_loops[2] = loop_d_dd__As_dd_d +ufunc_yn_types[0] = NPY_LONG +ufunc_yn_types[1] = NPY_DOUBLE +ufunc_yn_types[2] = NPY_DOUBLE +ufunc_yn_types[3] = NPY_FLOAT +ufunc_yn_types[4] = NPY_FLOAT +ufunc_yn_types[5] = NPY_FLOAT +ufunc_yn_types[6] = NPY_DOUBLE +ufunc_yn_types[7] = NPY_DOUBLE +ufunc_yn_types[8] = NPY_DOUBLE +ufunc_yn_ptr[2*0] = _func_yn +ufunc_yn_ptr[2*0+1] = ("yn") +ufunc_yn_ptr[2*1] = _func_yn_unsafe +ufunc_yn_ptr[2*1+1] = ("yn") +ufunc_yn_ptr[2*2] = _func_yn_unsafe +ufunc_yn_ptr[2*2+1] = ("yn") +ufunc_yn_data[0] = &ufunc_yn_ptr[2*0] +ufunc_yn_data[1] = &ufunc_yn_ptr[2*1] +ufunc_yn_data[2] = &ufunc_yn_ptr[2*2] +yn = np.PyUFunc_FromFuncAndData(ufunc_yn_loops, ufunc_yn_data, ufunc_yn_types, 3, 2, 1, 0, "yn", ufunc_yn_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_yv_loops[4] +cdef void *ufunc_yv_ptr[8] +cdef void *ufunc_yv_data[4] +cdef char ufunc_yv_types[12] +cdef char *ufunc_yv_doc = ( + "yv(v, z, out=None)\n" + "\n" + "Bessel function of the second kind of real order and complex argument.\n" + "\n" + "Parameters\n" + "----------\n" + "v : array_like\n" + " Order (float).\n" + "z : array_like\n" + " Argument (float or complex).\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "Y : scalar or ndarray\n" + " Value of the Bessel function of the second kind, :math:`Y_v(x)`.\n" + "\n" + "See Also\n" + "--------\n" + "yve : :math:`Y_v` with leading exponential behavior stripped off.\n" + "y0: faster implementation of this function for order 0\n" + "y1: faster implementation of this function for order 1\n" + "\n" + "Notes\n" + "-----\n" + "For positive `v` values, the computation is carried out using the\n" + "AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel\n" + 
"Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,\n" + "\n" + ".. math:: Y_v(z) = \\frac{1}{2\\imath} (H_v^{(1)} - H_v^{(2)}).\n" + "\n" + "For negative `v` values the formula,\n" + "\n" + ".. math:: Y_{-v}(z) = Y_v(z) \\cos(\\pi v) + J_v(z) \\sin(\\pi v)\n" + "\n" + "is used, where :math:`J_v(z)` is the Bessel function of the first kind,\n" + "computed using the AMOS routine `zbesj`. Note that the second term is\n" + "exactly zero for integer `v`; to improve accuracy the second term is\n" + "explicitly omitted for `v` values such that `v = floor(v)`.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Donald E. Amos, \"AMOS, A Portable Package for Bessel Functions\n" + " of a Complex Argument and Nonnegative Order\",\n" + " http://netlib.org/amos/\n" + "\n" + "Examples\n" + "--------\n" + "Evaluate the function of order 0 at one point.\n" + "\n" + ">>> from scipy.special import yv\n" + ">>> yv(0, 1.)\n" + "0.088256964215677\n" + "\n" + "Evaluate the function at one point for different orders.\n" + "\n" + ">>> yv(0, 1.), yv(1, 1.), yv(1.5, 1.)\n" + "(0.088256964215677, -0.7812128213002889, -1.102495575160179)\n" + "\n" + "The evaluation for different orders can be carried out in one call by\n" + "providing a list or NumPy array as argument for the `v` parameter:\n" + "\n" + ">>> yv([0, 1, 1.5], 1.)\n" + "array([ 0.08825696, -0.78121282, -1.10249558])\n" + "\n" + "Evaluate the function at several points for order 0 by providing an\n" + "array for `z`.\n" + "\n" + ">>> import numpy as np\n" + ">>> points = np.array([0.5, 3., 8.])\n" + ">>> yv(0, points)\n" + "array([-0.44451873, 0.37685001, 0.22352149])\n" + "\n" + "If `z` is an array, the order parameter `v` must be broadcastable to\n" + "the correct shape if different orders shall be computed in one call.\n" + "To calculate the orders 0 and 1 for an 1D array:\n" + "\n" + ">>> orders = np.array([[0], [1]])\n" + ">>> orders.shape\n" + "(2, 1)\n" + "\n" + ">>> yv(orders, points)\n" + "array([[-0.44451873, 
0.37685001, 0.22352149],\n" + " [-1.47147239, 0.32467442, -0.15806046]])\n" + "\n" + "Plot the functions of order 0 to 3 from 0 to 10.\n" + "\n" + ">>> import matplotlib.pyplot as plt\n" + ">>> fig, ax = plt.subplots()\n" + ">>> x = np.linspace(0., 10., 1000)\n" + ">>> for i in range(4):\n" + "... ax.plot(x, yv(i, x), label=f'$Y_{i!r}$')\n" + ">>> ax.set_ylim(-3, 1)\n" + ">>> ax.legend()\n" + ">>> plt.show()") +ufunc_yv_loops[0] = loop_d_dd__As_ff_f +ufunc_yv_loops[1] = loop_D_dD__As_fF_F +ufunc_yv_loops[2] = loop_d_dd__As_dd_d +ufunc_yv_loops[3] = loop_D_dD__As_dD_D +ufunc_yv_types[0] = NPY_FLOAT +ufunc_yv_types[1] = NPY_FLOAT +ufunc_yv_types[2] = NPY_FLOAT +ufunc_yv_types[3] = NPY_FLOAT +ufunc_yv_types[4] = NPY_CFLOAT +ufunc_yv_types[5] = NPY_CFLOAT +ufunc_yv_types[6] = NPY_DOUBLE +ufunc_yv_types[7] = NPY_DOUBLE +ufunc_yv_types[8] = NPY_DOUBLE +ufunc_yv_types[9] = NPY_DOUBLE +ufunc_yv_types[10] = NPY_CDOUBLE +ufunc_yv_types[11] = NPY_CDOUBLE +ufunc_yv_ptr[2*0] = _func_cbesy_wrap_real +ufunc_yv_ptr[2*0+1] = ("yv") +ufunc_yv_ptr[2*1] = _func_cbesy_wrap +ufunc_yv_ptr[2*1+1] = ("yv") +ufunc_yv_ptr[2*2] = _func_cbesy_wrap_real +ufunc_yv_ptr[2*2+1] = ("yv") +ufunc_yv_ptr[2*3] = _func_cbesy_wrap +ufunc_yv_ptr[2*3+1] = ("yv") +ufunc_yv_data[0] = &ufunc_yv_ptr[2*0] +ufunc_yv_data[1] = &ufunc_yv_ptr[2*1] +ufunc_yv_data[2] = &ufunc_yv_ptr[2*2] +ufunc_yv_data[3] = &ufunc_yv_ptr[2*3] +yv = np.PyUFunc_FromFuncAndData(ufunc_yv_loops, ufunc_yv_data, ufunc_yv_types, 4, 2, 1, 0, "yv", ufunc_yv_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_yve_loops[4] +cdef void *ufunc_yve_ptr[8] +cdef void *ufunc_yve_data[4] +cdef char ufunc_yve_types[12] +cdef char *ufunc_yve_doc = ( + "yve(v, z, out=None)\n" + "\n" + "Exponentially scaled Bessel function of the second kind of real order.\n" + "\n" + "Returns the exponentially scaled Bessel function of the second\n" + "kind of real order `v` at complex `z`::\n" + "\n" + " yve(v, z) = yv(v, z) * exp(-abs(z.imag))\n" + "\n" + "Parameters\n" + 
"----------\n" + "v : array_like\n" + " Order (float).\n" + "z : array_like\n" + " Argument (float or complex).\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "Y : scalar or ndarray\n" + " Value of the exponentially scaled Bessel function.\n" + "\n" + "See Also\n" + "--------\n" + "yv: Unscaled Bessel function of the second kind of real order.\n" + "\n" + "Notes\n" + "-----\n" + "For positive `v` values, the computation is carried out using the\n" + "AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel\n" + "Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,\n" + "\n" + ".. math:: Y_v(z) = \\frac{1}{2\\imath} (H_v^{(1)} - H_v^{(2)}).\n" + "\n" + "For negative `v` values the formula,\n" + "\n" + ".. math:: Y_{-v}(z) = Y_v(z) \\cos(\\pi v) + J_v(z) \\sin(\\pi v)\n" + "\n" + "is used, where :math:`J_v(z)` is the Bessel function of the first kind,\n" + "computed using the AMOS routine `zbesj`. Note that the second term is\n" + "exactly zero for integer `v`; to improve accuracy the second term is\n" + "explicitly omitted for `v` values such that `v = floor(v)`.\n" + "\n" + "Exponentially scaled Bessel functions are useful for large `z`:\n" + "for these, the unscaled Bessel functions can easily under-or overflow.\n" + "\n" + "References\n" + "----------\n" + ".. [1] Donald E. Amos, \"AMOS, A Portable Package for Bessel Functions\n" + " of a Complex Argument and Nonnegative Order\",\n" + " http://netlib.org/amos/\n" + "\n" + "Examples\n" + "--------\n" + "Compare the output of `yv` and `yve` for large complex arguments for `z`\n" + "by computing their values for order ``v=1`` at ``z=1000j``. 
We see that\n" + "`yv` returns nan but `yve` returns a finite number:\n" + "\n" + ">>> import numpy as np\n" + ">>> from scipy.special import yv, yve\n" + ">>> v = 1\n" + ">>> z = 1000j\n" + ">>> yv(v, z), yve(v, z)\n" + "((nan+nanj), (-0.012610930256928629+7.721967686709076e-19j))\n" + "\n" + "For real arguments for `z`, `yve` returns the same as `yv` up to\n" + "floating point errors.\n" + "\n" + ">>> v, z = 1, 1000\n" + ">>> yv(v, z), yve(v, z)\n" + "(-0.02478433129235178, -0.02478433129235179)\n" + "\n" + "The function can be evaluated for several orders at the same time by\n" + "providing a list or NumPy array for `v`:\n" + "\n" + ">>> yve([1, 2, 3], 1j)\n" + "array([-0.20791042+0.14096627j, 0.38053618-0.04993878j,\n" + " 0.00815531-1.66311097j])\n" + "\n" + "In the same way, the function can be evaluated at several points in one\n" + "call by providing a list or NumPy array for `z`:\n" + "\n" + ">>> yve(1, np.array([1j, 2j, 3j]))\n" + "array([-0.20791042+0.14096627j, -0.21526929+0.01205044j,\n" + " -0.19682671+0.00127278j])\n" + "\n" + "It is also possible to evaluate several orders at several points\n" + "at the same time by providing arrays for `v` and `z` with\n" + "broadcasting compatible shapes. 
Compute `yve` for two different orders\n" + "`v` and three points `z` resulting in a 2x3 array.\n" + "\n" + ">>> v = np.array([[1], [2]])\n" + ">>> z = np.array([3j, 4j, 5j])\n" + ">>> v.shape, z.shape\n" + "((2, 1), (3,))\n" + "\n" + ">>> yve(v, z)\n" + "array([[-1.96826713e-01+1.27277544e-03j, -1.78750840e-01+1.45558819e-04j,\n" + " -1.63972267e-01+1.73494110e-05j],\n" + " [1.94960056e-03-1.11782545e-01j, 2.02902325e-04-1.17626501e-01j,\n" + " 2.27727687e-05-1.17951906e-01j]])") +ufunc_yve_loops[0] = loop_d_dd__As_ff_f +ufunc_yve_loops[1] = loop_D_dD__As_fF_F +ufunc_yve_loops[2] = loop_d_dd__As_dd_d +ufunc_yve_loops[3] = loop_D_dD__As_dD_D +ufunc_yve_types[0] = NPY_FLOAT +ufunc_yve_types[1] = NPY_FLOAT +ufunc_yve_types[2] = NPY_FLOAT +ufunc_yve_types[3] = NPY_FLOAT +ufunc_yve_types[4] = NPY_CFLOAT +ufunc_yve_types[5] = NPY_CFLOAT +ufunc_yve_types[6] = NPY_DOUBLE +ufunc_yve_types[7] = NPY_DOUBLE +ufunc_yve_types[8] = NPY_DOUBLE +ufunc_yve_types[9] = NPY_DOUBLE +ufunc_yve_types[10] = NPY_CDOUBLE +ufunc_yve_types[11] = NPY_CDOUBLE +ufunc_yve_ptr[2*0] = _func_cbesy_wrap_e_real +ufunc_yve_ptr[2*0+1] = ("yve") +ufunc_yve_ptr[2*1] = _func_cbesy_wrap_e +ufunc_yve_ptr[2*1+1] = ("yve") +ufunc_yve_ptr[2*2] = _func_cbesy_wrap_e_real +ufunc_yve_ptr[2*2+1] = ("yve") +ufunc_yve_ptr[2*3] = _func_cbesy_wrap_e +ufunc_yve_ptr[2*3+1] = ("yve") +ufunc_yve_data[0] = &ufunc_yve_ptr[2*0] +ufunc_yve_data[1] = &ufunc_yve_ptr[2*1] +ufunc_yve_data[2] = &ufunc_yve_ptr[2*2] +ufunc_yve_data[3] = &ufunc_yve_ptr[2*3] +yve = np.PyUFunc_FromFuncAndData(ufunc_yve_loops, ufunc_yve_data, ufunc_yve_types, 4, 2, 1, 0, "yve", ufunc_yve_doc, 0) + +cdef np.PyUFuncGenericFunction ufunc_zetac_loops[2] +cdef void *ufunc_zetac_ptr[4] +cdef void *ufunc_zetac_data[2] +cdef char ufunc_zetac_types[4] +cdef char *ufunc_zetac_doc = ( + "zetac(x, out=None)\n" + "\n" + "Riemann zeta function minus 1.\n" + "\n" + "This function is defined as\n" + "\n" + ".. 
math:: \\zeta(x) = \\sum_{k=2}^{\\infty} 1 / k^x,\n" + "\n" + "where ``x > 1``. For ``x < 1`` the analytic continuation is\n" + "computed. For more information on the Riemann zeta function, see\n" + "[dlmf]_.\n" + "\n" + "Parameters\n" + "----------\n" + "x : array_like of float\n" + " Values at which to compute zeta(x) - 1 (must be real).\n" + "out : ndarray, optional\n" + " Optional output array for the function results\n" + "\n" + "Returns\n" + "-------\n" + "scalar or ndarray\n" + " Values of zeta(x) - 1.\n" + "\n" + "See Also\n" + "--------\n" + "zeta\n" + "\n" + "References\n" + "----------\n" + ".. [dlmf] NIST Digital Library of Mathematical Functions\n" + " https://dlmf.nist.gov/25\n" + "\n" + "Examples\n" + "--------\n" + ">>> import numpy as np\n" + ">>> from scipy.special import zetac, zeta\n" + "\n" + "Some special values:\n" + "\n" + ">>> zetac(2), np.pi**2/6 - 1\n" + "(0.64493406684822641, 0.6449340668482264)\n" + "\n" + ">>> zetac(-1), -1.0/12 - 1\n" + "(-1.0833333333333333, -1.0833333333333333)\n" + "\n" + "Compare ``zetac(x)`` to ``zeta(x) - 1`` for large `x`:\n" + "\n" + ">>> zetac(60), zeta(60) - 1\n" + "(8.673617380119933e-19, 0.0)") +ufunc_zetac_loops[0] = loop_d_d__As_f_f +ufunc_zetac_loops[1] = loop_d_d__As_d_d +ufunc_zetac_types[0] = NPY_FLOAT +ufunc_zetac_types[1] = NPY_FLOAT +ufunc_zetac_types[2] = NPY_DOUBLE +ufunc_zetac_types[3] = NPY_DOUBLE +ufunc_zetac_ptr[2*0] = _func_zetac +ufunc_zetac_ptr[2*0+1] = ("zetac") +ufunc_zetac_ptr[2*1] = _func_zetac +ufunc_zetac_ptr[2*1+1] = ("zetac") +ufunc_zetac_data[0] = &ufunc_zetac_ptr[2*0] +ufunc_zetac_data[1] = &ufunc_zetac_ptr[2*1] +zetac = np.PyUFunc_FromFuncAndData(ufunc_zetac_loops, ufunc_zetac_data, ufunc_zetac_types, 2, 1, 1, 0, "zetac", ufunc_zetac_doc, 0) + +# +# Aliases +# +jn = jv diff --git a/venv/lib/python3.10/site-packages/scipy/special/_ufuncs_cxx.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/special/_ufuncs_cxx.cpython-310-x86_64-linux-gnu.so new file mode 
100644 index 0000000000000000000000000000000000000000..40dfb76c4d3b4ef7b4ea3eb1d8c4dd727d014e2a Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/special/_ufuncs_cxx.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/scipy/special/_ufuncs_cxx.pxd b/venv/lib/python3.10/site-packages/scipy/special/_ufuncs_cxx.pxd new file mode 100644 index 0000000000000000000000000000000000000000..eb89ddfe2c1a8d97eb9b7e4453cc241ed21ac9cf --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special/_ufuncs_cxx.pxd @@ -0,0 +1,60 @@ +from . cimport sf_error +cdef void _set_action(sf_error.sf_error_t, sf_error.sf_action_t) noexcept nogil +cdef void *_export_ccospi +cdef void *_export_lambertw_scalar +cdef void *_export_csinpi +cdef void *_export__stirling2_inexact +cdef void *_export_ibeta_float +cdef void *_export_ibeta_double +cdef void *_export_ibetac_float +cdef void *_export_ibetac_double +cdef void *_export_ibetac_inv_float +cdef void *_export_ibetac_inv_double +cdef void *_export_ibeta_inv_float +cdef void *_export_ibeta_inv_double +cdef void *_export_binom +cdef void *_export_faddeeva_dawsn +cdef void *_export_faddeeva_dawsn_complex +cdef void *_export_fellint_RC +cdef void *_export_cellint_RC +cdef void *_export_fellint_RD +cdef void *_export_cellint_RD +cdef void *_export_fellint_RF +cdef void *_export_cellint_RF +cdef void *_export_fellint_RG +cdef void *_export_cellint_RG +cdef void *_export_fellint_RJ +cdef void *_export_cellint_RJ +cdef void *_export_faddeeva_erf +cdef void *_export_faddeeva_erfc_complex +cdef void *_export_faddeeva_erfcx +cdef void *_export_faddeeva_erfcx_complex +cdef void *_export_faddeeva_erfi +cdef void *_export_faddeeva_erfi_complex +cdef void *_export_erfinv_float +cdef void *_export_erfinv_double +cdef void *_export_expit +cdef void *_export_expitf +cdef void *_export_expitl +cdef void *_export_cgamma +cdef void *_export_hyp1f1_double +cdef void *_export_log_expit +cdef void 
*_export_log_expitf +cdef void *_export_log_expitl +cdef void *_export_faddeeva_log_ndtr +cdef void *_export_faddeeva_log_ndtr_complex +cdef void *_export_loggamma_real +cdef void *_export_loggamma +cdef void *_export_logit +cdef void *_export_logitf +cdef void *_export_logitl +cdef void *_export_faddeeva_ndtr +cdef void *_export_powm1_float +cdef void *_export_powm1_double +cdef void *_export_cdigamma +cdef void *_export_digamma +cdef void *_export_crgamma +cdef void *_export_faddeeva_voigt_profile +cdef void *_export_faddeeva_w +cdef void *_export_wrightomega +cdef void *_export_wrightomega_real \ No newline at end of file diff --git a/venv/lib/python3.10/site-packages/scipy/special/_ufuncs_cxx.pyx b/venv/lib/python3.10/site-packages/scipy/special/_ufuncs_cxx.pyx new file mode 100644 index 0000000000000000000000000000000000000000..cc2b8a0528c6d3ad3076234256d89efe31541146 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special/_ufuncs_cxx.pyx @@ -0,0 +1,181 @@ +# This file is automatically generated by _generate_pyx.py. +# Do not edit manually! 
+ +from libc.math cimport NAN + +include "_ufuncs_extra_code_common.pxi" + +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_ccospi "ccospi"(double complex) noexcept nogil +cdef void *_export_ccospi = _func_ccospi +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_lambertw_scalar "lambertw_scalar"(double complex, long, double) noexcept nogil +cdef void *_export_lambertw_scalar = _func_lambertw_scalar +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_csinpi "csinpi"(double complex) noexcept nogil +cdef void *_export_csinpi = _func_csinpi +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func__stirling2_inexact "_stirling2_inexact"(double, double) noexcept nogil +cdef void *_export__stirling2_inexact = _func__stirling2_inexact +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_ibeta_float "ibeta_float"(float, float, float) noexcept nogil +cdef void *_export_ibeta_float = _func_ibeta_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_ibeta_double "ibeta_double"(double, double, double) noexcept nogil +cdef void *_export_ibeta_double = _func_ibeta_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_ibetac_float "ibetac_float"(float, float, float) noexcept nogil +cdef void *_export_ibetac_float = _func_ibetac_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_ibetac_double "ibetac_double"(double, double, double) noexcept nogil +cdef void *_export_ibetac_double = _func_ibetac_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_ibetac_inv_float "ibetac_inv_float"(float, float, float) noexcept nogil +cdef void *_export_ibetac_inv_float = _func_ibetac_inv_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_ibetac_inv_double "ibetac_inv_double"(double, double, double) noexcept nogil +cdef void *_export_ibetac_inv_double = _func_ibetac_inv_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_ibeta_inv_float 
"ibeta_inv_float"(float, float, float) noexcept nogil +cdef void *_export_ibeta_inv_float = _func_ibeta_inv_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_ibeta_inv_double "ibeta_inv_double"(double, double, double) noexcept nogil +cdef void *_export_ibeta_inv_double = _func_ibeta_inv_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_binom "binom"(double, double) noexcept nogil +cdef void *_export_binom = _func_binom +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_faddeeva_dawsn "faddeeva_dawsn"(double) noexcept nogil +cdef void *_export_faddeeva_dawsn = _func_faddeeva_dawsn +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_faddeeva_dawsn_complex "faddeeva_dawsn_complex"(double complex) noexcept nogil +cdef void *_export_faddeeva_dawsn_complex = _func_faddeeva_dawsn_complex +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_fellint_RC "fellint_RC"(double, double) noexcept nogil +cdef void *_export_fellint_RC = _func_fellint_RC +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_cellint_RC "cellint_RC"(double complex, double complex) noexcept nogil +cdef void *_export_cellint_RC = _func_cellint_RC +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_fellint_RD "fellint_RD"(double, double, double) noexcept nogil +cdef void *_export_fellint_RD = _func_fellint_RD +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_cellint_RD "cellint_RD"(double complex, double complex, double complex) noexcept nogil +cdef void *_export_cellint_RD = _func_cellint_RD +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_fellint_RF "fellint_RF"(double, double, double) noexcept nogil +cdef void *_export_fellint_RF = _func_fellint_RF +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_cellint_RF "cellint_RF"(double complex, double complex, double complex) noexcept nogil +cdef void *_export_cellint_RF = _func_cellint_RF +cdef extern from 
r"_ufuncs_cxx_defs.h": + cdef double _func_fellint_RG "fellint_RG"(double, double, double) noexcept nogil +cdef void *_export_fellint_RG = _func_fellint_RG +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_cellint_RG "cellint_RG"(double complex, double complex, double complex) noexcept nogil +cdef void *_export_cellint_RG = _func_cellint_RG +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_fellint_RJ "fellint_RJ"(double, double, double, double) noexcept nogil +cdef void *_export_fellint_RJ = _func_fellint_RJ +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_cellint_RJ "cellint_RJ"(double complex, double complex, double complex, double complex) noexcept nogil +cdef void *_export_cellint_RJ = _func_cellint_RJ +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_faddeeva_erf "faddeeva_erf"(double complex) noexcept nogil +cdef void *_export_faddeeva_erf = _func_faddeeva_erf +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_faddeeva_erfc_complex "faddeeva_erfc_complex"(double complex) noexcept nogil +cdef void *_export_faddeeva_erfc_complex = _func_faddeeva_erfc_complex +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_faddeeva_erfcx "faddeeva_erfcx"(double) noexcept nogil +cdef void *_export_faddeeva_erfcx = _func_faddeeva_erfcx +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_faddeeva_erfcx_complex "faddeeva_erfcx_complex"(double complex) noexcept nogil +cdef void *_export_faddeeva_erfcx_complex = _func_faddeeva_erfcx_complex +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_faddeeva_erfi "faddeeva_erfi"(double) noexcept nogil +cdef void *_export_faddeeva_erfi = _func_faddeeva_erfi +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_faddeeva_erfi_complex "faddeeva_erfi_complex"(double complex) noexcept nogil +cdef void *_export_faddeeva_erfi_complex = _func_faddeeva_erfi_complex +cdef extern from r"_ufuncs_cxx_defs.h": + cdef 
float _func_erfinv_float "erfinv_float"(float) noexcept nogil +cdef void *_export_erfinv_float = _func_erfinv_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_erfinv_double "erfinv_double"(double) noexcept nogil +cdef void *_export_erfinv_double = _func_erfinv_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_expit "expit"(double) noexcept nogil +cdef void *_export_expit = _func_expit +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_expitf "expitf"(float) noexcept nogil +cdef void *_export_expitf = _func_expitf +cdef extern from r"_ufuncs_cxx_defs.h": + cdef long double _func_expitl "expitl"(long double) noexcept nogil +cdef void *_export_expitl = _func_expitl +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_cgamma "cgamma"(double complex) noexcept nogil +cdef void *_export_cgamma = _func_cgamma +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_hyp1f1_double "hyp1f1_double"(double, double, double) noexcept nogil +cdef void *_export_hyp1f1_double = _func_hyp1f1_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_log_expit "log_expit"(double) noexcept nogil +cdef void *_export_log_expit = _func_log_expit +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_log_expitf "log_expitf"(float) noexcept nogil +cdef void *_export_log_expitf = _func_log_expitf +cdef extern from r"_ufuncs_cxx_defs.h": + cdef long double _func_log_expitl "log_expitl"(long double) noexcept nogil +cdef void *_export_log_expitl = _func_log_expitl +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_faddeeva_log_ndtr "faddeeva_log_ndtr"(double) noexcept nogil +cdef void *_export_faddeeva_log_ndtr = _func_faddeeva_log_ndtr +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_faddeeva_log_ndtr_complex "faddeeva_log_ndtr_complex"(double complex) noexcept nogil +cdef void *_export_faddeeva_log_ndtr_complex = _func_faddeeva_log_ndtr_complex +cdef extern from r"_ufuncs_cxx_defs.h": 
+ cdef double _func_loggamma_real "loggamma_real"(double) noexcept nogil +cdef void *_export_loggamma_real = _func_loggamma_real +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_loggamma "loggamma"(double complex) noexcept nogil +cdef void *_export_loggamma = _func_loggamma +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_logit "logit"(double) noexcept nogil +cdef void *_export_logit = _func_logit +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_logitf "logitf"(float) noexcept nogil +cdef void *_export_logitf = _func_logitf +cdef extern from r"_ufuncs_cxx_defs.h": + cdef long double _func_logitl "logitl"(long double) noexcept nogil +cdef void *_export_logitl = _func_logitl +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_faddeeva_ndtr "faddeeva_ndtr"(double complex) noexcept nogil +cdef void *_export_faddeeva_ndtr = _func_faddeeva_ndtr +cdef extern from r"_ufuncs_cxx_defs.h": + cdef float _func_powm1_float "powm1_float"(float, float) noexcept nogil +cdef void *_export_powm1_float = _func_powm1_float +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_powm1_double "powm1_double"(double, double) noexcept nogil +cdef void *_export_powm1_double = _func_powm1_double +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_cdigamma "cdigamma"(double complex) noexcept nogil +cdef void *_export_cdigamma = _func_cdigamma +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_digamma "digamma"(double) noexcept nogil +cdef void *_export_digamma = _func_digamma +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_crgamma "crgamma"(double complex) noexcept nogil +cdef void *_export_crgamma = _func_crgamma +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_faddeeva_voigt_profile "faddeeva_voigt_profile"(double, double, double) noexcept nogil +cdef void *_export_faddeeva_voigt_profile = _func_faddeeva_voigt_profile +cdef extern from r"_ufuncs_cxx_defs.h": + cdef 
double complex _func_faddeeva_w "faddeeva_w"(double complex) noexcept nogil +cdef void *_export_faddeeva_w = _func_faddeeva_w +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double complex _func_wrightomega "wrightomega"(double complex) noexcept nogil +cdef void *_export_wrightomega = _func_wrightomega +cdef extern from r"_ufuncs_cxx_defs.h": + cdef double _func_wrightomega_real "wrightomega_real"(double) noexcept nogil +cdef void *_export_wrightomega_real = _func_wrightomega_real \ No newline at end of file diff --git a/venv/lib/python3.10/site-packages/scipy/special/_ufuncs_cxx_defs.h b/venv/lib/python3.10/site-packages/scipy/special/_ufuncs_cxx_defs.h new file mode 100644 index 0000000000000000000000000000000000000000..4f0262e0497535cb15b28d888b587af2c6b96461 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special/_ufuncs_cxx_defs.h @@ -0,0 +1,68 @@ +#ifndef UFUNCS_PROTO_H +#define UFUNCS_PROTO_H 1 +#include "_special.h" +npy_cdouble ccospi(npy_cdouble); +npy_cdouble lambertw_scalar(npy_cdouble, npy_long, npy_double); +npy_cdouble csinpi(npy_cdouble); +#include "stirling2.h" +npy_double _stirling2_inexact(npy_double, npy_double); +#include "boost_special_functions.h" +npy_float ibeta_float(npy_float, npy_float, npy_float); +npy_double ibeta_double(npy_double, npy_double, npy_double); +npy_float ibetac_float(npy_float, npy_float, npy_float); +npy_double ibetac_double(npy_double, npy_double, npy_double); +npy_float ibetac_inv_float(npy_float, npy_float, npy_float); +npy_double ibetac_inv_double(npy_double, npy_double, npy_double); +npy_float ibeta_inv_float(npy_float, npy_float, npy_float); +npy_double ibeta_inv_double(npy_double, npy_double, npy_double); +npy_double binom(npy_double, npy_double); +#include "_faddeeva.h" +npy_double faddeeva_dawsn(npy_double); +npy_cdouble faddeeva_dawsn_complex(npy_cdouble); +#include "ellint_carlson_wrap.hh" +npy_double fellint_RC(npy_double, npy_double); +npy_cdouble cellint_RC(npy_cdouble, npy_cdouble); +npy_double 
fellint_RD(npy_double, npy_double, npy_double); +npy_cdouble cellint_RD(npy_cdouble, npy_cdouble, npy_cdouble); +npy_double fellint_RF(npy_double, npy_double, npy_double); +npy_cdouble cellint_RF(npy_cdouble, npy_cdouble, npy_cdouble); +npy_double fellint_RG(npy_double, npy_double, npy_double); +npy_cdouble cellint_RG(npy_cdouble, npy_cdouble, npy_cdouble); +npy_double fellint_RJ(npy_double, npy_double, npy_double, npy_double); +npy_cdouble cellint_RJ(npy_cdouble, npy_cdouble, npy_cdouble, npy_cdouble); +npy_cdouble faddeeva_erf(npy_cdouble); +npy_cdouble faddeeva_erfc_complex(npy_cdouble); +npy_double faddeeva_erfcx(npy_double); +npy_cdouble faddeeva_erfcx_complex(npy_cdouble); +npy_double faddeeva_erfi(npy_double); +npy_cdouble faddeeva_erfi_complex(npy_cdouble); +npy_float erfinv_float(npy_float); +npy_double erfinv_double(npy_double); +#include "_logit.h" +npy_double expit(npy_double); +npy_float expitf(npy_float); +npy_longdouble expitl(npy_longdouble); +npy_cdouble cgamma(npy_cdouble); +npy_double hyp1f1_double(npy_double, npy_double, npy_double); +npy_double log_expit(npy_double); +npy_float log_expitf(npy_float); +npy_longdouble log_expitl(npy_longdouble); +npy_double faddeeva_log_ndtr(npy_double); +npy_cdouble faddeeva_log_ndtr_complex(npy_cdouble); +npy_double loggamma_real(npy_double); +npy_cdouble loggamma(npy_cdouble); +npy_double logit(npy_double); +npy_float logitf(npy_float); +npy_longdouble logitl(npy_longdouble); +npy_cdouble faddeeva_ndtr(npy_cdouble); +npy_float powm1_float(npy_float, npy_float); +npy_double powm1_double(npy_double, npy_double); +npy_cdouble cdigamma(npy_cdouble); +npy_double digamma(npy_double); +npy_cdouble crgamma(npy_cdouble); +npy_double faddeeva_voigt_profile(npy_double, npy_double, npy_double); +npy_cdouble faddeeva_w(npy_cdouble); +#include "_wright.h" +npy_cdouble wrightomega(npy_cdouble); +npy_double wrightomega_real(npy_double); +#endif diff --git a/venv/lib/python3.10/site-packages/scipy/special/_ufuncs_defs.h 
b/venv/lib/python3.10/site-packages/scipy/special/_ufuncs_defs.h new file mode 100644 index 0000000000000000000000000000000000000000..bf634d76844b6f28b2ba8bd4696f933b139e444b --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special/_ufuncs_defs.h @@ -0,0 +1,185 @@ +#ifndef UFUNCS_PROTO_H +#define UFUNCS_PROTO_H 1 +#include "_cosine.h" +npy_double cosine_cdf(npy_double); +npy_double cosine_invcdf(npy_double); +#include "cephes.h" +npy_double cospi(npy_double); +npy_double igam_fac(npy_double, npy_double); +npy_double kolmogc(npy_double); +npy_double kolmogci(npy_double); +npy_double kolmogp(npy_double); +npy_double lanczos_sum_expg_scaled(npy_double); +npy_double lgam1p(npy_double); +npy_double log1pmx(npy_double); +npy_double riemann_zeta(npy_double); +#include "scaled_exp1.h" +npy_double scaled_exp1(npy_double); +npy_double sinpi(npy_double); +npy_double smirnovc(npy_int, npy_double); +npy_double smirnovci(npy_int, npy_double); +npy_double smirnovp(npy_int, npy_double); +npy_double struve_asymp_large_z(npy_double, npy_double, npy_int, npy_double *); +npy_double struve_bessel_series(npy_double, npy_double, npy_int, npy_double *); +npy_double struve_power_series(npy_double, npy_double, npy_int, npy_double *); +npy_double zeta(npy_double, npy_double); +#include "amos_wrappers.h" +npy_int airy_wrap(npy_double, npy_double *, npy_double *, npy_double *, npy_double *); +npy_int cairy_wrap(npy_cdouble, npy_cdouble *, npy_cdouble *, npy_cdouble *, npy_cdouble *); +npy_int cairy_wrap_e(npy_cdouble, npy_cdouble *, npy_cdouble *, npy_cdouble *, npy_cdouble *); +npy_int cairy_wrap_e_real(npy_double, npy_double *, npy_double *, npy_double *, npy_double *); +npy_double bdtr(npy_double, npy_int, npy_double); +npy_double bdtrc(npy_double, npy_int, npy_double); +npy_double bdtri(npy_double, npy_int, npy_double); +#include "specfun_wrappers.h" +npy_double bei_wrap(npy_double); +npy_double beip_wrap(npy_double); +npy_double ber_wrap(npy_double); +npy_double 
berp_wrap(npy_double); +npy_double besselpoly(npy_double, npy_double, npy_double); +npy_double beta(npy_double, npy_double); +npy_double lbeta(npy_double, npy_double); +npy_double btdtr(npy_double, npy_double, npy_double); +npy_double incbi(npy_double, npy_double, npy_double); +npy_double cbrt(npy_double); +npy_double chdtr(npy_double, npy_double); +npy_double chdtrc(npy_double, npy_double); +npy_double chdtri(npy_double, npy_double); +npy_double cosdg(npy_double); +npy_double cosm1(npy_double); +npy_double cotdg(npy_double); +npy_double ellpe(npy_double); +npy_double ellie(npy_double, npy_double); +npy_int ellpj(npy_double, npy_double, npy_double *, npy_double *, npy_double *, npy_double *); +npy_double ellik(npy_double, npy_double); +npy_double ellpk(npy_double); +npy_double erf(npy_double); +npy_double erfc(npy_double); +npy_double erfcinv(npy_double); +npy_cdouble cexp1_wrap(npy_cdouble); +npy_double exp1_wrap(npy_double); +npy_double exp10(npy_double); +npy_double exp2(npy_double); +npy_cdouble cexpi_wrap(npy_cdouble); +npy_double expi_wrap(npy_double); +npy_double expm1(npy_double); +npy_double expn(npy_int, npy_double); +npy_double fdtr(npy_double, npy_double, npy_double); +npy_double fdtrc(npy_double, npy_double, npy_double); +npy_double fdtri(npy_double, npy_double, npy_double); +npy_int fresnl(npy_double, npy_double *, npy_double *); +npy_int cfresnl_wrap(npy_cdouble, npy_cdouble *, npy_cdouble *); +npy_double Gamma(npy_double); +npy_double igam(npy_double, npy_double); +npy_double igamc(npy_double, npy_double); +npy_double igamci(npy_double, npy_double); +npy_double igami(npy_double, npy_double); +npy_double lgam(npy_double); +npy_double gammasgn(npy_double); +npy_double gdtr(npy_double, npy_double, npy_double); +npy_double gdtrc(npy_double, npy_double, npy_double); +npy_cdouble cbesh_wrap1(npy_double, npy_cdouble); +npy_cdouble cbesh_wrap1_e(npy_double, npy_cdouble); +npy_cdouble cbesh_wrap2(npy_double, npy_cdouble); +npy_cdouble 
cbesh_wrap2_e(npy_double, npy_cdouble); +npy_cdouble chyp1f1_wrap(npy_double, npy_double, npy_cdouble); +npy_double hyp2f1(npy_double, npy_double, npy_double, npy_double); +npy_double i0(npy_double); +npy_double i0e(npy_double); +npy_double i1(npy_double); +npy_double i1e(npy_double); +npy_int it2i0k0_wrap(npy_double, npy_double *, npy_double *); +npy_int it2j0y0_wrap(npy_double, npy_double *, npy_double *); +npy_double it2struve0_wrap(npy_double); +npy_int itairy_wrap(npy_double, npy_double *, npy_double *, npy_double *, npy_double *); +npy_int it1i0k0_wrap(npy_double, npy_double *, npy_double *); +npy_int it1j0y0_wrap(npy_double, npy_double *, npy_double *); +npy_double itmodstruve0_wrap(npy_double); +npy_double itstruve0_wrap(npy_double); +npy_cdouble cbesi_wrap(npy_double, npy_cdouble); +npy_double iv(npy_double, npy_double); +npy_cdouble cbesi_wrap_e(npy_double, npy_cdouble); +npy_double cbesi_wrap_e_real(npy_double, npy_double); +npy_double j0(npy_double); +npy_double j1(npy_double); +npy_cdouble cbesj_wrap(npy_double, npy_cdouble); +npy_double cbesj_wrap_real(npy_double, npy_double); +npy_cdouble cbesj_wrap_e(npy_double, npy_cdouble); +npy_double cbesj_wrap_e_real(npy_double, npy_double); +npy_double k0(npy_double); +npy_double k0e(npy_double); +npy_double k1(npy_double); +npy_double k1e(npy_double); +npy_double kei_wrap(npy_double); +npy_double keip_wrap(npy_double); +npy_int kelvin_wrap(npy_double, npy_cdouble *, npy_cdouble *, npy_cdouble *, npy_cdouble *); +npy_double ker_wrap(npy_double); +npy_double kerp_wrap(npy_double); +npy_double cbesk_wrap_real_int(npy_int, npy_double); +npy_double kolmogi(npy_double); +npy_double kolmogorov(npy_double); +npy_cdouble cbesk_wrap(npy_double, npy_cdouble); +npy_double cbesk_wrap_real(npy_double, npy_double); +npy_cdouble cbesk_wrap_e(npy_double, npy_cdouble); +npy_double cbesk_wrap_e_real(npy_double, npy_double); +npy_double log1p(npy_double); +npy_double pmv_wrap(npy_double, npy_double, npy_double); +npy_double 
cem_cva_wrap(npy_double, npy_double); +npy_double sem_cva_wrap(npy_double, npy_double); +npy_int cem_wrap(npy_double, npy_double, npy_double, npy_double *, npy_double *); +npy_int mcm1_wrap(npy_double, npy_double, npy_double, npy_double *, npy_double *); +npy_int mcm2_wrap(npy_double, npy_double, npy_double, npy_double *, npy_double *); +npy_int msm1_wrap(npy_double, npy_double, npy_double, npy_double *, npy_double *); +npy_int msm2_wrap(npy_double, npy_double, npy_double, npy_double *, npy_double *); +npy_int sem_wrap(npy_double, npy_double, npy_double, npy_double *, npy_double *); +npy_int modified_fresnel_minus_wrap(npy_double, npy_cdouble *, npy_cdouble *); +npy_int modified_fresnel_plus_wrap(npy_double, npy_cdouble *, npy_cdouble *); +npy_double struve_l(npy_double, npy_double); +npy_double nbdtr(npy_int, npy_int, npy_double); +npy_double nbdtrc(npy_int, npy_int, npy_double); +npy_double nbdtri(npy_int, npy_int, npy_double); +npy_double ndtr(npy_double); +npy_double ndtri(npy_double); +npy_double oblate_aswfa_nocv_wrap(npy_double, npy_double, npy_double, npy_double, npy_double *); +npy_int oblate_aswfa_wrap(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *); +npy_double oblate_segv_wrap(npy_double, npy_double, npy_double); +npy_double oblate_radial1_nocv_wrap(npy_double, npy_double, npy_double, npy_double, npy_double *); +npy_int oblate_radial1_wrap(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *); +npy_double oblate_radial2_nocv_wrap(npy_double, npy_double, npy_double, npy_double, npy_double *); +npy_int oblate_radial2_wrap(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *); +npy_double owens_t(npy_double, npy_double); +npy_int pbdv_wrap(npy_double, npy_double, npy_double *, npy_double *); +npy_int pbvv_wrap(npy_double, npy_double, npy_double *, npy_double *); +npy_int pbwa_wrap(npy_double, npy_double, npy_double *, npy_double *); +npy_double 
pdtr(npy_double, npy_double); +npy_double pdtrc(npy_double, npy_double); +npy_double pdtri(npy_int, npy_double); +npy_double poch(npy_double, npy_double); +npy_double prolate_aswfa_nocv_wrap(npy_double, npy_double, npy_double, npy_double, npy_double *); +npy_int prolate_aswfa_wrap(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *); +npy_double prolate_segv_wrap(npy_double, npy_double, npy_double); +npy_double prolate_radial1_nocv_wrap(npy_double, npy_double, npy_double, npy_double, npy_double *); +npy_int prolate_radial1_wrap(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *); +npy_double prolate_radial2_nocv_wrap(npy_double, npy_double, npy_double, npy_double, npy_double *); +npy_int prolate_radial2_wrap(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *); +npy_double radian(npy_double, npy_double, npy_double); +npy_double rgamma(npy_double); +npy_double round(npy_double); +npy_int shichi(npy_double, npy_double *, npy_double *); +npy_int sici(npy_double, npy_double *, npy_double *); +npy_double sindg(npy_double); +npy_double smirnov(npy_int, npy_double); +npy_double smirnovi(npy_int, npy_double); +npy_double spence(npy_double); +npy_double struve_h(npy_double, npy_double); +npy_double tandg(npy_double); +npy_double tukeylambdacdf(npy_double, npy_double); +npy_double y0(npy_double); +npy_double y1(npy_double); +npy_double yn(npy_int, npy_double); +npy_cdouble cbesy_wrap(npy_double, npy_cdouble); +npy_double cbesy_wrap_real(npy_double, npy_double); +npy_cdouble cbesy_wrap_e(npy_double, npy_cdouble); +npy_double cbesy_wrap_e_real(npy_double, npy_double); +npy_double zetac(npy_double); +#endif diff --git a/venv/lib/python3.10/site-packages/scipy/special/add_newdocs.py b/venv/lib/python3.10/site-packages/scipy/special/add_newdocs.py new file mode 100644 index 0000000000000000000000000000000000000000..d1d60bf8e02da4d63e80e2e12abf9a1a08bbe956 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/scipy/special/add_newdocs.py @@ -0,0 +1,15 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = ['get', 'add_newdoc', 'docdict'] # noqa: F822 + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="special", module="add_newdocs", + private_modules=["_add_newdocs"], all=__all__, + attribute=name) diff --git a/venv/lib/python3.10/site-packages/scipy/special/basic.py b/venv/lib/python3.10/site-packages/scipy/special/basic.py new file mode 100644 index 0000000000000000000000000000000000000000..e55695f44d05187d6c83f1ebefd70270af2c2d76 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special/basic.py @@ -0,0 +1,87 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.special` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'ai_zeros', + 'assoc_laguerre', + 'bei_zeros', + 'beip_zeros', + 'ber_zeros', + 'bernoulli', + 'berp_zeros', + 'bi_zeros', + 'clpmn', + 'comb', + 'digamma', + 'diric', + 'erf_zeros', + 'euler', + 'factorial', + 'factorial2', + 'factorialk', + 'fresnel_zeros', + 'fresnelc_zeros', + 'fresnels_zeros', + 'gamma', + 'h1vp', + 'h2vp', + 'hankel1', + 'hankel2', + 'iv', + 'ivp', + 'jn_zeros', + 'jnjnp_zeros', + 'jnp_zeros', + 'jnyn_zeros', + 'jv', + 'jvp', + 'kei_zeros', + 'keip_zeros', + 'kelvin_zeros', + 'ker_zeros', + 'kerp_zeros', + 'kv', + 'kvp', + 'lmbda', + 'lpmn', + 'lpn', + 'lqmn', + 'lqn', + 'mathieu_a', + 'mathieu_b', + 'mathieu_even_coef', + 'mathieu_odd_coef', + 'obl_cv_seq', + 'pbdn_seq', + 'pbdv_seq', + 'pbvv_seq', + 'perm', + 'polygamma', + 'pro_cv_seq', + 'psi', + 'riccati_jn', + 'riccati_yn', + 'sinc', + 'y0_zeros', + 'y1_zeros', + 'y1p_zeros', + 'yn_zeros', + 'ynp_zeros', + 'yv', + 'yvp', + 
'zeta' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="special", module="basic", + private_modules=["_basic", "_ufuncs"], all=__all__, + attribute=name) diff --git a/venv/lib/python3.10/site-packages/scipy/special/cython_special.pxd b/venv/lib/python3.10/site-packages/scipy/special/cython_special.pxd new file mode 100644 index 0000000000000000000000000000000000000000..70d34600058c732c2fb702fd7d342bf997f600d8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special/cython_special.pxd @@ -0,0 +1,261 @@ +# This file is automatically generated by _generate_pyx.py. +# Do not edit manually! + +ctypedef fused number_t: + double complex + double + +cpdef number_t spherical_jn(long n, number_t z, bint derivative=*) noexcept nogil +cpdef number_t spherical_yn(long n, number_t z, bint derivative=*) noexcept nogil +cpdef number_t spherical_in(long n, number_t z, bint derivative=*) noexcept nogil +cpdef number_t spherical_kn(long n, number_t z, bint derivative=*) noexcept nogil + +ctypedef fused Dd_number_t: + double complex + double + +ctypedef fused df_number_t: + double + float + +ctypedef fused dfg_number_t: + double + float + long double + +ctypedef fused dl_number_t: + double + long + +cpdef double voigt_profile(double x0, double x1, double x2) noexcept nogil +cpdef double agm(double x0, double x1) noexcept nogil +cdef void airy(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1, Dd_number_t *y2, Dd_number_t *y3) noexcept nogil +cdef void airye(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1, Dd_number_t *y2, Dd_number_t *y3) noexcept nogil +cpdef double bdtr(double x0, dl_number_t x1, double x2) noexcept nogil +cpdef double bdtrc(double x0, dl_number_t x1, double x2) noexcept nogil +cpdef double bdtri(double x0, dl_number_t x1, double x2) noexcept nogil +cpdef double bdtrik(double x0, double x1, double x2) noexcept nogil +cpdef double bdtrin(double x0, double x1, double x2) noexcept nogil +cpdef 
double bei(double x0) noexcept nogil +cpdef double beip(double x0) noexcept nogil +cpdef double ber(double x0) noexcept nogil +cpdef double berp(double x0) noexcept nogil +cpdef double besselpoly(double x0, double x1, double x2) noexcept nogil +cpdef double beta(double x0, double x1) noexcept nogil +cpdef df_number_t betainc(df_number_t x0, df_number_t x1, df_number_t x2) noexcept nogil +cpdef df_number_t betaincc(df_number_t x0, df_number_t x1, df_number_t x2) noexcept nogil +cpdef df_number_t betaincinv(df_number_t x0, df_number_t x1, df_number_t x2) noexcept nogil +cpdef df_number_t betainccinv(df_number_t x0, df_number_t x1, df_number_t x2) noexcept nogil +cpdef double betaln(double x0, double x1) noexcept nogil +cpdef double binom(double x0, double x1) noexcept nogil +cpdef double boxcox(double x0, double x1) noexcept nogil +cpdef double boxcox1p(double x0, double x1) noexcept nogil +cpdef double btdtr(double x0, double x1, double x2) noexcept nogil +cpdef double btdtri(double x0, double x1, double x2) noexcept nogil +cpdef double btdtria(double x0, double x1, double x2) noexcept nogil +cpdef double btdtrib(double x0, double x1, double x2) noexcept nogil +cpdef double cbrt(double x0) noexcept nogil +cpdef double chdtr(double x0, double x1) noexcept nogil +cpdef double chdtrc(double x0, double x1) noexcept nogil +cpdef double chdtri(double x0, double x1) noexcept nogil +cpdef double chdtriv(double x0, double x1) noexcept nogil +cpdef double chndtr(double x0, double x1, double x2) noexcept nogil +cpdef double chndtridf(double x0, double x1, double x2) noexcept nogil +cpdef double chndtrinc(double x0, double x1, double x2) noexcept nogil +cpdef double chndtrix(double x0, double x1, double x2) noexcept nogil +cpdef double cosdg(double x0) noexcept nogil +cpdef double cosm1(double x0) noexcept nogil +cpdef double cotdg(double x0) noexcept nogil +cpdef Dd_number_t dawsn(Dd_number_t x0) noexcept nogil +cpdef double ellipe(double x0) noexcept nogil +cpdef double 
ellipeinc(double x0, double x1) noexcept nogil +cdef void ellipj(double x0, double x1, double *y0, double *y1, double *y2, double *y3) noexcept nogil +cpdef double ellipkinc(double x0, double x1) noexcept nogil +cpdef double ellipkm1(double x0) noexcept nogil +cpdef double ellipk(double x0) noexcept nogil +cpdef Dd_number_t elliprc(Dd_number_t x0, Dd_number_t x1) noexcept nogil +cpdef Dd_number_t elliprd(Dd_number_t x0, Dd_number_t x1, Dd_number_t x2) noexcept nogil +cpdef Dd_number_t elliprf(Dd_number_t x0, Dd_number_t x1, Dd_number_t x2) noexcept nogil +cpdef Dd_number_t elliprg(Dd_number_t x0, Dd_number_t x1, Dd_number_t x2) noexcept nogil +cpdef Dd_number_t elliprj(Dd_number_t x0, Dd_number_t x1, Dd_number_t x2, Dd_number_t x3) noexcept nogil +cpdef double entr(double x0) noexcept nogil +cpdef Dd_number_t erf(Dd_number_t x0) noexcept nogil +cpdef Dd_number_t erfc(Dd_number_t x0) noexcept nogil +cpdef Dd_number_t erfcx(Dd_number_t x0) noexcept nogil +cpdef Dd_number_t erfi(Dd_number_t x0) noexcept nogil +cpdef df_number_t erfinv(df_number_t x0) noexcept nogil +cpdef double erfcinv(double x0) noexcept nogil +cpdef Dd_number_t eval_chebyc(dl_number_t x0, Dd_number_t x1) noexcept nogil +cpdef Dd_number_t eval_chebys(dl_number_t x0, Dd_number_t x1) noexcept nogil +cpdef Dd_number_t eval_chebyt(dl_number_t x0, Dd_number_t x1) noexcept nogil +cpdef Dd_number_t eval_chebyu(dl_number_t x0, Dd_number_t x1) noexcept nogil +cpdef Dd_number_t eval_gegenbauer(dl_number_t x0, double x1, Dd_number_t x2) noexcept nogil +cpdef Dd_number_t eval_genlaguerre(dl_number_t x0, double x1, Dd_number_t x2) noexcept nogil +cpdef double eval_hermite(long x0, double x1) noexcept nogil +cpdef double eval_hermitenorm(long x0, double x1) noexcept nogil +cpdef Dd_number_t eval_jacobi(dl_number_t x0, double x1, double x2, Dd_number_t x3) noexcept nogil +cpdef Dd_number_t eval_laguerre(dl_number_t x0, Dd_number_t x1) noexcept nogil +cpdef Dd_number_t eval_legendre(dl_number_t x0, Dd_number_t x1) 
noexcept nogil +cpdef Dd_number_t eval_sh_chebyt(dl_number_t x0, Dd_number_t x1) noexcept nogil +cpdef Dd_number_t eval_sh_chebyu(dl_number_t x0, Dd_number_t x1) noexcept nogil +cpdef Dd_number_t eval_sh_jacobi(dl_number_t x0, double x1, double x2, Dd_number_t x3) noexcept nogil +cpdef Dd_number_t eval_sh_legendre(dl_number_t x0, Dd_number_t x1) noexcept nogil +cpdef Dd_number_t exp1(Dd_number_t x0) noexcept nogil +cpdef double exp10(double x0) noexcept nogil +cpdef double exp2(double x0) noexcept nogil +cpdef Dd_number_t expi(Dd_number_t x0) noexcept nogil +cpdef dfg_number_t expit(dfg_number_t x0) noexcept nogil +cpdef Dd_number_t expm1(Dd_number_t x0) noexcept nogil +cpdef double expn(dl_number_t x0, double x1) noexcept nogil +cpdef double exprel(double x0) noexcept nogil +cpdef double fdtr(double x0, double x1, double x2) noexcept nogil +cpdef double fdtrc(double x0, double x1, double x2) noexcept nogil +cpdef double fdtri(double x0, double x1, double x2) noexcept nogil +cpdef double fdtridfd(double x0, double x1, double x2) noexcept nogil +cdef void fresnel(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1) noexcept nogil +cpdef Dd_number_t gamma(Dd_number_t x0) noexcept nogil +cpdef double gammainc(double x0, double x1) noexcept nogil +cpdef double gammaincc(double x0, double x1) noexcept nogil +cpdef double gammainccinv(double x0, double x1) noexcept nogil +cpdef double gammaincinv(double x0, double x1) noexcept nogil +cpdef double gammaln(double x0) noexcept nogil +cpdef double gammasgn(double x0) noexcept nogil +cpdef double gdtr(double x0, double x1, double x2) noexcept nogil +cpdef double gdtrc(double x0, double x1, double x2) noexcept nogil +cpdef double gdtria(double x0, double x1, double x2) noexcept nogil +cpdef double gdtrib(double x0, double x1, double x2) noexcept nogil +cpdef double gdtrix(double x0, double x1, double x2) noexcept nogil +cpdef double complex hankel1(double x0, double complex x1) noexcept nogil +cpdef double complex hankel1e(double 
x0, double complex x1) noexcept nogil +cpdef double complex hankel2(double x0, double complex x1) noexcept nogil +cpdef double complex hankel2e(double x0, double complex x1) noexcept nogil +cpdef double huber(double x0, double x1) noexcept nogil +cpdef Dd_number_t hyp0f1(double x0, Dd_number_t x1) noexcept nogil +cpdef Dd_number_t hyp1f1(double x0, double x1, Dd_number_t x2) noexcept nogil +cpdef Dd_number_t hyp2f1(double x0, double x1, double x2, Dd_number_t x3) noexcept nogil +cpdef double hyperu(double x0, double x1, double x2) noexcept nogil +cpdef double i0(double x0) noexcept nogil +cpdef double i0e(double x0) noexcept nogil +cpdef double i1(double x0) noexcept nogil +cpdef double i1e(double x0) noexcept nogil +cpdef double inv_boxcox(double x0, double x1) noexcept nogil +cpdef double inv_boxcox1p(double x0, double x1) noexcept nogil +cdef void it2i0k0(double x0, double *y0, double *y1) noexcept nogil +cdef void it2j0y0(double x0, double *y0, double *y1) noexcept nogil +cpdef double it2struve0(double x0) noexcept nogil +cdef void itairy(double x0, double *y0, double *y1, double *y2, double *y3) noexcept nogil +cdef void iti0k0(double x0, double *y0, double *y1) noexcept nogil +cdef void itj0y0(double x0, double *y0, double *y1) noexcept nogil +cpdef double itmodstruve0(double x0) noexcept nogil +cpdef double itstruve0(double x0) noexcept nogil +cpdef Dd_number_t iv(double x0, Dd_number_t x1) noexcept nogil +cpdef Dd_number_t ive(double x0, Dd_number_t x1) noexcept nogil +cpdef double j0(double x0) noexcept nogil +cpdef double j1(double x0) noexcept nogil +cpdef Dd_number_t jv(double x0, Dd_number_t x1) noexcept nogil +cpdef Dd_number_t jve(double x0, Dd_number_t x1) noexcept nogil +cpdef double k0(double x0) noexcept nogil +cpdef double k0e(double x0) noexcept nogil +cpdef double k1(double x0) noexcept nogil +cpdef double k1e(double x0) noexcept nogil +cpdef double kei(double x0) noexcept nogil +cpdef double keip(double x0) noexcept nogil +cdef void 
kelvin(double x0, double complex *y0, double complex *y1, double complex *y2, double complex *y3) noexcept nogil +cpdef double ker(double x0) noexcept nogil +cpdef double kerp(double x0) noexcept nogil +cpdef double kl_div(double x0, double x1) noexcept nogil +cpdef double kn(dl_number_t x0, double x1) noexcept nogil +cpdef double kolmogi(double x0) noexcept nogil +cpdef double kolmogorov(double x0) noexcept nogil +cpdef Dd_number_t kv(double x0, Dd_number_t x1) noexcept nogil +cpdef Dd_number_t kve(double x0, Dd_number_t x1) noexcept nogil +cpdef Dd_number_t log1p(Dd_number_t x0) noexcept nogil +cpdef dfg_number_t log_expit(dfg_number_t x0) noexcept nogil +cpdef Dd_number_t log_ndtr(Dd_number_t x0) noexcept nogil +cpdef Dd_number_t loggamma(Dd_number_t x0) noexcept nogil +cpdef dfg_number_t logit(dfg_number_t x0) noexcept nogil +cpdef double lpmv(double x0, double x1, double x2) noexcept nogil +cpdef double mathieu_a(double x0, double x1) noexcept nogil +cpdef double mathieu_b(double x0, double x1) noexcept nogil +cdef void mathieu_cem(double x0, double x1, double x2, double *y0, double *y1) noexcept nogil +cdef void mathieu_modcem1(double x0, double x1, double x2, double *y0, double *y1) noexcept nogil +cdef void mathieu_modcem2(double x0, double x1, double x2, double *y0, double *y1) noexcept nogil +cdef void mathieu_modsem1(double x0, double x1, double x2, double *y0, double *y1) noexcept nogil +cdef void mathieu_modsem2(double x0, double x1, double x2, double *y0, double *y1) noexcept nogil +cdef void mathieu_sem(double x0, double x1, double x2, double *y0, double *y1) noexcept nogil +cdef void modfresnelm(double x0, double complex *y0, double complex *y1) noexcept nogil +cdef void modfresnelp(double x0, double complex *y0, double complex *y1) noexcept nogil +cpdef double modstruve(double x0, double x1) noexcept nogil +cpdef double nbdtr(dl_number_t x0, dl_number_t x1, double x2) noexcept nogil +cpdef double nbdtrc(dl_number_t x0, dl_number_t x1, double x2) 
noexcept nogil +cpdef double nbdtri(dl_number_t x0, dl_number_t x1, double x2) noexcept nogil +cpdef double nbdtrik(double x0, double x1, double x2) noexcept nogil +cpdef double nbdtrin(double x0, double x1, double x2) noexcept nogil +cpdef double ncfdtr(double x0, double x1, double x2, double x3) noexcept nogil +cpdef double ncfdtri(double x0, double x1, double x2, double x3) noexcept nogil +cpdef double ncfdtridfd(double x0, double x1, double x2, double x3) noexcept nogil +cpdef double ncfdtridfn(double x0, double x1, double x2, double x3) noexcept nogil +cpdef double ncfdtrinc(double x0, double x1, double x2, double x3) noexcept nogil +cpdef double nctdtr(double x0, double x1, double x2) noexcept nogil +cpdef double nctdtridf(double x0, double x1, double x2) noexcept nogil +cpdef double nctdtrinc(double x0, double x1, double x2) noexcept nogil +cpdef double nctdtrit(double x0, double x1, double x2) noexcept nogil +cpdef Dd_number_t ndtr(Dd_number_t x0) noexcept nogil +cpdef double ndtri(double x0) noexcept nogil +cpdef double nrdtrimn(double x0, double x1, double x2) noexcept nogil +cpdef double nrdtrisd(double x0, double x1, double x2) noexcept nogil +cdef void obl_ang1(double x0, double x1, double x2, double x3, double *y0, double *y1) noexcept nogil +cdef void obl_ang1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) noexcept nogil +cpdef double obl_cv(double x0, double x1, double x2) noexcept nogil +cdef void obl_rad1(double x0, double x1, double x2, double x3, double *y0, double *y1) noexcept nogil +cdef void obl_rad1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) noexcept nogil +cdef void obl_rad2(double x0, double x1, double x2, double x3, double *y0, double *y1) noexcept nogil +cdef void obl_rad2_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) noexcept nogil +cpdef double owens_t(double x0, double x1) noexcept nogil +cdef void pbdv(double x0, double x1, 
double *y0, double *y1) noexcept nogil +cdef void pbvv(double x0, double x1, double *y0, double *y1) noexcept nogil +cdef void pbwa(double x0, double x1, double *y0, double *y1) noexcept nogil +cpdef double pdtr(double x0, double x1) noexcept nogil +cpdef double pdtrc(double x0, double x1) noexcept nogil +cpdef double pdtri(dl_number_t x0, double x1) noexcept nogil +cpdef double pdtrik(double x0, double x1) noexcept nogil +cpdef double poch(double x0, double x1) noexcept nogil +cpdef df_number_t powm1(df_number_t x0, df_number_t x1) noexcept nogil +cdef void pro_ang1(double x0, double x1, double x2, double x3, double *y0, double *y1) noexcept nogil +cdef void pro_ang1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) noexcept nogil +cpdef double pro_cv(double x0, double x1, double x2) noexcept nogil +cdef void pro_rad1(double x0, double x1, double x2, double x3, double *y0, double *y1) noexcept nogil +cdef void pro_rad1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) noexcept nogil +cdef void pro_rad2(double x0, double x1, double x2, double x3, double *y0, double *y1) noexcept nogil +cdef void pro_rad2_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) noexcept nogil +cpdef double pseudo_huber(double x0, double x1) noexcept nogil +cpdef Dd_number_t psi(Dd_number_t x0) noexcept nogil +cpdef double radian(double x0, double x1, double x2) noexcept nogil +cpdef double rel_entr(double x0, double x1) noexcept nogil +cpdef Dd_number_t rgamma(Dd_number_t x0) noexcept nogil +cpdef double round(double x0) noexcept nogil +cdef void shichi(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1) noexcept nogil +cdef void sici(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1) noexcept nogil +cpdef double sindg(double x0) noexcept nogil +cpdef double smirnov(dl_number_t x0, double x1) noexcept nogil +cpdef double smirnovi(dl_number_t x0, double x1) noexcept nogil +cpdef Dd_number_t 
spence(Dd_number_t x0) noexcept nogil +cpdef double complex sph_harm(dl_number_t x0, dl_number_t x1, double x2, double x3) noexcept nogil +cpdef double stdtr(double x0, double x1) noexcept nogil +cpdef double stdtridf(double x0, double x1) noexcept nogil +cpdef double stdtrit(double x0, double x1) noexcept nogil +cpdef double struve(double x0, double x1) noexcept nogil +cpdef double tandg(double x0) noexcept nogil +cpdef double tklmbda(double x0, double x1) noexcept nogil +cpdef double complex wofz(double complex x0) noexcept nogil +cpdef Dd_number_t wrightomega(Dd_number_t x0) noexcept nogil +cpdef Dd_number_t xlog1py(Dd_number_t x0, Dd_number_t x1) noexcept nogil +cpdef Dd_number_t xlogy(Dd_number_t x0, Dd_number_t x1) noexcept nogil +cpdef double y0(double x0) noexcept nogil +cpdef double y1(double x0) noexcept nogil +cpdef double yn(dl_number_t x0, double x1) noexcept nogil +cpdef Dd_number_t yv(double x0, Dd_number_t x1) noexcept nogil +cpdef Dd_number_t yve(double x0, Dd_number_t x1) noexcept nogil +cpdef double zetac(double x0) noexcept nogil +cpdef double wright_bessel(double x0, double x1, double x2) noexcept nogil +cpdef double ndtri_exp(double x0) noexcept nogil \ No newline at end of file diff --git a/venv/lib/python3.10/site-packages/scipy/special/cython_special.pyi b/venv/lib/python3.10/site-packages/scipy/special/cython_special.pyi new file mode 100644 index 0000000000000000000000000000000000000000..024e962b10df8892631eaad20223f7fc8378ea83 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special/cython_special.pyi @@ -0,0 +1,3 @@ +from typing import Any + +def __getattr__(name) -> Any: ... 
diff --git a/venv/lib/python3.10/site-packages/scipy/special/cython_special.pyx b/venv/lib/python3.10/site-packages/scipy/special/cython_special.pyx new file mode 100644 index 0000000000000000000000000000000000000000..2ffe3a26ccdf229b59bcb4f2ef37d3b2dd431167 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special/cython_special.pyx @@ -0,0 +1,3698 @@ +# This file is automatically generated by _generate_pyx.py. +# Do not edit manually! +""" +.. highlight:: cython + +Cython API for special functions +================================ + +Scalar, typed versions of many of the functions in ``scipy.special`` +can be accessed directly from Cython; the complete list is given +below. Functions are overloaded using Cython fused types so their +names match their Python counterpart. The module follows the following +conventions: + +- If a function's Python counterpart returns multiple values, then the + function returns its outputs via pointers in the final arguments. +- If a function's Python counterpart returns a single value, then the + function's output is returned directly. + +The module is usable from Cython via:: + + cimport scipy.special.cython_special + +Error handling +-------------- + +Functions can indicate an error by returning ``nan``; however they +cannot emit warnings like their counterparts in ``scipy.special``. 
+ +Available functions +------------------- + +- :py:func:`~scipy.special.voigt_profile`:: + + double voigt_profile(double, double, double) + +- :py:func:`~scipy.special.agm`:: + + double agm(double, double) + +- :py:func:`~scipy.special.airy`:: + + void airy(double, double *, double *, double *, double *) + void airy(double complex, double complex *, double complex *, double complex *, double complex *) + +- :py:func:`~scipy.special.airye`:: + + void airye(double complex, double complex *, double complex *, double complex *, double complex *) + void airye(double, double *, double *, double *, double *) + +- :py:func:`~scipy.special.bdtr`:: + + double bdtr(double, double, double) + double bdtr(double, long, double) + +- :py:func:`~scipy.special.bdtrc`:: + + double bdtrc(double, double, double) + double bdtrc(double, long, double) + +- :py:func:`~scipy.special.bdtri`:: + + double bdtri(double, double, double) + double bdtri(double, long, double) + +- :py:func:`~scipy.special.bdtrik`:: + + double bdtrik(double, double, double) + +- :py:func:`~scipy.special.bdtrin`:: + + double bdtrin(double, double, double) + +- :py:func:`~scipy.special.bei`:: + + double bei(double) + +- :py:func:`~scipy.special.beip`:: + + double beip(double) + +- :py:func:`~scipy.special.ber`:: + + double ber(double) + +- :py:func:`~scipy.special.berp`:: + + double berp(double) + +- :py:func:`~scipy.special.besselpoly`:: + + double besselpoly(double, double, double) + +- :py:func:`~scipy.special.beta`:: + + double beta(double, double) + +- :py:func:`~scipy.special.betainc`:: + + float betainc(float, float, float) + double betainc(double, double, double) + +- :py:func:`~scipy.special.betaincc`:: + + float betaincc(float, float, float) + double betaincc(double, double, double) + +- :py:func:`~scipy.special.betaincinv`:: + + float betaincinv(float, float, float) + double betaincinv(double, double, double) + +- :py:func:`~scipy.special.betainccinv`:: + + float betainccinv(float, float, float) + double 
betainccinv(double, double, double) + +- :py:func:`~scipy.special.betaln`:: + + double betaln(double, double) + +- :py:func:`~scipy.special.binom`:: + + double binom(double, double) + +- :py:func:`~scipy.special.boxcox`:: + + double boxcox(double, double) + +- :py:func:`~scipy.special.boxcox1p`:: + + double boxcox1p(double, double) + +- :py:func:`~scipy.special.btdtr`:: + + double btdtr(double, double, double) + +- :py:func:`~scipy.special.btdtri`:: + + double btdtri(double, double, double) + +- :py:func:`~scipy.special.btdtria`:: + + double btdtria(double, double, double) + +- :py:func:`~scipy.special.btdtrib`:: + + double btdtrib(double, double, double) + +- :py:func:`~scipy.special.cbrt`:: + + double cbrt(double) + +- :py:func:`~scipy.special.chdtr`:: + + double chdtr(double, double) + +- :py:func:`~scipy.special.chdtrc`:: + + double chdtrc(double, double) + +- :py:func:`~scipy.special.chdtri`:: + + double chdtri(double, double) + +- :py:func:`~scipy.special.chdtriv`:: + + double chdtriv(double, double) + +- :py:func:`~scipy.special.chndtr`:: + + double chndtr(double, double, double) + +- :py:func:`~scipy.special.chndtridf`:: + + double chndtridf(double, double, double) + +- :py:func:`~scipy.special.chndtrinc`:: + + double chndtrinc(double, double, double) + +- :py:func:`~scipy.special.chndtrix`:: + + double chndtrix(double, double, double) + +- :py:func:`~scipy.special.cosdg`:: + + double cosdg(double) + +- :py:func:`~scipy.special.cosm1`:: + + double cosm1(double) + +- :py:func:`~scipy.special.cotdg`:: + + double cotdg(double) + +- :py:func:`~scipy.special.dawsn`:: + + double dawsn(double) + double complex dawsn(double complex) + +- :py:func:`~scipy.special.ellipe`:: + + double ellipe(double) + +- :py:func:`~scipy.special.ellipeinc`:: + + double ellipeinc(double, double) + +- :py:func:`~scipy.special.ellipj`:: + + void ellipj(double, double, double *, double *, double *, double *) + +- :py:func:`~scipy.special.ellipkinc`:: + + double ellipkinc(double, double) 
+ +- :py:func:`~scipy.special.ellipkm1`:: + + double ellipkm1(double) + +- :py:func:`~scipy.special.ellipk`:: + + double ellipk(double) + +- :py:func:`~scipy.special.elliprc`:: + + double elliprc(double, double) + double complex elliprc(double complex, double complex) + +- :py:func:`~scipy.special.elliprd`:: + + double elliprd(double, double, double) + double complex elliprd(double complex, double complex, double complex) + +- :py:func:`~scipy.special.elliprf`:: + + double elliprf(double, double, double) + double complex elliprf(double complex, double complex, double complex) + +- :py:func:`~scipy.special.elliprg`:: + + double elliprg(double, double, double) + double complex elliprg(double complex, double complex, double complex) + +- :py:func:`~scipy.special.elliprj`:: + + double elliprj(double, double, double, double) + double complex elliprj(double complex, double complex, double complex, double complex) + +- :py:func:`~scipy.special.entr`:: + + double entr(double) + +- :py:func:`~scipy.special.erf`:: + + double complex erf(double complex) + double erf(double) + +- :py:func:`~scipy.special.erfc`:: + + double complex erfc(double complex) + double erfc(double) + +- :py:func:`~scipy.special.erfcx`:: + + double erfcx(double) + double complex erfcx(double complex) + +- :py:func:`~scipy.special.erfi`:: + + double erfi(double) + double complex erfi(double complex) + +- :py:func:`~scipy.special.erfinv`:: + + float erfinv(float) + double erfinv(double) + +- :py:func:`~scipy.special.erfcinv`:: + + double erfcinv(double) + +- :py:func:`~scipy.special.eval_chebyc`:: + + double complex eval_chebyc(double, double complex) + double eval_chebyc(double, double) + double eval_chebyc(long, double) + +- :py:func:`~scipy.special.eval_chebys`:: + + double complex eval_chebys(double, double complex) + double eval_chebys(double, double) + double eval_chebys(long, double) + +- :py:func:`~scipy.special.eval_chebyt`:: + + double complex eval_chebyt(double, double complex) + double 
eval_chebyt(double, double) + double eval_chebyt(long, double) + +- :py:func:`~scipy.special.eval_chebyu`:: + + double complex eval_chebyu(double, double complex) + double eval_chebyu(double, double) + double eval_chebyu(long, double) + +- :py:func:`~scipy.special.eval_gegenbauer`:: + + double complex eval_gegenbauer(double, double, double complex) + double eval_gegenbauer(double, double, double) + double eval_gegenbauer(long, double, double) + +- :py:func:`~scipy.special.eval_genlaguerre`:: + + double complex eval_genlaguerre(double, double, double complex) + double eval_genlaguerre(double, double, double) + double eval_genlaguerre(long, double, double) + +- :py:func:`~scipy.special.eval_hermite`:: + + double eval_hermite(long, double) + +- :py:func:`~scipy.special.eval_hermitenorm`:: + + double eval_hermitenorm(long, double) + +- :py:func:`~scipy.special.eval_jacobi`:: + + double complex eval_jacobi(double, double, double, double complex) + double eval_jacobi(double, double, double, double) + double eval_jacobi(long, double, double, double) + +- :py:func:`~scipy.special.eval_laguerre`:: + + double complex eval_laguerre(double, double complex) + double eval_laguerre(double, double) + double eval_laguerre(long, double) + +- :py:func:`~scipy.special.eval_legendre`:: + + double complex eval_legendre(double, double complex) + double eval_legendre(double, double) + double eval_legendre(long, double) + +- :py:func:`~scipy.special.eval_sh_chebyt`:: + + double complex eval_sh_chebyt(double, double complex) + double eval_sh_chebyt(double, double) + double eval_sh_chebyt(long, double) + +- :py:func:`~scipy.special.eval_sh_chebyu`:: + + double complex eval_sh_chebyu(double, double complex) + double eval_sh_chebyu(double, double) + double eval_sh_chebyu(long, double) + +- :py:func:`~scipy.special.eval_sh_jacobi`:: + + double complex eval_sh_jacobi(double, double, double, double complex) + double eval_sh_jacobi(double, double, double, double) + double eval_sh_jacobi(long, 
double, double, double) + +- :py:func:`~scipy.special.eval_sh_legendre`:: + + double complex eval_sh_legendre(double, double complex) + double eval_sh_legendre(double, double) + double eval_sh_legendre(long, double) + +- :py:func:`~scipy.special.exp1`:: + + double complex exp1(double complex) + double exp1(double) + +- :py:func:`~scipy.special.exp10`:: + + double exp10(double) + +- :py:func:`~scipy.special.exp2`:: + + double exp2(double) + +- :py:func:`~scipy.special.expi`:: + + double complex expi(double complex) + double expi(double) + +- :py:func:`~scipy.special.expit`:: + + double expit(double) + float expit(float) + long double expit(long double) + +- :py:func:`~scipy.special.expm1`:: + + double complex expm1(double complex) + double expm1(double) + +- :py:func:`~scipy.special.expn`:: + + double expn(double, double) + double expn(long, double) + +- :py:func:`~scipy.special.exprel`:: + + double exprel(double) + +- :py:func:`~scipy.special.fdtr`:: + + double fdtr(double, double, double) + +- :py:func:`~scipy.special.fdtrc`:: + + double fdtrc(double, double, double) + +- :py:func:`~scipy.special.fdtri`:: + + double fdtri(double, double, double) + +- :py:func:`~scipy.special.fdtridfd`:: + + double fdtridfd(double, double, double) + +- :py:func:`~scipy.special.fresnel`:: + + void fresnel(double, double *, double *) + void fresnel(double complex, double complex *, double complex *) + +- :py:func:`~scipy.special.gamma`:: + + double complex gamma(double complex) + double gamma(double) + +- :py:func:`~scipy.special.gammainc`:: + + double gammainc(double, double) + +- :py:func:`~scipy.special.gammaincc`:: + + double gammaincc(double, double) + +- :py:func:`~scipy.special.gammainccinv`:: + + double gammainccinv(double, double) + +- :py:func:`~scipy.special.gammaincinv`:: + + double gammaincinv(double, double) + +- :py:func:`~scipy.special.gammaln`:: + + double gammaln(double) + +- :py:func:`~scipy.special.gammasgn`:: + + double gammasgn(double) + +- 
:py:func:`~scipy.special.gdtr`:: + + double gdtr(double, double, double) + +- :py:func:`~scipy.special.gdtrc`:: + + double gdtrc(double, double, double) + +- :py:func:`~scipy.special.gdtria`:: + + double gdtria(double, double, double) + +- :py:func:`~scipy.special.gdtrib`:: + + double gdtrib(double, double, double) + +- :py:func:`~scipy.special.gdtrix`:: + + double gdtrix(double, double, double) + +- :py:func:`~scipy.special.hankel1`:: + + double complex hankel1(double, double complex) + +- :py:func:`~scipy.special.hankel1e`:: + + double complex hankel1e(double, double complex) + +- :py:func:`~scipy.special.hankel2`:: + + double complex hankel2(double, double complex) + +- :py:func:`~scipy.special.hankel2e`:: + + double complex hankel2e(double, double complex) + +- :py:func:`~scipy.special.huber`:: + + double huber(double, double) + +- :py:func:`~scipy.special.hyp0f1`:: + + double complex hyp0f1(double, double complex) + double hyp0f1(double, double) + +- :py:func:`~scipy.special.hyp1f1`:: + + double hyp1f1(double, double, double) + double complex hyp1f1(double, double, double complex) + +- :py:func:`~scipy.special.hyp2f1`:: + + double hyp2f1(double, double, double, double) + double complex hyp2f1(double, double, double, double complex) + +- :py:func:`~scipy.special.hyperu`:: + + double hyperu(double, double, double) + +- :py:func:`~scipy.special.i0`:: + + double i0(double) + +- :py:func:`~scipy.special.i0e`:: + + double i0e(double) + +- :py:func:`~scipy.special.i1`:: + + double i1(double) + +- :py:func:`~scipy.special.i1e`:: + + double i1e(double) + +- :py:func:`~scipy.special.inv_boxcox`:: + + double inv_boxcox(double, double) + +- :py:func:`~scipy.special.inv_boxcox1p`:: + + double inv_boxcox1p(double, double) + +- :py:func:`~scipy.special.it2i0k0`:: + + void it2i0k0(double, double *, double *) + +- :py:func:`~scipy.special.it2j0y0`:: + + void it2j0y0(double, double *, double *) + +- :py:func:`~scipy.special.it2struve0`:: + + double it2struve0(double) + +- 
:py:func:`~scipy.special.itairy`:: + + void itairy(double, double *, double *, double *, double *) + +- :py:func:`~scipy.special.iti0k0`:: + + void iti0k0(double, double *, double *) + +- :py:func:`~scipy.special.itj0y0`:: + + void itj0y0(double, double *, double *) + +- :py:func:`~scipy.special.itmodstruve0`:: + + double itmodstruve0(double) + +- :py:func:`~scipy.special.itstruve0`:: + + double itstruve0(double) + +- :py:func:`~scipy.special.iv`:: + + double complex iv(double, double complex) + double iv(double, double) + +- :py:func:`~scipy.special.ive`:: + + double complex ive(double, double complex) + double ive(double, double) + +- :py:func:`~scipy.special.j0`:: + + double j0(double) + +- :py:func:`~scipy.special.j1`:: + + double j1(double) + +- :py:func:`~scipy.special.jv`:: + + double complex jv(double, double complex) + double jv(double, double) + +- :py:func:`~scipy.special.jve`:: + + double complex jve(double, double complex) + double jve(double, double) + +- :py:func:`~scipy.special.k0`:: + + double k0(double) + +- :py:func:`~scipy.special.k0e`:: + + double k0e(double) + +- :py:func:`~scipy.special.k1`:: + + double k1(double) + +- :py:func:`~scipy.special.k1e`:: + + double k1e(double) + +- :py:func:`~scipy.special.kei`:: + + double kei(double) + +- :py:func:`~scipy.special.keip`:: + + double keip(double) + +- :py:func:`~scipy.special.kelvin`:: + + void kelvin(double, double complex *, double complex *, double complex *, double complex *) + +- :py:func:`~scipy.special.ker`:: + + double ker(double) + +- :py:func:`~scipy.special.kerp`:: + + double kerp(double) + +- :py:func:`~scipy.special.kl_div`:: + + double kl_div(double, double) + +- :py:func:`~scipy.special.kn`:: + + double kn(double, double) + double kn(long, double) + +- :py:func:`~scipy.special.kolmogi`:: + + double kolmogi(double) + +- :py:func:`~scipy.special.kolmogorov`:: + + double kolmogorov(double) + +- :py:func:`~scipy.special.kv`:: + + double complex kv(double, double complex) + double 
kv(double, double) + +- :py:func:`~scipy.special.kve`:: + + double complex kve(double, double complex) + double kve(double, double) + +- :py:func:`~scipy.special.log1p`:: + + double complex log1p(double complex) + double log1p(double) + +- :py:func:`~scipy.special.log_expit`:: + + double log_expit(double) + float log_expit(float) + long double log_expit(long double) + +- :py:func:`~scipy.special.log_ndtr`:: + + double log_ndtr(double) + double complex log_ndtr(double complex) + +- :py:func:`~scipy.special.loggamma`:: + + double loggamma(double) + double complex loggamma(double complex) + +- :py:func:`~scipy.special.logit`:: + + double logit(double) + float logit(float) + long double logit(long double) + +- :py:func:`~scipy.special.lpmv`:: + + double lpmv(double, double, double) + +- :py:func:`~scipy.special.mathieu_a`:: + + double mathieu_a(double, double) + +- :py:func:`~scipy.special.mathieu_b`:: + + double mathieu_b(double, double) + +- :py:func:`~scipy.special.mathieu_cem`:: + + void mathieu_cem(double, double, double, double *, double *) + +- :py:func:`~scipy.special.mathieu_modcem1`:: + + void mathieu_modcem1(double, double, double, double *, double *) + +- :py:func:`~scipy.special.mathieu_modcem2`:: + + void mathieu_modcem2(double, double, double, double *, double *) + +- :py:func:`~scipy.special.mathieu_modsem1`:: + + void mathieu_modsem1(double, double, double, double *, double *) + +- :py:func:`~scipy.special.mathieu_modsem2`:: + + void mathieu_modsem2(double, double, double, double *, double *) + +- :py:func:`~scipy.special.mathieu_sem`:: + + void mathieu_sem(double, double, double, double *, double *) + +- :py:func:`~scipy.special.modfresnelm`:: + + void modfresnelm(double, double complex *, double complex *) + +- :py:func:`~scipy.special.modfresnelp`:: + + void modfresnelp(double, double complex *, double complex *) + +- :py:func:`~scipy.special.modstruve`:: + + double modstruve(double, double) + +- :py:func:`~scipy.special.nbdtr`:: + + double 
nbdtr(double, double, double) + double nbdtr(long, long, double) + +- :py:func:`~scipy.special.nbdtrc`:: + + double nbdtrc(double, double, double) + double nbdtrc(long, long, double) + +- :py:func:`~scipy.special.nbdtri`:: + + double nbdtri(double, double, double) + double nbdtri(long, long, double) + +- :py:func:`~scipy.special.nbdtrik`:: + + double nbdtrik(double, double, double) + +- :py:func:`~scipy.special.nbdtrin`:: + + double nbdtrin(double, double, double) + +- :py:func:`~scipy.special.ncfdtr`:: + + double ncfdtr(double, double, double, double) + +- :py:func:`~scipy.special.ncfdtri`:: + + double ncfdtri(double, double, double, double) + +- :py:func:`~scipy.special.ncfdtridfd`:: + + double ncfdtridfd(double, double, double, double) + +- :py:func:`~scipy.special.ncfdtridfn`:: + + double ncfdtridfn(double, double, double, double) + +- :py:func:`~scipy.special.ncfdtrinc`:: + + double ncfdtrinc(double, double, double, double) + +- :py:func:`~scipy.special.nctdtr`:: + + double nctdtr(double, double, double) + +- :py:func:`~scipy.special.nctdtridf`:: + + double nctdtridf(double, double, double) + +- :py:func:`~scipy.special.nctdtrinc`:: + + double nctdtrinc(double, double, double) + +- :py:func:`~scipy.special.nctdtrit`:: + + double nctdtrit(double, double, double) + +- :py:func:`~scipy.special.ndtr`:: + + double complex ndtr(double complex) + double ndtr(double) + +- :py:func:`~scipy.special.ndtri`:: + + double ndtri(double) + +- :py:func:`~scipy.special.nrdtrimn`:: + + double nrdtrimn(double, double, double) + +- :py:func:`~scipy.special.nrdtrisd`:: + + double nrdtrisd(double, double, double) + +- :py:func:`~scipy.special.obl_ang1`:: + + void obl_ang1(double, double, double, double, double *, double *) + +- :py:func:`~scipy.special.obl_ang1_cv`:: + + void obl_ang1_cv(double, double, double, double, double, double *, double *) + +- :py:func:`~scipy.special.obl_cv`:: + + double obl_cv(double, double, double) + +- :py:func:`~scipy.special.obl_rad1`:: + + void 
obl_rad1(double, double, double, double, double *, double *) + +- :py:func:`~scipy.special.obl_rad1_cv`:: + + void obl_rad1_cv(double, double, double, double, double, double *, double *) + +- :py:func:`~scipy.special.obl_rad2`:: + + void obl_rad2(double, double, double, double, double *, double *) + +- :py:func:`~scipy.special.obl_rad2_cv`:: + + void obl_rad2_cv(double, double, double, double, double, double *, double *) + +- :py:func:`~scipy.special.owens_t`:: + + double owens_t(double, double) + +- :py:func:`~scipy.special.pbdv`:: + + void pbdv(double, double, double *, double *) + +- :py:func:`~scipy.special.pbvv`:: + + void pbvv(double, double, double *, double *) + +- :py:func:`~scipy.special.pbwa`:: + + void pbwa(double, double, double *, double *) + +- :py:func:`~scipy.special.pdtr`:: + + double pdtr(double, double) + +- :py:func:`~scipy.special.pdtrc`:: + + double pdtrc(double, double) + +- :py:func:`~scipy.special.pdtri`:: + + double pdtri(double, double) + double pdtri(long, double) + +- :py:func:`~scipy.special.pdtrik`:: + + double pdtrik(double, double) + +- :py:func:`~scipy.special.poch`:: + + double poch(double, double) + +- :py:func:`~scipy.special.powm1`:: + + float powm1(float, float) + double powm1(double, double) + +- :py:func:`~scipy.special.pro_ang1`:: + + void pro_ang1(double, double, double, double, double *, double *) + +- :py:func:`~scipy.special.pro_ang1_cv`:: + + void pro_ang1_cv(double, double, double, double, double, double *, double *) + +- :py:func:`~scipy.special.pro_cv`:: + + double pro_cv(double, double, double) + +- :py:func:`~scipy.special.pro_rad1`:: + + void pro_rad1(double, double, double, double, double *, double *) + +- :py:func:`~scipy.special.pro_rad1_cv`:: + + void pro_rad1_cv(double, double, double, double, double, double *, double *) + +- :py:func:`~scipy.special.pro_rad2`:: + + void pro_rad2(double, double, double, double, double *, double *) + +- :py:func:`~scipy.special.pro_rad2_cv`:: + + void pro_rad2_cv(double, 
double, double, double, double, double *, double *) + +- :py:func:`~scipy.special.pseudo_huber`:: + + double pseudo_huber(double, double) + +- :py:func:`~scipy.special.psi`:: + + double complex psi(double complex) + double psi(double) + +- :py:func:`~scipy.special.radian`:: + + double radian(double, double, double) + +- :py:func:`~scipy.special.rel_entr`:: + + double rel_entr(double, double) + +- :py:func:`~scipy.special.rgamma`:: + + double complex rgamma(double complex) + double rgamma(double) + +- :py:func:`~scipy.special.round`:: + + double round(double) + +- :py:func:`~scipy.special.shichi`:: + + void shichi(double complex, double complex *, double complex *) + void shichi(double, double *, double *) + +- :py:func:`~scipy.special.sici`:: + + void sici(double complex, double complex *, double complex *) + void sici(double, double *, double *) + +- :py:func:`~scipy.special.sindg`:: + + double sindg(double) + +- :py:func:`~scipy.special.smirnov`:: + + double smirnov(double, double) + double smirnov(long, double) + +- :py:func:`~scipy.special.smirnovi`:: + + double smirnovi(double, double) + double smirnovi(long, double) + +- :py:func:`~scipy.special.spence`:: + + double complex spence(double complex) + double spence(double) + +- :py:func:`~scipy.special.sph_harm`:: + + double complex sph_harm(double, double, double, double) + double complex sph_harm(long, long, double, double) + +- :py:func:`~scipy.special.stdtr`:: + + double stdtr(double, double) + +- :py:func:`~scipy.special.stdtridf`:: + + double stdtridf(double, double) + +- :py:func:`~scipy.special.stdtrit`:: + + double stdtrit(double, double) + +- :py:func:`~scipy.special.struve`:: + + double struve(double, double) + +- :py:func:`~scipy.special.tandg`:: + + double tandg(double) + +- :py:func:`~scipy.special.tklmbda`:: + + double tklmbda(double, double) + +- :py:func:`~scipy.special.wofz`:: + + double complex wofz(double complex) + +- :py:func:`~scipy.special.wrightomega`:: + + double complex 
wrightomega(double complex) + double wrightomega(double) + +- :py:func:`~scipy.special.xlog1py`:: + + double xlog1py(double, double) + double complex xlog1py(double complex, double complex) + +- :py:func:`~scipy.special.xlogy`:: + + double xlogy(double, double) + double complex xlogy(double complex, double complex) + +- :py:func:`~scipy.special.y0`:: + + double y0(double) + +- :py:func:`~scipy.special.y1`:: + + double y1(double) + +- :py:func:`~scipy.special.yn`:: + + double yn(double, double) + double yn(long, double) + +- :py:func:`~scipy.special.yv`:: + + double complex yv(double, double complex) + double yv(double, double) + +- :py:func:`~scipy.special.yve`:: + + double complex yve(double, double complex) + double yve(double, double) + +- :py:func:`~scipy.special.zetac`:: + + double zetac(double) + +- :py:func:`~scipy.special.wright_bessel`:: + + double wright_bessel(double, double, double) + +- :py:func:`~scipy.special.ndtri_exp`:: + + double ndtri_exp(double) + + +Custom functions +---------------- + +Some functions in ``scipy.special`` which are not ufuncs have custom +Cython wrappers. + +Spherical Bessel functions +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The optional ``derivative`` boolean argument is replaced with an +optional Cython ``bint``, leading to the following signatures. 
+ +- :py:func:`~scipy.special.spherical_jn`:: + + double complex spherical_jn(long, double complex) + double complex spherical_jn(long, double complex, bint) + double spherical_jn(long, double) + double spherical_jn(long, double, bint) + +- :py:func:`~scipy.special.spherical_yn`:: + + double complex spherical_yn(long, double complex) + double complex spherical_yn(long, double complex, bint) + double spherical_yn(long, double) + double spherical_yn(long, double, bint) + +- :py:func:`~scipy.special.spherical_in`:: + + double complex spherical_in(long, double complex) + double complex spherical_in(long, double complex, bint) + double spherical_in(long, double) + double spherical_in(long, double, bint) + +- :py:func:`~scipy.special.spherical_kn`:: + + double complex spherical_kn(long, double complex) + double complex spherical_kn(long, double complex, bint) + double spherical_kn(long, double) + double spherical_kn(long, double, bint) + +""" + +from libc.math cimport NAN + +include "_cython_special.pxi" +include "_cython_special_custom.pxi" + +from ._agm cimport agm as _func_agm +ctypedef double _proto_agm_t(double, double) noexcept nogil +cdef _proto_agm_t *_proto_agm_t_var = &_func_agm +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_airy_wrap "airy_wrap"(npy_double, npy_double *, npy_double *, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_cairy_wrap "cairy_wrap"(npy_cdouble, npy_cdouble *, npy_cdouble *, npy_cdouble *, npy_cdouble *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_cairy_wrap_e "cairy_wrap_e"(npy_cdouble, npy_cdouble *, npy_cdouble *, npy_cdouble *, npy_cdouble *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_cairy_wrap_e_real "cairy_wrap_e_real"(npy_double, npy_double *, npy_double *, npy_double *, npy_double *)nogil +from ._legacy cimport bdtr_unsafe as _func_bdtr_unsafe +ctypedef double _proto_bdtr_unsafe_t(double, double, double) noexcept nogil +cdef 
_proto_bdtr_unsafe_t *_proto_bdtr_unsafe_t_var = &_func_bdtr_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_bdtr "bdtr"(npy_double, npy_int, npy_double)nogil +from ._legacy cimport bdtrc_unsafe as _func_bdtrc_unsafe +ctypedef double _proto_bdtrc_unsafe_t(double, double, double) noexcept nogil +cdef _proto_bdtrc_unsafe_t *_proto_bdtrc_unsafe_t_var = &_func_bdtrc_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_bdtrc "bdtrc"(npy_double, npy_int, npy_double)nogil +from ._legacy cimport bdtri_unsafe as _func_bdtri_unsafe +ctypedef double _proto_bdtri_unsafe_t(double, double, double) noexcept nogil +cdef _proto_bdtri_unsafe_t *_proto_bdtri_unsafe_t_var = &_func_bdtri_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_bdtri "bdtri"(npy_double, npy_int, npy_double)nogil +from ._cdflib_wrappers cimport bdtrik as _func_bdtrik +ctypedef double _proto_bdtrik_t(double, double, double) noexcept nogil +cdef _proto_bdtrik_t *_proto_bdtrik_t_var = &_func_bdtrik +from ._cdflib_wrappers cimport bdtrin as _func_bdtrin +ctypedef double _proto_bdtrin_t(double, double, double) noexcept nogil +cdef _proto_bdtrin_t *_proto_bdtrin_t_var = &_func_bdtrin +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_bei_wrap "bei_wrap"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_beip_wrap "beip_wrap"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_ber_wrap "ber_wrap"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_berp_wrap "berp_wrap"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_besselpoly "besselpoly"(npy_double, npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_beta "beta"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_lbeta "lbeta"(npy_double, npy_double)nogil +from ._boxcox cimport boxcox as _func_boxcox +ctypedef double 
_proto_boxcox_t(double, double) noexcept nogil +cdef _proto_boxcox_t *_proto_boxcox_t_var = &_func_boxcox +from ._boxcox cimport boxcox1p as _func_boxcox1p +ctypedef double _proto_boxcox1p_t(double, double) noexcept nogil +cdef _proto_boxcox1p_t *_proto_boxcox1p_t_var = &_func_boxcox1p +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_btdtr "btdtr"(npy_double, npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_incbi "incbi"(npy_double, npy_double, npy_double)nogil +from ._cdflib_wrappers cimport btdtria as _func_btdtria +ctypedef double _proto_btdtria_t(double, double, double) noexcept nogil +cdef _proto_btdtria_t *_proto_btdtria_t_var = &_func_btdtria +from ._cdflib_wrappers cimport btdtrib as _func_btdtrib +ctypedef double _proto_btdtrib_t(double, double, double) noexcept nogil +cdef _proto_btdtrib_t *_proto_btdtrib_t_var = &_func_btdtrib +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_cbrt "cbrt"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_chdtr "chdtr"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_chdtrc "chdtrc"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_chdtri "chdtri"(npy_double, npy_double)nogil +from ._cdflib_wrappers cimport chdtriv as _func_chdtriv +ctypedef double _proto_chdtriv_t(double, double) noexcept nogil +cdef _proto_chdtriv_t *_proto_chdtriv_t_var = &_func_chdtriv +from ._cdflib_wrappers cimport chndtr as _func_chndtr +ctypedef double _proto_chndtr_t(double, double, double) noexcept nogil +cdef _proto_chndtr_t *_proto_chndtr_t_var = &_func_chndtr +from ._cdflib_wrappers cimport chndtridf as _func_chndtridf +ctypedef double _proto_chndtridf_t(double, double, double) noexcept nogil +cdef _proto_chndtridf_t *_proto_chndtridf_t_var = &_func_chndtridf +from ._cdflib_wrappers cimport chndtrinc as _func_chndtrinc +ctypedef double _proto_chndtrinc_t(double, double, 
double) noexcept nogil +cdef _proto_chndtrinc_t *_proto_chndtrinc_t_var = &_func_chndtrinc +from ._cdflib_wrappers cimport chndtrix as _func_chndtrix +ctypedef double _proto_chndtrix_t(double, double, double) noexcept nogil +cdef _proto_chndtrix_t *_proto_chndtrix_t_var = &_func_chndtrix +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_cosdg "cosdg"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_cosm1 "cosm1"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_cotdg "cotdg"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_ellpe "ellpe"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_ellie "ellie"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_ellpj "ellpj"(npy_double, npy_double, npy_double *, npy_double *, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_ellik "ellik"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_ellpk "ellpk"(npy_double)nogil +from ._ellipk cimport ellipk as _func_ellipk +ctypedef double _proto_ellipk_t(double) noexcept nogil +cdef _proto_ellipk_t *_proto_ellipk_t_var = &_func_ellipk +from ._convex_analysis cimport entr as _func_entr +ctypedef double _proto_entr_t(double) noexcept nogil +cdef _proto_entr_t *_proto_entr_t_var = &_func_entr +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_erf "erf"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_erfc "erfc"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_erfcinv "erfcinv"(npy_double)nogil +from .orthogonal_eval cimport eval_chebyc as _func_eval_chebyc +ctypedef double complex _proto_eval_chebyc_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_chebyc_double_complex__t *_proto_eval_chebyc_double_complex__t_var = &_func_eval_chebyc[double_complex] +from 
.orthogonal_eval cimport eval_chebyc as _func_eval_chebyc +ctypedef double _proto_eval_chebyc_double__t(double, double) noexcept nogil +cdef _proto_eval_chebyc_double__t *_proto_eval_chebyc_double__t_var = &_func_eval_chebyc[double] +from .orthogonal_eval cimport eval_chebyc_l as _func_eval_chebyc_l +ctypedef double _proto_eval_chebyc_l_t(long, double) noexcept nogil +cdef _proto_eval_chebyc_l_t *_proto_eval_chebyc_l_t_var = &_func_eval_chebyc_l +from .orthogonal_eval cimport eval_chebys as _func_eval_chebys +ctypedef double complex _proto_eval_chebys_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_chebys_double_complex__t *_proto_eval_chebys_double_complex__t_var = &_func_eval_chebys[double_complex] +from .orthogonal_eval cimport eval_chebys as _func_eval_chebys +ctypedef double _proto_eval_chebys_double__t(double, double) noexcept nogil +cdef _proto_eval_chebys_double__t *_proto_eval_chebys_double__t_var = &_func_eval_chebys[double] +from .orthogonal_eval cimport eval_chebys_l as _func_eval_chebys_l +ctypedef double _proto_eval_chebys_l_t(long, double) noexcept nogil +cdef _proto_eval_chebys_l_t *_proto_eval_chebys_l_t_var = &_func_eval_chebys_l +from .orthogonal_eval cimport eval_chebyt as _func_eval_chebyt +ctypedef double complex _proto_eval_chebyt_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_chebyt_double_complex__t *_proto_eval_chebyt_double_complex__t_var = &_func_eval_chebyt[double_complex] +from .orthogonal_eval cimport eval_chebyt as _func_eval_chebyt +ctypedef double _proto_eval_chebyt_double__t(double, double) noexcept nogil +cdef _proto_eval_chebyt_double__t *_proto_eval_chebyt_double__t_var = &_func_eval_chebyt[double] +from .orthogonal_eval cimport eval_chebyt_l as _func_eval_chebyt_l +ctypedef double _proto_eval_chebyt_l_t(long, double) noexcept nogil +cdef _proto_eval_chebyt_l_t *_proto_eval_chebyt_l_t_var = &_func_eval_chebyt_l +from .orthogonal_eval cimport eval_chebyu as _func_eval_chebyu 
+ctypedef double complex _proto_eval_chebyu_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_chebyu_double_complex__t *_proto_eval_chebyu_double_complex__t_var = &_func_eval_chebyu[double_complex] +from .orthogonal_eval cimport eval_chebyu as _func_eval_chebyu +ctypedef double _proto_eval_chebyu_double__t(double, double) noexcept nogil +cdef _proto_eval_chebyu_double__t *_proto_eval_chebyu_double__t_var = &_func_eval_chebyu[double] +from .orthogonal_eval cimport eval_chebyu_l as _func_eval_chebyu_l +ctypedef double _proto_eval_chebyu_l_t(long, double) noexcept nogil +cdef _proto_eval_chebyu_l_t *_proto_eval_chebyu_l_t_var = &_func_eval_chebyu_l +from .orthogonal_eval cimport eval_gegenbauer as _func_eval_gegenbauer +ctypedef double complex _proto_eval_gegenbauer_double_complex__t(double, double, double complex) noexcept nogil +cdef _proto_eval_gegenbauer_double_complex__t *_proto_eval_gegenbauer_double_complex__t_var = &_func_eval_gegenbauer[double_complex] +from .orthogonal_eval cimport eval_gegenbauer as _func_eval_gegenbauer +ctypedef double _proto_eval_gegenbauer_double__t(double, double, double) noexcept nogil +cdef _proto_eval_gegenbauer_double__t *_proto_eval_gegenbauer_double__t_var = &_func_eval_gegenbauer[double] +from .orthogonal_eval cimport eval_gegenbauer_l as _func_eval_gegenbauer_l +ctypedef double _proto_eval_gegenbauer_l_t(long, double, double) noexcept nogil +cdef _proto_eval_gegenbauer_l_t *_proto_eval_gegenbauer_l_t_var = &_func_eval_gegenbauer_l +from .orthogonal_eval cimport eval_genlaguerre as _func_eval_genlaguerre +ctypedef double complex _proto_eval_genlaguerre_double_complex__t(double, double, double complex) noexcept nogil +cdef _proto_eval_genlaguerre_double_complex__t *_proto_eval_genlaguerre_double_complex__t_var = &_func_eval_genlaguerre[double_complex] +from .orthogonal_eval cimport eval_genlaguerre as _func_eval_genlaguerre +ctypedef double _proto_eval_genlaguerre_double__t(double, double, double) 
noexcept nogil +cdef _proto_eval_genlaguerre_double__t *_proto_eval_genlaguerre_double__t_var = &_func_eval_genlaguerre[double] +from .orthogonal_eval cimport eval_genlaguerre_l as _func_eval_genlaguerre_l +ctypedef double _proto_eval_genlaguerre_l_t(long, double, double) noexcept nogil +cdef _proto_eval_genlaguerre_l_t *_proto_eval_genlaguerre_l_t_var = &_func_eval_genlaguerre_l +from .orthogonal_eval cimport eval_hermite as _func_eval_hermite +ctypedef double _proto_eval_hermite_t(long, double) noexcept nogil +cdef _proto_eval_hermite_t *_proto_eval_hermite_t_var = &_func_eval_hermite +from .orthogonal_eval cimport eval_hermitenorm as _func_eval_hermitenorm +ctypedef double _proto_eval_hermitenorm_t(long, double) noexcept nogil +cdef _proto_eval_hermitenorm_t *_proto_eval_hermitenorm_t_var = &_func_eval_hermitenorm +from .orthogonal_eval cimport eval_jacobi as _func_eval_jacobi +ctypedef double complex _proto_eval_jacobi_double_complex__t(double, double, double, double complex) noexcept nogil +cdef _proto_eval_jacobi_double_complex__t *_proto_eval_jacobi_double_complex__t_var = &_func_eval_jacobi[double_complex] +from .orthogonal_eval cimport eval_jacobi as _func_eval_jacobi +ctypedef double _proto_eval_jacobi_double__t(double, double, double, double) noexcept nogil +cdef _proto_eval_jacobi_double__t *_proto_eval_jacobi_double__t_var = &_func_eval_jacobi[double] +from .orthogonal_eval cimport eval_jacobi_l as _func_eval_jacobi_l +ctypedef double _proto_eval_jacobi_l_t(long, double, double, double) noexcept nogil +cdef _proto_eval_jacobi_l_t *_proto_eval_jacobi_l_t_var = &_func_eval_jacobi_l +from .orthogonal_eval cimport eval_laguerre as _func_eval_laguerre +ctypedef double complex _proto_eval_laguerre_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_laguerre_double_complex__t *_proto_eval_laguerre_double_complex__t_var = &_func_eval_laguerre[double_complex] +from .orthogonal_eval cimport eval_laguerre as _func_eval_laguerre +ctypedef 
double _proto_eval_laguerre_double__t(double, double) noexcept nogil +cdef _proto_eval_laguerre_double__t *_proto_eval_laguerre_double__t_var = &_func_eval_laguerre[double] +from .orthogonal_eval cimport eval_laguerre_l as _func_eval_laguerre_l +ctypedef double _proto_eval_laguerre_l_t(long, double) noexcept nogil +cdef _proto_eval_laguerre_l_t *_proto_eval_laguerre_l_t_var = &_func_eval_laguerre_l +from .orthogonal_eval cimport eval_legendre as _func_eval_legendre +ctypedef double complex _proto_eval_legendre_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_legendre_double_complex__t *_proto_eval_legendre_double_complex__t_var = &_func_eval_legendre[double_complex] +from .orthogonal_eval cimport eval_legendre as _func_eval_legendre +ctypedef double _proto_eval_legendre_double__t(double, double) noexcept nogil +cdef _proto_eval_legendre_double__t *_proto_eval_legendre_double__t_var = &_func_eval_legendre[double] +from .orthogonal_eval cimport eval_legendre_l as _func_eval_legendre_l +ctypedef double _proto_eval_legendre_l_t(long, double) noexcept nogil +cdef _proto_eval_legendre_l_t *_proto_eval_legendre_l_t_var = &_func_eval_legendre_l +from .orthogonal_eval cimport eval_sh_chebyt as _func_eval_sh_chebyt +ctypedef double complex _proto_eval_sh_chebyt_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_sh_chebyt_double_complex__t *_proto_eval_sh_chebyt_double_complex__t_var = &_func_eval_sh_chebyt[double_complex] +from .orthogonal_eval cimport eval_sh_chebyt as _func_eval_sh_chebyt +ctypedef double _proto_eval_sh_chebyt_double__t(double, double) noexcept nogil +cdef _proto_eval_sh_chebyt_double__t *_proto_eval_sh_chebyt_double__t_var = &_func_eval_sh_chebyt[double] +from .orthogonal_eval cimport eval_sh_chebyt_l as _func_eval_sh_chebyt_l +ctypedef double _proto_eval_sh_chebyt_l_t(long, double) noexcept nogil +cdef _proto_eval_sh_chebyt_l_t *_proto_eval_sh_chebyt_l_t_var = &_func_eval_sh_chebyt_l +from 
.orthogonal_eval cimport eval_sh_chebyu as _func_eval_sh_chebyu +ctypedef double complex _proto_eval_sh_chebyu_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_sh_chebyu_double_complex__t *_proto_eval_sh_chebyu_double_complex__t_var = &_func_eval_sh_chebyu[double_complex] +from .orthogonal_eval cimport eval_sh_chebyu as _func_eval_sh_chebyu +ctypedef double _proto_eval_sh_chebyu_double__t(double, double) noexcept nogil +cdef _proto_eval_sh_chebyu_double__t *_proto_eval_sh_chebyu_double__t_var = &_func_eval_sh_chebyu[double] +from .orthogonal_eval cimport eval_sh_chebyu_l as _func_eval_sh_chebyu_l +ctypedef double _proto_eval_sh_chebyu_l_t(long, double) noexcept nogil +cdef _proto_eval_sh_chebyu_l_t *_proto_eval_sh_chebyu_l_t_var = &_func_eval_sh_chebyu_l +from .orthogonal_eval cimport eval_sh_jacobi as _func_eval_sh_jacobi +ctypedef double complex _proto_eval_sh_jacobi_double_complex__t(double, double, double, double complex) noexcept nogil +cdef _proto_eval_sh_jacobi_double_complex__t *_proto_eval_sh_jacobi_double_complex__t_var = &_func_eval_sh_jacobi[double_complex] +from .orthogonal_eval cimport eval_sh_jacobi as _func_eval_sh_jacobi +ctypedef double _proto_eval_sh_jacobi_double__t(double, double, double, double) noexcept nogil +cdef _proto_eval_sh_jacobi_double__t *_proto_eval_sh_jacobi_double__t_var = &_func_eval_sh_jacobi[double] +from .orthogonal_eval cimport eval_sh_jacobi_l as _func_eval_sh_jacobi_l +ctypedef double _proto_eval_sh_jacobi_l_t(long, double, double, double) noexcept nogil +cdef _proto_eval_sh_jacobi_l_t *_proto_eval_sh_jacobi_l_t_var = &_func_eval_sh_jacobi_l +from .orthogonal_eval cimport eval_sh_legendre as _func_eval_sh_legendre +ctypedef double complex _proto_eval_sh_legendre_double_complex__t(double, double complex) noexcept nogil +cdef _proto_eval_sh_legendre_double_complex__t *_proto_eval_sh_legendre_double_complex__t_var = &_func_eval_sh_legendre[double_complex] +from .orthogonal_eval cimport 
eval_sh_legendre as _func_eval_sh_legendre +ctypedef double _proto_eval_sh_legendre_double__t(double, double) noexcept nogil +cdef _proto_eval_sh_legendre_double__t *_proto_eval_sh_legendre_double__t_var = &_func_eval_sh_legendre[double] +from .orthogonal_eval cimport eval_sh_legendre_l as _func_eval_sh_legendre_l +ctypedef double _proto_eval_sh_legendre_l_t(long, double) noexcept nogil +cdef _proto_eval_sh_legendre_l_t *_proto_eval_sh_legendre_l_t_var = &_func_eval_sh_legendre_l +cdef extern from r"_ufuncs_defs.h": + cdef npy_cdouble _func_cexp1_wrap "cexp1_wrap"(npy_cdouble)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_exp1_wrap "exp1_wrap"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_exp10 "exp10"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_exp2 "exp2"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_cdouble _func_cexpi_wrap "cexpi_wrap"(npy_cdouble)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_expi_wrap "expi_wrap"(npy_double)nogil +from ._cunity cimport cexpm1 as _func_cexpm1 +ctypedef double complex _proto_cexpm1_t(double complex) noexcept nogil +cdef _proto_cexpm1_t *_proto_cexpm1_t_var = &_func_cexpm1 +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_expm1 "expm1"(npy_double)nogil +from ._legacy cimport expn_unsafe as _func_expn_unsafe +ctypedef double _proto_expn_unsafe_t(double, double) noexcept nogil +cdef _proto_expn_unsafe_t *_proto_expn_unsafe_t_var = &_func_expn_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_expn "expn"(npy_int, npy_double)nogil +from ._exprel cimport exprel as _func_exprel +ctypedef double _proto_exprel_t(double) noexcept nogil +cdef _proto_exprel_t *_proto_exprel_t_var = &_func_exprel +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_fdtr "fdtr"(npy_double, npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_fdtrc 
"fdtrc"(npy_double, npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_fdtri "fdtri"(npy_double, npy_double, npy_double)nogil +from ._cdflib_wrappers cimport fdtridfd as _func_fdtridfd +ctypedef double _proto_fdtridfd_t(double, double, double) noexcept nogil +cdef _proto_fdtridfd_t *_proto_fdtridfd_t_var = &_func_fdtridfd +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_fresnl "fresnl"(npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_cfresnl_wrap "cfresnl_wrap"(npy_cdouble, npy_cdouble *, npy_cdouble *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_Gamma "Gamma"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_igam "igam"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_igamc "igamc"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_igamci "igamci"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_igami "igami"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_lgam "lgam"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_gammasgn "gammasgn"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_gdtr "gdtr"(npy_double, npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_gdtrc "gdtrc"(npy_double, npy_double, npy_double)nogil +from ._cdflib_wrappers cimport gdtria as _func_gdtria +ctypedef double _proto_gdtria_t(double, double, double) noexcept nogil +cdef _proto_gdtria_t *_proto_gdtria_t_var = &_func_gdtria +from ._cdflib_wrappers cimport gdtrib as _func_gdtrib +ctypedef double _proto_gdtrib_t(double, double, double) noexcept nogil +cdef _proto_gdtrib_t *_proto_gdtrib_t_var = &_func_gdtrib +from ._cdflib_wrappers cimport gdtrix as _func_gdtrix +ctypedef double 
_proto_gdtrix_t(double, double, double) noexcept nogil +cdef _proto_gdtrix_t *_proto_gdtrix_t_var = &_func_gdtrix +cdef extern from r"_ufuncs_defs.h": + cdef npy_cdouble _func_cbesh_wrap1 "cbesh_wrap1"(npy_double, npy_cdouble)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_cdouble _func_cbesh_wrap1_e "cbesh_wrap1_e"(npy_double, npy_cdouble)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_cdouble _func_cbesh_wrap2 "cbesh_wrap2"(npy_double, npy_cdouble)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_cdouble _func_cbesh_wrap2_e "cbesh_wrap2_e"(npy_double, npy_cdouble)nogil +from ._convex_analysis cimport huber as _func_huber +ctypedef double _proto_huber_t(double, double) noexcept nogil +cdef _proto_huber_t *_proto_huber_t_var = &_func_huber +from ._hyp0f1 cimport _hyp0f1_cmplx as _func__hyp0f1_cmplx +ctypedef double complex _proto__hyp0f1_cmplx_t(double, double complex) noexcept nogil +cdef _proto__hyp0f1_cmplx_t *_proto__hyp0f1_cmplx_t_var = &_func__hyp0f1_cmplx +from ._hyp0f1 cimport _hyp0f1_real as _func__hyp0f1_real +ctypedef double _proto__hyp0f1_real_t(double, double) noexcept nogil +cdef _proto__hyp0f1_real_t *_proto__hyp0f1_real_t_var = &_func__hyp0f1_real +cdef extern from r"_ufuncs_defs.h": + cdef npy_cdouble _func_chyp1f1_wrap "chyp1f1_wrap"(npy_double, npy_double, npy_cdouble)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_hyp2f1 "hyp2f1"(npy_double, npy_double, npy_double, npy_double)nogil +from ._hyp2f1 cimport hyp2f1_complex as _func_hyp2f1_complex +ctypedef double complex _proto_hyp2f1_complex_t(double, double, double, double complex) noexcept nogil +cdef _proto_hyp2f1_complex_t *_proto_hyp2f1_complex_t_var = &_func_hyp2f1_complex +from ._hypergeometric cimport hyperu as _func_hyperu +ctypedef double _proto_hyperu_t(double, double, double) noexcept nogil +cdef _proto_hyperu_t *_proto_hyperu_t_var = &_func_hyperu +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_i0 "i0"(npy_double)nogil +cdef extern from 
r"_ufuncs_defs.h": + cdef npy_double _func_i0e "i0e"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_i1 "i1"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_i1e "i1e"(npy_double)nogil +from ._boxcox cimport inv_boxcox as _func_inv_boxcox +ctypedef double _proto_inv_boxcox_t(double, double) noexcept nogil +cdef _proto_inv_boxcox_t *_proto_inv_boxcox_t_var = &_func_inv_boxcox +from ._boxcox cimport inv_boxcox1p as _func_inv_boxcox1p +ctypedef double _proto_inv_boxcox1p_t(double, double) noexcept nogil +cdef _proto_inv_boxcox1p_t *_proto_inv_boxcox1p_t_var = &_func_inv_boxcox1p +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_it2i0k0_wrap "it2i0k0_wrap"(npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_it2j0y0_wrap "it2j0y0_wrap"(npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_it2struve0_wrap "it2struve0_wrap"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_itairy_wrap "itairy_wrap"(npy_double, npy_double *, npy_double *, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_it1i0k0_wrap "it1i0k0_wrap"(npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_it1j0y0_wrap "it1j0y0_wrap"(npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_itmodstruve0_wrap "itmodstruve0_wrap"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_itstruve0_wrap "itstruve0_wrap"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_cdouble _func_cbesi_wrap "cbesi_wrap"(npy_double, npy_cdouble)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_iv "iv"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_cdouble _func_cbesi_wrap_e "cbesi_wrap_e"(npy_double, npy_cdouble)nogil 
+cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_cbesi_wrap_e_real "cbesi_wrap_e_real"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_j0 "j0"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_j1 "j1"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_cdouble _func_cbesj_wrap "cbesj_wrap"(npy_double, npy_cdouble)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_cbesj_wrap_real "cbesj_wrap_real"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_cdouble _func_cbesj_wrap_e "cbesj_wrap_e"(npy_double, npy_cdouble)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_cbesj_wrap_e_real "cbesj_wrap_e_real"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_k0 "k0"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_k0e "k0e"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_k1 "k1"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_k1e "k1e"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_kei_wrap "kei_wrap"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_keip_wrap "keip_wrap"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_kelvin_wrap "kelvin_wrap"(npy_double, npy_cdouble *, npy_cdouble *, npy_cdouble *, npy_cdouble *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_ker_wrap "ker_wrap"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_kerp_wrap "kerp_wrap"(npy_double)nogil +from ._convex_analysis cimport kl_div as _func_kl_div +ctypedef double _proto_kl_div_t(double, double) noexcept nogil +cdef _proto_kl_div_t *_proto_kl_div_t_var = &_func_kl_div +from ._legacy cimport kn_unsafe as _func_kn_unsafe +ctypedef double _proto_kn_unsafe_t(double, double) noexcept nogil +cdef 
_proto_kn_unsafe_t *_proto_kn_unsafe_t_var = &_func_kn_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_cbesk_wrap_real_int "cbesk_wrap_real_int"(npy_int, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_kolmogi "kolmogi"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_kolmogorov "kolmogorov"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_cdouble _func_cbesk_wrap "cbesk_wrap"(npy_double, npy_cdouble)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_cbesk_wrap_real "cbesk_wrap_real"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_cdouble _func_cbesk_wrap_e "cbesk_wrap_e"(npy_double, npy_cdouble)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_cbesk_wrap_e_real "cbesk_wrap_e_real"(npy_double, npy_double)nogil +from ._cunity cimport clog1p as _func_clog1p +ctypedef double complex _proto_clog1p_t(double complex) noexcept nogil +cdef _proto_clog1p_t *_proto_clog1p_t_var = &_func_clog1p +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_log1p "log1p"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_pmv_wrap "pmv_wrap"(npy_double, npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_cem_cva_wrap "cem_cva_wrap"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_sem_cva_wrap "sem_cva_wrap"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_cem_wrap "cem_wrap"(npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_mcm1_wrap "mcm1_wrap"(npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_mcm2_wrap "mcm2_wrap"(npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef 
npy_int _func_msm1_wrap "msm1_wrap"(npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_msm2_wrap "msm2_wrap"(npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_sem_wrap "sem_wrap"(npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_modified_fresnel_minus_wrap "modified_fresnel_minus_wrap"(npy_double, npy_cdouble *, npy_cdouble *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_modified_fresnel_plus_wrap "modified_fresnel_plus_wrap"(npy_double, npy_cdouble *, npy_cdouble *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_struve_l "struve_l"(npy_double, npy_double)nogil +from ._legacy cimport nbdtr_unsafe as _func_nbdtr_unsafe +ctypedef double _proto_nbdtr_unsafe_t(double, double, double) noexcept nogil +cdef _proto_nbdtr_unsafe_t *_proto_nbdtr_unsafe_t_var = &_func_nbdtr_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_nbdtr "nbdtr"(npy_int, npy_int, npy_double)nogil +from ._legacy cimport nbdtrc_unsafe as _func_nbdtrc_unsafe +ctypedef double _proto_nbdtrc_unsafe_t(double, double, double) noexcept nogil +cdef _proto_nbdtrc_unsafe_t *_proto_nbdtrc_unsafe_t_var = &_func_nbdtrc_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_nbdtrc "nbdtrc"(npy_int, npy_int, npy_double)nogil +from ._legacy cimport nbdtri_unsafe as _func_nbdtri_unsafe +ctypedef double _proto_nbdtri_unsafe_t(double, double, double) noexcept nogil +cdef _proto_nbdtri_unsafe_t *_proto_nbdtri_unsafe_t_var = &_func_nbdtri_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_nbdtri "nbdtri"(npy_int, npy_int, npy_double)nogil +from ._cdflib_wrappers cimport nbdtrik as _func_nbdtrik +ctypedef double _proto_nbdtrik_t(double, double, double) noexcept nogil +cdef _proto_nbdtrik_t *_proto_nbdtrik_t_var = 
&_func_nbdtrik +from ._cdflib_wrappers cimport nbdtrin as _func_nbdtrin +ctypedef double _proto_nbdtrin_t(double, double, double) noexcept nogil +cdef _proto_nbdtrin_t *_proto_nbdtrin_t_var = &_func_nbdtrin +from ._cdflib_wrappers cimport ncfdtr as _func_ncfdtr +ctypedef double _proto_ncfdtr_t(double, double, double, double) noexcept nogil +cdef _proto_ncfdtr_t *_proto_ncfdtr_t_var = &_func_ncfdtr +from ._cdflib_wrappers cimport ncfdtri as _func_ncfdtri +ctypedef double _proto_ncfdtri_t(double, double, double, double) noexcept nogil +cdef _proto_ncfdtri_t *_proto_ncfdtri_t_var = &_func_ncfdtri +from ._cdflib_wrappers cimport ncfdtridfd as _func_ncfdtridfd +ctypedef double _proto_ncfdtridfd_t(double, double, double, double) noexcept nogil +cdef _proto_ncfdtridfd_t *_proto_ncfdtridfd_t_var = &_func_ncfdtridfd +from ._cdflib_wrappers cimport ncfdtridfn as _func_ncfdtridfn +ctypedef double _proto_ncfdtridfn_t(double, double, double, double) noexcept nogil +cdef _proto_ncfdtridfn_t *_proto_ncfdtridfn_t_var = &_func_ncfdtridfn +from ._cdflib_wrappers cimport ncfdtrinc as _func_ncfdtrinc +ctypedef double _proto_ncfdtrinc_t(double, double, double, double) noexcept nogil +cdef _proto_ncfdtrinc_t *_proto_ncfdtrinc_t_var = &_func_ncfdtrinc +from ._cdflib_wrappers cimport nctdtr as _func_nctdtr +ctypedef double _proto_nctdtr_t(double, double, double) noexcept nogil +cdef _proto_nctdtr_t *_proto_nctdtr_t_var = &_func_nctdtr +from ._cdflib_wrappers cimport nctdtridf as _func_nctdtridf +ctypedef double _proto_nctdtridf_t(double, double, double) noexcept nogil +cdef _proto_nctdtridf_t *_proto_nctdtridf_t_var = &_func_nctdtridf +from ._cdflib_wrappers cimport nctdtrinc as _func_nctdtrinc +ctypedef double _proto_nctdtrinc_t(double, double, double) noexcept nogil +cdef _proto_nctdtrinc_t *_proto_nctdtrinc_t_var = &_func_nctdtrinc +from ._cdflib_wrappers cimport nctdtrit as _func_nctdtrit +ctypedef double _proto_nctdtrit_t(double, double, double) noexcept nogil +cdef _proto_nctdtrit_t 
*_proto_nctdtrit_t_var = &_func_nctdtrit +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_ndtr "ndtr"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_ndtri "ndtri"(npy_double)nogil +from ._cdflib_wrappers cimport nrdtrimn as _func_nrdtrimn +ctypedef double _proto_nrdtrimn_t(double, double, double) noexcept nogil +cdef _proto_nrdtrimn_t *_proto_nrdtrimn_t_var = &_func_nrdtrimn +from ._cdflib_wrappers cimport nrdtrisd as _func_nrdtrisd +ctypedef double _proto_nrdtrisd_t(double, double, double) noexcept nogil +cdef _proto_nrdtrisd_t *_proto_nrdtrisd_t_var = &_func_nrdtrisd +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_oblate_aswfa_nocv_wrap "oblate_aswfa_nocv_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_oblate_aswfa_wrap "oblate_aswfa_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_oblate_segv_wrap "oblate_segv_wrap"(npy_double, npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_oblate_radial1_nocv_wrap "oblate_radial1_nocv_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_oblate_radial1_wrap "oblate_radial1_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_oblate_radial2_nocv_wrap "oblate_radial2_nocv_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_oblate_radial2_wrap "oblate_radial2_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_owens_t "owens_t"(npy_double, npy_double)nogil +cdef extern from 
r"_ufuncs_defs.h": + cdef npy_int _func_pbdv_wrap "pbdv_wrap"(npy_double, npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_pbvv_wrap "pbvv_wrap"(npy_double, npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_pbwa_wrap "pbwa_wrap"(npy_double, npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_pdtr "pdtr"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_pdtrc "pdtrc"(npy_double, npy_double)nogil +from ._legacy cimport pdtri_unsafe as _func_pdtri_unsafe +ctypedef double _proto_pdtri_unsafe_t(double, double) noexcept nogil +cdef _proto_pdtri_unsafe_t *_proto_pdtri_unsafe_t_var = &_func_pdtri_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_pdtri "pdtri"(npy_int, npy_double)nogil +from ._cdflib_wrappers cimport pdtrik as _func_pdtrik +ctypedef double _proto_pdtrik_t(double, double) noexcept nogil +cdef _proto_pdtrik_t *_proto_pdtrik_t_var = &_func_pdtrik +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_poch "poch"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_prolate_aswfa_nocv_wrap "prolate_aswfa_nocv_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_prolate_aswfa_wrap "prolate_aswfa_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_prolate_segv_wrap "prolate_segv_wrap"(npy_double, npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_prolate_radial1_nocv_wrap "prolate_radial1_nocv_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_prolate_radial1_wrap "prolate_radial1_wrap"(npy_double, 
npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_prolate_radial2_nocv_wrap "prolate_radial2_nocv_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_prolate_radial2_wrap "prolate_radial2_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil +from ._convex_analysis cimport pseudo_huber as _func_pseudo_huber +ctypedef double _proto_pseudo_huber_t(double, double) noexcept nogil +cdef _proto_pseudo_huber_t *_proto_pseudo_huber_t_var = &_func_pseudo_huber +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_radian "radian"(npy_double, npy_double, npy_double)nogil +from ._convex_analysis cimport rel_entr as _func_rel_entr +ctypedef double _proto_rel_entr_t(double, double) noexcept nogil +cdef _proto_rel_entr_t *_proto_rel_entr_t_var = &_func_rel_entr +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_rgamma "rgamma"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_round "round"(npy_double)nogil +from ._sici cimport cshichi as _func_cshichi +ctypedef int _proto_cshichi_t(double complex, double complex *, double complex *) noexcept nogil +cdef _proto_cshichi_t *_proto_cshichi_t_var = &_func_cshichi +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_shichi "shichi"(npy_double, npy_double *, npy_double *)nogil +from ._sici cimport csici as _func_csici +ctypedef int _proto_csici_t(double complex, double complex *, double complex *) noexcept nogil +cdef _proto_csici_t *_proto_csici_t_var = &_func_csici +cdef extern from r"_ufuncs_defs.h": + cdef npy_int _func_sici "sici"(npy_double, npy_double *, npy_double *)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_sindg "sindg"(npy_double)nogil +from ._legacy cimport smirnov_unsafe as _func_smirnov_unsafe +ctypedef double 
_proto_smirnov_unsafe_t(double, double) noexcept nogil +cdef _proto_smirnov_unsafe_t *_proto_smirnov_unsafe_t_var = &_func_smirnov_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_smirnov "smirnov"(npy_int, npy_double)nogil +from ._legacy cimport smirnovi_unsafe as _func_smirnovi_unsafe +ctypedef double _proto_smirnovi_unsafe_t(double, double) noexcept nogil +cdef _proto_smirnovi_unsafe_t *_proto_smirnovi_unsafe_t_var = &_func_smirnovi_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_smirnovi "smirnovi"(npy_int, npy_double)nogil +from ._spence cimport cspence as _func_cspence +ctypedef double complex _proto_cspence_t(double complex) noexcept nogil +cdef _proto_cspence_t *_proto_cspence_t_var = &_func_cspence +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_spence "spence"(npy_double)nogil +from ._legacy cimport sph_harmonic_unsafe as _func_sph_harmonic_unsafe +ctypedef double complex _proto_sph_harmonic_unsafe_t(double, double, double, double) noexcept nogil +cdef _proto_sph_harmonic_unsafe_t *_proto_sph_harmonic_unsafe_t_var = &_func_sph_harmonic_unsafe +from .sph_harm cimport sph_harmonic as _func_sph_harmonic +ctypedef double complex _proto_sph_harmonic_t(int, int, double, double) noexcept nogil +cdef _proto_sph_harmonic_t *_proto_sph_harmonic_t_var = &_func_sph_harmonic +from ._cdflib_wrappers cimport stdtr as _func_stdtr +ctypedef double _proto_stdtr_t(double, double) noexcept nogil +cdef _proto_stdtr_t *_proto_stdtr_t_var = &_func_stdtr +from ._cdflib_wrappers cimport stdtridf as _func_stdtridf +ctypedef double _proto_stdtridf_t(double, double) noexcept nogil +cdef _proto_stdtridf_t *_proto_stdtridf_t_var = &_func_stdtridf +from ._cdflib_wrappers cimport stdtrit as _func_stdtrit +ctypedef double _proto_stdtrit_t(double, double) noexcept nogil +cdef _proto_stdtrit_t *_proto_stdtrit_t_var = &_func_stdtrit +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_struve_h "struve_h"(npy_double, 
npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_tandg "tandg"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_tukeylambdacdf "tukeylambdacdf"(npy_double, npy_double)nogil +from ._xlogy cimport xlog1py as _func_xlog1py +ctypedef double _proto_xlog1py_double__t(double, double) noexcept nogil +cdef _proto_xlog1py_double__t *_proto_xlog1py_double__t_var = &_func_xlog1py[double] +from ._xlogy cimport xlog1py as _func_xlog1py +ctypedef double complex _proto_xlog1py_double_complex__t(double complex, double complex) noexcept nogil +cdef _proto_xlog1py_double_complex__t *_proto_xlog1py_double_complex__t_var = &_func_xlog1py[double_complex] +from ._xlogy cimport xlogy as _func_xlogy +ctypedef double _proto_xlogy_double__t(double, double) noexcept nogil +cdef _proto_xlogy_double__t *_proto_xlogy_double__t_var = &_func_xlogy[double] +from ._xlogy cimport xlogy as _func_xlogy +ctypedef double complex _proto_xlogy_double_complex__t(double complex, double complex) noexcept nogil +cdef _proto_xlogy_double_complex__t *_proto_xlogy_double_complex__t_var = &_func_xlogy[double_complex] +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_y0 "y0"(npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_y1 "y1"(npy_double)nogil +from ._legacy cimport yn_unsafe as _func_yn_unsafe +ctypedef double _proto_yn_unsafe_t(double, double) noexcept nogil +cdef _proto_yn_unsafe_t *_proto_yn_unsafe_t_var = &_func_yn_unsafe +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_yn "yn"(npy_int, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_cdouble _func_cbesy_wrap "cbesy_wrap"(npy_double, npy_cdouble)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_cbesy_wrap_real "cbesy_wrap_real"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_cdouble _func_cbesy_wrap_e "cbesy_wrap_e"(npy_double, npy_cdouble)nogil +cdef extern from r"_ufuncs_defs.h": + 
cdef npy_double _func_cbesy_wrap_e_real "cbesy_wrap_e_real"(npy_double, npy_double)nogil +cdef extern from r"_ufuncs_defs.h": + cdef npy_double _func_zetac "zetac"(npy_double)nogil +from ._wright_bessel cimport wright_bessel_scalar as _func_wright_bessel_scalar +ctypedef double _proto_wright_bessel_scalar_t(double, double, double) noexcept nogil +cdef _proto_wright_bessel_scalar_t *_proto_wright_bessel_scalar_t_var = &_func_wright_bessel_scalar +from ._ndtri_exp cimport ndtri_exp as _func_ndtri_exp +ctypedef double _proto_ndtri_exp_t(double) noexcept nogil +cdef _proto_ndtri_exp_t *_proto_ndtri_exp_t_var = &_func_ndtri_exp + +cpdef double voigt_profile(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.voigt_profile""" + return (scipy.special._ufuncs_cxx._export_faddeeva_voigt_profile)(x0, x1, x2) + +cpdef double agm(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.agm""" + return _func_agm(x0, x1) + +cdef void airy(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1, Dd_number_t *y2, Dd_number_t *y3) noexcept nogil: + """See the documentation for scipy.special.airy""" + cdef npy_cdouble tmp0 + cdef npy_cdouble tmp1 + cdef npy_cdouble tmp2 + cdef npy_cdouble tmp3 + if Dd_number_t is double: + _func_airy_wrap(x0, y0, y1, y2, y3) + elif Dd_number_t is double_complex: + _func_cairy_wrap(_complexstuff.npy_cdouble_from_double_complex(x0), &tmp0, &tmp1, &tmp2, &tmp3) + y0[0] = _complexstuff.double_complex_from_npy_cdouble(tmp0) + y1[0] = _complexstuff.double_complex_from_npy_cdouble(tmp1) + y2[0] = _complexstuff.double_complex_from_npy_cdouble(tmp2) + y3[0] = _complexstuff.double_complex_from_npy_cdouble(tmp3) + else: + if Dd_number_t is double_complex: + y0[0] = NAN + y1[0] = NAN + y2[0] = NAN + y3[0] = NAN + else: + y0[0] = NAN + y1[0] = NAN + y2[0] = NAN + y3[0] = NAN + +def _airy_pywrap(Dd_number_t x0): + cdef Dd_number_t y0 + cdef Dd_number_t y1 + cdef Dd_number_t y2 + cdef Dd_number_t y3 + 
airy(x0, &y0, &y1, &y2, &y3) + return y0, y1, y2, y3 + +cdef void airye(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1, Dd_number_t *y2, Dd_number_t *y3) noexcept nogil: + """See the documentation for scipy.special.airye""" + cdef npy_cdouble tmp0 + cdef npy_cdouble tmp1 + cdef npy_cdouble tmp2 + cdef npy_cdouble tmp3 + if Dd_number_t is double_complex: + _func_cairy_wrap_e(_complexstuff.npy_cdouble_from_double_complex(x0), &tmp0, &tmp1, &tmp2, &tmp3) + y0[0] = _complexstuff.double_complex_from_npy_cdouble(tmp0) + y1[0] = _complexstuff.double_complex_from_npy_cdouble(tmp1) + y2[0] = _complexstuff.double_complex_from_npy_cdouble(tmp2) + y3[0] = _complexstuff.double_complex_from_npy_cdouble(tmp3) + elif Dd_number_t is double: + _func_cairy_wrap_e_real(x0, y0, y1, y2, y3) + else: + if Dd_number_t is double_complex: + y0[0] = NAN + y1[0] = NAN + y2[0] = NAN + y3[0] = NAN + else: + y0[0] = NAN + y1[0] = NAN + y2[0] = NAN + y3[0] = NAN + +def _airye_pywrap(Dd_number_t x0): + cdef Dd_number_t y0 + cdef Dd_number_t y1 + cdef Dd_number_t y2 + cdef Dd_number_t y3 + airye(x0, &y0, &y1, &y2, &y3) + return y0, y1, y2, y3 + +cpdef double bdtr(double x0, dl_number_t x1, double x2) noexcept nogil: + """See the documentation for scipy.special.bdtr""" + if dl_number_t is double: + return _func_bdtr_unsafe(x0, x1, x2) + elif dl_number_t is long: + return _func_bdtr(x0, x1, x2) + else: + return NAN + +cpdef double bdtrc(double x0, dl_number_t x1, double x2) noexcept nogil: + """See the documentation for scipy.special.bdtrc""" + if dl_number_t is double: + return _func_bdtrc_unsafe(x0, x1, x2) + elif dl_number_t is long: + return _func_bdtrc(x0, x1, x2) + else: + return NAN + +cpdef double bdtri(double x0, dl_number_t x1, double x2) noexcept nogil: + """See the documentation for scipy.special.bdtri""" + if dl_number_t is double: + return _func_bdtri_unsafe(x0, x1, x2) + elif dl_number_t is long: + return _func_bdtri(x0, x1, x2) + else: + return NAN + +cpdef double bdtrik(double x0, 
double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.bdtrik""" + return _func_bdtrik(x0, x1, x2) + +cpdef double bdtrin(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.bdtrin""" + return _func_bdtrin(x0, x1, x2) + +cpdef double bei(double x0) noexcept nogil: + """See the documentation for scipy.special.bei""" + return _func_bei_wrap(x0) + +cpdef double beip(double x0) noexcept nogil: + """See the documentation for scipy.special.beip""" + return _func_beip_wrap(x0) + +cpdef double ber(double x0) noexcept nogil: + """See the documentation for scipy.special.ber""" + return _func_ber_wrap(x0) + +cpdef double berp(double x0) noexcept nogil: + """See the documentation for scipy.special.berp""" + return _func_berp_wrap(x0) + +cpdef double besselpoly(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.besselpoly""" + return _func_besselpoly(x0, x1, x2) + +cpdef double beta(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.beta""" + return _func_beta(x0, x1) + +cpdef df_number_t betainc(df_number_t x0, df_number_t x1, df_number_t x2) noexcept nogil: + """See the documentation for scipy.special.betainc""" + if df_number_t is float: + return (scipy.special._ufuncs_cxx._export_ibeta_float)(x0, x1, x2) + elif df_number_t is double: + return (scipy.special._ufuncs_cxx._export_ibeta_double)(x0, x1, x2) + else: + if df_number_t is double: + return NAN + else: + return NAN + +cpdef df_number_t betaincc(df_number_t x0, df_number_t x1, df_number_t x2) noexcept nogil: + """See the documentation for scipy.special.betaincc""" + if df_number_t is float: + return (scipy.special._ufuncs_cxx._export_ibetac_float)(x0, x1, x2) + elif df_number_t is double: + return (scipy.special._ufuncs_cxx._export_ibetac_double)(x0, x1, x2) + else: + if df_number_t is double: + return NAN + else: + return NAN + +cpdef df_number_t betaincinv(df_number_t x0, 
df_number_t x1, df_number_t x2) noexcept nogil: + """See the documentation for scipy.special.betaincinv""" + if df_number_t is float: + return (scipy.special._ufuncs_cxx._export_ibeta_inv_float)(x0, x1, x2) + elif df_number_t is double: + return (scipy.special._ufuncs_cxx._export_ibeta_inv_double)(x0, x1, x2) + else: + if df_number_t is double: + return NAN + else: + return NAN + +cpdef df_number_t betainccinv(df_number_t x0, df_number_t x1, df_number_t x2) noexcept nogil: + """See the documentation for scipy.special.betainccinv""" + if df_number_t is float: + return (scipy.special._ufuncs_cxx._export_ibetac_inv_float)(x0, x1, x2) + elif df_number_t is double: + return (scipy.special._ufuncs_cxx._export_ibetac_inv_double)(x0, x1, x2) + else: + if df_number_t is double: + return NAN + else: + return NAN + +cpdef double betaln(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.betaln""" + return _func_lbeta(x0, x1) + +cpdef double binom(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.binom""" + return (scipy.special._ufuncs_cxx._export_binom)(x0, x1) + +cpdef double boxcox(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.boxcox""" + return _func_boxcox(x0, x1) + +cpdef double boxcox1p(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.boxcox1p""" + return _func_boxcox1p(x0, x1) + +cpdef double btdtr(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.btdtr""" + return _func_btdtr(x0, x1, x2) + +cpdef double btdtri(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.btdtri""" + return _func_incbi(x0, x1, x2) + +cpdef double btdtria(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.btdtria""" + return _func_btdtria(x0, x1, x2) + +cpdef double btdtrib(double x0, double x1, double x2) noexcept nogil: + """See the 
documentation for scipy.special.btdtrib""" + return _func_btdtrib(x0, x1, x2) + +cpdef double cbrt(double x0) noexcept nogil: + """See the documentation for scipy.special.cbrt""" + return _func_cbrt(x0) + +cpdef double chdtr(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.chdtr""" + return _func_chdtr(x0, x1) + +cpdef double chdtrc(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.chdtrc""" + return _func_chdtrc(x0, x1) + +cpdef double chdtri(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.chdtri""" + return _func_chdtri(x0, x1) + +cpdef double chdtriv(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.chdtriv""" + return _func_chdtriv(x0, x1) + +cpdef double chndtr(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.chndtr""" + return _func_chndtr(x0, x1, x2) + +cpdef double chndtridf(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.chndtridf""" + return _func_chndtridf(x0, x1, x2) + +cpdef double chndtrinc(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.chndtrinc""" + return _func_chndtrinc(x0, x1, x2) + +cpdef double chndtrix(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.chndtrix""" + return _func_chndtrix(x0, x1, x2) + +cpdef double cosdg(double x0) noexcept nogil: + """See the documentation for scipy.special.cosdg""" + return _func_cosdg(x0) + +cpdef double cosm1(double x0) noexcept nogil: + """See the documentation for scipy.special.cosm1""" + return _func_cosm1(x0) + +cpdef double cotdg(double x0) noexcept nogil: + """See the documentation for scipy.special.cotdg""" + return _func_cotdg(x0) + +cpdef Dd_number_t dawsn(Dd_number_t x0) noexcept nogil: + """See the documentation for scipy.special.dawsn""" + if Dd_number_t is double: + return 
(scipy.special._ufuncs_cxx._export_faddeeva_dawsn)(x0) + elif Dd_number_t is double_complex: + return (scipy.special._ufuncs_cxx._export_faddeeva_dawsn_complex)(x0) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef double ellipe(double x0) noexcept nogil: + """See the documentation for scipy.special.ellipe""" + return _func_ellpe(x0) + +cpdef double ellipeinc(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.ellipeinc""" + return _func_ellie(x0, x1) + +cdef void ellipj(double x0, double x1, double *y0, double *y1, double *y2, double *y3) noexcept nogil: + """See the documentation for scipy.special.ellipj""" + _func_ellpj(x0, x1, y0, y1, y2, y3) + +def _ellipj_pywrap(double x0, double x1): + cdef double y0 + cdef double y1 + cdef double y2 + cdef double y3 + ellipj(x0, x1, &y0, &y1, &y2, &y3) + return y0, y1, y2, y3 + +cpdef double ellipkinc(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.ellipkinc""" + return _func_ellik(x0, x1) + +cpdef double ellipkm1(double x0) noexcept nogil: + """See the documentation for scipy.special.ellipkm1""" + return _func_ellpk(x0) + +cpdef double ellipk(double x0) noexcept nogil: + """See the documentation for scipy.special.ellipk""" + return _func_ellipk(x0) + +cpdef Dd_number_t elliprc(Dd_number_t x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.elliprc""" + if Dd_number_t is double: + return (scipy.special._ufuncs_cxx._export_fellint_RC)(x0, x1) + elif Dd_number_t is double_complex: + return (scipy.special._ufuncs_cxx._export_cellint_RC)(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t elliprd(Dd_number_t x0, Dd_number_t x1, Dd_number_t x2) noexcept nogil: + """See the documentation for scipy.special.elliprd""" + if Dd_number_t is double: + return (scipy.special._ufuncs_cxx._export_fellint_RD)(x0, x1, x2) + elif Dd_number_t is 
double_complex: + return (scipy.special._ufuncs_cxx._export_cellint_RD)(x0, x1, x2) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t elliprf(Dd_number_t x0, Dd_number_t x1, Dd_number_t x2) noexcept nogil: + """See the documentation for scipy.special.elliprf""" + if Dd_number_t is double: + return (scipy.special._ufuncs_cxx._export_fellint_RF)(x0, x1, x2) + elif Dd_number_t is double_complex: + return (scipy.special._ufuncs_cxx._export_cellint_RF)(x0, x1, x2) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t elliprg(Dd_number_t x0, Dd_number_t x1, Dd_number_t x2) noexcept nogil: + """See the documentation for scipy.special.elliprg""" + if Dd_number_t is double: + return (scipy.special._ufuncs_cxx._export_fellint_RG)(x0, x1, x2) + elif Dd_number_t is double_complex: + return (scipy.special._ufuncs_cxx._export_cellint_RG)(x0, x1, x2) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t elliprj(Dd_number_t x0, Dd_number_t x1, Dd_number_t x2, Dd_number_t x3) noexcept nogil: + """See the documentation for scipy.special.elliprj""" + if Dd_number_t is double: + return (scipy.special._ufuncs_cxx._export_fellint_RJ)(x0, x1, x2, x3) + elif Dd_number_t is double_complex: + return (scipy.special._ufuncs_cxx._export_cellint_RJ)(x0, x1, x2, x3) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef double entr(double x0) noexcept nogil: + """See the documentation for scipy.special.entr""" + return _func_entr(x0) + +cpdef Dd_number_t erf(Dd_number_t x0) noexcept nogil: + """See the documentation for scipy.special.erf""" + if Dd_number_t is double_complex: + return (scipy.special._ufuncs_cxx._export_faddeeva_erf)(x0) + elif Dd_number_t is double: + return _func_erf(x0) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t erfc(Dd_number_t x0) noexcept nogil: + 
"""See the documentation for scipy.special.erfc""" + if Dd_number_t is double_complex: + return (scipy.special._ufuncs_cxx._export_faddeeva_erfc_complex)(x0) + elif Dd_number_t is double: + return _func_erfc(x0) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t erfcx(Dd_number_t x0) noexcept nogil: + """See the documentation for scipy.special.erfcx""" + if Dd_number_t is double: + return (scipy.special._ufuncs_cxx._export_faddeeva_erfcx)(x0) + elif Dd_number_t is double_complex: + return (scipy.special._ufuncs_cxx._export_faddeeva_erfcx_complex)(x0) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t erfi(Dd_number_t x0) noexcept nogil: + """See the documentation for scipy.special.erfi""" + if Dd_number_t is double: + return (scipy.special._ufuncs_cxx._export_faddeeva_erfi)(x0) + elif Dd_number_t is double_complex: + return (scipy.special._ufuncs_cxx._export_faddeeva_erfi_complex)(x0) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef df_number_t erfinv(df_number_t x0) noexcept nogil: + """See the documentation for scipy.special.erfinv""" + if df_number_t is float: + return (scipy.special._ufuncs_cxx._export_erfinv_float)(x0) + elif df_number_t is double: + return (scipy.special._ufuncs_cxx._export_erfinv_double)(x0) + else: + if df_number_t is double: + return NAN + else: + return NAN + +cpdef double erfcinv(double x0) noexcept nogil: + """See the documentation for scipy.special.erfcinv""" + return _func_erfcinv(x0) + +cpdef Dd_number_t eval_chebyc(dl_number_t x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.eval_chebyc""" + if dl_number_t is double and Dd_number_t is double_complex: + return _func_eval_chebyc[double_complex](x0, x1) + elif dl_number_t is double and Dd_number_t is double: + return _func_eval_chebyc[double](x0, x1) + elif dl_number_t is long and Dd_number_t is double: + return 
_func_eval_chebyc_l(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t eval_chebys(dl_number_t x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.eval_chebys""" + if dl_number_t is double and Dd_number_t is double_complex: + return _func_eval_chebys[double_complex](x0, x1) + elif dl_number_t is double and Dd_number_t is double: + return _func_eval_chebys[double](x0, x1) + elif dl_number_t is long and Dd_number_t is double: + return _func_eval_chebys_l(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t eval_chebyt(dl_number_t x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.eval_chebyt""" + if dl_number_t is double and Dd_number_t is double_complex: + return _func_eval_chebyt[double_complex](x0, x1) + elif dl_number_t is double and Dd_number_t is double: + return _func_eval_chebyt[double](x0, x1) + elif dl_number_t is long and Dd_number_t is double: + return _func_eval_chebyt_l(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t eval_chebyu(dl_number_t x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.eval_chebyu""" + if dl_number_t is double and Dd_number_t is double_complex: + return _func_eval_chebyu[double_complex](x0, x1) + elif dl_number_t is double and Dd_number_t is double: + return _func_eval_chebyu[double](x0, x1) + elif dl_number_t is long and Dd_number_t is double: + return _func_eval_chebyu_l(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t eval_gegenbauer(dl_number_t x0, double x1, Dd_number_t x2) noexcept nogil: + """See the documentation for scipy.special.eval_gegenbauer""" + if dl_number_t is double and Dd_number_t is double_complex: + return _func_eval_gegenbauer[double_complex](x0, x1, x2) + elif dl_number_t is double and Dd_number_t 
is double: + return _func_eval_gegenbauer[double](x0, x1, x2) + elif dl_number_t is long and Dd_number_t is double: + return _func_eval_gegenbauer_l(x0, x1, x2) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t eval_genlaguerre(dl_number_t x0, double x1, Dd_number_t x2) noexcept nogil: + """See the documentation for scipy.special.eval_genlaguerre""" + if dl_number_t is double and Dd_number_t is double_complex: + return _func_eval_genlaguerre[double_complex](x0, x1, x2) + elif dl_number_t is double and Dd_number_t is double: + return _func_eval_genlaguerre[double](x0, x1, x2) + elif dl_number_t is long and Dd_number_t is double: + return _func_eval_genlaguerre_l(x0, x1, x2) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef double eval_hermite(long x0, double x1) noexcept nogil: + """See the documentation for scipy.special.eval_hermite""" + return _func_eval_hermite(x0, x1) + +cpdef double eval_hermitenorm(long x0, double x1) noexcept nogil: + """See the documentation for scipy.special.eval_hermitenorm""" + return _func_eval_hermitenorm(x0, x1) + +cpdef Dd_number_t eval_jacobi(dl_number_t x0, double x1, double x2, Dd_number_t x3) noexcept nogil: + """See the documentation for scipy.special.eval_jacobi""" + if dl_number_t is double and Dd_number_t is double_complex: + return _func_eval_jacobi[double_complex](x0, x1, x2, x3) + elif dl_number_t is double and Dd_number_t is double: + return _func_eval_jacobi[double](x0, x1, x2, x3) + elif dl_number_t is long and Dd_number_t is double: + return _func_eval_jacobi_l(x0, x1, x2, x3) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t eval_laguerre(dl_number_t x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.eval_laguerre""" + if dl_number_t is double and Dd_number_t is double_complex: + return _func_eval_laguerre[double_complex](x0, x1) + elif dl_number_t is 
double and Dd_number_t is double: + return _func_eval_laguerre[double](x0, x1) + elif dl_number_t is long and Dd_number_t is double: + return _func_eval_laguerre_l(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t eval_legendre(dl_number_t x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.eval_legendre""" + if dl_number_t is double and Dd_number_t is double_complex: + return _func_eval_legendre[double_complex](x0, x1) + elif dl_number_t is double and Dd_number_t is double: + return _func_eval_legendre[double](x0, x1) + elif dl_number_t is long and Dd_number_t is double: + return _func_eval_legendre_l(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t eval_sh_chebyt(dl_number_t x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.eval_sh_chebyt""" + if dl_number_t is double and Dd_number_t is double_complex: + return _func_eval_sh_chebyt[double_complex](x0, x1) + elif dl_number_t is double and Dd_number_t is double: + return _func_eval_sh_chebyt[double](x0, x1) + elif dl_number_t is long and Dd_number_t is double: + return _func_eval_sh_chebyt_l(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t eval_sh_chebyu(dl_number_t x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.eval_sh_chebyu""" + if dl_number_t is double and Dd_number_t is double_complex: + return _func_eval_sh_chebyu[double_complex](x0, x1) + elif dl_number_t is double and Dd_number_t is double: + return _func_eval_sh_chebyu[double](x0, x1) + elif dl_number_t is long and Dd_number_t is double: + return _func_eval_sh_chebyu_l(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t eval_sh_jacobi(dl_number_t x0, double x1, double x2, Dd_number_t x3) noexcept nogil: + """See the documentation for 
scipy.special.eval_sh_jacobi""" + if dl_number_t is double and Dd_number_t is double_complex: + return _func_eval_sh_jacobi[double_complex](x0, x1, x2, x3) + elif dl_number_t is double and Dd_number_t is double: + return _func_eval_sh_jacobi[double](x0, x1, x2, x3) + elif dl_number_t is long and Dd_number_t is double: + return _func_eval_sh_jacobi_l(x0, x1, x2, x3) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t eval_sh_legendre(dl_number_t x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.eval_sh_legendre""" + if dl_number_t is double and Dd_number_t is double_complex: + return _func_eval_sh_legendre[double_complex](x0, x1) + elif dl_number_t is double and Dd_number_t is double: + return _func_eval_sh_legendre[double](x0, x1) + elif dl_number_t is long and Dd_number_t is double: + return _func_eval_sh_legendre_l(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t exp1(Dd_number_t x0) noexcept nogil: + """See the documentation for scipy.special.exp1""" + if Dd_number_t is double_complex: + return _complexstuff.double_complex_from_npy_cdouble(_func_cexp1_wrap(_complexstuff.npy_cdouble_from_double_complex(x0))) + elif Dd_number_t is double: + return _func_exp1_wrap(x0) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef double exp10(double x0) noexcept nogil: + """See the documentation for scipy.special.exp10""" + return _func_exp10(x0) + +cpdef double exp2(double x0) noexcept nogil: + """See the documentation for scipy.special.exp2""" + return _func_exp2(x0) + +cpdef Dd_number_t expi(Dd_number_t x0) noexcept nogil: + """See the documentation for scipy.special.expi""" + if Dd_number_t is double_complex: + return _complexstuff.double_complex_from_npy_cdouble(_func_cexpi_wrap(_complexstuff.npy_cdouble_from_double_complex(x0))) + elif Dd_number_t is double: + return _func_expi_wrap(x0) + else: + if 
Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef dfg_number_t expit(dfg_number_t x0) noexcept nogil: + """See the documentation for scipy.special.expit""" + if dfg_number_t is double: + return (scipy.special._ufuncs_cxx._export_expit)(x0) + elif dfg_number_t is float: + return (scipy.special._ufuncs_cxx._export_expitf)(x0) + elif dfg_number_t is long_double: + return (scipy.special._ufuncs_cxx._export_expitl)(x0) + else: + if dfg_number_t is double: + return NAN + elif dfg_number_t is float: + return NAN + else: + return NAN + +cpdef Dd_number_t expm1(Dd_number_t x0) noexcept nogil: + """See the documentation for scipy.special.expm1""" + if Dd_number_t is double_complex: + return _func_cexpm1(x0) + elif Dd_number_t is double: + return _func_expm1(x0) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef double expn(dl_number_t x0, double x1) noexcept nogil: + """See the documentation for scipy.special.expn""" + if dl_number_t is double: + return _func_expn_unsafe(x0, x1) + elif dl_number_t is long: + return _func_expn(x0, x1) + else: + return NAN + +cpdef double exprel(double x0) noexcept nogil: + """See the documentation for scipy.special.exprel""" + return _func_exprel(x0) + +cpdef double fdtr(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.fdtr""" + return _func_fdtr(x0, x1, x2) + +cpdef double fdtrc(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.fdtrc""" + return _func_fdtrc(x0, x1, x2) + +cpdef double fdtri(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.fdtri""" + return _func_fdtri(x0, x1, x2) + +cpdef double fdtridfd(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.fdtridfd""" + return _func_fdtridfd(x0, x1, x2) + +cdef void fresnel(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1) noexcept nogil: + """See the 
documentation for scipy.special.fresnel""" + cdef npy_cdouble tmp0 + cdef npy_cdouble tmp1 + if Dd_number_t is double: + _func_fresnl(x0, y0, y1) + elif Dd_number_t is double_complex: + _func_cfresnl_wrap(_complexstuff.npy_cdouble_from_double_complex(x0), &tmp0, &tmp1) + y0[0] = _complexstuff.double_complex_from_npy_cdouble(tmp0) + y1[0] = _complexstuff.double_complex_from_npy_cdouble(tmp1) + else: + if Dd_number_t is double_complex: + y0[0] = NAN + y1[0] = NAN + else: + y0[0] = NAN + y1[0] = NAN + +def _fresnel_pywrap(Dd_number_t x0): + cdef Dd_number_t y0 + cdef Dd_number_t y1 + fresnel(x0, &y0, &y1) + return y0, y1 + +cpdef Dd_number_t gamma(Dd_number_t x0) noexcept nogil: + """See the documentation for scipy.special.gamma""" + if Dd_number_t is double_complex: + return (scipy.special._ufuncs_cxx._export_cgamma)(x0) + elif Dd_number_t is double: + return _func_Gamma(x0) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef double gammainc(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.gammainc""" + return _func_igam(x0, x1) + +cpdef double gammaincc(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.gammaincc""" + return _func_igamc(x0, x1) + +cpdef double gammainccinv(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.gammainccinv""" + return _func_igamci(x0, x1) + +cpdef double gammaincinv(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.gammaincinv""" + return _func_igami(x0, x1) + +cpdef double gammaln(double x0) noexcept nogil: + """See the documentation for scipy.special.gammaln""" + return _func_lgam(x0) + +cpdef double gammasgn(double x0) noexcept nogil: + """See the documentation for scipy.special.gammasgn""" + return _func_gammasgn(x0) + +cpdef double gdtr(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.gdtr""" + return _func_gdtr(x0, x1, x2) 
+ +cpdef double gdtrc(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.gdtrc""" + return _func_gdtrc(x0, x1, x2) + +cpdef double gdtria(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.gdtria""" + return _func_gdtria(x0, x1, x2) + +cpdef double gdtrib(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.gdtrib""" + return _func_gdtrib(x0, x1, x2) + +cpdef double gdtrix(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.gdtrix""" + return _func_gdtrix(x0, x1, x2) + +cpdef double complex hankel1(double x0, double complex x1) noexcept nogil: + """See the documentation for scipy.special.hankel1""" + return _complexstuff.double_complex_from_npy_cdouble(_func_cbesh_wrap1(x0, _complexstuff.npy_cdouble_from_double_complex(x1))) + +cpdef double complex hankel1e(double x0, double complex x1) noexcept nogil: + """See the documentation for scipy.special.hankel1e""" + return _complexstuff.double_complex_from_npy_cdouble(_func_cbesh_wrap1_e(x0, _complexstuff.npy_cdouble_from_double_complex(x1))) + +cpdef double complex hankel2(double x0, double complex x1) noexcept nogil: + """See the documentation for scipy.special.hankel2""" + return _complexstuff.double_complex_from_npy_cdouble(_func_cbesh_wrap2(x0, _complexstuff.npy_cdouble_from_double_complex(x1))) + +cpdef double complex hankel2e(double x0, double complex x1) noexcept nogil: + """See the documentation for scipy.special.hankel2e""" + return _complexstuff.double_complex_from_npy_cdouble(_func_cbesh_wrap2_e(x0, _complexstuff.npy_cdouble_from_double_complex(x1))) + +cpdef double huber(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.huber""" + return _func_huber(x0, x1) + +cpdef Dd_number_t hyp0f1(double x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.hyp0f1""" + if Dd_number_t is 
double_complex: + return _func__hyp0f1_cmplx(x0, x1) + elif Dd_number_t is double: + return _func__hyp0f1_real(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t hyp1f1(double x0, double x1, Dd_number_t x2) noexcept nogil: + """See the documentation for scipy.special.hyp1f1""" + if Dd_number_t is double: + return (scipy.special._ufuncs_cxx._export_hyp1f1_double)(x0, x1, x2) + elif Dd_number_t is double_complex: + return _complexstuff.double_complex_from_npy_cdouble(_func_chyp1f1_wrap(x0, x1, _complexstuff.npy_cdouble_from_double_complex(x2))) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t hyp2f1(double x0, double x1, double x2, Dd_number_t x3) noexcept nogil: + """See the documentation for scipy.special.hyp2f1""" + if Dd_number_t is double: + return _func_hyp2f1(x0, x1, x2, x3) + elif Dd_number_t is double_complex: + return _func_hyp2f1_complex(x0, x1, x2, x3) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef double hyperu(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.hyperu""" + return _func_hyperu(x0, x1, x2) + +cpdef double i0(double x0) noexcept nogil: + """See the documentation for scipy.special.i0""" + return _func_i0(x0) + +cpdef double i0e(double x0) noexcept nogil: + """See the documentation for scipy.special.i0e""" + return _func_i0e(x0) + +cpdef double i1(double x0) noexcept nogil: + """See the documentation for scipy.special.i1""" + return _func_i1(x0) + +cpdef double i1e(double x0) noexcept nogil: + """See the documentation for scipy.special.i1e""" + return _func_i1e(x0) + +cpdef double inv_boxcox(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.inv_boxcox""" + return _func_inv_boxcox(x0, x1) + +cpdef double inv_boxcox1p(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.inv_boxcox1p""" + return 
_func_inv_boxcox1p(x0, x1) + +cdef void it2i0k0(double x0, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.it2i0k0""" + _func_it2i0k0_wrap(x0, y0, y1) + +def _it2i0k0_pywrap(double x0): + cdef double y0 + cdef double y1 + it2i0k0(x0, &y0, &y1) + return y0, y1 + +cdef void it2j0y0(double x0, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.it2j0y0""" + _func_it2j0y0_wrap(x0, y0, y1) + +def _it2j0y0_pywrap(double x0): + cdef double y0 + cdef double y1 + it2j0y0(x0, &y0, &y1) + return y0, y1 + +cpdef double it2struve0(double x0) noexcept nogil: + """See the documentation for scipy.special.it2struve0""" + return _func_it2struve0_wrap(x0) + +cdef void itairy(double x0, double *y0, double *y1, double *y2, double *y3) noexcept nogil: + """See the documentation for scipy.special.itairy""" + _func_itairy_wrap(x0, y0, y1, y2, y3) + +def _itairy_pywrap(double x0): + cdef double y0 + cdef double y1 + cdef double y2 + cdef double y3 + itairy(x0, &y0, &y1, &y2, &y3) + return y0, y1, y2, y3 + +cdef void iti0k0(double x0, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.iti0k0""" + _func_it1i0k0_wrap(x0, y0, y1) + +def _iti0k0_pywrap(double x0): + cdef double y0 + cdef double y1 + iti0k0(x0, &y0, &y1) + return y0, y1 + +cdef void itj0y0(double x0, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.itj0y0""" + _func_it1j0y0_wrap(x0, y0, y1) + +def _itj0y0_pywrap(double x0): + cdef double y0 + cdef double y1 + itj0y0(x0, &y0, &y1) + return y0, y1 + +cpdef double itmodstruve0(double x0) noexcept nogil: + """See the documentation for scipy.special.itmodstruve0""" + return _func_itmodstruve0_wrap(x0) + +cpdef double itstruve0(double x0) noexcept nogil: + """See the documentation for scipy.special.itstruve0""" + return _func_itstruve0_wrap(x0) + +cpdef Dd_number_t iv(double x0, Dd_number_t x1) noexcept nogil: + """See the documentation for 
scipy.special.iv""" + if Dd_number_t is double_complex: + return _complexstuff.double_complex_from_npy_cdouble(_func_cbesi_wrap(x0, _complexstuff.npy_cdouble_from_double_complex(x1))) + elif Dd_number_t is double: + return _func_iv(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t ive(double x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.ive""" + if Dd_number_t is double_complex: + return _complexstuff.double_complex_from_npy_cdouble(_func_cbesi_wrap_e(x0, _complexstuff.npy_cdouble_from_double_complex(x1))) + elif Dd_number_t is double: + return _func_cbesi_wrap_e_real(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef double j0(double x0) noexcept nogil: + """See the documentation for scipy.special.j0""" + return _func_j0(x0) + +cpdef double j1(double x0) noexcept nogil: + """See the documentation for scipy.special.j1""" + return _func_j1(x0) + +cpdef Dd_number_t jv(double x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.jv""" + if Dd_number_t is double_complex: + return _complexstuff.double_complex_from_npy_cdouble(_func_cbesj_wrap(x0, _complexstuff.npy_cdouble_from_double_complex(x1))) + elif Dd_number_t is double: + return _func_cbesj_wrap_real(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t jve(double x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.jve""" + if Dd_number_t is double_complex: + return _complexstuff.double_complex_from_npy_cdouble(_func_cbesj_wrap_e(x0, _complexstuff.npy_cdouble_from_double_complex(x1))) + elif Dd_number_t is double: + return _func_cbesj_wrap_e_real(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef double k0(double x0) noexcept nogil: + """See the documentation for scipy.special.k0""" + return _func_k0(x0) + +cpdef double 
k0e(double x0) noexcept nogil: + """See the documentation for scipy.special.k0e""" + return _func_k0e(x0) + +cpdef double k1(double x0) noexcept nogil: + """See the documentation for scipy.special.k1""" + return _func_k1(x0) + +cpdef double k1e(double x0) noexcept nogil: + """See the documentation for scipy.special.k1e""" + return _func_k1e(x0) + +cpdef double kei(double x0) noexcept nogil: + """See the documentation for scipy.special.kei""" + return _func_kei_wrap(x0) + +cpdef double keip(double x0) noexcept nogil: + """See the documentation for scipy.special.keip""" + return _func_keip_wrap(x0) + +cdef void kelvin(double x0, double complex *y0, double complex *y1, double complex *y2, double complex *y3) noexcept nogil: + """See the documentation for scipy.special.kelvin""" + cdef npy_cdouble tmp0 + cdef npy_cdouble tmp1 + cdef npy_cdouble tmp2 + cdef npy_cdouble tmp3 + _func_kelvin_wrap(x0, &tmp0, &tmp1, &tmp2, &tmp3) + y0[0] = _complexstuff.double_complex_from_npy_cdouble(tmp0) + y1[0] = _complexstuff.double_complex_from_npy_cdouble(tmp1) + y2[0] = _complexstuff.double_complex_from_npy_cdouble(tmp2) + y3[0] = _complexstuff.double_complex_from_npy_cdouble(tmp3) + +def _kelvin_pywrap(double x0): + cdef double complex y0 + cdef double complex y1 + cdef double complex y2 + cdef double complex y3 + kelvin(x0, &y0, &y1, &y2, &y3) + return y0, y1, y2, y3 + +cpdef double ker(double x0) noexcept nogil: + """See the documentation for scipy.special.ker""" + return _func_ker_wrap(x0) + +cpdef double kerp(double x0) noexcept nogil: + """See the documentation for scipy.special.kerp""" + return _func_kerp_wrap(x0) + +cpdef double kl_div(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.kl_div""" + return _func_kl_div(x0, x1) + +cpdef double kn(dl_number_t x0, double x1) noexcept nogil: + """See the documentation for scipy.special.kn""" + if dl_number_t is double: + return _func_kn_unsafe(x0, x1) + elif dl_number_t is long: + return 
_func_cbesk_wrap_real_int(x0, x1) + else: + return NAN + +cpdef double kolmogi(double x0) noexcept nogil: + """See the documentation for scipy.special.kolmogi""" + return _func_kolmogi(x0) + +cpdef double kolmogorov(double x0) noexcept nogil: + """See the documentation for scipy.special.kolmogorov""" + return _func_kolmogorov(x0) + +cpdef Dd_number_t kv(double x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.kv""" + if Dd_number_t is double_complex: + return _complexstuff.double_complex_from_npy_cdouble(_func_cbesk_wrap(x0, _complexstuff.npy_cdouble_from_double_complex(x1))) + elif Dd_number_t is double: + return _func_cbesk_wrap_real(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t kve(double x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.kve""" + if Dd_number_t is double_complex: + return _complexstuff.double_complex_from_npy_cdouble(_func_cbesk_wrap_e(x0, _complexstuff.npy_cdouble_from_double_complex(x1))) + elif Dd_number_t is double: + return _func_cbesk_wrap_e_real(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t log1p(Dd_number_t x0) noexcept nogil: + """See the documentation for scipy.special.log1p""" + if Dd_number_t is double_complex: + return _func_clog1p(x0) + elif Dd_number_t is double: + return _func_log1p(x0) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef dfg_number_t log_expit(dfg_number_t x0) noexcept nogil: + """See the documentation for scipy.special.log_expit""" + if dfg_number_t is double: + return (scipy.special._ufuncs_cxx._export_log_expit)(x0) + elif dfg_number_t is float: + return (scipy.special._ufuncs_cxx._export_log_expitf)(x0) + elif dfg_number_t is long_double: + return (scipy.special._ufuncs_cxx._export_log_expitl)(x0) + else: + if dfg_number_t is double: + return NAN + elif dfg_number_t is float: + return NAN + 
else: + return NAN + +cpdef Dd_number_t log_ndtr(Dd_number_t x0) noexcept nogil: + """See the documentation for scipy.special.log_ndtr""" + if Dd_number_t is double: + return (scipy.special._ufuncs_cxx._export_faddeeva_log_ndtr)(x0) + elif Dd_number_t is double_complex: + return (scipy.special._ufuncs_cxx._export_faddeeva_log_ndtr_complex)(x0) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t loggamma(Dd_number_t x0) noexcept nogil: + """See the documentation for scipy.special.loggamma""" + if Dd_number_t is double: + return (scipy.special._ufuncs_cxx._export_loggamma_real)(x0) + elif Dd_number_t is double_complex: + return (scipy.special._ufuncs_cxx._export_loggamma)(x0) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef dfg_number_t logit(dfg_number_t x0) noexcept nogil: + """See the documentation for scipy.special.logit""" + if dfg_number_t is double: + return (scipy.special._ufuncs_cxx._export_logit)(x0) + elif dfg_number_t is float: + return (scipy.special._ufuncs_cxx._export_logitf)(x0) + elif dfg_number_t is long_double: + return (scipy.special._ufuncs_cxx._export_logitl)(x0) + else: + if dfg_number_t is double: + return NAN + elif dfg_number_t is float: + return NAN + else: + return NAN + +cpdef double lpmv(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.lpmv""" + return _func_pmv_wrap(x0, x1, x2) + +cpdef double mathieu_a(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.mathieu_a""" + return _func_cem_cva_wrap(x0, x1) + +cpdef double mathieu_b(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.mathieu_b""" + return _func_sem_cva_wrap(x0, x1) + +cdef void mathieu_cem(double x0, double x1, double x2, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.mathieu_cem""" + _func_cem_wrap(x0, x1, x2, y0, y1) + +def 
_mathieu_cem_pywrap(double x0, double x1, double x2): + cdef double y0 + cdef double y1 + mathieu_cem(x0, x1, x2, &y0, &y1) + return y0, y1 + +cdef void mathieu_modcem1(double x0, double x1, double x2, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.mathieu_modcem1""" + _func_mcm1_wrap(x0, x1, x2, y0, y1) + +def _mathieu_modcem1_pywrap(double x0, double x1, double x2): + cdef double y0 + cdef double y1 + mathieu_modcem1(x0, x1, x2, &y0, &y1) + return y0, y1 + +cdef void mathieu_modcem2(double x0, double x1, double x2, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.mathieu_modcem2""" + _func_mcm2_wrap(x0, x1, x2, y0, y1) + +def _mathieu_modcem2_pywrap(double x0, double x1, double x2): + cdef double y0 + cdef double y1 + mathieu_modcem2(x0, x1, x2, &y0, &y1) + return y0, y1 + +cdef void mathieu_modsem1(double x0, double x1, double x2, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.mathieu_modsem1""" + _func_msm1_wrap(x0, x1, x2, y0, y1) + +def _mathieu_modsem1_pywrap(double x0, double x1, double x2): + cdef double y0 + cdef double y1 + mathieu_modsem1(x0, x1, x2, &y0, &y1) + return y0, y1 + +cdef void mathieu_modsem2(double x0, double x1, double x2, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.mathieu_modsem2""" + _func_msm2_wrap(x0, x1, x2, y0, y1) + +def _mathieu_modsem2_pywrap(double x0, double x1, double x2): + cdef double y0 + cdef double y1 + mathieu_modsem2(x0, x1, x2, &y0, &y1) + return y0, y1 + +cdef void mathieu_sem(double x0, double x1, double x2, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.mathieu_sem""" + _func_sem_wrap(x0, x1, x2, y0, y1) + +def _mathieu_sem_pywrap(double x0, double x1, double x2): + cdef double y0 + cdef double y1 + mathieu_sem(x0, x1, x2, &y0, &y1) + return y0, y1 + +cdef void modfresnelm(double x0, double complex *y0, double complex *y1) 
noexcept nogil: + """See the documentation for scipy.special.modfresnelm""" + cdef npy_cdouble tmp0 + cdef npy_cdouble tmp1 + _func_modified_fresnel_minus_wrap(x0, &tmp0, &tmp1) + y0[0] = _complexstuff.double_complex_from_npy_cdouble(tmp0) + y1[0] = _complexstuff.double_complex_from_npy_cdouble(tmp1) + +def _modfresnelm_pywrap(double x0): + cdef double complex y0 + cdef double complex y1 + modfresnelm(x0, &y0, &y1) + return y0, y1 + +cdef void modfresnelp(double x0, double complex *y0, double complex *y1) noexcept nogil: + """See the documentation for scipy.special.modfresnelp""" + cdef npy_cdouble tmp0 + cdef npy_cdouble tmp1 + _func_modified_fresnel_plus_wrap(x0, &tmp0, &tmp1) + y0[0] = _complexstuff.double_complex_from_npy_cdouble(tmp0) + y1[0] = _complexstuff.double_complex_from_npy_cdouble(tmp1) + +def _modfresnelp_pywrap(double x0): + cdef double complex y0 + cdef double complex y1 + modfresnelp(x0, &y0, &y1) + return y0, y1 + +cpdef double modstruve(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.modstruve""" + return _func_struve_l(x0, x1) + +cpdef double nbdtr(dl_number_t x0, dl_number_t x1, double x2) noexcept nogil: + """See the documentation for scipy.special.nbdtr""" + if dl_number_t is double: + return _func_nbdtr_unsafe(x0, x1, x2) + elif dl_number_t is long: + return _func_nbdtr(x0, x1, x2) + else: + return NAN + +cpdef double nbdtrc(dl_number_t x0, dl_number_t x1, double x2) noexcept nogil: + """See the documentation for scipy.special.nbdtrc""" + if dl_number_t is double: + return _func_nbdtrc_unsafe(x0, x1, x2) + elif dl_number_t is long: + return _func_nbdtrc(x0, x1, x2) + else: + return NAN + +cpdef double nbdtri(dl_number_t x0, dl_number_t x1, double x2) noexcept nogil: + """See the documentation for scipy.special.nbdtri""" + if dl_number_t is double: + return _func_nbdtri_unsafe(x0, x1, x2) + elif dl_number_t is long: + return _func_nbdtri(x0, x1, x2) + else: + return NAN + +cpdef double nbdtrik(double x0, 
double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.nbdtrik""" + return _func_nbdtrik(x0, x1, x2) + +cpdef double nbdtrin(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.nbdtrin""" + return _func_nbdtrin(x0, x1, x2) + +cpdef double ncfdtr(double x0, double x1, double x2, double x3) noexcept nogil: + """See the documentation for scipy.special.ncfdtr""" + return _func_ncfdtr(x0, x1, x2, x3) + +cpdef double ncfdtri(double x0, double x1, double x2, double x3) noexcept nogil: + """See the documentation for scipy.special.ncfdtri""" + return _func_ncfdtri(x0, x1, x2, x3) + +cpdef double ncfdtridfd(double x0, double x1, double x2, double x3) noexcept nogil: + """See the documentation for scipy.special.ncfdtridfd""" + return _func_ncfdtridfd(x0, x1, x2, x3) + +cpdef double ncfdtridfn(double x0, double x1, double x2, double x3) noexcept nogil: + """See the documentation for scipy.special.ncfdtridfn""" + return _func_ncfdtridfn(x0, x1, x2, x3) + +cpdef double ncfdtrinc(double x0, double x1, double x2, double x3) noexcept nogil: + """See the documentation for scipy.special.ncfdtrinc""" + return _func_ncfdtrinc(x0, x1, x2, x3) + +cpdef double nctdtr(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.nctdtr""" + return _func_nctdtr(x0, x1, x2) + +cpdef double nctdtridf(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.nctdtridf""" + return _func_nctdtridf(x0, x1, x2) + +cpdef double nctdtrinc(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.nctdtrinc""" + return _func_nctdtrinc(x0, x1, x2) + +cpdef double nctdtrit(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.nctdtrit""" + return _func_nctdtrit(x0, x1, x2) + +cpdef Dd_number_t ndtr(Dd_number_t x0) noexcept nogil: + """See the documentation for scipy.special.ndtr""" + if 
Dd_number_t is double_complex: + return (scipy.special._ufuncs_cxx._export_faddeeva_ndtr)(x0) + elif Dd_number_t is double: + return _func_ndtr(x0) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef double ndtri(double x0) noexcept nogil: + """See the documentation for scipy.special.ndtri""" + return _func_ndtri(x0) + +cpdef double nrdtrimn(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.nrdtrimn""" + return _func_nrdtrimn(x0, x1, x2) + +cpdef double nrdtrisd(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.nrdtrisd""" + return _func_nrdtrisd(x0, x1, x2) + +cdef void obl_ang1(double x0, double x1, double x2, double x3, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.obl_ang1""" + y0[0] = _func_oblate_aswfa_nocv_wrap(x0, x1, x2, x3, y1) + +def _obl_ang1_pywrap(double x0, double x1, double x2, double x3): + cdef double y0 + cdef double y1 + obl_ang1(x0, x1, x2, x3, &y0, &y1) + return y0, y1 + +cdef void obl_ang1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.obl_ang1_cv""" + _func_oblate_aswfa_wrap(x0, x1, x2, x3, x4, y0, y1) + +def _obl_ang1_cv_pywrap(double x0, double x1, double x2, double x3, double x4): + cdef double y0 + cdef double y1 + obl_ang1_cv(x0, x1, x2, x3, x4, &y0, &y1) + return y0, y1 + +cpdef double obl_cv(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.obl_cv""" + return _func_oblate_segv_wrap(x0, x1, x2) + +cdef void obl_rad1(double x0, double x1, double x2, double x3, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.obl_rad1""" + y0[0] = _func_oblate_radial1_nocv_wrap(x0, x1, x2, x3, y1) + +def _obl_rad1_pywrap(double x0, double x1, double x2, double x3): + cdef double y0 + cdef double y1 + obl_rad1(x0, x1, x2, 
x3, &y0, &y1) + return y0, y1 + +cdef void obl_rad1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.obl_rad1_cv""" + _func_oblate_radial1_wrap(x0, x1, x2, x3, x4, y0, y1) + +def _obl_rad1_cv_pywrap(double x0, double x1, double x2, double x3, double x4): + cdef double y0 + cdef double y1 + obl_rad1_cv(x0, x1, x2, x3, x4, &y0, &y1) + return y0, y1 + +cdef void obl_rad2(double x0, double x1, double x2, double x3, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.obl_rad2""" + y0[0] = _func_oblate_radial2_nocv_wrap(x0, x1, x2, x3, y1) + +def _obl_rad2_pywrap(double x0, double x1, double x2, double x3): + cdef double y0 + cdef double y1 + obl_rad2(x0, x1, x2, x3, &y0, &y1) + return y0, y1 + +cdef void obl_rad2_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.obl_rad2_cv""" + _func_oblate_radial2_wrap(x0, x1, x2, x3, x4, y0, y1) + +def _obl_rad2_cv_pywrap(double x0, double x1, double x2, double x3, double x4): + cdef double y0 + cdef double y1 + obl_rad2_cv(x0, x1, x2, x3, x4, &y0, &y1) + return y0, y1 + +cpdef double owens_t(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.owens_t""" + return _func_owens_t(x0, x1) + +cdef void pbdv(double x0, double x1, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.pbdv""" + _func_pbdv_wrap(x0, x1, y0, y1) + +def _pbdv_pywrap(double x0, double x1): + cdef double y0 + cdef double y1 + pbdv(x0, x1, &y0, &y1) + return y0, y1 + +cdef void pbvv(double x0, double x1, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.pbvv""" + _func_pbvv_wrap(x0, x1, y0, y1) + +def _pbvv_pywrap(double x0, double x1): + cdef double y0 + cdef double y1 + pbvv(x0, x1, &y0, &y1) + return y0, y1 + +cdef void pbwa(double x0, double x1, 
double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.pbwa""" + _func_pbwa_wrap(x0, x1, y0, y1) + +def _pbwa_pywrap(double x0, double x1): + cdef double y0 + cdef double y1 + pbwa(x0, x1, &y0, &y1) + return y0, y1 + +cpdef double pdtr(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.pdtr""" + return _func_pdtr(x0, x1) + +cpdef double pdtrc(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.pdtrc""" + return _func_pdtrc(x0, x1) + +cpdef double pdtri(dl_number_t x0, double x1) noexcept nogil: + """See the documentation for scipy.special.pdtri""" + if dl_number_t is double: + return _func_pdtri_unsafe(x0, x1) + elif dl_number_t is long: + return _func_pdtri(x0, x1) + else: + return NAN + +cpdef double pdtrik(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.pdtrik""" + return _func_pdtrik(x0, x1) + +cpdef double poch(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.poch""" + return _func_poch(x0, x1) + +cpdef df_number_t powm1(df_number_t x0, df_number_t x1) noexcept nogil: + """See the documentation for scipy.special.powm1""" + if df_number_t is float: + return (scipy.special._ufuncs_cxx._export_powm1_float)(x0, x1) + elif df_number_t is double: + return (scipy.special._ufuncs_cxx._export_powm1_double)(x0, x1) + else: + if df_number_t is double: + return NAN + else: + return NAN + +cdef void pro_ang1(double x0, double x1, double x2, double x3, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.pro_ang1""" + y0[0] = _func_prolate_aswfa_nocv_wrap(x0, x1, x2, x3, y1) + +def _pro_ang1_pywrap(double x0, double x1, double x2, double x3): + cdef double y0 + cdef double y1 + pro_ang1(x0, x1, x2, x3, &y0, &y1) + return y0, y1 + +cdef void pro_ang1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) noexcept nogil: + """See the documentation for 
scipy.special.pro_ang1_cv""" + _func_prolate_aswfa_wrap(x0, x1, x2, x3, x4, y0, y1) + +def _pro_ang1_cv_pywrap(double x0, double x1, double x2, double x3, double x4): + cdef double y0 + cdef double y1 + pro_ang1_cv(x0, x1, x2, x3, x4, &y0, &y1) + return y0, y1 + +cpdef double pro_cv(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.pro_cv""" + return _func_prolate_segv_wrap(x0, x1, x2) + +cdef void pro_rad1(double x0, double x1, double x2, double x3, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.pro_rad1""" + y0[0] = _func_prolate_radial1_nocv_wrap(x0, x1, x2, x3, y1) + +def _pro_rad1_pywrap(double x0, double x1, double x2, double x3): + cdef double y0 + cdef double y1 + pro_rad1(x0, x1, x2, x3, &y0, &y1) + return y0, y1 + +cdef void pro_rad1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.pro_rad1_cv""" + _func_prolate_radial1_wrap(x0, x1, x2, x3, x4, y0, y1) + +def _pro_rad1_cv_pywrap(double x0, double x1, double x2, double x3, double x4): + cdef double y0 + cdef double y1 + pro_rad1_cv(x0, x1, x2, x3, x4, &y0, &y1) + return y0, y1 + +cdef void pro_rad2(double x0, double x1, double x2, double x3, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.pro_rad2""" + y0[0] = _func_prolate_radial2_nocv_wrap(x0, x1, x2, x3, y1) + +def _pro_rad2_pywrap(double x0, double x1, double x2, double x3): + cdef double y0 + cdef double y1 + pro_rad2(x0, x1, x2, x3, &y0, &y1) + return y0, y1 + +cdef void pro_rad2_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) noexcept nogil: + """See the documentation for scipy.special.pro_rad2_cv""" + _func_prolate_radial2_wrap(x0, x1, x2, x3, x4, y0, y1) + +def _pro_rad2_cv_pywrap(double x0, double x1, double x2, double x3, double x4): + cdef double y0 + cdef double y1 + pro_rad2_cv(x0, x1, x2, x3, x4, 
&y0, &y1) + return y0, y1 + +cpdef double pseudo_huber(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.pseudo_huber""" + return _func_pseudo_huber(x0, x1) + +cpdef Dd_number_t psi(Dd_number_t x0) noexcept nogil: + """See the documentation for scipy.special.psi""" + if Dd_number_t is double_complex: + return (scipy.special._ufuncs_cxx._export_cdigamma)(x0) + elif Dd_number_t is double: + return (scipy.special._ufuncs_cxx._export_digamma)(x0) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef double radian(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.radian""" + return _func_radian(x0, x1, x2) + +cpdef double rel_entr(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.rel_entr""" + return _func_rel_entr(x0, x1) + +cpdef Dd_number_t rgamma(Dd_number_t x0) noexcept nogil: + """See the documentation for scipy.special.rgamma""" + if Dd_number_t is double_complex: + return (scipy.special._ufuncs_cxx._export_crgamma)(x0) + elif Dd_number_t is double: + return _func_rgamma(x0) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef double round(double x0) noexcept nogil: + """See the documentation for scipy.special.round""" + return _func_round(x0) + +cdef void shichi(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1) noexcept nogil: + """See the documentation for scipy.special.shichi""" + if Dd_number_t is double_complex: + _func_cshichi(x0, y0, y1) + elif Dd_number_t is double: + _func_shichi(x0, y0, y1) + else: + if Dd_number_t is double_complex: + y0[0] = NAN + y1[0] = NAN + else: + y0[0] = NAN + y1[0] = NAN + +def _shichi_pywrap(Dd_number_t x0): + cdef Dd_number_t y0 + cdef Dd_number_t y1 + shichi(x0, &y0, &y1) + return y0, y1 + +cdef void sici(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1) noexcept nogil: + """See the documentation for scipy.special.sici""" + if Dd_number_t is 
double_complex: + _func_csici(x0, y0, y1) + elif Dd_number_t is double: + _func_sici(x0, y0, y1) + else: + if Dd_number_t is double_complex: + y0[0] = NAN + y1[0] = NAN + else: + y0[0] = NAN + y1[0] = NAN + +def _sici_pywrap(Dd_number_t x0): + cdef Dd_number_t y0 + cdef Dd_number_t y1 + sici(x0, &y0, &y1) + return y0, y1 + +cpdef double sindg(double x0) noexcept nogil: + """See the documentation for scipy.special.sindg""" + return _func_sindg(x0) + +cpdef double smirnov(dl_number_t x0, double x1) noexcept nogil: + """See the documentation for scipy.special.smirnov""" + if dl_number_t is double: + return _func_smirnov_unsafe(x0, x1) + elif dl_number_t is long: + return _func_smirnov(x0, x1) + else: + return NAN + +cpdef double smirnovi(dl_number_t x0, double x1) noexcept nogil: + """See the documentation for scipy.special.smirnovi""" + if dl_number_t is double: + return _func_smirnovi_unsafe(x0, x1) + elif dl_number_t is long: + return _func_smirnovi(x0, x1) + else: + return NAN + +cpdef Dd_number_t spence(Dd_number_t x0) noexcept nogil: + """See the documentation for scipy.special.spence""" + if Dd_number_t is double_complex: + return _func_cspence(x0) + elif Dd_number_t is double: + return _func_spence(x0) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef double complex sph_harm(dl_number_t x0, dl_number_t x1, double x2, double x3) noexcept nogil: + """See the documentation for scipy.special.sph_harm""" + if dl_number_t is double: + return _func_sph_harmonic_unsafe(x0, x1, x2, x3) + elif dl_number_t is long: + return _func_sph_harmonic(x0, x1, x2, x3) + else: + return NAN + +cpdef double stdtr(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.stdtr""" + return _func_stdtr(x0, x1) + +cpdef double stdtridf(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.stdtridf""" + return _func_stdtridf(x0, x1) + +cpdef double stdtrit(double x0, double x1) noexcept nogil: + 
"""See the documentation for scipy.special.stdtrit""" + return _func_stdtrit(x0, x1) + +cpdef double struve(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.struve""" + return _func_struve_h(x0, x1) + +cpdef double tandg(double x0) noexcept nogil: + """See the documentation for scipy.special.tandg""" + return _func_tandg(x0) + +cpdef double tklmbda(double x0, double x1) noexcept nogil: + """See the documentation for scipy.special.tklmbda""" + return _func_tukeylambdacdf(x0, x1) + +cpdef double complex wofz(double complex x0) noexcept nogil: + """See the documentation for scipy.special.wofz""" + return (scipy.special._ufuncs_cxx._export_faddeeva_w)(x0) + +cpdef Dd_number_t wrightomega(Dd_number_t x0) noexcept nogil: + """See the documentation for scipy.special.wrightomega""" + if Dd_number_t is double_complex: + return (scipy.special._ufuncs_cxx._export_wrightomega)(x0) + elif Dd_number_t is double: + return (scipy.special._ufuncs_cxx._export_wrightomega_real)(x0) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t xlog1py(Dd_number_t x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.xlog1py""" + if Dd_number_t is double: + return _func_xlog1py[double](x0, x1) + elif Dd_number_t is double_complex: + return _func_xlog1py[double_complex](x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t xlogy(Dd_number_t x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.xlogy""" + if Dd_number_t is double: + return _func_xlogy[double](x0, x1) + elif Dd_number_t is double_complex: + return _func_xlogy[double_complex](x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef double y0(double x0) noexcept nogil: + """See the documentation for scipy.special.y0""" + return _func_y0(x0) + +cpdef double y1(double x0) noexcept nogil: + """See the documentation 
for scipy.special.y1""" + return _func_y1(x0) + +cpdef double yn(dl_number_t x0, double x1) noexcept nogil: + """See the documentation for scipy.special.yn""" + if dl_number_t is double: + return _func_yn_unsafe(x0, x1) + elif dl_number_t is long: + return _func_yn(x0, x1) + else: + return NAN + +cpdef Dd_number_t yv(double x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.yv""" + if Dd_number_t is double_complex: + return _complexstuff.double_complex_from_npy_cdouble(_func_cbesy_wrap(x0, _complexstuff.npy_cdouble_from_double_complex(x1))) + elif Dd_number_t is double: + return _func_cbesy_wrap_real(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef Dd_number_t yve(double x0, Dd_number_t x1) noexcept nogil: + """See the documentation for scipy.special.yve""" + if Dd_number_t is double_complex: + return _complexstuff.double_complex_from_npy_cdouble(_func_cbesy_wrap_e(x0, _complexstuff.npy_cdouble_from_double_complex(x1))) + elif Dd_number_t is double: + return _func_cbesy_wrap_e_real(x0, x1) + else: + if Dd_number_t is double_complex: + return NAN + else: + return NAN + +cpdef double zetac(double x0) noexcept nogil: + """See the documentation for scipy.special.zetac""" + return _func_zetac(x0) + +cpdef double wright_bessel(double x0, double x1, double x2) noexcept nogil: + """See the documentation for scipy.special.wright_bessel""" + return _func_wright_bessel_scalar(x0, x1, x2) + +cpdef double ndtri_exp(double x0) noexcept nogil: + """See the documentation for scipy.special.ndtri_exp""" + return _func_ndtri_exp(x0) + +def _bench_airy_d_py(int N, double x0): + cdef int n + for n in range(N): + _ufuncs.airy(x0) + +def _bench_airy_d_cy(int N, double x0): + cdef int n + cdef double y0 + cdef double y1 + cdef double y2 + cdef double y3 + for n in range(N): + airy(x0, &y0, &y1, &y2, &y3) + +def _bench_airy_D_py(int N, double complex x0): + cdef int n + for n in range(N): + _ufuncs.airy(x0) + +def 
_bench_airy_D_cy(int N, double complex x0): + cdef int n + cdef double complex y0 + cdef double complex y1 + cdef double complex y2 + cdef double complex y3 + for n in range(N): + airy(x0, &y0, &y1, &y2, &y3) + +def _bench_beta_dd_py(int N, double x0, double x1): + cdef int n + for n in range(N): + _ufuncs.beta(x0, x1) + +def _bench_beta_dd_cy(int N, double x0, double x1): + cdef int n + for n in range(N): + beta(x0, x1) + +def _bench_erf_d_py(int N, double x0): + cdef int n + for n in range(N): + _ufuncs.erf(x0) + +def _bench_erf_d_cy(int N, double x0): + cdef int n + for n in range(N): + erf(x0) + +def _bench_erf_D_py(int N, double complex x0): + cdef int n + for n in range(N): + _ufuncs.erf(x0) + +def _bench_erf_D_cy(int N, double complex x0): + cdef int n + for n in range(N): + erf(x0) + +def _bench_exprel_d_py(int N, double x0): + cdef int n + for n in range(N): + _ufuncs.exprel(x0) + +def _bench_exprel_d_cy(int N, double x0): + cdef int n + for n in range(N): + exprel(x0) + +def _bench_gamma_d_py(int N, double x0): + cdef int n + for n in range(N): + _ufuncs.gamma(x0) + +def _bench_gamma_d_cy(int N, double x0): + cdef int n + for n in range(N): + gamma(x0) + +def _bench_gamma_D_py(int N, double complex x0): + cdef int n + for n in range(N): + _ufuncs.gamma(x0) + +def _bench_gamma_D_cy(int N, double complex x0): + cdef int n + for n in range(N): + gamma(x0) + +def _bench_jv_dd_py(int N, double x0, double x1): + cdef int n + for n in range(N): + _ufuncs.jv(x0, x1) + +def _bench_jv_dd_cy(int N, double x0, double x1): + cdef int n + for n in range(N): + jv(x0, x1) + +def _bench_jv_dD_py(int N, double x0, double complex x1): + cdef int n + for n in range(N): + _ufuncs.jv(x0, x1) + +def _bench_jv_dD_cy(int N, double x0, double complex x1): + cdef int n + for n in range(N): + jv(x0, x1) + +def _bench_loggamma_D_py(int N, double complex x0): + cdef int n + for n in range(N): + _ufuncs.loggamma(x0) + +def _bench_loggamma_D_cy(int N, double complex x0): + cdef int n + 
for n in range(N): + loggamma(x0) + +def _bench_logit_d_py(int N, double x0): + cdef int n + for n in range(N): + _ufuncs.logit(x0) + +def _bench_logit_d_cy(int N, double x0): + cdef int n + for n in range(N): + logit(x0) + +def _bench_psi_d_py(int N, double x0): + cdef int n + for n in range(N): + _ufuncs.psi(x0) + +def _bench_psi_d_cy(int N, double x0): + cdef int n + for n in range(N): + psi(x0) + +def _bench_psi_D_py(int N, double complex x0): + cdef int n + for n in range(N): + _ufuncs.psi(x0) + +def _bench_psi_D_cy(int N, double complex x0): + cdef int n + for n in range(N): + psi(x0) \ No newline at end of file diff --git a/venv/lib/python3.10/site-packages/scipy/special/orthogonal.py b/venv/lib/python3.10/site-packages/scipy/special/orthogonal.py new file mode 100644 index 0000000000000000000000000000000000000000..f9e39d75d4e21ea1f8e529715cdba427f8205389 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special/orthogonal.py @@ -0,0 +1,47 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.special` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +_polyfuns = ['legendre', 'chebyt', 'chebyu', 'chebyc', 'chebys', + 'jacobi', 'laguerre', 'genlaguerre', 'hermite', + 'hermitenorm', 'gegenbauer', 'sh_legendre', 'sh_chebyt', + 'sh_chebyu', 'sh_jacobi'] + +# Correspondence between new and old names of root functions +_rootfuns_map = {'roots_legendre': 'p_roots', + 'roots_chebyt': 't_roots', + 'roots_chebyu': 'u_roots', + 'roots_chebyc': 'c_roots', + 'roots_chebys': 's_roots', + 'roots_jacobi': 'j_roots', + 'roots_laguerre': 'l_roots', + 'roots_genlaguerre': 'la_roots', + 'roots_hermite': 'h_roots', + 'roots_hermitenorm': 'he_roots', + 'roots_gegenbauer': 'cg_roots', + 'roots_sh_legendre': 'ps_roots', + 'roots_sh_chebyt': 'ts_roots', + 'roots_sh_chebyu': 'us_roots', + 'roots_sh_jacobi': 'js_roots'} + + +__all__ = _polyfuns + list(_rootfuns_map.keys()) + [ # noqa: F822 + 'exp', 'inf', 'floor', 'around', 'hstack', 'arange', + 'linalg', 'airy', 'orthopoly1d', 'newfun', + 'oldfun', 'p_roots', 't_roots', 'u_roots', 'c_roots', 's_roots', + 'j_roots', 'l_roots', 'la_roots', 'h_roots', 'he_roots', 'cg_roots', + 'ps_roots', 'ts_roots', 'us_roots', 'js_roots' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="special", module="orthogonal", + private_modules=["_orthogonal"], all=__all__, + attribute=name) diff --git a/venv/lib/python3.10/site-packages/scipy/special/sf_error.py b/venv/lib/python3.10/site-packages/scipy/special/sf_error.py new file mode 100644 index 0000000000000000000000000000000000000000..00ff73756acd4219a4ba94eb089bce7d4c32266d --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special/sf_error.py @@ -0,0 +1,20 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.special` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'SpecialFunctionWarning', + 'SpecialFunctionError' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="special", module="sf_error", + private_modules=["_sf_error"], all=__all__, + attribute=name) diff --git a/venv/lib/python3.10/site-packages/scipy/special/specfun.py b/venv/lib/python3.10/site-packages/scipy/special/specfun.py new file mode 100644 index 0000000000000000000000000000000000000000..b6506602a2b877fd9290dd793fcdaa85ebe09447 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special/specfun.py @@ -0,0 +1,43 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.special` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'airyzo', + 'bernob', + 'cerzo', + 'clpmn', + 'clpn', + 'clqmn', + 'clqn', + 'cpbdn', + 'cyzo', + 'eulerb', + 'fcoef', + 'fcszo', + 'jdzo', + 'jyzo', + 'klvnzo', + 'lamn', + 'lamv', + 'lpmn', + 'lpn', + 'lqmn', + 'lqnb', + 'pbdv', + 'rctj', + 'rcty', + 'segv' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="special", module="specfun", + private_modules=["_specfun"], all=__all__, + attribute=name) diff --git a/venv/lib/python3.10/site-packages/scipy/special/spfun_stats.py b/venv/lib/python3.10/site-packages/scipy/special/spfun_stats.py new file mode 100644 index 0000000000000000000000000000000000000000..f51eacff7abe86d668b4e982876c16b6db38b578 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special/spfun_stats.py @@ -0,0 +1,17 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.special` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = ['multigammaln', 'loggam'] # noqa: F822 + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="special", module="spfun_stats", + private_modules=["_spfun_stats"], all=__all__, + attribute=name)