applied-ai-018 commited on
Commit
472de0d
·
verified ·
1 Parent(s): 1d6b48b

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. ckpts/universal/global_step80/zero/11.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step80/zero/9.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
  3. venv/lib/python3.10/site-packages/markupsafe/__init__.py +332 -0
  4. venv/lib/python3.10/site-packages/markupsafe/_native.py +63 -0
  5. venv/lib/python3.10/site-packages/markupsafe/_speedups.c +320 -0
  6. venv/lib/python3.10/site-packages/markupsafe/_speedups.cpython-310-x86_64-linux-gnu.so +0 -0
  7. venv/lib/python3.10/site-packages/markupsafe/_speedups.pyi +9 -0
  8. venv/lib/python3.10/site-packages/markupsafe/py.typed +0 -0
  9. venv/lib/python3.10/site-packages/mpmath/calculus/__init__.py +6 -0
  10. venv/lib/python3.10/site-packages/mpmath/calculus/__pycache__/__init__.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/mpmath/calculus/__pycache__/approximation.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/mpmath/calculus/__pycache__/calculus.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/mpmath/calculus/__pycache__/differentiation.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/mpmath/calculus/__pycache__/extrapolation.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/mpmath/calculus/__pycache__/inverselaplace.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/mpmath/calculus/__pycache__/odes.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/mpmath/calculus/__pycache__/optimization.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/mpmath/calculus/__pycache__/polynomials.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/mpmath/calculus/__pycache__/quadrature.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/mpmath/calculus/approximation.py +246 -0
  21. venv/lib/python3.10/site-packages/mpmath/calculus/calculus.py +6 -0
  22. venv/lib/python3.10/site-packages/mpmath/calculus/differentiation.py +647 -0
  23. venv/lib/python3.10/site-packages/mpmath/calculus/extrapolation.py +2115 -0
  24. venv/lib/python3.10/site-packages/mpmath/calculus/inverselaplace.py +973 -0
  25. venv/lib/python3.10/site-packages/mpmath/calculus/odes.py +288 -0
  26. venv/lib/python3.10/site-packages/mpmath/calculus/optimization.py +1102 -0
  27. venv/lib/python3.10/site-packages/mpmath/calculus/polynomials.py +213 -0
  28. venv/lib/python3.10/site-packages/mpmath/calculus/quadrature.py +1115 -0
  29. venv/lib/python3.10/site-packages/mpmath/libmp/__init__.py +77 -0
  30. venv/lib/python3.10/site-packages/mpmath/libmp/__pycache__/__init__.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/mpmath/libmp/__pycache__/backend.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/mpmath/libmp/__pycache__/gammazeta.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/mpmath/libmp/__pycache__/libelefun.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/mpmath/libmp/__pycache__/libhyper.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/mpmath/libmp/__pycache__/libintmath.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/mpmath/libmp/__pycache__/libmpc.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/mpmath/libmp/__pycache__/libmpf.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/mpmath/libmp/__pycache__/libmpi.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/mpmath/libmp/backend.py +115 -0
  40. venv/lib/python3.10/site-packages/mpmath/libmp/gammazeta.py +2167 -0
  41. venv/lib/python3.10/site-packages/mpmath/libmp/libelefun.py +1428 -0
  42. venv/lib/python3.10/site-packages/mpmath/libmp/libhyper.py +1150 -0
  43. venv/lib/python3.10/site-packages/mpmath/libmp/libintmath.py +584 -0
  44. venv/lib/python3.10/site-packages/mpmath/libmp/libmpc.py +835 -0
  45. venv/lib/python3.10/site-packages/mpmath/libmp/libmpf.py +1414 -0
  46. venv/lib/python3.10/site-packages/mpmath/libmp/libmpi.py +935 -0
  47. venv/lib/python3.10/site-packages/mpmath/matrices/__pycache__/__init__.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/mpmath/matrices/__pycache__/calculus.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/mpmath/matrices/__pycache__/eigen.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/mpmath/matrices/__pycache__/eigen_symmetric.cpython-310.pyc +0 -0
ckpts/universal/global_step80/zero/11.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dcccbe15e31fde6fd37986a1e18172c87d943d4d52f10f471b05ceb2fe0f1bdf
3
+ size 33555612
ckpts/universal/global_step80/zero/9.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e12f74df9fb92ff8bfa3f23d6bdc7be15ac673a0f8be266e60ad6fe54a78a242
3
+ size 33555612
venv/lib/python3.10/site-packages/markupsafe/__init__.py ADDED
@@ -0,0 +1,332 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import functools
2
+ import string
3
+ import sys
4
+ import typing as t
5
+
6
+ if t.TYPE_CHECKING:
7
+ import typing_extensions as te
8
+
9
+ class HasHTML(te.Protocol):
10
+ def __html__(self) -> str:
11
+ pass
12
+
13
+ _P = te.ParamSpec("_P")
14
+
15
+
16
+ __version__ = "2.1.5"
17
+
18
+
19
+ def _simple_escaping_wrapper(func: "t.Callable[_P, str]") -> "t.Callable[_P, Markup]":
20
+ @functools.wraps(func)
21
+ def wrapped(self: "Markup", *args: "_P.args", **kwargs: "_P.kwargs") -> "Markup":
22
+ arg_list = _escape_argspec(list(args), enumerate(args), self.escape)
23
+ _escape_argspec(kwargs, kwargs.items(), self.escape)
24
+ return self.__class__(func(self, *arg_list, **kwargs)) # type: ignore[arg-type]
25
+
26
+ return wrapped # type: ignore[return-value]
27
+
28
+
29
+ class Markup(str):
30
+ """A string that is ready to be safely inserted into an HTML or XML
31
+ document, either because it was escaped or because it was marked
32
+ safe.
33
+
34
+ Passing an object to the constructor converts it to text and wraps
35
+ it to mark it safe without escaping. To escape the text, use the
36
+ :meth:`escape` class method instead.
37
+
38
+ >>> Markup("Hello, <em>World</em>!")
39
+ Markup('Hello, <em>World</em>!')
40
+ >>> Markup(42)
41
+ Markup('42')
42
+ >>> Markup.escape("Hello, <em>World</em>!")
43
+ Markup('Hello &lt;em&gt;World&lt;/em&gt;!')
44
+
45
+ This implements the ``__html__()`` interface that some frameworks
46
+ use. Passing an object that implements ``__html__()`` will wrap the
47
+ output of that method, marking it safe.
48
+
49
+ >>> class Foo:
50
+ ... def __html__(self):
51
+ ... return '<a href="/foo">foo</a>'
52
+ ...
53
+ >>> Markup(Foo())
54
+ Markup('<a href="/foo">foo</a>')
55
+
56
+ This is a subclass of :class:`str`. It has the same methods, but
57
+ escapes their arguments and returns a ``Markup`` instance.
58
+
59
+ >>> Markup("<em>%s</em>") % ("foo & bar",)
60
+ Markup('<em>foo &amp; bar</em>')
61
+ >>> Markup("<em>Hello</em> ") + "<foo>"
62
+ Markup('<em>Hello</em> &lt;foo&gt;')
63
+ """
64
+
65
+ __slots__ = ()
66
+
67
+ def __new__(
68
+ cls, base: t.Any = "", encoding: t.Optional[str] = None, errors: str = "strict"
69
+ ) -> "te.Self":
70
+ if hasattr(base, "__html__"):
71
+ base = base.__html__()
72
+
73
+ if encoding is None:
74
+ return super().__new__(cls, base)
75
+
76
+ return super().__new__(cls, base, encoding, errors)
77
+
78
+ def __html__(self) -> "te.Self":
79
+ return self
80
+
81
+ def __add__(self, other: t.Union[str, "HasHTML"]) -> "te.Self":
82
+ if isinstance(other, str) or hasattr(other, "__html__"):
83
+ return self.__class__(super().__add__(self.escape(other)))
84
+
85
+ return NotImplemented
86
+
87
+ def __radd__(self, other: t.Union[str, "HasHTML"]) -> "te.Self":
88
+ if isinstance(other, str) or hasattr(other, "__html__"):
89
+ return self.escape(other).__add__(self)
90
+
91
+ return NotImplemented
92
+
93
+ def __mul__(self, num: "te.SupportsIndex") -> "te.Self":
94
+ if isinstance(num, int):
95
+ return self.__class__(super().__mul__(num))
96
+
97
+ return NotImplemented
98
+
99
+ __rmul__ = __mul__
100
+
101
+ def __mod__(self, arg: t.Any) -> "te.Self":
102
+ if isinstance(arg, tuple):
103
+ # a tuple of arguments, each wrapped
104
+ arg = tuple(_MarkupEscapeHelper(x, self.escape) for x in arg)
105
+ elif hasattr(type(arg), "__getitem__") and not isinstance(arg, str):
106
+ # a mapping of arguments, wrapped
107
+ arg = _MarkupEscapeHelper(arg, self.escape)
108
+ else:
109
+ # a single argument, wrapped with the helper and a tuple
110
+ arg = (_MarkupEscapeHelper(arg, self.escape),)
111
+
112
+ return self.__class__(super().__mod__(arg))
113
+
114
+ def __repr__(self) -> str:
115
+ return f"{self.__class__.__name__}({super().__repr__()})"
116
+
117
+ def join(self, seq: t.Iterable[t.Union[str, "HasHTML"]]) -> "te.Self":
118
+ return self.__class__(super().join(map(self.escape, seq)))
119
+
120
+ join.__doc__ = str.join.__doc__
121
+
122
+ def split( # type: ignore[override]
123
+ self, sep: t.Optional[str] = None, maxsplit: int = -1
124
+ ) -> t.List["te.Self"]:
125
+ return [self.__class__(v) for v in super().split(sep, maxsplit)]
126
+
127
+ split.__doc__ = str.split.__doc__
128
+
129
+ def rsplit( # type: ignore[override]
130
+ self, sep: t.Optional[str] = None, maxsplit: int = -1
131
+ ) -> t.List["te.Self"]:
132
+ return [self.__class__(v) for v in super().rsplit(sep, maxsplit)]
133
+
134
+ rsplit.__doc__ = str.rsplit.__doc__
135
+
136
+ def splitlines( # type: ignore[override]
137
+ self, keepends: bool = False
138
+ ) -> t.List["te.Self"]:
139
+ return [self.__class__(v) for v in super().splitlines(keepends)]
140
+
141
+ splitlines.__doc__ = str.splitlines.__doc__
142
+
143
+ def unescape(self) -> str:
144
+ """Convert escaped markup back into a text string. This replaces
145
+ HTML entities with the characters they represent.
146
+
147
+ >>> Markup("Main &raquo; <em>About</em>").unescape()
148
+ 'Main » <em>About</em>'
149
+ """
150
+ from html import unescape
151
+
152
+ return unescape(str(self))
153
+
154
+ def striptags(self) -> str:
155
+ """:meth:`unescape` the markup, remove tags, and normalize
156
+ whitespace to single spaces.
157
+
158
+ >>> Markup("Main &raquo;\t<em>About</em>").striptags()
159
+ 'Main » About'
160
+ """
161
+ value = str(self)
162
+
163
+ # Look for comments then tags separately. Otherwise, a comment that
164
+ # contains a tag would end early, leaving some of the comment behind.
165
+
166
+ while True:
167
+ # keep finding comment start marks
168
+ start = value.find("<!--")
169
+
170
+ if start == -1:
171
+ break
172
+
173
+ # find a comment end mark beyond the start, otherwise stop
174
+ end = value.find("-->", start)
175
+
176
+ if end == -1:
177
+ break
178
+
179
+ value = f"{value[:start]}{value[end + 3:]}"
180
+
181
+ # remove tags using the same method
182
+ while True:
183
+ start = value.find("<")
184
+
185
+ if start == -1:
186
+ break
187
+
188
+ end = value.find(">", start)
189
+
190
+ if end == -1:
191
+ break
192
+
193
+ value = f"{value[:start]}{value[end + 1:]}"
194
+
195
+ # collapse spaces
196
+ value = " ".join(value.split())
197
+ return self.__class__(value).unescape()
198
+
199
+ @classmethod
200
+ def escape(cls, s: t.Any) -> "te.Self":
201
+ """Escape a string. Calls :func:`escape` and ensures that for
202
+ subclasses the correct type is returned.
203
+ """
204
+ rv = escape(s)
205
+
206
+ if rv.__class__ is not cls:
207
+ return cls(rv)
208
+
209
+ return rv # type: ignore[return-value]
210
+
211
+ __getitem__ = _simple_escaping_wrapper(str.__getitem__)
212
+ capitalize = _simple_escaping_wrapper(str.capitalize)
213
+ title = _simple_escaping_wrapper(str.title)
214
+ lower = _simple_escaping_wrapper(str.lower)
215
+ upper = _simple_escaping_wrapper(str.upper)
216
+ replace = _simple_escaping_wrapper(str.replace)
217
+ ljust = _simple_escaping_wrapper(str.ljust)
218
+ rjust = _simple_escaping_wrapper(str.rjust)
219
+ lstrip = _simple_escaping_wrapper(str.lstrip)
220
+ rstrip = _simple_escaping_wrapper(str.rstrip)
221
+ center = _simple_escaping_wrapper(str.center)
222
+ strip = _simple_escaping_wrapper(str.strip)
223
+ translate = _simple_escaping_wrapper(str.translate)
224
+ expandtabs = _simple_escaping_wrapper(str.expandtabs)
225
+ swapcase = _simple_escaping_wrapper(str.swapcase)
226
+ zfill = _simple_escaping_wrapper(str.zfill)
227
+ casefold = _simple_escaping_wrapper(str.casefold)
228
+
229
+ if sys.version_info >= (3, 9):
230
+ removeprefix = _simple_escaping_wrapper(str.removeprefix)
231
+ removesuffix = _simple_escaping_wrapper(str.removesuffix)
232
+
233
+ def partition(self, sep: str) -> t.Tuple["te.Self", "te.Self", "te.Self"]:
234
+ l, s, r = super().partition(self.escape(sep))
235
+ cls = self.__class__
236
+ return cls(l), cls(s), cls(r)
237
+
238
+ def rpartition(self, sep: str) -> t.Tuple["te.Self", "te.Self", "te.Self"]:
239
+ l, s, r = super().rpartition(self.escape(sep))
240
+ cls = self.__class__
241
+ return cls(l), cls(s), cls(r)
242
+
243
+ def format(self, *args: t.Any, **kwargs: t.Any) -> "te.Self":
244
+ formatter = EscapeFormatter(self.escape)
245
+ return self.__class__(formatter.vformat(self, args, kwargs))
246
+
247
+ def format_map( # type: ignore[override]
248
+ self, map: t.Mapping[str, t.Any]
249
+ ) -> "te.Self":
250
+ formatter = EscapeFormatter(self.escape)
251
+ return self.__class__(formatter.vformat(self, (), map))
252
+
253
+ def __html_format__(self, format_spec: str) -> "te.Self":
254
+ if format_spec:
255
+ raise ValueError("Unsupported format specification for Markup.")
256
+
257
+ return self
258
+
259
+
260
+ class EscapeFormatter(string.Formatter):
261
+ __slots__ = ("escape",)
262
+
263
+ def __init__(self, escape: t.Callable[[t.Any], Markup]) -> None:
264
+ self.escape = escape
265
+ super().__init__()
266
+
267
+ def format_field(self, value: t.Any, format_spec: str) -> str:
268
+ if hasattr(value, "__html_format__"):
269
+ rv = value.__html_format__(format_spec)
270
+ elif hasattr(value, "__html__"):
271
+ if format_spec:
272
+ raise ValueError(
273
+ f"Format specifier {format_spec} given, but {type(value)} does not"
274
+ " define __html_format__. A class that defines __html__ must define"
275
+ " __html_format__ to work with format specifiers."
276
+ )
277
+ rv = value.__html__()
278
+ else:
279
+ # We need to make sure the format spec is str here as
280
+ # otherwise the wrong callback methods are invoked.
281
+ rv = string.Formatter.format_field(self, value, str(format_spec))
282
+ return str(self.escape(rv))
283
+
284
+
285
+ _ListOrDict = t.TypeVar("_ListOrDict", list, dict)
286
+
287
+
288
+ def _escape_argspec(
289
+ obj: _ListOrDict, iterable: t.Iterable[t.Any], escape: t.Callable[[t.Any], Markup]
290
+ ) -> _ListOrDict:
291
+ """Helper for various string-wrapped functions."""
292
+ for key, value in iterable:
293
+ if isinstance(value, str) or hasattr(value, "__html__"):
294
+ obj[key] = escape(value)
295
+
296
+ return obj
297
+
298
+
299
+ class _MarkupEscapeHelper:
300
+ """Helper for :meth:`Markup.__mod__`."""
301
+
302
+ __slots__ = ("obj", "escape")
303
+
304
+ def __init__(self, obj: t.Any, escape: t.Callable[[t.Any], Markup]) -> None:
305
+ self.obj = obj
306
+ self.escape = escape
307
+
308
+ def __getitem__(self, item: t.Any) -> "te.Self":
309
+ return self.__class__(self.obj[item], self.escape)
310
+
311
+ def __str__(self) -> str:
312
+ return str(self.escape(self.obj))
313
+
314
+ def __repr__(self) -> str:
315
+ return str(self.escape(repr(self.obj)))
316
+
317
+ def __int__(self) -> int:
318
+ return int(self.obj)
319
+
320
+ def __float__(self) -> float:
321
+ return float(self.obj)
322
+
323
+
324
+ # circular import
325
+ try:
326
+ from ._speedups import escape as escape
327
+ from ._speedups import escape_silent as escape_silent
328
+ from ._speedups import soft_str as soft_str
329
+ except ImportError:
330
+ from ._native import escape as escape
331
+ from ._native import escape_silent as escape_silent # noqa: F401
332
+ from ._native import soft_str as soft_str # noqa: F401
venv/lib/python3.10/site-packages/markupsafe/_native.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import typing as t
2
+
3
+ from . import Markup
4
+
5
+
6
+ def escape(s: t.Any) -> Markup:
7
+ """Replace the characters ``&``, ``<``, ``>``, ``'``, and ``"`` in
8
+ the string with HTML-safe sequences. Use this if you need to display
9
+ text that might contain such characters in HTML.
10
+
11
+ If the object has an ``__html__`` method, it is called and the
12
+ return value is assumed to already be safe for HTML.
13
+
14
+ :param s: An object to be converted to a string and escaped.
15
+ :return: A :class:`Markup` string with the escaped text.
16
+ """
17
+ if hasattr(s, "__html__"):
18
+ return Markup(s.__html__())
19
+
20
+ return Markup(
21
+ str(s)
22
+ .replace("&", "&amp;")
23
+ .replace(">", "&gt;")
24
+ .replace("<", "&lt;")
25
+ .replace("'", "&#39;")
26
+ .replace('"', "&#34;")
27
+ )
28
+
29
+
30
+ def escape_silent(s: t.Optional[t.Any]) -> Markup:
31
+ """Like :func:`escape` but treats ``None`` as the empty string.
32
+ Useful with optional values, as otherwise you get the string
33
+ ``'None'`` when the value is ``None``.
34
+
35
+ >>> escape(None)
36
+ Markup('None')
37
+ >>> escape_silent(None)
38
+ Markup('')
39
+ """
40
+ if s is None:
41
+ return Markup()
42
+
43
+ return escape(s)
44
+
45
+
46
+ def soft_str(s: t.Any) -> str:
47
+ """Convert an object to a string if it isn't already. This preserves
48
+ a :class:`Markup` string rather than converting it back to a basic
49
+ string, so it will still be marked as safe and won't be escaped
50
+ again.
51
+
52
+ >>> value = escape("<User 1>")
53
+ >>> value
54
+ Markup('&lt;User 1&gt;')
55
+ >>> escape(str(value))
56
+ Markup('&amp;lt;User 1&amp;gt;')
57
+ >>> escape(soft_str(value))
58
+ Markup('&lt;User 1&gt;')
59
+ """
60
+ if not isinstance(s, str):
61
+ return str(s)
62
+
63
+ return s
venv/lib/python3.10/site-packages/markupsafe/_speedups.c ADDED
@@ -0,0 +1,320 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #include <Python.h>
2
+
3
+ static PyObject* markup;
4
+
5
+ static int
6
+ init_constants(void)
7
+ {
8
+ PyObject *module;
9
+
10
+ /* import markup type so that we can mark the return value */
11
+ module = PyImport_ImportModule("markupsafe");
12
+ if (!module)
13
+ return 0;
14
+ markup = PyObject_GetAttrString(module, "Markup");
15
+ Py_DECREF(module);
16
+
17
+ return 1;
18
+ }
19
+
20
+ #define GET_DELTA(inp, inp_end, delta) \
21
+ while (inp < inp_end) { \
22
+ switch (*inp++) { \
23
+ case '"': \
24
+ case '\'': \
25
+ case '&': \
26
+ delta += 4; \
27
+ break; \
28
+ case '<': \
29
+ case '>': \
30
+ delta += 3; \
31
+ break; \
32
+ } \
33
+ }
34
+
35
+ #define DO_ESCAPE(inp, inp_end, outp) \
36
+ { \
37
+ Py_ssize_t ncopy = 0; \
38
+ while (inp < inp_end) { \
39
+ switch (*inp) { \
40
+ case '"': \
41
+ memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
42
+ outp += ncopy; ncopy = 0; \
43
+ *outp++ = '&'; \
44
+ *outp++ = '#'; \
45
+ *outp++ = '3'; \
46
+ *outp++ = '4'; \
47
+ *outp++ = ';'; \
48
+ break; \
49
+ case '\'': \
50
+ memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
51
+ outp += ncopy; ncopy = 0; \
52
+ *outp++ = '&'; \
53
+ *outp++ = '#'; \
54
+ *outp++ = '3'; \
55
+ *outp++ = '9'; \
56
+ *outp++ = ';'; \
57
+ break; \
58
+ case '&': \
59
+ memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
60
+ outp += ncopy; ncopy = 0; \
61
+ *outp++ = '&'; \
62
+ *outp++ = 'a'; \
63
+ *outp++ = 'm'; \
64
+ *outp++ = 'p'; \
65
+ *outp++ = ';'; \
66
+ break; \
67
+ case '<': \
68
+ memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
69
+ outp += ncopy; ncopy = 0; \
70
+ *outp++ = '&'; \
71
+ *outp++ = 'l'; \
72
+ *outp++ = 't'; \
73
+ *outp++ = ';'; \
74
+ break; \
75
+ case '>': \
76
+ memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
77
+ outp += ncopy; ncopy = 0; \
78
+ *outp++ = '&'; \
79
+ *outp++ = 'g'; \
80
+ *outp++ = 't'; \
81
+ *outp++ = ';'; \
82
+ break; \
83
+ default: \
84
+ ncopy++; \
85
+ } \
86
+ inp++; \
87
+ } \
88
+ memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
89
+ }
90
+
91
+ static PyObject*
92
+ escape_unicode_kind1(PyUnicodeObject *in)
93
+ {
94
+ Py_UCS1 *inp = PyUnicode_1BYTE_DATA(in);
95
+ Py_UCS1 *inp_end = inp + PyUnicode_GET_LENGTH(in);
96
+ Py_UCS1 *outp;
97
+ PyObject *out;
98
+ Py_ssize_t delta = 0;
99
+
100
+ GET_DELTA(inp, inp_end, delta);
101
+ if (!delta) {
102
+ Py_INCREF(in);
103
+ return (PyObject*)in;
104
+ }
105
+
106
+ out = PyUnicode_New(PyUnicode_GET_LENGTH(in) + delta,
107
+ PyUnicode_IS_ASCII(in) ? 127 : 255);
108
+ if (!out)
109
+ return NULL;
110
+
111
+ inp = PyUnicode_1BYTE_DATA(in);
112
+ outp = PyUnicode_1BYTE_DATA(out);
113
+ DO_ESCAPE(inp, inp_end, outp);
114
+ return out;
115
+ }
116
+
117
+ static PyObject*
118
+ escape_unicode_kind2(PyUnicodeObject *in)
119
+ {
120
+ Py_UCS2 *inp = PyUnicode_2BYTE_DATA(in);
121
+ Py_UCS2 *inp_end = inp + PyUnicode_GET_LENGTH(in);
122
+ Py_UCS2 *outp;
123
+ PyObject *out;
124
+ Py_ssize_t delta = 0;
125
+
126
+ GET_DELTA(inp, inp_end, delta);
127
+ if (!delta) {
128
+ Py_INCREF(in);
129
+ return (PyObject*)in;
130
+ }
131
+
132
+ out = PyUnicode_New(PyUnicode_GET_LENGTH(in) + delta, 65535);
133
+ if (!out)
134
+ return NULL;
135
+
136
+ inp = PyUnicode_2BYTE_DATA(in);
137
+ outp = PyUnicode_2BYTE_DATA(out);
138
+ DO_ESCAPE(inp, inp_end, outp);
139
+ return out;
140
+ }
141
+
142
+
143
+ static PyObject*
144
+ escape_unicode_kind4(PyUnicodeObject *in)
145
+ {
146
+ Py_UCS4 *inp = PyUnicode_4BYTE_DATA(in);
147
+ Py_UCS4 *inp_end = inp + PyUnicode_GET_LENGTH(in);
148
+ Py_UCS4 *outp;
149
+ PyObject *out;
150
+ Py_ssize_t delta = 0;
151
+
152
+ GET_DELTA(inp, inp_end, delta);
153
+ if (!delta) {
154
+ Py_INCREF(in);
155
+ return (PyObject*)in;
156
+ }
157
+
158
+ out = PyUnicode_New(PyUnicode_GET_LENGTH(in) + delta, 1114111);
159
+ if (!out)
160
+ return NULL;
161
+
162
+ inp = PyUnicode_4BYTE_DATA(in);
163
+ outp = PyUnicode_4BYTE_DATA(out);
164
+ DO_ESCAPE(inp, inp_end, outp);
165
+ return out;
166
+ }
167
+
168
+ static PyObject*
169
+ escape_unicode(PyUnicodeObject *in)
170
+ {
171
+ if (PyUnicode_READY(in))
172
+ return NULL;
173
+
174
+ switch (PyUnicode_KIND(in)) {
175
+ case PyUnicode_1BYTE_KIND:
176
+ return escape_unicode_kind1(in);
177
+ case PyUnicode_2BYTE_KIND:
178
+ return escape_unicode_kind2(in);
179
+ case PyUnicode_4BYTE_KIND:
180
+ return escape_unicode_kind4(in);
181
+ }
182
+ assert(0); /* shouldn't happen */
183
+ return NULL;
184
+ }
185
+
186
+ static PyObject*
187
+ escape(PyObject *self, PyObject *text)
188
+ {
189
+ static PyObject *id_html;
190
+ PyObject *s = NULL, *rv = NULL, *html;
191
+
192
+ if (id_html == NULL) {
193
+ id_html = PyUnicode_InternFromString("__html__");
194
+ if (id_html == NULL) {
195
+ return NULL;
196
+ }
197
+ }
198
+
199
+ /* we don't have to escape integers, bools or floats */
200
+ if (PyLong_CheckExact(text) ||
201
+ PyFloat_CheckExact(text) || PyBool_Check(text) ||
202
+ text == Py_None)
203
+ return PyObject_CallFunctionObjArgs(markup, text, NULL);
204
+
205
+ /* if the object has an __html__ method that performs the escaping */
206
+ html = PyObject_GetAttr(text ,id_html);
207
+ if (html) {
208
+ s = PyObject_CallObject(html, NULL);
209
+ Py_DECREF(html);
210
+ if (s == NULL) {
211
+ return NULL;
212
+ }
213
+ /* Convert to Markup object */
214
+ rv = PyObject_CallFunctionObjArgs(markup, (PyObject*)s, NULL);
215
+ Py_DECREF(s);
216
+ return rv;
217
+ }
218
+
219
+ /* otherwise make the object unicode if it isn't, then escape */
220
+ PyErr_Clear();
221
+ if (!PyUnicode_Check(text)) {
222
+ PyObject *unicode = PyObject_Str(text);
223
+ if (!unicode)
224
+ return NULL;
225
+ s = escape_unicode((PyUnicodeObject*)unicode);
226
+ Py_DECREF(unicode);
227
+ }
228
+ else
229
+ s = escape_unicode((PyUnicodeObject*)text);
230
+
231
+ /* convert the unicode string into a markup object. */
232
+ rv = PyObject_CallFunctionObjArgs(markup, (PyObject*)s, NULL);
233
+ Py_DECREF(s);
234
+ return rv;
235
+ }
236
+
237
+
238
+ static PyObject*
239
+ escape_silent(PyObject *self, PyObject *text)
240
+ {
241
+ if (text != Py_None)
242
+ return escape(self, text);
243
+ return PyObject_CallFunctionObjArgs(markup, NULL);
244
+ }
245
+
246
+
247
+ static PyObject*
248
+ soft_str(PyObject *self, PyObject *s)
249
+ {
250
+ if (!PyUnicode_Check(s))
251
+ return PyObject_Str(s);
252
+ Py_INCREF(s);
253
+ return s;
254
+ }
255
+
256
+
257
+ static PyMethodDef module_methods[] = {
258
+ {
259
+ "escape",
260
+ (PyCFunction)escape,
261
+ METH_O,
262
+ "Replace the characters ``&``, ``<``, ``>``, ``'``, and ``\"`` in"
263
+ " the string with HTML-safe sequences. Use this if you need to display"
264
+ " text that might contain such characters in HTML.\n\n"
265
+ "If the object has an ``__html__`` method, it is called and the"
266
+ " return value is assumed to already be safe for HTML.\n\n"
267
+ ":param s: An object to be converted to a string and escaped.\n"
268
+ ":return: A :class:`Markup` string with the escaped text.\n"
269
+ },
270
+ {
271
+ "escape_silent",
272
+ (PyCFunction)escape_silent,
273
+ METH_O,
274
+ "Like :func:`escape` but treats ``None`` as the empty string."
275
+ " Useful with optional values, as otherwise you get the string"
276
+ " ``'None'`` when the value is ``None``.\n\n"
277
+ ">>> escape(None)\n"
278
+ "Markup('None')\n"
279
+ ">>> escape_silent(None)\n"
280
+ "Markup('')\n"
281
+ },
282
+ {
283
+ "soft_str",
284
+ (PyCFunction)soft_str,
285
+ METH_O,
286
+ "Convert an object to a string if it isn't already. This preserves"
287
+ " a :class:`Markup` string rather than converting it back to a basic"
288
+ " string, so it will still be marked as safe and won't be escaped"
289
+ " again.\n\n"
290
+ ">>> value = escape(\"<User 1>\")\n"
291
+ ">>> value\n"
292
+ "Markup('&lt;User 1&gt;')\n"
293
+ ">>> escape(str(value))\n"
294
+ "Markup('&amp;lt;User 1&amp;gt;')\n"
295
+ ">>> escape(soft_str(value))\n"
296
+ "Markup('&lt;User 1&gt;')\n"
297
+ },
298
+ {NULL, NULL, 0, NULL} /* Sentinel */
299
+ };
300
+
301
+ static struct PyModuleDef module_definition = {
302
+ PyModuleDef_HEAD_INIT,
303
+ "markupsafe._speedups",
304
+ NULL,
305
+ -1,
306
+ module_methods,
307
+ NULL,
308
+ NULL,
309
+ NULL,
310
+ NULL
311
+ };
312
+
313
+ PyMODINIT_FUNC
314
+ PyInit__speedups(void)
315
+ {
316
+ if (!init_constants())
317
+ return NULL;
318
+
319
+ return PyModule_Create(&module_definition);
320
+ }
venv/lib/python3.10/site-packages/markupsafe/_speedups.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (44.2 kB). View file
 
venv/lib/python3.10/site-packages/markupsafe/_speedups.pyi ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any
2
+ from typing import Optional
3
+
4
+ from . import Markup
5
+
6
+ def escape(s: Any) -> Markup: ...
7
+ def escape_silent(s: Optional[Any]) -> Markup: ...
8
+ def soft_str(s: Any) -> str: ...
9
+ def soft_unicode(s: Any) -> str: ...
venv/lib/python3.10/site-packages/markupsafe/py.typed ADDED
File without changes
venv/lib/python3.10/site-packages/mpmath/calculus/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ from . import calculus
2
+ # XXX: hack to set methods
3
+ from . import approximation
4
+ from . import differentiation
5
+ from . import extrapolation
6
+ from . import polynomials
venv/lib/python3.10/site-packages/mpmath/calculus/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (360 Bytes). View file
 
venv/lib/python3.10/site-packages/mpmath/calculus/__pycache__/approximation.cpython-310.pyc ADDED
Binary file (9.13 kB). View file
 
venv/lib/python3.10/site-packages/mpmath/calculus/__pycache__/calculus.cpython-310.pyc ADDED
Binary file (484 Bytes). View file
 
venv/lib/python3.10/site-packages/mpmath/calculus/__pycache__/differentiation.cpython-310.pyc ADDED
Binary file (20.4 kB). View file
 
venv/lib/python3.10/site-packages/mpmath/calculus/__pycache__/extrapolation.cpython-310.pyc ADDED
Binary file (69.6 kB). View file
 
venv/lib/python3.10/site-packages/mpmath/calculus/__pycache__/inverselaplace.cpython-310.pyc ADDED
Binary file (30.7 kB). View file
 
venv/lib/python3.10/site-packages/mpmath/calculus/__pycache__/odes.cpython-310.pyc ADDED
Binary file (10.2 kB). View file
 
venv/lib/python3.10/site-packages/mpmath/calculus/__pycache__/optimization.cpython-310.pyc ADDED
Binary file (29.9 kB). View file
 
venv/lib/python3.10/site-packages/mpmath/calculus/__pycache__/polynomials.cpython-310.pyc ADDED
Binary file (7.74 kB). View file
 
venv/lib/python3.10/site-packages/mpmath/calculus/__pycache__/quadrature.cpython-310.pyc ADDED
Binary file (39.6 kB). View file
 
venv/lib/python3.10/site-packages/mpmath/calculus/approximation.py ADDED
@@ -0,0 +1,246 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ..libmp.backend import xrange
2
+ from .calculus import defun
3
+
4
+ #----------------------------------------------------------------------------#
5
+ # Approximation methods #
6
+ #----------------------------------------------------------------------------#
7
+
8
+ # The Chebyshev approximation formula is given at:
9
+ # http://mathworld.wolfram.com/ChebyshevApproximationFormula.html
10
+
11
+ # The only major changes in the following code is that we return the
12
+ # expanded polynomial coefficients instead of Chebyshev coefficients,
13
+ # and that we automatically transform [a,b] -> [-1,1] and back
14
+ # for convenience.
15
+
16
+ # Coefficient in Chebyshev approximation
17
+ def chebcoeff(ctx,f,a,b,j,N):
18
+ s = ctx.mpf(0)
19
+ h = ctx.mpf(0.5)
20
+ for k in range(1, N+1):
21
+ t = ctx.cospi((k-h)/N)
22
+ s += f(t*(b-a)*h + (b+a)*h) * ctx.cospi(j*(k-h)/N)
23
+ return 2*s/N
24
+
25
+ # Generate Chebyshev polynomials T_n(ax+b) in expanded form
26
+ def chebT(ctx, a=1, b=0):
27
+ Tb = [1]
28
+ yield Tb
29
+ Ta = [b, a]
30
+ while 1:
31
+ yield Ta
32
+ # Recurrence: T[n+1](ax+b) = 2*(ax+b)*T[n](ax+b) - T[n-1](ax+b)
33
+ Tmp = [0] + [2*a*t for t in Ta]
34
+ for i, c in enumerate(Ta): Tmp[i] += 2*b*c
35
+ for i, c in enumerate(Tb): Tmp[i] -= c
36
+ Ta, Tb = Tmp, Ta
37
+
38
+ @defun
39
+ def chebyfit(ctx, f, interval, N, error=False):
40
+ r"""
41
+ Computes a polynomial of degree `N-1` that approximates the
42
+ given function `f` on the interval `[a, b]`. With ``error=True``,
43
+ :func:`~mpmath.chebyfit` also returns an accurate estimate of the
44
+ maximum absolute error; that is, the maximum value of
45
+ `|f(x) - P(x)|` for `x \in [a, b]`.
46
+
47
+ :func:`~mpmath.chebyfit` uses the Chebyshev approximation formula,
48
+ which gives a nearly optimal solution: that is, the maximum
49
+ error of the approximating polynomial is very close to
50
+ the smallest possible for any polynomial of the same degree.
51
+
52
+ Chebyshev approximation is very useful if one needs repeated
53
+ evaluation of an expensive function, such as function defined
54
+ implicitly by an integral or a differential equation. (For
55
+ example, it could be used to turn a slow mpmath function
56
+ into a fast machine-precision version of the same.)
57
+
58
+ **Examples**
59
+
60
+ Here we use :func:`~mpmath.chebyfit` to generate a low-degree approximation
61
+ of `f(x) = \cos(x)`, valid on the interval `[1, 2]`::
62
+
63
+ >>> from mpmath import *
64
+ >>> mp.dps = 15; mp.pretty = True
65
+ >>> poly, err = chebyfit(cos, [1, 2], 5, error=True)
66
+ >>> nprint(poly)
67
+ [0.00291682, 0.146166, -0.732491, 0.174141, 0.949553]
68
+ >>> nprint(err, 12)
69
+ 1.61351758081e-5
70
+
71
+ The polynomial can be evaluated using ``polyval``::
72
+
73
+ >>> nprint(polyval(poly, 1.6), 12)
74
+ -0.0291858904138
75
+ >>> nprint(cos(1.6), 12)
76
+ -0.0291995223013
77
+
78
+ Sampling the true error at 1000 points shows that the error
79
+ estimate generated by ``chebyfit`` is remarkably good::
80
+
81
+ >>> error = lambda x: abs(cos(x) - polyval(poly, x))
82
+ >>> nprint(max([error(1+n/1000.) for n in range(1000)]), 12)
83
+ 1.61349954245e-5
84
+
85
+ **Choice of degree**
86
+
87
+ The degree `N` can be set arbitrarily high, to obtain an
88
+ arbitrarily good approximation. As a rule of thumb, an
89
+ `N`-term Chebyshev approximation is good to `N/(b-a)` decimal
90
+ places on a unit interval (although this depends on how
91
+ well-behaved `f` is). The cost grows accordingly: ``chebyfit``
92
+ evaluates the function `(N^2)/2` times to compute the
93
+ coefficients and an additional `N` times to estimate the error.
94
+
95
+ **Possible issues**
96
+
97
+ One should be careful to use a sufficiently high working
98
+ precision both when calling ``chebyfit`` and when evaluating
99
+ the resulting polynomial, as the polynomial is sometimes
100
+ ill-conditioned. It is for example difficult to reach
101
+ 15-digit accuracy when evaluating the polynomial using
102
+ machine precision floats, no matter the theoretical
103
+ accuracy of the polynomial. (The option to return the
104
+ coefficients in Chebyshev form should be made available
105
+ in the future.)
106
+
107
+ It is important to note the Chebyshev approximation works
108
+ poorly if `f` is not smooth. A function containing singularities,
109
+ rapid oscillation, etc can be approximated more effectively by
110
+ multiplying it by a weight function that cancels out the
111
+ nonsmooth features, or by dividing the interval into several
112
+ segments.
113
+ """
114
+ a, b = ctx._as_points(interval)
115
+ orig = ctx.prec
116
+ try:
117
+ ctx.prec = orig + int(N**0.5) + 20
118
+ c = [chebcoeff(ctx,f,a,b,k,N) for k in range(N)]
119
+ d = [ctx.zero] * N
120
+ d[0] = -c[0]/2
121
+ h = ctx.mpf(0.5)
122
+ T = chebT(ctx, ctx.mpf(2)/(b-a), ctx.mpf(-1)*(b+a)/(b-a))
123
+ for (k, Tk) in zip(range(N), T):
124
+ for i in range(len(Tk)):
125
+ d[i] += c[k]*Tk[i]
126
+ d = d[::-1]
127
+ # Estimate maximum error
128
+ err = ctx.zero
129
+ for k in range(N):
130
+ x = ctx.cos(ctx.pi*k/N) * (b-a)*h + (b+a)*h
131
+ err = max(err, abs(f(x) - ctx.polyval(d, x)))
132
+ finally:
133
+ ctx.prec = orig
134
+ if error:
135
+ return d, +err
136
+ else:
137
+ return d
138
+
139
+ @defun
140
+ def fourier(ctx, f, interval, N):
141
+ r"""
142
+ Computes the Fourier series of degree `N` of the given function
143
+ on the interval `[a, b]`. More precisely, :func:`~mpmath.fourier` returns
144
+ two lists `(c, s)` of coefficients (the cosine series and sine
145
+ series, respectively), such that
146
+
147
+ .. math ::
148
+
149
+ f(x) \sim \sum_{k=0}^N
150
+ c_k \cos(k m x) + s_k \sin(k m x)
151
+
152
+ where `m = 2 \pi / (b-a)`.
153
+
154
+ Note that many texts define the first coefficient as `2 c_0` instead
155
+ of `c_0`. The easiest way to evaluate the computed series correctly
156
+ is to pass it to :func:`~mpmath.fourierval`.
157
+
158
+ **Examples**
159
+
160
+ The function `f(x) = x` has a simple Fourier series on the standard
161
+ interval `[-\pi, \pi]`. The cosine coefficients are all zero (because
162
+ the function has odd symmetry), and the sine coefficients are
163
+ rational numbers::
164
+
165
+ >>> from mpmath import *
166
+ >>> mp.dps = 15; mp.pretty = True
167
+ >>> c, s = fourier(lambda x: x, [-pi, pi], 5)
168
+ >>> nprint(c)
169
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
170
+ >>> nprint(s)
171
+ [0.0, 2.0, -1.0, 0.666667, -0.5, 0.4]
172
+
173
+ This computes a Fourier series of a nonsymmetric function on
174
+ a nonstandard interval::
175
+
176
+ >>> I = [-1, 1.5]
177
+ >>> f = lambda x: x**2 - 4*x + 1
178
+ >>> cs = fourier(f, I, 4)
179
+ >>> nprint(cs[0])
180
+ [0.583333, 1.12479, -1.27552, 0.904708, -0.441296]
181
+ >>> nprint(cs[1])
182
+ [0.0, -2.6255, 0.580905, 0.219974, -0.540057]
183
+
184
+ It is instructive to plot a function along with its truncated
185
+ Fourier series::
186
+
187
+ >>> plot([f, lambda x: fourierval(cs, I, x)], I) #doctest: +SKIP
188
+
189
+ Fourier series generally converge slowly (and may not converge
190
+ pointwise). For example, if `f(x) = \cosh(x)`, a 10-term Fourier
191
+ series gives an `L^2` error corresponding to 2-digit accuracy::
192
+
193
+ >>> I = [-1, 1]
194
+ >>> cs = fourier(cosh, I, 9)
195
+ >>> g = lambda x: (cosh(x) - fourierval(cs, I, x))**2
196
+ >>> nprint(sqrt(quad(g, I)))
197
+ 0.00467963
198
+
199
+ :func:`~mpmath.fourier` uses numerical quadrature. For nonsmooth functions,
200
+ the accuracy (and speed) can be improved by including all singular
201
+ points in the interval specification::
202
+
203
+ >>> nprint(fourier(abs, [-1, 1], 0), 10)
204
+ ([0.5000441648], [0.0])
205
+ >>> nprint(fourier(abs, [-1, 0, 1], 0), 10)
206
+ ([0.5], [0.0])
207
+
208
+ """
209
+ interval = ctx._as_points(interval)
210
+ a = interval[0]
211
+ b = interval[-1]
212
+ L = b-a
213
+ cos_series = []
214
+ sin_series = []
215
+ cutoff = ctx.eps*10
216
+ for n in xrange(N+1):
217
+ m = 2*n*ctx.pi/L
218
+ an = 2*ctx.quadgl(lambda t: f(t)*ctx.cos(m*t), interval)/L
219
+ bn = 2*ctx.quadgl(lambda t: f(t)*ctx.sin(m*t), interval)/L
220
+ if n == 0:
221
+ an /= 2
222
+ if abs(an) < cutoff: an = ctx.zero
223
+ if abs(bn) < cutoff: bn = ctx.zero
224
+ cos_series.append(an)
225
+ sin_series.append(bn)
226
+ return cos_series, sin_series
227
+
228
+ @defun
229
+ def fourierval(ctx, series, interval, x):
230
+ """
231
+ Evaluates a Fourier series (in the format computed by
232
+ by :func:`~mpmath.fourier` for the given interval) at the point `x`.
233
+
234
+ The series should be a pair `(c, s)` where `c` is the
235
+ cosine series and `s` is the sine series. The two lists
236
+ need not have the same length.
237
+ """
238
+ cs, ss = series
239
+ ab = ctx._as_points(interval)
240
+ a = interval[0]
241
+ b = interval[-1]
242
+ m = 2*ctx.pi/(ab[-1]-ab[0])
243
+ s = ctx.zero
244
+ s += ctx.fsum(cs[n]*ctx.cos(m*n*x) for n in xrange(len(cs)) if cs[n])
245
+ s += ctx.fsum(ss[n]*ctx.sin(m*n*x) for n in xrange(len(ss)) if ss[n])
246
+ return s
venv/lib/python3.10/site-packages/mpmath/calculus/calculus.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ class CalculusMethods(object):
2
+ pass
3
+
4
+ def defun(f):
5
+ setattr(CalculusMethods, f.__name__, f)
6
+ return f
venv/lib/python3.10/site-packages/mpmath/calculus/differentiation.py ADDED
@@ -0,0 +1,647 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ..libmp.backend import xrange
2
+ from .calculus import defun
3
+
4
+ try:
5
+ iteritems = dict.iteritems
6
+ except AttributeError:
7
+ iteritems = dict.items
8
+
9
+ #----------------------------------------------------------------------------#
10
+ # Differentiation #
11
+ #----------------------------------------------------------------------------#
12
+
13
+ @defun
14
+ def difference(ctx, s, n):
15
+ r"""
16
+ Given a sequence `(s_k)` containing at least `n+1` items, returns the
17
+ `n`-th forward difference,
18
+
19
+ .. math ::
20
+
21
+ \Delta^n = \sum_{k=0}^{\infty} (-1)^{k+n} {n \choose k} s_k.
22
+ """
23
+ n = int(n)
24
+ d = ctx.zero
25
+ b = (-1) ** (n & 1)
26
+ for k in xrange(n+1):
27
+ d += b * s[k]
28
+ b = (b * (k-n)) // (k+1)
29
+ return d
30
+
31
+ def hsteps(ctx, f, x, n, prec, **options):
32
+ singular = options.get('singular')
33
+ addprec = options.get('addprec', 10)
34
+ direction = options.get('direction', 0)
35
+ workprec = (prec+2*addprec) * (n+1)
36
+ orig = ctx.prec
37
+ try:
38
+ ctx.prec = workprec
39
+ h = options.get('h')
40
+ if h is None:
41
+ if options.get('relative'):
42
+ hextramag = int(ctx.mag(x))
43
+ else:
44
+ hextramag = 0
45
+ h = ctx.ldexp(1, -prec-addprec-hextramag)
46
+ else:
47
+ h = ctx.convert(h)
48
+ # Directed: steps x, x+h, ... x+n*h
49
+ direction = options.get('direction', 0)
50
+ if direction:
51
+ h *= ctx.sign(direction)
52
+ steps = xrange(n+1)
53
+ norm = h
54
+ # Central: steps x-n*h, x-(n-2)*h ..., x, ..., x+(n-2)*h, x+n*h
55
+ else:
56
+ steps = xrange(-n, n+1, 2)
57
+ norm = (2*h)
58
+ # Perturb
59
+ if singular:
60
+ x += 0.5*h
61
+ values = [f(x+k*h) for k in steps]
62
+ return values, norm, workprec
63
+ finally:
64
+ ctx.prec = orig
65
+
66
+
67
+ @defun
68
+ def diff(ctx, f, x, n=1, **options):
69
+ r"""
70
+ Numerically computes the derivative of `f`, `f'(x)`, or generally for
71
+ an integer `n \ge 0`, the `n`-th derivative `f^{(n)}(x)`.
72
+ A few basic examples are::
73
+
74
+ >>> from mpmath import *
75
+ >>> mp.dps = 15; mp.pretty = True
76
+ >>> diff(lambda x: x**2 + x, 1.0)
77
+ 3.0
78
+ >>> diff(lambda x: x**2 + x, 1.0, 2)
79
+ 2.0
80
+ >>> diff(lambda x: x**2 + x, 1.0, 3)
81
+ 0.0
82
+ >>> nprint([diff(exp, 3, n) for n in range(5)]) # exp'(x) = exp(x)
83
+ [20.0855, 20.0855, 20.0855, 20.0855, 20.0855]
84
+
85
+ Even more generally, given a tuple of arguments `(x_1, \ldots, x_k)`
86
+ and order `(n_1, \ldots, n_k)`, the partial derivative
87
+ `f^{(n_1,\ldots,n_k)}(x_1,\ldots,x_k)` is evaluated. For example::
88
+
89
+ >>> diff(lambda x,y: 3*x*y + 2*y - x, (0.25, 0.5), (0,1))
90
+ 2.75
91
+ >>> diff(lambda x,y: 3*x*y + 2*y - x, (0.25, 0.5), (1,1))
92
+ 3.0
93
+
94
+ **Options**
95
+
96
+ The following optional keyword arguments are recognized:
97
+
98
+ ``method``
99
+ Supported methods are ``'step'`` or ``'quad'``: derivatives may be
100
+ computed using either a finite difference with a small step
101
+ size `h` (default), or numerical quadrature.
102
+ ``direction``
103
+ Direction of finite difference: can be -1 for a left
104
+ difference, 0 for a central difference (default), or +1
105
+ for a right difference; more generally can be any complex number.
106
+ ``addprec``
107
+ Extra precision for `h` used to account for the function's
108
+ sensitivity to perturbations (default = 10).
109
+ ``relative``
110
+ Choose `h` relative to the magnitude of `x`, rather than an
111
+ absolute value; useful for large or tiny `x` (default = False).
112
+ ``h``
113
+ As an alternative to ``addprec`` and ``relative``, manually
114
+ select the step size `h`.
115
+ ``singular``
116
+ If True, evaluation exactly at the point `x` is avoided; this is
117
+ useful for differentiating functions with removable singularities.
118
+ Default = False.
119
+ ``radius``
120
+ Radius of integration contour (with ``method = 'quad'``).
121
+ Default = 0.25. A larger radius typically is faster and more
122
+ accurate, but it must be chosen so that `f` has no
123
+ singularities within the radius from the evaluation point.
124
+
125
+ A finite difference requires `n+1` function evaluations and must be
126
+ performed at `(n+1)` times the target precision. Accordingly, `f` must
127
+ support fast evaluation at high precision.
128
+
129
+ With integration, a larger number of function evaluations is
130
+ required, but not much extra precision is required. For high order
131
+ derivatives, this method may thus be faster if f is very expensive to
132
+ evaluate at high precision.
133
+
134
+ **Further examples**
135
+
136
+ The direction option is useful for computing left- or right-sided
137
+ derivatives of nonsmooth functions::
138
+
139
+ >>> diff(abs, 0, direction=0)
140
+ 0.0
141
+ >>> diff(abs, 0, direction=1)
142
+ 1.0
143
+ >>> diff(abs, 0, direction=-1)
144
+ -1.0
145
+
146
+ More generally, if the direction is nonzero, a right difference
147
+ is computed where the step size is multiplied by sign(direction).
148
+ For example, with direction=+j, the derivative from the positive
149
+ imaginary direction will be computed::
150
+
151
+ >>> diff(abs, 0, direction=j)
152
+ (0.0 - 1.0j)
153
+
154
+ With integration, the result may have a small imaginary part
155
+ even even if the result is purely real::
156
+
157
+ >>> diff(sqrt, 1, method='quad') # doctest:+ELLIPSIS
158
+ (0.5 - 4.59...e-26j)
159
+ >>> chop(_)
160
+ 0.5
161
+
162
+ Adding precision to obtain an accurate value::
163
+
164
+ >>> diff(cos, 1e-30)
165
+ 0.0
166
+ >>> diff(cos, 1e-30, h=0.0001)
167
+ -9.99999998328279e-31
168
+ >>> diff(cos, 1e-30, addprec=100)
169
+ -1.0e-30
170
+
171
+ """
172
+ partial = False
173
+ try:
174
+ orders = list(n)
175
+ x = list(x)
176
+ partial = True
177
+ except TypeError:
178
+ pass
179
+ if partial:
180
+ x = [ctx.convert(_) for _ in x]
181
+ return _partial_diff(ctx, f, x, orders, options)
182
+ method = options.get('method', 'step')
183
+ if n == 0 and method != 'quad' and not options.get('singular'):
184
+ return f(ctx.convert(x))
185
+ prec = ctx.prec
186
+ try:
187
+ if method == 'step':
188
+ values, norm, workprec = hsteps(ctx, f, x, n, prec, **options)
189
+ ctx.prec = workprec
190
+ v = ctx.difference(values, n) / norm**n
191
+ elif method == 'quad':
192
+ ctx.prec += 10
193
+ radius = ctx.convert(options.get('radius', 0.25))
194
+ def g(t):
195
+ rei = radius*ctx.expj(t)
196
+ z = x + rei
197
+ return f(z) / rei**n
198
+ d = ctx.quadts(g, [0, 2*ctx.pi])
199
+ v = d * ctx.factorial(n) / (2*ctx.pi)
200
+ else:
201
+ raise ValueError("unknown method: %r" % method)
202
+ finally:
203
+ ctx.prec = prec
204
+ return +v
205
+
206
+ def _partial_diff(ctx, f, xs, orders, options):
207
+ if not orders:
208
+ return f()
209
+ if not sum(orders):
210
+ return f(*xs)
211
+ i = 0
212
+ for i in range(len(orders)):
213
+ if orders[i]:
214
+ break
215
+ order = orders[i]
216
+ def fdiff_inner(*f_args):
217
+ def inner(t):
218
+ return f(*(f_args[:i] + (t,) + f_args[i+1:]))
219
+ return ctx.diff(inner, f_args[i], order, **options)
220
+ orders[i] = 0
221
+ return _partial_diff(ctx, fdiff_inner, xs, orders, options)
222
+
223
+ @defun
224
+ def diffs(ctx, f, x, n=None, **options):
225
+ r"""
226
+ Returns a generator that yields the sequence of derivatives
227
+
228
+ .. math ::
229
+
230
+ f(x), f'(x), f''(x), \ldots, f^{(k)}(x), \ldots
231
+
232
+ With ``method='step'``, :func:`~mpmath.diffs` uses only `O(k)`
233
+ function evaluations to generate the first `k` derivatives,
234
+ rather than the roughly `O(k^2)` evaluations
235
+ required if one calls :func:`~mpmath.diff` `k` separate times.
236
+
237
+ With `n < \infty`, the generator stops as soon as the
238
+ `n`-th derivative has been generated. If the exact number of
239
+ needed derivatives is known in advance, this is further
240
+ slightly more efficient.
241
+
242
+ Options are the same as for :func:`~mpmath.diff`.
243
+
244
+ **Examples**
245
+
246
+ >>> from mpmath import *
247
+ >>> mp.dps = 15
248
+ >>> nprint(list(diffs(cos, 1, 5)))
249
+ [0.540302, -0.841471, -0.540302, 0.841471, 0.540302, -0.841471]
250
+ >>> for i, d in zip(range(6), diffs(cos, 1)):
251
+ ... print("%s %s" % (i, d))
252
+ ...
253
+ 0 0.54030230586814
254
+ 1 -0.841470984807897
255
+ 2 -0.54030230586814
256
+ 3 0.841470984807897
257
+ 4 0.54030230586814
258
+ 5 -0.841470984807897
259
+
260
+ """
261
+ if n is None:
262
+ n = ctx.inf
263
+ else:
264
+ n = int(n)
265
+ if options.get('method', 'step') != 'step':
266
+ k = 0
267
+ while k < n + 1:
268
+ yield ctx.diff(f, x, k, **options)
269
+ k += 1
270
+ return
271
+ singular = options.get('singular')
272
+ if singular:
273
+ yield ctx.diff(f, x, 0, singular=True)
274
+ else:
275
+ yield f(ctx.convert(x))
276
+ if n < 1:
277
+ return
278
+ if n == ctx.inf:
279
+ A, B = 1, 2
280
+ else:
281
+ A, B = 1, n+1
282
+ while 1:
283
+ callprec = ctx.prec
284
+ y, norm, workprec = hsteps(ctx, f, x, B, callprec, **options)
285
+ for k in xrange(A, B):
286
+ try:
287
+ ctx.prec = workprec
288
+ d = ctx.difference(y, k) / norm**k
289
+ finally:
290
+ ctx.prec = callprec
291
+ yield +d
292
+ if k >= n:
293
+ return
294
+ A, B = B, int(A*1.4+1)
295
+ B = min(B, n)
296
+
297
+ def iterable_to_function(gen):
298
+ gen = iter(gen)
299
+ data = []
300
+ def f(k):
301
+ for i in xrange(len(data), k+1):
302
+ data.append(next(gen))
303
+ return data[k]
304
+ return f
305
+
306
+ @defun
307
+ def diffs_prod(ctx, factors):
308
+ r"""
309
+ Given a list of `N` iterables or generators yielding
310
+ `f_k(x), f'_k(x), f''_k(x), \ldots` for `k = 1, \ldots, N`,
311
+ generate `g(x), g'(x), g''(x), \ldots` where
312
+ `g(x) = f_1(x) f_2(x) \cdots f_N(x)`.
313
+
314
+ At high precision and for large orders, this is typically more efficient
315
+ than numerical differentiation if the derivatives of each `f_k(x)`
316
+ admit direct computation.
317
+
318
+ Note: This function does not increase the working precision internally,
319
+ so guard digits may have to be added externally for full accuracy.
320
+
321
+ **Examples**
322
+
323
+ >>> from mpmath import *
324
+ >>> mp.dps = 15; mp.pretty = True
325
+ >>> f = lambda x: exp(x)*cos(x)*sin(x)
326
+ >>> u = diffs(f, 1)
327
+ >>> v = mp.diffs_prod([diffs(exp,1), diffs(cos,1), diffs(sin,1)])
328
+ >>> next(u); next(v)
329
+ 1.23586333600241
330
+ 1.23586333600241
331
+ >>> next(u); next(v)
332
+ 0.104658952245596
333
+ 0.104658952245596
334
+ >>> next(u); next(v)
335
+ -5.96999877552086
336
+ -5.96999877552086
337
+ >>> next(u); next(v)
338
+ -12.4632923122697
339
+ -12.4632923122697
340
+
341
+ """
342
+ N = len(factors)
343
+ if N == 1:
344
+ for c in factors[0]:
345
+ yield c
346
+ else:
347
+ u = iterable_to_function(ctx.diffs_prod(factors[:N//2]))
348
+ v = iterable_to_function(ctx.diffs_prod(factors[N//2:]))
349
+ n = 0
350
+ while 1:
351
+ #yield sum(binomial(n,k)*u(n-k)*v(k) for k in xrange(n+1))
352
+ s = u(n) * v(0)
353
+ a = 1
354
+ for k in xrange(1,n+1):
355
+ a = a * (n-k+1) // k
356
+ s += a * u(n-k) * v(k)
357
+ yield s
358
+ n += 1
359
+
360
+ def dpoly(n, _cache={}):
361
+ """
362
+ nth differentiation polynomial for exp (Faa di Bruno's formula).
363
+
364
+ TODO: most exponents are zero, so maybe a sparse representation
365
+ would be better.
366
+ """
367
+ if n in _cache:
368
+ return _cache[n]
369
+ if not _cache:
370
+ _cache[0] = {(0,):1}
371
+ R = dpoly(n-1)
372
+ R = dict((c+(0,),v) for (c,v) in iteritems(R))
373
+ Ra = {}
374
+ for powers, count in iteritems(R):
375
+ powers1 = (powers[0]+1,) + powers[1:]
376
+ if powers1 in Ra:
377
+ Ra[powers1] += count
378
+ else:
379
+ Ra[powers1] = count
380
+ for powers, count in iteritems(R):
381
+ if not sum(powers):
382
+ continue
383
+ for k,p in enumerate(powers):
384
+ if p:
385
+ powers2 = powers[:k] + (p-1,powers[k+1]+1) + powers[k+2:]
386
+ if powers2 in Ra:
387
+ Ra[powers2] += p*count
388
+ else:
389
+ Ra[powers2] = p*count
390
+ _cache[n] = Ra
391
+ return _cache[n]
392
+
393
+ @defun
394
+ def diffs_exp(ctx, fdiffs):
395
+ r"""
396
+ Given an iterable or generator yielding `f(x), f'(x), f''(x), \ldots`
397
+ generate `g(x), g'(x), g''(x), \ldots` where `g(x) = \exp(f(x))`.
398
+
399
+ At high precision and for large orders, this is typically more efficient
400
+ than numerical differentiation if the derivatives of `f(x)`
401
+ admit direct computation.
402
+
403
+ Note: This function does not increase the working precision internally,
404
+ so guard digits may have to be added externally for full accuracy.
405
+
406
+ **Examples**
407
+
408
+ The derivatives of the gamma function can be computed using
409
+ logarithmic differentiation::
410
+
411
+ >>> from mpmath import *
412
+ >>> mp.dps = 15; mp.pretty = True
413
+ >>>
414
+ >>> def diffs_loggamma(x):
415
+ ... yield loggamma(x)
416
+ ... i = 0
417
+ ... while 1:
418
+ ... yield psi(i,x)
419
+ ... i += 1
420
+ ...
421
+ >>> u = diffs_exp(diffs_loggamma(3))
422
+ >>> v = diffs(gamma, 3)
423
+ >>> next(u); next(v)
424
+ 2.0
425
+ 2.0
426
+ >>> next(u); next(v)
427
+ 1.84556867019693
428
+ 1.84556867019693
429
+ >>> next(u); next(v)
430
+ 2.49292999190269
431
+ 2.49292999190269
432
+ >>> next(u); next(v)
433
+ 3.44996501352367
434
+ 3.44996501352367
435
+
436
+ """
437
+ fn = iterable_to_function(fdiffs)
438
+ f0 = ctx.exp(fn(0))
439
+ yield f0
440
+ i = 1
441
+ while 1:
442
+ s = ctx.mpf(0)
443
+ for powers, c in iteritems(dpoly(i)):
444
+ s += c*ctx.fprod(fn(k+1)**p for (k,p) in enumerate(powers) if p)
445
+ yield s * f0
446
+ i += 1
447
+
448
+ @defun
449
+ def differint(ctx, f, x, n=1, x0=0):
450
+ r"""
451
+ Calculates the Riemann-Liouville differintegral, or fractional
452
+ derivative, defined by
453
+
454
+ .. math ::
455
+
456
+ \,_{x_0}{\mathbb{D}}^n_xf(x) = \frac{1}{\Gamma(m-n)} \frac{d^m}{dx^m}
457
+ \int_{x_0}^{x}(x-t)^{m-n-1}f(t)dt
458
+
459
+ where `f` is a given (presumably well-behaved) function,
460
+ `x` is the evaluation point, `n` is the order, and `x_0` is
461
+ the reference point of integration (`m` is an arbitrary
462
+ parameter selected automatically).
463
+
464
+ With `n = 1`, this is just the standard derivative `f'(x)`; with `n = 2`,
465
+ the second derivative `f''(x)`, etc. With `n = -1`, it gives
466
+ `\int_{x_0}^x f(t) dt`, with `n = -2`
467
+ it gives `\int_{x_0}^x \left( \int_{x_0}^t f(u) du \right) dt`, etc.
468
+
469
+ As `n` is permitted to be any number, this operator generalizes
470
+ iterated differentiation and iterated integration to a single
471
+ operator with a continuous order parameter.
472
+
473
+ **Examples**
474
+
475
+ There is an exact formula for the fractional derivative of a
476
+ monomial `x^p`, which may be used as a reference. For example,
477
+ the following gives a half-derivative (order 0.5)::
478
+
479
+ >>> from mpmath import *
480
+ >>> mp.dps = 15; mp.pretty = True
481
+ >>> x = mpf(3); p = 2; n = 0.5
482
+ >>> differint(lambda t: t**p, x, n)
483
+ 7.81764019044672
484
+ >>> gamma(p+1)/gamma(p-n+1) * x**(p-n)
485
+ 7.81764019044672
486
+
487
+ Another useful test function is the exponential function, whose
488
+ integration / differentiation formula easy generalizes
489
+ to arbitrary order. Here we first compute a third derivative,
490
+ and then a triply nested integral. (The reference point `x_0`
491
+ is set to `-\infty` to avoid nonzero endpoint terms.)::
492
+
493
+ >>> differint(lambda x: exp(pi*x), -1.5, 3)
494
+ 0.278538406900792
495
+ >>> exp(pi*-1.5) * pi**3
496
+ 0.278538406900792
497
+ >>> differint(lambda x: exp(pi*x), 3.5, -3, -inf)
498
+ 1922.50563031149
499
+ >>> exp(pi*3.5) / pi**3
500
+ 1922.50563031149
501
+
502
+ However, for noninteger `n`, the differentiation formula for the
503
+ exponential function must be modified to give the same result as the
504
+ Riemann-Liouville differintegral::
505
+
506
+ >>> x = mpf(3.5)
507
+ >>> c = pi
508
+ >>> n = 1+2*j
509
+ >>> differint(lambda x: exp(c*x), x, n)
510
+ (-123295.005390743 + 140955.117867654j)
511
+ >>> x**(-n) * exp(c)**x * (x*c)**n * gammainc(-n, 0, x*c) / gamma(-n)
512
+ (-123295.005390743 + 140955.117867654j)
513
+
514
+
515
+ """
516
+ m = max(int(ctx.ceil(ctx.re(n)))+1, 1)
517
+ r = m-n-1
518
+ g = lambda x: ctx.quad(lambda t: (x-t)**r * f(t), [x0, x])
519
+ return ctx.diff(g, x, m) / ctx.gamma(m-n)
520
+
521
+ @defun
522
+ def diffun(ctx, f, n=1, **options):
523
+ r"""
524
+ Given a function `f`, returns a function `g(x)` that evaluates the nth
525
+ derivative `f^{(n)}(x)`::
526
+
527
+ >>> from mpmath import *
528
+ >>> mp.dps = 15; mp.pretty = True
529
+ >>> cos2 = diffun(sin)
530
+ >>> sin2 = diffun(sin, 4)
531
+ >>> cos(1.3), cos2(1.3)
532
+ (0.267498828624587, 0.267498828624587)
533
+ >>> sin(1.3), sin2(1.3)
534
+ (0.963558185417193, 0.963558185417193)
535
+
536
+ The function `f` must support arbitrary precision evaluation.
537
+ See :func:`~mpmath.diff` for additional details and supported
538
+ keyword options.
539
+ """
540
+ if n == 0:
541
+ return f
542
+ def g(x):
543
+ return ctx.diff(f, x, n, **options)
544
+ return g
545
+
546
+ @defun
547
+ def taylor(ctx, f, x, n, **options):
548
+ r"""
549
+ Produces a degree-`n` Taylor polynomial around the point `x` of the
550
+ given function `f`. The coefficients are returned as a list.
551
+
552
+ >>> from mpmath import *
553
+ >>> mp.dps = 15; mp.pretty = True
554
+ >>> nprint(chop(taylor(sin, 0, 5)))
555
+ [0.0, 1.0, 0.0, -0.166667, 0.0, 0.00833333]
556
+
557
+ The coefficients are computed using high-order numerical
558
+ differentiation. The function must be possible to evaluate
559
+ to arbitrary precision. See :func:`~mpmath.diff` for additional details
560
+ and supported keyword options.
561
+
562
+ Note that to evaluate the Taylor polynomial as an approximation
563
+ of `f`, e.g. with :func:`~mpmath.polyval`, the coefficients must be reversed,
564
+ and the point of the Taylor expansion must be subtracted from
565
+ the argument:
566
+
567
+ >>> p = taylor(exp, 2.0, 10)
568
+ >>> polyval(p[::-1], 2.5 - 2.0)
569
+ 12.1824939606092
570
+ >>> exp(2.5)
571
+ 12.1824939607035
572
+
573
+ """
574
+ gen = enumerate(ctx.diffs(f, x, n, **options))
575
+ if options.get("chop", True):
576
+ return [ctx.chop(d)/ctx.factorial(i) for i, d in gen]
577
+ else:
578
+ return [d/ctx.factorial(i) for i, d in gen]
579
+
580
+ @defun
581
+ def pade(ctx, a, L, M):
582
+ r"""
583
+ Computes a Pade approximation of degree `(L, M)` to a function.
584
+ Given at least `L+M+1` Taylor coefficients `a` approximating
585
+ a function `A(x)`, :func:`~mpmath.pade` returns coefficients of
586
+ polynomials `P, Q` satisfying
587
+
588
+ .. math ::
589
+
590
+ P = \sum_{k=0}^L p_k x^k
591
+
592
+ Q = \sum_{k=0}^M q_k x^k
593
+
594
+ Q_0 = 1
595
+
596
+ A(x) Q(x) = P(x) + O(x^{L+M+1})
597
+
598
+ `P(x)/Q(x)` can provide a good approximation to an analytic function
599
+ beyond the radius of convergence of its Taylor series (example
600
+ from G.A. Baker 'Essentials of Pade Approximants' Academic Press,
601
+ Ch.1A)::
602
+
603
+ >>> from mpmath import *
604
+ >>> mp.dps = 15; mp.pretty = True
605
+ >>> one = mpf(1)
606
+ >>> def f(x):
607
+ ... return sqrt((one + 2*x)/(one + x))
608
+ ...
609
+ >>> a = taylor(f, 0, 6)
610
+ >>> p, q = pade(a, 3, 3)
611
+ >>> x = 10
612
+ >>> polyval(p[::-1], x)/polyval(q[::-1], x)
613
+ 1.38169105566806
614
+ >>> f(x)
615
+ 1.38169855941551
616
+
617
+ """
618
+ # To determine L+1 coefficients of P and M coefficients of Q
619
+ # L+M+1 coefficients of A must be provided
620
+ if len(a) < L+M+1:
621
+ raise ValueError("L+M+1 Coefficients should be provided")
622
+
623
+ if M == 0:
624
+ if L == 0:
625
+ return [ctx.one], [ctx.one]
626
+ else:
627
+ return a[:L+1], [ctx.one]
628
+
629
+ # Solve first
630
+ # a[L]*q[1] + ... + a[L-M+1]*q[M] = -a[L+1]
631
+ # ...
632
+ # a[L+M-1]*q[1] + ... + a[L]*q[M] = -a[L+M]
633
+ A = ctx.matrix(M)
634
+ for j in range(M):
635
+ for i in range(min(M, L+j+1)):
636
+ A[j, i] = a[L+j-i]
637
+ v = -ctx.matrix(a[(L+1):(L+M+1)])
638
+ x = ctx.lu_solve(A, v)
639
+ q = [ctx.one] + list(x)
640
+ # compute p
641
+ p = [0]*(L+1)
642
+ for i in range(L+1):
643
+ s = a[i]
644
+ for j in range(1, min(M,i) + 1):
645
+ s += q[j]*a[i-j]
646
+ p[i] = s
647
+ return p, q
venv/lib/python3.10/site-packages/mpmath/calculus/extrapolation.py ADDED
@@ -0,0 +1,2115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Python 2/3 compatibility: on Python 2, itertools.izip is the lazy zip;
# on Python 3, the builtin zip is already lazy, so alias it.
try:
    from itertools import izip
except ImportError:
    izip = zip

from ..libmp.backend import xrange
from .calculus import defun

# Python 2/3 compatibility: the builtin next() does not exist before
# Python 2.6; fall back to calling the iterator's .next() method.
try:
    next = next
except NameError:
    next = lambda _: _.next()
@defun
def richardson(ctx, seq):
    r"""
    Given a list ``seq`` of the first `N` elements of a slowly convergent
    infinite sequence, :func:`~mpmath.richardson` computes the `N`-term
    Richardson extrapolate for the limit.

    :func:`~mpmath.richardson` returns `(v, c)` where `v` is the estimated
    limit and `c` is the magnitude of the largest weight used during the
    computation. The weight provides an estimate of the precision
    lost to cancellation. Due to cancellation effects, the sequence must
    typically be computed at a much higher precision than the target
    accuracy of the extrapolation.

    **Applicability and issues**

    The `N`-step Richardson extrapolation algorithm used by
    :func:`~mpmath.richardson` is described in [1].

    Richardson extrapolation only works for a specific type of sequence,
    namely one converging like partial sums of
    `P(1)/Q(1) + P(2)/Q(2) + \ldots` where `P` and `Q` are polynomials.
    When the sequence does not converge at such a rate
    :func:`~mpmath.richardson` generally produces garbage.

    Richardson extrapolation has the advantage of being fast: the `N`-term
    extrapolate requires only `O(N)` arithmetic operations, and usually
    produces an estimate that is accurate to `O(N)` digits. Contrast with
    the Shanks transformation (see :func:`~mpmath.shanks`), which requires
    `O(N^2)` operations.

    :func:`~mpmath.richardson` is unable to produce an estimate for the
    approximation error. One way to estimate the error is to perform
    two extrapolations with slightly different `N` and comparing the
    results.

    Richardson extrapolation does not work for oscillating sequences.
    As a simple workaround, :func:`~mpmath.richardson` detects if the last
    three elements do not differ monotonically, and in that case
    applies extrapolation only to the even-index elements.

    **Example**

    Applying Richardson extrapolation to the Leibniz series for `\pi`::

        >>> from mpmath import *
        >>> mp.dps = 30; mp.pretty = True
        >>> S = [4*sum(mpf(-1)**n/(2*n+1) for n in range(m))
        ...     for m in range(1,30)]
        >>> v, c = richardson(S[:10])
        >>> v
        3.2126984126984126984126984127
        >>> nprint([v-pi, c])
        [0.0711058, 2.0]

        >>> v, c = richardson(S[:30])
        >>> v
        3.14159265468624052829954206226
        >>> nprint([v-pi, c])
        [1.09645e-9, 20833.3]

    **References**

    1. [BenderOrszag]_ pp. 375-376

    """
    if len(seq) < 3:
        raise ValueError("seq should be of minimum length 3")
    # Oscillation workaround: if the last three elements are not
    # monotone, extrapolate only the even-index subsequence.
    if ctx.sign(seq[-1]-seq[-2]) != ctx.sign(seq[-2]-seq[-3]):
        seq = seq[::2]
    # Use the tail seq[N], ..., seq[2N] of the (possibly halved) input.
    N = len(seq)//2-1
    s = ctx.zero
    # The general weight is c[k] = (N+k)**N * (-1)**(k+N) / k! / (N-k)!
    # To avoid repeated factorials, we simplify the quotient
    # of successive weights to obtain a recurrence relation
    c = (-1)**N * N**N / ctx.mpf(ctx._ifac(N))
    maxc = 1
    for k in xrange(N+1):
        s += c * seq[N+k]
        # Track the largest |weight| as the cancellation estimate.
        maxc = max(abs(c), maxc)
        # Recurrence: advance c[k] -> c[k+1].
        c *= (k-N)*ctx.mpf(k+N+1)**N
        c /= ((1+k)*ctx.mpf(k+N)**N)
    return s, maxc
@defun
def shanks(ctx, seq, table=None, randomized=False):
    r"""
    Given a list ``seq`` of the first `N` elements of a slowly
    convergent infinite sequence `(A_k)`, :func:`~mpmath.shanks` computes the iterated
    Shanks transformation `S(A), S(S(A)), \ldots, S^{N/2}(A)`. The Shanks
    transformation often provides strong convergence acceleration,
    especially if the sequence is oscillating.

    The iterated Shanks transformation is computed using the Wynn
    epsilon algorithm (see [1]). :func:`~mpmath.shanks` returns the full
    epsilon table generated by Wynn's algorithm, which can be read
    off as follows:

    * The table is a list of lists forming a lower triangular matrix,
      where higher row and column indices correspond to more accurate
      values.
    * The columns with even index hold dummy entries (required for the
      computation) and the columns with odd index hold the actual
      extrapolates.
    * The last element in the last row is typically the most
      accurate estimate of the limit.
    * The difference to the third last element in the last row
      provides an estimate of the approximation error.
    * The magnitude of the second last element provides an estimate
      of the numerical accuracy lost to cancellation.

    For convenience, the extrapolation is stopped at an odd index
    so that ``shanks(seq)[-1][-1]`` always gives an estimate of the
    limit.

    Optionally, an existing table can be passed to :func:`~mpmath.shanks`.
    This can be used to efficiently extend a previous computation after
    new elements have been appended to the sequence. The table will
    then be updated in-place.

    **The Shanks transformation**

    The Shanks transformation is defined as follows (see [2]): given
    the input sequence `(A_0, A_1, \ldots)`, the transformed sequence is
    given by

    .. math ::

        S(A_k) = \frac{A_{k+1}A_{k-1}-A_k^2}{A_{k+1}+A_{k-1}-2 A_k}

    The Shanks transformation gives the exact limit `A_{\infty}` in a
    single step if `A_k = A + a q^k`. Note in particular that it
    extrapolates the exact sum of a geometric series in a single step.

    Applying the Shanks transformation once often improves convergence
    substantially for an arbitrary sequence, but the optimal effect is
    obtained by applying it iteratively:
    `S(S(A_k)), S(S(S(A_k))), \ldots`.

    Wynn's epsilon algorithm provides an efficient way to generate
    the table of iterated Shanks transformations. It reduces the
    computation of each element to essentially a single division, at
    the cost of requiring dummy elements in the table. See [1] for
    details.

    **Precision issues**

    Due to cancellation effects, the sequence must typically be
    computed at a much higher precision than the target accuracy
    of the extrapolation.

    If the Shanks transformation converges to the exact limit (such
    as if the sequence is a geometric series), then a division by
    zero occurs. By default, :func:`~mpmath.shanks` handles this case by
    terminating the iteration and returning the table it has
    generated so far. With *randomized=True*, it will instead
    replace the zero by a pseudorandom number close to zero.
    (TODO: find a better solution to this problem.)

    **Examples**

    We illustrate by applying Shanks transformation to the Leibniz
    series for `\pi`::

        >>> from mpmath import *
        >>> mp.dps = 50
        >>> S = [4*sum(mpf(-1)**n/(2*n+1) for n in range(m))
        ...     for m in range(1,30)]
        >>>
        >>> T = shanks(S[:7])
        >>> for row in T:
        ...     nprint(row)
        ...
        [-0.75]
        [1.25, 3.16667]
        [-1.75, 3.13333, -28.75]
        [2.25, 3.14524, 82.25, 3.14234]
        [-2.75, 3.13968, -177.75, 3.14139, -969.937]
        [3.25, 3.14271, 327.25, 3.14166, 3515.06, 3.14161]

    The extrapolated accuracy is about 4 digits, and about 4 digits
    may have been lost due to cancellation::

        >>> L = T[-1]
        >>> nprint([abs(L[-1] - pi), abs(L[-1] - L[-3]), abs(L[-2])])
        [2.22532e-5, 4.78309e-5, 3515.06]

    Now we extend the computation::

        >>> T = shanks(S[:25], T)
        >>> L = T[-1]
        >>> nprint([abs(L[-1] - pi), abs(L[-1] - L[-3]), abs(L[-2])])
        [3.75527e-19, 1.48478e-19, 2.96014e+17]

    The value for pi is now accurate to 18 digits. About 18 digits may
    also have been lost to cancellation.

    Here is an example with a geometric series, where the convergence
    is immediate (the sum is exactly 1)::

        >>> mp.dps = 15
        >>> for row in shanks([0.5, 0.75, 0.875, 0.9375, 0.96875]):
        ...     nprint(row)
        [4.0]
        [8.0, 1.0]

    **References**

    1. [GravesMorris]_

    2. [BenderOrszag]_ pp. 368-375

    """
    if len(seq) < 2:
        raise ValueError("seq should be of minimum length 2")
    # When extending an existing table, resume after its last row.
    if table:
        START = len(table)
    else:
        START = 0
        table = []
    STOP = len(seq) - 1
    # Stop at an odd row index so the final entry is a true extrapolate
    # (odd columns hold the extrapolates, even columns are dummies).
    if STOP & 1:
        STOP -= 1
    one = ctx.one
    eps = +ctx.eps
    if randomized:
        from random import Random
        rnd = Random()
        # Seed from START so extending a table is reproducible.
        rnd.seed(START)
    for i in xrange(START, STOP):
        row = []
        for j in xrange(i+1):
            if j == 0:
                # Bootstrap column: dummy a=0, b is the forward difference.
                a, b = 0, seq[i+1]-seq[i]
            else:
                if j == 1:
                    a = seq[i]
                else:
                    a = table[i-1][j-2]
                b = row[j-1] - table[i-1][j-1]
            if not b:
                # Exact convergence (division by zero in the epsilon
                # recurrence): either perturb b or truncate the table.
                if randomized:
                    b = (1 + rnd.getrandbits(10))*eps
                elif i & 1:
                    return table[:-1]
                else:
                    return table
            # Wynn's epsilon recurrence: e_{k+1} = e_{k-1} + 1/(diff).
            row.append(a + one/b)
        table.append(row)
    return table
class levin_class:
    # levin: Copyright 2013 Timo Hartmann (thartmann15 at gmail.com)
    r"""
    This interface implements Levin's (nonlinear) sequence transformation for
    convergence acceleration and summation of divergent series. It performs
    better than the Shanks/Wynn-epsilon algorithm for logarithmic convergent
    or alternating divergent series.

    Let *A* be the series we want to sum:

    .. math ::

        A = \sum_{k=0}^{\infty} a_k

    Attention: all `a_k` must be non-zero!

    Let `s_n` be the partial sums of this series:

    .. math ::

        s_n = \sum_{k=0}^n a_k.

    **Methods**

    Calling ``levin`` returns an object with the following methods.

    ``update(...)`` works with the list of individual terms `a_k` of *A*, and
    ``update_psum(...)`` works with the list of partial sums `s_k` of *A*:

    .. code ::

        v, e = ...update([a_0, a_1,..., a_k])
        v, e = ...update_psum([s_0, s_1,..., s_k])

    ``step(...)`` works with the individual terms `a_k` and ``step_psum(...)``
    works with the partial sums `s_k`:

    .. code ::

        v, e = ...step(a_k)
        v, e = ...step_psum(s_k)

    *v* is the current estimate for *A*, and *e* is an error estimate which is
    simply the difference between the current estimate and the last estimate.
    One should not mix ``update``, ``update_psum``, ``step`` and ``step_psum``.

    **A word of caution**

    One can only hope for good results (i.e. convergence acceleration or
    resummation) if the `s_n` have some well defined asymptotic behavior for
    large `n` and are not erratic or random. Furthermore one usually needs very
    high working precision because of the numerical cancellation. If the working
    precision is insufficient, levin may produce silently numerical garbage.
    Furthermore even if the Levin-transformation converges, in the general case
    there is no proof that the result is mathematically sound. Only for very
    special classes of problems one can prove that the Levin-transformation
    converges to the expected result (for example Stieltjes-type integrals).
    Furthermore the Levin-transform is quite expensive (i.e. slow) in comparison
    to Shanks/Wynn-epsilon, Richardson & co.
    In summary one can say that the Levin-transformation is powerful but
    unreliable and that it may need a copious amount of working precision.

    The Levin transform has several variants differing in the choice of weights.
    Some variants are better suited for the possible flavours of convergence
    behaviour of *A* than other variants:

    .. code ::

       convergence behaviour   levin-u   levin-t   levin-v   shanks/wynn-epsilon

       logarithmic               +         -         +           -
       linear                    +         +         +           +
       alternating divergent     +         +         +           +

      "+" means the variant is suitable,"-" means the variant is not suitable;
      for comparison the Shanks/Wynn-epsilon transform is listed, too.

    The variant is controlled through the variant keyword (i.e. ``variant="u"``,
    ``variant="t"`` or ``variant="v"``). Overall "u" is probably the best choice.

    Finally it is possible to use the Sidi-S transform instead of the Levin transform
    by using the keyword ``method='sidi'``. The Sidi-S transform works better than the
    Levin transformation for some divergent series (see the examples).

    Parameters:

    .. code ::

       method      "levin" or "sidi" chooses either the Levin or the Sidi-S transformation
       variant     "u","t" or "v" chooses the weight variant.

    The Levin transform is also accessible through the nsum interface.
    ``method="l"`` or ``method="levin"`` select the normal Levin transform while
    ``method="sidi"``
    selects the Sidi-S transform. The variant is in both cases selected through the
    levin_variant keyword. The stepsize in :func:`~mpmath.nsum` must not be chosen too large, otherwise
    it will miss the point where the Levin transform converges resulting in numerical
    overflow/garbage. For highly divergent series a copious amount of working precision
    must be chosen.

    **Examples**

    First we sum the zeta function::

        >>> from mpmath import mp
        >>> mp.prec = 53
        >>> eps = mp.mpf(mp.eps)
        >>> with mp.extraprec(2 * mp.prec): # levin needs a high working precision
        ...     L = mp.levin(method = "levin", variant = "u")
        ...     S, s, n = [], 0, 1
        ...     while 1:
        ...         s += mp.one / (n * n)
        ...         n += 1
        ...         S.append(s)
        ...         v, e = L.update_psum(S)
        ...         if e < eps:
        ...             break
        ...         if n > 1000: raise RuntimeError("iteration limit exceeded")
        >>> print(mp.chop(v - mp.pi ** 2 / 6))
        0.0
        >>> w = mp.nsum(lambda n: 1 / (n*n), [1, mp.inf], method = "levin", levin_variant = "u")
        >>> print(mp.chop(v - w))
        0.0

    Now we sum the zeta function outside its range of convergence
    (attention: This does not work at the negative integers!)::

        >>> eps = mp.mpf(mp.eps)
        >>> with mp.extraprec(2 * mp.prec): # levin needs a high working precision
        ...     L = mp.levin(method = "levin", variant = "v")
        ...     A, n = [], 1
        ...     while 1:
        ...         s = mp.mpf(n) ** (2 + 3j)
        ...         n += 1
        ...         A.append(s)
        ...         v, e = L.update(A)
        ...         if e < eps:
        ...             break
        ...         if n > 1000: raise RuntimeError("iteration limit exceeded")
        >>> print(mp.chop(v - mp.zeta(-2-3j)))
        0.0
        >>> w = mp.nsum(lambda n: n ** (2 + 3j), [1, mp.inf], method = "levin", levin_variant = "v")
        >>> print(mp.chop(v - w))
        0.0

    Now we sum the divergent asymptotic expansion of an integral related to the
    exponential integral (see also [2] p.373). The Sidi-S transform works best here::

        >>> z = mp.mpf(10)
        >>> exact = mp.quad(lambda x: mp.exp(-x)/(1+x/z),[0,mp.inf])
        >>> # exact = z * mp.exp(z) * mp.expint(1,z) # this is the symbolic expression for the integral
        >>> eps = mp.mpf(mp.eps)
        >>> with mp.extraprec(2 * mp.prec): # high working precisions are mandatory for divergent resummation
        ...     L = mp.levin(method = "sidi", variant = "t")
        ...     n = 0
        ...     while 1:
        ...         s = (-1)**n * mp.fac(n) * z ** (-n)
        ...         v, e = L.step(s)
        ...         n += 1
        ...         if e < eps:
        ...             break
        ...         if n > 1000: raise RuntimeError("iteration limit exceeded")
        >>> print(mp.chop(v - exact))
        0.0
        >>> w = mp.nsum(lambda n: (-1) ** n * mp.fac(n) * z ** (-n), [0, mp.inf], method = "sidi", levin_variant = "t")
        >>> print(mp.chop(v - w))
        0.0

    Another highly divergent integral is also summable::

        >>> z = mp.mpf(2)
        >>> eps = mp.mpf(mp.eps)
        >>> exact = mp.quad(lambda x: mp.exp( -x * x / 2 - z * x ** 4), [0,mp.inf]) * 2 / mp.sqrt(2 * mp.pi)
        >>> # exact = mp.exp(mp.one / (32 * z)) * mp.besselk(mp.one / 4, mp.one / (32 * z)) / (4 * mp.sqrt(z * mp.pi)) # this is the symbolic expression for the integral
        >>> with mp.extraprec(7 * mp.prec): # we need copious amount of precision to sum this highly divergent series
        ...     L = mp.levin(method = "levin", variant = "t")
        ...     n, s = 0, 0
        ...     while 1:
        ...         s += (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n))
        ...         n += 1
        ...         v, e = L.step_psum(s)
        ...         if e < eps:
        ...             break
        ...         if n > 1000: raise RuntimeError("iteration limit exceeded")
        >>> print(mp.chop(v - exact))
        0.0
        >>> w = mp.nsum(lambda n: (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)),
        ...   [0, mp.inf], method = "levin", levin_variant = "t", workprec = 8*mp.prec, steps = [2] + [1 for x in xrange(1000)])
        >>> print(mp.chop(v - w))
        0.0

    These examples run with 15-20 decimal digits precision. For higher precision the
    working precision must be raised.

    **Examples for nsum**

    Here we calculate Euler's constant as the constant term in the Laurent
    expansion of `\zeta(s)` at `s=1`. This sum converges extremely slowly because of
    the logarithmic convergence behaviour of the Dirichlet series for zeta::

        >>> mp.dps = 30
        >>> z = mp.mpf(10) ** (-10)
        >>> a = mp.nsum(lambda n: n**(-(1+z)), [1, mp.inf], method = "l") - 1 / z
        >>> print(mp.chop(a - mp.euler, tol = 1e-10))
        0.0

    The Sidi-S transform performs excellently for the alternating series of `\log(2)`::

        >>> a = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "sidi")
        >>> print(mp.chop(a - mp.log(2)))
        0.0

    Hypergeometric series can also be summed outside their range of convergence.
    The stepsize in :func:`~mpmath.nsum` must not be chosen too large, otherwise it will miss the
    point where the Levin transform converges resulting in numerical overflow/garbage::

        >>> z = 2 + 1j
        >>> exact = mp.hyp2f1(2 / mp.mpf(3), 4 / mp.mpf(3), 1 / mp.mpf(3), z)
        >>> f = lambda n: mp.rf(2 / mp.mpf(3), n) * mp.rf(4 / mp.mpf(3), n) * z**n / (mp.rf(1 / mp.mpf(3), n) * mp.fac(n))
        >>> v = mp.nsum(f, [0, mp.inf], method = "levin", steps = [10 for x in xrange(1000)])
        >>> print(mp.chop(exact-v))
        0.0

    References:

      [1] E.J. Weniger - "Nonlinear Sequence Transformations for the Acceleration of
          Convergence and the Summation of Divergent Series" arXiv:math/0306302

      [2] A. Sidi - "Practical Extrapolation Methods"

      [3] H.H.H. Homeier - "Scalar Levin-Type Sequence Transformations" arXiv:math/0005209

    """

    def __init__(self, method = "levin", variant = "u"):
        # NOTE: self.ctx is assigned by the levin() factory after
        # construction; all arithmetic below goes through it.
        self.variant = variant
        self.n = 0          # number of terms absorbed so far
        self.a0 = 0         # not referenced within this class
        self.theta = 1      # shift parameter of the transformation
        self.A = []         # numerator table of the recursion
        self.B = []         # denominator table of the recursion
        self.last = 0       # previous estimate, for the error value
        self.last_s = False # previous input (False = "not yet seen")

        if method == "levin":
            self.factor = self.factor_levin
        elif method == "sidi":
            self.factor = self.factor_sidi
        else:
            raise ValueError("levin: unknown method \"%s\"" % method)

    def factor_levin(self, i):
        # original levin
        # [1] p.50,e.7.5-7 (with n-j replaced by i)
        return (self.theta + i) * (self.theta + self.n - 1) ** (self.n - i - 2) / self.ctx.mpf(self.theta + self.n) ** (self.n - i - 1)

    def factor_sidi(self, i):
        # sidi analogon to levin (factorial series)
        # [1] p.59,e.8.3-16 (with n-j replaced by i)
        return (self.theta + self.n - 1) * (self.theta + self.n - 2) / self.ctx.mpf((self.theta + 2 * self.n - i - 2) * (self.theta + 2 * self.n - i - 3))

    def run(self, s, a0, a1 = 0):
        # Absorb one term: s is the partial sum, a0 (and a1 for the "v"
        # variant) the remainder-estimate data. The weight w is the
        # variant-specific remainder estimate omega_n.
        if self.variant=="t":
            # levin t
            w=a0
        elif self.variant=="u":
            # levin u
            w=a0*(self.theta+self.n)
        elif self.variant=="v":
            # levin v
            w=a0*a1/(a0-a1)
        else:
            assert False, "unknown variant"

        if w==0:
            raise ValueError("levin: zero weight")

        self.A.append(s/w)
        self.B.append(1/w)

        # Update the numerator/denominator tables in place with the
        # Levin/Sidi recursion; the current estimate is A[0]/B[0].
        for i in range(self.n-1,-1,-1):
            if i==self.n-1:
                f=1
            else:
                f=self.factor(i)

            self.A[i]=self.A[i+1]-f*self.A[i]
            self.B[i]=self.B[i+1]-f*self.B[i]

        self.n+=1

    ###########################################################################

    def update_psum(self,S):
        """
        This routine applies the convergence acceleration to the list of partial sums.

        A = sum(a_k, k = 0..infinity)
        s_n = sum(a_k, k = 0..n)

        v, e = ...update_psum([s_0, s_1,..., s_k])

        output:
        v      current estimate of the series A
        e      an error estimate which is simply the difference between the current
               estimate and the last estimate.
        """

        if self.variant!="v":
            if self.n==0:
                self.run(S[0],S[0])
            # Feed only the terms not yet absorbed (self.n tracks progress),
            # so repeated calls with a growing list are incremental.
            while self.n<len(S):
                self.run(S[self.n],S[self.n]-S[self.n-1])
        else:
            # The "v" variant needs the *next* difference too, so it
            # always lags one term behind the end of the list.
            if len(S)==1:
                self.last=0
                return S[0],abs(S[0])

            if self.n==0:
                self.a1=S[1]-S[0]
                self.run(S[0],S[0],self.a1)

            while self.n<len(S)-1:
                na1=S[self.n+1]-S[self.n]
                self.run(S[self.n],self.a1,na1)
                self.a1=na1

        value=self.A[0]/self.B[0]
        err=abs(value-self.last)
        self.last=value

        return value,err

    def update(self,X):
        """
        This routine applies the convergence acceleration to the list of individual terms.

        A = sum(a_k, k = 0..infinity)

        v, e = ...update([a_0, a_1,..., a_k])

        output:
        v      current estimate of the series A
        e      an error estimate which is simply the difference between the current
               estimate and the last estimate.
        """

        if self.variant!="v":
            if self.n==0:
                self.s=X[0]
                self.run(self.s,X[0])
            # self.s accumulates the partial sum; self.n tracks how many
            # terms were already absorbed, so calls are incremental.
            while self.n<len(X):
                self.s+=X[self.n]
                self.run(self.s,X[self.n])
        else:
            # The "v" variant needs the following term too, so it always
            # lags one term behind the end of the list.
            if len(X)==1:
                self.last=0
                return X[0],abs(X[0])

            if self.n==0:
                self.s=X[0]
                self.run(self.s,X[0],X[1])

            while self.n<len(X)-1:
                self.s+=X[self.n]
                self.run(self.s,X[self.n],X[self.n+1])

        value=self.A[0]/self.B[0]
        err=abs(value-self.last)
        self.last=value

        return value,err

    ###########################################################################

    def step_psum(self,s):
        """
        This routine applies the convergence acceleration to the partial sums.

        A = sum(a_k, k = 0..infinity)
        s_n = sum(a_k, k = 0..n)

        v, e = ...step_psum(s_k)

        output:
        v      current estimate of the series A
        e      an error estimate which is simply the difference between the current
               estimate and the last estimate.
        """

        if self.variant!="v":
            if self.n==0:
                self.last_s=s
                self.run(s,s)
            else:
                self.run(s,s-self.last_s)
                self.last_s=s
        else:
            # First call: last_s is still the bool sentinel False; just
            # remember the input and return it as the trivial estimate.
            if isinstance(self.last_s,bool):
                self.last_s=s
                self.last_w=s
                self.last=0
                return s,abs(s)

            na1=s-self.last_s
            self.run(self.last_s,self.last_w,na1)
            self.last_w=na1
            self.last_s=s

        value=self.A[0]/self.B[0]
        err=abs(value-self.last)
        self.last=value

        return value,err

    def step(self,x):
        """
        This routine applies the convergence acceleration to the individual terms.

        A = sum(a_k, k = 0..infinity)

        v, e = ...step(a_k)

        output:
        v      current estimate of the series A
        e      an error estimate which is simply the difference between the current
               estimate and the last estimate.
        """

        if self.variant!="v":
            if self.n==0:
                self.s=x
                self.run(self.s,x)
            else:
                self.s+=x
                self.run(self.s,x)
        else:
            # First call: last_s is still the bool sentinel False; the "v"
            # variant buffers one term because it needs the next one too.
            if isinstance(self.last_s,bool):
                self.last_s=x
                self.s=0
                self.last=0
                return x,abs(x)

            self.s+=self.last_s
            self.run(self.s,self.last_s,x)
            self.last_s=x

        value=self.A[0]/self.B[0]
        err=abs(value-self.last)
        self.last=value

        return value,err
def levin(ctx, method="levin", variant="u"):
    # Factory: build a levin_class accelerator and bind it to the given
    # context so its arithmetic uses ctx's precision.
    accelerator = levin_class(method=method, variant=variant)
    accelerator.ctx = ctx
    return accelerator

# Expose the class documentation on the factory and register it as a
# context method.
levin.__doc__ = levin_class.__doc__
defun(levin)
726
+
727
+
728
+ class cohen_alt_class:
729
+ # cohen_alt: Copyright 2013 Timo Hartmann (thartmann15 at gmail.com)
730
+ r"""
731
+ This interface implements the convergence acceleration of alternating series
732
+ as described in H. Cohen, F.R. Villegas, D. Zagier - "Convergence Acceleration
733
+ of Alternating Series". This series transformation works only well if the
734
+ individual terms of the series have an alternating sign. It belongs to the
735
+ class of linear series transformations (in contrast to the Shanks/Wynn-epsilon
736
+ or Levin transform). This series transformation is also able to sum some types
737
+ of divergent series. See the paper under which conditions this resummation is
738
+ mathematical sound.
739
+
740
+ Let *A* be the series we want to sum:
741
+
742
+ .. math ::
743
+
744
+ A = \sum_{k=0}^{\infty} a_k
745
+
746
+ Let `s_n` be the partial sums of this series:
747
+
748
+ .. math ::
749
+
750
+ s_n = \sum_{k=0}^n a_k.
751
+
752
+
753
+ **Interface**
754
+
755
+ Calling ``cohen_alt`` returns an object with the following methods.
756
+
757
+ Then ``update(...)`` works with the list of individual terms `a_k` and
758
+ ``update_psum(...)`` works with the list of partial sums `s_k`:
759
+
760
+ .. code ::
761
+
762
+ v, e = ...update([a_0, a_1,..., a_k])
763
+ v, e = ...update_psum([s_0, s_1,..., s_k])
764
+
765
+ *v* is the current estimate for *A*, and *e* is an error estimate which is
766
+ simply the difference between the current estimate and the last estimate.
767
+
768
+ **Examples**
769
+
770
+ Here we compute the alternating zeta function using ``update_psum``::
771
+
772
+ >>> from mpmath import mp
773
+ >>> AC = mp.cohen_alt()
774
+ >>> S, s, n = [], 0, 1
775
+ >>> while 1:
776
+ ... s += -((-1) ** n) * mp.one / (n * n)
777
+ ... n += 1
778
+ ... S.append(s)
779
+ ... v, e = AC.update_psum(S)
780
+ ... if e < mp.eps:
781
+ ... break
782
+ ... if n > 1000: raise RuntimeError("iteration limit exceeded")
783
+ >>> print(mp.chop(v - mp.pi ** 2 / 12))
784
+ 0.0
785
+
786
+ Here we compute the product `\prod_{n=1}^{\infty} \Gamma(1+1/(2n-1)) / \Gamma(1+1/(2n))`::
787
+
788
+ >>> A = []
789
+ >>> AC = mp.cohen_alt()
790
+ >>> n = 1
791
+ >>> while 1:
792
+ ... A.append( mp.loggamma(1 + mp.one / (2 * n - 1)))
793
+ ... A.append(-mp.loggamma(1 + mp.one / (2 * n)))
794
+ ... n += 1
795
+ ... v, e = AC.update(A)
796
+ ... if e < mp.eps:
797
+ ... break
798
+ ... if n > 1000: raise RuntimeError("iteration limit exceeded")
799
+ >>> v = mp.exp(v)
800
+ >>> print(mp.chop(v - 1.06215090557106, tol = 1e-12))
801
+ 0.0
802
+
803
+ ``cohen_alt`` is also accessible through the :func:`~mpmath.nsum` interface::
804
+
805
+ >>> v = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "a")
806
+ >>> print(mp.chop(v - mp.log(2)))
807
+ 0.0
808
+ >>> v = mp.nsum(lambda n: (-1)**n / (2 * n + 1), [0, mp.inf], method = "a")
809
+ >>> print(mp.chop(v - mp.pi / 4))
810
+ 0.0
811
+ >>> v = mp.nsum(lambda n: (-1)**n * mp.log(n) * n, [1, mp.inf], method = "a")
812
+ >>> print(mp.chop(v - mp.diff(lambda s: mp.altzeta(s), -1)))
813
+ 0.0
814
+
815
+ """
816
+
817
+ def __init__(self):
818
+ self.last=0
819
+
820
    def update(self, A):
        """
        This routine applies the convergence acceleration to the list of individual terms.

        A = sum(a_k, k = 0..infinity)

        v, e = ...update([a_0, a_1,..., a_k])

        output:
        v    current estimate of the series A
        e    an error estimate which is simply the difference between the current
             estimate and the last estimate.
        """

        n = len(A)
        # Normalization constant of the Cohen-Rodriguez Villegas-Zagier scheme:
        # d = ((3+sqrt(8))^n + (3+sqrt(8))^(-n)) / 2  (a Chebyshev value).
        d = (3 + self.ctx.sqrt(8)) ** n
        d = (d + 1 / d) / 2
        b = -self.ctx.one
        c = -d
        s = 0

        for k in xrange(n):
            # NOTE: the update order is significant -- c must be refreshed from
            # the *previous* b before b itself is advanced below.
            c = b - c
            # The weights alternate in sign with k; fold the sign into the sum
            # rather than into c.
            if k % 2 == 0:
                s = s + c * A[k]
            else:
                s = s - c * A[k]
            # Recurrence for the binomial-type weight b_k.
            b = 2 * (k + n) * (k - n) * b / ((2 * k + 1) * (k + self.ctx.one))

        value = s / d

        # Error estimate: change relative to the estimate from the previous call.
        err = abs(value - self.last)
        self.last = value

        return value, err
855
+
856
    def update_psum(self, S):
        """
        This routine applies the convergence acceleration to the list of partial sums.

        A = sum(a_k, k = 0..infinity)
        s_n = sum(a_k, k = 0..n)

        v, e = ...update_psum([s_0, s_1,..., s_k])

        output:
        v    current estimate of the series A
        e    an error estimate which is simply the difference between the current
             estimate and the last estimate.
        """

        n = len(S)
        # Normalization constant of the Cohen-Rodriguez Villegas-Zagier scheme:
        # d = ((3+sqrt(8))^n + (3+sqrt(8))^(-n)) / 2.
        d = (3 + self.ctx.sqrt(8)) ** n
        d = (d + 1 / d) / 2
        b = self.ctx.one
        s = 0

        for k in xrange(n):
            # b carries the weight applied to the partial sum s_k; unlike in
            # ``update`` it is advanced *before* being used.
            b = 2 * (n + k) * (n - k) * b / ((2 * k + 1) * (k + self.ctx.one))
            s += b * S[k]

        value = s / d

        # Error estimate: change relative to the estimate from the previous call.
        err = abs(value - self.last)
        self.last = value

        return value, err
887
+
888
def cohen_alt(ctx):
    # Factory: build an accelerator instance bound to the given context.
    # (Its docstring is overwritten from the class right after this def.)
    accelerator = cohen_alt_class()
    accelerator.ctx = ctx
    return accelerator
892
+
893
# Expose the full algorithm description on the factory function and register
# it with defun, following the same pattern as the @defun-decorated functions
# in this module.
cohen_alt.__doc__ = cohen_alt_class.__doc__
defun(cohen_alt)
895
+
896
+
897
@defun
def sumap(ctx, f, interval, integral=None, error=False):
    r"""
    Evaluates an infinite series of an analytic summand *f* using the
    Abel-Plana formula

    .. math ::

        \sum_{k=0}^{\infty} f(k) = \int_0^{\infty} f(t) dt + \frac{1}{2} f(0) +
            i \int_0^{\infty} \frac{f(it)-f(-it)}{e^{2\pi t}-1} dt.

    Unlike the Euler-Maclaurin formula (see :func:`~mpmath.sumem`),
    the Abel-Plana formula does not require derivatives. However,
    it only works when `|f(it)-f(-it)|` does not
    increase too rapidly with `t`.

    **Examples**

    The Abel-Plana formula is particularly useful when the summand
    decreases like a power of `k`; for example when the sum is a pure
    zeta function::

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> sumap(lambda k: 1/k**2.5, [1,inf])
        1.34148725725091717975677
        >>> zeta(2.5)
        1.34148725725091717975677
        >>> sumap(lambda k: 1/(k+1j)**(2.5+2.5j), [1,inf])
        (-3.385361068546473342286084 - 0.7432082105196321803869551j)
        >>> zeta(2.5+2.5j, 1+1j)
        (-3.385361068546473342286084 - 0.7432082105196321803869551j)

    If the series is alternating, numerical quadrature along the real
    line is likely to give poor results, so it is better to evaluate
    the first term symbolically whenever possible:

    >>> n=3; z=-0.75
    >>> I = expint(n,-log(z))
    >>> chop(sumap(lambda k: z**k / k**n, [1,inf], integral=I))
    -0.6917036036904594510141448
    >>> polylog(n,z)
    -0.6917036036904594510141448

    """
    prec = ctx.prec
    try:
        # Work at slightly elevated precision; restored in the finally clause.
        ctx.prec += 10
        a, b = interval
        if b != ctx.inf:
            raise ValueError("b should be equal to ctx.inf")
        # Shift the summand so the sum effectively starts at index 0.
        g = lambda x: f(x+a)
        if integral is None:
            # First Abel-Plana term: integral of f over [a, inf).
            i1, err1 = ctx.quad(g, [0,ctx.inf], error=True)
        else:
            # Caller supplied the integral symbolically; no quadrature error.
            i1, err1 = integral, 0
        j = ctx.j
        p = ctx.pi * 2
        if ctx._is_real_type(i1):
            # Real summand: f(it) and f(-it) are conjugates, so the bracket
            # reduces to -2*Im(f(it)).
            h = lambda t: -2 * ctx.im(g(j*t)) / ctx.expm1(p*t)
        else:
            h = lambda t: j*(g(j*t)-g(-j*t)) / ctx.expm1(p*t)
        i2, err2 = ctx.quad(h, [0,ctx.inf], error=True)
        err = err1+err2
        # Assemble: integral + correction integral + f(a)/2.
        v = i1+i2+0.5*g(ctx.mpf(0))
    finally:
        ctx.prec = prec
    if error:
        return +v, err
    return +v
967
+
968
+
969
@defun
def sumem(ctx, f, interval, tol=None, reject=10, integral=None,
    adiffs=None, bdiffs=None, verbose=False, error=False,
    _fast_abort=False):
    r"""
    Uses the Euler-Maclaurin formula to compute an approximation accurate
    to within ``tol`` (which defaults to the present epsilon) of the sum

    .. math ::

        S = \sum_{k=a}^b f(k)

    where `(a,b)` are given by ``interval`` and `a` or `b` may be
    infinite. The approximation is

    .. math ::

        S \sim \int_a^b f(x) \,dx + \frac{f(a)+f(b)}{2} +
        \sum_{k=1}^{\infty} \frac{B_{2k}}{(2k)!}
        \left(f^{(2k-1)}(b)-f^{(2k-1)}(a)\right).

    The last sum in the Euler-Maclaurin formula is not generally
    convergent (a notable exception is if `f` is a polynomial, in
    which case Euler-Maclaurin actually gives an exact result).

    The summation is stopped as soon as the quotient between two
    consecutive terms falls below *reject*. That is, by default
    (*reject* = 10), the summation is continued as long as each
    term adds at least one decimal.

    Although not convergent, convergence to a given tolerance can
    often be "forced" if `b = \infty` by summing up to `a+N` and then
    applying the Euler-Maclaurin formula to the sum over the range
    `(a+N+1, \ldots, \infty)`. This procedure is implemented by
    :func:`~mpmath.nsum`.

    By default numerical quadrature and differentiation is used.
    If the symbolic values of the integral and endpoint derivatives
    are known, it is more efficient to pass the value of the
    integral explicitly as ``integral`` and the derivatives
    explicitly as ``adiffs`` and ``bdiffs``. The derivatives
    should be given as iterables that yield
    `f(a), f'(a), f''(a), \ldots` (and the equivalent for `b`).

    **Examples**

    Summation of an infinite series, with automatic and symbolic
    integral and derivative values (the second should be much faster)::

        >>> from mpmath import *
        >>> mp.dps = 50; mp.pretty = True
        >>> sumem(lambda n: 1/n**2, [32, inf])
        0.03174336652030209012658168043874142714132886413417
        >>> I = mpf(1)/32
        >>> D = adiffs=((-1)**n*fac(n+1)*32**(-2-n) for n in range(999))
        >>> sumem(lambda n: 1/n**2, [32, inf], integral=I, adiffs=D)
        0.03174336652030209012658168043874142714132886413417

    An exact evaluation of a finite polynomial sum::

        >>> sumem(lambda n: n**5-12*n**2+3*n, [-100000, 200000])
        10500155000624963999742499550000.0
        >>> print(sum(n**5-12*n**2+3*n for n in range(-100000, 200001)))
        10500155000624963999742499550000

    """
    tol = tol or +ctx.eps
    interval = ctx._as_points(interval)
    a = ctx.convert(interval[0])
    b = ctx.convert(interval[-1])
    err = ctx.zero
    prev = 0
    # At an infinite endpoint the derivative contributions are all zero;
    # M merely bounds the zero-generator's length.
    M = 10000
    if a == ctx.ninf: adiffs = (0 for n in xrange(M))
    else: adiffs = adiffs or ctx.diffs(f, a)
    if b == ctx.inf: bdiffs = (0 for n in xrange(M))
    else: bdiffs = bdiffs or ctx.diffs(f, b)
    orig = ctx.prec
    #verbose = 1
    try:
        ctx.prec += 10
        s = ctx.zero
        for k, (da, db) in enumerate(izip(adiffs, bdiffs)):
            if k & 1:
                # Only odd-order derivatives enter (the B_{2k}/(2k)! terms).
                term = (db-da) * ctx.bernoulli(k+1) / ctx.factorial(k+1)
                mag = abs(term)
                if verbose:
                    print("term", k, "magnitude =", ctx.nstr(mag))
                # Converged: the next term is already below the tolerance.
                if k > 4 and mag < tol:
                    s += term
                    break
                # Asymptotic series started to diverge (terms no longer shrink
                # by at least a factor *reject*): stop and record the size of
                # the rejected term in the error estimate.
                elif k > 4 and abs(prev) / mag < reject:
                    err += mag
                    if _fast_abort:
                        return [s, (s, err)][error]
                    if verbose:
                        print("Failed to converge")
                    break
                else:
                    s += term
                prev = term
        # Endpoint correction
        if a != ctx.ninf: s += f(a)/2
        if b != ctx.inf: s += f(b)/2
        # Tail integral
        if verbose:
            print("Integrating f(x) from x = %s to %s" % (ctx.nstr(a), ctx.nstr(b)))
        if integral:
            s += integral
        else:
            integral, ierr = ctx.quad(f, interval, error=True)
            if verbose:
                print("Integration error:", ierr)
            s += integral
            err += ierr
    finally:
        ctx.prec = orig
    if error:
        return s, err
    else:
        return s
1090
+
1091
@defun
def adaptive_extrapolation(ctx, update, emfun, kwargs):
    """
    Driver used by :func:`nsum`: repeatedly extends the list of partial sums
    via the *update* callback and tries the configured acceleration methods
    (Richardson, Shanks, Levin/Sidi, Cohen's alternating scheme,
    Euler-Maclaurin via *emfun*) until one of them meets the tolerance.

    *update(partial, indices)* must append new partial sums for the given
    index range; *emfun(index, tol)* must return (tail_value, error) for the
    remainder of the series starting at *index*; *kwargs* carries the nsum
    options (tol, method, maxterms, steps, verbose, strict, workprec, ...).
    """
    option = kwargs.get
    if ctx._fixed_precision:
        tol = option('tol', ctx.eps*2**10)
    else:
        tol = option('tol', ctx.eps/2**10)
    verbose = option('verbose', False)
    maxterms = option('maxterms', ctx.dps*10)
    # Methods are given as '+'-separated tokens, e.g. 'r+s+e'.
    method = set(option('method', 'r+s').split('+'))
    # NOTE(review): 'skip' is read here but not used anywhere in this routine.
    skip = option('skip', 0)
    steps = iter(option('steps', xrange(10, 10**9, 10)))
    strict = option('strict')
    #steps = (10 for i in xrange(1000))
    summer=[]
    if 'd' in method or 'direct' in method:
        TRY_RICHARDSON = TRY_SHANKS = TRY_EULER_MACLAURIN = False
    else:
        TRY_RICHARDSON = ('r' in method) or ('richardson' in method)
        TRY_SHANKS = ('s' in method) or ('shanks' in method)
        TRY_EULER_MACLAURIN = ('e' in method) or \
            ('euler-maclaurin' in method)

    def init_levin(m):
        # Instantiate one Levin/Sidi accelerator per requested variant.
        variant = kwargs.get("levin_variant", "u")
        if isinstance(variant, str):
            if variant == "all":
                variant = ["u", "v", "t"]
            else:
                variant = [variant]
        for s in variant:
            L = levin_class(method = m, variant = s)
            L.ctx = ctx
            L.name = m + "(" + s + ")"
            summer.append(L)

    if ('l' in method) or ('levin' in method):
        init_levin("levin")

    if ('sidi' in method):
        init_levin("sidi")

    if ('a' in method) or ('alternating' in method):
        L = cohen_alt_class()
        L.ctx = ctx
        L.name = "alternating"
        summer.append(L)

    last_richardson_value = 0
    shanks_table = []
    index = 0
    step = 10
    partial = []
    best = ctx.zero
    orig = ctx.prec
    try:
        # The table-based accelerators are cancellation-prone, so they get a
        # much larger working precision than plain direct summation.
        if 'workprec' in kwargs:
            ctx.prec = kwargs['workprec']
        elif TRY_RICHARDSON or TRY_SHANKS or len(summer)!=0:
            ctx.prec = (ctx.prec+10) * 4
        else:
            ctx.prec += 30
        while 1:
            if index >= maxterms:
                break

            # Get new batch of terms
            try:
                step = next(steps)
            except StopIteration:
                # Keep reusing the last step size once the iterator runs dry.
                pass
            if verbose:
                print("-"*70)
                print("Adding terms #%i-#%i" % (index, index+step))
            update(partial, xrange(index, index+step))
            index += step

            # Check direct error
            best = partial[-1]
            error = abs(best - partial[-2])
            if verbose:
                print("Direct error: %s" % ctx.nstr(error))
            if error <= tol:
                return best

            # Check each extrapolation method
            if TRY_RICHARDSON:
                value, maxc = ctx.richardson(partial)
                # Convergence
                richardson_error = abs(value - last_richardson_value)
                if verbose:
                    print("Richardson error: %s" % ctx.nstr(richardson_error))
                # Convergence
                if richardson_error <= tol:
                    return value
                last_richardson_value = value
                # Unreliable due to cancellation
                if ctx.eps*maxc > tol:
                    if verbose:
                        print("Ran out of precision for Richardson")
                    TRY_RICHARDSON = False
                if richardson_error < error:
                    error = richardson_error
                    best = value
            if TRY_SHANKS:
                shanks_table = ctx.shanks(partial, shanks_table, randomized=True)
                row = shanks_table[-1]
                if len(row) == 2:
                    # Too few entries for an error estimate yet.
                    est1 = row[-1]
                    shanks_error = 0
                else:
                    est1, maxc, est2 = row[-1], abs(row[-2]), row[-3]
                    shanks_error = abs(est1-est2)
                if verbose:
                    print("Shanks error: %s" % ctx.nstr(shanks_error))
                if shanks_error <= tol:
                    return est1
                if ctx.eps*maxc > tol:
                    if verbose:
                        print("Ran out of precision for Shanks")
                    TRY_SHANKS = False
                if shanks_error < error:
                    error = shanks_error
                    best = est1
            # Levin/Sidi/Cohen accelerators, all fed the partial sums.
            for L in summer:
                est, lerror = L.update_psum(partial)
                if verbose:
                    print("%s error: %s" % (L.name, ctx.nstr(lerror)))
                if lerror <= tol:
                    return est
                if lerror < error:
                    error = lerror
                    best = est
            if TRY_EULER_MACLAURIN:
                # A sign flip between the last two partial sums suggests an
                # alternating series, for which the EM quadrature fails.
                if ctx.almosteq(ctx.mpc(ctx.sign(partial[-1]) / ctx.sign(partial[-2])), -1):
                    if verbose:
                        print ("NOT using Euler-Maclaurin: the series appears"
                            " to be alternating, so numerical\n quadrature"
                            " will most likely fail")
                    TRY_EULER_MACLAURIN = False
                else:
                    value, em_error = emfun(index, tol)
                    value += partial[-1]
                    if verbose:
                        print("Euler-Maclaurin error: %s" % ctx.nstr(em_error))
                    if em_error <= tol:
                        return value
                    if em_error < error:
                        best = value
    finally:
        # Always restore the caller's precision, even on early return.
        ctx.prec = orig
    if strict:
        raise ctx.NoConvergence
    if verbose:
        print("Warning: failed to converge to target accuracy")
    return best
1247
+
1248
+ @defun
1249
+ def nsum(ctx, f, *intervals, **options):
1250
+ r"""
1251
+ Computes the sum
1252
+
1253
+ .. math :: S = \sum_{k=a}^b f(k)
1254
+
1255
+ where `(a, b)` = *interval*, and where `a = -\infty` and/or
1256
+ `b = \infty` are allowed, or more generally
1257
+
1258
+ .. math :: S = \sum_{k_1=a_1}^{b_1} \cdots
1259
+ \sum_{k_n=a_n}^{b_n} f(k_1,\ldots,k_n)
1260
+
1261
+ if multiple intervals are given.
1262
+
1263
+ Two examples of infinite series that can be summed by :func:`~mpmath.nsum`,
1264
+ where the first converges rapidly and the second converges slowly,
1265
+ are::
1266
+
1267
+ >>> from mpmath import *
1268
+ >>> mp.dps = 15; mp.pretty = True
1269
+ >>> nsum(lambda n: 1/fac(n), [0, inf])
1270
+ 2.71828182845905
1271
+ >>> nsum(lambda n: 1/n**2, [1, inf])
1272
+ 1.64493406684823
1273
+
1274
+ When appropriate, :func:`~mpmath.nsum` applies convergence acceleration to
1275
+ accurately estimate the sums of slowly convergent series. If the series is
1276
+ finite, :func:`~mpmath.nsum` currently does not attempt to perform any
1277
+ extrapolation, and simply calls :func:`~mpmath.fsum`.
1278
+
1279
+ Multidimensional infinite series are reduced to a single-dimensional
1280
+ series over expanding hypercubes; if both infinite and finite dimensions
1281
+ are present, the finite ranges are moved innermost. For more advanced
1282
+ control over the summation order, use nested calls to :func:`~mpmath.nsum`,
1283
+ or manually rewrite the sum as a single-dimensional series.
1284
+
1285
+ **Options**
1286
+
1287
+ *tol*
1288
+ Desired maximum final error. Defaults roughly to the
1289
+ epsilon of the working precision.
1290
+
1291
+ *method*
1292
+ Which summation algorithm to use (described below).
1293
+ Default: ``'richardson+shanks'``.
1294
+
1295
+ *maxterms*
1296
+ Cancel after at most this many terms. Default: 10*dps.
1297
+
1298
+ *steps*
1299
+ An iterable giving the number of terms to add between
1300
+ each extrapolation attempt. The default sequence is
1301
+ [10, 20, 30, 40, ...]. For example, if you know that
1302
+ approximately 100 terms will be required, efficiency might be
1303
+ improved by setting this to [100, 10]. Then the first
1304
+ extrapolation will be performed after 100 terms, the second
1305
+ after 110, etc.
1306
+
1307
+ *verbose*
1308
+ Print details about progress.
1309
+
1310
+ *ignore*
1311
+ If enabled, any term that raises ``ArithmeticError``
1312
+ or ``ValueError`` (e.g. through division by zero) is replaced
1313
+ by a zero. This is convenient for lattice sums with
1314
+ a singular term near the origin.
1315
+
1316
+ **Methods**
1317
+
1318
+ Unfortunately, an algorithm that can efficiently sum any infinite
1319
+ series does not exist. :func:`~mpmath.nsum` implements several different
1320
+ algorithms that each work well in different cases. The *method*
1321
+ keyword argument selects a method.
1322
+
1323
+ The default method is ``'r+s'``, i.e. both Richardson extrapolation
1324
+ and Shanks transformation is attempted. A slower method that
1325
+ handles more cases is ``'r+s+e'``. For very high precision
1326
+ summation, or if the summation needs to be fast (for example if
1327
+ multiple sums need to be evaluated), it is a good idea to
1328
+ investigate which one method works best and only use that.
1329
+
1330
+ ``'richardson'`` / ``'r'``:
1331
+ Uses Richardson extrapolation. Provides useful extrapolation
1332
+ when `f(k) \sim P(k)/Q(k)` or when `f(k) \sim (-1)^k P(k)/Q(k)`
1333
+ for polynomials `P` and `Q`. See :func:`~mpmath.richardson` for
1334
+ additional information.
1335
+
1336
+ ``'shanks'`` / ``'s'``:
1337
+ Uses Shanks transformation. Typically provides useful
1338
+ extrapolation when `f(k) \sim c^k` or when successive terms
1339
+ alternate signs. Is able to sum some divergent series.
1340
+ See :func:`~mpmath.shanks` for additional information.
1341
+
1342
+ ``'levin'`` / ``'l'``:
1343
+ Uses the Levin transformation. It performs better than the Shanks
1344
+ transformation for logarithmic convergent or alternating divergent
1345
+ series. The ``'levin_variant'``-keyword selects the variant. Valid
1346
+ choices are "u", "t", "v" and "all" whereby "all" uses all three
1347
+ u,t and v simultaneously (This is good for performance comparison in
1348
+ conjunction with "verbose=True"). Instead of the Levin transform one can
1349
+ also use the Sidi-S transform by selecting the method ``'sidi'``.
1350
+ See :func:`~mpmath.levin` for additional details.
1351
+
1352
+ ``'alternating'`` / ``'a'``:
1353
+ This is the convergence acceleration of alternating series developed
1354
+ by Cohen, Rodriguez Villegas and Zagier.
1355
+ See :func:`~mpmath.cohen_alt` for additional details.
1356
+
1357
+ ``'euler-maclaurin'`` / ``'e'``:
1358
+ Uses the Euler-Maclaurin summation formula to approximate
1359
+ the remainder sum by an integral. This requires high-order
1360
+ numerical derivatives and numerical integration. The advantage
1361
+ of this algorithm is that it works regardless of the
1362
+ decay rate of `f`, as long as `f` is sufficiently smooth.
1363
+ See :func:`~mpmath.sumem` for additional information.
1364
+
1365
+ ``'direct'`` / ``'d'``:
1366
+ Does not perform any extrapolation. This can be used
1367
+ (and should only be used for) rapidly convergent series.
1368
+ The summation automatically stops when the terms
1369
+ decrease below the target tolerance.
1370
+
1371
+ **Basic examples**
1372
+
1373
+ A finite sum::
1374
+
1375
+ >>> nsum(lambda k: 1/k, [1, 6])
1376
+ 2.45
1377
+
1378
+ Summation of a series going to negative infinity and a doubly
1379
+ infinite series::
1380
+
1381
+ >>> nsum(lambda k: 1/k**2, [-inf, -1])
1382
+ 1.64493406684823
1383
+ >>> nsum(lambda k: 1/(1+k**2), [-inf, inf])
1384
+ 3.15334809493716
1385
+
1386
+ :func:`~mpmath.nsum` handles sums of complex numbers::
1387
+
1388
+ >>> nsum(lambda k: (0.5+0.25j)**k, [0, inf])
1389
+ (1.6 + 0.8j)
1390
+
1391
+ The following sum converges very rapidly, so it is most
1392
+ efficient to sum it by disabling convergence acceleration::
1393
+
1394
+ >>> mp.dps = 1000
1395
+ >>> a = nsum(lambda k: -(-1)**k * k**2 / fac(2*k), [1, inf],
1396
+ ... method='direct')
1397
+ >>> b = (cos(1)+sin(1))/4
1398
+ >>> abs(a-b) < mpf('1e-998')
1399
+ True
1400
+
1401
+ **Examples with Richardson extrapolation**
1402
+
1403
+ Richardson extrapolation works well for sums over rational
1404
+ functions, as well as their alternating counterparts::
1405
+
1406
+ >>> mp.dps = 50
1407
+ >>> nsum(lambda k: 1 / k**3, [1, inf],
1408
+ ... method='richardson')
1409
+ 1.2020569031595942853997381615114499907649862923405
1410
+ >>> zeta(3)
1411
+ 1.2020569031595942853997381615114499907649862923405
1412
+
1413
+ >>> nsum(lambda n: (n + 3)/(n**3 + n**2), [1, inf],
1414
+ ... method='richardson')
1415
+ 2.9348022005446793094172454999380755676568497036204
1416
+ >>> pi**2/2-2
1417
+ 2.9348022005446793094172454999380755676568497036204
1418
+
1419
+ >>> nsum(lambda k: (-1)**k / k**3, [1, inf],
1420
+ ... method='richardson')
1421
+ -0.90154267736969571404980362113358749307373971925537
1422
+ >>> -3*zeta(3)/4
1423
+ -0.90154267736969571404980362113358749307373971925538
1424
+
1425
+ **Examples with Shanks transformation**
1426
+
1427
+ The Shanks transformation works well for geometric series
1428
+ and typically provides excellent acceleration for Taylor
1429
+ series near the border of their disk of convergence.
1430
+ Here we apply it to a series for `\log(2)`, which can be
1431
+ seen as the Taylor series for `\log(1+x)` with `x = 1`::
1432
+
1433
+ >>> nsum(lambda k: -(-1)**k/k, [1, inf],
1434
+ ... method='shanks')
1435
+ 0.69314718055994530941723212145817656807550013436025
1436
+ >>> log(2)
1437
+ 0.69314718055994530941723212145817656807550013436025
1438
+
1439
+ Here we apply it to a slowly convergent geometric series::
1440
+
1441
+ >>> nsum(lambda k: mpf('0.995')**k, [0, inf],
1442
+ ... method='shanks')
1443
+ 200.0
1444
+
1445
+ Finally, Shanks' method works very well for alternating series
1446
+ where `f(k) = (-1)^k g(k)`, and often does so regardless of
1447
+ the exact decay rate of `g(k)`::
1448
+
1449
+ >>> mp.dps = 15
1450
+ >>> nsum(lambda k: (-1)**(k+1) / k**1.5, [1, inf],
1451
+ ... method='shanks')
1452
+ 0.765147024625408
1453
+ >>> (2-sqrt(2))*zeta(1.5)/2
1454
+ 0.765147024625408
1455
+
1456
+ The following slowly convergent alternating series has no known
1457
+ closed-form value. Evaluating the sum a second time at higher
1458
+ precision indicates that the value is probably correct::
1459
+
1460
+ >>> nsum(lambda k: (-1)**k / log(k), [2, inf],
1461
+ ... method='shanks')
1462
+ 0.924299897222939
1463
+ >>> mp.dps = 30
1464
+ >>> nsum(lambda k: (-1)**k / log(k), [2, inf],
1465
+ ... method='shanks')
1466
+ 0.92429989722293885595957018136
1467
+
1468
+ **Examples with Levin transformation**
1469
+
1470
+ The following example calculates Euler's constant as the constant term in
1471
+ the Laurent expansion of zeta(s) at s=1. This sum converges extremely slowly
1472
+ because of the logarithmic convergence behaviour of the Dirichlet series
1473
+ for zeta.
1474
+
1475
+ >>> mp.dps = 30
1476
+ >>> z = mp.mpf(10) ** (-10)
1477
+ >>> a = mp.nsum(lambda n: n**(-(1+z)), [1, mp.inf], method = "levin") - 1 / z
1478
+ >>> print(mp.chop(a - mp.euler, tol = 1e-10))
1479
+ 0.0
1480
+
1481
+ Now we sum the zeta function outside its range of convergence
1482
+ (attention: This does not work at the negative integers!):
1483
+
1484
+ >>> mp.dps = 15
1485
+ >>> w = mp.nsum(lambda n: n ** (2 + 3j), [1, mp.inf], method = "levin", levin_variant = "v")
1486
+ >>> print(mp.chop(w - mp.zeta(-2-3j)))
1487
+ 0.0
1488
+
1489
+ The next example resums an asymptotic series expansion of an integral
1490
+ related to the exponential integral.
1491
+
1492
+ >>> mp.dps = 15
1493
+ >>> z = mp.mpf(10)
1494
+ >>> # exact = mp.quad(lambda x: mp.exp(-x)/(1+x/z),[0,mp.inf])
1495
+ >>> exact = z * mp.exp(z) * mp.expint(1,z) # this is the symbolic expression for the integral
1496
+ >>> w = mp.nsum(lambda n: (-1) ** n * mp.fac(n) * z ** (-n), [0, mp.inf], method = "sidi", levin_variant = "t")
1497
+ >>> print(mp.chop(w - exact))
1498
+ 0.0
1499
+
1500
+ The following highly divergent asymptotic expansion needs some care. Firstly we
1501
+ need a copious amount of working precision. Secondly the stepsize must not be
1502
+ chosen too large, otherwise nsum may miss the point where the Levin transform
1503
+ converges and reach the point where only numerical garbage is produced due to
1504
+ numerical cancellation.
1505
+
1506
+ >>> mp.dps = 15
1507
+ >>> z = mp.mpf(2)
1508
+ >>> # exact = mp.quad(lambda x: mp.exp( -x * x / 2 - z * x ** 4), [0,mp.inf]) * 2 / mp.sqrt(2 * mp.pi)
1509
+ >>> exact = mp.exp(mp.one / (32 * z)) * mp.besselk(mp.one / 4, mp.one / (32 * z)) / (4 * mp.sqrt(z * mp.pi)) # this is the symbolic expression for the integral
1510
+ >>> w = mp.nsum(lambda n: (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)),
1511
+ ... [0, mp.inf], method = "levin", levin_variant = "t", workprec = 8*mp.prec, steps = [2] + [1 for x in xrange(1000)])
1512
+ >>> print(mp.chop(w - exact))
1513
+ 0.0
1514
+
1515
+ The hypergeometric function can also be summed outside its range of convergence:
1516
+
1517
+ >>> mp.dps = 15
1518
+ >>> z = 2 + 1j
1519
+ >>> exact = mp.hyp2f1(2 / mp.mpf(3), 4 / mp.mpf(3), 1 / mp.mpf(3), z)
1520
+ >>> f = lambda n: mp.rf(2 / mp.mpf(3), n) * mp.rf(4 / mp.mpf(3), n) * z**n / (mp.rf(1 / mp.mpf(3), n) * mp.fac(n))
1521
+ >>> v = mp.nsum(f, [0, mp.inf], method = "levin", steps = [10 for x in xrange(1000)])
1522
+ >>> print(mp.chop(exact-v))
1523
+ 0.0
1524
+
1525
+ **Examples with Cohen's alternating series resummation**
1526
+
1527
+ The next example sums the alternating zeta function:
1528
+
1529
+ >>> v = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "a")
1530
+ >>> print(mp.chop(v - mp.log(2)))
1531
+ 0.0
1532
+
1533
+ The derivative of the alternating zeta function outside its range of
1534
+ convergence:
1535
+
1536
+ >>> v = mp.nsum(lambda n: (-1)**n * mp.log(n) * n, [1, mp.inf], method = "a")
1537
+ >>> print(mp.chop(v - mp.diff(lambda s: mp.altzeta(s), -1)))
1538
+ 0.0
1539
+
1540
+ **Examples with Euler-Maclaurin summation**
1541
+
1542
+ The sum in the following example has the wrong rate of convergence
1543
+ for either Richardson or Shanks to be effective.
1544
+
1545
+ >>> f = lambda k: log(k)/k**2.5
1546
+ >>> mp.dps = 15
1547
+ >>> nsum(f, [1, inf], method='euler-maclaurin')
1548
+ 0.38734195032621
1549
+ >>> -diff(zeta, 2.5)
1550
+ 0.38734195032621
1551
+
1552
+ Increasing ``steps`` improves speed at higher precision::
1553
+
1554
+ >>> mp.dps = 50
1555
+ >>> nsum(f, [1, inf], method='euler-maclaurin', steps=[250])
1556
+ 0.38734195032620997271199237593105101319948228874688
1557
+ >>> -diff(zeta, 2.5)
1558
+ 0.38734195032620997271199237593105101319948228874688
1559
+
1560
+ **Divergent series**
1561
+
1562
+ The Shanks transformation is able to sum some *divergent*
1563
+ series. In particular, it is often able to sum Taylor series
1564
+ beyond their radius of convergence (this is due to a relation
1565
+ between the Shanks transformation and Pade approximations;
1566
+ see :func:`~mpmath.pade` for an alternative way to evaluate divergent
1567
+ Taylor series). Furthermore the Levin-transform examples above
1568
+ contain some divergent series resummation.
1569
+
1570
+ Here we apply it to `\log(1+x)` far outside the region of
1571
+ convergence::
1572
+
1573
+ >>> mp.dps = 50
1574
+ >>> nsum(lambda k: -(-9)**k/k, [1, inf],
1575
+ ... method='shanks')
1576
+ 2.3025850929940456840179914546843642076011014886288
1577
+ >>> log(10)
1578
+ 2.3025850929940456840179914546843642076011014886288
1579
+
1580
+ A particular type of divergent series that can be summed
1581
+ using the Shanks transformation is geometric series.
1582
+ The result is the same as using the closed-form formula
1583
+ for an infinite geometric series::
1584
+
1585
+ >>> mp.dps = 15
1586
+ >>> for n in range(-8, 8):
1587
+ ... if n == 1:
1588
+ ... continue
1589
+ ... print("%s %s %s" % (mpf(n), mpf(1)/(1-n),
1590
+ ... nsum(lambda k: n**k, [0, inf], method='shanks')))
1591
+ ...
1592
+ -8.0 0.111111111111111 0.111111111111111
1593
+ -7.0 0.125 0.125
1594
+ -6.0 0.142857142857143 0.142857142857143
1595
+ -5.0 0.166666666666667 0.166666666666667
1596
+ -4.0 0.2 0.2
1597
+ -3.0 0.25 0.25
1598
+ -2.0 0.333333333333333 0.333333333333333
1599
+ -1.0 0.5 0.5
1600
+ 0.0 1.0 1.0
1601
+ 2.0 -1.0 -1.0
1602
+ 3.0 -0.5 -0.5
1603
+ 4.0 -0.333333333333333 -0.333333333333333
1604
+ 5.0 -0.25 -0.25
1605
+ 6.0 -0.2 -0.2
1606
+ 7.0 -0.166666666666667 -0.166666666666667
1607
+
1608
+ **Multidimensional sums**
1609
+
1610
+ Any combination of finite and infinite ranges is allowed for the
1611
+ summation indices::
1612
+
1613
+ >>> mp.dps = 15
1614
+ >>> nsum(lambda x,y: x+y, [2,3], [4,5])
1615
+ 28.0
1616
+ >>> nsum(lambda x,y: x/2**y, [1,3], [1,inf])
1617
+ 6.0
1618
+ >>> nsum(lambda x,y: y/2**x, [1,inf], [1,3])
1619
+ 6.0
1620
+ >>> nsum(lambda x,y,z: z/(2**x*2**y), [1,inf], [1,inf], [3,4])
1621
+ 7.0
1622
+ >>> nsum(lambda x,y,z: y/(2**x*2**z), [1,inf], [3,4], [1,inf])
1623
+ 7.0
1624
+ >>> nsum(lambda x,y,z: x/(2**z*2**y), [3,4], [1,inf], [1,inf])
1625
+ 7.0
1626
+
1627
+ Some nice examples of double series with analytic solutions or
1628
+ reductions to single-dimensional series (see [1])::
1629
+
1630
+ >>> nsum(lambda m, n: 1/2**(m*n), [1,inf], [1,inf])
1631
+ 1.60669515241529
1632
+ >>> nsum(lambda n: 1/(2**n-1), [1,inf])
1633
+ 1.60669515241529
1634
+
1635
+ >>> nsum(lambda i,j: (-1)**(i+j)/(i**2+j**2), [1,inf], [1,inf])
1636
+ 0.278070510848213
1637
+ >>> pi*(pi-3*ln2)/12
1638
+ 0.278070510848213
1639
+
1640
+ >>> nsum(lambda i,j: (-1)**(i+j)/(i+j)**2, [1,inf], [1,inf])
1641
+ 0.129319852864168
1642
+ >>> altzeta(2) - altzeta(1)
1643
+ 0.129319852864168
1644
+
1645
+ >>> nsum(lambda i,j: (-1)**(i+j)/(i+j)**3, [1,inf], [1,inf])
1646
+ 0.0790756439455825
1647
+ >>> altzeta(3) - altzeta(2)
1648
+ 0.0790756439455825
1649
+
1650
+ >>> nsum(lambda m,n: m**2*n/(3**m*(n*3**m+m*3**n)),
1651
+ ... [1,inf], [1,inf])
1652
+ 0.28125
1653
+ >>> mpf(9)/32
1654
+ 0.28125
1655
+
1656
+ >>> nsum(lambda i,j: fac(i-1)*fac(j-1)/fac(i+j),
1657
+ ... [1,inf], [1,inf], workprec=400)
1658
+ 1.64493406684823
1659
+ >>> zeta(2)
1660
+ 1.64493406684823
1661
+
1662
+ A hard example of a multidimensional sum is the Madelung constant
1663
+ in three dimensions (see [2]). The defining sum converges very
1664
+ slowly and only conditionally, so :func:`~mpmath.nsum` is lucky to
1665
+ obtain an accurate value through convergence acceleration. The
1666
+ second evaluation below uses a much more efficient, rapidly
1667
+ convergent 2D sum::
1668
+
1669
+ >>> nsum(lambda x,y,z: (-1)**(x+y+z)/(x*x+y*y+z*z)**0.5,
1670
+ ... [-inf,inf], [-inf,inf], [-inf,inf], ignore=True)
1671
+ -1.74756459463318
1672
+ >>> nsum(lambda x,y: -12*pi*sech(0.5*pi * \
1673
+ ... sqrt((2*x+1)**2+(2*y+1)**2))**2, [0,inf], [0,inf])
1674
+ -1.74756459463318
1675
+
1676
+ Another example of a lattice sum in 2D::
1677
+
1678
+ >>> nsum(lambda x,y: (-1)**(x+y) / (x**2+y**2), [-inf,inf],
1679
+ ... [-inf,inf], ignore=True)
1680
+ -2.1775860903036
1681
+ >>> -pi*ln2
1682
+ -2.1775860903036
1683
+
1684
+ An example of an Eisenstein series::
1685
+
1686
+ >>> nsum(lambda m,n: (m+n*1j)**(-4), [-inf,inf], [-inf,inf],
1687
+ ... ignore=True)
1688
+ (3.1512120021539 + 0.0j)
1689
+
1690
+ **References**
1691
+
1692
+ 1. [Weisstein]_ http://mathworld.wolfram.com/DoubleSeries.html,
1693
+ 2. [Weisstein]_ http://mathworld.wolfram.com/MadelungConstants.html
1694
+
1695
+ """
1696
+ infinite, g = standardize(ctx, f, intervals, options)
1697
+ if not infinite:
1698
+ return +g()
1699
+
1700
+ def update(partial_sums, indices):
1701
+ if partial_sums:
1702
+ psum = partial_sums[-1]
1703
+ else:
1704
+ psum = ctx.zero
1705
+ for k in indices:
1706
+ psum = psum + g(ctx.mpf(k))
1707
+ partial_sums.append(psum)
1708
+
1709
+ prec = ctx.prec
1710
+
1711
+ def emfun(point, tol):
1712
+ workprec = ctx.prec
1713
+ ctx.prec = prec + 10
1714
+ v = ctx.sumem(g, [point, ctx.inf], tol, error=1)
1715
+ ctx.prec = workprec
1716
+ return v
1717
+
1718
+ return +ctx.adaptive_extrapolation(update, emfun, options)
1719
+
1720
+
1721
def wrapsafe(f):
    """Wrap *f* so that arithmetic/value errors yield 0 instead of raising.

    Used when the ``ignore`` option is set, so that isolated singular
    points in a summand do not abort the whole summation.
    """
    def guarded(*fargs):
        try:
            result = f(*fargs)
        except (ArithmeticError, ValueError):
            # A failed evaluation contributes nothing to the sum.
            return 0
        return result
    return guarded
1728
+
1729
def standardize(ctx, f, intervals, options):
    """Reduce a (possibly multidimensional) sum to a one-dimensional summand.

    Classifies each interval in *intervals* as finite or infinite, sums
    out the finite dimensions, and maps the infinite dimensions onto a
    single index over [0, inf].

    Returns ``(infinite, g)``: when *infinite* is true, *g* is a
    function of one index to be fed to the extrapolation machinery;
    otherwise *g* is a zero-argument function giving the complete sum.
    """
    if options.get("ignore"):
        # Replace failed term evaluations by zero.
        f = wrapsafe(f)
    finite_dims = []
    infinite_dims = []
    for dim, points in enumerate(intervals):
        a, b = ctx._as_points(points)
        if b < a:
            # Empty interval: the whole sum is zero.
            return False, (lambda: ctx.zero)
        if a == ctx.ninf or b == ctx.inf:
            infinite_dims.append((dim, (a, b)))
        else:
            finite_dims.append((dim, (int(a), int(b))))
    if finite_dims:
        # Sum out all finite dimensions up front.
        f = fold_finite(ctx, f, finite_dims)
        if not infinite_dims:
            return False, lambda: f(*([0] * len(intervals)))
    if infinite_dims:
        # Map each infinite interval onto [0, inf], then collapse the
        # infinite dimensions into a single leading index.
        f = standardize_infinite(ctx, f, infinite_dims)
        f = fold_infinite(ctx, f, infinite_dims)
        args = [0] * len(intervals)
        lead = infinite_dims[0][0]
        def g(k):
            args[lead] = k
            return f(*args)
        return True, g
1755
+
1756
# backwards compatible itertools.product
def cartesian_product(args):
    """Yield tuples forming the Cartesian product of the given iterables.

    Equivalent to ``itertools.product(*args)``; kept as a local
    implementation for backwards compatibility.
    """
    combos = [()]
    for pool in args:
        choices = tuple(pool)
        combos = [prefix + (item,) for prefix in combos for item in choices]
    for combo in combos:
        yield combo
1764
+
1765
def fold_finite(ctx, f, intervals):
    """Return a summand with all finite dimensions summed out.

    *intervals* is a list of ``(dimension, (a, b))`` pairs with integer
    endpoints; the returned function sums *f* over the full grid of
    those dimensions, leaving the remaining arguments free.
    """
    if not intervals:
        return f
    dims = [dim for (dim, _) in intervals]
    grids = [xrange(a, b + 1) for (_, (a, b)) in intervals]
    def folded(*args):
        args = list(args)
        total = ctx.zero
        for point in cartesian_product(grids):
            for dim, coord in zip(dims, point):
                args[dim] = ctx.mpf(coord)
            total += f(*args)
        return total
    return folded
1781
+
1782
# Standardize each interval to [0,inf]
def standardize_infinite(ctx, f, intervals):
    """Recursively remap every infinite interval onto [0, inf].

    Doubly infinite ranges are folded as ``f(k) + f(-k)`` (with the
    k = 0 term counted once); ranges ``[a, inf]`` are shifted and
    ``[-inf, b]`` are reflected, so that each remaining dimension runs
    over the index values 0, 1, 2, ...
    """
    if not intervals:
        return f
    dim, (a, b) = intervals[-1]
    if a == ctx.ninf and b == ctx.inf:
        # Doubly infinite: pair up k and -k, counting k = 0 only once.
        def remapped(*args):
            args = list(args)
            k = args[dim]
            if not k:
                return f(*args)
            total = f(*args)
            args[dim] = -k
            total += f(*args)
            return total
    elif a == ctx.ninf:
        # [-inf, b]: reflect, so index k corresponds to b - k.
        def remapped(*args):
            args = list(args)
            args[dim] = b - args[dim]
            return f(*args)
    else:
        # [a, inf]: shift, so index k corresponds to a + k.
        def remapped(*args):
            args = list(args)
            args[dim] += a
            return f(*args)
    return standardize_infinite(ctx, remapped, intervals[:-1])
1811
+
1812
def fold_infinite(ctx, f, intervals):
    """Collapse pairs of [0, inf] dimensions into a single index.

    The two innermost infinite dimensions are merged by summing over
    the L-shaped shell {(x, n) : 0 <= x <= n} union {(n, y) : 0 <= y < n}
    at shell index n, so that summing the result over n = 0, 1, 2, ...
    enumerates the whole quadrant exactly once. Applied recursively
    until a single dimension remains.
    """
    if len(intervals) < 2:
        return f
    inner = intervals[-2][0]
    outer = intervals[-1][0]
    # Assume intervals are [0,inf] x [0,inf] x ...
    def shell(*args):
        args = list(args)
        n = int(args[inner])
        n_mpf = ctx.mpf(n)
        total = ctx.zero
        # Top edge of the shell: (x, n) for x = 0 .. n.
        args[outer] = n_mpf
        for x in xrange(n + 1):
            args[inner] = ctx.mpf(x)
            total += f(*args)
        # Right edge of the shell: (n, y) for y = 0 .. n-1.
        args[inner] = n_mpf
        for y in xrange(n):
            args[outer] = ctx.mpf(y)
            total += f(*args)
        return total
    return fold_infinite(ctx, shell, intervals[:-1])
1835
+
1836
@defun
def nprod(ctx, f, interval, nsum=False, **kwargs):
    r"""
    Computes the product

    .. math ::

        P = \prod_{k=a}^b f(k)

    where `(a, b)` = *interval*, and where `a = -\infty` and/or
    `b = \infty` are allowed.

    By default, :func:`~mpmath.nprod` uses the same extrapolation methods as
    :func:`~mpmath.nsum`, except applied to the partial products rather than
    partial sums, and the same keyword options as for :func:`~mpmath.nsum` are
    supported. If ``nsum=True``, the product is instead computed via
    :func:`~mpmath.nsum` as

    .. math ::

        P = \exp\left( \sum_{k=a}^b \log(f(k)) \right).

    This is slower, but can sometimes yield better results. It is
    also required (and used automatically) when Euler-Maclaurin
    summation is requested.

    **Examples**

    A simple finite product::

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> nprod(lambda k: k, [1, 4])
        24.0

    A few infinite products with known values (most taken from
    MathWorld [1])::

        >>> 2*nprod(lambda k: (4*k**2)/(4*k**2-1), [1, inf])
        3.141592653589793238462643
        >>> nprod(lambda k: (1+1/k)**2/(1+2/k), [1, inf])
        2.0
        >>> nprod(lambda k: (k**3-1)/(k**3+1), [2, inf])
        0.6666666666666666666666667
        >>> nprod(lambda k: (1-1/k**2), [2, inf])
        0.5
        >>> nprod(lambda k: exp(1/k**2), [1, inf]); exp(pi**2/6)
        5.180668317897115748416626
        5.180668317897115748416626

    This product does not have a known closed form value::

        >>> nprod(lambda k: (1-1/2**k), [1, inf])
        0.2887880950866024212788997

    A product taken from `-\infty`::

        >>> nprod(lambda k: 1-k**(-2), [-inf,-2])
        0.5

    A doubly infinite product::

        >>> nprod(lambda k: exp(1/(1+k**2)), [-inf, inf])
        23.41432688231864337420035
        >>> exp(pi/tanh(pi))
        23.41432688231864337420035

    A product requiring the use of Euler-Maclaurin summation to compute
    an accurate value::

        >>> nprod(lambda k: (1-1/k**2.5), [2, inf], method='e')
        0.696155111336231052898125

    **References**

    1. [Weisstein]_ http://mathworld.wolfram.com/InfiniteProduct.html

    """
    if nsum or ('e' in kwargs.get('method', '')):
        orig = ctx.prec
        try:
            # TODO: we are evaluating log(1+eps) -> eps, which is
            # inaccurate. This currently works because nsum greatly
            # increases the working precision. But we should be
            # more intelligent and handle the precision here.
            ctx.prec += 10
            v = ctx.nsum(lambda n: ctx.ln(f(n)), interval, **kwargs)
        finally:
            ctx.prec = orig
        return +ctx.exp(v)

    a, b = ctx._as_points(interval)
    if a == ctx.ninf:
        if b == ctx.inf:
            # Doubly infinite: pair k with -k; f(0) is counted once.
            return f(0) * ctx.nprod(lambda k: f(-k) * f(k), [1, ctx.inf], **kwargs)
        # Reflect [-inf, b] onto [-b, inf]. BUGFIX: the factor at index
        # k of the reflected product is f(-k), not f(k); recursing with
        # the unreflected f silently computed the wrong product for any
        # f that is not an even function (matches the reflection done by
        # standardize_infinite for nsum).
        return ctx.nprod(lambda k: f(-k), [-b, ctx.inf], **kwargs)
    elif b != ctx.inf:
        # Fully finite interval: multiply the terms directly.
        return ctx.fprod(f(ctx.mpf(k)) for k in xrange(int(a), int(b)+1))

    a = int(a)

    def update(partial_products, indices):
        # Extend the sequence of partial products at the given indices.
        if partial_products:
            pprod = partial_products[-1]
        else:
            pprod = ctx.one
        for k in indices:
            pprod = pprod * f(a + ctx.mpf(k))
        partial_products.append(pprod)

    return +ctx.adaptive_extrapolation(update, None, kwargs)
1999
+
2000
+
2001
@defun
def limit(ctx, f, x, direction=1, exp=False, **kwargs):
    r"""
    Computes an estimate of the limit

    .. math ::

        \lim_{t \to x} f(t)

    where `x` may be finite or infinite.

    For finite `x`, :func:`~mpmath.limit` evaluates `f(x + d/n)` for
    consecutive integer values of `n`, where the approach direction
    `d` may be specified using the *direction* keyword argument.
    For infinite `x`, :func:`~mpmath.limit` evaluates values of
    `f(\mathrm{sign}(x) \cdot n)`.

    If the approach to the limit is not sufficiently fast to give
    an accurate estimate directly, :func:`~mpmath.limit` attempts to find
    the limit using Richardson extrapolation or the Shanks
    transformation. You can select between these methods using
    the *method* keyword (see documentation of :func:`~mpmath.nsum` for
    more information).

    **Options**

    The following options are available with essentially the
    same meaning as for :func:`~mpmath.nsum`: *tol*, *method*,
    *maxterms*, *steps*, *verbose*.

    If the option *exp=True* is set, `f` will be
    sampled at exponentially spaced points `n = 2^1, 2^2, 2^3, \ldots`
    instead of the linearly spaced points `n = 1, 2, 3, \ldots`.
    This can sometimes improve the rate of convergence so that
    :func:`~mpmath.limit` may return a more accurate answer (and faster).
    However, do note that this can only be used if `f`
    supports fast and accurate evaluation for arguments that
    are extremely close to the limit point (or if infinite,
    very large arguments).

    **Examples**

    A basic evaluation of a removable singularity::

        >>> from mpmath import *
        >>> mp.dps = 30; mp.pretty = True
        >>> limit(lambda x: (x-sin(x))/x**3, 0)
        0.166666666666666666666666666667

    Computing the exponential function using its limit definition::

        >>> limit(lambda n: (1+3/n)**n, inf)
        20.0855369231876677409285296546
        >>> exp(3)
        20.0855369231876677409285296546

    Calculating the coefficient in Stirling's formula::

        >>> limit(lambda n: fac(n) / (sqrt(n)*(n/e)**n), inf)
        2.50662827463100050241576528481
        >>> sqrt(2*pi)
        2.50662827463100050241576528481

    With default settings, the following limit converges too slowly
    to be evaluated accurately. Changing to exponential sampling
    however gives a perfect result::

        >>> f = lambda x: sqrt(x**3+x**2)/(sqrt(x**3)+x)
        >>> limit(f, inf)
        0.992831158558330281129249686491
        >>> limit(f, inf, exp=True)
        1.0

    """
    if ctx.isinf(x):
        # Approach +/-inf by sampling f at sign(x)*1, sign(x)*2, ...
        direction = ctx.sign(x)
        sample = lambda k: f(ctx.mpf(k+1)*direction)
    else:
        # Approach x along x + d/1, x + d/2, x + d/3, ...
        direction *= ctx.one
        sample = lambda k: f(x + direction/(k+1))
    if exp:
        # Optionally resample at exponentially spaced points 2, 4, 8, ...
        linear = sample
        sample = lambda k: linear(2**k)

    def update(values, indices):
        for k in indices:
            values.append(sample(k+1))

    # XXX: steps used by nsum don't work well
    if 'steps' not in kwargs:
        kwargs['steps'] = [10]

    return +ctx.adaptive_extrapolation(update, None, kwargs)
venv/lib/python3.10/site-packages/mpmath/calculus/inverselaplace.py ADDED
@@ -0,0 +1,973 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # contributed to mpmath by Kristopher L. Kuhlman, February 2017
2
+ # contributed to mpmath by Guillermo Navas-Palencia, February 2022
3
+
4
class InverseLaplaceTransform(object):
    r"""
    Common infrastructure for numerical inverse Laplace transform
    algorithms, used to simplify the code.

    Implement a custom inverse Laplace transform algorithm by
    subclassing :class:`InverseLaplaceTransform` and overriding the
    two methods below; the subclass can then be used by
    :func:`~mpmath.invertlaplace` by passing it as the *method*
    argument.
    """

    def __init__(self, ctx):
        # The mpmath context used for all arithmetic.
        self.ctx = ctx

    def calc_laplace_parameter(self, t, **kwargs):
        r"""
        Determine the vector of Laplace parameter values needed for an
        algorithm. The values depend on the choice of algorithm (de
        Hoog is the default), the algorithm-specific parameters passed
        (or their defaults), and the desired time.
        """
        raise NotImplementedError

    def calc_time_domain_solution(self, fp):
        r"""
        Compute the time domain solution, after computing the
        Laplace-space function evaluations *fp* at the abscissa
        required for the algorithm. Abscissa computed for one
        algorithm are typically not useful for another algorithm.
        """
        raise NotImplementedError
37
+
38
+
39
class FixedTalbot(InverseLaplaceTransform):

    def calc_laplace_parameter(self, t, **kwargs):
        r"""The "fixed" Talbot method deforms the Bromwich contour towards
        `-\infty` in the shape of a parabola. Traditionally the Talbot
        algorithm has adjustable parameters, but the "fixed" version
        does not. The `r` parameter could be passed in as a parameter,
        if you want to override the default given by (Abate & Valko,
        2004).

        The Laplace parameter is sampled along a parabola opening
        along the negative imaginary axis, with the base of the
        parabola along the real axis at
        `p=\frac{r}{t_\mathrm{max}}`. As the number of terms used in
        the approximation (degree) grows, the abscissa required for
        function evaluation tend towards `-\infty`, requiring high
        precision to prevent overflow. If any poles, branch cuts or
        other singularities exist such that the deformed Bromwich
        contour lies to the left of the singularity, the method will
        fail.

        **Optional arguments**

        *tmax*
            maximum time associated with vector of times
            (typically just the time requested)
        *degree*
            integer order of approximation (M = number of terms)
        *r*
            abscissa for `p_0` (otherwise computed using rule
            of thumb `2M/5`)

        The working precision will be increased according to a rule of
        thumb. If 'degree' is not specified, the working precision and
        degree are chosen to hopefully achieve the dps of the calling
        context. If 'degree' is specified, the working precision is
        chosen to achieve maximum resulting precision for the
        specified degree.

        .. math ::

            p_0=\frac{r}{t}

        .. math ::

            p_i=\frac{i r \pi}{Mt_\mathrm{max}}\left[\cot\left(
            \frac{i\pi}{M}\right) + j \right] \qquad 1\le i <M

        where `j=\sqrt{-1}`, `r=2M/5`, and `t_\mathrm{max}` is the
        maximum specified time.

        """

        # required: time of desired approximation
        self.t = self.ctx.convert(t)

        # optional: maximum time desired (used for scaling); defaults
        # to the requested time
        self.tmax = self.ctx.convert(kwargs.get('tmax', self.t))

        # empirical relationships used here based on a linear fit of
        # requested and delivered dps for exponentially decaying time
        # functions for requested dps up to 512.
        if 'degree' in kwargs:
            self.degree = kwargs['degree']
            self.dps_goal = self.degree
        else:
            self.dps_goal = int(1.72*self.ctx.dps)
            self.degree = max(12, int(1.38*self.dps_goal))

        M = self.degree

        # this is adjusting the dps of the calling context; hopefully
        # the caller doesn't monkey around with it between calling
        # this routine and calc_time_domain_solution()
        self.dps_orig = self.ctx.dps
        self.ctx.dps = self.dps_goal

        # Abate & Valko rule of thumb for r parameter
        self.r = kwargs.get('r', self.ctx.fraction(2, 5)*M)

        self.theta = self.ctx.linspace(0.0, self.ctx.pi, M+1)

        self.cot_theta = self.ctx.matrix(M, 1)
        self.cot_theta[0] = 0  # not used

        # all but time-dependent part of p
        self.delta = self.ctx.matrix(M, 1)
        self.delta[0] = self.r

        for i in range(1, M):
            self.cot_theta[i] = self.ctx.cot(self.theta[i])
            self.delta[i] = self.r*self.theta[i]*(self.cot_theta[i] + 1j)

        # NOTE: a dead pre-allocation of self.p (immediately
        # overwritten by the line below) was removed here.
        self.p = self.delta/self.tmax

        # NB: p is complex (mpc)

    def calc_time_domain_solution(self, fp, t, manual_prec=False):
        r"""The fixed Talbot time-domain solution is computed from the
        Laplace-space function evaluations using

        .. math ::

            f(t,M)=\frac{2}{5t}\sum_{k=0}^{M-1}\Re \left[
            \gamma_k \bar{f}(p_k)\right]

        where

        .. math ::

            \gamma_0 = \frac{1}{2}e^{r}\bar{f}(p_0)

        .. math ::

            \gamma_k = e^{tp_k}\left\lbrace 1 + \frac{jk\pi}{M}\left[1 +
            \cot \left( \frac{k \pi}{M} \right)^2 \right] - j\cot\left(
            \frac{k \pi}{M}\right)\right \rbrace \qquad 1\le k<M.

        Again, `j=\sqrt{-1}`.

        Before calling this function, call
        :class:`~mpmath.calculus.inverselaplace.FixedTalbot.calc_laplace_parameter`
        to set the parameters and compute the required coefficients.

        **References**

        1. Abate, J., P. Valko (2004). Multi-precision Laplace
           transform inversion. *International Journal for Numerical
           Methods in Engineering* 60:979-993,
           http://dx.doi.org/10.1002/nme.995
        2. Talbot, A. (1979). The accurate numerical inversion of
           Laplace transforms. *IMA Journal of Applied Mathematics*
           23(1):97, http://dx.doi.org/10.1093/imamat/23.1.97
        """

        # required
        self.t = self.ctx.convert(t)

        # fp is assumed to have been computed from the p matrix
        # returned by calc_laplace_parameter(), so it is already a
        # list or matrix of mpmath 'mpc' types.

        # computed in the previous call to calc_laplace_parameter()
        # (unused locals p and r from the original were removed)
        theta = self.theta
        delta = self.delta
        M = self.degree

        ans = self.ctx.matrix(M, 1)
        # delta[0] == r, so this is (1/2) e^r fbar(p_0)
        ans[0] = self.ctx.exp(delta[0])*fp[0]/2

        for i in range(1, M):
            ans[i] = self.ctx.exp(delta[i])*fp[i]*(
                1 + 1j*theta[i]*(1 + self.cot_theta[i]**2) -
                1j*self.cot_theta[i])

        result = self.ctx.fraction(2, 5)*self.ctx.fsum(ans)/self.t

        # restore dps to its value when calc_laplace_parameter was
        # called, unless the caller is managing precision manually
        if not manual_prec:
            self.ctx.dps = self.dps_orig

        return result.real
216
+
217
+
218
+ # ****************************************
219
+
220
class Stehfest(InverseLaplaceTransform):

    def calc_laplace_parameter(self, t, **kwargs):
        r"""
        The Gaver-Stehfest method is a discrete approximation of the
        Widder-Post inversion algorithm, rather than a direct
        approximation of the Bromwich contour integral.

        The method uses abscissa along the real axis only, and
        therefore has issues inverting oscillatory functions (which
        have poles in pairs away from the real axis).

        The working precision will be increased according to a rule of
        thumb. If 'degree' is not specified, the working precision and
        degree are chosen to hopefully achieve the dps of the calling
        context. If 'degree' is specified, the working precision is
        chosen to achieve maximum resulting precision for the
        specified degree.

        .. math ::

            p_k = \frac{k \log 2}{t} \qquad 1 \le k \le M
        """

        # required: time of desired approximation
        self.t = self.ctx.convert(t)

        # empirical relationships based on a linear fit of requested
        # and delivered dps for exponentially decaying time functions,
        # for requested dps up to 512.
        if 'degree' in kwargs:
            self.degree = kwargs['degree']
            self.dps_goal = int(1.38*self.degree)
        else:
            self.dps_goal = int(2.93*self.ctx.dps)
            self.degree = max(16, self.dps_goal)

        # the _coeff routine requires an even degree
        if self.degree % 2 > 0:
            self.degree += 1

        M = self.degree

        # adjust the dps of the calling context; hopefully the caller
        # doesn't monkey around with it between calling this routine
        # and calc_time_domain_solution()
        self.dps_orig = self.ctx.dps
        self.ctx.dps = self.dps_goal

        self.V = self._coeff()
        self.p = self.ctx.matrix(self.ctx.arange(1, M+1))*self.ctx.ln2/self.t

        # NB: p is real (mpf)

    def _coeff(self):
        r"""Compute the Salzer summation weights (aka "Stehfest
        coefficients"), which only depend on the approximation order
        (M) and the precision."""

        M = self.degree
        M2 = M // 2  # M was forced to be even earlier

        V = self.ctx.matrix(M, 1)

        # The Salzer summation weights get very large in magnitude and
        # oscillate in sign; if the precision is not high enough,
        # there will be catastrophic cancellation.
        for k in range(1, M+1):
            terms = self.ctx.matrix(min(k, M2)+1, 1)
            for j in range(int((k+1)/2), min(k, M2)+1):
                terms[j] = (self.ctx.power(j, M2)*self.ctx.fac(2*j)/
                            (self.ctx.fac(M2-j)*self.ctx.fac(j)*
                             self.ctx.fac(j-1)*self.ctx.fac(k-j)*
                             self.ctx.fac(2*j-k)))
            V[k-1] = self.ctx.power(-1, k+M2)*self.ctx.fsum(terms)

        return V

    def calc_time_domain_solution(self, fp, t, manual_prec=False):
        r"""Compute the time-domain Gaver-Stehfest solution

        .. math ::

            f(t,M) = \frac{\log 2}{t} \sum_{k=1}^{M} V_k \bar{f}\left(
            p_k \right)

        where

        .. math ::

            V_k = (-1)^{k + N/2} \sum^{\min(k,N/2)}_{i=\lfloor(k+1)/2 \rfloor}
            \frac{i^{\frac{N}{2}}(2i)!}{\left(\frac{N}{2}-i \right)! \, i! \,
            \left(i-1 \right)! \, \left(k-i\right)! \, \left(2i-k \right)!}

        As the degree increases, the abscissa (`p_k`) only increase
        linearly towards `\infty`, but the Stehfest coefficients
        (`V_k`) alternate in sign and grow rapidly in magnitude,
        requiring high precision to prevent overflow or loss of
        significance when evaluating the sum.

        **References**

        1. Widder, D. (1941). *The Laplace Transform*. Princeton.
        2. Stehfest, H. (1970). Algorithm 368: numerical inversion of
           Laplace transforms. *Communications of the ACM* 13(1):47-49,
           http://dx.doi.org/10.1145/361953.361969

        """

        # required
        self.t = self.ctx.convert(t)

        # fp is assumed to have been computed from the p matrix
        # returned by calc_laplace_parameter(), so it is already a
        # list or matrix of mpmath 'mpf' types.
        result = self.ctx.fdot(self.V, fp)*self.ctx.ln2/self.t

        # restore dps to its value when calc_laplace_parameter was
        # called, unless the caller is managing precision manually
        if not manual_prec:
            self.ctx.dps = self.dps_orig

        # ignore any small imaginary part
        return result.real
350
+
351
+
352
+ # ****************************************
353
+
354
class deHoog(InverseLaplaceTransform):

    def calc_laplace_parameter(self, t, **kwargs):
        r"""The de Hoog, Knight & Stokes algorithm is an accelerated
        form of the Fourier series numerical inverse Laplace
        transform algorithms.

        .. math ::

            p_k = \gamma + \frac{jk}{T} \qquad 0 \le k < 2M+1

        where

        .. math ::

            \gamma = \alpha - \frac{\log \mathrm{tol}}{2T},

        `j=\sqrt{-1}`, `T = 2t_\mathrm{max}` is a scaled time,
        `\alpha=10^{-\mathrm{dps\_goal}}` is the real part of the
        rightmost pole or singularity, which is chosen based on the
        desired accuracy (assuming the rightmost singularity is 0),
        and `\mathrm{tol}=10\alpha` is the desired tolerance, which is
        chosen in relation to `\alpha`.

        When increasing the degree, the abscissa increase towards
        `j\infty`, but more slowly than for the fixed Talbot
        algorithm. The de Hoog et al. algorithm typically does better
        with oscillatory functions of time, and less well-behaved
        functions. The method tends to be slower than the Talbot and
        Stehfest algorithms, especially so at very high precision
        (e.g., `>500` digits precision).

        """

        # required: time of desired approximation
        self.t = self.ctx.convert(t)

        # optional: maximum time desired (used for scaling)
        self.tmax = kwargs.get('tmax', self.t)

        # empirical relationships used here based on a linear fit of
        # requested and delivered dps for exponentially decaying time
        # functions for requested dps up to 512.
        if 'degree' in kwargs:
            self.degree = kwargs['degree']
            self.dps_goal = int(1.38*self.degree)
        else:
            self.dps_goal = int(self.ctx.dps*1.36)
            self.degree = max(10, self.dps_goal)

        # 2*M+1 terms in approximation
        M = self.degree

        # adjust alpha component of abscissa of convergence for higher
        # precision
        tmp = self.ctx.power(10.0, -self.dps_goal)
        self.alpha = self.ctx.convert(kwargs.get('alpha', tmp))

        # desired tolerance (here simply related to alpha)
        self.tol = self.ctx.convert(kwargs.get('tol', self.alpha*10.0))
        self.np = 2*self.degree+1  # number of terms in approximation

        # this is adjusting the dps of the calling context; hopefully
        # the caller doesn't monkey around with it between calling
        # this routine and calc_time_domain_solution()
        self.dps_orig = self.ctx.dps
        self.ctx.dps = self.dps_goal

        # scaling factor (likely tunable, but 2 is typical)
        self.scale = kwargs.get('scale', 2)
        self.T = self.ctx.convert(kwargs.get('T', self.scale*self.tmax))

        self.gamma = self.alpha - self.ctx.log(self.tol)/(self.scale*self.T)
        # NOTE: a dead pre-allocation of self.p (immediately
        # overwritten by the line below) was removed here.
        self.p = (self.gamma + self.ctx.pi*
                  self.ctx.matrix(self.ctx.arange(self.np))/self.T*1j)

        # NB: p is complex (mpc)

    def calc_time_domain_solution(self, fp, t, manual_prec=False):
        r"""Calculate the time-domain solution for the
        de Hoog, Knight & Stokes algorithm.

        The un-accelerated Fourier series approach is:

        .. math ::

            f(t,2M+1) = \frac{e^{\gamma t}}{T} \sum_{k=0}^{2M}{}^{'}
            \Re\left[\bar{f}\left( p_k \right)
            e^{i\pi t/T} \right],

        where the prime on the summation indicates the first term is
        halved.

        This simplistic approach requires so many function evaluations
        that it is not practical. Non-linear acceleration is
        accomplished via Pade-approximation and an analytic expression
        for the remainder of the continued fraction. See the original
        paper (reference 2 below) for a detailed description of the
        numerical approach.

        **References**

        1. Davies, B. (2005). *Integral Transforms and their
           Applications*, Third Edition. Springer.
        2. de Hoog, F., J. Knight, A. Stokes (1982). An improved
           method for numerical inversion of Laplace transforms. *SIAM
           Journal of Scientific and Statistical Computing* 3:357-366,
           http://dx.doi.org/10.1137/0903022

        """

        M = self.degree
        np = self.np
        T = self.T

        self.t = self.ctx.convert(t)

        # would it be useful to try re-using
        # space between e&q and A&B?
        e = self.ctx.zeros(np, M+1)
        q = self.ctx.matrix(2*M, M)
        d = self.ctx.matrix(np, 1)
        A = self.ctx.zeros(np+1, 1)
        B = self.ctx.ones(np+1, 1)

        # initialize Q-D table
        e[:, 0] = 0.0 + 0j
        q[0, 0] = fp[1]/(fp[0]/2)
        for i in range(1, 2*M):
            q[i, 0] = fp[i+1]/fp[i]

        # rhombus rule for filling triangular Q-D table (e & q)
        for r in range(1, M+1):
            # start with e, column 1, 0:2*M-2
            mr = 2*(M-r) + 1
            e[0:mr, r] = q[1:mr+1, r-1] - q[0:mr, r-1] + e[1:mr+1, r-1]
            if not r == M:
                rq = r+1
                mr = 2*(M-rq)+1 + 2
                for i in range(mr):
                    q[i, rq-1] = q[i+1, rq-2]*e[i+1, rq-1]/e[i, rq-1]

        # build up continued fraction coefficients (d)
        d[0] = fp[0]/2
        for r in range(1, M+1):
            d[2*r-1] = -q[0, r-1]  # even terms
            d[2*r] = -e[0, r]      # odd terms

        # seed A and B for recurrence
        A[0] = 0.0 + 0.0j
        A[1] = d[0]
        B[0:2] = 1.0 + 0.0j

        # base of the power series
        z = self.ctx.expjpi(self.t/T)  # i*pi is already in fcn

        # coefficients of Pade approximation (A & B)
        # using recurrence for all but last term
        for i in range(1, 2*M):
            A[i+1] = A[i] + d[i]*A[i-1]*z
            B[i+1] = B[i] + d[i]*B[i-1]*z

        # "improved remainder" to continued fraction
        brem = (1 + (d[2*M-1] - d[2*M])*z)/2
        # powm1(x,y) computes x^y - 1 more accurately near zero
        rem = brem*self.ctx.powm1(1 + d[2*M]*z/brem,
                                  self.ctx.fraction(1, 2))

        # last term of recurrence using new remainder
        A[np] = A[2*M] + rem*A[2*M-1]
        B[np] = B[2*M] + rem*B[2*M-1]

        # diagonal Pade approximation
        # F=A/B represents accelerated trapezoid rule
        result = self.ctx.exp(self.gamma*self.t)/T*(A[np]/B[np]).real

        # restore dps to its value when calc_laplace_parameter was
        # called, unless the caller is managing precision manually
        if not manual_prec:
            self.ctx.dps = self.dps_orig

        return result
538
+
539
+
540
+ # ****************************************
541
+
542
class Cohen(InverseLaplaceTransform):

    def calc_laplace_parameter(self, t, **kwargs):
        r"""The Cohen algorithm accelerates the convergence of the nearly
        alternating series resulting from the application of the trapezoidal
        rule to the Bromwich contour inversion integral.

        .. math ::

            p_k = \frac{\gamma}{2 t} + \frac{\pi i k}{t} \qquad 0 \le k < M

        where

        .. math ::

            \gamma = \frac{2}{3} (d + \log(10) + \log(2 t)),

        `d = \mathrm{dps\_goal}`, which is chosen based on the desired
        accuracy using the method developed in [1] to improve numerical
        stability. The Cohen algorithm shows robustness similar to the de Hoog
        et al. algorithm, but it is faster than the fixed Talbot algorithm.

        **Optional arguments**

        *degree*
            integer order of the approximation (M = number of terms)
        *alpha*
            abscissa for `p_0` (controls the discretization error)

        The working precision will be increased according to a rule of
        thumb. If 'degree' is not specified, the working precision and
        degree are chosen to hopefully achieve the dps of the calling
        context. If 'degree' is specified, the working precision is
        chosen to achieve maximum resulting precision for the
        specified degree.

        **References**

        1. P. Glasserman, J. Ruiz-Mata (2006). Computing the credit loss
        distribution in the Gaussian copula model: a comparison of methods.
        *Journal of Credit Risk* 2(4):33-66, 10.21314/JCR.2006.057

        """
        self.t = self.ctx.convert(t)

        # empirically-tuned coupling between requested degree and the
        # working precision (and vice versa)
        if 'degree' in kwargs:
            self.degree = kwargs['degree']
            self.dps_goal = int(1.5 * self.degree)
        else:
            self.dps_goal = int(self.ctx.dps * 1.74)
            self.degree = max(22, int(1.31 * self.dps_goal))

        M = self.degree + 1

        # this is adjusting the dps of the calling context hopefully
        # the caller doesn't monkey around with it between calling
        # this routine and calc_time_domain_solution()
        self.dps_orig = self.ctx.dps
        self.ctx.dps = self.dps_goal

        # gamma = (2/3)*(dps*log(10) + log(2*t)); alpha defaults to it
        # unless the caller overrides via the 'alpha' keyword
        ttwo = 2 * self.t
        tmp = self.ctx.dps * self.ctx.log(10) + self.ctx.log(ttwo)
        tmp = self.ctx.fraction(2, 3) * tmp
        self.alpha = self.ctx.convert(kwargs.get('alpha', tmp))

        # all but time-dependent part of p
        a_t = self.alpha / ttwo
        p_t = self.ctx.pi * 1j / self.t

        self.p = self.ctx.matrix(M, 1)
        self.p[0] = a_t

        for i in range(1, M):
            self.p[i] = a_t + i * p_t

    def calc_time_domain_solution(self, fp, t, manual_prec=False):
        r"""Calculate time-domain solution for Cohen algorithm.

        The accelerated nearly alternating series is:

        .. math ::

            f(t, M) = \frac{e^{\gamma / 2}}{t} \left[\frac{1}{2}
            \Re\left(\bar{f}\left(\frac{\gamma}{2t}\right) \right) -
            \sum_{k=0}^{M-1}\frac{c_{M,k}}{d_M}\Re\left(\bar{f}
            \left(\frac{\gamma + 2(k+1) \pi i}{2t}\right)\right)\right],

        where coefficients `\frac{c_{M, k}}{d_M}` are described in [1].

        1. H. Cohen, F. Rodriguez Villegas, D. Zagier (2000). Convergence
        acceleration of alternating series. *Experiment. Math* 9(1):3-12

        """
        self.t = self.ctx.convert(t)

        n = self.degree
        M = n + 1

        # only the real parts of the Laplace-space samples enter the sum
        A = self.ctx.matrix(M, 1)
        for i in range(M):
            A[i] = fp[i].real

        # Chebyshev-polynomial normalization d_M = cosh(n*arccosh(3))
        # from the Cohen-Villegas-Zagier acceleration scheme
        d = (3 + self.ctx.sqrt(8)) ** n
        d = (d + 1 / d) / 2
        b = -self.ctx.one
        c = -d
        s = 0

        # recurrence generating the c_{M,k} weights on the fly
        for k in range(n):
            c = b - c
            s = s + c * A[k + 1]
            b = 2 * (k + n) * (k - n) * b / ((2 * k + 1) * (k + self.ctx.one))

        result = self.ctx.exp(self.alpha / 2) / self.t * (A[0] / 2 - s / d)

        # setting dps back to value when calc_laplace_parameter was
        # called, unless flag is set.
        if not manual_prec:
            self.ctx.dps = self.dps_orig

        return result
663
+
664
+
665
+ # ****************************************
666
+
667
class LaplaceTransformInversionMethods(object):
    # Mixin holding invertlaplace() and its shortcuts; by mpmath convention
    # the first parameter of these methods is the context and is named ctx
    # rather than self.
    def __init__(ctx, *args, **kwargs):
        # One reusable solver instance per algorithm; the solver classes
        # are defined earlier in this module.
        ctx._fixed_talbot = FixedTalbot(ctx)
        ctx._stehfest = Stehfest(ctx)
        ctx._de_hoog = deHoog(ctx)
        ctx._cohen = Cohen(ctx)
673
+
674
+ def invertlaplace(ctx, f, t, **kwargs):
675
+ r"""Computes the numerical inverse Laplace transform for a
676
+ Laplace-space function at a given time. The function being
677
+ evaluated is assumed to be a real-valued function of time.
678
+
679
+ The user must supply a Laplace-space function `\bar{f}(p)`,
680
+ and a desired time at which to estimate the time-domain
681
+ solution `f(t)`.
682
+
683
+ A few basic examples of Laplace-space functions with known
684
+ inverses (see references [1,2]) :
685
+
686
+ .. math ::
687
+
688
+ \mathcal{L}\left\lbrace f(t) \right\rbrace=\bar{f}(p)
689
+
690
+ .. math ::
691
+
692
+ \mathcal{L}^{-1}\left\lbrace \bar{f}(p) \right\rbrace = f(t)
693
+
694
+ .. math ::
695
+
696
+ \bar{f}(p) = \frac{1}{(p+1)^2}
697
+
698
+ .. math ::
699
+
700
+ f(t) = t e^{-t}
701
+
702
+ >>> from mpmath import *
703
+ >>> mp.dps = 15; mp.pretty = True
704
+ >>> tt = [0.001, 0.01, 0.1, 1, 10]
705
+ >>> fp = lambda p: 1/(p+1)**2
706
+ >>> ft = lambda t: t*exp(-t)
707
+ >>> ft(tt[0]),ft(tt[0])-invertlaplace(fp,tt[0],method='talbot')
708
+ (0.000999000499833375, 8.57923043561212e-20)
709
+ >>> ft(tt[1]),ft(tt[1])-invertlaplace(fp,tt[1],method='talbot')
710
+ (0.00990049833749168, 3.27007646698047e-19)
711
+ >>> ft(tt[2]),ft(tt[2])-invertlaplace(fp,tt[2],method='talbot')
712
+ (0.090483741803596, -1.75215800052168e-18)
713
+ >>> ft(tt[3]),ft(tt[3])-invertlaplace(fp,tt[3],method='talbot')
714
+ (0.367879441171442, 1.2428864009344e-17)
715
+ >>> ft(tt[4]),ft(tt[4])-invertlaplace(fp,tt[4],method='talbot')
716
+ (0.000453999297624849, 4.04513489306658e-20)
717
+
718
+ The methods also work for higher precision:
719
+
720
+ >>> mp.dps = 100; mp.pretty = True
721
+ >>> nstr(ft(tt[0]),15),nstr(ft(tt[0])-invertlaplace(fp,tt[0],method='talbot'),15)
722
+ ('0.000999000499833375', '-4.96868310693356e-105')
723
+ >>> nstr(ft(tt[1]),15),nstr(ft(tt[1])-invertlaplace(fp,tt[1],method='talbot'),15)
724
+ ('0.00990049833749168', '1.23032291513122e-104')
725
+
726
+ .. math ::
727
+
728
+ \bar{f}(p) = \frac{1}{p^2+1}
729
+
730
+ .. math ::
731
+
732
+ f(t) = \mathrm{J}_0(t)
733
+
734
+ >>> mp.dps = 15; mp.pretty = True
735
+ >>> fp = lambda p: 1/sqrt(p*p + 1)
736
+ >>> ft = lambda t: besselj(0,t)
737
+ >>> ft(tt[0]),ft(tt[0])-invertlaplace(fp,tt[0],method='dehoog')
738
+ (0.999999750000016, -6.09717765032273e-18)
739
+ >>> ft(tt[1]),ft(tt[1])-invertlaplace(fp,tt[1],method='dehoog')
740
+ (0.99997500015625, -5.61756281076169e-17)
741
+
742
+ .. math ::
743
+
744
+ \bar{f}(p) = \frac{\log p}{p}
745
+
746
+ .. math ::
747
+
748
+ f(t) = -\gamma -\log t
749
+
750
+ >>> mp.dps = 15; mp.pretty = True
751
+ >>> fp = lambda p: log(p)/p
752
+ >>> ft = lambda t: -euler-log(t)
753
+ >>> ft(tt[0]),ft(tt[0])-invertlaplace(fp,tt[0],method='stehfest')
754
+ (6.3305396140806, -1.92126634837863e-16)
755
+ >>> ft(tt[1]),ft(tt[1])-invertlaplace(fp,tt[1],method='stehfest')
756
+ (4.02795452108656, -4.81486093200704e-16)
757
+
758
+ **Options**
759
+
760
+ :func:`~mpmath.invertlaplace` recognizes the following optional
761
+ keywords valid for all methods:
762
+
763
+ *method*
764
+ Chooses numerical inverse Laplace transform algorithm
765
+ (described below).
766
+ *degree*
767
+ Number of terms used in the approximation
768
+
769
+ **Algorithms**
770
+
771
+ Mpmath implements four numerical inverse Laplace transform
772
+ algorithms, attributed to: Talbot, Stehfest, and de Hoog,
773
+ Knight and Stokes. These can be selected by using
774
+ *method='talbot'*, *method='stehfest'*, *method='dehoog'* or
775
+ *method='cohen'* or by passing the classes *method=FixedTalbot*,
776
+ *method=Stehfest*, *method=deHoog*, or *method=Cohen*. The functions
777
+ :func:`~mpmath.invlaptalbot`, :func:`~mpmath.invlapstehfest`,
778
+ :func:`~mpmath.invlapdehoog`, and :func:`~mpmath.invlapcohen`
779
+ are also available as shortcuts.
780
+
781
+ All four algorithms implement a heuristic balance between the
782
+ requested precision and the precision used internally for the
783
+ calculations. This has been tuned for a typical exponentially
784
+ decaying function and precision up to few hundred decimal
785
+ digits.
786
+
787
+ The Laplace transform converts the variable time (i.e., along
788
+ a line) into a parameter given by the right half of the
789
+ complex `p`-plane. Singularities, poles, and branch cuts in
790
+ the complex `p`-plane contain all the information regarding
791
+ the time behavior of the corresponding function. Any numerical
792
+ method must therefore sample `p`-plane "close enough" to the
793
+ singularities to accurately characterize them, while not
794
+ getting too close to have catastrophic cancellation, overflow,
795
+ or underflow issues. Most significantly, if one or more of the
796
+ singularities in the `p`-plane is not on the left side of the
797
+ Bromwich contour, its effects will be left out of the computed
798
+ solution, and the answer will be completely wrong.
799
+
800
+ *Talbot*
801
+
802
+ The fixed Talbot method is high accuracy and fast, but the
803
+ method can catastrophically fail for certain classes of time-domain
804
+ behavior, including a Heaviside step function for positive
805
+ time (e.g., `H(t-2)`), or some oscillatory behaviors. The
806
+ Talbot method usually has adjustable parameters, but the
807
+ "fixed" variety implemented here does not. This method
808
+ deforms the Bromwich integral contour in the shape of a
809
+ parabola towards `-\infty`, which leads to problems
810
+ when the solution has a decaying exponential in it (e.g., a
811
+ Heaviside step function is equivalent to multiplying by a
812
+ decaying exponential in Laplace space).
813
+
814
+ *Stehfest*
815
+
816
+ The Stehfest algorithm only uses abscissa along the real axis
817
+ of the complex `p`-plane to estimate the time-domain
818
+ function. Oscillatory time-domain functions have poles away
819
+ from the real axis, so this method does not work well with
820
+ oscillatory functions, especially high-frequency ones. This
821
+ method also depends on summation of terms in a series that
822
+ grows very large, and will have catastrophic cancellation
823
+ during summation if the working precision is too low.
824
+
825
+ *de Hoog et al.*
826
+
827
+ The de Hoog, Knight, and Stokes method is essentially a
828
+ Fourier-series quadrature-type approximation to the Bromwich
829
+ contour integral, with non-linear series acceleration and an
830
+ analytical expression for the remainder term. This method is
831
+ typically one of the most robust. This method also involves the
832
+ greatest amount of overhead, so it is typically the slowest of the
833
+ four methods at high precision.
834
+
835
+ *Cohen*
836
+
837
+ The Cohen method is a trapezoidal rule approximation to the Bromwich
838
+ contour integral, with linear acceleration for alternating
839
+ series. This method is as robust as the de Hoog et al method and the
840
+ fastest of the four methods at high precision, and is therefore the
841
+ default method.
842
+
843
+ **Singularities**
844
+
845
+ All numerical inverse Laplace transform methods have problems
846
+ at large time when the Laplace-space function has poles,
847
+ singularities, or branch cuts to the right of the origin in
848
+ the complex plane. For simple poles in `\bar{f}(p)` at the
849
+ `p`-plane origin, the time function is constant in time (e.g.,
850
+ `\mathcal{L}\left\lbrace 1 \right\rbrace=1/p` has a pole at
851
+ `p=0`). A pole in `\bar{f}(p)` to the left of the origin is a
852
+ decreasing function of time (e.g., `\mathcal{L}\left\lbrace
853
+ e^{-t/2} \right\rbrace=1/(p+1/2)` has a pole at `p=-1/2`), and
854
+ a pole to the right of the origin leads to an increasing
855
+ function in time (e.g., `\mathcal{L}\left\lbrace t e^{t/4}
856
+ \right\rbrace = 1/(p-1/4)^2` has a pole at `p=1/4`). When
857
+ singularities occur off the real `p` axis, the time-domain
858
+ function is oscillatory. For example `\mathcal{L}\left\lbrace
859
+ \mathrm{J}_0(t) \right\rbrace=1/\sqrt{p^2+1}` has a branch cut
860
+ starting at `p=j=\sqrt{-1}` and is a decaying oscillatory
861
+ function, This range of behaviors is illustrated in Duffy [3]
862
+ Figure 4.10.4, p. 228.
863
+
864
+ In general as `p \rightarrow \infty` `t \rightarrow 0` and
865
+ vice-versa. All numerical inverse Laplace transform methods
866
+ require their abscissa to shift closer to the origin for
867
+ larger times. If the abscissa shift left of the rightmost
868
+ singularity in the Laplace domain, the answer will be
869
+ completely wrong (the effect of singularities to the right of
870
+ the Bromwich contour are not included in the results).
871
+
872
+ For example, the following exponentially growing function has
873
+ a pole at `p=3`:
874
+
875
+ .. math ::
876
+
877
+ \bar{f}(p)=\frac{1}{p^2-9}
878
+
879
+ .. math ::
880
+
881
+ f(t)=\frac{1}{3}\sinh 3t
882
+
883
+ >>> mp.dps = 15; mp.pretty = True
884
+ >>> fp = lambda p: 1/(p*p-9)
885
+ >>> ft = lambda t: sinh(3*t)/3
886
+ >>> tt = [0.01,0.1,1.0,10.0]
887
+ >>> ft(tt[0]),invertlaplace(fp,tt[0],method='talbot')
888
+ (0.0100015000675014, 0.0100015000675014)
889
+ >>> ft(tt[1]),invertlaplace(fp,tt[1],method='talbot')
890
+ (0.101506764482381, 0.101506764482381)
891
+ >>> ft(tt[2]),invertlaplace(fp,tt[2],method='talbot')
892
+ (3.33929164246997, 3.33929164246997)
893
+ >>> ft(tt[3]),invertlaplace(fp,tt[3],method='talbot')
894
+ (1781079096920.74, -1.61331069624091e-14)
895
+
896
+ **References**
897
+
898
+ 1. [DLMF]_ section 1.14 (http://dlmf.nist.gov/1.14T4)
899
+ 2. Cohen, A.M. (2007). Numerical Methods for Laplace Transform
900
+ Inversion, Springer.
901
+ 3. Duffy, D.G. (1998). Advanced Engineering Mathematics, CRC Press.
902
+
903
+ **Numerical Inverse Laplace Transform Reviews**
904
+
905
+ 1. Bellman, R., R.E. Kalaba, J.A. Lockett (1966). *Numerical
906
+ inversion of the Laplace transform: Applications to Biology,
907
+ Economics, Engineering, and Physics*. Elsevier.
908
+ 2. Davies, B., B. Martin (1979). Numerical inversion of the
909
+ Laplace transform: a survey and comparison of methods. *Journal
910
+ of Computational Physics* 33:1-32,
911
+ http://dx.doi.org/10.1016/0021-9991(79)90025-1
912
+ 3. Duffy, D.G. (1993). On the numerical inversion of Laplace
913
+ transforms: Comparison of three new methods on characteristic
914
+ problems from applications. *ACM Transactions on Mathematical
915
+ Software* 19(3):333-359, http://dx.doi.org/10.1145/155743.155788
916
+ 4. Kuhlman, K.L., (2013). Review of Inverse Laplace Transform
917
+ Algorithms for Laplace-Space Numerical Approaches, *Numerical
918
+ Algorithms*, 63(2):339-355.
919
+ http://dx.doi.org/10.1007/s11075-012-9625-3
920
+
921
+ """
922
+
923
+ rule = kwargs.get('method', 'cohen')
924
+ if type(rule) is str:
925
+ lrule = rule.lower()
926
+ if lrule == 'talbot':
927
+ rule = ctx._fixed_talbot
928
+ elif lrule == 'stehfest':
929
+ rule = ctx._stehfest
930
+ elif lrule == 'dehoog':
931
+ rule = ctx._de_hoog
932
+ elif rule == 'cohen':
933
+ rule = ctx._cohen
934
+ else:
935
+ raise ValueError("unknown invlap algorithm: %s" % rule)
936
+ else:
937
+ rule = rule(ctx)
938
+
939
+ # determine the vector of Laplace-space parameter
940
+ # needed for the requested method and desired time
941
+ rule.calc_laplace_parameter(t, **kwargs)
942
+
943
+ # compute the Laplace-space function evalutations
944
+ # at the required abscissa.
945
+ fp = [f(p) for p in rule.p]
946
+
947
+ # compute the time-domain solution from the
948
+ # Laplace-space function evaluations
949
+ return rule.calc_time_domain_solution(fp, t)
950
+
951
+ # shortcuts for the above function for specific methods
952
+ def invlaptalbot(ctx, *args, **kwargs):
953
+ kwargs['method'] = 'talbot'
954
+ return ctx.invertlaplace(*args, **kwargs)
955
+
956
+ def invlapstehfest(ctx, *args, **kwargs):
957
+ kwargs['method'] = 'stehfest'
958
+ return ctx.invertlaplace(*args, **kwargs)
959
+
960
+ def invlapdehoog(ctx, *args, **kwargs):
961
+ kwargs['method'] = 'dehoog'
962
+ return ctx.invertlaplace(*args, **kwargs)
963
+
964
+ def invlapcohen(ctx, *args, **kwargs):
965
+ kwargs['method'] = 'cohen'
966
+ return ctx.invertlaplace(*args, **kwargs)
967
+
968
+
969
+ # ****************************************
970
+
971
# Run this module's doctests when it is executed directly.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
venv/lib/python3.10/site-packages/mpmath/calculus/odes.py ADDED
@@ -0,0 +1,288 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from bisect import bisect
2
+ from ..libmp.backend import xrange
3
+
4
class ODEMethods(object):
    # Empty namespace class; odefun is attached to it at the bottom of this
    # module (presumably so it can be mixed into the mpmath context classes
    # elsewhere -- confirm against the package __init__).
    pass
6
+
7
def ode_taylor(ctx, derivs, x0, y0, tol_prec, n):
    """Generate a degree-*n* vector Taylor series for the ODE system
    y' = derivs(x, y) around x0 with initial value y0.

    Returns ``(ser, xb)`` where *ser* is a list (one entry per dimension)
    of Taylor coefficient lists, and *xb* is the right end of the interval
    on which the series is estimated to be accurate to roughly 2**-tol_prec.
    """
    # Step size equals the tolerance: h = 2**-tol_prec
    h = tol = ctx.ldexp(1, -tol_prec)
    dim = len(y0)
    xs = [x0]
    ys = [y0]
    x = x0
    y = y0
    orig = ctx.prec
    try:
        # Work at much higher precision: the finite differences below
        # suffer heavy cancellation
        ctx.prec = orig*(1+n)
        # Use n steps with Euler's method to get
        # evaluation points for derivatives
        for i in range(n):
            fxy = derivs(x, y)
            y = [y[i]+h*fxy[i] for i in xrange(len(y))]
            x += h
            xs.append(x)
            ys.append(y)
        # Compute derivatives: the j-th Taylor coefficient is the j-th
        # forward difference of the sampled values divided by h**j * j!
        ser = [[] for d in range(dim)]
        for j in range(n+1):
            s = [0]*dim
            # b runs through the signed binomial coefficients of the
            # j-th forward difference
            b = (-1) ** (j & 1)
            k = 1
            for i in range(j+1):
                for d in range(dim):
                    s[d] += b * ys[i][d]
                b = (b * (j-k+1)) // (-k)
                k += 1
            scale = h**(-j) / ctx.fac(j)
            for d in range(dim):
                s[d] = s[d] * scale
                ser[d].append(s[d])
    finally:
        ctx.prec = orig
    # Estimate radius for which we can get full accuracy.
    # Choose r so that |a_n| * r**n ~ tol for the last coefficient a_n.
    # XXX: do this right for zeros
    radius = ctx.one
    for ts in ser:
        if ts[-1]:
            radius = min(radius, ctx.nthroot(tol/abs(ts[-1]), n))
    radius /= 2  # XXX safety factor
    return ser, x0+radius
50
+
51
def odefun(ctx, F, x0, y0, tol=None, degree=None, method='taylor', verbose=False):
    r"""
    Returns a function `y(x) = [y_0(x), y_1(x), \ldots, y_n(x)]`
    that is a numerical solution of the `n+1`-dimensional first-order
    ordinary differential equation (ODE) system

    .. math ::

        y_0'(x) = F_0(x, [y_0(x), y_1(x), \ldots, y_n(x)])

        y_1'(x) = F_1(x, [y_0(x), y_1(x), \ldots, y_n(x)])

        \vdots

        y_n'(x) = F_n(x, [y_0(x), y_1(x), \ldots, y_n(x)])

    The derivatives are specified by the vector-valued function
    *F* that evaluates
    `[y_0', \ldots, y_n'] = F(x, [y_0, \ldots, y_n])`.
    The initial point `x_0` is specified by the scalar argument *x0*,
    and the initial value `y(x_0) = [y_0(x_0), \ldots, y_n(x_0)]` is
    specified by the vector argument *y0*.

    For convenience, if the system is one-dimensional, you may optionally
    provide just a scalar value for *y0*. In this case, *F* should accept
    a scalar *y* argument and return a scalar. The solution function
    *y* will return scalar values instead of length-1 vectors.

    Evaluation of the solution function `y(x)` is permitted
    for any `x \ge x_0`.

    A high-order ODE can be solved by transforming it into first-order
    vector form. This transformation is described in standard texts
    on ODEs. Examples will also be given below.

    **Options, speed and accuracy**

    By default, :func:`~mpmath.odefun` uses a high-order Taylor series
    method. For reasonably well-behaved problems, the solution will
    be fully accurate to within the working precision. Note that
    *F* must be possible to evaluate to very high precision
    for the generation of Taylor series to work.

    To get a faster but less accurate solution, you can set a large
    value for *tol* (which defaults roughly to *eps*). If you just
    want to plot the solution or perform a basic simulation,
    *tol = 0.01* is likely sufficient.

    The *degree* argument controls the degree of the solver (with
    *method='taylor'*, this is the degree of the Taylor series
    expansion). A higher degree means that a longer step can be taken
    before a new local solution must be generated from *F*,
    meaning that fewer steps are required to get from `x_0` to a given
    `x_1`. On the other hand, a higher degree also means that each
    local solution becomes more expensive (i.e., more evaluations of
    *F* are required per step, and at higher precision).

    The optimal setting therefore involves a tradeoff. Generally,
    decreasing the *degree* for Taylor series is likely to give faster
    solution at low precision, while increasing is likely to be better
    at higher precision.

    The function
    object returned by :func:`~mpmath.odefun` caches the solutions at all step
    points and uses polynomial interpolation between step points.
    Therefore, once `y(x_1)` has been evaluated for some `x_1`,
    `y(x)` can be evaluated very quickly for any `x_0 \le x \le x_1`.
    and continuing the evaluation up to `x_2 > x_1` is also fast.

    **Examples of first-order ODEs**

    We will solve the standard test problem `y'(x) = y(x), y(0) = 1`
    which has explicit solution `y(x) = \exp(x)`::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> f = odefun(lambda x, y: y, 0, 1)
        >>> for x in [0, 1, 2.5]:
        ...     print((f(x), exp(x)))
        ...
        (1.0, 1.0)
        (2.71828182845905, 2.71828182845905)
        (12.1824939607035, 12.1824939607035)

    The solution with high precision::

        >>> mp.dps = 50
        >>> f = odefun(lambda x, y: y, 0, 1)
        >>> f(1)
        2.7182818284590452353602874713526624977572470937
        >>> exp(1)
        2.7182818284590452353602874713526624977572470937

    Using the more general vectorized form, the test problem
    can be input as (note that *f* returns a 1-element vector)::

        >>> mp.dps = 15
        >>> f = odefun(lambda x, y: [y[0]], 0, [1])
        >>> f(1)
        [2.71828182845905]

    :func:`~mpmath.odefun` can solve nonlinear ODEs, which are generally
    impossible (and at best difficult) to solve analytically. As
    an example of a nonlinear ODE, we will solve `y'(x) = x \sin(y(x))`
    for `y(0) = \pi/2`. An exact solution happens to be known
    for this problem, and is given by
    `y(x) = 2 \tan^{-1}\left(\exp\left(x^2/2\right)\right)`::

        >>> f = odefun(lambda x, y: x*sin(y), 0, pi/2)
        >>> for x in [2, 5, 10]:
        ...     print((f(x), 2*atan(exp(mpf(x)**2/2))))
        ...
        (2.87255666284091, 2.87255666284091)
        (3.14158520028345, 3.14158520028345)
        (3.14159265358979, 3.14159265358979)

    If `F` is independent of `y`, an ODE can be solved using direct
    integration. We can therefore obtain a reference solution with
    :func:`~mpmath.quad`::

        >>> f = lambda x: (1+x**2)/(1+x**3)
        >>> g = odefun(lambda x, y: f(x), pi, 0)
        >>> g(2*pi)
        0.72128263801696
        >>> quad(f, [pi, 2*pi])
        0.72128263801696

    **Examples of second-order ODEs**

    We will solve the harmonic oscillator equation `y''(x) + y(x) = 0`.
    To do this, we introduce the helper functions `y_0 = y, y_1 = -y_0'`
    whereby the original equation can be written as `y_1' = y_0`. Put
    together, we get the first-order, two-dimensional vector ODE

    .. math ::

        \begin{cases}
        y_0' = -y_1 \\
        y_1' = y_0
        \end{cases}

    To get a well-defined IVP, we need two initial values. With
    `y(0) = y_0(0) = 1` and `-y'(0) = y_1(0) = 0`, the problem will of
    course be solved by `y(x) = y_0(x) = \cos(x)` and
    `-y'(x) = y_1(x) = \sin(x)`. We check this::

        >>> f = odefun(lambda x, y: [-y[1], y[0]], 0, [1, 0])
        >>> for x in [0, 1, 2.5, 10]:
        ...     nprint(f(x), 15)
        ...     nprint([cos(x), sin(x)], 15)
        ...     print("---")
        ...
        [1.0, 0.0]
        [1.0, 0.0]
        ---
        [0.54030230586814, 0.841470984807897]
        [0.54030230586814, 0.841470984807897]
        ---
        [-0.801143615546934, 0.598472144103957]
        [-0.801143615546934, 0.598472144103957]
        ---
        [-0.839071529076452, -0.54402111088937]
        [-0.839071529076452, -0.54402111088937]
        ---

    Note that we get both the sine and the cosine solutions
    simultaneously.

    **TODO**

    * Better automatic choice of degree and step size
    * Make determination of Taylor series convergence radius
      more robust
    * Allow solution for `x < x_0`
    * Allow solution for complex `x`
    * Test for difficult (ill-conditioned) problems
    * Implement Runge-Kutta and other algorithms

    """
    # NOTE: tol=0 is treated the same as tol=None (truthiness test)
    if tol:
        tol_prec = int(-ctx.log(tol, 2))+10
    else:
        tol_prec = ctx.prec+10
    degree = degree or (3 + int(3*ctx.dps/2.))
    workprec = ctx.prec + 40
    # Scalar y0 means a one-dimensional problem: wrap F and y0 so the
    # internals always see vectors, and unwrap on output.
    try:
        len(y0)
        return_vector = True
    except TypeError:
        F_ = F
        F = lambda x, y: [F_(x, y[0])]
        y0 = [y0]
        return_vector = False
    ser, xb = ode_taylor(ctx, F, x0, y0, tol_prec, degree)
    # Cached local solutions: series_boundaries[i] is the left endpoint of
    # the interval covered by series_data[i] = (series, xa, xb)
    series_boundaries = [x0, xb]
    series_data = [(ser, x0, xb)]
    # We will be working with vectors of Taylor series
    def mpolyval(ser, a):
        return [ctx.polyval(s[::-1], a) for s in ser]
    # Find nearest expansion point; compute if necessary
    def get_series(x):
        if x < x0:
            raise ValueError
        n = bisect(series_boundaries, x)
        if n < len(series_boundaries):
            # x falls inside an already-computed interval
            return series_data[n-1]
        # Otherwise march forward, one Taylor step at a time, until the
        # cached range covers x
        while 1:
            ser, xa, xb = series_data[-1]
            if verbose:
                print("Computing Taylor series for [%f, %f]" % (xa, xb))
            y = mpolyval(ser, xb-xa)
            xa = xb
            ser, xb = ode_taylor(ctx, F, xb, y, tol_prec, degree)
            series_boundaries.append(xb)
            series_data.append((ser, xa, xb))
            if x <= xb:
                return series_data[-1]
    # Evaluation function
    def interpolant(x):
        x = ctx.convert(x)
        orig = ctx.prec
        try:
            # Evaluate at elevated precision, then round the result
            # (unary +) back to the caller's precision
            ctx.prec = workprec
            ser, xa, xb = get_series(x)
            y = mpolyval(ser, x-xa)
        finally:
            ctx.prec = orig
        if return_vector:
            return [+yk for yk in y]
        else:
            return +y[0]
    return interpolant
283
+
284
# Attach the solver to the namespace class defined above.
ODEMethods.odefun = odefun

# Run this module's doctests when it is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
venv/lib/python3.10/site-packages/mpmath/calculus/optimization.py ADDED
@@ -0,0 +1,1102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import print_function
2
+
3
+ from copy import copy
4
+
5
+ from ..libmp.backend import xrange
6
+
7
class OptimizationMethods(object):
    # Namespace class for the root-finding machinery in this module; by
    # mpmath convention the first parameter is the context and is named
    # ctx rather than self.
    def __init__(ctx):
        pass
10
+
11
+ ##############
12
+ # 1D-SOLVERS #
13
+ ##############
14
+
15
class Newton:
    """
    1d-solver generating pairs of approximative root and error.

    Needs starting points x0 close to the root.

    Pro:

    * converges fast
    * sometimes more robust than secant with bad second starting point

    Contra:

    * converges slowly for multiple roots
    * needs first derivative
    * 2 function evaluations per iteration
    """
    maxsteps = 20

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        # Exactly one starting point is accepted.
        if len(x0) != 1:
            raise ValueError('expected 1 starting point, got %i' % len(x0))
        self.x0 = x0[0]
        self.f = f
        # Use the supplied derivative if given, otherwise differentiate
        # numerically through the context.
        if 'df' in kwargs:
            df = kwargs['df']
        else:
            def df(x):
                return self.ctx.diff(f, x)
        self.df = df

    def __iter__(self):
        func, deriv = self.f, self.df
        current = self.x0
        while True:
            # Standard Newton step: x_{k+1} = x_k - f(x_k)/f'(x_k)
            updated = current - func(current) / deriv(current)
            step = abs(updated - current)
            current = updated
            yield (updated, step)
57
+
58
class Secant:
    """
    1d-solver generating pairs of approximative root and error.

    Needs starting points x0 and x1 close to the root.
    x1 defaults to x0 + 0.25.

    Pro:

    * converges fast

    Contra:

    * converges slowly for multiple roots
    """
    maxsteps = 30

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        npoints = len(x0)
        if npoints == 1:
            self.x0 = x0[0]
            self.x1 = self.x0 + 0.25
        elif npoints == 2:
            self.x0 = x0[0]
            self.x1 = x0[1]
        else:
            raise ValueError('expected 1 or 2 starting points, got %i' % npoints)
        self.f = f

    def __iter__(self):
        f = self.f
        prev, cur = self.x0, self.x1
        fprev = f(prev)
        while True:
            fcur = f(cur)
            gap = cur - prev
            if not gap:
                # both points coincide: no further progress possible
                return
            slope = (fcur - fprev) / gap
            if not slope:
                # flat chord: the secant step is undefined
                return
            prev, cur = cur, cur - fcur/slope
            fprev = fcur
            yield cur, abs(gap)
103
+
104
class MNewton:
    """
    1d-solver generating pairs of approximative root and error.

    Needs starting point x0 close to the root.
    Uses modified Newton's method that converges fast regardless of the
    multiplicity of the root.

    Pro:

    * converges fast for multiple roots

    Contra:

    * needs first and second derivative of f
    * 3 function evaluations per iteration
    """
    maxsteps = 20

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        if not len(x0) == 1:
            raise ValueError('expected 1 starting point, got %i' % len(x0))
        self.x0 = x0[0]
        self.f = f
        # First derivative: user-supplied 'df' or numerical fallback.
        if not 'df' in kwargs:
            def df(x):
                return self.ctx.diff(f, x)
        else:
            df = kwargs['df']
        self.df = df
        # Second derivative: user-supplied 'd2f' or numerical fallback.
        if not 'd2f' in kwargs:
            def d2f(x):
                return self.ctx.diff(df, x)
        else:
            # BUG FIX: this previously read kwargs['df'], silently using the
            # first derivative in place of a user-supplied second derivative.
            d2f = kwargs['d2f']
        self.d2f = d2f

    def __iter__(self):
        x = self.x0
        f = self.f
        df = self.df
        d2f = self.d2f
        while True:
            prevx = x
            fx = f(x)
            if fx == 0:
                # exact root reached; stop iterating
                break
            dfx = df(x)
            d2fx = d2f(x)
            # x = x - F(x)/F'(x) with F(x) = f(x)/f'(x); F has only
            # simple roots, so plain Newton applied to F converges fast
            # even at multiple roots of f.
            x -= fx / (dfx - fx * d2fx / dfx)
            error = abs(x - prevx)
            yield x, error
158
+
159
class Halley:
    """
    1d-solver generating pairs of approximative root and error.

    Needs a starting point x0 close to the root.
    Uses Halley's method with cubic convergence rate.

    Pro:

    * converges even faster the Newton's method
    * useful when computing with *many* digits

    Contra:

    * needs first and second derivative of f
    * 3 function evaluations per iteration
    * converges slowly for multiple roots
    """

    maxsteps = 20

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        if not len(x0) == 1:
            raise ValueError('expected 1 starting point, got %i' % len(x0))
        self.x0 = x0[0]
        self.f = f
        # First derivative: user-supplied 'df' or numerical fallback.
        if not 'df' in kwargs:
            def df(x):
                return self.ctx.diff(f, x)
        else:
            df = kwargs['df']
        self.df = df
        # Second derivative: user-supplied 'd2f' or numerical fallback.
        if not 'd2f' in kwargs:
            def d2f(x):
                return self.ctx.diff(df, x)
        else:
            # BUG FIX: this previously read kwargs['df'], silently using the
            # first derivative in place of a user-supplied second derivative.
            d2f = kwargs['d2f']
        self.d2f = d2f

    def __iter__(self):
        x = self.x0
        f = self.f
        df = self.df
        d2f = self.d2f
        while True:
            prevx = x
            fx = f(x)
            dfx = df(x)
            d2fx = d2f(x)
            # Halley iteration: x -= 2 f f' / (2 f'^2 - f f'')
            x -= 2*fx*dfx / (2*dfx**2 - fx*d2fx)
            error = abs(x - prevx)
            yield x, error
212
+
213
class Muller:
    """
    1d-solver generating pairs of approximative root and error.

    Needs starting points x0, x1 and x2 close to the root.
    x1 defaults to x0 + 0.25; x2 to x1 + 0.25.
    Uses Muller's method that converges towards complex roots.

    Pro:

    * converges fast (somewhat faster than secant)
    * can find complex roots

    Contra:

    * converges slowly for multiple roots
    * may have complex values for real starting points and real roots

    http://en.wikipedia.org/wiki/Muller's_method
    """
    maxsteps = 30

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        points = list(x0)
        if len(points) not in (1, 2, 3):
            raise ValueError('expected 1, 2 or 3 starting points, got %i'
                             % len(points))
        # pad missing starting points by stepping 0.25 from the last one
        while len(points) < 3:
            points.append(points[-1] + 0.25)
        self.x0, self.x1, self.x2 = points
        self.f = f
        self.verbose = kwargs['verbose']

    def __iter__(self):
        f = self.f
        a, b, c = self.x0, self.x1, self.x2
        fa, fb, fc = f(a), f(b), f(c)
        while True:
            # TODO: maybe refactoring with function for divided differences
            # calculate divided differences
            d_cb = (fb - fc) / (b - c)
            d_ca = (fa - fc) / (a - c)
            d_ba = (fa - fb) / (a - b)
            w = d_cb + d_ca - d_ba
            d2 = (d_ba - d_cb) / (a - c)
            if w == 0 and d2 == 0:
                if self.verbose:
                    print('canceled with')
                    print('x0 =', a, ', x1 =', b, 'and x2 =', c)
                return
            # shift the window of points
            a, fa = b, fb
            b, fb = c, fc
            # denominator should be as large as possible => choose sign
            r = self.ctx.sqrt(w**2 - 4*fc*d2)
            if abs(w - r) > abs(w + r):
                r = -r
            c = c - 2*fc / (w + r)
            fc = f(c)
            yield c, abs(c - b)
288
+
289
+ # TODO: consider raising a ValueError when there's no sign change in a and b
290
class Bisection:
    """
    1d-solver generating pairs of approximative root and error.

    Uses bisection method to find a root of f in [a, b].
    Might fail for multiple roots (needs sign change).

    Pro:

    * robust and reliable

    Contra:

    * converges slowly
    * needs sign change
    """
    maxsteps = 100

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        if len(x0) != 2:
            raise ValueError('expected interval of 2 points, got %i' % len(x0))
        self.f = f
        self.a = x0[0]
        self.b = x0[1]

    def __iter__(self):
        f = self.f
        lo, hi = self.a, self.b
        width = hi - lo
        fhi = f(hi)
        while True:
            # exact midpoint via ldexp(x, -1) == x/2
            mid = self.ctx.ldexp(lo + hi, -1)
            fmid = f(mid)
            prod = fmid * fhi
            if prod < 0:
                # sign change in [mid, hi]
                lo = mid
            elif prod > 0:
                # sign change in [lo, mid]
                hi = mid
                fhi = fmid
            else:
                # hit the root exactly
                yield mid, self.ctx.zero
            width /= 2
            yield (lo + hi)/2, abs(width)
335
+
336
+ def _getm(method):
337
+ """
338
+ Return a function to calculate m for Illinois-like methods.
339
+ """
340
+ if method == 'illinois':
341
+ def getm(fz, fb):
342
+ return 0.5
343
+ elif method == 'pegasus':
344
+ def getm(fz, fb):
345
+ return fb/(fb + fz)
346
+ elif method == 'anderson':
347
+ def getm(fz, fb):
348
+ m = 1 - fz/fb
349
+ if m > 0:
350
+ return m
351
+ else:
352
+ return 0.5
353
+ else:
354
+ raise ValueError("method '%s' not recognized" % method)
355
+ return getm
356
+
357
+ class Illinois:
358
+ """
359
+ 1d-solver generating pairs of approximative root and error.
360
+
361
+ Uses Illinois method or similar to find a root of f in [a, b].
362
+ Might fail for multiple roots (needs sign change).
363
+ Combines bisect with secant (improved regula falsi).
364
+
365
+ The only difference between the methods is the scaling factor m, which is
366
+ used to ensure convergence (you can choose one using the 'method' keyword):
367
+
368
+ Illinois method ('illinois'):
369
+ m = 0.5
370
+
371
+ Pegasus method ('pegasus'):
372
+ m = fb/(fb + fz)
373
+
374
+ Anderson-Bjoerk method ('anderson'):
375
+ m = 1 - fz/fb if positive else 0.5
376
+
377
+ Pro:
378
+
379
+ * converges very fast
380
+
381
+ Contra:
382
+
383
+ * has problems with multiple roots
384
+ * needs sign change
385
+ """
386
+ maxsteps = 30
387
+
388
+ def __init__(self, ctx, f, x0, **kwargs):
389
+ self.ctx = ctx
390
+ if len(x0) != 2:
391
+ raise ValueError('expected interval of 2 points, got %i' % len(x0))
392
+ self.a = x0[0]
393
+ self.b = x0[1]
394
+ self.f = f
395
+ self.tol = kwargs['tol']
396
+ self.verbose = kwargs['verbose']
397
+ self.method = kwargs.get('method', 'illinois')
398
+ self.getm = _getm(self.method)
399
+ if self.verbose:
400
+ print('using %s method' % self.method)
401
+
402
+ def __iter__(self):
403
+ method = self.method
404
+ f = self.f
405
+ a = self.a
406
+ b = self.b
407
+ fa = f(a)
408
+ fb = f(b)
409
+ m = None
410
+ while True:
411
+ l = b - a
412
+ if l == 0:
413
+ break
414
+ s = (fb - fa) / l
415
+ z = a - fa/s
416
+ fz = f(z)
417
+ if abs(fz) < self.tol:
418
+ # TODO: better condition (when f is very flat)
419
+ if self.verbose:
420
+ print('canceled with z =', z)
421
+ yield z, l
422
+ break
423
+ if fz * fb < 0: # root in [z, b]
424
+ a = b
425
+ fa = fb
426
+ b = z
427
+ fb = fz
428
+ else: # root in [a, z]
429
+ m = self.getm(fz, fb)
430
+ b = z
431
+ fb = fz
432
+ fa = m*fa # scale down to ensure convergence
433
+ if self.verbose and m and not method == 'illinois':
434
+ print('m:', m)
435
+ yield (a + b)/2, abs(l)
436
+
437
def Pegasus(*args, **kwargs):
    """
    1d-solver generating pairs of approximative root and error.

    Uses Pegasus method to find a root of f in [a, b].
    Wrapper for illinois to use method='pegasus'.
    """
    kwargs['method'] = 'pegasus'
    solver = Illinois(*args, **kwargs)
    return solver
446
+
447
def Anderson(*args, **kwargs):
    """
    1d-solver generating pairs of approximative root and error.

    Uses Anderson-Bjoerk method to find a root of f in [a, b].
    Wrapper for illinois to use method='anderson'.
    """
    # DOC FIX: the docstring previously claimed method='pegasus'.
    kwargs['method'] = 'anderson'
    return Illinois(*args, **kwargs)
456
+
457
+ # TODO: check whether it's possible to combine it with Illinois stuff
458
class Ridder:
    """
    1d-solver generating pairs of approximative root and error.

    Ridders' method to find a root of f in [a, b].
    Is told to perform as well as Brent's method while being simpler.

    Pro:

    * very fast
    * simpler than Brent's method

    Contra:

    * two function evaluations per step
    * has problems with multiple roots
    * needs sign change

    http://en.wikipedia.org/wiki/Ridders'_method
    """
    maxsteps = 30

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        self.f = f
        if len(x0) != 2:
            raise ValueError('expected interval of 2 points, got %i' % len(x0))
        self.x1 = x0[0]
        self.x2 = x0[1]
        self.verbose = kwargs['verbose']
        self.tol = kwargs['tol']

    def __iter__(self):
        ctx = self.ctx
        f = self.f
        a, b = self.x1, self.x2
        fa, fb = f(a), f(b)
        while True:
            mid = 0.5*(a + b)
            fmid = f(mid)
            # Ridders' exponential-fit update of the midpoint
            cand = mid + (mid - a) * ctx.sign(fa - fb) * fmid / ctx.sqrt(fmid**2 - fa*fb)
            fcand = f(cand)
            if abs(fcand) < self.tol:
                # TODO: better condition (when f is very flat)
                if self.verbose:
                    print('canceled with f(x4) =', fcand)
                yield cand, abs(a - b)
                return
            if fcand * fb < 0:  # root in [x4, x2]
                a, fa = cand, fcand
            else:  # root in [x1, x4]
                b, fb = cand, fcand
            yield (a + b)/2, abs(a - b)
516
+
517
class ANewton:
    """
    EXPERIMENTAL 1d-solver generating pairs of approximative root and error.

    Uses Newton's method modified to use Steffensens method when convergence is
    slow. (I.e. for multiple roots.)
    """
    maxsteps = 20

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        if len(x0) != 1:
            raise ValueError('expected 1 starting point, got %i' % len(x0))
        self.x0 = x0[0]
        self.f = f
        # derivative: user-supplied 'df' or numerical fallback
        if 'df' in kwargs:
            df = kwargs['df']
        else:
            def df(x):
                return self.ctx.diff(f, x)
        self.df = df
        # fixpoint function of the plain Newton iteration
        def phi(x):
            return x - f(x) / df(x)
        self.phi = phi
        self.verbose = kwargs['verbose']

    def __iter__(self):
        x = self.x0
        phi = self.phi
        error = 0
        slow_count = 0
        while True:
            previous = x
            try:
                x = phi(x)
            except ZeroDivisionError:
                if self.verbose:
                    print('ZeroDivisionError: canceled with x =', x)
                return
            preverror = error
            error = abs(previous - x)
            # TODO: decide not to use convergence acceleration
            if error and abs(error - preverror) / error < 1:
                if self.verbose:
                    print('converging slowly')
                slow_count += 1
            if slow_count >= 3:
                # accelerate convergence
                phi = steffensen(phi)
                slow_count = 0
                if self.verbose:
                    print('accelerating convergence')
            yield x, error
572
+
573
+ # TODO: add Brent
574
+
575
+ ############################
576
+ # MULTIDIMENSIONAL SOLVERS #
577
+ ############################
578
+
579
def jacobian(ctx, f, x):
    """
    Calculate the Jacobian matrix of a function at the point x0.

    This is the first derivative of a vectorial function:

    f : R^m -> R^n with m >= n

    Rows correspond to components of f, columns to components of x;
    entries are one-sided finite-difference quotients with step
    sqrt(eps).
    """
    point = ctx.matrix(x)
    step = ctx.sqrt(ctx.eps)
    f0 = ctx.matrix(f(*point))
    rows = len(f0)
    cols = len(point)
    jac = ctx.matrix(rows, cols)
    for col in xrange(cols):
        shifted = point.copy()
        shifted[col] += step
        # column of difference quotients for this coordinate
        quotient = (ctx.matrix(f(*shifted)) - f0) / step
        for row in xrange(rows):
            jac[row, col] = quotient[row]
    return jac
600
+
601
+ # TODO: test with user-specified jacobian matrix
602
class MDNewton:
    """
    Find the root of a vector function numerically using Newton's method.

    f is a vector function representing a nonlinear equation system.

    x0 is the starting point close to the root.

    J is a function returning the Jacobian matrix for a point.

    Supports overdetermined systems.

    Use the 'norm' keyword to specify which norm to use. Defaults to max-norm.
    The function to calculate the Jacobian matrix can be given using the
    keyword 'J'. Otherwise it will be calculated numerically.

    Please note that this method converges only locally. Especially for high-
    dimensional systems it is not trivial to find a good starting point being
    close enough to the root.

    It is recommended to use a faster, low-precision solver from SciPy [1] or
    OpenOpt [2] to get an initial guess. Afterwards you can use this method for
    root-polishing to any precision.

    [1] http://scipy.org

    [2] http://openopt.org/Welcome
    """
    maxsteps = 10

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        self.f = f
        # Normalize the starting point to a column vector (ctx.matrix).
        if isinstance(x0, (tuple, list)):
            x0 = ctx.matrix(x0)
        assert x0.cols == 1, 'need a vector'
        self.x0 = x0
        # Use a caller-supplied Jacobian if given, otherwise fall back to
        # numerical differentiation via ctx.jacobian.
        if 'J' in kwargs:
            self.J = kwargs['J']
        else:
            def J(*x):
                return ctx.jacobian(f, x)
            self.J = J
        self.norm = kwargs['norm']
        self.verbose = kwargs['verbose']

    def __iter__(self):
        f = self.f
        x0 = self.x0
        norm = self.norm
        J = self.J
        fx = self.ctx.matrix(f(*x0))
        fxnorm = norm(fx)
        cancel = False
        while not cancel:
            # get direction of descent
            fxn = -fx
            Jx = J(*x0)
            # Solve J(x0) * s = -f(x0) for the Newton step (least squares
            # via lu_solve for overdetermined systems).
            s = self.ctx.lu_solve(Jx, fxn)
            if self.verbose:
                print('Jx:')
                print(Jx)
                print('s:', s)
            # damping step size TODO: better strategy (hard task)
            l = self.ctx.one
            x1 = x0 + s
            while True:
                if x1 == x0:
                    # step underflowed to zero: no further progress possible
                    if self.verbose:
                        print("canceled, won't get more excact")
                    cancel = True
                    break
                fx = self.ctx.matrix(f(*x1))
                newnorm = norm(fx)
                if newnorm < fxnorm:
                    # new x accepted
                    fxnorm = newnorm
                    x0 = x1
                    break
                # residual grew: halve the damping factor and retry
                l /= 2
                x1 = x0 + l*s
            yield (x0, fxnorm)
684
+
685
+ #############
686
+ # UTILITIES #
687
+ #############
688
+
689
# Map of user-facing solver name aliases to the classes implementing them.
str2solver = {
    'newton': Newton,
    'secant': Secant,
    'mnewton': MNewton,
    'halley': Halley,
    'muller': Muller,
    'bisect': Bisection,
    'illinois': Illinois,
    'pegasus': Pegasus,
    'anderson': Anderson,
    'ridder': Ridder,
    'anewton': ANewton,
    'mdnewton': MDNewton,
}
693
+
694
def findroot(ctx, f, x0, solver='secant', tol=None, verbose=False, verify=True, **kwargs):
    r"""
    Find an approximate solution to `f(x) = 0`, using *x0* as starting point or
    interval for *x*.

    Multidimensional overdetermined systems are supported.
    You can specify them using a function or a list of functions.

    Mathematically speaking, this function returns `x` such that
    `|f(x)|^2 \leq \mathrm{tol}` is true within the current working precision.
    If the computed value does not meet this criterion, an exception is raised.
    This exception can be disabled with *verify=False*.

    For interval arithmetic (``iv.findroot()``), please note that
    the returned interval ``x`` is not guaranteed to contain `f(x)=0`!
    It is only some `x` for which `|f(x)|^2 \leq \mathrm{tol}` certainly holds
    regardless of numerical error. This may be improved in the future.

    **Arguments**

    *f*
        one dimensional function
    *x0*
        starting point, several starting points or interval (depends on solver)
    *tol*
        the returned solution has an error smaller than this
    *verbose*
        print additional information for each iteration if true
    *verify*
        verify the solution and raise a ValueError if `|f(x)|^2 > \mathrm{tol}`
    *solver*
        a generator for *f* and *x0* returning approximative solution and error
    *maxsteps*
        after how many steps the solver will cancel
    *df*
        first derivative of *f* (used by some solvers)
    *d2f*
        second derivative of *f* (used by some solvers)
    *multidimensional*
        force multidimensional solving
    *J*
        Jacobian matrix of *f* (used by multidimensional solvers)
    *norm*
        used vector norm (used by multidimensional solvers)

    solver has to be callable with ``(f, x0, **kwargs)`` and return an generator
    yielding pairs of approximative solution and estimated error (which is
    expected to be positive).
    You can use the following string aliases:
    'secant', 'mnewton', 'halley', 'muller', 'illinois', 'pegasus', 'anderson',
    'ridder', 'anewton', 'bisect'

    See mpmath.calculus.optimization for their documentation.

    **Examples**

    The function :func:`~mpmath.findroot` locates a root of a given function using the
    secant method by default. A simple example use of the secant method is to
    compute `\pi` as the root of `\sin x` closest to `x_0 = 3`::

        >>> from mpmath import *
        >>> mp.dps = 30; mp.pretty = True
        >>> findroot(sin, 3)
        3.14159265358979323846264338328

    The secant method can be used to find complex roots of analytic functions,
    although it must in that case generally be given a nonreal starting value
    (or else it will never leave the real line)::

        >>> mp.dps = 15
        >>> findroot(lambda x: x**3 + 2*x + 1, j)
        (0.226698825758202 + 1.46771150871022j)

    A nice application is to compute nontrivial roots of the Riemann zeta
    function with many digits (good initial values are needed for convergence)::

        >>> mp.dps = 30
        >>> findroot(zeta, 0.5+14j)
        (0.5 + 14.1347251417346937904572519836j)

    The secant method can also be used as an optimization algorithm, by passing
    it a derivative of a function. The following example locates the positive
    minimum of the gamma function::

        >>> mp.dps = 20
        >>> findroot(lambda x: diff(gamma, x), 1)
        1.4616321449683623413

    Finally, a useful application is to compute inverse functions, such as the
    Lambert W function which is the inverse of `w e^w`, given the first
    term of the solution's asymptotic expansion as the initial value. In basic
    cases, this gives identical results to mpmath's built-in ``lambertw``
    function::

        >>> def lambert(x):
        ...     return findroot(lambda w: w*exp(w) - x, log(1+x))
        ...
        >>> mp.dps = 15
        >>> lambert(1); lambertw(1)
        0.567143290409784
        0.567143290409784
        >>> lambert(1000); lambert(1000)
        5.2496028524016
        5.2496028524016

    Multidimensional functions are also supported::

        >>> f = [lambda x1, x2: x1**2 + x2,
        ...      lambda x1, x2: 5*x1**2 - 3*x1 + 2*x2 - 3]
        >>> findroot(f, (0, 0))
        [-0.618033988749895]
        [-0.381966011250105]
        >>> findroot(f, (10, 10))
        [ 1.61803398874989]
        [-2.61803398874989]

    You can verify this by solving the system manually.

    Please note that the following (more general) syntax also works::

        >>> def f(x1, x2):
        ...     return x1**2 + x2, 5*x1**2 - 3*x1 + 2*x2 - 3
        ...
        >>> findroot(f, (0, 0))
        [-0.618033988749895]
        [-0.381966011250105]


    **Multiple roots**

    For multiple roots all methods of the Newtonian family (including secant)
    converge slowly. Consider this example::

        >>> f = lambda x: (x - 1)**99
        >>> findroot(f, 0.9, verify=False)
        0.918073542444929

    Even for a very close starting point the secant method converges very
    slowly. Use ``verbose=True`` to illustrate this.

    It is possible to modify Newton's method to make it converge regardless of
    the root's multiplicity::

        >>> findroot(f, -10, solver='mnewton')
        1.0

    This variant uses the first and second derivative of the function, which is
    not very efficient.

    Alternatively you can use an experimental Newtonian solver that keeps track
    of the speed of convergence and accelerates it using Steffensen's method if
    necessary::

        >>> findroot(f, -10, solver='anewton', verbose=True)
        x: -9.88888888888888888889
        error: 0.111111111111111111111
        converging slowly
        x: -9.77890011223344556678
        error: 0.10998877665544332211
        converging slowly
        x: -9.67002233332199662166
        error: 0.108877778911448945119
        converging slowly
        accelerating convergence
        x: -9.5622443299551077669
        error: 0.107778003366888854764
        converging slowly
        x: 0.99999999999999999214
        error: 10.562244329955107759
        x: 1.0
        error: 7.8598304758094664213e-18
        ZeroDivisionError: canceled with x = 1.0
        1.0

    **Complex roots**

    For complex roots it's recommended to use Muller's method as it converges
    even for real starting points very fast::

        >>> findroot(lambda x: x**4 + x + 1, (0, 1, 2), solver='muller')
        (0.727136084491197 + 0.934099289460529j)


    **Intersection methods**

    When you need to find a root in a known interval, it's highly recommended to
    use an intersection-based solver like ``'anderson'`` or ``'ridder'``.
    Usually they converge faster and more reliable. They have however problems
    with multiple roots and usually need a sign change to find a root::

        >>> findroot(lambda x: x**3, (-1, 1), solver='anderson')
        0.0

    Be careful with symmetric functions::

        >>> findroot(lambda x: x**2, (-1, 1), solver='anderson') #doctest:+ELLIPSIS
        Traceback (most recent call last):
          ...
        ZeroDivisionError

    It fails even for better starting points, because there is no sign change::

        >>> findroot(lambda x: x**2, (-1, .5), solver='anderson')
        Traceback (most recent call last):
          ...
        ValueError: Could not find root within given tolerance. (1.0 > 2.16840434497100886801e-19)
        Try another starting point or tweak arguments.

    """
    prec = ctx.prec
    try:
        # Work with 20 extra bits of precision; restored in finally below.
        ctx.prec += 20

        # initialize arguments
        if tol is None:
            tol = ctx.eps * 2**10

        kwargs['verbose'] = kwargs.get('verbose', verbose)

        # 'd1f' is accepted as an alias for 'df'
        if 'd1f' in kwargs:
            kwargs['df'] = kwargs['d1f']

        kwargs['tol'] = tol
        # Normalize x0 to a list of converted starting points.
        if isinstance(x0, (list, tuple)):
            x0 = [ctx.convert(x) for x in x0]
        else:
            x0 = [ctx.convert(x0)]

        if isinstance(solver, str):
            try:
                solver = str2solver[solver]
            except KeyError:
                raise ValueError('could not recognize solver')

        # accept list of functions
        if isinstance(f, (list, tuple)):
            f2 = copy(f)
            def tmp(*args):
                return [fn(*args) for fn in f2]
            f = tmp

        # detect multidimensional functions
        try:
            fx = f(*x0)
            multidimensional = isinstance(fx, (list, tuple, ctx.matrix))
        except TypeError:
            # f rejects unpacked arguments: treat it as one-dimensional
            fx = f(x0[0])
            multidimensional = False
        if 'multidimensional' in kwargs:
            multidimensional = kwargs['multidimensional']
        if multidimensional:
            # only one multidimensional solver available at the moment
            solver = MDNewton
            if not 'norm' in kwargs:
                norm = lambda x: ctx.norm(x, 'inf')
                kwargs['norm'] = norm
            else:
                norm = kwargs['norm']
        else:
            norm = abs

        # happily return starting point if it's a root
        if norm(fx) == 0:
            if multidimensional:
                return ctx.matrix(x0)
            else:
                return x0[0]

        # use solver
        iterations = solver(ctx, f, x0, **kwargs)
        if 'maxsteps' in kwargs:
            maxsteps = kwargs['maxsteps']
        else:
            maxsteps = iterations.maxsteps
        i = 0
        for x, error in iterations:
            if verbose:
                print('x: ', x)
                print('error:', error)
            i += 1
            # Stop on sufficiently small (relative) error or step limit.
            if error < tol * max(1, norm(x)) or i >= maxsteps:
                break
        else:
            # for/else: reached only when the solver stopped on its own;
            # with zero iterations there is no candidate x at all.
            if not i:
                raise ValueError('Could not find root using the given solver.\n'
                                 'Try another starting point or tweak arguments.')
        if not isinstance(x, (list, tuple, ctx.matrix)):
            xl = [x]
        else:
            xl = x
        if verify and norm(f(*xl))**2 > tol:  # TODO: better condition?
            raise ValueError('Could not find root within given tolerance. '
                             '(%s > %s)\n'
                             'Try another starting point or tweak arguments.'
                             % (norm(f(*xl))**2, tol))
        return x
    finally:
        ctx.prec = prec
992
+
993
+
994
def multiplicity(ctx, f, root, tol=None, maxsteps=10, **kwargs):
    """
    Return the multiplicity of a given root of f.

    Internally, numerical derivatives are used. This might be inefficient for
    higher order derivatives. Due to this, ``multiplicity`` cancels after
    evaluating 10 derivatives by default. You can specify the n-th derivative
    using the dnf keyword.

    >>> from mpmath import *
    >>> multiplicity(lambda x: sin(x) - 1, pi/2)
    2

    """
    if tol is None:
        tol = ctx.eps ** 0.8
    kwargs['d0f'] = f
    # Walk up the derivative orders until one is nonzero at the root.
    for order in xrange(maxsteps):
        key = 'd' + str(order) + 'f'
        if key in kwargs:
            deriv = kwargs[key]
        else:
            deriv = lambda t, k=order: ctx.diff(f, t, k)
        if not abs(deriv(root)) < tol:
            break
    return order
1020
+
1021
def steffensen(f):
    """
    linear convergent function -> quadratic convergent function

    Steffensen's method for quadratic convergence of a linear converging
    sequence.
    Don not use it for higher rates of convergence.
    It may even work for divergent sequences.

    Definition:
    F(x) = (x*f(f(x)) - f(x)**2) / (f(f(x)) - 2*f(x) + x)

    Example
    .......

    You can use Steffensen's method to accelerate a fixpoint iteration of linear
    (or less) convergence.

    x* is a fixpoint of the iteration x_{k+1} = phi(x_k) if x* = phi(x*). For
    phi(x) = x**2 there are two fixpoints: 0 and 1.

    Let's try Steffensen's method:

    >>> f = lambda x: x**2
    >>> from mpmath.calculus.optimization import steffensen
    >>> F = steffensen(f)
    >>> for x in [0.5, 0.9, 2.0]:
    ...     fx = Fx = x
    ...     for i in xrange(9):
    ...         try:
    ...             fx = f(fx)
    ...         except OverflowError:
    ...             pass
    ...         try:
    ...             Fx = F(Fx)
    ...         except ZeroDivisionError:
    ...             pass
    ...         print('%20g  %20g' % (fx, Fx))
    0.25                  -0.5
    0.0625                 0.1
    0.00390625        -0.0011236
    1.52588e-05        1.41691e-09
    2.32831e-10       -2.84465e-27
    5.42101e-20        2.30189e-80
    2.93874e-39       -1.2197e-239
    8.63617e-78                 0
    7.45834e-155                0
    0.81               1.02676
    0.6561             1.00134
    0.430467                 1
    0.185302                 1
    0.0343368                1
    0.00117902               1
    1.39008e-06              1
    1.93233e-12              1
    3.73392e-24              1
    4                      1.6
    16                  1.2962
    256                1.10194
    65536              1.01659
    4.29497e+09        1.00053
    1.84467e+19              1
    3.40282e+38              1
    1.15792e+77              1
    1.34078e+154             1

    Unmodified, the iteration converges only towards 0. Modified it converges
    not only much faster, it converges even to the repelling fixpoint 1.
    """
    def accelerated(x):
        # Aitken/Steffensen combination of x, f(x) and f(f(x)).
        fx = f(x)
        ffx = f(fx)
        return (x*ffx - fx*fx) / (ffx - 2*fx + x)
    return accelerated
1095
+
1096
# Expose the module-level routines as methods of the context mixin, so they
# are reachable as ctx.jacobian, ctx.findroot and ctx.multiplicity.
OptimizationMethods.jacobian = jacobian
OptimizationMethods.findroot = findroot
OptimizationMethods.multiplicity = multiplicity

if __name__ == '__main__':
    import doctest
    doctest.testmod()
venv/lib/python3.10/site-packages/mpmath/calculus/polynomials.py ADDED
@@ -0,0 +1,213 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ..libmp.backend import xrange
2
+ from .calculus import defun
3
+
4
+ #----------------------------------------------------------------------------#
5
+ # Polynomials #
6
+ #----------------------------------------------------------------------------#
7
+
8
+ # XXX: extra precision
9
@defun
def polyval(ctx, coeffs, x, derivative=False):
    r"""
    Evaluate, at the point `x`, the polynomial described by the
    coefficient list `[c_n, \ldots, c_2, c_1, c_0]`:

    .. math ::

        P(x) = c_n x^n + \ldots + c_2 x^2 + c_1 x + c_0.

    If *derivative=True* is set, the derivative `P'(x)` is computed in
    the same pass and the tuple `(P(x), P'(x))` is returned.

    >>> from mpmath import *
    >>> mp.pretty = True
    >>> polyval([3, 0, 2], 0.5)
    2.75
    >>> polyval([3, 0, 2], 0.5, derivative=True)
    (2.75, 3.0)

    Both the coefficients and the evaluation point may be real or
    complex numbers, in any combination.
    """
    # An empty coefficient list denotes the zero polynomial.
    if not coeffs:
        return ctx.zero
    # Horner's scheme; ``slope`` accumulates P'(x) alongside P(x).
    value = ctx.convert(coeffs[0])
    slope = ctx.zero
    for coefficient in coeffs[1:]:
        if derivative:
            # d/dx of the Horner recurrence: P'_{k} = P_{k-1} + x*P'_{k-1}
            slope = value + x*slope
        value = coefficient + x*value
    if derivative:
        return value, slope
    return value
45
+
46
@defun
def polyroots(ctx, coeffs, maxsteps=50, cleanup=True, extraprec=10,
        error=False, roots_init=None):
    """
    Computes all roots (real or complex) of a given polynomial.

    The roots are returned as a sorted list, where real roots appear first
    followed by complex conjugate roots as adjacent elements. The polynomial
    should be given as a list of coefficients, in the format used by
    :func:`~mpmath.polyval`. The leading coefficient must be nonzero.

    With *error=True*, :func:`~mpmath.polyroots` returns a tuple *(roots, err)*
    where *err* is an estimate of the maximum error among the computed roots.

    **Examples**

    Finding the three real roots of `x^3 - x^2 - 14x + 24`::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> nprint(polyroots([1,-1,-14,24]), 4)
        [-4.0, 2.0, 3.0]

    Finding the two complex conjugate roots of `4x^2 + 3x + 2`, with an
    error estimate::

        >>> roots, err = polyroots([4,3,2], error=True)
        >>> for r in roots:
        ...     print(r)
        ...
        (-0.375 + 0.59947894041409j)
        (-0.375 - 0.59947894041409j)
        >>>
        >>> err
        2.22044604925031e-16
        >>>
        >>> polyval([4,3,2], roots[0])
        (2.22044604925031e-16 + 0.0j)
        >>> polyval([4,3,2], roots[1])
        (2.22044604925031e-16 + 0.0j)

    The following example computes all the 5th roots of unity; that is,
    the roots of `x^5 - 1`::

        >>> mp.dps = 20
        >>> for r in polyroots([1, 0, 0, 0, 0, -1]):
        ...     print(r)
        ...
        1.0
        (-0.8090169943749474241 + 0.58778525229247312917j)
        (-0.8090169943749474241 - 0.58778525229247312917j)
        (0.3090169943749474241 + 0.95105651629515357212j)
        (0.3090169943749474241 - 0.95105651629515357212j)

    **Precision and conditioning**

    The roots are computed to the current working precision accuracy. If this
    accuracy cannot be achieved in ``maxsteps`` steps, then a
    ``NoConvergence`` exception is raised. The algorithm internally is using
    the current working precision extended by ``extraprec``. If
    ``NoConvergence`` was raised, that is caused either by not having enough
    extra precision to achieve convergence (in which case increasing
    ``extraprec`` should fix the problem) or too low ``maxsteps`` (in which
    case increasing ``maxsteps`` should fix the problem), or a combination of
    both.

    The user should always do a convergence study with regards to
    ``extraprec`` to ensure accurate results. It is possible to get
    convergence to a wrong answer with too low ``extraprec``.

    Provided there are no repeated roots, :func:`~mpmath.polyroots` can
    typically compute all roots of an arbitrary polynomial to high precision::

        >>> mp.dps = 60
        >>> for r in polyroots([1, 0, -10, 0, 1]):
        ...     print(r)
        ...
        -3.14626436994197234232913506571557044551247712918732870123249
        -0.317837245195782244725757617296174288373133378433432554879127
        0.317837245195782244725757617296174288373133378433432554879127
        3.14626436994197234232913506571557044551247712918732870123249
        >>>
        >>> sqrt(3) + sqrt(2)
        3.14626436994197234232913506571557044551247712918732870123249
        >>> sqrt(3) - sqrt(2)
        0.317837245195782244725757617296174288373133378433432554879127

    **Algorithm**

    :func:`~mpmath.polyroots` implements the Durand-Kerner method [1], which
    uses complex arithmetic to locate all roots simultaneously.
    The Durand-Kerner method can be viewed as approximately performing
    simultaneous Newton iteration for all the roots. In particular,
    the convergence to simple roots is quadratic, just like Newton's
    method.

    Although all roots are internally calculated using complex arithmetic, any
    root found to have an imaginary part smaller than the estimated numerical
    error is truncated to a real number (small real parts are also chopped).
    Real roots are placed first in the returned list, sorted by value. The
    remaining complex roots are sorted by their real parts so that conjugate
    roots end up next to each other.

    **References**

    1. http://en.wikipedia.org/wiki/Durand-Kerner_method

    """
    if len(coeffs) <= 1:
        # Degree 0 input: either the zero polynomial (an error, since it has
        # no well-defined root set) or a nonzero constant (no roots).
        if not coeffs or not coeffs[0]:
            raise ValueError("Input to polyroots must not be the zero polynomial")
        # Constant polynomial with no roots
        return []

    orig = ctx.prec
    # Convergence tolerance at the *caller's* precision (eps of the
    # unextended working precision).
    tol = +ctx.eps
    with ctx.extraprec(extraprec):
        deg = len(coeffs) - 1
        # Must be monic
        lead = ctx.convert(coeffs[0])
        if lead == 1:
            coeffs = [ctx.convert(c) for c in coeffs]
        else:
            coeffs = [c/lead for c in coeffs]
        f = lambda x: ctx.polyval(coeffs, x)
        if roots_init is None:
            # Standard Durand-Kerner starting values: powers of a fixed
            # complex number that is neither real nor on the unit circle.
            roots = [ctx.mpc((0.4+0.9j)**n) for n in xrange(deg)]
        else:
            # Use caller-supplied initial guesses, padding with the default
            # starting values if fewer than ``deg`` were given.
            roots = [None]*deg;
            deg_init = min(deg, len(roots_init))
            roots[:deg_init] = list(roots_init[:deg_init])
            roots[deg_init:] = [ctx.mpc((0.4+0.9j)**n) for n
                                in xrange(deg_init,deg)]
        err = [ctx.one for n in xrange(deg)]
        # Durand-Kerner iteration until convergence
        for step in xrange(maxsteps):
            if abs(max(err)) < tol:
                break
            for i in xrange(deg):
                p = roots[i]
                x = f(p)
                # Weierstrass correction: f(p) / prod_{j != i} (p - r_j).
                # Note that ``roots`` is updated in place, so later terms in
                # this sweep already use the refined values of earlier roots.
                for j in range(deg):
                    if i != j:
                        try:
                            x /= (p-roots[j])
                        except ZeroDivisionError:
                            # Two current approximations coincide; skip this
                            # factor rather than aborting the iteration.
                            continue
                roots[i] = p - x
                err[i] = abs(x)
        if abs(max(err)) >= tol:
            raise ctx.NoConvergence("Didn't converge in maxsteps=%d steps." \
                    % maxsteps)
        # Remove small real or imaginary parts
        if cleanup:
            for i in xrange(deg):
                if abs(roots[i]) < tol:
                    roots[i] = ctx.zero
                elif abs(ctx._im(roots[i])) < tol:
                    # Nearly real root: truncate to a real number.
                    roots[i] = roots[i].real
                elif abs(ctx._re(roots[i])) < tol:
                    # Nearly imaginary root: chop the small real part.
                    roots[i] = roots[i].imag * 1j
        # Real roots (im == 0) sort first by value; complex roots are then
        # ordered by |im| and re, placing conjugate pairs adjacently.
        roots.sort(key=lambda x: (abs(ctx._im(x)), ctx._re(x)))
    if error:
        err = max(err)
        # The reported error can never be smaller than one ulp at the
        # original (unextended) precision.
        err = max(err, ctx.ldexp(1, -orig+1))
        return [+r for r in roots], +err
    else:
        # Unary plus rounds each root back to the caller's precision.
        return [+r for r in roots]
venv/lib/python3.10/site-packages/mpmath/calculus/quadrature.py ADDED
@@ -0,0 +1,1115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+
3
+ from ..libmp.backend import xrange
4
+
5
class QuadratureRule(object):
    """
    Quadrature rules are implemented using this class, in order to
    simplify the code and provide a common infrastructure
    for tasks such as error estimation and node caching.

    You can implement a custom quadrature rule by subclassing
    :class:`QuadratureRule` and implementing the appropriate
    methods. The subclass can then be used by :func:`~mpmath.quad` by
    passing it as the *method* argument.

    :class:`QuadratureRule` instances are supposed to be singletons.
    :class:`QuadratureRule` therefore implements instance caching
    in :func:`~mpmath.__new__`.
    """

    def __init__(self, ctx):
        # The mpmath context supplying precision and arithmetic.
        self.ctx = ctx
        # Nodes for the standard interval [-1, 1], keyed by (degree, prec).
        self.standard_cache = {}
        # Nodes transformed to specific intervals, keyed by (a, b, degree, prec).
        self.transformed_cache = {}
        # Records which interval keys have been seen at least once; used to
        # decide whether transformed nodes are worth caching (see get_nodes).
        self.interval_count = {}

    def clear(self):
        """
        Delete cached node data.
        """
        self.standard_cache = {}
        self.transformed_cache = {}
        self.interval_count = {}

    def calc_nodes(self, degree, prec, verbose=False):
        r"""
        Compute nodes for the standard interval `[-1, 1]`. Subclasses
        should probably implement only this method, and use
        :func:`~mpmath.get_nodes` method to retrieve the nodes.
        """
        raise NotImplementedError

    def get_nodes(self, a, b, degree, prec, verbose=False):
        """
        Return nodes for given interval, degree and precision. The
        nodes are retrieved from a cache if already computed;
        otherwise they are computed by calling :func:`~mpmath.calc_nodes`
        and are then cached.

        Subclasses should probably not implement this method,
        but just implement :func:`~mpmath.calc_nodes` for the actual
        node computation.
        """
        key = (a, b, degree, prec)
        if key in self.transformed_cache:
            return self.transformed_cache[key]
        orig = self.ctx.prec
        try:
            # Work at slightly elevated precision so the node transform
            # does not lose accuracy; restored in the finally clause.
            self.ctx.prec = prec+20
            # Get nodes on standard interval
            if (degree, prec) in self.standard_cache:
                nodes = self.standard_cache[degree, prec]
            else:
                nodes = self.calc_nodes(degree, prec, verbose)
                self.standard_cache[degree, prec] = nodes
            # Transform to general interval
            nodes = self.transform_nodes(nodes, a, b, verbose)
            # Only cache transformed nodes the *second* time an interval is
            # requested, so one-off intervals don't bloat the cache.
            if key in self.interval_count:
                self.transformed_cache[key] = nodes
            else:
                self.interval_count[key] = True
        finally:
            self.ctx.prec = orig
        return nodes

    def transform_nodes(self, nodes, a, b, verbose=False):
        r"""
        Rescale standardized nodes (for `[-1, 1]`) to a general
        interval `[a, b]`. For a finite interval, a simple linear
        change of variables is used. Otherwise, the following
        transformations are used:

        .. math ::

            \lbrack a, \infty \rbrack : t = \frac{1}{x} + (a-1)

            \lbrack -\infty, b \rbrack : t = (b+1) - \frac{1}{x}

            \lbrack -\infty, \infty \rbrack : t = \frac{x}{\sqrt{1-x^2}}

        """
        ctx = self.ctx
        a = ctx.convert(a)
        b = ctx.convert(b)
        one = ctx.one
        if (a, b) == (-one, one):
            # Already the standard interval; nothing to do.
            return nodes
        half = ctx.mpf(0.5)
        new_nodes = []
        if ctx.isinf(a) or ctx.isinf(b):
            if (a, b) == (ctx.ninf, ctx.inf):
                p05 = -half
                for x, w in nodes:
                    # x -> x/sqrt(1-x^2); weight picks up the Jacobian
                    # 1/(1-x^2)^(3/2).
                    x2 = x*x
                    px1 = one-x2
                    spx1 = px1**p05
                    x = x*spx1
                    w *= spx1/px1
                    new_nodes.append((x, w))
            elif a == ctx.ninf:
                b1 = b+1
                for x, w in nodes:
                    # x -> (b+1) - 2/(x+1), mapping [-1,1] onto (-inf, b].
                    u = 2/(x+one)
                    x = b1-u
                    w *= half*u**2
                    new_nodes.append((x, w))
            elif b == ctx.inf:
                a1 = a-1
                for x, w in nodes:
                    # x -> (a-1) + 2/(x+1), mapping [-1,1] onto [a, inf).
                    u = 2/(x+one)
                    x = a1+u
                    w *= half*u**2
                    new_nodes.append((x, w))
            elif a == ctx.inf or b == ctx.ninf:
                # Reversed orientation: transform the flipped interval and
                # negate the weights.
                return [(x,-w) for (x,w) in self.transform_nodes(nodes, b, a, verbose)]
            else:
                raise NotImplementedError
        else:
            # Simple linear change of variables
            C = (b-a)/2
            D = (b+a)/2
            for x, w in nodes:
                new_nodes.append((D+C*x, C*w))
        return new_nodes

    def guess_degree(self, prec):
        """
        Given a desired precision `p` in bits, estimate the degree `m`
        of the quadrature required to accomplish full accuracy for
        typical integrals. By default, :func:`~mpmath.quad` will perform up
        to `m` iterations. The value of `m` should be a slight
        overestimate, so that "slightly bad" integrals can be dealt
        with automatically using a few extra iterations. On the
        other hand, it should not be too big, so :func:`~mpmath.quad` can
        quit within a reasonable amount of time when it is given
        an "unsolvable" integral.

        The default formula used by :func:`~mpmath.guess_degree` is tuned
        for both :class:`TanhSinh` and :class:`GaussLegendre`.
        The output is roughly as follows:

            +---------+---------+
            | `p`     | `m`     |
            +=========+=========+
            | 50      | 6       |
            +---------+---------+
            | 100     | 7       |
            +---------+---------+
            | 500     | 10      |
            +---------+---------+
            | 3000    | 12      |
            +---------+---------+

        This formula is based purely on a limited amount of
        experimentation and will sometimes be wrong.
        """
        # Expected degree
        # XXX: use mag
        g = int(4 + max(0, self.ctx.log(prec/30.0, 2)))
        # Reasonable "worst case"
        g += 2
        return g

    def estimate_error(self, results, prec, epsilon):
        r"""
        Given results from integrations `[I_1, I_2, \ldots, I_k]` done
        with a quadrature of rule of degree `1, 2, \ldots, k`, estimate
        the error of `I_k`.

        For `k = 2`, we estimate  `|I_{\infty}-I_2|` as `|I_2-I_1|`.

        For `k > 2`, we extrapolate `|I_{\infty}-I_k| \approx |I_{k+1}-I_k|`
        from `|I_k-I_{k-1}|` and `|I_k-I_{k-2}|` under the assumption
        that each degree increment roughly doubles the accuracy of
        the quadrature rule (this is true for both :class:`TanhSinh`
        and :class:`GaussLegendre`). The extrapolation formula is given
        by Borwein, Bailey & Girgensohn. Although not very conservative,
        this method seems to be very robust in practice.
        """
        if len(results) == 2:
            return abs(results[0]-results[1])
        try:
            if results[-1] == results[-2] == results[-3]:
                # Three identical values: converged as far as we can tell.
                return self.ctx.zero
            D1 = self.ctx.log(abs(results[-1]-results[-2]), 10)
            D2 = self.ctx.log(abs(results[-1]-results[-3]), 10)
        except ValueError:
            # log of a zero difference; fall back to the target tolerance.
            return epsilon
        D3 = -prec
        # BBG extrapolation: err ~ 10^min(0, max(D1^2/D2, 2*D1, -prec)).
        D4 = min(0, max(D1**2/D2, 2*D1, D3))
        return self.ctx.mpf(10) ** int(D4)

    def summation(self, f, points, prec, epsilon, max_degree, verbose=False):
        """
        Main integration function. Computes the 1D integral over
        the interval specified by *points*. For each subinterval,
        performs quadrature of degree from 1 up to *max_degree*
        until :func:`~mpmath.estimate_error` signals convergence.

        :func:`~mpmath.summation` transforms each subintegration to
        the standard interval and then calls :func:`~mpmath.sum_next`.
        """
        ctx = self.ctx
        I = total_err = ctx.zero
        for i in xrange(len(points)-1):
            a, b = points[i], points[i+1]
            if a == b:
                continue
            # XXX: we could use a single variable transformation,
            # but this is not good in practice. We get better accuracy
            # by having 0 as an endpoint.
            if (a, b) == (ctx.ninf, ctx.inf):
                # Fold (-inf, inf) to (0, inf) by integrating f(x)+f(-x).
                _f = f
                f = lambda x: _f(-x) + _f(x)
                a, b = (ctx.zero, ctx.inf)
            results = []
            err = ctx.zero
            for degree in xrange(1, max_degree+1):
                nodes = self.get_nodes(a, b, degree, prec, verbose)
                if verbose:
                    print("Integrating from %s to %s (degree %s of %s)" % \
                        (ctx.nstr(a), ctx.nstr(b), degree, max_degree))
                result = self.sum_next(f, nodes, degree, prec, results, verbose)
                results.append(result)
                if degree > 1:
                    err = self.estimate_error(results, prec, epsilon)
                    if verbose:
                        print("Estimated error:", ctx.nstr(err), " epsilon:", ctx.nstr(epsilon), " result: ", ctx.nstr(result))
                    if err <= epsilon:
                        break
            I += results[-1]
            total_err += err
        if total_err > epsilon:
            if verbose:
                print("Failed to reach full accuracy. Estimated error:", ctx.nstr(total_err))
        return I, total_err

    def sum_next(self, f, nodes, degree, prec, previous, verbose=False):
        r"""
        Evaluates the step sum `\sum w_k f(x_k)` where the *nodes* list
        contains the `(w_k, x_k)` pairs.

        :func:`~mpmath.summation` will supply the list *results* of
        values computed by :func:`~mpmath.sum_next` at previous degrees, in
        case the quadrature rule is able to reuse them.
        """
        return self.ctx.fdot((w, f(x)) for (x,w) in nodes)
260
class TanhSinh(QuadratureRule):
    r"""
    This class implements "tanh-sinh" or "doubly exponential"
    quadrature. This quadrature rule is based on the Euler-Maclaurin
    integral formula. By performing a change of variables involving
    nested exponentials / hyperbolic functions (hence the name), the
    derivatives at the endpoints vanish rapidly. Since the error term
    in the Euler-Maclaurin formula depends on the derivatives at the
    endpoints, a simple step sum becomes extremely accurate. In
    practice, this means that doubling the number of evaluation
    points roughly doubles the number of accurate digits.

    Comparison to Gauss-Legendre:
      * Initial computation of nodes is usually faster
      * Handles endpoint singularities better
      * Handles infinite integration intervals better
      * Is slower for smooth integrands once nodes have been computed

    The implementation of the tanh-sinh algorithm is based on the
    description given in Borwein, Bailey & Girgensohn, "Experimentation
    in Mathematics - Computational Paths to Discovery", A K Peters,
    2003, pages 312-313. In the present implementation, a few
    improvements have been made:

      * A more efficient scheme is used to compute nodes (exploiting
        recurrence for the exponential function)
      * The nodes are computed successively instead of all at once

    **References**

    * [Bailey]_
    * http://users.cs.dal.ca/~jborwein/tanh-sinh.pdf

    """

    def sum_next(self, f, nodes, degree, prec, previous, verbose=False):
        """
        Step sum for tanh-sinh quadrature of degree `m`. We exploit the
        fact that half of the abscissas at degree `m` are precisely the
        abscissas from degree `m-1`. Thus reusing the result from
        the previous level allows a 2x speedup.
        """
        # Step length h = 2^(-m).
        h = self.ctx.mpf(2)**(-degree)
        # Abscissas overlap, so reusing saves half of the time
        if previous:
            # previous[-1] was h' * S' with h' = 2*h; divide it out to
            # recover the raw sum before rescaling by the new h.
            S = previous[-1]/(h*2)
        else:
            S = self.ctx.zero
        S += self.ctx.fdot((w,f(x)) for (x,w) in nodes)
        return h*S

    def calc_nodes(self, degree, prec, verbose=False):
        r"""
        The abscissas and weights for tanh-sinh quadrature of degree
        `m` are given by

        .. math::

            x_k = \tanh(\pi/2 \sinh(t_k))

            w_k = \pi/2 \cosh(t_k) / \cosh(\pi/2 \sinh(t_k))^2

        where `t_k = t_0 + hk` for a step length `h \sim 2^{-m}`. The
        list of nodes is actually infinite, but the weights die off so
        rapidly that only a few are needed.
        """
        ctx = self.ctx
        nodes = []

        # Guard digits for the node computation; precision is restored
        # at the end of this method.
        extra = 20
        ctx.prec += extra
        # Stop generating nodes once |x - 1| drops below this tolerance
        # (the weights are then negligible).
        tol = ctx.ldexp(1, -prec-10)
        pi4 = ctx.pi/4

        # For simplicity, we work in steps h = 1/2^n, with the first point
        # offset so that we can reuse the sum from the previous degree

        # We define degree 1 to include the "degree 0" steps, including
        # the point x = 0. (It doesn't work well otherwise; not sure why.)
        t0 = ctx.ldexp(1, -degree)
        if degree == 1:
            #nodes.append((mpf(0), pi4))
            #nodes.append((-mpf(0), pi4))
            nodes.append((ctx.zero, ctx.pi/2))
            h = t0
        else:
            h = t0*2

        # Since h is fixed, we can compute the next exponential
        # by simply multiplying by exp(h)
        expt0 = ctx.exp(t0)
        a = pi4 * expt0
        b = pi4 / expt0
        udelta = ctx.exp(h)
        urdelta = 1/udelta

        for k in xrange(0, 20*2**degree+1):
            # Reference implementation:
            # t = t0 + k*h
            # x = tanh(pi/2 * sinh(t))
            # w = pi/2 * cosh(t) / cosh(pi/2 * sinh(t))**2

            # Fast implementation. Note that c = exp(pi/2 * sinh(t))
            c = ctx.exp(a-b)
            d = 1/c
            co = (c+d)/2
            si = (c-d)/2
            x = si / co
            w = (a+b) / co**2
            diff = abs(x-1)
            if diff <= tol:
                break

            # Nodes come in symmetric pairs +/- x with equal weight.
            nodes.append((x, w))
            nodes.append((-x, w))

            # Advance a = pi/4 * exp(t) and b = pi/4 * exp(-t) one step.
            a *= udelta
            b *= urdelta

            if verbose and k % 300 == 150:
                # Note: the number displayed is rather arbitrary. Should
                # figure out how to print something that looks more like a
                # percentage
                print("Calculating nodes:", ctx.nstr(-ctx.log(diff, 10) / prec))

        ctx.prec -= extra
        return nodes
388
+
389
class GaussLegendre(QuadratureRule):
    r"""
    This class implements Gauss-Legendre quadrature, which is
    exceptionally efficient for polynomials and polynomial-like (i.e.
    very smooth) integrands.

    The abscissas and weights are given by roots and values of
    Legendre polynomials, which are the orthogonal polynomials
    on `[-1, 1]` with respect to the unit weight
    (see :func:`~mpmath.legendre`).

    In this implementation, we take the "degree" `m` of the quadrature
    to denote a Gauss-Legendre rule of degree `3 \cdot 2^m` (following
    Borwein, Bailey & Girgensohn). This way we get quadratic, rather
    than linear, convergence as the degree is incremented.

    Comparison to tanh-sinh quadrature:
      * Is faster for smooth integrands once nodes have been computed
      * Initial computation of nodes is usually slower
      * Handles endpoint singularities worse
      * Handles infinite integration intervals worse

    """

    def calc_nodes(self, degree, prec, verbose=False):
        r"""
        Calculates the abscissas and weights for Gauss-Legendre
        quadrature of degree of given degree (actually `3 \cdot 2^m`).
        """
        ctx = self.ctx
        # It is important that the epsilon is set lower than the
        # "real" epsilon
        epsilon = ctx.ldexp(1, -prec-8)
        # Fairly high precision might be required for accurate
        # evaluation of the roots
        orig = ctx.prec
        ctx.prec = int(prec*1.5)
        if degree == 1:
            # Degree 1 is the classical 3-point rule with known
            # closed-form nodes and weights.
            x = ctx.sqrt(ctx.mpf(3)/5)
            w = ctx.mpf(5)/9
            nodes = [(-x,w),(ctx.zero,ctx.mpf(8)/9),(x,w)]
            ctx.prec = orig
            return nodes
        nodes = []
        n = 3*2**(degree-1)
        # By symmetry only the roots in (0, 1] need to be computed.
        upto = n//2 + 1
        for j in xrange(1, upto):
            # Asymptotic formula for the roots
            r = ctx.mpf(math.cos(math.pi*(j-0.25)/(n+0.5)))
            # Newton iteration
            while 1:
                t1, t2 = 1, 0
                # Evaluates the Legendre polynomial using its defining
                # recurrence relation
                for j1 in xrange(1,n+1):
                    t3, t2, t1 = t2, t1, ((2*j1-1)*r*t1 - (j1-1)*t2)/j1
                # t4 is the derivative P_n'(r); Newton step r -= P_n(r)/P_n'(r).
                t4 = n*(r*t1-t2)/(r**2-1)
                a = t1/t4
                r = r - a
                if abs(a) < epsilon:
                    break
            x = r
            # Standard Gauss-Legendre weight formula: 2/((1-x^2) P_n'(x)^2).
            w = 2/((1-r**2)*t4**2)
            if verbose and j % 30 == 15:
                print("Computing nodes (%i of %i)" % (j, upto))
            nodes.append((x, w))
            nodes.append((-x, w))
        ctx.prec = orig
        return nodes
458
+
459
+ class QuadratureMethods(object):
460
+
461
+ def __init__(ctx, *args, **kwargs):
462
+ ctx._gauss_legendre = GaussLegendre(ctx)
463
+ ctx._tanh_sinh = TanhSinh(ctx)
464
+
465
+ def quad(ctx, f, *points, **kwargs):
466
+ r"""
467
+ Computes a single, double or triple integral over a given
468
+ 1D interval, 2D rectangle, or 3D cuboid. A basic example::
469
+
470
+ >>> from mpmath import *
471
+ >>> mp.dps = 15; mp.pretty = True
472
+ >>> quad(sin, [0, pi])
473
+ 2.0
474
+
475
+ A basic 2D integral::
476
+
477
+ >>> f = lambda x, y: cos(x+y/2)
478
+ >>> quad(f, [-pi/2, pi/2], [0, pi])
479
+ 4.0
480
+
481
+ **Interval format**
482
+
483
+ The integration range for each dimension may be specified
484
+ using a list or tuple. Arguments are interpreted as follows:
485
+
486
+ ``quad(f, [x1, x2])`` -- calculates
487
+ `\int_{x_1}^{x_2} f(x) \, dx`
488
+
489
+ ``quad(f, [x1, x2], [y1, y2])`` -- calculates
490
+ `\int_{x_1}^{x_2} \int_{y_1}^{y_2} f(x,y) \, dy \, dx`
491
+
492
+ ``quad(f, [x1, x2], [y1, y2], [z1, z2])`` -- calculates
493
+ `\int_{x_1}^{x_2} \int_{y_1}^{y_2} \int_{z_1}^{z_2} f(x,y,z)
494
+ \, dz \, dy \, dx`
495
+
496
+ Endpoints may be finite or infinite. An interval descriptor
497
+ may also contain more than two points. In this
498
+ case, the integration is split into subintervals, between
499
+ each pair of consecutive points. This is useful for
500
+ dealing with mid-interval discontinuities, or integrating
501
+ over large intervals where the function is irregular or
502
+ oscillates.
503
+
504
+ **Options**
505
+
506
+ :func:`~mpmath.quad` recognizes the following keyword arguments:
507
+
508
+ *method*
509
+ Chooses integration algorithm (described below).
510
+ *error*
511
+ If set to true, :func:`~mpmath.quad` returns `(v, e)` where `v` is the
512
+ integral and `e` is the estimated error.
513
+ *maxdegree*
514
+ Maximum degree of the quadrature rule to try before
515
+ quitting.
516
+ *verbose*
517
+ Print details about progress.
518
+
519
+ **Algorithms**
520
+
521
+ Mpmath presently implements two integration algorithms: tanh-sinh
522
+ quadrature and Gauss-Legendre quadrature. These can be selected
523
+ using *method='tanh-sinh'* or *method='gauss-legendre'* or by
524
+ passing the classes *method=TanhSinh*, *method=GaussLegendre*.
525
+ The functions :func:`~mpmath.quadts` and :func:`~mpmath.quadgl` are also available
526
+ as shortcuts.
527
+
528
+ Both algorithms have the property that doubling the number of
529
+ evaluation points roughly doubles the accuracy, so both are ideal
530
+ for high precision quadrature (hundreds or thousands of digits).
531
+
532
+ At high precision, computing the nodes and weights for the
533
+ integration can be expensive (more expensive than computing the
534
+ function values). To make repeated integrations fast, nodes
535
+ are automatically cached.
536
+
537
+ The advantages of the tanh-sinh algorithm are that it tends to
538
+ handle endpoint singularities well, and that the nodes are cheap
539
+ to compute on the first run. For these reasons, it is used by
540
+ :func:`~mpmath.quad` as the default algorithm.
541
+
542
+ Gauss-Legendre quadrature often requires fewer function
543
+ evaluations, and is therefore often faster for repeated use, but
544
+ the algorithm does not handle endpoint singularities as well and
545
+ the nodes are more expensive to compute. Gauss-Legendre quadrature
546
+ can be a better choice if the integrand is smooth and repeated
547
+ integrations are required (e.g. for multiple integrals).
548
+
549
+ See the documentation for :class:`TanhSinh` and
550
+ :class:`GaussLegendre` for additional details.
551
+
552
+ **Examples of 1D integrals**
553
+
554
+ Intervals may be infinite or half-infinite. The following two
555
+ examples evaluate the limits of the inverse tangent function
556
+ (`\int 1/(1+x^2) = \tan^{-1} x`), and the Gaussian integral
557
+ `\int_{\infty}^{\infty} \exp(-x^2)\,dx = \sqrt{\pi}`::
558
+
559
+ >>> mp.dps = 15
560
+ >>> quad(lambda x: 2/(x**2+1), [0, inf])
561
+ 3.14159265358979
562
+ >>> quad(lambda x: exp(-x**2), [-inf, inf])**2
563
+ 3.14159265358979
564
+
565
+ Integrals can typically be resolved to high precision.
566
+ The following computes 50 digits of `\pi` by integrating the
567
+ area of the half-circle defined by `x^2 + y^2 \le 1`,
568
+ `-1 \le x \le 1`, `y \ge 0`::
569
+
570
+ >>> mp.dps = 50
571
+ >>> 2*quad(lambda x: sqrt(1-x**2), [-1, 1])
572
+ 3.1415926535897932384626433832795028841971693993751
573
+
574
+ One can just as well compute 1000 digits (output truncated)::
575
+
576
+ >>> mp.dps = 1000
577
+ >>> 2*quad(lambda x: sqrt(1-x**2), [-1, 1]) #doctest:+ELLIPSIS
578
+ 3.141592653589793238462643383279502884...216420199
579
+
580
+ Complex integrals are supported. The following computes
581
+ a residue at `z = 0` by integrating counterclockwise along the
582
+ diamond-shaped path from `1` to `+i` to `-1` to `-i` to `1`::
583
+
584
+ >>> mp.dps = 15
585
+ >>> chop(quad(lambda z: 1/z, [1,j,-1,-j,1]))
586
+ (0.0 + 6.28318530717959j)
587
+
588
+ **Examples of 2D and 3D integrals**
589
+
590
+ Here are several nice examples of analytically solvable
591
+ 2D integrals (taken from MathWorld [1]) that can be evaluated
592
+ to high precision fairly rapidly by :func:`~mpmath.quad`::
593
+
594
+ >>> mp.dps = 30
595
+ >>> f = lambda x, y: (x-1)/((1-x*y)*log(x*y))
596
+ >>> quad(f, [0, 1], [0, 1])
597
+ 0.577215664901532860606512090082
598
+ >>> +euler
599
+ 0.577215664901532860606512090082
600
+
601
+ >>> f = lambda x, y: 1/sqrt(1+x**2+y**2)
602
+ >>> quad(f, [-1, 1], [-1, 1])
603
+ 3.17343648530607134219175646705
604
+ >>> 4*log(2+sqrt(3))-2*pi/3
605
+ 3.17343648530607134219175646705
606
+
607
+ >>> f = lambda x, y: 1/(1-x**2 * y**2)
608
+ >>> quad(f, [0, 1], [0, 1])
609
+ 1.23370055013616982735431137498
610
+ >>> pi**2 / 8
611
+ 1.23370055013616982735431137498
612
+
613
+ >>> quad(lambda x, y: 1/(1-x*y), [0, 1], [0, 1])
614
+ 1.64493406684822643647241516665
615
+ >>> pi**2 / 6
616
+ 1.64493406684822643647241516665
617
+
618
+ Multiple integrals may be done over infinite ranges::
619
+
620
+ >>> mp.dps = 15
621
+ >>> print(quad(lambda x,y: exp(-x-y), [0, inf], [1, inf]))
622
+ 0.367879441171442
623
+ >>> print(1/e)
624
+ 0.367879441171442
625
+
626
+ For nonrectangular areas, one can call :func:`~mpmath.quad` recursively.
627
+ For example, we can replicate the earlier example of calculating
628
+ `\pi` by integrating over the unit-circle, and actually use double
629
+ quadrature to actually measure the area circle::
630
+
631
+ >>> f = lambda x: quad(lambda y: 1, [-sqrt(1-x**2), sqrt(1-x**2)])
632
+ >>> quad(f, [-1, 1])
633
+ 3.14159265358979
634
+
635
+ Here is a simple triple integral::
636
+
637
+ >>> mp.dps = 15
638
+ >>> f = lambda x,y,z: x*y/(1+z)
639
+ >>> quad(f, [0,1], [0,1], [1,2], method='gauss-legendre')
640
+ 0.101366277027041
641
+ >>> (log(3)-log(2))/4
642
+ 0.101366277027041
643
+
644
+ **Singularities**
645
+
646
+ Both tanh-sinh and Gauss-Legendre quadrature are designed to
647
+ integrate smooth (infinitely differentiable) functions. Neither
648
+ algorithm copes well with mid-interval singularities (such as
649
+ mid-interval discontinuities in `f(x)` or `f'(x)`).
650
+ The best solution is to split the integral into parts::
651
+
652
+ >>> mp.dps = 15
653
+ >>> quad(lambda x: abs(sin(x)), [0, 2*pi]) # Bad
654
+ 3.99900894176779
655
+ >>> quad(lambda x: abs(sin(x)), [0, pi, 2*pi]) # Good
656
+ 4.0
657
+
658
+ The tanh-sinh rule often works well for integrands having a
659
+ singularity at one or both endpoints::
660
+
661
+ >>> mp.dps = 15
662
+ >>> quad(log, [0, 1], method='tanh-sinh') # Good
663
+ -1.0
664
+ >>> quad(log, [0, 1], method='gauss-legendre') # Bad
665
+ -0.999932197413801
666
+
667
+ However, the result may still be inaccurate for some functions::
668
+
669
+ >>> quad(lambda x: 1/sqrt(x), [0, 1], method='tanh-sinh')
670
+ 1.99999999946942
671
+
672
+ This problem is not due to the quadrature rule per se, but to
673
+ numerical amplification of errors in the nodes. The problem can be
674
+ circumvented by temporarily increasing the precision::
675
+
676
+ >>> mp.dps = 30
677
+ >>> a = quad(lambda x: 1/sqrt(x), [0, 1], method='tanh-sinh')
678
+ >>> mp.dps = 15
679
+ >>> +a
680
+ 2.0
681
+
682
+ **Highly variable functions**
683
+
684
+ For functions that are smooth (in the sense of being infinitely
685
+ differentiable) but contain sharp mid-interval peaks or many
686
+ "bumps", :func:`~mpmath.quad` may fail to provide full accuracy. For
687
+ example, with default settings, :func:`~mpmath.quad` is able to integrate
688
+ `\sin(x)` accurately over an interval of length 100 but not over
689
+ length 1000::
690
+
691
+ >>> quad(sin, [0, 100]); 1-cos(100) # Good
692
+ 0.137681127712316
693
+ 0.137681127712316
694
+ >>> quad(sin, [0, 1000]); 1-cos(1000) # Bad
695
+ -37.8587612408485
696
+ 0.437620923709297
697
+
698
+ One solution is to break the integration into 10 intervals of
699
+ length 100::
700
+
701
+ >>> quad(sin, linspace(0, 1000, 10)) # Good
702
+ 0.437620923709297
703
+
704
+ Another is to increase the degree of the quadrature::
705
+
706
+ >>> quad(sin, [0, 1000], maxdegree=10) # Also good
707
+ 0.437620923709297
708
+
709
+ Whether splitting the interval or increasing the degree is
710
+ more efficient differs from case to case. Another example is the
711
+ function `1/(1+x^2)`, which has a sharp peak centered around
712
+ `x = 0`::
713
+
714
+ >>> f = lambda x: 1/(1+x**2)
715
+ >>> quad(f, [-100, 100]) # Bad
716
+ 3.64804647105268
717
+ >>> quad(f, [-100, 100], maxdegree=10) # Good
718
+ 3.12159332021646
719
+ >>> quad(f, [-100, 0, 100]) # Also good
720
+ 3.12159332021646
721
+
722
+ **References**
723
+
724
+ 1. http://mathworld.wolfram.com/DoubleIntegral.html
725
+
726
+ """
727
+ rule = kwargs.get('method', 'tanh-sinh')
728
+ if type(rule) is str:
729
+ if rule == 'tanh-sinh':
730
+ rule = ctx._tanh_sinh
731
+ elif rule == 'gauss-legendre':
732
+ rule = ctx._gauss_legendre
733
+ else:
734
+ raise ValueError("unknown quadrature rule: %s" % rule)
735
+ else:
736
+ rule = rule(ctx)
737
+ verbose = kwargs.get('verbose')
738
+ dim = len(points)
739
+ orig = prec = ctx.prec
740
+ epsilon = ctx.eps/8
741
+ m = kwargs.get('maxdegree') or rule.guess_degree(prec)
742
+ points = [ctx._as_points(p) for p in points]
743
+ try:
744
+ ctx.prec += 20
745
+ if dim == 1:
746
+ v, err = rule.summation(f, points[0], prec, epsilon, m, verbose)
747
+ elif dim == 2:
748
+ v, err = rule.summation(lambda x: \
749
+ rule.summation(lambda y: f(x,y), \
750
+ points[1], prec, epsilon, m)[0],
751
+ points[0], prec, epsilon, m, verbose)
752
+ elif dim == 3:
753
+ v, err = rule.summation(lambda x: \
754
+ rule.summation(lambda y: \
755
+ rule.summation(lambda z: f(x,y,z), \
756
+ points[2], prec, epsilon, m)[0],
757
+ points[1], prec, epsilon, m)[0],
758
+ points[0], prec, epsilon, m, verbose)
759
+ else:
760
+ raise NotImplementedError("quadrature must have dim 1, 2 or 3")
761
+ finally:
762
+ ctx.prec = orig
763
+ if kwargs.get("error"):
764
+ return +v, err
765
+ return +v
766
+
767
+ def quadts(ctx, *args, **kwargs):
768
+ """
769
+ Performs tanh-sinh quadrature. The call
770
+
771
+ quadts(func, *points, ...)
772
+
773
+ is simply a shortcut for:
774
+
775
+ quad(func, *points, ..., method=TanhSinh)
776
+
777
+ For example, a single integral and a double integral:
778
+
779
+ quadts(lambda x: exp(cos(x)), [0, 1])
780
+ quadts(lambda x, y: exp(cos(x+y)), [0, 1], [0, 1])
781
+
782
+ See the documentation for quad for information about how points
783
+ arguments and keyword arguments are parsed.
784
+
785
+ See documentation for TanhSinh for algorithmic information about
786
+ tanh-sinh quadrature.
787
+ """
788
+ kwargs['method'] = 'tanh-sinh'
789
+ return ctx.quad(*args, **kwargs)
790
+
791
+ def quadgl(ctx, *args, **kwargs):
792
+ """
793
+ Performs Gauss-Legendre quadrature. The call
794
+
795
+ quadgl(func, *points, ...)
796
+
797
+ is simply a shortcut for:
798
+
799
+ quad(func, *points, ..., method=GaussLegendre)
800
+
801
+ For example, a single integral and a double integral:
802
+
803
+ quadgl(lambda x: exp(cos(x)), [0, 1])
804
+ quadgl(lambda x, y: exp(cos(x+y)), [0, 1], [0, 1])
805
+
806
+ See the documentation for quad for information about how points
807
+ arguments and keyword arguments are parsed.
808
+
809
+ See documentation for TanhSinh for algorithmic information about
810
+ tanh-sinh quadrature.
811
+ """
812
+ kwargs['method'] = 'gauss-legendre'
813
+ return ctx.quad(*args, **kwargs)
814
+
815
+ def quadosc(ctx, f, interval, omega=None, period=None, zeros=None):
816
+ r"""
817
+ Calculates
818
+
819
+ .. math ::
820
+
821
+ I = \int_a^b f(x) dx
822
+
823
+ where at least one of `a` and `b` is infinite and where
824
+ `f(x) = g(x) \cos(\omega x + \phi)` for some slowly
825
+ decreasing function `g(x)`. With proper input, :func:`~mpmath.quadosc`
826
+ can also handle oscillatory integrals where the oscillation
827
+ rate is different from a pure sine or cosine wave.
828
+
829
+ In the standard case when `|a| < \infty, b = \infty`,
830
+ :func:`~mpmath.quadosc` works by evaluating the infinite series
831
+
832
+ .. math ::
833
+
834
+ I = \int_a^{x_1} f(x) dx +
835
+ \sum_{k=1}^{\infty} \int_{x_k}^{x_{k+1}} f(x) dx
836
+
837
+ where `x_k` are consecutive zeros (alternatively
838
+ some other periodic reference point) of `f(x)`.
839
+ Accordingly, :func:`~mpmath.quadosc` requires information about the
840
+ zeros of `f(x)`. For a periodic function, you can specify
841
+ the zeros by either providing the angular frequency `\omega`
842
+ (*omega*) or the *period* `2 \pi/\omega`. In general, you can
843
+ specify the `n`-th zero by providing the *zeros* arguments.
844
+ Below is an example of each::
845
+
846
+ >>> from mpmath import *
847
+ >>> mp.dps = 15; mp.pretty = True
848
+ >>> f = lambda x: sin(3*x)/(x**2+1)
849
+ >>> quadosc(f, [0,inf], omega=3)
850
+ 0.37833007080198
851
+ >>> quadosc(f, [0,inf], period=2*pi/3)
852
+ 0.37833007080198
853
+ >>> quadosc(f, [0,inf], zeros=lambda n: pi*n/3)
854
+ 0.37833007080198
855
+ >>> (ei(3)*exp(-3)-exp(3)*ei(-3))/2 # Computed by Mathematica
856
+ 0.37833007080198
857
+
858
+ Note that *zeros* was specified to multiply `n` by the
859
+ *half-period*, not the full period. In theory, it does not matter
860
+ whether each partial integral is done over a half period or a full
861
+ period. However, if done over half-periods, the infinite series
862
+ passed to :func:`~mpmath.nsum` becomes an *alternating series* and this
863
+ typically makes the extrapolation much more efficient.
864
+
865
+ Here is an example of an integration over the entire real line,
866
+ and a half-infinite integration starting at `-\infty`::
867
+
868
+ >>> quadosc(lambda x: cos(x)/(1+x**2), [-inf, inf], omega=1)
869
+ 1.15572734979092
870
+ >>> pi/e
871
+ 1.15572734979092
872
+ >>> quadosc(lambda x: cos(x)/x**2, [-inf, -1], period=2*pi)
873
+ -0.0844109505595739
874
+ >>> cos(1)+si(1)-pi/2
875
+ -0.0844109505595738
876
+
877
+ Of course, the integrand may contain a complex exponential just as
878
+ well as a real sine or cosine::
879
+
880
+ >>> quadosc(lambda x: exp(3*j*x)/(1+x**2), [-inf,inf], omega=3)
881
+ (0.156410688228254 + 0.0j)
882
+ >>> pi/e**3
883
+ 0.156410688228254
884
+ >>> quadosc(lambda x: exp(3*j*x)/(2+x+x**2), [-inf,inf], omega=3)
885
+ (0.00317486988463794 - 0.0447701735209082j)
886
+ >>> 2*pi/sqrt(7)/exp(3*(j+sqrt(7))/2)
887
+ (0.00317486988463794 - 0.0447701735209082j)
888
+
889
+ **Non-periodic functions**
890
+
891
+ If `f(x) = g(x) h(x)` for some function `h(x)` that is not
892
+ strictly periodic, *omega* or *period* might not work, and it might
893
+ be necessary to use *zeros*.
894
+
895
+ A notable exception can be made for Bessel functions which, though not
896
+ periodic, are "asymptotically periodic" in a sufficiently strong sense
897
+ that the sum extrapolation will work out::
898
+
899
+ >>> quadosc(j0, [0, inf], period=2*pi)
900
+ 1.0
901
+ >>> quadosc(j1, [0, inf], period=2*pi)
902
+ 1.0
903
+
904
+ More properly, one should provide the exact Bessel function zeros::
905
+
906
+ >>> j0zero = lambda n: findroot(j0, pi*(n-0.25))
907
+ >>> quadosc(j0, [0, inf], zeros=j0zero)
908
+ 1.0
909
+
910
+ For an example where *zeros* becomes necessary, consider the
911
+ complete Fresnel integrals
912
+
913
+ .. math ::
914
+
915
+ \int_0^{\infty} \cos x^2\,dx = \int_0^{\infty} \sin x^2\,dx
916
+ = \sqrt{\frac{\pi}{8}}.
917
+
918
+ Although the integrands do not decrease in magnitude as
919
+ `x \to \infty`, the integrals are convergent since the oscillation
920
+ rate increases (causing consecutive periods to asymptotically
921
+ cancel out). These integrals are virtually impossible to calculate
922
+ to any kind of accuracy using standard quadrature rules. However,
923
+ if one provides the correct asymptotic distribution of zeros
924
+ (`x_n \sim \sqrt{n}`), :func:`~mpmath.quadosc` works::
925
+
926
+ >>> mp.dps = 30
927
+ >>> f = lambda x: cos(x**2)
928
+ >>> quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n))
929
+ 0.626657068657750125603941321203
930
+ >>> f = lambda x: sin(x**2)
931
+ >>> quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n))
932
+ 0.626657068657750125603941321203
933
+ >>> sqrt(pi/8)
934
+ 0.626657068657750125603941321203
935
+
936
+ (Interestingly, these integrals can still be evaluated if one
937
+ places some other constant than `\pi` in the square root sign.)
938
+
939
+ In general, if `f(x) \sim g(x) \cos(h(x))`, the zeros follow
940
+ the inverse-function distribution `h^{-1}(x)`::
941
+
942
+ >>> mp.dps = 15
943
+ >>> f = lambda x: sin(exp(x))
944
+ >>> quadosc(f, [1,inf], zeros=lambda n: log(n))
945
+ -0.25024394235267
946
+ >>> pi/2-si(e)
947
+ -0.250243942352671
948
+
949
+ **Non-alternating functions**
950
+
951
+ If the integrand oscillates around a positive value, without
952
+ alternating signs, the extrapolation might fail. A simple trick
953
+ that sometimes works is to multiply or divide the frequency by 2::
954
+
955
+ >>> f = lambda x: 1/x**2+sin(x)/x**4
956
+ >>> quadosc(f, [1,inf], omega=1) # Bad
957
+ 1.28642190869861
958
+ >>> quadosc(f, [1,inf], omega=0.5) # Perfect
959
+ 1.28652953559617
960
+ >>> 1+(cos(1)+ci(1)+sin(1))/6
961
+ 1.28652953559617
962
+
963
+ **Fast decay**
964
+
965
+ :func:`~mpmath.quadosc` is primarily useful for slowly decaying
966
+ integrands. If the integrand decreases exponentially or faster,
967
+ :func:`~mpmath.quad` will likely handle it without trouble (and generally be
968
+ much faster than :func:`~mpmath.quadosc`)::
969
+
970
+ >>> quadosc(lambda x: cos(x)/exp(x), [0, inf], omega=1)
971
+ 0.5
972
+ >>> quad(lambda x: cos(x)/exp(x), [0, inf])
973
+ 0.5
974
+
975
+ """
976
+ a, b = ctx._as_points(interval)
977
+ a = ctx.convert(a)
978
+ b = ctx.convert(b)
979
+ if [omega, period, zeros].count(None) != 2:
980
+ raise ValueError( \
981
+ "must specify exactly one of omega, period, zeros")
982
+ if a == ctx.ninf and b == ctx.inf:
983
+ s1 = ctx.quadosc(f, [a, 0], omega=omega, zeros=zeros, period=period)
984
+ s2 = ctx.quadosc(f, [0, b], omega=omega, zeros=zeros, period=period)
985
+ return s1 + s2
986
+ if a == ctx.ninf:
987
+ if zeros:
988
+ return ctx.quadosc(lambda x:f(-x), [-b,-a], lambda n: zeros(-n))
989
+ else:
990
+ return ctx.quadosc(lambda x:f(-x), [-b,-a], omega=omega, period=period)
991
+ if b != ctx.inf:
992
+ raise ValueError("quadosc requires an infinite integration interval")
993
+ if not zeros:
994
+ if omega:
995
+ period = 2*ctx.pi/omega
996
+ zeros = lambda n: n*period/2
997
+ #for n in range(1,10):
998
+ # p = zeros(n)
999
+ # if p > a:
1000
+ # break
1001
+ #if n >= 9:
1002
+ # raise ValueError("zeros do not appear to be correctly indexed")
1003
+ n = 1
1004
+ s = ctx.quadgl(f, [a, zeros(n)])
1005
+ def term(k):
1006
+ return ctx.quadgl(f, [zeros(k), zeros(k+1)])
1007
+ s += ctx.nsum(term, [n, ctx.inf])
1008
+ return s
1009
+
1010
+ def quadsubdiv(ctx, f, interval, tol=None, maxintervals=None, **kwargs):
1011
+ """
1012
+ Computes the integral of *f* over the interval or path specified
1013
+ by *interval*, using :func:`~mpmath.quad` together with adaptive
1014
+ subdivision of the interval.
1015
+
1016
+ This function gives an accurate answer for some integrals where
1017
+ :func:`~mpmath.quad` fails::
1018
+
1019
+ >>> from mpmath import *
1020
+ >>> mp.dps = 15; mp.pretty = True
1021
+ >>> quad(lambda x: abs(sin(x)), [0, 2*pi])
1022
+ 3.99900894176779
1023
+ >>> quadsubdiv(lambda x: abs(sin(x)), [0, 2*pi])
1024
+ 4.0
1025
+ >>> quadsubdiv(sin, [0, 1000])
1026
+ 0.437620923709297
1027
+ >>> quadsubdiv(lambda x: 1/(1+x**2), [-100, 100])
1028
+ 3.12159332021646
1029
+ >>> quadsubdiv(lambda x: ceil(x), [0, 100])
1030
+ 5050.0
1031
+ >>> quadsubdiv(lambda x: sin(x+exp(x)), [0,8])
1032
+ 0.347400172657248
1033
+
1034
+ The argument *maxintervals* can be set to limit the permissible
1035
+ subdivision::
1036
+
1037
+ >>> quadsubdiv(lambda x: sin(x**2), [0,100], maxintervals=5, error=True)
1038
+ (-5.40487904307774, 5.011)
1039
+ >>> quadsubdiv(lambda x: sin(x**2), [0,100], maxintervals=100, error=True)
1040
+ (0.631417921866934, 1.10101120134116e-17)
1041
+
1042
+ Subdivision does not guarantee a correct answer since, the error
1043
+ estimate on subintervals may be inaccurate::
1044
+
1045
+ >>> quadsubdiv(lambda x: sech(10*x-2)**2 + sech(100*x-40)**4 + sech(1000*x-600)**6, [0,1], error=True)
1046
+ (0.210802735500549, 1.0001111101e-17)
1047
+ >>> mp.dps = 20
1048
+ >>> quadsubdiv(lambda x: sech(10*x-2)**2 + sech(100*x-40)**4 + sech(1000*x-600)**6, [0,1], error=True)
1049
+ (0.21080273550054927738, 2.200000001e-24)
1050
+
1051
+ The second answer is correct. We can get an accurate result at lower
1052
+ precision by forcing a finer initial subdivision::
1053
+
1054
+ >>> mp.dps = 15
1055
+ >>> quadsubdiv(lambda x: sech(10*x-2)**2 + sech(100*x-40)**4 + sech(1000*x-600)**6, linspace(0,1,5))
1056
+ 0.210802735500549
1057
+
1058
+ The following integral is too oscillatory for convergence, but we can get a
1059
+ reasonable estimate::
1060
+
1061
+ >>> v, err = fp.quadsubdiv(lambda x: fp.sin(1/x), [0,1], error=True)
1062
+ >>> round(v, 6), round(err, 6)
1063
+ (0.504067, 1e-06)
1064
+ >>> sin(1) - ci(1)
1065
+ 0.504067061906928
1066
+
1067
+ """
1068
+ queue = []
1069
+ for i in range(len(interval)-1):
1070
+ queue.append((interval[i], interval[i+1]))
1071
+ total = ctx.zero
1072
+ total_error = ctx.zero
1073
+ if maxintervals is None:
1074
+ maxintervals = 10 * ctx.prec
1075
+ count = 0
1076
+ quad_args = kwargs.copy()
1077
+ quad_args["verbose"] = False
1078
+ quad_args["error"] = True
1079
+ if tol is None:
1080
+ tol = +ctx.eps
1081
+ orig = ctx.prec
1082
+ try:
1083
+ ctx.prec += 5
1084
+ while queue:
1085
+ a, b = queue.pop()
1086
+ s, err = ctx.quad(f, [a, b], **quad_args)
1087
+ if kwargs.get("verbose"):
1088
+ print("subinterval", count, a, b, err)
1089
+ if err < tol or count > maxintervals:
1090
+ total += s
1091
+ total_error += err
1092
+ else:
1093
+ count += 1
1094
+ if count == maxintervals and kwargs.get("verbose"):
1095
+ print("warning: number of intervals exceeded maxintervals")
1096
+ if a == -ctx.inf and b == ctx.inf:
1097
+ m = 0
1098
+ elif a == -ctx.inf:
1099
+ m = min(b-1, 2*b)
1100
+ elif b == ctx.inf:
1101
+ m = max(a+1, 2*a)
1102
+ else:
1103
+ m = a + (b - a) / 2
1104
+ queue.append((a, m))
1105
+ queue.append((m, b))
1106
+ finally:
1107
+ ctx.prec = orig
1108
+ if kwargs.get("error"):
1109
+ return +total, +total_error
1110
+ else:
1111
+ return +total
1112
+
1113
+ if __name__ == '__main__':
1114
+ import doctest
1115
+ doctest.testmod()
venv/lib/python3.10/site-packages/mpmath/libmp/__init__.py ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .libmpf import (prec_to_dps, dps_to_prec, repr_dps,
2
+ round_down, round_up, round_floor, round_ceiling, round_nearest,
3
+ to_pickable, from_pickable, ComplexResult,
4
+ fzero, fnzero, fone, fnone, ftwo, ften, fhalf, fnan, finf, fninf,
5
+ math_float_inf, round_int, normalize, normalize1,
6
+ from_man_exp, from_int, to_man_exp, to_int, mpf_ceil, mpf_floor,
7
+ mpf_nint, mpf_frac,
8
+ from_float, from_npfloat, from_Decimal, to_float, from_rational, to_rational, to_fixed,
9
+ mpf_rand, mpf_eq, mpf_hash, mpf_cmp, mpf_lt, mpf_le, mpf_gt, mpf_ge,
10
+ mpf_pos, mpf_neg, mpf_abs, mpf_sign, mpf_add, mpf_sub, mpf_sum,
11
+ mpf_mul, mpf_mul_int, mpf_shift, mpf_frexp,
12
+ mpf_div, mpf_rdiv_int, mpf_mod, mpf_pow_int,
13
+ mpf_perturb,
14
+ to_digits_exp, to_str, str_to_man_exp, from_str, from_bstr, to_bstr,
15
+ mpf_sqrt, mpf_hypot)
16
+
17
+ from .libmpc import (mpc_one, mpc_zero, mpc_two, mpc_half,
18
+ mpc_is_inf, mpc_is_infnan, mpc_to_str, mpc_to_complex, mpc_hash,
19
+ mpc_conjugate, mpc_is_nonzero, mpc_add, mpc_add_mpf,
20
+ mpc_sub, mpc_sub_mpf, mpc_pos, mpc_neg, mpc_shift, mpc_abs,
21
+ mpc_arg, mpc_floor, mpc_ceil, mpc_nint, mpc_frac, mpc_mul, mpc_square,
22
+ mpc_mul_mpf, mpc_mul_imag_mpf, mpc_mul_int,
23
+ mpc_div, mpc_div_mpf, mpc_reciprocal, mpc_mpf_div,
24
+ complex_int_pow, mpc_pow, mpc_pow_mpf, mpc_pow_int,
25
+ mpc_sqrt, mpc_nthroot, mpc_cbrt, mpc_exp, mpc_log, mpc_cos, mpc_sin,
26
+ mpc_tan, mpc_cos_pi, mpc_sin_pi, mpc_cosh, mpc_sinh, mpc_tanh,
27
+ mpc_atan, mpc_acos, mpc_asin, mpc_asinh, mpc_acosh, mpc_atanh,
28
+ mpc_fibonacci, mpf_expj, mpf_expjpi, mpc_expj, mpc_expjpi,
29
+ mpc_cos_sin, mpc_cos_sin_pi)
30
+
31
+ from .libelefun import (ln2_fixed, mpf_ln2, ln10_fixed, mpf_ln10,
32
+ pi_fixed, mpf_pi, e_fixed, mpf_e, phi_fixed, mpf_phi,
33
+ degree_fixed, mpf_degree,
34
+ mpf_pow, mpf_nthroot, mpf_cbrt, log_int_fixed, agm_fixed,
35
+ mpf_log, mpf_log_hypot, mpf_exp, mpf_cos_sin, mpf_cos, mpf_sin, mpf_tan,
36
+ mpf_cos_sin_pi, mpf_cos_pi, mpf_sin_pi, mpf_cosh_sinh,
37
+ mpf_cosh, mpf_sinh, mpf_tanh, mpf_atan, mpf_atan2, mpf_asin,
38
+ mpf_acos, mpf_asinh, mpf_acosh, mpf_atanh, mpf_fibonacci)
39
+
40
+ from .libhyper import (NoConvergence, make_hyp_summator,
41
+ mpf_erf, mpf_erfc, mpf_ei, mpc_ei, mpf_e1, mpc_e1, mpf_expint,
42
+ mpf_ci_si, mpf_ci, mpf_si, mpc_ci, mpc_si, mpf_besseljn,
43
+ mpc_besseljn, mpf_agm, mpf_agm1, mpc_agm, mpc_agm1,
44
+ mpf_ellipk, mpc_ellipk, mpf_ellipe, mpc_ellipe)
45
+
46
+ from .gammazeta import (catalan_fixed, mpf_catalan,
47
+ khinchin_fixed, mpf_khinchin, glaisher_fixed, mpf_glaisher,
48
+ apery_fixed, mpf_apery, euler_fixed, mpf_euler, mertens_fixed,
49
+ mpf_mertens, twinprime_fixed, mpf_twinprime,
50
+ mpf_bernoulli, bernfrac, mpf_gamma_int,
51
+ mpf_factorial, mpc_factorial, mpf_gamma, mpc_gamma,
52
+ mpf_loggamma, mpc_loggamma, mpf_rgamma, mpc_rgamma,
53
+ mpf_harmonic, mpc_harmonic, mpf_psi0, mpc_psi0,
54
+ mpf_psi, mpc_psi, mpf_zeta_int, mpf_zeta, mpc_zeta,
55
+ mpf_altzeta, mpc_altzeta, mpf_zetasum, mpc_zetasum)
56
+
57
+ from .libmpi import (mpi_str,
58
+ mpi_from_str, mpi_to_str,
59
+ mpi_eq, mpi_ne,
60
+ mpi_lt, mpi_le, mpi_gt, mpi_ge,
61
+ mpi_add, mpi_sub, mpi_delta, mpi_mid,
62
+ mpi_pos, mpi_neg, mpi_abs, mpi_mul, mpi_div, mpi_exp,
63
+ mpi_log, mpi_sqrt, mpi_pow_int, mpi_pow, mpi_cos_sin,
64
+ mpi_cos, mpi_sin, mpi_tan, mpi_cot,
65
+ mpi_atan, mpi_atan2,
66
+ mpci_pos, mpci_neg, mpci_add, mpci_sub, mpci_mul, mpci_div, mpci_pow,
67
+ mpci_abs, mpci_pow, mpci_exp, mpci_log, mpci_cos, mpci_sin,
68
+ mpi_gamma, mpci_gamma, mpi_loggamma, mpci_loggamma,
69
+ mpi_rgamma, mpci_rgamma, mpi_factorial, mpci_factorial)
70
+
71
+ from .libintmath import (trailing, bitcount, numeral, bin_to_radix,
72
+ isqrt, isqrt_small, isqrt_fast, sqrt_fixed, sqrtrem, ifib, ifac,
73
+ list_primes, isprime, moebius, gcd, eulernum, stirling1, stirling2)
74
+
75
+ from .backend import (gmpy, sage, BACKEND, STRICT, MPZ, MPZ_TYPE,
76
+ MPZ_ZERO, MPZ_ONE, MPZ_TWO, MPZ_THREE, MPZ_FIVE, int_types,
77
+ HASH_MODULUS, HASH_BITS)
venv/lib/python3.10/site-packages/mpmath/libmp/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (6.98 kB). View file
 
venv/lib/python3.10/site-packages/mpmath/libmp/__pycache__/backend.cpython-310.pyc ADDED
Binary file (1.71 kB). View file
 
venv/lib/python3.10/site-packages/mpmath/libmp/__pycache__/gammazeta.cpython-310.pyc ADDED
Binary file (41.4 kB). View file
 
venv/lib/python3.10/site-packages/mpmath/libmp/__pycache__/libelefun.cpython-310.pyc ADDED
Binary file (30.2 kB). View file
 
venv/lib/python3.10/site-packages/mpmath/libmp/__pycache__/libhyper.cpython-310.pyc ADDED
Binary file (25.2 kB). View file
 
venv/lib/python3.10/site-packages/mpmath/libmp/__pycache__/libintmath.cpython-310.pyc ADDED
Binary file (14.3 kB). View file
 
venv/lib/python3.10/site-packages/mpmath/libmp/__pycache__/libmpc.cpython-310.pyc ADDED
Binary file (22.4 kB). View file
 
venv/lib/python3.10/site-packages/mpmath/libmp/__pycache__/libmpf.cpython-310.pyc ADDED
Binary file (30.4 kB). View file
 
venv/lib/python3.10/site-packages/mpmath/libmp/__pycache__/libmpi.cpython-310.pyc ADDED
Binary file (21.3 kB). View file
 
venv/lib/python3.10/site-packages/mpmath/libmp/backend.py ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import sys
3
+
4
+ #----------------------------------------------------------------------------#
5
+ # Support GMPY for high-speed large integer arithmetic. #
6
+ # #
7
+ # To allow an external module to handle arithmetic, we need to make sure #
8
+ # that all high-precision variables are declared of the correct type. MPZ #
9
+ # is the constructor for the high-precision type. It defaults to Python's #
10
+ # long type but can be assinged another type, typically gmpy.mpz. #
11
+ # #
12
+ # MPZ must be used for the mantissa component of an mpf and must be used #
13
+ # for internal fixed-point operations. #
14
+ # #
15
+ # Side-effects #
16
+ # 1) "is" cannot be used to test for special values. Must use "==". #
17
+ # 2) There are bugs in GMPY prior to v1.02 so we must use v1.03 or later. #
18
+ #----------------------------------------------------------------------------#
19
+
20
+ # So we can import it from this module
21
+ gmpy = None
22
+ sage = None
23
+ sage_utils = None
24
+
25
+ if sys.version_info[0] < 3:
26
+ python3 = False
27
+ else:
28
+ python3 = True
29
+
30
+ BACKEND = 'python'
31
+
32
+ if not python3:
33
+ MPZ = long
34
+ xrange = xrange
35
+ basestring = basestring
36
+
37
+ def exec_(_code_, _globs_=None, _locs_=None):
38
+ """Execute code in a namespace."""
39
+ if _globs_ is None:
40
+ frame = sys._getframe(1)
41
+ _globs_ = frame.f_globals
42
+ if _locs_ is None:
43
+ _locs_ = frame.f_locals
44
+ del frame
45
+ elif _locs_ is None:
46
+ _locs_ = _globs_
47
+ exec("""exec _code_ in _globs_, _locs_""")
48
+ else:
49
+ MPZ = int
50
+ xrange = range
51
+ basestring = str
52
+
53
+ import builtins
54
+ exec_ = getattr(builtins, "exec")
55
+
56
+ # Define constants for calculating hash on Python 3.2.
57
+ if sys.version_info >= (3, 2):
58
+ HASH_MODULUS = sys.hash_info.modulus
59
+ if sys.hash_info.width == 32:
60
+ HASH_BITS = 31
61
+ else:
62
+ HASH_BITS = 61
63
+ else:
64
+ HASH_MODULUS = None
65
+ HASH_BITS = None
66
+
67
+ if 'MPMATH_NOGMPY' not in os.environ:
68
+ try:
69
+ try:
70
+ import gmpy2 as gmpy
71
+ except ImportError:
72
+ try:
73
+ import gmpy
74
+ except ImportError:
75
+ raise ImportError
76
+ if gmpy.version() >= '1.03':
77
+ BACKEND = 'gmpy'
78
+ MPZ = gmpy.mpz
79
+ except:
80
+ pass
81
+
82
+ if ('MPMATH_NOSAGE' not in os.environ and 'SAGE_ROOT' in os.environ or
83
+ 'MPMATH_SAGE' in os.environ):
84
+ try:
85
+ import sage.all
86
+ import sage.libs.mpmath.utils as _sage_utils
87
+ sage = sage.all
88
+ sage_utils = _sage_utils
89
+ BACKEND = 'sage'
90
+ MPZ = sage.Integer
91
+ except:
92
+ pass
93
+
94
+ if 'MPMATH_STRICT' in os.environ:
95
+ STRICT = True
96
+ else:
97
+ STRICT = False
98
+
99
+ MPZ_TYPE = type(MPZ(0))
100
+ MPZ_ZERO = MPZ(0)
101
+ MPZ_ONE = MPZ(1)
102
+ MPZ_TWO = MPZ(2)
103
+ MPZ_THREE = MPZ(3)
104
+ MPZ_FIVE = MPZ(5)
105
+
106
+ try:
107
+ if BACKEND == 'python':
108
+ int_types = (int, long)
109
+ else:
110
+ int_types = (int, long, MPZ_TYPE)
111
+ except NameError:
112
+ if BACKEND == 'python':
113
+ int_types = (int,)
114
+ else:
115
+ int_types = (int, MPZ_TYPE)
venv/lib/python3.10/site-packages/mpmath/libmp/gammazeta.py ADDED
@@ -0,0 +1,2167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ -----------------------------------------------------------------------
3
+ This module implements gamma- and zeta-related functions:
4
+
5
+ * Bernoulli numbers
6
+ * Factorials
7
+ * The gamma function
8
+ * Polygamma functions
9
+ * Harmonic numbers
10
+ * The Riemann zeta function
11
+ * Constants related to these functions
12
+
13
+ -----------------------------------------------------------------------
14
+ """
15
+
16
+ import math
17
+ import sys
18
+
19
+ from .backend import xrange
20
+ from .backend import MPZ, MPZ_ZERO, MPZ_ONE, MPZ_THREE, gmpy
21
+
22
+ from .libintmath import list_primes, ifac, ifac2, moebius
23
+
24
+ from .libmpf import (\
25
+ round_floor, round_ceiling, round_down, round_up,
26
+ round_nearest, round_fast,
27
+ lshift, sqrt_fixed, isqrt_fast,
28
+ fzero, fone, fnone, fhalf, ftwo, finf, fninf, fnan,
29
+ from_int, to_int, to_fixed, from_man_exp, from_rational,
30
+ mpf_pos, mpf_neg, mpf_abs, mpf_add, mpf_sub,
31
+ mpf_mul, mpf_mul_int, mpf_div, mpf_sqrt, mpf_pow_int,
32
+ mpf_rdiv_int,
33
+ mpf_perturb, mpf_le, mpf_lt, mpf_gt, mpf_shift,
34
+ negative_rnd, reciprocal_rnd,
35
+ bitcount, to_float, mpf_floor, mpf_sign, ComplexResult
36
+ )
37
+
38
+ from .libelefun import (\
39
+ constant_memo,
40
+ def_mpf_constant,
41
+ mpf_pi, pi_fixed, ln2_fixed, log_int_fixed, mpf_ln2,
42
+ mpf_exp, mpf_log, mpf_pow, mpf_cosh,
43
+ mpf_cos_sin, mpf_cosh_sinh, mpf_cos_sin_pi, mpf_cos_pi, mpf_sin_pi,
44
+ ln_sqrt2pi_fixed, mpf_ln_sqrt2pi, sqrtpi_fixed, mpf_sqrtpi,
45
+ cos_sin_fixed, exp_fixed
46
+ )
47
+
48
+ from .libmpc import (\
49
+ mpc_zero, mpc_one, mpc_half, mpc_two,
50
+ mpc_abs, mpc_shift, mpc_pos, mpc_neg,
51
+ mpc_add, mpc_sub, mpc_mul, mpc_div,
52
+ mpc_add_mpf, mpc_mul_mpf, mpc_div_mpf, mpc_mpf_div,
53
+ mpc_mul_int, mpc_pow_int,
54
+ mpc_log, mpc_exp, mpc_pow,
55
+ mpc_cos_pi, mpc_sin_pi,
56
+ mpc_reciprocal, mpc_square,
57
+ mpc_sub_mpf
58
+ )
59
+
60
+
61
+
62
+ # Catalan's constant is computed using Lupas's rapidly convergent series
63
+ # (listed on http://mathworld.wolfram.com/CatalansConstant.html)
64
+ # oo
65
+ # ___ n-1 8n 2 3 2
66
+ # 1 \ (-1) 2 (40n - 24n + 3) [(2n)!] (n!)
67
+ # K = --- ) -----------------------------------------
68
+ # 64 /___ 3 2
69
+ # n (2n-1) [(4n)!]
70
+ # n = 1
71
+
72
@constant_memo
def catalan_fixed(prec):
    """Compute Catalan's constant as a fixed-point integer with *prec*
    fractional bits, using Lupas's rapidly convergent series (see the
    comment block above)."""
    prec = prec + 20                  # guard bits against accumulated roundoff
    a = one = MPZ_ONE << prec
    s, t, n = 0, 1, 1
    while t:                          # stop once a term underflows to zero
        a *= 32 * n**3 * (2*n-1)
        a //= (3-16*n+16*n**2)**2
        t = a * (-1)**(n-1) * (40*n**2-24*n+3) // (n**3 * (2*n-1))
        s += t
        n += 1
    # Drop the 20 guard bits and apply the 1/64 prefactor (>> 6)
    return s >> (20 + 6)
84
+
85
+ # Khinchin's constant is relatively difficult to compute. Here
86
+ # we use the rational zeta series
87
+
88
+ # oo 2*n-1
89
+ # ___ ___
90
+ # \ ` zeta(2*n)-1 \ ` (-1)^(k+1)
91
+ # log(K)*log(2) = ) ------------ ) ----------
92
+ # /___. n /___. k
93
+ # n = 1 k = 1
94
+
95
+ # which adds half a digit per term. The essential trick for achieving
96
+ # reasonable efficiency is to recycle both the values of the zeta
97
+ # function (essentially Bernoulli numbers) and the partial terms of
98
+ # the inner sum.
99
+
100
+ # An alternative might be to use K = 2*exp[1/log(2) X] where
101
+
102
+ # / 1 1 [ pi*x*(1-x^2) ]
103
+ # X = | ------ log [ ------------ ].
104
+ # / 0 x(1+x) [ sin(pi*x) ]
105
+
106
+ # and integrate numerically. In practice, this seems to be slightly
107
+ # slower than the zeta series at high precision.
108
+
109
@constant_memo
def khinchin_fixed(prec):
    """Khinchin's constant K as a fixed-point integer with *prec*
    fractional bits, via the rational zeta series for log(K)*log(2)
    described in the comment block above."""
    wp = int(prec + prec**0.5 + 15)   # working precision incl. guard bits
    s = MPZ_ZERO
    fac = from_int(4)                 # tracks 2*(2n)!  (n = 1 initially)
    t = ONE = MPZ_ONE << wp           # t: partial alternating harmonic sum
    pi = mpf_pi(wp)
    pipow = twopi2 = mpf_shift(mpf_mul(pi, pi, wp), 2)   # (2*pi)^2
    n = 1
    while 1:
        # zeta(2n) = |B_2n| * (2*pi)^(2n) / (2*(2n)!)
        zeta2n = mpf_abs(mpf_bernoulli(2*n, wp))
        zeta2n = mpf_mul(zeta2n, pipow, wp)
        zeta2n = mpf_div(zeta2n, fac, wp)
        zeta2n = to_fixed(zeta2n, wp)
        term = (((zeta2n - ONE) * t) // n) >> wp
        if term < 100:                # remaining terms fall under the guard bits
            break
        s += term
        # Extend the inner alternating sum from 2n-1 to 2n+1 terms
        t += ONE//(2*n+1) - ONE//(2*n)
        n += 1
        fac = mpf_mul_int(fac, (2*n)*(2*n-1), wp)
        pipow = mpf_mul(pipow, twopi2, wp)
    s = (s << wp) // ln2_fixed(wp)    # divide by log(2)
    K = mpf_exp(from_man_exp(s, -wp), wp)
    K = to_fixed(K, prec)
    return K
137
+
138
+
139
+ # Glaisher's constant is defined as A = exp(1/2 - zeta'(-1)).
140
+ # One way to compute it would be to perform direct numerical
141
+ # differentiation, but computing arbitrary Riemann zeta function
142
+ # values at high precision is expensive. We instead use the formula
143
+
144
+ # A = exp((6 (-zeta'(2))/pi^2 + log 2 pi + gamma)/12)
145
+
146
+ # and compute zeta'(2) from the series representation
147
+
148
+ # oo
149
+ # ___
150
+ # \ log k
151
+ # -zeta'(2) = ) -----
152
+ # /___ 2
153
+ # k
154
+ # k = 2
155
+
156
+ # This series converges exceptionally slowly, but can be accelerated
157
+ # using Euler-Maclaurin formula. The important insight is that the
158
+ # E-M integral can be done in closed form and that the high order
159
+ # are given by
160
+
161
+ # n / \
162
+ # d | log x | a + b log x
163
+ # --- | ----- | = -----------
164
+ # n | 2 | 2 + n
165
+ # dx \ x / x
166
+
167
+ # where a and b are integers given by a simple recurrence. Note
168
+ # that just one logarithm is needed. However, lots of integer
169
+ # logarithms are required for the initial summation.
170
+
171
+ # This algorithm could possibly be turned into a faster algorithm
172
+ # for general evaluation of zeta(s) or zeta'(s); this should be
173
+ # looked into.
174
+
175
@constant_memo
def glaisher_fixed(prec):
    """Glaisher-Kinkelin constant A as a fixed-point integer with *prec*
    fractional bits, via A = exp((6*(-zeta'(2))/pi^2 + log(2*pi) + euler)/12)
    with -zeta'(2) = sum log(k)/k^2 accelerated by Euler-Maclaurin."""
    wp = prec + 30
    # Number of direct terms to sum before applying the Euler-Maclaurin
    # formula to the tail. TODO: choose more intelligently
    N = int(0.33*prec + 5)
    ONE = MPZ_ONE << wp
    # Euler-Maclaurin, step 1: sum log(k)/k**2 for k from 2 to N-1
    s = MPZ_ZERO
    for k in range(2, N):
        s += log_int_fixed(k, wp) // k**2
    logN = log_int_fixed(N, wp)
    # E-M step 2: integral of log(x)/x**2 from N to inf (closed form)
    s += (ONE + logN) // N
    # E-M step 3: endpoint correction term f(N)/2
    s += logN // (N**2 * 2)
    # E-M step 4: the series of derivatives; the n-th derivative of
    # log(x)/x^2 is (a + b*log x)/x^(2+n) with integer a, b (see above)
    pN = N**3
    a = 1
    b = -2
    j = 3
    fac = from_int(2)                 # tracks (2k)!
    k = 1
    while 1:
        # D(2*k-1) * B(2*k) / fac(2*k)    [D(n) = nth derivative]
        D = ((a << wp) + b*logN) // pN
        D = from_man_exp(D, -wp)
        B = mpf_bernoulli(2*k, wp)
        term = mpf_mul(B, D, wp)
        term = mpf_div(term, fac, wp)
        term = to_fixed(term, wp)
        if abs(term) < 100:           # term negligible at working precision
            break
        s -= term
        # Advance derivative twice (only odd-order derivatives are used)
        a, b, pN, j = b-a*j, -j*b, pN*N, j+1
        a, b, pN, j = b-a*j, -j*b, pN*N, j+1
        k += 1
        fac = mpf_mul_int(fac, (2*k)*(2*k-1), wp)
    # A = exp((6*s/pi**2 + log(2*pi) + euler)/12)
    pi = pi_fixed(wp)
    s *= 6
    s = (s << wp) // (pi**2 >> wp)
    s += euler_fixed(wp)
    s += to_fixed(mpf_log(from_man_exp(2*pi, -wp), wp), wp)
    s //= 12
    A = mpf_exp(from_man_exp(s, -wp), wp)
    return to_fixed(A, prec)
227
+
228
+ # Apery's constant can be computed using the very rapidly convergent
229
+ # series
230
+ # oo
231
+ # ___ 2 10
232
+ # \ n 205 n + 250 n + 77 (n!)
233
+ # zeta(3) = ) (-1) ------------------- ----------
234
+ # /___ 64 5
235
+ # n = 0 ((2n+1)!)
236
+
237
@constant_memo
def apery_fixed(prec):
    """Apery's constant zeta(3) as a fixed-point integer with *prec*
    fractional bits, using the rapidly convergent series in the comment
    block above."""
    prec += 20                        # guard bits
    d = MPZ_ONE << prec               # running factor (n!)^10 / ((2n+1)!)^5-like ratio
    term = MPZ(77) << prec            # n = 0 term of the series
    n = 1
    s = MPZ_ZERO
    while term:                       # terms shrink to zero in fixed point
        s += term
        d *= (n**10)
        d //= (((2*n+1)**5) * (2*n)**5)
        term = (-1)**n * (205*(n**2) + 250*n + 77) * d
        n += 1
    # Remove guard bits and apply the 1/64 prefactor (>> 6)
    return s >> (20 + 6)
251
+
252
+ """
253
+ Euler's constant (gamma) is computed using the Brent-McMillan formula,
254
+ gamma ~= I(n)/J(n) - log(n), where
255
+
256
+ I(n) = sum_{k=0,1,2,...} (n**k / k!)**2 * H(k)
257
+ J(n) = sum_{k=0,1,2,...} (n**k / k!)**2
258
+ H(k) = 1 + 1/2 + 1/3 + ... + 1/k
259
+
260
+ The error is bounded by O(exp(-4n)). Choosing n to be a power
261
+ of two, 2**p, the logarithm becomes particularly easy to calculate.[1]
262
+
263
+ We use the formulation of Algorithm 3.9 in [2] to make the summation
264
+ more efficient.
265
+
266
+ Reference:
267
+ [1] Xavier Gourdon & Pascal Sebah, The Euler constant: gamma
268
+ http://numbers.computation.free.fr/Constants/Gamma/gamma.pdf
269
+
270
+ [2] [BorweinBailey]_
271
+ """
272
+
273
@constant_memo
def euler_fixed(prec):
    """Euler's constant gamma as a fixed-point integer with *prec*
    fractional bits, via the Brent-McMillan formula
    gamma ~ I(n)/J(n) - log(n) (see the module docstring above)."""
    extra = 30
    prec += extra
    # Choose p such that exp(-4*(2**p)) < 2**-prec
    p = int(math.log((prec/4) * math.log(2), 2)) + 1
    n = 2**p
    # With n = 2**p, log(n) = p*log(2) is cheap to obtain
    A = U = -p*ln2_fixed(prec)
    B = V = MPZ_ONE << prec
    k = 1
    while 1:
        B = B*n**2//k**2              # B_k = (n^k/k!)^2 in fixed point
        A = (A*n**2//k + B)//k        # A accumulates the H(k)-weighted terms
        U += A
        V += B
        if max(abs(A), abs(B)) < 100: # both series have converged
            break
        k += 1
    return (U<<(prec-extra))//V
292
+
293
+ # Use zeta accelerated formulas for the Mertens and twin
294
+ # prime constants; see
295
+ # http://mathworld.wolfram.com/MertensConstant.html
296
+ # http://mathworld.wolfram.com/TwinPrimesConstant.html
297
+
298
@constant_memo
def mertens_fixed(prec):
    """Mertens constant as a fixed-point integer with *prec* fractional
    bits, from the zeta-accelerated series
    B1 = gamma + sum_{m>=2} mu(m)/m * log(zeta(m))."""
    wp = prec + 20
    m = 2
    s = mpf_euler(wp)
    while 1:
        t = mpf_zeta_int(m, wp)
        if t == fone:                 # zeta(m) == 1 at wp: tail is negligible
            break
        t = mpf_log(t, wp)
        t = mpf_mul_int(t, moebius(m), wp)
        t = mpf_div(t, from_int(m), wp)
        s = mpf_add(s, t)
        m += 1
    return to_fixed(s, prec)
313
+
314
@constant_memo
def twinprime_fixed(prec):
    """Twin prime constant as a fixed-point integer with *prec* fractional
    bits, using the zeta-accelerated product referenced in the comment
    block above."""
    def I(n):
        # Moebius-transform exponent used in the accelerated product
        return sum(moebius(d)<<(n//d) for d in xrange(1,n+1) if not n%d)//n
    wp = 2*prec + 30
    res = fone
    primes = [from_rational(1,p,wp) for p in [2,3,5,7]]
    ppowers = [mpf_mul(p,p,wp) for p in primes]    # start at 1/p**2
    n = 2
    while 1:
        a = mpf_zeta_int(n, wp)
        # Strip the Euler factors for p = 2, 3, 5, 7 from zeta(n)
        for i in range(4):
            a = mpf_mul(a, mpf_sub(fone, ppowers[i]), wp)
            ppowers[i] = mpf_mul(ppowers[i], primes[i], wp)
        a = mpf_pow_int(a, -I(n), wp)
        if mpf_pos(a, prec+10, 'n') == fone:       # factor is 1 at target prec
            break
        res = mpf_mul(res, a, wp)
        n += 1
    # Multiply back the p = 3, 5, 7 factors p*(p-2)/(p-1)^2:
    # 3/4 * 15/16 * 35/36
    res = mpf_mul(res, from_int(3*15*35), wp)
    res = mpf_div(res, from_int(4*16*36), wp)
    return to_fixed(res, prec)
338
+
339
+
340
# Public mpf-level constants: def_mpf_constant wraps each memoized
# fixed-point routine into a function of (prec, rnd).
mpf_euler = def_mpf_constant(euler_fixed)
mpf_apery = def_mpf_constant(apery_fixed)
mpf_khinchin = def_mpf_constant(khinchin_fixed)
mpf_glaisher = def_mpf_constant(glaisher_fixed)
mpf_catalan = def_mpf_constant(catalan_fixed)
mpf_mertens = def_mpf_constant(mertens_fixed)
mpf_twinprime = def_mpf_constant(twinprime_fixed)
347
+
348
+
349
+ #-----------------------------------------------------------------------#
350
+ # #
351
+ # Bernoulli numbers #
352
+ # #
353
+ #-----------------------------------------------------------------------#
354
+
355
+ MAX_BERNOULLI_CACHE = 3000
356
+
357
+
358
+ r"""
359
+ Small Bernoulli numbers and factorials are used in numerous summations,
360
+ so it is critical for speed that sequential computation is fast and that
361
+ values are cached up to a fairly high threshold.
362
+
363
+ On the other hand, we also want to support fast computation of isolated
364
+ large numbers. Currently, no such acceleration is provided for integer
365
+ factorials (though it is for large floating-point factorials, which are
366
+ computed via gamma if the precision is low enough).
367
+
368
+ For sequential computation of Bernoulli numbers, we use Ramanujan's formula
369
+
370
+ / n + 3 \
371
+ B = (A(n) - S(n)) / | |
372
+ n \ n /
373
+
374
+ where A(n) = (n+3)/3 when n = 0 or 2 (mod 6), A(n) = -(n+3)/6
375
+ when n = 4 (mod 6), and
376
+
377
+ [n/6]
378
+ ___
379
+ \ / n + 3 \
380
+ S(n) = ) | | * B
381
+ /___ \ n - 6*k / n-6*k
382
+ k = 1
383
+
384
+ For isolated large Bernoulli numbers, we use the Riemann zeta function
385
+ to calculate a numerical value for B_n. The von Staudt-Clausen theorem
386
+ can then be used to optionally find the exact value of the
387
+ numerator and denominator.
388
+ """
389
+
390
# Per-working-precision cache: wp -> ({n: B_n as mpf}, [m, bin, bin1]),
# where the list is the resumable state of the sequential recurrence
# used by mpf_bernoulli.
bernoulli_cache = {}
f3 = from_int(3)   # constants 3 and 6 used by the A(n) term of Ramanujan's formula
f6 = from_int(6)
393
+
394
def bernoulli_size(n):
    """Accurately estimate the size of B_n (even n > 2 only)"""
    log2n = math.log(n, 2)
    estimate = 2.326 + 0.5 * log2n + n * (log2n - 4.094)
    return int(estimate)
398
+
399
# Precision (in bits) above which the exact-fraction path of
# mpf_bernoulli may pay off; matches the size of the largest cached B_n.
BERNOULLI_PREC_CUTOFF = bernoulli_size(MAX_BERNOULLI_CACHE)
400
+
401
def mpf_bernoulli(n, prec, rnd=None):
    """Computation of Bernoulli numbers (numerically).

    Returns B_n as an mpf value at *prec* bits.  Small/sequential n are
    computed with Ramanujan's recurrence (see the comment block above)
    and cached per working precision; isolated large n are delegated to
    mpf_bernoulli_huge; extremely high precision goes through the exact
    fraction (bernfrac).  Raises ValueError for n < 0.
    """
    if n < 2:
        if n < 0:
            raise ValueError("Bernoulli numbers only defined for n >= 0")
        if n == 0:
            return fone
        if n == 1:
            return mpf_neg(fhalf)
    # For odd n > 1, the Bernoulli numbers are zero
    if n & 1:
        return fzero
    # If precision is extremely high, we can save time by computing
    # the Bernoulli number at a lower precision that is sufficient to
    # obtain the exact fraction, round to the exact fraction, and
    # convert the fraction back to an mpf value at the original precision
    if prec > BERNOULLI_PREC_CUTOFF and prec > bernoulli_size(n)*1.1 + 1000:
        p, q = bernfrac(n)
        return from_rational(p, q, prec, rnd or round_floor)
    if n > MAX_BERNOULLI_CACHE:
        return mpf_bernoulli_huge(n, prec, rnd)
    wp = prec + 30
    # Reuse nearby precisions (quantize wp so cache entries are shared)
    wp += 32 - (prec & 31)
    cached = bernoulli_cache.get(wp)
    if cached:
        numbers, state = cached
        if n in numbers:
            if not rnd:
                return numbers[n]
            return mpf_pos(numbers[n], prec, rnd)
        m, bin, bin1 = state
        # Sequential recurrence only pays off near the cached frontier
        if n - m > 10:
            return mpf_bernoulli_huge(n, prec, rnd)
    else:
        if n > 10:
            return mpf_bernoulli_huge(n, prec, rnd)
        numbers = {0:fone}
        m, bin, bin1 = state = [2, MPZ(10), MPZ_ONE]
        bernoulli_cache[wp] = (numbers, state)
    # Ramanujan's recurrence: B_m = (A(m) - S(m)) / C(m+3, m)
    while m <= n:
        case = m % 6
        # Accurately estimate size of B_m so we can use
        # fixed point math without using too much precision
        szbm = bernoulli_size(m)
        s = 0
        sexp = max(0, szbm) - wp
        if m < 6:
            a = MPZ_ZERO
        else:
            a = bin1
        # Inner sum S(m) over previously computed B_{m-6k}
        for j in xrange(1, m//6+1):
            usign, uman, uexp, ubc = u = numbers[m-6*j]
            if usign:
                uman = -uman
            s += lshift(a*uman, uexp-sexp)
            # Update inner binomial coefficient
            j6 = 6*j
            a *= ((m-5-j6)*(m-4-j6)*(m-3-j6)*(m-2-j6)*(m-1-j6)*(m-j6))
            a //= ((4+j6)*(5+j6)*(6+j6)*(7+j6)*(8+j6)*(9+j6))
        # A(m) depends on m mod 6 (cases 0, 2 give (m+3)/3; case 4 gives -(m+3)/6)
        if case == 0: b = mpf_rdiv_int(m+3, f3, wp)
        if case == 2: b = mpf_rdiv_int(m+3, f3, wp)
        if case == 4: b = mpf_rdiv_int(-m-3, f6, wp)
        s = from_man_exp(s, sexp, wp)
        b = mpf_div(mpf_sub(b, s, wp), from_int(bin), wp)
        numbers[m] = b
        m += 2
        # Update outer binomial coefficient
        bin = bin * ((m+2)*(m+3)) // (m*(m-1))
        if m > 6:
            bin1 = bin1 * ((2+m)*(3+m)) // ((m-7)*(m-6))
        state[:] = [m, bin, bin1]
    return numbers[n]
475
+
476
def mpf_bernoulli_huge(n, prec, rnd=None):
    """Isolated large Bernoulli number via the zeta relation
    |B_n| = 2*n!*zeta(n)/(2*pi)^n (even n)."""
    wp = prec + 10
    # pi**(-n) amplifies relative error by ~log2(n) bits; compensate
    piprec = wp + int(math.log(n,2))
    v = mpf_gamma_int(n+1, wp)
    v = mpf_mul(v, mpf_zeta_int(n, wp), wp)
    v = mpf_mul(v, mpf_pow_int(mpf_pi(piprec), -n, wp))
    v = mpf_shift(v, 1-n)              # multiply by 2 / 2^n
    if not n & 3:
        v = mpf_neg(v)                 # B_n < 0 when n ≡ 0 (mod 4)
    return mpf_pos(v, prec, rnd or round_fast)
486
+
487
def bernfrac(n):
    r"""
    Returns a tuple of integers `(p, q)` such that `p/q = B_n` exactly,
    where `B_n` denotes the `n`-th Bernoulli number. The fraction is
    always reduced to lowest terms. Note that for `n > 1` and `n` odd,
    `B_n = 0`, and `(0, 1)` is returned.

    **Examples**

    The first few Bernoulli numbers are exactly::

        >>> from mpmath import *
        >>> for n in range(15):
        ...     p, q = bernfrac(n)
        ...     print("%s %s/%s" % (n, p, q))
        ...
        0 1/1
        1 -1/2
        2 1/6
        3 0/1
        4 -1/30
        5 0/1
        6 1/42
        7 0/1
        8 -1/30
        9 0/1
        10 5/66
        11 0/1
        12 -691/2730
        13 0/1
        14 7/6

    This function works for arbitrarily large `n`::

        >>> p, q = bernfrac(10**4)
        >>> print(q)
        2338224387510
        >>> print(len(str(p)))
        27692
        >>> mp.dps = 15
        >>> print(mpf(p) / q)
        -9.04942396360948e+27677
        >>> print(bernoulli(10**4))
        -9.04942396360948e+27677

    .. note ::

        :func:`~mpmath.bernoulli` computes a floating-point approximation
        directly, without computing the exact fraction first.
        This is much faster for large `n`.

    **Algorithm**

    :func:`~mpmath.bernfrac` works by computing the value of `B_n` numerically
    and then using the von Staudt-Clausen theorem [1] to reconstruct
    the exact fraction. For large `n`, this is significantly faster than
    computing `B_1, B_2, \ldots, B_n` recursively with exact arithmetic.
    The implementation has been tested for `n = 10^m` up to `m = 6`.

    In practice, :func:`~mpmath.bernfrac` appears to be about three times
    slower than the specialized program calcbn.exe [2].

    **References**

    1. MathWorld, von Staudt-Clausen Theorem:
       http://mathworld.wolfram.com/vonStaudt-ClausenTheorem.html

    2. The Bernoulli Number Page:
       http://www.bernoulli.org/

    """
    n = int(n)
    if n < 3:
        return [(1, 1), (-1, 2), (1, 6)][n]
    if n & 1:
        return (0, 1)
    # von Staudt-Clausen: the denominator is the product of all primes k
    # with (k-1) | n
    q = 1
    for k in list_primes(n+1):
        if not (n % (k-1)):
            q *= k
    # Enough precision to round B_n * q to the exact integer numerator
    prec = bernoulli_size(n) + int(math.log(q,2)) + 20
    b = mpf_bernoulli(n, prec)
    p = mpf_mul(b, from_int(q))
    pint = to_int(p, round_nearest)
    return (pint, q)
572
+
573
+
574
+ #-----------------------------------------------------------------------#
575
+ # #
576
+ # Polygamma functions #
577
+ # #
578
+ #-----------------------------------------------------------------------#
579
+
580
+ r"""
581
+ For all polygamma (psi) functions, we use the Euler-Maclaurin summation
582
+ formula. It looks slightly different in the m = 0 and m > 0 cases.
583
+
584
+ For m = 0, we have
585
+ oo
586
+ ___ B
587
+ (0) 1 \ 2 k -2 k
588
+ psi (z) ~ log z + --- - ) ------ z
589
+ 2 z /___ (2 k)!
590
+ k = 1
591
+
592
+ Experiment shows that the minimum term of the asymptotic series
593
+ reaches 2^(-p) when Re(z) > 0.11*p. So we simply use the recurrence
594
+ for psi (equivalent, in fact, to summing to the first few terms
595
+ directly before applying E-M) to obtain z large enough.
596
+
597
+ Since, very crudely, log z ~= 1 for Re(z) > 1, we can use
598
+ fixed-point arithmetic (if z is extremely large, log(z) itself
599
+ is a sufficient approximation, so we can stop there already).
600
+
601
+ For Re(z) << 0, we could use recurrence, but this is of course
602
+ inefficient for large negative z, so there we use the
603
+ reflection formula instead.
604
+
605
+ For m > 0, we have
606
+
607
+ N - 1
608
+ ___
609
+ ~~~(m) [ \ 1 ] 1 1
610
+ psi (z) ~ [ ) -------- ] + ---------- + -------- +
611
+ [ /___ m+1 ] m+1 m
612
+ k = 1 (z+k) ] 2 (z+N) m (z+N)
613
+
614
+ oo
615
+ ___ B
616
+ \ 2 k (m+1) (m+2) ... (m+2k-1)
617
+ + ) ------ ------------------------
618
+ /___ (2 k)! m + 2 k
619
+ k = 1 (z+N)
620
+
621
+ where ~~~ denotes the function rescaled by 1/((-1)^(m+1) m!).
622
+
623
+ Here again N is chosen to make z+N large enough for the minimum
624
+ term in the last series to become smaller than eps.
625
+
626
+ TODO: the current estimation of N for m > 0 is *very suboptimal*.
627
+
628
+ TODO: implement the reflection formula for m > 0, Re(z) << 0.
629
+ It is generally a combination of multiple cotangents. Need to
630
+ figure out a reasonably simple way to generate these formulas
631
+ on the fly.
632
+
633
+ TODO: maybe use exact algorithms to compute psi for integral
634
+ and certain rational arguments, as this can be much more
635
+ efficient. (On the other hand, the availability of these
636
+ special values provides a convenient way to test the general
637
+ algorithm.)
638
+ """
639
+
640
+ # Harmonic numbers are just shifted digamma functions
641
+ # We should calculate these exactly when x is an integer
642
+ # and when doing so is faster.
643
+
644
def mpf_harmonic(x, prec, rnd):
    """Harmonic number H(x) = euler + psi0(1+x) for a real mpf x.
    The special values 0, nan and +inf are returned unchanged."""
    for special in (fzero, fnan, finf):
        if x == special:
            return x
    digamma = mpf_psi0(mpf_add(fone, x, prec+5), prec)
    return mpf_add(digamma, mpf_euler(prec+5, rnd), prec, rnd)
649
+
650
def mpc_harmonic(z, prec, rnd):
    """Harmonic number H(z) = euler + psi0(1+z) for a complex mpc z.
    A purely real argument is dispatched to mpf_harmonic."""
    real, imag = z
    if imag == fzero:
        return (mpf_harmonic(real, prec, rnd), fzero)
    shifted = mpc_add_mpf(z, fone, prec+5)
    digamma = mpc_psi0(shifted, prec)
    return mpc_add_mpf(digamma, mpf_euler(prec+5, rnd), prec, rnd)
655
+
656
def mpf_psi0(x, prec, rnd=round_fast):
    """
    Computation of the digamma function (psi function of order 0)
    of a real argument.

    Uses recurrence to push the argument right of Re(x) ~ 0.11*wp, then
    the Euler-Maclaurin asymptotic series in fixed-point arithmetic.
    Negative arguments go through the reflection formula.  Raises
    ValueError at the poles (x = 0, -1, -2, ...).
    """
    sign, man, exp, bc = x
    wp = prec + 10
    if not man:
        if x == finf: return x
        if x == fninf or x == fnan: return fnan
    if x == fzero or (exp >= 0 and sign):
        raise ValueError("polygamma pole")
    # Near 0 -- fixed-point arithmetic becomes bad; use
    # psi(x) = psi(x+1) - 1/x to move away from zero
    if exp+bc < -5:
        v = mpf_psi0(mpf_add(x, fone, prec, rnd), prec, rnd)
        return mpf_sub(v, mpf_div(fone, x, wp, rnd), prec, rnd)
    # Reflection formula: psi(1-x) - psi(x) = pi*cot(pi*x)
    if sign and exp+bc > 3:
        c, s = mpf_cos_sin_pi(x, wp)
        q = mpf_mul(mpf_div(c, s, wp), mpf_pi(wp), wp)
        p = mpf_psi0(mpf_sub(fone, x, wp), wp)
        return mpf_sub(p, q, prec, rnd)
    # The logarithmic term is accurate enough
    if (not sign) and bc + exp > wp:
        return mpf_log(mpf_sub(x, fone, wp), prec, rnd)
    # Initial recurrence to obtain a large enough x
    m = to_int(x)
    n = int(0.11*wp) + 2
    s = MPZ_ZERO
    x = to_fixed(x, wp)
    one = MPZ_ONE << wp
    if m < n:
        for k in xrange(m, n):
            s -= (one << wp) // x     # psi(x) = psi(x+1) - 1/x
            x += one
    x -= one                          # expand psi((x-1)+1) below
    # Logarithmic term
    s += to_fixed(mpf_log(from_man_exp(x, -wp, wp), wp), wp)
    # Endpoint term in Euler-Maclaurin expansion
    s += (one << wp) // (2*x)
    # Euler-Maclaurin remainder sum over B_2k / (2k * x^(2k))
    x2 = (x*x) >> wp
    t = one
    prev = 0
    k = 1
    while 1:
        t = (t*x2) >> wp
        # bman is the unsigned mantissa; the sign alternation of B_2k is
        # applied explicitly via the parity of k below
        bsign, bman, bexp, bbc = mpf_bernoulli(2*k, wp)
        offset = (bexp + 2*wp)
        if offset >= 0: term = (bman << offset) // (t*(2*k))
        else: term = (bman >> (-offset)) // (t*(2*k))
        if k & 1: s -= term
        else: s += term
        # Asymptotic series: stop when terms stop decreasing
        if k > 2 and term >= prev:
            break
        prev = term
        k += 1
    return from_man_exp(s, -wp, wp, rnd)
714
+
715
def mpc_psi0(z, prec, rnd=round_fast):
    """
    Computation of the digamma function (psi function of order 0)
    of a complex argument.

    Same strategy as mpf_psi0 (recurrence + Euler-Maclaurin, reflection
    for Re(z) << 0), carried out in mpc arithmetic.
    """
    re, im = z
    # Fall back to the real case
    if im == fzero:
        return (mpf_psi0(re, prec, rnd), fzero)
    wp = prec + 20
    sign, man, exp, bc = re
    # Reflection formula: psi(1-z) - psi(z) = pi*cot(pi*z)
    if sign and exp+bc > 3:
        c = mpc_cos_pi(z, wp)
        s = mpc_sin_pi(z, wp)
        q = mpc_mul_mpf(mpc_div(c, s, wp), mpf_pi(wp), wp)
        p = mpc_psi0(mpc_sub(mpc_one, z, wp), wp)
        return mpc_sub(p, q, prec, rnd)
    # Just the logarithmic term
    if (not sign) and bc + exp > wp:
        return mpc_log(mpc_sub(z, mpc_one, wp), prec, rnd)
    # Initial recurrence to obtain a large enough z
    w = to_int(re)
    n = int(0.11*wp) + 2
    s = mpc_zero
    if w < n:
        for k in xrange(w, n):
            s = mpc_sub(s, mpc_reciprocal(z, wp), wp)   # psi(z) = psi(z+1) - 1/z
            z = mpc_add_mpf(z, fone, wp)
    z = mpc_sub(z, mpc_one, wp)       # expand psi((z-1)+1) below
    # Logarithmic and endpoint term
    s = mpc_add(s, mpc_log(z, wp), wp)
    s = mpc_add(s, mpc_div(mpc_half, z, wp), wp)
    # Euler-Maclaurin remainder sum; B_2k carries its own sign here
    z2 = mpc_square(z, wp)
    t = mpc_one
    prev = mpc_zero
    szprev = fzero
    k = 1
    eps = mpf_shift(fone, -wp+2)
    while 1:
        t = mpc_mul(t, z2, wp)
        bern = mpf_bernoulli(2*k, wp)
        term = mpc_mpf_div(bern, mpc_mul_int(t, 2*k, wp), wp)
        s = mpc_sub(s, term, wp)
        szterm = mpc_abs(term, 10)
        # Stop when the term is negligible or the asymptotic series turns
        if k > 2 and (mpf_le(szterm, eps) or mpf_le(szprev, szterm)):
            break
        prev = term
        szprev = szterm
        k += 1
    return s
767
+
768
+ # Currently unoptimized
769
# Currently unoptimized
def mpf_psi(m, x, prec, rnd=round_fast):
    """
    Computation of the polygamma function of arbitrary integer order
    m >= 0, for a real argument x.

    For m = 0 this delegates to mpf_psi0; otherwise the complex
    implementation mpc_psi is used and the real part returned.
    """
    if m == 0:
        # Fix: propagate the caller's rounding mode; previously round_fast
        # was always passed here, silently ignoring the rnd argument.
        return mpf_psi0(x, prec, rnd=rnd)
    return mpc_psi(m, (x, fzero), prec, rnd)[0]
777
+
778
def mpc_psi(m, z, prec, rnd=round_fast):
    """
    Computation of the polygamma function of arbitrary integer order
    m >= 0, for a complex argument z.

    Uses recurrence plus the Euler-Maclaurin expansion documented in the
    comment block above (terms rescaled by 1/((-1)^(m+1) m!), with the
    gamma(m+1) factor and sign restored at the end).
    """
    if m == 0:
        return mpc_psi0(z, prec, rnd)
    re, im = z
    wp = prec + 20
    sign, man, exp, bc = re
    if not im[1]:
        # Imaginary part is zero or special
        if im in (finf, fninf, fnan):
            return (fnan, fnan)
    if not man:
        if re == finf and im == fzero:
            return (fzero, fzero)
        if re == fnan:
            return (fnan, fnan)
    # Recurrence: push Re(z) up to N so the E-M tail converges
    # (NOTE: current choice of N is very suboptimal, see above)
    w = to_int(re)
    n = int(0.4*wp + 4*m)
    s = mpc_zero
    if w < n:
        for k in xrange(w, n):
            t = mpc_pow_int(z, -m-1, wp)
            s = mpc_add(s, t, wp)
            z = mpc_add_mpf(z, fone, wp)
    zm = mpc_pow_int(z, -m, wp)
    z2 = mpc_pow_int(z, -2, wp)
    # 1/m*(z+N)^m
    integral_term = mpc_div_mpf(zm, from_int(m), wp)
    s = mpc_add(s, integral_term, wp)
    # 1/2*(z+N)^(-(m+1))
    s = mpc_add(s, mpc_mul_mpf(mpc_div(zm, z, wp), fhalf, wp), wp)
    a = m + 1                 # rising-factorial numerator (m+1)...(m+2k-1)
    b = 2                     # (2k)!
    k = 1
    # Important: we want to sum up to the *relative* error,
    # not the absolute error, because psi^(m)(z) might be tiny
    magn = mpc_abs(s, 10)
    magn = magn[2]+magn[3]    # exponent + bitcount ~ magnitude of s
    eps = mpf_shift(fone, magn-wp+2)
    while 1:
        zm = mpc_mul(zm, z2, wp)
        bern = mpf_bernoulli(2*k, wp)
        scal = mpf_mul_int(bern, a, wp)
        scal = mpf_div(scal, from_int(b), wp)
        term = mpc_mul_mpf(zm, scal, wp)
        s = mpc_add(s, term, wp)
        szterm = mpc_abs(term, 10)
        if k > 2 and mpf_le(szterm, eps):
            break
        a *= (m+2*k)*(m+2*k+1)
        b *= (2*k+1)*(2*k+2)
        k += 1
    # Scale and sign factor
    v = mpc_mul_mpf(s, mpf_gamma(from_int(m+1), wp), prec, rnd)
    if not (m & 1):
        v = mpf_neg(v[0]), mpf_neg(v[1])
    return v
839
+
840
+
841
+ #-----------------------------------------------------------------------#
842
+ # #
843
+ # Riemann zeta function #
844
+ # #
845
+ #-----------------------------------------------------------------------#
846
+
847
+ r"""
848
+ We use zeta(s) = eta(s) / (1 - 2**(1-s)) and Borwein's approximation
849
+
850
+ n-1
851
+ ___ k
852
+ -1 \ (-1) (d_k - d_n)
853
+ eta(s) ~= ---- ) ------------------
854
+ d_n /___ s
855
+ k = 0 (k + 1)
856
+ where
857
+ k
858
+ ___ i
859
+ \ (n + i - 1)! 4
860
+ d_k = n ) ---------------.
861
+ /___ (n - i)! (2i)!
862
+ i = 0
863
+
864
+ If s = a + b*I, the absolute error for eta(s) is bounded by
865
+
866
+ 3 (1 + 2|b|)
867
+ ------------ * exp(|b| pi/2)
868
+ n
869
+ (3+sqrt(8))
870
+
871
+ Disregarding the linear term, we have approximately,
872
+
873
+ log(err) ~= log(exp(1.58*|b|)) - log(5.8**n)
874
+ log(err) ~= 1.58*|b| - log(5.8)*n
875
+ log(err) ~= 1.58*|b| - 1.76*n
876
+ log2(err) ~= 2.28*|b| - 2.54*n
877
+
878
+ So for p bits, we should choose n > (p + 2.28*|b|) / 2.54.
879
+
880
+ References:
881
+ -----------
882
+
883
+ Peter Borwein, "An Efficient Algorithm for the Riemann Zeta Function"
884
+ http://www.cecm.sfu.ca/personal/pborwein/PAPERS/P117.ps
885
+
886
+ http://en.wikipedia.org/wiki/Dirichlet_eta_function
887
+ """
888
+
889
+ borwein_cache = {}
890
+
891
def borwein_coefficients(n):
    """Return the list [d_0, ..., d_n] of Borwein's coefficients for the
    alternating (eta) series approximation of zeta; memoized in
    borwein_cache."""
    cached = borwein_cache.get(n)
    if cached is not None:
        return cached
    coeffs = [MPZ_ZERO] * (n+1)
    increment = MPZ_ONE
    partial = coeffs[0] = MPZ_ONE
    for i in range(1, n+1):
        increment = increment * 4 * (n+i-1) * (n-i+1)
        increment //= ((2*i) * ((2*i)-1))
        partial += increment
        coeffs[i] = partial
    borwein_cache[n] = coeffs
    return coeffs
904
+
905
+ ZETA_INT_CACHE_MAX_PREC = 1000
906
+ zeta_int_cache = {}
907
+
908
def mpf_zeta_int(s, prec, rnd=round_fast):
    """
    Optimized computation of zeta(s) for an integer s.

    Strategies, chosen by the size of s relative to the working
    precision: cached value; Bernoulli formula for s <= 0; perturbed 1
    when 2^-s vanishes; a short explicit tail when only a few terms
    matter; the Euler product when it terminates quickly; otherwise
    Borwein's algorithm.  Raises ValueError at the pole s = 1.
    """
    wp = prec + 20
    s = int(s)
    if s in zeta_int_cache and zeta_int_cache[s][0] >= wp:
        return mpf_pos(zeta_int_cache[s][1], prec, rnd)
    if s < 2:
        if s == 1:
            raise ValueError("zeta(1) pole")
        if not s:
            return mpf_neg(fhalf)
        # zeta(-n) = -B_{n+1}/(n+1) for n >= 1
        return mpf_div(mpf_bernoulli(-s+1, wp), from_int(s-1), prec, rnd)
    # 2^-s term vanishes?
    if s >= wp:
        return mpf_perturb(fone, 0, prec, rnd)
    # 5^-s term vanishes?
    elif s >= wp*0.431:
        # Only the 1, 2^-s, 3^-s, 4^-s terms contribute
        t = one = 1 << wp
        t += 1 << (wp - s)
        t += one // (MPZ_THREE ** s)
        t += 1 << max(0, wp - s*2)
        return from_man_exp(t, -wp, prec, rnd)
    else:
        # Fast enough to sum directly?
        # Even better, we use the Euler product (idea stolen from pari)
        m = (float(wp)/(s-1) + 1)
        if m < 30:
            needed_terms = int(2.0**m + 1)
            if needed_terms < int(wp/2.54 + 5) / 10:
                t = fone
                for k in list_primes(needed_terms):
                    # Precision needed for this Euler factor shrinks with k
                    powprec = int(wp - s*math.log(k,2))
                    if powprec < 2:
                        break
                    a = mpf_sub(fone, mpf_pow_int(from_int(k), -s, powprec), wp)
                    t = mpf_mul(t, a, wp)
                return mpf_div(fone, t, wp)
        # Use Borwein's algorithm
        n = int(wp/2.54 + 5)
        d = borwein_coefficients(n)
        t = MPZ_ZERO
        s = MPZ(s)
        for k in xrange(n):
            t += (((-1)**k * (d[k] - d[n])) << wp) // (k+1)**s
        t = (t << wp) // (-d[n])
        # Convert eta(s) to zeta(s): divide by (1 - 2^(1-s))
        t = (t << wp) // ((1 << wp) - (1 << (wp+1-s)))
        # Cache unless a higher-precision entry already exists
        if (s in zeta_int_cache and zeta_int_cache[s][0] < wp) or (s not in zeta_int_cache):
            zeta_int_cache[s] = (wp, from_man_exp(t, -wp-wp))
        return from_man_exp(t, -wp-wp, prec, rnd)
960
+
961
def mpf_zeta(s, prec, rnd=round_fast, alt=0):
    """
    Riemann zeta function of a real mpf argument s; with alt=1, the
    Dirichlet eta (alternating zeta) function is computed instead.

    Integer arguments dispatch to mpf_zeta_int; negative arguments use
    the functional equation; arguments near the pole s = 1 use the
    Laurent expansion; the general case uses Borwein's algorithm.
    """
    sign, man, exp, bc = s
    if not man:
        if s == fzero:
            if alt:
                return fhalf
            else:
                return mpf_neg(fhalf)
        if s == finf:
            return fone
        return fnan
    wp = prec + 20
    # First term vanishes?
    if (not sign) and (exp + bc > (math.log(wp,2) + 2)):
        return mpf_perturb(fone, alt, prec, rnd)
    # Optimize for integer arguments
    elif exp >= 0:
        if alt:
            if s == fone:
                return mpf_ln2(prec, rnd)
            z = mpf_zeta_int(to_int(s), wp, negative_rnd[rnd])
            # eta(s) = zeta(s) * (1 - 2^(1-s))
            q = mpf_sub(fone, mpf_pow(ftwo, mpf_sub(fone, s, wp), wp), wp)
            return mpf_mul(z, q, prec, rnd)
        else:
            return mpf_zeta_int(to_int(s), prec, rnd)
    # Negative: use the reflection formula
    # Borwein only proves the accuracy bound for x >= 1/2. However, based on
    # tests, the accuracy without reflection is quite good even some distance
    # to the left of 1/2. XXX: verify this.
    if sign:
        # XXX: could use the separate refl. formula for Dirichlet eta
        if alt:
            q = mpf_sub(fone, mpf_pow(ftwo, mpf_sub(fone, s, wp), wp), wp)
            return mpf_mul(mpf_zeta(s, wp), q, prec, rnd)
        # zeta(s) = 2^s pi^(s-1) sin(pi s/2) gamma(1-s) zeta(1-s)
        # XXX: -1 should be done exactly
        y = mpf_sub(fone, s, 10*wp)
        a = mpf_gamma(y, wp)
        b = mpf_zeta(y, wp)
        c = mpf_sin_pi(mpf_shift(s, -1), wp)
        wp2 = wp + max(0,exp+bc)
        pi = mpf_pi(wp+wp2)
        d = mpf_div(mpf_pow(mpf_shift(pi, 1), s, wp2), pi, wp2)
        return mpf_mul(a,mpf_mul(b,mpf_mul(c,d,wp),wp),prec,rnd)

    # Near pole: zeta(s) ~ 1/(s-1) + euler + ...
    r = mpf_sub(fone, s, wp)
    asign, aman, aexp, abc = mpf_abs(r)
    pole_dist = -2*(aexp+abc)
    if pole_dist > wp:
        if alt:
            return mpf_ln2(prec, rnd)
        else:
            q = mpf_neg(mpf_div(fone, r, wp))
            return mpf_add(q, mpf_euler(wp), prec, rnd)
    else:
        # Cancellation near the pole: raise the working precision
        wp += max(0, pole_dist)

    t = MPZ_ZERO
    # Use Borwein's algorithm
    n = int(wp/2.54 + 5)
    d = borwein_coefficients(n)
    t = MPZ_ZERO
    sf = to_fixed(s, wp)
    ln2 = ln2_fixed(wp)
    for k in xrange(n):
        # (k+1)^(-s) computed as exp(-s*log(k+1)) in fixed point
        u = (-sf*log_int_fixed(k+1, wp, ln2)) >> wp
        eman = exp_fixed(u, wp, ln2)
        w = (d[k] - d[n]) * eman
        if k & 1:
            t -= w
        else:
            t += w
    t = t // (-d[n])
    t = from_man_exp(t, -wp, wp)
    if alt:
        return mpf_pos(t, prec, rnd)
    else:
        # Convert eta(s) to zeta(s)
        q = mpf_sub(fone, mpf_pow(ftwo, mpf_sub(fone, s, wp), wp), wp)
        return mpf_div(t, q, prec, rnd)
1047
+
1048
def mpc_zeta(s, prec, rnd=round_fast, alt=0, force=False):
    """
    Complex Riemann zeta function zeta(s) (or, with alt=1, the
    alternating Dirichlet eta function eta(s)) for an mpc value
    s = (re, im), evaluated to *prec* bits with rounding *rnd*.

    Uses Borwein's algorithm for the main sum, the reflection formula
    for re(s) < 0, and a Laurent expansion near the pole at s = 1.
    With force=False, raises NotImplementedError when |s| > prec since
    the Borwein sum would need too many terms to converge.
    """
    re, im = s
    # Purely real argument: delegate to the real implementation.
    if im == fzero:
        return mpf_zeta(re, prec, rnd, alt), fzero

    # slow for large s
    if (not force) and mpf_gt(mpc_abs(s, 10), from_int(prec)):
        raise NotImplementedError

    wp = prec + 20

    # Near pole: zeta has a simple pole at s = 1 with residue 1.
    # r = 1 - s; a tiny |r| means heavy cancellation, handled by a
    # short Laurent/Taylor expansion instead of the main algorithm.
    r = mpc_sub(mpc_one, s, wp)
    # mpc_abs returns an mpf tuple; aexp+abc estimates log2|1-s|.
    asign, aman, aexp, abc = mpc_abs(r, 10)
    pole_dist = -2*(aexp+abc)
    if pole_dist > wp:
        if alt:
            # eta(s) ~= log(2) - (euler*log(2) - log(2)^2/2)*(s-1) near s=1
            q = mpf_ln2(wp)
            y = mpf_mul(q, mpf_euler(wp), wp)
            g = mpf_shift(mpf_mul(q, q, wp), -1)
            g = mpf_sub(y, g)
            z = mpc_mul_mpf(r, mpf_neg(g), wp)
            z = mpc_add_mpf(z, q, wp)
            return mpc_pos(z, prec, rnd)
        else:
            # zeta(s) ~= 1/(s-1) + euler near s=1
            q = mpc_neg(mpc_div(mpc_one, r, wp))
            q = mpc_add_mpf(q, mpf_euler(wp), wp)
            return mpc_pos(q, prec, rnd)
    else:
        # Moderately close to the pole: just add guard bits.
        wp += max(0, pole_dist)

    # Reflection formula. To be rigorous, we should reflect to the left of
    # re = 1/2 (see comments for mpf_zeta), but this leads to unnecessary
    # slowdown for interesting values of s
    if mpf_lt(re, fzero):
        # XXX: could use the separate refl. formula for Dirichlet eta
        if alt:
            # eta(s) = (1 - 2^(1-s)) * zeta(s)
            q = mpc_sub(mpc_one, mpc_pow(mpc_two, mpc_sub(mpc_one, s, wp),
                wp), wp)
            return mpc_mul(mpc_zeta(s, wp), q, prec, rnd)
        # XXX: -1 should be done exactly
        # zeta(s) = 2^s pi^(s-1) sin(pi*s/2) gamma(1-s) zeta(1-s)
        y = mpc_sub(mpc_one, s, 10*wp)
        a = mpc_gamma(y, wp)
        b = mpc_zeta(y, wp)
        c = mpc_sin_pi(mpc_shift(s, -1), wp)
        rsign, rman, rexp, rbc = re
        isign, iman, iexp, ibc = im
        # Extra precision proportional to the magnitude of s, since
        # (2*pi)^s can be huge.
        mag = max(rexp+rbc, iexp+ibc)
        wp2 = wp + max(0, mag)
        pi = mpf_pi(wp+wp2)
        pi2 = (mpf_shift(pi, 1), fzero)
        d = mpc_div_mpf(mpc_pow(pi2, s, wp2), pi, wp2)
        return mpc_mul(a,mpc_mul(b,mpc_mul(c,d,wp),wp),prec,rnd)
    # Borwein's algorithm: more terms are needed as |im(s)| grows.
    n = int(wp/2.54 + 5)
    n += int(0.9*abs(to_int(im)))
    d = borwein_coefficients(n)
    ref = to_fixed(re, wp)
    imf = to_fixed(im, wp)
    tre = MPZ_ZERO
    tim = MPZ_ZERO
    one = MPZ_ONE << wp
    one_2wp = MPZ_ONE << (2*wp)
    critical_line = re == fhalf
    ln2 = ln2_fixed(wp)
    pi2 = pi_fixed(wp-1)
    wp2 = wp+wp
    for k in xrange(n):
        log = log_int_fixed(k+1, wp, ln2)
        # A square root is much cheaper than an exp
        if critical_line:
            # On re(s) = 1/2: (k+1)^(-1/2) via an integer square root.
            w = one_2wp // isqrt_fast((k+1) << wp2)
        else:
            w = exp_fixed((-ref*log) >> wp, wp)
        # Alternating signs folded into the coefficient difference.
        if k & 1:
            w *= (d[n] - d[k])
        else:
            w *= (d[k] - d[n])
        wre, wim = cos_sin_fixed((-imf*log)>>wp, wp, pi2)
        tre += (w * wre) >> wp
        tim += (w * wim) >> wp
    tre //= (-d[n])
    tim //= (-d[n])
    tre = from_man_exp(tre, -wp, wp)
    tim = from_man_exp(tim, -wp, wp)
    if alt:
        # The Borwein sum directly gives eta(s).
        return mpc_pos((tre, tim), prec, rnd)
    else:
        # Convert eta(s) to zeta(s): divide by (1 - 2^(1-s)).
        q = mpc_sub(mpc_one, mpc_pow(mpc_two, r, wp), wp)
        return mpc_div((tre, tim), q, prec, rnd)
1137
+
1138
def mpf_altzeta(s, prec, rnd=round_fast):
    """Dirichlet eta function (alternating zeta) of a real mpf *s*:
    delegates to mpf_zeta with the alternating flag set."""
    return mpf_zeta(s, prec, rnd, 1)
1140
+
1141
def mpc_altzeta(s, prec, rnd=round_fast):
    """Dirichlet eta function (alternating zeta) of a complex mpc *s*:
    delegates to mpc_zeta with the alternating flag set."""
    return mpc_zeta(s, prec, rnd, alt=1)
1143
+
1144
# Not optimized currently.
# Placeholder: no specialized fast zeta summation exists for real s,
# so callers must check for None and use the generic path instead.
mpf_zetasum = None
1146
+
1147
+
1148
def pow_fixed(x, n, wp):
    """
    Compute x**n where *x* is a fixed-point number with *wp* fractional
    bits, returning the result in the same fixed-point format.

    Uses binary (square-and-multiply) exponentiation; n = 0 yields the
    fixed-point representation of 1.
    """
    if n == 1:
        # Fast path: nothing to do for the first power.
        return x
    result = MPZ_ONE << wp
    base = x
    while n:
        if n & 1:
            result = (result * base) >> wp
        base = (base * base) >> wp
        n >>= 1
    return result
1159
+
1160
+ # TODO: optimize / cleanup interface / unify with list_primes
1161
+ sieve_cache = []
1162
+ primes_cache = []
1163
+ mult_cache = []
1164
+
1165
+ def primesieve(n):
1166
+ global sieve_cache, primes_cache, mult_cache
1167
+ if n < len(sieve_cache):
1168
+ sieve = sieve_cache#[:n+1]
1169
+ primes = primes_cache[:primes_cache.index(max(sieve))+1]
1170
+ mult = mult_cache#[:n+1]
1171
+ return sieve, primes, mult
1172
+ sieve = [0] * (n+1)
1173
+ mult = [0] * (n+1)
1174
+ primes = list_primes(n)
1175
+ for p in primes:
1176
+ #sieve[p::p] = p
1177
+ for k in xrange(p,n+1,p):
1178
+ sieve[k] = p
1179
+ for i, p in enumerate(sieve):
1180
+ if i >= 2:
1181
+ m = 1
1182
+ n = i // p
1183
+ while not n % p:
1184
+ n //= p
1185
+ m += 1
1186
+ mult[i] = m
1187
+ sieve_cache = sieve
1188
+ primes_cache = primes
1189
+ mult_cache = mult
1190
+ return sieve, primes, mult
1191
+
1192
def zetasum_sieved(critical_line, sre, sim, a, n, wp):
    """
    Compute the partial zeta sum sum(k^-s, k=a..a+n) for complex
    s = sre + sim*i given as fixed-point numbers with *wp* fractional
    bits; returns (re, im) as fixed-point integers.

    Exploits multiplicativity of k^-s: powers of small primes are
    precomputed, and composite k are assembled as products of prime
    power terms, avoiding most exp/log evaluations.  When
    *critical_line* is true (re(s) = 1/2), |k^-s| = 1/sqrt(k) is
    obtained via a cheap integer square root.
    """
    if a < 1:
        raise ValueError("a cannot be less than 1")
    sieve, primes, mult = primesieve(a+n)
    # basic_powers[p][m-1] holds p^(-s*m) as a fixed-point (re, im) pair.
    basic_powers = {}
    one = MPZ_ONE << wp
    one_2wp = MPZ_ONE << (2*wp)
    wp2 = wp+wp
    ln2 = ln2_fixed(wp)
    pi2 = pi_fixed(wp-1)
    for p in primes:
        # Only primes up to (a+n)/2 can occur as a proper factor.
        if p*2 > a+n:
            break
        log = log_int_fixed(p, wp, ln2)
        cos, sin = cos_sin_fixed((-sim*log)>>wp, wp, pi2)
        if critical_line:
            u = one_2wp // isqrt_fast(p<<wp2)
        else:
            u = exp_fixed((-sre*log)>>wp, wp)
        pre = (u*cos) >> wp
        pim = (u*sin) >> wp
        basic_powers[p] = [(pre, pim)]
        tre, tim = pre, pim
        # Successive powers p^(-2s), p^(-3s), ... up to the largest
        # exponent that can appear (log base p of a+n).
        for m in range(1,int(math.log(a+n,p)+0.01)+1):
            tre, tim = ((pre*tre-pim*tim)>>wp), ((pim*tre+pre*tim)>>wp)
            basic_powers[p].append((tre,tim))
    xre = MPZ_ZERO
    xim = MPZ_ZERO
    # The k = 1 term is exactly 1.
    if a == 1:
        xre += one
    aa = max(a,2)
    for k in xrange(aa, a+n+1):
        p = sieve[k]
        if p in basic_powers:
            # Composite with all prime-power factors precomputed:
            # peel off largest-prime-factor powers and multiply terms.
            m = mult[k]
            tre, tim = basic_powers[p][m-1]
            while 1:
                k //= p**m
                if k == 1:
                    break
                p = sieve[k]
                m = mult[k]
                pre, pim = basic_powers[p][m-1]
                tre, tim = ((pre*tre-pim*tim)>>wp), ((pim*tre+pre*tim)>>wp)
        else:
            # Large prime factor: compute k^-s directly.
            log = log_int_fixed(k, wp, ln2)
            cos, sin = cos_sin_fixed((-sim*log)>>wp, wp, pi2)
            if critical_line:
                u = one_2wp // isqrt_fast(k<<wp2)
            else:
                u = exp_fixed((-sre*log)>>wp, wp)
            tre = (u*cos) >> wp
            tim = (u*sin) >> wp
        xre += tre
        xim += tim
    return xre, xim
1248
+
1249
# Minimum number of terms before mpc_zetasum switches to the sieved
# summation. Set to something large to disable.
ZETASUM_SIEVE_CUTOFF = 10
1251
+
1252
def mpc_zetasum(s, a, n, derivatives, reflect, prec):
    """
    Fast version of mp._zetasum, assuming s = complex, a = integer.

    Computes, for each d in *derivatives*, the sum

        X_d = sum((-log(k))^d * k^(-s), k=a..a+n)

    and, if *reflect* is true, also Y_d with k^(s-1) in place of k^(-s).
    Returns (xs, ys): lists of mpc values (ys empty when not
    reflecting).
    """

    wp = prec + 10
    derivatives = list(derivatives)
    have_derivatives = derivatives != [0]
    have_one_derivative = len(derivatives) == 1

    # parse s
    sre, sim = s
    critical_line = (sre == fhalf)
    sre = to_fixed(sre, wp)
    sim = to_fixed(sim, wp)

    # Plain sum (no derivatives, no reflection): use the much faster
    # sieved summation. The n < 4e7 guard avoids overlong runs on
    # 32-bit builds.
    if a > 0 and n > ZETASUM_SIEVE_CUTOFF and not have_derivatives \
            and not reflect and (n < 4e7 or sys.maxsize > 2**32):
        re, im = zetasum_sieved(critical_line, sre, sim, a, n, wp)
        xs = [(from_man_exp(re, -wp, prec, 'n'), from_man_exp(im, -wp, prec, 'n'))]
        return xs, []

    maxd = max(derivatives)
    if not have_one_derivative:
        # Accumulate every order 0..maxd; unwanted ones are cheap.
        derivatives = range(maxd+1)

    # x_d = 0, y_d = 0
    xre = [MPZ_ZERO for d in derivatives]
    xim = [MPZ_ZERO for d in derivatives]
    if reflect:
        yre = [MPZ_ZERO for d in derivatives]
        yim = [MPZ_ZERO for d in derivatives]
    else:
        yre = yim = []

    one = MPZ_ONE << wp
    one_2wp = MPZ_ONE << (2*wp)

    ln2 = ln2_fixed(wp)
    pi2 = pi_fixed(wp-1)
    wp2 = wp+wp

    for w in xrange(a, a+n+1):
        log = log_int_fixed(w, wp, ln2)
        cos, sin = cos_sin_fixed((-sim*log)>>wp, wp, pi2)
        if critical_line:
            # On re(s) = 1/2, |w^-s| = 1/sqrt(w): integer sqrt is
            # much cheaper than exp.
            u = one_2wp // isqrt_fast(w<<wp2)
        else:
            u = exp_fixed((-sre*log)>>wp, wp)
        xterm_re = (u * cos) >> wp
        xterm_im = (u * sin) >> wp
        if reflect:
            # w^(s-1) = 1 / (w * w^-s); reuse cos/sin of the argument.
            reciprocal = (one_2wp // (u*w))
            yterm_re = (reciprocal * cos) >> wp
            yterm_im = (reciprocal * sin) >> wp

        if have_derivatives:
            if have_one_derivative:
                # Single derivative order: weight by log(w)^maxd.
                log = pow_fixed(log, maxd, wp)
                xre[0] += (xterm_re * log) >> wp
                xim[0] += (xterm_im * log) >> wp
                if reflect:
                    yre[0] += (yterm_re * log) >> wp
                    yim[0] += (yterm_im * log) >> wp
            else:
                # All orders: t runs through log(w)^d incrementally.
                t = MPZ_ONE << wp
                for d in derivatives:
                    xre[d] += (xterm_re * t) >> wp
                    xim[d] += (xterm_im * t) >> wp
                    if reflect:
                        yre[d] += (yterm_re * t) >> wp
                        yim[d] += (yterm_im * t) >> wp
                    t = (t * log) >> wp
        else:
            xre[0] += xterm_re
            xim[0] += xterm_im
            if reflect:
                yre[0] += yterm_re
                yim[0] += yterm_im
    # Apply the (-1)^d sign from differentiating k^(-s) d times.
    if have_derivatives:
        if have_one_derivative:
            if maxd % 2:
                xre[0] = -xre[0]
                xim[0] = -xim[0]
                if reflect:
                    yre[0] = -yre[0]
                    yim[0] = -yim[0]
        else:
            xre = [(-1)**d * xre[d] for d in derivatives]
            xim = [(-1)**d * xim[d] for d in derivatives]
            if reflect:
                yre = [(-1)**d * yre[d] for d in derivatives]
                yim = [(-1)**d * yim[d] for d in derivatives]
    xs = [(from_man_exp(xa, -wp, prec, 'n'), from_man_exp(xb, -wp, prec, 'n'))
        for (xa, xb) in zip(xre, xim)]
    ys = [(from_man_exp(ya, -wp, prec, 'n'), from_man_exp(yb, -wp, prec, 'n'))
        for (ya, yb) in zip(yre, yim)]
    return xs, ys
1350
+
1351
+
1352
+ #-----------------------------------------------------------------------#
1353
+ # #
1354
+ # The gamma function (NEW IMPLEMENTATION) #
1355
+ # #
1356
+ #-----------------------------------------------------------------------#
1357
+
1358
# Higher means faster, but more precomputation time
MAX_GAMMA_TAYLOR_PREC = 5000
# Need to derive higher bounds for Taylor series to go higher
assert MAX_GAMMA_TAYLOR_PREC < 15000

# Use Stirling's series if abs(x) > beta*prec
# Important: must be large enough for convergence!
GAMMA_STIRLING_BETA = 0.2

# Exact factorials are precomputed for arguments up to this size.
SMALL_FACTORIAL_CACHE_SIZE = 150

# prec -> Taylor coefficients of 1/gamma(1+x); see
# gamma_taylor_coefficients().
gamma_taylor_cache = {}
# k -> (p, q, bitcount(|p|), bitcount(q)); see stirling_coefficient().
gamma_stirling_cache = {}

# small_factorial_cache[n] = n! as an mpf value, for n = 0..150.
small_factorial_cache = [from_int(ifac(n)) for \
    n in range(SMALL_FACTORIAL_CACHE_SIZE+1)]
1374
+
1375
def zeta_array(N, prec):
    """
    zeta(n) = A * pi**n / n! + B

    where A is a rational number (A = Bernoulli number
    for n even) and B is an infinite sum over powers of exp(2*pi).
    (B = 0 for n even).

    Returns fixed-point values (with *prec* fractional bits) of
    zeta(0), zeta(1)... where only the entries for n >= 2 (and the
    exact zeta(0) = -1/2) are meaningful; odd values are built from
    the even ones via Ramanujan-type identities.

    TODO: this is currently only used for gamma, but could
    be very useful elsewhere.
    """
    extra = 30
    wp = prec+extra
    zeta_values = [MPZ_ZERO] * (N+2)
    pi = pi_fixed(wp)
    # STEP 1:
    one = MPZ_ONE << wp
    zeta_values[0] = -one//2
    f_2pi = mpf_shift(mpf_pi(wp),1)
    exp_2pi_k = exp_2pi = mpf_exp(f_2pi, wp)
    # Compute exponential series
    # Store values of 1/(exp(2*pi*k)-1),
    # exp(2*pi*k)/(exp(2*pi*k)-1)**2, 1/(exp(2*pi*k)-1)**2
    # pi*k*exp(2*pi*k)/(exp(2*pi*k)-1)**2
    exps3 = []
    k = 1
    while 1:
        # Each successive term loses ~9 bits (exp(2*pi) ~ 2^9);
        # stop when nothing of the working precision remains.
        tp = wp - 9*k
        if tp < 1:
            break
        # 1/(exp(2*pi*k-1)
        q1 = mpf_div(fone, mpf_sub(exp_2pi_k, fone, tp), tp)
        # pi*k*exp(2*pi*k)/(exp(2*pi*k)-1)**2
        q2 = mpf_mul(exp_2pi_k, mpf_mul(q1,q1,tp), tp)
        q1 = to_fixed(q1, wp)
        q2 = to_fixed(q2, wp)
        q2 = (k * q2 * pi) >> wp
        exps3.append((q1, q2))
        # Multiply for next round
        exp_2pi_k = mpf_mul(exp_2pi_k, exp_2pi, wp)
        k += 1
    # Exponential sum
    for n in xrange(3, N+1, 2):
        s = MPZ_ZERO
        k = 1
        for e1, e2 in exps3:
            if n%4 == 3:
                t = e1 // k**n
            else:
                U = (n-1)//4
                t = (e1 + e2//U) // k**n
            if not t:
                break
            s += t
            k += 1
        zeta_values[n] = -2*s
    # Even zeta values
    # zeta(2n) = |B_2n| * (2*pi)^(2n) / (2*(2n)!), accumulated
    # by updating pi_pow with (2*pi)^2 / ((n+1)*(n+2)) each step.
    B = [mpf_abs(mpf_bernoulli(k,wp)) for k in xrange(N+2)]
    pi_pow = fpi = mpf_pow_int(mpf_shift(mpf_pi(wp), 1), 2, wp)
    pi_pow = mpf_div(pi_pow, from_int(4), wp)
    for n in xrange(2,N+2,2):
        z = mpf_mul(B[n], pi_pow, wp)
        zeta_values[n] = to_fixed(z, wp)
        pi_pow = mpf_mul(pi_pow, fpi, wp)
        pi_pow = mpf_div(pi_pow, from_int((n+1)*(n+2)), wp)
    # Zeta sum
    # Combine even values with the exponential sums to finish the
    # odd zeta values (two residue classes mod 4 handled separately).
    reciprocal_pi = (one << wp) // pi
    for n in xrange(3, N+1, 4):
        U = (n-3)//4
        s = zeta_values[4*U+4]*(4*U+7)//4
        for k in xrange(1, U+1):
            s -= (zeta_values[4*k] * zeta_values[4*U+4-4*k]) >> wp
        zeta_values[n] += (2*s*reciprocal_pi) >> wp
    for n in xrange(5, N+1, 4):
        U = (n-1)//4
        s = zeta_values[4*U+2]*(2*U+1)
        for k in xrange(1, 2*U+1):
            s += ((-1)**k*2*k* zeta_values[2*k] * zeta_values[4*U+2-2*k])>>wp
        zeta_values[n] += ((s*reciprocal_pi)>>wp)//(2*U)
    # Drop the guard bits before returning.
    return [x>>extra for x in zeta_values]
1455
+
1456
def gamma_taylor_coefficients(inprec):
    """
    Gives the Taylor coefficients of 1/gamma(1+x) as
    a list of fixed-point numbers. Enough coefficients are returned
    to ensure that the series converges to the given precision
    when x is in [0.5, 1.5].

    Returns (coeffs, prec) where *coeffs* is ordered highest power
    first (ready for Horner evaluation) and *prec* is the possibly
    rounded-up precision the coefficients were computed at.
    """
    # Reuse nearby cache values (small case)
    # Round the precision up to a coarse grid so that nearby requests
    # share a single cache entry.
    if inprec < 400:
        prec = inprec + (10-(inprec%10))
    elif inprec < 1000:
        prec = inprec + (30-(inprec%30))
    else:
        prec = inprec
    if prec in gamma_taylor_cache:
        return gamma_taylor_cache[prec], prec

    # Experimentally determined bounds
    if prec < 1000:
        N = int(prec**0.76 + 2)
    else:
        # Valid to at least 15000 bits
        N = int(prec**0.787 + 2)

    # Reuse higher precision values
    # Any cached higher-precision coefficients can be truncated
    # (shifted down) to serve this request.
    for cprec in gamma_taylor_cache:
        if cprec > prec:
            coeffs = [x>>(cprec-prec) for x in gamma_taylor_cache[cprec][-N:]]
            if inprec < 1000:
                gamma_taylor_cache[prec] = coeffs
            return coeffs, prec

    # Cache at a higher precision (large case)
    if prec > 1000:
        prec = int(prec * 1.2)

    wp = prec + 20
    A = [0] * N
    A[0] = MPZ_ZERO
    A[1] = MPZ_ONE << wp
    A[2] = euler_fixed(wp)
    # SLOW, reference implementation
    #zeta_values = [0,0]+[to_fixed(mpf_zeta_int(k,wp),wp) for k in xrange(2,N)]
    zeta_values = zeta_array(N, wp)
    # Standard recurrence for the coefficients of 1/gamma(1+x) in
    # terms of euler and zeta(2), zeta(3), ...
    for k in xrange(3, N):
        a = (-A[2]*A[k-1])>>wp
        for j in xrange(2,k):
            a += ((-1)**j * zeta_values[j] * A[k-j]) >> wp
        a //= (1-k)
        A[k] = a
    A = [a>>20 for a in A]
    # Reverse to highest-power-first order and drop the trailing
    # zero coefficient (for x^0).
    A = A[::-1]
    A = A[:-1]
    gamma_taylor_cache[prec] = A
    #return A, prec
    # Recurse so the freshly cached entry is returned through the
    # normal lookup path (handles the rounded-up prec).
    return gamma_taylor_coefficients(inprec)
1512
+
1513
def gamma_fixed_taylor(xmpf, x, wp, prec, rnd, type):
    """
    Evaluate the gamma function via the Taylor series of 1/gamma(1+x),
    for an argument given both as an mpf (*xmpf*) and as a fixed-point
    number *x* with *wp* fractional bits.

    *type* selects the output as in mpf_gamma: 0 -> gamma(x),
    2 -> 1/gamma(x), 3 -> log(|gamma(x)|).

    The argument is shifted into the series' convergence interval
    around 1 using the recurrence gamma(x+1) = x*gamma(x), with the
    product of the shifts accumulated in r.
    """
    # Determine nearest multiple of N/2
    #n = int(x >> (wp-1))
    #steps = (n-1)>>1
    nearest_int = ((x >> (wp-1)) + MPZ_ONE) >> 1
    one = MPZ_ONE << wp
    coeffs, cwp = gamma_taylor_coefficients(wp)
    if nearest_int > 0:
        # Shift x down toward the interval around 1; r collects the
        # product (x-1)(x-2)... so gamma(x) = r / series(x_reduced).
        r = one
        for i in xrange(nearest_int-1):
            x -= one
            r = (r*x) >> wp
        x -= one
        # Horner evaluation of 1/gamma(1+x).
        p = MPZ_ZERO
        for c in coeffs:
            p = c + ((x*p)>>wp)
        p >>= (cwp-wp)
        if type == 0:
            return from_man_exp((r<<wp)//p, -wp, prec, rnd)
        if type == 2:
            return mpf_shift(from_rational(p, (r<<wp), prec, rnd), wp)
        if type == 3:
            return mpf_log(mpf_abs(from_man_exp((r<<wp)//p, -wp)), prec, rnd)
    else:
        # Nonpositive nearest integer: shift x upward instead, so
        # 1/gamma(x) = x*(x+1)*...*series(x_shifted).
        r = one
        for i in xrange(-nearest_int):
            r = (r*x) >> wp
            x += one
        p = MPZ_ZERO
        for c in coeffs:
            p = c + ((x*p)>>wp)
        p >>= (cwp-wp)
        if wp - bitcount(abs(x)) > 10:
            # pass very close to 0, so do floating-point multiply
            g = mpf_add(xmpf, from_int(-nearest_int))  # exact
            r = from_man_exp(p*r,-wp-wp)
            r = mpf_mul(r, g, wp)
            if type == 0:
                return mpf_div(fone, r, prec, rnd)
            if type == 2:
                return mpf_pos(r, prec, rnd)
            if type == 3:
                return mpf_log(mpf_abs(mpf_div(fone, r, wp)), prec, rnd)
        else:
            r = from_man_exp(x*p*r,-3*wp)
            if type == 0: return mpf_div(fone, r, prec, rnd)
            if type == 2: return mpf_pos(r, prec, rnd)
            if type == 3: return mpf_neg(mpf_log(mpf_abs(r), prec, rnd))
1561
+
1562
def stirling_coefficient(n):
    """
    Return (p, q, bitcount(|p|), bitcount(q)) where p/q is the
    coefficient B_n / (n*(n-1)) appearing in Stirling's series,
    with B_n the n-th Bernoulli number. Results are memoized in
    gamma_stirling_cache.
    """
    try:
        return gamma_stirling_cache[n]
    except KeyError:
        pass
    p, q = bernfrac(n)
    q *= MPZ(n*(n-1))
    entry = (p, q, bitcount(abs(p)), bitcount(q))
    gamma_stirling_cache[n] = entry
    return entry
1569
+
1570
def real_stirling_series(x, prec):
    """
    Sums the rational part of Stirling's expansion,

    log(sqrt(2*pi)) - z + 1/(12*z) - 1/(360*z^3) + ...

    *x* is a fixed-point number with *prec* fractional bits; the
    result is returned in the same format. The first eleven terms are
    unrolled with hard-coded Bernoulli-derived denominators; further
    terms use stirling_coefficient() with explicit magnitude tracking
    to keep the intermediate t from growing.
    """
    t = (MPZ_ONE<<(prec+prec)) // x # t = 1/x
    u = (t*t)>>prec # u = 1/x**2
    s = ln_sqrt2pi_fixed(prec) - x
    # Add initial terms of Stirling's series
    s += t//12; t = (t*u)>>prec
    s -= t//360; t = (t*u)>>prec
    s += t//1260; t = (t*u)>>prec
    s -= t//1680; t = (t*u)>>prec
    # Early exit: once t underflows to zero the series has converged.
    if not t: return s
    s += t//1188; t = (t*u)>>prec
    s -= 691*t//360360; t = (t*u)>>prec
    s += t//156; t = (t*u)>>prec
    if not t: return s
    s -= 3617*t//122400; t = (t*u)>>prec
    s += 43867*t//244188; t = (t*u)>>prec
    s -= 174611*t//125400; t = (t*u)>>prec
    if not t: return s
    k = 22
    # From here on, the coefficients are growing, so we
    # have to keep t at a roughly constant size
    usize = bitcount(abs(u))
    tsize = bitcount(abs(t))
    texp = 0
    while 1:
        p, q, pb, qb = stirling_coefficient(k)
        term_mag = tsize + pb + texp
        shift = -texp
        m = pb - term_mag
        if m > 0 and shift < m:
            p >>= m
            shift -= m
        m = tsize - term_mag
        if m > 0 and shift < m:
            w = t >> m
            shift -= m
        else:
            w = t
        # NOTE(review): 'w' (the magnitude-reduced copy of t matching
        # the adjusted 'shift') appears unused here — the term is built
        # from 't'. Verify against upstream mpmath whether
        # '(w*p//q) >> shift' was intended.
        term = (t*p//q) >> shift
        if not term:
            break
        s += term
        t = (t*u) >> usize
        texp -= (prec - usize)
        k += 2
    return s
1622
+
1623
def complex_stirling_series(x, y, prec):
    """
    Complex analogue of real_stirling_series: sums the rational part
    of Stirling's expansion for z = x + y*i, where *x* and *y* are
    fixed-point numbers with *prec* fractional bits. Returns the
    result as a fixed-point pair (re, im).

    Fixed-point complex multiplication is written out inline
    throughout ((a*c-b*d)>>prec, (a*d+b*c)>>prec).
    """
    # t = 1/z
    _m = (x*x + y*y) >> prec
    tre = (x << prec) // _m
    tim = (-y << prec) // _m
    # u = 1/z**2
    ure = (tre*tre - tim*tim) >> prec
    # 2*tre*tim via a shift by (prec-1).
    uim = tim*tre >> (prec-1)
    # s = log(sqrt(2*pi)) - z
    sre = ln_sqrt2pi_fixed(prec) - x
    sim = -y

    # Add initial terms of Stirling's series
    sre += tre//12; sim += tim//12;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    sre -= tre//360; sim -= tim//360;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    sre += tre//1260; sim += tim//1260;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    sre -= tre//1680; sim -= tim//1680;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    # Convergence check: stop once t has shrunk to a few ulps.
    if abs(tre) + abs(tim) < 5: return sre, sim
    sre += tre//1188; sim += tim//1188;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    sre -= 691*tre//360360; sim -= 691*tim//360360;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    sre += tre//156; sim += tim//156;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    if abs(tre) + abs(tim) < 5: return sre, sim
    sre -= 3617*tre//122400; sim -= 3617*tim//122400;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    sre += 43867*tre//244188; sim += 43867*tim//244188;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    sre -= 174611*tre//125400; sim -= 174611*tim//125400;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    if abs(tre) + abs(tim) < 5: return sre, sim

    k = 22
    # From here on, the coefficients are growing, so we
    # have to keep t at a roughly constant size
    usize = bitcount(max(abs(ure), abs(uim)))
    tsize = bitcount(max(abs(tre), abs(tim)))
    texp = 0
    while 1:
        p, q, pb, qb = stirling_coefficient(k)
        term_mag = tsize + pb + texp
        shift = -texp
        m = pb - term_mag
        if m > 0 and shift < m:
            p >>= m
            shift -= m
        m = tsize - term_mag
        if m > 0 and shift < m:
            wre = tre >> m
            wim = tim >> m
            shift -= m
        else:
            wre = tre
            wim = tim
        # NOTE(review): as in real_stirling_series, 'wre'/'wim' (the
        # magnitude-reduced copies matching the adjusted 'shift')
        # appear unused — the terms are built from tre/tim. Verify
        # against upstream mpmath.
        termre = (tre*p//q) >> shift
        termim = (tim*p//q) >> shift
        if abs(termre) + abs(termim) < 5:
            break
        sre += termre
        sim += termim
        tre, tim = ((tre*ure - tim*uim)>>usize), \
            ((tre*uim + tim*ure)>>usize)
        texp -= (prec - usize)
        k += 2
    return sre, sim
1693
+
1694
+
1695
def mpf_gamma(x, prec, rnd='d', type=0):
    """
    This function implements multipurpose evaluation of the gamma
    function, G(x), as well as the following versions of the same:

    type = 0 -- G(x)            [standard gamma function]
    type = 1 -- G(x+1) = x*G(x+1) = x!  [factorial]
    type = 2 -- 1/G(x)          [reciprocal gamma function]
    type = 3 -- log(|G(x)|)     [log-gamma function, real part]

    Strategy: exact results for small integers and half-integers,
    Taylor series of 1/gamma(1+x) near the real axis origin, and
    Stirling's asymptotic series (with argument reduction and, for
    negative x, the reflection formula) otherwise.
    """

    # Special values
    sign, man, exp, bc = x
    if not man:
        # Zero, infinities and NaN (mpf specials have man == 0).
        if x == fzero:
            if type == 1: return fone
            if type == 2: return fzero
            raise ValueError("gamma function pole")
        if x == finf:
            if type == 2: return fzero
            return finf
        return fnan

    # First of all, for log gamma, numbers can be well beyond the fixed-point
    # range, so we must take care of huge numbers before e.g. trying
    # to convert x to the nearest integer
    if type == 3:
        wp = prec+20
        if exp+bc > wp and not sign:
            # Leading terms of Stirling: log(gamma(x)) ~ x*log(x) - x.
            return mpf_sub(mpf_mul(x, mpf_log(x, wp), wp), x, prec, rnd)

    # We strongly want to special-case small integers
    is_integer = exp >= 0
    if is_integer:
        # Poles
        if sign:
            if type == 2:
                return fzero
            raise ValueError("gamma function pole")
        # n = x
        n = man << exp
        if n < SMALL_FACTORIAL_CACHE_SIZE:
            # gamma(n) = (n-1)!, read straight from the cache.
            if type == 0:
                return mpf_pos(small_factorial_cache[n-1], prec, rnd)
            if type == 1:
                return mpf_pos(small_factorial_cache[n], prec, rnd)
            if type == 2:
                return mpf_div(fone, small_factorial_cache[n-1], prec, rnd)
            if type == 3:
                return mpf_log(small_factorial_cache[n-1], prec, rnd)
    else:
        # floor(abs(x))
        n = int(man >> (-exp))

    # Estimate size and precision
    # Estimate log(gamma(|x|),2) as x*log(x,2)
    mag = exp + bc
    gamma_size = n*mag

    if type == 3:
        wp = prec + 20
    else:
        # Guard bits proportional to the magnitude of the result.
        wp = prec + bitcount(gamma_size) + 20

    # Very close to 0, pole
    # Use the Laurent expansion gamma(x) ~ 1/x - euler + ...
    if mag < -wp:
        if type == 0:
            return mpf_sub(mpf_div(fone,x, wp),mpf_shift(fone,-wp),prec,rnd)
        if type == 1: return mpf_sub(fone, x, prec, rnd)
        if type == 2: return mpf_add(x, mpf_shift(fone,mag-wp), prec, rnd)
        if type == 3: return mpf_neg(mpf_log(mpf_abs(x), prec, rnd))

    # From now on, we assume having a gamma function
    if type == 1:
        return mpf_gamma(mpf_add(x, fone), prec, rnd, 0)

    # Special case integers (those not small enough to be caught above,
    # but still small enough for an exact factorial to be faster
    # than an approximate algorithm), and half-integers
    if exp >= -1:
        if is_integer:
            if gamma_size < 10*wp:
                if type == 0:
                    return from_int(ifac(n-1), prec, rnd)
                if type == 2:
                    return from_rational(MPZ_ONE, ifac(n-1), prec, rnd)
                if type == 3:
                    return mpf_log(from_int(ifac(n-1)), prec, rnd)
        # half-integer
        # gamma at half-integers reduces to sqrt(pi) times a ratio
        # of double factorials.
        if n < 100 or gamma_size < 10*wp:
            if sign:
                w = sqrtpi_fixed(wp)
                if n % 2: f = ifac2(2*n+1)
                else: f = -ifac2(2*n+1)
                if type == 0:
                    return mpf_shift(from_rational(w, f, prec, rnd), -wp+n+1)
                if type == 2:
                    return mpf_shift(from_rational(f, w, prec, rnd), wp-n-1)
                if type == 3:
                    return mpf_log(mpf_shift(from_rational(w, abs(f),
                        prec, rnd), -wp+n+1), prec, rnd)
            elif n == 0:
                # x = 1/2: gamma(1/2) = sqrt(pi).
                if type == 0: return mpf_sqrtpi(prec, rnd)
                if type == 2: return mpf_div(fone, mpf_sqrtpi(wp), prec, rnd)
                if type == 3: return mpf_log(mpf_sqrtpi(wp), prec, rnd)
            else:
                w = sqrtpi_fixed(wp)
                w = from_man_exp(w * ifac2(2*n-1), -wp-n)
                if type == 0: return mpf_pos(w, prec, rnd)
                if type == 2: return mpf_div(fone, w, prec, rnd)
                if type == 3: return mpf_log(mpf_abs(w), prec, rnd)

    # Convert to fixed point
    offset = exp + wp
    if offset >= 0: absxman = man << offset
    else: absxman = man >> (-offset)

    # For log gamma, provide accurate evaluation for x = 1+eps and 2+eps
    # (loggamma has zeros at 1 and 2, so cancellation occurs there).
    if type == 3 and not sign:
        one = MPZ_ONE << wp
        one_dist = abs(absxman-one)
        two_dist = abs(absxman-2*one)
        cancellation = (wp - bitcount(min(one_dist, two_dist)))
        if cancellation > 10:
            xsub1 = mpf_sub(fone, x)
            xsub2 = mpf_sub(ftwo, x)
            xsub1mag = xsub1[2]+xsub1[3]
            xsub2mag = xsub2[2]+xsub2[3]
            # Within an ulp of 1 or 2: use the linearizations
            # loggamma(1+e) ~ -euler*e, loggamma(2+e) ~ (1-euler)*e.
            if xsub1mag < -wp:
                return mpf_mul(mpf_euler(wp), mpf_sub(fone, x), prec, rnd)
            if xsub2mag < -wp:
                return mpf_mul(mpf_sub(fone, mpf_euler(wp)),
                    mpf_sub(x, ftwo), prec, rnd)
            # Proceed but increase precision
            wp += max(-xsub1mag, -xsub2mag)
            offset = exp + wp
            if offset >= 0: absxman = man << offset
            else: absxman = man >> (-offset)

    # Use Taylor series if appropriate
    n_for_stirling = int(GAMMA_STIRLING_BETA*wp)
    if n < max(100, n_for_stirling) and wp < MAX_GAMMA_TAYLOR_PREC:
        if sign:
            absxman = -absxman
        return gamma_fixed_taylor(x, absxman, wp, prec, rnd, type)

    # Use Stirling's series
    # First ensure that |x| is large enough for rapid convergence
    xorig = x

    # Argument reduction
    # Shift |x| up to n_for_stirling via gamma(x) = gamma(x+d)/(x...(x+d-1)),
    # accumulating the product of the shifts in r.
    r = 0
    if n < n_for_stirling:
        r = one = MPZ_ONE << wp
        d = n_for_stirling - n
        for k in xrange(d):
            r = (r * absxman) >> wp
            absxman += one
        x = xabs = from_man_exp(absxman, -wp)
        if sign:
            x = mpf_neg(x)
    else:
        xabs = mpf_abs(x)

    # Asymptotic series
    # y = S(x); then add (x - 1/2)*log(x) to complete log(gamma(x)).
    y = real_stirling_series(absxman, wp)
    u = to_fixed(mpf_log(xabs, wp), wp)
    u = ((absxman - (MPZ_ONE<<(wp-1))) * u) >> wp
    y += u
    w = from_man_exp(y, -wp)

    # Compute final value
    if sign:
        # Reflection formula
        # gamma(x) * gamma(-x) = -pi / (x * sin(pi*x))
        A = mpf_mul(mpf_sin_pi(xorig, wp), xorig, wp)
        B = mpf_neg(mpf_pi(wp))
        if type == 0 or type == 2:
            A = mpf_mul(A, mpf_exp(w, wp))
            if r:
                B = mpf_mul(B, from_man_exp(r, -wp), wp)
            if type == 0:
                return mpf_div(B, A, prec, rnd)
            if type == 2:
                return mpf_div(A, B, prec, rnd)
        if type == 3:
            if r:
                B = mpf_mul(B, from_man_exp(r, -wp), wp)
            A = mpf_add(mpf_log(mpf_abs(A), wp), w, wp)
            return mpf_sub(mpf_log(mpf_abs(B), wp), A, prec, rnd)
    else:
        if type == 0:
            if r:
                # Undo the argument reduction: divide by the product r.
                return mpf_div(mpf_exp(w, wp),
                    from_man_exp(r, -wp), prec, rnd)
            return mpf_exp(w, prec, rnd)
        if type == 2:
            if r:
                return mpf_div(from_man_exp(r, -wp),
                    mpf_exp(w, wp), prec, rnd)
            return mpf_exp(mpf_neg(w), prec, rnd)
        if type == 3:
            if r:
                return mpf_sub(w, mpf_log(from_man_exp(r,-wp), wp), prec, rnd)
            return mpf_pos(w, prec, rnd)
1899
+
1900
+
1901
+ def mpc_gamma(z, prec, rnd='d', type=0):
1902
+ a, b = z
1903
+ asign, aman, aexp, abc = a
1904
+ bsign, bman, bexp, bbc = b
1905
+
1906
+ if b == fzero:
1907
+ # Imaginary part on negative half-axis for log-gamma function
1908
+ if type == 3 and asign:
1909
+ re = mpf_gamma(a, prec, rnd, 3)
1910
+ n = (-aman) >> (-aexp)
1911
+ im = mpf_mul_int(mpf_pi(prec+10), n, prec, rnd)
1912
+ return re, im
1913
+ return mpf_gamma(a, prec, rnd, type), fzero
1914
+
1915
+ # Some kind of complex inf/nan
1916
+ if (not aman and aexp) or (not bman and bexp):
1917
+ return (fnan, fnan)
1918
+
1919
+ # Initial working precision
1920
+ wp = prec + 20
1921
+
1922
+ amag = aexp+abc
1923
+ bmag = bexp+bbc
1924
+ if aman:
1925
+ mag = max(amag, bmag)
1926
+ else:
1927
+ mag = bmag
1928
+
1929
+ # Close to 0
1930
+ if mag < -8:
1931
+ if mag < -wp:
1932
+ # 1/gamma(z) = z + euler*z^2 + O(z^3)
1933
+ v = mpc_add(z, mpc_mul_mpf(mpc_mul(z,z,wp),mpf_euler(wp),wp), wp)
1934
+ if type == 0: return mpc_reciprocal(v, prec, rnd)
1935
+ if type == 1: return mpc_div(z, v, prec, rnd)
1936
+ if type == 2: return mpc_pos(v, prec, rnd)
1937
+ if type == 3: return mpc_log(mpc_reciprocal(v, prec), prec, rnd)
1938
+ elif type != 1:
1939
+ wp += (-mag)
1940
+
1941
+ # Handle huge log-gamma values; must do this before converting to
1942
+ # a fixed-point value. TODO: determine a precise cutoff of validity
1943
+ # depending on amag and bmag
1944
+ if type == 3 and mag > wp and ((not asign) or (bmag >= amag)):
1945
+ return mpc_sub(mpc_mul(z, mpc_log(z, wp), wp), z, prec, rnd)
1946
+
1947
+ # From now on, we assume having a gamma function
1948
+ if type == 1:
1949
+ return mpc_gamma((mpf_add(a, fone), b), prec, rnd, 0)
1950
+
1951
+ an = abs(to_int(a))
1952
+ bn = abs(to_int(b))
1953
+ absn = max(an, bn)
1954
+ gamma_size = absn*mag
1955
+ if type == 3:
1956
+ pass
1957
+ else:
1958
+ wp += bitcount(gamma_size)
1959
+
1960
+ # Reflect to the right half-plane. Note that Stirling's expansion
1961
+ # is valid in the left half-plane too, as long as we're not too close
1962
+ # to the real axis, but in order to use this argument reduction
1963
+ # in the negative direction must be implemented.
1964
+ #need_reflection = asign and ((bmag < 0) or (amag-bmag > 4))
1965
+ need_reflection = asign
1966
+ zorig = z
1967
+ if need_reflection:
1968
+ z = mpc_neg(z)
1969
+ asign, aman, aexp, abc = a = z[0]
1970
+ bsign, bman, bexp, bbc = b = z[1]
1971
+
1972
+ # Imaginary part very small compared to real one?
1973
+ yfinal = 0
1974
+ balance_prec = 0
1975
+ if bmag < -10:
1976
+ # Check z ~= 1 and z ~= 2 for loggamma
1977
+ if type == 3:
1978
+ zsub1 = mpc_sub_mpf(z, fone)
1979
+ if zsub1[0] == fzero:
1980
+ cancel1 = -bmag
1981
+ else:
1982
+ cancel1 = -max(zsub1[0][2]+zsub1[0][3], bmag)
1983
+ if cancel1 > wp:
1984
+ pi = mpf_pi(wp)
1985
+ x = mpc_mul_mpf(zsub1, pi, wp)
1986
+ x = mpc_mul(x, x, wp)
1987
+ x = mpc_div_mpf(x, from_int(12), wp)
1988
+ y = mpc_mul_mpf(zsub1, mpf_neg(mpf_euler(wp)), wp)
1989
+ yfinal = mpc_add(x, y, wp)
1990
+ if not need_reflection:
1991
+ return mpc_pos(yfinal, prec, rnd)
1992
+ elif cancel1 > 0:
1993
+ wp += cancel1
1994
+ zsub2 = mpc_sub_mpf(z, ftwo)
1995
+ if zsub2[0] == fzero:
1996
+ cancel2 = -bmag
1997
+ else:
1998
+ cancel2 = -max(zsub2[0][2]+zsub2[0][3], bmag)
1999
+ if cancel2 > wp:
2000
+ pi = mpf_pi(wp)
2001
+ t = mpf_sub(mpf_mul(pi, pi), from_int(6))
2002
+ x = mpc_mul_mpf(mpc_mul(zsub2, zsub2, wp), t, wp)
2003
+ x = mpc_div_mpf(x, from_int(12), wp)
2004
+ y = mpc_mul_mpf(zsub2, mpf_sub(fone, mpf_euler(wp)), wp)
2005
+ yfinal = mpc_add(x, y, wp)
2006
+ if not need_reflection:
2007
+ return mpc_pos(yfinal, prec, rnd)
2008
+ elif cancel2 > 0:
2009
+ wp += cancel2
2010
+ if bmag < -wp:
2011
+ # Compute directly from the real gamma function.
2012
+ pp = 2*(wp+10)
2013
+ aabs = mpf_abs(a)
2014
+ eps = mpf_shift(fone, amag-wp)
2015
+ x1 = mpf_gamma(aabs, pp, type=type)
2016
+ x2 = mpf_gamma(mpf_add(aabs, eps), pp, type=type)
2017
+ xprime = mpf_div(mpf_sub(x2, x1, pp), eps, pp)
2018
+ y = mpf_mul(b, xprime, prec, rnd)
2019
+ yfinal = (x1, y)
2020
+ # Note: we still need to use the reflection formula for
2021
+ # near-poles, and the correct branch of the log-gamma function
2022
+ if not need_reflection:
2023
+ return mpc_pos(yfinal, prec, rnd)
2024
+ else:
2025
+ balance_prec += (-bmag)
2026
+
2027
+ wp += balance_prec
2028
+ n_for_stirling = int(GAMMA_STIRLING_BETA*wp)
2029
+ need_reduction = absn < n_for_stirling
2030
+
2031
+ afix = to_fixed(a, wp)
2032
+ bfix = to_fixed(b, wp)
2033
+
2034
+ r = 0
2035
+ if not yfinal:
2036
+ zprered = z
2037
+ # Argument reduction
2038
+ if absn < n_for_stirling:
2039
+ absn = complex(an, bn)
2040
+ d = int((1 + n_for_stirling**2 - bn**2)**0.5 - an)
2041
+ rre = one = MPZ_ONE << wp
2042
+ rim = MPZ_ZERO
2043
+ for k in xrange(d):
2044
+ rre, rim = ((afix*rre-bfix*rim)>>wp), ((afix*rim + bfix*rre)>>wp)
2045
+ afix += one
2046
+ r = from_man_exp(rre, -wp), from_man_exp(rim, -wp)
2047
+ a = from_man_exp(afix, -wp)
2048
+ z = a, b
2049
+
2050
+ yre, yim = complex_stirling_series(afix, bfix, wp)
2051
+ # (z-1/2)*log(z) + S
2052
+ lre, lim = mpc_log(z, wp)
2053
+ lre = to_fixed(lre, wp)
2054
+ lim = to_fixed(lim, wp)
2055
+ yre = ((lre*afix - lim*bfix)>>wp) - (lre>>1) + yre
2056
+ yim = ((lre*bfix + lim*afix)>>wp) - (lim>>1) + yim
2057
+ y = from_man_exp(yre, -wp), from_man_exp(yim, -wp)
2058
+
2059
+ if r and type == 3:
2060
+ # If re(z) > 0 and abs(z) <= 4, the branches of loggamma(z)
2061
+ # and log(gamma(z)) coincide. Otherwise, use the zeroth order
2062
+ # Stirling expansion to compute the correct imaginary part.
2063
+ y = mpc_sub(y, mpc_log(r, wp), wp)
2064
+ zfa = to_float(zprered[0])
2065
+ zfb = to_float(zprered[1])
2066
+ zfabs = math.hypot(zfa,zfb)
2067
+ #if not (zfa > 0.0 and zfabs <= 4):
2068
+ yfb = to_float(y[1])
2069
+ u = math.atan2(zfb, zfa)
2070
+ if zfabs <= 0.5:
2071
+ gi = 0.577216*zfb - u
2072
+ else:
2073
+ gi = -zfb - 0.5*u + zfa*u + zfb*math.log(zfabs)
2074
+ n = int(math.floor((gi-yfb)/(2*math.pi)+0.5))
2075
+ y = (y[0], mpf_add(y[1], mpf_mul_int(mpf_pi(wp), 2*n, wp), wp))
2076
+
2077
+ if need_reflection:
2078
+ if type == 0 or type == 2:
2079
+ A = mpc_mul(mpc_sin_pi(zorig, wp), zorig, wp)
2080
+ B = (mpf_neg(mpf_pi(wp)), fzero)
2081
+ if yfinal:
2082
+ if type == 2:
2083
+ A = mpc_div(A, yfinal, wp)
2084
+ else:
2085
+ A = mpc_mul(A, yfinal, wp)
2086
+ else:
2087
+ A = mpc_mul(A, mpc_exp(y, wp), wp)
2088
+ if r:
2089
+ B = mpc_mul(B, r, wp)
2090
+ if type == 0: return mpc_div(B, A, prec, rnd)
2091
+ if type == 2: return mpc_div(A, B, prec, rnd)
2092
+
2093
+ # Reflection formula for the log-gamma function with correct branch
2094
+ # http://functions.wolfram.com/GammaBetaErf/LogGamma/16/01/01/0006/
2095
+ # LogGamma[z] == -LogGamma[-z] - Log[-z] +
2096
+ # Sign[Im[z]] Floor[Re[z]] Pi I + Log[Pi] -
2097
+ # Log[Sin[Pi (z - Floor[Re[z]])]] -
2098
+ # Pi I (1 - Abs[Sign[Im[z]]]) Abs[Floor[Re[z]]]
2099
+ if type == 3:
2100
+ if yfinal:
2101
+ s1 = mpc_neg(yfinal)
2102
+ else:
2103
+ s1 = mpc_neg(y)
2104
+ # s -= log(-z)
2105
+ s1 = mpc_sub(s1, mpc_log(mpc_neg(zorig), wp), wp)
2106
+ # floor(re(z))
2107
+ rezfloor = mpf_floor(zorig[0])
2108
+ imzsign = mpf_sign(zorig[1])
2109
+ pi = mpf_pi(wp)
2110
+ t = mpf_mul(pi, rezfloor)
2111
+ t = mpf_mul_int(t, imzsign, wp)
2112
+ s1 = (s1[0], mpf_add(s1[1], t, wp))
2113
+ s1 = mpc_add_mpf(s1, mpf_log(pi, wp), wp)
2114
+ t = mpc_sin_pi(mpc_sub_mpf(zorig, rezfloor), wp)
2115
+ t = mpc_log(t, wp)
2116
+ s1 = mpc_sub(s1, t, wp)
2117
+ # Note: may actually be unused, because we fall back
2118
+ # to the mpf_ function for real arguments
2119
+ if not imzsign:
2120
+ t = mpf_mul(pi, mpf_floor(rezfloor), wp)
2121
+ s1 = (s1[0], mpf_sub(s1[1], t, wp))
2122
+ return mpc_pos(s1, prec, rnd)
2123
+ else:
2124
+ if type == 0:
2125
+ if r:
2126
+ return mpc_div(mpc_exp(y, wp), r, prec, rnd)
2127
+ return mpc_exp(y, prec, rnd)
2128
+ if type == 2:
2129
+ if r:
2130
+ return mpc_div(r, mpc_exp(y, wp), prec, rnd)
2131
+ return mpc_exp(mpc_neg(y), prec, rnd)
2132
+ if type == 3:
2133
+ return mpc_pos(y, prec, rnd)
2134
+
2135
def mpf_factorial(x, prec, rnd='d'):
    """Real factorial, x! = gamma(x+1), delegated to mpf_gamma."""
    # type=1 selects the factorial variant of the gamma computation.
    return mpf_gamma(x, prec, rnd, type=1)
2137
+
2138
def mpc_factorial(x, prec, rnd='d'):
    """Complex factorial, x! = gamma(x+1); the final argument 1
    selects the factorial variant of the gamma computation."""
    return mpc_gamma(x, prec, rnd, 1)
2140
+
2141
def mpf_rgamma(x, prec, rnd='d'):
    """Reciprocal gamma function, 1/gamma(x), for a real argument."""
    # type=2 selects the reciprocal-gamma variant.
    return mpf_gamma(x, prec, rnd, type=2)
2143
+
2144
def mpc_rgamma(x, prec, rnd='d'):
    """Reciprocal gamma function, 1/gamma(x), for a complex argument;
    the final argument 2 selects the reciprocal-gamma variant."""
    return mpc_gamma(x, prec, rnd, 2)
2146
+
2147
def mpf_loggamma(x, prec, rnd='d'):
    """
    Log-gamma function of a real number.

    Raises ComplexResult for negative x, where loggamma is
    complex-valued; callers must use mpc_loggamma there.
    """
    sign, man, exp, bc = x
    if sign:
        raise ComplexResult
    # type=3 selects the log-gamma variant of the gamma computation.
    return mpf_gamma(x, prec, rnd, 3)
2152
+
2153
def mpc_loggamma(z, prec, rnd='d'):
    """
    Log-gamma function of a complex number z = a + b*i.
    """
    a, b = z
    asign, aman, aexp, abc = a
    bsign, bman, bexp, bbc = b
    if b == fzero and asign:
        # z on the negative real axis: take the real part from the
        # real log-gamma code (type=3) and attach the imaginary part
        # n*pi of the branch, with n recovered from the mantissa and
        # exponent of a (assumes aexp <= 0 here -- NOTE(review):
        # confirm this path is only reached for such exact values).
        re = mpf_gamma(a, prec, rnd, 3)
        n = (-aman) >> (-aexp)
        im = mpf_mul_int(mpf_pi(prec+10), n, prec, rnd)
        return re, im
    return mpc_gamma(z, prec, rnd, 3)
2163
+
2164
def mpf_gamma_int(n, prec, rnd=round_fast):
    """
    Gamma function of a positive integer n, i.e. (n-1)!.

    Small arguments are served from the precomputed factorial cache;
    anything larger falls through to the general mpf_gamma.
    """
    if n < SMALL_FACTORIAL_CACHE_SIZE:
        # small_factorial_cache[n-1] holds (n-1)! as an mpf value.
        return mpf_pos(small_factorial_cache[n-1], prec, rnd)
    return mpf_gamma(from_int(n), prec, rnd)
venv/lib/python3.10/site-packages/mpmath/libmp/libelefun.py ADDED
@@ -0,0 +1,1428 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This module implements computation of elementary transcendental
3
+ functions (powers, logarithms, trigonometric and hyperbolic
4
+ functions, inverse trigonometric and hyperbolic) for real
5
+ floating-point numbers.
6
+
7
+ For complex and interval implementations of the same functions,
8
+ see libmpc and libmpi.
9
+
10
+ """
11
+
12
+ import math
13
+ from bisect import bisect
14
+
15
+ from .backend import xrange
16
+ from .backend import MPZ, MPZ_ZERO, MPZ_ONE, MPZ_TWO, MPZ_FIVE, BACKEND
17
+
18
+ from .libmpf import (
19
+ round_floor, round_ceiling, round_down, round_up,
20
+ round_nearest, round_fast,
21
+ ComplexResult,
22
+ bitcount, bctable, lshift, rshift, giant_steps, sqrt_fixed,
23
+ from_int, to_int, from_man_exp, to_fixed, to_float, from_float,
24
+ from_rational, normalize,
25
+ fzero, fone, fnone, fhalf, finf, fninf, fnan,
26
+ mpf_cmp, mpf_sign, mpf_abs,
27
+ mpf_pos, mpf_neg, mpf_add, mpf_sub, mpf_mul, mpf_div, mpf_shift,
28
+ mpf_rdiv_int, mpf_pow_int, mpf_sqrt,
29
+ reciprocal_rnd, negative_rnd, mpf_perturb,
30
+ isqrt_fast
31
+ )
32
+
33
+ from .libintmath import ifib
34
+
35
+
36
+ #-------------------------------------------------------------------------------
37
+ # Tuning parameters
38
+ #-------------------------------------------------------------------------------
39
+
40
+ # Cutoff for computing exp from cosh+sinh. This reduces the
41
+ # number of terms by half, but also requires a square root which
42
+ # is expensive with the pure-Python square root code.
43
+ if BACKEND == 'python':
44
+ EXP_COSH_CUTOFF = 600
45
+ else:
46
+ EXP_COSH_CUTOFF = 400
47
+ # Cutoff for using more than 2 series
48
+ EXP_SERIES_U_CUTOFF = 1500
49
+
50
+ # Also basically determined by sqrt
51
+ if BACKEND == 'python':
52
+ COS_SIN_CACHE_PREC = 400
53
+ else:
54
+ COS_SIN_CACHE_PREC = 200
55
+ COS_SIN_CACHE_STEP = 8
56
+ cos_sin_cache = {}
57
+
58
+ # Number of integer logarithms to cache (for zeta sums)
59
+ MAX_LOG_INT_CACHE = 2000
60
+ log_int_cache = {}
61
+
62
+ LOG_TAYLOR_PREC = 2500 # Use Taylor series with caching up to this prec
63
+ LOG_TAYLOR_SHIFT = 9 # Cache log values in steps of size 2^-N
64
+ log_taylor_cache = {}
65
+ # prec/size ratio of x for fastest convergence in AGM formula
66
+ LOG_AGM_MAG_PREC_RATIO = 20
67
+
68
+ ATAN_TAYLOR_PREC = 3000 # Same as for log
69
+ ATAN_TAYLOR_SHIFT = 7 # steps of size 2^-N
70
+ atan_taylor_cache = {}
71
+
72
+
73
+ # ~= next power of two + 20
74
+ cache_prec_steps = [22,22]
75
+ for k in xrange(1, bitcount(LOG_TAYLOR_PREC)+1):
76
+ cache_prec_steps += [min(2**k,LOG_TAYLOR_PREC)+20] * 2**(k-1)
77
+
78
+
79
+ #----------------------------------------------------------------------------#
80
+ # #
81
+ # Elementary mathematical constants #
82
+ # #
83
+ #----------------------------------------------------------------------------#
84
+
85
def constant_memo(f):
    """
    Decorator that memoizes a fixed-point constant generator.

    The wrapped function must accept a precision in bits and return the
    constant as a fixed-point integer at that precision. The value is
    computed once with a little extra headroom; any later request at
    equal or lower precision is served by shifting the cached integer
    down instead of recomputing.
    """
    # The cache lives as attributes on the wrapped function itself.
    f.memo_prec = -1
    f.memo_val = None
    def wrapper(prec, **kwargs):
        cached_prec = f.memo_prec
        if prec > cached_prec:
            # Recompute with ~5% + 10 bits of headroom so that slightly
            # larger future requests can reuse this cached value.
            cached_prec = int(prec*1.05+10)
            f.memo_val = f(cached_prec, **kwargs)
            f.memo_prec = cached_prec
        return f.memo_val >> (cached_prec - prec)
    wrapper.__name__ = f.__name__
    wrapper.__doc__ = f.__doc__
    return wrapper
105
+
106
def def_mpf_constant(fixed):
    """
    Create a function that computes the mpf value for a mathematical
    constant, given a function that computes the fixed-point value.

    Assumptions: the constant is positive and has magnitude ~= 1;
    the fixed-point function rounds to floor.
    """
    def f(prec, rnd=round_fast):
        # 20 guard bits over the requested precision.
        wp = prec + 20
        v = fixed(wp)
        # The fixed-point value is a floor; bump by one ulp so that
        # upward-directed rounding modes round to the correct side.
        if rnd in (round_up, round_ceiling):
            v += 1
        return normalize(0, v, -wp, bitcount(v), prec, rnd)
    f.__doc__ = fixed.__doc__
    return f
122
+
123
def bsp_acot(q, a, b, hyperbolic):
    # Binary-splitting kernel for the Taylor series of acot(q)
    # (alternating signs) or acoth(q) (all-positive terms), over the
    # term index range [a, b). Returns an exact integer triple
    # (p, q, r) that acot_fixed recombines into the series sum.
    if b - a == 1:
        # Base case: one series term with odd denominator a1 = 2*a+3.
        a1 = MPZ(2*a + 3)
        if hyperbolic or a&1:
            return MPZ_ONE, a1 * q**2, a1
        else:
            # Alternating sign in the circular (acot) series.
            return -MPZ_ONE, a1 * q**2, a1
    m = (a+b)//2
    p1, q1, r1 = bsp_acot(q, a, m, hyperbolic)
    p2, q2, r2 = bsp_acot(q, m, b, hyperbolic)
    # Standard binary-splitting recombination of the two halves.
    return q2*p1 + r1*p2, q1*q2, r1*r2
134
+
135
+ # the acoth(x) series converges like the geometric series for x^2
136
+ # N = ceil(p*log(2)/(2*log(x)))
137
def acot_fixed(a, prec, hyperbolic):
    """
    Compute acot(a) or acoth(a) for an integer a with binary splitting; see
    http://numbers.computation.free.fr/Constants/Algorithms/splitting.html
    """
    # The series converges like a geometric series in 1/a^2, so about
    # prec*log(2)/(2*log(a)) terms suffice; 0.35 ~= log(2)/2, plus a
    # safety margin of 20 terms.
    N = int(0.35 * prec/math.log(a) + 20)
    p, q, r = bsp_acot(a, 0,N, hyperbolic)
    # Leading term 1/a folded in via (p+q)/(q*a), scaled to fixed point.
    return ((p+q)<<prec)//(q*a)
145
+
146
def machin(coefs, prec, hyperbolic=False):
    """
    Evaluate a Machin-like formula, i.e. a linear combination
    c1*acot[h](n1) + c2*acot[h](n2) + ..., supplied as a list of
    (c, n) pairs, in fixed-point arithmetic at precision prec.
    """
    # A few guard bits absorb the rounding error of each term.
    guard = 10
    terms = (MPZ(c) * acot_fixed(MPZ(n), prec + guard, hyperbolic)
             for c, n in coefs)
    total = sum(terms, MPZ_ZERO)
    return (total >> guard)
158
+
159
+ # Logarithms of integers are needed for various computations involving
160
+ # logarithms, powers, radix conversion, etc
161
+
162
@constant_memo
def ln2_fixed(prec):
    """
    Computes ln(2). This is done with a hyperbolic Machin-type formula,
    with binary splitting at high precision.
    """
    # ln(2) = 18*acoth(26) - 2*acoth(4801) + 8*acoth(8749)
    return machin([(18, 26), (-2, 4801), (8, 8749)], prec, True)
169
+
170
@constant_memo
def ln10_fixed(prec):
    """
    Computes ln(10). This is done with a hyperbolic Machin-type formula.
    """
    # ln(10) = 46*acoth(31) + 34*acoth(49) + 20*acoth(161)
    return machin([(46, 31), (34, 49), (20, 161)], prec, True)
176
+
177
+
178
+ r"""
179
+ For computation of pi, we use the Chudnovsky series:
180
+
181
+ oo
182
+ ___ k
183
+ 1 \ (-1) (6 k)! (A + B k)
184
+ ----- = ) -----------------------
185
+ 12 pi /___ 3 3k+3/2
186
+ (3 k)! (k!) C
187
+ k = 0
188
+
189
+ where A, B, and C are certain integer constants. This series adds roughly
190
+ 14 digits per term. Note that C^(3/2) can be extracted so that the
191
+ series contains only rational terms. This makes binary splitting very
192
+ efficient.
193
+
194
+ The recurrence formulas for the binary splitting were taken from
195
+ ftp://ftp.gmplib.org/pub/src/gmp-chudnovsky.c
196
+
197
+ Previously, Machin's formula was used at low precision and the AGM iteration
198
+ was used at high precision. However, the Chudnovsky series is essentially as
199
+ fast as the Machin formula at low precision and in practice about 3x faster
200
+ than the AGM at high precision (despite theoretically having a worse
201
+ asymptotic complexity), so there is no reason not to use it in all cases.
202
+
203
+ """
204
+
205
+ # Constants in Chudnovsky's series
206
+ CHUD_A = MPZ(13591409)
207
+ CHUD_B = MPZ(545140134)
208
+ CHUD_C = MPZ(640320)
209
+ CHUD_D = MPZ(12)
210
+
211
def bs_chudnovsky(a, b, level, verbose):
    """
    Computes the sum from a to b of the series in the Chudnovsky
    formula. Returns g, p, q where p/q is the sum as an exact
    fraction and g is a temporary value used to save work
    for recursive calls.
    """
    if b-a == 1:
        # Base case: one series term, written as the exact rational
        # q/p with the helper factor g = (6b-5)(2b-1)(6b-1).
        g = MPZ((6*b-5)*(2*b-1)*(6*b-1))
        p = b**3 * CHUD_C**3 // 24
        q = (-1)**b * g * (CHUD_A+CHUD_B*b)
    else:
        # Only report splits near the top of the recursion tree.
        if verbose and level < 4:
            print(" binary splitting", a, b)
        mid = (a+b)//2
        g1, p1, q1 = bs_chudnovsky(a, mid, level+1, verbose)
        g2, p2, q2 = bs_chudnovsky(mid, b, level+1, verbose)
        # Recombination rules from gmp-chudnovsky.c (see module notes).
        p = p1*p2
        g = g1*g2
        q = q1*p2 + q2*g1
    return g, p, q
232
+
233
@constant_memo
def pi_fixed(prec, verbose=False, verbose_base=None):
    """
    Compute floor(pi * 2**prec) as a big integer.

    This is done using Chudnovsky's series (see comments in
    libelefun.py for details).
    """
    # The Chudnovsky series gives 14.18 digits per term;
    # 3.3219... converts bits to decimal digits.
    N = int(prec/3.3219280948/14.181647462 + 2)
    if verbose:
        print("binary splitting with N =", N)
    g, p, q = bs_chudnovsky(0, N, 0, verbose)
    # sqrt(C) at fixed-point precision 2*prec (C^(3/2) was factored
    # out of the series so the splitting stays rational).
    sqrtC = isqrt_fast(CHUD_C<<(2*prec))
    v = p*CHUD_C*sqrtC//((q+CHUD_A*p)*CHUD_D)
    return v
249
+
250
def degree_fixed(prec):
    """One angular degree (pi/180 radians) as a fixed-point integer."""
    pi = pi_fixed(prec)
    return pi // 180
252
+
253
def bspe(a, b):
    """
    Binary-splitting evaluation of the partial sum of the series for
    exp(1)-1 over the index range (a, b], returned as an exact
    fraction (p, q).
    """
    # Base case: the single term 1/b.
    if b - a == 1:
        return MPZ_ONE, MPZ(b)
    # Split the range and combine the two exact fractions:
    # pl/ql + pr/(ql*qr) = (pl*qr + pr)/(ql*qr).
    mid = (a + b) // 2
    pl, ql = bspe(a, mid)
    pr, qr = bspe(mid, b)
    return pl*qr + pr, ql*qr
264
+
265
@constant_memo
def e_fixed(prec):
    """
    Computes exp(1). This is done using the ordinary Taylor series for
    exp, with binary splitting. For a description of the algorithm,
    see:

        http://numbers.computation.free.fr/Constants/
        Algorithms/splitting.html
    """
    # Slight overestimate of N needed for 1/N! < 2**(-prec)
    # This could be tightened for large N.
    N = int(1.1*prec/math.log(prec) + 20)
    p, q = bspe(0,N)
    # e = 1 + (exp(1)-1) = (q + p)/q, scaled to fixed point.
    return ((p+q)<<prec)//q
280
+
281
@constant_memo
def phi_fixed(prec):
    """
    Computes the golden ratio, (1+sqrt(5))/2
    """
    prec += 10
    # a = sqrt(5) + 1 at the elevated precision (10 guard bits).
    a = isqrt_fast(MPZ_FIVE<<(2*prec)) + (MPZ_ONE << prec)
    # >> 11 = divide by 2 (>>1) and drop the 10 guard bits (>>10).
    return a >> 11
289
+
290
+ mpf_phi = def_mpf_constant(phi_fixed)
291
+ mpf_pi = def_mpf_constant(pi_fixed)
292
+ mpf_e = def_mpf_constant(e_fixed)
293
+ mpf_degree = def_mpf_constant(degree_fixed)
294
+ mpf_ln2 = def_mpf_constant(ln2_fixed)
295
+ mpf_ln10 = def_mpf_constant(ln10_fixed)
296
+
297
+
298
@constant_memo
def ln_sqrt2pi_fixed(prec):
    """Computes ln(sqrt(2*pi)) as a fixed-point integer."""
    wp = prec + 10
    # ln(sqrt(2*pi)) = ln(2*pi)/2
    # Converting to fixed point at prec-1 instead of prec halves the
    # value, performing the division by 2 for free.
    return to_fixed(mpf_log(mpf_shift(mpf_pi(wp), 1), wp), prec-1)
303
+
304
@constant_memo
def sqrtpi_fixed(prec):
    """Computes sqrt(pi) as a fixed-point integer."""
    return sqrt_fixed(pi_fixed(prec), prec)
307
+
308
+ mpf_sqrtpi = def_mpf_constant(sqrtpi_fixed)
309
+ mpf_ln_sqrt2pi = def_mpf_constant(ln_sqrt2pi_fixed)
310
+
311
+
312
+ #----------------------------------------------------------------------------#
313
+ # #
314
+ # Powers #
315
+ # #
316
+ #----------------------------------------------------------------------------#
317
+
318
def mpf_pow(s, t, prec, rnd=round_fast):
    """
    Compute s**t. Raises ComplexResult if s is negative and t is
    fractional.
    """
    ssign, sman, sexp, sbc = s
    tsign, tman, texp, tbc = t
    if ssign and texp < 0:
        raise ComplexResult("negative number raised to a fractional power")
    # Integer exponent (texp >= 0 means t = +/- tman * 2**texp is an
    # exact integer): dispatch to the integer-power routine.
    if texp >= 0:
        return mpf_pow_int(s, (-1)**tsign * (tman<<texp), prec, rnd)
    # s**(n/2) = sqrt(s)**n
    if texp == -1:
        if tman == 1:
            if tsign:
                # t = -1/2: reciprocal square root.
                return mpf_div(fone, mpf_sqrt(s, prec+10,
                    reciprocal_rnd[rnd]), prec, rnd)
            return mpf_sqrt(s, prec, rnd)
        else:
            if tsign:
                return mpf_pow_int(mpf_sqrt(s, prec+10,
                    reciprocal_rnd[rnd]), -tman, prec, rnd)
            return mpf_pow_int(mpf_sqrt(s, prec+10, rnd), tman, prec, rnd)
    # General formula: s**t = exp(t*log(s))
    # TODO: handle rnd direction of the logarithm carefully
    c = mpf_log(s, prec+10, rnd)
    return mpf_exp(mpf_mul(t, c), prec, rnd)
345
+
346
def int_pow_fixed(y, n, prec):
    """n-th power of a fixed point number with precision prec

    Returns the power in the form man, exp,
    man * 2**exp ~= y**n
    """
    # Fast path for squaring.
    if n == 2:
        return (y*y), 0
    bc = bitcount(y)
    exp = 0
    # Working precision with headroom for the ~bitcount(n) squarings.
    workprec = 2 * (prec + 4*bitcount(n) + 4)
    _, pm, pe, pbc = fone
    # Binary exponentiation; pm*2**pe tracks the partial product and
    # y*2**exp the running square, each truncated to workprec bits.
    while 1:
        if n & 1:
            pm = pm*y
            pe = pe+exp
            # Update the bit count estimate, then correct it exactly
            # using the small-value bit count table.
            pbc += bc - 2
            pbc = pbc + bctable[int(pm >> pbc)]
            if pbc > workprec:
                pm = pm >> (pbc-workprec)
                pe += pbc - workprec
                pbc = workprec
            n -= 1
            if not n:
                break
        y = y*y
        exp = exp+exp
        bc = bc + bc - 2
        bc = bc + bctable[int(y >> bc)]
        if bc > workprec:
            y = y >> (bc-workprec)
            exp += bc - workprec
            bc = workprec
        n = n // 2
    return pm, pe
381
+
382
+ # froot(s, n, prec, rnd) computes the real n-th root of a
383
+ # positive mpf tuple s.
384
+ # To compute the root we start from a 50-bit estimate for r
385
+ # generated with ordinary floating-point arithmetic, and then refine
386
+ # the value to full accuracy using the iteration
387
+
388
+ # 1 / y \
389
+ # r = --- | (n-1) * r + ---------- |
390
+ # n+1 n \ n r_n**(n-1) /
391
+
392
+ # which is simply Newton's method applied to the equation r**n = y.
393
+ # With giant_steps(start, prec+extra) = [p0,...,pm, prec+extra]
394
+ # and y = man * 2**-shift one has
395
+ # (man * 2**exp)**(1/n) =
396
+ # y**(1/n) * 2**(start-prec/n) * 2**(p0-start) * ... * 2**(prec+extra-pm) *
397
+ # 2**((exp+shift-(n-1)*prec)/n -extra))
398
+ # The last factor is accounted for in the last line of froot.
399
+
400
def nthroot_fixed(y, n, prec, exp1):
    """
    Newton iteration for the n-th root of the fixed-point number y.

    See the comment block above for the iteration formula and the
    precision bookkeeping.
    """
    # exp1 appears unused here; the final scaling by 2**exp1 is done
    # by the caller -- NOTE(review): confirm the parameter is kept
    # only for interface/bookkeeping reasons.
    start = 50
    try:
        # 50-bit float seed for the root.
        y1 = rshift(y, prec - n*start)
        r = MPZ(int(y1**(1.0/n)))
    except OverflowError:
        # Float overflow: fall back to mpf arithmetic for the seed.
        y1 = from_int(y1, start)
        fn = from_int(n)
        fn = mpf_rdiv_int(1, fn, start)
        r = mpf_pow(y1, fn, start)
        r = to_int(r)
    extra = 10
    extra1 = n
    prevp = start
    # Refine with Newton's method, roughly doubling the precision at
    # each step (giant_steps schedule).
    for p in giant_steps(start, prec+extra):
        pm, pe = int_pow_fixed(r, n-1, prevp)
        r2 = rshift(pm, (n-1)*prevp - p - pe - extra1)
        B = lshift(y, 2*p-prec+extra1)//r2
        r = (B + (n-1) * lshift(r, p-prevp))//n
        prevp = p
    return r
421
+
422
def mpf_nthroot(s, n, prec, rnd=round_fast):
    """nth-root of a positive number

    Use the Newton method when faster, otherwise use x**(1/n)
    """
    sign, man, exp, bc = s
    if sign:
        raise ComplexResult("nth root of a negative number")
    # Special values: 0, inf, nan.
    if not man:
        if s == fnan:
            return fnan
        if s == fzero:
            if n > 0:
                return fzero
            if n == 0:
                return fone
            # 0**(1/n) with n < 0 diverges.
            return finf
        # Infinity
        if not n:
            return fnan
        if n < 0:
            return fzero
        return finf
    flag_inverse = False
    # Trivial exponents and negative n (reduce to 1/root).
    if n < 2:
        if n == 0:
            return fone
        if n == 1:
            return mpf_pos(s, prec, rnd)
        if n == -1:
            return mpf_div(fone, s, prec, rnd)
        # n < 0
        rnd = reciprocal_rnd[rnd]
        flag_inverse = True
        extra_inverse = 5
        prec += extra_inverse
        n = -n
    # For large n (empirical thresholds), s**(1/n) via exp/log is
    # faster than the Newton iteration.
    if n > 20 and (n >= 20000 or prec < int(233 + 28.3 * n**0.62)):
        prec2 = prec + 10
        fn = from_int(n)
        nth = mpf_rdiv_int(1, fn, prec2)
        r = mpf_pow(s, nth, prec2, rnd)
        s = normalize(r[0], r[1], r[2], r[3], prec, rnd)
        if flag_inverse:
            return mpf_div(fone, s, prec-extra_inverse, rnd)
        else:
            return s
    # Convert to a fixed-point number with prec2 bits.
    prec2 = prec + 2*n - (prec%n)
    # a few tests indicate that
    # for 10 < n < 10**4 a bit more precision is needed
    if n > 10:
        prec2 += prec2//10
        prec2 = prec2 - prec2%n
    # Mantissa may have more bits than we need. Trim it down.
    shift = bc - prec2
    # Adjust exponents to make prec2 and exp+shift multiples of n.
    sign1 = 0
    es = exp+shift
    if es < 0:
        sign1 = 1
        es = -es
    if sign1:
        shift += es%n
    else:
        shift -= es%n
    man = rshift(man, shift)
    extra = 10
    exp1 = ((exp+shift-(n-1)*prec2)//n) - extra
    # Nudge the mantissa by one ulp in the direction of rounding so
    # the truncating fixed-point root lands on the correct side.
    rnd_shift = 0
    if flag_inverse:
        if rnd == 'u' or rnd == 'c':
            rnd_shift = 1
    else:
        if rnd == 'd' or rnd == 'f':
            rnd_shift = 1
    man = nthroot_fixed(man+rnd_shift, n, prec2, exp1)
    s = from_man_exp(man, exp1, prec, rnd)
    if flag_inverse:
        return mpf_div(fone, s, prec-extra_inverse, rnd)
    else:
        return s
504
+
505
def mpf_cbrt(s, prec, rnd=round_fast):
    """Real cube root of a positive number; delegates to mpf_nthroot."""
    return mpf_nthroot(s, 3, prec, rnd=rnd)
508
+
509
+ #----------------------------------------------------------------------------#
510
+ # #
511
+ # Logarithms #
512
+ # #
513
+ #----------------------------------------------------------------------------#
514
+
515
+
516
def log_int_fixed(n, prec, ln2=None):
    """
    Fast computation of log(n), caching the value for small n,
    intended for zeta sums.
    """
    # Serve from the cache when a high-enough-precision value exists.
    if n in log_int_cache:
        value, vprec = log_int_cache[n]
        if vprec >= prec:
            return value >> (vprec - prec)
    wp = prec + 10
    # NOTE(review): this compares the working precision against
    # LOG_TAYLOR_SHIFT (a small step-size constant), so the Taylor
    # branch is taken only for tiny wp -- confirm the intended
    # threshold (vs LOG_TAYLOR_PREC).
    if wp <= LOG_TAYLOR_SHIFT:
        if ln2 is None:
            ln2 = ln2_fixed(wp)
        # Reduce n to (0.5, 2) and add back r*log(2).
        r = bitcount(n)
        x = n << (wp-r)
        v = log_taylor_cached(x, wp) + r*ln2
    else:
        v = to_fixed(mpf_log(from_int(n), wp+5), wp)
    if n < MAX_LOG_INT_CACHE:
        log_int_cache[n] = (v, wp)
    return v >> (wp-prec)
537
+
538
def agm_fixed(a, b, prec):
    """
    Fixed-point computation of agm(a,b), assuming
    a, b both close to unit magnitude.
    """
    i = 0
    while 1:
        anew = (a+b)>>1
        # Stop once the arithmetic mean stabilizes to within a few
        # ulps (after a minimum of 5 iterations).
        if i > 4 and abs(a-anew) < 8:
            return a
        b = isqrt_fast(a*b)
        a = anew
        i += 1
    # Unreachable: the loop only exits via the return above.
    return a
552
+
553
def log_agm(x, prec):
    """
    Fixed-point computation of -log(x) = log(1/x), suitable
    for large precision. It is required that 0 < x < 1. The
    algorithm used is the Sasaki-Kanada formula

        -log(x) = pi/agm(theta2(x)^2,theta3(x)^2). [1]

    For faster convergence in the theta functions, x should
    be chosen closer to 0.

    Guard bits must be added by the caller.

    HYPOTHESIS: if x = 2^(-n), n bits need to be added to
    account for the truncation to a fixed-point number,
    and this is the only significant cancellation error.

    The number of bits lost to roundoff is small and can be
    considered constant.

    [1] Richard P. Brent, "Fast Algorithms for High-Precision
        Computation of Elementary Functions (extended abstract)",
        http://wwwmaths.anu.edu.au/~brent/pd/RNC7-Brent.pdf

    """
    x2 = (x*x) >> prec
    # Compute jtheta2(x)**2
    s = a = b = x2
    # Theta series terms generated incrementally: each step multiplies
    # in the next power of x2 until the terms underflow to zero.
    while a:
        b = (b*x2) >> prec
        a = (a*b) >> prec
        s += a
    s += (MPZ_ONE<<prec)
    s = (s*s)>>(prec-2)
    s = (s*isqrt_fast(x<<prec))>>prec
    # Compute jtheta3(x)**2
    t = a = b = x
    while a:
        b = (b*x2) >> prec
        a = (a*b) >> prec
        t += a
    t = (MPZ_ONE<<prec) + (t<<1)
    t = (t*t)>>prec
    # Final formula
    p = agm_fixed(s, t, prec)
    return (pi_fixed(prec) << prec) // p
599
+
600
def log_taylor(x, prec, r=0):
    """
    Fixed-point calculation of log(x). It is assumed that x is close
    enough to 1 for the Taylor series to converge quickly. Convergence
    can be improved by specifying r > 0 to compute
    log(x^(1/2^r))*2^r, at the cost of performing r square roots.

    The caller must provide sufficient guard bits.
    """
    # r halvings of the logarithm via square roots.
    for i in xrange(r):
        x = isqrt_fast(x<<prec)
    one = MPZ_ONE << prec
    # log(x) = 2*atanh(v) with v = (x-1)/(x+1); sum the odd-power
    # series for atanh.
    v = ((x-one)<<prec)//(x+one)
    sign = v < 0
    if sign:
        v = -v
    v2 = (v*v) >> prec
    v4 = (v2*v2) >> prec
    # Two accumulators interleave the series terms (k = 1, 5, 9, ...
    # in s0 and k = 3, 7, 11, ... in s1) so only powers of v^4 are
    # generated; s1 is scaled by v^2 at the end.
    s0 = v
    s1 = v//3
    v = (v*v4) >> prec
    k = 5
    while v:
        s0 += v // k
        k += 2
        s1 += v // k
        v = (v*v4) >> prec
        k += 2
    s1 = (s1*v2) >> prec
    # Factor 2 from the atanh identity, times 2^r undoing the square
    # roots.
    s = (s0+s1) << (1+r)
    if sign:
        return -s
    return s
633
+
634
def log_taylor_cached(x, prec):
    """
    Fixed-point computation of log(x), assuming x in (0.5, 2)
    and prec <= LOG_TAYLOR_PREC.
    """
    # Nearest cached anchor point a (grid spacing 2^-LOG_TAYLOR_SHIFT).
    n = x >> (prec-LOG_TAYLOR_SHIFT)
    cached_prec = cache_prec_steps[prec]
    dprec = cached_prec - prec
    if (n, cached_prec) in log_taylor_cache:
        a, log_a = log_taylor_cache[n, cached_prec]
    else:
        a = n << (cached_prec - LOG_TAYLOR_SHIFT)
        log_a = log_taylor(a, cached_prec, 8)
        log_taylor_cache[n, cached_prec] = (a, log_a)
    a >>= dprec
    log_a >>= dprec
    # log(x) = log(a) + 2*atanh(v), v = (x-a)/(x+a) = u/(2+u) with
    # u = (x-a)/a; same interleaved atanh summation as log_taylor.
    u = ((x - a) << prec) // a
    v = (u << prec) // ((MPZ_TWO << prec) + u)
    v2 = (v*v) >> prec
    v4 = (v2*v2) >> prec
    s0 = v
    s1 = v//3
    v = (v*v4) >> prec
    k = 5
    while v:
        s0 += v//k
        k += 2
        s1 += v//k
        v = (v*v4) >> prec
        k += 2
    s1 = (s1*v2) >> prec
    s = (s0+s1) << 1
    return log_a + s
667
+
668
def mpf_log(x, prec, rnd=round_fast):
    """
    Compute the natural logarithm of the mpf value x. If x is negative,
    ComplexResult is raised.
    """
    sign, man, exp, bc = x
    #------------------------------------------------------------------
    # Handle special values
    if not man:
        if x == fzero: return fninf
        if x == finf: return finf
        if x == fnan: return fnan
    if sign:
        raise ComplexResult("logarithm of a negative number")
    wp = prec + 20
    #------------------------------------------------------------------
    # Handle log(2^n) = n*log(2).
    # Here we catch the only possible exact value, log(1) = 0
    if man == 1:
        if not exp:
            return fzero
        return from_man_exp(exp*ln2_fixed(wp), -wp, prec, rnd)
    mag = exp+bc
    abs_mag = abs(mag)
    #------------------------------------------------------------------
    # Handle x = 1+eps, where log(x) ~ x. We need to check for
    # cancellation when moving to fixed-point math and compensate
    # by increasing the precision. Note that abs_mag in (0, 1) <=>
    # 0.5 < x < 2 and x != 1
    if abs_mag <= 1:
        # Calculate t = x-1 to measure distance from 1 in bits
        tsign = 1-abs_mag
        if tsign:
            tman = (MPZ_ONE<<bc) - man
        else:
            tman = man - (MPZ_ONE<<(bc-1))
        tbc = bitcount(tman)
        cancellation = bc - tbc
        if cancellation > wp:
            # x is so close to 1 that log(x) ~= x-1 to full precision;
            # perturb for correct directed rounding.
            t = normalize(tsign, tman, abs_mag-bc, tbc, tbc, 'n')
            return mpf_perturb(t, tsign, prec, rnd)
        else:
            wp += cancellation
        # TODO: if close enough to 1, we could use Taylor series
        # even in the AGM precision range, since the Taylor series
        # converges rapidly
    #------------------------------------------------------------------
    # Another special case:
    # n*log(2) is a good enough approximation
    if abs_mag > 10000:
        if bitcount(abs_mag) > wp:
            return from_man_exp(exp*ln2_fixed(wp), -wp, prec, rnd)
    #------------------------------------------------------------------
    # General case.
    # Perform argument reduction using log(x) = log(x*2^n) - n*log(2):
    # If we are in the Taylor precision range, choose magnitude 0 or 1.
    # If we are in the AGM precision range, choose magnitude -m for
    # some large m; benchmarking on one machine showed m = prec/20 to be
    # optimal between 1000 and 100,000 digits.
    if wp <= LOG_TAYLOR_PREC:
        m = log_taylor_cached(lshift(man, wp-bc), wp)
        if mag:
            m += mag*ln2_fixed(wp)
    else:
        optimal_mag = -wp//LOG_AGM_MAG_PREC_RATIO
        n = optimal_mag - mag
        x = mpf_shift(x, n)
        # Extra bits compensate the cancellation hypothesized in
        # log_agm for small x.
        wp += (-optimal_mag)
        m = -log_agm(to_fixed(x, wp), wp)
        m -= n*ln2_fixed(wp)
    return from_man_exp(m, -wp, prec, rnd)
739
+
740
def mpf_log_hypot(a, b, prec, rnd):
    """
    Computes log(sqrt(a^2+b^2)) accurately.
    """
    # If either a or b is inf/nan/0, assume it to be a
    if not b[1]:
        a, b = b, a
    # a is inf/nan/0
    if not a[1]:
        # both are inf/nan/0
        if not b[1]:
            if a == b == fzero:
                return fninf
            if fnan in (a, b):
                return fnan
            # at least one term is (+/- inf)^2
            return finf
        # only a is inf/nan/0
        if a == fzero:
            # log(sqrt(0+b^2)) = log(|b|)
            return mpf_log(mpf_abs(b), prec, rnd)
        if a == fnan:
            return fnan
        return finf
    # Exact
    a2 = mpf_mul(a,a)
    b2 = mpf_mul(b,b)
    extra = 20
    # Not exact
    h2 = mpf_add(a2, b2, prec+extra)
    # Detect cancellation near a^2+b^2 = 1, where log amplifies the
    # relative error of the truncated sum.
    cancelled = mpf_add(h2, fnone, 10)
    mag_cancelled = cancelled[2]+cancelled[3]
    # Just redo the sum exactly if necessary (could be smarter
    # and avoid memory allocation when a or b is precisely 1
    # and the other is tiny...)
    if cancelled == fzero or mag_cancelled < -extra//2:
        h2 = mpf_add(a2, b2, prec+extra-min(a2[2],b2[2]))
    # log(sqrt(h2)) = log(h2)/2.
    return mpf_shift(mpf_log(h2, prec, rnd), -1)
778
+
779
+
780
+ #----------------------------------------------------------------------
781
+ # Inverse tangent
782
+ #
783
+
784
+ def atan_newton(x, prec):
785
+ if prec >= 100:
786
+ r = math.atan(int((x>>(prec-53)))/2.0**53)
787
+ else:
788
+ r = math.atan(int(x)/2.0**prec)
789
+ prevp = 50
790
+ r = MPZ(int(r * 2.0**53) >> (53-prevp))
791
+ extra_p = 50
792
+ for wp in giant_steps(prevp, prec):
793
+ wp += extra_p
794
+ r = r << (wp-prevp)
795
+ cos, sin = cos_sin_fixed(r, wp)
796
+ tan = (sin << wp) // cos
797
+ a = ((tan-rshift(x, prec-wp)) << wp) // ((MPZ_ONE<<wp) + ((tan**2)>>wp))
798
+ r = r - a
799
+ prevp = wp
800
+ return rshift(r, prevp-prec)
801
+
802
+ def atan_taylor_get_cached(n, prec):
803
+ # Taylor series with caching wins up to huge precisions
804
+ # To avoid unnecessary precomputation at low precision, we
805
+ # do it in steps
806
+ # Round to next power of 2
807
+ prec2 = (1<<(bitcount(prec-1))) + 20
808
+ dprec = prec2 - prec
809
+ if (n, prec2) in atan_taylor_cache:
810
+ a, atan_a = atan_taylor_cache[n, prec2]
811
+ else:
812
+ a = n << (prec2 - ATAN_TAYLOR_SHIFT)
813
+ atan_a = atan_newton(a, prec2)
814
+ atan_taylor_cache[n, prec2] = (a, atan_a)
815
+ return (a >> dprec), (atan_a >> dprec)
816
+
817
+ def atan_taylor(x, prec):
818
+ n = (x >> (prec-ATAN_TAYLOR_SHIFT))
819
+ a, atan_a = atan_taylor_get_cached(n, prec)
820
+ d = x - a
821
+ s0 = v = (d << prec) // ((a**2 >> prec) + (a*d >> prec) + (MPZ_ONE << prec))
822
+ v2 = (v**2 >> prec)
823
+ v4 = (v2 * v2) >> prec
824
+ s1 = v//3
825
+ v = (v * v4) >> prec
826
+ k = 5
827
+ while v:
828
+ s0 += v // k
829
+ k += 2
830
+ s1 += v // k
831
+ v = (v * v4) >> prec
832
+ k += 2
833
+ s1 = (s1 * v2) >> prec
834
+ s = s0 - s1
835
+ return atan_a + s
836
+
837
+ def atan_inf(sign, prec, rnd):
838
+ if not sign:
839
+ return mpf_shift(mpf_pi(prec, rnd), -1)
840
+ return mpf_neg(mpf_shift(mpf_pi(prec, negative_rnd[rnd]), -1))
841
+
842
+ def mpf_atan(x, prec, rnd=round_fast):
843
+ sign, man, exp, bc = x
844
+ if not man:
845
+ if x == fzero: return fzero
846
+ if x == finf: return atan_inf(0, prec, rnd)
847
+ if x == fninf: return atan_inf(1, prec, rnd)
848
+ return fnan
849
+ mag = exp + bc
850
+ # Essentially infinity
851
+ if mag > prec+20:
852
+ return atan_inf(sign, prec, rnd)
853
+ # Essentially ~ x
854
+ if -mag > prec+20:
855
+ return mpf_perturb(x, 1-sign, prec, rnd)
856
+ wp = prec + 30 + abs(mag)
857
+ # For large x, use atan(x) = pi/2 - atan(1/x)
858
+ if mag >= 2:
859
+ x = mpf_rdiv_int(1, x, wp)
860
+ reciprocal = True
861
+ else:
862
+ reciprocal = False
863
+ t = to_fixed(x, wp)
864
+ if sign:
865
+ t = -t
866
+ if wp < ATAN_TAYLOR_PREC:
867
+ a = atan_taylor(t, wp)
868
+ else:
869
+ a = atan_newton(t, wp)
870
+ if reciprocal:
871
+ a = ((pi_fixed(wp)>>1)+1) - a
872
+ if sign:
873
+ a = -a
874
+ return from_man_exp(a, -wp, prec, rnd)
875
+
876
+ # TODO: cleanup the special cases
877
+ def mpf_atan2(y, x, prec, rnd=round_fast):
878
+ xsign, xman, xexp, xbc = x
879
+ ysign, yman, yexp, ybc = y
880
+ if not yman:
881
+ if y == fzero and x != fnan:
882
+ if mpf_sign(x) >= 0:
883
+ return fzero
884
+ return mpf_pi(prec, rnd)
885
+ if y in (finf, fninf):
886
+ if x in (finf, fninf):
887
+ return fnan
888
+ # pi/2
889
+ if y == finf:
890
+ return mpf_shift(mpf_pi(prec, rnd), -1)
891
+ # -pi/2
892
+ return mpf_neg(mpf_shift(mpf_pi(prec, negative_rnd[rnd]), -1))
893
+ return fnan
894
+ if ysign:
895
+ return mpf_neg(mpf_atan2(mpf_neg(y), x, prec, negative_rnd[rnd]))
896
+ if not xman:
897
+ if x == fnan:
898
+ return fnan
899
+ if x == finf:
900
+ return fzero
901
+ if x == fninf:
902
+ return mpf_pi(prec, rnd)
903
+ if y == fzero:
904
+ return fzero
905
+ return mpf_shift(mpf_pi(prec, rnd), -1)
906
+ tquo = mpf_atan(mpf_div(y, x, prec+4), prec+4)
907
+ if xsign:
908
+ return mpf_add(mpf_pi(prec+4), tquo, prec, rnd)
909
+ else:
910
+ return mpf_pos(tquo, prec, rnd)
911
+
912
+ def mpf_asin(x, prec, rnd=round_fast):
913
+ sign, man, exp, bc = x
914
+ if bc+exp > 0 and x not in (fone, fnone):
915
+ raise ComplexResult("asin(x) is real only for -1 <= x <= 1")
916
+ # asin(x) = 2*atan(x/(1+sqrt(1-x**2)))
917
+ wp = prec + 15
918
+ a = mpf_mul(x, x)
919
+ b = mpf_add(fone, mpf_sqrt(mpf_sub(fone, a, wp), wp), wp)
920
+ c = mpf_div(x, b, wp)
921
+ return mpf_shift(mpf_atan(c, prec, rnd), 1)
922
+
923
+ def mpf_acos(x, prec, rnd=round_fast):
924
+ # acos(x) = 2*atan(sqrt(1-x**2)/(1+x))
925
+ sign, man, exp, bc = x
926
+ if bc + exp > 0:
927
+ if x not in (fone, fnone):
928
+ raise ComplexResult("acos(x) is real only for -1 <= x <= 1")
929
+ if x == fnone:
930
+ return mpf_pi(prec, rnd)
931
+ wp = prec + 15
932
+ a = mpf_mul(x, x)
933
+ b = mpf_sqrt(mpf_sub(fone, a, wp), wp)
934
+ c = mpf_div(b, mpf_add(fone, x, wp), wp)
935
+ return mpf_shift(mpf_atan(c, prec, rnd), 1)
936
+
937
+ def mpf_asinh(x, prec, rnd=round_fast):
938
+ wp = prec + 20
939
+ sign, man, exp, bc = x
940
+ mag = exp+bc
941
+ if mag < -8:
942
+ if mag < -wp:
943
+ return mpf_perturb(x, 1-sign, prec, rnd)
944
+ wp += (-mag)
945
+ # asinh(x) = log(x+sqrt(x**2+1))
946
+ # use reflection symmetry to avoid cancellation
947
+ q = mpf_sqrt(mpf_add(mpf_mul(x, x), fone, wp), wp)
948
+ q = mpf_add(mpf_abs(x), q, wp)
949
+ if sign:
950
+ return mpf_neg(mpf_log(q, prec, negative_rnd[rnd]))
951
+ else:
952
+ return mpf_log(q, prec, rnd)
953
+
954
+ def mpf_acosh(x, prec, rnd=round_fast):
955
+ # acosh(x) = log(x+sqrt(x**2-1))
956
+ wp = prec + 15
957
+ if mpf_cmp(x, fone) == -1:
958
+ raise ComplexResult("acosh(x) is real only for x >= 1")
959
+ q = mpf_sqrt(mpf_add(mpf_mul(x,x), fnone, wp), wp)
960
+ return mpf_log(mpf_add(x, q, wp), prec, rnd)
961
+
962
+ def mpf_atanh(x, prec, rnd=round_fast):
963
+ # atanh(x) = log((1+x)/(1-x))/2
964
+ sign, man, exp, bc = x
965
+ if (not man) and exp:
966
+ if x in (fzero, fnan):
967
+ return x
968
+ raise ComplexResult("atanh(x) is real only for -1 <= x <= 1")
969
+ mag = bc + exp
970
+ if mag > 0:
971
+ if mag == 1 and man == 1:
972
+ return [finf, fninf][sign]
973
+ raise ComplexResult("atanh(x) is real only for -1 <= x <= 1")
974
+ wp = prec + 15
975
+ if mag < -8:
976
+ if mag < -wp:
977
+ return mpf_perturb(x, sign, prec, rnd)
978
+ wp += (-mag)
979
+ a = mpf_add(x, fone, wp)
980
+ b = mpf_sub(fone, x, wp)
981
+ return mpf_shift(mpf_log(mpf_div(a, b, wp), prec, rnd), -1)
982
+
983
+ def mpf_fibonacci(x, prec, rnd=round_fast):
984
+ sign, man, exp, bc = x
985
+ if not man:
986
+ if x == fninf:
987
+ return fnan
988
+ return x
989
+ # F(2^n) ~= 2^(2^n)
990
+ size = abs(exp+bc)
991
+ if exp >= 0:
992
+ # Exact
993
+ if size < 10 or size <= bitcount(prec):
994
+ return from_int(ifib(to_int(x)), prec, rnd)
995
+ # Use the modified Binet formula
996
+ wp = prec + size + 20
997
+ a = mpf_phi(wp)
998
+ b = mpf_add(mpf_shift(a, 1), fnone, wp)
999
+ u = mpf_pow(a, x, wp)
1000
+ v = mpf_cos_pi(x, wp)
1001
+ v = mpf_div(v, u, wp)
1002
+ u = mpf_sub(u, v, wp)
1003
+ u = mpf_div(u, b, prec, rnd)
1004
+ return u
1005
+
1006
+
1007
+ #-------------------------------------------------------------------------------
1008
+ # Exponential-type functions
1009
+ #-------------------------------------------------------------------------------
1010
+
1011
+ def exponential_series(x, prec, type=0):
1012
+ """
1013
+ Taylor series for cosh/sinh or cos/sin.
1014
+
1015
+ type = 0 -- returns exp(x) (slightly faster than cosh+sinh)
1016
+ type = 1 -- returns (cosh(x), sinh(x))
1017
+ type = 2 -- returns (cos(x), sin(x))
1018
+ """
1019
+ if x < 0:
1020
+ x = -x
1021
+ sign = 1
1022
+ else:
1023
+ sign = 0
1024
+ r = int(0.5*prec**0.5)
1025
+ xmag = bitcount(x) - prec
1026
+ r = max(0, xmag + r)
1027
+ extra = 10 + 2*max(r,-xmag)
1028
+ wp = prec + extra
1029
+ x <<= (extra - r)
1030
+ one = MPZ_ONE << wp
1031
+ alt = (type == 2)
1032
+ if prec < EXP_SERIES_U_CUTOFF:
1033
+ x2 = a = (x*x) >> wp
1034
+ x4 = (x2*x2) >> wp
1035
+ s0 = s1 = MPZ_ZERO
1036
+ k = 2
1037
+ while a:
1038
+ a //= (k-1)*k; s0 += a; k += 2
1039
+ a //= (k-1)*k; s1 += a; k += 2
1040
+ a = (a*x4) >> wp
1041
+ s1 = (x2*s1) >> wp
1042
+ if alt:
1043
+ c = s1 - s0 + one
1044
+ else:
1045
+ c = s1 + s0 + one
1046
+ else:
1047
+ u = int(0.3*prec**0.35)
1048
+ x2 = a = (x*x) >> wp
1049
+ xpowers = [one, x2]
1050
+ for i in xrange(1, u):
1051
+ xpowers.append((xpowers[-1]*x2)>>wp)
1052
+ sums = [MPZ_ZERO] * u
1053
+ k = 2
1054
+ while a:
1055
+ for i in xrange(u):
1056
+ a //= (k-1)*k
1057
+ if alt and k & 2: sums[i] -= a
1058
+ else: sums[i] += a
1059
+ k += 2
1060
+ a = (a*xpowers[-1]) >> wp
1061
+ for i in xrange(1, u):
1062
+ sums[i] = (sums[i]*xpowers[i]) >> wp
1063
+ c = sum(sums) + one
1064
+ if type == 0:
1065
+ s = isqrt_fast(c*c - (one<<wp))
1066
+ if sign:
1067
+ v = c - s
1068
+ else:
1069
+ v = c + s
1070
+ for i in xrange(r):
1071
+ v = (v*v) >> wp
1072
+ return v >> extra
1073
+ else:
1074
+ # Repeatedly apply the double-angle formula
1075
+ # cosh(2*x) = 2*cosh(x)^2 - 1
1076
+ # cos(2*x) = 2*cos(x)^2 - 1
1077
+ pshift = wp-1
1078
+ for i in xrange(r):
1079
+ c = ((c*c) >> pshift) - one
1080
+ # With the abs, this is the same for sinh and sin
1081
+ s = isqrt_fast(abs((one<<wp) - c*c))
1082
+ if sign:
1083
+ s = -s
1084
+ return (c>>extra), (s>>extra)
1085
+
1086
+ def exp_basecase(x, prec):
1087
+ """
1088
+ Compute exp(x) as a fixed-point number. Works for any x,
1089
+ but for speed should have |x| < 1. For an arbitrary number,
1090
+ use exp(x) = exp(x-m*log(2)) * 2^m where m = floor(x/log(2)).
1091
+ """
1092
+ if prec > EXP_COSH_CUTOFF:
1093
+ return exponential_series(x, prec, 0)
1094
+ r = int(prec**0.5)
1095
+ prec += r
1096
+ s0 = s1 = (MPZ_ONE << prec)
1097
+ k = 2
1098
+ a = x2 = (x*x) >> prec
1099
+ while a:
1100
+ a //= k; s0 += a; k += 1
1101
+ a //= k; s1 += a; k += 1
1102
+ a = (a*x2) >> prec
1103
+ s1 = (s1*x) >> prec
1104
+ s = s0 + s1
1105
+ u = r
1106
+ while r:
1107
+ s = (s*s) >> prec
1108
+ r -= 1
1109
+ return s >> u
1110
+
1111
+ def exp_expneg_basecase(x, prec):
1112
+ """
1113
+ Computation of exp(x), exp(-x)
1114
+ """
1115
+ if prec > EXP_COSH_CUTOFF:
1116
+ cosh, sinh = exponential_series(x, prec, 1)
1117
+ return cosh+sinh, cosh-sinh
1118
+ a = exp_basecase(x, prec)
1119
+ b = (MPZ_ONE << (prec+prec)) // a
1120
+ return a, b
1121
+
1122
+ def cos_sin_basecase(x, prec):
1123
+ """
1124
+ Compute cos(x), sin(x) as fixed-point numbers, assuming x
1125
+ in [0, pi/2). For an arbitrary number, use x' = x - m*(pi/2)
1126
+ where m = floor(x/(pi/2)) along with quarter-period symmetries.
1127
+ """
1128
+ if prec > COS_SIN_CACHE_PREC:
1129
+ return exponential_series(x, prec, 2)
1130
+ precs = prec - COS_SIN_CACHE_STEP
1131
+ t = x >> precs
1132
+ n = int(t)
1133
+ if n not in cos_sin_cache:
1134
+ w = t<<(10+COS_SIN_CACHE_PREC-COS_SIN_CACHE_STEP)
1135
+ cos_t, sin_t = exponential_series(w, 10+COS_SIN_CACHE_PREC, 2)
1136
+ cos_sin_cache[n] = (cos_t>>10), (sin_t>>10)
1137
+ cos_t, sin_t = cos_sin_cache[n]
1138
+ offset = COS_SIN_CACHE_PREC - prec
1139
+ cos_t >>= offset
1140
+ sin_t >>= offset
1141
+ x -= t << precs
1142
+ cos = MPZ_ONE << prec
1143
+ sin = x
1144
+ k = 2
1145
+ a = -((x*x) >> prec)
1146
+ while a:
1147
+ a //= k; cos += a; k += 1; a = (a*x) >> prec
1148
+ a //= k; sin += a; k += 1; a = -((a*x) >> prec)
1149
+ return ((cos*cos_t-sin*sin_t) >> prec), ((sin*cos_t+cos*sin_t) >> prec)
1150
+
1151
+ def mpf_exp(x, prec, rnd=round_fast):
1152
+ sign, man, exp, bc = x
1153
+ if man:
1154
+ mag = bc + exp
1155
+ wp = prec + 14
1156
+ if sign:
1157
+ man = -man
1158
+ # TODO: the best cutoff depends on both x and the precision.
1159
+ if prec > 600 and exp >= 0:
1160
+ # Need about log2(exp(n)) ~= 1.45*mag extra precision
1161
+ e = mpf_e(wp+int(1.45*mag))
1162
+ return mpf_pow_int(e, man<<exp, prec, rnd)
1163
+ if mag < -wp:
1164
+ return mpf_perturb(fone, sign, prec, rnd)
1165
+ # |x| >= 2
1166
+ if mag > 1:
1167
+ # For large arguments: exp(2^mag*(1+eps)) =
1168
+ # exp(2^mag)*exp(2^mag*eps) = exp(2^mag)*(1 + 2^mag*eps + ...)
1169
+ # so about mag extra bits is required.
1170
+ wpmod = wp + mag
1171
+ offset = exp + wpmod
1172
+ if offset >= 0:
1173
+ t = man << offset
1174
+ else:
1175
+ t = man >> (-offset)
1176
+ lg2 = ln2_fixed(wpmod)
1177
+ n, t = divmod(t, lg2)
1178
+ n = int(n)
1179
+ t >>= mag
1180
+ else:
1181
+ offset = exp + wp
1182
+ if offset >= 0:
1183
+ t = man << offset
1184
+ else:
1185
+ t = man >> (-offset)
1186
+ n = 0
1187
+ man = exp_basecase(t, wp)
1188
+ return from_man_exp(man, n-wp, prec, rnd)
1189
+ if not exp:
1190
+ return fone
1191
+ if x == fninf:
1192
+ return fzero
1193
+ return x
1194
+
1195
+
1196
+ def mpf_cosh_sinh(x, prec, rnd=round_fast, tanh=0):
1197
+ """Simultaneously compute (cosh(x), sinh(x)) for real x"""
1198
+ sign, man, exp, bc = x
1199
+ if (not man) and exp:
1200
+ if tanh:
1201
+ if x == finf: return fone
1202
+ if x == fninf: return fnone
1203
+ return fnan
1204
+ if x == finf: return (finf, finf)
1205
+ if x == fninf: return (finf, fninf)
1206
+ return fnan, fnan
1207
+ mag = exp+bc
1208
+ wp = prec+14
1209
+ if mag < -4:
1210
+ # Extremely close to 0, sinh(x) ~= x and cosh(x) ~= 1
1211
+ if mag < -wp:
1212
+ if tanh:
1213
+ return mpf_perturb(x, 1-sign, prec, rnd)
1214
+ cosh = mpf_perturb(fone, 0, prec, rnd)
1215
+ sinh = mpf_perturb(x, sign, prec, rnd)
1216
+ return cosh, sinh
1217
+ # Fix for cancellation when computing sinh
1218
+ wp += (-mag)
1219
+ # Does exp(-2*x) vanish?
1220
+ if mag > 10:
1221
+ if 3*(1<<(mag-1)) > wp:
1222
+ # XXX: rounding
1223
+ if tanh:
1224
+ return mpf_perturb([fone,fnone][sign], 1-sign, prec, rnd)
1225
+ c = s = mpf_shift(mpf_exp(mpf_abs(x), prec, rnd), -1)
1226
+ if sign:
1227
+ s = mpf_neg(s)
1228
+ return c, s
1229
+ # |x| > 1
1230
+ if mag > 1:
1231
+ wpmod = wp + mag
1232
+ offset = exp + wpmod
1233
+ if offset >= 0:
1234
+ t = man << offset
1235
+ else:
1236
+ t = man >> (-offset)
1237
+ lg2 = ln2_fixed(wpmod)
1238
+ n, t = divmod(t, lg2)
1239
+ n = int(n)
1240
+ t >>= mag
1241
+ else:
1242
+ offset = exp + wp
1243
+ if offset >= 0:
1244
+ t = man << offset
1245
+ else:
1246
+ t = man >> (-offset)
1247
+ n = 0
1248
+ a, b = exp_expneg_basecase(t, wp)
1249
+ # TODO: optimize division precision
1250
+ cosh = a + (b>>(2*n))
1251
+ sinh = a - (b>>(2*n))
1252
+ if sign:
1253
+ sinh = -sinh
1254
+ if tanh:
1255
+ man = (sinh << wp) // cosh
1256
+ return from_man_exp(man, -wp, prec, rnd)
1257
+ else:
1258
+ cosh = from_man_exp(cosh, n-wp-1, prec, rnd)
1259
+ sinh = from_man_exp(sinh, n-wp-1, prec, rnd)
1260
+ return cosh, sinh
1261
+
1262
+
1263
+ def mod_pi2(man, exp, mag, wp):
1264
+ # Reduce to standard interval
1265
+ if mag > 0:
1266
+ i = 0
1267
+ while 1:
1268
+ cancellation_prec = 20 << i
1269
+ wpmod = wp + mag + cancellation_prec
1270
+ pi2 = pi_fixed(wpmod-1)
1271
+ pi4 = pi2 >> 1
1272
+ offset = wpmod + exp
1273
+ if offset >= 0:
1274
+ t = man << offset
1275
+ else:
1276
+ t = man >> (-offset)
1277
+ n, y = divmod(t, pi2)
1278
+ if y > pi4:
1279
+ small = pi2 - y
1280
+ else:
1281
+ small = y
1282
+ if small >> (wp+mag-10):
1283
+ n = int(n)
1284
+ t = y >> mag
1285
+ wp = wpmod - mag
1286
+ break
1287
+ i += 1
1288
+ else:
1289
+ wp += (-mag)
1290
+ offset = exp + wp
1291
+ if offset >= 0:
1292
+ t = man << offset
1293
+ else:
1294
+ t = man >> (-offset)
1295
+ n = 0
1296
+ return t, n, wp
1297
+
1298
+
1299
+ def mpf_cos_sin(x, prec, rnd=round_fast, which=0, pi=False):
1300
+ """
1301
+ which:
1302
+ 0 -- return cos(x), sin(x)
1303
+ 1 -- return cos(x)
1304
+ 2 -- return sin(x)
1305
+ 3 -- return tan(x)
1306
+
1307
+ if pi=True, compute for pi*x
1308
+ """
1309
+ sign, man, exp, bc = x
1310
+ if not man:
1311
+ if exp:
1312
+ c, s = fnan, fnan
1313
+ else:
1314
+ c, s = fone, fzero
1315
+ if which == 0: return c, s
1316
+ if which == 1: return c
1317
+ if which == 2: return s
1318
+ if which == 3: return s
1319
+
1320
+ mag = bc + exp
1321
+ wp = prec + 10
1322
+
1323
+ # Extremely small?
1324
+ if mag < 0:
1325
+ if mag < -wp:
1326
+ if pi:
1327
+ x = mpf_mul(x, mpf_pi(wp))
1328
+ c = mpf_perturb(fone, 1, prec, rnd)
1329
+ s = mpf_perturb(x, 1-sign, prec, rnd)
1330
+ if which == 0: return c, s
1331
+ if which == 1: return c
1332
+ if which == 2: return s
1333
+ if which == 3: return mpf_perturb(x, sign, prec, rnd)
1334
+ if pi:
1335
+ if exp >= -1:
1336
+ if exp == -1:
1337
+ c = fzero
1338
+ s = (fone, fnone)[bool(man & 2) ^ sign]
1339
+ elif exp == 0:
1340
+ c, s = (fnone, fzero)
1341
+ else:
1342
+ c, s = (fone, fzero)
1343
+ if which == 0: return c, s
1344
+ if which == 1: return c
1345
+ if which == 2: return s
1346
+ if which == 3: return mpf_div(s, c, prec, rnd)
1347
+ # Subtract nearest half-integer (= mod by pi/2)
1348
+ n = ((man >> (-exp-2)) + 1) >> 1
1349
+ man = man - (n << (-exp-1))
1350
+ mag2 = bitcount(man) + exp
1351
+ wp = prec + 10 - mag2
1352
+ offset = exp + wp
1353
+ if offset >= 0:
1354
+ t = man << offset
1355
+ else:
1356
+ t = man >> (-offset)
1357
+ t = (t*pi_fixed(wp)) >> wp
1358
+ else:
1359
+ t, n, wp = mod_pi2(man, exp, mag, wp)
1360
+ c, s = cos_sin_basecase(t, wp)
1361
+ m = n & 3
1362
+ if m == 1: c, s = -s, c
1363
+ elif m == 2: c, s = -c, -s
1364
+ elif m == 3: c, s = s, -c
1365
+ if sign:
1366
+ s = -s
1367
+ if which == 0:
1368
+ c = from_man_exp(c, -wp, prec, rnd)
1369
+ s = from_man_exp(s, -wp, prec, rnd)
1370
+ return c, s
1371
+ if which == 1:
1372
+ return from_man_exp(c, -wp, prec, rnd)
1373
+ if which == 2:
1374
+ return from_man_exp(s, -wp, prec, rnd)
1375
+ if which == 3:
1376
+ return from_rational(s, c, prec, rnd)
1377
+
1378
+ def mpf_cos(x, prec, rnd=round_fast): return mpf_cos_sin(x, prec, rnd, 1)
1379
+ def mpf_sin(x, prec, rnd=round_fast): return mpf_cos_sin(x, prec, rnd, 2)
1380
+ def mpf_tan(x, prec, rnd=round_fast): return mpf_cos_sin(x, prec, rnd, 3)
1381
+ def mpf_cos_sin_pi(x, prec, rnd=round_fast): return mpf_cos_sin(x, prec, rnd, 0, 1)
1382
+ def mpf_cos_pi(x, prec, rnd=round_fast): return mpf_cos_sin(x, prec, rnd, 1, 1)
1383
+ def mpf_sin_pi(x, prec, rnd=round_fast): return mpf_cos_sin(x, prec, rnd, 2, 1)
1384
+ def mpf_cosh(x, prec, rnd=round_fast): return mpf_cosh_sinh(x, prec, rnd)[0]
1385
+ def mpf_sinh(x, prec, rnd=round_fast): return mpf_cosh_sinh(x, prec, rnd)[1]
1386
+ def mpf_tanh(x, prec, rnd=round_fast): return mpf_cosh_sinh(x, prec, rnd, tanh=1)
1387
+
1388
+
1389
+ # Low-overhead fixed-point versions
1390
+
1391
+ def cos_sin_fixed(x, prec, pi2=None):
1392
+ if pi2 is None:
1393
+ pi2 = pi_fixed(prec-1)
1394
+ n, t = divmod(x, pi2)
1395
+ n = int(n)
1396
+ c, s = cos_sin_basecase(t, prec)
1397
+ m = n & 3
1398
+ if m == 0: return c, s
1399
+ if m == 1: return -s, c
1400
+ if m == 2: return -c, -s
1401
+ if m == 3: return s, -c
1402
+
1403
+ def exp_fixed(x, prec, ln2=None):
1404
+ if ln2 is None:
1405
+ ln2 = ln2_fixed(prec)
1406
+ n, t = divmod(x, ln2)
1407
+ n = int(n)
1408
+ v = exp_basecase(t, prec)
1409
+ if n >= 0:
1410
+ return v << n
1411
+ else:
1412
+ return v >> (-n)
1413
+
1414
+
1415
+ if BACKEND == 'sage':
1416
+ try:
1417
+ import sage.libs.mpmath.ext_libmp as _lbmp
1418
+ mpf_sqrt = _lbmp.mpf_sqrt
1419
+ mpf_exp = _lbmp.mpf_exp
1420
+ mpf_log = _lbmp.mpf_log
1421
+ mpf_cos = _lbmp.mpf_cos
1422
+ mpf_sin = _lbmp.mpf_sin
1423
+ mpf_pow = _lbmp.mpf_pow
1424
+ exp_fixed = _lbmp.exp_fixed
1425
+ cos_sin_fixed = _lbmp.cos_sin_fixed
1426
+ log_int_fixed = _lbmp.log_int_fixed
1427
+ except (ImportError, AttributeError):
1428
+ print("Warning: Sage imports in libelefun failed")
venv/lib/python3.10/site-packages/mpmath/libmp/libhyper.py ADDED
@@ -0,0 +1,1150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This module implements computation of hypergeometric and related
3
+ functions. In particular, it provides code for generic summation
4
+ of hypergeometric series. Optimized versions for various special
5
+ cases are also provided.
6
+ """
7
+
8
+ import operator
9
+ import math
10
+
11
+ from .backend import MPZ_ZERO, MPZ_ONE, BACKEND, xrange, exec_
12
+
13
+ from .libintmath import gcd
14
+
15
+ from .libmpf import (\
16
+ ComplexResult, round_fast, round_nearest,
17
+ negative_rnd, bitcount, to_fixed, from_man_exp, from_int, to_int,
18
+ from_rational,
19
+ fzero, fone, fnone, ftwo, finf, fninf, fnan,
20
+ mpf_sign, mpf_add, mpf_abs, mpf_pos,
21
+ mpf_cmp, mpf_lt, mpf_le, mpf_gt, mpf_min_max,
22
+ mpf_perturb, mpf_neg, mpf_shift, mpf_sub, mpf_mul, mpf_div,
23
+ sqrt_fixed, mpf_sqrt, mpf_rdiv_int, mpf_pow_int,
24
+ to_rational,
25
+ )
26
+
27
+ from .libelefun import (\
28
+ mpf_pi, mpf_exp, mpf_log, pi_fixed, mpf_cos_sin, mpf_cos, mpf_sin,
29
+ mpf_sqrt, agm_fixed,
30
+ )
31
+
32
+ from .libmpc import (\
33
+ mpc_one, mpc_sub, mpc_mul_mpf, mpc_mul, mpc_neg, complex_int_pow,
34
+ mpc_div, mpc_add_mpf, mpc_sub_mpf,
35
+ mpc_log, mpc_add, mpc_pos, mpc_shift,
36
+ mpc_is_infnan, mpc_zero, mpc_sqrt, mpc_abs,
37
+ mpc_mpf_div, mpc_square, mpc_exp
38
+ )
39
+
40
+ from .libintmath import ifac
41
+ from .gammazeta import mpf_gamma_int, mpf_euler, euler_fixed
42
+
43
+ class NoConvergence(Exception):
44
+ pass
45
+
46
+
47
+ #-----------------------------------------------------------------------#
48
+ # #
49
+ # Generic hypergeometric series #
50
+ # #
51
+ #-----------------------------------------------------------------------#
52
+
53
+ """
54
+ TODO:
55
+
56
+ 1. proper mpq parsing
57
+ 2. imaginary z special-cased (also: rational, integer?)
58
+ 3. more clever handling of series that don't converge because of stupid
59
+ upwards rounding
60
+ 4. checking for cancellation
61
+
62
+ """
63
+
64
+ def make_hyp_summator(key):
65
+ """
66
+ Returns a function that sums a generalized hypergeometric series,
67
+ for given parameter types (integer, rational, real, complex).
68
+
69
+ """
70
+ p, q, param_types, ztype = key
71
+
72
+ pstring = "".join(param_types)
73
+ fname = "hypsum_%i_%i_%s_%s_%s" % (p, q, pstring[:p], pstring[p:], ztype)
74
+ #print "generating hypsum", fname
75
+
76
+ have_complex_param = 'C' in param_types
77
+ have_complex_arg = ztype == 'C'
78
+ have_complex = have_complex_param or have_complex_arg
79
+
80
+ source = []
81
+ add = source.append
82
+
83
+ aint = []
84
+ arat = []
85
+ bint = []
86
+ brat = []
87
+ areal = []
88
+ breal = []
89
+ acomplex = []
90
+ bcomplex = []
91
+
92
+ #add("wp = prec + 40")
93
+ add("MAX = kwargs.get('maxterms', wp*100)")
94
+ add("HIGH = MPZ_ONE<<epsshift")
95
+ add("LOW = -HIGH")
96
+
97
+ # Setup code
98
+ add("SRE = PRE = one = (MPZ_ONE << wp)")
99
+ if have_complex:
100
+ add("SIM = PIM = MPZ_ZERO")
101
+
102
+ if have_complex_arg:
103
+ add("xsign, xm, xe, xbc = z[0]")
104
+ add("if xsign: xm = -xm")
105
+ add("ysign, ym, ye, ybc = z[1]")
106
+ add("if ysign: ym = -ym")
107
+ else:
108
+ add("xsign, xm, xe, xbc = z")
109
+ add("if xsign: xm = -xm")
110
+
111
+ add("offset = xe + wp")
112
+ add("if offset >= 0:")
113
+ add(" ZRE = xm << offset")
114
+ add("else:")
115
+ add(" ZRE = xm >> (-offset)")
116
+ if have_complex_arg:
117
+ add("offset = ye + wp")
118
+ add("if offset >= 0:")
119
+ add(" ZIM = ym << offset")
120
+ add("else:")
121
+ add(" ZIM = ym >> (-offset)")
122
+
123
+ for i, flag in enumerate(param_types):
124
+ W = ["A", "B"][i >= p]
125
+ if flag == 'Z':
126
+ ([aint,bint][i >= p]).append(i)
127
+ add("%sINT_%i = coeffs[%i]" % (W, i, i))
128
+ elif flag == 'Q':
129
+ ([arat,brat][i >= p]).append(i)
130
+ add("%sP_%i, %sQ_%i = coeffs[%i]._mpq_" % (W, i, W, i, i))
131
+ elif flag == 'R':
132
+ ([areal,breal][i >= p]).append(i)
133
+ add("xsign, xm, xe, xbc = coeffs[%i]._mpf_" % i)
134
+ add("if xsign: xm = -xm")
135
+ add("offset = xe + wp")
136
+ add("if offset >= 0:")
137
+ add(" %sREAL_%i = xm << offset" % (W, i))
138
+ add("else:")
139
+ add(" %sREAL_%i = xm >> (-offset)" % (W, i))
140
+ elif flag == 'C':
141
+ ([acomplex,bcomplex][i >= p]).append(i)
142
+ add("__re, __im = coeffs[%i]._mpc_" % i)
143
+ add("xsign, xm, xe, xbc = __re")
144
+ add("if xsign: xm = -xm")
145
+ add("ysign, ym, ye, ybc = __im")
146
+ add("if ysign: ym = -ym")
147
+
148
+ add("offset = xe + wp")
149
+ add("if offset >= 0:")
150
+ add(" %sCRE_%i = xm << offset" % (W, i))
151
+ add("else:")
152
+ add(" %sCRE_%i = xm >> (-offset)" % (W, i))
153
+ add("offset = ye + wp")
154
+ add("if offset >= 0:")
155
+ add(" %sCIM_%i = ym << offset" % (W, i))
156
+ add("else:")
157
+ add(" %sCIM_%i = ym >> (-offset)" % (W, i))
158
+ else:
159
+ raise ValueError
160
+
161
+ l_areal = len(areal)
162
+ l_breal = len(breal)
163
+ cancellable_real = min(l_areal, l_breal)
164
+ noncancellable_real_num = areal[cancellable_real:]
165
+ noncancellable_real_den = breal[cancellable_real:]
166
+
167
+ # LOOP
168
+ add("for n in xrange(1,10**8):")
169
+
170
+ add(" if n in magnitude_check:")
171
+ add(" p_mag = bitcount(abs(PRE))")
172
+ if have_complex:
173
+ add(" p_mag = max(p_mag, bitcount(abs(PIM)))")
174
+ add(" magnitude_check[n] = wp-p_mag")
175
+
176
+ # Real factors
177
+ multiplier = " * ".join(["AINT_#".replace("#", str(i)) for i in aint] + \
178
+ ["AP_#".replace("#", str(i)) for i in arat] + \
179
+ ["BQ_#".replace("#", str(i)) for i in brat])
180
+
181
+ divisor = " * ".join(["BINT_#".replace("#", str(i)) for i in bint] + \
182
+ ["BP_#".replace("#", str(i)) for i in brat] + \
183
+ ["AQ_#".replace("#", str(i)) for i in arat] + ["n"])
184
+
185
+ if multiplier:
186
+ add(" mul = " + multiplier)
187
+ add(" div = " + divisor)
188
+
189
+ # Check for singular terms
190
+ add(" if not div:")
191
+ if multiplier:
192
+ add(" if not mul:")
193
+ add(" break")
194
+ add(" raise ZeroDivisionError")
195
+
196
+ # Update product
197
+ if have_complex:
198
+
199
+ # TODO: when there are several real parameters and just a few complex
200
+ # (maybe just the complex argument), we only need to do about
201
+ # half as many ops if we accumulate the real factor in a single real variable
202
+ for k in range(cancellable_real): add(" PRE = PRE * AREAL_%i // BREAL_%i" % (areal[k], breal[k]))
203
+ for i in noncancellable_real_num: add(" PRE = (PRE * AREAL_#) >> wp".replace("#", str(i)))
204
+ for i in noncancellable_real_den: add(" PRE = (PRE << wp) // BREAL_#".replace("#", str(i)))
205
+ for k in range(cancellable_real): add(" PIM = PIM * AREAL_%i // BREAL_%i" % (areal[k], breal[k]))
206
+ for i in noncancellable_real_num: add(" PIM = (PIM * AREAL_#) >> wp".replace("#", str(i)))
207
+ for i in noncancellable_real_den: add(" PIM = (PIM << wp) // BREAL_#".replace("#", str(i)))
208
+
209
+ if multiplier:
210
+ if have_complex_arg:
211
+ add(" PRE, PIM = (mul*(PRE*ZRE-PIM*ZIM))//div, (mul*(PIM*ZRE+PRE*ZIM))//div")
212
+ add(" PRE >>= wp")
213
+ add(" PIM >>= wp")
214
+ else:
215
+ add(" PRE = ((mul * PRE * ZRE) >> wp) // div")
216
+ add(" PIM = ((mul * PIM * ZRE) >> wp) // div")
217
+ else:
218
+ if have_complex_arg:
219
+ add(" PRE, PIM = (PRE*ZRE-PIM*ZIM)//div, (PIM*ZRE+PRE*ZIM)//div")
220
+ add(" PRE >>= wp")
221
+ add(" PIM >>= wp")
222
+ else:
223
+ add(" PRE = ((PRE * ZRE) >> wp) // div")
224
+ add(" PIM = ((PIM * ZRE) >> wp) // div")
225
+
226
+ for i in acomplex:
227
+ add(" PRE, PIM = PRE*ACRE_#-PIM*ACIM_#, PIM*ACRE_#+PRE*ACIM_#".replace("#", str(i)))
228
+ add(" PRE >>= wp")
229
+ add(" PIM >>= wp")
230
+
231
+ for i in bcomplex:
232
+ add(" mag = BCRE_#*BCRE_#+BCIM_#*BCIM_#".replace("#", str(i)))
233
+ add(" re = PRE*BCRE_# + PIM*BCIM_#".replace("#", str(i)))
234
+ add(" im = PIM*BCRE_# - PRE*BCIM_#".replace("#", str(i)))
235
+ add(" PRE = (re << wp) // mag".replace("#", str(i)))
236
+ add(" PIM = (im << wp) // mag".replace("#", str(i)))
237
+
238
+ else:
239
+ for k in range(cancellable_real): add(" PRE = PRE * AREAL_%i // BREAL_%i" % (areal[k], breal[k]))
240
+ for i in noncancellable_real_num: add(" PRE = (PRE * AREAL_#) >> wp".replace("#", str(i)))
241
+ for i in noncancellable_real_den: add(" PRE = (PRE << wp) // BREAL_#".replace("#", str(i)))
242
+ if multiplier:
243
+ add(" PRE = ((PRE * mul * ZRE) >> wp) // div")
244
+ else:
245
+ add(" PRE = ((PRE * ZRE) >> wp) // div")
246
+
247
+ # Add product to sum
248
+ if have_complex:
249
+ add(" SRE += PRE")
250
+ add(" SIM += PIM")
251
+ add(" if (HIGH > PRE > LOW) and (HIGH > PIM > LOW):")
252
+ add(" break")
253
+ else:
254
+ add(" SRE += PRE")
255
+ add(" if HIGH > PRE > LOW:")
256
+ add(" break")
257
+
258
+ #add(" from mpmath import nprint, log, ldexp")
259
+ #add(" nprint([n, log(abs(PRE),2), ldexp(PRE,-wp)])")
260
+
261
+ add(" if n > MAX:")
262
+ add(" raise NoConvergence('Hypergeometric series converges too slowly. Try increasing maxterms.')")
263
+
264
+ # +1 all parameters for next loop
265
+ for i in aint: add(" AINT_# += 1".replace("#", str(i)))
266
+ for i in bint: add(" BINT_# += 1".replace("#", str(i)))
267
+ for i in arat: add(" AP_# += AQ_#".replace("#", str(i)))
268
+ for i in brat: add(" BP_# += BQ_#".replace("#", str(i)))
269
+ for i in areal: add(" AREAL_# += one".replace("#", str(i)))
270
+ for i in breal: add(" BREAL_# += one".replace("#", str(i)))
271
+ for i in acomplex: add(" ACRE_# += one".replace("#", str(i)))
272
+ for i in bcomplex: add(" BCRE_# += one".replace("#", str(i)))
273
+
274
+ if have_complex:
275
+ add("a = from_man_exp(SRE, -wp, prec, 'n')")
276
+ add("b = from_man_exp(SIM, -wp, prec, 'n')")
277
+
278
+ add("if SRE:")
279
+ add(" if SIM:")
280
+ add(" magn = max(a[2]+a[3], b[2]+b[3])")
281
+ add(" else:")
282
+ add(" magn = a[2]+a[3]")
283
+ add("elif SIM:")
284
+ add(" magn = b[2]+b[3]")
285
+ add("else:")
286
+ add(" magn = -wp+1")
287
+
288
+ add("return (a, b), True, magn")
289
+ else:
290
+ add("a = from_man_exp(SRE, -wp, prec, 'n')")
291
+
292
+ add("if SRE:")
293
+ add(" magn = a[2]+a[3]")
294
+ add("else:")
295
+ add(" magn = -wp+1")
296
+
297
+ add("return a, False, magn")
298
+
299
+ source = "\n".join((" " + line) for line in source)
300
+ source = ("def %s(coeffs, z, prec, wp, epsshift, magnitude_check, **kwargs):\n" % fname) + source
301
+
302
+ namespace = {}
303
+
304
+ exec_(source, globals(), namespace)
305
+
306
+ #print source
307
+ return source, namespace[fname]
308
+
309
+
310
if BACKEND == 'sage':

    # On the Sage backend, replace the code-generating summator above
    # with a thin wrapper around Sage's Cython implementation.
    def make_hyp_summator(key):
        """
        Returns a function that sums a generalized hypergeometric series,
        for given parameter types (integer, rational, real, complex).
        """
        from sage.libs.mpmath.ext_main import hypsum_internal
        p, q, param_types, ztype = key
        def _hypsum(coeffs, z, prec, wp, epsshift, magnitude_check, **kwargs):
            return hypsum_internal(p, q, param_types, ztype, coeffs, z,
                prec, wp, epsshift, magnitude_check, kwargs)

        # No generated source to report, hence the "(none)" placeholder
        return "(none)", _hypsum
324
+
325
+
326
+ #-----------------------------------------------------------------------#
327
+ # #
328
+ # Error functions #
329
+ # #
330
+ #-----------------------------------------------------------------------#
331
+
332
+ # TODO: mpf_erf should call mpf_erfc when appropriate (currently
333
+ # only the converse delegation is implemented)
334
+
335
def mpf_erf(x, prec, rnd=round_fast):
    """
    Compute erf(x) for a real mpf value x.

    Uses the Taylor series with fixed-point summation for moderate x,
    and the limiting values +/-1 (with directed perturbation) when the
    series would be indistinguishable from 1 at the target precision.
    """
    sign, man, exp, bc = x
    if not man:
        # Special values: erf(0)=0, erf(+inf)=1, erf(-inf)=-1
        if x == fzero: return fzero
        if x == finf: return fone
        if x== fninf: return fnone
        return fnan
    size = exp + bc
    lg = math.log
    # The approximation erf(x) = 1 is accurate to > x^2 * log(e,2) bits
    if size > 3 and 2*(size-1) + 0.528766 > lg(prec,2):
        if sign:
            return mpf_perturb(fnone, 0, prec, rnd)
        else:
            return mpf_perturb(fone, 1, prec, rnd)
    # erf(x) ~ 2*x/sqrt(pi) close to 0
    if size < -prec:
        # 2*x
        x = mpf_shift(x,1)
        c = mpf_sqrt(mpf_pi(prec+20), prec+20)
        # TODO: interval rounding
        return mpf_div(x, c, prec, rnd)
    # Extra working precision compensates for cancellation in the
    # alternating series (terms grow before they shrink)
    wp = prec + abs(size) + 25
    # Taylor series for erf, fixed-point summation
    t = abs(to_fixed(x, wp))
    t2 = (t*t) >> wp
    # 12345 is just a nonzero dummy so the loop runs at least once
    s, term, k = t, 12345, 1
    while term:
        t = ((t * t2) >> wp) // k
        term = t // (2*k+1)
        if k & 1:
            s -= term
        else:
            s += term
        k += 1
    # Multiply by 2/sqrt(pi)
    s = (s << (wp+1)) // sqrt_fixed(pi_fixed(wp), wp)
    if sign:
        s = -s
    return from_man_exp(s, -wp, prec, rnd)
374
+
375
# If possible, we use the asymptotic series for erfc.
# This is an alternating divergent asymptotic series, so
# the error is at most equal to the first omitted term.
# Here we check if the smallest term is small enough
# for a given x and precision
def erfc_check_series(x, prec):
    """Return True if the erfc asymptotic series is usable at x, prec.

    The smallest term of the series is ~exp(-x^2); it is small enough
    when x^2*log2(e) exceeds the working precision.
    """
    return to_int(x)**2 * 1.44 > prec
385
+
386
def mpf_erfc(x, prec, rnd=round_fast):
    """
    Compute erfc(x) for a real mpf value x.

    Delegates to 1-erf(x) when x is negative or small; otherwise sums
    the (divergent, alternating) asymptotic series, truncating at the
    smallest term.
    """
    sign, man, exp, bc = x
    if not man:
        # Special values: erfc(0)=1, erfc(+inf)=0, erfc(-inf)=2
        if x == fzero: return fone
        if x == finf: return fzero
        if x == fninf: return ftwo
        return fnan
    wp = prec + 20
    mag = bc+exp
    # Preserve full accuracy when exponent grows huge
    wp += max(0, 2*mag)
    regular_erf = sign or mag < 2
    if regular_erf or not erfc_check_series(x, wp):
        if regular_erf:
            return mpf_sub(fone, mpf_erf(x, prec+10, negative_rnd[rnd]), prec, rnd)
        # 1-erf(x) ~ exp(-x^2), increase prec to deal with cancellation
        n = to_int(x)+1
        return mpf_sub(fone, mpf_erf(x, prec + int(n**2*1.44) + 10), prec, rnd)
    # Asymptotic series: erfc(x) ~ exp(-x^2)/(x*sqrt(pi)) * sum(...)
    s = term = MPZ_ONE << wp
    term_prev = 0
    t = (2 * to_fixed(x, wp) ** 2) >> wp
    k = 1
    while 1:
        term = ((term * (2*k - 1)) << wp) // t
        # Stop when terms start growing (divergence) or vanish
        if k > 4 and term > term_prev or not term:
            break
        if k & 1:
            s -= term
        else:
            s += term
        term_prev = term
        #print k, to_str(from_man_exp(term, -wp, 50), 10)
        k += 1
    # Divide by sqrt(pi), then multiply by exp(-x^2)/x
    s = (s << wp) // sqrt_fixed(pi_fixed(wp), wp)
    s = from_man_exp(s, -wp, wp)
    z = mpf_exp(mpf_neg(mpf_mul(x,x,wp),wp),wp)
    y = mpf_div(mpf_mul(z, s, wp), x, prec, rnd)
    return y
424
+
425
+
426
+ #-----------------------------------------------------------------------#
427
+ # #
428
+ # Exponential integrals #
429
+ # #
430
+ #-----------------------------------------------------------------------#
431
+
432
def ei_taylor(x, prec):
    """Fixed-point Taylor series for Ei: sum of x^k/(k*k!), k >= 1.

    *x* is a fixed-point integer with *prec* fractional bits; the
    result has the same format. Convergence requires |x| not too large.
    """
    term = x
    total = x
    k = 2
    while term:
        term = ((term * x) >> prec) // k
        total += term // k
        k += 1
    return total
440
+
441
def complex_ei_taylor(zre, zim, prec):
    """Complex fixed-point Taylor series for Ei: sum of z^k/(k*k!).

    Inputs and outputs are fixed-point integers with *prec* fractional
    bits; returns the (real, imag) pair of the partial sum. Terms are
    summed until they drop below a few ulps.
    """
    _abs = abs
    sum_re, term_re = zre, zre
    sum_im, term_im = zim, zim
    k = 2
    while _abs(term_re) + _abs(term_im) > 5:
        # term *= z/k  (divide by k before the shift, as in the scalar case)
        term_re, term_im = (
            ((term_re*zre - term_im*zim)//k) >> prec,
            ((term_re*zim + term_im*zre)//k) >> prec,
        )
        sum_re += term_re // k
        sum_im += term_im // k
        k += 1
    return sum_re, sum_im
452
+
453
def ei_asymptotic(x, prec):
    """Fixed-point asymptotic series for Ei: sum of k!/x^k, k >= 0.

    *x* is a fixed-point integer with *prec* fractional bits. The
    series diverges eventually; the caller must ensure |x| is large
    enough that terms underflow to 0 first.
    """
    one = MPZ_ONE << prec
    # Work with the reciprocal 1/x in fixed point
    recip = (one << prec) // x
    total = one + recip
    term = recip
    k = 2
    while term:
        term = (k * term * recip) >> prec
        total += term
        k += 1
    return total
463
+
464
def complex_ei_asymptotic(zre, zim, prec):
    """
    Complex fixed-point asymptotic series for Ei: sum of k!/z^k.

    Raises NoConvergence if the terms have not vanished after ~prec
    iterations (the series is divergent, so this can happen when |z|
    is too small for the asymptotic regime).
    """
    _abs = abs
    one = MPZ_ONE << prec
    # |z|^2 in fixed point
    M = (zim*zim + zre*zre) >> prec
    # 1 / z
    xre = tre = (zre << prec) // M
    xim = tim = ((-zim) << prec) // M
    sre = one + xre
    sim = xim
    k = 2
    while _abs(tre) + _abs(tim) > 1000:
        #print tre, tim
        tre, tim = ((tre*xre-tim*xim)*k)>>prec, ((tre*xim+tim*xre)*k)>>prec
        sre += tre
        sim += tim
        k += 1
        if k > prec:
            raise NoConvergence
    return sre, sim
483
+
484
def mpf_ei(x, prec, rnd=round_fast, e1=False):
    """
    Compute the exponential integral Ei(x) for real x; with e1=True,
    computes E1(x) = -Ei(-x) instead (raising ComplexResult for x < 0).

    Uses the asymptotic series exp(x)/x * sum(k!/x^k) for large |x|,
    and the Taylor series plus euler + log|x| otherwise.
    """
    if e1:
        x = mpf_neg(x)
    sign, man, exp, bc = x
    if e1 and not sign:
        if x == fzero:
            return finf
        raise ComplexResult("E1(x) for x < 0")
    if man:
        xabs = 0, man, exp, bc
        xmag = exp+bc
        wp = prec + 20
        can_use_asymp = xmag > wp
        if not can_use_asymp:
            if exp >= 0:
                xabsint = man << exp
            else:
                xabsint = man >> (-exp)
            # Threshold |x| > wp*log(2): series terms then underflow
            can_use_asymp = xabsint > int(wp*0.693) + 10
        if can_use_asymp:
            if xmag > wp:
                # exp(x)/x alone is accurate; sum collapses to 1
                v = fone
            else:
                v = from_man_exp(ei_asymptotic(to_fixed(x, wp), wp), -wp)
            v = mpf_mul(v, mpf_exp(x, wp), wp)
            v = mpf_div(v, x, prec, rnd)
        else:
            # Taylor series; extra precision for cancellation when x < 0
            wp += 2*int(to_int(xabs))
            u = to_fixed(x, wp)
            v = ei_taylor(u, wp) + euler_fixed(wp)
            t1 = from_man_exp(v,-wp)
            t2 = mpf_log(xabs,wp)
            v = mpf_add(t1, t2, prec, rnd)
    else:
        # Special values: Ei(0)=-inf, Ei(+inf)=+inf, Ei(-inf)=0
        if x == fzero: v = fninf
        elif x == finf: v = finf
        elif x == fninf: v = fzero
        else: v = fnan
    if e1:
        v = mpf_neg(v)
    return v
525
+
526
def mpc_ei(z, prec, rnd=round_fast, e1=False):
    """
    Compute the exponential integral Ei(z) for complex z; with e1=True,
    computes E1(z) = -Ei(-z) with the standard branch cut.

    Chooses between the asymptotic series (large |z|) and the Taylor
    series plus euler + log(z); the asymptotic attempt may raise
    NoConvergence, in which case it falls back to the Taylor series.
    """
    if e1:
        z = mpc_neg(z)
    a, b = z
    asign, aman, aexp, abc = a
    bsign, bman, bexp, bbc = b
    if b == fzero:
        # Real axis: delegate to the real routine, adding the branch
        # cut term -pi*i for E1 of a negative real argument
        if e1:
            x = mpf_neg(mpf_ei(a, prec, rnd))
            if not asign:
                y = mpf_neg(mpf_pi(prec, rnd))
            else:
                y = fzero
            return x, y
        else:
            return mpf_ei(a, prec, rnd), fzero
    if a != fzero:
        if not aman or not bman:
            return (fnan, fnan)
    wp = prec + 40
    amag = aexp+abc
    bmag = bexp+bbc
    zmag = max(amag, bmag)
    can_use_asymp = zmag > wp
    if not can_use_asymp:
        zabsint = abs(to_int(a)) + abs(to_int(b))
        can_use_asymp = zabsint > int(wp*0.693) + 20
    try:
        if can_use_asymp:
            if zmag > wp:
                # exp(z)/z alone is accurate; sum collapses to 1
                v = fone, fzero
            else:
                zre = to_fixed(a, wp)
                zim = to_fixed(b, wp)
                vre, vim = complex_ei_asymptotic(zre, zim, wp)
                v = from_man_exp(vre, -wp), from_man_exp(vim, -wp)
            v = mpc_mul(v, mpc_exp(z, wp), wp)
            v = mpc_div(v, z, wp)
            if e1:
                v = mpc_neg(v, prec, rnd)
            else:
                # Add the +/-pi branch cut contribution to the
                # imaginary part, depending on the sign of Im(z)
                x, y = v
                if bsign:
                    v = mpf_pos(x, prec, rnd), mpf_sub(y, mpf_pi(wp), prec, rnd)
                else:
                    v = mpf_pos(x, prec, rnd), mpf_add(y, mpf_pi(wp), prec, rnd)
            return v
    except NoConvergence:
        # |z| was too small for the asymptotic series; use Taylor below
        pass
    #wp += 2*max(0,zmag)
    wp += 2*int(to_int(mpc_abs(z, 5)))
    zre = to_fixed(a, wp)
    zim = to_fixed(b, wp)
    vre, vim = complex_ei_taylor(zre, zim, wp)
    vre += euler_fixed(wp)
    v = from_man_exp(vre,-wp), from_man_exp(vim,-wp)
    if e1:
        u = mpc_log(mpc_neg(z),wp)
    else:
        u = mpc_log(z,wp)
    v = mpc_add(v, u, prec, rnd)
    if e1:
        v = mpc_neg(v)
    return v
590
+
591
def mpf_e1(x, prec, rnd=round_fast):
    """E1(x) = -Ei(-x) for real x, via the Ei routine."""
    return mpf_ei(x, prec, rnd, e1=True)
593
+
594
def mpc_e1(x, prec, rnd=round_fast):
    """E1(z) = -Ei(-z) for complex z, via the Ei routine."""
    return mpc_ei(x, prec, rnd, e1=True)
596
+
597
def mpf_expint(n, x, prec, rnd=round_fast, gamma=False):
    """
    E_n(x), n an integer, x real

    With gamma=True, computes Gamma(n,x)   (upper incomplete gamma function)

    Returns (real, None) if real, otherwise (real, imag)
    The imaginary part is an optional branch cut term

    """
    sign, man, exp, bc = x
    if not man:
        # Special values (0, +/-inf, nan)
        if gamma:
            if x == fzero:
                # Actually gamma function pole
                if n <= 0:
                    return finf, None
                return mpf_gamma_int(n, prec, rnd), None
            if x == finf:
                return fzero, None
            # TODO: could return finite imaginary value at -inf
            return fnan, fnan
        else:
            if x == fzero:
                if n > 1:
                    return from_rational(1, n-1, prec, rnd), None
                else:
                    return finf, None
            if x == finf:
                return fzero, None
            return fnan, fnan
    n_orig = n
    # Reduce Gamma(n,x) to the E_n case via n -> 1-n
    if gamma:
        n = 1-n
    wp = prec + 20
    xmag = exp + bc
    # Beware of near-poles
    if xmag < -10:
        raise NotImplementedError
    nmag = bitcount(abs(n))
    have_imag = n > 0 and sign
    negx = mpf_neg(x)
    # Skip series if direct convergence
    if n == 0 or 2*nmag - xmag < -wp:
        if gamma:
            v = mpf_exp(negx, wp)
            re = mpf_mul(v, mpf_pow_int(x, n_orig-1, wp), prec, rnd)
        else:
            v = mpf_exp(negx, wp)
            re = mpf_div(v, x, prec, rnd)
    else:
        # Finite number of terms, or...
        can_use_asymptotic_series = -3*wp < n <= 0
        # ...large enough?
        if not can_use_asymptotic_series:
            xi = abs(to_int(x))
            m = min(max(1, xi-n), 2*wp)
            # Estimated bit size of the smallest asymptotic-series term
            siz = -n*nmag + (m+n)*bitcount(abs(m+n)) - m*xmag - (144*m//100)
            tol = -wp-10
            can_use_asymptotic_series = siz < tol
        if can_use_asymptotic_series:
            # Fixed-point -1/x
            r = ((-MPZ_ONE) << (wp+wp)) // to_fixed(x, wp)
            m = n
            t = r*m
            s = MPZ_ONE << wp
            while m and t:
                s += t
                m += 1
                t = (m*r*t) >> wp
            v = mpf_exp(negx, wp)
            if gamma:
                # ~ exp(-x) * x^(n-1) * (1 + ...)
                v = mpf_mul(v, mpf_pow_int(x, n_orig-1, wp), wp)
            else:
                # ~ exp(-x)/x * (1 + ...)
                v = mpf_div(v, x, wp)
            re = mpf_mul(v, from_man_exp(s, -wp), prec, rnd)
        elif n == 1:
            re = mpf_neg(mpf_ei(negx, prec, rnd))
        elif n > 0 and n < 3*wp:
            # Recurrence-based finite sum: reduces E_n to E_1 plus a
            # polynomial in x with factorial coefficients
            T1 = mpf_neg(mpf_ei(negx, wp))
            if gamma:
                if n_orig & 1:
                    T1 = mpf_neg(T1)
            else:
                T1 = mpf_mul(T1, mpf_pow_int(negx, n-1, wp), wp)
            r = t = to_fixed(x, wp)
            facs = [1] * (n-1)
            for k in range(1,n-1):
                facs[k] = facs[k-1] * k
            facs = facs[::-1]
            s = facs[0] << wp
            for k in range(1, n-1):
                if k & 1:
                    s -= facs[k] * t
                else:
                    s += facs[k] * t
                t = (t*r) >> wp
            T2 = from_man_exp(s, -wp, wp)
            T2 = mpf_mul(T2, mpf_exp(negx, wp))
            if gamma:
                T2 = mpf_mul(T2, mpf_pow_int(x, n_orig, wp), wp)
            R = mpf_add(T1, T2)
            re = mpf_div(R, from_int(ifac(n-1)), prec, rnd)
        else:
            raise NotImplementedError
    if have_imag:
        # Branch cut term for negative real x
        M = from_int(-ifac(n-1))
        if gamma:
            im = mpf_div(mpf_pi(wp), M, prec, rnd)
            if n_orig & 1:
                im = mpf_neg(im)
        else:
            im = mpf_div(mpf_mul(mpf_pi(wp), mpf_pow_int(negx, n_orig-1, wp), wp), M, prec, rnd)
        return re, im
    else:
        return re, None
714
+
715
def mpf_ci_si_taylor(x, wp, which=0):
    """
    Fixed-point Taylor series for the trigonometric integrals:

    0 - Ci(x) - (euler+log(x))
    1 - Si(x)

    Only suitable for small x (the terms must eventually shrink).
    """
    x = to_fixed(x, wp)
    # -x^2, the ratio driver of both series
    x2 = -(x*x) >> wp
    if which == 0:
        s, t, k = 0, (MPZ_ONE<<wp), 2
    else:
        s, t, k = x, x, 3
    while t:
        t = (t*x2//(k*(k-1)))>>wp
        s += t//k
        k += 2
    return from_man_exp(s, -wp)
731
+
732
def mpc_ci_si_taylor(re, im, wp, which=0):
    """
    Complex fixed-point Taylor series for the trigonometric integrals.

    which = 0 -- Ci(z) - (euler+log(z))
    which = 1 -- Si(z)

    Returns an (mpf, mpf) pair. The code is only designed for small
    arguments, and not too small arguments (for relative accuracy);
    out-of-range magnitudes raise NotImplementedError.
    """
    # Magnitude of the larger component; if neither component has a
    # mantissa (zero/inf/nan), the series is unusable. Previously this
    # case left ``mag`` unbound and crashed with NameError.
    if re[1]:
        mag = re[2]+re[3]
        if im[1]:
            mag = max(mag, im[2]+im[3])
    elif im[1]:
        mag = im[2]+im[3]
    else:
        raise NotImplementedError
    if mag > 2 or mag < -wp:
        raise NotImplementedError
    wp += (2-mag)
    zre = to_fixed(re, wp)
    zim = to_fixed(im, wp)
    # -z^2 in fixed point
    z2re = (zim*zim-zre*zre)>>wp
    z2im = (-2*zre*zim)>>wp
    if which == 0:
        sre, sim, tre, tim, k = 0, 0, (MPZ_ONE<<wp), 0, 2
    else:
        sre, sim, tre, tim, k = zre, zim, zre, zim, 3
    while max(abs(tre), abs(tim)) > 2:
        f = k*(k-1)
        tre, tim = ((tre*z2re-tim*z2im)//f)>>wp, ((tre*z2im+tim*z2re)//f)>>wp
        sre += tre//k
        sim += tim//k
        k += 2
    return from_man_exp(sre, -wp), from_man_exp(sim, -wp)
762
+
763
def mpf_ci_si(x, prec, rnd=round_fast, which=2):
    """
    Calculation of Ci(x), Si(x) for real x.

    which = 0 -- returns (Ci(x), -)
    which = 1 -- returns (Si(x), -)
    which = 2 -- returns (Ci(x), Si(x))

    Note: if x < 0, Ci(x) needs an additional imaginary term, pi*i.
    """
    wp = prec + 20
    sign, man, exp, bc = x
    ci, si = None, None
    if not man:
        # Special values: Ci(0) = -inf, Si(+/-inf) = +/-pi/2
        if x == fzero:
            return (fninf, fzero)
        if x == fnan:
            return (x, x)
        ci = fzero
        if which != 0:
            if x == finf:
                si = mpf_shift(mpf_pi(prec, rnd), -1)
            if x == fninf:
                si = mpf_neg(mpf_shift(mpf_pi(prec, negative_rnd[rnd]), -1))
        return (ci, si)
    # For small x: Ci(x) ~ euler + log(x), Si(x) ~ x
    mag = exp+bc
    if mag < -wp:
        if which != 0:
            si = mpf_perturb(x, 1-sign, prec, rnd)
        if which != 1:
            y = mpf_euler(wp)
            xabs = mpf_abs(x)
            ci = mpf_add(y, mpf_log(xabs, wp), prec, rnd)
        return ci, si
    # For huge x: Ci(x) ~ sin(x)/x, Si(x) ~ pi/2
    elif mag > wp:
        if which != 0:
            if sign:
                si = mpf_neg(mpf_pi(prec, negative_rnd[rnd]))
            else:
                si = mpf_pi(prec, rnd)
            si = mpf_shift(si, -1)
        if which != 1:
            ci = mpf_div(mpf_sin(x, wp), x, prec, rnd)
        return ci, si
    else:
        wp += abs(mag)
    # Use an asymptotic series? The smallest value of n!/x^n
    # occurs for n ~ x, where the magnitude is ~ exp(-x).
    asymptotic = mag-1 > math.log(wp, 2)
    # Case 1: convergent series near 0
    if not asymptotic:
        if which != 0:
            si = mpf_pos(mpf_ci_si_taylor(x, wp, 1), prec, rnd)
        if which != 1:
            ci = mpf_ci_si_taylor(x, wp, 0)
            ci = mpf_add(ci, mpf_euler(wp), wp)
            ci = mpf_add(ci, mpf_log(mpf_abs(x), wp), prec, rnd)
        return ci, si
    x = mpf_abs(x)
    # Case 2: asymptotic series for x >> 1
    xf = to_fixed(x, wp)
    xr = (MPZ_ONE<<(2*wp)) // xf # 1/x
    # s1, s2 accumulate the two auxiliary asymptotic sums f(x), g(x)
    s1 = (MPZ_ONE << wp)
    s2 = xr
    t = xr
    k = 2
    while t:
        t = -t
        t = (t*xr*k)>>wp
        k += 1
        s1 += t
        t = (t*xr*k)>>wp
        k += 1
        s2 += t
    s1 = from_man_exp(s1, -wp)
    s2 = from_man_exp(s2, -wp)
    s1 = mpf_div(s1, x, wp)
    s2 = mpf_div(s2, x, wp)
    cos, sin = mpf_cos_sin(x, wp)
    # Ci(x) = sin(x)*s1-cos(x)*s2
    # Si(x) = pi/2-cos(x)*s1-sin(x)*s2
    if which != 0:
        si = mpf_add(mpf_mul(cos, s1), mpf_mul(sin, s2), wp)
        si = mpf_sub(mpf_shift(mpf_pi(wp), -1), si, wp)
        if sign:
            # Si is odd; restore the sign of the original argument
            si = mpf_neg(si)
        si = mpf_pos(si, prec, rnd)
    if which != 1:
        ci = mpf_sub(mpf_mul(sin, s1), mpf_mul(cos, s2), prec, rnd)
    return ci, si
855
+
856
def mpf_ci(x, prec, rnd=round_fast):
    """Ci(x) for real x >= 0; raises ComplexResult for x < 0."""
    if mpf_sign(x) < 0:
        raise ComplexResult
    ci, _ = mpf_ci_si(x, prec, rnd, 0)
    return ci
860
+
861
def mpf_si(x, prec, rnd=round_fast):
    """Si(x) for real x."""
    _, si = mpf_ci_si(x, prec, rnd, 1)
    return si
863
+
864
def mpc_ci(z, prec, rnd=round_fast):
    """Ci(z) for complex z.

    Real arguments delegate to the real routine (adding the pi*i branch
    cut term for negative x); otherwise the Taylor series is used with
    the euler + log(z) terms added back.
    """
    re, im = z
    if im == fzero:
        ci_real = mpf_ci_si(re, prec, rnd, 0)[0]
        if mpf_sign(re) < 0:
            return (ci_real, mpf_pi(prec, rnd))
        return (ci_real, fzero)
    wp = prec + 20
    cre, cim = mpc_ci_si_taylor(re, im, wp, 0)
    cre = mpf_add(cre, mpf_euler(wp), wp)
    return mpc_add((cre, cim), mpc_log(z, wp), prec, rnd)
876
+
877
def mpc_si(z, prec, rnd=round_fast):
    """Si(z) for complex z.

    Real arguments delegate to the real routine; otherwise the Taylor
    series is used directly.
    """
    re, im = z
    if im == fzero:
        si_real = mpf_ci_si(re, prec, rnd, 1)[1]
        return (si_real, fzero)
    wp = prec + 20
    w = mpc_ci_si_taylor(re, im, wp, 1)
    return mpc_pos(w, prec, rnd)
884
+
885
+
886
+ #-----------------------------------------------------------------------#
887
+ # #
888
+ # Bessel functions #
889
+ # #
890
+ #-----------------------------------------------------------------------#
891
+
892
+ # A Bessel function of the first kind of integer order, J_n(x), is
893
+ # given by the power series
894
+
895
+ # oo
896
+ # ___ k 2 k + n
897
+ # \ (-1) / x \
898
+ # J_n(x) = ) ----------- | - |
899
+ # /___ k! (k + n)! \ 2 /
900
+ # k = 0
901
+
902
+ # Simplifying the quotient between two successive terms gives the
903
+ # ratio x^2 / (-4*k*(k+n)). Hence, we only need one full-precision
904
+ # multiplication and one division by a small integer per term.
905
+ # The complex version is very similar, the only difference being
906
+ # that the multiplication is actually 4 multiplies.
907
+
908
+ # In the general case, we have
909
+ # J_v(x) = (x/2)**v / v! * 0F1(v+1, (-1/4)*z**2)
910
+
911
+ # TODO: for extremely large x, we could use an asymptotic
912
+ # trigonometric approximation.
913
+
914
+ # TODO: recompute at higher precision if the fixed-point mantissa
915
+ # is very small
916
+
917
def mpf_besseljn(n, x, prec, rounding=round_fast):
    """
    Bessel function J_n(x) for integer n and real mpf x, computed by
    fixed-point summation of the power series. The term ratio is
    x^2 / (-4*k*(k+n)), so each term needs one big multiplication and
    one small division.
    """
    prec += 50
    # J_{-n}(x) = (-1)^n * J_n(x)
    negate = n < 0 and n & 1
    mag = x[2]+x[3]
    n = abs(n)
    wp = prec + 20 + n*bitcount(n)
    # Extra precision for the (x/2)^n prefactor when |x| is small
    if mag < 0:
        wp -= n * mag
    x = to_fixed(x, wp)
    x2 = (x**2) >> wp
    if not n:
        s = t = MPZ_ONE << wp
    else:
        # First term: x^n / (2^n * n!), in fixed point
        s = t = (x**n // ifac(n)) >> ((n-1)*wp + n)
    k = 1
    while t:
        t = ((t * x2) // (-4*k*(k+n))) >> wp
        s += t
        k += 1
    if negate:
        s = -s
    return from_man_exp(s, -wp, prec, rounding)
939
+
940
def mpc_besseljn(n, z, prec, rounding=round_fast):
    """
    Bessel function J_n(z) for integer n and complex z, computed by
    fixed-point summation of the power series (complex analogue of
    mpf_besseljn; the per-term multiplication is 4 real multiplies).
    """
    # J_{-n}(z) = (-1)^n * J_n(z)
    negate = n < 0 and n & 1
    n = abs(n)
    origprec = prec
    zre, zim = z
    mag = max(zre[2]+zre[3], zim[2]+zim[3])
    prec += 20 + n*bitcount(n) + abs(mag)
    if mag < 0:
        prec -= n * mag
    zre = to_fixed(zre, prec)
    zim = to_fixed(zim, prec)
    # z^2 in fixed point (the >> (prec-1) includes the factor 2 of 2*re*im)
    z2re = (zre**2 - zim**2) >> prec
    z2im = (zre*zim) >> (prec-1)
    if not n:
        sre = tre = MPZ_ONE << prec
        sim = tim = MPZ_ZERO
    else:
        # First term: z^n / (2^n * n!)
        re, im = complex_int_pow(zre, zim, n)
        sre = tre = (re // ifac(n)) >> ((n-1)*prec + n)
        sim = tim = (im // ifac(n)) >> ((n-1)*prec + n)
    k = 1
    while abs(tre) + abs(tim) > 3:
        p = -4*k*(k+n)
        tre, tim = tre*z2re - tim*z2im, tim*z2re + tre*z2im
        tre = (tre // p) >> prec
        tim = (tim // p) >> prec
        sre += tre
        sim += tim
        k += 1
    if negate:
        sre = -sre
        sim = -sim
    re = from_man_exp(sre, -prec, origprec, rounding)
    im = from_man_exp(sim, -prec, origprec, rounding)
    return (re, im)
975
+
976
def mpf_agm(a, b, prec, rnd=round_fast):
    """
    Computes the arithmetic-geometric mean agm(a,b) for
    nonnegative mpf values a, b.

    Arguments of very different magnitude are first brought close
    together with floating-point AGM steps, then rescaled to ~1 and
    finished with the fast fixed-point AGM.
    """
    asign, aman, aexp, abc = a
    bsign, bman, bexp, bbc = b
    if asign or bsign:
        raise ComplexResult("agm of a negative number")
    # Handle inf, nan or zero in either operand
    if not (aman and bman):
        if a == fnan or b == fnan:
            return fnan
        if a == finf:
            if b == fzero:
                return fnan
            return finf
        if b == finf:
            if a == fzero:
                return fnan
            return finf
        # agm(0,x) = agm(x,0) = 0
        return fzero
    wp = prec + 20
    amag = aexp+abc
    bmag = bexp+bbc
    mag_delta = amag - bmag
    # Reduce to roughly the same magnitude using floating-point AGM
    abs_mag_delta = abs(mag_delta)
    if abs_mag_delta > 10:
        while abs_mag_delta > 10:
            # One AGM step roughly halves the magnitude gap
            a, b = mpf_shift(mpf_add(a,b,wp),-1), \
                mpf_sqrt(mpf_mul(a,b,wp),wp)
            abs_mag_delta //= 2
        asign, aman, aexp, abc = a
        bsign, bman, bexp, bbc = b
        amag = aexp+abc
        bmag = bexp+bbc
        mag_delta = amag - bmag
    #print to_float(a), to_float(b)
    # Use agm(a,b) = agm(x*a,x*b)/x to obtain a, b ~= 1
    min_mag = min(amag,bmag)
    max_mag = max(amag,bmag)
    n = 0
    # If too small, we lose precision when going to fixed-point
    if min_mag < -8:
        n = -min_mag
    # If too large, we waste time using fixed-point with large numbers
    elif max_mag > 20:
        n = -max_mag
    if n:
        a = mpf_shift(a, n)
        b = mpf_shift(b, n)
    #print to_float(a), to_float(b)
    af = to_fixed(a, wp)
    bf = to_fixed(b, wp)
    g = agm_fixed(af, bf, wp)
    # Undo the 2^n rescaling in the final exponent
    return from_man_exp(g, -wp-n, prec, rnd)
1034
+
1035
def mpf_agm1(a, prec, rnd=round_fast):
    """
    Computes the arithmetic-geometric mean agm(1,a) for a nonnegative
    mpf value a.
    """
    one = fone
    return mpf_agm(one, a, prec, rnd)
1041
+
1042
def mpc_agm(a, b, prec, rnd=round_fast):
    """
    Complex AGM.

    Iterates (a,b) -> ((a+b)/2, sqrt(a*b)) until the iterates agree
    to within the working tolerance, then returns the common value.

    TODO:
    * check that convergence works as intended
    * optimize
    * select a nonarbitrary branch
    """
    if mpc_is_infnan(a) or mpc_is_infnan(b):
        return fnan, fnan
    if mpc_zero in (a, b):
        return fzero, fzero
    # agm(a,-a) = 0 (the geometric mean step would hit the branch cut)
    if mpc_neg(a) == b:
        return fzero, fzero
    wp = prec+20
    eps = mpf_shift(fone, -wp+10)
    while 1:
        a1 = mpc_shift(mpc_add(a, b, wp), -1)
        b1 = mpc_sqrt(mpc_mul(a, b, wp), wp)
        a, b = a1, b1
        # Relative convergence test: |a-b| small compared to max(|a|,|b|)
        size = mpf_min_max([mpc_abs(a,10), mpc_abs(b,10)])[1]
        err = mpc_abs(mpc_sub(a, b, 10), 10)
        if size == fzero or mpf_lt(err, mpf_mul(eps, size)):
            return a
1067
+
1068
def mpc_agm1(a, prec, rnd=round_fast):
    """Complex arithmetic-geometric mean agm(1, a)."""
    one = mpc_one
    return mpc_agm(one, a, prec, rnd)
1070
+
1071
def mpf_ellipk(x, prec, rnd=round_fast):
    """
    Complete elliptic integral of the first kind K(m) for real
    parameter m = x <= 1, computed via the AGM.
    """
    if not x[1]:
        # Special values: K(0) = pi/2, K(-inf) = 0
        if x == fzero:
            return mpf_shift(mpf_pi(prec, rnd), -1)
        if x == fninf:
            return fzero
        if x == fnan:
            return x
    if x == fone:
        return finf
    # TODO: for |x| << 1/2, one could use fall back to
    # pi/2 * hyp2f1_rat((1,2),(1,2),(1,1), x)
    wp = prec + 15
    # Use K(x) = pi/2/agm(1,a) where a = sqrt(1-x)
    # The sqrt raises ComplexResult if x > 1
    a = mpf_sqrt(mpf_sub(fone, x, wp), wp)
    v = mpf_agm1(a, wp)
    r = mpf_div(mpf_pi(wp), v, prec, rnd)
    return mpf_shift(r, -1)
1090
+
1091
def mpc_ellipk(z, prec, rnd=round_fast):
    """
    Complete elliptic integral of the first kind K(m) for complex
    parameter m = z, computed as pi/(2*agm(1, sqrt(1-z))).
    """
    re, im = z
    if im == fzero:
        # Real parameter: use the real routine when it applies
        if re == finf:
            return mpc_zero
        if mpf_le(re, fone):
            return mpf_ellipk(re, prec, rnd), fzero
    wp = prec + 15
    sqrt_one_minus_z = mpc_sqrt(mpc_sub(mpc_one, z, wp), wp)
    agm = mpc_agm1(sqrt_one_minus_z, wp)
    quotient = mpc_mpf_div(mpf_pi(wp), agm, prec, rnd)
    return mpc_shift(quotient, -1)
1103
+
1104
def mpf_ellipe(x, prec, rnd=round_fast):
    """
    Complete elliptic integral of the second kind E(m) for real
    parameter m = x <= 1, computed from K and a numerically
    differentiated K' via the functional relation below.
    """
    # http://functions.wolfram.com/EllipticIntegrals/
    # EllipticK/20/01/0001/
    # E = (1-m)*(K'(m)*2*m + K(m))
    sign, man, exp, bc = x
    if not man:
        # Special values: E(0) = pi/2, E(-inf) = +inf
        if x == fzero:
            return mpf_shift(mpf_pi(prec, rnd), -1)
        if x == fninf:
            return finf
        if x == fnan:
            return x
        if x == finf:
            raise ComplexResult
    if x == fone:
        return fone
    wp = prec+20
    mag = exp+bc
    if mag < -wp:
        return mpf_shift(mpf_pi(prec, rnd), -1)
    # Compute a finite difference for K'
    p = max(mag, 0) - wp
    h = mpf_shift(fone, p)
    K = mpf_ellipk(x, 2*wp)
    Kh = mpf_ellipk(mpf_sub(x, h), 2*wp)
    Kdiff = mpf_shift(mpf_sub(K, Kh), -p)
    t = mpf_sub(fone, x)
    b = mpf_mul(Kdiff, mpf_shift(x,1), wp)
    return mpf_mul(t, mpf_add(K, b), prec, rnd)
1133
+
1134
def mpc_ellipe(z, prec, rnd=round_fast):
    """
    Complete elliptic integral of the second kind E(m) for complex
    parameter m = z, using the same finite-difference-of-K formula as
    the real version (E = (1-m)*(K'(m)*2*m + K(m))).
    """
    re, im = z
    if im == fzero:
        if re == finf:
            return (fzero, finf)
        # Real parameter: use the real routine when it applies
        if mpf_le(re, fone):
            return mpf_ellipe(re, prec, rnd), fzero
    wp = prec + 15
    # Step size for the finite difference, scaled to |z|
    mag = mpc_abs(z, 1)
    p = max(mag[2]+mag[3], 0) - wp
    h = mpf_shift(fone, p)
    K = mpc_ellipk(z, 2*wp)
    Kh = mpc_ellipk(mpc_add_mpf(z, h, 2*wp), 2*wp)
    Kdiff = mpc_shift(mpc_sub(Kh, K, wp), -p)
    t = mpc_sub(mpc_one, z, wp)
    b = mpc_mul(Kdiff, mpc_shift(z,1), wp)
    return mpc_mul(t, mpc_add(K, b, wp), prec, rnd)
venv/lib/python3.10/site-packages/mpmath/libmp/libintmath.py ADDED
@@ -0,0 +1,584 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Utility functions for integer math.
3
+
4
+ TODO: rename, cleanup, perhaps move the gmpy wrapper code
5
+ here from settings.py
6
+
7
+ """
8
+
9
+ import math
10
+ from bisect import bisect
11
+
12
+ from .backend import xrange
13
+ from .backend import BACKEND, gmpy, sage, sage_utils, MPZ, MPZ_ONE, MPZ_ZERO
14
+
15
# small_trailing[i] = number of trailing zero bits in the byte value i
# (entry 0 is unused and left as 0)
small_trailing = [0] * 256
for j in range(1,8):
    small_trailing[1<<j::1<<(j+1)] = [j] * (1<<(7-j))
18
+
19
def giant_steps(start, target, n=2):
    """
    Return a list of integers ~=

    [start, n*start, ..., target/n^2, target/n, target]

    but conservatively rounded so that the quotient between two
    successive elements is actually slightly less than n.

    With n = 2, this describes suitable precision steps for a
    quadratically convergent algorithm such as Newton's method;
    with n = 3 steps for cubic convergence (Halley's method), etc.

    >>> giant_steps(50,1000)
    [66, 128, 253, 502, 1000]
    >>> giant_steps(50,1000,4)
    [65, 252, 1000]

    """
    steps = [target]
    while steps[-1] > start*n:
        steps.append(steps[-1]//n + 2)
    steps.reverse()
    return steps
+ return L[::-1]
42
+
43
def rshift(x, n):
    """For an integer x, calculate x >> n with the fastest (floor)
    rounding. Unlike the plain Python expression (x >> n), n is
    allowed to be negative, in which case a left shift is performed."""
    if n < 0:
        return x << (-n)
    return x >> n
49
+
50
def lshift(x, n):
    """For an integer x, calculate x << n. Unlike the plain Python
    expression (x << n), n is allowed to be negative, in which case a
    right shift with default (floor) rounding is performed."""
    if n < 0:
        return x >> (-n)
    return x << n
56
+
57
if BACKEND == 'sage':
    import operator
    # Sage integers accept negative shift counts natively, so the plain
    # operators already implement the semantics of rshift/lshift above
    rshift = operator.rshift
    lshift = operator.lshift
61
+
62
def python_trailing(n):
    """Count the number of trailing zero bits in abs(n)."""
    if not n:
        return 0
    # Shift off whole zero bytes, then finish with a table lookup
    shifted = 0
    while not n & 0xff:
        n >>= 8
        shifted += 8
    return shifted + small_trailing[n & 0xff]
75
+
76
if BACKEND == 'gmpy':
    # The bit-scan method was renamed between gmpy 1.x and 2.x
    if gmpy.version() >= '2':
        def gmpy_trailing(n):
            """Count the number of trailing zero bits in abs(n) using gmpy."""
            if n: return MPZ(n).bit_scan1()
            else: return 0
    else:
        def gmpy_trailing(n):
            """Count the number of trailing zero bits in abs(n) using gmpy."""
            if n: return MPZ(n).scan1()
            else: return 0
87
+
88
# Small powers of 2
# (bisecting this table gives the bit size of integers below 2**300)
powers = [1<<_ for _ in range(300)]
90
+
91
def python_bitcount(n):
    """Calculate bit size of the nonnegative integer n."""
    size = bisect(powers, n)
    if size != 300:
        return size
    # n >= 2**300: estimate from the float log (slightly low on
    # purpose), then correct with a table lookup on the top bits
    size = int(math.log(n, 2)) - 4
    return size + bctable[n >> size]
98
+
99
def gmpy_bitcount(n):
    """Calculate bit size of the nonnegative integer n."""
    if not n:
        return 0
    return MPZ(n).numdigits(2)
103
+
104
+ #def sage_bitcount(n):
105
+ # if n: return MPZ(n).nbits()
106
+ # else: return 0
107
+
108
def sage_trailing(n):
    """Count the number of trailing zero bits in abs(n) using Sage."""
    return MPZ(n).trailing_zero_bits()
110
+
111
# Pick the fastest bitcount/trailing implementations available for the
# active backend
if BACKEND == 'gmpy':
    bitcount = gmpy_bitcount
    trailing = gmpy_trailing
elif BACKEND == 'sage':
    sage_bitcount = sage_utils.bitcount
    bitcount = sage_bitcount
    trailing = sage_trailing
else:
    bitcount = python_bitcount
    trailing = python_trailing
121
+
122
# Prefer gmpy's C-implemented bit_length when this gmpy version has it
if BACKEND == 'gmpy' and 'bit_length' in dir(gmpy):
    bitcount = gmpy.bit_length
124
+
125
# Used to avoid slow function calls as far as possible
# trailtable[i]: trailing zero bits of i; bctable[i]: bit size of i
trailtable = [trailing(n) for n in range(256)]
bctable = [bitcount(n) for n in range(1024)]
128
+
129
+ # TODO: speed up for bases 2, 4, 8, 16, ...
130
+
131
def bin_to_radix(x, xbits, base, bdigits):
    """Changes radix of a fixed-point number; i.e., converts
    x * 2**-xbits to floor(x * base**bdigits * 2**-xbits),
    a fixed-point number with bdigits fractional digits in the
    given base."""
    return x * (MPZ(base)**bdigits) >> xbits
135
+
136
# Digit characters for radix conversion (supports bases up to 36)
stddigits = '0123456789abcdefghijklmnopqrstuvwxyz'
137
+
138
def small_numeral(n, base=10, digits=stddigits):
    """Return the string numeral of a positive integer in an arbitrary
    base. Most efficient for small input."""
    if base == 10:
        return str(n)
    out = []
    while n:
        n, rem = divmod(n, base)
        out.append(digits[rem])
    out.reverse()
    return "".join(out)
148
+
149
def numeral_python(n, base=10, size=0, digits=stddigits):
    """Represent the integer n as a string of digits in the given base.
    Recursive division is used to make this function about 3x faster
    than Python's str() for converting integers to decimal strings.

    The 'size' parameters specifies the number of digits in n; this
    number is only used to determine splitting points and need not be
    exact."""
    if n <= 0:
        if not n:
            return "0"
        return "-" + numeral(-n, base, size, digits)
    # Fast enough to do directly
    if size < 250:
        return small_numeral(n, base, digits)
    # Divide in half
    half = (size // 2) + (size & 1)
    hi, lo = divmod(n, base**half)
    hi_digits = numeral(hi, base, half, digits)
    lo_digits = numeral(lo, base, half, digits).rjust(half, "0")
    return hi_digits + lo_digits
170
+
171
def numeral_gmpy(n, base=10, size=0, digits=stddigits):
    """Represent the integer n as a string of digits in the given base.
    Recursive division is used to make this function about 3x faster
    than Python's str() for converting integers to decimal strings.

    The 'size' parameters specifies the number of digits in n; this
    number is only used to determine splitting points and need not be
    exact.
    """
    if n < 0:
        return "-" + numeral(-n, base, size, digits)
    # gmpy.digits() may cause a segmentation fault when trying to convert
    # extremely large values to a string. The size limit may need to be
    # adjusted on some platforms, but 1500000 works on Windows and Linux.
    if size < 1500000:
        return gmpy.digits(n, base)
    # Divide in half
    half = (size // 2) + (size & 1)
    A, B = divmod(n, MPZ(base)**half)
    ad = numeral(A, base, half, digits)
    # Zero-pad the low half to exactly `half` digits.
    bd = numeral(B, base, half, digits).rjust(half, "0")
    return ad + bd
192
+
193
# Bind the public `numeral` name to the backend-appropriate implementation.
if BACKEND == "gmpy":
    numeral = numeral_gmpy
else:
    numeral = numeral_python
197
+
198
# Precomputed powers of two, used as magnitude thresholds by the integer
# square-root routines below.
_1_800 = 1<<800
_1_600 = 1<<600
_1_400 = 1<<400
_1_200 = 1<<200
_1_100 = 1<<100
_1_50 = 1<<50
204
+
205
def isqrt_small_python(x):
    """
    Correctly (floor) rounded integer square root, using
    division. Fast up to ~200 digits.
    """
    if not x:
        return x
    if x < _1_800:
        # Exact with IEEE double precision arithmetic
        if x < _1_50:
            return int(x**0.5)
        # Initial estimate can be any integer >= the true root; round up
        r = int(x**0.5 * 1.00000000000001) + 1
    else:
        # Too large for float: estimate from the top ~100 bits and shift
        # the result back into place.
        bc = bitcount(x)
        n = bc//2
        r = int((x>>(2*n-100))**0.5+2)<<(n-50) # +2 is to round up
    # The following iteration now precisely computes floor(sqrt(x))
    # See e.g. Crandall & Pomerance, "Prime Numbers: A Computational
    # Perspective"
    # (Heron/Newton iteration; converges monotonically from above, so the
    # first non-decreasing step means r is the floor square root.)
    while 1:
        y = (r+x//r)>>1
        if y >= r:
            return r
        r = y
230
+
231
def isqrt_fast_python(x):
    """
    Fast approximate integer square root, computed using division-free
    Newton iteration for large x. For random integers the result is almost
    always correct (floor(sqrt(x))), but is 1 ulp too small with a roughly
    0.1% probability. If x is very close to an exact square, the answer is
    1 ulp wrong with high probability.

    With 0 guard bits, the largest error over a set of 10^5 random
    inputs of size 1-10^5 bits was 3 ulp. The use of 10 guard bits
    almost certainly guarantees a max 1 ulp error.
    """
    # Use direct division-based iteration if sqrt(x) < 2^400
    # Assume floating-point square root accurate to within 1 ulp, then:
    # 0 Newton iterations good to 52 bits
    # 1 Newton iterations good to 104 bits
    # 2 Newton iterations good to 208 bits
    # 3 Newton iterations good to 416 bits
    if x < _1_800:
        y = int(x**0.5)
        if x >= _1_100:
            y = (y + x//y) >> 1
        if x >= _1_200:
            y = (y + x//y) >> 1
        if x >= _1_400:
            y = (y + x//y) >> 1
        return y
    # Large case: division-free Newton iteration for 1/sqrt(x), doubling
    # the working precision each step via giant_steps.
    bc = bitcount(x)
    guard_bits = 10
    x <<= 2*guard_bits
    bc += 2*guard_bits
    bc += (bc&1)
    hbc = bc//2
    startprec = min(50, hbc)
    # Newton iteration for 1/sqrt(x), with floating-point starting value
    r = int(2.0**(2*startprec) * (x >> (bc-2*startprec)) ** -0.5)
    pp = startprec
    for p in giant_steps(startprec, hbc):
        # r**2, scaled from real size 2**(-bc) to 2**p
        r2 = (r*r) >> (2*pp - p)
        # x*r**2, scaled from real size ~1.0 to 2**p
        xr2 = ((x >> (bc-p)) * r2) >> p
        # New value of r, scaled from real size 2**(-bc/2) to 2**p
        r = (r * ((3<<p) - xr2)) >> (pp+1)
        pp = p
    # (1/sqrt(x))*x = sqrt(x)
    return (r*(x>>hbc)) >> (p+guard_bits)
278
+
279
def sqrtrem_python(x):
    """Correctly rounded integer (floor) square root with remainder.

    Returns (y, r) with y = floor(sqrt(x)) and r = x - y*y.
    """
    # to check cutoff:
    # plot(lambda x: timing(isqrt, 2**int(x)), [0,2000])
    if x < _1_600:
        y = isqrt_small_python(x)
        return y, x - y*y
    # isqrt_fast_python may be off by a few ulps; start one above and
    # correct the candidate in both directions below.
    y = isqrt_fast_python(x) + 1
    rem = x - y*y
    # Correct remainder
    while rem < 0:
        y -= 1
        rem += (1+2*y)
    else:
        if rem:
            while rem > 2*(1+y):
                y += 1
                rem -= (1+2*y)
    return y, rem
298
+
299
def isqrt_python(x):
    """Integer square root with correct (floor) rounding."""
    root, _ = sqrtrem_python(x)
    return root
302
+
303
def sqrt_fixed(x, prec):
    # Fixed-point square root: for x with prec fractional bits,
    # isqrt(x << prec) gives sqrt with prec fractional bits.
    return isqrt_fast(x<<prec)

# Legacy alias kept for backward compatibility.
sqrt_fixed2 = sqrt_fixed
307
+
308
# Bind the integer square-root functions to the active backend's
# implementations; the pure-Python versions are the fallback.
if BACKEND == 'gmpy':
    if gmpy.version() >= '2':
        isqrt_small = isqrt_fast = isqrt = gmpy.isqrt
        sqrtrem = gmpy.isqrt_rem
    else:
        # gmpy 1.x names: sqrt() is the integer square root.
        isqrt_small = isqrt_fast = isqrt = gmpy.sqrt
        sqrtrem = gmpy.sqrtrem
elif BACKEND == 'sage':
    isqrt_small = isqrt_fast = isqrt = \
        getattr(sage_utils, "isqrt", lambda n: MPZ(n).isqrt())
    sqrtrem = lambda n: MPZ(n).sqrtrem()
else:
    isqrt_small = isqrt_small_python
    isqrt_fast = isqrt_fast_python
    isqrt = isqrt_python
    sqrtrem = sqrtrem_python
324
+
325
+
326
def ifib(n, _cache={}):
    """Computes the nth Fibonacci number as an integer, for
    integer n.

    Negative indices follow F(-n) = (-1)**(n+1) * F(n). Small results
    (n < 250) are memoized in the shared default-argument cache.
    """
    if n < 0:
        return (-1)**(-n+1) * ifib(-n)
    if n in _cache:
        return _cache[n]
    m = n
    # Use Dijkstra's logarithmic algorithm
    # The following implementation is basically equivalent to
    # http://en.literateprograms.org/Fibonacci_numbers_(Scheme)
    a, b, p, q = MPZ_ONE, MPZ_ZERO, MPZ_ZERO, MPZ_ONE
    while n:
        if n & 1:
            aq = a*q
            a, b = b*q+aq+a*p, b*p+aq
            n -= 1
        else:
            qq = q*q
            p, q = p*p+qq, qq+2*p*q
            n >>= 1
    if m < 250:
        _cache[m] = b
    return b
350
+
351
MAX_FACTORIAL_CACHE = 1000

def ifac(n, memo={0:1, 1:1}):
    """Return n factorial (for integers n >= 0 only).

    Results up to MAX_FACTORIAL_CACHE are memoized in the shared
    default-argument dict, which always holds the contiguous range
    0..len(memo)-1.
    """
    cached = memo.get(n)
    if cached:
        return cached
    # Resume from the largest factorial already cached.
    k = len(memo)
    product = memo[k-1]
    limit = MAX_FACTORIAL_CACHE
    while k <= n:
        product *= k
        if k <= limit:
            memo[k] = product
        k += 1
    return product
367
+
368
def ifac2(n, memo_pair=[{0:1}, {1:1}]):
    """Return n!! (double factorial), integers n >= 0 only.

    Even and odd arguments use separate shared caches.
    """
    memo = memo_pair[n&1]
    cached = memo.get(n)
    if cached:
        return cached
    # Continue the product from the largest cached entry of this parity.
    k = max(memo)
    product = memo[k]
    limit = MAX_FACTORIAL_CACHE
    while k < n:
        k += 2
        product *= k
        if k <= limit:
            memo[k] = product
    return product
383
+
384
# Override the factorial/Fibonacci routines with native backend versions
# when available.
if BACKEND == 'gmpy':
    ifac = gmpy.fac
elif BACKEND == 'sage':
    ifac = lambda n: int(sage.factorial(n))
    ifib = sage.fibonacci
389
+
390
def list_primes(n):
    """Return the list of all primes <= n, via the sieve of Eratosthenes."""
    limit = n + 1
    # sieve[i] holds i while i is still a prime candidate, 0 otherwise.
    sieve = list(xrange(limit))
    sieve[:2] = [0, 0]
    for p in xrange(2, int(limit**0.5)+1):
        if sieve[p]:
            for composite in xrange(p*p, limit, p):
                sieve[composite] = 0
    return [q for q in sieve if q]
399
+
400
if BACKEND == 'sage':
    # Note: it is *VERY* important for performance that we convert
    # the list to Python ints.
    def list_primes(n):
        """Return all primes <= n as plain Python ints (Sage backend)."""
        return [int(_) for _ in sage.primes(n+1)]
405
+
406
# Odd primes below 50, used for trial division and as Miller-Rabin
# witnesses in isprime(); the set gives O(1) membership tests.
small_odd_primes = (3,5,7,11,13,17,19,23,29,31,37,41,43,47)
small_odd_primes_set = set(small_odd_primes)
408
+
409
def isprime(n):
    """
    Determines whether n is a prime number. A probabilistic test is
    performed if n is very large. No special trick is used for detecting
    perfect powers.

    Uses trial division by small primes followed by Miller-Rabin; the
    witness sets below make the test deterministic for n < 341550071728321.

    >>> sum(list_primes(100000))
    454396537
    >>> sum(n*isprime(n) for n in range(100000))
    454396537

    """
    n = int(n)
    if not n & 1:
        return n == 2
    if n < 50:
        return n in small_odd_primes_set
    for p in small_odd_primes:
        if not n % p:
            return False
    # Write n-1 = d * 2**s with d odd, for the Miller-Rabin rounds.
    m = n-1
    s = trailing(m)
    d = m >> s
    def test(a):
        # One Miller-Rabin round with witness a; True means "probably prime".
        x = pow(a,d,n)
        if x == 1 or x == m:
            return True
        for r in xrange(1,s):
            x = x**2 % n
            if x == m:
                return True
        return False
    # See http://primes.utm.edu/prove/prove2_3.html
    if n < 1373653:
        witnesses = [2,3]
    elif n < 341550071728321:
        witnesses = [2,3,5,7,11,13,17]
    else:
        witnesses = small_odd_primes
    for a in witnesses:
        if not test(a):
            return False
    return True
452
+
453
def moebius(n):
    """
    Evaluates the Moebius function which is `mu(n) = (-1)^k` if `n`
    is a product of `k` distinct primes and `mu(n) = 0` otherwise.

    Scans all divisor candidates up to n, so this is O(n) per call.

    TODO: speed up using factorization
    """
    n = abs(int(n))
    if n < 2:
        # mu(0) = 0 and mu(1) = 1 by convention.
        return n
    factors = []
    for p in xrange(2, n+1):
        if not (n % p):
            # A repeated prime factor makes mu vanish.
            if not (n % p**2):
                return 0
            # Record p only if it is not a multiple of a smaller factor.
            if not sum(p % f for f in factors):
                factors.append(p)
    return (-1)**len(factors)
471
+
472
def gcd(*args):
    """Greatest common divisor of any number of integers.

    Returns 0 when called with no arguments; the sign follows the
    Euclidean remainder chain (nonnegative for the usual inputs).
    """
    result = 0
    for value in args:
        if not result:
            # First nonzero argument seeds the accumulator.
            result = value
            continue
        while value:
            result, value = value, result % value
    return result
481
+
482
+
483
# Comment by Juan Arias de Reyna:
#
# I learned this method to compute EulerE[2n] from van de Lune.
#
# We apply the formula EulerE[2n] = (-1)^n 2**(-2n) sum_{j=0}^n a(2n,2j+1)
#
# where the numbers a(n,j) vanish for j > n+1 or j <= -1 and satisfy
#
#     a(0,-1) = a(0,0) = 0; a(0,1) = 1; a(0,2) = a(0,3) = 0
#
#     a(n,j) = a(n-1,j)                          when n+j is even
#     a(n,j) = (j-1) a(n-1,j-1) + (j+1) a(n-1,j+1)  when n+j is odd
#
# A single one-dimensional array a(j) suffices, because computing a(n,j)
# only needs the values a(n-1,k) with k of the opposite parity to j, and
# the consumed values never have to be preserved.
#
# Euler numbers are cached up to a sufficiently high order.
#
# Important observation: if the numbers EulerE[1], EulerE[2], ...,
# EulerE[n] are all needed, it is convenient to compute EulerE[n] first,
# since the algorithm computes all the previous ones along the way and
# keeps them in the cache.
508
+
509
MAX_EULER_CACHE = 500

def eulernum(m, _cache={0:MPZ_ONE}):
    r"""
    Computes the Euler numbers `E(n)`, which can be defined as
    coefficients of the Taylor expansion of `1/cosh x`:

    .. math ::

        \frac{1}{\cosh x} = \sum_{n=0}^\infty \frac{E_n}{n!} x^n

    Example::

        >>> [int(eulernum(n)) for n in range(11)]
        [1, 0, -1, 0, 5, 0, -61, 0, 1385, 0, -50521]
        >>> [int(eulernum(n)) for n in range(11)] # test cache
        [1, 0, -1, 0, 5, 0, -61, 0, 1385, 0, -50521]

    """
    # for odd m > 1, the Euler numbers are zero
    if m & 1:
        return MPZ_ZERO
    f = _cache.get(m)
    if f:
        return f
    MAX = MAX_EULER_CACHE
    n = m
    # One-dimensional rolling array for the a(n,j) recurrence described
    # in the block comment above this function.
    a = [MPZ(_) for _ in [0,0,1,0,0,0]]
    for n in range(1, m+1):
        for j in range(n+1, -1, -2):
            a[j+1] = (j-1)*a[j] + (j+1)*a[j+2]
        a.append(0)
        suma = 0
        for k in range(n+1, -1, -2):
            suma += a[k+1]
        if n <= MAX:
            # NOTE(review): the cached expression groups (suma // 2**n)
            # before the sign flip while the return below divides after it;
            # these agree only if suma is exactly divisible by 2**n, which
            # appears to hold for this recurrence -- confirm if modifying.
            _cache[n] = ((-1)**(n//2))*(suma // 2**n)
        if n == m:
            return ((-1)**(n//2))*suma // 2**n
548
+
549
def stirling1(n, k):
    """
    Stirling number of the first kind (signed), computed with the
    triangular recurrence s(m, j) = -(m-1) s(m-1, j) + s(m-1, j-1),
    using a single row of unsigned values and a final sign fix.

    Raises ValueError for negative arguments.
    """
    if n < 0 or k < 0:
        raise ValueError
    if k >= n:
        # s(n, n) = 1; zero above the diagonal.
        return MPZ(n == k)
    if k < 1:
        return MPZ_ZERO
    L = [MPZ_ZERO] * (k+1)
    L[1] = MPZ_ONE
    for m in xrange(2, n+1):
        # Update right-to-left so L still holds row m-1 values when read.
        for j in xrange(min(k, m), 0, -1):
            L[j] = (m-1) * L[j] + L[j-1]
    return (-1)**(n+k) * L[k]
565
+
566
def stirling2(n, k):
    """
    Stirling number of the second kind, via the explicit alternating
    sum S(n,k) = (1/k!) sum_j (-1)**(k-j) C(k,j) j**n.

    Raises ValueError for negative arguments.
    """
    if n < 0 or k < 0:
        raise ValueError
    if k >= n:
        return MPZ(n == k)
    if k <= 1:
        return MPZ(k == 1)
    s = MPZ_ZERO
    # t tracks the binomial coefficient C(k, j) incrementally.
    t = MPZ_ONE
    for j in xrange(k+1):
        if (k + j) & 1:
            s -= t * MPZ(j)**n
        else:
            s += t * MPZ(j)**n
        t = t * (k - j) // (j + 1)
    return s // ifac(k)
venv/lib/python3.10/site-packages/mpmath/libmp/libmpc.py ADDED
@@ -0,0 +1,835 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Low-level functions for complex arithmetic.
3
+ """
4
+
5
+ import sys
6
+
7
+ from .backend import MPZ, MPZ_ZERO, MPZ_ONE, MPZ_TWO, BACKEND
8
+
9
+ from .libmpf import (\
10
+ round_floor, round_ceiling, round_down, round_up,
11
+ round_nearest, round_fast, bitcount,
12
+ bctable, normalize, normalize1, reciprocal_rnd, rshift, lshift, giant_steps,
13
+ negative_rnd,
14
+ to_str, to_fixed, from_man_exp, from_float, to_float, from_int, to_int,
15
+ fzero, fone, ftwo, fhalf, finf, fninf, fnan, fnone,
16
+ mpf_abs, mpf_pos, mpf_neg, mpf_add, mpf_sub, mpf_mul,
17
+ mpf_div, mpf_mul_int, mpf_shift, mpf_sqrt, mpf_hypot,
18
+ mpf_rdiv_int, mpf_floor, mpf_ceil, mpf_nint, mpf_frac,
19
+ mpf_sign, mpf_hash,
20
+ ComplexResult
21
+ )
22
+
23
+ from .libelefun import (\
24
+ mpf_pi, mpf_exp, mpf_log, mpf_cos_sin, mpf_cosh_sinh, mpf_tan, mpf_pow_int,
25
+ mpf_log_hypot,
26
+ mpf_cos_sin_pi, mpf_phi,
27
+ mpf_cos, mpf_sin, mpf_cos_pi, mpf_sin_pi,
28
+ mpf_atan, mpf_atan2, mpf_cosh, mpf_sinh, mpf_tanh,
29
+ mpf_asin, mpf_acos, mpf_acosh, mpf_nthroot, mpf_fibonacci
30
+ )
31
+
32
# An mpc value is a (real, imag) tuple of mpf values
mpc_one = fone, fzero
mpc_zero = fzero, fzero
mpc_two = ftwo, fzero
mpc_half = (fhalf, fzero)

# Tuples for fast membership tests against the mpf special values.
_infs = (finf, fninf)
_infs_nan = (finf, fninf, fnan)
40
+
41
def mpc_is_inf(z):
    """Check if either real or imaginary part is infinite"""
    return any(part in _infs for part in z)
47
+
48
def mpc_is_infnan(z):
    """Check if either real or imaginary part is infinite or nan"""
    return any(part in _infs_nan for part in z)
54
+
55
def mpc_to_str(z, dps, **kwargs):
    """Format z as a string like '1.0 + 2.0j' with dps decimal digits.

    A negative imaginary part (sign bit set) is rendered as ' - |im|j'.
    NOTE(review): **kwargs are forwarded only to the imaginary part's
    to_str call, not the real part's -- looks unintentional; confirm
    before changing.
    """
    re, im = z
    rs = to_str(re, dps)
    if im[0]:
        return rs + " - " + to_str(mpf_neg(im), dps, **kwargs) + "j"
    else:
        return rs + " + " + to_str(im, dps, **kwargs) + "j"
62
+
63
def mpc_to_complex(z, strict=False, rnd=round_fast):
    """Convert z to a Python complex; strict controls overflow handling
    in the underlying to_float conversions."""
    re, im = z
    return complex(to_float(re, strict, rnd), to_float(im, strict, rnd))
66
+
67
def mpc_hash(z):
    """Hash z consistently with Python's hash of the equal complex value."""
    if sys.version_info >= (3, 2):
        # Mirror CPython's numeric hashing: hash(re) + imag_mult*hash(im).
        re, im = z
        h = mpf_hash(re) + sys.hash_info.imag * mpf_hash(im)
        # Need to reduce either modulo 2^32 or 2^64
        h = h % (2**sys.hash_info.width)
        return int(h)
    else:
        try:
            return hash(mpc_to_complex(z, strict=True))
        except OverflowError:
            # Too large for a float complex: fall back to the tuple hash.
            return hash(z)
79
+
80
def mpc_conjugate(z, prec, rnd=round_fast):
    """Complex conjugate: negate the imaginary part, rounding to prec."""
    re, im = z
    im = mpf_neg(im, prec, rnd)
    return re, im
83
+
84
def mpc_is_nonzero(z):
    """Check whether z differs from complex zero (fzero, fzero)."""
    return z != mpc_zero
86
+
87
def mpc_add(z, w, prec, rnd=round_fast):
    """Complex addition; each part is rounded separately to prec."""
    a, b = z
    c, d = w
    return mpf_add(a, c, prec, rnd), mpf_add(b, d, prec, rnd)

def mpc_add_mpf(z, x, prec, rnd=round_fast):
    """Add the real mpf value x to z."""
    a, b = z
    return mpf_add(a, x, prec, rnd), b

def mpc_sub(z, w, prec=0, rnd=round_fast):
    """Complex subtraction z - w; each part rounded separately."""
    a, b = z
    c, d = w
    return mpf_sub(a, c, prec, rnd), mpf_sub(b, d, prec, rnd)

def mpc_sub_mpf(z, p, prec=0, rnd=round_fast):
    """Subtract the real mpf value p from z."""
    a, b = z
    return mpf_sub(a, p, prec, rnd), b

def mpc_pos(z, prec, rnd=round_fast):
    """Unary plus: round both parts of z to prec."""
    a, b = z
    return mpf_pos(a, prec, rnd), mpf_pos(b, prec, rnd)

def mpc_neg(z, prec=None, rnd=round_fast):
    """Negate z, optionally rounding both parts."""
    a, b = z
    return mpf_neg(a, prec, rnd), mpf_neg(b, prec, rnd)

def mpc_shift(z, n):
    """Multiply z exactly by 2**n (binary shift of both parts)."""
    a, b = z
    return mpf_shift(a, n), mpf_shift(b, n)

def mpc_abs(z, prec, rnd=round_fast):
    """Absolute value of a complex number, |a+bi|.
    Returns an mpf value."""
    a, b = z
    return mpf_hypot(a, b, prec, rnd)

def mpc_arg(z, prec, rnd=round_fast):
    """Argument of a complex number. Returns an mpf value."""
    a, b = z
    return mpf_atan2(b, a, prec, rnd)

def mpc_floor(z, prec, rnd=round_fast):
    """Componentwise floor of z."""
    a, b = z
    return mpf_floor(a, prec, rnd), mpf_floor(b, prec, rnd)

def mpc_ceil(z, prec, rnd=round_fast):
    """Componentwise ceiling of z."""
    a, b = z
    return mpf_ceil(a, prec, rnd), mpf_ceil(b, prec, rnd)

def mpc_nint(z, prec, rnd=round_fast):
    """Componentwise rounding of z to the nearest integer."""
    a, b = z
    return mpf_nint(a, prec, rnd), mpf_nint(b, prec, rnd)

def mpc_frac(z, prec, rnd=round_fast):
    """Componentwise fractional part of z."""
    a, b = z
    return mpf_frac(a, prec, rnd), mpf_frac(b, prec, rnd)
143
+
144
+
145
def mpc_mul(z, w, prec, rnd=round_fast):
    """
    Complex multiplication.

    Returns the real and imaginary part of (a+bi)*(c+di), rounded to
    the specified precision. The rounding mode applies to the real and
    imaginary parts separately.

    The four partial products are computed exactly (no prec argument),
    so rounding happens only in the final add/sub.
    """
    a, b = z
    c, d = w
    p = mpf_mul(a, c)
    q = mpf_mul(b, d)
    r = mpf_mul(a, d)
    s = mpf_mul(b, c)
    re = mpf_sub(p, q, prec, rnd)
    im = mpf_add(r, s, prec, rnd)
    return re, im
162
+
163
def mpc_square(z, prec, rnd=round_fast):
    # (a+b*I)**2 == a**2 - b**2 + 2*I*a*b
    # Cheaper than mpc_mul(z, z): only three real multiplications, and
    # the doubling is an exact shift.
    a, b = z
    p = mpf_mul(a,a)
    q = mpf_mul(b,b)
    r = mpf_mul(a,b, prec, rnd)
    re = mpf_sub(p, q, prec, rnd)
    im = mpf_shift(r, 1)
    return re, im
172
+
173
def mpc_mul_mpf(z, p, prec, rnd=round_fast):
    """Multiply z by the real mpf value p."""
    a, b = z
    re = mpf_mul(a, p, prec, rnd)
    im = mpf_mul(b, p, prec, rnd)
    return re, im

def mpc_mul_imag_mpf(z, x, prec, rnd=round_fast):
    """
    Multiply the mpc value z by I*x where x is an mpf value.
    """
    a, b = z
    re = mpf_neg(mpf_mul(b, x, prec, rnd))
    im = mpf_mul(a, x, prec, rnd)
    return re, im

def mpc_mul_int(z, n, prec, rnd=round_fast):
    """Multiply z by the Python integer n."""
    a, b = z
    re = mpf_mul_int(a, n, prec, rnd)
    im = mpf_mul_int(b, n, prec, rnd)
    return re, im
193
+
194
def mpc_div(z, w, prec, rnd=round_fast):
    """Complex division z/w via the textbook formula, using 10 guard
    bits of working precision for the intermediate sums."""
    a, b = z
    c, d = w
    wp = prec + 10
    # mag = c*c + d*d
    mag = mpf_add(mpf_mul(c, c), mpf_mul(d, d), wp)
    # (a*c+b*d)/mag, (b*c-a*d)/mag
    t = mpf_add(mpf_mul(a,c), mpf_mul(b,d), wp)
    u = mpf_sub(mpf_mul(b,c), mpf_mul(a,d), wp)
    return mpf_div(t,mag,prec,rnd), mpf_div(u,mag,prec,rnd)
204
+
205
def mpc_div_mpf(z, p, prec, rnd=round_fast):
    """Calculate z/p where p is real"""
    a, b = z
    re = mpf_div(a, p, prec, rnd)
    im = mpf_div(b, p, prec, rnd)
    return re, im

def mpc_reciprocal(z, prec, rnd=round_fast):
    """Calculate 1/z efficiently"""
    a, b = z
    # |z|^2 with 10 guard bits; conj(z)/|z|^2 gives the reciprocal.
    m = mpf_add(mpf_mul(a,a),mpf_mul(b,b),prec+10)
    re = mpf_div(a, m, prec, rnd)
    im = mpf_neg(mpf_div(b, m, prec, rnd))
    return re, im

def mpc_mpf_div(p, z, prec, rnd=round_fast):
    """Calculate p/z where p is real efficiently"""
    a, b = z
    # p/z = p*conj(z)/|z|^2
    m = mpf_add(mpf_mul(a,a),mpf_mul(b,b), prec+10)
    re = mpf_div(mpf_mul(a,p), m, prec, rnd)
    im = mpf_div(mpf_neg(mpf_mul(b,p)), m, prec, rnd)
    return re, im
227
+
228
def complex_int_pow(a, b, n):
    """Complex integer power: computes (a+b*I)**n exactly for
    nonnegative n (a and b must be Python ints).

    Classic binary (square-and-multiply) exponentiation on the pair of
    integer components.
    """
    out_re = 1
    out_im = 0
    base_re, base_im = a, b
    while n > 0:
        if n % 2:
            out_re, out_im = (out_re*base_re - out_im*base_im,
                              out_im*base_re + out_re*base_im)
        # Square the base and halve the exponent.
        base_re, base_im = (base_re*base_re - base_im*base_im,
                            2*base_re*base_im)
        n //= 2
    return out_re, out_im
240
+
241
def mpc_pow(z, w, prec, rnd=round_fast):
    """General complex power z**w, via exp(w*log(z)); real exponents are
    delegated to the faster mpc_pow_mpf."""
    if w[1] == fzero:
        return mpc_pow_mpf(z, w[0], prec, rnd)
    return mpc_exp(mpc_mul(mpc_log(z, prec+10), w, prec+10), prec, rnd)
245
+
246
def mpc_pow_mpf(z, p, prec, rnd=round_fast):
    """Complex power with a real mpf exponent p; integer and half-integer
    exponents use exact integer powering instead of exp/log."""
    psign, pman, pexp, pbc = p
    if pexp >= 0:
        # p is an integer.
        return mpc_pow_int(z, (-1)**psign * (pman<<pexp), prec, rnd)
    if pexp == -1:
        # p is an odd multiple of 1/2: take a square root first.
        sqrtz = mpc_sqrt(z, prec+10)
        return mpc_pow_int(sqrtz, (-1)**psign * pman, prec, rnd)
    return mpc_exp(mpc_mul_mpf(mpc_log(z, prec+10), p, prec+10), prec, rnd)
254
+
255
def mpc_pow_int(z, n, prec, rnd=round_fast):
    """Complex power with an integer exponent n, using exact integer
    powering when the result's size stays manageable and falling back
    to exp(n*log(z)) otherwise."""
    a, b = z
    if b == fzero:
        return mpf_pow_int(a, n, prec, rnd), fzero
    if a == fzero:
        # (bi)**n = b**n * i**n; dispatch on n mod 4 for the i**n factor.
        v = mpf_pow_int(b, n, prec, rnd)
        n %= 4
        if n == 0:
            return v, fzero
        elif n == 1:
            return fzero, v
        elif n == 2:
            return mpf_neg(v), fzero
        elif n == 3:
            return fzero, mpf_neg(v)
    if n == 0: return mpc_one
    if n == 1: return mpc_pos(z, prec, rnd)
    if n == 2: return mpc_square(z, prec, rnd)
    if n == -1: return mpc_reciprocal(z, prec, rnd)
    if n < 0: return mpc_reciprocal(mpc_pow_int(z, -n, prec+4), prec, rnd)
    asign, aman, aexp, abc = a
    bsign, bman, bexp, bbc = b
    if asign: aman = -aman
    if bsign: bman = -bman
    de = aexp - bexp
    abs_de = abs(de)
    # Estimated bit size of the exact integer result; only go exact when
    # it stays small enough to be cheap.
    exact_size = n*(abs_de + max(abc, bbc))
    if exact_size < 10000:
        # Align both mantissas to the smaller exponent.
        if de > 0:
            aman <<= de
            aexp = bexp
        else:
            bman <<= (-de)
            bexp = aexp
        re, im = complex_int_pow(aman, bman, n)
        re = from_man_exp(re, int(n*aexp), prec, rnd)
        im = from_man_exp(im, int(n*bexp), prec, rnd)
        return re, im
    return mpc_exp(mpc_mul_int(mpc_log(z, prec+10), n, prec+10), prec, rnd)
294
+
295
def mpc_sqrt(z, prec, rnd=round_fast):
    """Complex square root (principal branch).

    We have sqrt(a+bi) = sqrt((r+a)/2) + b/sqrt(2*(r+a))*i where
    r = abs(a+bi), when a+bi is not a negative real number.

    The a < 0 branch uses the mirror formula with (r-a) to avoid
    cancellation; the sign of b selects the principal branch there.
    """
    a, b = z
    if b == fzero:
        if a == fzero:
            return (a, b)
        # When a+bi is a negative real number, we get a real sqrt times i
        if a[0]:
            im = mpf_sqrt(mpf_neg(a), prec, rnd)
            return (fzero, im)
        else:
            re = mpf_sqrt(a, prec, rnd)
            return (re, fzero)
    wp = prec+20
    if not a[0]:                               # case a positive
        t  = mpf_add(mpc_abs((a, b), wp), a, wp)  # t = abs(a+bi) + a
        u = mpf_shift(t, -1)                      # u = t/2
        re = mpf_sqrt(u, prec, rnd)               # re = sqrt(u)
        v = mpf_shift(t, 1)                       # v = 2*t
        w  = mpf_sqrt(v, wp)                      # w = sqrt(v)
        im = mpf_div(b, w, prec, rnd)             # im = b / w
    else:                                      # case a negative
        t = mpf_sub(mpc_abs((a, b), wp), a, wp)   # t = abs(a+bi) - a
        u = mpf_shift(t, -1)                      # u = t/2
        im = mpf_sqrt(u, prec, rnd)               # im = sqrt(u)
        v = mpf_shift(t, 1)                       # v = 2*t
        w  = mpf_sqrt(v, wp)                      # w = sqrt(v)
        re = mpf_div(b, w, prec, rnd)             # re = b/w
        if b[0]:
            re = mpf_neg(re)
            im = mpf_neg(im)
    return re, im
330
+
331
def mpc_nthroot_fixed(a, b, n, prec):
    """Fixed-point complex n-th root by Newton iteration.

    a, b are signed integers at fixed precision prec. An initial ~50-bit
    estimate is obtained from floating-point (or from mpc_pow if the
    operands overflow a float), then refined with Newton steps whose
    working precision doubles via giant_steps.
    """
    # a, b signed integers at fixed precision prec
    start = 50
    a1 = int(rshift(a, prec - n*start))
    b1 = int(rshift(b, prec - n*start))
    try:
        r = (a1 + 1j * b1)**(1.0/n)
        re = r.real
        im = r.imag
        re = MPZ(int(re))
        im = MPZ(int(im))
    except OverflowError:
        # Operands too large for float arithmetic: bootstrap with the
        # arbitrary-precision power function at low precision.
        a1 = from_int(a1, start)
        b1 = from_int(b1, start)
        fn = from_int(n)
        nth = mpf_rdiv_int(1, fn, start)
        re, im = mpc_pow((a1, b1), (nth, fzero), start)
        re = to_int(re)
        im = to_int(im)
    extra = 10
    prevp = start
    extra1 = n
    for p in giant_steps(start, prec+extra):
        # this is slow for large n, unlike int_pow_fixed
        re2, im2 = complex_int_pow(re, im, n-1)
        re2 = rshift(re2, (n-1)*prevp - p - extra1)
        im2 = rshift(im2, (n-1)*prevp - p - extra1)
        r4 = (re2*re2 + im2*im2) >> (p + extra1)
        ap = rshift(a, prec - p)
        bp = rshift(b, prec - p)
        rec = (ap * re2 + bp * im2) >> p
        imc = (-ap * im2 + bp * re2) >> p
        reb = (rec << p) // r4
        imb = (imc << p) // r4
        # Newton update: x <- ((n-1)*x + z/x**(n-1)) / n
        re = (reb + (n-1)*lshift(re, p-prevp))//n
        im = (imb + (n-1)*lshift(im, p-prevp))//n
        prevp = p
    return re, im
369
+
370
def mpc_nthroot(z, n, prec, rnd=round_fast):
    """
    Complex n-th root.

    Use Newton method as in the real case when it is faster,
    otherwise use z**(1/n)
    """
    a, b = z
    if a[0] == 0 and b == fzero:
        # Nonnegative real input: use the real n-th root directly.
        re = mpf_nthroot(a, n, prec, rnd)
        return (re, fzero)
    if n < 2:
        if n == 0:
            return mpc_one
        if n == 1:
            return mpc_pos((a, b), prec, rnd)
        if n == -1:
            return mpc_div(mpc_one, (a, b), prec, rnd)
        # n < -1: invert the positive-order root.
        inverse = mpc_nthroot((a, b), -n, prec+5, reciprocal_rnd[rnd])
        return mpc_div(mpc_one, inverse, prec, rnd)
    if n <= 20:
        prec2 = int(1.2 * (prec + 10))
        asign, aman, aexp, abc = a
        bsign, bman, bexp, bbc = b
        pf = mpc_abs((a,b), prec)
        # Use the fixed-point Newton solver only when |z| is neither tiny
        # nor huge, so the fixed-point representation stays well-scaled.
        if pf[-2] + pf[-1] > -10 and pf[-2] + pf[-1] < prec:
            af = to_fixed(a, prec2)
            bf = to_fixed(b, prec2)
            re, im = mpc_nthroot_fixed(af, bf, n, prec2)
            extra = 10
            re = from_man_exp(re, -prec2-extra, prec2, rnd)
            im = from_man_exp(im, -prec2-extra, prec2, rnd)
            return re, im
    # General case: z**(1/n) via the power function.
    fn = from_int(n)
    prec2 = prec+10 + 10
    nth = mpf_rdiv_int(1, fn, prec2)
    re, im = mpc_pow((a, b), (nth, fzero), prec2, rnd)
    re = normalize(re[0], re[1], re[2], re[3], prec, rnd)
    im = normalize(im[0], im[1], im[2], im[3], prec, rnd)
    return re, im
410
+
411
def mpc_cbrt(z, prec, rnd=round_fast):
    """
    Complex cubic root.
    """
    return mpc_nthroot(z, 3, prec, rnd)
416
+
417
def mpc_exp(z, prec, rnd=round_fast):
    """
    Complex exponential function.

    We use the direct formula exp(a+bi) = exp(a) * (cos(b) + sin(b)*i)
    for the computation. This formula is very nice because it is
    perfectly stable; since we just do real multiplications, the only
    numerical errors that can creep in are single-ulp rounding errors.

    The formula is efficient since mpmath's real exp is quite fast and
    since we can compute cos and sin simultaneously.

    It is no problem if a and b are large; if the implementations of
    exp/cos/sin are accurate and efficient for all real numbers, then
    so is this function for all complex numbers.
    """
    a, b = z
    if a == fzero:
        return mpf_cos_sin(b, prec, rnd)
    if b == fzero:
        return mpf_exp(a, prec, rnd), fzero
    mag = mpf_exp(a, prec+4, rnd)
    c, s = mpf_cos_sin(b, prec+4, rnd)
    re = mpf_mul(mag, c, prec, rnd)
    im = mpf_mul(mag, s, prec, rnd)
    return re, im
443
+
444
def mpc_log(z, prec, rnd=round_fast):
    """Principal complex logarithm: log|z| + i*arg(z)."""
    re = mpf_log_hypot(z[0], z[1], prec, rnd)
    im = mpc_arg(z, prec, rnd)
    return re, im
448
+
449
def mpc_cos(z, prec, rnd=round_fast):
    """Complex cosine. The formula used is cos(a+bi) = cos(a)*cosh(b) -
    sin(a)*sinh(b)*i.

    The same comments apply as for the complex exp: only real
    multiplications are performed, so no cancellation errors are
    possible. The formula is also efficient since we can compute both
    pairs (cos, sin) and (cosh, sinh) in single steps."""
    a, b = z
    if b == fzero:
        return mpf_cos(a, prec, rnd), fzero
    if a == fzero:
        # cos(bi) = cosh(b), purely real.
        return mpf_cosh(b, prec, rnd), fzero
    wp = prec + 6
    c, s = mpf_cos_sin(a, wp)
    ch, sh = mpf_cosh_sinh(b, wp)
    re = mpf_mul(c, ch, prec, rnd)
    im = mpf_mul(s, sh, prec, rnd)
    return re, mpf_neg(im)
468
+
469
def mpc_sin(z, prec, rnd=round_fast):
    """Complex sine. We have sin(a+bi) = sin(a)*cosh(b) +
    cos(a)*sinh(b)*i. See the docstring for mpc_cos for additional
    comments."""
    a, b = z
    if b == fzero:
        return mpf_sin(a, prec, rnd), fzero
    if a == fzero:
        # sin(bi) = i*sinh(b), purely imaginary.
        return fzero, mpf_sinh(b, prec, rnd)
    wp = prec + 6
    c, s = mpf_cos_sin(a, wp)
    ch, sh = mpf_cosh_sinh(b, wp)
    re = mpf_mul(s, ch, prec, rnd)
    im = mpf_mul(c, sh, prec, rnd)
    return re, im
484
+
485
def mpc_tan(z, prec, rnd=round_fast):
    """Complex tangent. Computed as tan(a+bi) = sin(2a)/M + sinh(2b)/M*i
    where M = cos(2a) + cosh(2b).

    Purely real/imaginary inputs short-circuit to the real tan/tanh.
    """
    a, b = z
    # (Removed dead code: the sign/man/exp/bc unpackings of a and b were
    # never used.)
    if b == fzero: return mpf_tan(a, prec, rnd), fzero
    if a == fzero: return fzero, mpf_tanh(b, prec, rnd)
    wp = prec + 15
    # Double the arguments: the formula is in terms of 2a and 2b.
    a = mpf_shift(a, 1)
    b = mpf_shift(b, 1)
    c, s = mpf_cos_sin(a, wp)
    ch, sh = mpf_cosh_sinh(b, wp)
    # TODO: handle cancellation when c ~= -1 and ch ~= 1
    mag = mpf_add(c, ch, wp)
    re = mpf_div(s, mag, prec, rnd)
    im = mpf_div(sh, mag, prec, rnd)
    return re, im
503
+
504
def mpc_cos_pi(z, prec, rnd=round_fast):
    """Compute cos(pi*z); the pi-scaling of the real part is handled
    natively by mpf_cos_sin_pi for accuracy."""
    a, b = z
    if b == fzero:
        return mpf_cos_pi(a, prec, rnd), fzero
    # Scale the imaginary part by pi explicitly.
    b = mpf_mul(b, mpf_pi(prec+5), prec+5)
    if a == fzero:
        return mpf_cosh(b, prec, rnd), fzero
    wp = prec + 6
    c, s = mpf_cos_sin_pi(a, wp)
    ch, sh = mpf_cosh_sinh(b, wp)
    re = mpf_mul(c, ch, prec, rnd)
    im = mpf_mul(s, sh, prec, rnd)
    return re, mpf_neg(im)
517
+
518
def mpc_sin_pi(z, prec, rnd=round_fast):
    """Compute sin(pi*z); the pi-scaling of the real part is handled
    natively by mpf_cos_sin_pi for accuracy."""
    a, b = z
    if b == fzero:
        return mpf_sin_pi(a, prec, rnd), fzero
    # Scale the imaginary part by pi explicitly.
    b = mpf_mul(b, mpf_pi(prec+5), prec+5)
    if a == fzero:
        return fzero, mpf_sinh(b, prec, rnd)
    wp = prec + 6
    c, s = mpf_cos_sin_pi(a, wp)
    ch, sh = mpf_cosh_sinh(b, wp)
    re = mpf_mul(s, ch, prec, rnd)
    im = mpf_mul(c, sh, prec, rnd)
    return re, im
531
+
532
def mpc_cos_sin(z, prec, rnd=round_fast):
    """Compute cos(z) and sin(z) together, sharing the underlying
    (cos, sin) and (cosh, sinh) evaluations. Returns (cos_z, sin_z)."""
    a, b = z
    if a == fzero:
        ch, sh = mpf_cosh_sinh(b, prec, rnd)
        return (ch, fzero), (fzero, sh)
    if b == fzero:
        c, s = mpf_cos_sin(a, prec, rnd)
        return (c, fzero), (s, fzero)
    wp = prec + 6
    c, s = mpf_cos_sin(a, wp)
    ch, sh = mpf_cosh_sinh(b, wp)
    cre = mpf_mul(c, ch, prec, rnd)
    cim = mpf_mul(s, sh, prec, rnd)
    sre = mpf_mul(s, ch, prec, rnd)
    sim = mpf_mul(c, sh, prec, rnd)
    return (cre, mpf_neg(cim)), (sre, sim)
548
+
549
def mpc_cos_sin_pi(z, prec, rnd=round_fast):
    """Compute cos(pi*z) and sin(pi*z) together; see mpc_cos_sin."""
    a, b = z
    if b == fzero:
        c, s = mpf_cos_sin_pi(a, prec, rnd)
        return (c, fzero), (s, fzero)
    # Scale the imaginary part by pi explicitly.
    b = mpf_mul(b, mpf_pi(prec+5), prec+5)
    if a == fzero:
        ch, sh = mpf_cosh_sinh(b, prec, rnd)
        return (ch, fzero), (fzero, sh)
    wp = prec + 6
    c, s = mpf_cos_sin_pi(a, wp)
    ch, sh = mpf_cosh_sinh(b, wp)
    cre = mpf_mul(c, ch, prec, rnd)
    cim = mpf_mul(s, sh, prec, rnd)
    sre = mpf_mul(s, ch, prec, rnd)
    sim = mpf_mul(c, sh, prec, rnd)
    return (cre, mpf_neg(cim)), (sre, sim)
566
+
567
def mpc_cosh(z, prec, rnd=round_fast):
    """Complex hyperbolic cosine, via the identity cosh(z) = cos(i*z)."""
    x, y = z
    # i*(x+yi) = -y + xi, so cos is evaluated at (y, -x) after the
    # sign flip used by the original identity
    return mpc_cos((y, mpf_neg(x)), prec, rnd)
571
+
572
def mpc_sinh(z, prec, rnd=round_fast):
    """Complex hyperbolic sine, via the identity sinh(z) = -i*sin(i*z)."""
    x, y = z
    # mpc_sin returns (re, im); multiplying by -i swaps the parts
    im, re = mpc_sin((y, x), prec, rnd)
    return re, im
577
+
578
def mpc_tanh(z, prec, rnd=round_fast):
    """Complex hyperbolic tangent, via the identity tanh(z) = -i*tan(i*z)."""
    x, y = z
    # mpc_tan returns (re, im); multiplying by -i swaps the parts
    im, re = mpc_tan((y, x), prec, rnd)
    return re, im
583
+
584
+ # TODO: avoid loss of accuracy
585
def mpc_atan(z, prec, rnd=round_fast):
    """Complex inverse tangent, computed from
    atan(z) = (I/2)*(log(1 - I*z) - log(1 + I*z))."""
    x, y = z
    wp = prec + 15
    # 1 - I*z = (1+y) - I*x  and  1 + I*z = (1-y) + I*x
    one_minus_iz = (mpf_add(fone, y, wp), mpf_neg(x))
    one_plus_iz = (mpf_sub(fone, y, wp), x)
    log_diff = mpc_sub(mpc_log(one_minus_iz, wp),
                       mpc_log(one_plus_iz, wp), prec, rnd)
    re, im = log_diff
    # Multiply by I/2: (I/2)*(re + im*I) = -im/2 + (re/2)*I
    result = mpf_neg(mpf_shift(im, -1)), mpf_shift(re, -1)
    # Subtraction at infinity gives the correct real part but a nan
    # imaginary part (which should be zero); patch it up.
    if result[1] == fnan and mpc_is_inf(z):
        result = (result[0], fzero)
    return result
603
+
604
# Crossover thresholds used by acos_asin below: they delimit the
# regions in which different, numerically safer rewrites of the
# acos/asin identities are selected (see the Hull/Fairgrieve/Tang
# reference cited in acos_asin's docstring).
beta_crossover = from_float(0.6417)
alpha_crossover = from_float(1.5)
606
+
607
def acos_asin(z, prec, rnd, n):
    """ complex acos for n = 0, asin for n = 1
    The algorithm is described in
    T.E. Hull, T.F. Fairgrieve and P.T.P. Tang
    'Implementing the Complex Arcsine and Arcosine Functions
    using Exception Handling',
    ACM Trans. on Math. Software Vol. 23 (1997), p299
    The complex acos and asin can be defined as
    acos(z) = acos(beta) - I*sign(a)* log(alpha + sqrt(alpha**2 -1))
    asin(z) = asin(beta) + I*sign(a)* log(alpha + sqrt(alpha**2 -1))
    where z = a + I*b
    alpha = (1/2)*(r + s); beta = (1/2)*(r - s) = a/alpha
    r = sqrt((a+1)**2 + y**2); s = sqrt((a-1)**2 + y**2)
    These expressions are rewritten in different ways in different
    regions, delimited by two crossovers alpha_crossover and beta_crossover,
    and by abs(a) <= 1, in order to improve the numerical accuracy.
    """
    a, b = z
    wp = prec + 10
    # special cases with real argument
    if b == fzero:
        am = mpf_sub(fone, mpf_abs(a), wp)
        # case abs(a) <= 1
        if not am[0]:
            if n == 0:
                return mpf_acos(a, prec, rnd), fzero
            else:
                return mpf_asin(a, prec, rnd), fzero
        # cases abs(a) > 1
        else:
            # case a < -1
            if a[0]:
                pi = mpf_pi(prec, rnd)
                c = mpf_acosh(mpf_neg(a), prec, rnd)
                if n == 0:
                    return pi, mpf_neg(c)
                else:
                    return mpf_neg(mpf_shift(pi, -1)), c
            # case a > 1
            else:
                c = mpf_acosh(a, prec, rnd)
                if n == 0:
                    return fzero, c
                else:
                    pi = mpf_pi(prec, rnd)
                    return mpf_shift(pi, -1), mpf_neg(c)
    # General complex case: work with abs(a), abs(b) and remember the
    # original signs; they are re-applied at the end.
    asign = bsign = 0
    if a[0]:
        a = mpf_neg(a)
        asign = 1
    if b[0]:
        b = mpf_neg(b)
        bsign = 1
    am = mpf_sub(fone, a, wp)
    ap = mpf_add(fone, a, wp)
    r = mpf_hypot(ap, b, wp)
    s = mpf_hypot(am, b, wp)
    alpha = mpf_shift(mpf_add(r, s, wp), -1)
    beta = mpf_div(a, alpha, wp)
    b2 = mpf_mul(b,b, wp)
    # case beta <= beta_crossover
    if not mpf_sub(beta_crossover, beta, wp)[0]:
        if n == 0:
            re = mpf_acos(beta, wp)
        else:
            re = mpf_asin(beta, wp)
    else:
        # to compute the real part in this region use the identity
        # asin(beta) = atan(beta/sqrt(1-beta**2))
        # beta/sqrt(1-beta**2) = (alpha + a) * (alpha - a)
        # alpha + a is numerically accurate; alpha - a can have
        # cancellations leading to numerical inaccuracies, so rewrite
        # it in different ways according to the region
        Ax = mpf_add(alpha, a, wp)
        # case a <= 1
        if not am[0]:
            # c = b*b/(r + (a+1)); d = (s + (1-a))
            # alpha - a = (1/2)*(c + d)
            # case n=0: re = atan(sqrt((1/2) * Ax * (c + d))/a)
            # case n=1: re = atan(a/sqrt((1/2) * Ax * (c + d)))
            c = mpf_div(b2, mpf_add(r, ap, wp), wp)
            d = mpf_add(s, am, wp)
            re = mpf_shift(mpf_mul(Ax, mpf_add(c, d, wp), wp), -1)
            if n == 0:
                re = mpf_atan(mpf_div(mpf_sqrt(re, wp), a, wp), wp)
            else:
                re = mpf_atan(mpf_div(a, mpf_sqrt(re, wp), wp), wp)
        else:
            # c = Ax/(r + (a+1)); d = Ax/(s - (1-a))
            # alpha - a = (1/2)*(c + d)
            # case n = 0: re = atan(b*sqrt(c + d)/2/a)
            # case n = 1: re = atan(a/(b*sqrt(c + d)/2)
            c = mpf_div(Ax, mpf_add(r, ap, wp), wp)
            d = mpf_div(Ax, mpf_sub(s, am, wp), wp)
            re = mpf_shift(mpf_add(c, d, wp), -1)
            re = mpf_mul(b, mpf_sqrt(re, wp), wp)
            if n == 0:
                re = mpf_atan(mpf_div(re, a, wp), wp)
            else:
                re = mpf_atan(mpf_div(a, re, wp), wp)
    # to compute alpha + sqrt(alpha**2 - 1), if alpha <= alpha_crossover
    # replace it with 1 + Am1 + sqrt(Am1*(alpha+1)))
    # where Am1 = alpha -1
    # if alpha <= alpha_crossover:
    if not mpf_sub(alpha_crossover, alpha, wp)[0]:
        c1 = mpf_div(b2, mpf_add(r, ap, wp), wp)
        # case a < 1
        if mpf_neg(am)[0]:
            # Am1 = (1/2) * (b*b/(r + (a+1)) + b*b/(s + (1-a))
            c2 = mpf_add(s, am, wp)
            c2 = mpf_div(b2, c2, wp)
            Am1 = mpf_shift(mpf_add(c1, c2, wp), -1)
        else:
            # Am1 = (1/2) * (b*b/(r + (a+1)) + (s - (1-a)))
            c2 = mpf_sub(s, am, wp)
            Am1 = mpf_shift(mpf_add(c1, c2, wp), -1)
        # im = log(1 + Am1 + sqrt(Am1*(alpha+1)))
        im = mpf_mul(Am1, mpf_add(alpha, fone, wp), wp)
        im = mpf_log(mpf_add(fone, mpf_add(Am1, mpf_sqrt(im, wp), wp), wp), wp)
    else:
        # im = log(alpha + sqrt(alpha*alpha - 1))
        im = mpf_sqrt(mpf_sub(mpf_mul(alpha, alpha, wp), fone, wp), wp)
        im = mpf_log(mpf_add(alpha, im, wp), wp)
    # Re-apply the signs stripped at the start, using the reflection
    # symmetries of acos (n == 0) and asin (n == 1).
    if asign:
        if n == 0:
            re = mpf_sub(mpf_pi(wp), re, wp)
        else:
            re = mpf_neg(re)
    if not bsign and n == 0:
        im = mpf_neg(im)
    if bsign and n == 1:
        im = mpf_neg(im)
    # Round both components from working precision to the target prec
    re = normalize(re[0], re[1], re[2], re[3], prec, rnd)
    im = normalize(im[0], im[1], im[2], im[3], prec, rnd)
    return re, im
742
+
743
def mpc_acos(z, prec, rnd=round_fast):
    """Complex inverse cosine (dispatches to acos_asin with n=0)."""
    return acos_asin(z, prec, rnd, 0)
745
+
746
def mpc_asin(z, prec, rnd=round_fast):
    """Complex inverse sine (dispatches to acos_asin with n=1)."""
    return acos_asin(z, prec, rnd, 1)
748
+
749
def mpc_asinh(z, prec, rnd=round_fast):
    """Complex inverse hyperbolic sine, via asinh(z) = I*asin(-I*z)."""
    x, y = z
    # -I*z = y - I*x
    re, im = mpc_asin((y, mpf_neg(x)), prec, rnd)
    # Multiply by I: I*(re + im*I) = -im + re*I
    return mpf_neg(im), re
754
+
755
def mpc_acosh(z, prec, rnd=round_fast):
    """Complex inverse hyperbolic cosine:
    acosh(z) = -I*acos(z) when Im(acos(z)) <= 0, else +I*acos(z)."""
    re, im = mpc_acos(z, prec, rnd)
    if im[0] or im == fzero:
        # Im(acos(z)) <= 0: multiply by -I
        return mpf_neg(im), re
    # Otherwise multiply by +I
    return im, mpf_neg(re)
763
+
764
def mpc_atanh(z, prec, rnd=round_fast):
    """Complex inverse hyperbolic tangent, computed as
    atanh(z) = (log(1+z) - log(1-z))/2."""
    wp = prec + 15
    log_plus = mpc_log(mpc_add(z, mpc_one, wp), wp)
    log_minus = mpc_log(mpc_sub(mpc_one, z, wp), wp)
    result = mpc_shift(mpc_sub(log_plus, log_minus, wp), -1)
    # Subtraction at infinity gives the correct imaginary part but a
    # nan real part (which should be zero); patch it up.
    if result[0] == fnan and mpc_is_inf(z):
        result = (fzero, result[1])
    return result
777
+
778
def mpc_fibonacci(z, prec, rnd=round_fast):
    """Complex Fibonacci number, from the Binet-style formula
    F(z) = (phi**z - cos(pi*z)/phi**z) / (2*phi - 1),
    where phi is the golden ratio.

    The working precision is padded by the magnitude of z so the
    powering of phi does not lose significant bits.
    """
    re, im = z
    if im == fzero:
        return (mpf_fibonacci(re, prec, rnd), fzero)
    # Bug fix: the original computed max(|re|-magnitude, |re|-magnitude)
    # -- the same expression twice -- so a large imaginary part did not
    # increase the working precision. Measure both components.
    size = max(abs(re[2]+re[3]), abs(im[2]+im[3]))
    wp = prec + size + 20
    a = mpf_phi(wp)
    # b = 2*phi - 1
    b = mpf_add(mpf_shift(a, 1), fnone, wp)
    u = mpc_pow((a, fzero), z, wp)
    v = mpc_cos_pi(z, wp)
    v = mpc_div(v, u, wp)
    u = mpc_sub(u, v, wp)
    u = mpc_div_mpf(u, b, prec, rnd)
    return u
792
+
793
def mpf_expj(x, prec, rnd='f'):
    """Always raises ComplexResult: exp(I*x) is complex-valued in
    general, so the caller must use the mpc version."""
    raise ComplexResult
795
+
796
def mpc_expj(z, prec, rnd='f'):
    """Compute exp(I*z) for complex z = x + I*y, using
    exp(I*z) = exp(-y) * (cos(x) + I*sin(x))."""
    x, y = z
    # Real argument: result lies on the unit circle
    if y == fzero:
        return mpf_cos_sin(x, prec, rnd)
    # Imaginary argument: result is real
    if x == fzero:
        return mpf_exp(mpf_neg(y), prec, rnd), fzero
    magnitude = mpf_exp(mpf_neg(y), prec+10)
    cos_x, sin_x = mpf_cos_sin(x, prec+10)
    real = mpf_mul(magnitude, cos_x, prec, rnd)
    imag = mpf_mul(magnitude, sin_x, prec, rnd)
    return real, imag
807
+
808
def mpf_expjpi(x, prec, rnd='f'):
    """Always raises ComplexResult: exp(I*pi*x) is complex-valued in
    general, so the caller must use the mpc version."""
    raise ComplexResult
810
+
811
def mpc_expjpi(z, prec, rnd='f'):
    """Compute exp(I*pi*z) for complex z = x + I*y, using
    exp(I*pi*z) = exp(-pi*y) * (cos(pi*x) + I*sin(pi*x))."""
    x, y = z
    # Real argument: result lies on the unit circle
    if y == fzero:
        return mpf_cos_sin_pi(x, prec, rnd)
    sign, man, exp, bc = y
    wp = prec + 10
    # Pad by the magnitude of y so pi*y retains full accuracy
    if man:
        wp += max(0, exp+bc)
    y = mpf_neg(mpf_mul(mpf_pi(wp), y, wp))
    # Imaginary argument: result is real
    if x == fzero:
        return mpf_exp(y, prec, rnd), fzero
    magnitude = mpf_exp(y, prec+10)
    cos_x, sin_x = mpf_cos_sin_pi(x, prec+10)
    real = mpf_mul(magnitude, cos_x, prec, rnd)
    imag = mpf_mul(magnitude, sin_x, prec, rnd)
    return real, imag
827
+
828
+
829
# When running inside Sage, replace selected functions with Sage's
# compiled implementations; fall back silently (with a warning) if
# the extension module is unavailable or lacks the symbols.
if BACKEND == 'sage':
    try:
        import sage.libs.mpmath.ext_libmp as _lbmp
        mpc_exp = _lbmp.mpc_exp
        mpc_sqrt = _lbmp.mpc_sqrt
    except (ImportError, AttributeError):
        print("Warning: Sage imports in libmpc failed")
venv/lib/python3.10/site-packages/mpmath/libmp/libmpf.py ADDED
@@ -0,0 +1,1414 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Low-level functions for arbitrary-precision floating-point arithmetic.
3
+ """
4
+
5
+ __docformat__ = 'plaintext'
6
+
7
+ import math
8
+
9
+ from bisect import bisect
10
+
11
+ import sys
12
+
13
+ # Importing random is slow
14
+ #from random import getrandbits
15
+ getrandbits = None
16
+
17
+ from .backend import (MPZ, MPZ_TYPE, MPZ_ZERO, MPZ_ONE, MPZ_TWO, MPZ_FIVE,
18
+ BACKEND, STRICT, HASH_MODULUS, HASH_BITS, gmpy, sage, sage_utils)
19
+
20
+ from .libintmath import (giant_steps,
21
+ trailtable, bctable, lshift, rshift, bitcount, trailing,
22
+ sqrt_fixed, numeral, isqrt, isqrt_fast, sqrtrem,
23
+ bin_to_radix)
24
+
25
+ # We don't pickle tuples directly for the following reasons:
26
+ # 1: pickle uses str() for ints, which is inefficient when they are large
27
+ # 2: pickle doesn't work for gmpy mpzs
28
+ # Both problems are solved by using hex()
29
+
30
# The mantissa is serialized as a hex string (see the note above);
# Sage's hex() keeps no '0x' prefix handling consistent with MPZ, so
# the two backends need slightly different encoders.
if BACKEND == 'sage':
    def to_pickable(x):
        """Return a picklable tuple form of the raw mpf x
        (mantissa encoded as a hex string)."""
        sign, man, exp, bc = x
        return sign, hex(man), exp, bc
else:
    def to_pickable(x):
        """Return a picklable tuple form of the raw mpf x
        (mantissa encoded as a hex string, '0x' prefix stripped)."""
        sign, man, exp, bc = x
        return sign, hex(man)[2:], exp, bc
38
+
39
def from_pickable(x):
    """Reconstruct a raw mpf from the portable tuple produced by
    to_pickable (mantissa stored as a base-16 string)."""
    sign, man_hex, exp, bc = x
    return (sign, MPZ(man_hex, 16), exp, bc)
42
+
43
class ComplexResult(ValueError):
    """Raised when a real-valued function would have to return a
    complex number (e.g. mpf_expj); the caller should use the mpc
    version instead."""
    pass
45
+
46
# Python 3 removed the intern() builtin; fall back to the identity
# function so the rounding-mode constants below still work.
try:
    intern
except NameError:
    intern = lambda x: x
50
+
51
# All supported rounding modes (interned so identity comparison works)
round_nearest = intern('n')   # to nearest; tie-breaking handled in round_int
round_floor = intern('f')     # toward -inf
round_ceiling = intern('c')   # toward +inf
round_up = intern('u')        # away from zero
round_down = intern('d')      # toward zero (truncation)
round_fast = round_down       # default used internally: the cheapest mode
58
+
59
def prec_to_dps(n):
    """Return the number of accurate decimal digits that can be
    represented with a precision of n bits."""
    bits_per_digit = 3.3219280948873626  # log2(10)
    # One digit is dropped as a safety margin
    digits = int(round(int(n) / bits_per_digit)) - 1
    return digits if digits > 1 else 1
63
+
64
def dps_to_prec(n):
    """Return the number of bits required to represent n decimal
    digits accurately."""
    bits_per_digit = 3.3219280948873626  # log2(10)
    # One extra digit's worth of bits is added as a safety margin
    bits = int(round((int(n) + 1) * bits_per_digit))
    return bits if bits > 1 else 1
68
+
69
def repr_dps(n):
    """Return the number of decimal digits needed to print an n-bit
    number so that it can be uniquely reconstructed."""
    digits = prec_to_dps(n)
    # Double precision (53 bits -> 15 digits) is special-cased to the
    # well-known 17-digit round-trip figure
    if digits == 15:
        return 17
    return digits + 3
77
+
78
#----------------------------------------------------------------------------#
# Some commonly needed float values #
#----------------------------------------------------------------------------#

# Regular number format:
# (-1)**sign * mantissa * 2**exponent, plus bitcount of mantissa
fzero = (0, MPZ_ZERO, 0, 0)
fnzero = (1, MPZ_ZERO, 0, 0)   # negative zero
fone = (0, MPZ_ONE, 0, 1)
fnone = (1, MPZ_ONE, 0, 1)
ftwo = (0, MPZ_ONE, 1, 1)
ften = (0, MPZ_FIVE, 1, 3)     # 10 = 5 * 2**1
fhalf = (0, MPZ_ONE, -1, 1)

# Arbitrary encoding for special numbers: zero mantissa, nonzero exponent.
# The distinct exponent/bc values only serve to make the tuples unequal.
fnan = (0, MPZ_ZERO, -123, -1)
finf = (0, MPZ_ZERO, -456, -2)
fninf = (1, MPZ_ZERO, -789, -3)

# Was 1e1000; this is broken in Python 2.4
math_float_inf = 1e300 * 1e300
99
+
100
+
101
+ #----------------------------------------------------------------------------#
102
+ # Rounding #
103
+ #----------------------------------------------------------------------------#
104
+
105
+ # This function can be used to round a mantissa generally. However,
106
+ # we will try to do most rounding inline for efficiency.
107
def round_int(x, n, rnd):
    """Return the integer x shifted right by n bits, with the
    discarded fraction rounded in the direction given by rnd."""
    if rnd == round_nearest:
        if x >= 0:
            # t keeps one extra bit; bit 0 of t is the rounding bit
            t = x >> (n-1)
            # Round up when the rounding bit is set and either the
            # retained LSB is odd or any lower bit is set
            # (i.e. round-half-even on exact ties)
            if t & 1 and ((t & 2) or (x & h_mask[n<300][n])):
                return (t>>1)+1
            else:
                return t>>1
        else:
            # Nearest rounding is symmetric about zero
            return -round_int(-x, n, rnd)
    if rnd == round_floor:
        return x >> n
    if rnd == round_ceiling:
        return -((-x) >> n)
    if rnd == round_down:
        # Toward zero
        if x >= 0:
            return x >> n
        return -((-x) >> n)
    if rnd == round_up:
        # Away from zero
        if x >= 0:
            return -((-x) >> n)
        return x >> n
129
+
130
+ # These masks are used to pick out segments of numbers to determine
131
+ # which direction to round when rounding to nearest.
132
class h_mask_big:
    """Lazy mask "table" for large shifts: h_mask_big()[n] is a mask
    covering the n-1 bits below the rounding bit."""
    def __getitem__(self, n):
        return (MPZ_ONE<<(n-1))-1

# Precomputed masks for the common case n < 300
h_mask_small = [0]+[((MPZ_ONE<<(_-1))-1) for _ in range(1, 300)]
# Indexed as h_mask[n < 300][n]
h_mask = [h_mask_big(), h_mask_small]
138
+
139
+ # The >> operator rounds to floor. shifts_down[rnd][sign]
140
+ # tells whether this is the right direction to use, or if the
141
+ # number should be negated before shifting
142
# shifts_down[rnd][sign] == 1 means a plain floor-shift (>>) rounds
# in the required direction; 0 means the mantissa must be negated
# before shifting (see _normalize).
shifts_down = {round_floor:(1,0), round_ceiling:(0,1),
               round_down:(1,1), round_up:(0,0)}
144
+
145
+
146
+ #----------------------------------------------------------------------------#
147
+ # Normalization of raw mpfs #
148
+ #----------------------------------------------------------------------------#
149
+
150
+ # This function is called almost every time an mpf is created.
151
+ # It has been optimized accordingly.
152
+
153
def _normalize(sign, man, exp, bc, prec, rnd):
    """
    Create a raw mpf tuple with value (-1)**sign * man * 2**exp and
    normalized mantissa. The mantissa is rounded in the specified
    direction if its size exceeds the precision. Trailing zero bits
    are also stripped from the mantissa to ensure that the
    representation is canonical.

    Conditions on the input:
    * The input must represent a regular (finite) number
    * The sign bit must be 0 or 1
    * The mantissa must be positive
    * The exponent must be an integer
    * The bitcount must be exact

    If these conditions are not met, use from_man_exp, mpf_pos, or any
    of the conversion functions to create normalized raw mpf tuples.
    """
    if not man:
        return fzero
    # Cut mantissa down to size if larger than target precision
    n = bc - prec
    if n > 0:
        if rnd == round_nearest:
            # Inline nearest rounding (round-half-even); see round_int
            t = man >> (n-1)
            if t & 1 and ((t & 2) or (man & h_mask[n<300][n])):
                man = (t>>1)+1
            else:
                man = t>>1
        elif shifts_down[rnd][sign]:
            man >>= n
        else:
            man = -((-man)>>n)
        exp += n
        bc = prec
    # Strip trailing bits
    if not man & 1:
        # trailtable gives the number of trailing zeros of a byte;
        # whole zero bytes are shifted out 8 bits at a time first
        t = trailtable[int(man & 255)]
        if not t:
            while not man & 255:
                man >>= 8
                exp += 8
                bc -= 8
            t = trailtable[int(man & 255)]
        man >>= t
        exp += t
        bc -= t
    # Bit count can be wrong if the input mantissa was 1 less than
    # a power of 2 and got rounded up, thereby adding an extra bit.
    # With trailing bits removed, all powers of two have mantissa 1,
    # so this is easy to check for.
    if man == 1:
        bc = 1
    return sign, man, exp, bc
207
+
208
def _normalize1(sign, man, exp, bc, prec, rnd):
    """same as normalize, but with the added condition that
    man is odd or zero
    """
    if not man:
        return fzero
    # An odd mantissa within the precision is already canonical
    if bc <= prec:
        return sign, man, exp, bc
    n = bc - prec
    if rnd == round_nearest:
        # Inline nearest rounding (round-half-even); see round_int
        t = man >> (n-1)
        if t & 1 and ((t & 2) or (man & h_mask[n<300][n])):
            man = (t>>1)+1
        else:
            man = t>>1
    elif shifts_down[rnd][sign]:
        man >>= n
    else:
        man = -((-man)>>n)
    exp += n
    bc = prec
    # Strip trailing bits (rounding may have produced new ones)
    if not man & 1:
        t = trailtable[int(man & 255)]
        if not t:
            while not man & 255:
                man >>= 8
                exp += 8
                bc -= 8
            t = trailtable[int(man & 255)]
        man >>= t
        exp += t
        bc -= t
    # Bit count can be wrong if the input mantissa was 1 less than
    # a power of 2 and got rounded up, thereby adding an extra bit.
    # With trailing bits removed, all powers of two have mantissa 1,
    # so this is easy to check for.
    if man == 1:
        bc = 1
    return sign, man, exp, bc
248
+
249
# Valid Python types for exponents/bitcounts: int and (on Python 2
# only) long; Python 3 raises NameError for the latter.
try:
    _exp_types = (int, long)
except NameError:
    _exp_types = (int,)
253
+
254
def strict_normalize(sign, man, exp, bc, prec, rnd):
    """Additional checks on the components of an mpf. Enable tests by setting
    the environment variable MPMATH_STRICT to Y."""
    # Invariants: mantissa type, integral component types, exact bitcount
    assert type(man) == MPZ_TYPE
    assert type(bc) in _exp_types
    assert type(exp) in _exp_types
    assert bc == bitcount(man)
    return _normalize(sign, man, exp, bc, prec, rnd)
262
+
263
def strict_normalize1(sign, man, exp, bc, prec, rnd):
    """Additional checks on the components of an mpf. Enable tests by setting
    the environment variable MPMATH_STRICT to Y."""
    # Invariants: as strict_normalize, plus _normalize1's precondition
    # that the mantissa is odd or zero
    assert type(man) == MPZ_TYPE
    assert type(bc) in _exp_types
    assert type(exp) in _exp_types
    assert bc == bitcount(man)
    assert (not man) or (man & 1)
    return _normalize1(sign, man, exp, bc, prec, rnd)
272
+
273
# Prefer the compiled normalizers when the backend provides them
if BACKEND == 'gmpy' and '_mpmath_normalize' in dir(gmpy):
    _normalize = gmpy._mpmath_normalize
    _normalize1 = gmpy._mpmath_normalize

if BACKEND == 'sage':
    _normalize = _normalize1 = sage_utils.normalize

# MPMATH_STRICT selects the asserting wrappers defined above
if STRICT:
    normalize = strict_normalize
    normalize1 = strict_normalize1
else:
    normalize = _normalize
    normalize1 = _normalize1
286
+
287
+ #----------------------------------------------------------------------------#
288
+ # Conversion functions #
289
+ #----------------------------------------------------------------------------#
290
+
291
def from_man_exp(man, exp, prec=None, rnd=round_fast):
    """Create raw mpf from (man, exp) pair. The mantissa may be signed.
    If no precision is specified, the mantissa is stored exactly."""
    man = MPZ(man)
    sign = 0
    if man < 0:
        sign = 1
        man = -man
    # bctable gives bit counts for small values without a function call
    if man < 1024:
        bc = bctable[int(man)]
    else:
        bc = bitcount(man)
    if not prec:
        # Exact storage: only strip trailing zero bits so the
        # representation is canonical
        if not man:
            return fzero
        if not man & 1:
            # Fast path: exactly one trailing zero bit
            if man & 2:
                return (sign, man >> 1, exp + 1, bc - 1)
            t = trailtable[int(man & 255)]
            if not t:
                while not man & 255:
                    man >>= 8
                    exp += 8
                    bc -= 8
                t = trailtable[int(man & 255)]
            man >>= t
            exp += t
            bc -= t
        return (sign, man, exp, bc)
    return normalize(sign, man, exp, bc, prec, rnd)
321
+
322
# Cache of exact raw mpfs for small integers (all byte values plus a
# margin on either side)
int_cache = dict((n, from_man_exp(n, 0)) for n in range(-10, 257))

# Prefer the compiled constructor when the backend provides one
if BACKEND == 'gmpy' and '_mpmath_create' in dir(gmpy):
    from_man_exp = gmpy._mpmath_create

if BACKEND == 'sage':
    from_man_exp = sage_utils.from_man_exp
329
+
330
def from_int(n, prec=0, rnd=round_fast):
    """Create a raw mpf from an integer. With prec=0 the value is
    stored exactly (small values come from int_cache)."""
    if not prec and n in int_cache:
        return int_cache[n]
    return from_man_exp(n, 0, prec, rnd)
337
+
338
def to_man_exp(s):
    """Return the (mantissa, exponent) pair of a raw mpf.

    Raises ValueError for inf/-inf/nan, which are encoded with a
    zero mantissa and a nonzero exponent.
    """
    sign, man, exp, bc = s
    if exp and not man:
        raise ValueError("mantissa and exponent are undefined for %s" % man)
    return man, exp
344
+
345
def to_int(s, rnd=None):
    """Convert a raw mpf to an int. With rnd=None the fraction is
    truncated toward zero (like int(float) in Python); otherwise the
    given rounding mode is used. Raises ValueError for inf/nan."""
    sign, man, exp, bc = s
    if exp and not man:
        raise ValueError("cannot convert inf or nan to int")
    signed = -man if sign else man
    # Nonnegative exponent: the value is already an integer
    if exp >= 0:
        return signed << exp
    # Default path: plain truncation (shift the magnitude, then
    # restore the sign) -- kept branch-light for speed
    if not rnd:
        if sign:
            return -(man >> (-exp))
        return man >> (-exp)
    return round_int(signed, -exp, rnd)
366
+
367
def mpf_round_int(s, rnd):
    """Round the raw mpf s to an integer-valued raw mpf in the
    direction rnd (floor, ceiling or nearest)."""
    sign, man, exp, bc = s
    # inf/nan pass through unchanged
    if (not man) and exp:
        return s
    # Nonnegative exponent: already an integer
    if exp >= 0:
        return s
    mag = exp+bc
    if mag < 1:
        # 0 < |s| < 1: the result is one of -1, 0, 1
        if rnd == round_ceiling:
            if sign: return fzero
            else: return fone
        elif rnd == round_floor:
            if sign: return fnone
            else: return fzero
        elif rnd == round_nearest:
            # |s| < 1/2, or exactly 1/2 (man == 1, mag == 0): to zero
            if mag < 0 or man == MPZ_ONE: return fzero
            elif sign: return fnone
            else: return fone
        else:
            raise NotImplementedError
    # General case: rounding to mag bits discards exactly the
    # fractional bits
    return mpf_pos(s, min(bc, mag), rnd)
388
+
389
def mpf_floor(s, prec=0, rnd=round_fast):
    """Round s down to an integer; if prec is given, additionally
    round the integer result to that precision."""
    result = mpf_round_int(s, round_floor)
    if prec:
        result = mpf_pos(result, prec, rnd)
    return result
394
+
395
def mpf_ceil(s, prec=0, rnd=round_fast):
    """Round s up to an integer; if prec is given, additionally
    round the integer result to that precision."""
    result = mpf_round_int(s, round_ceiling)
    if prec:
        result = mpf_pos(result, prec, rnd)
    return result
400
+
401
def mpf_nint(s, prec=0, rnd=round_fast):
    """Round s to the nearest integer; if prec is given, additionally
    round the integer result to that precision."""
    result = mpf_round_int(s, round_nearest)
    if prec:
        result = mpf_pos(result, prec, rnd)
    return result
406
+
407
def mpf_frac(s, prec=0, rnd=round_fast):
    """Fractional part of s, computed as s - floor(s)."""
    return mpf_sub(s, mpf_floor(s), prec, rnd)
409
+
410
def from_float(x, prec=53, rnd=round_fast):
    """Create a raw mpf from a Python float, rounding if necessary.
    If prec >= 53, the result is guaranteed to represent exactly the
    same number as the input. If prec is not specified, use prec=53."""
    # nan is the only value unequal to itself; frexp only raises an
    # exception for nan on some platforms
    if x != x:
        return fnan
    # in Python2.5 math.frexp gives an exception for float infinity
    # in Python2.6 it returns (float infinity, 0)
    try:
        m, e = math.frexp(x)
    except (OverflowError, ValueError):
        # Narrowed from a bare except: only frexp's own failures on
        # inf/nan (seen as OverflowError/ValueError on the platforms
        # noted above) should be mapped to special values; anything
        # else (e.g. KeyboardInterrupt) must propagate.
        if x == math_float_inf: return finf
        if x == -math_float_inf: return fninf
        return fnan
    if x == math_float_inf: return finf
    if x == -math_float_inf: return fninf
    # Scale the fractional mantissa to a 53-bit integer
    return from_man_exp(int(m*(1<<53)), e-53, prec, rnd)
428
+
429
def from_npfloat(x, prec=113, rnd=round_fast):
    """Create a raw mpf from a numpy float, rounding if necessary.
    If prec >= 113, the result is guaranteed to represent exactly the
    same number as the input. If prec is not specified, use prec=113."""
    as_float = float(x)
    # If the value survives conversion to a Python float, reuse the
    # float path (ldexp overflows for float16)
    if x == as_float:
        return from_float(as_float, prec, rnd)
    import numpy as np
    if np.isposinf(x):
        return finf
    if np.isneginf(x):
        return fninf
    if not np.isfinite(x):
        return fnan
    # Finite value wider than a Python float: scale to 113 bits
    m, e = np.frexp(x)
    return from_man_exp(int(np.ldexp(m, 113)), int(e-113), prec, rnd)
443
+
444
def from_Decimal(x, prec=None, rnd=round_fast):
    """Create a raw mpf from a decimal.Decimal, rounding if necessary.
    When prec is None, a bit precision matching the number of
    significant digits of x is used."""
    if x.is_nan():
        return fnan
    if x.is_infinite():
        return fninf if x.is_signed() else finf
    if prec is None:
        digits = len(x.as_tuple()[1])
        prec = int(digits * 3.3219280948873626)  # digits * log2(10)
    return from_str(str(x), prec, rnd)
453
+
454
def to_float(s, strict=False, rnd=round_fast):
    """
    Convert a raw mpf to a Python float. The result is exact if the
    bitcount of s is <= 53 and no underflow/overflow occurs.

    If the number is too large or too small to represent as a regular
    float, it will be converted to inf or 0.0. Setting strict=True
    forces an OverflowError to be raised instead.

    Warning: with a directed rounding mode, the correct nearest representable
    floating-point number in the specified direction might not be computed
    in case of overflow or (gradual) underflow.
    """
    sign, man, exp, bc = s
    if not man:
        # Special values (zero mantissa)
        if s == fzero: return 0.0
        if s == finf: return math_float_inf
        if s == fninf: return -math_float_inf
        return math_float_inf/math_float_inf
    # Round to at most 53 bits before handing to ldexp
    if bc > 53:
        sign, man, exp, bc = normalize1(sign, man, exp, bc, 53, rnd)
    if sign:
        man = -man
    try:
        return math.ldexp(man, exp)
    except OverflowError:
        if strict:
            raise
        # Overflow to infinity
        if exp + bc > 0:
            if sign:
                return -math_float_inf
            else:
                return math_float_inf
        # Underflow to zero
        return 0.0
490
+
491
def from_rational(p, q, prec, rnd=round_fast):
    """Create a raw mpf approximating the fraction p/q at the given
    precision."""
    return mpf_div(from_int(p), from_int(q), prec, rnd)
495
+
496
def to_rational(s):
    """Convert a raw mpf to a rational number. Return integers (p, q)
    such that s = p/q exactly.

    Raises ValueError for inf, -inf and nan, which have no rational
    value.
    """
    sign, man, exp, bc = s
    # Special values are encoded with a zero mantissa and a nonzero
    # exponent (same test as to_int). The previous check (bc == -1)
    # only caught nan and silently returned a garbage rational for
    # +/-inf.
    if (not man) and exp:
        raise ValueError("cannot convert %s to a rational number" % man)
    if sign:
        man = -man
    if exp >= 0:
        return man * (1<<exp), 1
    else:
        return man, 1<<(-exp)
508
+
509
def to_fixed(s, prec):
    """Convert a raw mpf to a fixed-point big integer: the value
    scaled by 2**prec, with excess low bits floor-shifted away."""
    sign, man, exp, bc = s
    shift = exp + prec
    mantissa = -man if sign else man
    if shift >= 0:
        return mantissa << shift
    return mantissa >> (-shift)
519
+
520
+
521
+ ##############################################################################
522
+ ##############################################################################
523
+
524
+ #----------------------------------------------------------------------------#
525
+ # Arithmetic operations, etc. #
526
+ #----------------------------------------------------------------------------#
527
+
528
def mpf_rand(prec):
    """Return a raw mpf chosen randomly from [0, 1), with prec bits
    in the mantissa."""
    global getrandbits
    # random is imported lazily (see module top: importing it is slow)
    if not getrandbits:
        import random
        getrandbits = random.getrandbits
    return from_man_exp(getrandbits(prec), -prec, prec, round_floor)
536
+
537
def mpf_eq(s, t):
    """Test equality of two raw mpfs. Plain tuple comparison, except
    that nan compares unequal to everything (including itself)."""
    if not (s[1] and t[1]):
        # A zero mantissa means zero or a special value
        if fnan in (s, t):
            return False
    return s == t
544
+
545
def mpf_hash(s):
    """Hash a raw mpf consistently with Python's numeric hashing, so
    that equal mpf/float/int values hash alike where possible."""
    # Duplicate the new hash algorithm introduced in Python 3.2.
    if sys.version_info >= (3, 2):
        ssign, sman, sexp, sbc = s

        # Handle special numbers
        if not sman:
            if s == fnan: return sys.hash_info.nan
            if s == finf: return sys.hash_info.inf
            if s == fninf: return -sys.hash_info.inf
        h = sman % HASH_MODULUS
        # Reduce the exponent modulo the bit width of the modulus
        if sexp >= 0:
            sexp = sexp % HASH_BITS
        else:
            sexp = HASH_BITS - 1 - ((-1 - sexp) % HASH_BITS)
        h = (h << sexp) % HASH_MODULUS
        if ssign: h = -h
        # -1 is reserved as CPython's error sentinel
        if h == -1: h = -2
        return int(h)
    else:
        try:
            # Try to be compatible with hash values for floats and ints
            return hash(to_float(s, strict=1))
        except OverflowError:
            # We must unfortunately sacrifice compatibility with ints here.
            # We could do hash(man << exp) when the exponent is positive, but
            # this would cause unreasonable inefficiency for large numbers.
            return hash(s)
573
+
574
def mpf_cmp(s, t):
    """Compare the raw mpfs s and t. Return -1 if s < t, 0 if s == t,
    and 1 if s > t. (Same convention as Python's cmp() function.)"""

    # In principle, a comparison amounts to determining the sign of s-t.
    # A full subtraction is relatively slow, however, so we first try to
    # look at the components.
    ssign, sman, sexp, sbc = s
    tsign, tman, texp, tbc = t

    # Handle zeros and special numbers
    if not sman or not tman:
        if s == fzero: return -mpf_sign(t)
        if t == fzero: return mpf_sign(s)
        if s == t: return 0
        # Follow same convention as Python's cmp for float nan
        if t == fnan: return 1
        if s == finf: return 1
        if t == fninf: return 1
        return -1
    # Different sides of zero
    if ssign != tsign:
        if not ssign: return 1
        return -1
    # This reduces to direct integer comparison
    if sexp == texp:
        if sman == tman:
            return 0
        if sman > tman:
            if ssign: return -1
            else: return 1
        else:
            if ssign: return 1
            else: return -1
    # Check position of the highest set bit in each number. If
    # different, there is certainly an inequality.
    a = sbc + sexp
    b = tbc + texp
    if ssign:
        if a < b: return 1
        if a > b: return -1
    else:
        if a < b: return -1
        if a > b: return 1

    # Both numbers have the same highest bit. Subtract to find
    # how the lower bits compare.
    delta = mpf_sub(s, t, 5, round_floor)
    if delta[0]:
        return -1
    return 1
625
+
626
def mpf_lt(s, t):
    """Raw mpf comparison s < t; any comparison involving nan is False."""
    if fnan in (s, t):
        return False
    return mpf_cmp(s, t) < 0

def mpf_le(s, t):
    """Raw mpf comparison s <= t; any comparison involving nan is False."""
    if fnan in (s, t):
        return False
    return mpf_cmp(s, t) <= 0

def mpf_gt(s, t):
    """Raw mpf comparison s > t; any comparison involving nan is False."""
    if fnan in (s, t):
        return False
    return mpf_cmp(s, t) > 0

def mpf_ge(s, t):
    """Raw mpf comparison s >= t; any comparison involving nan is False."""
    if fnan in (s, t):
        return False
    return mpf_cmp(s, t) >= 0
645
+
646
def mpf_min_max(seq):
    """Return (smallest, largest) element of a nonempty sequence of
    raw mpfs, using mpf comparison semantics."""
    lo = hi = seq[0]
    for item in seq[1:]:
        if mpf_lt(item, lo):
            lo = item
        if mpf_gt(item, hi):
            hi = item
    return lo, hi
652
+
653
def mpf_pos(s, prec=0, rnd=round_fast):
    """Calculate 0+s for a raw mpf, i.e. round s to the specified
    precision; with prec=0 the value is returned unchanged."""
    if not prec:
        return s
    sign, man, exp, bc = s
    # Infinities and nan pass through rounding unchanged
    if (not man) and exp:
        return s
    return normalize1(sign, man, exp, bc, prec, rnd)
662
+
663
def mpf_neg(s, prec=None, rnd=round_fast):
    """Negate a raw mpf (return -s), rounding the result to the
    specified precision. Omit prec to negate exactly."""
    sign, man, exp, bc = s
    if not man:
        # Specials: infinities swap sign, zero and nan are unchanged
        if exp:
            if s == finf:
                return fninf
            if s == fninf:
                return finf
        return s
    flipped = 1 - sign
    if prec:
        return normalize1(flipped, man, exp, bc, prec, rnd)
    return (flipped, man, exp, bc)
676
+
677
def mpf_abs(s, prec=None, rnd=round_fast):
    """Return abs(s) of the raw mpf s, rounded to the specified
    precision. Omit prec for an exact result."""
    sign, man, exp, bc = s
    if (not man) and exp:
        # Specials: -inf maps to +inf; +inf and nan pass through
        return finf if s == fninf else s
    if prec:
        return normalize1(0, man, exp, bc, prec, rnd)
    if sign:
        return (0, man, exp, bc)
    return s
691
+
692
def mpf_sign(s):
    """Return -1, 0, or 1 (as a Python int, not a raw mpf) depending on
    whether s is negative, zero, or positive. (Nan is taken to give 0.)"""
    sign, man, exp, bc = s
    if man:
        return -1 if sign else 1
    # Zero mantissa: zero, nan, or one of the infinities
    if s == finf:
        return 1
    if s == fninf:
        return -1
    return 0
701
+
702
def mpf_add(s, t, prec=0, rnd=round_fast, _sub=0):
    """
    Add the two raw mpf values s and t.

    With prec=0, no rounding is performed. Note that this can
    produce a very large mantissa (potentially too large to fit
    in memory) if exponents are far apart.

    The private flag _sub=1 flips the sign of t, turning this into
    subtraction (used by mpf_sub).
    """
    ssign, sman, sexp, sbc = s
    tsign, tman, texp, tbc = t
    tsign ^= _sub
    # Standard case: two nonzero, regular numbers
    if sman and tman:
        offset = sexp - texp
        if offset:
            if offset > 0:
                # Outside precision range; only need to perturb
                # (t is so small relative to s that only its sign can
                # influence the rounding of s)
                if offset > 100 and prec:
                    delta = sbc + sexp - tbc - texp
                    if delta > prec + 4:
                        offset = prec + 4
                        sman <<= offset
                        if tsign == ssign: sman += 1
                        else: sman -= 1
                        return normalize1(ssign, sman, sexp-offset,
                            bitcount(sman), prec, rnd)
                # Add
                if ssign == tsign:
                    man = tman + (sman << offset)
                # Subtract
                else:
                    if ssign: man = tman - (sman << offset)
                    else: man = (sman << offset) - tman
                    if man >= 0:
                        ssign = 0
                    else:
                        man = -man
                        ssign = 1
                bc = bitcount(man)
                return normalize1(ssign, man, texp, bc, prec or bc, rnd)
            elif offset < 0:
                # Outside precision range; only need to perturb
                if offset < -100 and prec:
                    delta = tbc + texp - sbc - sexp
                    if delta > prec + 4:
                        offset = prec + 4
                        tman <<= offset
                        if ssign == tsign: tman += 1
                        else: tman -= 1
                        return normalize1(tsign, tman, texp-offset,
                            bitcount(tman), prec, rnd)
                # Add
                if ssign == tsign:
                    man = sman + (tman << -offset)
                # Subtract
                else:
                    if tsign: man = sman - (tman << -offset)
                    else: man = (tman << -offset) - sman
                    if man >= 0:
                        ssign = 0
                    else:
                        man = -man
                        ssign = 1
                bc = bitcount(man)
                return normalize1(ssign, man, sexp, bc, prec or bc, rnd)
        # Equal exponents; no shifting necessary
        if ssign == tsign:
            man = tman + sman
        else:
            if ssign: man = tman - sman
            else: man = sman - tman
            if man >= 0:
                ssign = 0
            else:
                man = -man
                ssign = 1
        bc = bitcount(man)
        return normalize(ssign, man, texp, bc, prec or bc, rnd)
    # Handle zeros and special numbers
    if _sub:
        t = mpf_neg(t)
    if not sman:
        if sexp:
            # s is an infinity or nan; inf + (-inf) gives nan
            if s == t or tman or not texp:
                return s
            return fnan
        if tman:
            return normalize1(tsign, tman, texp, tbc, prec or tbc, rnd)
        return t
    if texp:
        return t
    if sman:
        return normalize1(ssign, sman, sexp, sbc, prec or sbc, rnd)
    return s
796
+
797
def mpf_sub(s, t, prec=0, rnd=round_fast):
    """Return the difference of two raw mpfs, s-t. Thin wrapper around
    mpf_add with the sign-flipping flag set."""
    return mpf_add(s, t, prec, rnd, _sub=1)
801
+
802
def mpf_sum(xs, prec=0, rnd=round_fast, absolute=False):
    """
    Sum a list of mpf values efficiently and accurately
    (typically no temporary roundoff occurs). If prec=0,
    the final result will not be rounded either.

    There may be roundoff error or cancellation if extremely
    large exponent differences occur.

    With absolute=True, sums the absolute values.
    """
    # Running sum is kept as a signed Python int mantissa and an exponent
    man = 0
    exp = 0
    max_extra_prec = prec*2 or 1000000 # XXX
    special = None
    for x in xs:
        xsign, xman, xexp, xbc = x
        if xman:
            if xsign and not absolute:
                xman = -xman
            delta = xexp - exp
            if xexp >= exp:
                # x much larger than existing sum?
                # first: quick test
                if (delta > max_extra_prec) and \
                    ((not man) or delta-bitcount(abs(man)) > max_extra_prec):
                    # x dominates: the old sum cannot affect the result
                    man = xman
                    exp = xexp
                else:
                    man += (xman << delta)
            else:
                delta = -delta
                # x much smaller than existing sum?
                if delta-xbc > max_extra_prec:
                    # Negligible x is dropped (unless the sum is still zero)
                    if not man:
                        man, exp = xman, xexp
                else:
                    man = (man << delta) + xman
                    exp = xexp
        elif xexp:
            # Special value (inf/nan): accumulate separately so that
            # e.g. inf + (-inf) correctly yields nan
            if absolute:
                x = mpf_abs(x)
            special = mpf_add(special or fzero, x, 1)
    # Will be inf or nan
    if special:
        return special
    return from_man_exp(man, exp, prec, rnd)
849
+
850
def gmpy_mpf_mul(s, t, prec=0, rnd=round_fast):
    """Multiply two raw mpfs. With prec=0 the exact product is
    returned. Variant used with the gmpy backend, where bitcount()
    is delegated to the backend."""
    ssign, sman, sexp, sbc = s
    tsign, tman, texp, tbc = t
    sign = ssign ^ tsign
    man = sman*tman
    if man:
        bc = bitcount(man)
        if prec:
            return normalize1(sign, man, sexp+texp, bc, prec, rnd)
        else:
            return (sign, man, sexp+texp, bc)
    # At least one factor is zero or special (inf/nan)
    s_special = (not sman) and sexp
    t_special = (not tman) and texp
    if not s_special and not t_special:
        return fzero
    if fnan in (s, t): return fnan
    # Ensure s holds the special value so the zero test below applies to t
    if (not tman) and texp: s, t = t, s
    # inf * 0 is undefined
    if t == fzero: return fnan
    return {1:finf, -1:fninf}[mpf_sign(s) * mpf_sign(t)]
870
+
871
def gmpy_mpf_mul_int(s, n, prec, rnd=round_fast):
    """Multiply the raw mpf s by the Python integer n, rounding to prec."""
    sign, man, exp, bc = s
    if not man:
        # Zero or special value: defer to the general multiplication
        return mpf_mul(s, from_int(n), prec, rnd)
    if n == 0:
        return fzero
    if n < 0:
        n = -n
        sign = 1 - sign
    product = man * n
    return normalize(sign, product, exp, bitcount(product), prec, rnd)
883
+
884
def python_mpf_mul(s, t, prec=0, rnd=round_fast):
    """Multiply two raw mpfs. With prec=0 the exact product is
    returned. Pure-Python variant that avoids a full bitcount() call
    by deriving the product's bit size from the operands' sizes."""
    ssign, sman, sexp, sbc = s
    tsign, tman, texp, tbc = t
    sign = ssign ^ tsign
    man = sman*tman
    if man:
        # The product of a p-bit and a q-bit integer has either
        # p+q-1 or p+q bits; checking the top bit decides which.
        bc = sbc + tbc - 1
        bc += int(man>>bc)
        if prec:
            return normalize1(sign, man, sexp+texp, bc, prec, rnd)
        else:
            return (sign, man, sexp+texp, bc)
    # At least one factor is zero or special (inf/nan)
    s_special = (not sman) and sexp
    t_special = (not tman) and texp
    if not s_special and not t_special:
        return fzero
    if fnan in (s, t): return fnan
    # Ensure s holds the special value so the zero test below applies to t
    if (not tman) and texp: s, t = t, s
    # inf * 0 is undefined
    if t == fzero: return fnan
    return {1:finf, -1:fninf}[mpf_sign(s) * mpf_sign(t)]
905
+
906
def python_mpf_mul_int(s, n, prec, rnd=round_fast):
    """Multiply the raw mpf s by the Python integer n, rounding the
    result to prec bits. Pure-Python variant using the small-value
    bit-size table bctable where possible."""
    sign, man, exp, bc = s
    if not man:
        # Zero or special value: defer to the general multiplication
        return mpf_mul(s, from_int(n), prec, rnd)
    if not n:
        return fzero
    if n < 0:
        sign ^= 1
        n = -n
    man *= n
    # Generally n will be small
    if n < 1024:
        bc += bctable[int(n)] - 1
    else:
        bc += bitcount(n) - 1
    # The size estimate may be one bit short; the top-bit check fixes it
    bc += int(man>>bc)
    return normalize(sign, man, exp, bc, prec, rnd)
924
+
925
+
926
# Select backend-specific multiplication kernels. The gmpy variants
# call bitcount() directly, while the pure-Python variants derive bit
# sizes from the operands instead — presumably because the backend's
# bit_length is cheap under gmpy but not in pure Python (TODO confirm).
if BACKEND == 'gmpy':
    mpf_mul = gmpy_mpf_mul
    mpf_mul_int = gmpy_mpf_mul_int
else:
    mpf_mul = python_mpf_mul
    mpf_mul_int = python_mpf_mul_int
932
+
933
def mpf_shift(s, n):
    """Quickly multiply the raw mpf s by 2**n without rounding."""
    sign, man, exp, bc = s
    if man:
        # Only the exponent changes; mantissa and bit count are untouched
        return sign, man, exp + n, bc
    # Zero and special values are fixed points of scaling by 2**n
    return s
939
+
940
def mpf_frexp(x):
    """Decompose x as y*2**n and return (y, n), with abs(y) in [0.5, 1)
    for nonzero x and (fzero, 0) for zero. Raises ValueError for
    infinities and nan."""
    sign, man, exp, bc = x
    if man:
        n = bc + exp
        return mpf_shift(x, -n), n
    if x != fzero:
        # Infinities and nan cannot be decomposed
        raise ValueError
    return (fzero, 0)
949
+
950
def mpf_div(s, t, prec, rnd=round_fast):
    """Floating-point division of raw mpfs, s/t, correctly rounded to
    prec bits. Raises ZeroDivisionError for division by (exact) zero."""
    ssign, sman, sexp, sbc = s
    tsign, tman, texp, tbc = t
    if not sman or not tman:
        # Zeros and specials
        if s == fzero:
            if t == fzero: raise ZeroDivisionError
            if t == fnan: return fnan
            return fzero
        if t == fzero:
            raise ZeroDivisionError
        s_special = (not sman) and sexp
        t_special = (not tman) and texp
        if s_special and t_special:
            # inf/inf is undefined
            return fnan
        if s == fnan or t == fnan:
            return fnan
        if not t_special:
            if t == fzero:
                return fnan
            # inf / finite: sign follows the operand signs
            return {1:finf, -1:fninf}[mpf_sign(s) * mpf_sign(t)]
        # finite / inf
        return fzero
    sign = ssign ^ tsign
    if tman == 1:
        # Power-of-two divisor: pure exponent adjustment
        return normalize1(sign, sman, sexp-texp, sbc, prec, rnd)
    # Same strategy as for addition: if there is a remainder, perturb
    # the result a few bits outside the precision range before rounding
    extra = prec - sbc + tbc + 5
    if extra < 5:
        extra = 5
    quot, rem = divmod(sman<<extra, tman)
    if rem:
        quot = (quot<<1) + 1
        extra += 1
        return normalize1(sign, quot, sexp-texp-extra, bitcount(quot), prec, rnd)
    return normalize(sign, quot, sexp-texp-extra, bitcount(quot), prec, rnd)
986
+
987
def mpf_rdiv_int(n, t, prec, rnd=round_fast):
    """Floating-point division n/t with a Python integer as numerator,
    correctly rounded to prec bits."""
    sign, man, exp, bc = t
    if not n or not man:
        # Zero numerator or zero/special denominator: general path
        return mpf_div(from_int(n), t, prec, rnd)
    if n < 0:
        sign ^= 1
        n = -n
    # Perturbation strategy as in mpf_div: an inexact quotient is
    # nudged off the rounding boundary before normalizing
    extra = prec + bc + 5
    quot, rem = divmod(n<<extra, man)
    if rem:
        quot = (quot<<1) + 1
        extra += 1
        return normalize1(sign, quot, -exp-extra, bitcount(quot), prec, rnd)
    return normalize(sign, quot, -exp-extra, bitcount(quot), prec, rnd)
1002
+
1003
def mpf_mod(s, t, prec, rnd=round_fast):
    """Compute s mod t for raw mpfs, following Python's % sign
    convention (the Python % on the shifted integer mantissas
    determines the result's sign). Returns nan if either operand
    is an infinity or nan."""
    ssign, sman, sexp, sbc = s
    tsign, tman, texp, tbc = t
    if ((not sman) and sexp) or ((not tman) and texp):
        return fnan
    # Important special case: do nothing if t is larger
    if ssign == tsign and texp > sexp+sbc:
        return s
    # Another important special case: this allows us to do e.g. x % 1.0
    # to find the fractional part of x, and it will work when x is huge.
    if tman == 1 and sexp > texp+tbc:
        return fzero
    # Align both mantissas to a common exponent and reduce as integers
    base = min(sexp, texp)
    sman = (-1)**ssign * sman
    tman = (-1)**tsign * tman
    man = (sman << (sexp-base)) % (tman << (texp-base))
    if man >= 0:
        sign = 0
    else:
        man = -man
        sign = 1
    return normalize(sign, man, base, bitcount(man), prec, rnd)
1025
+
1026
# Rounding mode to use for an intermediate reciprocal so that the final
# result is still rounded in the requested direction: taking 1/x flips
# the directed modes, while round-to-nearest is unaffected.
reciprocal_rnd = {
  round_down : round_up,
  round_up : round_down,
  round_floor : round_ceiling,
  round_ceiling : round_floor,
  round_nearest : round_nearest
}

# Rounding mode to use for a negated intermediate value: negation swaps
# floor and ceiling, while the magnitude-directed modes (down/up) and
# round-to-nearest are unaffected.
negative_rnd = {
  round_down : round_down,
  round_up : round_up,
  round_floor : round_ceiling,
  round_ceiling : round_floor,
  round_nearest : round_nearest
}
1041
+
1042
def mpf_pow_int(s, n, prec, rnd=round_fast):
    """Compute s**n, where s is a raw mpf and n is a Python integer."""
    sign, man, exp, bc = s

    # Specials: inf, -inf, nan raised to an integer power
    if (not man) and exp:
        if s == finf:
            if n > 0: return s
            if n == 0: return fnan
            return fzero
        if s == fninf:
            # Sign alternates with the parity of n
            if n > 0: return [finf, fninf][n & 1]
            if n == 0: return fnan
            return fzero
        return fnan

    n = int(n)
    if n == 0: return fone
    if n == 1: return mpf_pos(s, prec, rnd)
    if n == 2:
        # Fast path for squaring
        _, man, exp, bc = s
        if not man:
            return fzero
        man = man*man
        if man == 1:
            return (0, MPZ_ONE, exp+exp, 1)
        bc = bc + bc - 2
        bc += bctable[int(man>>bc)]
        return normalize1(0, man, exp+exp, bc, prec, rnd)
    if n == -1: return mpf_div(fone, s, prec, rnd)
    if n < 0:
        # Compute the positive power with the opposite directed rounding,
        # then take the reciprocal
        inverse = mpf_pow_int(s, -n, prec+5, reciprocal_rnd[rnd])
        return mpf_div(fone, inverse, prec, rnd)

    # Result is negative only for a negative base and odd n
    # (sign & n picks the low bit of n when sign is 1)
    result_sign = sign & n

    # Use exact integer power when the exact mantissa is small
    if man == 1:
        return (result_sign, MPZ_ONE, exp*n, 1)
    if bc*n < 1000:
        man **= n
        return normalize1(result_sign, man, exp*n, bitcount(man), prec, rnd)

    # Use directed rounding all the way through to maintain rigorous
    # bounds for interval arithmetic
    rounds_down = (rnd == round_nearest) or \
        shifts_down[rnd][result_sign]

    # Now we perform binary exponentiation. Need to estimate precision
    # to avoid rounding errors from temporary operations. Roughly log_2(n)
    # operations are performed.
    workprec = prec + 4*bitcount(n) + 4
    _, pm, pe, pbc = fone
    while 1:
        if n & 1:
            # Multiply the accumulated product by the current power
            pm = pm*man
            pe = pe+exp
            pbc += bc - 2
            pbc = pbc + bctable[int(pm >> pbc)]
            if pbc > workprec:
                # Truncate in the working direction to bound the error
                if rounds_down:
                    pm = pm >> (pbc-workprec)
                else:
                    pm = -((-pm) >> (pbc-workprec))
                pe += pbc - workprec
                pbc = workprec
            n -= 1
            if not n:
                break
        # Square the current power
        man = man*man
        exp = exp+exp
        bc = bc + bc - 2
        bc = bc + bctable[int(man >> bc)]
        if bc > workprec:
            if rounds_down:
                man = man >> (bc-workprec)
            else:
                man = -((-man) >> (bc-workprec))
            exp += bc - workprec
            bc = workprec
        n = n // 2

    return normalize(result_sign, pm, pe, pbc, prec, rnd)
1124
+
1125
+
1126
def mpf_perturb(x, eps_sign, prec, rnd):
    """
    For nonzero x, calculate x + eps with directed rounding, where
    eps < prec relatively and eps has the given sign (0 for
    positive, 1 for negative).

    With rounding to nearest, this is taken to simply normalize
    x to the given precision.
    """
    if rnd == round_nearest:
        return mpf_pos(x, prec, rnd)
    sign, man, exp, bc = x
    # eps is a single bit just below the target precision of x
    eps = (eps_sign, MPZ_ONE, exp+bc-prec-1, 1)
    # Adding eps only matters when it pushes the value away from the
    # rounding direction; otherwise rounding absorbs the perturbation
    if sign:
        away = (rnd in (round_down, round_ceiling)) ^ eps_sign
    else:
        away = (rnd in (round_up, round_ceiling)) ^ eps_sign
    if away:
        return mpf_add(x, eps, prec, rnd)
    else:
        return mpf_pos(x, prec, rnd)
1147
+
1148
+
1149
+ #----------------------------------------------------------------------------#
1150
+ # Radix conversion #
1151
+ #----------------------------------------------------------------------------#
1152
+
1153
def to_digits_exp(s, dps):
    """Helper function for representing the floating-point number s as
    a decimal with dps digits. Returns (sign, string, exponent) where
    sign is '' or '-', string is the digit string, and exponent is
    the decimal exponent as an int.

    If inexact, the decimal representation is rounded toward zero."""

    # Extract sign first so it doesn't mess up the string digit count
    if s[0]:
        sign = '-'
        s = mpf_neg(s)
    else:
        sign = ''
    _sign, man, exp, bc = s

    if not man:
        # Zero: empty sign, single '0' digit, exponent 0
        return '', '0', 0

    # Binary working precision covering dps decimal digits plus guard bits
    bitprec = int(dps * math.log(10,2)) + 10

    # Cut down to size
    # TODO: account for precision when doing this
    exp_from_1 = exp + bc
    if abs(exp_from_1) > 3500:
        from .libelefun import mpf_ln2, mpf_ln10
        # Set b = int(exp * log(2)/log(10))
        # If exp is huge, we must use high-precision arithmetic to
        # find the nearest power of ten
        expprec = bitcount(abs(exp)) + 5
        tmp = from_int(exp)
        tmp = mpf_mul(tmp, mpf_ln2(expprec))
        tmp = mpf_div(tmp, mpf_ln10(expprec), expprec)
        b = to_int(tmp)
        s = mpf_div(s, mpf_pow_int(ften, b, bitprec), bitprec)
        _sign, man, exp, bc = s
        exponent = b
    else:
        exponent = 0

    # First, calculate mantissa digits by converting to a binary
    # fixed-point number and then converting that number to
    # a decimal fixed-point number.
    fixprec = max(bitprec - exp - bc, 0)
    fixdps = int(fixprec / math.log(10,2) + 0.5)
    sf = to_fixed(s, fixprec)
    sd = bin_to_radix(sf, fixprec, 10, fixdps)
    digits = numeral(sd, base=10, size=dps)

    exponent += len(digits) - fixdps - 1
    return sign, digits, exponent
1204
+
1205
def to_str(s, dps, strip_zeros=True, min_fixed=None, max_fixed=None,
    show_zero_exponent=False):
    """
    Convert a raw mpf to a decimal floating-point literal with at
    most `dps` decimal digits in the mantissa (not counting extra zeros
    that may be inserted for visual purposes).

    The number will be printed in fixed-point format if the position
    of the leading digit is strictly between min_fixed
    (default = min(-dps/3,-5)) and max_fixed (default = dps).

    To force fixed-point format always, set min_fixed = -inf,
    max_fixed = +inf. To force floating-point format, set
    min_fixed >= max_fixed.

    The literal is formatted so that it can be parsed back to a number
    by to_str, float() or Decimal().
    """

    # Special numbers
    if not s[1]:
        if s == fzero:
            if dps: t = '0.0'
            else: t = '.0'
            if show_zero_exponent:
                t += 'e+0'
            return t
        if s == finf: return '+inf'
        if s == fninf: return '-inf'
        if s == fnan: return 'nan'
        raise ValueError

    if min_fixed is None: min_fixed = min(-(dps//3), -5)
    if max_fixed is None: max_fixed = dps

    # to_digits_exp rounds to floor.
    # This sometimes kills some instances of "...00001"
    sign, digits, exponent = to_digits_exp(s, dps+3)

    # No digits: show only .0; round exponent to nearest
    if not dps:
        if digits[0] in '56789':
            exponent += 1
        digits = ".0"

    else:
        # Rounding up kills some instances of "...99999"
        if len(digits) > dps and digits[dps] in '56789':
            digits = digits[:dps]
            # Propagate the carry leftward through trailing 9s
            i = dps - 1
            while i >= 0 and digits[i] == '9':
                i -= 1
            if i >= 0:
                digits = digits[:i] + str(int(digits[i]) + 1) + '0' * (dps - i - 1)
            else:
                # All digits were 9s: carry overflows into a new leading 1
                digits = '1' + '0' * (dps - 1)
                exponent += 1
        else:
            digits = digits[:dps]

        # Prettify numbers close to unit magnitude
        if min_fixed < exponent < max_fixed:
            if exponent < 0:
                digits = ("0"*int(-exponent)) + digits
                split = 1
            else:
                split = exponent + 1
                if split > dps:
                    digits += "0"*(split-dps)
            exponent = 0
        else:
            split = 1

        digits = (digits[:split] + "." + digits[split:])

        if strip_zeros:
            # Clean up trailing zeros
            digits = digits.rstrip('0')
            if digits[-1] == ".":
                digits += "0"

    if exponent == 0 and dps and not show_zero_exponent: return sign + digits
    if exponent >= 0: return sign + digits + "e+" + str(exponent)
    if exponent < 0: return sign + digits + "e" + str(exponent)
1289
+
1290
def str_to_man_exp(x, base=10):
    """Helper function for from_str: parse the literal x into an
    integer mantissa and exponent, returning (man, exp) such that
    the value equals man * base**exp."""
    x = x.lower().rstrip('l')   # drop legacy Python 2 long suffix
    # Verify that the input is a valid float literal
    float(x)
    # Split into mantissa, exponent
    parts = x.split('e')
    if len(parts) == 1:
        exp = 0
    else: # == 2
        x = parts[0]
        exp = int(parts[1])
    # Look for radix point in mantissa
    parts = x.split('.')
    if len(parts) == 2:
        a, b = parts[0], parts[1].rstrip('0')
        # Each fractional digit lowers the exponent by one
        exp -= len(b)
        x = a + b
    x = MPZ(int(x, base))
    return x, exp
1310
+
1311
# Recognized textual spellings of the special values
special_str = {'inf':finf, '+inf':finf, '-inf':fninf, 'nan':fnan}

def from_str(x, prec, rnd=round_fast):
    """Create a raw mpf from a decimal literal, rounding in the
    specified direction if the input number cannot be represented
    exactly as a binary floating-point number with the given number of
    bits. The literal syntax accepted is the same as for Python
    floats.

    TODO: the rounding does not work properly for large exponents.
    """
    x = x.lower().strip()
    if x in special_str:
        return special_str[x]

    # Fraction syntax 'p/q' is also accepted
    if '/' in x:
        p, q = x.split('/')
        p, q = p.rstrip('l'), q.rstrip('l')
        return from_rational(int(p), int(q), prec, rnd)

    man, exp = str_to_man_exp(x, base=10)

    # XXX: appropriate cutoffs & track direction
    # note no factors of 5
    if abs(exp) > 400:
        # Huge exponent: scale by a rounded power of ten
        s = from_int(man, prec+10)
        s = mpf_mul(s, mpf_pow_int(ften, exp, prec+10), prec, rnd)
    else:
        # Moderate exponent: exact integer / rational conversion
        if exp >= 0:
            s = from_int(man * 10**exp, prec, rnd)
        else:
            s = from_rational(man, 10**-exp, prec, rnd)
    return s
1344
+
1345
+ # Binary string conversion. These are currently mainly used for debugging
1346
+ # and could use some improvement in the future
1347
+
1348
def from_bstr(x):
    """Parse a binary mantissa/exponent string (e.g. '101e-3', as
    produced by to_bstr) into a raw mpf."""
    man, exp = str_to_man_exp(x, base=2)
    man = MPZ(man)
    if man < 0:
        sign = 1
        man = -man
    else:
        sign = 0
    bc = bitcount(man)
    return normalize(sign, man, exp, bc, bc, round_floor)
1357
+
1358
def to_bstr(x):
    """Format a raw mpf as a binary mantissa/exponent string
    (mainly useful for debugging)."""
    sign, man, exp, bc = x
    prefix = '-' if sign else ''
    return prefix + numeral(man, size=bitcount(man), base=2) + ("e%i" % exp)
1361
+
1362
+
1363
+ #----------------------------------------------------------------------------#
1364
+ # Square roots #
1365
+ #----------------------------------------------------------------------------#
1366
+
1367
+
1368
def mpf_sqrt(s, prec, rnd=round_fast):
    """
    Compute the square root of a nonnegative mpf value. The
    result is correctly rounded.

    Raises ComplexResult for negative input.
    """
    sign, man, exp, bc = s
    if sign:
        raise ComplexResult("square root of a negative number")
    if not man:
        # Zero, +inf and nan are their own square roots here
        return s
    # Make the exponent even so that sqrt(man * 2**exp) = sqrt(man) * 2**(exp/2)
    if exp & 1:
        exp -= 1
        man <<= 1
        bc += 1
    elif man == 1:
        # Exact power of two
        return normalize1(sign, man, exp//2, bc, prec, rnd)
    # Scale the mantissa up so the integer square root carries
    # enough bits for correct rounding; keep the shift even
    shift = max(4, 2*prec-bc+4)
    shift += shift & 1
    if rnd in 'fd':
        # Truncating modes: plain floor square root suffices
        man = isqrt(man<<shift)
    else:
        man, rem = sqrtrem(man<<shift)
        # Perturb up
        if rem:
            man = (man<<1)+1
            shift += 2
    return from_man_exp(man, (exp-shift)//2, prec, rnd)
1395
+
1396
def mpf_hypot(x, y, prec, rnd=round_fast):
    """Compute the Euclidean norm sqrt(x**2 + y**2) of two raw mpfs
    x and y."""
    # With one operand zero, the norm is just the other's magnitude
    if y == fzero:
        return mpf_abs(x, prec, rnd)
    if x == fzero:
        return mpf_abs(y, prec, rnd)
    sum_of_squares = mpf_add(mpf_mul(x, x), mpf_mul(y, y), prec + 4)
    return mpf_sqrt(sum_of_squares, prec, rnd)
1403
+
1404
+
1405
# When running inside Sage, replace the core arithmetic kernels with
# Sage's compiled implementations if the extension module is available;
# silently keep the Python versions otherwise.
if BACKEND == 'sage':
    try:
        import sage.libs.mpmath.ext_libmp as ext_lib
        mpf_add = ext_lib.mpf_add
        mpf_sub = ext_lib.mpf_sub
        mpf_mul = ext_lib.mpf_mul
        mpf_div = ext_lib.mpf_div
        mpf_sqrt = ext_lib.mpf_sqrt
    except ImportError:
        pass
venv/lib/python3.10/site-packages/mpmath/libmp/libmpi.py ADDED
@@ -0,0 +1,935 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Computational functions for interval arithmetic.
3
+
4
+ """
5
+
6
+ from .backend import xrange
7
+
8
+ from .libmpf import (
9
+ ComplexResult,
10
+ round_down, round_up, round_floor, round_ceiling, round_nearest,
11
+ prec_to_dps, repr_dps, dps_to_prec,
12
+ bitcount,
13
+ from_float,
14
+ fnan, finf, fninf, fzero, fhalf, fone, fnone,
15
+ mpf_sign, mpf_lt, mpf_le, mpf_gt, mpf_ge, mpf_eq, mpf_cmp,
16
+ mpf_min_max,
17
+ mpf_floor, from_int, to_int, to_str, from_str,
18
+ mpf_abs, mpf_neg, mpf_pos, mpf_add, mpf_sub, mpf_mul, mpf_mul_int,
19
+ mpf_div, mpf_shift, mpf_pow_int,
20
+ from_man_exp, MPZ_ONE)
21
+
22
+ from .libelefun import (
23
+ mpf_log, mpf_exp, mpf_sqrt, mpf_atan, mpf_atan2,
24
+ mpf_pi, mod_pi2, mpf_cos_sin
25
+ )
26
+
27
+ from .gammazeta import mpf_gamma, mpf_rgamma, mpf_loggamma, mpc_loggamma
28
+
29
def mpi_str(s, prec):
    """Format the interval s as '[a, b]', using a few guard digits
    beyond the decimal equivalent of prec bits."""
    sa, sb = s
    dps = prec_to_dps(prec) + 5
    return "[%s, %s]" % (to_str(sa, dps), to_str(sb, dps))
    #dps = prec_to_dps(prec)
    #m = mpi_mid(s, prec)
    #d = mpf_shift(mpi_delta(s, 20), -1)
    #return "%s +/- %s" % (to_str(m, dps), to_str(d, 3))
37
+
38
# Point intervals for the constants 0 and 1
mpi_zero = (fzero, fzero)
mpi_one = (fone, fone)
40
+
41
def mpi_eq(s, t):
    """True iff the intervals s and t have identical endpoints."""
    sa, sb = s
    ta, tb = t
    return sa == ta and sb == tb
43
+
44
def mpi_ne(s, t):
    """True iff the intervals s and t differ in at least one endpoint."""
    sa, sb = s
    ta, tb = t
    return sa != ta or sb != tb
46
+
47
def mpi_lt(s, t):
    """Interval comparison s < t: True if certainly true, False if
    certainly false, None if undecidable (the intervals overlap)."""
    sa, sb = s
    ta, tb = t
    # All of s lies strictly below all of t?
    if mpf_lt(sb, ta):
        return True
    # All of s lies at or above all of t?
    if mpf_ge(sa, tb):
        return False
    # Overlapping intervals: the truth value is indeterminate
    return None
53
+
54
def mpi_le(s, t):
    """Interval comparison s <= t: True if certainly true, False if
    certainly false, None if undecidable (the intervals overlap)."""
    sa, sb = s
    ta, tb = t
    # All of s lies at or below all of t?
    if mpf_le(sb, ta):
        return True
    # All of s lies strictly above all of t?
    if mpf_gt(sa, tb):
        return False
    # Overlapping intervals: the truth value is indeterminate
    return None
60
+
61
# s > t and s >= t are the mirrored < and <= comparisons
def mpi_gt(s, t): return mpi_lt(t, s)
def mpi_ge(s, t): return mpi_le(t, s)
63
+
64
def mpi_add(s, t, prec=0):
    """Interval addition: round the lower endpoint down and the upper
    endpoint up so the result encloses x+y for all x in s, y in t."""
    sa, sb = s
    ta, tb = t
    a = mpf_add(sa, ta, prec, round_floor)
    b = mpf_add(sb, tb, prec, round_ceiling)
    # inf + (-inf) produces nan; widen such endpoints to infinity
    if a == fnan: a = fninf
    if b == fnan: b = finf
    return a, b
72
+
73
def mpi_sub(s, t, prec=0):
    """Interval subtraction: the lower endpoint pairs sa with tb and
    the upper pairs sb with ta, with outward rounding, so the result
    encloses x-y for all x in s, y in t."""
    sa, sb = s
    ta, tb = t
    a = mpf_sub(sa, tb, prec, round_floor)
    b = mpf_sub(sb, ta, prec, round_ceiling)
    # inf - inf produces nan; widen such endpoints to infinity
    if a == fnan: a = fninf
    if b == fnan: b = finf
    return a, b
81
+
82
def mpi_delta(s, prec):
    """Return the width b-a of the interval s = (a, b), rounded up."""
    sa, sb = s
    return mpf_sub(sb, sa, prec, round_up)

def mpi_mid(s, prec):
    """Return the midpoint (a+b)/2 of the interval s = (a, b),
    rounded to nearest."""
    sa, sb = s
    return mpf_shift(mpf_add(sa, sb, prec, round_nearest), -1)
89
+
90
def mpi_pos(s, prec):
    """Round the interval s outward to prec bits (interval identity)."""
    sa, sb = s
    a = mpf_pos(sa, prec, round_floor)
    b = mpf_pos(sb, prec, round_ceiling)
    return a, b

def mpi_neg(s, prec=0):
    """Negate the interval s: the endpoints swap roles and change sign."""
    sa, sb = s
    a = mpf_neg(sb, prec, round_floor)
    b = mpf_neg(sa, prec, round_ceiling)
    return a, b
101
+
102
def mpi_abs(s, prec=0):
    """Return an interval enclosing abs(x) for every x in s, rounded
    outward to prec bits."""
    sa, sb = s
    sas = mpf_sign(sa)
    sbs = mpf_sign(sb)
    # Both points nonnegative?
    if sas >= 0:
        a = mpf_pos(sa, prec, round_floor)
        b = mpf_pos(sb, prec, round_ceiling)
    # Upper point nonnegative?
    elif sbs >= 0:
        # Interval straddles zero: lower bound 0, upper bound the
        # larger of |sa| and sb
        a = fzero
        negsa = mpf_neg(sa)
        if mpf_lt(negsa, sb):
            b = mpf_pos(sb, prec, round_ceiling)
        else:
            b = mpf_pos(negsa, prec, round_ceiling)
    # Both negative?
    else:
        a = mpf_neg(sb, prec, round_floor)
        b = mpf_neg(sa, prec, round_ceiling)
    return a, b
123
+
124
# TODO: optimize
def mpi_mul_mpf(s, t, prec):
    """Multiply the interval s by the raw mpf t, treated as the point
    interval [t, t]."""
    return mpi_mul(s, (t, t), prec)

def mpi_div_mpf(s, t, prec):
    """Divide the interval s by the raw mpf t, treated as the point
    interval [t, t]."""
    return mpi_div(s, (t, t), prec)
130
+
131
def mpi_mul(s, t, prec=0):
    """Interval multiplication: return an interval enclosing x*y for
    all x in s, y in t, with outward rounding to prec bits. The
    endpoint pairing depends on the sign configuration of the two
    intervals; nan endpoints arising from 0*inf are widened."""
    sa, sb = s
    ta, tb = t
    sas = mpf_sign(sa)
    sbs = mpf_sign(sb)
    tas = mpf_sign(ta)
    tbs = mpf_sign(tb)
    if sas == sbs == 0:
        # Should maybe be undefined
        if ta == fninf or tb == finf:
            return fninf, finf
        return fzero, fzero
    if tas == tbs == 0:
        # Should maybe be undefined
        if sa == fninf or sb == finf:
            return fninf, finf
        return fzero, fzero
    if sas >= 0:
        # positive * positive
        if tas >= 0:
            a = mpf_mul(sa, ta, prec, round_floor)
            b = mpf_mul(sb, tb, prec, round_ceiling)
            if a == fnan: a = fzero
            if b == fnan: b = finf
        # positive * negative
        elif tbs <= 0:
            a = mpf_mul(sb, ta, prec, round_floor)
            b = mpf_mul(sa, tb, prec, round_ceiling)
            if a == fnan: a = fninf
            if b == fnan: b = fzero
        # positive * both signs
        else:
            a = mpf_mul(sb, ta, prec, round_floor)
            b = mpf_mul(sb, tb, prec, round_ceiling)
            if a == fnan: a = fninf
            if b == fnan: b = finf
    elif sbs <= 0:
        # negative * positive
        if tas >= 0:
            a = mpf_mul(sa, tb, prec, round_floor)
            b = mpf_mul(sb, ta, prec, round_ceiling)
            if a == fnan: a = fninf
            if b == fnan: b = fzero
        # negative * negative
        elif tbs <= 0:
            a = mpf_mul(sb, tb, prec, round_floor)
            b = mpf_mul(sa, ta, prec, round_ceiling)
            if a == fnan: a = fzero
            if b == fnan: b = finf
        # negative * both signs
        else:
            a = mpf_mul(sa, tb, prec, round_floor)
            b = mpf_mul(sa, ta, prec, round_ceiling)
            if a == fnan: a = fninf
            if b == fnan: b = finf
    else:
        # General case: perform all cross-multiplications and compare
        # Since the multiplications can be done exactly, we need only
        # do 4 (instead of 8: two for each rounding mode)
        cases = [mpf_mul(sa, ta), mpf_mul(sa, tb), mpf_mul(sb, ta), mpf_mul(sb, tb)]
        if fnan in cases:
            a, b = (fninf, finf)
        else:
            a, b = mpf_min_max(cases)
            a = mpf_pos(a, prec, round_floor)
            b = mpf_pos(b, prec, round_ceiling)
    return a, b
198
+
199
def mpi_square(s, prec=0):
    """Square an interval; the result is guaranteed nonnegative."""
    sa, sb = s
    if mpf_ge(sa, fzero):
        # nonnegative interval: x^2 is increasing
        a = mpf_mul(sa, sa, prec, round_floor)
        b = mpf_mul(sb, sb, prec, round_ceiling)
    elif mpf_le(sb, fzero):
        # nonpositive interval: x^2 is decreasing
        a = mpf_mul(sb, sb, prec, round_floor)
        b = mpf_mul(sa, sa, prec, round_ceiling)
    else:
        # straddles zero: minimum 0, maximum max(-sa, sb)^2
        sa = mpf_neg(sa)
        sa, sb = mpf_min_max([sa, sb])
        a = fzero
        b = mpf_mul(sb, sb, prec, round_ceiling)
    return a, b
214
def mpi_div(s, t, prec):
    """Divide interval s by interval t, rounding endpoints outward."""
    sa, sb = s
    ta, tb = t
    sas = mpf_sign(sa)
    sbs = mpf_sign(sb)
    tas = mpf_sign(ta)
    tbs = mpf_sign(tb)
    # 0 / X
    if sas == sbs == 0:
        # 0 / <interval containing 0> is indeterminate
        if (tas < 0 and tbs > 0) or (tas == 0 or tbs == 0):
            return fninf, finf
        return fzero, fzero
    # Denominator contains both negative and positive numbers;
    # properly a multi-interval, but the closest single-interval
    # enclosure is the entire (extended) real line
    if tas < 0 and tbs > 0:
        return fninf, finf
    # Reduce to a nonnegative denominator
    if tas < 0:
        return mpi_div(mpi_neg(s), mpi_neg(t), prec)
    # Division by zero at the lower endpoint
    # XXX: make sure all results make sense
    if tas == 0:
        # Numerator contains both signs?
        if sas < 0 and sbs > 0:
            return fninf, finf
        if tas == tbs:
            return fninf, finf
        # Numerator positive?
        if sas >= 0:
            a = mpf_div(sa, tb, prec, round_floor)
            b = finf
        if sbs <= 0:
            a = fninf
            b = mpf_div(sb, tb, prec, round_ceiling)
    # Division with strictly positive denominator.
    # Still have to handle nans from inf/0 or inf/inf.
    else:
        if sas >= 0:
            # nonnegative numerator
            a = mpf_div(sa, tb, prec, round_floor)
            b = mpf_div(sb, ta, prec, round_ceiling)
            if a == fnan: a = fzero
            if b == fnan: b = finf
        elif sbs <= 0:
            # nonpositive numerator
            a = mpf_div(sa, ta, prec, round_floor)
            b = mpf_div(sb, tb, prec, round_ceiling)
            if a == fnan: a = fninf
            if b == fnan: b = fzero
        else:
            # numerator contains both signs
            a = mpf_div(sa, ta, prec, round_floor)
            b = mpf_div(sb, ta, prec, round_ceiling)
            if a == fnan: a = fninf
            if b == fnan: b = finf
    return a, b
273
def mpi_pi(prec):
    """Return an interval enclosing pi at the given precision."""
    lo = mpf_pi(prec, round_floor)
    hi = mpf_pi(prec, round_ceiling)
    return lo, hi
278
def mpi_exp(s, prec):
    """Exponential of an interval; exp is monotonic, so map the endpoints."""
    lo, hi = s
    return (mpf_exp(lo, prec, round_floor),
            mpf_exp(hi, prec, round_ceiling))
285
def mpi_log(s, prec):
    """Natural logarithm of an interval; log is monotonic on its domain."""
    lo, hi = s
    return (mpf_log(lo, prec, round_floor),
            mpf_log(hi, prec, round_ceiling))
292
def mpi_sqrt(s, prec):
    """Square root of an interval; sqrt is monotonic on its domain."""
    lo, hi = s
    return (mpf_sqrt(lo, prec, round_floor),
            mpf_sqrt(hi, prec, round_ceiling))
299
def mpi_atan(s, prec):
    """Arctangent of an interval; atan is monotonic everywhere."""
    lo, hi = s
    return (mpf_atan(lo, prec, round_floor),
            mpf_atan(hi, prec, round_ceiling))
305
def mpi_pow_int(s, n, prec):
    """Raise interval s to the integer power n."""
    sa, sb = s
    if n < 0:
        # s^-n = 1 / s^n, computed at extra working precision
        return mpi_div((fone, fone), mpi_pow_int(s, -n, prec+20), prec)
    if n == 0:
        return (fone, fone)
    if n == 1:
        return s
    if n == 2:
        return mpi_square(s, prec)
    if n & 1:
        # odd exponent: signs (and monotonicity) are preserved
        a = mpf_pow_int(sa, n, prec, round_floor)
        b = mpf_pow_int(sb, n, prec, round_ceiling)
    else:
        # even exponent: result must be nonnegative
        sas = mpf_sign(sa)
        sbs = mpf_sign(sb)
        if sas >= 0:
            # nonnegative interval
            a = mpf_pow_int(sa, n, prec, round_floor)
            b = mpf_pow_int(sb, n, prec, round_ceiling)
        elif sbs <= 0:
            # nonpositive interval
            a = mpf_pow_int(sb, n, prec, round_floor)
            b = mpf_pow_int(sa, n, prec, round_ceiling)
        else:
            # straddles zero: min 0, max = max(-sa, sb)^n
            a = fzero
            sa = mpf_neg(sa)
            if mpf_ge(sa, sb):
                b = mpf_pow_int(sa, n, prec, round_ceiling)
            else:
                b = mpf_pow_int(sb, n, prec, round_ceiling)
    return a, b
342
def mpi_pow(s, t, prec):
    """Raise interval s to interval power t (via exp(t*log(s)) in general)."""
    ta, tb = t
    if ta == tb and ta not in (finf, fninf):
        # Point exponent: use the faster exact-integer / sqrt paths
        if ta == from_int(to_int(ta)):
            return mpi_pow_int(s, to_int(ta), prec)
        if ta == fhalf:
            return mpi_sqrt(s, prec)
    logs = mpi_log(s, prec + 20)
    prod = mpi_mul(logs, t, prec + 20)
    return mpi_exp(prod, prec)
353
def MIN(x, y):
    """Return the smaller of two mpf values."""
    return x if mpf_le(x, y) else y
358
def MAX(x, y):
    """Return the larger of two mpf values."""
    return x if mpf_ge(x, y) else y
363
def cos_sin_quadrant(x, wp):
    """Return (cos x, sin x, n) where n indexes the quarter-period of x."""
    sign, man, exp, bc = x
    if x == fzero:
        return fone, fzero, 0
    # TODO: combine evaluation code to avoid duplicate modulo
    c, s = mpf_cos_sin(x, wp)
    _, n, _ = mod_pi2(man, exp, exp + bc, 15)
    if sign:
        # negative x: mirror the quadrant index
        n = -1 - n
    return c, s, n
374
def mpi_cos_sin(x, prec):
    """Return enclosing intervals (cos(x), sin(x)) for the interval x."""
    a, b = x
    if a == b == fzero:
        return (fone, fone), (fzero, fzero)
    if (finf in x) or (fninf in x):
        # Unbounded argument: both functions cover [-1, 1]
        return (fnone, fone), (fnone, fone)
    wp = prec + 20
    ca, sa, na = cos_sin_quadrant(a, wp)
    cb, sb, nb = cos_sin_quadrant(b, wp)
    ca, cb = mpf_min_max([ca, cb])
    sa, sb = mpf_min_max([sa, sb])
    if na == nb:
        # Same quarter-period: both functions monotonic, endpoints suffice
        pass
    elif nb - na >= 4:
        # Interval spans a full period: guaranteed to contain -1 and 1
        return (fnone, fone), (fnone, fone)
    else:
        if na//4 != nb//4:
            cb = fone    # cos attains its maximum between a and b
        if (na-2)//4 != (nb-2)//4:
            ca = fnone   # cos attains its minimum
        if (na-1)//4 != (nb-1)//4:
            sb = fone    # sin attains its maximum
        if (na-3)//4 != (nb-3)//4:
            sa = fnone   # sin attains its minimum
    # Perturbation factors slightly above/below 1, to force outward rounding
    more = from_man_exp((MPZ_ONE<<wp) + (MPZ_ONE<<10), -wp)
    less = from_man_exp((MPZ_ONE<<wp) - (MPZ_ONE<<10), -wp)
    def finalize(v, rounding):
        # Push v away from zero for the floor endpoint of a negative value
        # (and toward +inf for the ceiling endpoint of a positive one)
        if bool(v[0]) == (rounding == round_floor):
            p = more
        else:
            p = less
        v = mpf_mul(v, p, prec, rounding)
        sign, man, exp, bc = v
        if exp + bc >= 1:
            # Clamp to [-1, 1] if the perturbation overshot
            return fnone if sign else fone
        return v
    ca = finalize(ca, round_floor)
    cb = finalize(cb, round_ceiling)
    sa = finalize(sa, round_floor)
    sb = finalize(sb, round_ceiling)
    return (ca, cb), (sa, sb)
426
def mpi_cos(x, prec):
    """Cosine of an interval."""
    cos_part, _ = mpi_cos_sin(x, prec)
    return cos_part
429
def mpi_sin(x, prec):
    """Sine of an interval."""
    _, sin_part = mpi_cos_sin(x, prec)
    return sin_part
432
def mpi_tan(x, prec):
    """Tangent of an interval, as sin(x)/cos(x)."""
    c, s = mpi_cos_sin(x, prec + 20)
    return mpi_div(s, c, prec)
436
def mpi_cot(x, prec):
    """Cotangent of an interval, as cos(x)/sin(x)."""
    c, s = mpi_cos_sin(x, prec + 20)
    return mpi_div(c, s, prec)
440
def mpi_from_str_a_b(x, y, percent, prec):
    """Build the interval [x-y, x+y] (or x +- y% of |x| when percent=True)."""
    wp = prec + 20
    xa = from_str(x, wp, round_floor)
    xb = from_str(x, wp, round_ceiling)
    #ya = from_str(y, wp, round_floor)
    y = from_str(y, wp, round_ceiling)
    assert mpf_ge(y, fzero)
    if percent:
        # half-width = |x| * y / 100, rounded up
        y = mpf_mul(MAX(mpf_abs(xa), mpf_abs(xb)), y, wp, round_ceiling)
        y = mpf_div(y, from_int(100), wp, round_ceiling)
    lo = mpf_sub(xa, y, prec, round_floor)
    hi = mpf_add(xb, y, prec, round_ceiling)
    return lo, hi
454
def mpi_from_str(s, prec):
    """
    Parse an interval number given as a string.

    Allowed forms are

    "-1.23e-27"
        Any single decimal floating-point literal.
    "a +- b" or "a (b)"
        a is the midpoint of the interval and b is the half-width
    "a +- b%" or "a (b%)"
        a is the midpoint of the interval and the half-width
        is b percent of a (a*b/100).
    "[a, b]"
        The interval indicated directly.
    "x[y,z]e"
        x are shared digits, y and z are unequal digits, e is the exponent.

    Raises ValueError for an improperly formed string.
    """
    err = ValueError("Improperly formed interval number '%s'" % s)
    s = s.replace(" ", "")
    # case "a +- b"
    if "+-" in s:
        x, y = s.split("+-")
        return mpi_from_str_a_b(x, y, False, prec)
    # case "a (b)" / "a (b%)"
    elif "(" in s:
        # Don't confuse with a complex number (x,y)
        if s[0] == "(" or ")" not in s:
            raise err
        s = s.replace(")", "")
        percent = False
        if "%" in s:
            if s[-1] != "%":
                raise err
            percent = True
            s = s.replace("%", "")
        x, y = s.split("(")
        return mpi_from_str_a_b(x, y, percent, prec)
    elif "," in s:
        if ('[' not in s) or (']' not in s):
            raise err
        if s[0] == '[':
            # case "[a, b]"
            s = s.replace("[", "")
            s = s.replace("]", "")
            a, b = s.split(",")
            a = from_str(a, prec, round_floor)
            b = from_str(b, prec, round_ceiling)
            return a, b
        else:
            # case "x[y,z]e": shared digits x, differing digits y/z, exponent e
            x, y = s.split('[')
            y, z = y.split(',')
            if 'e' in s:
                z, expo = z.split(']')
            else:
                z, expo = z.rstrip(']'), ''
            a = from_str(x+y+expo, prec, round_floor)
            b = from_str(x+z+expo, prec, round_ceiling)
            return a, b
    else:
        # single point value: round both ways
        a = from_str(s, prec, round_floor)
        b = from_str(s, prec, round_ceiling)
        return a, b
520
def mpi_to_str(x, dps, use_spaces=True, brackets='[]', mode='brackets', error_dps=4, **kwargs):
    """
    Convert a mpi interval to a string.

    **Arguments**

    *dps*
        decimal places to use for printing
    *use_spaces*
        use spaces for more readable output, defaults to true
    *brackets*
        pair of strings (or two-character string) giving left and right brackets
    *mode*
        mode of display: 'plusminus', 'percent', 'brackets' (default) or 'diff'
    *error_dps*
        limit the error to *error_dps* digits (modes 'plusminus' and 'percent')

    Additional keyword arguments are forwarded to the mpf-to-string conversion
    for the components of the output.

    **Examples**

    >>> from mpmath import mpi, mp
    >>> mp.dps = 30
    >>> x = mpi(1, 2)._mpi_
    >>> mpi_to_str(x, 2, mode='plusminus')
    '1.5 +- 0.5'
    >>> mpi_to_str(x, 2, mode='percent')
    '1.5 (33.33%)'
    >>> mpi_to_str(x, 2, mode='brackets')
    '[1.0, 2.0]'
    >>> mpi_to_str(x, 2, mode='brackets' , brackets=('<', '>'))
    '<1.0, 2.0>'
    >>> x = mpi('5.2582327113062393041', '5.2582327113062749951')._mpi_
    >>> mpi_to_str(x, 15, mode='diff')
    '5.2582327113062[4, 7]'
    >>> mpi_to_str(mpi(0)._mpi_, 2, mode='percent')
    '0.0 (0.0%)'

    """
    prec = dps_to_prec(dps)
    wp = prec + 20
    a, b = x
    mid = mpi_mid(x, prec)
    delta = mpi_delta(x, prec)
    a_str = to_str(a, dps, **kwargs)
    b_str = to_str(b, dps, **kwargs)
    mid_str = to_str(mid, dps, **kwargs)
    sp = " " if use_spaces else ""
    lb, rb = brackets
    if mode == 'plusminus':
        # midpoint +- half of the diameter
        delta_str = to_str(mpf_shift(delta, -1), dps, **kwargs)
        s = mid_str + sp + "+-" + sp + delta_str
    elif mode == 'percent':
        if mid == fzero:
            p = fzero
        else:
            # p = 100 * delta(x) / (2*mid(x))
            p = mpf_mul(delta, from_int(100))
            p = mpf_div(p, mpf_mul(mid, from_int(2)), wp)
        s = mid_str + sp + "(" + to_str(p, error_dps) + "%)"
    elif mode == 'brackets':
        s = lb + a_str + "," + sp + b_str + rb
    elif mode == 'diff':
        # use more digits if str(x.a) and str(x.b) are equal
        if a_str == b_str:
            a_str = to_str(a, dps+3, **kwargs)
            b_str = to_str(b, dps+3, **kwargs)
        # separate mantissa and exponent
        a = a_str.split('e')
        if len(a) == 1:
            a.append('')
        b = b_str.split('e')
        if len(b) == 1:
            b.append('')
        if a[1] == b[1]:
            if a[0] != b[0]:
                # find the first differing mantissa digit
                for i in xrange(len(a[0]) + 1):
                    if a[0][i] != b[0][i]:
                        break
                s = (a[0][:i] + lb + a[0][i:] + ',' + sp + b[0][i:] + rb
                     + 'e'*min(len(a[1]), 1) + a[1])
            else:  # no difference
                s = a[0] + lb + rb + 'e'*min(len(a[1]), 1) + a[1]
        else:
            s = lb + 'e'.join(a) + ',' + sp + 'e'.join(b) + rb
    else:
        raise ValueError("'%s' is unknown mode for printing mpi" % mode)
    return s
612
def mpci_add(x, y, prec):
    """Add two complex intervals componentwise."""
    xre, xim = x
    yre, yim = y
    return mpi_add(xre, yre, prec), mpi_add(xim, yim, prec)
617
def mpci_sub(x, y, prec):
    """Subtract two complex intervals componentwise."""
    xre, xim = x
    yre, yim = y
    return mpi_sub(xre, yre, prec), mpi_sub(xim, yim, prec)
622
def mpci_neg(x, prec=0):
    """Negate a complex interval componentwise."""
    re, im = x
    return mpi_neg(re, prec), mpi_neg(im, prec)
626
def mpci_pos(x, prec):
    """Round a complex interval to the given precision (identity map)."""
    re, im = x
    return mpi_pos(re, prec), mpi_pos(im, prec)
630
def mpci_mul(x, y, prec):
    """Multiply complex intervals: (a+bi)(c+di) = (ac-bd) + (ad+bc)i."""
    # TODO: optimize for real/imag cases
    a, b = x
    c, d = y
    re = mpi_sub(mpi_mul(a, c), mpi_mul(b, d), prec)
    im = mpi_add(mpi_mul(a, d), mpi_mul(b, c), prec)
    return re, im
642
def mpci_div(x, y, prec):
    """Divide complex intervals via (a+bi)/(c+di) = ((ac+bd) + (bc-ad)i) / (c^2+d^2)."""
    # TODO: optimize for real/imag cases
    a, b = x
    c, d = y
    wp = prec + 20
    # denominator |y|^2 = c^2 + d^2 (nonnegative by construction)
    denom = mpi_add(mpi_square(c), mpi_square(d), wp)
    re = mpi_add(mpi_mul(a, c), mpi_mul(b, d), wp)
    im = mpi_sub(mpi_mul(b, c), mpi_mul(a, d), wp)
    return mpi_div(re, denom, prec), mpi_div(im, denom, prec)
656
def mpci_exp(x, prec):
    """Complex exponential: exp(a+bi) = exp(a)*(cos b + i sin b)."""
    re, im = x
    wp = prec + 20
    radius = mpi_exp(re, wp)
    c, s = mpi_cos_sin(im, wp)
    return mpi_mul(radius, c, prec), mpi_mul(radius, s, prec)
665
def mpi_shift(x, n):
    """Multiply an interval by 2^n (exact binary shift of both endpoints)."""
    lo, hi = x
    return mpf_shift(lo, n), mpf_shift(hi, n)
669
def mpi_cosh_sinh(x, prec):
    """Return (cosh x, sinh x) via (e^x +- e^-x)/2."""
    # TODO: accuracy for small x
    wp = prec + 20
    ex = mpi_exp(x, wp)
    emx = mpi_div(mpi_one, ex, wp)
    c = mpi_shift(mpi_add(ex, emx, prec), -1)
    s = mpi_shift(mpi_sub(ex, emx, prec), -1)
    return c, s
680
def mpci_cos(x, prec):
    """Complex cosine: cos(a+bi) = cos(a)cosh(b) - i sin(a)sinh(b)."""
    a, b = x
    wp = prec + 10
    c, s = mpi_cos_sin(a, wp)
    ch, sh = mpi_cosh_sinh(b, wp)
    re = mpi_mul(c, ch, prec)
    im = mpi_mul(s, sh, prec)
    return re, mpi_neg(im)
689
def mpci_sin(x, prec):
    """Complex sine: sin(a+bi) = sin(a)cosh(b) + i cos(a)sinh(b)."""
    a, b = x
    wp = prec + 10
    c, s = mpi_cos_sin(a, wp)
    ch, sh = mpi_cosh_sinh(b, wp)
    return mpi_mul(s, ch, prec), mpi_mul(c, sh, prec)
698
def mpci_abs(x, prec):
    """Modulus of a complex interval: sqrt(re^2 + im^2)."""
    re, im = x
    # Purely imaginary / purely real shortcuts
    if re == mpi_zero:
        return mpi_abs(im)
    if im == mpi_zero:
        return mpi_abs(re)
    # mpi_square guarantees nonnegative operands for the sqrt
    total = mpi_add(mpi_square(re), mpi_square(im), prec + 20)
    return mpi_sqrt(total, prec)
710
def mpi_atan2(y, x, prec):
    """Interval atan2: enclosure of the argument of the box x + iy."""
    ya, yb = y
    xa, xb = x
    if ya == yb == fzero:
        # Constrained to the real line: angle is 0 or pi
        if mpf_ge(xa, fzero):
            return mpi_zero
        return mpi_pi(prec)
    if mpf_ge(xa, fzero):
        # Right half-plane
        if mpf_ge(ya, fzero):
            a = mpf_atan2(ya, xb, prec, round_floor)
        else:
            a = mpf_atan2(ya, xa, prec, round_floor)
        if mpf_ge(yb, fzero):
            b = mpf_atan2(yb, xa, prec, round_ceiling)
        else:
            b = mpf_atan2(yb, xb, prec, round_ceiling)
    elif mpf_ge(ya, fzero):
        # Upper half-plane
        b = mpf_atan2(ya, xa, prec, round_ceiling)
        if mpf_le(xb, fzero):
            a = mpf_atan2(yb, xb, prec, round_floor)
        else:
            a = mpf_atan2(ya, xb, prec, round_floor)
    elif mpf_le(yb, fzero):
        # Lower half-plane
        a = mpf_atan2(yb, xa, prec, round_floor)
        if mpf_le(xb, fzero):
            b = mpf_atan2(ya, xb, prec, round_ceiling)
        else:
            b = mpf_atan2(yb, xb, prec, round_ceiling)
    else:
        # Box covers the origin: angle can be anything in (-pi, pi]
        b = mpf_pi(prec, round_ceiling)
        a = mpf_neg(b)
    return a, b
748
def mpci_arg(z, prec):
    """Argument (phase) of a complex interval."""
    re, im = z
    return mpi_atan2(im, re, prec)
752
def mpci_log(z, prec):
    """Complex logarithm: log|z| + i*arg(z)."""
    re = mpi_log(mpci_abs(z, prec + 20), prec)
    im = mpci_arg(z, prec)
    return re, im
758
def mpci_pow(x, y, prec):
    """Complex interval power x**y, via exp(y*log(x)) in the general case."""
    # TODO: recognize/speed up real cases, integer y
    yre, yim = y
    if yim == mpi_zero:
        ya, yb = yre
        if ya == yb:
            sign, man, exp, bc = yb
            if man and exp >= 0:
                # exact (nonzero) integer exponent
                return mpci_pow_int(x, (-1)**sign * int(man<<exp), prec)
            if yb == fzero:
                # x^0
                return mpci_pow_int(x, 0, prec)
    wp = prec + 20
    return mpci_exp(mpci_mul(y, mpci_log(x, wp), wp), prec)
773
def mpci_square(x, prec):
    """Square a complex interval: (a+bi)^2 = (a^2-b^2) + 2abi."""
    a, b = x
    re = mpi_sub(mpi_square(a), mpi_square(b), prec)
    im = mpi_shift(mpi_mul(a, b, prec), 1)
    return re, im
781
def mpci_pow_int(x, n, prec):
    """Raise a complex interval to the integer power n (binary exponentiation)."""
    if n < 0:
        return mpci_div((mpi_one, mpi_zero), mpci_pow_int(x, -n, prec+20), prec)
    if n == 0:
        return mpi_one, mpi_zero
    if n == 1:
        return mpci_pos(x, prec)
    if n == 2:
        return mpci_square(x, prec)
    wp = prec + 20
    result = (mpi_one, mpi_zero)
    # square-and-multiply at working precision
    while n:
        if n & 1:
            result = mpci_mul(result, x, wp)
            n -= 1
        x = mpci_square(x, wp)
        n >>= 1
    return mpci_pos(result, prec)
800
# Bracket of the location of gamma's positive real minimum, x ~= 1.4616...
gamma_min_a = from_float(1.46163214496)
gamma_min_b = from_float(1.46163214497)
gamma_min = (gamma_min_a, gamma_min_b)
# Imaginary band around the real axis inside which gamma is not treated
# as monotonic (used by mpci_gamma to decide when to recurse)
gamma_mono_imag_a = from_float(-1.1)
gamma_mono_imag_b = from_float(1.1)
806
def mpi_overlap(x, y):
    """True if the intervals x and y intersect."""
    a, b = x
    c, d = y
    # Disjoint iff one interval lies entirely to one side of the other
    if mpf_lt(d, a):
        return False
    if mpf_gt(c, b):
        return False
    return True
813
# type = 0 -- gamma
# type = 1 -- factorial
# type = 2 -- 1/gamma
# type = 3 -- log-gamma
818
def mpi_gamma(z, prec, type=0):
    """Gamma-family function of a real interval.

    type selects the variant: 0 = gamma, 1 = factorial, 2 = 1/gamma,
    3 = log-gamma.
    """
    a, b = z
    wp = prec + 20
    if type == 1:
        # z! = gamma(z+1)
        return mpi_gamma(mpi_add(z, mpi_one, wp), prec, 0)
    if mpf_gt(a, gamma_min_b):
        # To the right of the minimum: increasing
        if type == 0:
            c = mpf_gamma(a, prec, round_floor)
            d = mpf_gamma(b, prec, round_ceiling)
        elif type == 2:
            c = mpf_rgamma(b, prec, round_floor)
            d = mpf_rgamma(a, prec, round_ceiling)
        elif type == 3:
            c = mpf_loggamma(a, prec, round_floor)
            d = mpf_loggamma(b, prec, round_ceiling)
    elif mpf_gt(a, fzero) and mpf_lt(b, gamma_min_a):
        # Between 0 and the minimum: decreasing
        if type == 0:
            c = mpf_gamma(b, prec, round_floor)
            d = mpf_gamma(a, prec, round_ceiling)
        elif type == 2:
            c = mpf_rgamma(a, prec, round_floor)
            d = mpf_rgamma(b, prec, round_ceiling)
        elif type == 3:
            c = mpf_loggamma(b, prec, round_floor)
            d = mpf_loggamma(a, prec, round_ceiling)
    else:
        # Non-monotonic region: recurse via gamma(z) = gamma(z+1)/z
        # TODO: reflection formula
        znew = mpi_add(z, mpi_one, wp)
        if type == 0: return mpi_div(mpi_gamma(znew, prec+2, 0), z, prec)
        if type == 2: return mpi_mul(mpi_gamma(znew, prec+2, 2), z, prec)
        if type == 3: return mpi_sub(mpi_gamma(znew, prec+2, 3), mpi_log(z, prec+2), prec)
    return c, d
855
def mpci_gamma(z, prec, type=0):
    """Gamma-family function of a complex interval (see mpi_gamma for type)."""
    (a1, a2), (b1, b2) = z

    # Purely real argument: defer to the real implementation
    # (log-gamma only when strictly in the right half-line)
    if b1 == b2 == fzero and (type != 3 or mpf_gt(a1, fzero)):
        return mpi_gamma(z, prec, type), mpi_zero

    # Estimate extra working precision from the magnitude of gamma
    wp = prec + 20
    if type != 3:
        amag = a2[2] + a2[3]
        bmag = b2[2] + b2[3]
        if a2 != fzero:
            mag = max(amag, bmag)
        else:
            mag = bmag
        an = abs(to_int(a2))
        bn = abs(to_int(b2))
        absn = max(an, bn)
        gamma_size = max(0, absn*mag)
        wp += bitcount(gamma_size)

    # Reduce factorial to gamma so the rest can assume type != 1
    if type == 1:
        (a1, a2) = mpi_add((a1, a2), mpi_one, wp); z = (a1, a2), (b1, b2)
        type = 0

    # Avoid the non-monotonic region near the negative real axis by
    # recursing with gamma(z) = gamma(z+1)/z
    if mpf_lt(a1, gamma_min_b):
        if mpi_overlap((b1, b2), (gamma_mono_imag_a, gamma_mono_imag_b)):
            # TODO: reflection formula
            #if mpf_lt(a2, mpf_shift(fone,-1)):
            #  znew = mpci_sub((mpi_one,mpi_zero),z,wp)
            #  ...
            znew = mpi_add((a1, a2), mpi_one, wp), (b1, b2)
            if type == 0: return mpci_div(mpci_gamma(znew, prec+2, 0), z, prec)
            if type == 2: return mpci_mul(mpci_gamma(znew, prec+2, 2), z, prec)
            if type == 3: return mpci_sub(mpci_gamma(znew, prec+2, 3), mpci_log(z, prec+2), prec)

    # Use monotonicity of log-gamma over the box (valid away from the
    # small region near the origin and the poles, excluded above)
    if mpf_ge(b1, fzero):
        # upper half-plane
        minre = mpc_loggamma((a1, b2), wp, round_floor)
        maxre = mpc_loggamma((a2, b1), wp, round_ceiling)
        minim = mpc_loggamma((a1, b1), wp, round_floor)
        maxim = mpc_loggamma((a2, b2), wp, round_ceiling)
    elif mpf_le(b2, fzero):
        # lower half-plane
        minre = mpc_loggamma((a1, b1), wp, round_floor)
        maxre = mpc_loggamma((a2, b2), wp, round_ceiling)
        minim = mpc_loggamma((a2, b1), wp, round_floor)
        maxim = mpc_loggamma((a1, b2), wp, round_ceiling)
    else:
        # box crosses the real axis
        maxre = mpc_loggamma((a2, fzero), wp, round_ceiling)
        # pick the corner that stretches further from the axis
        if mpf_gt(mpf_neg(b1), b2):
            minre = mpc_loggamma((a1, b1), wp, round_ceiling)
        else:
            minre = mpc_loggamma((a1, b2), wp, round_ceiling)
        minim = mpc_loggamma((a2, b1), wp, round_floor)
        maxim = mpc_loggamma((a2, b2), wp, round_floor)

    w = (minre[0], maxre[0]), (minim[1], maxim[1])
    if type == 3:
        return mpi_pos(w[0], prec), mpi_pos(w[1], prec)
    if type == 2:
        # 1/gamma = exp(-loggamma)
        w = mpci_neg(w)
    return mpci_exp(w, prec)
928
def mpi_loggamma(z, prec):
    """Log-gamma of a real interval."""
    return mpi_gamma(z, prec, type=3)

def mpci_loggamma(z, prec):
    """Log-gamma of a complex interval."""
    return mpci_gamma(z, prec, type=3)

def mpi_rgamma(z, prec):
    """Reciprocal gamma of a real interval."""
    return mpi_gamma(z, prec, type=2)

def mpci_rgamma(z, prec):
    """Reciprocal gamma of a complex interval."""
    return mpci_gamma(z, prec, type=2)

def mpi_factorial(z, prec):
    """Factorial of a real interval."""
    return mpi_gamma(z, prec, type=1)

def mpci_factorial(z, prec):
    """Factorial of a complex interval."""
    return mpci_gamma(z, prec, type=1)
venv/lib/python3.10/site-packages/mpmath/matrices/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (251 Bytes). View file
 
venv/lib/python3.10/site-packages/mpmath/matrices/__pycache__/calculus.cpython-310.pyc ADDED
Binary file (17.7 kB). View file
 
venv/lib/python3.10/site-packages/mpmath/matrices/__pycache__/eigen.cpython-310.pyc ADDED
Binary file (18.8 kB). View file
 
venv/lib/python3.10/site-packages/mpmath/matrices/__pycache__/eigen_symmetric.cpython-310.pyc ADDED
Binary file (43.8 kB). View file