diff --git a/ckpts/universal/global_step80/zero/20.mlp.dense_h_to_4h.weight/exp_avg.pt b/ckpts/universal/global_step80/zero/20.mlp.dense_h_to_4h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..2324ac109760e2d671888cb9d47ee2366c15d342 --- /dev/null +++ b/ckpts/universal/global_step80/zero/20.mlp.dense_h_to_4h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22fca2a333686e5de2f3b3e80533256452bbbd17bc1b55791ab9b55c33b9b53c +size 33555612 diff --git a/venv/lib/python3.10/site-packages/sympy/ntheory/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sympy/ntheory/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a56f17a1626b3ed21331288b6ca22c65340580ee Binary files /dev/null and b/venv/lib/python3.10/site-packages/sympy/ntheory/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sympy/ntheory/__pycache__/continued_fraction.cpython-310.pyc b/venv/lib/python3.10/site-packages/sympy/ntheory/__pycache__/continued_fraction.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f19a2f7065cc8fcce3923dce4fa1c0d191ce8d7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sympy/ntheory/__pycache__/continued_fraction.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sympy/ntheory/__pycache__/factor_.cpython-310.pyc b/venv/lib/python3.10/site-packages/sympy/ntheory/__pycache__/factor_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9daeb55c308cc5557c91e199abfd741ce1a9a5bf Binary files /dev/null and b/venv/lib/python3.10/site-packages/sympy/ntheory/__pycache__/factor_.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sympy/ntheory/__pycache__/generate.cpython-310.pyc b/venv/lib/python3.10/site-packages/sympy/ntheory/__pycache__/generate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21444054a0380ced9e68e1b710528425ecd77fe7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sympy/ntheory/__pycache__/generate.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sympy/ntheory/__pycache__/modular.cpython-310.pyc b/venv/lib/python3.10/site-packages/sympy/ntheory/__pycache__/modular.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5be0fe26333292763dbbed6882ab924ca9381d37 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sympy/ntheory/__pycache__/modular.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sympy/ntheory/__pycache__/primetest.cpython-310.pyc b/venv/lib/python3.10/site-packages/sympy/ntheory/__pycache__/primetest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b02235f747fa5f06b7add7753b2c6064829f249 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sympy/ntheory/__pycache__/primetest.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sympy/ntheory/tests/__pycache__/test_ecm.cpython-310.pyc b/venv/lib/python3.10/site-packages/sympy/ntheory/tests/__pycache__/test_ecm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f145ca3cce6b251f1f2acb5080bb79ee7029436 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sympy/ntheory/tests/__pycache__/test_ecm.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/sympy/ntheory/tests/__pycache__/test_elliptic_curve.cpython-310.pyc b/venv/lib/python3.10/site-packages/sympy/ntheory/tests/__pycache__/test_elliptic_curve.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92b87e1169de7b5e1f0e9f401011117b50f0a140 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sympy/ntheory/tests/__pycache__/test_elliptic_curve.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sympy/ntheory/tests/__pycache__/test_modular.cpython-310.pyc b/venv/lib/python3.10/site-packages/sympy/ntheory/tests/__pycache__/test_modular.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2affdf3f21a065f69d1e8f343b8862408b8cf975 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sympy/ntheory/tests/__pycache__/test_modular.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sympy/ntheory/tests/__pycache__/test_partitions.cpython-310.pyc b/venv/lib/python3.10/site-packages/sympy/ntheory/tests/__pycache__/test_partitions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..732779e286d5132a1f7265836929dd3976353afd Binary files /dev/null and b/venv/lib/python3.10/site-packages/sympy/ntheory/tests/__pycache__/test_partitions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sympy/ntheory/tests/__pycache__/test_primetest.cpython-310.pyc b/venv/lib/python3.10/site-packages/sympy/ntheory/tests/__pycache__/test_primetest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..642eb994b2aeec94b29ca7b0e51da0ff9b8e8a9b Binary files /dev/null and b/venv/lib/python3.10/site-packages/sympy/ntheory/tests/__pycache__/test_primetest.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/__init__.py b/venv/lib/python3.10/site-packages/sympy/utilities/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8f35da4a84396618a33a12c3c6b5cf58e9d4742c --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/__init__.py @@ -0,0 +1,30 @@ +"""This module contains some general purpose utilities that are used across +SymPy. +""" +from .iterables import (flatten, group, take, subsets, + variations, numbered_symbols, cartes, capture, dict_merge, + prefixes, postfixes, sift, topological_sort, unflatten, + has_dups, has_variety, reshape, rotations) + +from .misc import filldedent + +from .lambdify import lambdify + +from .decorator import threaded, xthreaded, public, memoize_property + +from .timeutils import timed + +__all__ = [ + 'flatten', 'group', 'take', 'subsets', 'variations', 'numbered_symbols', + 'cartes', 'capture', 'dict_merge', 'prefixes', 'postfixes', 'sift', + 'topological_sort', 'unflatten', 'has_dups', 'has_variety', 'reshape', + 'rotations', + + 'filldedent', + + 'lambdify', + + 'threaded', 'xthreaded', 'public', 'memoize_property', + + 'timed', +] diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/autowrap.py b/venv/lib/python3.10/site-packages/sympy/utilities/autowrap.py new file mode 100644 index 0000000000000000000000000000000000000000..566fd2ee21728ab935f1ae7332aa590f8cf01d24 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/autowrap.py @@ -0,0 +1,1129 @@ +"""Module for compiling codegen output, and wrap the binary for use in +python. + +.. 
note:: To use the autowrap module it must first be imported + + >>> from sympy.utilities.autowrap import autowrap + +This module provides a common interface for different external backends, such +as f2py, fwrap, Cython, SWIG(?) etc. (Currently only f2py and Cython are +implemented) The goal is to provide access to compiled binaries of acceptable +performance with a one-button user interface, e.g., + + >>> from sympy.abc import x,y + >>> expr = (x - y)**25 + >>> flat = expr.expand() + >>> binary_callable = autowrap(flat) + >>> binary_callable(2, 3) + -1.0 + +Although a SymPy user might primarily be interested in working with +mathematical expressions and not in the details of wrapping tools +needed to evaluate such expressions efficiently in numerical form, +the user cannot do so without some understanding of the +limits in the target language. For example, the expanded expression +contains large coefficients which result in loss of precision when +computing the expression: + + >>> binary_callable(3, 2) + 0.0 + >>> binary_callable(4, 5), binary_callable(5, 4) + (-22925376.0, 25165824.0) + +Wrapping the unexpanded expression gives the expected behavior: + + >>> e = autowrap(expr) + >>> e(4, 5), e(5, 4) + (-1.0, 1.0) + +The callable returned from autowrap() is a binary Python function, not a +SymPy object. If it is desired to use the compiled function in symbolic +expressions, it is better to use binary_function() which returns a SymPy +Function object. The binary callable is attached as the _imp_ attribute and +invoked when a numerical evaluation is requested with evalf(), or with +lambdify(). + + >>> from sympy.utilities.autowrap import binary_function + >>> f = binary_function('f', expr) + >>> 2*f(x, y) + y + y + 2*f(x, y) + >>> (2*f(x, y) + y).evalf(2, subs={x: 1, y:2}) + 0.e-110 + +When is this useful? + + 1) For computations on large arrays, Python iterations may be too slow, + and depending on the mathematical expression, it may be difficult to + exploit the advanced index operations provided by NumPy. + + 2) For *really* long expressions that will be called repeatedly, the + compiled binary should be significantly faster than SymPy's .evalf() + + 3) If you are generating code with the codegen utility in order to use + it in another project, the automatic Python wrappers let you test the + binaries immediately from within SymPy. + + 4) To create customized ufuncs for use with numpy arrays. + See *ufuncify*. + +When is this module NOT the best approach? + + 1) If you are really concerned about speed or memory optimizations, + you will probably get better results by working directly with the + wrapper tools and the low level code. However, the files generated + by this utility may provide a useful starting point and reference + code. Temporary files will be left intact if you supply the keyword + tempdir="path/to/files/". + + 2) If the array computation can be handled easily by numpy, and you + do not need the binaries for another project. 
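For illustration, a short sketch of choosing the backend and keeping the
generated files (doctest-style; it assumes Cython and a working C compiler,
so the calls are skipped, and the output directory name is arbitrary):

>>> from sympy.utilities.autowrap import autowrap
>>> from sympy.abc import x, y
>>> fn = autowrap(x**2 + y, backend='cython',
...               tempdir='./autowrap_out')     # doctest: +SKIP
>>> fn(2, 3)                                    # doctest: +SKIP
7.0

Because ``tempdir`` is supplied, the generated C, Cython and setup files are
left intact in ``./autowrap_out`` for inspection or reuse in other projects.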
+ +""" + +import sys +import os +import shutil +import tempfile +from subprocess import STDOUT, CalledProcessError, check_output +from string import Template +from warnings import warn + +from sympy.core.cache import cacheit +from sympy.core.function import Lambda +from sympy.core.relational import Eq +from sympy.core.symbol import Dummy, Symbol +from sympy.tensor.indexed import Idx, IndexedBase +from sympy.utilities.codegen import (make_routine, get_code_generator, + OutputArgument, InOutArgument, + InputArgument, CodeGenArgumentListError, + Result, ResultBase, C99CodeGen) +from sympy.utilities.iterables import iterable +from sympy.utilities.lambdify import implemented_function +from sympy.utilities.decorator import doctest_depends_on + +_doctest_depends_on = {'exe': ('f2py', 'gfortran', 'gcc'), + 'modules': ('numpy',)} + + +class CodeWrapError(Exception): + pass + + +class CodeWrapper: + """Base Class for code wrappers""" + _filename = "wrapped_code" + _module_basename = "wrapper_module" + _module_counter = 0 + + @property + def filename(self): + return "%s_%s" % (self._filename, CodeWrapper._module_counter) + + @property + def module_name(self): + return "%s_%s" % (self._module_basename, CodeWrapper._module_counter) + + def __init__(self, generator, filepath=None, flags=[], verbose=False): + """ + generator -- the code generator to use + """ + self.generator = generator + self.filepath = filepath + self.flags = flags + self.quiet = not verbose + + @property + def include_header(self): + return bool(self.filepath) + + @property + def include_empty(self): + return bool(self.filepath) + + def _generate_code(self, main_routine, routines): + routines.append(main_routine) + self.generator.write( + routines, self.filename, True, self.include_header, + self.include_empty) + + def wrap_code(self, routine, helpers=None): + helpers = helpers or [] + if self.filepath: + workdir = os.path.abspath(self.filepath) + else: + workdir = tempfile.mkdtemp("_sympy_compile") + if not os.access(workdir, os.F_OK): + os.mkdir(workdir) + oldwork = os.getcwd() + os.chdir(workdir) + try: + sys.path.append(workdir) + self._generate_code(routine, helpers) + self._prepare_files(routine) + self._process_files(routine) + mod = __import__(self.module_name) + finally: + sys.path.remove(workdir) + CodeWrapper._module_counter += 1 + os.chdir(oldwork) + if not self.filepath: + try: + shutil.rmtree(workdir) + except OSError: + # Could be some issues on Windows + pass + + return self._get_wrapped_function(mod, routine.name) + + def _process_files(self, routine): + command = self.command + command.extend(self.flags) + try: + retoutput = check_output(command, stderr=STDOUT) + except CalledProcessError as e: + raise CodeWrapError( + "Error while executing command: %s. 
Command output is:\n%s" % ( + " ".join(command), e.output.decode('utf-8'))) + if not self.quiet: + print(retoutput) + + +class DummyWrapper(CodeWrapper): + """Class used for testing independent of backends """ + + template = """# dummy module for testing of SymPy +def %(name)s(): + return "%(expr)s" +%(name)s.args = "%(args)s" +%(name)s.returns = "%(retvals)s" +""" + + def _prepare_files(self, routine): + return + + def _generate_code(self, routine, helpers): + with open('%s.py' % self.module_name, 'w') as f: + printed = ", ".join( + [str(res.expr) for res in routine.result_variables]) + # convert OutputArguments to return value like f2py + args = filter(lambda x: not isinstance( + x, OutputArgument), routine.arguments) + retvals = [] + for val in routine.result_variables: + if isinstance(val, Result): + retvals.append('nameless') + else: + retvals.append(val.result_var) + + print(DummyWrapper.template % { + 'name': routine.name, + 'expr': printed, + 'args': ", ".join([str(a.name) for a in args]), + 'retvals': ", ".join([str(val) for val in retvals]) + }, end="", file=f) + + def _process_files(self, routine): + return + + @classmethod + def _get_wrapped_function(cls, mod, name): + return getattr(mod, name) + + +class CythonCodeWrapper(CodeWrapper): + """Wrapper that uses Cython""" + + setup_template = """\ +from setuptools import setup +from setuptools import Extension +from Cython.Build import cythonize +cy_opts = {cythonize_options} +{np_import} +ext_mods = [Extension( + {ext_args}, + include_dirs={include_dirs}, + library_dirs={library_dirs}, + libraries={libraries}, + extra_compile_args={extra_compile_args}, + extra_link_args={extra_link_args} +)] +setup(ext_modules=cythonize(ext_mods, **cy_opts)) +""" + + _cythonize_options = {'compiler_directives':{'language_level' : "3"}} + + pyx_imports = ( + "import numpy as np\n" + "cimport numpy as np\n\n") + + pyx_header = ( + "cdef extern from '{header_file}.h':\n" + " {prototype}\n\n") + + pyx_func = ( + "def {name}_c({arg_string}):\n" + "\n" + "{declarations}" + "{body}") + + std_compile_flag = '-std=c99' + + def __init__(self, *args, **kwargs): + """Instantiates a Cython code wrapper. + + The following optional parameters get passed to ``setuptools.Extension`` + for building the Python extension module. Read its documentation to + learn more. + + Parameters + ========== + include_dirs : [list of strings] + A list of directories to search for C/C++ header files (in Unix + form for portability). + library_dirs : [list of strings] + A list of directories to search for C/C++ libraries at link time. + libraries : [list of strings] + A list of library names (not filenames or paths) to link against. + extra_compile_args : [list of strings] + Any extra platform- and compiler-specific information to use when + compiling the source files in 'sources'. For platforms and + compilers where "command line" makes sense, this is typically a + list of command-line arguments, but for other platforms it could be + anything. Note that the attribute ``std_compile_flag`` will be + appended to this list. + extra_link_args : [list of strings] + Any extra platform- and compiler-specific information to use when + linking object files together to create the extension (or to create + a new static Python interpreter). Similar interpretation as for + 'extra_compile_args'. + cythonize_options : [dictionary] + Keyword arguments passed on to cythonize. 
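These options are normally supplied through ``autowrap`` rather than by
instantiating the wrapper directly; an illustrative sketch (the include and
library paths and the library name are placeholders, and Cython plus a C
compiler are assumed):

>>> from sympy.utilities.autowrap import autowrap
>>> from sympy.abc import x
>>> fn = autowrap(x**2, backend='cython',
...               include_dirs=['/usr/local/include'],
...               library_dirs=['/usr/local/lib'],
...               libraries=['m'],
...               extra_compile_args=['-O3'])   # doctest: +SKIP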
+ + """ + + self._include_dirs = kwargs.pop('include_dirs', []) + self._library_dirs = kwargs.pop('library_dirs', []) + self._libraries = kwargs.pop('libraries', []) + self._extra_compile_args = kwargs.pop('extra_compile_args', []) + self._extra_compile_args.append(self.std_compile_flag) + self._extra_link_args = kwargs.pop('extra_link_args', []) + self._cythonize_options = kwargs.pop('cythonize_options', self._cythonize_options) + + self._need_numpy = False + + super().__init__(*args, **kwargs) + + @property + def command(self): + command = [sys.executable, "setup.py", "build_ext", "--inplace"] + return command + + def _prepare_files(self, routine, build_dir=os.curdir): + # NOTE : build_dir is used for testing purposes. + pyxfilename = self.module_name + '.pyx' + codefilename = "%s.%s" % (self.filename, self.generator.code_extension) + + # pyx + with open(os.path.join(build_dir, pyxfilename), 'w') as f: + self.dump_pyx([routine], f, self.filename) + + # setup.py + ext_args = [repr(self.module_name), repr([pyxfilename, codefilename])] + if self._need_numpy: + np_import = 'import numpy as np\n' + self._include_dirs.append('np.get_include()') + else: + np_import = '' + + with open(os.path.join(build_dir, 'setup.py'), 'w') as f: + includes = str(self._include_dirs).replace("'np.get_include()'", + 'np.get_include()') + f.write(self.setup_template.format( + ext_args=", ".join(ext_args), + np_import=np_import, + include_dirs=includes, + library_dirs=self._library_dirs, + libraries=self._libraries, + extra_compile_args=self._extra_compile_args, + extra_link_args=self._extra_link_args, + cythonize_options=self._cythonize_options + )) + + @classmethod + def _get_wrapped_function(cls, mod, name): + return getattr(mod, name + '_c') + + def dump_pyx(self, routines, f, prefix): + """Write a Cython file with Python wrappers + + This file contains all the definitions of the routines in c code and + refers to the header file. + + Arguments + --------- + routines + List of Routine instances + f + File-like object to write the file to + prefix + The filename prefix, used to refer to the proper header file. + Only the basename of the prefix is used. 
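A usage sketch (illustrative only; normally ``wrap_code`` drives this
method, and here the wrapper source is written to an in-memory buffer
instead of a file):

>>> from io import StringIO
>>> from sympy.utilities.autowrap import CythonCodeWrapper
>>> from sympy.utilities.codegen import C99CodeGen
>>> from sympy.abc import x, y
>>> gen = C99CodeGen()
>>> wrapper = CythonCodeWrapper(gen)
>>> buf = StringIO()
>>> wrapper.dump_pyx([gen.routine('autofunc', x + y)], buf, 'wrapped_code')
>>> print(buf.getvalue())                       # doctest: +SKIP
cdef extern from 'wrapped_code.h':
    double autofunc(double x, double y)
...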
+ """ + headers = [] + functions = [] + for routine in routines: + prototype = self.generator.get_prototype(routine) + + # C Function Header Import + headers.append(self.pyx_header.format(header_file=prefix, + prototype=prototype)) + + # Partition the C function arguments into categories + py_rets, py_args, py_loc, py_inf = self._partition_args(routine.arguments) + + # Function prototype + name = routine.name + arg_string = ", ".join(self._prototype_arg(arg) for arg in py_args) + + # Local Declarations + local_decs = [] + for arg, val in py_inf.items(): + proto = self._prototype_arg(arg) + mat, ind = [self._string_var(v) for v in val] + local_decs.append(" cdef {} = {}.shape[{}]".format(proto, mat, ind)) + local_decs.extend([" cdef {}".format(self._declare_arg(a)) for a in py_loc]) + declarations = "\n".join(local_decs) + if declarations: + declarations = declarations + "\n" + + # Function Body + args_c = ", ".join([self._call_arg(a) for a in routine.arguments]) + rets = ", ".join([self._string_var(r.name) for r in py_rets]) + if routine.results: + body = ' return %s(%s)' % (routine.name, args_c) + if rets: + body = body + ', ' + rets + else: + body = ' %s(%s)\n' % (routine.name, args_c) + body = body + ' return ' + rets + + functions.append(self.pyx_func.format(name=name, arg_string=arg_string, + declarations=declarations, body=body)) + + # Write text to file + if self._need_numpy: + # Only import numpy if required + f.write(self.pyx_imports) + f.write('\n'.join(headers)) + f.write('\n'.join(functions)) + + def _partition_args(self, args): + """Group function arguments into categories.""" + py_args = [] + py_returns = [] + py_locals = [] + py_inferred = {} + for arg in args: + if isinstance(arg, OutputArgument): + py_returns.append(arg) + py_locals.append(arg) + elif isinstance(arg, InOutArgument): + py_returns.append(arg) + py_args.append(arg) + else: + py_args.append(arg) + # Find arguments that are array dimensions. These can be inferred + # locally in the Cython code. 
+ if isinstance(arg, (InputArgument, InOutArgument)) and arg.dimensions: + dims = [d[1] + 1 for d in arg.dimensions] + sym_dims = [(i, d) for (i, d) in enumerate(dims) if + isinstance(d, Symbol)] + for (i, d) in sym_dims: + py_inferred[d] = (arg.name, i) + for arg in args: + if arg.name in py_inferred: + py_inferred[arg] = py_inferred.pop(arg.name) + # Filter inferred arguments from py_args + py_args = [a for a in py_args if a not in py_inferred] + return py_returns, py_args, py_locals, py_inferred + + def _prototype_arg(self, arg): + mat_dec = "np.ndarray[{mtype}, ndim={ndim}] {name}" + np_types = {'double': 'np.double_t', + 'int': 'np.int_t'} + t = arg.get_datatype('c') + if arg.dimensions: + self._need_numpy = True + ndim = len(arg.dimensions) + mtype = np_types[t] + return mat_dec.format(mtype=mtype, ndim=ndim, name=self._string_var(arg.name)) + else: + return "%s %s" % (t, self._string_var(arg.name)) + + def _declare_arg(self, arg): + proto = self._prototype_arg(arg) + if arg.dimensions: + shape = '(' + ','.join(self._string_var(i[1] + 1) for i in arg.dimensions) + ')' + return proto + " = np.empty({shape})".format(shape=shape) + else: + return proto + " = 0" + + def _call_arg(self, arg): + if arg.dimensions: + t = arg.get_datatype('c') + return "<{}*> {}.data".format(t, self._string_var(arg.name)) + elif isinstance(arg, ResultBase): + return "&{}".format(self._string_var(arg.name)) + else: + return self._string_var(arg.name) + + def _string_var(self, var): + printer = self.generator.printer.doprint + return printer(var) + + +class F2PyCodeWrapper(CodeWrapper): + """Wrapper that uses f2py""" + + def __init__(self, *args, **kwargs): + + ext_keys = ['include_dirs', 'library_dirs', 'libraries', + 'extra_compile_args', 'extra_link_args'] + msg = ('The compilation option kwarg {} is not supported with the f2py ' + 'backend.') + + for k in ext_keys: + if k in kwargs.keys(): + warn(msg.format(k)) + kwargs.pop(k, None) + + super().__init__(*args, **kwargs) + + @property + def command(self): + filename = self.filename + '.' + self.generator.code_extension + args = ['-c', '-m', self.module_name, filename] + command = [sys.executable, "-c", "import numpy.f2py as f2py2e;f2py2e.main()"]+args + return command + + def _prepare_files(self, routine): + pass + + @classmethod + def _get_wrapped_function(cls, mod, name): + return getattr(mod, name) + + +# Here we define a lookup of backends -> tuples of languages. For now, each +# tuple is of length 1, but if a backend supports more than one language, +# the most preferable language is listed first. 
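# For illustration, given the table below:
#     _infer_language('cython')  returns 'C99'  (first entry for the backend)
#     _infer_language('f2py')    returns 'F95'
# while _validate_backend_language('f2py', 'C') raises ValueError, because
# 'C' is not among the languages listed for the f2py backend.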
+_lang_lookup = {'CYTHON': ('C99', 'C89', 'C'), + 'F2PY': ('F95',), + 'NUMPY': ('C99', 'C89', 'C'), + 'DUMMY': ('F95',)} # Dummy here just for testing + + +def _infer_language(backend): + """For a given backend, return the top choice of language""" + langs = _lang_lookup.get(backend.upper(), False) + if not langs: + raise ValueError("Unrecognized backend: " + backend) + return langs[0] + + +def _validate_backend_language(backend, language): + """Throws error if backend and language are incompatible""" + langs = _lang_lookup.get(backend.upper(), False) + if not langs: + raise ValueError("Unrecognized backend: " + backend) + if language.upper() not in langs: + raise ValueError(("Backend {} and language {} are " + "incompatible").format(backend, language)) + + +@cacheit +@doctest_depends_on(exe=('f2py', 'gfortran'), modules=('numpy',)) +def autowrap(expr, language=None, backend='f2py', tempdir=None, args=None, + flags=None, verbose=False, helpers=None, code_gen=None, **kwargs): + """Generates Python callable binaries based on the math expression. + + Parameters + ========== + + expr + The SymPy expression that should be wrapped as a binary routine. + language : string, optional + If supplied, (options: 'C' or 'F95'), specifies the language of the + generated code. If ``None`` [default], the language is inferred based + upon the specified backend. + backend : string, optional + Backend used to wrap the generated code. Either 'f2py' [default], + or 'cython'. + tempdir : string, optional + Path to directory for temporary files. If this argument is supplied, + the generated code and the wrapper input files are left intact in the + specified path. + args : iterable, optional + An ordered iterable of symbols. Specifies the argument sequence for the + function. + flags : iterable, optional + Additional option flags that will be passed to the backend. + verbose : bool, optional + If True, autowrap will not mute the command line backends. This can be + helpful for debugging. + helpers : 3-tuple or iterable of 3-tuples, optional + Used to define auxiliary expressions needed for the main expr. If the + main expression needs to call a specialized function it should be + passed in via ``helpers``. Autowrap will then make sure that the + compiled main expression can link to the helper routine. Items should + be 3-tuples with (, , + ). It is mandatory to supply an argument sequence to + helper routines. + code_gen : CodeGen instance + An instance of a CodeGen subclass. Overrides ``language``. + include_dirs : [string] + A list of directories to search for C/C++ header files (in Unix form + for portability). + library_dirs : [string] + A list of directories to search for C/C++ libraries at link time. + libraries : [string] + A list of library names (not filenames or paths) to link against. + extra_compile_args : [string] + Any extra platform- and compiler-specific information to use when + compiling the source files in 'sources'. For platforms and compilers + where "command line" makes sense, this is typically a list of + command-line arguments, but for other platforms it could be anything. + extra_link_args : [string] + Any extra platform- and compiler-specific information to use when + linking object files together to create the extension (or to create a + new static Python interpreter). Similar interpretation as for + 'extra_compile_args'. 
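A short sketch of the ``helpers`` mechanism (the helper name ``my_cos`` is
illustrative, and a Fortran compiler is assumed for the default f2py
backend, so the calls are skipped): each helper is a (name, expression,
argument sequence) triple, and occurrences of the helper expression inside
``expr`` are replaced by calls to the compiled helper routine.

>>> from sympy import cos
>>> from sympy.abc import x, y
>>> helper = ('my_cos', cos(x), [x])
>>> fn = autowrap(y + cos(x), args=(x, y),
...               helpers=[helper])             # doctest: +SKIP
>>> fn(0.0, 1.0)                                # doctest: +SKIP
2.0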
+ + Examples + ======== + + >>> from sympy.abc import x, y, z + >>> from sympy.utilities.autowrap import autowrap + >>> expr = ((x - y + z)**(13)).expand() + >>> binary_func = autowrap(expr) + >>> binary_func(1, 4, 2) + -1.0 + + """ + if language: + if not isinstance(language, type): + _validate_backend_language(backend, language) + else: + language = _infer_language(backend) + + # two cases 1) helpers is an iterable of 3-tuples and 2) helpers is a + # 3-tuple + if iterable(helpers) and len(helpers) != 0 and iterable(helpers[0]): + helpers = helpers if helpers else () + else: + helpers = [helpers] if helpers else () + args = list(args) if iterable(args, exclude=set) else args + + if code_gen is None: + code_gen = get_code_generator(language, "autowrap") + + CodeWrapperClass = { + 'F2PY': F2PyCodeWrapper, + 'CYTHON': CythonCodeWrapper, + 'DUMMY': DummyWrapper + }[backend.upper()] + code_wrapper = CodeWrapperClass(code_gen, tempdir, flags if flags else (), + verbose, **kwargs) + + helps = [] + for name_h, expr_h, args_h in helpers: + helps.append(code_gen.routine(name_h, expr_h, args_h)) + + for name_h, expr_h, args_h in helpers: + if expr.has(expr_h): + name_h = binary_function(name_h, expr_h, backend='dummy') + expr = expr.subs(expr_h, name_h(*args_h)) + try: + routine = code_gen.routine('autofunc', expr, args) + except CodeGenArgumentListError as e: + # if all missing arguments are for pure output, we simply attach them + # at the end and try again, because the wrappers will silently convert + # them to return values anyway. + new_args = [] + for missing in e.missing_args: + if not isinstance(missing, OutputArgument): + raise + new_args.append(missing.name) + routine = code_gen.routine('autofunc', expr, args + new_args) + + return code_wrapper.wrap_code(routine, helpers=helps) + + +@doctest_depends_on(exe=('f2py', 'gfortran'), modules=('numpy',)) +def binary_function(symfunc, expr, **kwargs): + """Returns a SymPy function with expr as binary implementation + + This is a convenience function that automates the steps needed to + autowrap the SymPy expression and attaching it to a Function object + with implemented_function(). + + Parameters + ========== + + symfunc : SymPy Function + The function to bind the callable to. + expr : SymPy Expression + The expression used to generate the function. + kwargs : dict + Any kwargs accepted by autowrap. 
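Because the compiled binary is attached as the ``_imp_`` attribute, the
returned Function also works with ``lambdify``; an illustrative sketch (the
names ``g`` and ``num`` are placeholders, and the default f2py backend needs
a Fortran compiler, so the calls are skipped):

>>> from sympy import lambdify
>>> from sympy.abc import x, y
>>> g = binary_function('g', (x - y)**3)        # doctest: +SKIP
>>> num = lambdify((x, y), 2*g(x, y))           # doctest: +SKIP
>>> num(1.0, 2.0)                               # doctest: +SKIP
-2.0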
+ + Examples + ======== + + >>> from sympy.abc import x, y + >>> from sympy.utilities.autowrap import binary_function + >>> expr = ((x - y)**(25)).expand() + >>> f = binary_function('f', expr) + >>> type(f) + + >>> 2*f(x, y) + 2*f(x, y) + >>> f(x, y).evalf(2, subs={x: 1, y: 2}) + -1.0 + + """ + binary = autowrap(expr, **kwargs) + return implemented_function(symfunc, binary) + +################################################################# +# UFUNCIFY # +################################################################# + +_ufunc_top = Template("""\ +#include "Python.h" +#include "math.h" +#include "numpy/ndarraytypes.h" +#include "numpy/ufuncobject.h" +#include "numpy/halffloat.h" +#include ${include_file} + +static PyMethodDef ${module}Methods[] = { + {NULL, NULL, 0, NULL} +};""") + +_ufunc_outcalls = Template("*((double *)out${outnum}) = ${funcname}(${call_args});") + +_ufunc_body = Template("""\ +static void ${funcname}_ufunc(char **args, npy_intp *dimensions, npy_intp* steps, void* data) +{ + npy_intp i; + npy_intp n = dimensions[0]; + ${declare_args} + ${declare_steps} + for (i = 0; i < n; i++) { + ${outcalls} + ${step_increments} + } +} +PyUFuncGenericFunction ${funcname}_funcs[1] = {&${funcname}_ufunc}; +static char ${funcname}_types[${n_types}] = ${types} +static void *${funcname}_data[1] = {NULL};""") + +_ufunc_bottom = Template("""\ +#if PY_VERSION_HEX >= 0x03000000 +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "${module}", + NULL, + -1, + ${module}Methods, + NULL, + NULL, + NULL, + NULL +}; + +PyMODINIT_FUNC PyInit_${module}(void) +{ + PyObject *m, *d; + ${function_creation} + m = PyModule_Create(&moduledef); + if (!m) { + return NULL; + } + import_array(); + import_umath(); + d = PyModule_GetDict(m); + ${ufunc_init} + return m; +} +#else +PyMODINIT_FUNC init${module}(void) +{ + PyObject *m, *d; + ${function_creation} + m = Py_InitModule("${module}", ${module}Methods); + if (m == NULL) { + return; + } + import_array(); + import_umath(); + d = PyModule_GetDict(m); + ${ufunc_init} +} +#endif\ +""") + +_ufunc_init_form = Template("""\ +ufunc${ind} = PyUFunc_FromFuncAndData(${funcname}_funcs, ${funcname}_data, ${funcname}_types, 1, ${n_in}, ${n_out}, + PyUFunc_None, "${module}", ${docstring}, 0); + PyDict_SetItemString(d, "${funcname}", ufunc${ind}); + Py_DECREF(ufunc${ind});""") + +_ufunc_setup = Template("""\ +from setuptools.extension import Extension +from setuptools import setup + +from numpy import get_include + +if __name__ == "__main__": + setup(ext_modules=[ + Extension('${module}', + sources=['${module}.c', '${filename}.c'], + include_dirs=[get_include()])]) +""") + + +class UfuncifyCodeWrapper(CodeWrapper): + """Wrapper for Ufuncify""" + + def __init__(self, *args, **kwargs): + + ext_keys = ['include_dirs', 'library_dirs', 'libraries', + 'extra_compile_args', 'extra_link_args'] + msg = ('The compilation option kwarg {} is not supported with the numpy' + ' backend.') + + for k in ext_keys: + if k in kwargs.keys(): + warn(msg.format(k)) + kwargs.pop(k, None) + + super().__init__(*args, **kwargs) + + @property + def command(self): + command = [sys.executable, "setup.py", "build_ext", "--inplace"] + return command + + def wrap_code(self, routines, helpers=None): + # This routine overrides CodeWrapper because we can't assume funcname == routines[0].name + # Therefore we have to break the CodeWrapper private API. + # There isn't an obvious way to extend multi-expr support to + # the other autowrap backends, so we limit this change to ufuncify. 
+ helpers = helpers if helpers is not None else [] + # We just need a consistent name + funcname = 'wrapped_' + str(id(routines) + id(helpers)) + + workdir = self.filepath or tempfile.mkdtemp("_sympy_compile") + if not os.access(workdir, os.F_OK): + os.mkdir(workdir) + oldwork = os.getcwd() + os.chdir(workdir) + try: + sys.path.append(workdir) + self._generate_code(routines, helpers) + self._prepare_files(routines, funcname) + self._process_files(routines) + mod = __import__(self.module_name) + finally: + sys.path.remove(workdir) + CodeWrapper._module_counter += 1 + os.chdir(oldwork) + if not self.filepath: + try: + shutil.rmtree(workdir) + except OSError: + # Could be some issues on Windows + pass + + return self._get_wrapped_function(mod, funcname) + + def _generate_code(self, main_routines, helper_routines): + all_routines = main_routines + helper_routines + self.generator.write( + all_routines, self.filename, True, self.include_header, + self.include_empty) + + def _prepare_files(self, routines, funcname): + + # C + codefilename = self.module_name + '.c' + with open(codefilename, 'w') as f: + self.dump_c(routines, f, self.filename, funcname=funcname) + + # setup.py + with open('setup.py', 'w') as f: + self.dump_setup(f) + + @classmethod + def _get_wrapped_function(cls, mod, name): + return getattr(mod, name) + + def dump_setup(self, f): + setup = _ufunc_setup.substitute(module=self.module_name, + filename=self.filename) + f.write(setup) + + def dump_c(self, routines, f, prefix, funcname=None): + """Write a C file with Python wrappers + + This file contains all the definitions of the routines in c code. + + Arguments + --------- + routines + List of Routine instances + f + File-like object to write the file to + prefix + The filename prefix, used to name the imported module. + funcname + Name of the main function to be returned. 
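A sketch of direct use (illustrative only; ``wrap_code`` normally calls
this, the routine names mirror what ``ufuncify`` would generate, and the
output is written to an in-memory buffer):

>>> from io import StringIO
>>> from sympy.utilities.autowrap import UfuncifyCodeWrapper
>>> from sympy.utilities.codegen import C99CodeGen
>>> from sympy.abc import x, y
>>> gen = C99CodeGen("ufuncify")
>>> wrapper = UfuncifyCodeWrapper(gen)
>>> routines = [gen.routine('autofunc0', x + y), gen.routine('autofunc1', x*y)]
>>> buf = StringIO()
>>> wrapper.dump_c(routines, buf, 'wrapped_code', funcname='wrapped')
>>> 'PyUFunc_FromFuncAndData' in buf.getvalue()
True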
+ """ + if funcname is None: + if len(routines) == 1: + funcname = routines[0].name + else: + msg = 'funcname must be specified for multiple output routines' + raise ValueError(msg) + functions = [] + function_creation = [] + ufunc_init = [] + module = self.module_name + include_file = "\"{}.h\"".format(prefix) + top = _ufunc_top.substitute(include_file=include_file, module=module) + + name = funcname + + # Partition the C function arguments into categories + # Here we assume all routines accept the same arguments + r_index = 0 + py_in, _ = self._partition_args(routines[0].arguments) + n_in = len(py_in) + n_out = len(routines) + + # Declare Args + form = "char *{0}{1} = args[{2}];" + arg_decs = [form.format('in', i, i) for i in range(n_in)] + arg_decs.extend([form.format('out', i, i+n_in) for i in range(n_out)]) + declare_args = '\n '.join(arg_decs) + + # Declare Steps + form = "npy_intp {0}{1}_step = steps[{2}];" + step_decs = [form.format('in', i, i) for i in range(n_in)] + step_decs.extend([form.format('out', i, i+n_in) for i in range(n_out)]) + declare_steps = '\n '.join(step_decs) + + # Call Args + form = "*(double *)in{0}" + call_args = ', '.join([form.format(a) for a in range(n_in)]) + + # Step Increments + form = "{0}{1} += {0}{1}_step;" + step_incs = [form.format('in', i) for i in range(n_in)] + step_incs.extend([form.format('out', i, i) for i in range(n_out)]) + step_increments = '\n '.join(step_incs) + + # Types + n_types = n_in + n_out + types = "{" + ', '.join(["NPY_DOUBLE"]*n_types) + "};" + + # Docstring + docstring = '"Created in SymPy with Ufuncify"' + + # Function Creation + function_creation.append("PyObject *ufunc{};".format(r_index)) + + # Ufunc initialization + init_form = _ufunc_init_form.substitute(module=module, + funcname=name, + docstring=docstring, + n_in=n_in, n_out=n_out, + ind=r_index) + ufunc_init.append(init_form) + + outcalls = [_ufunc_outcalls.substitute( + outnum=i, call_args=call_args, funcname=routines[i].name) for i in + range(n_out)] + + body = _ufunc_body.substitute(module=module, funcname=name, + declare_args=declare_args, + declare_steps=declare_steps, + call_args=call_args, + step_increments=step_increments, + n_types=n_types, types=types, + outcalls='\n '.join(outcalls)) + functions.append(body) + + body = '\n\n'.join(functions) + ufunc_init = '\n '.join(ufunc_init) + function_creation = '\n '.join(function_creation) + bottom = _ufunc_bottom.substitute(module=module, + ufunc_init=ufunc_init, + function_creation=function_creation) + text = [top, body, bottom] + f.write('\n\n'.join(text)) + + def _partition_args(self, args): + """Group function arguments into categories.""" + py_in = [] + py_out = [] + for arg in args: + if isinstance(arg, OutputArgument): + py_out.append(arg) + elif isinstance(arg, InOutArgument): + raise ValueError("Ufuncify doesn't support InOutArguments") + else: + py_in.append(arg) + return py_in, py_out + + +@cacheit +@doctest_depends_on(exe=('f2py', 'gfortran', 'gcc'), modules=('numpy',)) +def ufuncify(args, expr, language=None, backend='numpy', tempdir=None, + flags=None, verbose=False, helpers=None, **kwargs): + """Generates a binary function that supports broadcasting on numpy arrays. + + Parameters + ========== + + args : iterable + Either a Symbol or an iterable of symbols. Specifies the argument + sequence for the function. + expr + A SymPy expression that defines the element wise operation. + language : string, optional + If supplied, (options: 'C' or 'F95'), specifies the language of the + generated code. 
If ``None`` [default], the language is inferred based + upon the specified backend. + backend : string, optional + Backend used to wrap the generated code. Either 'numpy' [default], + 'cython', or 'f2py'. + tempdir : string, optional + Path to directory for temporary files. If this argument is supplied, + the generated code and the wrapper input files are left intact in + the specified path. + flags : iterable, optional + Additional option flags that will be passed to the backend. + verbose : bool, optional + If True, autowrap will not mute the command line backends. This can + be helpful for debugging. + helpers : iterable, optional + Used to define auxiliary expressions needed for the main expr. If + the main expression needs to call a specialized function it should + be put in the ``helpers`` iterable. Autowrap will then make sure + that the compiled main expression can link to the helper routine. + Items should be tuples with (, , + ). It is mandatory to supply an argument sequence to + helper routines. + kwargs : dict + These kwargs will be passed to autowrap if the `f2py` or `cython` + backend is used and ignored if the `numpy` backend is used. + + Notes + ===== + + The default backend ('numpy') will create actual instances of + ``numpy.ufunc``. These support ndimensional broadcasting, and implicit type + conversion. Use of the other backends will result in a "ufunc-like" + function, which requires equal length 1-dimensional arrays for all + arguments, and will not perform any type conversions. + + References + ========== + + .. [1] https://numpy.org/doc/stable/reference/ufuncs.html + + Examples + ======== + + >>> from sympy.utilities.autowrap import ufuncify + >>> from sympy.abc import x, y + >>> import numpy as np + >>> f = ufuncify((x, y), y + x**2) + >>> type(f) + + >>> f([1, 2, 3], 2) + array([ 3., 6., 11.]) + >>> f(np.arange(5), 3) + array([ 3., 4., 7., 12., 19.]) + + For the 'f2py' and 'cython' backends, inputs are required to be equal length + 1-dimensional arrays. The 'f2py' backend will perform type conversion, but + the Cython backend will error if the inputs are not of the expected type. + + >>> f_fortran = ufuncify((x, y), y + x**2, backend='f2py') + >>> f_fortran(1, 2) + array([ 3.]) + >>> f_fortran(np.array([1, 2, 3]), np.array([1.0, 2.0, 3.0])) + array([ 2., 6., 12.]) + >>> f_cython = ufuncify((x, y), y + x**2, backend='Cython') + >>> f_cython(1, 2) # doctest: +ELLIPSIS + Traceback (most recent call last): + ... 
+ TypeError: Argument '_x' has incorrect type (expected numpy.ndarray, got int) + >>> f_cython(np.array([1.0]), np.array([2.0])) + array([ 3.]) + + """ + + if isinstance(args, Symbol): + args = (args,) + else: + args = tuple(args) + + if language: + _validate_backend_language(backend, language) + else: + language = _infer_language(backend) + + helpers = helpers if helpers else () + flags = flags if flags else () + + if backend.upper() == 'NUMPY': + # maxargs is set by numpy compile-time constant NPY_MAXARGS + # If a future version of numpy modifies or removes this restriction + # this variable should be changed or removed + maxargs = 32 + helps = [] + for name, expr, args in helpers: + helps.append(make_routine(name, expr, args)) + code_wrapper = UfuncifyCodeWrapper(C99CodeGen("ufuncify"), tempdir, + flags, verbose) + if not isinstance(expr, (list, tuple)): + expr = [expr] + if len(expr) == 0: + raise ValueError('Expression iterable has zero length') + if len(expr) + len(args) > maxargs: + msg = ('Cannot create ufunc with more than {0} total arguments: ' + 'got {1} in, {2} out') + raise ValueError(msg.format(maxargs, len(args), len(expr))) + routines = [make_routine('autofunc{}'.format(idx), exprx, args) for + idx, exprx in enumerate(expr)] + return code_wrapper.wrap_code(routines, helpers=helps) + else: + # Dummies are used for all added expressions to prevent name clashes + # within the original expression. + y = IndexedBase(Dummy('y')) + m = Dummy('m', integer=True) + i = Idx(Dummy('i', integer=True), m) + f_dummy = Dummy('f') + f = implemented_function('%s_%d' % (f_dummy.name, f_dummy.dummy_index), Lambda(args, expr)) + # For each of the args create an indexed version. + indexed_args = [IndexedBase(Dummy(str(a))) for a in args] + # Order the arguments (out, args, dim) + args = [y] + indexed_args + [m] + args_with_indices = [a[i] for a in indexed_args] + return autowrap(Eq(y[i], f(*args_with_indices)), language, backend, + tempdir, args, flags, verbose, helpers, **kwargs) diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/codegen.py b/venv/lib/python3.10/site-packages/sympy/utilities/codegen.py new file mode 100644 index 0000000000000000000000000000000000000000..01ad877864ab845c207fdd3a4ada1d0d5f329c0f --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/codegen.py @@ -0,0 +1,2236 @@ +""" +module for generating C, C++, Fortran77, Fortran90, Julia, Rust +and Octave/Matlab routines that evaluate SymPy expressions. +This module is work in progress. +Only the milestones with a '+' character in the list below have been completed. + +--- How is sympy.utilities.codegen different from sympy.printing.ccode? --- + +We considered the idea to extend the printing routines for SymPy functions in +such a way that it prints complete compilable code, but this leads to a few +unsurmountable issues that can only be tackled with dedicated code generator: + +- For C, one needs both a code and a header file, while the printing routines + generate just one string. This code generator can be extended to support + .pyf files for f2py. + +- SymPy functions are not concerned with programming-technical issues, such + as input, output and input-output arguments. Other examples are contiguous + or non-contiguous arrays, including headers of other libraries such as gsl + or others. + +- It is highly interesting to evaluate several SymPy functions in one C + routine, eventually sharing common intermediate results with the help + of the cse routine. This is more than just printing. 
+ +- From the programming perspective, expressions with constants should be + evaluated in the code generator as much as possible. This is different + for printing. + +--- Basic assumptions --- + +* A generic Routine data structure describes the routine that must be + translated into C/Fortran/... code. This data structure covers all + features present in one or more of the supported languages. + +* Descendants from the CodeGen class transform multiple Routine instances + into compilable code. Each derived class translates into a specific + language. + +* In many cases, one wants a simple workflow. The friendly functions in the + last part are a simple api on top of the Routine/CodeGen stuff. They are + easier to use, but are less powerful. + +--- Milestones --- + ++ First working version with scalar input arguments, generating C code, + tests ++ Friendly functions that are easier to use than the rigorous + Routine/CodeGen workflow. ++ Integer and Real numbers as input and output ++ Output arguments ++ InputOutput arguments ++ Sort input/output arguments properly ++ Contiguous array arguments (numpy matrices) ++ Also generate .pyf code for f2py (in autowrap module) ++ Isolate constants and evaluate them beforehand in double precision ++ Fortran 90 ++ Octave/Matlab + +- Common Subexpression Elimination +- User defined comments in the generated code +- Optional extra include lines for libraries/objects that can eval special + functions +- Test other C compilers and libraries: gcc, tcc, libtcc, gcc+gsl, ... +- Contiguous array arguments (SymPy matrices) +- Non-contiguous array arguments (SymPy matrices) +- ccode must raise an error when it encounters something that cannot be + translated into c. ccode(integrate(sin(x)/x, x)) does not make sense. +- Complex numbers as input and output +- A default complex datatype +- Include extra information in the header: date, user, hostname, sha1 + hash, ... +- Fortran 77 +- C++ +- Python +- Julia +- Rust +- ... + +""" + +import os +import textwrap +from io import StringIO + +from sympy import __version__ as sympy_version +from sympy.core import Symbol, S, Tuple, Equality, Function, Basic +from sympy.printing.c import c_code_printers +from sympy.printing.codeprinter import AssignmentError +from sympy.printing.fortran import FCodePrinter +from sympy.printing.julia import JuliaCodePrinter +from sympy.printing.octave import OctaveCodePrinter +from sympy.printing.rust import RustCodePrinter +from sympy.tensor import Idx, Indexed, IndexedBase +from sympy.matrices import (MatrixSymbol, ImmutableMatrix, MatrixBase, + MatrixExpr, MatrixSlice) +from sympy.utilities.iterables import is_sequence + + +__all__ = [ + # description of routines + "Routine", "DataType", "default_datatypes", "get_default_datatype", + "Argument", "InputArgument", "OutputArgument", "Result", + # routines -> code + "CodeGen", "CCodeGen", "FCodeGen", "JuliaCodeGen", "OctaveCodeGen", + "RustCodeGen", + # friendly functions + "codegen", "make_routine", +] + + +# +# Description of routines +# + + +class Routine: + """Generic description of evaluation routine for set of expressions. + + A CodeGen class can translate instances of this class into code in a + particular language. The routine specification covers all the features + present in these languages. The CodeGen part must raise an exception + when certain features are not present in the target language. For + example, multiple return values are possible in Python, but not in C or + Fortran. 
Another example: Fortran and Python support complex numbers, + while C does not. + + """ + + def __init__(self, name, arguments, results, local_vars, global_vars): + """Initialize a Routine instance. + + Parameters + ========== + + name : string + Name of the routine. + + arguments : list of Arguments + These are things that appear in arguments of a routine, often + appearing on the right-hand side of a function call. These are + commonly InputArguments but in some languages, they can also be + OutputArguments or InOutArguments (e.g., pass-by-reference in C + code). + + results : list of Results + These are the return values of the routine, often appearing on + the left-hand side of a function call. The difference between + Results and OutputArguments and when you should use each is + language-specific. + + local_vars : list of Results + These are variables that will be defined at the beginning of the + function. + + global_vars : list of Symbols + Variables which will not be passed into the function. + + """ + + # extract all input symbols and all symbols appearing in an expression + input_symbols = set() + symbols = set() + for arg in arguments: + if isinstance(arg, OutputArgument): + symbols.update(arg.expr.free_symbols - arg.expr.atoms(Indexed)) + elif isinstance(arg, InputArgument): + input_symbols.add(arg.name) + elif isinstance(arg, InOutArgument): + input_symbols.add(arg.name) + symbols.update(arg.expr.free_symbols - arg.expr.atoms(Indexed)) + else: + raise ValueError("Unknown Routine argument: %s" % arg) + + for r in results: + if not isinstance(r, Result): + raise ValueError("Unknown Routine result: %s" % r) + symbols.update(r.expr.free_symbols - r.expr.atoms(Indexed)) + + local_symbols = set() + for r in local_vars: + if isinstance(r, Result): + symbols.update(r.expr.free_symbols - r.expr.atoms(Indexed)) + local_symbols.add(r.name) + else: + local_symbols.add(r) + + symbols = {s.label if isinstance(s, Idx) else s for s in symbols} + + # Check that all symbols in the expressions are covered by + # InputArguments/InOutArguments---subset because user could + # specify additional (unused) InputArguments or local_vars. + notcovered = symbols.difference( + input_symbols.union(local_symbols).union(global_vars)) + if notcovered != set(): + raise ValueError("Symbols needed for output are not in input " + + ", ".join([str(x) for x in notcovered])) + + self.name = name + self.arguments = arguments + self.results = results + self.local_vars = local_vars + self.global_vars = global_vars + + def __str__(self): + return self.__class__.__name__ + "({name!r}, {arguments}, {results}, {local_vars}, {global_vars})".format(**self.__dict__) + + __repr__ = __str__ + + @property + def variables(self): + """Returns a set of all variables possibly used in the routine. + + For routines with unnamed return values, the dummies that may or + may not be used will be included in the set. + + """ + v = set(self.local_vars) + for arg in self.arguments: + v.add(arg.name) + for res in self.results: + v.add(res.result_var) + return v + + @property + def result_variables(self): + """Returns a list of OutputArgument, InOutArgument and Result. + + If return values are present, they are at the end of the list. 
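A small example (it only builds the routine description, so no compiler is
needed; the routine name is arbitrary):

>>> from sympy.utilities.codegen import make_routine
>>> from sympy.abc import x, y
>>> r = make_routine('point', [x*y, x + y])
>>> [type(v).__name__ for v in r.result_variables]
['Result', 'Result']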
+ """ + args = [arg for arg in self.arguments if isinstance( + arg, (OutputArgument, InOutArgument))] + args.extend(self.results) + return args + + +class DataType: + """Holds strings for a certain datatype in different languages.""" + def __init__(self, cname, fname, pyname, jlname, octname, rsname): + self.cname = cname + self.fname = fname + self.pyname = pyname + self.jlname = jlname + self.octname = octname + self.rsname = rsname + + +default_datatypes = { + "int": DataType("int", "INTEGER*4", "int", "", "", "i32"), + "float": DataType("double", "REAL*8", "float", "", "", "f64"), + "complex": DataType("double", "COMPLEX*16", "complex", "", "", "float") #FIXME: + # complex is only supported in fortran, python, julia, and octave. + # So to not break c or rust code generation, we stick with double or + # float, respectively (but actually should raise an exception for + # explicitly complex variables (x.is_complex==True)) +} + + +COMPLEX_ALLOWED = False +def get_default_datatype(expr, complex_allowed=None): + """Derives an appropriate datatype based on the expression.""" + if complex_allowed is None: + complex_allowed = COMPLEX_ALLOWED + if complex_allowed: + final_dtype = "complex" + else: + final_dtype = "float" + if expr.is_integer: + return default_datatypes["int"] + elif expr.is_real: + return default_datatypes["float"] + elif isinstance(expr, MatrixBase): + #check all entries + dt = "int" + for element in expr: + if dt == "int" and not element.is_integer: + dt = "float" + if dt == "float" and not element.is_real: + return default_datatypes[final_dtype] + return default_datatypes[dt] + else: + return default_datatypes[final_dtype] + + +class Variable: + """Represents a typed variable.""" + + def __init__(self, name, datatype=None, dimensions=None, precision=None): + """Return a new variable. + + Parameters + ========== + + name : Symbol or MatrixSymbol + + datatype : optional + When not given, the data type will be guessed based on the + assumptions on the symbol argument. + + dimension : sequence containing tupes, optional + If present, the argument is interpreted as an array, where this + sequence of tuples specifies (lower, upper) bounds for each + index of the array. + + precision : int, optional + Controls the precision of floating point constants. + + """ + if not isinstance(name, (Symbol, MatrixSymbol)): + raise TypeError("The first argument must be a SymPy symbol.") + if datatype is None: + datatype = get_default_datatype(name) + elif not isinstance(datatype, DataType): + raise TypeError("The (optional) `datatype' argument must be an " + "instance of the DataType class.") + if dimensions and not isinstance(dimensions, (tuple, list)): + raise TypeError( + "The dimension argument must be a sequence of tuples") + + self._name = name + self._datatype = { + 'C': datatype.cname, + 'FORTRAN': datatype.fname, + 'JULIA': datatype.jlname, + 'OCTAVE': datatype.octname, + 'PYTHON': datatype.pyname, + 'RUST': datatype.rsname, + } + self.dimensions = dimensions + self.precision = precision + + def __str__(self): + return "%s(%r)" % (self.__class__.__name__, self.name) + + __repr__ = __str__ + + @property + def name(self): + return self._name + + def get_datatype(self, language): + """Returns the datatype string for the requested language. 
+ + Examples + ======== + + >>> from sympy import Symbol + >>> from sympy.utilities.codegen import Variable + >>> x = Variable(Symbol('x')) + >>> x.get_datatype('c') + 'double' + >>> x.get_datatype('fortran') + 'REAL*8' + + """ + try: + return self._datatype[language.upper()] + except KeyError: + raise CodeGenError("Has datatypes for languages: %s" % + ", ".join(self._datatype)) + + +class Argument(Variable): + """An abstract Argument data structure: a name and a data type. + + This structure is refined in the descendants below. + + """ + pass + + +class InputArgument(Argument): + pass + + +class ResultBase: + """Base class for all "outgoing" information from a routine. + + Objects of this class stores a SymPy expression, and a SymPy object + representing a result variable that will be used in the generated code + only if necessary. + + """ + def __init__(self, expr, result_var): + self.expr = expr + self.result_var = result_var + + def __str__(self): + return "%s(%r, %r)" % (self.__class__.__name__, self.expr, + self.result_var) + + __repr__ = __str__ + + +class OutputArgument(Argument, ResultBase): + """OutputArgument are always initialized in the routine.""" + + def __init__(self, name, result_var, expr, datatype=None, dimensions=None, precision=None): + """Return a new variable. + + Parameters + ========== + + name : Symbol, MatrixSymbol + The name of this variable. When used for code generation, this + might appear, for example, in the prototype of function in the + argument list. + + result_var : Symbol, Indexed + Something that can be used to assign a value to this variable. + Typically the same as `name` but for Indexed this should be e.g., + "y[i]" whereas `name` should be the Symbol "y". + + expr : object + The expression that should be output, typically a SymPy + expression. + + datatype : optional + When not given, the data type will be guessed based on the + assumptions on the symbol argument. + + dimension : sequence containing tupes, optional + If present, the argument is interpreted as an array, where this + sequence of tuples specifies (lower, upper) bounds for each + index of the array. + + precision : int, optional + Controls the precision of floating point constants. + + """ + + Argument.__init__(self, name, datatype, dimensions, precision) + ResultBase.__init__(self, expr, result_var) + + def __str__(self): + return "%s(%r, %r, %r)" % (self.__class__.__name__, self.name, self.result_var, self.expr) + + __repr__ = __str__ + + +class InOutArgument(Argument, ResultBase): + """InOutArgument are never initialized in the routine.""" + + def __init__(self, name, result_var, expr, datatype=None, dimensions=None, precision=None): + if not datatype: + datatype = get_default_datatype(expr) + Argument.__init__(self, name, datatype, dimensions, precision) + ResultBase.__init__(self, expr, result_var) + __init__.__doc__ = OutputArgument.__init__.__doc__ + + + def __str__(self): + return "%s(%r, %r, %r)" % (self.__class__.__name__, self.name, self.expr, + self.result_var) + + __repr__ = __str__ + + +class Result(Variable, ResultBase): + """An expression for a return value. + + The name result is used to avoid conflicts with the reserved word + "return" in the Python language. It is also shorter than ReturnValue. + + These may or may not need a name in the destination (e.g., "return(x*y)" + might return a value without ever naming it). + + """ + + def __init__(self, expr, name=None, result_var=None, datatype=None, + dimensions=None, precision=None): + """Initialize a return value. 
+ + Parameters + ========== + + expr : SymPy expression + + name : Symbol, MatrixSymbol, optional + The name of this return variable. When used for code generation, + this might appear, for example, in the prototype of function in a + list of return values. A dummy name is generated if omitted. + + result_var : Symbol, Indexed, optional + Something that can be used to assign a value to this variable. + Typically the same as `name` but for Indexed this should be e.g., + "y[i]" whereas `name` should be the Symbol "y". Defaults to + `name` if omitted. + + datatype : optional + When not given, the data type will be guessed based on the + assumptions on the expr argument. + + dimension : sequence containing tupes, optional + If present, this variable is interpreted as an array, + where this sequence of tuples specifies (lower, upper) + bounds for each index of the array. + + precision : int, optional + Controls the precision of floating point constants. + + """ + # Basic because it is the base class for all types of expressions + if not isinstance(expr, (Basic, MatrixBase)): + raise TypeError("The first argument must be a SymPy expression.") + + if name is None: + name = 'result_%d' % abs(hash(expr)) + + if datatype is None: + #try to infer data type from the expression + datatype = get_default_datatype(expr) + + if isinstance(name, str): + if isinstance(expr, (MatrixBase, MatrixExpr)): + name = MatrixSymbol(name, *expr.shape) + else: + name = Symbol(name) + + if result_var is None: + result_var = name + + Variable.__init__(self, name, datatype=datatype, + dimensions=dimensions, precision=precision) + ResultBase.__init__(self, expr, result_var) + + def __str__(self): + return "%s(%r, %r, %r)" % (self.__class__.__name__, self.expr, self.name, + self.result_var) + + __repr__ = __str__ + + +# +# Transformation of routine objects into code +# + +class CodeGen: + """Abstract class for the code generators.""" + + printer = None # will be set to an instance of a CodePrinter subclass + + def _indent_code(self, codelines): + return self.printer.indent_code(codelines) + + def _printer_method_with_settings(self, method, settings=None, *args, **kwargs): + settings = settings or {} + ori = {k: self.printer._settings[k] for k in settings} + for k, v in settings.items(): + self.printer._settings[k] = v + result = getattr(self.printer, method)(*args, **kwargs) + for k, v in ori.items(): + self.printer._settings[k] = v + return result + + def _get_symbol(self, s): + """Returns the symbol as fcode prints it.""" + if self.printer._settings['human']: + expr_str = self.printer.doprint(s) + else: + constants, not_supported, expr_str = self.printer.doprint(s) + if constants or not_supported: + raise ValueError("Failed to print %s" % str(s)) + return expr_str.strip() + + def __init__(self, project="project", cse=False): + """Initialize a code generator. + + Derived classes will offer more options that affect the generated + code. + + """ + self.project = project + self.cse = cse + + def routine(self, name, expr, argument_sequence=None, global_vars=None): + """Creates an Routine object that is appropriate for this language. + + This implementation is appropriate for at least C/Fortran. Subclasses + can override this if necessary. + + Here, we assume at most one return value (the l-value) which must be + scalar. Additional outputs are OutputArguments (e.g., pointers on + right-hand-side or pass-by-reference). Matrices are always returned + via OutputArguments. 
If ``argument_sequence`` is None, arguments will + be ordered alphabetically, but with all InputArguments first, and then + OutputArgument and InOutArguments. + + """ + + if self.cse: + from sympy.simplify.cse_main import cse + + if is_sequence(expr) and not isinstance(expr, (MatrixBase, MatrixExpr)): + if not expr: + raise ValueError("No expression given") + for e in expr: + if not e.is_Equality: + raise CodeGenError("Lists of expressions must all be Equalities. {} is not.".format(e)) + + # create a list of right hand sides and simplify them + rhs = [e.rhs for e in expr] + common, simplified = cse(rhs) + + # pack the simplified expressions back up with their left hand sides + expr = [Equality(e.lhs, rhs) for e, rhs in zip(expr, simplified)] + else: + if isinstance(expr, Equality): + common, simplified = cse(expr.rhs) #, ignore=in_out_args) + expr = Equality(expr.lhs, simplified[0]) + else: + common, simplified = cse(expr) + expr = simplified + + local_vars = [Result(b,a) for a,b in common] + local_symbols = {a for a,_ in common} + local_expressions = Tuple(*[b for _,b in common]) + else: + local_expressions = Tuple() + + if is_sequence(expr) and not isinstance(expr, (MatrixBase, MatrixExpr)): + if not expr: + raise ValueError("No expression given") + expressions = Tuple(*expr) + else: + expressions = Tuple(expr) + + if self.cse: + if {i.label for i in expressions.atoms(Idx)} != set(): + raise CodeGenError("CSE and Indexed expressions do not play well together yet") + else: + # local variables for indexed expressions + local_vars = {i.label for i in expressions.atoms(Idx)} + local_symbols = local_vars + + # global variables + global_vars = set() if global_vars is None else set(global_vars) + + # symbols that should be arguments + symbols = (expressions.free_symbols | local_expressions.free_symbols) - local_symbols - global_vars + new_symbols = set() + new_symbols.update(symbols) + + for symbol in symbols: + if isinstance(symbol, Idx): + new_symbols.remove(symbol) + new_symbols.update(symbol.args[1].free_symbols) + if isinstance(symbol, Indexed): + new_symbols.remove(symbol) + symbols = new_symbols + + # Decide whether to use output argument or return value + return_val = [] + output_args = [] + for expr in expressions: + if isinstance(expr, Equality): + out_arg = expr.lhs + expr = expr.rhs + if isinstance(out_arg, Indexed): + dims = tuple([ (S.Zero, dim - 1) for dim in out_arg.shape]) + symbol = out_arg.base.label + elif isinstance(out_arg, Symbol): + dims = [] + symbol = out_arg + elif isinstance(out_arg, MatrixSymbol): + dims = tuple([ (S.Zero, dim - 1) for dim in out_arg.shape]) + symbol = out_arg + else: + raise CodeGenError("Only Indexed, Symbol, or MatrixSymbol " + "can define output arguments.") + + if expr.has(symbol): + output_args.append( + InOutArgument(symbol, out_arg, expr, dimensions=dims)) + else: + output_args.append( + OutputArgument(symbol, out_arg, expr, dimensions=dims)) + + # remove duplicate arguments when they are not local variables + if symbol not in local_vars: + # avoid duplicate arguments + symbols.remove(symbol) + elif isinstance(expr, (ImmutableMatrix, MatrixSlice)): + # Create a "dummy" MatrixSymbol to use as the Output arg + out_arg = MatrixSymbol('out_%s' % abs(hash(expr)), *expr.shape) + dims = tuple([(S.Zero, dim - 1) for dim in out_arg.shape]) + output_args.append( + OutputArgument(out_arg, out_arg, expr, dimensions=dims)) + else: + return_val.append(Result(expr)) + + arg_list = [] + + # setup input argument list + + # helper to get dimensions for data 
for array-like args + def dimensions(s): + return [(S.Zero, dim - 1) for dim in s.shape] + + array_symbols = {} + for array in expressions.atoms(Indexed) | local_expressions.atoms(Indexed): + array_symbols[array.base.label] = array + for array in expressions.atoms(MatrixSymbol) | local_expressions.atoms(MatrixSymbol): + array_symbols[array] = array + + for symbol in sorted(symbols, key=str): + if symbol in array_symbols: + array = array_symbols[symbol] + metadata = {'dimensions': dimensions(array)} + else: + metadata = {} + + arg_list.append(InputArgument(symbol, **metadata)) + + output_args.sort(key=lambda x: str(x.name)) + arg_list.extend(output_args) + + if argument_sequence is not None: + # if the user has supplied IndexedBase instances, we'll accept that + new_sequence = [] + for arg in argument_sequence: + if isinstance(arg, IndexedBase): + new_sequence.append(arg.label) + else: + new_sequence.append(arg) + argument_sequence = new_sequence + + missing = [x for x in arg_list if x.name not in argument_sequence] + if missing: + msg = "Argument list didn't specify: {0} " + msg = msg.format(", ".join([str(m.name) for m in missing])) + raise CodeGenArgumentListError(msg, missing) + + # create redundant arguments to produce the requested sequence + name_arg_dict = {x.name: x for x in arg_list} + new_args = [] + for symbol in argument_sequence: + try: + new_args.append(name_arg_dict[symbol]) + except KeyError: + if isinstance(symbol, (IndexedBase, MatrixSymbol)): + metadata = {'dimensions': dimensions(symbol)} + else: + metadata = {} + new_args.append(InputArgument(symbol, **metadata)) + arg_list = new_args + + return Routine(name, arg_list, return_val, local_vars, global_vars) + + def write(self, routines, prefix, to_files=False, header=True, empty=True): + """Writes all the source code files for the given routines. + + The generated source is returned as a list of (filename, contents) + tuples, or is written to files (see below). Each filename consists + of the given prefix, appended with an appropriate extension. + + Parameters + ========== + + routines : list + A list of Routine instances to be written + + prefix : string + The prefix for the output files + + to_files : bool, optional + When True, the output is written to files. Otherwise, a list + of (filename, contents) tuples is returned. [default: False] + + header : bool, optional + When True, a header comment is included on top of each source + file. [default: True] + + empty : bool, optional + When True, empty lines are included to structure the source + files. [default: True] + + """ + if to_files: + for dump_fn in self.dump_fns: + filename = "%s.%s" % (prefix, dump_fn.extension) + with open(filename, "w") as f: + dump_fn(self, routines, f, prefix, header, empty) + else: + result = [] + for dump_fn in self.dump_fns: + filename = "%s.%s" % (prefix, dump_fn.extension) + contents = StringIO() + dump_fn(self, routines, contents, prefix, header, empty) + result.append((filename, contents.getvalue())) + return result + + def dump_code(self, routines, f, prefix, header=True, empty=True): + """Write the code by calling language specific methods. + + The generated file contains all the definitions of the routines in + low-level code and refers to the header file if appropriate. + + Parameters + ========== + + routines : list + A list of Routine instances. + + f : file-like + Where to write the file. + + prefix : string + The filename prefix, used to refer to the proper header file. + Only the basename of the prefix is used. 
+ + header : bool, optional + When True, a header comment is included on top of each source + file. [default : True] + + empty : bool, optional + When True, empty lines are included to structure the source + files. [default : True] + + """ + + code_lines = self._preprocessor_statements(prefix) + + for routine in routines: + if empty: + code_lines.append("\n") + code_lines.extend(self._get_routine_opening(routine)) + code_lines.extend(self._declare_arguments(routine)) + code_lines.extend(self._declare_globals(routine)) + code_lines.extend(self._declare_locals(routine)) + if empty: + code_lines.append("\n") + code_lines.extend(self._call_printer(routine)) + if empty: + code_lines.append("\n") + code_lines.extend(self._get_routine_ending(routine)) + + code_lines = self._indent_code(''.join(code_lines)) + + if header: + code_lines = ''.join(self._get_header() + [code_lines]) + + if code_lines: + f.write(code_lines) + + +class CodeGenError(Exception): + pass + + +class CodeGenArgumentListError(Exception): + @property + def missing_args(self): + return self.args[1] + + +header_comment = """Code generated with SymPy %(version)s + +See http://www.sympy.org/ for more information. + +This file is part of '%(project)s' +""" + + +class CCodeGen(CodeGen): + """Generator for C code. + + The .write() method inherited from CodeGen will output a code file and + an interface file, .c and .h respectively. + + """ + + code_extension = "c" + interface_extension = "h" + standard = 'c99' + + def __init__(self, project="project", printer=None, + preprocessor_statements=None, cse=False): + super().__init__(project=project, cse=cse) + self.printer = printer or c_code_printers[self.standard.lower()]() + + self.preprocessor_statements = preprocessor_statements + if preprocessor_statements is None: + self.preprocessor_statements = ['#include '] + + def _get_header(self): + """Writes a common header for the generated files.""" + code_lines = [] + code_lines.append("/" + "*"*78 + '\n') + tmp = header_comment % {"version": sympy_version, + "project": self.project} + for line in tmp.splitlines(): + code_lines.append(" *%s*\n" % line.center(76)) + code_lines.append(" " + "*"*78 + "/\n") + return code_lines + + def get_prototype(self, routine): + """Returns a string for the function prototype of the routine. + + If the routine has multiple result objects, an CodeGenError is + raised. 
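+
+        A minimal usage sketch (an illustrative addition, not upstream
+        documentation; it assumes the default C99 printer and uses the
+        module-level ``make_routine`` helper defined later in this file):
+
+        >>> from sympy.abc import x, y
+        >>> from sympy.utilities.codegen import C99CodeGen, make_routine
+        >>> gen = C99CodeGen()
+        >>> routine = make_routine('f', x + y, language='C99')
+        >>> gen.get_prototype(routine)
+        'double f(double x, double y)'
+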
+ + See: https://en.wikipedia.org/wiki/Function_prototype + + """ + if len(routine.results) > 1: + raise CodeGenError("C only supports a single or no return value.") + elif len(routine.results) == 1: + ctype = routine.results[0].get_datatype('C') + else: + ctype = "void" + + type_args = [] + for arg in routine.arguments: + name = self.printer.doprint(arg.name) + if arg.dimensions or isinstance(arg, ResultBase): + type_args.append((arg.get_datatype('C'), "*%s" % name)) + else: + type_args.append((arg.get_datatype('C'), name)) + arguments = ", ".join([ "%s %s" % t for t in type_args]) + return "%s %s(%s)" % (ctype, routine.name, arguments) + + def _preprocessor_statements(self, prefix): + code_lines = [] + code_lines.append('#include "{}.h"'.format(os.path.basename(prefix))) + code_lines.extend(self.preprocessor_statements) + code_lines = ['{}\n'.format(l) for l in code_lines] + return code_lines + + def _get_routine_opening(self, routine): + prototype = self.get_prototype(routine) + return ["%s {\n" % prototype] + + def _declare_arguments(self, routine): + # arguments are declared in prototype + return [] + + def _declare_globals(self, routine): + # global variables are not explicitly declared within C functions + return [] + + def _declare_locals(self, routine): + + # Compose a list of symbols to be dereferenced in the function + # body. These are the arguments that were passed by a reference + # pointer, excluding arrays. + dereference = [] + for arg in routine.arguments: + if isinstance(arg, ResultBase) and not arg.dimensions: + dereference.append(arg.name) + + code_lines = [] + for result in routine.local_vars: + + # local variables that are simple symbols such as those used as indices into + # for loops are defined declared elsewhere. + if not isinstance(result, Result): + continue + + if result.name != result.result_var: + raise CodeGen("Result variable and name should match: {}".format(result)) + assign_to = result.name + t = result.get_datatype('c') + if isinstance(result.expr, (MatrixBase, MatrixExpr)): + dims = result.expr.shape + code_lines.append("{} {}[{}];\n".format(t, str(assign_to), dims[0]*dims[1])) + prefix = "" + else: + prefix = "const {} ".format(t) + + constants, not_c, c_expr = self._printer_method_with_settings( + 'doprint', {"human": False, "dereference": dereference}, + result.expr, assign_to=assign_to) + + for name, value in sorted(constants, key=str): + code_lines.append("double const %s = %s;\n" % (name, value)) + + code_lines.append("{}{}\n".format(prefix, c_expr)) + + return code_lines + + def _call_printer(self, routine): + code_lines = [] + + # Compose a list of symbols to be dereferenced in the function + # body. These are the arguments that were passed by a reference + # pointer, excluding arrays. 
+ dereference = [] + for arg in routine.arguments: + if isinstance(arg, ResultBase) and not arg.dimensions: + dereference.append(arg.name) + + return_val = None + for result in routine.result_variables: + if isinstance(result, Result): + assign_to = routine.name + "_result" + t = result.get_datatype('c') + code_lines.append("{} {};\n".format(t, str(assign_to))) + return_val = assign_to + else: + assign_to = result.result_var + + try: + constants, not_c, c_expr = self._printer_method_with_settings( + 'doprint', {"human": False, "dereference": dereference}, + result.expr, assign_to=assign_to) + except AssignmentError: + assign_to = result.result_var + code_lines.append( + "%s %s;\n" % (result.get_datatype('c'), str(assign_to))) + constants, not_c, c_expr = self._printer_method_with_settings( + 'doprint', {"human": False, "dereference": dereference}, + result.expr, assign_to=assign_to) + + for name, value in sorted(constants, key=str): + code_lines.append("double const %s = %s;\n" % (name, value)) + code_lines.append("%s\n" % c_expr) + + if return_val: + code_lines.append(" return %s;\n" % return_val) + return code_lines + + def _get_routine_ending(self, routine): + return ["}\n"] + + def dump_c(self, routines, f, prefix, header=True, empty=True): + self.dump_code(routines, f, prefix, header, empty) + dump_c.extension = code_extension # type: ignore + dump_c.__doc__ = CodeGen.dump_code.__doc__ + + def dump_h(self, routines, f, prefix, header=True, empty=True): + """Writes the C header file. + + This file contains all the function declarations. + + Parameters + ========== + + routines : list + A list of Routine instances. + + f : file-like + Where to write the file. + + prefix : string + The filename prefix, used to construct the include guards. + Only the basename of the prefix is used. + + header : bool, optional + When True, a header comment is included on top of each source + file. [default : True] + + empty : bool, optional + When True, empty lines are included to structure the source + files. [default : True] + + """ + if header: + print(''.join(self._get_header()), file=f) + guard_name = "%s__%s__H" % (self.project.replace( + " ", "_").upper(), prefix.replace("/", "_").upper()) + # include guards + if empty: + print(file=f) + print("#ifndef %s" % guard_name, file=f) + print("#define %s" % guard_name, file=f) + if empty: + print(file=f) + # declaration of the function prototypes + for routine in routines: + prototype = self.get_prototype(routine) + print("%s;" % prototype, file=f) + # end if include guards + if empty: + print(file=f) + print("#endif", file=f) + if empty: + print(file=f) + dump_h.extension = interface_extension # type: ignore + + # This list of dump functions is used by CodeGen.write to know which dump + # functions it has to call. + dump_fns = [dump_c, dump_h] + +class C89CodeGen(CCodeGen): + standard = 'C89' + +class C99CodeGen(CCodeGen): + standard = 'C99' + +class FCodeGen(CodeGen): + """Generator for Fortran 95 code + + The .write() method inherited from CodeGen will output a code file and + an interface file, .f90 and .h respectively. + + """ + + code_extension = "f90" + interface_extension = "h" + + def __init__(self, project='project', printer=None): + super().__init__(project) + self.printer = printer or FCodePrinter() + + def _get_header(self): + """Writes a common header for the generated files.""" + code_lines = [] + code_lines.append("!" 
+ "*"*78 + '\n') + tmp = header_comment % {"version": sympy_version, + "project": self.project} + for line in tmp.splitlines(): + code_lines.append("!*%s*\n" % line.center(76)) + code_lines.append("!" + "*"*78 + '\n') + return code_lines + + def _preprocessor_statements(self, prefix): + return [] + + def _get_routine_opening(self, routine): + """Returns the opening statements of the fortran routine.""" + code_list = [] + if len(routine.results) > 1: + raise CodeGenError( + "Fortran only supports a single or no return value.") + elif len(routine.results) == 1: + result = routine.results[0] + code_list.append(result.get_datatype('fortran')) + code_list.append("function") + else: + code_list.append("subroutine") + + args = ", ".join("%s" % self._get_symbol(arg.name) + for arg in routine.arguments) + + call_sig = "{}({})\n".format(routine.name, args) + # Fortran 95 requires all lines be less than 132 characters, so wrap + # this line before appending. + call_sig = ' &\n'.join(textwrap.wrap(call_sig, + width=60, + break_long_words=False)) + '\n' + code_list.append(call_sig) + code_list = [' '.join(code_list)] + code_list.append('implicit none\n') + return code_list + + def _declare_arguments(self, routine): + # argument type declarations + code_list = [] + array_list = [] + scalar_list = [] + for arg in routine.arguments: + + if isinstance(arg, InputArgument): + typeinfo = "%s, intent(in)" % arg.get_datatype('fortran') + elif isinstance(arg, InOutArgument): + typeinfo = "%s, intent(inout)" % arg.get_datatype('fortran') + elif isinstance(arg, OutputArgument): + typeinfo = "%s, intent(out)" % arg.get_datatype('fortran') + else: + raise CodeGenError("Unknown Argument type: %s" % type(arg)) + + fprint = self._get_symbol + + if arg.dimensions: + # fortran arrays start at 1 + dimstr = ", ".join(["%s:%s" % ( + fprint(dim[0] + 1), fprint(dim[1] + 1)) + for dim in arg.dimensions]) + typeinfo += ", dimension(%s)" % dimstr + array_list.append("%s :: %s\n" % (typeinfo, fprint(arg.name))) + else: + scalar_list.append("%s :: %s\n" % (typeinfo, fprint(arg.name))) + + # scalars first, because they can be used in array declarations + code_list.extend(scalar_list) + code_list.extend(array_list) + + return code_list + + def _declare_globals(self, routine): + # Global variables not explicitly declared within Fortran 90 functions. + # Note: a future F77 mode may need to generate "common" blocks. + return [] + + def _declare_locals(self, routine): + code_list = [] + for var in sorted(routine.local_vars, key=str): + typeinfo = get_default_datatype(var) + code_list.append("%s :: %s\n" % ( + typeinfo.fname, self._get_symbol(var))) + return code_list + + def _get_routine_ending(self, routine): + """Returns the closing statements of the fortran routine.""" + if len(routine.results) == 1: + return ["end function\n"] + else: + return ["end subroutine\n"] + + def get_interface(self, routine): + """Returns a string for the function interface. + + The routine should have a single result object, which can be None. + If the routine has multiple result objects, a CodeGenError is + raised. 
+ + See: https://en.wikipedia.org/wiki/Function_prototype + + """ + prototype = [ "interface\n" ] + prototype.extend(self._get_routine_opening(routine)) + prototype.extend(self._declare_arguments(routine)) + prototype.extend(self._get_routine_ending(routine)) + prototype.append("end interface\n") + + return "".join(prototype) + + def _call_printer(self, routine): + declarations = [] + code_lines = [] + for result in routine.result_variables: + if isinstance(result, Result): + assign_to = routine.name + elif isinstance(result, (OutputArgument, InOutArgument)): + assign_to = result.result_var + + constants, not_fortran, f_expr = self._printer_method_with_settings( + 'doprint', {"human": False, "source_format": 'free', "standard": 95}, + result.expr, assign_to=assign_to) + + for obj, v in sorted(constants, key=str): + t = get_default_datatype(obj) + declarations.append( + "%s, parameter :: %s = %s\n" % (t.fname, obj, v)) + for obj in sorted(not_fortran, key=str): + t = get_default_datatype(obj) + if isinstance(obj, Function): + name = obj.func + else: + name = obj + declarations.append("%s :: %s\n" % (t.fname, name)) + + code_lines.append("%s\n" % f_expr) + return declarations + code_lines + + def _indent_code(self, codelines): + return self._printer_method_with_settings( + 'indent_code', {"human": False, "source_format": 'free'}, codelines) + + def dump_f95(self, routines, f, prefix, header=True, empty=True): + # check that symbols are unique with ignorecase + for r in routines: + lowercase = {str(x).lower() for x in r.variables} + orig_case = {str(x) for x in r.variables} + if len(lowercase) < len(orig_case): + raise CodeGenError("Fortran ignores case. Got symbols: %s" % + (", ".join([str(var) for var in r.variables]))) + self.dump_code(routines, f, prefix, header, empty) + dump_f95.extension = code_extension # type: ignore + dump_f95.__doc__ = CodeGen.dump_code.__doc__ + + def dump_h(self, routines, f, prefix, header=True, empty=True): + """Writes the interface to a header file. + + This file contains all the function declarations. + + Parameters + ========== + + routines : list + A list of Routine instances. + + f : file-like + Where to write the file. + + prefix : string + The filename prefix. + + header : bool, optional + When True, a header comment is included on top of each source + file. [default : True] + + empty : bool, optional + When True, empty lines are included to structure the source + files. [default : True] + + """ + if header: + print(''.join(self._get_header()), file=f) + if empty: + print(file=f) + # declaration of the function prototypes + for routine in routines: + prototype = self.get_interface(routine) + f.write(prototype) + if empty: + print(file=f) + dump_h.extension = interface_extension # type: ignore + + # This list of dump functions is used by CodeGen.write to know which dump + # functions it has to call. + dump_fns = [dump_f95, dump_h] + + +class JuliaCodeGen(CodeGen): + """Generator for Julia code. + + The .write() method inherited from CodeGen will output a code file + .jl. 
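+
+    A brief usage sketch (an illustrative addition; the generated body is
+    indicative only, so it is skipped by the doctester):
+
+    >>> from sympy.abc import x, y
+    >>> from sympy.utilities.codegen import codegen
+    >>> [(jl_name, jl_code)] = codegen(("f", x + y), "Julia", header=False, empty=False)
+    >>> jl_name
+    'f.jl'
+    >>> print(jl_code)  # doctest: +SKIP
+    function f(x, y)
+        out1 = x + y
+        return out1
+    end
+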
+ + """ + + code_extension = "jl" + + def __init__(self, project='project', printer=None): + super().__init__(project) + self.printer = printer or JuliaCodePrinter() + + def routine(self, name, expr, argument_sequence, global_vars): + """Specialized Routine creation for Julia.""" + + if is_sequence(expr) and not isinstance(expr, (MatrixBase, MatrixExpr)): + if not expr: + raise ValueError("No expression given") + expressions = Tuple(*expr) + else: + expressions = Tuple(expr) + + # local variables + local_vars = {i.label for i in expressions.atoms(Idx)} + + # global variables + global_vars = set() if global_vars is None else set(global_vars) + + # symbols that should be arguments + old_symbols = expressions.free_symbols - local_vars - global_vars + symbols = set() + for s in old_symbols: + if isinstance(s, Idx): + symbols.update(s.args[1].free_symbols) + elif not isinstance(s, Indexed): + symbols.add(s) + + # Julia supports multiple return values + return_vals = [] + output_args = [] + for (i, expr) in enumerate(expressions): + if isinstance(expr, Equality): + out_arg = expr.lhs + expr = expr.rhs + symbol = out_arg + if isinstance(out_arg, Indexed): + dims = tuple([ (S.One, dim) for dim in out_arg.shape]) + symbol = out_arg.base.label + output_args.append(InOutArgument(symbol, out_arg, expr, dimensions=dims)) + if not isinstance(out_arg, (Indexed, Symbol, MatrixSymbol)): + raise CodeGenError("Only Indexed, Symbol, or MatrixSymbol " + "can define output arguments.") + + return_vals.append(Result(expr, name=symbol, result_var=out_arg)) + if not expr.has(symbol): + # this is a pure output: remove from the symbols list, so + # it doesn't become an input. + symbols.remove(symbol) + + else: + # we have no name for this output + return_vals.append(Result(expr, name='out%d' % (i+1))) + + # setup input argument list + output_args.sort(key=lambda x: str(x.name)) + arg_list = list(output_args) + array_symbols = {} + for array in expressions.atoms(Indexed): + array_symbols[array.base.label] = array + for array in expressions.atoms(MatrixSymbol): + array_symbols[array] = array + + for symbol in sorted(symbols, key=str): + arg_list.append(InputArgument(symbol)) + + if argument_sequence is not None: + # if the user has supplied IndexedBase instances, we'll accept that + new_sequence = [] + for arg in argument_sequence: + if isinstance(arg, IndexedBase): + new_sequence.append(arg.label) + else: + new_sequence.append(arg) + argument_sequence = new_sequence + + missing = [x for x in arg_list if x.name not in argument_sequence] + if missing: + msg = "Argument list didn't specify: {0} " + msg = msg.format(", ".join([str(m.name) for m in missing])) + raise CodeGenArgumentListError(msg, missing) + + # create redundant arguments to produce the requested sequence + name_arg_dict = {x.name: x for x in arg_list} + new_args = [] + for symbol in argument_sequence: + try: + new_args.append(name_arg_dict[symbol]) + except KeyError: + new_args.append(InputArgument(symbol)) + arg_list = new_args + + return Routine(name, arg_list, return_vals, local_vars, global_vars) + + def _get_header(self): + """Writes a common header for the generated files.""" + code_lines = [] + tmp = header_comment % {"version": sympy_version, + "project": self.project} + for line in tmp.splitlines(): + if line == '': + code_lines.append("#\n") + else: + code_lines.append("# %s\n" % line) + return code_lines + + def _preprocessor_statements(self, prefix): + return [] + + def _get_routine_opening(self, routine): + """Returns the opening statements 
of the routine.""" + code_list = [] + code_list.append("function ") + + # Inputs + args = [] + for i, arg in enumerate(routine.arguments): + if isinstance(arg, OutputArgument): + raise CodeGenError("Julia: invalid argument of type %s" % + str(type(arg))) + if isinstance(arg, (InputArgument, InOutArgument)): + args.append("%s" % self._get_symbol(arg.name)) + args = ", ".join(args) + code_list.append("%s(%s)\n" % (routine.name, args)) + code_list = [ "".join(code_list) ] + + return code_list + + def _declare_arguments(self, routine): + return [] + + def _declare_globals(self, routine): + return [] + + def _declare_locals(self, routine): + return [] + + def _get_routine_ending(self, routine): + outs = [] + for result in routine.results: + if isinstance(result, Result): + # Note: name not result_var; want `y` not `y[i]` for Indexed + s = self._get_symbol(result.name) + else: + raise CodeGenError("unexpected object in Routine results") + outs.append(s) + return ["return " + ", ".join(outs) + "\nend\n"] + + def _call_printer(self, routine): + declarations = [] + code_lines = [] + for i, result in enumerate(routine.results): + if isinstance(result, Result): + assign_to = result.result_var + else: + raise CodeGenError("unexpected object in Routine results") + + constants, not_supported, jl_expr = self._printer_method_with_settings( + 'doprint', {"human": False}, result.expr, assign_to=assign_to) + + for obj, v in sorted(constants, key=str): + declarations.append( + "%s = %s\n" % (obj, v)) + for obj in sorted(not_supported, key=str): + if isinstance(obj, Function): + name = obj.func + else: + name = obj + declarations.append( + "# unsupported: %s\n" % (name)) + code_lines.append("%s\n" % (jl_expr)) + return declarations + code_lines + + def _indent_code(self, codelines): + # Note that indenting seems to happen twice, first + # statement-by-statement by JuliaPrinter then again here. + p = JuliaCodePrinter({'human': False}) + return p.indent_code(codelines) + + def dump_jl(self, routines, f, prefix, header=True, empty=True): + self.dump_code(routines, f, prefix, header, empty) + + dump_jl.extension = code_extension # type: ignore + dump_jl.__doc__ = CodeGen.dump_code.__doc__ + + # This list of dump functions is used by CodeGen.write to know which dump + # functions it has to call. + dump_fns = [dump_jl] + + +class OctaveCodeGen(CodeGen): + """Generator for Octave code. + + The .write() method inherited from CodeGen will output a code file + .m. + + Octave .m files usually contain one function. That function name should + match the filename (``prefix``). If you pass multiple ``name_expr`` pairs, + the latter ones are presumed to be private functions accessed by the + primary function. + + You should only pass inputs to ``argument_sequence``: outputs are ordered + according to their order in ``name_expr``. + + """ + + code_extension = "m" + + def __init__(self, project='project', printer=None): + super().__init__(project) + self.printer = printer or OctaveCodePrinter() + + def routine(self, name, expr, argument_sequence, global_vars): + """Specialized Routine creation for Octave.""" + + # FIXME: this is probably general enough for other high-level + # languages, perhaps its the C/Fortran one that is specialized! 
+ + if is_sequence(expr) and not isinstance(expr, (MatrixBase, MatrixExpr)): + if not expr: + raise ValueError("No expression given") + expressions = Tuple(*expr) + else: + expressions = Tuple(expr) + + # local variables + local_vars = {i.label for i in expressions.atoms(Idx)} + + # global variables + global_vars = set() if global_vars is None else set(global_vars) + + # symbols that should be arguments + old_symbols = expressions.free_symbols - local_vars - global_vars + symbols = set() + for s in old_symbols: + if isinstance(s, Idx): + symbols.update(s.args[1].free_symbols) + elif not isinstance(s, Indexed): + symbols.add(s) + + # Octave supports multiple return values + return_vals = [] + for (i, expr) in enumerate(expressions): + if isinstance(expr, Equality): + out_arg = expr.lhs + expr = expr.rhs + symbol = out_arg + if isinstance(out_arg, Indexed): + symbol = out_arg.base.label + if not isinstance(out_arg, (Indexed, Symbol, MatrixSymbol)): + raise CodeGenError("Only Indexed, Symbol, or MatrixSymbol " + "can define output arguments.") + + return_vals.append(Result(expr, name=symbol, result_var=out_arg)) + if not expr.has(symbol): + # this is a pure output: remove from the symbols list, so + # it doesn't become an input. + symbols.remove(symbol) + + else: + # we have no name for this output + return_vals.append(Result(expr, name='out%d' % (i+1))) + + # setup input argument list + arg_list = [] + array_symbols = {} + for array in expressions.atoms(Indexed): + array_symbols[array.base.label] = array + for array in expressions.atoms(MatrixSymbol): + array_symbols[array] = array + + for symbol in sorted(symbols, key=str): + arg_list.append(InputArgument(symbol)) + + if argument_sequence is not None: + # if the user has supplied IndexedBase instances, we'll accept that + new_sequence = [] + for arg in argument_sequence: + if isinstance(arg, IndexedBase): + new_sequence.append(arg.label) + else: + new_sequence.append(arg) + argument_sequence = new_sequence + + missing = [x for x in arg_list if x.name not in argument_sequence] + if missing: + msg = "Argument list didn't specify: {0} " + msg = msg.format(", ".join([str(m.name) for m in missing])) + raise CodeGenArgumentListError(msg, missing) + + # create redundant arguments to produce the requested sequence + name_arg_dict = {x.name: x for x in arg_list} + new_args = [] + for symbol in argument_sequence: + try: + new_args.append(name_arg_dict[symbol]) + except KeyError: + new_args.append(InputArgument(symbol)) + arg_list = new_args + + return Routine(name, arg_list, return_vals, local_vars, global_vars) + + def _get_header(self): + """Writes a common header for the generated files.""" + code_lines = [] + tmp = header_comment % {"version": sympy_version, + "project": self.project} + for line in tmp.splitlines(): + if line == '': + code_lines.append("%\n") + else: + code_lines.append("%% %s\n" % line) + return code_lines + + def _preprocessor_statements(self, prefix): + return [] + + def _get_routine_opening(self, routine): + """Returns the opening statements of the routine.""" + code_list = [] + code_list.append("function ") + + # Outputs + outs = [] + for i, result in enumerate(routine.results): + if isinstance(result, Result): + # Note: name not result_var; want `y` not `y(i)` for Indexed + s = self._get_symbol(result.name) + else: + raise CodeGenError("unexpected object in Routine results") + outs.append(s) + if len(outs) > 1: + code_list.append("[" + (", ".join(outs)) + "]") + else: + code_list.append("".join(outs)) + code_list.append(" = 
") + + # Inputs + args = [] + for i, arg in enumerate(routine.arguments): + if isinstance(arg, (OutputArgument, InOutArgument)): + raise CodeGenError("Octave: invalid argument of type %s" % + str(type(arg))) + if isinstance(arg, InputArgument): + args.append("%s" % self._get_symbol(arg.name)) + args = ", ".join(args) + code_list.append("%s(%s)\n" % (routine.name, args)) + code_list = [ "".join(code_list) ] + + return code_list + + def _declare_arguments(self, routine): + return [] + + def _declare_globals(self, routine): + if not routine.global_vars: + return [] + s = " ".join(sorted([self._get_symbol(g) for g in routine.global_vars])) + return ["global " + s + "\n"] + + def _declare_locals(self, routine): + return [] + + def _get_routine_ending(self, routine): + return ["end\n"] + + def _call_printer(self, routine): + declarations = [] + code_lines = [] + for i, result in enumerate(routine.results): + if isinstance(result, Result): + assign_to = result.result_var + else: + raise CodeGenError("unexpected object in Routine results") + + constants, not_supported, oct_expr = self._printer_method_with_settings( + 'doprint', {"human": False}, result.expr, assign_to=assign_to) + + for obj, v in sorted(constants, key=str): + declarations.append( + " %s = %s; %% constant\n" % (obj, v)) + for obj in sorted(not_supported, key=str): + if isinstance(obj, Function): + name = obj.func + else: + name = obj + declarations.append( + " %% unsupported: %s\n" % (name)) + code_lines.append("%s\n" % (oct_expr)) + return declarations + code_lines + + def _indent_code(self, codelines): + return self._printer_method_with_settings( + 'indent_code', {"human": False}, codelines) + + def dump_m(self, routines, f, prefix, header=True, empty=True, inline=True): + # Note used to call self.dump_code() but we need more control for header + + code_lines = self._preprocessor_statements(prefix) + + for i, routine in enumerate(routines): + if i > 0: + if empty: + code_lines.append("\n") + code_lines.extend(self._get_routine_opening(routine)) + if i == 0: + if routine.name != prefix: + raise ValueError('Octave function name should match prefix') + if header: + code_lines.append("%" + prefix.upper() + + " Autogenerated by SymPy\n") + code_lines.append(''.join(self._get_header())) + code_lines.extend(self._declare_arguments(routine)) + code_lines.extend(self._declare_globals(routine)) + code_lines.extend(self._declare_locals(routine)) + if empty: + code_lines.append("\n") + code_lines.extend(self._call_printer(routine)) + if empty: + code_lines.append("\n") + code_lines.extend(self._get_routine_ending(routine)) + + code_lines = self._indent_code(''.join(code_lines)) + + if code_lines: + f.write(code_lines) + + dump_m.extension = code_extension # type: ignore + dump_m.__doc__ = CodeGen.dump_code.__doc__ + + # This list of dump functions is used by CodeGen.write to know which dump + # functions it has to call. + dump_fns = [dump_m] + +class RustCodeGen(CodeGen): + """Generator for Rust code. 
+ + The .write() method inherited from CodeGen will output a code file + .rs + + """ + + code_extension = "rs" + + def __init__(self, project="project", printer=None): + super().__init__(project=project) + self.printer = printer or RustCodePrinter() + + def routine(self, name, expr, argument_sequence, global_vars): + """Specialized Routine creation for Rust.""" + + if is_sequence(expr) and not isinstance(expr, (MatrixBase, MatrixExpr)): + if not expr: + raise ValueError("No expression given") + expressions = Tuple(*expr) + else: + expressions = Tuple(expr) + + # local variables + local_vars = {i.label for i in expressions.atoms(Idx)} + + # global variables + global_vars = set() if global_vars is None else set(global_vars) + + # symbols that should be arguments + symbols = expressions.free_symbols - local_vars - global_vars - expressions.atoms(Indexed) + + # Rust supports multiple return values + return_vals = [] + output_args = [] + for (i, expr) in enumerate(expressions): + if isinstance(expr, Equality): + out_arg = expr.lhs + expr = expr.rhs + symbol = out_arg + if isinstance(out_arg, Indexed): + dims = tuple([ (S.One, dim) for dim in out_arg.shape]) + symbol = out_arg.base.label + output_args.append(InOutArgument(symbol, out_arg, expr, dimensions=dims)) + if not isinstance(out_arg, (Indexed, Symbol, MatrixSymbol)): + raise CodeGenError("Only Indexed, Symbol, or MatrixSymbol " + "can define output arguments.") + + return_vals.append(Result(expr, name=symbol, result_var=out_arg)) + if not expr.has(symbol): + # this is a pure output: remove from the symbols list, so + # it doesn't become an input. + symbols.remove(symbol) + + else: + # we have no name for this output + return_vals.append(Result(expr, name='out%d' % (i+1))) + + # setup input argument list + output_args.sort(key=lambda x: str(x.name)) + arg_list = list(output_args) + array_symbols = {} + for array in expressions.atoms(Indexed): + array_symbols[array.base.label] = array + for array in expressions.atoms(MatrixSymbol): + array_symbols[array] = array + + for symbol in sorted(symbols, key=str): + arg_list.append(InputArgument(symbol)) + + if argument_sequence is not None: + # if the user has supplied IndexedBase instances, we'll accept that + new_sequence = [] + for arg in argument_sequence: + if isinstance(arg, IndexedBase): + new_sequence.append(arg.label) + else: + new_sequence.append(arg) + argument_sequence = new_sequence + + missing = [x for x in arg_list if x.name not in argument_sequence] + if missing: + msg = "Argument list didn't specify: {0} " + msg = msg.format(", ".join([str(m.name) for m in missing])) + raise CodeGenArgumentListError(msg, missing) + + # create redundant arguments to produce the requested sequence + name_arg_dict = {x.name: x for x in arg_list} + new_args = [] + for symbol in argument_sequence: + try: + new_args.append(name_arg_dict[symbol]) + except KeyError: + new_args.append(InputArgument(symbol)) + arg_list = new_args + + return Routine(name, arg_list, return_vals, local_vars, global_vars) + + + def _get_header(self): + """Writes a common header for the generated files.""" + code_lines = [] + code_lines.append("/*\n") + tmp = header_comment % {"version": sympy_version, + "project": self.project} + for line in tmp.splitlines(): + code_lines.append((" *%s" % line.center(76)).rstrip() + "\n") + code_lines.append(" */\n") + return code_lines + + def get_prototype(self, routine): + """Returns a string for the function prototype of the routine. 
+ + If the routine has multiple result objects, an CodeGenError is + raised. + + See: https://en.wikipedia.org/wiki/Function_prototype + + """ + results = [i.get_datatype('Rust') for i in routine.results] + + if len(results) == 1: + rstype = " -> " + results[0] + elif len(routine.results) > 1: + rstype = " -> (" + ", ".join(results) + ")" + else: + rstype = "" + + type_args = [] + for arg in routine.arguments: + name = self.printer.doprint(arg.name) + if arg.dimensions or isinstance(arg, ResultBase): + type_args.append(("*%s" % name, arg.get_datatype('Rust'))) + else: + type_args.append((name, arg.get_datatype('Rust'))) + arguments = ", ".join([ "%s: %s" % t for t in type_args]) + return "fn %s(%s)%s" % (routine.name, arguments, rstype) + + def _preprocessor_statements(self, prefix): + code_lines = [] + # code_lines.append("use std::f64::consts::*;\n") + return code_lines + + def _get_routine_opening(self, routine): + prototype = self.get_prototype(routine) + return ["%s {\n" % prototype] + + def _declare_arguments(self, routine): + # arguments are declared in prototype + return [] + + def _declare_globals(self, routine): + # global variables are not explicitly declared within C functions + return [] + + def _declare_locals(self, routine): + # loop variables are declared in loop statement + return [] + + def _call_printer(self, routine): + + code_lines = [] + declarations = [] + returns = [] + + # Compose a list of symbols to be dereferenced in the function + # body. These are the arguments that were passed by a reference + # pointer, excluding arrays. + dereference = [] + for arg in routine.arguments: + if isinstance(arg, ResultBase) and not arg.dimensions: + dereference.append(arg.name) + + for i, result in enumerate(routine.results): + if isinstance(result, Result): + assign_to = result.result_var + returns.append(str(result.result_var)) + else: + raise CodeGenError("unexpected object in Routine results") + + constants, not_supported, rs_expr = self._printer_method_with_settings( + 'doprint', {"human": False}, result.expr, assign_to=assign_to) + + for name, value in sorted(constants, key=str): + declarations.append("const %s: f64 = %s;\n" % (name, value)) + + for obj in sorted(not_supported, key=str): + if isinstance(obj, Function): + name = obj.func + else: + name = obj + declarations.append("// unsupported: %s\n" % (name)) + + code_lines.append("let %s\n" % rs_expr); + + if len(returns) > 1: + returns = ['(' + ', '.join(returns) + ')'] + + returns.append('\n') + + return declarations + code_lines + returns + + def _get_routine_ending(self, routine): + return ["}\n"] + + def dump_rs(self, routines, f, prefix, header=True, empty=True): + self.dump_code(routines, f, prefix, header, empty) + + dump_rs.extension = code_extension # type: ignore + dump_rs.__doc__ = CodeGen.dump_code.__doc__ + + # This list of dump functions is used by CodeGen.write to know which dump + # functions it has to call. + dump_fns = [dump_rs] + + + + +def get_code_generator(language, project=None, standard=None, printer = None): + if language == 'C': + if standard is None: + pass + elif standard.lower() == 'c89': + language = 'C89' + elif standard.lower() == 'c99': + language = 'C99' + CodeGenClass = {"C": CCodeGen, "C89": C89CodeGen, "C99": C99CodeGen, + "F95": FCodeGen, "JULIA": JuliaCodeGen, + "OCTAVE": OctaveCodeGen, + "RUST": RustCodeGen}.get(language.upper()) + if CodeGenClass is None: + raise ValueError("Language '%s' is not supported." 
% language) + return CodeGenClass(project, printer) + + +# +# Friendly functions +# + + +def codegen(name_expr, language=None, prefix=None, project="project", + to_files=False, header=True, empty=True, argument_sequence=None, + global_vars=None, standard=None, code_gen=None, printer = None): + """Generate source code for expressions in a given language. + + Parameters + ========== + + name_expr : tuple, or list of tuples + A single (name, expression) tuple or a list of (name, expression) + tuples. Each tuple corresponds to a routine. If the expression is + an equality (an instance of class Equality) the left hand side is + considered an output argument. If expression is an iterable, then + the routine will have multiple outputs. + + language : string, + A string that indicates the source code language. This is case + insensitive. Currently, 'C', 'F95' and 'Octave' are supported. + 'Octave' generates code compatible with both Octave and Matlab. + + prefix : string, optional + A prefix for the names of the files that contain the source code. + Language-dependent suffixes will be appended. If omitted, the name + of the first name_expr tuple is used. + + project : string, optional + A project name, used for making unique preprocessor instructions. + [default: "project"] + + to_files : bool, optional + When True, the code will be written to one or more files with the + given prefix, otherwise strings with the names and contents of + these files are returned. [default: False] + + header : bool, optional + When True, a header is written on top of each source file. + [default: True] + + empty : bool, optional + When True, empty lines are used to structure the code. + [default: True] + + argument_sequence : iterable, optional + Sequence of arguments for the routine in a preferred order. A + CodeGenError is raised if required arguments are missing. + Redundant arguments are used without warning. If omitted, + arguments will be ordered alphabetically, but with all input + arguments first, and then output or in-out arguments. + + global_vars : iterable, optional + Sequence of global variables used by the routine. Variables + listed here will not show up as function arguments. + + standard : string + + code_gen : CodeGen instance + An instance of a CodeGen subclass. Overrides ``language``. + + Examples + ======== + + >>> from sympy.utilities.codegen import codegen + >>> from sympy.abc import x, y, z + >>> [(c_name, c_code), (h_name, c_header)] = codegen( + ... ("f", x+y*z), "C89", "test", header=False, empty=False) + >>> print(c_name) + test.c + >>> print(c_code) + #include "test.h" + #include + double f(double x, double y, double z) { + double f_result; + f_result = x + y*z; + return f_result; + } + + >>> print(h_name) + test.h + >>> print(c_header) + #ifndef PROJECT__TEST__H + #define PROJECT__TEST__H + double f(double x, double y, double z); + #endif + + + Another example using Equality objects to give named outputs. Here the + filename (prefix) is taken from the first (name, expr) pair. + + >>> from sympy.abc import f, g + >>> from sympy import Eq + >>> [(c_name, c_code), (h_name, c_header)] = codegen( + ... [("myfcn", x + y), ("fcn2", [Eq(f, 2*x), Eq(g, y)])], + ... 
"C99", header=False, empty=False) + >>> print(c_name) + myfcn.c + >>> print(c_code) + #include "myfcn.h" + #include + double myfcn(double x, double y) { + double myfcn_result; + myfcn_result = x + y; + return myfcn_result; + } + void fcn2(double x, double y, double *f, double *g) { + (*f) = 2*x; + (*g) = y; + } + + + If the generated function(s) will be part of a larger project where various + global variables have been defined, the 'global_vars' option can be used + to remove the specified variables from the function signature + + >>> from sympy.utilities.codegen import codegen + >>> from sympy.abc import x, y, z + >>> [(f_name, f_code), header] = codegen( + ... ("f", x+y*z), "F95", header=False, empty=False, + ... argument_sequence=(x, y), global_vars=(z,)) + >>> print(f_code) + REAL*8 function f(x, y) + implicit none + REAL*8, intent(in) :: x + REAL*8, intent(in) :: y + f = x + y*z + end function + + + """ + + # Initialize the code generator. + if language is None: + if code_gen is None: + raise ValueError("Need either language or code_gen") + else: + if code_gen is not None: + raise ValueError("You cannot specify both language and code_gen.") + code_gen = get_code_generator(language, project, standard, printer) + + if isinstance(name_expr[0], str): + # single tuple is given, turn it into a singleton list with a tuple. + name_expr = [name_expr] + + if prefix is None: + prefix = name_expr[0][0] + + # Construct Routines appropriate for this code_gen from (name, expr) pairs. + routines = [] + for name, expr in name_expr: + routines.append(code_gen.routine(name, expr, argument_sequence, + global_vars)) + + # Write the code. + return code_gen.write(routines, prefix, to_files, header, empty) + + +def make_routine(name, expr, argument_sequence=None, + global_vars=None, language="F95"): + """A factory that makes an appropriate Routine from an expression. + + Parameters + ========== + + name : string + The name of this routine in the generated code. + + expr : expression or list/tuple of expressions + A SymPy expression that the Routine instance will represent. If + given a list or tuple of expressions, the routine will be + considered to have multiple return values and/or output arguments. + + argument_sequence : list or tuple, optional + List arguments for the routine in a preferred order. If omitted, + the results are language dependent, for example, alphabetical order + or in the same order as the given expressions. + + global_vars : iterable, optional + Sequence of global variables used by the routine. Variables + listed here will not show up as function arguments. + + language : string, optional + Specify a target language. The Routine itself should be + language-agnostic but the precise way one is created, error + checking, etc depend on the language. [default: "F95"]. + + Notes + ===== + + A decision about whether to use output arguments or return values is made + depending on both the language and the particular mathematical expressions. + For an expression of type Equality, the left hand side is typically made + into an OutputArgument (or perhaps an InOutArgument if appropriate). + Otherwise, typically, the calculated expression is made a return values of + the routine. 
+ + Examples + ======== + + >>> from sympy.utilities.codegen import make_routine + >>> from sympy.abc import x, y, f, g + >>> from sympy import Eq + >>> r = make_routine('test', [Eq(f, 2*x), Eq(g, x + y)]) + >>> [arg.result_var for arg in r.results] + [] + >>> [arg.name for arg in r.arguments] + [x, y, f, g] + >>> [arg.name for arg in r.result_variables] + [f, g] + >>> r.local_vars + set() + + Another more complicated example with a mixture of specified and + automatically-assigned names. Also has Matrix output. + + >>> from sympy import Matrix + >>> r = make_routine('fcn', [x*y, Eq(f, 1), Eq(g, x + g), Matrix([[x, 2]])]) + >>> [arg.result_var for arg in r.results] # doctest: +SKIP + [result_5397460570204848505] + >>> [arg.expr for arg in r.results] + [x*y] + >>> [arg.name for arg in r.arguments] # doctest: +SKIP + [x, y, f, g, out_8598435338387848786] + + We can examine the various arguments more closely: + + >>> from sympy.utilities.codegen import (InputArgument, OutputArgument, + ... InOutArgument) + >>> [a.name for a in r.arguments if isinstance(a, InputArgument)] + [x, y] + + >>> [a.name for a in r.arguments if isinstance(a, OutputArgument)] # doctest: +SKIP + [f, out_8598435338387848786] + >>> [a.expr for a in r.arguments if isinstance(a, OutputArgument)] + [1, Matrix([[x, 2]])] + + >>> [a.name for a in r.arguments if isinstance(a, InOutArgument)] + [g] + >>> [a.expr for a in r.arguments if isinstance(a, InOutArgument)] + [g + x] + + """ + + # initialize a new code generator + code_gen = get_code_generator(language) + + return code_gen.routine(name, expr, argument_sequence, global_vars) diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/decorator.py b/venv/lib/python3.10/site-packages/sympy/utilities/decorator.py new file mode 100644 index 0000000000000000000000000000000000000000..6d4e44733f93d5dc62f71559d7466ae58e41a6e9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/decorator.py @@ -0,0 +1,330 @@ +"""Useful utility decorators. """ + +import sys +import types +import inspect +from functools import wraps, update_wrapper + +from sympy.utilities.exceptions import sympy_deprecation_warning + +def threaded_factory(func, use_add): + """A factory for ``threaded`` decorators. """ + from sympy.core import sympify + from sympy.matrices import MatrixBase + from sympy.utilities.iterables import iterable + + @wraps(func) + def threaded_func(expr, *args, **kwargs): + if isinstance(expr, MatrixBase): + return expr.applyfunc(lambda f: func(f, *args, **kwargs)) + elif iterable(expr): + try: + return expr.__class__([func(f, *args, **kwargs) for f in expr]) + except TypeError: + return expr + else: + expr = sympify(expr) + + if use_add and expr.is_Add: + return expr.__class__(*[ func(f, *args, **kwargs) for f in expr.args ]) + elif expr.is_Relational: + return expr.__class__(func(expr.lhs, *args, **kwargs), + func(expr.rhs, *args, **kwargs)) + else: + return func(expr, *args, **kwargs) + + return threaded_func + + +def threaded(func): + """Apply ``func`` to sub--elements of an object, including :class:`~.Add`. + + This decorator is intended to make it uniformly possible to apply a + function to all elements of composite objects, e.g. matrices, lists, tuples + and other iterable containers, or just expressions. + + This version of :func:`threaded` decorator allows threading over + elements of :class:`~.Add` class. If this behavior is not desirable + use :func:`xthreaded` decorator. 
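+
+    For example (an illustrative sketch; ``double`` is a hypothetical
+    user-defined function, not part of SymPy):
+
+    >>> from sympy.utilities.decorator import threaded
+    >>> from sympy.abc import x, y
+    >>> @threaded
+    ... def double(expr):
+    ...     return 2*expr
+    >>> double([x, y])
+    [2*x, 2*y]
+    >>> double(x + y)
+    2*x + 2*y
+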
+ + Functions using this decorator must have the following signature:: + + @threaded + def function(expr, *args, **kwargs): + + """ + return threaded_factory(func, True) + + +def xthreaded(func): + """Apply ``func`` to sub--elements of an object, excluding :class:`~.Add`. + + This decorator is intended to make it uniformly possible to apply a + function to all elements of composite objects, e.g. matrices, lists, tuples + and other iterable containers, or just expressions. + + This version of :func:`threaded` decorator disallows threading over + elements of :class:`~.Add` class. If this behavior is not desirable + use :func:`threaded` decorator. + + Functions using this decorator must have the following signature:: + + @xthreaded + def function(expr, *args, **kwargs): + + """ + return threaded_factory(func, False) + + +def conserve_mpmath_dps(func): + """After the function finishes, resets the value of mpmath.mp.dps to + the value it had before the function was run.""" + import mpmath + + def func_wrapper(*args, **kwargs): + dps = mpmath.mp.dps + try: + return func(*args, **kwargs) + finally: + mpmath.mp.dps = dps + + func_wrapper = update_wrapper(func_wrapper, func) + return func_wrapper + + +class no_attrs_in_subclass: + """Don't 'inherit' certain attributes from a base class + + >>> from sympy.utilities.decorator import no_attrs_in_subclass + + >>> class A(object): + ... x = 'test' + + >>> A.x = no_attrs_in_subclass(A, A.x) + + >>> class B(A): + ... pass + + >>> hasattr(A, 'x') + True + >>> hasattr(B, 'x') + False + + """ + def __init__(self, cls, f): + self.cls = cls + self.f = f + + def __get__(self, instance, owner=None): + if owner == self.cls: + if hasattr(self.f, '__get__'): + return self.f.__get__(instance, owner) + return self.f + raise AttributeError + + +def doctest_depends_on(exe=None, modules=None, disable_viewers=None, python_version=None): + """ + Adds metadata about the dependencies which need to be met for doctesting + the docstrings of the decorated objects. + + exe should be a list of executables + + modules should be a list of modules + + disable_viewers should be a list of viewers for preview() to disable + + python_version should be the minimum Python version required, as a tuple + (like (3, 0)) + """ + dependencies = {} + if exe is not None: + dependencies['executables'] = exe + if modules is not None: + dependencies['modules'] = modules + if disable_viewers is not None: + dependencies['disable_viewers'] = disable_viewers + if python_version is not None: + dependencies['python_version'] = python_version + + def skiptests(): + from sympy.testing.runtests import DependencyError, SymPyDocTests, PyTestReporter # lazy import + r = PyTestReporter() + t = SymPyDocTests(r, None) + try: + t._check_dependencies(**dependencies) + except DependencyError: + return True # Skip doctests + else: + return False # Run doctests + + def depends_on_deco(fn): + fn._doctest_depends_on = dependencies + fn.__doctest_skip__ = skiptests + + if inspect.isclass(fn): + fn._doctest_depdends_on = no_attrs_in_subclass( + fn, fn._doctest_depends_on) + fn.__doctest_skip__ = no_attrs_in_subclass( + fn, fn.__doctest_skip__) + return fn + + return depends_on_deco + + +def public(obj): + """ + Append ``obj``'s name to global ``__all__`` variable (call site). + + By using this decorator on functions or classes you achieve the same goal + as by filling ``__all__`` variables manually, you just do not have to repeat + yourself (object's name). 
You also know if object is public at definition + site, not at some random location (where ``__all__`` was set). + + Note that in multiple decorator setup (in almost all cases) ``@public`` + decorator must be applied before any other decorators, because it relies + on the pointer to object's global namespace. If you apply other decorators + first, ``@public`` may end up modifying the wrong namespace. + + Examples + ======== + + >>> from sympy.utilities.decorator import public + + >>> __all__ # noqa: F821 + Traceback (most recent call last): + ... + NameError: name '__all__' is not defined + + >>> @public + ... def some_function(): + ... pass + + >>> __all__ # noqa: F821 + ['some_function'] + + """ + if isinstance(obj, types.FunctionType): + ns = obj.__globals__ + name = obj.__name__ + elif isinstance(obj, (type(type), type)): + ns = sys.modules[obj.__module__].__dict__ + name = obj.__name__ + else: + raise TypeError("expected a function or a class, got %s" % obj) + + if "__all__" not in ns: + ns["__all__"] = [name] + else: + ns["__all__"].append(name) + + return obj + + +def memoize_property(propfunc): + """Property decorator that caches the value of potentially expensive + `propfunc` after the first evaluation. The cached value is stored in + the corresponding property name with an attached underscore.""" + attrname = '_' + propfunc.__name__ + sentinel = object() + + @wraps(propfunc) + def accessor(self): + val = getattr(self, attrname, sentinel) + if val is sentinel: + val = propfunc(self) + setattr(self, attrname, val) + return val + + return property(accessor) + + +def deprecated(message, *, deprecated_since_version, + active_deprecations_target, stacklevel=3): + ''' + Mark a function as deprecated. + + This decorator should be used if an entire function or class is + deprecated. If only a certain functionality is deprecated, you should use + :func:`~.warns_deprecated_sympy` directly. This decorator is just a + convenience. There is no functional difference between using this + decorator and calling ``warns_deprecated_sympy()`` at the top of the + function. + + The decorator takes the same arguments as + :func:`~.warns_deprecated_sympy`. See its + documentation for details on what the keywords to this decorator do. + + See the :ref:`deprecation-policy` document for details on when and how + things should be deprecated in SymPy. + + Examples + ======== + + >>> from sympy.utilities.decorator import deprecated + >>> from sympy import simplify + >>> @deprecated("""\ + ... The simplify_this(expr) function is deprecated. Use simplify(expr) + ... instead.""", deprecated_since_version="1.1", + ... active_deprecations_target='simplify-this-deprecation') + ... def simplify_this(expr): + ... """ + ... Simplify ``expr``. + ... + ... .. deprecated:: 1.1 + ... + ... The ``simplify_this`` function is deprecated. Use :func:`simplify` + ... instead. See its documentation for more information. See + ... :ref:`simplify-this-deprecation` for details. + ... + ... """ + ... return simplify(expr) + >>> from sympy.abc import x + >>> simplify_this(x*(x + 1) - x**2) # doctest: +SKIP + :1: SymPyDeprecationWarning: + + The simplify_this(expr) function is deprecated. Use simplify(expr) + instead. + + See https://docs.sympy.org/latest/explanation/active-deprecations.html#simplify-this-deprecation + for details. + + This has been deprecated since SymPy version 1.1. It + will be removed in a future version of SymPy. 
+ + simplify_this(x) + x + + See Also + ======== + sympy.utilities.exceptions.SymPyDeprecationWarning + sympy.utilities.exceptions.sympy_deprecation_warning + sympy.utilities.exceptions.ignore_warnings + sympy.testing.pytest.warns_deprecated_sympy + + ''' + decorator_kwargs = {"deprecated_since_version": deprecated_since_version, + "active_deprecations_target": active_deprecations_target} + def deprecated_decorator(wrapped): + if hasattr(wrapped, '__mro__'): # wrapped is actually a class + class wrapper(wrapped): + __doc__ = wrapped.__doc__ + __module__ = wrapped.__module__ + _sympy_deprecated_func = wrapped + if '__new__' in wrapped.__dict__: + def __new__(cls, *args, **kwargs): + sympy_deprecation_warning(message, **decorator_kwargs, stacklevel=stacklevel) + return super().__new__(cls, *args, **kwargs) + else: + def __init__(self, *args, **kwargs): + sympy_deprecation_warning(message, **decorator_kwargs, stacklevel=stacklevel) + super().__init__(*args, **kwargs) + wrapper.__name__ = wrapped.__name__ + else: + @wraps(wrapped) + def wrapper(*args, **kwargs): + sympy_deprecation_warning(message, **decorator_kwargs, stacklevel=stacklevel) + return wrapped(*args, **kwargs) + wrapper._sympy_deprecated_func = wrapped + return wrapper + return deprecated_decorator diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/enumerative.py b/venv/lib/python3.10/site-packages/sympy/utilities/enumerative.py new file mode 100644 index 0000000000000000000000000000000000000000..017da9b1ba2ba6682ad38de17e90cf78450e5baf --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/enumerative.py @@ -0,0 +1,1157 @@ +""" +Algorithms and classes to support enumerative combinatorics. + +Currently just multiset partitions, but more could be added. + +Terminology (following Knuth, algorithm 7.1.2.5M TAOCP) +*multiset* aaabbcccc has a *partition* aaabc | bccc + +The submultisets, aaabc and bccc of the partition are called +*parts*, or sometimes *vectors*. (Knuth notes that multiset +partitions can be thought of as partitions of vectors of integers, +where the ith element of the vector gives the multiplicity of +element i.) + +The values a, b and c are *components* of the multiset. These +correspond to elements of a set, but in a multiset can be present +with a multiplicity greater than 1. + +The algorithm deserves some explanation. + +Think of the part aaabc from the multiset above. If we impose an +ordering on the components of the multiset, we can represent a part +with a vector, in which the value of the first element of the vector +corresponds to the multiplicity of the first component in that +part. Thus, aaabc can be represented by the vector [3, 1, 1]. We +can also define an ordering on parts, based on the lexicographic +ordering of the vector (leftmost vector element, i.e., the element +with the smallest component number, is the most significant), so +that [3, 1, 1] > [3, 1, 0] and [3, 1, 1] > [2, 1, 4]. The ordering +on parts can be extended to an ordering on partitions: First, sort +the parts in each partition, left-to-right in decreasing order. Then +partition A is greater than partition B if A's leftmost/greatest +part is greater than B's leftmost part. If the leftmost parts are +equal, compare the second parts, and so on. + +In this ordering, the greatest partition of a given multiset has only +one part. The least partition is the one in which the components +are spread out, one per part. 
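+
+For example, with the component ordering (a, b, c), the partition
+aaabc | bccc of the multiset above corresponds to the part vectors
+[3, 1, 1] and [0, 1, 3].  Since [3, 1, 1] > [0, 1, 3] in the ordering
+just described, the sorted (left-to-right decreasing) form of this
+partition is [[3, 1, 1], [0, 1, 3]].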
+ +The enumeration algorithms in this file yield the partitions of the +argument multiset in decreasing order. The main data structure is a +stack of parts, corresponding to the current partition. An +important invariant is that the parts on the stack are themselves in +decreasing order. This data structure is decremented to find the +next smaller partition. Most often, decrementing the partition will +only involve adjustments to the smallest parts at the top of the +stack, much as adjacent integers *usually* differ only in their last +few digits. + +Knuth's algorithm uses two main operations on parts: + +Decrement - change the part so that it is smaller in the + (vector) lexicographic order, but reduced by the smallest amount possible. + For example, if the multiset has vector [5, + 3, 1], and the bottom/greatest part is [4, 2, 1], this part would + decrement to [4, 2, 0], while [4, 0, 0] would decrement to [3, 3, + 1]. A singleton part is never decremented -- [1, 0, 0] is not + decremented to [0, 3, 1]. Instead, the decrement operator needs + to fail for this case. In Knuth's pseudocode, the decrement + operator is step m5. + +Spread unallocated multiplicity - Once a part has been decremented, + it cannot be the rightmost part in the partition. There is some + multiplicity that has not been allocated, and new parts must be + created above it in the stack to use up this multiplicity. To + maintain the invariant that the parts on the stack are in + decreasing order, these new parts must be less than or equal to + the decremented part. + For example, if the multiset is [5, 3, 1], and its most + significant part has just been decremented to [5, 3, 0], the + spread operation will add a new part so that the stack becomes + [[5, 3, 0], [0, 0, 1]]. If the most significant part (for the + same multiset) has been decremented to [2, 0, 0] the stack becomes + [[2, 0, 0], [2, 0, 0], [1, 3, 1]]. In the pseudocode, the spread + operation for one part is step m2. The complete spread operation + is a loop of steps m2 and m3. + +In order to facilitate the spread operation, Knuth stores, for each +component of each part, not just the multiplicity of that component +in the part, but also the total multiplicity available for this +component in this part or any lesser part above it on the stack. + +One added twist is that Knuth does not represent the part vectors as +arrays. Instead, he uses a sparse representation, in which a +component of a part is represented as a component number (c), plus +the multiplicity of the component in that part (v) as well as the +total multiplicity available for that component (u). This saves +time that would be spent skipping over zeros. + +""" + +class PartComponent: + """Internal class used in support of the multiset partitions + enumerators and the associated visitor functions. + + Represents one component of one part of the current partition. + + A stack of these, plus an auxiliary frame array, f, represents a + partition of the multiset. + + Knuth's pseudocode makes c, u, and v separate arrays. + """ + + __slots__ = ('c', 'u', 'v') + + def __init__(self): + self.c = 0 # Component number + self.u = 0 # The as yet unpartitioned amount in component c + # *before* it is allocated by this triple + self.v = 0 # Amount of c component in the current part + # (v<=u). An invariant of the representation is + # that the next higher triple for this component + # (if there is one) will have a value of u-v in + # its u attribute. 
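+        # For example, if the multiset is [5, 3, 1] and the
+        # bottom/greatest part on the stack is [3, 1, 1], the triples
+        # of that part are (c=0, u=5, v=3), (c=1, u=3, v=1) and
+        # (c=2, u=1, v=1); the part above it can then allocate at most
+        # u - v of each component, i.e. 2, 2 and 0 respectively.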
+ + def __repr__(self): + "for debug/algorithm animation purposes" + return 'c:%d u:%d v:%d' % (self.c, self.u, self.v) + + def __eq__(self, other): + """Define value oriented equality, which is useful for testers""" + return (isinstance(other, self.__class__) and + self.c == other.c and + self.u == other.u and + self.v == other.v) + + def __ne__(self, other): + """Defined for consistency with __eq__""" + return not self == other + + +# This function tries to be a faithful implementation of algorithm +# 7.1.2.5M in Volume 4A, Combinatoral Algorithms, Part 1, of The Art +# of Computer Programming, by Donald Knuth. This includes using +# (mostly) the same variable names, etc. This makes for rather +# low-level Python. + +# Changes from Knuth's pseudocode include +# - use PartComponent struct/object instead of 3 arrays +# - make the function a generator +# - map (with some difficulty) the GOTOs to Python control structures. +# - Knuth uses 1-based numbering for components, this code is 0-based +# - renamed variable l to lpart. +# - flag variable x takes on values True/False instead of 1/0 +# +def multiset_partitions_taocp(multiplicities): + """Enumerates partitions of a multiset. + + Parameters + ========== + + multiplicities + list of integer multiplicities of the components of the multiset. + + Yields + ====== + + state + Internal data structure which encodes a particular partition. + This output is then usually processed by a visitor function + which combines the information from this data structure with + the components themselves to produce an actual partition. + + Unless they wish to create their own visitor function, users will + have little need to look inside this data structure. But, for + reference, it is a 3-element list with components: + + f + is a frame array, which is used to divide pstack into parts. + + lpart + points to the base of the topmost part. + + pstack + is an array of PartComponent objects. + + The ``state`` output offers a peek into the internal data + structures of the enumeration function. The client should + treat this as read-only; any modification of the data + structure will cause unpredictable (and almost certainly + incorrect) results. Also, the components of ``state`` are + modified in place at each iteration. Hence, the visitor must + be called at each loop iteration. Accumulating the ``state`` + instances and processing them later will not work. + + Examples + ======== + + >>> from sympy.utilities.enumerative import list_visitor + >>> from sympy.utilities.enumerative import multiset_partitions_taocp + >>> # variables components and multiplicities represent the multiset 'abb' + >>> components = 'ab' + >>> multiplicities = [1, 2] + >>> states = multiset_partitions_taocp(multiplicities) + >>> list(list_visitor(state, components) for state in states) + [[['a', 'b', 'b']], + [['a', 'b'], ['b']], + [['a'], ['b', 'b']], + [['a'], ['b'], ['b']]] + + See Also + ======== + + sympy.utilities.iterables.multiset_partitions: Takes a multiset + as input and directly yields multiset partitions. It + dispatches to a number of functions, including this one, for + implementation. Most users will find it more convenient to + use than multiset_partitions_taocp. + + """ + + # Important variables. + # m is the number of components, i.e., number of distinct elements + m = len(multiplicities) + # n is the cardinality, total number of elements whether or not distinct + n = sum(multiplicities) + + # The main data structure, f segments pstack into parts. 
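+    # For example, with multiplicities [1, 2] the initialization in
+    # step M1 below gives f = [0, 2, 0, 0] and pstack[0:2] holding the
+    # triples (c=0, u=1, v=1) and (c=1, u=2, v=2), i.e. the whole
+    # multiset in a single part.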
See + # list_visitor() for example code indicating how this internal + # state corresponds to a partition. + + # Note: allocation of space for stack is conservative. Knuth's + # exercise 7.2.1.5.68 gives some indication of how to tighten this + # bound, but this is not implemented. + pstack = [PartComponent() for i in range(n * m + 1)] + f = [0] * (n + 1) + + # Step M1 in Knuth (Initialize) + # Initial state - entire multiset in one part. + for j in range(m): + ps = pstack[j] + ps.c = j + ps.u = multiplicities[j] + ps.v = multiplicities[j] + + # Other variables + f[0] = 0 + a = 0 + lpart = 0 + f[1] = m + b = m # in general, current stack frame is from a to b - 1 + + while True: + while True: + # Step M2 (Subtract v from u) + j = a + k = b + x = False + while j < b: + pstack[k].u = pstack[j].u - pstack[j].v + if pstack[k].u == 0: + x = True + elif not x: + pstack[k].c = pstack[j].c + pstack[k].v = min(pstack[j].v, pstack[k].u) + x = pstack[k].u < pstack[j].v + k = k + 1 + else: # x is True + pstack[k].c = pstack[j].c + pstack[k].v = pstack[k].u + k = k + 1 + j = j + 1 + # Note: x is True iff v has changed + + # Step M3 (Push if nonzero.) + if k > b: + a = b + b = k + lpart = lpart + 1 + f[lpart + 1] = b + # Return to M2 + else: + break # Continue to M4 + + # M4 Visit a partition + state = [f, lpart, pstack] + yield state + + # M5 (Decrease v) + while True: + j = b-1 + while (pstack[j].v == 0): + j = j - 1 + if j == a and pstack[j].v == 1: + # M6 (Backtrack) + if lpart == 0: + return + lpart = lpart - 1 + b = a + a = f[lpart] + # Return to M5 + else: + pstack[j].v = pstack[j].v - 1 + for k in range(j + 1, b): + pstack[k].v = pstack[k].u + break # GOTO M2 + +# --------------- Visitor functions for multiset partitions --------------- +# A visitor takes the partition state generated by +# multiset_partitions_taocp or other enumerator, and produces useful +# output (such as the actual partition). + + +def factoring_visitor(state, primes): + """Use with multiset_partitions_taocp to enumerate the ways a + number can be expressed as a product of factors. For this usage, + the exponents of the prime factors of a number are arguments to + the partition enumerator, while the corresponding prime factors + are input here. + + Examples + ======== + + To enumerate the factorings of a number we can think of the elements of the + partition as being the prime factors and the multiplicities as being their + exponents. + + >>> from sympy.utilities.enumerative import factoring_visitor + >>> from sympy.utilities.enumerative import multiset_partitions_taocp + >>> from sympy import factorint + >>> primes, multiplicities = zip(*factorint(24).items()) + >>> primes + (2, 3) + >>> multiplicities + (3, 1) + >>> states = multiset_partitions_taocp(multiplicities) + >>> list(factoring_visitor(state, primes) for state in states) + [[24], [8, 3], [12, 2], [4, 6], [4, 2, 3], [6, 2, 2], [2, 2, 2, 3]] + """ + f, lpart, pstack = state + factoring = [] + for i in range(lpart + 1): + factor = 1 + for ps in pstack[f[i]: f[i + 1]]: + if ps.v > 0: + factor *= primes[ps.c] ** ps.v + factoring.append(factor) + return factoring + + +def list_visitor(state, components): + """Return a list of lists to represent the partition. 
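+
+    Each inner list is one part of the partition: ``components[c]`` is
+    repeated in it according to the multiplicity allocated to component
+    ``c`` in that part.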
+ + Examples + ======== + + >>> from sympy.utilities.enumerative import list_visitor + >>> from sympy.utilities.enumerative import multiset_partitions_taocp + >>> states = multiset_partitions_taocp([1, 2, 1]) + >>> s = next(states) + >>> list_visitor(s, 'abc') # for multiset 'a b b c' + [['a', 'b', 'b', 'c']] + >>> s = next(states) + >>> list_visitor(s, [1, 2, 3]) # for multiset '1 2 2 3 + [[1, 2, 2], [3]] + """ + f, lpart, pstack = state + + partition = [] + for i in range(lpart+1): + part = [] + for ps in pstack[f[i]:f[i+1]]: + if ps.v > 0: + part.extend([components[ps.c]] * ps.v) + partition.append(part) + + return partition + + +class MultisetPartitionTraverser(): + """ + Has methods to ``enumerate`` and ``count`` the partitions of a multiset. + + This implements a refactored and extended version of Knuth's algorithm + 7.1.2.5M [AOCP]_." + + The enumeration methods of this class are generators and return + data structures which can be interpreted by the same visitor + functions used for the output of ``multiset_partitions_taocp``. + + Examples + ======== + + >>> from sympy.utilities.enumerative import MultisetPartitionTraverser + >>> m = MultisetPartitionTraverser() + >>> m.count_partitions([4,4,4,2]) + 127750 + >>> m.count_partitions([3,3,3]) + 686 + + See Also + ======== + + multiset_partitions_taocp + sympy.utilities.iterables.multiset_partitions + + References + ========== + + .. [AOCP] Algorithm 7.1.2.5M in Volume 4A, Combinatoral Algorithms, + Part 1, of The Art of Computer Programming, by Donald Knuth. + + .. [Factorisatio] On a Problem of Oppenheim concerning + "Factorisatio Numerorum" E. R. Canfield, Paul Erdos, Carl + Pomerance, JOURNAL OF NUMBER THEORY, Vol. 17, No. 1. August + 1983. See section 7 for a description of an algorithm + similar to Knuth's. + + .. [Yorgey] Generating Multiset Partitions, Brent Yorgey, The + Monad.Reader, Issue 8, September 2007. + + """ + + def __init__(self): + self.debug = False + # TRACING variables. These are useful for gathering + # statistics on the algorithm itself, but have no particular + # benefit to a user of the code. + self.k1 = 0 + self.k2 = 0 + self.p1 = 0 + self.pstack = None + self.f = None + self.lpart = 0 + self.discarded = 0 + # dp_stack is list of lists of (part_key, start_count) pairs + self.dp_stack = [] + + # dp_map is map part_key-> count, where count represents the + # number of multiset which are descendants of a part with this + # key, **or any of its decrements** + + # Thus, when we find a part in the map, we add its count + # value to the running total, cut off the enumeration, and + # backtrack + + if not hasattr(self, 'dp_map'): + self.dp_map = {} + + def db_trace(self, msg): + """Useful for understanding/debugging the algorithms. Not + generally activated in end-user code.""" + if self.debug: + # XXX: animation_visitor is undefined... Clearly this does not + # work and was not tested. Previous code in comments below. + raise RuntimeError + #letters = 'abcdefghijklmnopqrstuvwxyz' + #state = [self.f, self.lpart, self.pstack] + #print("DBG:", msg, + # ["".join(part) for part in list_visitor(state, letters)], + # animation_visitor(state)) + + # + # Helper methods for enumeration + # + def _initialize_enumeration(self, multiplicities): + """Allocates and initializes the partition stack. 
+ + This is called from the enumeration/counting routines, so + there is no need to call it separately.""" + + num_components = len(multiplicities) + # cardinality is the total number of elements, whether or not distinct + cardinality = sum(multiplicities) + + # pstack is the partition stack, which is segmented by + # f into parts. + self.pstack = [PartComponent() for i in + range(num_components * cardinality + 1)] + self.f = [0] * (cardinality + 1) + + # Initial state - entire multiset in one part. + for j in range(num_components): + ps = self.pstack[j] + ps.c = j + ps.u = multiplicities[j] + ps.v = multiplicities[j] + + self.f[0] = 0 + self.f[1] = num_components + self.lpart = 0 + + # The decrement_part() method corresponds to step M5 in Knuth's + # algorithm. This is the base version for enum_all(). Modified + # versions of this method are needed if we want to restrict + # sizes of the partitions produced. + def decrement_part(self, part): + """Decrements part (a subrange of pstack), if possible, returning + True iff the part was successfully decremented. + + If you think of the v values in the part as a multi-digit + integer (least significant digit on the right) this is + basically decrementing that integer, but with the extra + constraint that the leftmost digit cannot be decremented to 0. + + Parameters + ========== + + part + The part, represented as a list of PartComponent objects, + which is to be decremented. + + """ + plen = len(part) + for j in range(plen - 1, -1, -1): + if j == 0 and part[j].v > 1 or j > 0 and part[j].v > 0: + # found val to decrement + part[j].v -= 1 + # Reset trailing parts back to maximum + for k in range(j + 1, plen): + part[k].v = part[k].u + return True + return False + + # Version to allow number of parts to be bounded from above. + # Corresponds to (a modified) step M5. + def decrement_part_small(self, part, ub): + """Decrements part (a subrange of pstack), if possible, returning + True iff the part was successfully decremented. + + Parameters + ========== + + part + part to be decremented (topmost part on the stack) + + ub + the maximum number of parts allowed in a partition + returned by the calling traversal. + + Notes + ===== + + The goal of this modification of the ordinary decrement method + is to fail (meaning that the subtree rooted at this part is to + be skipped) when it can be proved that this part can only have + child partitions which are larger than allowed by ``ub``. If a + decision is made to fail, it must be accurate, otherwise the + enumeration will miss some partitions. But, it is OK not to + capture all the possible failures -- if a part is passed that + should not be, the resulting too-large partitions are filtered + by the enumeration one level up. However, as is usual in + constrained enumerations, failing early is advantageous. + + The tests used by this method catch the most common cases, + although this implementation is by no means the last word on + this problem. The tests include: + + 1) ``lpart`` must be less than ``ub`` by at least 2. This is because + once a part has been decremented, the partition + will gain at least one child in the spread step. + + 2) If the leading component of the part is about to be + decremented, check for how many parts will be added in + order to use up the unallocated multiplicity in that + leading component, and fail if this number is greater than + allowed by ``ub``. (See code for the exact expression.) This + test is given in the answer to Knuth's problem 7.2.1.5.69. 
+ + 3) If there is *exactly* enough room to expand the leading + component by the above test, check the next component (if + it exists) once decrementing has finished. If this has + ``v == 0``, this next component will push the expansion over the + limit by 1, so fail. + """ + if self.lpart >= ub - 1: + self.p1 += 1 # increment to keep track of usefulness of tests + return False + plen = len(part) + for j in range(plen - 1, -1, -1): + # Knuth's mod, (answer to problem 7.2.1.5.69) + if j == 0 and (part[0].v - 1)*(ub - self.lpart) < part[0].u: + self.k1 += 1 + return False + + if j == 0 and part[j].v > 1 or j > 0 and part[j].v > 0: + # found val to decrement + part[j].v -= 1 + # Reset trailing parts back to maximum + for k in range(j + 1, plen): + part[k].v = part[k].u + + # Have now decremented part, but are we doomed to + # failure when it is expanded? Check one oddball case + # that turns out to be surprisingly common - exactly + # enough room to expand the leading component, but no + # room for the second component, which has v=0. + if (plen > 1 and part[1].v == 0 and + (part[0].u - part[0].v) == + ((ub - self.lpart - 1) * part[0].v)): + self.k2 += 1 + self.db_trace("Decrement fails test 3") + return False + return True + return False + + def decrement_part_large(self, part, amt, lb): + """Decrements part, while respecting size constraint. + + A part can have no children which are of sufficient size (as + indicated by ``lb``) unless that part has sufficient + unallocated multiplicity. When enforcing the size constraint, + this method will decrement the part (if necessary) by an + amount needed to ensure sufficient unallocated multiplicity. + + Returns True iff the part was successfully decremented. + + Parameters + ========== + + part + part to be decremented (topmost part on the stack) + + amt + Can only take values 0 or 1. A value of 1 means that the + part must be decremented, and then the size constraint is + enforced. A value of 0 means just to enforce the ``lb`` + size constraint. + + lb + The partitions produced by the calling enumeration must + have more parts than this value. + + """ + + if amt == 1: + # In this case we always need to increment, *before* + # enforcing the "sufficient unallocated multiplicity" + # constraint. Easiest for this is just to call the + # regular decrement method. + if not self.decrement_part(part): + return False + + # Next, perform any needed additional decrementing to respect + # "sufficient unallocated multiplicity" (or fail if this is + # not possible). + min_unalloc = lb - self.lpart + if min_unalloc <= 0: + return True + total_mult = sum(pc.u for pc in part) + total_alloc = sum(pc.v for pc in part) + if total_mult <= min_unalloc: + return False + + deficit = min_unalloc - (total_mult - total_alloc) + if deficit <= 0: + return True + + for i in range(len(part) - 1, -1, -1): + if i == 0: + if part[0].v > deficit: + part[0].v -= deficit + return True + else: + return False # This shouldn't happen, due to above check + else: + if part[i].v >= deficit: + part[i].v -= deficit + return True + else: + deficit -= part[i].v + part[i].v = 0 + + def decrement_part_range(self, part, lb, ub): + """Decrements part (a subrange of pstack), if possible, returning + True iff the part was successfully decremented. + + Parameters + ========== + + part + part to be decremented (topmost part on the stack) + + ub + the maximum number of parts allowed in a partition + returned by the calling traversal. 
+ + lb + The partitions produced by the calling enumeration must + have more parts than this value. + + Notes + ===== + + Combines the constraints of _small and _large decrement + methods. If returns success, part has been decremented at + least once, but perhaps by quite a bit more if needed to meet + the lb constraint. + """ + + # Constraint in the range case is just enforcing both the + # constraints from _small and _large cases. Note the 0 as the + # second argument to the _large call -- this is the signal to + # decrement only as needed to for constraint enforcement. The + # short circuiting and left-to-right order of the 'and' + # operator is important for this to work correctly. + return self.decrement_part_small(part, ub) and \ + self.decrement_part_large(part, 0, lb) + + def spread_part_multiplicity(self): + """Returns True if a new part has been created, and + adjusts pstack, f and lpart as needed. + + Notes + ===== + + Spreads unallocated multiplicity from the current top part + into a new part created above the current on the stack. This + new part is constrained to be less than or equal to the old in + terms of the part ordering. + + This call does nothing (and returns False) if the current top + part has no unallocated multiplicity. + + """ + j = self.f[self.lpart] # base of current top part + k = self.f[self.lpart + 1] # ub of current; potential base of next + base = k # save for later comparison + + changed = False # Set to true when the new part (so far) is + # strictly less than (as opposed to less than + # or equal) to the old. + for j in range(self.f[self.lpart], self.f[self.lpart + 1]): + self.pstack[k].u = self.pstack[j].u - self.pstack[j].v + if self.pstack[k].u == 0: + changed = True + else: + self.pstack[k].c = self.pstack[j].c + if changed: # Put all available multiplicity in this part + self.pstack[k].v = self.pstack[k].u + else: # Still maintaining ordering constraint + if self.pstack[k].u < self.pstack[j].v: + self.pstack[k].v = self.pstack[k].u + changed = True + else: + self.pstack[k].v = self.pstack[j].v + k = k + 1 + if k > base: + # Adjust for the new part on stack + self.lpart = self.lpart + 1 + self.f[self.lpart + 1] = k + return True + return False + + def top_part(self): + """Return current top part on the stack, as a slice of pstack. + + """ + return self.pstack[self.f[self.lpart]:self.f[self.lpart + 1]] + + # Same interface and functionality as multiset_partitions_taocp(), + # but some might find this refactored version easier to follow. + def enum_all(self, multiplicities): + """Enumerate the partitions of a multiset. + + Examples + ======== + + >>> from sympy.utilities.enumerative import list_visitor + >>> from sympy.utilities.enumerative import MultisetPartitionTraverser + >>> m = MultisetPartitionTraverser() + >>> states = m.enum_all([2,2]) + >>> list(list_visitor(state, 'ab') for state in states) + [[['a', 'a', 'b', 'b']], + [['a', 'a', 'b'], ['b']], + [['a', 'a'], ['b', 'b']], + [['a', 'a'], ['b'], ['b']], + [['a', 'b', 'b'], ['a']], + [['a', 'b'], ['a', 'b']], + [['a', 'b'], ['a'], ['b']], + [['a'], ['a'], ['b', 'b']], + [['a'], ['a'], ['b'], ['b']]] + + See Also + ======== + + multiset_partitions_taocp: + which provides the same result as this method, but is + about twice as fast. Hence, enum_all is primarily useful + for testing. Also see the function for a discussion of + states and visitors. 
+ + """ + self._initialize_enumeration(multiplicities) + while True: + while self.spread_part_multiplicity(): + pass + + # M4 Visit a partition + state = [self.f, self.lpart, self.pstack] + yield state + + # M5 (Decrease v) + while not self.decrement_part(self.top_part()): + # M6 (Backtrack) + if self.lpart == 0: + return + self.lpart -= 1 + + def enum_small(self, multiplicities, ub): + """Enumerate multiset partitions with no more than ``ub`` parts. + + Equivalent to enum_range(multiplicities, 0, ub) + + Parameters + ========== + + multiplicities + list of multiplicities of the components of the multiset. + + ub + Maximum number of parts + + Examples + ======== + + >>> from sympy.utilities.enumerative import list_visitor + >>> from sympy.utilities.enumerative import MultisetPartitionTraverser + >>> m = MultisetPartitionTraverser() + >>> states = m.enum_small([2,2], 2) + >>> list(list_visitor(state, 'ab') for state in states) + [[['a', 'a', 'b', 'b']], + [['a', 'a', 'b'], ['b']], + [['a', 'a'], ['b', 'b']], + [['a', 'b', 'b'], ['a']], + [['a', 'b'], ['a', 'b']]] + + The implementation is based, in part, on the answer given to + exercise 69, in Knuth [AOCP]_. + + See Also + ======== + + enum_all, enum_large, enum_range + + """ + + # Keep track of iterations which do not yield a partition. + # Clearly, we would like to keep this number small. + self.discarded = 0 + if ub <= 0: + return + self._initialize_enumeration(multiplicities) + while True: + while self.spread_part_multiplicity(): + self.db_trace('spread 1') + if self.lpart >= ub: + self.discarded += 1 + self.db_trace(' Discarding') + self.lpart = ub - 2 + break + else: + # M4 Visit a partition + state = [self.f, self.lpart, self.pstack] + yield state + + # M5 (Decrease v) + while not self.decrement_part_small(self.top_part(), ub): + self.db_trace("Failed decrement, going to backtrack") + # M6 (Backtrack) + if self.lpart == 0: + return + self.lpart -= 1 + self.db_trace("Backtracked to") + self.db_trace("decrement ok, about to expand") + + def enum_large(self, multiplicities, lb): + """Enumerate the partitions of a multiset with lb < num(parts) + + Equivalent to enum_range(multiplicities, lb, sum(multiplicities)) + + Parameters + ========== + + multiplicities + list of multiplicities of the components of the multiset. + + lb + Number of parts in the partition must be greater than + this lower bound. 
+ + + Examples + ======== + + >>> from sympy.utilities.enumerative import list_visitor + >>> from sympy.utilities.enumerative import MultisetPartitionTraverser + >>> m = MultisetPartitionTraverser() + >>> states = m.enum_large([2,2], 2) + >>> list(list_visitor(state, 'ab') for state in states) + [[['a', 'a'], ['b'], ['b']], + [['a', 'b'], ['a'], ['b']], + [['a'], ['a'], ['b', 'b']], + [['a'], ['a'], ['b'], ['b']]] + + See Also + ======== + + enum_all, enum_small, enum_range + + """ + self.discarded = 0 + if lb >= sum(multiplicities): + return + self._initialize_enumeration(multiplicities) + self.decrement_part_large(self.top_part(), 0, lb) + while True: + good_partition = True + while self.spread_part_multiplicity(): + if not self.decrement_part_large(self.top_part(), 0, lb): + # Failure here should be rare/impossible + self.discarded += 1 + good_partition = False + break + + # M4 Visit a partition + if good_partition: + state = [self.f, self.lpart, self.pstack] + yield state + + # M5 (Decrease v) + while not self.decrement_part_large(self.top_part(), 1, lb): + # M6 (Backtrack) + if self.lpart == 0: + return + self.lpart -= 1 + + def enum_range(self, multiplicities, lb, ub): + + """Enumerate the partitions of a multiset with + ``lb < num(parts) <= ub``. + + In particular, if partitions with exactly ``k`` parts are + desired, call with ``(multiplicities, k - 1, k)``. This + method generalizes enum_all, enum_small, and enum_large. + + Examples + ======== + + >>> from sympy.utilities.enumerative import list_visitor + >>> from sympy.utilities.enumerative import MultisetPartitionTraverser + >>> m = MultisetPartitionTraverser() + >>> states = m.enum_range([2,2], 1, 2) + >>> list(list_visitor(state, 'ab') for state in states) + [[['a', 'a', 'b'], ['b']], + [['a', 'a'], ['b', 'b']], + [['a', 'b', 'b'], ['a']], + [['a', 'b'], ['a', 'b']]] + + """ + # combine the constraints of the _large and _small + # enumerations. + self.discarded = 0 + if ub <= 0 or lb >= sum(multiplicities): + return + self._initialize_enumeration(multiplicities) + self.decrement_part_large(self.top_part(), 0, lb) + while True: + good_partition = True + while self.spread_part_multiplicity(): + self.db_trace("spread 1") + if not self.decrement_part_large(self.top_part(), 0, lb): + # Failure here - possible in range case? + self.db_trace(" Discarding (large cons)") + self.discarded += 1 + good_partition = False + break + elif self.lpart >= ub: + self.discarded += 1 + good_partition = False + self.db_trace(" Discarding small cons") + self.lpart = ub - 2 + break + + # M4 Visit a partition + if good_partition: + state = [self.f, self.lpart, self.pstack] + yield state + + # M5 (Decrease v) + while not self.decrement_part_range(self.top_part(), lb, ub): + self.db_trace("Failed decrement, going to backtrack") + # M6 (Backtrack) + if self.lpart == 0: + return + self.lpart -= 1 + self.db_trace("Backtracked to") + self.db_trace("decrement ok, about to expand") + + def count_partitions_slow(self, multiplicities): + """Returns the number of partitions of a multiset whose elements + have the multiplicities given in ``multiplicities``. + + Primarily for comparison purposes. It follows the same path as + enumerate, and counts, rather than generates, the partitions. + + See Also + ======== + + count_partitions + Has the same calling interface, but is much faster. 
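+
+        A small illustrative check (the multiset ``[2, 2]`` has the nine
+        partitions listed in the ``enum_all`` docstring):
+
+        >>> from sympy.utilities.enumerative import MultisetPartitionTraverser
+        >>> m = MultisetPartitionTraverser()
+        >>> m.count_partitions_slow([2, 2])
+        9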
+ + """ + # number of partitions so far in the enumeration + self.pcount = 0 + self._initialize_enumeration(multiplicities) + while True: + while self.spread_part_multiplicity(): + pass + + # M4 Visit (count) a partition + self.pcount += 1 + + # M5 (Decrease v) + while not self.decrement_part(self.top_part()): + # M6 (Backtrack) + if self.lpart == 0: + return self.pcount + self.lpart -= 1 + + def count_partitions(self, multiplicities): + """Returns the number of partitions of a multiset whose components + have the multiplicities given in ``multiplicities``. + + For larger counts, this method is much faster than calling one + of the enumerators and counting the result. Uses dynamic + programming to cut down on the number of nodes actually + explored. The dictionary used in order to accelerate the + counting process is stored in the ``MultisetPartitionTraverser`` + object and persists across calls. If the user does not + expect to call ``count_partitions`` for any additional + multisets, the object should be cleared to save memory. On + the other hand, the cache built up from one count run can + significantly speed up subsequent calls to ``count_partitions``, + so it may be advantageous not to clear the object. + + Examples + ======== + + >>> from sympy.utilities.enumerative import MultisetPartitionTraverser + >>> m = MultisetPartitionTraverser() + >>> m.count_partitions([9,8,2]) + 288716 + >>> m.count_partitions([2,2]) + 9 + >>> del m + + Notes + ===== + + If one looks at the workings of Knuth's algorithm M [AOCP]_, it + can be viewed as a traversal of a binary tree of parts. A + part has (up to) two children, the left child resulting from + the spread operation, and the right child from the decrement + operation. The ordinary enumeration of multiset partitions is + an in-order traversal of this tree, and with the partitions + corresponding to paths from the root to the leaves. The + mapping from paths to partitions is a little complicated, + since the partition would contain only those parts which are + leaves or the parents of a spread link, not those which are + parents of a decrement link. + + For counting purposes, it is sufficient to count leaves, and + this can be done with a recursive in-order traversal. The + number of leaves of a subtree rooted at a particular part is a + function only of that part itself, so memoizing has the + potential to speed up the counting dramatically. + + This method follows a computational approach which is similar + to the hypothetical memoized recursive function, but with two + differences: + + 1) This method is iterative, borrowing its structure from the + other enumerations and maintaining an explicit stack of + parts which are in the process of being counted. (There + may be multisets which can be counted reasonably quickly by + this implementation, but which would overflow the default + Python recursion limit with a recursive implementation.) + + 2) Instead of using the part data structure directly, a more + compact key is constructed. This saves space, but more + importantly coalesces some parts which would remain + separate with physical keys. + + Unlike the enumeration functions, there is currently no _range + version of count_partitions. If someone wants to stretch + their brain, it should be possible to construct one by + memoizing with a histogram of counts rather than a single + count, and combining the histograms. 
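+
+        As a small sketch of the caching described above (``dp_map`` is
+        the memo dictionary kept on the traverser object), the table
+        built by one call persists and is reused by later calls:
+
+        >>> from sympy.utilities.enumerative import MultisetPartitionTraverser
+        >>> m = MultisetPartitionTraverser()
+        >>> m.count_partitions([4, 4, 4, 2])
+        127750
+        >>> len(m.dp_map) > 0
+        True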
+ """ + # number of partitions so far in the enumeration + self.pcount = 0 + + # dp_stack is list of lists of (part_key, start_count) pairs + self.dp_stack = [] + + self._initialize_enumeration(multiplicities) + pkey = part_key(self.top_part()) + self.dp_stack.append([(pkey, 0), ]) + while True: + while self.spread_part_multiplicity(): + pkey = part_key(self.top_part()) + if pkey in self.dp_map: + # Already have a cached value for the count of the + # subtree rooted at this part. Add it to the + # running counter, and break out of the spread + # loop. The -1 below is to compensate for the + # leaf that this code path would otherwise find, + # and which gets incremented for below. + + self.pcount += (self.dp_map[pkey] - 1) + self.lpart -= 1 + break + else: + self.dp_stack.append([(pkey, self.pcount), ]) + + # M4 count a leaf partition + self.pcount += 1 + + # M5 (Decrease v) + while not self.decrement_part(self.top_part()): + # M6 (Backtrack) + for key, oldcount in self.dp_stack.pop(): + self.dp_map[key] = self.pcount - oldcount + if self.lpart == 0: + return self.pcount + self.lpart -= 1 + + # At this point have successfully decremented the part on + # the stack and it does not appear in the cache. It needs + # to be added to the list at the top of dp_stack + pkey = part_key(self.top_part()) + self.dp_stack[-1].append((pkey, self.pcount),) + + +def part_key(part): + """Helper for MultisetPartitionTraverser.count_partitions that + creates a key for ``part``, that only includes information which can + affect the count for that part. (Any irrelevant information just + reduces the effectiveness of dynamic programming.) + + Notes + ===== + + This member function is a candidate for future exploration. There + are likely symmetries that can be exploited to coalesce some + ``part_key`` values, and thereby save space and improve + performance. + + """ + # The component number is irrelevant for counting partitions, so + # leave it out of the memo key. + rval = [] + for ps in part: + rval.append(ps.u) + rval.append(ps.v) + return tuple(rval) diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/exceptions.py b/venv/lib/python3.10/site-packages/sympy/utilities/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..9cffc50aebdbd7ccb0568d9b34001276a78ae42e --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/exceptions.py @@ -0,0 +1,275 @@ +""" +General SymPy exceptions and warnings. +""" + +import warnings +import contextlib + +from textwrap import dedent + + +class SymPyDeprecationWarning(DeprecationWarning): + r""" + A warning for deprecated features of SymPy. + + See the :ref:`deprecation-policy` document for details on when and how + things should be deprecated in SymPy. + + Note that simply constructing this class will not cause a warning to be + issued. To do that, you must call the :func`sympy_deprecation_warning` + function. For this reason, it is not recommended to ever construct this + class directly. + + Explanation + =========== + + The ``SymPyDeprecationWarning`` class is a subclass of + ``DeprecationWarning`` that is used for all deprecations in SymPy. A + special subclass is used so that we can automatically augment the warning + message with additional metadata about the version the deprecation was + introduced in and a link to the documentation. This also allows users to + explicitly filter deprecation warnings from SymPy using ``warnings`` + filters (see :ref:`silencing-sympy-deprecation-warnings`). 
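+
+    For instance, a user who wants to hide these warnings entirely could
+    install an ordinary ``warnings`` filter; a minimal sketch (see
+    :ref:`silencing-sympy-deprecation-warnings` for the recommended
+    options):
+
+    >>> import warnings
+    >>> from sympy.utilities.exceptions import SymPyDeprecationWarning
+    >>> warnings.simplefilter("ignore", SymPyDeprecationWarning) # doctest: +SKIP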
+ + Additionally, ``SymPyDeprecationWarning`` is enabled to be shown by + default, unlike normal ``DeprecationWarning``\s, which are only shown by + default in interactive sessions. This ensures that deprecation warnings in + SymPy will actually be seen by users. + + See the documentation of :func:`sympy_deprecation_warning` for a + description of the parameters to this function. + + To mark a function as deprecated, you can use the :func:`@deprecated + ` decorator. + + See Also + ======== + sympy.utilities.exceptions.sympy_deprecation_warning + sympy.utilities.exceptions.ignore_warnings + sympy.utilities.decorator.deprecated + sympy.testing.pytest.warns_deprecated_sympy + + """ + def __init__(self, message, *, deprecated_since_version, active_deprecations_target): + + super().__init__(message, deprecated_since_version, + active_deprecations_target) + self.message = message + if not isinstance(deprecated_since_version, str): + raise TypeError(f"'deprecated_since_version' should be a string, got {deprecated_since_version!r}") + self.deprecated_since_version = deprecated_since_version + self.active_deprecations_target = active_deprecations_target + if any(i in active_deprecations_target for i in '()='): + raise ValueError("active_deprecations_target be the part inside of the '(...)='") + + self.full_message = f""" + +{dedent(message).strip()} + +See https://docs.sympy.org/latest/explanation/active-deprecations.html#{active_deprecations_target} +for details. + +This has been deprecated since SymPy version {deprecated_since_version}. It +will be removed in a future version of SymPy. +""" + + def __str__(self): + return self.full_message + + def __repr__(self): + return f"{self.__class__.__name__}({self.message!r}, deprecated_since_version={self.deprecated_since_version!r}, active_deprecations_target={self.active_deprecations_target!r})" + + def __eq__(self, other): + return isinstance(other, SymPyDeprecationWarning) and self.args == other.args + + # Make pickling work. The by default, it tries to recreate the expression + # from its args, but this doesn't work because of our keyword-only + # arguments. + @classmethod + def _new(cls, message, deprecated_since_version, + active_deprecations_target): + return cls(message, deprecated_since_version=deprecated_since_version, active_deprecations_target=active_deprecations_target) + + def __reduce__(self): + return (self._new, (self.message, self.deprecated_since_version, self.active_deprecations_target)) + +# Python by default hides DeprecationWarnings, which we do not want. +warnings.simplefilter("once", SymPyDeprecationWarning) + +def sympy_deprecation_warning(message, *, deprecated_since_version, + active_deprecations_target, stacklevel=3): + r''' + Warn that a feature is deprecated in SymPy. + + See the :ref:`deprecation-policy` document for details on when and how + things should be deprecated in SymPy. + + To mark an entire function or class as deprecated, you can use the + :func:`@deprecated ` decorator. + + Parameters + ========== + + message: str + + The deprecation message. This may span multiple lines and contain + code examples. Messages should be wrapped to 80 characters. The + message is automatically dedented and leading and trailing whitespace + stripped. Messages may include dynamic content based on the user + input, but avoid using ``str(expression)`` if an expression can be + arbitrary, as it might be huge and make the warning message + unreadable. 
+ + deprecated_since_version: str + + The version of SymPy the feature has been deprecated since. For new + deprecations, this should be the version in `sympy/release.py + `_ + without the ``.dev``. If the next SymPy version ends up being + different from this, the release manager will need to update any + ``SymPyDeprecationWarning``\s using the incorrect version. This + argument is required and must be passed as a keyword argument. + (example: ``deprecated_since_version="1.10"``). + + active_deprecations_target: str + + The Sphinx target corresponding to the section for the deprecation in + the :ref:`active-deprecations` document (see + ``doc/src/explanation/active-deprecations.md``). This is used to + automatically generate a URL to the page in the warning message. This + argument is required and must be passed as a keyword argument. + (example: ``active_deprecations_target="deprecated-feature-abc"``) + + stacklevel: int (default: 3) + + The ``stacklevel`` parameter that is passed to ``warnings.warn``. If + you create a wrapper that calls this function, this should be + increased so that the warning message shows the user line of code that + produced the warning. Note that in some cases there will be multiple + possible different user code paths that could result in the warning. + In that case, just choose the smallest common stacklevel. + + Examples + ======== + + >>> from sympy.utilities.exceptions import sympy_deprecation_warning + >>> def is_this_zero(x, y=0): + ... """ + ... Determine if x = 0. + ... + ... Parameters + ... ========== + ... + ... x : Expr + ... The expression to check. + ... + ... y : Expr, optional + ... If provided, check if x = y. + ... + ... .. deprecated:: 1.1 + ... + ... The ``y`` argument to ``is_this_zero`` is deprecated. Use + ... ``is_this_zero(x - y)`` instead. + ... + ... """ + ... from sympy import simplify + ... + ... if y != 0: + ... sympy_deprecation_warning(""" + ... The y argument to is_zero() is deprecated. Use is_zero(x - y) instead.""", + ... deprecated_since_version="1.1", + ... active_deprecations_target='is-this-zero-y-deprecation') + ... return simplify(x - y) == 0 + >>> is_this_zero(0) + True + >>> is_this_zero(1, 1) # doctest: +SKIP + :1: SymPyDeprecationWarning: + + The y argument to is_zero() is deprecated. Use is_zero(x - y) instead. + + See https://docs.sympy.org/latest/explanation/active-deprecations.html#is-this-zero-y-deprecation + for details. + + This has been deprecated since SymPy version 1.1. It + will be removed in a future version of SymPy. + + is_this_zero(1, 1) + True + + See Also + ======== + + sympy.utilities.exceptions.SymPyDeprecationWarning + sympy.utilities.exceptions.ignore_warnings + sympy.utilities.decorator.deprecated + sympy.testing.pytest.warns_deprecated_sympy + + ''' + w = SymPyDeprecationWarning(message, + deprecated_since_version=deprecated_since_version, + active_deprecations_target=active_deprecations_target) + warnings.warn(w, stacklevel=stacklevel) + + +@contextlib.contextmanager +def ignore_warnings(warningcls): + ''' + Context manager to suppress warnings during tests. + + .. note:: + + Do not use this with SymPyDeprecationWarning in the tests. + warns_deprecated_sympy() should be used instead. + + This function is useful for suppressing warnings during tests. The warns + function should be used to assert that a warning is raised. The + ignore_warnings function is useful in situation when the warning is not + guaranteed to be raised (e.g. 
on importing a module) or if the warning + comes from third-party code. + + This function is also useful to prevent the same or similar warnings from + being issue twice due to recursive calls. + + When the warning is coming (reliably) from SymPy the warns function should + be preferred to ignore_warnings. + + >>> from sympy.utilities.exceptions import ignore_warnings + >>> import warnings + + Here's a warning: + + >>> with warnings.catch_warnings(): # reset warnings in doctest + ... warnings.simplefilter('error') + ... warnings.warn('deprecated', UserWarning) + Traceback (most recent call last): + ... + UserWarning: deprecated + + Let's suppress it with ignore_warnings: + + >>> with warnings.catch_warnings(): # reset warnings in doctest + ... warnings.simplefilter('error') + ... with ignore_warnings(UserWarning): + ... warnings.warn('deprecated', UserWarning) + + (No warning emitted) + + See Also + ======== + sympy.utilities.exceptions.SymPyDeprecationWarning + sympy.utilities.exceptions.sympy_deprecation_warning + sympy.utilities.decorator.deprecated + sympy.testing.pytest.warns_deprecated_sympy + + ''' + # Absorbs all warnings in warnrec + with warnings.catch_warnings(record=True) as warnrec: + # Make sure our warning doesn't get filtered + warnings.simplefilter("always", warningcls) + # Now run the test + yield + + # Reissue any warnings that we aren't testing for + for w in warnrec: + if not issubclass(w.category, warningcls): + warnings.warn_explicit(w.message, w.category, w.filename, w.lineno) diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/iterables.py b/venv/lib/python3.10/site-packages/sympy/utilities/iterables.py new file mode 100644 index 0000000000000000000000000000000000000000..e84a694df754f33419212ad23c94aaf84bfc1e5a --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/iterables.py @@ -0,0 +1,3172 @@ +from collections import Counter, defaultdict, OrderedDict +from itertools import ( + chain, combinations, combinations_with_replacement, cycle, islice, + permutations, product, groupby +) +# For backwards compatibility +from itertools import product as cartes # noqa: F401 +from operator import gt + + + +# this is the logical location of these functions +from sympy.utilities.enumerative import ( + multiset_partitions_taocp, list_visitor, MultisetPartitionTraverser) + +from sympy.utilities.misc import as_int +from sympy.utilities.decorator import deprecated + + +def is_palindromic(s, i=0, j=None): + """ + Return True if the sequence is the same from left to right as it + is from right to left in the whole sequence (default) or in the + Python slice ``s[i: j]``; else False. + + Examples + ======== + + >>> from sympy.utilities.iterables import is_palindromic + >>> is_palindromic([1, 0, 1]) + True + >>> is_palindromic('abcbb') + False + >>> is_palindromic('abcbb', 1) + False + + Normal Python slicing is performed in place so there is no need to + create a slice of the sequence for testing: + + >>> is_palindromic('abcbb', 1, -1) + True + >>> is_palindromic('abcbb', -4, -1) + True + + See Also + ======== + + sympy.ntheory.digits.is_palindromic: tests integers + + """ + i, j, _ = slice(i, j).indices(len(s)) + m = (j - i)//2 + # if length is odd, middle element will be ignored + return all(s[i + k] == s[j - 1 - k] for k in range(m)) + + +def flatten(iterable, levels=None, cls=None): # noqa: F811 + """ + Recursively denest iterable containers. 
+ + >>> from sympy import flatten + + >>> flatten([1, 2, 3]) + [1, 2, 3] + >>> flatten([1, 2, [3]]) + [1, 2, 3] + >>> flatten([1, [2, 3], [4, 5]]) + [1, 2, 3, 4, 5] + >>> flatten([1.0, 2, (1, None)]) + [1.0, 2, 1, None] + + If you want to denest only a specified number of levels of + nested containers, then set ``levels`` flag to the desired + number of levels:: + + >>> ls = [[(-2, -1), (1, 2)], [(0, 0)]] + + >>> flatten(ls, levels=1) + [(-2, -1), (1, 2), (0, 0)] + + If cls argument is specified, it will only flatten instances of that + class, for example: + + >>> from sympy import Basic, S + >>> class MyOp(Basic): + ... pass + ... + >>> flatten([MyOp(S(1), MyOp(S(2), S(3)))], cls=MyOp) + [1, 2, 3] + + adapted from https://kogs-www.informatik.uni-hamburg.de/~meine/python_tricks + """ + from sympy.tensor.array import NDimArray + if levels is not None: + if not levels: + return iterable + elif levels > 0: + levels -= 1 + else: + raise ValueError( + "expected non-negative number of levels, got %s" % levels) + + if cls is None: + def reducible(x): + return is_sequence(x, set) + else: + def reducible(x): + return isinstance(x, cls) + + result = [] + + for el in iterable: + if reducible(el): + if hasattr(el, 'args') and not isinstance(el, NDimArray): + el = el.args + result.extend(flatten(el, levels=levels, cls=cls)) + else: + result.append(el) + + return result + + +def unflatten(iter, n=2): + """Group ``iter`` into tuples of length ``n``. Raise an error if + the length of ``iter`` is not a multiple of ``n``. + """ + if n < 1 or len(iter) % n: + raise ValueError('iter length is not a multiple of %i' % n) + return list(zip(*(iter[i::n] for i in range(n)))) + + +def reshape(seq, how): + """Reshape the sequence according to the template in ``how``. + + Examples + ======== + + >>> from sympy.utilities import reshape + >>> seq = list(range(1, 9)) + + >>> reshape(seq, [4]) # lists of 4 + [[1, 2, 3, 4], [5, 6, 7, 8]] + + >>> reshape(seq, (4,)) # tuples of 4 + [(1, 2, 3, 4), (5, 6, 7, 8)] + + >>> reshape(seq, (2, 2)) # tuples of 4 + [(1, 2, 3, 4), (5, 6, 7, 8)] + + >>> reshape(seq, (2, [2])) # (i, i, [i, i]) + [(1, 2, [3, 4]), (5, 6, [7, 8])] + + >>> reshape(seq, ((2,), [2])) # etc.... + [((1, 2), [3, 4]), ((5, 6), [7, 8])] + + >>> reshape(seq, (1, [2], 1)) + [(1, [2, 3], 4), (5, [6, 7], 8)] + + >>> reshape(tuple(seq), ([[1], 1, (2,)],)) + (([[1], 2, (3, 4)],), ([[5], 6, (7, 8)],)) + + >>> reshape(tuple(seq), ([1], 1, (2,))) + (([1], 2, (3, 4)), ([5], 6, (7, 8))) + + >>> reshape(list(range(12)), [2, [3], {2}, (1, (3,), 1)]) + [[0, 1, [2, 3, 4], {5, 6}, (7, (8, 9, 10), 11)]] + + """ + m = sum(flatten(how)) + n, rem = divmod(len(seq), m) + if m < 0 or rem: + raise ValueError('template must sum to positive number ' + 'that divides the length of the sequence') + i = 0 + container = type(how) + rv = [None]*n + for k in range(len(rv)): + _rv = [] + for hi in how: + if isinstance(hi, int): + _rv.extend(seq[i: i + hi]) + i += hi + else: + n = sum(flatten(hi)) + hi_type = type(hi) + _rv.append(hi_type(reshape(seq[i: i + n], hi)[0])) + i += n + rv[k] = container(_rv) + return type(seq)(rv) + + +def group(seq, multiple=True): + """ + Splits a sequence into a list of lists of equal, adjacent elements. 
+ + Examples + ======== + + >>> from sympy import group + + >>> group([1, 1, 1, 2, 2, 3]) + [[1, 1, 1], [2, 2], [3]] + >>> group([1, 1, 1, 2, 2, 3], multiple=False) + [(1, 3), (2, 2), (3, 1)] + >>> group([1, 1, 3, 2, 2, 1], multiple=False) + [(1, 2), (3, 1), (2, 2), (1, 1)] + + See Also + ======== + + multiset + + """ + if multiple: + return [(list(g)) for _, g in groupby(seq)] + return [(k, len(list(g))) for k, g in groupby(seq)] + + +def _iproduct2(iterable1, iterable2): + '''Cartesian product of two possibly infinite iterables''' + + it1 = iter(iterable1) + it2 = iter(iterable2) + + elems1 = [] + elems2 = [] + + sentinel = object() + def append(it, elems): + e = next(it, sentinel) + if e is not sentinel: + elems.append(e) + + n = 0 + append(it1, elems1) + append(it2, elems2) + + while n <= len(elems1) + len(elems2): + for m in range(n-len(elems1)+1, len(elems2)): + yield (elems1[n-m], elems2[m]) + n += 1 + append(it1, elems1) + append(it2, elems2) + + +def iproduct(*iterables): + ''' + Cartesian product of iterables. + + Generator of the Cartesian product of iterables. This is analogous to + itertools.product except that it works with infinite iterables and will + yield any item from the infinite product eventually. + + Examples + ======== + + >>> from sympy.utilities.iterables import iproduct + >>> sorted(iproduct([1,2], [3,4])) + [(1, 3), (1, 4), (2, 3), (2, 4)] + + With an infinite iterator: + + >>> from sympy import S + >>> (3,) in iproduct(S.Integers) + True + >>> (3, 4) in iproduct(S.Integers, S.Integers) + True + + .. seealso:: + + `itertools.product + `_ + ''' + if len(iterables) == 0: + yield () + return + elif len(iterables) == 1: + for e in iterables[0]: + yield (e,) + elif len(iterables) == 2: + yield from _iproduct2(*iterables) + else: + first, others = iterables[0], iterables[1:] + for ef, eo in _iproduct2(first, iproduct(*others)): + yield (ef,) + eo + + +def multiset(seq): + """Return the hashable sequence in multiset form with values being the + multiplicity of the item in the sequence. + + Examples + ======== + + >>> from sympy.utilities.iterables import multiset + >>> multiset('mississippi') + {'i': 4, 'm': 1, 'p': 2, 's': 4} + + See Also + ======== + + group + + """ + return dict(Counter(seq).items()) + + + + +def ibin(n, bits=None, str=False): + """Return a list of length ``bits`` corresponding to the binary value + of ``n`` with small bits to the right (last). If bits is omitted, the + length will be the number required to represent ``n``. If the bits are + desired in reversed order, use the ``[::-1]`` slice of the returned list. + + If a sequence of all bits-length lists starting from ``[0, 0,..., 0]`` + through ``[1, 1, ..., 1]`` are desired, pass a non-integer for bits, e.g. + ``'all'``. + + If the bit *string* is desired pass ``str=True``. + + Examples + ======== + + >>> from sympy.utilities.iterables import ibin + >>> ibin(2) + [1, 0] + >>> ibin(2, 4) + [0, 0, 1, 0] + + If all lists corresponding to 0 to 2**n - 1, pass a non-integer + for bits: + + >>> bits = 2 + >>> for i in ibin(2, 'all'): + ... 
print(i) + (0, 0) + (0, 1) + (1, 0) + (1, 1) + + If a bit string is desired of a given length, use str=True: + + >>> n = 123 + >>> bits = 10 + >>> ibin(n, bits, str=True) + '0001111011' + >>> ibin(n, bits, str=True)[::-1] # small bits left + '1101111000' + >>> list(ibin(3, 'all', str=True)) + ['000', '001', '010', '011', '100', '101', '110', '111'] + + """ + if n < 0: + raise ValueError("negative numbers are not allowed") + n = as_int(n) + + if bits is None: + bits = 0 + else: + try: + bits = as_int(bits) + except ValueError: + bits = -1 + else: + if n.bit_length() > bits: + raise ValueError( + "`bits` must be >= {}".format(n.bit_length())) + + if not str: + if bits >= 0: + return [1 if i == "1" else 0 for i in bin(n)[2:].rjust(bits, "0")] + else: + return variations(range(2), n, repetition=True) + else: + if bits >= 0: + return bin(n)[2:].rjust(bits, "0") + else: + return (bin(i)[2:].rjust(n, "0") for i in range(2**n)) + + +def variations(seq, n, repetition=False): + r"""Returns an iterator over the n-sized variations of ``seq`` (size N). + ``repetition`` controls whether items in ``seq`` can appear more than once; + + Examples + ======== + + ``variations(seq, n)`` will return `\frac{N!}{(N - n)!}` permutations without + repetition of ``seq``'s elements: + + >>> from sympy import variations + >>> list(variations([1, 2], 2)) + [(1, 2), (2, 1)] + + ``variations(seq, n, True)`` will return the `N^n` permutations obtained + by allowing repetition of elements: + + >>> list(variations([1, 2], 2, repetition=True)) + [(1, 1), (1, 2), (2, 1), (2, 2)] + + If you ask for more items than are in the set you get the empty set unless + you allow repetitions: + + >>> list(variations([0, 1], 3, repetition=False)) + [] + >>> list(variations([0, 1], 3, repetition=True))[:4] + [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1)] + + .. seealso:: + + `itertools.permutations + `_, + `itertools.product + `_ + """ + if not repetition: + seq = tuple(seq) + if len(seq) < n: + return iter(()) # 0 length iterator + return permutations(seq, n) + else: + if n == 0: + return iter(((),)) # yields 1 empty tuple + else: + return product(seq, repeat=n) + + +def subsets(seq, k=None, repetition=False): + r"""Generates all `k`-subsets (combinations) from an `n`-element set, ``seq``. + + A `k`-subset of an `n`-element set is any subset of length exactly `k`. The + number of `k`-subsets of an `n`-element set is given by ``binomial(n, k)``, + whereas there are `2^n` subsets all together. If `k` is ``None`` then all + `2^n` subsets will be returned from shortest to longest. + + Examples + ======== + + >>> from sympy import subsets + + ``subsets(seq, k)`` will return the + `\frac{n!}{k!(n - k)!}` `k`-subsets (combinations) + without repetition, i.e. 
once an item has been removed, it can no + longer be "taken": + + >>> list(subsets([1, 2], 2)) + [(1, 2)] + >>> list(subsets([1, 2])) + [(), (1,), (2,), (1, 2)] + >>> list(subsets([1, 2, 3], 2)) + [(1, 2), (1, 3), (2, 3)] + + + ``subsets(seq, k, repetition=True)`` will return the + `\frac{(n - 1 + k)!}{k!(n - 1)!}` + combinations *with* repetition: + + >>> list(subsets([1, 2], 2, repetition=True)) + [(1, 1), (1, 2), (2, 2)] + + If you ask for more items than are in the set you get the empty set unless + you allow repetitions: + + >>> list(subsets([0, 1], 3, repetition=False)) + [] + >>> list(subsets([0, 1], 3, repetition=True)) + [(0, 0, 0), (0, 0, 1), (0, 1, 1), (1, 1, 1)] + + """ + if k is None: + if not repetition: + return chain.from_iterable((combinations(seq, k) + for k in range(len(seq) + 1))) + else: + return chain.from_iterable((combinations_with_replacement(seq, k) + for k in range(len(seq) + 1))) + else: + if not repetition: + return combinations(seq, k) + else: + return combinations_with_replacement(seq, k) + + +def filter_symbols(iterator, exclude): + """ + Only yield elements from `iterator` that do not occur in `exclude`. + + Parameters + ========== + + iterator : iterable + iterator to take elements from + + exclude : iterable + elements to exclude + + Returns + ======= + + iterator : iterator + filtered iterator + """ + exclude = set(exclude) + for s in iterator: + if s not in exclude: + yield s + +def numbered_symbols(prefix='x', cls=None, start=0, exclude=(), *args, **assumptions): + """ + Generate an infinite stream of Symbols consisting of a prefix and + increasing subscripts provided that they do not occur in ``exclude``. + + Parameters + ========== + + prefix : str, optional + The prefix to use. By default, this function will generate symbols of + the form "x0", "x1", etc. + + cls : class, optional + The class to use. By default, it uses ``Symbol``, but you can also use ``Wild`` + or ``Dummy``. + + start : int, optional + The start number. By default, it is 0. + + Returns + ======= + + sym : Symbol + The subscripted symbols. + """ + exclude = set(exclude or []) + if cls is None: + # We can't just make the default cls=Symbol because it isn't + # imported yet. + from sympy.core import Symbol + cls = Symbol + + while True: + name = '%s%s' % (prefix, start) + s = cls(name, *args, **assumptions) + if s not in exclude: + yield s + start += 1 + + +def capture(func): + """Return the printed output of func(). + + ``func`` should be a function without arguments that produces output with + print statements. + + >>> from sympy.utilities.iterables import capture + >>> from sympy import pprint + >>> from sympy.abc import x + >>> def foo(): + ... print('hello world!') + ... + >>> 'hello' in capture(foo) # foo, not foo() + True + >>> capture(lambda: pprint(2/x)) + '2\\n-\\nx\\n' + + """ + from io import StringIO + import sys + + stdout = sys.stdout + sys.stdout = file = StringIO() + try: + func() + finally: + sys.stdout = stdout + return file.getvalue() + + +def sift(seq, keyfunc, binary=False): + """ + Sift the sequence, ``seq`` according to ``keyfunc``. + + Returns + ======= + + When ``binary`` is ``False`` (default), the output is a dictionary + where elements of ``seq`` are stored in a list keyed to the value + of keyfunc for that element. 
If ``binary`` is True then a tuple + with lists ``T`` and ``F`` are returned where ``T`` is a list + containing elements of seq for which ``keyfunc`` was ``True`` and + ``F`` containing those elements for which ``keyfunc`` was ``False``; + a ValueError is raised if the ``keyfunc`` is not binary. + + Examples + ======== + + >>> from sympy.utilities import sift + >>> from sympy.abc import x, y + >>> from sympy import sqrt, exp, pi, Tuple + + >>> sift(range(5), lambda x: x % 2) + {0: [0, 2, 4], 1: [1, 3]} + + sift() returns a defaultdict() object, so any key that has no matches will + give []. + + >>> sift([x], lambda x: x.is_commutative) + {True: [x]} + >>> _[False] + [] + + Sometimes you will not know how many keys you will get: + + >>> sift([sqrt(x), exp(x), (y**x)**2], + ... lambda x: x.as_base_exp()[0]) + {E: [exp(x)], x: [sqrt(x)], y: [y**(2*x)]} + + Sometimes you expect the results to be binary; the + results can be unpacked by setting ``binary`` to True: + + >>> sift(range(4), lambda x: x % 2, binary=True) + ([1, 3], [0, 2]) + >>> sift(Tuple(1, pi), lambda x: x.is_rational, binary=True) + ([1], [pi]) + + A ValueError is raised if the predicate was not actually binary + (which is a good test for the logic where sifting is used and + binary results were expected): + + >>> unknown = exp(1) - pi # the rationality of this is unknown + >>> args = Tuple(1, pi, unknown) + >>> sift(args, lambda x: x.is_rational, binary=True) + Traceback (most recent call last): + ... + ValueError: keyfunc gave non-binary output + + The non-binary sifting shows that there were 3 keys generated: + + >>> set(sift(args, lambda x: x.is_rational).keys()) + {None, False, True} + + If you need to sort the sifted items it might be better to use + ``ordered`` which can economically apply multiple sort keys + to a sequence while sorting. + + See Also + ======== + + ordered + + """ + if not binary: + m = defaultdict(list) + for i in seq: + m[keyfunc(i)].append(i) + return m + sift = F, T = [], [] + for i in seq: + try: + sift[keyfunc(i)].append(i) + except (IndexError, TypeError): + raise ValueError('keyfunc gave non-binary output') + return T, F + + +def take(iter, n): + """Return ``n`` items from ``iter`` iterator. """ + return [ value for _, value in zip(range(n), iter) ] + + +def dict_merge(*dicts): + """Merge dictionaries into a single dictionary. """ + merged = {} + + for dict in dicts: + merged.update(dict) + + return merged + + +def common_prefix(*seqs): + """Return the subsequence that is a common start of sequences in ``seqs``. + + >>> from sympy.utilities.iterables import common_prefix + >>> common_prefix(list(range(3))) + [0, 1, 2] + >>> common_prefix(list(range(3)), list(range(4))) + [0, 1, 2] + >>> common_prefix([1, 2, 3], [1, 2, 5]) + [1, 2] + >>> common_prefix([1, 2, 3], [1, 3, 5]) + [1] + """ + if not all(seqs): + return [] + elif len(seqs) == 1: + return seqs[0] + i = 0 + for i in range(min(len(s) for s in seqs)): + if not all(seqs[j][i] == seqs[0][i] for j in range(len(seqs))): + break + else: + i += 1 + return seqs[0][:i] + + +def common_suffix(*seqs): + """Return the subsequence that is a common ending of sequences in ``seqs``. 
+ + >>> from sympy.utilities.iterables import common_suffix + >>> common_suffix(list(range(3))) + [0, 1, 2] + >>> common_suffix(list(range(3)), list(range(4))) + [] + >>> common_suffix([1, 2, 3], [9, 2, 3]) + [2, 3] + >>> common_suffix([1, 2, 3], [9, 7, 3]) + [3] + """ + + if not all(seqs): + return [] + elif len(seqs) == 1: + return seqs[0] + i = 0 + for i in range(-1, -min(len(s) for s in seqs) - 1, -1): + if not all(seqs[j][i] == seqs[0][i] for j in range(len(seqs))): + break + else: + i -= 1 + if i == -1: + return [] + else: + return seqs[0][i + 1:] + + +def prefixes(seq): + """ + Generate all prefixes of a sequence. + + Examples + ======== + + >>> from sympy.utilities.iterables import prefixes + + >>> list(prefixes([1,2,3,4])) + [[1], [1, 2], [1, 2, 3], [1, 2, 3, 4]] + + """ + n = len(seq) + + for i in range(n): + yield seq[:i + 1] + + +def postfixes(seq): + """ + Generate all postfixes of a sequence. + + Examples + ======== + + >>> from sympy.utilities.iterables import postfixes + + >>> list(postfixes([1,2,3,4])) + [[4], [3, 4], [2, 3, 4], [1, 2, 3, 4]] + + """ + n = len(seq) + + for i in range(n): + yield seq[n - i - 1:] + + +def topological_sort(graph, key=None): + r""" + Topological sort of graph's vertices. + + Parameters + ========== + + graph : tuple[list, list[tuple[T, T]] + A tuple consisting of a list of vertices and a list of edges of + a graph to be sorted topologically. + + key : callable[T] (optional) + Ordering key for vertices on the same level. By default the natural + (e.g. lexicographic) ordering is used (in this case the base type + must implement ordering relations). + + Examples + ======== + + Consider a graph:: + + +---+ +---+ +---+ + | 7 |\ | 5 | | 3 | + +---+ \ +---+ +---+ + | _\___/ ____ _/ | + | / \___/ \ / | + V V V V | + +----+ +---+ | + | 11 | | 8 | | + +----+ +---+ | + | | \____ ___/ _ | + | \ \ / / \ | + V \ V V / V V + +---+ \ +---+ | +----+ + | 2 | | | 9 | | | 10 | + +---+ | +---+ | +----+ + \________/ + + where vertices are integers. This graph can be encoded using + elementary Python's data structures as follows:: + + >>> V = [2, 3, 5, 7, 8, 9, 10, 11] + >>> E = [(7, 11), (7, 8), (5, 11), (3, 8), (3, 10), + ... (11, 2), (11, 9), (11, 10), (8, 9)] + + To compute a topological sort for graph ``(V, E)`` issue:: + + >>> from sympy.utilities.iterables import topological_sort + + >>> topological_sort((V, E)) + [3, 5, 7, 8, 11, 2, 9, 10] + + If specific tie breaking approach is needed, use ``key`` parameter:: + + >>> topological_sort((V, E), key=lambda v: -v) + [7, 5, 11, 3, 10, 8, 9, 2] + + Only acyclic graphs can be sorted. If the input graph has a cycle, + then ``ValueError`` will be raised:: + + >>> topological_sort((V, E + [(10, 7)])) + Traceback (most recent call last): + ... + ValueError: cycle detected + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Topological_sorting + + """ + V, E = graph + + L = [] + S = set(V) + E = list(E) + + for v, u in E: + S.discard(u) + + if key is None: + def key(value): + return value + + S = sorted(S, key=key, reverse=True) + + while S: + node = S.pop() + L.append(node) + + for u, v in list(E): + if u == node: + E.remove((u, v)) + + for _u, _v in E: + if v == _v: + break + else: + kv = key(v) + + for i, s in enumerate(S): + ks = key(s) + + if kv > ks: + S.insert(i, v) + break + else: + S.append(v) + + if E: + raise ValueError("cycle detected") + else: + return L + + +def strongly_connected_components(G): + r""" + Strongly connected components of a directed graph in reverse topological + order. 
+ + + Parameters + ========== + + graph : tuple[list, list[tuple[T, T]] + A tuple consisting of a list of vertices and a list of edges of + a graph whose strongly connected components are to be found. + + + Examples + ======== + + Consider a directed graph (in dot notation):: + + digraph { + A -> B + A -> C + B -> C + C -> B + B -> D + } + + .. graphviz:: + + digraph { + A -> B + A -> C + B -> C + C -> B + B -> D + } + + where vertices are the letters A, B, C and D. This graph can be encoded + using Python's elementary data structures as follows:: + + >>> V = ['A', 'B', 'C', 'D'] + >>> E = [('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'B'), ('B', 'D')] + + The strongly connected components of this graph can be computed as + + >>> from sympy.utilities.iterables import strongly_connected_components + + >>> strongly_connected_components((V, E)) + [['D'], ['B', 'C'], ['A']] + + This also gives the components in reverse topological order. + + Since the subgraph containing B and C has a cycle they must be together in + a strongly connected component. A and D are connected to the rest of the + graph but not in a cyclic manner so they appear as their own strongly + connected components. + + + Notes + ===== + + The vertices of the graph must be hashable for the data structures used. + If the vertices are unhashable replace them with integer indices. + + This function uses Tarjan's algorithm to compute the strongly connected + components in `O(|V|+|E|)` (linear) time. + + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Strongly_connected_component + .. [2] https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm + + + See Also + ======== + + sympy.utilities.iterables.connected_components + + """ + # Map from a vertex to its neighbours + V, E = G + Gmap = {vi: [] for vi in V} + for v1, v2 in E: + Gmap[v1].append(v2) + return _strongly_connected_components(V, Gmap) + + +def _strongly_connected_components(V, Gmap): + """More efficient internal routine for strongly_connected_components""" + # + # Here V is an iterable of vertices and Gmap is a dict mapping each vertex + # to a list of neighbours e.g.: + # + # V = [0, 1, 2, 3] + # Gmap = {0: [2, 3], 1: [0]} + # + # For a large graph these data structures can often be created more + # efficiently then those expected by strongly_connected_components() which + # in this case would be + # + # V = [0, 1, 2, 3] + # Gmap = [(0, 2), (0, 3), (1, 0)] + # + # XXX: Maybe this should be the recommended function to use instead... + # + + # Non-recursive Tarjan's algorithm: + lowlink = {} + indices = {} + stack = OrderedDict() + callstack = [] + components = [] + nomore = object() + + def start(v): + index = len(stack) + indices[v] = lowlink[v] = index + stack[v] = None + callstack.append((v, iter(Gmap[v]))) + + def finish(v1): + # Finished a component? + if lowlink[v1] == indices[v1]: + component = [stack.popitem()[0]] + while component[-1] is not v1: + component.append(stack.popitem()[0]) + components.append(component[::-1]) + v2, _ = callstack.pop() + if callstack: + v1, _ = callstack[-1] + lowlink[v1] = min(lowlink[v1], lowlink[v2]) + + for v in V: + if v in indices: + continue + start(v) + while callstack: + v1, it1 = callstack[-1] + v2 = next(it1, nomore) + # Finished children of v1? 
+ if v2 is nomore: + finish(v1) + # Recurse on v2 + elif v2 not in indices: + start(v2) + elif v2 in stack: + lowlink[v1] = min(lowlink[v1], indices[v2]) + + # Reverse topological sort order: + return components + + +def connected_components(G): + r""" + Connected components of an undirected graph or weakly connected components + of a directed graph. + + + Parameters + ========== + + graph : tuple[list, list[tuple[T, T]] + A tuple consisting of a list of vertices and a list of edges of + a graph whose connected components are to be found. + + + Examples + ======== + + + Given an undirected graph:: + + graph { + A -- B + C -- D + } + + .. graphviz:: + + graph { + A -- B + C -- D + } + + We can find the connected components using this function if we include + each edge in both directions:: + + >>> from sympy.utilities.iterables import connected_components + + >>> V = ['A', 'B', 'C', 'D'] + >>> E = [('A', 'B'), ('B', 'A'), ('C', 'D'), ('D', 'C')] + >>> connected_components((V, E)) + [['A', 'B'], ['C', 'D']] + + The weakly connected components of a directed graph can found the same + way. + + + Notes + ===== + + The vertices of the graph must be hashable for the data structures used. + If the vertices are unhashable replace them with integer indices. + + This function uses Tarjan's algorithm to compute the connected components + in `O(|V|+|E|)` (linear) time. + + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Component_%28graph_theory%29 + .. [2] https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm + + + See Also + ======== + + sympy.utilities.iterables.strongly_connected_components + + """ + # Duplicate edges both ways so that the graph is effectively undirected + # and return the strongly connected components: + V, E = G + E_undirected = [] + for v1, v2 in E: + E_undirected.extend([(v1, v2), (v2, v1)]) + return strongly_connected_components((V, E_undirected)) + + +def rotate_left(x, y): + """ + Left rotates a list x by the number of steps specified + in y. + + Examples + ======== + + >>> from sympy.utilities.iterables import rotate_left + >>> a = [0, 1, 2] + >>> rotate_left(a, 1) + [1, 2, 0] + """ + if len(x) == 0: + return [] + y = y % len(x) + return x[y:] + x[:y] + + +def rotate_right(x, y): + """ + Right rotates a list x by the number of steps specified + in y. + + Examples + ======== + + >>> from sympy.utilities.iterables import rotate_right + >>> a = [0, 1, 2] + >>> rotate_right(a, 1) + [2, 0, 1] + """ + if len(x) == 0: + return [] + y = len(x) - y % len(x) + return x[y:] + x[:y] + + +def least_rotation(x, key=None): + ''' + Returns the number of steps of left rotation required to + obtain lexicographically minimal string/list/tuple, etc. + + Examples + ======== + + >>> from sympy.utilities.iterables import least_rotation, rotate_left + >>> a = [3, 1, 5, 1, 2] + >>> least_rotation(a) + 3 + >>> rotate_left(a, _) + [1, 2, 3, 1, 5] + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Lexicographically_minimal_string_rotation + + ''' + from sympy.functions.elementary.miscellaneous import Id + if key is None: key = Id + S = x + x # Concatenate string to it self to avoid modular arithmetic + f = [-1] * len(S) # Failure function + k = 0 # Least rotation of string found so far + for j in range(1,len(S)): + sj = S[j] + i = f[j-k-1] + while i != -1 and sj != S[k+i+1]: + if key(sj) < key(S[k+i+1]): + k = j-i-1 + i = f[i] + if sj != S[k+i+1]: + if key(sj) < key(S[k]): + k = j + f[j-k] = -1 + else: + f[j-k] = i+1 + return k + + +def multiset_combinations(m, n, g=None): + """ + Return the unique combinations of size ``n`` from multiset ``m``. + + Examples + ======== + + >>> from sympy.utilities.iterables import multiset_combinations + >>> from itertools import combinations + >>> [''.join(i) for i in multiset_combinations('baby', 3)] + ['abb', 'aby', 'bby'] + + >>> def count(f, s): return len(list(f(s, 3))) + + The number of combinations depends on the number of letters; the + number of unique combinations depends on how the letters are + repeated. + + >>> s1 = 'abracadabra' + >>> s2 = 'banana tree' + >>> count(combinations, s1), count(multiset_combinations, s1) + (165, 23) + >>> count(combinations, s2), count(multiset_combinations, s2) + (165, 54) + + """ + from sympy.core.sorting import ordered + if g is None: + if isinstance(m, dict): + if any(as_int(v) < 0 for v in m.values()): + raise ValueError('counts cannot be negative') + N = sum(m.values()) + if n > N: + return + g = [[k, m[k]] for k in ordered(m)] + else: + m = list(m) + N = len(m) + if n > N: + return + try: + m = multiset(m) + g = [(k, m[k]) for k in ordered(m)] + except TypeError: + m = list(ordered(m)) + g = [list(i) for i in group(m, multiple=False)] + del m + else: + # not checking counts since g is intended for internal use + N = sum(v for k, v in g) + if n > N or not n: + yield [] + else: + for i, (k, v) in enumerate(g): + if v >= n: + yield [k]*n + v = n - 1 + for v in range(min(n, v), 0, -1): + for j in multiset_combinations(None, n - v, g[i + 1:]): + rv = [k]*v + j + if len(rv) == n: + yield rv + +def multiset_permutations(m, size=None, g=None): + """ + Return the unique permutations of multiset ``m``. 
+ + Examples + ======== + + >>> from sympy.utilities.iterables import multiset_permutations + >>> from sympy import factorial + >>> [''.join(i) for i in multiset_permutations('aab')] + ['aab', 'aba', 'baa'] + >>> factorial(len('banana')) + 720 + >>> len(list(multiset_permutations('banana'))) + 60 + """ + from sympy.core.sorting import ordered + if g is None: + if isinstance(m, dict): + if any(as_int(v) < 0 for v in m.values()): + raise ValueError('counts cannot be negative') + g = [[k, m[k]] for k in ordered(m)] + else: + m = list(ordered(m)) + g = [list(i) for i in group(m, multiple=False)] + del m + do = [gi for gi in g if gi[1] > 0] + SUM = sum([gi[1] for gi in do]) + if not do or size is not None and (size > SUM or size < 1): + if not do and size is None or size == 0: + yield [] + return + elif size == 1: + for k, v in do: + yield [k] + elif len(do) == 1: + k, v = do[0] + v = v if size is None else (size if size <= v else 0) + yield [k for i in range(v)] + elif all(v == 1 for k, v in do): + for p in permutations([k for k, v in do], size): + yield list(p) + else: + size = size if size is not None else SUM + for i, (k, v) in enumerate(do): + do[i][1] -= 1 + for j in multiset_permutations(None, size - 1, do): + if j: + yield [k] + j + do[i][1] += 1 + + +def _partition(seq, vector, m=None): + """ + Return the partition of seq as specified by the partition vector. + + Examples + ======== + + >>> from sympy.utilities.iterables import _partition + >>> _partition('abcde', [1, 0, 1, 2, 0]) + [['b', 'e'], ['a', 'c'], ['d']] + + Specifying the number of bins in the partition is optional: + + >>> _partition('abcde', [1, 0, 1, 2, 0], 3) + [['b', 'e'], ['a', 'c'], ['d']] + + The output of _set_partitions can be passed as follows: + + >>> output = (3, [1, 0, 1, 2, 0]) + >>> _partition('abcde', *output) + [['b', 'e'], ['a', 'c'], ['d']] + + See Also + ======== + + combinatorics.partitions.Partition.from_rgs + + """ + if m is None: + m = max(vector) + 1 + elif isinstance(vector, int): # entered as m, vector + vector, m = m, vector + p = [[] for i in range(m)] + for i, v in enumerate(vector): + p[v].append(seq[i]) + return p + + +def _set_partitions(n): + """Cycle through all partitions of n elements, yielding the + current number of partitions, ``m``, and a mutable list, ``q`` + such that ``element[i]`` is in part ``q[i]`` of the partition. + + NOTE: ``q`` is modified in place and generally should not be changed + between function calls. + + Examples + ======== + + >>> from sympy.utilities.iterables import _set_partitions, _partition + >>> for m, q in _set_partitions(3): + ... print('%s %s %s' % (m, q, _partition('abc', q, m))) + 1 [0, 0, 0] [['a', 'b', 'c']] + 2 [0, 0, 1] [['a', 'b'], ['c']] + 2 [0, 1, 0] [['a', 'c'], ['b']] + 2 [0, 1, 1] [['a'], ['b', 'c']] + 3 [0, 1, 2] [['a'], ['b'], ['c']] + + Notes + ===== + + This algorithm is similar to, and solves the same problem as, + Algorithm 7.2.1.5H, from volume 4A of Knuth's The Art of Computer + Programming. Knuth uses the term "restricted growth string" where + this code refers to a "partition vector". In each case, the meaning is + the same: the value in the ith element of the vector specifies to + which part the ith set element is to be assigned. + + At the lowest level, this code implements an n-digit big-endian + counter (stored in the array q) which is incremented (with carries) to + get the next partition in the sequence. 
A special twist is that a + digit is constrained to be at most one greater than the maximum of all + the digits to the left of it. The array p maintains this maximum, so + that the code can efficiently decide when a digit can be incremented + in place or whether it needs to be reset to 0 and trigger a carry to + the next digit. The enumeration starts with all the digits 0 (which + corresponds to all the set elements being assigned to the same 0th + part), and ends with 0123...n, which corresponds to each set element + being assigned to a different, singleton, part. + + This routine was rewritten to use 0-based lists while trying to + preserve the beauty and efficiency of the original algorithm. + + References + ========== + + .. [1] Nijenhuis, Albert and Wilf, Herbert. (1978) Combinatorial Algorithms, + 2nd Ed, p 91, algorithm "nexequ". Available online from + https://www.math.upenn.edu/~wilf/website/CombAlgDownld.html (viewed + November 17, 2012). + + """ + p = [0]*n + q = [0]*n + nc = 1 + yield nc, q + while nc != n: + m = n + while 1: + m -= 1 + i = q[m] + if p[i] != 1: + break + q[m] = 0 + i += 1 + q[m] = i + m += 1 + nc += m - n + p[0] += n - m + if i == nc: + p[nc] = 0 + nc += 1 + p[i - 1] -= 1 + p[i] += 1 + yield nc, q + + +def multiset_partitions(multiset, m=None): + """ + Return unique partitions of the given multiset (in list form). + If ``m`` is None, all multisets will be returned, otherwise only + partitions with ``m`` parts will be returned. + + If ``multiset`` is an integer, a range [0, 1, ..., multiset - 1] + will be supplied. + + Examples + ======== + + >>> from sympy.utilities.iterables import multiset_partitions + >>> list(multiset_partitions([1, 2, 3, 4], 2)) + [[[1, 2, 3], [4]], [[1, 2, 4], [3]], [[1, 2], [3, 4]], + [[1, 3, 4], [2]], [[1, 3], [2, 4]], [[1, 4], [2, 3]], + [[1], [2, 3, 4]]] + >>> list(multiset_partitions([1, 2, 3, 4], 1)) + [[[1, 2, 3, 4]]] + + Only unique partitions are returned and these will be returned in a + canonical order regardless of the order of the input: + + >>> a = [1, 2, 2, 1] + >>> ans = list(multiset_partitions(a, 2)) + >>> a.sort() + >>> list(multiset_partitions(a, 2)) == ans + True + >>> a = range(3, 1, -1) + >>> (list(multiset_partitions(a)) == + ... list(multiset_partitions(sorted(a)))) + True + + If m is omitted then all partitions will be returned: + + >>> list(multiset_partitions([1, 1, 2])) + [[[1, 1, 2]], [[1, 1], [2]], [[1, 2], [1]], [[1], [1], [2]]] + >>> list(multiset_partitions([1]*3)) + [[[1, 1, 1]], [[1], [1, 1]], [[1], [1], [1]]] + + Counting + ======== + + The number of partitions of a set is given by the bell number: + + >>> from sympy import bell + >>> len(list(multiset_partitions(5))) == bell(5) == 52 + True + + The number of partitions of length k from a set of size n is given by the + Stirling Number of the 2nd kind: + + >>> from sympy.functions.combinatorial.numbers import stirling + >>> stirling(5, 2) == len(list(multiset_partitions(5, 2))) == 15 + True + + These comments on counting apply to *sets*, not multisets. + + Notes + ===== + + When all the elements are the same in the multiset, the order + of the returned partitions is determined by the ``partitions`` + routine. If one is counting partitions then it is better to use + the ``nT`` function. 
+ + See Also + ======== + + partitions + sympy.combinatorics.partitions.Partition + sympy.combinatorics.partitions.IntegerPartition + sympy.functions.combinatorial.numbers.nT + + """ + # This function looks at the supplied input and dispatches to + # several special-case routines as they apply. + if isinstance(multiset, int): + n = multiset + if m and m > n: + return + multiset = list(range(n)) + if m == 1: + yield [multiset[:]] + return + + # If m is not None, it can sometimes be faster to use + # MultisetPartitionTraverser.enum_range() even for inputs + # which are sets. Since the _set_partitions code is quite + # fast, this is only advantageous when the overall set + # partitions outnumber those with the desired number of parts + # by a large factor. (At least 60.) Such a switch is not + # currently implemented. + for nc, q in _set_partitions(n): + if m is None or nc == m: + rv = [[] for i in range(nc)] + for i in range(n): + rv[q[i]].append(multiset[i]) + yield rv + return + + if len(multiset) == 1 and isinstance(multiset, str): + multiset = [multiset] + + if not has_variety(multiset): + # Only one component, repeated n times. The resulting + # partitions correspond to partitions of integer n. + n = len(multiset) + if m and m > n: + return + if m == 1: + yield [multiset[:]] + return + x = multiset[:1] + for size, p in partitions(n, m, size=True): + if m is None or size == m: + rv = [] + for k in sorted(p): + rv.extend([x*k]*p[k]) + yield rv + else: + from sympy.core.sorting import ordered + multiset = list(ordered(multiset)) + n = len(multiset) + if m and m > n: + return + if m == 1: + yield [multiset[:]] + return + + # Split the information of the multiset into two lists - + # one of the elements themselves, and one (of the same length) + # giving the number of repeats for the corresponding element. + elements, multiplicities = zip(*group(multiset, False)) + + if len(elements) < len(multiset): + # General case - multiset with more than one distinct element + # and at least one element repeated more than once. + if m: + mpt = MultisetPartitionTraverser() + for state in mpt.enum_range(multiplicities, m-1, m): + yield list_visitor(state, elements) + else: + for state in multiset_partitions_taocp(multiplicities): + yield list_visitor(state, elements) + else: + # Set partitions case - no repeated elements. Pretty much + # same as int argument case above, with same possible, but + # currently unimplemented optimization for some cases when + # m is not None + for nc, q in _set_partitions(n): + if m is None or nc == m: + rv = [[] for i in range(nc)] + for i in range(n): + rv[q[i]].append(i) + yield [[multiset[j] for j in i] for i in rv] + + +def partitions(n, m=None, k=None, size=False): + """Generate all partitions of positive integer, n. + + Parameters + ========== + + m : integer (default gives partitions of all sizes) + limits number of parts in partition (mnemonic: m, maximum parts) + k : integer (default gives partitions number from 1 through n) + limits the numbers that are kept in the partition (mnemonic: k, keys) + size : bool (default False, only partition is returned) + when ``True`` then (M, P) is returned where M is the sum of the + multiplicities and P is the generated partition. + + Each partition is represented as a dictionary, mapping an integer + to the number of copies of that integer in the partition. For example, + the first partition of 4 returned is {4: 1}, "4: one of them". 
+ + Examples + ======== + + >>> from sympy.utilities.iterables import partitions + + The numbers appearing in the partition (the key of the returned dict) + are limited with k: + + >>> for p in partitions(6, k=2): # doctest: +SKIP + ... print(p) + {2: 3} + {1: 2, 2: 2} + {1: 4, 2: 1} + {1: 6} + + The maximum number of parts in the partition (the sum of the values in + the returned dict) are limited with m (default value, None, gives + partitions from 1 through n): + + >>> for p in partitions(6, m=2): # doctest: +SKIP + ... print(p) + ... + {6: 1} + {1: 1, 5: 1} + {2: 1, 4: 1} + {3: 2} + + References + ========== + + .. [1] modified from Tim Peter's version to allow for k and m values: + https://code.activestate.com/recipes/218332-generator-for-integer-partitions/ + + See Also + ======== + + sympy.combinatorics.partitions.Partition + sympy.combinatorics.partitions.IntegerPartition + + """ + if (n <= 0 or + m is not None and m < 1 or + k is not None and k < 1 or + m and k and m*k < n): + # the empty set is the only way to handle these inputs + # and returning {} to represent it is consistent with + # the counting convention, e.g. nT(0) == 1. + if size: + yield 0, {} + else: + yield {} + return + + if m is None: + m = n + else: + m = min(m, n) + k = min(k or n, n) + + n, m, k = as_int(n), as_int(m), as_int(k) + q, r = divmod(n, k) + ms = {k: q} + keys = [k] # ms.keys(), from largest to smallest + if r: + ms[r] = 1 + keys.append(r) + room = m - q - bool(r) + if size: + yield sum(ms.values()), ms.copy() + else: + yield ms.copy() + + while keys != [1]: + # Reuse any 1's. + if keys[-1] == 1: + del keys[-1] + reuse = ms.pop(1) + room += reuse + else: + reuse = 0 + + while 1: + # Let i be the smallest key larger than 1. Reuse one + # instance of i. + i = keys[-1] + newcount = ms[i] = ms[i] - 1 + reuse += i + if newcount == 0: + del keys[-1], ms[i] + room += 1 + + # Break the remainder into pieces of size i-1. + i -= 1 + q, r = divmod(reuse, i) + need = q + bool(r) + if need > room: + if not keys: + return + continue + + ms[i] = q + keys.append(i) + if r: + ms[r] = 1 + keys.append(r) + break + room -= need + if size: + yield sum(ms.values()), ms.copy() + else: + yield ms.copy() + + +def ordered_partitions(n, m=None, sort=True): + """Generates ordered partitions of integer ``n``. + + Parameters + ========== + + m : integer (default None) + The default value gives partitions of all sizes else only + those with size m. In addition, if ``m`` is not None then + partitions are generated *in place* (see examples). + sort : bool (default True) + Controls whether partitions are + returned in sorted order when ``m`` is not None; when False, + the partitions are returned as fast as possible with elements + sorted, but when m|n the partitions will not be in + ascending lexicographical order. + + Examples + ======== + + >>> from sympy.utilities.iterables import ordered_partitions + + All partitions of 5 in ascending lexicographical: + + >>> for p in ordered_partitions(5): + ... print(p) + [1, 1, 1, 1, 1] + [1, 1, 1, 2] + [1, 1, 3] + [1, 2, 2] + [1, 4] + [2, 3] + [5] + + Only partitions of 5 with two parts: + + >>> for p in ordered_partitions(5, 2): + ... 
print(p) + [1, 4] + [2, 3] + + When ``m`` is given, a given list objects will be used more than + once for speed reasons so you will not see the correct partitions + unless you make a copy of each as it is generated: + + >>> [p for p in ordered_partitions(7, 3)] + [[1, 1, 1], [1, 1, 1], [1, 1, 1], [2, 2, 2]] + >>> [list(p) for p in ordered_partitions(7, 3)] + [[1, 1, 5], [1, 2, 4], [1, 3, 3], [2, 2, 3]] + + When ``n`` is a multiple of ``m``, the elements are still sorted + but the partitions themselves will be *unordered* if sort is False; + the default is to return them in ascending lexicographical order. + + >>> for p in ordered_partitions(6, 2): + ... print(p) + [1, 5] + [2, 4] + [3, 3] + + But if speed is more important than ordering, sort can be set to + False: + + >>> for p in ordered_partitions(6, 2, sort=False): + ... print(p) + [1, 5] + [3, 3] + [2, 4] + + References + ========== + + .. [1] Generating Integer Partitions, [online], + Available: https://jeromekelleher.net/generating-integer-partitions.html + .. [2] Jerome Kelleher and Barry O'Sullivan, "Generating All + Partitions: A Comparison Of Two Encodings", [online], + Available: https://arxiv.org/pdf/0909.2331v2.pdf + """ + if n < 1 or m is not None and m < 1: + # the empty set is the only way to handle these inputs + # and returning {} to represent it is consistent with + # the counting convention, e.g. nT(0) == 1. + yield [] + return + + if m is None: + # The list `a`'s leading elements contain the partition in which + # y is the biggest element and x is either the same as y or the + # 2nd largest element; v and w are adjacent element indices + # to which x and y are being assigned, respectively. + a = [1]*n + y = -1 + v = n + while v > 0: + v -= 1 + x = a[v] + 1 + while y >= 2 * x: + a[v] = x + y -= x + v += 1 + w = v + 1 + while x <= y: + a[v] = x + a[w] = y + yield a[:w + 1] + x += 1 + y -= 1 + a[v] = x + y + y = a[v] - 1 + yield a[:w] + elif m == 1: + yield [n] + elif n == m: + yield [1]*n + else: + # recursively generate partitions of size m + for b in range(1, n//m + 1): + a = [b]*m + x = n - b*m + if not x: + if sort: + yield a + elif not sort and x <= m: + for ax in ordered_partitions(x, sort=False): + mi = len(ax) + a[-mi:] = [i + b for i in ax] + yield a + a[-mi:] = [b]*mi + else: + for mi in range(1, m): + for ax in ordered_partitions(x, mi, sort=True): + a[-mi:] = [i + b for i in ax] + yield a + a[-mi:] = [b]*mi + + +def binary_partitions(n): + """ + Generates the binary partition of n. + + A binary partition consists only of numbers that are + powers of two. Each step reduces a `2^{k+1}` to `2^k` and + `2^k`. Thus 16 is converted to 8 and 8. + + Examples + ======== + + >>> from sympy.utilities.iterables import binary_partitions + >>> for i in binary_partitions(5): + ... print(i) + ... + [4, 1] + [2, 2, 1] + [2, 1, 1, 1] + [1, 1, 1, 1, 1] + + References + ========== + + .. 
[1] TAOCP 4, section 7.2.1.5, problem 64 + + """ + from math import ceil, log + power = int(2**(ceil(log(n, 2)))) + acc = 0 + partition = [] + while power: + if acc + power <= n: + partition.append(power) + acc += power + power >>= 1 + + last_num = len(partition) - 1 - (n & 1) + while last_num >= 0: + yield partition + if partition[last_num] == 2: + partition[last_num] = 1 + partition.append(1) + last_num -= 1 + continue + partition.append(1) + partition[last_num] >>= 1 + x = partition[last_num + 1] = partition[last_num] + last_num += 1 + while x > 1: + if x <= len(partition) - last_num - 1: + del partition[-x + 1:] + last_num += 1 + partition[last_num] = x + else: + x >>= 1 + yield [1]*n + + +def has_dups(seq): + """Return True if there are any duplicate elements in ``seq``. + + Examples + ======== + + >>> from sympy import has_dups, Dict, Set + >>> has_dups((1, 2, 1)) + True + >>> has_dups(range(3)) + False + >>> all(has_dups(c) is False for c in (set(), Set(), dict(), Dict())) + True + """ + from sympy.core.containers import Dict + from sympy.sets.sets import Set + if isinstance(seq, (dict, set, Dict, Set)): + return False + unique = set() + try: + return any(True for s in seq if s in unique or unique.add(s)) + except TypeError: + return len(seq) != len(list(uniq(seq))) + + +def has_variety(seq): + """Return True if there are any different elements in ``seq``. + + Examples + ======== + + >>> from sympy import has_variety + + >>> has_variety((1, 2, 1)) + True + >>> has_variety((1, 1, 1)) + False + """ + for i, s in enumerate(seq): + if i == 0: + sentinel = s + else: + if s != sentinel: + return True + return False + + +def uniq(seq, result=None): + """ + Yield unique elements from ``seq`` as an iterator. The second + parameter ``result`` is used internally; it is not necessary + to pass anything for this. + + Note: changing the sequence during iteration will raise a + RuntimeError if the size of the sequence is known; if you pass + an iterator and advance the iterator you will change the + output of this routine but there will be no warning. + + Examples + ======== + + >>> from sympy.utilities.iterables import uniq + >>> dat = [1, 4, 1, 5, 4, 2, 1, 2] + >>> type(uniq(dat)) in (list, tuple) + False + + >>> list(uniq(dat)) + [1, 4, 5, 2] + >>> list(uniq(x for x in dat)) + [1, 4, 5, 2] + >>> list(uniq([[1], [2, 1], [1]])) + [[1], [2, 1]] + """ + try: + n = len(seq) + except TypeError: + n = None + def check(): + # check that size of seq did not change during iteration; + # if n == None the object won't support size changing, e.g. + # an iterator can't be changed + if n is not None and len(seq) != n: + raise RuntimeError('sequence changed size during iteration') + try: + seen = set() + result = result or [] + for i, s in enumerate(seq): + if not (s in seen or seen.add(s)): + yield s + check() + except TypeError: + if s not in result: + yield s + check() + result.append(s) + if hasattr(seq, '__getitem__'): + yield from uniq(seq[i + 1:], result) + else: + yield from uniq(seq, result) + + +def generate_bell(n): + """Return permutations of [0, 1, ..., n - 1] such that each permutation + differs from the last by the exchange of a single pair of neighbors. + The ``n!`` permutations are returned as an iterator. In order to obtain + the next permutation from a random starting permutation, use the + ``next_trotterjohnson`` method of the Permutation class (which generates + the same sequence in a different manner). 
+ + Examples + ======== + + >>> from itertools import permutations + >>> from sympy.utilities.iterables import generate_bell + >>> from sympy import zeros, Matrix + + This is the sort of permutation used in the ringing of physical bells, + and does not produce permutations in lexicographical order. Rather, the + permutations differ from each other by exactly one inversion, and the + position at which the swapping occurs varies periodically in a simple + fashion. Consider the first few permutations of 4 elements generated + by ``permutations`` and ``generate_bell``: + + >>> list(permutations(range(4)))[:5] + [(0, 1, 2, 3), (0, 1, 3, 2), (0, 2, 1, 3), (0, 2, 3, 1), (0, 3, 1, 2)] + >>> list(generate_bell(4))[:5] + [(0, 1, 2, 3), (0, 1, 3, 2), (0, 3, 1, 2), (3, 0, 1, 2), (3, 0, 2, 1)] + + Notice how the 2nd and 3rd lexicographical permutations have 3 elements + out of place whereas each "bell" permutation always has only two + elements out of place relative to the previous permutation (and so the + signature (+/-1) of a permutation is opposite of the signature of the + previous permutation). + + How the position of inversion varies across the elements can be seen + by tracing out where the largest number appears in the permutations: + + >>> m = zeros(4, 24) + >>> for i, p in enumerate(generate_bell(4)): + ... m[:, i] = Matrix([j - 3 for j in list(p)]) # make largest zero + >>> m.print_nonzero('X') + [XXX XXXXXX XXXXXX XXX] + [XX XX XXXX XX XXXX XX XX] + [X XXXX XX XXXX XX XXXX X] + [ XXXXXX XXXXXX XXXXXX ] + + See Also + ======== + + sympy.combinatorics.permutations.Permutation.next_trotterjohnson + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Method_ringing + + .. [2] https://stackoverflow.com/questions/4856615/recursive-permutation/4857018 + + .. [3] https://web.archive.org/web/20160313023044/http://programminggeeks.com/bell-algorithm-for-permutation/ + + .. [4] https://en.wikipedia.org/wiki/Steinhaus%E2%80%93Johnson%E2%80%93Trotter_algorithm + + .. [5] Generating involutions, derangements, and relatives by ECO + Vincent Vajnovszki, DMTCS vol 1 issue 12, 2010 + + """ + n = as_int(n) + if n < 1: + raise ValueError('n must be a positive integer') + if n == 1: + yield (0,) + elif n == 2: + yield (0, 1) + yield (1, 0) + elif n == 3: + yield from [(0, 1, 2), (0, 2, 1), (2, 0, 1), (2, 1, 0), (1, 2, 0), (1, 0, 2)] + else: + m = n - 1 + op = [0] + [-1]*m + l = list(range(n)) + while True: + yield tuple(l) + # find biggest element with op + big = None, -1 # idx, value + for i in range(n): + if op[i] and l[i] > big[1]: + big = i, l[i] + i, _ = big + if i is None: + break # there are no ops left + # swap it with neighbor in the indicated direction + j = i + op[i] + l[i], l[j] = l[j], l[i] + op[i], op[j] = op[j], op[i] + # if it landed at the end or if the neighbor in the same + # direction is bigger then turn off op + if j == 0 or j == m or l[j + op[j]] > l[j]: + op[j] = 0 + # any element bigger to the left gets +1 op + for i in range(j): + if l[i] > l[j]: + op[i] = 1 + # any element bigger to the right gets -1 op + for i in range(j + 1, n): + if l[i] > l[j]: + op[i] = -1 + + +def generate_involutions(n): + """ + Generates involutions. + + An involution is a permutation that when multiplied + by itself equals the identity permutation. In this + implementation the involutions are generated using + Fixed Points. + + Alternatively, an involution can be considered as + a permutation that does not contain any cycles with + a length that is greater than two. 
+ + Examples + ======== + + >>> from sympy.utilities.iterables import generate_involutions + >>> list(generate_involutions(3)) + [(0, 1, 2), (0, 2, 1), (1, 0, 2), (2, 1, 0)] + >>> len(list(generate_involutions(4))) + 10 + + References + ========== + + .. [1] https://mathworld.wolfram.com/PermutationInvolution.html + + """ + idx = list(range(n)) + for p in permutations(idx): + for i in idx: + if p[p[i]] != i: + break + else: + yield p + + +def multiset_derangements(s): + """Generate derangements of the elements of s *in place*. + + Examples + ======== + + >>> from sympy.utilities.iterables import multiset_derangements, uniq + + Because the derangements of multisets (not sets) are generated + in place, copies of the return value must be made if a collection + of derangements is desired or else all values will be the same: + + >>> list(uniq([i for i in multiset_derangements('1233')])) + [[None, None, None, None]] + >>> [i.copy() for i in multiset_derangements('1233')] + [['3', '3', '1', '2'], ['3', '3', '2', '1']] + >>> [''.join(i) for i in multiset_derangements('1233')] + ['3312', '3321'] + """ + from sympy.core.sorting import ordered + # create multiset dictionary of hashable elements or else + # remap elements to integers + try: + ms = multiset(s) + except TypeError: + # give each element a canonical integer value + key = dict(enumerate(ordered(uniq(s)))) + h = [] + for si in s: + for k in key: + if key[k] == si: + h.append(k) + break + for i in multiset_derangements(h): + yield [key[j] for j in i] + return + + mx = max(ms.values()) # max repetition of any element + n = len(s) # the number of elements + + ## special cases + + # 1) one element has more than half the total cardinality of s: no + # derangements are possible. + if mx*2 > n: + return + + # 2) all elements appear once: singletons + if len(ms) == n: + yield from _set_derangements(s) + return + + # find the first element that is repeated the most to place + # in the following two special cases where the selection + # is unambiguous: either there are two elements with multiplicity + # of mx or else there is only one with multiplicity mx + for M in ms: + if ms[M] == mx: + break + + inonM = [i for i in range(n) if s[i] != M] # location of non-M + iM = [i for i in range(n) if s[i] == M] # locations of M + rv = [None]*n + + # 3) half are the same + if 2*mx == n: + # M goes into non-M locations + for i in inonM: + rv[i] = M + # permutations of non-M go to M locations + for p in multiset_permutations([s[i] for i in inonM]): + for i, pi in zip(iM, p): + rv[i] = pi + yield rv + # clean-up (and encourages proper use of routine) + rv[:] = [None]*n + return + + # 4) single repeat covers all but 1 of the non-repeats: + # if there is one repeat then the multiset of the values + # of ms would be {mx: 1, 1: n - mx}, i.e. there would + # be n - mx + 1 values with the condition that n - 2*mx = 1 + if n - 2*mx == 1 and len(ms.values()) == n - mx + 1: + for i, i1 in enumerate(inonM): + ifill = inonM[:i] + inonM[i+1:] + for j in ifill: + rv[j] = M + for p in permutations([s[j] for j in ifill]): + rv[i1] = s[i1] + for j, pi in zip(iM, p): + rv[j] = pi + k = i1 + for j in iM: + rv[j], rv[k] = rv[k], rv[j] + yield rv + k = j + # clean-up (and encourages proper use of routine) + rv[:] = [None]*n + return + + ## general case is handled with 3 helpers: + # 1) `finish_derangements` will place the last two elements + # which have arbitrary multiplicities, e.g. 
for multiset + # {c: 3, a: 2, b: 2}, the last two elements are a and b + # 2) `iopen` will tell where a given element can be placed + # 3) `do` will recursively place elements into subsets of + # valid locations + + def finish_derangements(): + """Place the last two elements into the partially completed + derangement, and yield the results. + """ + + a = take[1][0] # penultimate element + a_ct = take[1][1] + b = take[0][0] # last element to be placed + b_ct = take[0][1] + + # split the indexes of the not-already-assigned elements of rv into + # three categories + forced_a = [] # positions which must have an a + forced_b = [] # positions which must have a b + open_free = [] # positions which could take either + for i in range(len(s)): + if rv[i] is None: + if s[i] == a: + forced_b.append(i) + elif s[i] == b: + forced_a.append(i) + else: + open_free.append(i) + + if len(forced_a) > a_ct or len(forced_b) > b_ct: + # No derangement possible + return + + for i in forced_a: + rv[i] = a + for i in forced_b: + rv[i] = b + for a_place in combinations(open_free, a_ct - len(forced_a)): + for a_pos in a_place: + rv[a_pos] = a + for i in open_free: + if rv[i] is None: # anything not in the subset is set to b + rv[i] = b + yield rv + # Clean up/undo the final placements + for i in open_free: + rv[i] = None + + # additional cleanup - clear forced_a, forced_b + for i in forced_a: + rv[i] = None + for i in forced_b: + rv[i] = None + + def iopen(v): + # return indices at which element v can be placed in rv: + # locations which are not already occupied if that location + # does not already contain v in the same location of s + return [i for i in range(n) if rv[i] is None and s[i] != v] + + def do(j): + if j == 1: + # handle the last two elements (regardless of multiplicity) + # with a special method + yield from finish_derangements() + else: + # place the mx elements of M into a subset of places + # into which it can be replaced + M, mx = take[j] + for i in combinations(iopen(M), mx): + # place M + for ii in i: + rv[ii] = M + # recursively place the next element + yield from do(j - 1) + # mark positions where M was placed as once again + # open for placement of other elements + for ii in i: + rv[ii] = None + + # process elements in order of canonically decreasing multiplicity + take = sorted(ms.items(), key=lambda x:(x[1], x[0])) + yield from do(len(take) - 1) + rv[:] = [None]*n + + +def random_derangement(t, choice=None, strict=True): + """Return a list of elements in which none are in the same positions + as they were originally. If an element fills more than half of the positions + then an error will be raised since no derangement is possible. To obtain + a derangement of as many items as possible--with some of the most numerous + remaining in their original positions--pass `strict=False`. To produce a + pseudorandom derangment, pass a pseudorandom selector like `choice` (see + below). 
+ + Examples + ======== + + >>> from sympy.utilities.iterables import random_derangement + >>> t = 'SymPy: a CAS in pure Python' + >>> d = random_derangement(t) + >>> all(i != j for i, j in zip(d, t)) + True + + A predictable result can be obtained by using a pseudorandom + generator for the choice: + + >>> from sympy.core.random import seed, choice as c + >>> seed(1) + >>> d = [''.join(random_derangement(t, c)) for i in range(5)] + >>> assert len(set(d)) != 1 # we got different values + + By reseeding, the same sequence can be obtained: + + >>> seed(1) + >>> d2 = [''.join(random_derangement(t, c)) for i in range(5)] + >>> assert d == d2 + """ + if choice is None: + import secrets + choice = secrets.choice + def shuffle(rv): + '''Knuth shuffle''' + for i in range(len(rv) - 1, 0, -1): + x = choice(rv[:i + 1]) + j = rv.index(x) + rv[i], rv[j] = rv[j], rv[i] + def pick(rv, n): + '''shuffle rv and return the first n values + ''' + shuffle(rv) + return rv[:n] + ms = multiset(t) + tot = len(t) + ms = sorted(ms.items(), key=lambda x: x[1]) + # if there are not enough spaces for the most + # plentiful element to move to then some of them + # will have to stay in place + M, mx = ms[-1] + n = len(t) + xs = 2*mx - tot + if xs > 0: + if strict: + raise ValueError('no derangement possible') + opts = [i for (i, c) in enumerate(t) if c == ms[-1][0]] + pick(opts, xs) + stay = sorted(opts[:xs]) + rv = list(t) + for i in reversed(stay): + rv.pop(i) + rv = random_derangement(rv, choice) + for i in stay: + rv.insert(i, ms[-1][0]) + return ''.join(rv) if type(t) is str else rv + # the normal derangement calculated from here + if n == len(ms): + # approx 1/3 will succeed + rv = list(t) + while True: + shuffle(rv) + if all(i != j for i,j in zip(rv, t)): + break + else: + # general case + rv = [None]*n + while True: + j = 0 + while j > -len(ms): # do most numerous first + j -= 1 + e, c = ms[j] + opts = [i for i in range(n) if rv[i] is None and t[i] != e] + if len(opts) < c: + for i in range(n): + rv[i] = None + break # try again + pick(opts, c) + for i in range(c): + rv[opts[i]] = e + else: + return rv + return rv + + +def _set_derangements(s): + """ + yield derangements of items in ``s`` which are assumed to contain + no repeated elements + """ + if len(s) < 2: + return + if len(s) == 2: + yield [s[1], s[0]] + return + if len(s) == 3: + yield [s[1], s[2], s[0]] + yield [s[2], s[0], s[1]] + return + for p in permutations(s): + if not any(i == j for i, j in zip(p, s)): + yield list(p) + + +def generate_derangements(s): + """ + Return unique derangements of the elements of iterable ``s``. + + Examples + ======== + + >>> from sympy.utilities.iterables import generate_derangements + >>> list(generate_derangements([0, 1, 2])) + [[1, 2, 0], [2, 0, 1]] + >>> list(generate_derangements([0, 1, 2, 2])) + [[2, 2, 0, 1], [2, 2, 1, 0]] + >>> list(generate_derangements([0, 1, 1])) + [] + + See Also + ======== + + sympy.functions.combinatorial.factorials.subfactorial + + """ + if not has_dups(s): + yield from _set_derangements(s) + else: + for p in multiset_derangements(s): + yield list(p) + + +def necklaces(n, k, free=False): + """ + A routine to generate necklaces that may (free=True) or may not + (free=False) be turned over to be viewed. The "necklaces" returned + are comprised of ``n`` integers (beads) with ``k`` different + values (colors). Only unique necklaces are returned. + + Examples + ======== + + >>> from sympy.utilities.iterables import necklaces, bracelets + >>> def show(s, i): + ... 
return ''.join(s[j] for j in i) + + The "unrestricted necklace" is sometimes also referred to as a + "bracelet" (an object that can be turned over, a sequence that can + be reversed) and the term "necklace" is used to imply a sequence + that cannot be reversed. So ACB == ABC for a bracelet (rotate and + reverse) while the two are different for a necklace since rotation + alone cannot make the two sequences the same. + + (mnemonic: Bracelets can be viewed Backwards, but Not Necklaces.) + + >>> B = [show('ABC', i) for i in bracelets(3, 3)] + >>> N = [show('ABC', i) for i in necklaces(3, 3)] + >>> set(N) - set(B) + {'ACB'} + + >>> list(necklaces(4, 2)) + [(0, 0, 0, 0), (0, 0, 0, 1), (0, 0, 1, 1), + (0, 1, 0, 1), (0, 1, 1, 1), (1, 1, 1, 1)] + + >>> [show('.o', i) for i in bracelets(4, 2)] + ['....', '...o', '..oo', '.o.o', '.ooo', 'oooo'] + + References + ========== + + .. [1] https://mathworld.wolfram.com/Necklace.html + + .. [2] Frank Ruskey, Carla Savage, and Terry Min Yih Wang, + Generating necklaces, Journal of Algorithms 13 (1992), 414-430; + https://doi.org/10.1016/0196-6774(92)90047-G + + """ + # The FKM algorithm + if k == 0 and n > 0: + return + a = [0]*n + yield tuple(a) + if n == 0: + return + while True: + i = n - 1 + while a[i] == k - 1: + i -= 1 + if i == -1: + return + a[i] += 1 + for j in range(n - i - 1): + a[j + i + 1] = a[j] + if n % (i + 1) == 0 and (not free or all(a <= a[j::-1] + a[-1:j:-1] for j in range(n - 1))): + # No need to test j = n - 1. + yield tuple(a) + + +def bracelets(n, k): + """Wrapper to necklaces to return a free (unrestricted) necklace.""" + return necklaces(n, k, free=True) + + +def generate_oriented_forest(n): + """ + This algorithm generates oriented forests. + + An oriented graph is a directed graph having no symmetric pair of directed + edges. A forest is an acyclic graph, i.e., it has no cycles. A forest can + also be described as a disjoint union of trees, which are graphs in which + any two vertices are connected by exactly one simple path. + + Examples + ======== + + >>> from sympy.utilities.iterables import generate_oriented_forest + >>> list(generate_oriented_forest(4)) + [[0, 1, 2, 3], [0, 1, 2, 2], [0, 1, 2, 1], [0, 1, 2, 0], \ + [0, 1, 1, 1], [0, 1, 1, 0], [0, 1, 0, 1], [0, 1, 0, 0], [0, 0, 0, 0]] + + References + ========== + + .. [1] T. Beyer and S.M. Hedetniemi: constant time generation of + rooted trees, SIAM J. Computing Vol. 9, No. 4, November 1980 + + .. [2] https://stackoverflow.com/questions/1633833/oriented-forest-taocp-algorithm-in-python + + """ + P = list(range(-1, n)) + while True: + yield P[1:] + if P[n] > 0: + P[n] = P[P[n]] + else: + for p in range(n - 1, 0, -1): + if P[p] != 0: + target = P[p] - 1 + for q in range(p - 1, 0, -1): + if P[q] == target: + break + offset = p - q + for i in range(p, n + 1): + P[i] = P[i - offset] + break + else: + break + + +def minlex(seq, directed=True, key=None): + r""" + Return the rotation of the sequence in which the lexically smallest + elements appear first, e.g. `cba \rightarrow acb`. + + The sequence returned is a tuple, unless the input sequence is a string + in which case a string is returned. + + If ``directed`` is False then the smaller of the sequence and the + reversed sequence is returned, e.g. `cba \rightarrow abc`. + + If ``key`` is not None then it is used to extract a comparison key from each element in iterable. 
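+
+    Equivalently, with the default ``key`` and ``directed=True`` the result is
+    the lexicographic minimum over all rotations of the sequence (an
+    illustrative check):
+
+    >>> from sympy.utilities.iterables import minlex, rotations
+    >>> min(map(tuple, rotations((1, 2, 0)))) == minlex((1, 2, 0))
+    True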
+ + Examples + ======== + + >>> from sympy.combinatorics.polyhedron import minlex + >>> minlex((1, 2, 0)) + (0, 1, 2) + >>> minlex((1, 0, 2)) + (0, 2, 1) + >>> minlex((1, 0, 2), directed=False) + (0, 1, 2) + + >>> minlex('11010011000', directed=True) + '00011010011' + >>> minlex('11010011000', directed=False) + '00011001011' + + >>> minlex(('bb', 'aaa', 'c', 'a')) + ('a', 'bb', 'aaa', 'c') + >>> minlex(('bb', 'aaa', 'c', 'a'), key=len) + ('c', 'a', 'bb', 'aaa') + + """ + from sympy.functions.elementary.miscellaneous import Id + if key is None: key = Id + best = rotate_left(seq, least_rotation(seq, key=key)) + if not directed: + rseq = seq[::-1] + rbest = rotate_left(rseq, least_rotation(rseq, key=key)) + best = min(best, rbest, key=key) + + # Convert to tuple, unless we started with a string. + return tuple(best) if not isinstance(seq, str) else best + + +def runs(seq, op=gt): + """Group the sequence into lists in which successive elements + all compare the same with the comparison operator, ``op``: + op(seq[i + 1], seq[i]) is True from all elements in a run. + + Examples + ======== + + >>> from sympy.utilities.iterables import runs + >>> from operator import ge + >>> runs([0, 1, 2, 2, 1, 4, 3, 2, 2]) + [[0, 1, 2], [2], [1, 4], [3], [2], [2]] + >>> runs([0, 1, 2, 2, 1, 4, 3, 2, 2], op=ge) + [[0, 1, 2, 2], [1, 4], [3], [2, 2]] + """ + cycles = [] + seq = iter(seq) + try: + run = [next(seq)] + except StopIteration: + return [] + while True: + try: + ei = next(seq) + except StopIteration: + break + if op(ei, run[-1]): + run.append(ei) + continue + else: + cycles.append(run) + run = [ei] + if run: + cycles.append(run) + return cycles + + +def sequence_partitions(l, n, /): + r"""Returns the partition of sequence $l$ into $n$ bins + + Explanation + =========== + + Given the sequence $l_1 \cdots l_m \in V^+$ where + $V^+$ is the Kleene plus of $V$ + + The set of $n$ partitions of $l$ is defined as: + + .. math:: + \{(s_1, \cdots, s_n) | s_1 \in V^+, \cdots, s_n \in V^+, + s_1 \cdots s_n = l_1 \cdots l_m\} + + Parameters + ========== + + l : Sequence[T] + A nonempty sequence of any Python objects + + n : int + A positive integer + + Yields + ====== + + out : list[Sequence[T]] + A list of sequences with concatenation equals $l$. + This should conform with the type of $l$. + + Examples + ======== + + >>> from sympy.utilities.iterables import sequence_partitions + >>> for out in sequence_partitions([1, 2, 3, 4], 2): + ... print(out) + [[1], [2, 3, 4]] + [[1, 2], [3, 4]] + [[1, 2, 3], [4]] + + Notes + ===== + + This is modified version of EnricoGiampieri's partition generator + from https://stackoverflow.com/questions/13131491/partition-n-items-into-k-bins-in-python-lazily + + See Also + ======== + + sequence_partitions_empty + """ + # Asserting l is nonempty is done only for sanity check + if n == 1 and l: + yield [l] + return + for i in range(1, len(l)): + for part in sequence_partitions(l[i:], n - 1): + yield [l[:i]] + part + + +def sequence_partitions_empty(l, n, /): + r"""Returns the partition of sequence $l$ into $n$ bins with + empty sequence + + Explanation + =========== + + Given the sequence $l_1 \cdots l_m \in V^*$ where + $V^*$ is the Kleene star of $V$ + + The set of $n$ partitions of $l$ is defined as: + + .. math:: + \{(s_1, \cdots, s_n) | s_1 \in V^*, \cdots, s_n \in V^*, + s_1 \cdots s_n = l_1 \cdots l_m\} + + There are more combinations than :func:`sequence_partitions` because + empty sequence can fill everywhere, so we try to provide different + utility for this. 
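+
+    For a sequence of length $m$ there are $\binom{m + n - 1}{n - 1}$ such
+    partitions (a stars-and-bars count), which gives a quick sanity check:
+
+    >>> from sympy import binomial
+    >>> from sympy.utilities.iterables import sequence_partitions_empty
+    >>> len(list(sequence_partitions_empty([1, 2, 3, 4], 2))) == binomial(5, 1)
+    True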
+ + Parameters + ========== + + l : Sequence[T] + A sequence of any Python objects (can be possibly empty) + + n : int + A positive integer + + Yields + ====== + + out : list[Sequence[T]] + A list of sequences with concatenation equals $l$. + This should conform with the type of $l$. + + Examples + ======== + + >>> from sympy.utilities.iterables import sequence_partitions_empty + >>> for out in sequence_partitions_empty([1, 2, 3, 4], 2): + ... print(out) + [[], [1, 2, 3, 4]] + [[1], [2, 3, 4]] + [[1, 2], [3, 4]] + [[1, 2, 3], [4]] + [[1, 2, 3, 4], []] + + See Also + ======== + + sequence_partitions + """ + if n < 1: + return + if n == 1: + yield [l] + return + for i in range(0, len(l) + 1): + for part in sequence_partitions_empty(l[i:], n - 1): + yield [l[:i]] + part + + +def kbins(l, k, ordered=None): + """ + Return sequence ``l`` partitioned into ``k`` bins. + + Examples + ======== + + The default is to give the items in the same order, but grouped + into k partitions without any reordering: + + >>> from sympy.utilities.iterables import kbins + >>> for p in kbins(list(range(5)), 2): + ... print(p) + ... + [[0], [1, 2, 3, 4]] + [[0, 1], [2, 3, 4]] + [[0, 1, 2], [3, 4]] + [[0, 1, 2, 3], [4]] + + The ``ordered`` flag is either None (to give the simple partition + of the elements) or is a 2 digit integer indicating whether the order of + the bins and the order of the items in the bins matters. Given:: + + A = [[0], [1, 2]] + B = [[1, 2], [0]] + C = [[2, 1], [0]] + D = [[0], [2, 1]] + + the following values for ``ordered`` have the shown meanings:: + + 00 means A == B == C == D + 01 means A == B + 10 means A == D + 11 means A == A + + >>> for ordered_flag in [None, 0, 1, 10, 11]: + ... print('ordered = %s' % ordered_flag) + ... for p in kbins(list(range(3)), 2, ordered=ordered_flag): + ... print(' %s' % p) + ... + ordered = None + [[0], [1, 2]] + [[0, 1], [2]] + ordered = 0 + [[0, 1], [2]] + [[0, 2], [1]] + [[0], [1, 2]] + ordered = 1 + [[0], [1, 2]] + [[0], [2, 1]] + [[1], [0, 2]] + [[1], [2, 0]] + [[2], [0, 1]] + [[2], [1, 0]] + ordered = 10 + [[0, 1], [2]] + [[2], [0, 1]] + [[0, 2], [1]] + [[1], [0, 2]] + [[0], [1, 2]] + [[1, 2], [0]] + ordered = 11 + [[0], [1, 2]] + [[0, 1], [2]] + [[0], [2, 1]] + [[0, 2], [1]] + [[1], [0, 2]] + [[1, 0], [2]] + [[1], [2, 0]] + [[1, 2], [0]] + [[2], [0, 1]] + [[2, 0], [1]] + [[2], [1, 0]] + [[2, 1], [0]] + + See Also + ======== + + partitions, multiset_partitions + + """ + if ordered is None: + yield from sequence_partitions(l, k) + elif ordered == 11: + for pl in multiset_permutations(l): + pl = list(pl) + yield from sequence_partitions(pl, k) + elif ordered == 00: + yield from multiset_partitions(l, k) + elif ordered == 10: + for p in multiset_partitions(l, k): + for perm in permutations(p): + yield list(perm) + elif ordered == 1: + for kgot, p in partitions(len(l), k, size=True): + if kgot != k: + continue + for li in multiset_permutations(l): + rv = [] + i = j = 0 + li = list(li) + for size, multiplicity in sorted(p.items()): + for m in range(multiplicity): + j = i + size + rv.append(li[i: j]) + i = j + yield rv + else: + raise ValueError( + 'ordered must be one of 00, 01, 10 or 11, not %s' % ordered) + + +def permute_signs(t): + """Return iterator in which the signs of non-zero elements + of t are permuted. 
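+
+    Each of the ``k`` non-zero entries independently takes either sign, so
+    ``2**k`` tuples are yielded; as a quick check:
+
+    >>> from sympy.utilities.iterables import permute_signs
+    >>> len(list(permute_signs((0, 1, 2)))) == 2**2
+    True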
+ + Examples + ======== + + >>> from sympy.utilities.iterables import permute_signs + >>> list(permute_signs((0, 1, 2))) + [(0, 1, 2), (0, -1, 2), (0, 1, -2), (0, -1, -2)] + """ + for signs in product(*[(1, -1)]*(len(t) - t.count(0))): + signs = list(signs) + yield type(t)([i*signs.pop() if i else i for i in t]) + + +def signed_permutations(t): + """Return iterator in which the signs of non-zero elements + of t and the order of the elements are permuted. + + Examples + ======== + + >>> from sympy.utilities.iterables import signed_permutations + >>> list(signed_permutations((0, 1, 2))) + [(0, 1, 2), (0, -1, 2), (0, 1, -2), (0, -1, -2), (0, 2, 1), + (0, -2, 1), (0, 2, -1), (0, -2, -1), (1, 0, 2), (-1, 0, 2), + (1, 0, -2), (-1, 0, -2), (1, 2, 0), (-1, 2, 0), (1, -2, 0), + (-1, -2, 0), (2, 0, 1), (-2, 0, 1), (2, 0, -1), (-2, 0, -1), + (2, 1, 0), (-2, 1, 0), (2, -1, 0), (-2, -1, 0)] + """ + return (type(t)(i) for j in permutations(t) + for i in permute_signs(j)) + + +def rotations(s, dir=1): + """Return a generator giving the items in s as list where + each subsequent list has the items rotated to the left (default) + or right (``dir=-1``) relative to the previous list. + + Examples + ======== + + >>> from sympy import rotations + >>> list(rotations([1,2,3])) + [[1, 2, 3], [2, 3, 1], [3, 1, 2]] + >>> list(rotations([1,2,3], -1)) + [[1, 2, 3], [3, 1, 2], [2, 3, 1]] + """ + seq = list(s) + for i in range(len(seq)): + yield seq + seq = rotate_left(seq, dir) + + +def roundrobin(*iterables): + """roundrobin recipe taken from itertools documentation: + https://docs.python.org/3/library/itertools.html#itertools-recipes + + roundrobin('ABC', 'D', 'EF') --> A D E B F C + + Recipe credited to George Sakkis + """ + nexts = cycle(iter(it).__next__ for it in iterables) + + pending = len(iterables) + while pending: + try: + for nxt in nexts: + yield nxt() + except StopIteration: + pending -= 1 + nexts = cycle(islice(nexts, pending)) + + + +class NotIterable: + """ + Use this as mixin when creating a class which is not supposed to + return true when iterable() is called on its instances because + calling list() on the instance, for example, would result in + an infinite loop. + """ + pass + + +def iterable(i, exclude=(str, dict, NotIterable)): + """ + Return a boolean indicating whether ``i`` is SymPy iterable. + True also indicates that the iterator is finite, e.g. you can + call list(...) on the instance. + + When SymPy is working with iterables, it is almost always assuming + that the iterable is not a string or a mapping, so those are excluded + by default. If you want a pure Python definition, make exclude=None. To + exclude multiple items, pass them as a tuple. + + You can also set the _iterable attribute to True or False on your class, + which will override the checks here, including the exclude test. + + As a rule of thumb, some SymPy functions use this to check if they should + recursively map over an object. If an object is technically iterable in + the Python sense but does not desire this behavior (e.g., because its + iteration is not finite, or because iteration might induce an unwanted + computation), it should disable it by setting the _iterable attribute to False. + + See also: is_sequence + + Examples + ======== + + >>> from sympy.utilities.iterables import iterable + >>> from sympy import Tuple + >>> things = [[1], (1,), set([1]), Tuple(1), (j for j in [1, 2]), {1:2}, '1', 1] + >>> for i in things: + ... print('%s %s' % (iterable(i), type(i))) + True <... 'list'> + True <... 
'tuple'> + True <... 'set'> + True + True <... 'generator'> + False <... 'dict'> + False <... 'str'> + False <... 'int'> + + >>> iterable({}, exclude=None) + True + >>> iterable({}, exclude=str) + True + >>> iterable("no", exclude=str) + False + + """ + if hasattr(i, '_iterable'): + return i._iterable + try: + iter(i) + except TypeError: + return False + if exclude: + return not isinstance(i, exclude) + return True + + +def is_sequence(i, include=None): + """ + Return a boolean indicating whether ``i`` is a sequence in the SymPy + sense. If anything that fails the test below should be included as + being a sequence for your application, set 'include' to that object's + type; multiple types should be passed as a tuple of types. + + Note: although generators can generate a sequence, they often need special + handling to make sure their elements are captured before the generator is + exhausted, so these are not included by default in the definition of a + sequence. + + See also: iterable + + Examples + ======== + + >>> from sympy.utilities.iterables import is_sequence + >>> from types import GeneratorType + >>> is_sequence([]) + True + >>> is_sequence(set()) + False + >>> is_sequence('abc') + False + >>> is_sequence('abc', include=str) + True + >>> generator = (c for c in 'abc') + >>> is_sequence(generator) + False + >>> is_sequence(generator, include=(str, GeneratorType)) + True + + """ + return (hasattr(i, '__getitem__') and + iterable(i) or + bool(include) and + isinstance(i, include)) + + +@deprecated( + """ + Using postorder_traversal from the sympy.utilities.iterables submodule is + deprecated. + + Instead, use postorder_traversal from the top-level sympy namespace, like + + sympy.postorder_traversal + """, + deprecated_since_version="1.10", + active_deprecations_target="deprecated-traversal-functions-moved") +def postorder_traversal(node, keys=None): + from sympy.core.traversal import postorder_traversal as _postorder_traversal + return _postorder_traversal(node, keys=keys) + + +@deprecated( + """ + Using interactive_traversal from the sympy.utilities.iterables submodule + is deprecated. + + Instead, use interactive_traversal from the top-level sympy namespace, + like + + sympy.interactive_traversal + """, + deprecated_since_version="1.10", + active_deprecations_target="deprecated-traversal-functions-moved") +def interactive_traversal(expr): + from sympy.interactive.traversal import interactive_traversal as _interactive_traversal + return _interactive_traversal(expr) + + +@deprecated( + """ + Importing default_sort_key from sympy.utilities.iterables is deprecated. + Use from sympy import default_sort_key instead. + """, + deprecated_since_version="1.10", +active_deprecations_target="deprecated-sympy-core-compatibility", +) +def default_sort_key(*args, **kwargs): + from sympy import default_sort_key as _default_sort_key + return _default_sort_key(*args, **kwargs) + + +@deprecated( + """ + Importing default_sort_key from sympy.utilities.iterables is deprecated. + Use from sympy import default_sort_key instead. 
+ """, + deprecated_since_version="1.10", +active_deprecations_target="deprecated-sympy-core-compatibility", +) +def ordered(*args, **kwargs): + from sympy import ordered as _ordered + return _ordered(*args, **kwargs) diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/lambdify.py b/venv/lib/python3.10/site-packages/sympy/utilities/lambdify.py new file mode 100644 index 0000000000000000000000000000000000000000..17941fc9277296ae568b554c527b867ce0e67c75 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/lambdify.py @@ -0,0 +1,1526 @@ +""" +This module provides convenient functions to transform SymPy expressions to +lambda functions which can be used to calculate numerical values very fast. +""" + +from __future__ import annotations +from typing import Any + +import builtins +import inspect +import keyword +import textwrap +import linecache + +# Required despite static analysis claiming it is not used +from sympy.external import import_module # noqa:F401 +from sympy.utilities.exceptions import sympy_deprecation_warning +from sympy.utilities.decorator import doctest_depends_on +from sympy.utilities.iterables import (is_sequence, iterable, + NotIterable, flatten) +from sympy.utilities.misc import filldedent + +__doctest_requires__ = {('lambdify',): ['numpy', 'tensorflow']} + +# Default namespaces, letting us define translations that can't be defined +# by simple variable maps, like I => 1j +MATH_DEFAULT: dict[str, Any] = {} +MPMATH_DEFAULT: dict[str, Any] = {} +NUMPY_DEFAULT: dict[str, Any] = {"I": 1j} +SCIPY_DEFAULT: dict[str, Any] = {"I": 1j} +CUPY_DEFAULT: dict[str, Any] = {"I": 1j} +JAX_DEFAULT: dict[str, Any] = {"I": 1j} +TENSORFLOW_DEFAULT: dict[str, Any] = {} +SYMPY_DEFAULT: dict[str, Any] = {} +NUMEXPR_DEFAULT: dict[str, Any] = {} + +# These are the namespaces the lambda functions will use. +# These are separate from the names above because they are modified +# throughout this file, whereas the defaults should remain unmodified. + +MATH = MATH_DEFAULT.copy() +MPMATH = MPMATH_DEFAULT.copy() +NUMPY = NUMPY_DEFAULT.copy() +SCIPY = SCIPY_DEFAULT.copy() +CUPY = CUPY_DEFAULT.copy() +JAX = JAX_DEFAULT.copy() +TENSORFLOW = TENSORFLOW_DEFAULT.copy() +SYMPY = SYMPY_DEFAULT.copy() +NUMEXPR = NUMEXPR_DEFAULT.copy() + + +# Mappings between SymPy and other modules function names. +MATH_TRANSLATIONS = { + "ceiling": "ceil", + "E": "e", + "ln": "log", +} + +# NOTE: This dictionary is reused in Function._eval_evalf to allow subclasses +# of Function to automatically evalf. 
+MPMATH_TRANSLATIONS = { + "Abs": "fabs", + "elliptic_k": "ellipk", + "elliptic_f": "ellipf", + "elliptic_e": "ellipe", + "elliptic_pi": "ellippi", + "ceiling": "ceil", + "chebyshevt": "chebyt", + "chebyshevu": "chebyu", + "E": "e", + "I": "j", + "ln": "log", + #"lowergamma":"lower_gamma", + "oo": "inf", + #"uppergamma":"upper_gamma", + "LambertW": "lambertw", + "MutableDenseMatrix": "matrix", + "ImmutableDenseMatrix": "matrix", + "conjugate": "conj", + "dirichlet_eta": "altzeta", + "Ei": "ei", + "Shi": "shi", + "Chi": "chi", + "Si": "si", + "Ci": "ci", + "RisingFactorial": "rf", + "FallingFactorial": "ff", + "betainc_regularized": "betainc", +} + +NUMPY_TRANSLATIONS: dict[str, str] = { + "Heaviside": "heaviside", +} +SCIPY_TRANSLATIONS: dict[str, str] = {} +CUPY_TRANSLATIONS: dict[str, str] = {} +JAX_TRANSLATIONS: dict[str, str] = {} + +TENSORFLOW_TRANSLATIONS: dict[str, str] = {} + +NUMEXPR_TRANSLATIONS: dict[str, str] = {} + +# Available modules: +MODULES = { + "math": (MATH, MATH_DEFAULT, MATH_TRANSLATIONS, ("from math import *",)), + "mpmath": (MPMATH, MPMATH_DEFAULT, MPMATH_TRANSLATIONS, ("from mpmath import *",)), + "numpy": (NUMPY, NUMPY_DEFAULT, NUMPY_TRANSLATIONS, ("import numpy; from numpy import *; from numpy.linalg import *",)), + "scipy": (SCIPY, SCIPY_DEFAULT, SCIPY_TRANSLATIONS, ("import scipy; import numpy; from scipy.special import *",)), + "cupy": (CUPY, CUPY_DEFAULT, CUPY_TRANSLATIONS, ("import cupy",)), + "jax": (JAX, JAX_DEFAULT, JAX_TRANSLATIONS, ("import jax",)), + "tensorflow": (TENSORFLOW, TENSORFLOW_DEFAULT, TENSORFLOW_TRANSLATIONS, ("import tensorflow",)), + "sympy": (SYMPY, SYMPY_DEFAULT, {}, ( + "from sympy.functions import *", + "from sympy.matrices import *", + "from sympy import Integral, pi, oo, nan, zoo, E, I",)), + "numexpr" : (NUMEXPR, NUMEXPR_DEFAULT, NUMEXPR_TRANSLATIONS, + ("import_module('numexpr')", )), +} + + +def _import(module, reload=False): + """ + Creates a global translation dictionary for module. + + The argument module has to be one of the following strings: "math", + "mpmath", "numpy", "sympy", "tensorflow", "jax". + These dictionaries map names of Python functions to their equivalent in + other modules. + """ + try: + namespace, namespace_default, translations, import_commands = MODULES[ + module] + except KeyError: + raise NameError( + "'%s' module cannot be used for lambdification" % module) + + # Clear namespace or exit + if namespace != namespace_default: + # The namespace was already generated, don't do it again if not forced. + if reload: + namespace.clear() + namespace.update(namespace_default) + else: + return + + for import_command in import_commands: + if import_command.startswith('import_module'): + module = eval(import_command) + + if module is not None: + namespace.update(module.__dict__) + continue + else: + try: + exec(import_command, {}, namespace) + continue + except ImportError: + pass + + raise ImportError( + "Cannot import '%s' with '%s' command" % (module, import_command)) + + # Add translated names to namespace + for sympyname, translation in translations.items(): + namespace[sympyname] = namespace[translation] + + # For computing the modulus of a SymPy expression we use the builtin abs + # function, instead of the previously used fabs function for all + # translation modules. This is because the fabs function in the math + # module does not accept complex valued arguments. (see issue 9474). 
The + # only exception, where we don't use the builtin abs function is the + # mpmath translation module, because mpmath.fabs returns mpf objects in + # contrast to abs(). + if 'Abs' not in namespace: + namespace['Abs'] = abs + +# Used for dynamically generated filenames that are inserted into the +# linecache. +_lambdify_generated_counter = 1 + + +@doctest_depends_on(modules=('numpy', 'scipy', 'tensorflow',), python_version=(3,)) +def lambdify(args, expr, modules=None, printer=None, use_imps=True, + dummify=False, cse=False, docstring_limit=1000): + """Convert a SymPy expression into a function that allows for fast + numeric evaluation. + + .. warning:: + This function uses ``exec``, and thus should not be used on + unsanitized input. + + .. deprecated:: 1.7 + Passing a set for the *args* parameter is deprecated as sets are + unordered. Use an ordered iterable such as a list or tuple. + + Explanation + =========== + + For example, to convert the SymPy expression ``sin(x) + cos(x)`` to an + equivalent NumPy function that numerically evaluates it: + + >>> from sympy import sin, cos, symbols, lambdify + >>> import numpy as np + >>> x = symbols('x') + >>> expr = sin(x) + cos(x) + >>> expr + sin(x) + cos(x) + >>> f = lambdify(x, expr, 'numpy') + >>> a = np.array([1, 2]) + >>> f(a) + [1.38177329 0.49315059] + + The primary purpose of this function is to provide a bridge from SymPy + expressions to numerical libraries such as NumPy, SciPy, NumExpr, mpmath, + and tensorflow. In general, SymPy functions do not work with objects from + other libraries, such as NumPy arrays, and functions from numeric + libraries like NumPy or mpmath do not work on SymPy expressions. + ``lambdify`` bridges the two by converting a SymPy expression to an + equivalent numeric function. + + The basic workflow with ``lambdify`` is to first create a SymPy expression + representing whatever mathematical function you wish to evaluate. This + should be done using only SymPy functions and expressions. Then, use + ``lambdify`` to convert this to an equivalent function for numerical + evaluation. For instance, above we created ``expr`` using the SymPy symbol + ``x`` and SymPy functions ``sin`` and ``cos``, then converted it to an + equivalent NumPy function ``f``, and called it on a NumPy array ``a``. + + Parameters + ========== + + args : List[Symbol] + A variable or a list of variables whose nesting represents the + nesting of the arguments that will be passed to the function. + + Variables can be symbols, undefined functions, or matrix symbols. + + >>> from sympy import Eq + >>> from sympy.abc import x, y, z + + The list of variables should match the structure of how the + arguments will be passed to the function. Simply enclose the + parameters as they will be passed in a list. 
+ + To call a function like ``f(x)`` then ``[x]`` + should be the first argument to ``lambdify``; for this + case a single ``x`` can also be used: + + >>> f = lambdify(x, x + 1) + >>> f(1) + 2 + >>> f = lambdify([x], x + 1) + >>> f(1) + 2 + + To call a function like ``f(x, y)`` then ``[x, y]`` will + be the first argument of the ``lambdify``: + + >>> f = lambdify([x, y], x + y) + >>> f(1, 1) + 2 + + To call a function with a single 3-element tuple like + ``f((x, y, z))`` then ``[(x, y, z)]`` will be the first + argument of the ``lambdify``: + + >>> f = lambdify([(x, y, z)], Eq(z**2, x**2 + y**2)) + >>> f((3, 4, 5)) + True + + If two args will be passed and the first is a scalar but + the second is a tuple with two arguments then the items + in the list should match that structure: + + >>> f = lambdify([x, (y, z)], x + y + z) + >>> f(1, (2, 3)) + 6 + + expr : Expr + An expression, list of expressions, or matrix to be evaluated. + + Lists may be nested. + If the expression is a list, the output will also be a list. + + >>> f = lambdify(x, [x, [x + 1, x + 2]]) + >>> f(1) + [1, [2, 3]] + + If it is a matrix, an array will be returned (for the NumPy module). + + >>> from sympy import Matrix + >>> f = lambdify(x, Matrix([x, x + 1])) + >>> f(1) + [[1] + [2]] + + Note that the argument order here (variables then expression) is used + to emulate the Python ``lambda`` keyword. ``lambdify(x, expr)`` works + (roughly) like ``lambda x: expr`` + (see :ref:`lambdify-how-it-works` below). + + modules : str, optional + Specifies the numeric library to use. + + If not specified, *modules* defaults to: + + - ``["scipy", "numpy"]`` if SciPy is installed + - ``["numpy"]`` if only NumPy is installed + - ``["math", "mpmath", "sympy"]`` if neither is installed. + + That is, SymPy functions are replaced as far as possible by + either ``scipy`` or ``numpy`` functions if available, and Python's + standard library ``math``, or ``mpmath`` functions otherwise. + + *modules* can be one of the following types: + + - The strings ``"math"``, ``"mpmath"``, ``"numpy"``, ``"numexpr"``, + ``"scipy"``, ``"sympy"``, or ``"tensorflow"`` or ``"jax"``. This uses the + corresponding printer and namespace mapping for that module. + - A module (e.g., ``math``). This uses the global namespace of the + module. If the module is one of the above known modules, it will + also use the corresponding printer and namespace mapping + (i.e., ``modules=numpy`` is equivalent to ``modules="numpy"``). + - A dictionary that maps names of SymPy functions to arbitrary + functions + (e.g., ``{'sin': custom_sin}``). + - A list that contains a mix of the arguments above, with higher + priority given to entries appearing first + (e.g., to use the NumPy module but override the ``sin`` function + with a custom version, you can use + ``[{'sin': custom_sin}, 'numpy']``). + + dummify : bool, optional + Whether or not the variables in the provided expression that are not + valid Python identifiers are substituted with dummy symbols. + + This allows for undefined functions like ``Function('f')(t)`` to be + supplied as arguments. By default, the variables are only dummified + if they are not valid Python identifiers. + + Set ``dummify=True`` to replace all arguments with dummy symbols + (if ``args`` is not a string) - for example, to ensure that the + arguments do not redefine any built-in names. 
+ + cse : bool, or callable, optional + Large expressions can be computed more efficiently when + common subexpressions are identified and precomputed before + being used multiple time. Finding the subexpressions will make + creation of the 'lambdify' function slower, however. + + When ``True``, ``sympy.simplify.cse`` is used, otherwise (the default) + the user may pass a function matching the ``cse`` signature. + + docstring_limit : int or None + When lambdifying large expressions, a significant proportion of the time + spent inside ``lambdify`` is spent producing a string representation of + the expression for use in the automatically generated docstring of the + returned function. For expressions containing hundreds or more nodes the + resulting docstring often becomes so long and dense that it is difficult + to read. To reduce the runtime of lambdify, the rendering of the full + expression inside the docstring can be disabled. + + When ``None``, the full expression is rendered in the docstring. When + ``0`` or a negative ``int``, an ellipsis is rendering in the docstring + instead of the expression. When a strictly positive ``int``, if the + number of nodes in the expression exceeds ``docstring_limit`` an + ellipsis is rendered in the docstring, otherwise a string representation + of the expression is rendered as normal. The default is ``1000``. + + Examples + ======== + + >>> from sympy.utilities.lambdify import implemented_function + >>> from sympy import sqrt, sin, Matrix + >>> from sympy import Function + >>> from sympy.abc import w, x, y, z + + >>> f = lambdify(x, x**2) + >>> f(2) + 4 + >>> f = lambdify((x, y, z), [z, y, x]) + >>> f(1,2,3) + [3, 2, 1] + >>> f = lambdify(x, sqrt(x)) + >>> f(4) + 2.0 + >>> f = lambdify((x, y), sin(x*y)**2) + >>> f(0, 5) + 0.0 + >>> row = lambdify((x, y), Matrix((x, x + y)).T, modules='sympy') + >>> row(1, 2) + Matrix([[1, 3]]) + + ``lambdify`` can be used to translate SymPy expressions into mpmath + functions. This may be preferable to using ``evalf`` (which uses mpmath on + the backend) in some cases. + + >>> f = lambdify(x, sin(x), 'mpmath') + >>> f(1) + 0.8414709848078965 + + Tuple arguments are handled and the lambdified function should + be called with the same type of arguments as were used to create + the function: + + >>> f = lambdify((x, (y, z)), x + y) + >>> f(1, (2, 4)) + 3 + + The ``flatten`` function can be used to always work with flattened + arguments: + + >>> from sympy.utilities.iterables import flatten + >>> args = w, (x, (y, z)) + >>> vals = 1, (2, (3, 4)) + >>> f = lambdify(flatten(args), w + x + y + z) + >>> f(*flatten(vals)) + 10 + + Functions present in ``expr`` can also carry their own numerical + implementations, in a callable attached to the ``_imp_`` attribute. This + can be used with undefined functions using the ``implemented_function`` + factory: + + >>> f = implemented_function(Function('f'), lambda x: x+1) + >>> func = lambdify(x, f(x)) + >>> func(4) + 5 + + ``lambdify`` always prefers ``_imp_`` implementations to implementations + in other namespaces, unless the ``use_imps`` input parameter is False. + + Usage with Tensorflow: + + >>> import tensorflow as tf + >>> from sympy import Max, sin, lambdify + >>> from sympy.abc import x + + >>> f = Max(x, sin(x)) + >>> func = lambdify(x, f, 'tensorflow') + + After tensorflow v2, eager execution is enabled by default. + If you want to get the compatible result across tensorflow v1 and v2 + as same as this tutorial, run this line. 
+ + >>> tf.compat.v1.enable_eager_execution() + + If you have eager execution enabled, you can get the result out + immediately as you can use numpy. + + If you pass tensorflow objects, you may get an ``EagerTensor`` + object instead of value. + + >>> result = func(tf.constant(1.0)) + >>> print(result) + tf.Tensor(1.0, shape=(), dtype=float32) + >>> print(result.__class__) + + + You can use ``.numpy()`` to get the numpy value of the tensor. + + >>> result.numpy() + 1.0 + + >>> var = tf.Variable(2.0) + >>> result = func(var) # also works for tf.Variable and tf.Placeholder + >>> result.numpy() + 2.0 + + And it works with any shape array. + + >>> tensor = tf.constant([[1.0, 2.0], [3.0, 4.0]]) + >>> result = func(tensor) + >>> result.numpy() + [[1. 2.] + [3. 4.]] + + Notes + ===== + + - For functions involving large array calculations, numexpr can provide a + significant speedup over numpy. Please note that the available functions + for numexpr are more limited than numpy but can be expanded with + ``implemented_function`` and user defined subclasses of Function. If + specified, numexpr may be the only option in modules. The official list + of numexpr functions can be found at: + https://numexpr.readthedocs.io/projects/NumExpr3/en/latest/user_guide.html#supported-functions + + - In the above examples, the generated functions can accept scalar + values or numpy arrays as arguments. However, in some cases + the generated function relies on the input being a numpy array: + + >>> import numpy + >>> from sympy import Piecewise + >>> from sympy.testing.pytest import ignore_warnings + >>> f = lambdify(x, Piecewise((x, x <= 1), (1/x, x > 1)), "numpy") + + >>> with ignore_warnings(RuntimeWarning): + ... f(numpy.array([-1, 0, 1, 2])) + [-1. 0. 1. 0.5] + + >>> f(0) + Traceback (most recent call last): + ... + ZeroDivisionError: division by zero + + In such cases, the input should be wrapped in a numpy array: + + >>> with ignore_warnings(RuntimeWarning): + ... float(f(numpy.array([0]))) + 0.0 + + Or if numpy functionality is not required another module can be used: + + >>> f = lambdify(x, Piecewise((x, x <= 1), (1/x, x > 1)), "math") + >>> f(0) + 0 + + .. _lambdify-how-it-works: + + How it works + ============ + + When using this function, it helps a great deal to have an idea of what it + is doing. At its core, lambdify is nothing more than a namespace + translation, on top of a special printer that makes some corner cases work + properly. + + To understand lambdify, first we must properly understand how Python + namespaces work. Say we had two files. One called ``sin_cos_sympy.py``, + with + + .. code:: python + + # sin_cos_sympy.py + + from sympy.functions.elementary.trigonometric import (cos, sin) + + def sin_cos(x): + return sin(x) + cos(x) + + + and one called ``sin_cos_numpy.py`` with + + .. code:: python + + # sin_cos_numpy.py + + from numpy import sin, cos + + def sin_cos(x): + return sin(x) + cos(x) + + The two files define an identical function ``sin_cos``. However, in the + first file, ``sin`` and ``cos`` are defined as the SymPy ``sin`` and + ``cos``. In the second, they are defined as the NumPy versions. 
+ + If we were to import the first file and use the ``sin_cos`` function, we + would get something like + + >>> from sin_cos_sympy import sin_cos # doctest: +SKIP + >>> sin_cos(1) # doctest: +SKIP + cos(1) + sin(1) + + On the other hand, if we imported ``sin_cos`` from the second file, we + would get + + >>> from sin_cos_numpy import sin_cos # doctest: +SKIP + >>> sin_cos(1) # doctest: +SKIP + 1.38177329068 + + In the first case we got a symbolic output, because it used the symbolic + ``sin`` and ``cos`` functions from SymPy. In the second, we got a numeric + result, because ``sin_cos`` used the numeric ``sin`` and ``cos`` functions + from NumPy. But notice that the versions of ``sin`` and ``cos`` that were + used was not inherent to the ``sin_cos`` function definition. Both + ``sin_cos`` definitions are exactly the same. Rather, it was based on the + names defined at the module where the ``sin_cos`` function was defined. + + The key point here is that when function in Python references a name that + is not defined in the function, that name is looked up in the "global" + namespace of the module where that function is defined. + + Now, in Python, we can emulate this behavior without actually writing a + file to disk using the ``exec`` function. ``exec`` takes a string + containing a block of Python code, and a dictionary that should contain + the global variables of the module. It then executes the code "in" that + dictionary, as if it were the module globals. The following is equivalent + to the ``sin_cos`` defined in ``sin_cos_sympy.py``: + + >>> import sympy + >>> module_dictionary = {'sin': sympy.sin, 'cos': sympy.cos} + >>> exec(''' + ... def sin_cos(x): + ... return sin(x) + cos(x) + ... ''', module_dictionary) + >>> sin_cos = module_dictionary['sin_cos'] + >>> sin_cos(1) + cos(1) + sin(1) + + and similarly with ``sin_cos_numpy``: + + >>> import numpy + >>> module_dictionary = {'sin': numpy.sin, 'cos': numpy.cos} + >>> exec(''' + ... def sin_cos(x): + ... return sin(x) + cos(x) + ... ''', module_dictionary) + >>> sin_cos = module_dictionary['sin_cos'] + >>> sin_cos(1) + 1.38177329068 + + So now we can get an idea of how ``lambdify`` works. The name "lambdify" + comes from the fact that we can think of something like ``lambdify(x, + sin(x) + cos(x), 'numpy')`` as ``lambda x: sin(x) + cos(x)``, where + ``sin`` and ``cos`` come from the ``numpy`` namespace. This is also why + the symbols argument is first in ``lambdify``, as opposed to most SymPy + functions where it comes after the expression: to better mimic the + ``lambda`` keyword. + + ``lambdify`` takes the input expression (like ``sin(x) + cos(x)``) and + + 1. Converts it to a string + 2. Creates a module globals dictionary based on the modules that are + passed in (by default, it uses the NumPy module) + 3. Creates the string ``"def func({vars}): return {expr}"``, where ``{vars}`` is the + list of variables separated by commas, and ``{expr}`` is the string + created in step 1., then ``exec``s that string with the module globals + namespace and returns ``func``. + + In fact, functions returned by ``lambdify`` support inspection. So you can + see exactly how they are defined by using ``inspect.getsource``, or ``??`` if you + are using IPython or the Jupyter notebook. + + >>> f = lambdify(x, sin(x) + cos(x)) + >>> import inspect + >>> print(inspect.getsource(f)) + def _lambdifygenerated(x): + return sin(x) + cos(x) + + This shows us the source code of the function, but not the namespace it + was defined in. 
We can inspect that by looking at the ``__globals__`` + attribute of ``f``: + + >>> f.__globals__['sin'] + + >>> f.__globals__['cos'] + + >>> f.__globals__['sin'] is numpy.sin + True + + This shows us that ``sin`` and ``cos`` in the namespace of ``f`` will be + ``numpy.sin`` and ``numpy.cos``. + + Note that there are some convenience layers in each of these steps, but at + the core, this is how ``lambdify`` works. Step 1 is done using the + ``LambdaPrinter`` printers defined in the printing module (see + :mod:`sympy.printing.lambdarepr`). This allows different SymPy expressions + to define how they should be converted to a string for different modules. + You can change which printer ``lambdify`` uses by passing a custom printer + in to the ``printer`` argument. + + Step 2 is augmented by certain translations. There are default + translations for each module, but you can provide your own by passing a + list to the ``modules`` argument. For instance, + + >>> def mysin(x): + ... print('taking the sin of', x) + ... return numpy.sin(x) + ... + >>> f = lambdify(x, sin(x), [{'sin': mysin}, 'numpy']) + >>> f(1) + taking the sin of 1 + 0.8414709848078965 + + The globals dictionary is generated from the list by merging the + dictionary ``{'sin': mysin}`` and the module dictionary for NumPy. The + merging is done so that earlier items take precedence, which is why + ``mysin`` is used above instead of ``numpy.sin``. + + If you want to modify the way ``lambdify`` works for a given function, it + is usually easiest to do so by modifying the globals dictionary as such. + In more complicated cases, it may be necessary to create and pass in a + custom printer. + + Finally, step 3 is augmented with certain convenience operations, such as + the addition of a docstring. + + Understanding how ``lambdify`` works can make it easier to avoid certain + gotchas when using it. For instance, a common mistake is to create a + lambdified function for one module (say, NumPy), and pass it objects from + another (say, a SymPy expression). + + For instance, say we create + + >>> from sympy.abc import x + >>> f = lambdify(x, x + 1, 'numpy') + + Now if we pass in a NumPy array, we get that array plus 1 + + >>> import numpy + >>> a = numpy.array([1, 2]) + >>> f(a) + [2 3] + + But what happens if you make the mistake of passing in a SymPy expression + instead of a NumPy array: + + >>> f(x + 1) + x + 2 + + This worked, but it was only by accident. Now take a different lambdified + function: + + >>> from sympy import sin + >>> g = lambdify(x, x + sin(x), 'numpy') + + This works as expected on NumPy arrays: + + >>> g(a) + [1.84147098 2.90929743] + + But if we try to pass in a SymPy expression, it fails + + >>> try: + ... g(x + 1) + ... # NumPy release after 1.17 raises TypeError instead of + ... # AttributeError + ... except (AttributeError, TypeError): + ... raise AttributeError() # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + AttributeError: + + Now, let's look at what happened. The reason this fails is that ``g`` + calls ``numpy.sin`` on the input expression, and ``numpy.sin`` does not + know how to operate on a SymPy object. **As a general rule, NumPy + functions do not know how to operate on SymPy expressions, and SymPy + functions do not know how to operate on NumPy arrays. This is why lambdify + exists: to provide a bridge between SymPy and NumPy.** + + However, why is it that ``f`` did work? That's because ``f`` does not call + any functions, it only adds 1. 
So the resulting function that is created, + ``def _lambdifygenerated(x): return x + 1`` does not depend on the globals + namespace it is defined in. Thus it works, but only by accident. A future + version of ``lambdify`` may remove this behavior. + + Be aware that certain implementation details described here may change in + future versions of SymPy. The API of passing in custom modules and + printers will not change, but the details of how a lambda function is + created may change. However, the basic idea will remain the same, and + understanding it will be helpful to understanding the behavior of + lambdify. + + **In general: you should create lambdified functions for one module (say, + NumPy), and only pass it input types that are compatible with that module + (say, NumPy arrays).** Remember that by default, if the ``module`` + argument is not provided, ``lambdify`` creates functions using the NumPy + and SciPy namespaces. + """ + from sympy.core.symbol import Symbol + from sympy.core.expr import Expr + + # If the user hasn't specified any modules, use what is available. + if modules is None: + try: + _import("scipy") + except ImportError: + try: + _import("numpy") + except ImportError: + # Use either numpy (if available) or python.math where possible. + # XXX: This leads to different behaviour on different systems and + # might be the reason for irreproducible errors. + modules = ["math", "mpmath", "sympy"] + else: + modules = ["numpy"] + else: + modules = ["numpy", "scipy"] + + # Get the needed namespaces. + namespaces = [] + # First find any function implementations + if use_imps: + namespaces.append(_imp_namespace(expr)) + # Check for dict before iterating + if isinstance(modules, (dict, str)) or not hasattr(modules, '__iter__'): + namespaces.append(modules) + else: + # consistency check + if _module_present('numexpr', modules) and len(modules) > 1: + raise TypeError("numexpr must be the only item in 'modules'") + namespaces += list(modules) + # fill namespace with first having highest priority + namespace = {} + for m in namespaces[::-1]: + buf = _get_namespace(m) + namespace.update(buf) + + if hasattr(expr, "atoms"): + #Try if you can extract symbols from the expression. + #Move on if expr.atoms in not implemented. 
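+        # (Each free Symbol found this way is added to the namespace under
+        # its own name, so the generated code can refer to it directly.)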
+ syms = expr.atoms(Symbol) + for term in syms: + namespace.update({str(term): term}) + + if printer is None: + if _module_present('mpmath', namespaces): + from sympy.printing.pycode import MpmathPrinter as Printer # type: ignore + elif _module_present('scipy', namespaces): + from sympy.printing.numpy import SciPyPrinter as Printer # type: ignore + elif _module_present('numpy', namespaces): + from sympy.printing.numpy import NumPyPrinter as Printer # type: ignore + elif _module_present('cupy', namespaces): + from sympy.printing.numpy import CuPyPrinter as Printer # type: ignore + elif _module_present('jax', namespaces): + from sympy.printing.numpy import JaxPrinter as Printer # type: ignore + elif _module_present('numexpr', namespaces): + from sympy.printing.lambdarepr import NumExprPrinter as Printer # type: ignore + elif _module_present('tensorflow', namespaces): + from sympy.printing.tensorflow import TensorflowPrinter as Printer # type: ignore + elif _module_present('sympy', namespaces): + from sympy.printing.pycode import SymPyPrinter as Printer # type: ignore + else: + from sympy.printing.pycode import PythonCodePrinter as Printer # type: ignore + user_functions = {} + for m in namespaces[::-1]: + if isinstance(m, dict): + for k in m: + user_functions[k] = k + printer = Printer({'fully_qualified_modules': False, 'inline': True, + 'allow_unknown_functions': True, + 'user_functions': user_functions}) + + if isinstance(args, set): + sympy_deprecation_warning( + """ +Passing the function arguments to lambdify() as a set is deprecated. This +leads to unpredictable results since sets are unordered. Instead, use a list +or tuple for the function arguments. + """, + deprecated_since_version="1.6.3", + active_deprecations_target="deprecated-lambdify-arguments-set", + ) + + # Get the names of the args, for creating a docstring + iterable_args = (args,) if isinstance(args, Expr) else args + names = [] + + # Grab the callers frame, for getting the names by inspection (if needed) + callers_local_vars = inspect.currentframe().f_back.f_locals.items() # type: ignore + for n, var in enumerate(iterable_args): + if hasattr(var, 'name'): + names.append(var.name) + else: + # It's an iterable. Try to get name by inspection of calling frame. + name_list = [var_name for var_name, var_val in callers_local_vars + if var_val is var] + if len(name_list) == 1: + names.append(name_list[0]) + else: + # Cannot infer name with certainty. arg_# will have to do. + names.append('arg_' + str(n)) + + # Create the function definition code and execute it + funcname = '_lambdifygenerated' + if _module_present('tensorflow', namespaces): + funcprinter = _TensorflowEvaluatorPrinter(printer, dummify) + else: + funcprinter = _EvaluatorPrinter(printer, dummify) + + if cse == True: + from sympy.simplify.cse_main import cse as _cse + cses, _expr = _cse(expr, list=False) + elif callable(cse): + cses, _expr = cse(expr) + else: + cses, _expr = (), expr + funcstr = funcprinter.doprint(funcname, iterable_args, _expr, cses=cses) + + # Collect the module imports from the code printers. + imp_mod_lines = [] + for mod, keys in (getattr(printer, 'module_imports', None) or {}).items(): + for k in keys: + if k not in namespace: + ln = "from %s import %s" % (mod, k) + try: + exec(ln, {}, namespace) + except ImportError: + # Tensorflow 2.0 has issues with importing a specific + # function from its submodule. 
+ # https://github.com/tensorflow/tensorflow/issues/33022 + ln = "%s = %s.%s" % (k, mod, k) + exec(ln, {}, namespace) + imp_mod_lines.append(ln) + + # Provide lambda expression with builtins, and compatible implementation of range + namespace.update({'builtins':builtins, 'range':range}) + + funclocals = {} + global _lambdify_generated_counter + filename = '' % _lambdify_generated_counter + _lambdify_generated_counter += 1 + c = compile(funcstr, filename, 'exec') + exec(c, namespace, funclocals) + # mtime has to be None or else linecache.checkcache will remove it + linecache.cache[filename] = (len(funcstr), None, funcstr.splitlines(True), filename) # type: ignore + + func = funclocals[funcname] + + # Apply the docstring + sig = "func({})".format(", ".join(str(i) for i in names)) + sig = textwrap.fill(sig, subsequent_indent=' '*8) + if _too_large_for_docstring(expr, docstring_limit): + expr_str = 'EXPRESSION REDACTED DUE TO LENGTH' + src_str = 'SOURCE CODE REDACTED DUE TO LENGTH' + else: + expr_str = str(expr) + if len(expr_str) > 78: + expr_str = textwrap.wrap(expr_str, 75)[0] + '...' + src_str = funcstr + func.__doc__ = ( + "Created with lambdify. Signature:\n\n" + "{sig}\n\n" + "Expression:\n\n" + "{expr}\n\n" + "Source code:\n\n" + "{src}\n\n" + "Imported modules:\n\n" + "{imp_mods}" + ).format(sig=sig, expr=expr_str, src=src_str, imp_mods='\n'.join(imp_mod_lines)) + return func + +def _module_present(modname, modlist): + if modname in modlist: + return True + for m in modlist: + if hasattr(m, '__name__') and m.__name__ == modname: + return True + return False + +def _get_namespace(m): + """ + This is used by _lambdify to parse its arguments. + """ + if isinstance(m, str): + _import(m) + return MODULES[m][0] + elif isinstance(m, dict): + return m + elif hasattr(m, "__dict__"): + return m.__dict__ + else: + raise TypeError("Argument must be either a string, dict or module but it is: %s" % m) + + +def _recursive_to_string(doprint, arg): + """Functions in lambdify accept both SymPy types and non-SymPy types such as python + lists and tuples. This method ensures that we only call the doprint method of the + printer with SymPy types (so that the printer safely can use SymPy-methods).""" + from sympy.matrices.common import MatrixOperations + from sympy.core.basic import Basic + + if isinstance(arg, (Basic, MatrixOperations)): + return doprint(arg) + elif iterable(arg): + if isinstance(arg, list): + left, right = "[", "]" + elif isinstance(arg, tuple): + left, right = "(", ",)" + else: + raise NotImplementedError("unhandled type: %s, %s" % (type(arg), arg)) + return left +', '.join(_recursive_to_string(doprint, e) for e in arg) + right + elif isinstance(arg, str): + return arg + else: + return doprint(arg) + + +def lambdastr(args, expr, printer=None, dummify=None): + """ + Returns a string that can be evaluated to a lambda function. + + Examples + ======== + + >>> from sympy.abc import x, y, z + >>> from sympy.utilities.lambdify import lambdastr + >>> lambdastr(x, x**2) + 'lambda x: (x**2)' + >>> lambdastr((x,y,z), [z,y,x]) + 'lambda x,y,z: ([z, y, x])' + + Although tuples may not appear as arguments to lambda in Python 3, + lambdastr will create a lambda function that will unpack the original + arguments so that nested arguments can be handled: + + >>> lambdastr((x, (y, z)), x + y) + 'lambda _0,_1: (lambda x,y,z: (x + y))(_0,_1[0],_1[1])' + """ + # Transforming everything to strings. 
+ from sympy.matrices import DeferredVector + from sympy.core.basic import Basic + from sympy.core.function import (Derivative, Function) + from sympy.core.symbol import (Dummy, Symbol) + from sympy.core.sympify import sympify + + if printer is not None: + if inspect.isfunction(printer): + lambdarepr = printer + else: + if inspect.isclass(printer): + lambdarepr = lambda expr: printer().doprint(expr) + else: + lambdarepr = lambda expr: printer.doprint(expr) + else: + #XXX: This has to be done here because of circular imports + from sympy.printing.lambdarepr import lambdarepr + + def sub_args(args, dummies_dict): + if isinstance(args, str): + return args + elif isinstance(args, DeferredVector): + return str(args) + elif iterable(args): + dummies = flatten([sub_args(a, dummies_dict) for a in args]) + return ",".join(str(a) for a in dummies) + else: + # replace these with Dummy symbols + if isinstance(args, (Function, Symbol, Derivative)): + dummies = Dummy() + dummies_dict.update({args : dummies}) + return str(dummies) + else: + return str(args) + + def sub_expr(expr, dummies_dict): + expr = sympify(expr) + # dict/tuple are sympified to Basic + if isinstance(expr, Basic): + expr = expr.xreplace(dummies_dict) + # list is not sympified to Basic + elif isinstance(expr, list): + expr = [sub_expr(a, dummies_dict) for a in expr] + return expr + + # Transform args + def isiter(l): + return iterable(l, exclude=(str, DeferredVector, NotIterable)) + + def flat_indexes(iterable): + n = 0 + + for el in iterable: + if isiter(el): + for ndeep in flat_indexes(el): + yield (n,) + ndeep + else: + yield (n,) + + n += 1 + + if dummify is None: + dummify = any(isinstance(a, Basic) and + a.atoms(Function, Derivative) for a in ( + args if isiter(args) else [args])) + + if isiter(args) and any(isiter(i) for i in args): + dum_args = [str(Dummy(str(i))) for i in range(len(args))] + + indexed_args = ','.join([ + dum_args[ind[0]] + ''.join(["[%s]" % k for k in ind[1:]]) + for ind in flat_indexes(args)]) + + lstr = lambdastr(flatten(args), expr, printer=printer, dummify=dummify) + + return 'lambda %s: (%s)(%s)' % (','.join(dum_args), lstr, indexed_args) + + dummies_dict = {} + if dummify: + args = sub_args(args, dummies_dict) + else: + if isinstance(args, str): + pass + elif iterable(args, exclude=DeferredVector): + args = ",".join(str(a) for a in args) + + # Transform expr + if dummify: + if isinstance(expr, str): + pass + else: + expr = sub_expr(expr, dummies_dict) + expr = _recursive_to_string(lambdarepr, expr) + return "lambda %s: (%s)" % (args, expr) + +class _EvaluatorPrinter: + def __init__(self, printer=None, dummify=False): + self._dummify = dummify + + #XXX: This has to be done here because of circular imports + from sympy.printing.lambdarepr import LambdaPrinter + + if printer is None: + printer = LambdaPrinter() + + if inspect.isfunction(printer): + self._exprrepr = printer + else: + if inspect.isclass(printer): + printer = printer() + + self._exprrepr = printer.doprint + + #if hasattr(printer, '_print_Symbol'): + # symbolrepr = printer._print_Symbol + + #if hasattr(printer, '_print_Dummy'): + # dummyrepr = printer._print_Dummy + + # Used to print the generated function arguments in a standard way + self._argrepr = LambdaPrinter().doprint + + def doprint(self, funcname, args, expr, *, cses=()): + """ + Returns the function definition code as a string. 
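+
+        For instance (an illustrative sketch), printing ``x + 1`` for a single
+        argument ``x`` with ``funcname='_lambdifygenerated'`` gives a string
+        of the form::
+
+            def _lambdifygenerated(x):
+                return x + 1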
+ """ + from sympy.core.symbol import Dummy + + funcbody = [] + + if not iterable(args): + args = [args] + + if cses: + subvars, subexprs = zip(*cses) + exprs = [expr] + list(subexprs) + argstrs, exprs = self._preprocess(args, exprs) + expr, subexprs = exprs[0], exprs[1:] + cses = zip(subvars, subexprs) + else: + argstrs, expr = self._preprocess(args, expr) + + # Generate argument unpacking and final argument list + funcargs = [] + unpackings = [] + + for argstr in argstrs: + if iterable(argstr): + funcargs.append(self._argrepr(Dummy())) + unpackings.extend(self._print_unpacking(argstr, funcargs[-1])) + else: + funcargs.append(argstr) + + funcsig = 'def {}({}):'.format(funcname, ', '.join(funcargs)) + + # Wrap input arguments before unpacking + funcbody.extend(self._print_funcargwrapping(funcargs)) + + funcbody.extend(unpackings) + + for s, e in cses: + if e is None: + funcbody.append('del {}'.format(s)) + else: + funcbody.append('{} = {}'.format(s, self._exprrepr(e))) + + str_expr = _recursive_to_string(self._exprrepr, expr) + + if '\n' in str_expr: + str_expr = '({})'.format(str_expr) + funcbody.append('return {}'.format(str_expr)) + + funclines = [funcsig] + funclines.extend([' ' + line for line in funcbody]) + + return '\n'.join(funclines) + '\n' + + @classmethod + def _is_safe_ident(cls, ident): + return isinstance(ident, str) and ident.isidentifier() \ + and not keyword.iskeyword(ident) + + def _preprocess(self, args, expr): + """Preprocess args, expr to replace arguments that do not map + to valid Python identifiers. + + Returns string form of args, and updated expr. + """ + from sympy.core.basic import Basic + from sympy.core.sorting import ordered + from sympy.core.function import (Derivative, Function) + from sympy.core.symbol import Dummy, uniquely_named_symbol + from sympy.matrices import DeferredVector + from sympy.core.expr import Expr + + # Args of type Dummy can cause name collisions with args + # of type Symbol. Force dummify of everything in this + # situation. 
+ dummify = self._dummify or any( + isinstance(arg, Dummy) for arg in flatten(args)) + + argstrs = [None]*len(args) + for arg, i in reversed(list(ordered(zip(args, range(len(args)))))): + if iterable(arg): + s, expr = self._preprocess(arg, expr) + elif isinstance(arg, DeferredVector): + s = str(arg) + elif isinstance(arg, Basic) and arg.is_symbol: + s = self._argrepr(arg) + if dummify or not self._is_safe_ident(s): + dummy = Dummy() + if isinstance(expr, Expr): + dummy = uniquely_named_symbol( + dummy.name, expr, modify=lambda s: '_' + s) + s = self._argrepr(dummy) + expr = self._subexpr(expr, {arg: dummy}) + elif dummify or isinstance(arg, (Function, Derivative)): + dummy = Dummy() + s = self._argrepr(dummy) + expr = self._subexpr(expr, {arg: dummy}) + else: + s = str(arg) + argstrs[i] = s + return argstrs, expr + + def _subexpr(self, expr, dummies_dict): + from sympy.matrices import DeferredVector + from sympy.core.sympify import sympify + + expr = sympify(expr) + xreplace = getattr(expr, 'xreplace', None) + if xreplace is not None: + expr = xreplace(dummies_dict) + else: + if isinstance(expr, DeferredVector): + pass + elif isinstance(expr, dict): + k = [self._subexpr(sympify(a), dummies_dict) for a in expr.keys()] + v = [self._subexpr(sympify(a), dummies_dict) for a in expr.values()] + expr = dict(zip(k, v)) + elif isinstance(expr, tuple): + expr = tuple(self._subexpr(sympify(a), dummies_dict) for a in expr) + elif isinstance(expr, list): + expr = [self._subexpr(sympify(a), dummies_dict) for a in expr] + return expr + + def _print_funcargwrapping(self, args): + """Generate argument wrapping code. + + args is the argument list of the generated function (strings). + + Return value is a list of lines of code that will be inserted at + the beginning of the function definition. + """ + return [] + + def _print_unpacking(self, unpackto, arg): + """Generate argument unpacking code. + + arg is the function argument to be unpacked (a string), and + unpackto is a list or nested lists of the variable names (strings) to + unpack to. + """ + def unpack_lhs(lvalues): + return '[{}]'.format(', '.join( + unpack_lhs(val) if iterable(val) else val for val in lvalues)) + + return ['{} = {}'.format(unpack_lhs(unpackto), arg)] + +class _TensorflowEvaluatorPrinter(_EvaluatorPrinter): + def _print_unpacking(self, lvalues, rvalue): + """Generate argument unpacking code. + + This method is used when the input value is not interable, + but can be indexed (see issue #14655). + """ + + def flat_indexes(elems): + n = 0 + + for el in elems: + if iterable(el): + for ndeep in flat_indexes(el): + yield (n,) + ndeep + else: + yield (n,) + + n += 1 + + indexed = ', '.join('{}[{}]'.format(rvalue, ']['.join(map(str, ind))) + for ind in flat_indexes(lvalues)) + + return ['[{}] = [{}]'.format(', '.join(flatten(lvalues)), indexed)] + +def _imp_namespace(expr, namespace=None): + """ Return namespace dict with function implementations + + We need to search for functions in anything that can be thrown at + us - that is - anything that could be passed as ``expr``. Examples + include SymPy expressions, as well as tuples, lists and dicts that may + contain SymPy expressions. + + Parameters + ---------- + expr : object + Something passed to lambdify, that will generate valid code from + ``str(expr)``. + namespace : None or mapping + Namespace to fill. 
None results in new empty dict + + Returns + ------- + namespace : dict + dict with keys of implemented function names within ``expr`` and + corresponding values being the numerical implementation of + function + + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy.utilities.lambdify import implemented_function, _imp_namespace + >>> from sympy import Function + >>> f = implemented_function(Function('f'), lambda x: x+1) + >>> g = implemented_function(Function('g'), lambda x: x*10) + >>> namespace = _imp_namespace(f(g(x))) + >>> sorted(namespace.keys()) + ['f', 'g'] + """ + # Delayed import to avoid circular imports + from sympy.core.function import FunctionClass + if namespace is None: + namespace = {} + # tuples, lists, dicts are valid expressions + if is_sequence(expr): + for arg in expr: + _imp_namespace(arg, namespace) + return namespace + elif isinstance(expr, dict): + for key, val in expr.items(): + # functions can be in dictionary keys + _imp_namespace(key, namespace) + _imp_namespace(val, namespace) + return namespace + # SymPy expressions may be Functions themselves + func = getattr(expr, 'func', None) + if isinstance(func, FunctionClass): + imp = getattr(func, '_imp_', None) + if imp is not None: + name = expr.func.__name__ + if name in namespace and namespace[name] != imp: + raise ValueError('We found more than one ' + 'implementation with name ' + '"%s"' % name) + namespace[name] = imp + # and / or they may take Functions as arguments + if hasattr(expr, 'args'): + for arg in expr.args: + _imp_namespace(arg, namespace) + return namespace + + +def implemented_function(symfunc, implementation): + """ Add numerical ``implementation`` to function ``symfunc``. + + ``symfunc`` can be an ``UndefinedFunction`` instance, or a name string. + In the latter case we create an ``UndefinedFunction`` instance with that + name. + + Be aware that this is a quick workaround, not a general method to create + special symbolic functions. If you want to create a symbolic function to be + used by all the machinery of SymPy you should subclass the ``Function`` + class. + + Parameters + ---------- + symfunc : ``str`` or ``UndefinedFunction`` instance + If ``str``, then create new ``UndefinedFunction`` with this as + name. If ``symfunc`` is an Undefined function, create a new function + with the same name and the implemented function attached. + implementation : callable + numerical implementation to be called by ``evalf()`` or ``lambdify`` + + Returns + ------- + afunc : sympy.FunctionClass instance + function with attached implementation + + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy.utilities.lambdify import implemented_function + >>> from sympy import lambdify + >>> f = implemented_function('f', lambda x: x+1) + >>> lam_f = lambdify(x, f(x)) + >>> lam_f(4) + 5 + """ + # Delayed import to avoid circular imports + from sympy.core.function import UndefinedFunction + # if name, create function to hold implementation + kwargs = {} + if isinstance(symfunc, UndefinedFunction): + kwargs = symfunc._kwargs + symfunc = symfunc.__name__ + if isinstance(symfunc, str): + # Keyword arguments to UndefinedFunction are added as attributes to + # the created class. 
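+        # Editorial illustration (not upstream code): implemented_function('f',
+        # lambda x: x + 1) produces a class ``f`` with ``f._imp_`` set to the
+        # given callable, which is what lambdify picks up in the doctest above
+        # (lam_f(4) == 5).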
+ symfunc = UndefinedFunction( + symfunc, _imp_=staticmethod(implementation), **kwargs) + elif not isinstance(symfunc, UndefinedFunction): + raise ValueError(filldedent(''' + symfunc should be either a string or + an UndefinedFunction instance.''')) + return symfunc + + +def _too_large_for_docstring(expr, limit): + """Decide whether an ``Expr`` is too large to be fully rendered in a + ``lambdify`` docstring. + + This is a fast alternative to ``count_ops``, which can become prohibitively + slow for large expressions, because in this instance we only care whether + ``limit`` is exceeded rather than counting the exact number of nodes in the + expression. + + Parameters + ========== + expr : ``Expr``, (nested) ``list`` of ``Expr``, or ``Matrix`` + The same objects that can be passed to the ``expr`` argument of + ``lambdify``. + limit : ``int`` or ``None`` + The threshold above which an expression contains too many nodes to be + usefully rendered in the docstring. If ``None`` then there is no limit. + + Returns + ======= + bool + ``True`` if the number of nodes in the expression exceeds the limit, + ``False`` otherwise. + + Examples + ======== + + >>> from sympy.abc import x, y, z + >>> from sympy.utilities.lambdify import _too_large_for_docstring + >>> expr = x + >>> _too_large_for_docstring(expr, None) + False + >>> _too_large_for_docstring(expr, 100) + False + >>> _too_large_for_docstring(expr, 1) + False + >>> _too_large_for_docstring(expr, 0) + True + >>> _too_large_for_docstring(expr, -1) + True + + Does this split it? + + >>> expr = [x, y, z] + >>> _too_large_for_docstring(expr, None) + False + >>> _too_large_for_docstring(expr, 100) + False + >>> _too_large_for_docstring(expr, 1) + True + >>> _too_large_for_docstring(expr, 0) + True + >>> _too_large_for_docstring(expr, -1) + True + + >>> expr = [x, [y], z, [[x+y], [x*y*z, [x+y+z]]]] + >>> _too_large_for_docstring(expr, None) + False + >>> _too_large_for_docstring(expr, 100) + False + >>> _too_large_for_docstring(expr, 1) + True + >>> _too_large_for_docstring(expr, 0) + True + >>> _too_large_for_docstring(expr, -1) + True + + >>> expr = ((x + y + z)**5).expand() + >>> _too_large_for_docstring(expr, None) + False + >>> _too_large_for_docstring(expr, 100) + True + >>> _too_large_for_docstring(expr, 1) + True + >>> _too_large_for_docstring(expr, 0) + True + >>> _too_large_for_docstring(expr, -1) + True + + >>> from sympy import Matrix + >>> expr = Matrix([[(x + y + z), ((x + y + z)**2).expand(), + ... ((x + y + z)**3).expand(), ((x + y + z)**4).expand()]]) + >>> _too_large_for_docstring(expr, None) + False + >>> _too_large_for_docstring(expr, 1000) + False + >>> _too_large_for_docstring(expr, 100) + True + >>> _too_large_for_docstring(expr, 1) + True + >>> _too_large_for_docstring(expr, 0) + True + >>> _too_large_for_docstring(expr, -1) + True + + """ + # Must be imported here to avoid a circular import error + from sympy.core.traversal import postorder_traversal + + if limit is None: + return False + + i = 0 + for _ in postorder_traversal(expr): + i += 1 + if i > limit: + return True + return False diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/magic.py b/venv/lib/python3.10/site-packages/sympy/utilities/magic.py new file mode 100644 index 0000000000000000000000000000000000000000..e853a0ad9a85bc252dcb24e8a1ecbfca422ac3fd --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/magic.py @@ -0,0 +1,12 @@ +"""Functions that involve magic. 
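+
+A hedged usage sketch (editorial, not upstream text): ``pollute`` writes into
+the globals of its caller's caller, so it is normally wrapped in a small
+helper::
+
+    from sympy import Symbol
+    from sympy.utilities.magic import pollute
+
+    def define(*names):
+        pollute(names, [Symbol(n) for n in names])
+
+    define('x', 'y')  # the module globals now contain Symbol('x') and Symbol('y')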
""" + +def pollute(names, objects): + """Pollute the global namespace with symbols -> objects mapping. """ + from inspect import currentframe + frame = currentframe().f_back.f_back + + try: + for name, obj in zip(names, objects): + frame.f_globals[name] = obj + finally: + del frame # break cyclic dependencies as stated in inspect docs diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/matchpy_connector.py b/venv/lib/python3.10/site-packages/sympy/utilities/matchpy_connector.py new file mode 100644 index 0000000000000000000000000000000000000000..d014d3f19dee8a8796471d6eaa6feceff6828c58 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/matchpy_connector.py @@ -0,0 +1,289 @@ +""" +The objects in this module allow the usage of the MatchPy pattern matching +library on SymPy expressions. +""" +import re +from typing import List, Callable + +from sympy.core.sympify import _sympify +from sympy.external import import_module +from sympy.functions import (log, sin, cos, tan, cot, csc, sec, erf, gamma, uppergamma) +from sympy.functions.elementary.hyperbolic import acosh, asinh, atanh, acoth, acsch, asech, cosh, sinh, tanh, coth, sech, csch +from sympy.functions.elementary.trigonometric import atan, acsc, asin, acot, acos, asec +from sympy.functions.special.error_functions import fresnelc, fresnels, erfc, erfi, Ei +from sympy.core.add import Add +from sympy.core.basic import Basic +from sympy.core.expr import Expr +from sympy.core.mul import Mul +from sympy.core.power import Pow +from sympy.core.relational import (Equality, Unequality) +from sympy.core.symbol import Symbol +from sympy.functions.elementary.exponential import exp +from sympy.integrals.integrals import Integral +from sympy.printing.repr import srepr +from sympy.utilities.decorator import doctest_depends_on + +matchpy = import_module("matchpy") + +if matchpy: + from matchpy import Operation, CommutativeOperation, AssociativeOperation, OneIdentityOperation + from matchpy.expressions.functions import op_iter, create_operation_expression, op_len + + Operation.register(Integral) + Operation.register(Pow) + OneIdentityOperation.register(Pow) + + Operation.register(Add) + OneIdentityOperation.register(Add) + CommutativeOperation.register(Add) + AssociativeOperation.register(Add) + + Operation.register(Mul) + OneIdentityOperation.register(Mul) + CommutativeOperation.register(Mul) + AssociativeOperation.register(Mul) + + Operation.register(Equality) + CommutativeOperation.register(Equality) + Operation.register(Unequality) + CommutativeOperation.register(Unequality) + + Operation.register(exp) + Operation.register(log) + Operation.register(gamma) + Operation.register(uppergamma) + Operation.register(fresnels) + Operation.register(fresnelc) + Operation.register(erf) + Operation.register(Ei) + Operation.register(erfc) + Operation.register(erfi) + Operation.register(sin) + Operation.register(cos) + Operation.register(tan) + Operation.register(cot) + Operation.register(csc) + Operation.register(sec) + Operation.register(sinh) + Operation.register(cosh) + Operation.register(tanh) + Operation.register(coth) + Operation.register(csch) + Operation.register(sech) + Operation.register(asin) + Operation.register(acos) + Operation.register(atan) + Operation.register(acot) + Operation.register(acsc) + Operation.register(asec) + Operation.register(asinh) + Operation.register(acosh) + Operation.register(atanh) + Operation.register(acoth) + Operation.register(acsch) + Operation.register(asech) + + @op_iter.register(Integral) # 
type: ignore + def _(operation): + return iter((operation._args[0],) + operation._args[1]) + + @op_iter.register(Basic) # type: ignore + def _(operation): + return iter(operation._args) + + @op_len.register(Integral) # type: ignore + def _(operation): + return 1 + len(operation._args[1]) + + @op_len.register(Basic) # type: ignore + def _(operation): + return len(operation._args) + + @create_operation_expression.register(Basic) + def sympy_op_factory(old_operation, new_operands, variable_name=True): + return type(old_operation)(*new_operands) + + +if matchpy: + from matchpy import Wildcard +else: + class Wildcard: # type: ignore + def __init__(self, min_length, fixed_size, variable_name, optional): + self.min_count = min_length + self.fixed_size = fixed_size + self.variable_name = variable_name + self.optional = optional + + +@doctest_depends_on(modules=('matchpy',)) +class _WildAbstract(Wildcard, Symbol): + min_length: int # abstract field required in subclasses + fixed_size: bool # abstract field required in subclasses + + def __init__(self, variable_name=None, optional=None, **assumptions): + min_length = self.min_length + fixed_size = self.fixed_size + if optional is not None: + optional = _sympify(optional) + Wildcard.__init__(self, min_length, fixed_size, str(variable_name), optional) + + def __getstate__(self): + return { + "min_length": self.min_length, + "fixed_size": self.fixed_size, + "min_count": self.min_count, + "variable_name": self.variable_name, + "optional": self.optional, + } + + def __new__(cls, variable_name=None, optional=None, **assumptions): + cls._sanitize(assumptions, cls) + return _WildAbstract.__xnew__(cls, variable_name, optional, **assumptions) + + def __getnewargs__(self): + return self.variable_name, self.optional + + @staticmethod + def __xnew__(cls, variable_name=None, optional=None, **assumptions): + obj = Symbol.__xnew__(cls, variable_name, **assumptions) + return obj + + def _hashable_content(self): + if self.optional: + return super()._hashable_content() + (self.min_count, self.fixed_size, self.variable_name, self.optional) + else: + return super()._hashable_content() + (self.min_count, self.fixed_size, self.variable_name) + + def __copy__(self) -> '_WildAbstract': + return type(self)(variable_name=self.variable_name, optional=self.optional) + + def __repr__(self): + return str(self) + + def __str__(self): + return self.name + + +@doctest_depends_on(modules=('matchpy',)) +class WildDot(_WildAbstract): + min_length = 1 + fixed_size = True + + +@doctest_depends_on(modules=('matchpy',)) +class WildPlus(_WildAbstract): + min_length = 1 + fixed_size = False + + +@doctest_depends_on(modules=('matchpy',)) +class WildStar(_WildAbstract): + min_length = 0 + fixed_size = False + + +def _get_srepr(expr): + s = srepr(expr) + s = re.sub(r"WildDot\('(\w+)'\)", r"\1", s) + s = re.sub(r"WildPlus\('(\w+)'\)", r"*\1", s) + s = re.sub(r"WildStar\('(\w+)'\)", r"*\1", s) + return s + + +@doctest_depends_on(modules=('matchpy',)) +class Replacer: + """ + Replacer object to perform multiple pattern matching and subexpression + replacements in SymPy expressions. + + Examples + ======== + + Example to construct a simple first degree equation solver: + + >>> from sympy.utilities.matchpy_connector import WildDot, Replacer + >>> from sympy import Equality, Symbol + >>> x = Symbol("x") + >>> a_ = WildDot("a_", optional=1) + >>> b_ = WildDot("b_", optional=0) + + The lines above have defined two wildcards, ``a_`` and ``b_``, the + coefficients of the equation `a x + b = 0`. 
The optional values specified + indicate which expression to return in case no match is found, they are + necessary in equations like `a x = 0` and `x + b = 0`. + + Create two constraints to make sure that ``a_`` and ``b_`` will not match + any expression containing ``x``: + + >>> from matchpy import CustomConstraint + >>> free_x_a = CustomConstraint(lambda a_: not a_.has(x)) + >>> free_x_b = CustomConstraint(lambda b_: not b_.has(x)) + + Now create the rule replacer with the constraints: + + >>> replacer = Replacer(common_constraints=[free_x_a, free_x_b]) + + Add the matching rule: + + >>> replacer.add(Equality(a_*x + b_, 0), -b_/a_) + + Let's try it: + + >>> replacer.replace(Equality(3*x + 4, 0)) + -4/3 + + Notice that it will not match equations expressed with other patterns: + + >>> eq = Equality(3*x, 4) + >>> replacer.replace(eq) + Eq(3*x, 4) + + In order to extend the matching patterns, define another one (we also need + to clear the cache, because the previous result has already been memorized + and the pattern matcher will not iterate again if given the same expression) + + >>> replacer.add(Equality(a_*x, b_), b_/a_) + >>> replacer._replacer.matcher.clear() + >>> replacer.replace(eq) + 4/3 + """ + + def __init__(self, common_constraints: list = []): + self._replacer = matchpy.ManyToOneReplacer() + self._common_constraint = common_constraints + + def _get_lambda(self, lambda_str: str) -> Callable[..., Expr]: + exec("from sympy import *") + return eval(lambda_str, locals()) + + def _get_custom_constraint(self, constraint_expr: Expr, condition_template: str) -> Callable[..., Expr]: + wilds = [x.name for x in constraint_expr.atoms(_WildAbstract)] + lambdaargs = ', '.join(wilds) + fullexpr = _get_srepr(constraint_expr) + condition = condition_template.format(fullexpr) + return matchpy.CustomConstraint( + self._get_lambda(f"lambda {lambdaargs}: ({condition})")) + + def _get_custom_constraint_nonfalse(self, constraint_expr: Expr) -> Callable[..., Expr]: + return self._get_custom_constraint(constraint_expr, "({}) != False") + + def _get_custom_constraint_true(self, constraint_expr: Expr) -> Callable[..., Expr]: + return self._get_custom_constraint(constraint_expr, "({}) == True") + + def add(self, expr: Expr, result: Expr, conditions_true: List[Expr] = [], conditions_nonfalse: List[Expr] = []) -> None: + expr = _sympify(expr) + result = _sympify(result) + lambda_str = f"lambda {', '.join((x.name for x in expr.atoms(_WildAbstract)))}: {_get_srepr(result)}" + lambda_expr = self._get_lambda(lambda_str) + constraints = self._common_constraint[:] + constraint_conditions_true = [ + self._get_custom_constraint_true(cond) for cond in conditions_true] + constraint_conditions_nonfalse = [ + self._get_custom_constraint_nonfalse(cond) for cond in conditions_nonfalse] + constraints.extend(constraint_conditions_true) + constraints.extend(constraint_conditions_nonfalse) + self._replacer.add( + matchpy.ReplacementRule(matchpy.Pattern(expr, *constraints), lambda_expr)) + + def replace(self, expr: Expr) -> Expr: + return self._replacer.replace(expr) diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/mathml/__init__.py b/venv/lib/python3.10/site-packages/sympy/utilities/mathml/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9af4a8c23cd5c5b733728bef3acf13f008ff7f5c --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/mathml/__init__.py @@ -0,0 +1,79 @@ +"""Module with some functions for MathML, like transforming MathML +content in MathML 
presentation.
+
+To use this module, you will need lxml.
+"""
+
+from sympy.utilities.pkgdata import get_resource
+from sympy.utilities.decorator import doctest_depends_on
+
+
+__doctest_requires__ = {('apply_xsl', 'c2p'): ['lxml']}
+
+
+def add_mathml_headers(s):
+    # NOTE: the <math ...> header string below is a reconstruction; the
+    # angle-bracketed markup was stripped from this copy of the file.
+    return """<math xmlns:mml="http://www.w3.org/1998/Math/MathML"
+      xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+      xsi:schemaLocation="http://www.w3.org/1998/Math/MathML
+        http://www.w3.org/TR/MathML2/dtd/mathml2.dtd">""" + s + "</math>"
+
+
+@doctest_depends_on(modules=('lxml',))
+def apply_xsl(mml, xsl):
+    """Apply a xsl to a MathML string.
+
+    Parameters
+    ==========
+
+    mml
+        A string with MathML code.
+    xsl
+        A string representing a path to a xsl (xml stylesheet) file.
+        This file name is relative to the PYTHONPATH.
+
+    Examples
+    ========
+
+    >>> from sympy.utilities.mathml import apply_xsl
+    >>> xsl = 'mathml/data/simple_mmlctop.xsl'
+    >>> mml = ' a b '
+    >>> res = apply_xsl(mml, xsl)
+    >>> ''.join(res.splitlines())
+    ' a + b'
+    """
+    from lxml import etree
+
+    parser = etree.XMLParser(resolve_entities=False)
+    ac = etree.XSLTAccessControl.DENY_ALL
+
+    s = etree.XML(get_resource(xsl).read(), parser=parser)
+    transform = etree.XSLT(s, access_control=ac)
+    doc = etree.XML(mml, parser=parser)
+    result = transform(doc)
+    s = str(result)
+    return s
+
+
+@doctest_depends_on(modules=('lxml',))
+def c2p(mml, simple=False):
+    """Transforms a document in MathML content (like the one that sympy produces)
+    into a document in MathML presentation, more suitable for printing and more
+    widely accepted.
+
+    Examples
+    ========
+
+    >>> from sympy.utilities.mathml import c2p
+    >>> mml = ' 2 '
+    >>> c2p(mml, simple=True) != c2p(mml, simple=False)
+    True
+
+    """
+    if not mml.startswith('<math'):
+        mml = add_mathml_headers(mml)
+    if simple:
+        return apply_xsl(mml, 'mathml/data/simple_mmlctop.xsl')
+    return apply_xsl(mml, 'mathml/data/mmlctop.xsl')
[A diff hunk adding what appears to be sympy/utilities/mathml/data/mmlctop.xsl followed here: an XSLT stylesheet that rewrites content MathML into presentation MathML. Its XML markup was stripped during extraction, leaving only entity references, so it is not reproduced.]
diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/mathml/data/mmltex.xsl b/venv/lib/python3.10/site-packages/sympy/utilities/mathml/data/mmltex.xsl
new file mode 100644
index 0000000000000000000000000000000000000000..5e6b85e02efd5196fe76b6ce4e10def27b9a8497
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sympy/utilities/mathml/data/mmltex.xsl
@@ -0,0 +1,2360 @@
[2360 added lines: an XSLT stylesheet mapping content MathML to LaTeX (\mathrm, \frac, \sqrt, \mathbb{...} and similar commands). The XML markup was stripped during extraction and is not reproduced.]
diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/mathml/data/simple_mmlctop.xsl b/venv/lib/python3.10/site-packages/sympy/utilities/mathml/data/simple_mmlctop.xsl
new file mode 100644
index 0000000000000000000000000000000000000000..3b0faf2e6e8a3055712b0c1905839515c780fdad
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sympy/utilities/mathml/data/simple_mmlctop.xsl
@@ -0,0 +1,3166 @@
[3166 added lines: a simplified content-to-presentation MathML stylesheet. The XML markup was stripped during extraction and is not reproduced.]
diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/memoization.py b/venv/lib/python3.10/site-packages/sympy/utilities/memoization.py
new file mode 100644
index 0000000000000000000000000000000000000000..55d7c4968fd92a66d0d8ba3159a150452d14ffda
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sympy/utilities/memoization.py
@@ -0,0 +1,59 @@
+from functools import wraps
+
+
+def recurrence_memo(initial):
+    """
+    Memo decorator for sequences defined by recurrence
+
+    See usage examples e.g.
in the specfun/combinatorial module + """ + cache = initial + + def decorator(f): + @wraps(f) + def g(n): + L = len(cache) + if n <= L - 1: + return cache[n] + for i in range(L, n + 1): + cache.append(f(i, cache)) + return cache[-1] + return g + return decorator + + +def assoc_recurrence_memo(base_seq): + """ + Memo decorator for associated sequences defined by recurrence starting from base + + base_seq(n) -- callable to get base sequence elements + + XXX works only for Pn0 = base_seq(0) cases + XXX works only for m <= n cases + """ + + cache = [] + + def decorator(f): + @wraps(f) + def g(n, m): + L = len(cache) + if n < L: + return cache[n][m] + + for i in range(L, n + 1): + # get base sequence + F_i0 = base_seq(i) + F_i_cache = [F_i0] + cache.append(F_i_cache) + + # XXX only works for m <= n cases + # generate assoc sequence + for j in range(1, i + 1): + F_ij = f(i, j, cache) + F_i_cache.append(F_ij) + + return cache[n][m] + + return g + return decorator diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/misc.py b/venv/lib/python3.10/site-packages/sympy/utilities/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..d09771abb7c02484a52b24e98b653048b89c2836 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/misc.py @@ -0,0 +1,565 @@ +"""Miscellaneous stuff that does not really fit anywhere else.""" + +from __future__ import annotations + +import operator +import sys +import os +import re as _re +import struct +from textwrap import fill, dedent + + +class Undecidable(ValueError): + # an error to be raised when a decision cannot be made definitively + # where a definitive answer is needed + pass + + +def filldedent(s, w=70, **kwargs): + """ + Strips leading and trailing empty lines from a copy of ``s``, then dedents, + fills and returns it. + + Empty line stripping serves to deal with docstrings like this one that + start with a newline after the initial triple quote, inserting an empty + line at the beginning of the string. + + Additional keyword arguments will be passed to ``textwrap.fill()``. + + See Also + ======== + strlines, rawlines + + """ + return '\n' + fill(dedent(str(s)).strip('\n'), width=w, **kwargs) + + +def strlines(s, c=64, short=False): + """Return a cut-and-pastable string that, when printed, is + equivalent to the input. The lines will be surrounded by + parentheses and no line will be longer than c (default 64) + characters. If the line contains newlines characters, the + `rawlines` result will be returned. If ``short`` is True + (default is False) then if there is one line it will be + returned without bounding parentheses. + + Examples + ======== + + >>> from sympy.utilities.misc import strlines + >>> q = 'this is a long string that should be broken into shorter lines' + >>> print(strlines(q, 40)) + ( + 'this is a long string that should be b' + 'roken into shorter lines' + ) + >>> q == ( + ... 'this is a long string that should be b' + ... 'roken into shorter lines' + ... 
) + True + + See Also + ======== + filldedent, rawlines + """ + if not isinstance(s, str): + raise ValueError('expecting string input') + if '\n' in s: + return rawlines(s) + q = '"' if repr(s).startswith('"') else "'" + q = (q,)*2 + if '\\' in s: # use r-string + m = '(\nr%s%%s%s\n)' % q + j = '%s\nr%s' % q + c -= 3 + else: + m = '(\n%s%%s%s\n)' % q + j = '%s\n%s' % q + c -= 2 + out = [] + while s: + out.append(s[:c]) + s=s[c:] + if short and len(out) == 1: + return (m % out[0]).splitlines()[1] # strip bounding (\n...\n) + return m % j.join(out) + + +def rawlines(s): + """Return a cut-and-pastable string that, when printed, is equivalent + to the input. Use this when there is more than one line in the + string. The string returned is formatted so it can be indented + nicely within tests; in some cases it is wrapped in the dedent + function which has to be imported from textwrap. + + Examples + ======== + + Note: because there are characters in the examples below that need + to be escaped because they are themselves within a triple quoted + docstring, expressions below look more complicated than they would + be if they were printed in an interpreter window. + + >>> from sympy.utilities.misc import rawlines + >>> from sympy import TableForm + >>> s = str(TableForm([[1, 10]], headings=(None, ['a', 'bee']))) + >>> print(rawlines(s)) + ( + 'a bee\\n' + '-----\\n' + '1 10 ' + ) + >>> print(rawlines('''this + ... that''')) + dedent('''\\ + this + that''') + + >>> print(rawlines('''this + ... that + ... ''')) + dedent('''\\ + this + that + ''') + + >>> s = \"\"\"this + ... is a triple ''' + ... \"\"\" + >>> print(rawlines(s)) + dedent(\"\"\"\\ + this + is a triple ''' + \"\"\") + + >>> print(rawlines('''this + ... that + ... ''')) + ( + 'this\\n' + 'that\\n' + ' ' + ) + + See Also + ======== + filldedent, strlines + """ + lines = s.split('\n') + if len(lines) == 1: + return repr(lines[0]) + triple = ["'''" in s, '"""' in s] + if any(li.endswith(' ') for li in lines) or '\\' in s or all(triple): + rv = [] + # add on the newlines + trailing = s.endswith('\n') + last = len(lines) - 1 + for i, li in enumerate(lines): + if i != last or trailing: + rv.append(repr(li + '\n')) + else: + rv.append(repr(li)) + return '(\n %s\n)' % '\n '.join(rv) + else: + rv = '\n '.join(lines) + if triple[0]: + return 'dedent("""\\\n %s""")' % rv + else: + return "dedent('''\\\n %s''')" % rv + +ARCH = str(struct.calcsize('P') * 8) + "-bit" + + +# XXX: PyPy does not support hash randomization +HASH_RANDOMIZATION = getattr(sys.flags, 'hash_randomization', False) + +_debug_tmp: list[str] = [] +_debug_iter = 0 + +def debug_decorator(func): + """If SYMPY_DEBUG is True, it will print a nice execution tree with + arguments and results of all decorated functions, else do nothing. + """ + from sympy import SYMPY_DEBUG + + if not SYMPY_DEBUG: + return func + + def maketree(f, *args, **kw): + global _debug_tmp + global _debug_iter + oldtmp = _debug_tmp + _debug_tmp = [] + _debug_iter += 1 + + def tree(subtrees): + def indent(s, variant=1): + x = s.split("\n") + r = "+-%s\n" % x[0] + for a in x[1:]: + if a == "": + continue + if variant == 1: + r += "| %s\n" % a + else: + r += " %s\n" % a + return r + if len(subtrees) == 0: + return "" + f = [] + for a in subtrees[:-1]: + f.append(indent(a)) + f.append(indent(subtrees[-1], 2)) + return ''.join(f) + + # If there is a bug and the algorithm enters an infinite loop, enable the + # following lines. 
It will print the names and parameters of all major functions + # that are called, *before* they are called + #from functools import reduce + #print("%s%s %s%s" % (_debug_iter, reduce(lambda x, y: x + y, \ + # map(lambda x: '-', range(1, 2 + _debug_iter))), f.__name__, args)) + + r = f(*args, **kw) + + _debug_iter -= 1 + s = "%s%s = %s\n" % (f.__name__, args, r) + if _debug_tmp != []: + s += tree(_debug_tmp) + _debug_tmp = oldtmp + _debug_tmp.append(s) + if _debug_iter == 0: + print(_debug_tmp[0]) + _debug_tmp = [] + return r + + def decorated(*args, **kwargs): + return maketree(func, *args, **kwargs) + + return decorated + + +def debug(*args): + """ + Print ``*args`` if SYMPY_DEBUG is True, else do nothing. + """ + from sympy import SYMPY_DEBUG + if SYMPY_DEBUG: + print(*args, file=sys.stderr) + + +def debugf(string, args): + """ + Print ``string%args`` if SYMPY_DEBUG is True, else do nothing. This is + intended for debug messages using formatted strings. + """ + from sympy import SYMPY_DEBUG + if SYMPY_DEBUG: + print(string%args, file=sys.stderr) + + +def find_executable(executable, path=None): + """Try to find 'executable' in the directories listed in 'path' (a + string listing directories separated by 'os.pathsep'; defaults to + os.environ['PATH']). Returns the complete filename or None if not + found + """ + from .exceptions import sympy_deprecation_warning + sympy_deprecation_warning( + """ + sympy.utilities.misc.find_executable() is deprecated. Use the standard + library shutil.which() function instead. + """, + deprecated_since_version="1.7", + active_deprecations_target="deprecated-find-executable", + ) + if path is None: + path = os.environ['PATH'] + paths = path.split(os.pathsep) + extlist = [''] + if os.name == 'os2': + (base, ext) = os.path.splitext(executable) + # executable files on OS/2 can have an arbitrary extension, but + # .exe is automatically appended if no dot is present in the name + if not ext: + executable = executable + ".exe" + elif sys.platform == 'win32': + pathext = os.environ['PATHEXT'].lower().split(os.pathsep) + (base, ext) = os.path.splitext(executable) + if ext.lower() not in pathext: + extlist = pathext + for ext in extlist: + execname = executable + ext + if os.path.isfile(execname): + return execname + else: + for p in paths: + f = os.path.join(p, execname) + if os.path.isfile(f): + return f + + return None + + +def func_name(x, short=False): + """Return function name of `x` (if defined) else the `type(x)`. + If short is True and there is a shorter alias for the result, + return the alias. 
+
+    Examples
+    ========
+
+    >>> from sympy.utilities.misc import func_name
+    >>> from sympy import Matrix
+    >>> from sympy.abc import x
+    >>> func_name(Matrix.eye(3))
+    'MutableDenseMatrix'
+    >>> func_name(x < 1)
+    'StrictLessThan'
+    >>> func_name(x < 1, short=True)
+    'Lt'
+    """
+    alias = {
+        'GreaterThan': 'Ge',
+        'StrictGreaterThan': 'Gt',
+        'LessThan': 'Le',
+        'StrictLessThan': 'Lt',
+        'Equality': 'Eq',
+        'Unequality': 'Ne',
+    }
+    # NOTE: the lines from here through the start of _replace() below are a
+    # reconstruction; they were stripped from this copy of the file.
+    typ = type(x)
+    if str(typ).startswith("<type '"):
+        typ = str(typ).split("'")[1].split("'")[0]
+    elif str(typ).startswith("<class '"):
+        typ = str(typ).split("'")[1].split("'")[0]
+    rv = getattr(getattr(x, 'func', x), '__name__', typ)
+    if '.' in rv:
+        rv = rv.split('.')[-1]
+    if short:
+        rv = alias.get(rv, rv)
+    return rv
+
+
+def _replace(reps):
+    """Return a function that can make the replacements, given in
+    ``reps``, on a string. If ``reps`` is empty, the string is
+    returned unchanged.
+
+    Examples
+    ========
+
+    >>> from sympy.utilities.misc import _replace
+    >>> f = _replace(dict(foo='bar', d='t'))
+    >>> f('food')
+    'bart'
+    >>> f = _replace({})
+    >>> f('food')
+    'food'
+    """
+    if not reps:
+        return lambda x: x
+    D = lambda match: reps[match.group(0)]
+    pattern = _re.compile("|".join(
+        [_re.escape(k) for k, v in reps.items()]), _re.M)
+    return lambda string: pattern.sub(D, string)
+
+
+def replace(string, *reps):
+    """Return ``string`` with all keys in ``reps`` replaced with
+    their corresponding values, longer strings first, irrespective
+    of the order they are given.  ``reps`` may be passed as tuples
+    or a single mapping.
+
+    Examples
+    ========
+
+    >>> from sympy.utilities.misc import replace
+    >>> replace('foo', {'oo': 'ar', 'f': 'b'})
+    'bar'
+    >>> replace("spamham sha", ("spam", "eggs"), ("sha","md5"))
+    'eggsham md5'
+
+    There is no guarantee that a unique answer will be
+    obtained if keys in a mapping overlap (i.e. are the same
+    length and have some identical sequence at the
+    beginning/end):
+
+    >>> reps = [
+    ...     ('ab', 'x'),
+    ...     ('bc', 'y')]
+    >>> replace('abc', *reps) in ('xc', 'ay')
+    True
+
+    References
+    ==========
+
+    .. [1] https://stackoverflow.com/questions/6116978/how-to-replace-multiple-substrings-of-a-string
+    """
+    if len(reps) == 1:
+        kv = reps[0]
+        if isinstance(kv, dict):
+            reps = kv
+        else:
+            return string.replace(*kv)
+    else:
+        reps = dict(reps)
+    return _replace(reps)(string)
+
+
+def translate(s, a, b=None, c=None):
+    """Return ``s`` where characters have been replaced or deleted.
+
+    SYNTAX
+    ======
+
+    translate(s, None, deletechars):
+        all characters in ``deletechars`` are deleted
+    translate(s, map [,deletechars]):
+        all characters in ``deletechars`` (if provided) are deleted
+        then the replacements defined by map are made; if the keys
+        of map are strings then the longer ones are handled first.
+        Multicharacter deletions should have a value of ''.
+ translate(s, oldchars, newchars, deletechars) + all characters in ``deletechars`` are deleted + then each character in ``oldchars`` is replaced with the + corresponding character in ``newchars`` + + Examples + ======== + + >>> from sympy.utilities.misc import translate + >>> abc = 'abc' + >>> translate(abc, None, 'a') + 'bc' + >>> translate(abc, {'a': 'x'}, 'c') + 'xb' + >>> translate(abc, {'abc': 'x', 'a': 'y'}) + 'x' + + >>> translate('abcd', 'ac', 'AC', 'd') + 'AbC' + + There is no guarantee that a unique answer will be + obtained if keys in a mapping overlap are the same + length and have some identical sequences at the + beginning/end: + + >>> translate(abc, {'ab': 'x', 'bc': 'y'}) in ('xc', 'ay') + True + """ + + mr = {} + if a is None: + if c is not None: + raise ValueError('c should be None when a=None is passed, instead got %s' % c) + if b is None: + return s + c = b + a = b = '' + else: + if isinstance(a, dict): + short = {} + for k in list(a.keys()): + if len(k) == 1 and len(a[k]) == 1: + short[k] = a.pop(k) + mr = a + c = b + if short: + a, b = [''.join(i) for i in list(zip(*short.items()))] + else: + a = b = '' + elif len(a) != len(b): + raise ValueError('oldchars and newchars have different lengths') + + if c: + val = str.maketrans('', '', c) + s = s.translate(val) + s = replace(s, mr) + n = str.maketrans(a, b) + return s.translate(n) + + +def ordinal(num): + """Return ordinal number string of num, e.g. 1 becomes 1st. + """ + # modified from https://codereview.stackexchange.com/questions/41298/producing-ordinal-numbers + n = as_int(num) + k = abs(n) % 100 + if 11 <= k <= 13: + suffix = 'th' + elif k % 10 == 1: + suffix = 'st' + elif k % 10 == 2: + suffix = 'nd' + elif k % 10 == 3: + suffix = 'rd' + else: + suffix = 'th' + return str(n) + suffix + + +def as_int(n, strict=True): + """ + Convert the argument to a builtin integer. + + The return value is guaranteed to be equal to the input. ValueError is + raised if the input has a non-integral value. When ``strict`` is True, this + uses `__index__ `_ + and when it is False it uses ``int``. + + + Examples + ======== + + >>> from sympy.utilities.misc import as_int + >>> from sympy import sqrt, S + + The function is primarily concerned with sanitizing input for + functions that need to work with builtin integers, so anything that + is unambiguously an integer should be returned as an int: + + >>> as_int(S(3)) + 3 + + Floats, being of limited precision, are not assumed to be exact and + will raise an error unless the ``strict`` flag is False. This + precision issue becomes apparent for large floating point numbers: + + >>> big = 1e23 + >>> type(big) is float + True + >>> big == int(big) + True + >>> as_int(big) + Traceback (most recent call last): + ... + ValueError: ... is not an integer + >>> as_int(big, strict=False) + 99999999999999991611392 + + Input that might be a complex representation of an integer value is + also rejected by default: + + >>> one = sqrt(3 + 2*sqrt(2)) - sqrt(2) + >>> int(one) == 1 + True + >>> as_int(one) + Traceback (most recent call last): + ... + ValueError: ... 
is not an integer + """ + if strict: + try: + if isinstance(n, bool): + raise TypeError + return operator.index(n) + except TypeError: + raise ValueError('%s is not an integer' % (n,)) + else: + try: + result = int(n) + except TypeError: + raise ValueError('%s is not an integer' % (n,)) + if n != result: + raise ValueError('%s is not an integer' % (n,)) + return result diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/pkgdata.py b/venv/lib/python3.10/site-packages/sympy/utilities/pkgdata.py new file mode 100644 index 0000000000000000000000000000000000000000..ee568147c34e908952682b3351c8c364862a3a89 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/pkgdata.py @@ -0,0 +1,56 @@ +""" +pkgdata is a simple, extensible way for a package to acquire data file +resources. + +The getResource function is equivalent to the standard idioms, such as +the following minimal implementation:: + + import sys, os + + def getResource(identifier, pkgname=__name__): + pkgpath = os.path.dirname(sys.modules[pkgname].__file__) + path = os.path.join(pkgpath, identifier) + return open(os.path.normpath(path), mode='rb') + +When a __loader__ is present on the module given by __name__, it will defer +getResource to its get_data implementation and return it as a file-like +object (such as StringIO). +""" + +import sys +import os +from io import StringIO + + +def get_resource(identifier, pkgname=__name__): + """ + Acquire a readable object for a given package name and identifier. + An IOError will be raised if the resource cannot be found. + + For example:: + + mydata = get_resource('mypkgdata.jpg').read() + + Note that the package name must be fully qualified, if given, such + that it would be found in sys.modules. + + In some cases, getResource will return a real file object. In that + case, it may be useful to use its name attribute to get the path + rather than use it as a file-like object. For example, you may + be handing data off to a C API. + """ + + mod = sys.modules[pkgname] + fn = getattr(mod, '__file__', None) + if fn is None: + raise OSError("%r has no __file__!") + path = os.path.join(os.path.dirname(fn), identifier) + loader = getattr(mod, '__loader__', None) + if loader is not None: + try: + data = loader.get_data(path) + except (OSError, AttributeError): + pass + else: + return StringIO(data.decode('utf-8')) + return open(os.path.normpath(path), 'rb') diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/pytest.py b/venv/lib/python3.10/site-packages/sympy/utilities/pytest.py new file mode 100644 index 0000000000000000000000000000000000000000..42195f5b2ba4eae320208259c681e49e13cdbbd7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/pytest.py @@ -0,0 +1,12 @@ +""" +.. deprecated:: 1.6 + + sympy.utilities.pytest has been renamed to sympy.testing.pytest. +""" +from sympy.utilities.exceptions import sympy_deprecation_warning + +sympy_deprecation_warning("The sympy.utilities.pytest submodule is deprecated. Use sympy.testing.pytest instead.", + deprecated_since_version="1.6", + active_deprecations_target="deprecated-sympy-utilities-submodules") + +from sympy.testing.pytest import * # noqa:F401 diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/randtest.py b/venv/lib/python3.10/site-packages/sympy/utilities/randtest.py new file mode 100644 index 0000000000000000000000000000000000000000..aa7d8b5275a77e70e3bb6e5662f380b819edaa5b --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/randtest.py @@ -0,0 +1,12 @@ +""" +.. 
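A short sketch of how the get_resource helper from pkgdata.py above would typically be called; the identifier 'version.py' is purely illustrative and may not exist in an installed package, which is why the lookup is guarded:

    from sympy.utilities.pkgdata import get_resource

    # get_resource resolves the identifier relative to the package directory
    # (or via the module's __loader__.get_data when one is present) and raises
    # an OSError when the resource cannot be found.
    try:
        data = get_resource('version.py', pkgname='sympy').read()
    except OSError:
        data = None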
deprecated:: 1.6 + + sympy.utilities.randtest has been renamed to sympy.core.random. +""" +from sympy.utilities.exceptions import sympy_deprecation_warning + +sympy_deprecation_warning("The sympy.utilities.randtest submodule is deprecated. Use sympy.core.random instead.", + deprecated_since_version="1.6", + active_deprecations_target="deprecated-sympy-utilities-submodules") + +from sympy.core.random import * # noqa:F401 diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/runtests.py b/venv/lib/python3.10/site-packages/sympy/utilities/runtests.py new file mode 100644 index 0000000000000000000000000000000000000000..62dc195c88596ad9b894d3db9a727d7917340bfa --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/runtests.py @@ -0,0 +1,13 @@ +""" +.. deprecated:: 1.6 + + sympy.utilities.runtests has been renamed to sympy.testing.runtests. +""" + +from sympy.utilities.exceptions import sympy_deprecation_warning + +sympy_deprecation_warning("The sympy.utilities.runtests submodule is deprecated. Use sympy.testing.runtests instead.", + deprecated_since_version="1.6", + active_deprecations_target="deprecated-sympy-utilities-submodules") + +from sympy.testing.runtests import * # noqa:F401 diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/source.py b/venv/lib/python3.10/site-packages/sympy/utilities/source.py new file mode 100644 index 0000000000000000000000000000000000000000..71692b4aaad07df70b63d7e1eaa86c402ad03c4f --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/source.py @@ -0,0 +1,40 @@ +""" +This module adds several functions for interactive source code inspection. +""" + + +def get_class(lookup_view): + """ + Convert a string version of a class name to the object. + + For example, get_class('sympy.core.Basic') will return + class Basic located in module sympy.core + """ + if isinstance(lookup_view, str): + mod_name, func_name = get_mod_func(lookup_view) + if func_name != '': + lookup_view = getattr( + __import__(mod_name, {}, {}, ['*']), func_name) + if not callable(lookup_view): + raise AttributeError( + "'%s.%s' is not a callable." % (mod_name, func_name)) + return lookup_view + + +def get_mod_func(callback): + """ + splits the string path to a class into a string path to the module + and the name of the class. 
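To make the relationship between the two helpers in source.py concrete, a small sketch assuming only the behaviour described in their docstrings (get_mod_func splits the dotted path, get_class imports the module and returns the named attribute):

    from sympy.utilities.source import get_class, get_mod_func
    from sympy.core.basic import Basic

    # get_mod_func only splits the path into (module, name)...
    assert get_mod_func('sympy.core.basic.Basic') == ('sympy.core.basic', 'Basic')
    # ...while get_class goes one step further and returns the object itself.
    assert get_class('sympy.core.basic.Basic') is Basic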
+ + Examples + ======== + + >>> from sympy.utilities.source import get_mod_func + >>> get_mod_func('sympy.core.basic.Basic') + ('sympy.core.basic', 'Basic') + + """ + dot = callback.rfind('.') + if dot == -1: + return callback, '' + return callback[:dot], callback[dot + 1:] diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/__init__.py b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bde2ded283414ce013139019264917b23e5aa1bf Binary files /dev/null and b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_autowrap.cpython-310.pyc b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_autowrap.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d040c5d7f36a0a957dcc3ad85c756a2a2ebbfae Binary files /dev/null and b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_autowrap.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_codegen.cpython-310.pyc b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_codegen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10107cd410c6c74bf3929e70d70866faaa4fa50a Binary files /dev/null and b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_codegen.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_codegen_julia.cpython-310.pyc b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_codegen_julia.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b663aa5131e40717192ca9e6879d552ba99f72fd Binary files /dev/null and b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_codegen_julia.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_codegen_octave.cpython-310.pyc b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_codegen_octave.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1435935f5d878743dfa791b1d9a82de0e24d884a Binary files /dev/null and b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_codegen_octave.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_codegen_rust.cpython-310.pyc b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_codegen_rust.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e244a73733e569a8367c10fa10cdaccb99a33cc Binary files /dev/null and b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_codegen_rust.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_decorator.cpython-310.pyc b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_decorator.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..884fb56fdfa025e2814a271268f6eca2da8c120d Binary files /dev/null and b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_decorator.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_deprecated.cpython-310.pyc b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_deprecated.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..512ba829a1a4209db782e81ede44721fbeaf0a32 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_deprecated.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_enumerative.cpython-310.pyc b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_enumerative.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f10d49ea0128097ebe317eb3e54b6eb9a31d4de Binary files /dev/null and b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_enumerative.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_exceptions.cpython-310.pyc b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3ee2ae6ad35929edef943c804370758bfc421af Binary files /dev/null and b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_exceptions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_iterables.cpython-310.pyc b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_iterables.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e59b7c00717f0ed408b64133c2575ae58790752f Binary files /dev/null and b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_iterables.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_lambdify.cpython-310.pyc b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_lambdify.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..376627a7978779c26eae6f546e9c27b9ca4346ef Binary files /dev/null and b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_lambdify.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_matchpy_connector.cpython-310.pyc b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_matchpy_connector.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..806cbfa5e00d60afb8785d48960daaa28b146b06 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_matchpy_connector.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_mathml.cpython-310.pyc b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_mathml.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93a57c0dea343fe8c72e15332d36911bb1e4f5e5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_mathml.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_misc.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_misc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a21bf91161b3421598833e0ef7d8797ca7032b8a Binary files /dev/null and b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_misc.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_pickling.cpython-310.pyc b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_pickling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b4a05cb8d2fbc68fbb88c06246a38ff5c1e1e9f4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_pickling.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_source.cpython-310.pyc b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_source.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e764ec95f29e3a30996de495ed83665448b02446 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_source.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_timeutils.cpython-310.pyc b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_timeutils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3c4d37a81cf900c126fbeab355a07607f345f9f Binary files /dev/null and b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_timeutils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_wester.cpython-310.pyc b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_wester.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d35cc1b992e1aa88b2d91359827d488653fc5cc7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_wester.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_xxe.cpython-310.pyc b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_xxe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7743d1462eb96842a60825a5b9e4621acb51132d Binary files /dev/null and b/venv/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_xxe.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_autowrap.py b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_autowrap.py new file mode 100644 index 0000000000000000000000000000000000000000..2d6d1796d5a7e6f2b3cf793f31d34f465949a211 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_autowrap.py @@ -0,0 +1,461 @@ +# Tests that require installed backends go into +# sympy/test_external/test_autowrap + +import os +import tempfile +import shutil +from io import StringIO + +from sympy.core import symbols, Eq +from sympy.utilities.autowrap import (autowrap, binary_function, + CythonCodeWrapper, UfuncifyCodeWrapper, CodeWrapper) +from sympy.utilities.codegen import ( + CCodeGen, C99CodeGen, CodeGenArgumentListError, make_routine +) +from sympy.testing.pytest import raises +from sympy.testing.tmpfiles import TmpFileManager + + +def get_string(dump_fn, routines, prefix="file", **kwargs): + 
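For orientation before the individual tests, a compact sketch of the 'dummy' autowrap backend they rely on; it mirrors test_autowrap_dummy below and compiles nothing, it only records how the expression would be wrapped:

    from sympy.abc import x, y
    from sympy.utilities.autowrap import autowrap

    f = autowrap(x + y, backend='dummy')
    assert f() == str(x + y)   # the dummy wrapper returns the printed expression
    assert f.args == "x, y"    # arguments are inferred from the free symbols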
"""Wrapper for dump_fn. dump_fn writes its results to a stream object and + this wrapper returns the contents of that stream as a string. This + auxiliary function is used by many tests below. + + The header and the empty lines are not generator to facilitate the + testing of the output. + """ + output = StringIO() + dump_fn(routines, output, prefix, **kwargs) + source = output.getvalue() + output.close() + return source + + +def test_cython_wrapper_scalar_function(): + x, y, z = symbols('x,y,z') + expr = (x + y)*z + routine = make_routine("test", expr) + code_gen = CythonCodeWrapper(CCodeGen()) + source = get_string(code_gen.dump_pyx, [routine]) + + expected = ( + "cdef extern from 'file.h':\n" + " double test(double x, double y, double z)\n" + "\n" + "def test_c(double x, double y, double z):\n" + "\n" + " return test(x, y, z)") + assert source == expected + + +def test_cython_wrapper_outarg(): + from sympy.core.relational import Equality + x, y, z = symbols('x,y,z') + code_gen = CythonCodeWrapper(C99CodeGen()) + + routine = make_routine("test", Equality(z, x + y)) + source = get_string(code_gen.dump_pyx, [routine]) + expected = ( + "cdef extern from 'file.h':\n" + " void test(double x, double y, double *z)\n" + "\n" + "def test_c(double x, double y):\n" + "\n" + " cdef double z = 0\n" + " test(x, y, &z)\n" + " return z") + assert source == expected + + +def test_cython_wrapper_inoutarg(): + from sympy.core.relational import Equality + x, y, z = symbols('x,y,z') + code_gen = CythonCodeWrapper(C99CodeGen()) + routine = make_routine("test", Equality(z, x + y + z)) + source = get_string(code_gen.dump_pyx, [routine]) + expected = ( + "cdef extern from 'file.h':\n" + " void test(double x, double y, double *z)\n" + "\n" + "def test_c(double x, double y, double z):\n" + "\n" + " test(x, y, &z)\n" + " return z") + assert source == expected + + +def test_cython_wrapper_compile_flags(): + from sympy.core.relational import Equality + x, y, z = symbols('x,y,z') + routine = make_routine("test", Equality(z, x + y)) + + code_gen = CythonCodeWrapper(CCodeGen()) + + expected = """\ +from setuptools import setup +from setuptools import Extension +from Cython.Build import cythonize +cy_opts = {'compiler_directives': {'language_level': '3'}} + +ext_mods = [Extension( + 'wrapper_module_%(num)s', ['wrapper_module_%(num)s.pyx', 'wrapped_code_%(num)s.c'], + include_dirs=[], + library_dirs=[], + libraries=[], + extra_compile_args=['-std=c99'], + extra_link_args=[] +)] +setup(ext_modules=cythonize(ext_mods, **cy_opts)) +""" % {'num': CodeWrapper._module_counter} + + temp_dir = tempfile.mkdtemp() + TmpFileManager.tmp_folder(temp_dir) + setup_file_path = os.path.join(temp_dir, 'setup.py') + + code_gen._prepare_files(routine, build_dir=temp_dir) + with open(setup_file_path) as f: + setup_text = f.read() + assert setup_text == expected + + code_gen = CythonCodeWrapper(CCodeGen(), + include_dirs=['/usr/local/include', '/opt/booger/include'], + library_dirs=['/user/local/lib'], + libraries=['thelib', 'nilib'], + extra_compile_args=['-slow-math'], + extra_link_args=['-lswamp', '-ltrident'], + cythonize_options={'compiler_directives': {'boundscheck': False}} + ) + expected = """\ +from setuptools import setup +from setuptools import Extension +from Cython.Build import cythonize +cy_opts = {'compiler_directives': {'boundscheck': False}} + +ext_mods = [Extension( + 'wrapper_module_%(num)s', ['wrapper_module_%(num)s.pyx', 'wrapped_code_%(num)s.c'], + include_dirs=['/usr/local/include', '/opt/booger/include'], + 
library_dirs=['/user/local/lib'], + libraries=['thelib', 'nilib'], + extra_compile_args=['-slow-math', '-std=c99'], + extra_link_args=['-lswamp', '-ltrident'] +)] +setup(ext_modules=cythonize(ext_mods, **cy_opts)) +""" % {'num': CodeWrapper._module_counter} + + code_gen._prepare_files(routine, build_dir=temp_dir) + with open(setup_file_path) as f: + setup_text = f.read() + assert setup_text == expected + + expected = """\ +from setuptools import setup +from setuptools import Extension +from Cython.Build import cythonize +cy_opts = {'compiler_directives': {'boundscheck': False}} +import numpy as np + +ext_mods = [Extension( + 'wrapper_module_%(num)s', ['wrapper_module_%(num)s.pyx', 'wrapped_code_%(num)s.c'], + include_dirs=['/usr/local/include', '/opt/booger/include', np.get_include()], + library_dirs=['/user/local/lib'], + libraries=['thelib', 'nilib'], + extra_compile_args=['-slow-math', '-std=c99'], + extra_link_args=['-lswamp', '-ltrident'] +)] +setup(ext_modules=cythonize(ext_mods, **cy_opts)) +""" % {'num': CodeWrapper._module_counter} + + code_gen._need_numpy = True + code_gen._prepare_files(routine, build_dir=temp_dir) + with open(setup_file_path) as f: + setup_text = f.read() + assert setup_text == expected + + TmpFileManager.cleanup() + +def test_cython_wrapper_unique_dummyvars(): + from sympy.core.relational import Equality + from sympy.core.symbol import Dummy + x, y, z = Dummy('x'), Dummy('y'), Dummy('z') + x_id, y_id, z_id = [str(d.dummy_index) for d in [x, y, z]] + expr = Equality(z, x + y) + routine = make_routine("test", expr) + code_gen = CythonCodeWrapper(CCodeGen()) + source = get_string(code_gen.dump_pyx, [routine]) + expected_template = ( + "cdef extern from 'file.h':\n" + " void test(double x_{x_id}, double y_{y_id}, double *z_{z_id})\n" + "\n" + "def test_c(double x_{x_id}, double y_{y_id}):\n" + "\n" + " cdef double z_{z_id} = 0\n" + " test(x_{x_id}, y_{y_id}, &z_{z_id})\n" + " return z_{z_id}") + expected = expected_template.format(x_id=x_id, y_id=y_id, z_id=z_id) + assert source == expected + +def test_autowrap_dummy(): + x, y, z = symbols('x y z') + + # Uses DummyWrapper to test that codegen works as expected + + f = autowrap(x + y, backend='dummy') + assert f() == str(x + y) + assert f.args == "x, y" + assert f.returns == "nameless" + f = autowrap(Eq(z, x + y), backend='dummy') + assert f() == str(x + y) + assert f.args == "x, y" + assert f.returns == "z" + f = autowrap(Eq(z, x + y + z), backend='dummy') + assert f() == str(x + y + z) + assert f.args == "x, y, z" + assert f.returns == "z" + + +def test_autowrap_args(): + x, y, z = symbols('x y z') + + raises(CodeGenArgumentListError, lambda: autowrap(Eq(z, x + y), + backend='dummy', args=[x])) + f = autowrap(Eq(z, x + y), backend='dummy', args=[y, x]) + assert f() == str(x + y) + assert f.args == "y, x" + assert f.returns == "z" + + raises(CodeGenArgumentListError, lambda: autowrap(Eq(z, x + y + z), + backend='dummy', args=[x, y])) + f = autowrap(Eq(z, x + y + z), backend='dummy', args=[y, x, z]) + assert f() == str(x + y + z) + assert f.args == "y, x, z" + assert f.returns == "z" + + f = autowrap(Eq(z, x + y + z), backend='dummy', args=(y, x, z)) + assert f() == str(x + y + z) + assert f.args == "y, x, z" + assert f.returns == "z" + +def test_autowrap_store_files(): + x, y = symbols('x y') + tmp = tempfile.mkdtemp() + TmpFileManager.tmp_folder(tmp) + + f = autowrap(x + y, backend='dummy', tempdir=tmp) + assert f() == str(x + y) + assert os.access(tmp, os.F_OK) + + TmpFileManager.cleanup() + +def 
test_autowrap_store_files_issue_gh12939(): + x, y = symbols('x y') + tmp = './tmp' + saved_cwd = os.getcwd() + temp_cwd = tempfile.mkdtemp() + try: + os.chdir(temp_cwd) + f = autowrap(x + y, backend='dummy', tempdir=tmp) + assert f() == str(x + y) + assert os.access(tmp, os.F_OK) + finally: + os.chdir(saved_cwd) + shutil.rmtree(temp_cwd) + + +def test_binary_function(): + x, y = symbols('x y') + f = binary_function('f', x + y, backend='dummy') + assert f._imp_() == str(x + y) + + +def test_ufuncify_source(): + x, y, z = symbols('x,y,z') + code_wrapper = UfuncifyCodeWrapper(C99CodeGen("ufuncify")) + routine = make_routine("test", x + y + z) + source = get_string(code_wrapper.dump_c, [routine]) + expected = """\ +#include "Python.h" +#include "math.h" +#include "numpy/ndarraytypes.h" +#include "numpy/ufuncobject.h" +#include "numpy/halffloat.h" +#include "file.h" + +static PyMethodDef wrapper_module_%(num)sMethods[] = { + {NULL, NULL, 0, NULL} +}; + +static void test_ufunc(char **args, npy_intp *dimensions, npy_intp* steps, void* data) +{ + npy_intp i; + npy_intp n = dimensions[0]; + char *in0 = args[0]; + char *in1 = args[1]; + char *in2 = args[2]; + char *out0 = args[3]; + npy_intp in0_step = steps[0]; + npy_intp in1_step = steps[1]; + npy_intp in2_step = steps[2]; + npy_intp out0_step = steps[3]; + for (i = 0; i < n; i++) { + *((double *)out0) = test(*(double *)in0, *(double *)in1, *(double *)in2); + in0 += in0_step; + in1 += in1_step; + in2 += in2_step; + out0 += out0_step; + } +} +PyUFuncGenericFunction test_funcs[1] = {&test_ufunc}; +static char test_types[4] = {NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE}; +static void *test_data[1] = {NULL}; + +#if PY_VERSION_HEX >= 0x03000000 +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "wrapper_module_%(num)s", + NULL, + -1, + wrapper_module_%(num)sMethods, + NULL, + NULL, + NULL, + NULL +}; + +PyMODINIT_FUNC PyInit_wrapper_module_%(num)s(void) +{ + PyObject *m, *d; + PyObject *ufunc0; + m = PyModule_Create(&moduledef); + if (!m) { + return NULL; + } + import_array(); + import_umath(); + d = PyModule_GetDict(m); + ufunc0 = PyUFunc_FromFuncAndData(test_funcs, test_data, test_types, 1, 3, 1, + PyUFunc_None, "wrapper_module_%(num)s", "Created in SymPy with Ufuncify", 0); + PyDict_SetItemString(d, "test", ufunc0); + Py_DECREF(ufunc0); + return m; +} +#else +PyMODINIT_FUNC initwrapper_module_%(num)s(void) +{ + PyObject *m, *d; + PyObject *ufunc0; + m = Py_InitModule("wrapper_module_%(num)s", wrapper_module_%(num)sMethods); + if (m == NULL) { + return; + } + import_array(); + import_umath(); + d = PyModule_GetDict(m); + ufunc0 = PyUFunc_FromFuncAndData(test_funcs, test_data, test_types, 1, 3, 1, + PyUFunc_None, "wrapper_module_%(num)s", "Created in SymPy with Ufuncify", 0); + PyDict_SetItemString(d, "test", ufunc0); + Py_DECREF(ufunc0); +} +#endif""" % {'num': CodeWrapper._module_counter} + assert source == expected + + +def test_ufuncify_source_multioutput(): + x, y, z = symbols('x,y,z') + var_symbols = (x, y, z) + expr = x + y**3 + 10*z**2 + code_wrapper = UfuncifyCodeWrapper(C99CodeGen("ufuncify")) + routines = [make_routine("func{}".format(i), expr.diff(var_symbols[i]), var_symbols) for i in range(len(var_symbols))] + source = get_string(code_wrapper.dump_c, routines, funcname='multitest') + expected = """\ +#include "Python.h" +#include "math.h" +#include "numpy/ndarraytypes.h" +#include "numpy/ufuncobject.h" +#include "numpy/halffloat.h" +#include "file.h" + +static PyMethodDef wrapper_module_%(num)sMethods[] = { + {NULL, 
NULL, 0, NULL} +}; + +static void multitest_ufunc(char **args, npy_intp *dimensions, npy_intp* steps, void* data) +{ + npy_intp i; + npy_intp n = dimensions[0]; + char *in0 = args[0]; + char *in1 = args[1]; + char *in2 = args[2]; + char *out0 = args[3]; + char *out1 = args[4]; + char *out2 = args[5]; + npy_intp in0_step = steps[0]; + npy_intp in1_step = steps[1]; + npy_intp in2_step = steps[2]; + npy_intp out0_step = steps[3]; + npy_intp out1_step = steps[4]; + npy_intp out2_step = steps[5]; + for (i = 0; i < n; i++) { + *((double *)out0) = func0(*(double *)in0, *(double *)in1, *(double *)in2); + *((double *)out1) = func1(*(double *)in0, *(double *)in1, *(double *)in2); + *((double *)out2) = func2(*(double *)in0, *(double *)in1, *(double *)in2); + in0 += in0_step; + in1 += in1_step; + in2 += in2_step; + out0 += out0_step; + out1 += out1_step; + out2 += out2_step; + } +} +PyUFuncGenericFunction multitest_funcs[1] = {&multitest_ufunc}; +static char multitest_types[6] = {NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE}; +static void *multitest_data[1] = {NULL}; + +#if PY_VERSION_HEX >= 0x03000000 +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "wrapper_module_%(num)s", + NULL, + -1, + wrapper_module_%(num)sMethods, + NULL, + NULL, + NULL, + NULL +}; + +PyMODINIT_FUNC PyInit_wrapper_module_%(num)s(void) +{ + PyObject *m, *d; + PyObject *ufunc0; + m = PyModule_Create(&moduledef); + if (!m) { + return NULL; + } + import_array(); + import_umath(); + d = PyModule_GetDict(m); + ufunc0 = PyUFunc_FromFuncAndData(multitest_funcs, multitest_data, multitest_types, 1, 3, 3, + PyUFunc_None, "wrapper_module_%(num)s", "Created in SymPy with Ufuncify", 0); + PyDict_SetItemString(d, "multitest", ufunc0); + Py_DECREF(ufunc0); + return m; +} +#else +PyMODINIT_FUNC initwrapper_module_%(num)s(void) +{ + PyObject *m, *d; + PyObject *ufunc0; + m = Py_InitModule("wrapper_module_%(num)s", wrapper_module_%(num)sMethods); + if (m == NULL) { + return; + } + import_array(); + import_umath(); + d = PyModule_GetDict(m); + ufunc0 = PyUFunc_FromFuncAndData(multitest_funcs, multitest_data, multitest_types, 1, 3, 3, + PyUFunc_None, "wrapper_module_%(num)s", "Created in SymPy with Ufuncify", 0); + PyDict_SetItemString(d, "multitest", ufunc0); + Py_DECREF(ufunc0); +} +#endif""" % {'num': CodeWrapper._module_counter} + assert source == expected diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_codegen.py b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_codegen.py new file mode 100644 index 0000000000000000000000000000000000000000..f90d5eaa68b34d056e36e2e10242d6818abd7d8d --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_codegen.py @@ -0,0 +1,1613 @@ +from io import StringIO + +from sympy.core import symbols, Eq, pi, Catalan, Lambda, Dummy +from sympy.core.relational import Equality +from sympy.core.symbol import Symbol +from sympy.functions.special.error_functions import erf +from sympy.integrals.integrals import Integral +from sympy.matrices import Matrix, MatrixSymbol +from sympy.utilities.codegen import ( + codegen, make_routine, CCodeGen, C89CodeGen, C99CodeGen, InputArgument, + CodeGenError, FCodeGen, CodeGenArgumentListError, OutputArgument, + InOutArgument) +from sympy.testing.pytest import raises +from sympy.utilities.lambdify import implemented_function + +#FIXME: Fails due to circular import in with core +# from sympy import codegen + + +def get_string(dump_fn, routines, prefix="file", header=False, 
empty=False): + """Wrapper for dump_fn. dump_fn writes its results to a stream object and + this wrapper returns the contents of that stream as a string. This + auxiliary function is used by many tests below. + + The header and the empty lines are not generated to facilitate the + testing of the output. + """ + output = StringIO() + dump_fn(routines, output, prefix, header, empty) + source = output.getvalue() + output.close() + return source + + +def test_Routine_argument_order(): + a, x, y, z = symbols('a x y z') + expr = (x + y)*z + raises(CodeGenArgumentListError, lambda: make_routine("test", expr, + argument_sequence=[z, x])) + raises(CodeGenArgumentListError, lambda: make_routine("test", Eq(a, + expr), argument_sequence=[z, x, y])) + r = make_routine('test', Eq(a, expr), argument_sequence=[z, x, a, y]) + assert [ arg.name for arg in r.arguments ] == [z, x, a, y] + assert [ type(arg) for arg in r.arguments ] == [ + InputArgument, InputArgument, OutputArgument, InputArgument ] + r = make_routine('test', Eq(z, expr), argument_sequence=[z, x, y]) + assert [ type(arg) for arg in r.arguments ] == [ + InOutArgument, InputArgument, InputArgument ] + + from sympy.tensor import IndexedBase, Idx + A, B = map(IndexedBase, ['A', 'B']) + m = symbols('m', integer=True) + i = Idx('i', m) + r = make_routine('test', Eq(A[i], B[i]), argument_sequence=[B, A, m]) + assert [ arg.name for arg in r.arguments ] == [B.label, A.label, m] + + expr = Integral(x*y*z, (x, 1, 2), (y, 1, 3)) + r = make_routine('test', Eq(a, expr), argument_sequence=[z, x, a, y]) + assert [ arg.name for arg in r.arguments ] == [z, x, a, y] + + +def test_empty_c_code(): + code_gen = C89CodeGen() + source = get_string(code_gen.dump_c, []) + assert source == "#include \"file.h\"\n#include \n" + + +def test_empty_c_code_with_comment(): + code_gen = C89CodeGen() + source = get_string(code_gen.dump_c, [], header=True) + assert source[:82] == ( + "/******************************************************************************\n *" + ) + # " Code generated with SymPy 0.7.2-git " + assert source[158:] == ( "*\n" + " * *\n" + " * See http://www.sympy.org/ for more information. 
*\n" + " * *\n" + " * This file is part of 'project' *\n" + " ******************************************************************************/\n" + "#include \"file.h\"\n" + "#include \n" + ) + + +def test_empty_c_header(): + code_gen = C99CodeGen() + source = get_string(code_gen.dump_h, []) + assert source == "#ifndef PROJECT__FILE__H\n#define PROJECT__FILE__H\n#endif\n" + + +def test_simple_c_code(): + x, y, z = symbols('x,y,z') + expr = (x + y)*z + routine = make_routine("test", expr) + code_gen = C89CodeGen() + source = get_string(code_gen.dump_c, [routine]) + expected = ( + "#include \"file.h\"\n" + "#include \n" + "double test(double x, double y, double z) {\n" + " double test_result;\n" + " test_result = z*(x + y);\n" + " return test_result;\n" + "}\n" + ) + assert source == expected + + +def test_c_code_reserved_words(): + x, y, z = symbols('if, typedef, while') + expr = (x + y) * z + routine = make_routine("test", expr) + code_gen = C99CodeGen() + source = get_string(code_gen.dump_c, [routine]) + expected = ( + "#include \"file.h\"\n" + "#include \n" + "double test(double if_, double typedef_, double while_) {\n" + " double test_result;\n" + " test_result = while_*(if_ + typedef_);\n" + " return test_result;\n" + "}\n" + ) + assert source == expected + + +def test_numbersymbol_c_code(): + routine = make_routine("test", pi**Catalan) + code_gen = C89CodeGen() + source = get_string(code_gen.dump_c, [routine]) + expected = ( + "#include \"file.h\"\n" + "#include \n" + "double test() {\n" + " double test_result;\n" + " double const Catalan = %s;\n" + " test_result = pow(M_PI, Catalan);\n" + " return test_result;\n" + "}\n" + ) % Catalan.evalf(17) + assert source == expected + + +def test_c_code_argument_order(): + x, y, z = symbols('x,y,z') + expr = x + y + routine = make_routine("test", expr, argument_sequence=[z, x, y]) + code_gen = C89CodeGen() + source = get_string(code_gen.dump_c, [routine]) + expected = ( + "#include \"file.h\"\n" + "#include \n" + "double test(double z, double x, double y) {\n" + " double test_result;\n" + " test_result = x + y;\n" + " return test_result;\n" + "}\n" + ) + assert source == expected + + +def test_simple_c_header(): + x, y, z = symbols('x,y,z') + expr = (x + y)*z + routine = make_routine("test", expr) + code_gen = C89CodeGen() + source = get_string(code_gen.dump_h, [routine]) + expected = ( + "#ifndef PROJECT__FILE__H\n" + "#define PROJECT__FILE__H\n" + "double test(double x, double y, double z);\n" + "#endif\n" + ) + assert source == expected + + +def test_simple_c_codegen(): + x, y, z = symbols('x,y,z') + expr = (x + y)*z + expected = [ + ("file.c", + "#include \"file.h\"\n" + "#include \n" + "double test(double x, double y, double z) {\n" + " double test_result;\n" + " test_result = z*(x + y);\n" + " return test_result;\n" + "}\n"), + ("file.h", + "#ifndef PROJECT__FILE__H\n" + "#define PROJECT__FILE__H\n" + "double test(double x, double y, double z);\n" + "#endif\n") + ] + result = codegen(("test", expr), "C", "file", header=False, empty=False) + assert result == expected + + +def test_multiple_results_c(): + x, y, z = symbols('x,y,z') + expr1 = (x + y)*z + expr2 = (x - y)*z + routine = make_routine( + "test", + [expr1, expr2] + ) + code_gen = C99CodeGen() + raises(CodeGenError, lambda: get_string(code_gen.dump_h, [routine])) + + +def test_no_results_c(): + raises(ValueError, lambda: make_routine("test", [])) + + +def test_ansi_math1_codegen(): + # not included: log10 + from sympy.functions.elementary.complexes import Abs + from 
sympy.functions.elementary.exponential import log + from sympy.functions.elementary.hyperbolic import (cosh, sinh, tanh) + from sympy.functions.elementary.integers import (ceiling, floor) + from sympy.functions.elementary.miscellaneous import sqrt + from sympy.functions.elementary.trigonometric import (acos, asin, atan, cos, sin, tan) + x = symbols('x') + name_expr = [ + ("test_fabs", Abs(x)), + ("test_acos", acos(x)), + ("test_asin", asin(x)), + ("test_atan", atan(x)), + ("test_ceil", ceiling(x)), + ("test_cos", cos(x)), + ("test_cosh", cosh(x)), + ("test_floor", floor(x)), + ("test_log", log(x)), + ("test_ln", log(x)), + ("test_sin", sin(x)), + ("test_sinh", sinh(x)), + ("test_sqrt", sqrt(x)), + ("test_tan", tan(x)), + ("test_tanh", tanh(x)), + ] + result = codegen(name_expr, "C89", "file", header=False, empty=False) + assert result[0][0] == "file.c" + assert result[0][1] == ( + '#include "file.h"\n#include \n' + 'double test_fabs(double x) {\n double test_fabs_result;\n test_fabs_result = fabs(x);\n return test_fabs_result;\n}\n' + 'double test_acos(double x) {\n double test_acos_result;\n test_acos_result = acos(x);\n return test_acos_result;\n}\n' + 'double test_asin(double x) {\n double test_asin_result;\n test_asin_result = asin(x);\n return test_asin_result;\n}\n' + 'double test_atan(double x) {\n double test_atan_result;\n test_atan_result = atan(x);\n return test_atan_result;\n}\n' + 'double test_ceil(double x) {\n double test_ceil_result;\n test_ceil_result = ceil(x);\n return test_ceil_result;\n}\n' + 'double test_cos(double x) {\n double test_cos_result;\n test_cos_result = cos(x);\n return test_cos_result;\n}\n' + 'double test_cosh(double x) {\n double test_cosh_result;\n test_cosh_result = cosh(x);\n return test_cosh_result;\n}\n' + 'double test_floor(double x) {\n double test_floor_result;\n test_floor_result = floor(x);\n return test_floor_result;\n}\n' + 'double test_log(double x) {\n double test_log_result;\n test_log_result = log(x);\n return test_log_result;\n}\n' + 'double test_ln(double x) {\n double test_ln_result;\n test_ln_result = log(x);\n return test_ln_result;\n}\n' + 'double test_sin(double x) {\n double test_sin_result;\n test_sin_result = sin(x);\n return test_sin_result;\n}\n' + 'double test_sinh(double x) {\n double test_sinh_result;\n test_sinh_result = sinh(x);\n return test_sinh_result;\n}\n' + 'double test_sqrt(double x) {\n double test_sqrt_result;\n test_sqrt_result = sqrt(x);\n return test_sqrt_result;\n}\n' + 'double test_tan(double x) {\n double test_tan_result;\n test_tan_result = tan(x);\n return test_tan_result;\n}\n' + 'double test_tanh(double x) {\n double test_tanh_result;\n test_tanh_result = tanh(x);\n return test_tanh_result;\n}\n' + ) + assert result[1][0] == "file.h" + assert result[1][1] == ( + '#ifndef PROJECT__FILE__H\n#define PROJECT__FILE__H\n' + 'double test_fabs(double x);\ndouble test_acos(double x);\n' + 'double test_asin(double x);\ndouble test_atan(double x);\n' + 'double test_ceil(double x);\ndouble test_cos(double x);\n' + 'double test_cosh(double x);\ndouble test_floor(double x);\n' + 'double test_log(double x);\ndouble test_ln(double x);\n' + 'double test_sin(double x);\ndouble test_sinh(double x);\n' + 'double test_sqrt(double x);\ndouble test_tan(double x);\n' + 'double test_tanh(double x);\n#endif\n' + ) + + +def test_ansi_math2_codegen(): + # not included: frexp, ldexp, modf, fmod + from sympy.functions.elementary.trigonometric import atan2 + x, y = symbols('x,y') + name_expr = [ + ("test_atan2", atan2(x, y)), + 
("test_pow", x**y), + ] + result = codegen(name_expr, "C89", "file", header=False, empty=False) + assert result[0][0] == "file.c" + assert result[0][1] == ( + '#include "file.h"\n#include \n' + 'double test_atan2(double x, double y) {\n double test_atan2_result;\n test_atan2_result = atan2(x, y);\n return test_atan2_result;\n}\n' + 'double test_pow(double x, double y) {\n double test_pow_result;\n test_pow_result = pow(x, y);\n return test_pow_result;\n}\n' + ) + assert result[1][0] == "file.h" + assert result[1][1] == ( + '#ifndef PROJECT__FILE__H\n#define PROJECT__FILE__H\n' + 'double test_atan2(double x, double y);\n' + 'double test_pow(double x, double y);\n' + '#endif\n' + ) + + +def test_complicated_codegen(): + from sympy.functions.elementary.trigonometric import (cos, sin, tan) + x, y, z = symbols('x,y,z') + name_expr = [ + ("test1", ((sin(x) + cos(y) + tan(z))**7).expand()), + ("test2", cos(cos(cos(cos(cos(cos(cos(cos(x + y + z))))))))), + ] + result = codegen(name_expr, "C89", "file", header=False, empty=False) + assert result[0][0] == "file.c" + assert result[0][1] == ( + '#include "file.h"\n#include \n' + 'double test1(double x, double y, double z) {\n' + ' double test1_result;\n' + ' test1_result = ' + 'pow(sin(x), 7) + ' + '7*pow(sin(x), 6)*cos(y) + ' + '7*pow(sin(x), 6)*tan(z) + ' + '21*pow(sin(x), 5)*pow(cos(y), 2) + ' + '42*pow(sin(x), 5)*cos(y)*tan(z) + ' + '21*pow(sin(x), 5)*pow(tan(z), 2) + ' + '35*pow(sin(x), 4)*pow(cos(y), 3) + ' + '105*pow(sin(x), 4)*pow(cos(y), 2)*tan(z) + ' + '105*pow(sin(x), 4)*cos(y)*pow(tan(z), 2) + ' + '35*pow(sin(x), 4)*pow(tan(z), 3) + ' + '35*pow(sin(x), 3)*pow(cos(y), 4) + ' + '140*pow(sin(x), 3)*pow(cos(y), 3)*tan(z) + ' + '210*pow(sin(x), 3)*pow(cos(y), 2)*pow(tan(z), 2) + ' + '140*pow(sin(x), 3)*cos(y)*pow(tan(z), 3) + ' + '35*pow(sin(x), 3)*pow(tan(z), 4) + ' + '21*pow(sin(x), 2)*pow(cos(y), 5) + ' + '105*pow(sin(x), 2)*pow(cos(y), 4)*tan(z) + ' + '210*pow(sin(x), 2)*pow(cos(y), 3)*pow(tan(z), 2) + ' + '210*pow(sin(x), 2)*pow(cos(y), 2)*pow(tan(z), 3) + ' + '105*pow(sin(x), 2)*cos(y)*pow(tan(z), 4) + ' + '21*pow(sin(x), 2)*pow(tan(z), 5) + ' + '7*sin(x)*pow(cos(y), 6) + ' + '42*sin(x)*pow(cos(y), 5)*tan(z) + ' + '105*sin(x)*pow(cos(y), 4)*pow(tan(z), 2) + ' + '140*sin(x)*pow(cos(y), 3)*pow(tan(z), 3) + ' + '105*sin(x)*pow(cos(y), 2)*pow(tan(z), 4) + ' + '42*sin(x)*cos(y)*pow(tan(z), 5) + ' + '7*sin(x)*pow(tan(z), 6) + ' + 'pow(cos(y), 7) + ' + '7*pow(cos(y), 6)*tan(z) + ' + '21*pow(cos(y), 5)*pow(tan(z), 2) + ' + '35*pow(cos(y), 4)*pow(tan(z), 3) + ' + '35*pow(cos(y), 3)*pow(tan(z), 4) + ' + '21*pow(cos(y), 2)*pow(tan(z), 5) + ' + '7*cos(y)*pow(tan(z), 6) + ' + 'pow(tan(z), 7);\n' + ' return test1_result;\n' + '}\n' + 'double test2(double x, double y, double z) {\n' + ' double test2_result;\n' + ' test2_result = cos(cos(cos(cos(cos(cos(cos(cos(x + y + z))))))));\n' + ' return test2_result;\n' + '}\n' + ) + assert result[1][0] == "file.h" + assert result[1][1] == ( + '#ifndef PROJECT__FILE__H\n' + '#define PROJECT__FILE__H\n' + 'double test1(double x, double y, double z);\n' + 'double test2(double x, double y, double z);\n' + '#endif\n' + ) + + +def test_loops_c(): + from sympy.tensor import IndexedBase, Idx + from sympy.core.symbol import symbols + n, m = symbols('n m', integer=True) + A = IndexedBase('A') + x = IndexedBase('x') + y = IndexedBase('y') + i = Idx('i', m) + j = Idx('j', n) + + (f1, code), (f2, interface) = codegen( + ('matrix_vector', Eq(y[i], A[i, j]*x[j])), "C99", "file", header=False, empty=False) + + assert f1 == 
'file.c' + expected = ( + '#include "file.h"\n' + '#include \n' + 'void matrix_vector(double *A, int m, int n, double *x, double *y) {\n' + ' for (int i=0; i\n' + 'void test_dummies(int m_%(mno)i, double *x, double *y) {\n' + ' for (int i_%(ino)i=0; i_%(ino)i\n' + 'void matrix_vector(double *A, int m, int n, int o, int p, double *x, double *y) {\n' + ' for (int i=o; i<%(upperi)s; i++){\n' + ' y[i] = 0;\n' + ' }\n' + ' for (int i=o; i<%(upperi)s; i++){\n' + ' for (int j=0; j\n' + 'double foo(double x, double *y) {\n' + ' (*y) = sin(x);\n' + ' double foo_result;\n' + ' foo_result = cos(x);\n' + ' return foo_result;\n' + '}\n' + ) + assert result[0][1] == expected + + +def test_output_arg_c_reserved_words(): + from sympy.core.relational import Equality + from sympy.functions.elementary.trigonometric import (cos, sin) + x, y, z = symbols("if, while, z") + r = make_routine("foo", [Equality(y, sin(x)), cos(x)]) + c = C89CodeGen() + result = c.write([r], "test", header=False, empty=False) + assert result[0][0] == "test.c" + expected = ( + '#include "test.h"\n' + '#include \n' + 'double foo(double if_, double *while_) {\n' + ' (*while_) = sin(if_);\n' + ' double foo_result;\n' + ' foo_result = cos(if_);\n' + ' return foo_result;\n' + '}\n' + ) + assert result[0][1] == expected + + +def test_multidim_c_argument_cse(): + A_sym = MatrixSymbol('A', 3, 3) + b_sym = MatrixSymbol('b', 3, 1) + A = Matrix(A_sym) + b = Matrix(b_sym) + c = A*b + cgen = CCodeGen(project="test", cse=True) + r = cgen.routine("c", c) + r.arguments[-1].result_var = "out" + r.arguments[-1]._name = "out" + code = get_string(cgen.dump_c, [r], prefix="test") + expected = ( + '#include "test.h"\n' + "#include \n" + "void c(double *A, double *b, double *out) {\n" + " out[0] = A[0]*b[0] + A[1]*b[1] + A[2]*b[2];\n" + " out[1] = A[3]*b[0] + A[4]*b[1] + A[5]*b[2];\n" + " out[2] = A[6]*b[0] + A[7]*b[1] + A[8]*b[2];\n" + "}\n" + ) + assert code == expected + + +def test_ccode_results_named_ordered(): + x, y, z = symbols('x,y,z') + B, C = symbols('B,C') + A = MatrixSymbol('A', 1, 3) + expr1 = Equality(A, Matrix([[1, 2, x]])) + expr2 = Equality(C, (x + y)*z) + expr3 = Equality(B, 2*x) + name_expr = ("test", [expr1, expr2, expr3]) + expected = ( + '#include "test.h"\n' + '#include \n' + 'void test(double x, double *C, double z, double y, double *A, double *B) {\n' + ' (*C) = z*(x + y);\n' + ' A[0] = 1;\n' + ' A[1] = 2;\n' + ' A[2] = x;\n' + ' (*B) = 2*x;\n' + '}\n' + ) + + result = codegen(name_expr, "c", "test", header=False, empty=False, + argument_sequence=(x, C, z, y, A, B)) + source = result[0][1] + assert source == expected + + +def test_ccode_matrixsymbol_slice(): + A = MatrixSymbol('A', 5, 3) + B = MatrixSymbol('B', 1, 3) + C = MatrixSymbol('C', 1, 3) + D = MatrixSymbol('D', 5, 1) + name_expr = ("test", [Equality(B, A[0, :]), + Equality(C, A[1, :]), + Equality(D, A[:, 2])]) + result = codegen(name_expr, "c99", "test", header=False, empty=False) + source = result[0][1] + expected = ( + '#include "test.h"\n' + '#include \n' + 'void test(double *A, double *B, double *C, double *D) {\n' + ' B[0] = A[0];\n' + ' B[1] = A[1];\n' + ' B[2] = A[2];\n' + ' C[0] = A[3];\n' + ' C[1] = A[4];\n' + ' C[2] = A[5];\n' + ' D[0] = A[2];\n' + ' D[1] = A[5];\n' + ' D[2] = A[8];\n' + ' D[3] = A[11];\n' + ' D[4] = A[14];\n' + '}\n' + ) + assert source == expected + +def test_ccode_cse(): + a, b, c, d = symbols('a b c d') + e = MatrixSymbol('e', 3, 1) + name_expr = ("test", [Equality(e, Matrix([[a*b], [a*b + c*d], [a*b*c*d]]))]) + generator = CCodeGen(cse=True) 
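For orientation, a minimal end-to-end sketch of the codegen call these tests exercise; the routine name 'f' and the file prefix 'demo' are illustrative only:

    from sympy.abc import x, y
    from sympy.utilities.codegen import codegen

    # Returns a list of (filename, contents) pairs: the C source and its header.
    [(c_name, c_code), (h_name, h_code)] = codegen(
        ("f", x**2 + y**2), "C99", "demo", header=False, empty=False)
    assert c_name == "demo.c" and h_name == "demo.h"
    assert "double f(double x, double y)" in c_code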
+ result = codegen(name_expr, code_gen=generator, header=False, empty=False) + source = result[0][1] + expected = ( + '#include "test.h"\n' + '#include \n' + 'void test(double a, double b, double c, double d, double *e) {\n' + ' const double x0 = a*b;\n' + ' const double x1 = c*d;\n' + ' e[0] = x0;\n' + ' e[1] = x0 + x1;\n' + ' e[2] = x0*x1;\n' + '}\n' + ) + assert source == expected + +def test_ccode_unused_array_arg(): + x = MatrixSymbol('x', 2, 1) + # x does not appear in output + name_expr = ("test", 1.0) + generator = CCodeGen() + result = codegen(name_expr, code_gen=generator, header=False, empty=False, argument_sequence=(x,)) + source = result[0][1] + # note: x should appear as (double *) + expected = ( + '#include "test.h"\n' + '#include \n' + 'double test(double *x) {\n' + ' double test_result;\n' + ' test_result = 1.0;\n' + ' return test_result;\n' + '}\n' + ) + assert source == expected + +def test_empty_f_code(): + code_gen = FCodeGen() + source = get_string(code_gen.dump_f95, []) + assert source == "" + + +def test_empty_f_code_with_header(): + code_gen = FCodeGen() + source = get_string(code_gen.dump_f95, [], header=True) + assert source[:82] == ( + "!******************************************************************************\n!*" + ) + # " Code generated with SymPy 0.7.2-git " + assert source[158:] == ( "*\n" + "!* *\n" + "!* See http://www.sympy.org/ for more information. *\n" + "!* *\n" + "!* This file is part of 'project' *\n" + "!******************************************************************************\n" + ) + + +def test_empty_f_header(): + code_gen = FCodeGen() + source = get_string(code_gen.dump_h, []) + assert source == "" + + +def test_simple_f_code(): + x, y, z = symbols('x,y,z') + expr = (x + y)*z + routine = make_routine("test", expr) + code_gen = FCodeGen() + source = get_string(code_gen.dump_f95, [routine]) + expected = ( + "REAL*8 function test(x, y, z)\n" + "implicit none\n" + "REAL*8, intent(in) :: x\n" + "REAL*8, intent(in) :: y\n" + "REAL*8, intent(in) :: z\n" + "test = z*(x + y)\n" + "end function\n" + ) + assert source == expected + + +def test_numbersymbol_f_code(): + routine = make_routine("test", pi**Catalan) + code_gen = FCodeGen() + source = get_string(code_gen.dump_f95, [routine]) + expected = ( + "REAL*8 function test()\n" + "implicit none\n" + "REAL*8, parameter :: Catalan = %sd0\n" + "REAL*8, parameter :: pi = %sd0\n" + "test = pi**Catalan\n" + "end function\n" + ) % (Catalan.evalf(17), pi.evalf(17)) + assert source == expected + +def test_erf_f_code(): + x = symbols('x') + routine = make_routine("test", erf(x) - erf(-2 * x)) + code_gen = FCodeGen() + source = get_string(code_gen.dump_f95, [routine]) + expected = ( + "REAL*8 function test(x)\n" + "implicit none\n" + "REAL*8, intent(in) :: x\n" + "test = erf(x) + erf(2.0d0*x)\n" + "end function\n" + ) + assert source == expected, source + +def test_f_code_argument_order(): + x, y, z = symbols('x,y,z') + expr = x + y + routine = make_routine("test", expr, argument_sequence=[z, x, y]) + code_gen = FCodeGen() + source = get_string(code_gen.dump_f95, [routine]) + expected = ( + "REAL*8 function test(z, x, y)\n" + "implicit none\n" + "REAL*8, intent(in) :: z\n" + "REAL*8, intent(in) :: x\n" + "REAL*8, intent(in) :: y\n" + "test = x + y\n" + "end function\n" + ) + assert source == expected + + +def test_simple_f_header(): + x, y, z = symbols('x,y,z') + expr = (x + y)*z + routine = make_routine("test", expr) + code_gen = FCodeGen() + source = get_string(code_gen.dump_h, [routine]) + expected = 
( + "interface\n" + "REAL*8 function test(x, y, z)\n" + "implicit none\n" + "REAL*8, intent(in) :: x\n" + "REAL*8, intent(in) :: y\n" + "REAL*8, intent(in) :: z\n" + "end function\n" + "end interface\n" + ) + assert source == expected + + +def test_simple_f_codegen(): + x, y, z = symbols('x,y,z') + expr = (x + y)*z + result = codegen( + ("test", expr), "F95", "file", header=False, empty=False) + expected = [ + ("file.f90", + "REAL*8 function test(x, y, z)\n" + "implicit none\n" + "REAL*8, intent(in) :: x\n" + "REAL*8, intent(in) :: y\n" + "REAL*8, intent(in) :: z\n" + "test = z*(x + y)\n" + "end function\n"), + ("file.h", + "interface\n" + "REAL*8 function test(x, y, z)\n" + "implicit none\n" + "REAL*8, intent(in) :: x\n" + "REAL*8, intent(in) :: y\n" + "REAL*8, intent(in) :: z\n" + "end function\n" + "end interface\n") + ] + assert result == expected + + +def test_multiple_results_f(): + x, y, z = symbols('x,y,z') + expr1 = (x + y)*z + expr2 = (x - y)*z + routine = make_routine( + "test", + [expr1, expr2] + ) + code_gen = FCodeGen() + raises(CodeGenError, lambda: get_string(code_gen.dump_h, [routine])) + + +def test_no_results_f(): + raises(ValueError, lambda: make_routine("test", [])) + + +def test_intrinsic_math_codegen(): + # not included: log10 + from sympy.functions.elementary.complexes import Abs + from sympy.functions.elementary.exponential import log + from sympy.functions.elementary.hyperbolic import (cosh, sinh, tanh) + from sympy.functions.elementary.miscellaneous import sqrt + from sympy.functions.elementary.trigonometric import (acos, asin, atan, cos, sin, tan) + x = symbols('x') + name_expr = [ + ("test_abs", Abs(x)), + ("test_acos", acos(x)), + ("test_asin", asin(x)), + ("test_atan", atan(x)), + ("test_cos", cos(x)), + ("test_cosh", cosh(x)), + ("test_log", log(x)), + ("test_ln", log(x)), + ("test_sin", sin(x)), + ("test_sinh", sinh(x)), + ("test_sqrt", sqrt(x)), + ("test_tan", tan(x)), + ("test_tanh", tanh(x)), + ] + result = codegen(name_expr, "F95", "file", header=False, empty=False) + assert result[0][0] == "file.f90" + expected = ( + 'REAL*8 function test_abs(x)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'test_abs = abs(x)\n' + 'end function\n' + 'REAL*8 function test_acos(x)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'test_acos = acos(x)\n' + 'end function\n' + 'REAL*8 function test_asin(x)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'test_asin = asin(x)\n' + 'end function\n' + 'REAL*8 function test_atan(x)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'test_atan = atan(x)\n' + 'end function\n' + 'REAL*8 function test_cos(x)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'test_cos = cos(x)\n' + 'end function\n' + 'REAL*8 function test_cosh(x)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'test_cosh = cosh(x)\n' + 'end function\n' + 'REAL*8 function test_log(x)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'test_log = log(x)\n' + 'end function\n' + 'REAL*8 function test_ln(x)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'test_ln = log(x)\n' + 'end function\n' + 'REAL*8 function test_sin(x)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'test_sin = sin(x)\n' + 'end function\n' + 'REAL*8 function test_sinh(x)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'test_sinh = sinh(x)\n' + 'end function\n' + 'REAL*8 function test_sqrt(x)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'test_sqrt = sqrt(x)\n' + 'end function\n' + 'REAL*8 function test_tan(x)\n' + 'implicit 
none\n' + 'REAL*8, intent(in) :: x\n' + 'test_tan = tan(x)\n' + 'end function\n' + 'REAL*8 function test_tanh(x)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'test_tanh = tanh(x)\n' + 'end function\n' + ) + assert result[0][1] == expected + + assert result[1][0] == "file.h" + expected = ( + 'interface\n' + 'REAL*8 function test_abs(x)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'end function\n' + 'end interface\n' + 'interface\n' + 'REAL*8 function test_acos(x)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'end function\n' + 'end interface\n' + 'interface\n' + 'REAL*8 function test_asin(x)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'end function\n' + 'end interface\n' + 'interface\n' + 'REAL*8 function test_atan(x)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'end function\n' + 'end interface\n' + 'interface\n' + 'REAL*8 function test_cos(x)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'end function\n' + 'end interface\n' + 'interface\n' + 'REAL*8 function test_cosh(x)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'end function\n' + 'end interface\n' + 'interface\n' + 'REAL*8 function test_log(x)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'end function\n' + 'end interface\n' + 'interface\n' + 'REAL*8 function test_ln(x)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'end function\n' + 'end interface\n' + 'interface\n' + 'REAL*8 function test_sin(x)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'end function\n' + 'end interface\n' + 'interface\n' + 'REAL*8 function test_sinh(x)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'end function\n' + 'end interface\n' + 'interface\n' + 'REAL*8 function test_sqrt(x)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'end function\n' + 'end interface\n' + 'interface\n' + 'REAL*8 function test_tan(x)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'end function\n' + 'end interface\n' + 'interface\n' + 'REAL*8 function test_tanh(x)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'end function\n' + 'end interface\n' + ) + assert result[1][1] == expected + + +def test_intrinsic_math2_codegen(): + # not included: frexp, ldexp, modf, fmod + from sympy.functions.elementary.trigonometric import atan2 + x, y = symbols('x,y') + name_expr = [ + ("test_atan2", atan2(x, y)), + ("test_pow", x**y), + ] + result = codegen(name_expr, "F95", "file", header=False, empty=False) + assert result[0][0] == "file.f90" + expected = ( + 'REAL*8 function test_atan2(x, y)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'REAL*8, intent(in) :: y\n' + 'test_atan2 = atan2(x, y)\n' + 'end function\n' + 'REAL*8 function test_pow(x, y)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'REAL*8, intent(in) :: y\n' + 'test_pow = x**y\n' + 'end function\n' + ) + assert result[0][1] == expected + + assert result[1][0] == "file.h" + expected = ( + 'interface\n' + 'REAL*8 function test_atan2(x, y)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'REAL*8, intent(in) :: y\n' + 'end function\n' + 'end interface\n' + 'interface\n' + 'REAL*8 function test_pow(x, y)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'REAL*8, intent(in) :: y\n' + 'end function\n' + 'end interface\n' + ) + assert result[1][1] == expected + + +def test_complicated_codegen_f95(): + from sympy.functions.elementary.trigonometric import (cos, sin, tan) + x, y, z = symbols('x,y,z') + name_expr = [ + ("test1", ((sin(x) + cos(y) + tan(z))**7).expand()), + ("test2", 
cos(cos(cos(cos(cos(cos(cos(cos(x + y + z))))))))), + ] + result = codegen(name_expr, "F95", "file", header=False, empty=False) + assert result[0][0] == "file.f90" + expected = ( + 'REAL*8 function test1(x, y, z)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'REAL*8, intent(in) :: y\n' + 'REAL*8, intent(in) :: z\n' + 'test1 = sin(x)**7 + 7*sin(x)**6*cos(y) + 7*sin(x)**6*tan(z) + 21*sin(x) &\n' + ' **5*cos(y)**2 + 42*sin(x)**5*cos(y)*tan(z) + 21*sin(x)**5*tan(z) &\n' + ' **2 + 35*sin(x)**4*cos(y)**3 + 105*sin(x)**4*cos(y)**2*tan(z) + &\n' + ' 105*sin(x)**4*cos(y)*tan(z)**2 + 35*sin(x)**4*tan(z)**3 + 35*sin( &\n' + ' x)**3*cos(y)**4 + 140*sin(x)**3*cos(y)**3*tan(z) + 210*sin(x)**3* &\n' + ' cos(y)**2*tan(z)**2 + 140*sin(x)**3*cos(y)*tan(z)**3 + 35*sin(x) &\n' + ' **3*tan(z)**4 + 21*sin(x)**2*cos(y)**5 + 105*sin(x)**2*cos(y)**4* &\n' + ' tan(z) + 210*sin(x)**2*cos(y)**3*tan(z)**2 + 210*sin(x)**2*cos(y) &\n' + ' **2*tan(z)**3 + 105*sin(x)**2*cos(y)*tan(z)**4 + 21*sin(x)**2*tan &\n' + ' (z)**5 + 7*sin(x)*cos(y)**6 + 42*sin(x)*cos(y)**5*tan(z) + 105* &\n' + ' sin(x)*cos(y)**4*tan(z)**2 + 140*sin(x)*cos(y)**3*tan(z)**3 + 105 &\n' + ' *sin(x)*cos(y)**2*tan(z)**4 + 42*sin(x)*cos(y)*tan(z)**5 + 7*sin( &\n' + ' x)*tan(z)**6 + cos(y)**7 + 7*cos(y)**6*tan(z) + 21*cos(y)**5*tan( &\n' + ' z)**2 + 35*cos(y)**4*tan(z)**3 + 35*cos(y)**3*tan(z)**4 + 21*cos( &\n' + ' y)**2*tan(z)**5 + 7*cos(y)*tan(z)**6 + tan(z)**7\n' + 'end function\n' + 'REAL*8 function test2(x, y, z)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'REAL*8, intent(in) :: y\n' + 'REAL*8, intent(in) :: z\n' + 'test2 = cos(cos(cos(cos(cos(cos(cos(cos(x + y + z))))))))\n' + 'end function\n' + ) + assert result[0][1] == expected + assert result[1][0] == "file.h" + expected = ( + 'interface\n' + 'REAL*8 function test1(x, y, z)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'REAL*8, intent(in) :: y\n' + 'REAL*8, intent(in) :: z\n' + 'end function\n' + 'end interface\n' + 'interface\n' + 'REAL*8 function test2(x, y, z)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'REAL*8, intent(in) :: y\n' + 'REAL*8, intent(in) :: z\n' + 'end function\n' + 'end interface\n' + ) + assert result[1][1] == expected + + +def test_loops(): + from sympy.tensor import IndexedBase, Idx + from sympy.core.symbol import symbols + + n, m = symbols('n,m', integer=True) + A, x, y = map(IndexedBase, 'Axy') + i = Idx('i', m) + j = Idx('j', n) + + (f1, code), (f2, interface) = codegen( + ('matrix_vector', Eq(y[i], A[i, j]*x[j])), "F95", "file", header=False, empty=False) + + assert f1 == 'file.f90' + expected = ( + 'subroutine matrix_vector(A, m, n, x, y)\n' + 'implicit none\n' + 'INTEGER*4, intent(in) :: m\n' + 'INTEGER*4, intent(in) :: n\n' + 'REAL*8, intent(in), dimension(1:m, 1:n) :: A\n' + 'REAL*8, intent(in), dimension(1:n) :: x\n' + 'REAL*8, intent(out), dimension(1:m) :: y\n' + 'INTEGER*4 :: i\n' + 'INTEGER*4 :: j\n' + 'do i = 1, m\n' + ' y(i) = 0\n' + 'end do\n' + 'do i = 1, m\n' + ' do j = 1, n\n' + ' y(i) = %(rhs)s + y(i)\n' + ' end do\n' + 'end do\n' + 'end subroutine\n' + ) + + assert code == expected % {'rhs': 'A(i, j)*x(j)'} or\ + code == expected % {'rhs': 'x(j)*A(i, j)'} + assert f2 == 'file.h' + assert interface == ( + 'interface\n' + 'subroutine matrix_vector(A, m, n, x, y)\n' + 'implicit none\n' + 'INTEGER*4, intent(in) :: m\n' + 'INTEGER*4, intent(in) :: n\n' + 'REAL*8, intent(in), dimension(1:m, 1:n) :: A\n' + 'REAL*8, intent(in), dimension(1:n) :: x\n' + 'REAL*8, intent(out), dimension(1:m) :: y\n' + 'end subroutine\n' + 'end 
interface\n' + ) + + +def test_dummy_loops_f95(): + from sympy.tensor import IndexedBase, Idx + i, m = symbols('i m', integer=True, cls=Dummy) + x = IndexedBase('x') + y = IndexedBase('y') + i = Idx(i, m) + expected = ( + 'subroutine test_dummies(m_%(mcount)i, x, y)\n' + 'implicit none\n' + 'INTEGER*4, intent(in) :: m_%(mcount)i\n' + 'REAL*8, intent(in), dimension(1:m_%(mcount)i) :: x\n' + 'REAL*8, intent(out), dimension(1:m_%(mcount)i) :: y\n' + 'INTEGER*4 :: i_%(icount)i\n' + 'do i_%(icount)i = 1, m_%(mcount)i\n' + ' y(i_%(icount)i) = x(i_%(icount)i)\n' + 'end do\n' + 'end subroutine\n' + ) % {'icount': i.label.dummy_index, 'mcount': m.dummy_index} + r = make_routine('test_dummies', Eq(y[i], x[i])) + c = FCodeGen() + code = get_string(c.dump_f95, [r]) + assert code == expected + + +def test_loops_InOut(): + from sympy.tensor import IndexedBase, Idx + from sympy.core.symbol import symbols + + i, j, n, m = symbols('i,j,n,m', integer=True) + A, x, y = symbols('A,x,y') + A = IndexedBase(A)[Idx(i, m), Idx(j, n)] + x = IndexedBase(x)[Idx(j, n)] + y = IndexedBase(y)[Idx(i, m)] + + (f1, code), (f2, interface) = codegen( + ('matrix_vector', Eq(y, y + A*x)), "F95", "file", header=False, empty=False) + + assert f1 == 'file.f90' + expected = ( + 'subroutine matrix_vector(A, m, n, x, y)\n' + 'implicit none\n' + 'INTEGER*4, intent(in) :: m\n' + 'INTEGER*4, intent(in) :: n\n' + 'REAL*8, intent(in), dimension(1:m, 1:n) :: A\n' + 'REAL*8, intent(in), dimension(1:n) :: x\n' + 'REAL*8, intent(inout), dimension(1:m) :: y\n' + 'INTEGER*4 :: i\n' + 'INTEGER*4 :: j\n' + 'do i = 1, m\n' + ' do j = 1, n\n' + ' y(i) = %(rhs)s + y(i)\n' + ' end do\n' + 'end do\n' + 'end subroutine\n' + ) + + assert (code == expected % {'rhs': 'A(i, j)*x(j)'} or + code == expected % {'rhs': 'x(j)*A(i, j)'}) + assert f2 == 'file.h' + assert interface == ( + 'interface\n' + 'subroutine matrix_vector(A, m, n, x, y)\n' + 'implicit none\n' + 'INTEGER*4, intent(in) :: m\n' + 'INTEGER*4, intent(in) :: n\n' + 'REAL*8, intent(in), dimension(1:m, 1:n) :: A\n' + 'REAL*8, intent(in), dimension(1:n) :: x\n' + 'REAL*8, intent(inout), dimension(1:m) :: y\n' + 'end subroutine\n' + 'end interface\n' + ) + + +def test_partial_loops_f(): + # check that loop boundaries are determined by Idx, and array strides + # determined by shape of IndexedBase object. 
+ from sympy.tensor import IndexedBase, Idx + from sympy.core.symbol import symbols + n, m, o, p = symbols('n m o p', integer=True) + A = IndexedBase('A', shape=(m, p)) + x = IndexedBase('x') + y = IndexedBase('y') + i = Idx('i', (o, m - 5)) # Note: bounds are inclusive + j = Idx('j', n) # dimension n corresponds to bounds (0, n - 1) + + (f1, code), (f2, interface) = codegen( + ('matrix_vector', Eq(y[i], A[i, j]*x[j])), "F95", "file", header=False, empty=False) + + expected = ( + 'subroutine matrix_vector(A, m, n, o, p, x, y)\n' + 'implicit none\n' + 'INTEGER*4, intent(in) :: m\n' + 'INTEGER*4, intent(in) :: n\n' + 'INTEGER*4, intent(in) :: o\n' + 'INTEGER*4, intent(in) :: p\n' + 'REAL*8, intent(in), dimension(1:m, 1:p) :: A\n' + 'REAL*8, intent(in), dimension(1:n) :: x\n' + 'REAL*8, intent(out), dimension(1:%(iup-ilow)s) :: y\n' + 'INTEGER*4 :: i\n' + 'INTEGER*4 :: j\n' + 'do i = %(ilow)s, %(iup)s\n' + ' y(i) = 0\n' + 'end do\n' + 'do i = %(ilow)s, %(iup)s\n' + ' do j = 1, n\n' + ' y(i) = %(rhs)s + y(i)\n' + ' end do\n' + 'end do\n' + 'end subroutine\n' + ) % { + 'rhs': '%(rhs)s', + 'iup': str(m - 4), + 'ilow': str(1 + o), + 'iup-ilow': str(m - 4 - o) + } + + assert code == expected % {'rhs': 'A(i, j)*x(j)'} or\ + code == expected % {'rhs': 'x(j)*A(i, j)'} + + +def test_output_arg_f(): + from sympy.core.relational import Equality + from sympy.functions.elementary.trigonometric import (cos, sin) + x, y, z = symbols("x,y,z") + r = make_routine("foo", [Equality(y, sin(x)), cos(x)]) + c = FCodeGen() + result = c.write([r], "test", header=False, empty=False) + assert result[0][0] == "test.f90" + assert result[0][1] == ( + 'REAL*8 function foo(x, y)\n' + 'implicit none\n' + 'REAL*8, intent(in) :: x\n' + 'REAL*8, intent(out) :: y\n' + 'y = sin(x)\n' + 'foo = cos(x)\n' + 'end function\n' + ) + + +def test_inline_function(): + from sympy.tensor import IndexedBase, Idx + from sympy.core.symbol import symbols + n, m = symbols('n m', integer=True) + A, x, y = map(IndexedBase, 'Axy') + i = Idx('i', m) + p = FCodeGen() + func = implemented_function('func', Lambda(n, n*(n + 1))) + routine = make_routine('test_inline', Eq(y[i], func(x[i]))) + code = get_string(p.dump_f95, [routine]) + expected = ( + 'subroutine test_inline(m, x, y)\n' + 'implicit none\n' + 'INTEGER*4, intent(in) :: m\n' + 'REAL*8, intent(in), dimension(1:m) :: x\n' + 'REAL*8, intent(out), dimension(1:m) :: y\n' + 'INTEGER*4 :: i\n' + 'do i = 1, m\n' + ' y(i) = %s*%s\n' + 'end do\n' + 'end subroutine\n' + ) + args = ('x(i)', '(x(i) + 1)') + assert code == expected % args or\ + code == expected % args[::-1] + + +def test_f_code_call_signature_wrap(): + # Issue #7934 + x = symbols('x:20') + expr = 0 + for sym in x: + expr += sym + routine = make_routine("test", expr) + code_gen = FCodeGen() + source = get_string(code_gen.dump_f95, [routine]) + expected = """\ +REAL*8 function test(x0, x1, x10, x11, x12, x13, x14, x15, x16, x17, x18, & + x19, x2, x3, x4, x5, x6, x7, x8, x9) +implicit none +REAL*8, intent(in) :: x0 +REAL*8, intent(in) :: x1 +REAL*8, intent(in) :: x10 +REAL*8, intent(in) :: x11 +REAL*8, intent(in) :: x12 +REAL*8, intent(in) :: x13 +REAL*8, intent(in) :: x14 +REAL*8, intent(in) :: x15 +REAL*8, intent(in) :: x16 +REAL*8, intent(in) :: x17 +REAL*8, intent(in) :: x18 +REAL*8, intent(in) :: x19 +REAL*8, intent(in) :: x2 +REAL*8, intent(in) :: x3 +REAL*8, intent(in) :: x4 +REAL*8, intent(in) :: x5 +REAL*8, intent(in) :: x6 +REAL*8, intent(in) :: x7 +REAL*8, intent(in) :: x8 +REAL*8, intent(in) :: x9 +test = x0 + x1 + x10 + x11 + x12 
+ x13 + x14 + x15 + x16 + x17 + x18 + & + x19 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 +end function +""" + assert source == expected + + +def test_check_case(): + x, X = symbols('x,X') + raises(CodeGenError, lambda: codegen(('test', x*X), 'f95', 'prefix')) + + +def test_check_case_false_positive(): + # The upper case/lower case exception should not be triggered by SymPy + # objects that differ only because of assumptions. (It may be useful to + # have a check for that as well, but here we only want to test against + # false positives with respect to case checking.) + x1 = symbols('x') + x2 = symbols('x', my_assumption=True) + try: + codegen(('test', x1*x2), 'f95', 'prefix') + except CodeGenError as e: + if e.args[0].startswith("Fortran ignores case."): + raise AssertionError("This exception should not be raised!") + + +def test_c_fortran_omit_routine_name(): + x, y = symbols("x,y") + name_expr = [("foo", 2*x)] + result = codegen(name_expr, "F95", header=False, empty=False) + expresult = codegen(name_expr, "F95", "foo", header=False, empty=False) + assert result[0][1] == expresult[0][1] + + name_expr = ("foo", x*y) + result = codegen(name_expr, "F95", header=False, empty=False) + expresult = codegen(name_expr, "F95", "foo", header=False, empty=False) + assert result[0][1] == expresult[0][1] + + name_expr = ("foo", Matrix([[x, y], [x+y, x-y]])) + result = codegen(name_expr, "C89", header=False, empty=False) + expresult = codegen(name_expr, "C89", "foo", header=False, empty=False) + assert result[0][1] == expresult[0][1] + + +def test_fcode_matrix_output(): + x, y, z = symbols('x,y,z') + e1 = x + y + e2 = Matrix([[x, y], [z, 16]]) + name_expr = ("test", (e1, e2)) + result = codegen(name_expr, "f95", "test", header=False, empty=False) + source = result[0][1] + expected = ( + "REAL*8 function test(x, y, z, out_%(hash)s)\n" + "implicit none\n" + "REAL*8, intent(in) :: x\n" + "REAL*8, intent(in) :: y\n" + "REAL*8, intent(in) :: z\n" + "REAL*8, intent(out), dimension(1:2, 1:2) :: out_%(hash)s\n" + "out_%(hash)s(1, 1) = x\n" + "out_%(hash)s(2, 1) = z\n" + "out_%(hash)s(1, 2) = y\n" + "out_%(hash)s(2, 2) = 16\n" + "test = x + y\n" + "end function\n" + ) + # look for the magic number + a = source.splitlines()[5] + b = a.split('_') + out = b[1] + expected = expected % {'hash': out} + assert source == expected + + +def test_fcode_results_named_ordered(): + x, y, z = symbols('x,y,z') + B, C = symbols('B,C') + A = MatrixSymbol('A', 1, 3) + expr1 = Equality(A, Matrix([[1, 2, x]])) + expr2 = Equality(C, (x + y)*z) + expr3 = Equality(B, 2*x) + name_expr = ("test", [expr1, expr2, expr3]) + result = codegen(name_expr, "f95", "test", header=False, empty=False, + argument_sequence=(x, z, y, C, A, B)) + source = result[0][1] + expected = ( + "subroutine test(x, z, y, C, A, B)\n" + "implicit none\n" + "REAL*8, intent(in) :: x\n" + "REAL*8, intent(in) :: z\n" + "REAL*8, intent(in) :: y\n" + "REAL*8, intent(out) :: C\n" + "REAL*8, intent(out) :: B\n" + "REAL*8, intent(out), dimension(1:1, 1:3) :: A\n" + "C = z*(x + y)\n" + "A(1, 1) = 1\n" + "A(1, 2) = 2\n" + "A(1, 3) = x\n" + "B = 2*x\n" + "end subroutine\n" + ) + assert source == expected + + +def test_fcode_matrixsymbol_slice(): + A = MatrixSymbol('A', 2, 3) + B = MatrixSymbol('B', 1, 3) + C = MatrixSymbol('C', 1, 3) + D = MatrixSymbol('D', 2, 1) + name_expr = ("test", [Equality(B, A[0, :]), + Equality(C, A[1, :]), + Equality(D, A[:, 2])]) + result = codegen(name_expr, "f95", "test", header=False, empty=False) + source = result[0][1] + expected = ( + "subroutine 
test(A, B, C, D)\n" + "implicit none\n" + "REAL*8, intent(in), dimension(1:2, 1:3) :: A\n" + "REAL*8, intent(out), dimension(1:1, 1:3) :: B\n" + "REAL*8, intent(out), dimension(1:1, 1:3) :: C\n" + "REAL*8, intent(out), dimension(1:2, 1:1) :: D\n" + "B(1, 1) = A(1, 1)\n" + "B(1, 2) = A(1, 2)\n" + "B(1, 3) = A(1, 3)\n" + "C(1, 1) = A(2, 1)\n" + "C(1, 2) = A(2, 2)\n" + "C(1, 3) = A(2, 3)\n" + "D(1, 1) = A(1, 3)\n" + "D(2, 1) = A(2, 3)\n" + "end subroutine\n" + ) + assert source == expected + + +def test_fcode_matrixsymbol_slice_autoname(): + # see issue #8093 + A = MatrixSymbol('A', 2, 3) + name_expr = ("test", A[:, 1]) + result = codegen(name_expr, "f95", "test", header=False, empty=False) + source = result[0][1] + expected = ( + "subroutine test(A, out_%(hash)s)\n" + "implicit none\n" + "REAL*8, intent(in), dimension(1:2, 1:3) :: A\n" + "REAL*8, intent(out), dimension(1:2, 1:1) :: out_%(hash)s\n" + "out_%(hash)s(1, 1) = A(1, 2)\n" + "out_%(hash)s(2, 1) = A(2, 2)\n" + "end subroutine\n" + ) + # look for the magic number + a = source.splitlines()[3] + b = a.split('_') + out = b[1] + expected = expected % {'hash': out} + assert source == expected + + +def test_global_vars(): + x, y, z, t = symbols("x y z t") + result = codegen(('f', x*y), "F95", header=False, empty=False, + global_vars=(y,)) + source = result[0][1] + expected = ( + "REAL*8 function f(x)\n" + "implicit none\n" + "REAL*8, intent(in) :: x\n" + "f = x*y\n" + "end function\n" + ) + assert source == expected + + expected = ( + '#include "f.h"\n' + '#include <math.h>\n' + 'double f(double x, double y) {\n' + ' double f_result;\n' + ' f_result = x*y + z;\n' + ' return f_result;\n' + '}\n' + ) + result = codegen(('f', x*y+z), "C", header=False, empty=False, + global_vars=(z, t)) + source = result[0][1] + assert source == expected + +def test_custom_codegen(): + from sympy.printing.c import C99CodePrinter + from sympy.functions.elementary.exponential import exp + + printer = C99CodePrinter(settings={'user_functions': {'exp': 'fastexp'}}) + + x, y = symbols('x y') + expr = exp(x + y) + + # replace math.h with a different header + gen = C99CodeGen(printer=printer, + preprocessor_statements=['#include "fastexp.h"']) + + expected = ( + '#include "expr.h"\n' + '#include "fastexp.h"\n' + 'double expr(double x, double y) {\n' + ' double expr_result;\n' + ' expr_result = fastexp(x + y);\n' + ' return expr_result;\n' + '}\n' + ) + + result = codegen(('expr', expr), header=False, empty=False, code_gen=gen) + source = result[0][1] + assert source == expected + + # use both math.h and an external header + gen = C99CodeGen(printer=printer) + gen.preprocessor_statements.append('#include "fastexp.h"') + + expected = ( + '#include "expr.h"\n' + '#include <math.h>\n' + '#include "fastexp.h"\n' + 'double expr(double x, double y) {\n' + ' double expr_result;\n' + ' expr_result = fastexp(x + y);\n' + ' return expr_result;\n' + '}\n' + ) + + result = codegen(('expr', expr), header=False, empty=False, code_gen=gen) + source = result[0][1] + assert source == expected + +def test_c_with_printer(): + #issue 13586 + from sympy.printing.c import C99CodePrinter + class CustomPrinter(C99CodePrinter): + def _print_Pow(self, expr): + return "fastpow({}, {})".format(self._print(expr.base), + self._print(expr.exp)) + + x = symbols('x') + expr = x**3 + expected =[ + ("file.c", + "#include \"file.h\"\n" + "#include <math.h>\n" + "double test(double x) {\n" + " double test_result;\n" + " test_result = fastpow(x, 3);\n" + " return test_result;\n" + "}\n"), + ("file.h", + "#ifndef 
PROJECT__FILE__H\n" + "#define PROJECT__FILE__H\n" + "double test(double x);\n" + "#endif\n") + ] + result = codegen(("test", expr), "C","file", header=False, empty=False, printer = CustomPrinter()) + assert result == expected + + +def test_fcode_complex(): + import sympy.utilities.codegen + sympy.utilities.codegen.COMPLEX_ALLOWED = True + x = Symbol('x', real=True) + y = Symbol('y',real=True) + result = codegen(('test',x+y), 'f95', 'test', header=False, empty=False) + source = (result[0][1]) + expected = ( + "REAL*8 function test(x, y)\n" + "implicit none\n" + "REAL*8, intent(in) :: x\n" + "REAL*8, intent(in) :: y\n" + "test = x + y\n" + "end function\n") + assert source == expected + x = Symbol('x') + y = Symbol('y',real=True) + result = codegen(('test',x+y), 'f95', 'test', header=False, empty=False) + source = (result[0][1]) + expected = ( + "COMPLEX*16 function test(x, y)\n" + "implicit none\n" + "COMPLEX*16, intent(in) :: x\n" + "REAL*8, intent(in) :: y\n" + "test = x + y\n" + "end function\n" + ) + assert source==expected + sympy.utilities.codegen.COMPLEX_ALLOWED = False diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_codegen_julia.py b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_codegen_julia.py new file mode 100644 index 0000000000000000000000000000000000000000..eb4d5920554555a103e017b0518e028ff7d51f8d --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_codegen_julia.py @@ -0,0 +1,620 @@ +from io import StringIO + +from sympy.core import S, symbols, Eq, pi, Catalan, EulerGamma, Function +from sympy.core.relational import Equality +from sympy.functions.elementary.piecewise import Piecewise +from sympy.matrices import Matrix, MatrixSymbol +from sympy.utilities.codegen import JuliaCodeGen, codegen, make_routine +from sympy.testing.pytest import XFAIL +import sympy + + +x, y, z = symbols('x,y,z') + + +def test_empty_jl_code(): + code_gen = JuliaCodeGen() + output = StringIO() + code_gen.dump_jl([], output, "file", header=False, empty=False) + source = output.getvalue() + assert source == "" + + +def test_jl_simple_code(): + name_expr = ("test", (x + y)*z) + result, = codegen(name_expr, "Julia", header=False, empty=False) + assert result[0] == "test.jl" + source = result[1] + expected = ( + "function test(x, y, z)\n" + " out1 = z .* (x + y)\n" + " return out1\n" + "end\n" + ) + assert source == expected + + +def test_jl_simple_code_with_header(): + name_expr = ("test", (x + y)*z) + result, = codegen(name_expr, "Julia", header=True, empty=False) + assert result[0] == "test.jl" + source = result[1] + expected = ( + "# Code generated with SymPy " + sympy.__version__ + "\n" + "#\n" + "# See http://www.sympy.org/ for more information.\n" + "#\n" + "# This file is part of 'project'\n" + "function test(x, y, z)\n" + " out1 = z .* (x + y)\n" + " return out1\n" + "end\n" + ) + assert source == expected + + +def test_jl_simple_code_nameout(): + expr = Equality(z, (x + y)) + name_expr = ("test", expr) + result, = codegen(name_expr, "Julia", header=False, empty=False) + source = result[1] + expected = ( + "function test(x, y)\n" + " z = x + y\n" + " return z\n" + "end\n" + ) + assert source == expected + + +def test_jl_numbersymbol(): + name_expr = ("test", pi**Catalan) + result, = codegen(name_expr, "Julia", header=False, empty=False) + source = result[1] + expected = ( + "function test()\n" + " out1 = pi ^ catalan\n" + " return out1\n" + "end\n" + ) + assert source == expected + + +@XFAIL +def 
test_jl_numbersymbol_no_inline(): + # FIXME: how to pass inline=False to the JuliaCodePrinter? + name_expr = ("test", [pi**Catalan, EulerGamma]) + result, = codegen(name_expr, "Julia", header=False, + empty=False, inline=False) + source = result[1] + expected = ( + "function test()\n" + " Catalan = 0.915965594177219\n" + " EulerGamma = 0.5772156649015329\n" + " out1 = pi ^ Catalan\n" + " out2 = EulerGamma\n" + " return out1, out2\n" + "end\n" + ) + assert source == expected + + +def test_jl_code_argument_order(): + expr = x + y + routine = make_routine("test", expr, argument_sequence=[z, x, y], language="julia") + code_gen = JuliaCodeGen() + output = StringIO() + code_gen.dump_jl([routine], output, "test", header=False, empty=False) + source = output.getvalue() + expected = ( + "function test(z, x, y)\n" + " out1 = x + y\n" + " return out1\n" + "end\n" + ) + assert source == expected + + +def test_multiple_results_m(): + # Here the output order is the input order + expr1 = (x + y)*z + expr2 = (x - y)*z + name_expr = ("test", [expr1, expr2]) + result, = codegen(name_expr, "Julia", header=False, empty=False) + source = result[1] + expected = ( + "function test(x, y, z)\n" + " out1 = z .* (x + y)\n" + " out2 = z .* (x - y)\n" + " return out1, out2\n" + "end\n" + ) + assert source == expected + + +def test_results_named_unordered(): + # Here output order is based on name_expr + A, B, C = symbols('A,B,C') + expr1 = Equality(C, (x + y)*z) + expr2 = Equality(A, (x - y)*z) + expr3 = Equality(B, 2*x) + name_expr = ("test", [expr1, expr2, expr3]) + result, = codegen(name_expr, "Julia", header=False, empty=False) + source = result[1] + expected = ( + "function test(x, y, z)\n" + " C = z .* (x + y)\n" + " A = z .* (x - y)\n" + " B = 2 * x\n" + " return C, A, B\n" + "end\n" + ) + assert source == expected + + +def test_results_named_ordered(): + A, B, C = symbols('A,B,C') + expr1 = Equality(C, (x + y)*z) + expr2 = Equality(A, (x - y)*z) + expr3 = Equality(B, 2*x) + name_expr = ("test", [expr1, expr2, expr3]) + result = codegen(name_expr, "Julia", header=False, empty=False, + argument_sequence=(x, z, y)) + assert result[0][0] == "test.jl" + source = result[0][1] + expected = ( + "function test(x, z, y)\n" + " C = z .* (x + y)\n" + " A = z .* (x - y)\n" + " B = 2 * x\n" + " return C, A, B\n" + "end\n" + ) + assert source == expected + + +def test_complicated_jl_codegen(): + from sympy.functions.elementary.trigonometric import (cos, sin, tan) + name_expr = ("testlong", + [ ((sin(x) + cos(y) + tan(z))**3).expand(), + cos(cos(cos(cos(cos(cos(cos(cos(x + y + z)))))))) + ]) + result = codegen(name_expr, "Julia", header=False, empty=False) + assert result[0][0] == "testlong.jl" + source = result[0][1] + expected = ( + "function testlong(x, y, z)\n" + " out1 = sin(x) .^ 3 + 3 * sin(x) .^ 2 .* cos(y) + 3 * sin(x) .^ 2 .* tan(z)" + " + 3 * sin(x) .* cos(y) .^ 2 + 6 * sin(x) .* cos(y) .* tan(z) + 3 * sin(x) .* tan(z) .^ 2" + " + cos(y) .^ 3 + 3 * cos(y) .^ 2 .* tan(z) + 3 * cos(y) .* tan(z) .^ 2 + tan(z) .^ 3\n" + " out2 = cos(cos(cos(cos(cos(cos(cos(cos(x + y + z))))))))\n" + " return out1, out2\n" + "end\n" + ) + assert source == expected + + +def test_jl_output_arg_mixed_unordered(): + # named outputs are alphabetical, unnamed output appear in the given order + from sympy.functions.elementary.trigonometric import (cos, sin) + a = symbols("a") + name_expr = ("foo", [cos(2*x), Equality(y, sin(x)), cos(x), Equality(a, sin(2*x))]) + result, = codegen(name_expr, "Julia", header=False, empty=False) + assert result[0] 
== "foo.jl" + source = result[1]; + expected = ( + 'function foo(x)\n' + ' out1 = cos(2 * x)\n' + ' y = sin(x)\n' + ' out3 = cos(x)\n' + ' a = sin(2 * x)\n' + ' return out1, y, out3, a\n' + 'end\n' + ) + assert source == expected + + +def test_jl_piecewise_(): + pw = Piecewise((0, x < -1), (x**2, x <= 1), (-x+2, x > 1), (1, True), evaluate=False) + name_expr = ("pwtest", pw) + result, = codegen(name_expr, "Julia", header=False, empty=False) + source = result[1] + expected = ( + "function pwtest(x)\n" + " out1 = ((x < -1) ? (0) :\n" + " (x <= 1) ? (x .^ 2) :\n" + " (x > 1) ? (2 - x) : (1))\n" + " return out1\n" + "end\n" + ) + assert source == expected + + +@XFAIL +def test_jl_piecewise_no_inline(): + # FIXME: how to pass inline=False to the JuliaCodePrinter? + pw = Piecewise((0, x < -1), (x**2, x <= 1), (-x+2, x > 1), (1, True)) + name_expr = ("pwtest", pw) + result, = codegen(name_expr, "Julia", header=False, empty=False, + inline=False) + source = result[1] + expected = ( + "function pwtest(x)\n" + " if (x < -1)\n" + " out1 = 0\n" + " elseif (x <= 1)\n" + " out1 = x .^ 2\n" + " elseif (x > 1)\n" + " out1 = -x + 2\n" + " else\n" + " out1 = 1\n" + " end\n" + " return out1\n" + "end\n" + ) + assert source == expected + + +def test_jl_multifcns_per_file(): + name_expr = [ ("foo", [2*x, 3*y]), ("bar", [y**2, 4*y]) ] + result = codegen(name_expr, "Julia", header=False, empty=False) + assert result[0][0] == "foo.jl" + source = result[0][1]; + expected = ( + "function foo(x, y)\n" + " out1 = 2 * x\n" + " out2 = 3 * y\n" + " return out1, out2\n" + "end\n" + "function bar(y)\n" + " out1 = y .^ 2\n" + " out2 = 4 * y\n" + " return out1, out2\n" + "end\n" + ) + assert source == expected + + +def test_jl_multifcns_per_file_w_header(): + name_expr = [ ("foo", [2*x, 3*y]), ("bar", [y**2, 4*y]) ] + result = codegen(name_expr, "Julia", header=True, empty=False) + assert result[0][0] == "foo.jl" + source = result[0][1]; + expected = ( + "# Code generated with SymPy " + sympy.__version__ + "\n" + "#\n" + "# See http://www.sympy.org/ for more information.\n" + "#\n" + "# This file is part of 'project'\n" + "function foo(x, y)\n" + " out1 = 2 * x\n" + " out2 = 3 * y\n" + " return out1, out2\n" + "end\n" + "function bar(y)\n" + " out1 = y .^ 2\n" + " out2 = 4 * y\n" + " return out1, out2\n" + "end\n" + ) + assert source == expected + + +def test_jl_filename_match_prefix(): + name_expr = [ ("foo", [2*x, 3*y]), ("bar", [y**2, 4*y]) ] + result, = codegen(name_expr, "Julia", prefix="baz", header=False, + empty=False) + assert result[0] == "baz.jl" + + +def test_jl_matrix_named(): + e2 = Matrix([[x, 2*y, pi*z]]) + name_expr = ("test", Equality(MatrixSymbol('myout1', 1, 3), e2)) + result = codegen(name_expr, "Julia", header=False, empty=False) + assert result[0][0] == "test.jl" + source = result[0][1] + expected = ( + "function test(x, y, z)\n" + " myout1 = [x 2 * y pi * z]\n" + " return myout1\n" + "end\n" + ) + assert source == expected + + +def test_jl_matrix_named_matsym(): + myout1 = MatrixSymbol('myout1', 1, 3) + e2 = Matrix([[x, 2*y, pi*z]]) + name_expr = ("test", Equality(myout1, e2, evaluate=False)) + result, = codegen(name_expr, "Julia", header=False, empty=False) + source = result[1] + expected = ( + "function test(x, y, z)\n" + " myout1 = [x 2 * y pi * z]\n" + " return myout1\n" + "end\n" + ) + assert source == expected + + +def test_jl_matrix_output_autoname(): + expr = Matrix([[x, x+y, 3]]) + name_expr = ("test", expr) + result, = codegen(name_expr, "Julia", header=False, empty=False) + source = 
result[1] + expected = ( + "function test(x, y)\n" + " out1 = [x x + y 3]\n" + " return out1\n" + "end\n" + ) + assert source == expected + + +def test_jl_matrix_output_autoname_2(): + e1 = (x + y) + e2 = Matrix([[2*x, 2*y, 2*z]]) + e3 = Matrix([[x], [y], [z]]) + e4 = Matrix([[x, y], [z, 16]]) + name_expr = ("test", (e1, e2, e3, e4)) + result, = codegen(name_expr, "Julia", header=False, empty=False) + source = result[1] + expected = ( + "function test(x, y, z)\n" + " out1 = x + y\n" + " out2 = [2 * x 2 * y 2 * z]\n" + " out3 = [x, y, z]\n" + " out4 = [x y;\n" + " z 16]\n" + " return out1, out2, out3, out4\n" + "end\n" + ) + assert source == expected + + +def test_jl_results_matrix_named_ordered(): + B, C = symbols('B,C') + A = MatrixSymbol('A', 1, 3) + expr1 = Equality(C, (x + y)*z) + expr2 = Equality(A, Matrix([[1, 2, x]])) + expr3 = Equality(B, 2*x) + name_expr = ("test", [expr1, expr2, expr3]) + result, = codegen(name_expr, "Julia", header=False, empty=False, + argument_sequence=(x, z, y)) + source = result[1] + expected = ( + "function test(x, z, y)\n" + " C = z .* (x + y)\n" + " A = [1 2 x]\n" + " B = 2 * x\n" + " return C, A, B\n" + "end\n" + ) + assert source == expected + + +def test_jl_matrixsymbol_slice(): + A = MatrixSymbol('A', 2, 3) + B = MatrixSymbol('B', 1, 3) + C = MatrixSymbol('C', 1, 3) + D = MatrixSymbol('D', 2, 1) + name_expr = ("test", [Equality(B, A[0, :]), + Equality(C, A[1, :]), + Equality(D, A[:, 2])]) + result, = codegen(name_expr, "Julia", header=False, empty=False) + source = result[1] + expected = ( + "function test(A)\n" + " B = A[1,:]\n" + " C = A[2,:]\n" + " D = A[:,3]\n" + " return B, C, D\n" + "end\n" + ) + assert source == expected + + +def test_jl_matrixsymbol_slice2(): + A = MatrixSymbol('A', 3, 4) + B = MatrixSymbol('B', 2, 2) + C = MatrixSymbol('C', 2, 2) + name_expr = ("test", [Equality(B, A[0:2, 0:2]), + Equality(C, A[0:2, 1:3])]) + result, = codegen(name_expr, "Julia", header=False, empty=False) + source = result[1] + expected = ( + "function test(A)\n" + " B = A[1:2,1:2]\n" + " C = A[1:2,2:3]\n" + " return B, C\n" + "end\n" + ) + assert source == expected + + +def test_jl_matrixsymbol_slice3(): + A = MatrixSymbol('A', 8, 7) + B = MatrixSymbol('B', 2, 2) + C = MatrixSymbol('C', 4, 2) + name_expr = ("test", [Equality(B, A[6:, 1::3]), + Equality(C, A[::2, ::3])]) + result, = codegen(name_expr, "Julia", header=False, empty=False) + source = result[1] + expected = ( + "function test(A)\n" + " B = A[7:end,2:3:end]\n" + " C = A[1:2:end,1:3:end]\n" + " return B, C\n" + "end\n" + ) + assert source == expected + + +def test_jl_matrixsymbol_slice_autoname(): + A = MatrixSymbol('A', 2, 3) + B = MatrixSymbol('B', 1, 3) + name_expr = ("test", [Equality(B, A[0,:]), A[1,:], A[:,0], A[:,1]]) + result, = codegen(name_expr, "Julia", header=False, empty=False) + source = result[1] + expected = ( + "function test(A)\n" + " B = A[1,:]\n" + " out2 = A[2,:]\n" + " out3 = A[:,1]\n" + " out4 = A[:,2]\n" + " return B, out2, out3, out4\n" + "end\n" + ) + assert source == expected + + +def test_jl_loops(): + # Note: an Julia programmer would probably vectorize this across one or + # more dimensions. Also, size(A) would be used rather than passing in m + # and n. Perhaps users would expect us to vectorize automatically here? + # Or is it possible to represent such things using IndexedBase? 
+ from sympy.tensor import IndexedBase, Idx + from sympy.core.symbol import symbols + n, m = symbols('n m', integer=True) + A = IndexedBase('A') + x = IndexedBase('x') + y = IndexedBase('y') + i = Idx('i', m) + j = Idx('j', n) + result, = codegen(('mat_vec_mult', Eq(y[i], A[i, j]*x[j])), "Julia", + header=False, empty=False) + source = result[1] + expected = ( + 'function mat_vec_mult(y, A, m, n, x)\n' + ' for i = 1:m\n' + ' y[i] = 0\n' + ' end\n' + ' for i = 1:m\n' + ' for j = 1:n\n' + ' y[i] = %(rhs)s + y[i]\n' + ' end\n' + ' end\n' + ' return y\n' + 'end\n' + ) + assert (source == expected % {'rhs': 'A[%s,%s] .* x[j]' % (i, j)} or + source == expected % {'rhs': 'x[j] .* A[%s,%s]' % (i, j)}) + + +def test_jl_tensor_loops_multiple_contractions(): + # see comments in previous test about vectorizing + from sympy.tensor import IndexedBase, Idx + from sympy.core.symbol import symbols + n, m, o, p = symbols('n m o p', integer=True) + A = IndexedBase('A') + B = IndexedBase('B') + y = IndexedBase('y') + i = Idx('i', m) + j = Idx('j', n) + k = Idx('k', o) + l = Idx('l', p) + result, = codegen(('tensorthing', Eq(y[i], B[j, k, l]*A[i, j, k, l])), + "Julia", header=False, empty=False) + source = result[1] + expected = ( + 'function tensorthing(y, A, B, m, n, o, p)\n' + ' for i = 1:m\n' + ' y[i] = 0\n' + ' end\n' + ' for i = 1:m\n' + ' for j = 1:n\n' + ' for k = 1:o\n' + ' for l = 1:p\n' + ' y[i] = A[i,j,k,l] .* B[j,k,l] + y[i]\n' + ' end\n' + ' end\n' + ' end\n' + ' end\n' + ' return y\n' + 'end\n' + ) + assert source == expected + + +def test_jl_InOutArgument(): + expr = Equality(x, x**2) + name_expr = ("mysqr", expr) + result, = codegen(name_expr, "Julia", header=False, empty=False) + source = result[1] + expected = ( + "function mysqr(x)\n" + " x = x .^ 2\n" + " return x\n" + "end\n" + ) + assert source == expected + + +def test_jl_InOutArgument_order(): + # can specify the order as (x, y) + expr = Equality(x, x**2 + y) + name_expr = ("test", expr) + result, = codegen(name_expr, "Julia", header=False, + empty=False, argument_sequence=(x,y)) + source = result[1] + expected = ( + "function test(x, y)\n" + " x = x .^ 2 + y\n" + " return x\n" + "end\n" + ) + assert source == expected + # make sure it gives (x, y) not (y, x) + expr = Equality(x, x**2 + y) + name_expr = ("test", expr) + result, = codegen(name_expr, "Julia", header=False, empty=False) + source = result[1] + expected = ( + "function test(x, y)\n" + " x = x .^ 2 + y\n" + " return x\n" + "end\n" + ) + assert source == expected + + +def test_jl_not_supported(): + f = Function('f') + name_expr = ("test", [f(x).diff(x), S.ComplexInfinity]) + result, = codegen(name_expr, "Julia", header=False, empty=False) + source = result[1] + expected = ( + "function test(x)\n" + " # unsupported: Derivative(f(x), x)\n" + " # unsupported: zoo\n" + " out1 = Derivative(f(x), x)\n" + " out2 = zoo\n" + " return out1, out2\n" + "end\n" + ) + assert source == expected + + +def test_global_vars_octave(): + x, y, z, t = symbols("x y z t") + result = codegen(('f', x*y), "Julia", header=False, empty=False, + global_vars=(y,)) + source = result[0][1] + expected = ( + "function f(x)\n" + " out1 = x .* y\n" + " return out1\n" + "end\n" + ) + assert source == expected + + result = codegen(('f', x*y+z), "Julia", header=False, empty=False, + argument_sequence=(x, y), global_vars=(z, t)) + source = result[0][1] + expected = ( + "function f(x, y)\n" + " out1 = x .* y + z\n" + " return out1\n" + "end\n" + ) + assert source == expected diff --git 
a/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_codegen_octave.py b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_codegen_octave.py new file mode 100644 index 0000000000000000000000000000000000000000..53634cb1cd945fabcfb87dc2acbf73c9af23e519 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_codegen_octave.py @@ -0,0 +1,589 @@ +from io import StringIO + +from sympy.core import S, symbols, Eq, pi, Catalan, EulerGamma, Function +from sympy.core.relational import Equality +from sympy.functions.elementary.piecewise import Piecewise +from sympy.matrices import Matrix, MatrixSymbol +from sympy.utilities.codegen import OctaveCodeGen, codegen, make_routine +from sympy.testing.pytest import raises +from sympy.testing.pytest import XFAIL +import sympy + + +x, y, z = symbols('x,y,z') + + +def test_empty_m_code(): + code_gen = OctaveCodeGen() + output = StringIO() + code_gen.dump_m([], output, "file", header=False, empty=False) + source = output.getvalue() + assert source == "" + + +def test_m_simple_code(): + name_expr = ("test", (x + y)*z) + result, = codegen(name_expr, "Octave", header=False, empty=False) + assert result[0] == "test.m" + source = result[1] + expected = ( + "function out1 = test(x, y, z)\n" + " out1 = z.*(x + y);\n" + "end\n" + ) + assert source == expected + + +def test_m_simple_code_with_header(): + name_expr = ("test", (x + y)*z) + result, = codegen(name_expr, "Octave", header=True, empty=False) + assert result[0] == "test.m" + source = result[1] + expected = ( + "function out1 = test(x, y, z)\n" + " %TEST Autogenerated by SymPy\n" + " % Code generated with SymPy " + sympy.__version__ + "\n" + " %\n" + " % See http://www.sympy.org/ for more information.\n" + " %\n" + " % This file is part of 'project'\n" + " out1 = z.*(x + y);\n" + "end\n" + ) + assert source == expected + + +def test_m_simple_code_nameout(): + expr = Equality(z, (x + y)) + name_expr = ("test", expr) + result, = codegen(name_expr, "Octave", header=False, empty=False) + source = result[1] + expected = ( + "function z = test(x, y)\n" + " z = x + y;\n" + "end\n" + ) + assert source == expected + + +def test_m_numbersymbol(): + name_expr = ("test", pi**Catalan) + result, = codegen(name_expr, "Octave", header=False, empty=False) + source = result[1] + expected = ( + "function out1 = test()\n" + " out1 = pi^%s;\n" + "end\n" + ) % Catalan.evalf(17) + assert source == expected + + +@XFAIL +def test_m_numbersymbol_no_inline(): + # FIXME: how to pass inline=False to the OctaveCodePrinter? 
+ name_expr = ("test", [pi**Catalan, EulerGamma]) + result, = codegen(name_expr, "Octave", header=False, + empty=False, inline=False) + source = result[1] + expected = ( + "function [out1, out2] = test()\n" + " Catalan = 0.915965594177219; % constant\n" + " EulerGamma = 0.5772156649015329; % constant\n" + " out1 = pi^Catalan;\n" + " out2 = EulerGamma;\n" + "end\n" + ) + assert source == expected + + +def test_m_code_argument_order(): + expr = x + y + routine = make_routine("test", expr, argument_sequence=[z, x, y], language="octave") + code_gen = OctaveCodeGen() + output = StringIO() + code_gen.dump_m([routine], output, "test", header=False, empty=False) + source = output.getvalue() + expected = ( + "function out1 = test(z, x, y)\n" + " out1 = x + y;\n" + "end\n" + ) + assert source == expected + + +def test_multiple_results_m(): + # Here the output order is the input order + expr1 = (x + y)*z + expr2 = (x - y)*z + name_expr = ("test", [expr1, expr2]) + result, = codegen(name_expr, "Octave", header=False, empty=False) + source = result[1] + expected = ( + "function [out1, out2] = test(x, y, z)\n" + " out1 = z.*(x + y);\n" + " out2 = z.*(x - y);\n" + "end\n" + ) + assert source == expected + + +def test_results_named_unordered(): + # Here output order is based on name_expr + A, B, C = symbols('A,B,C') + expr1 = Equality(C, (x + y)*z) + expr2 = Equality(A, (x - y)*z) + expr3 = Equality(B, 2*x) + name_expr = ("test", [expr1, expr2, expr3]) + result, = codegen(name_expr, "Octave", header=False, empty=False) + source = result[1] + expected = ( + "function [C, A, B] = test(x, y, z)\n" + " C = z.*(x + y);\n" + " A = z.*(x - y);\n" + " B = 2*x;\n" + "end\n" + ) + assert source == expected + + +def test_results_named_ordered(): + A, B, C = symbols('A,B,C') + expr1 = Equality(C, (x + y)*z) + expr2 = Equality(A, (x - y)*z) + expr3 = Equality(B, 2*x) + name_expr = ("test", [expr1, expr2, expr3]) + result = codegen(name_expr, "Octave", header=False, empty=False, + argument_sequence=(x, z, y)) + assert result[0][0] == "test.m" + source = result[0][1] + expected = ( + "function [C, A, B] = test(x, z, y)\n" + " C = z.*(x + y);\n" + " A = z.*(x - y);\n" + " B = 2*x;\n" + "end\n" + ) + assert source == expected + + +def test_complicated_m_codegen(): + from sympy.functions.elementary.trigonometric import (cos, sin, tan) + name_expr = ("testlong", + [ ((sin(x) + cos(y) + tan(z))**3).expand(), + cos(cos(cos(cos(cos(cos(cos(cos(x + y + z)))))))) + ]) + result = codegen(name_expr, "Octave", header=False, empty=False) + assert result[0][0] == "testlong.m" + source = result[0][1] + expected = ( + "function [out1, out2] = testlong(x, y, z)\n" + " out1 = sin(x).^3 + 3*sin(x).^2.*cos(y) + 3*sin(x).^2.*tan(z)" + " + 3*sin(x).*cos(y).^2 + 6*sin(x).*cos(y).*tan(z) + 3*sin(x).*tan(z).^2" + " + cos(y).^3 + 3*cos(y).^2.*tan(z) + 3*cos(y).*tan(z).^2 + tan(z).^3;\n" + " out2 = cos(cos(cos(cos(cos(cos(cos(cos(x + y + z))))))));\n" + "end\n" + ) + assert source == expected + + +def test_m_output_arg_mixed_unordered(): + # named outputs are alphabetical, unnamed output appear in the given order + from sympy.functions.elementary.trigonometric import (cos, sin) + a = symbols("a") + name_expr = ("foo", [cos(2*x), Equality(y, sin(x)), cos(x), Equality(a, sin(2*x))]) + result, = codegen(name_expr, "Octave", header=False, empty=False) + assert result[0] == "foo.m" + source = result[1]; + expected = ( + 'function [out1, y, out3, a] = foo(x)\n' + ' out1 = cos(2*x);\n' + ' y = sin(x);\n' + ' out3 = cos(x);\n' + ' a = sin(2*x);\n' + 
'end\n' + ) + assert source == expected + + +def test_m_piecewise_(): + pw = Piecewise((0, x < -1), (x**2, x <= 1), (-x+2, x > 1), (1, True), evaluate=False) + name_expr = ("pwtest", pw) + result, = codegen(name_expr, "Octave", header=False, empty=False) + source = result[1] + expected = ( + "function out1 = pwtest(x)\n" + " out1 = ((x < -1).*(0) + (~(x < -1)).*( ...\n" + " (x <= 1).*(x.^2) + (~(x <= 1)).*( ...\n" + " (x > 1).*(2 - x) + (~(x > 1)).*(1))));\n" + "end\n" + ) + assert source == expected + + +@XFAIL +def test_m_piecewise_no_inline(): + # FIXME: how to pass inline=False to the OctaveCodePrinter? + pw = Piecewise((0, x < -1), (x**2, x <= 1), (-x+2, x > 1), (1, True)) + name_expr = ("pwtest", pw) + result, = codegen(name_expr, "Octave", header=False, empty=False, + inline=False) + source = result[1] + expected = ( + "function out1 = pwtest(x)\n" + " if (x < -1)\n" + " out1 = 0;\n" + " elseif (x <= 1)\n" + " out1 = x.^2;\n" + " elseif (x > 1)\n" + " out1 = -x + 2;\n" + " else\n" + " out1 = 1;\n" + " end\n" + "end\n" + ) + assert source == expected + + +def test_m_multifcns_per_file(): + name_expr = [ ("foo", [2*x, 3*y]), ("bar", [y**2, 4*y]) ] + result = codegen(name_expr, "Octave", header=False, empty=False) + assert result[0][0] == "foo.m" + source = result[0][1]; + expected = ( + "function [out1, out2] = foo(x, y)\n" + " out1 = 2*x;\n" + " out2 = 3*y;\n" + "end\n" + "function [out1, out2] = bar(y)\n" + " out1 = y.^2;\n" + " out2 = 4*y;\n" + "end\n" + ) + assert source == expected + + +def test_m_multifcns_per_file_w_header(): + name_expr = [ ("foo", [2*x, 3*y]), ("bar", [y**2, 4*y]) ] + result = codegen(name_expr, "Octave", header=True, empty=False) + assert result[0][0] == "foo.m" + source = result[0][1]; + expected = ( + "function [out1, out2] = foo(x, y)\n" + " %FOO Autogenerated by SymPy\n" + " % Code generated with SymPy " + sympy.__version__ + "\n" + " %\n" + " % See http://www.sympy.org/ for more information.\n" + " %\n" + " % This file is part of 'project'\n" + " out1 = 2*x;\n" + " out2 = 3*y;\n" + "end\n" + "function [out1, out2] = bar(y)\n" + " out1 = y.^2;\n" + " out2 = 4*y;\n" + "end\n" + ) + assert source == expected + + +def test_m_filename_match_first_fcn(): + name_expr = [ ("foo", [2*x, 3*y]), ("bar", [y**2, 4*y]) ] + raises(ValueError, lambda: codegen(name_expr, + "Octave", prefix="bar", header=False, empty=False)) + + +def test_m_matrix_named(): + e2 = Matrix([[x, 2*y, pi*z]]) + name_expr = ("test", Equality(MatrixSymbol('myout1', 1, 3), e2)) + result = codegen(name_expr, "Octave", header=False, empty=False) + assert result[0][0] == "test.m" + source = result[0][1] + expected = ( + "function myout1 = test(x, y, z)\n" + " myout1 = [x 2*y pi*z];\n" + "end\n" + ) + assert source == expected + + +def test_m_matrix_named_matsym(): + myout1 = MatrixSymbol('myout1', 1, 3) + e2 = Matrix([[x, 2*y, pi*z]]) + name_expr = ("test", Equality(myout1, e2, evaluate=False)) + result, = codegen(name_expr, "Octave", header=False, empty=False) + source = result[1] + expected = ( + "function myout1 = test(x, y, z)\n" + " myout1 = [x 2*y pi*z];\n" + "end\n" + ) + assert source == expected + + +def test_m_matrix_output_autoname(): + expr = Matrix([[x, x+y, 3]]) + name_expr = ("test", expr) + result, = codegen(name_expr, "Octave", header=False, empty=False) + source = result[1] + expected = ( + "function out1 = test(x, y)\n" + " out1 = [x x + y 3];\n" + "end\n" + ) + assert source == expected + + +def test_m_matrix_output_autoname_2(): + e1 = (x + y) + e2 = Matrix([[2*x, 2*y, 2*z]]) + 
e3 = Matrix([[x], [y], [z]]) + e4 = Matrix([[x, y], [z, 16]]) + name_expr = ("test", (e1, e2, e3, e4)) + result, = codegen(name_expr, "Octave", header=False, empty=False) + source = result[1] + expected = ( + "function [out1, out2, out3, out4] = test(x, y, z)\n" + " out1 = x + y;\n" + " out2 = [2*x 2*y 2*z];\n" + " out3 = [x; y; z];\n" + " out4 = [x y; z 16];\n" + "end\n" + ) + assert source == expected + + +def test_m_results_matrix_named_ordered(): + B, C = symbols('B,C') + A = MatrixSymbol('A', 1, 3) + expr1 = Equality(C, (x + y)*z) + expr2 = Equality(A, Matrix([[1, 2, x]])) + expr3 = Equality(B, 2*x) + name_expr = ("test", [expr1, expr2, expr3]) + result, = codegen(name_expr, "Octave", header=False, empty=False, + argument_sequence=(x, z, y)) + source = result[1] + expected = ( + "function [C, A, B] = test(x, z, y)\n" + " C = z.*(x + y);\n" + " A = [1 2 x];\n" + " B = 2*x;\n" + "end\n" + ) + assert source == expected + + +def test_m_matrixsymbol_slice(): + A = MatrixSymbol('A', 2, 3) + B = MatrixSymbol('B', 1, 3) + C = MatrixSymbol('C', 1, 3) + D = MatrixSymbol('D', 2, 1) + name_expr = ("test", [Equality(B, A[0, :]), + Equality(C, A[1, :]), + Equality(D, A[:, 2])]) + result, = codegen(name_expr, "Octave", header=False, empty=False) + source = result[1] + expected = ( + "function [B, C, D] = test(A)\n" + " B = A(1, :);\n" + " C = A(2, :);\n" + " D = A(:, 3);\n" + "end\n" + ) + assert source == expected + + +def test_m_matrixsymbol_slice2(): + A = MatrixSymbol('A', 3, 4) + B = MatrixSymbol('B', 2, 2) + C = MatrixSymbol('C', 2, 2) + name_expr = ("test", [Equality(B, A[0:2, 0:2]), + Equality(C, A[0:2, 1:3])]) + result, = codegen(name_expr, "Octave", header=False, empty=False) + source = result[1] + expected = ( + "function [B, C] = test(A)\n" + " B = A(1:2, 1:2);\n" + " C = A(1:2, 2:3);\n" + "end\n" + ) + assert source == expected + + +def test_m_matrixsymbol_slice3(): + A = MatrixSymbol('A', 8, 7) + B = MatrixSymbol('B', 2, 2) + C = MatrixSymbol('C', 4, 2) + name_expr = ("test", [Equality(B, A[6:, 1::3]), + Equality(C, A[::2, ::3])]) + result, = codegen(name_expr, "Octave", header=False, empty=False) + source = result[1] + expected = ( + "function [B, C] = test(A)\n" + " B = A(7:end, 2:3:end);\n" + " C = A(1:2:end, 1:3:end);\n" + "end\n" + ) + assert source == expected + + +def test_m_matrixsymbol_slice_autoname(): + A = MatrixSymbol('A', 2, 3) + B = MatrixSymbol('B', 1, 3) + name_expr = ("test", [Equality(B, A[0,:]), A[1,:], A[:,0], A[:,1]]) + result, = codegen(name_expr, "Octave", header=False, empty=False) + source = result[1] + expected = ( + "function [B, out2, out3, out4] = test(A)\n" + " B = A(1, :);\n" + " out2 = A(2, :);\n" + " out3 = A(:, 1);\n" + " out4 = A(:, 2);\n" + "end\n" + ) + assert source == expected + + +def test_m_loops(): + # Note: an Octave programmer would probably vectorize this across one or + # more dimensions. Also, size(A) would be used rather than passing in m + # and n. Perhaps users would expect us to vectorize automatically here? + # Or is it possible to represent such things using IndexedBase? 
+ from sympy.tensor import IndexedBase, Idx + from sympy.core.symbol import symbols + n, m = symbols('n m', integer=True) + A = IndexedBase('A') + x = IndexedBase('x') + y = IndexedBase('y') + i = Idx('i', m) + j = Idx('j', n) + result, = codegen(('mat_vec_mult', Eq(y[i], A[i, j]*x[j])), "Octave", + header=False, empty=False) + source = result[1] + expected = ( + 'function y = mat_vec_mult(A, m, n, x)\n' + ' for i = 1:m\n' + ' y(i) = 0;\n' + ' end\n' + ' for i = 1:m\n' + ' for j = 1:n\n' + ' y(i) = %(rhs)s + y(i);\n' + ' end\n' + ' end\n' + 'end\n' + ) + assert (source == expected % {'rhs': 'A(%s, %s).*x(j)' % (i, j)} or + source == expected % {'rhs': 'x(j).*A(%s, %s)' % (i, j)}) + + +def test_m_tensor_loops_multiple_contractions(): + # see comments in previous test about vectorizing + from sympy.tensor import IndexedBase, Idx + from sympy.core.symbol import symbols + n, m, o, p = symbols('n m o p', integer=True) + A = IndexedBase('A') + B = IndexedBase('B') + y = IndexedBase('y') + i = Idx('i', m) + j = Idx('j', n) + k = Idx('k', o) + l = Idx('l', p) + result, = codegen(('tensorthing', Eq(y[i], B[j, k, l]*A[i, j, k, l])), + "Octave", header=False, empty=False) + source = result[1] + expected = ( + 'function y = tensorthing(A, B, m, n, o, p)\n' + ' for i = 1:m\n' + ' y(i) = 0;\n' + ' end\n' + ' for i = 1:m\n' + ' for j = 1:n\n' + ' for k = 1:o\n' + ' for l = 1:p\n' + ' y(i) = A(i, j, k, l).*B(j, k, l) + y(i);\n' + ' end\n' + ' end\n' + ' end\n' + ' end\n' + 'end\n' + ) + assert source == expected + + +def test_m_InOutArgument(): + expr = Equality(x, x**2) + name_expr = ("mysqr", expr) + result, = codegen(name_expr, "Octave", header=False, empty=False) + source = result[1] + expected = ( + "function x = mysqr(x)\n" + " x = x.^2;\n" + "end\n" + ) + assert source == expected + + +def test_m_InOutArgument_order(): + # can specify the order as (x, y) + expr = Equality(x, x**2 + y) + name_expr = ("test", expr) + result, = codegen(name_expr, "Octave", header=False, + empty=False, argument_sequence=(x,y)) + source = result[1] + expected = ( + "function x = test(x, y)\n" + " x = x.^2 + y;\n" + "end\n" + ) + assert source == expected + # make sure it gives (x, y) not (y, x) + expr = Equality(x, x**2 + y) + name_expr = ("test", expr) + result, = codegen(name_expr, "Octave", header=False, empty=False) + source = result[1] + expected = ( + "function x = test(x, y)\n" + " x = x.^2 + y;\n" + "end\n" + ) + assert source == expected + + +def test_m_not_supported(): + f = Function('f') + name_expr = ("test", [f(x).diff(x), S.ComplexInfinity]) + result, = codegen(name_expr, "Octave", header=False, empty=False) + source = result[1] + expected = ( + "function [out1, out2] = test(x)\n" + " % unsupported: Derivative(f(x), x)\n" + " % unsupported: zoo\n" + " out1 = Derivative(f(x), x);\n" + " out2 = zoo;\n" + "end\n" + ) + assert source == expected + + +def test_global_vars_octave(): + x, y, z, t = symbols("x y z t") + result = codegen(('f', x*y), "Octave", header=False, empty=False, + global_vars=(y,)) + source = result[0][1] + expected = ( + "function out1 = f(x)\n" + " global y\n" + " out1 = x.*y;\n" + "end\n" + ) + assert source == expected + + result = codegen(('f', x*y+z), "Octave", header=False, empty=False, + argument_sequence=(x, y), global_vars=(z, t)) + source = result[0][1] + expected = ( + "function out1 = f(x, y)\n" + " global t z\n" + " out1 = x.*y + z;\n" + "end\n" + ) + assert source == expected diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_codegen_rust.py 
b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_codegen_rust.py new file mode 100644 index 0000000000000000000000000000000000000000..235cc6350e051ab1a2915284aa4669274030943b --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_codegen_rust.py @@ -0,0 +1,401 @@ +from io import StringIO + +from sympy.core import S, symbols, pi, Catalan, EulerGamma, Function +from sympy.core.relational import Equality +from sympy.functions.elementary.piecewise import Piecewise +from sympy.utilities.codegen import RustCodeGen, codegen, make_routine +from sympy.testing.pytest import XFAIL +import sympy + + +x, y, z = symbols('x,y,z') + + +def test_empty_rust_code(): + code_gen = RustCodeGen() + output = StringIO() + code_gen.dump_rs([], output, "file", header=False, empty=False) + source = output.getvalue() + assert source == "" + + +def test_simple_rust_code(): + name_expr = ("test", (x + y)*z) + result, = codegen(name_expr, "Rust", header=False, empty=False) + assert result[0] == "test.rs" + source = result[1] + expected = ( + "fn test(x: f64, y: f64, z: f64) -> f64 {\n" + " let out1 = z*(x + y);\n" + " out1\n" + "}\n" + ) + assert source == expected + + +def test_simple_code_with_header(): + name_expr = ("test", (x + y)*z) + result, = codegen(name_expr, "Rust", header=True, empty=False) + assert result[0] == "test.rs" + source = result[1] + version_str = "Code generated with SymPy %s" % sympy.__version__ + version_line = version_str.center(76).rstrip() + expected = ( + "/*\n" + " *%(version_line)s\n" + " *\n" + " * See http://www.sympy.org/ for more information.\n" + " *\n" + " * This file is part of 'project'\n" + " */\n" + "fn test(x: f64, y: f64, z: f64) -> f64 {\n" + " let out1 = z*(x + y);\n" + " out1\n" + "}\n" + ) % {'version_line': version_line} + assert source == expected + + +def test_simple_code_nameout(): + expr = Equality(z, (x + y)) + name_expr = ("test", expr) + result, = codegen(name_expr, "Rust", header=False, empty=False) + source = result[1] + expected = ( + "fn test(x: f64, y: f64) -> f64 {\n" + " let z = x + y;\n" + " z\n" + "}\n" + ) + assert source == expected + + +def test_numbersymbol(): + name_expr = ("test", pi**Catalan) + result, = codegen(name_expr, "Rust", header=False, empty=False) + source = result[1] + expected = ( + "fn test() -> f64 {\n" + " const Catalan: f64 = %s;\n" + " let out1 = PI.powf(Catalan);\n" + " out1\n" + "}\n" + ) % Catalan.evalf(17) + assert source == expected + + +@XFAIL +def test_numbersymbol_inline(): + # FIXME: how to pass inline to the RustCodePrinter? 
+ name_expr = ("test", [pi**Catalan, EulerGamma]) + result, = codegen(name_expr, "Rust", header=False, + empty=False, inline=True) + source = result[1] + expected = ( + "fn test() -> (f64, f64) {\n" + " const Catalan: f64 = %s;\n" + " const EulerGamma: f64 = %s;\n" + " let out1 = PI.powf(Catalan);\n" + " let out2 = EulerGamma);\n" + " (out1, out2)\n" + "}\n" + ) % (Catalan.evalf(17), EulerGamma.evalf(17)) + assert source == expected + + +def test_argument_order(): + expr = x + y + routine = make_routine("test", expr, argument_sequence=[z, x, y], language="rust") + code_gen = RustCodeGen() + output = StringIO() + code_gen.dump_rs([routine], output, "test", header=False, empty=False) + source = output.getvalue() + expected = ( + "fn test(z: f64, x: f64, y: f64) -> f64 {\n" + " let out1 = x + y;\n" + " out1\n" + "}\n" + ) + assert source == expected + + +def test_multiple_results_rust(): + # Here the output order is the input order + expr1 = (x + y)*z + expr2 = (x - y)*z + name_expr = ("test", [expr1, expr2]) + result, = codegen(name_expr, "Rust", header=False, empty=False) + source = result[1] + expected = ( + "fn test(x: f64, y: f64, z: f64) -> (f64, f64) {\n" + " let out1 = z*(x + y);\n" + " let out2 = z*(x - y);\n" + " (out1, out2)\n" + "}\n" + ) + assert source == expected + + +def test_results_named_unordered(): + # Here output order is based on name_expr + A, B, C = symbols('A,B,C') + expr1 = Equality(C, (x + y)*z) + expr2 = Equality(A, (x - y)*z) + expr3 = Equality(B, 2*x) + name_expr = ("test", [expr1, expr2, expr3]) + result, = codegen(name_expr, "Rust", header=False, empty=False) + source = result[1] + expected = ( + "fn test(x: f64, y: f64, z: f64) -> (f64, f64, f64) {\n" + " let C = z*(x + y);\n" + " let A = z*(x - y);\n" + " let B = 2*x;\n" + " (C, A, B)\n" + "}\n" + ) + assert source == expected + + +def test_results_named_ordered(): + A, B, C = symbols('A,B,C') + expr1 = Equality(C, (x + y)*z) + expr2 = Equality(A, (x - y)*z) + expr3 = Equality(B, 2*x) + name_expr = ("test", [expr1, expr2, expr3]) + result = codegen(name_expr, "Rust", header=False, empty=False, + argument_sequence=(x, z, y)) + assert result[0][0] == "test.rs" + source = result[0][1] + expected = ( + "fn test(x: f64, z: f64, y: f64) -> (f64, f64, f64) {\n" + " let C = z*(x + y);\n" + " let A = z*(x - y);\n" + " let B = 2*x;\n" + " (C, A, B)\n" + "}\n" + ) + assert source == expected + + +def test_complicated_rs_codegen(): + from sympy.functions.elementary.trigonometric import (cos, sin, tan) + name_expr = ("testlong", + [ ((sin(x) + cos(y) + tan(z))**3).expand(), + cos(cos(cos(cos(cos(cos(cos(cos(x + y + z)))))))) + ]) + result = codegen(name_expr, "Rust", header=False, empty=False) + assert result[0][0] == "testlong.rs" + source = result[0][1] + expected = ( + "fn testlong(x: f64, y: f64, z: f64) -> (f64, f64) {\n" + " let out1 = x.sin().powi(3) + 3*x.sin().powi(2)*y.cos()" + " + 3*x.sin().powi(2)*z.tan() + 3*x.sin()*y.cos().powi(2)" + " + 6*x.sin()*y.cos()*z.tan() + 3*x.sin()*z.tan().powi(2)" + " + y.cos().powi(3) + 3*y.cos().powi(2)*z.tan()" + " + 3*y.cos()*z.tan().powi(2) + z.tan().powi(3);\n" + " let out2 = (x + y + z).cos().cos().cos().cos()" + ".cos().cos().cos().cos();\n" + " (out1, out2)\n" + "}\n" + ) + assert source == expected + + +def test_output_arg_mixed_unordered(): + # named outputs are alphabetical, unnamed output appear in the given order + from sympy.functions.elementary.trigonometric import (cos, sin) + a = symbols("a") + name_expr = ("foo", [cos(2*x), Equality(y, sin(x)), cos(x), 
Equality(a, sin(2*x))]) + result, = codegen(name_expr, "Rust", header=False, empty=False) + assert result[0] == "foo.rs" + source = result[1]; + expected = ( + "fn foo(x: f64) -> (f64, f64, f64, f64) {\n" + " let out1 = (2*x).cos();\n" + " let y = x.sin();\n" + " let out3 = x.cos();\n" + " let a = (2*x).sin();\n" + " (out1, y, out3, a)\n" + "}\n" + ) + assert source == expected + + +def test_piecewise_(): + pw = Piecewise((0, x < -1), (x**2, x <= 1), (-x+2, x > 1), (1, True), evaluate=False) + name_expr = ("pwtest", pw) + result, = codegen(name_expr, "Rust", header=False, empty=False) + source = result[1] + expected = ( + "fn pwtest(x: f64) -> f64 {\n" + " let out1 = if (x < -1) {\n" + " 0\n" + " } else if (x <= 1) {\n" + " x.powi(2)\n" + " } else if (x > 1) {\n" + " 2 - x\n" + " } else {\n" + " 1\n" + " };\n" + " out1\n" + "}\n" + ) + assert source == expected + + +@XFAIL +def test_piecewise_inline(): + # FIXME: how to pass inline to the RustCodePrinter? + pw = Piecewise((0, x < -1), (x**2, x <= 1), (-x+2, x > 1), (1, True)) + name_expr = ("pwtest", pw) + result, = codegen(name_expr, "Rust", header=False, empty=False, + inline=True) + source = result[1] + expected = ( + "fn pwtest(x: f64) -> f64 {\n" + " let out1 = if (x < -1) { 0 } else if (x <= 1) { x.powi(2) }" + " else if (x > 1) { -x + 2 } else { 1 };\n" + " out1\n" + "}\n" + ) + assert source == expected + + +def test_multifcns_per_file(): + name_expr = [ ("foo", [2*x, 3*y]), ("bar", [y**2, 4*y]) ] + result = codegen(name_expr, "Rust", header=False, empty=False) + assert result[0][0] == "foo.rs" + source = result[0][1]; + expected = ( + "fn foo(x: f64, y: f64) -> (f64, f64) {\n" + " let out1 = 2*x;\n" + " let out2 = 3*y;\n" + " (out1, out2)\n" + "}\n" + "fn bar(y: f64) -> (f64, f64) {\n" + " let out1 = y.powi(2);\n" + " let out2 = 4*y;\n" + " (out1, out2)\n" + "}\n" + ) + assert source == expected + + +def test_multifcns_per_file_w_header(): + name_expr = [ ("foo", [2*x, 3*y]), ("bar", [y**2, 4*y]) ] + result = codegen(name_expr, "Rust", header=True, empty=False) + assert result[0][0] == "foo.rs" + source = result[0][1]; + version_str = "Code generated with SymPy %s" % sympy.__version__ + version_line = version_str.center(76).rstrip() + expected = ( + "/*\n" + " *%(version_line)s\n" + " *\n" + " * See http://www.sympy.org/ for more information.\n" + " *\n" + " * This file is part of 'project'\n" + " */\n" + "fn foo(x: f64, y: f64) -> (f64, f64) {\n" + " let out1 = 2*x;\n" + " let out2 = 3*y;\n" + " (out1, out2)\n" + "}\n" + "fn bar(y: f64) -> (f64, f64) {\n" + " let out1 = y.powi(2);\n" + " let out2 = 4*y;\n" + " (out1, out2)\n" + "}\n" + ) % {'version_line': version_line} + assert source == expected + + +def test_filename_match_prefix(): + name_expr = [ ("foo", [2*x, 3*y]), ("bar", [y**2, 4*y]) ] + result, = codegen(name_expr, "Rust", prefix="baz", header=False, + empty=False) + assert result[0] == "baz.rs" + + +def test_InOutArgument(): + expr = Equality(x, x**2) + name_expr = ("mysqr", expr) + result, = codegen(name_expr, "Rust", header=False, empty=False) + source = result[1] + expected = ( + "fn mysqr(x: f64) -> f64 {\n" + " let x = x.powi(2);\n" + " x\n" + "}\n" + ) + assert source == expected + + +def test_InOutArgument_order(): + # can specify the order as (x, y) + expr = Equality(x, x**2 + y) + name_expr = ("test", expr) + result, = codegen(name_expr, "Rust", header=False, + empty=False, argument_sequence=(x,y)) + source = result[1] + expected = ( + "fn test(x: f64, y: f64) -> f64 {\n" + " let x = x.powi(2) + y;\n" + " x\n" 
+ "}\n" + ) + assert source == expected + # make sure it gives (x, y) not (y, x) + expr = Equality(x, x**2 + y) + name_expr = ("test", expr) + result, = codegen(name_expr, "Rust", header=False, empty=False) + source = result[1] + expected = ( + "fn test(x: f64, y: f64) -> f64 {\n" + " let x = x.powi(2) + y;\n" + " x\n" + "}\n" + ) + assert source == expected + + +def test_not_supported(): + f = Function('f') + name_expr = ("test", [f(x).diff(x), S.ComplexInfinity]) + result, = codegen(name_expr, "Rust", header=False, empty=False) + source = result[1] + expected = ( + "fn test(x: f64) -> (f64, f64) {\n" + " // unsupported: Derivative(f(x), x)\n" + " // unsupported: zoo\n" + " let out1 = Derivative(f(x), x);\n" + " let out2 = zoo;\n" + " (out1, out2)\n" + "}\n" + ) + assert source == expected + + +def test_global_vars_rust(): + x, y, z, t = symbols("x y z t") + result = codegen(('f', x*y), "Rust", header=False, empty=False, + global_vars=(y,)) + source = result[0][1] + expected = ( + "fn f(x: f64) -> f64 {\n" + " let out1 = x*y;\n" + " out1\n" + "}\n" + ) + assert source == expected + + result = codegen(('f', x*y+z), "Rust", header=False, empty=False, + argument_sequence=(x, y), global_vars=(z, t)) + source = result[0][1] + expected = ( + "fn f(x: f64, y: f64) -> f64 {\n" + " let out1 = x*y + z;\n" + " out1\n" + "}\n" + ) + assert source == expected diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_decorator.py b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_decorator.py new file mode 100644 index 0000000000000000000000000000000000000000..b1870d4db8f719fdabfeab14120bfb3ce10131a9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_decorator.py @@ -0,0 +1,129 @@ +from functools import wraps + +from sympy.utilities.decorator import threaded, xthreaded, memoize_property, deprecated +from sympy.testing.pytest import warns_deprecated_sympy + +from sympy.core.basic import Basic +from sympy.core.relational import Eq +from sympy.matrices.dense import Matrix + +from sympy.abc import x, y + + +def test_threaded(): + @threaded + def function(expr, *args): + return 2*expr + sum(args) + + assert function(Matrix([[x, y], [1, x]]), 1, 2) == \ + Matrix([[2*x + 3, 2*y + 3], [5, 2*x + 3]]) + + assert function(Eq(x, y), 1, 2) == Eq(2*x + 3, 2*y + 3) + + assert function([x, y], 1, 2) == [2*x + 3, 2*y + 3] + assert function((x, y), 1, 2) == (2*x + 3, 2*y + 3) + + assert function({x, y}, 1, 2) == {2*x + 3, 2*y + 3} + + @threaded + def function(expr, n): + return expr**n + + assert function(x + y, 2) == x**2 + y**2 + assert function(x, 2) == x**2 + + +def test_xthreaded(): + @xthreaded + def function(expr, n): + return expr**n + + assert function(x + y, 2) == (x + y)**2 + + +def test_wraps(): + def my_func(x): + """My function. """ + + my_func.is_my_func = True + + new_my_func = threaded(my_func) + new_my_func = wraps(my_func)(new_my_func) + + assert new_my_func.__name__ == 'my_func' + assert new_my_func.__doc__ == 'My function. ' + assert hasattr(new_my_func, 'is_my_func') + assert new_my_func.is_my_func is True + + +def test_memoize_property(): + class TestMemoize(Basic): + @memoize_property + def prop(self): + return Basic() + + member = TestMemoize() + obj1 = member.prop + obj2 = member.prop + assert obj1 is obj2 + +def test_deprecated(): + @deprecated('deprecated_function is deprecated', + deprecated_since_version='1.10', + # This is the target at the top of the file, which will never + # go away. 
+ active_deprecations_target='active-deprecations') + def deprecated_function(x): + return x + + with warns_deprecated_sympy(): + assert deprecated_function(1) == 1 + + @deprecated('deprecated_class is deprecated', + deprecated_since_version='1.10', + active_deprecations_target='active-deprecations') + class deprecated_class: + pass + + with warns_deprecated_sympy(): + assert isinstance(deprecated_class(), deprecated_class) + + # Ensure the class decorator works even when the class never returns + # itself + @deprecated('deprecated_class_new is deprecated', + deprecated_since_version='1.10', + active_deprecations_target='active-deprecations') + class deprecated_class_new: + def __new__(cls, arg): + return arg + + with warns_deprecated_sympy(): + assert deprecated_class_new(1) == 1 + + @deprecated('deprecated_class_init is deprecated', + deprecated_since_version='1.10', + active_deprecations_target='active-deprecations') + class deprecated_class_init: + def __init__(self, arg): + self.arg = 1 + + with warns_deprecated_sympy(): + assert deprecated_class_init(1).arg == 1 + + @deprecated('deprecated_class_new_init is deprecated', + deprecated_since_version='1.10', + active_deprecations_target='active-deprecations') + class deprecated_class_new_init: + def __new__(cls, arg): + if arg == 0: + return arg + return object.__new__(cls) + + def __init__(self, arg): + self.arg = 1 + + with warns_deprecated_sympy(): + assert deprecated_class_new_init(0) == 0 + + with warns_deprecated_sympy(): + assert deprecated_class_new_init(1).arg == 1 diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_deprecated.py b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_deprecated.py new file mode 100644 index 0000000000000000000000000000000000000000..dd4534ef1abc38ff368011b3ef9d11c497f3675b --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_deprecated.py @@ -0,0 +1,13 @@ +from sympy.testing.pytest import warns_deprecated_sympy + +# See https://github.com/sympy/sympy/pull/18095 + +def test_deprecated_utilities(): + with warns_deprecated_sympy(): + import sympy.utilities.pytest # noqa:F401 + with warns_deprecated_sympy(): + import sympy.utilities.runtests # noqa:F401 + with warns_deprecated_sympy(): + import sympy.utilities.randtest # noqa:F401 + with warns_deprecated_sympy(): + import sympy.utilities.tmpfiles # noqa:F401 diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_enumerative.py b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_enumerative.py new file mode 100644 index 0000000000000000000000000000000000000000..a29c6341dd6f3a19f145c94f6e996cc348428325 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_enumerative.py @@ -0,0 +1,178 @@ +from itertools import zip_longest + +from sympy.utilities.enumerative import ( + list_visitor, + MultisetPartitionTraverser, + multiset_partitions_taocp + ) +from sympy.utilities.iterables import _set_partitions + +# first some functions only useful as test scaffolding - these provide +# straightforward, but slow reference implementations against which to +# compare the real versions, and also a comparison to verify that +# different versions are giving identical results. + +def part_range_filter(partition_iterator, lb, ub): + """ + Filters (on the number of parts) a multiset partition enumeration + + Arguments + ========= + + lb, and ub are a range (in the Python slice sense) on the lpart + variable returned from a multiset partition enumeration. 
Recall + that lpart is 0-based (it points to the topmost part on the part + stack), so if you want to return parts of sizes 2,3,4,5 you would + use lb=1 and ub=5. + """ + for state in partition_iterator: + f, lpart, pstack = state + if lpart >= lb and lpart < ub: + yield state + +def multiset_partitions_baseline(multiplicities, components): + """Enumerates partitions of a multiset + + Parameters + ========== + + multiplicities + list of integer multiplicities of the components of the multiset. + + components + the components (elements) themselves + + Returns + ======= + + Set of partitions. Each partition is tuple of parts, and each + part is a tuple of components (with repeats to indicate + multiplicity) + + Notes + ===== + + Multiset partitions can be created as equivalence classes of set + partitions, and this function does just that. This approach is + slow and memory intensive compared to the more advanced algorithms + available, but the code is simple and easy to understand. Hence + this routine is strictly for testing -- to provide a + straightforward baseline against which to regress the production + versions. (This code is a simplified version of an earlier + production implementation.) + """ + + canon = [] # list of components with repeats + for ct, elem in zip(multiplicities, components): + canon.extend([elem]*ct) + + # accumulate the multiset partitions in a set to eliminate dups + cache = set() + n = len(canon) + for nc, q in _set_partitions(n): + rv = [[] for i in range(nc)] + for i in range(n): + rv[q[i]].append(canon[i]) + canonical = tuple( + sorted([tuple(p) for p in rv])) + cache.add(canonical) + return cache + + +def compare_multiset_w_baseline(multiplicities): + """ + Enumerates the partitions of multiset with AOCP algorithm and + baseline implementation, and compare the results. + + """ + letters = "abcdefghijklmnopqrstuvwxyz" + bl_partitions = multiset_partitions_baseline(multiplicities, letters) + + # The partitions returned by the different algorithms may have + # their parts in different orders. Also, they generate partitions + # in different orders. Hence the sorting, and set comparison. + + aocp_partitions = set() + for state in multiset_partitions_taocp(multiplicities): + p1 = tuple(sorted( + [tuple(p) for p in list_visitor(state, letters)])) + aocp_partitions.add(p1) + + assert bl_partitions == aocp_partitions + +def compare_multiset_states(s1, s2): + """compare for equality two instances of multiset partition states + + This is useful for comparing different versions of the algorithm + to verify correctness.""" + # Comparison is physical, the only use of semantics is to ignore + # trash off the top of the stack. + f1, lpart1, pstack1 = s1 + f2, lpart2, pstack2 = s2 + + if (lpart1 == lpart2) and (f1[0:lpart1+1] == f2[0:lpart2+1]): + if pstack1[0:f1[lpart1+1]] == pstack2[0:f2[lpart2+1]]: + return True + return False + +def test_multiset_partitions_taocp(): + """Compares the output of multiset_partitions_taocp with a baseline + (set partition based) implementation.""" + + # Test cases should not be too large, since the baseline + # implementation is fairly slow. 
+ multiplicities = [2,2] + compare_multiset_w_baseline(multiplicities) + + multiplicities = [4,3,1] + compare_multiset_w_baseline(multiplicities) + +def test_multiset_partitions_versions(): + """Compares Knuth-based versions of multiset_partitions""" + multiplicities = [5,2,2,1] + m = MultisetPartitionTraverser() + for s1, s2 in zip_longest(m.enum_all(multiplicities), + multiset_partitions_taocp(multiplicities)): + assert compare_multiset_states(s1, s2) + +def subrange_exercise(mult, lb, ub): + """Compare filter-based and more optimized subrange implementations + + Helper for tests, called with both small and larger multisets. + """ + m = MultisetPartitionTraverser() + assert m.count_partitions(mult) == \ + m.count_partitions_slow(mult) + + # Note - multiple traversals from the same + # MultisetPartitionTraverser object cannot execute at the same + # time, hence make several instances here. + ma = MultisetPartitionTraverser() + mc = MultisetPartitionTraverser() + md = MultisetPartitionTraverser() + + # Several paths to compute just the size two partitions + a_it = ma.enum_range(mult, lb, ub) + b_it = part_range_filter(multiset_partitions_taocp(mult), lb, ub) + c_it = part_range_filter(mc.enum_small(mult, ub), lb, sum(mult)) + d_it = part_range_filter(md.enum_large(mult, lb), 0, ub) + + for sa, sb, sc, sd in zip_longest(a_it, b_it, c_it, d_it): + assert compare_multiset_states(sa, sb) + assert compare_multiset_states(sa, sc) + assert compare_multiset_states(sa, sd) + +def test_subrange(): + # Quick, but doesn't hit some of the corner cases + mult = [4,4,2,1] # mississippi + lb = 1 + ub = 2 + subrange_exercise(mult, lb, ub) + + +def test_subrange_large(): + # takes a second or so, depending on cpu, Python version, etc. + mult = [6,3,2,1] + lb = 4 + ub = 7 + subrange_exercise(mult, lb, ub) diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_exceptions.py b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..d91e55e95d0ae4ac57cdd1989e0b3d39a55cd07d --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_exceptions.py @@ -0,0 +1,12 @@ +from sympy.testing.pytest import raises +from sympy.utilities.exceptions import sympy_deprecation_warning + +# Only test exceptions here because the other cases are tested in the +# warns_deprecated_sympy tests +def test_sympy_deprecation_warning(): + raises(TypeError, lambda: sympy_deprecation_warning('test', + deprecated_since_version=1.10, + active_deprecations_target='active-deprecations')) + + raises(ValueError, lambda: sympy_deprecation_warning('test', + deprecated_since_version="1.10", active_deprecations_target='(active-deprecations)=')) diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_iterables.py b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_iterables.py new file mode 100644 index 0000000000000000000000000000000000000000..0f343e8781417e8c7953feb545647e190256a1c9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_iterables.py @@ -0,0 +1,936 @@ +from textwrap import dedent +from itertools import islice, product + +from sympy.core.basic import Basic +from sympy.core.numbers import Integer +from sympy.core.sorting import ordered +from sympy.core.symbol import (Dummy, symbols) +from sympy.functions.combinatorial.factorials import factorial +from sympy.matrices.dense import Matrix +from sympy.combinatorics import RGS_enum, RGS_unrank, Permutation +from 
sympy.utilities.iterables import ( + _partition, _set_partitions, binary_partitions, bracelets, capture, + cartes, common_prefix, common_suffix, connected_components, dict_merge, + filter_symbols, flatten, generate_bell, generate_derangements, + generate_involutions, generate_oriented_forest, group, has_dups, ibin, + iproduct, kbins, minlex, multiset, multiset_combinations, + multiset_partitions, multiset_permutations, necklaces, numbered_symbols, + partitions, permutations, postfixes, + prefixes, reshape, rotate_left, rotate_right, runs, sift, + strongly_connected_components, subsets, take, topological_sort, unflatten, + uniq, variations, ordered_partitions, rotations, is_palindromic, iterable, + NotIterable, multiset_derangements, + sequence_partitions, sequence_partitions_empty) +from sympy.utilities.enumerative import ( + factoring_visitor, multiset_partitions_taocp ) + +from sympy.core.singleton import S +from sympy.testing.pytest import raises, warns_deprecated_sympy + +w, x, y, z = symbols('w,x,y,z') + + +def test_deprecated_iterables(): + from sympy.utilities.iterables import default_sort_key, ordered + with warns_deprecated_sympy(): + assert list(ordered([y, x])) == [x, y] + with warns_deprecated_sympy(): + assert sorted([y, x], key=default_sort_key) == [x, y] + + +def test_is_palindromic(): + assert is_palindromic('') + assert is_palindromic('x') + assert is_palindromic('xx') + assert is_palindromic('xyx') + assert not is_palindromic('xy') + assert not is_palindromic('xyzx') + assert is_palindromic('xxyzzyx', 1) + assert not is_palindromic('xxyzzyx', 2) + assert is_palindromic('xxyzzyx', 2, -1) + assert is_palindromic('xxyzzyx', 2, 6) + assert is_palindromic('xxyzyx', 1) + assert not is_palindromic('xxyzyx', 2) + assert is_palindromic('xxyzyx', 2, 2 + 3) + + +def test_flatten(): + assert flatten((1, (1,))) == [1, 1] + assert flatten((x, (x,))) == [x, x] + + ls = [[(-2, -1), (1, 2)], [(0, 0)]] + + assert flatten(ls, levels=0) == ls + assert flatten(ls, levels=1) == [(-2, -1), (1, 2), (0, 0)] + assert flatten(ls, levels=2) == [-2, -1, 1, 2, 0, 0] + assert flatten(ls, levels=3) == [-2, -1, 1, 2, 0, 0] + + raises(ValueError, lambda: flatten(ls, levels=-1)) + + class MyOp(Basic): + pass + + assert flatten([MyOp(x, y), z]) == [MyOp(x, y), z] + assert flatten([MyOp(x, y), z], cls=MyOp) == [x, y, z] + + assert flatten({1, 11, 2}) == list({1, 11, 2}) + + +def test_iproduct(): + assert list(iproduct()) == [()] + assert list(iproduct([])) == [] + assert list(iproduct([1,2,3])) == [(1,),(2,),(3,)] + assert sorted(iproduct([1, 2], [3, 4, 5])) == [ + (1,3),(1,4),(1,5),(2,3),(2,4),(2,5)] + assert sorted(iproduct([0,1],[0,1],[0,1])) == [ + (0,0,0),(0,0,1),(0,1,0),(0,1,1),(1,0,0),(1,0,1),(1,1,0),(1,1,1)] + assert iterable(iproduct(S.Integers)) is True + assert iterable(iproduct(S.Integers, S.Integers)) is True + assert (3,) in iproduct(S.Integers) + assert (4, 5) in iproduct(S.Integers, S.Integers) + assert (1, 2, 3) in iproduct(S.Integers, S.Integers, S.Integers) + triples = set(islice(iproduct(S.Integers, S.Integers, S.Integers), 1000)) + for n1, n2, n3 in triples: + assert isinstance(n1, Integer) + assert isinstance(n2, Integer) + assert isinstance(n3, Integer) + for t in set(product(*([range(-2, 3)]*3))): + assert t in iproduct(S.Integers, S.Integers, S.Integers) + + +def test_group(): + assert group([]) == [] + assert group([], multiple=False) == [] + + assert group([1]) == [[1]] + assert group([1], multiple=False) == [(1, 1)] + + assert group([1, 1]) == [[1, 1]] + assert group([1, 1], 
multiple=False) == [(1, 2)] + + assert group([1, 1, 1]) == [[1, 1, 1]] + assert group([1, 1, 1], multiple=False) == [(1, 3)] + + assert group([1, 2, 1]) == [[1], [2], [1]] + assert group([1, 2, 1], multiple=False) == [(1, 1), (2, 1), (1, 1)] + + assert group([1, 1, 2, 2, 2, 1, 3, 3]) == [[1, 1], [2, 2, 2], [1], [3, 3]] + assert group([1, 1, 2, 2, 2, 1, 3, 3], multiple=False) == [(1, 2), + (2, 3), (1, 1), (3, 2)] + + +def test_subsets(): + # combinations + assert list(subsets([1, 2, 3], 0)) == [()] + assert list(subsets([1, 2, 3], 1)) == [(1,), (2,), (3,)] + assert list(subsets([1, 2, 3], 2)) == [(1, 2), (1, 3), (2, 3)] + assert list(subsets([1, 2, 3], 3)) == [(1, 2, 3)] + l = list(range(4)) + assert list(subsets(l, 0, repetition=True)) == [()] + assert list(subsets(l, 1, repetition=True)) == [(0,), (1,), (2,), (3,)] + assert list(subsets(l, 2, repetition=True)) == [(0, 0), (0, 1), (0, 2), + (0, 3), (1, 1), (1, 2), + (1, 3), (2, 2), (2, 3), + (3, 3)] + assert list(subsets(l, 3, repetition=True)) == [(0, 0, 0), (0, 0, 1), + (0, 0, 2), (0, 0, 3), + (0, 1, 1), (0, 1, 2), + (0, 1, 3), (0, 2, 2), + (0, 2, 3), (0, 3, 3), + (1, 1, 1), (1, 1, 2), + (1, 1, 3), (1, 2, 2), + (1, 2, 3), (1, 3, 3), + (2, 2, 2), (2, 2, 3), + (2, 3, 3), (3, 3, 3)] + assert len(list(subsets(l, 4, repetition=True))) == 35 + + assert list(subsets(l[:2], 3, repetition=False)) == [] + assert list(subsets(l[:2], 3, repetition=True)) == [(0, 0, 0), + (0, 0, 1), + (0, 1, 1), + (1, 1, 1)] + assert list(subsets([1, 2], repetition=True)) == \ + [(), (1,), (2,), (1, 1), (1, 2), (2, 2)] + assert list(subsets([1, 2], repetition=False)) == \ + [(), (1,), (2,), (1, 2)] + assert list(subsets([1, 2, 3], 2)) == \ + [(1, 2), (1, 3), (2, 3)] + assert list(subsets([1, 2, 3], 2, repetition=True)) == \ + [(1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3)] + + +def test_variations(): + # permutations + l = list(range(4)) + assert list(variations(l, 0, repetition=False)) == [()] + assert list(variations(l, 1, repetition=False)) == [(0,), (1,), (2,), (3,)] + assert list(variations(l, 2, repetition=False)) == [(0, 1), (0, 2), (0, 3), (1, 0), (1, 2), (1, 3), (2, 0), (2, 1), (2, 3), (3, 0), (3, 1), (3, 2)] + assert list(variations(l, 3, repetition=False)) == [(0, 1, 2), (0, 1, 3), (0, 2, 1), (0, 2, 3), (0, 3, 1), (0, 3, 2), (1, 0, 2), (1, 0, 3), (1, 2, 0), (1, 2, 3), (1, 3, 0), (1, 3, 2), (2, 0, 1), (2, 0, 3), (2, 1, 0), (2, 1, 3), (2, 3, 0), (2, 3, 1), (3, 0, 1), (3, 0, 2), (3, 1, 0), (3, 1, 2), (3, 2, 0), (3, 2, 1)] + assert list(variations(l, 0, repetition=True)) == [()] + assert list(variations(l, 1, repetition=True)) == [(0,), (1,), (2,), (3,)] + assert list(variations(l, 2, repetition=True)) == [(0, 0), (0, 1), (0, 2), + (0, 3), (1, 0), (1, 1), + (1, 2), (1, 3), (2, 0), + (2, 1), (2, 2), (2, 3), + (3, 0), (3, 1), (3, 2), + (3, 3)] + assert len(list(variations(l, 3, repetition=True))) == 64 + assert len(list(variations(l, 4, repetition=True))) == 256 + assert list(variations(l[:2], 3, repetition=False)) == [] + assert list(variations(l[:2], 3, repetition=True)) == [ + (0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), + (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1) + ] + + +def test_cartes(): + assert list(cartes([1, 2], [3, 4, 5])) == \ + [(1, 3), (1, 4), (1, 5), (2, 3), (2, 4), (2, 5)] + assert list(cartes()) == [()] + assert list(cartes('a')) == [('a',)] + assert list(cartes('a', repeat=2)) == [('a', 'a')] + assert list(cartes(list(range(2)))) == [(0,), (1,)] + + +def test_filter_symbols(): + s = numbered_symbols() + filtered = filter_symbols(s, 
symbols("x0 x2 x3")) + assert take(filtered, 3) == list(symbols("x1 x4 x5")) + + +def test_numbered_symbols(): + s = numbered_symbols(cls=Dummy) + assert isinstance(next(s), Dummy) + assert next(numbered_symbols('C', start=1, exclude=[symbols('C1')])) == \ + symbols('C2') + + +def test_sift(): + assert sift(list(range(5)), lambda _: _ % 2) == {1: [1, 3], 0: [0, 2, 4]} + assert sift([x, y], lambda _: _.has(x)) == {False: [y], True: [x]} + assert sift([S.One], lambda _: _.has(x)) == {False: [1]} + assert sift([0, 1, 2, 3], lambda x: x % 2, binary=True) == ( + [1, 3], [0, 2]) + assert sift([0, 1, 2, 3], lambda x: x % 3 == 1, binary=True) == ( + [1], [0, 2, 3]) + raises(ValueError, lambda: + sift([0, 1, 2, 3], lambda x: x % 3, binary=True)) + + +def test_take(): + X = numbered_symbols() + + assert take(X, 5) == list(symbols('x0:5')) + assert take(X, 5) == list(symbols('x5:10')) + + assert take([1, 2, 3, 4, 5], 5) == [1, 2, 3, 4, 5] + + +def test_dict_merge(): + assert dict_merge({}, {1: x, y: z}) == {1: x, y: z} + assert dict_merge({1: x, y: z}, {}) == {1: x, y: z} + + assert dict_merge({2: z}, {1: x, y: z}) == {1: x, 2: z, y: z} + assert dict_merge({1: x, y: z}, {2: z}) == {1: x, 2: z, y: z} + + assert dict_merge({1: y, 2: z}, {1: x, y: z}) == {1: x, 2: z, y: z} + assert dict_merge({1: x, y: z}, {1: y, 2: z}) == {1: y, 2: z, y: z} + + +def test_prefixes(): + assert list(prefixes([])) == [] + assert list(prefixes([1])) == [[1]] + assert list(prefixes([1, 2])) == [[1], [1, 2]] + + assert list(prefixes([1, 2, 3, 4, 5])) == \ + [[1], [1, 2], [1, 2, 3], [1, 2, 3, 4], [1, 2, 3, 4, 5]] + + +def test_postfixes(): + assert list(postfixes([])) == [] + assert list(postfixes([1])) == [[1]] + assert list(postfixes([1, 2])) == [[2], [1, 2]] + + assert list(postfixes([1, 2, 3, 4, 5])) == \ + [[5], [4, 5], [3, 4, 5], [2, 3, 4, 5], [1, 2, 3, 4, 5]] + + +def test_topological_sort(): + V = [2, 3, 5, 7, 8, 9, 10, 11] + E = [(7, 11), (7, 8), (5, 11), + (3, 8), (3, 10), (11, 2), + (11, 9), (11, 10), (8, 9)] + + assert topological_sort((V, E)) == [3, 5, 7, 8, 11, 2, 9, 10] + assert topological_sort((V, E), key=lambda v: -v) == \ + [7, 5, 11, 3, 10, 8, 9, 2] + + raises(ValueError, lambda: topological_sort((V, E + [(10, 7)]))) + + +def test_strongly_connected_components(): + assert strongly_connected_components(([], [])) == [] + assert strongly_connected_components(([1, 2, 3], [])) == [[1], [2], [3]] + + V = [1, 2, 3] + E = [(1, 2), (1, 3), (2, 1), (2, 3), (3, 1)] + assert strongly_connected_components((V, E)) == [[1, 2, 3]] + + V = [1, 2, 3, 4] + E = [(1, 2), (2, 3), (3, 2), (3, 4)] + assert strongly_connected_components((V, E)) == [[4], [2, 3], [1]] + + V = [1, 2, 3, 4] + E = [(1, 2), (2, 1), (3, 4), (4, 3)] + assert strongly_connected_components((V, E)) == [[1, 2], [3, 4]] + + +def test_connected_components(): + assert connected_components(([], [])) == [] + assert connected_components(([1, 2, 3], [])) == [[1], [2], [3]] + + V = [1, 2, 3] + E = [(1, 2), (1, 3), (2, 1), (2, 3), (3, 1)] + assert connected_components((V, E)) == [[1, 2, 3]] + + V = [1, 2, 3, 4] + E = [(1, 2), (2, 3), (3, 2), (3, 4)] + assert connected_components((V, E)) == [[1, 2, 3, 4]] + + V = [1, 2, 3, 4] + E = [(1, 2), (3, 4)] + assert connected_components((V, E)) == [[1, 2], [3, 4]] + + +def test_rotate(): + A = [0, 1, 2, 3, 4] + + assert rotate_left(A, 2) == [2, 3, 4, 0, 1] + assert rotate_right(A, 1) == [4, 0, 1, 2, 3] + A = [] + B = rotate_right(A, 1) + assert B == [] + B.append(1) + assert A == [] + B = rotate_left(A, 1) + assert B == [] + 
B.append(1) + assert A == [] + + +def test_multiset_partitions(): + A = [0, 1, 2, 3, 4] + + assert list(multiset_partitions(A, 5)) == [[[0], [1], [2], [3], [4]]] + assert len(list(multiset_partitions(A, 4))) == 10 + assert len(list(multiset_partitions(A, 3))) == 25 + + assert list(multiset_partitions([1, 1, 1, 2, 2], 2)) == [ + [[1, 1, 1, 2], [2]], [[1, 1, 1], [2, 2]], [[1, 1, 2, 2], [1]], + [[1, 1, 2], [1, 2]], [[1, 1], [1, 2, 2]]] + + assert list(multiset_partitions([1, 1, 2, 2], 2)) == [ + [[1, 1, 2], [2]], [[1, 1], [2, 2]], [[1, 2, 2], [1]], + [[1, 2], [1, 2]]] + + assert list(multiset_partitions([1, 2, 3, 4], 2)) == [ + [[1, 2, 3], [4]], [[1, 2, 4], [3]], [[1, 2], [3, 4]], + [[1, 3, 4], [2]], [[1, 3], [2, 4]], [[1, 4], [2, 3]], + [[1], [2, 3, 4]]] + + assert list(multiset_partitions([1, 2, 2], 2)) == [ + [[1, 2], [2]], [[1], [2, 2]]] + + assert list(multiset_partitions(3)) == [ + [[0, 1, 2]], [[0, 1], [2]], [[0, 2], [1]], [[0], [1, 2]], + [[0], [1], [2]]] + assert list(multiset_partitions(3, 2)) == [ + [[0, 1], [2]], [[0, 2], [1]], [[0], [1, 2]]] + assert list(multiset_partitions([1] * 3, 2)) == [[[1], [1, 1]]] + assert list(multiset_partitions([1] * 3)) == [ + [[1, 1, 1]], [[1], [1, 1]], [[1], [1], [1]]] + a = [3, 2, 1] + assert list(multiset_partitions(a)) == \ + list(multiset_partitions(sorted(a))) + assert list(multiset_partitions(a, 5)) == [] + assert list(multiset_partitions(a, 1)) == [[[1, 2, 3]]] + assert list(multiset_partitions(a + [4], 5)) == [] + assert list(multiset_partitions(a + [4], 1)) == [[[1, 2, 3, 4]]] + assert list(multiset_partitions(2, 5)) == [] + assert list(multiset_partitions(2, 1)) == [[[0, 1]]] + assert list(multiset_partitions('a')) == [[['a']]] + assert list(multiset_partitions('a', 2)) == [] + assert list(multiset_partitions('ab')) == [[['a', 'b']], [['a'], ['b']]] + assert list(multiset_partitions('ab', 1)) == [[['a', 'b']]] + assert list(multiset_partitions('aaa', 1)) == [['aaa']] + assert list(multiset_partitions([1, 1], 1)) == [[[1, 1]]] + ans = [('mpsyy',), ('mpsy', 'y'), ('mps', 'yy'), ('mps', 'y', 'y'), + ('mpyy', 's'), ('mpy', 'sy'), ('mpy', 's', 'y'), ('mp', 'syy'), + ('mp', 'sy', 'y'), ('mp', 's', 'yy'), ('mp', 's', 'y', 'y'), + ('msyy', 'p'), ('msy', 'py'), ('msy', 'p', 'y'), ('ms', 'pyy'), + ('ms', 'py', 'y'), ('ms', 'p', 'yy'), ('ms', 'p', 'y', 'y'), + ('myy', 'ps'), ('myy', 'p', 's'), ('my', 'psy'), ('my', 'ps', 'y'), + ('my', 'py', 's'), ('my', 'p', 'sy'), ('my', 'p', 's', 'y'), + ('m', 'psyy'), ('m', 'psy', 'y'), ('m', 'ps', 'yy'), + ('m', 'ps', 'y', 'y'), ('m', 'pyy', 's'), ('m', 'py', 'sy'), + ('m', 'py', 's', 'y'), ('m', 'p', 'syy'), + ('m', 'p', 'sy', 'y'), ('m', 'p', 's', 'yy'), + ('m', 'p', 's', 'y', 'y')] + assert [tuple("".join(part) for part in p) + for p in multiset_partitions('sympy')] == ans + factorings = [[24], [8, 3], [12, 2], [4, 6], [4, 2, 3], + [6, 2, 2], [2, 2, 2, 3]] + assert [factoring_visitor(p, [2,3]) for + p in multiset_partitions_taocp([3, 1])] == factorings + + +def test_multiset_combinations(): + ans = ['iii', 'iim', 'iip', 'iis', 'imp', 'ims', 'ipp', 'ips', + 'iss', 'mpp', 'mps', 'mss', 'pps', 'pss', 'sss'] + assert [''.join(i) for i in + list(multiset_combinations('mississippi', 3))] == ans + M = multiset('mississippi') + assert [''.join(i) for i in + list(multiset_combinations(M, 3))] == ans + assert [''.join(i) for i in multiset_combinations(M, 30)] == [] + assert list(multiset_combinations([[1], [2, 3]], 2)) == [[[1], [2, 3]]] + assert len(list(multiset_combinations('a', 3))) == 0 + assert 
len(list(multiset_combinations('a', 0))) == 1 + assert list(multiset_combinations('abc', 1)) == [['a'], ['b'], ['c']] + raises(ValueError, lambda: list(multiset_combinations({0: 3, 1: -1}, 2))) + + +def test_multiset_permutations(): + ans = ['abby', 'abyb', 'aybb', 'baby', 'bayb', 'bbay', 'bbya', 'byab', + 'byba', 'yabb', 'ybab', 'ybba'] + assert [''.join(i) for i in multiset_permutations('baby')] == ans + assert [''.join(i) for i in multiset_permutations(multiset('baby'))] == ans + assert list(multiset_permutations([0, 0, 0], 2)) == [[0, 0]] + assert list(multiset_permutations([0, 2, 1], 2)) == [ + [0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1]] + assert len(list(multiset_permutations('a', 0))) == 1 + assert len(list(multiset_permutations('a', 3))) == 0 + for nul in ([], {}, ''): + assert list(multiset_permutations(nul)) == [[]] + assert list(multiset_permutations(nul, 0)) == [[]] + # impossible requests give no result + assert list(multiset_permutations(nul, 1)) == [] + assert list(multiset_permutations(nul, -1)) == [] + + def test(): + for i in range(1, 7): + print(i) + for p in multiset_permutations([0, 0, 1, 0, 1], i): + print(p) + assert capture(lambda: test()) == dedent('''\ + 1 + [0] + [1] + 2 + [0, 0] + [0, 1] + [1, 0] + [1, 1] + 3 + [0, 0, 0] + [0, 0, 1] + [0, 1, 0] + [0, 1, 1] + [1, 0, 0] + [1, 0, 1] + [1, 1, 0] + 4 + [0, 0, 0, 1] + [0, 0, 1, 0] + [0, 0, 1, 1] + [0, 1, 0, 0] + [0, 1, 0, 1] + [0, 1, 1, 0] + [1, 0, 0, 0] + [1, 0, 0, 1] + [1, 0, 1, 0] + [1, 1, 0, 0] + 5 + [0, 0, 0, 1, 1] + [0, 0, 1, 0, 1] + [0, 0, 1, 1, 0] + [0, 1, 0, 0, 1] + [0, 1, 0, 1, 0] + [0, 1, 1, 0, 0] + [1, 0, 0, 0, 1] + [1, 0, 0, 1, 0] + [1, 0, 1, 0, 0] + [1, 1, 0, 0, 0] + 6\n''') + raises(ValueError, lambda: list(multiset_permutations({0: 3, 1: -1}))) + + +def test_partitions(): + ans = [[{}], [(0, {})]] + for i in range(2): + assert list(partitions(0, size=i)) == ans[i] + assert list(partitions(1, 0, size=i)) == ans[i] + assert list(partitions(6, 2, 2, size=i)) == ans[i] + assert list(partitions(6, 2, None, size=i)) != ans[i] + assert list(partitions(6, None, 2, size=i)) != ans[i] + assert list(partitions(6, 2, 0, size=i)) == ans[i] + + assert list(partitions(6, k=2)) == [ + {2: 3}, {1: 2, 2: 2}, {1: 4, 2: 1}, {1: 6}] + + assert list(partitions(6, k=3)) == [ + {3: 2}, {1: 1, 2: 1, 3: 1}, {1: 3, 3: 1}, {2: 3}, {1: 2, 2: 2}, + {1: 4, 2: 1}, {1: 6}] + + assert list(partitions(8, k=4, m=3)) == [ + {4: 2}, {1: 1, 3: 1, 4: 1}, {2: 2, 4: 1}, {2: 1, 3: 2}] == [ + i for i in partitions(8, k=4, m=3) if all(k <= 4 for k in i) + and sum(i.values()) <=3] + + assert list(partitions(S(3), m=2)) == [ + {3: 1}, {1: 1, 2: 1}] + + assert list(partitions(4, k=3)) == [ + {1: 1, 3: 1}, {2: 2}, {1: 2, 2: 1}, {1: 4}] == [ + i for i in partitions(4) if all(k <= 3 for k in i)] + + + # Consistency check on output of _partitions and RGS_unrank. + # This provides a sanity test on both routines. Also verifies that + # the total number of partitions is the same in each case. 
+ # (from pkrathmann2) + + for n in range(2, 6): + i = 0 + for m, q in _set_partitions(n): + assert q == RGS_unrank(i, n) + i += 1 + assert i == RGS_enum(n) + + +def test_binary_partitions(): + assert [i[:] for i in binary_partitions(10)] == [[8, 2], [8, 1, 1], + [4, 4, 2], [4, 4, 1, 1], [4, 2, 2, 2], [4, 2, 2, 1, 1], + [4, 2, 1, 1, 1, 1], [4, 1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2], + [2, 2, 2, 2, 1, 1], [2, 2, 2, 1, 1, 1, 1], [2, 2, 1, 1, 1, 1, 1, 1], + [2, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] + + assert len([j[:] for j in binary_partitions(16)]) == 36 + + +def test_bell_perm(): + assert [len(set(generate_bell(i))) for i in range(1, 7)] == [ + factorial(i) for i in range(1, 7)] + assert list(generate_bell(3)) == [ + (0, 1, 2), (0, 2, 1), (2, 0, 1), (2, 1, 0), (1, 2, 0), (1, 0, 2)] + # generate_bell and trotterjohnson are advertised to return the same + # permutations; this is not technically necessary so this test could + # be removed + for n in range(1, 5): + p = Permutation(range(n)) + b = generate_bell(n) + for bi in b: + assert bi == tuple(p.array_form) + p = p.next_trotterjohnson() + raises(ValueError, lambda: list(generate_bell(0))) # XXX is this consistent with other permutation algorithms? + + +def test_involutions(): + lengths = [1, 2, 4, 10, 26, 76] + for n, N in enumerate(lengths): + i = list(generate_involutions(n + 1)) + assert len(i) == N + assert len({Permutation(j)**2 for j in i}) == 1 + + +def test_derangements(): + assert len(list(generate_derangements(list(range(6))))) == 265 + assert ''.join(''.join(i) for i in generate_derangements('abcde')) == ( + 'badecbaecdbcaedbcdeabceadbdaecbdeacbdecabeacdbedacbedcacabedcadebcaebd' + 'cdaebcdbeacdeabcdebaceabdcebadcedabcedbadabecdaebcdaecbdcaebdcbeadceab' + 'dcebadeabcdeacbdebacdebcaeabcdeadbceadcbecabdecbadecdabecdbaedabcedacb' + 'edbacedbca') + assert list(generate_derangements([0, 1, 2, 3])) == [ + [1, 0, 3, 2], [1, 2, 3, 0], [1, 3, 0, 2], [2, 0, 3, 1], + [2, 3, 0, 1], [2, 3, 1, 0], [3, 0, 1, 2], [3, 2, 0, 1], [3, 2, 1, 0]] + assert list(generate_derangements([0, 1, 2, 2])) == [ + [2, 2, 0, 1], [2, 2, 1, 0]] + assert list(generate_derangements('ba')) == [list('ab')] + # multiset_derangements + D = multiset_derangements + assert list(D('abb')) == [] + assert [''.join(i) for i in D('ab')] == ['ba'] + assert [''.join(i) for i in D('abc')] == ['bca', 'cab'] + assert [''.join(i) for i in D('aabb')] == ['bbaa'] + assert [''.join(i) for i in D('aabbcccc')] == [ + 'ccccaabb', 'ccccabab', 'ccccabba', 'ccccbaab', 'ccccbaba', + 'ccccbbaa'] + assert [''.join(i) for i in D('aabbccc')] == [ + 'cccabba', 'cccabab', 'cccaabb', 'ccacbba', 'ccacbab', + 'ccacabb', 'cbccbaa', 'cbccaba', 'cbccaab', 'bcccbaa', + 'bcccaba', 'bcccaab'] + assert [''.join(i) for i in D('books')] == ['kbsoo', 'ksboo', + 'sbkoo', 'skboo', 'oksbo', 'oskbo', 'okbso', 'obkso', 'oskob', + 'oksob', 'osbok', 'obsok'] + assert list(generate_derangements([[3], [2], [2], [1]])) == [ + [[2], [1], [3], [2]], [[2], [3], [1], [2]]] + + +def test_necklaces(): + def count(n, k, f): + return len(list(necklaces(n, k, f))) + m = [] + for i in range(1, 8): + m.append(( + i, count(i, 2, 0), count(i, 2, 1), count(i, 3, 1))) + assert Matrix(m) == Matrix([ + [1, 2, 2, 3], + [2, 3, 3, 6], + [3, 4, 4, 10], + [4, 6, 6, 21], + [5, 8, 8, 39], + [6, 14, 13, 92], + [7, 20, 18, 198]]) + + +def test_bracelets(): + bc = list(bracelets(2, 4)) + assert Matrix(bc) == Matrix([ + [0, 0], + [0, 1], + [0, 2], + [0, 3], + [1, 1], + [1, 2], + [1, 3], + [2, 2], + [2, 3], + [3, 3] + ]) + bc = 
list(bracelets(4, 2)) + assert Matrix(bc) == Matrix([ + [0, 0, 0, 0], + [0, 0, 0, 1], + [0, 0, 1, 1], + [0, 1, 0, 1], + [0, 1, 1, 1], + [1, 1, 1, 1] + ]) + + +def test_generate_oriented_forest(): + assert list(generate_oriented_forest(5)) == [[0, 1, 2, 3, 4], + [0, 1, 2, 3, 3], [0, 1, 2, 3, 2], [0, 1, 2, 3, 1], [0, 1, 2, 3, 0], + [0, 1, 2, 2, 2], [0, 1, 2, 2, 1], [0, 1, 2, 2, 0], [0, 1, 2, 1, 2], + [0, 1, 2, 1, 1], [0, 1, 2, 1, 0], [0, 1, 2, 0, 1], [0, 1, 2, 0, 0], + [0, 1, 1, 1, 1], [0, 1, 1, 1, 0], [0, 1, 1, 0, 1], [0, 1, 1, 0, 0], + [0, 1, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 0]] + assert len(list(generate_oriented_forest(10))) == 1842 + + +def test_unflatten(): + r = list(range(10)) + assert unflatten(r) == list(zip(r[::2], r[1::2])) + assert unflatten(r, 5) == [tuple(r[:5]), tuple(r[5:])] + raises(ValueError, lambda: unflatten(list(range(10)), 3)) + raises(ValueError, lambda: unflatten(list(range(10)), -2)) + + +def test_common_prefix_suffix(): + assert common_prefix([], [1]) == [] + assert common_prefix(list(range(3))) == [0, 1, 2] + assert common_prefix(list(range(3)), list(range(4))) == [0, 1, 2] + assert common_prefix([1, 2, 3], [1, 2, 5]) == [1, 2] + assert common_prefix([1, 2, 3], [1, 3, 5]) == [1] + + assert common_suffix([], [1]) == [] + assert common_suffix(list(range(3))) == [0, 1, 2] + assert common_suffix(list(range(3)), list(range(3))) == [0, 1, 2] + assert common_suffix(list(range(3)), list(range(4))) == [] + assert common_suffix([1, 2, 3], [9, 2, 3]) == [2, 3] + assert common_suffix([1, 2, 3], [9, 7, 3]) == [3] + + +def test_minlex(): + assert minlex([1, 2, 0]) == (0, 1, 2) + assert minlex((1, 2, 0)) == (0, 1, 2) + assert minlex((1, 0, 2)) == (0, 2, 1) + assert minlex((1, 0, 2), directed=False) == (0, 1, 2) + assert minlex('aba') == 'aab' + assert minlex(('bb', 'aaa', 'c', 'a'), key=len) == ('c', 'a', 'bb', 'aaa') + + +def test_ordered(): + assert list(ordered((x, y), hash, default=False)) in [[x, y], [y, x]] + assert list(ordered((x, y), hash, default=False)) == \ + list(ordered((y, x), hash, default=False)) + assert list(ordered((x, y))) == [x, y] + + seq, keys = [[[1, 2, 1], [0, 3, 1], [1, 1, 3], [2], [1]], + (lambda x: len(x), lambda x: sum(x))] + assert list(ordered(seq, keys, default=False, warn=False)) == \ + [[1], [2], [1, 2, 1], [0, 3, 1], [1, 1, 3]] + raises(ValueError, lambda: + list(ordered(seq, keys, default=False, warn=True))) + + +def test_runs(): + assert runs([]) == [] + assert runs([1]) == [[1]] + assert runs([1, 1]) == [[1], [1]] + assert runs([1, 1, 2]) == [[1], [1, 2]] + assert runs([1, 2, 1]) == [[1, 2], [1]] + assert runs([2, 1, 1]) == [[2], [1], [1]] + from operator import lt + assert runs([2, 1, 1], lt) == [[2, 1], [1]] + + +def test_reshape(): + seq = list(range(1, 9)) + assert reshape(seq, [4]) == \ + [[1, 2, 3, 4], [5, 6, 7, 8]] + assert reshape(seq, (4,)) == \ + [(1, 2, 3, 4), (5, 6, 7, 8)] + assert reshape(seq, (2, 2)) == \ + [(1, 2, 3, 4), (5, 6, 7, 8)] + assert reshape(seq, (2, [2])) == \ + [(1, 2, [3, 4]), (5, 6, [7, 8])] + assert reshape(seq, ((2,), [2])) == \ + [((1, 2), [3, 4]), ((5, 6), [7, 8])] + assert reshape(seq, (1, [2], 1)) == \ + [(1, [2, 3], 4), (5, [6, 7], 8)] + assert reshape(tuple(seq), ([[1], 1, (2,)],)) == \ + (([[1], 2, (3, 4)],), ([[5], 6, (7, 8)],)) + assert reshape(tuple(seq), ([1], 1, (2,))) == \ + (([1], 2, (3, 4)), ([5], 6, (7, 8))) + assert reshape(list(range(12)), [2, [3], {2}, (1, (3,), 1)]) == \ + [[0, 1, [2, 3, 4], {5, 6}, (7, (8, 9, 10), 11)]] + raises(ValueError, lambda: reshape([0, 1], [-1])) + 
raises(ValueError, lambda: reshape([0, 1], [3])) + + +def test_uniq(): + assert list(uniq(p for p in partitions(4))) == \ + [{4: 1}, {1: 1, 3: 1}, {2: 2}, {1: 2, 2: 1}, {1: 4}] + assert list(uniq(x % 2 for x in range(5))) == [0, 1] + assert list(uniq('a')) == ['a'] + assert list(uniq('ababc')) == list('abc') + assert list(uniq([[1], [2, 1], [1]])) == [[1], [2, 1]] + assert list(uniq(permutations(i for i in [[1], 2, 2]))) == \ + [([1], 2, 2), (2, [1], 2), (2, 2, [1])] + assert list(uniq([2, 3, 2, 4, [2], [1], [2], [3], [1]])) == \ + [2, 3, 4, [2], [1], [3]] + f = [1] + raises(RuntimeError, lambda: [f.remove(i) for i in uniq(f)]) + f = [[1]] + raises(RuntimeError, lambda: [f.remove(i) for i in uniq(f)]) + + +def test_kbins(): + assert len(list(kbins('1123', 2, ordered=1))) == 24 + assert len(list(kbins('1123', 2, ordered=11))) == 36 + assert len(list(kbins('1123', 2, ordered=10))) == 10 + assert len(list(kbins('1123', 2, ordered=0))) == 5 + assert len(list(kbins('1123', 2, ordered=None))) == 3 + + def test1(): + for orderedval in [None, 0, 1, 10, 11]: + print('ordered =', orderedval) + for p in kbins([0, 0, 1], 2, ordered=orderedval): + print(' ', p) + assert capture(lambda : test1()) == dedent('''\ + ordered = None + [[0], [0, 1]] + [[0, 0], [1]] + ordered = 0 + [[0, 0], [1]] + [[0, 1], [0]] + ordered = 1 + [[0], [0, 1]] + [[0], [1, 0]] + [[1], [0, 0]] + ordered = 10 + [[0, 0], [1]] + [[1], [0, 0]] + [[0, 1], [0]] + [[0], [0, 1]] + ordered = 11 + [[0], [0, 1]] + [[0, 0], [1]] + [[0], [1, 0]] + [[0, 1], [0]] + [[1], [0, 0]] + [[1, 0], [0]]\n''') + + def test2(): + for orderedval in [None, 0, 1, 10, 11]: + print('ordered =', orderedval) + for p in kbins(list(range(3)), 2, ordered=orderedval): + print(' ', p) + assert capture(lambda : test2()) == dedent('''\ + ordered = None + [[0], [1, 2]] + [[0, 1], [2]] + ordered = 0 + [[0, 1], [2]] + [[0, 2], [1]] + [[0], [1, 2]] + ordered = 1 + [[0], [1, 2]] + [[0], [2, 1]] + [[1], [0, 2]] + [[1], [2, 0]] + [[2], [0, 1]] + [[2], [1, 0]] + ordered = 10 + [[0, 1], [2]] + [[2], [0, 1]] + [[0, 2], [1]] + [[1], [0, 2]] + [[0], [1, 2]] + [[1, 2], [0]] + ordered = 11 + [[0], [1, 2]] + [[0, 1], [2]] + [[0], [2, 1]] + [[0, 2], [1]] + [[1], [0, 2]] + [[1, 0], [2]] + [[1], [2, 0]] + [[1, 2], [0]] + [[2], [0, 1]] + [[2, 0], [1]] + [[2], [1, 0]] + [[2, 1], [0]]\n''') + + +def test_has_dups(): + assert has_dups(set()) is False + assert has_dups(list(range(3))) is False + assert has_dups([1, 2, 1]) is True + assert has_dups([[1], [1]]) is True + assert has_dups([[1], [2]]) is False + + +def test__partition(): + assert _partition('abcde', [1, 0, 1, 2, 0]) == [ + ['b', 'e'], ['a', 'c'], ['d']] + assert _partition('abcde', [1, 0, 1, 2, 0], 3) == [ + ['b', 'e'], ['a', 'c'], ['d']] + output = (3, [1, 0, 1, 2, 0]) + assert _partition('abcde', *output) == [['b', 'e'], ['a', 'c'], ['d']] + + +def test_ordered_partitions(): + from sympy.functions.combinatorial.numbers import nT + f = ordered_partitions + assert list(f(0, 1)) == [[]] + assert list(f(1, 0)) == [[]] + for i in range(1, 7): + for j in [None] + list(range(1, i)): + assert ( + sum(1 for p in f(i, j, 1)) == + sum(1 for p in f(i, j, 0)) == + nT(i, j)) + + +def test_rotations(): + assert list(rotations('ab')) == [['a', 'b'], ['b', 'a']] + assert list(rotations(range(3))) == [[0, 1, 2], [1, 2, 0], [2, 0, 1]] + assert list(rotations(range(3), dir=-1)) == [[0, 1, 2], [2, 0, 1], [1, 2, 0]] + + +def test_ibin(): + assert ibin(3) == [1, 1] + assert ibin(3, 3) == [0, 1, 1] + assert ibin(3, str=True) == '11' + assert ibin(3, 3, 
str=True) == '011' + assert list(ibin(2, 'all')) == [(0, 0), (0, 1), (1, 0), (1, 1)] + assert list(ibin(2, '', str=True)) == ['00', '01', '10', '11'] + raises(ValueError, lambda: ibin(-.5)) + raises(ValueError, lambda: ibin(2, 1)) + + +def test_iterable(): + assert iterable(0) is False + assert iterable(1) is False + assert iterable(None) is False + + class Test1(NotIterable): + pass + + assert iterable(Test1()) is False + + class Test2(NotIterable): + _iterable = True + + assert iterable(Test2()) is True + + class Test3: + pass + + assert iterable(Test3()) is False + + class Test4: + _iterable = True + + assert iterable(Test4()) is True + + class Test5: + def __iter__(self): + yield 1 + + assert iterable(Test5()) is True + + class Test6(Test5): + _iterable = False + + assert iterable(Test6()) is False + + +def test_sequence_partitions(): + assert list(sequence_partitions([1], 1)) == [[[1]]] + assert list(sequence_partitions([1, 2], 1)) == [[[1, 2]]] + assert list(sequence_partitions([1, 2], 2)) == [[[1], [2]]] + assert list(sequence_partitions([1, 2, 3], 1)) == [[[1, 2, 3]]] + assert list(sequence_partitions([1, 2, 3], 2)) == \ + [[[1], [2, 3]], [[1, 2], [3]]] + assert list(sequence_partitions([1, 2, 3], 3)) == [[[1], [2], [3]]] + + # Exceptional cases + assert list(sequence_partitions([], 0)) == [] + assert list(sequence_partitions([], 1)) == [] + assert list(sequence_partitions([1, 2], 0)) == [] + assert list(sequence_partitions([1, 2], 3)) == [] + + +def test_sequence_partitions_empty(): + assert list(sequence_partitions_empty([], 1)) == [[[]]] + assert list(sequence_partitions_empty([], 2)) == [[[], []]] + assert list(sequence_partitions_empty([], 3)) == [[[], [], []]] + assert list(sequence_partitions_empty([1], 1)) == [[[1]]] + assert list(sequence_partitions_empty([1], 2)) == [[[], [1]], [[1], []]] + assert list(sequence_partitions_empty([1], 3)) == \ + [[[], [], [1]], [[], [1], []], [[1], [], []]] + assert list(sequence_partitions_empty([1, 2], 1)) == [[[1, 2]]] + assert list(sequence_partitions_empty([1, 2], 2)) == \ + [[[], [1, 2]], [[1], [2]], [[1, 2], []]] + assert list(sequence_partitions_empty([1, 2], 3)) == [ + [[], [], [1, 2]], [[], [1], [2]], [[], [1, 2], []], + [[1], [], [2]], [[1], [2], []], [[1, 2], [], []] + ] + assert list(sequence_partitions_empty([1, 2, 3], 1)) == [[[1, 2, 3]]] + assert list(sequence_partitions_empty([1, 2, 3], 2)) == \ + [[[], [1, 2, 3]], [[1], [2, 3]], [[1, 2], [3]], [[1, 2, 3], []]] + assert list(sequence_partitions_empty([1, 2, 3], 3)) == [ + [[], [], [1, 2, 3]], [[], [1], [2, 3]], + [[], [1, 2], [3]], [[], [1, 2, 3], []], + [[1], [], [2, 3]], [[1], [2], [3]], + [[1], [2, 3], []], [[1, 2], [], [3]], + [[1, 2], [3], []], [[1, 2, 3], [], []] + ] + + # Exceptional cases + assert list(sequence_partitions([], 0)) == [] + assert list(sequence_partitions([1], 0)) == [] + assert list(sequence_partitions([1, 2], 0)) == [] diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_lambdify.py b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_lambdify.py new file mode 100644 index 0000000000000000000000000000000000000000..a26bd7139b9b7050d4ffc61760a309bb733aeaa1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_lambdify.py @@ -0,0 +1,1862 @@ +from itertools import product +import math +import inspect + + + +import mpmath +from sympy.testing.pytest import raises, warns_deprecated_sympy +from sympy.concrete.summations import Sum +from sympy.core.function import (Function, Lambda, diff) +from 
sympy.core.numbers import (E, Float, I, Rational, oo, pi) +from sympy.core.relational import Eq +from sympy.core.singleton import S +from sympy.core.symbol import (Dummy, symbols) +from sympy.functions.combinatorial.factorials import (RisingFactorial, factorial) +from sympy.functions.combinatorial.numbers import bernoulli, harmonic +from sympy.functions.elementary.complexes import Abs +from sympy.functions.elementary.exponential import exp, log +from sympy.functions.elementary.hyperbolic import acosh +from sympy.functions.elementary.integers import floor +from sympy.functions.elementary.miscellaneous import (Max, Min, sqrt) +from sympy.functions.elementary.piecewise import Piecewise +from sympy.functions.elementary.trigonometric import (acos, cos, cot, sin, + sinc, tan) +from sympy.functions.special.bessel import (besseli, besselj, besselk, bessely) +from sympy.functions.special.beta_functions import (beta, betainc, betainc_regularized) +from sympy.functions.special.delta_functions import (Heaviside) +from sympy.functions.special.error_functions import (Ei, erf, erfc, fresnelc, fresnels, Si, Ci) +from sympy.functions.special.gamma_functions import (digamma, gamma, loggamma, polygamma) +from sympy.integrals.integrals import Integral +from sympy.logic.boolalg import (And, false, ITE, Not, Or, true) +from sympy.matrices.expressions.dotproduct import DotProduct +from sympy.tensor.array import derive_by_array, Array +from sympy.tensor.indexed import IndexedBase +from sympy.utilities.lambdify import lambdify +from sympy.core.expr import UnevaluatedExpr +from sympy.codegen.cfunctions import expm1, log1p, exp2, log2, log10, hypot +from sympy.codegen.numpy_nodes import logaddexp, logaddexp2 +from sympy.codegen.scipy_nodes import cosm1, powm1 +from sympy.functions.elementary.complexes import re, im, arg +from sympy.functions.special.polynomials import \ + chebyshevt, chebyshevu, legendre, hermite, laguerre, gegenbauer, \ + assoc_legendre, assoc_laguerre, jacobi +from sympy.matrices import Matrix, MatrixSymbol, SparseMatrix +from sympy.printing.lambdarepr import LambdaPrinter +from sympy.printing.numpy import NumPyPrinter +from sympy.utilities.lambdify import implemented_function, lambdastr +from sympy.testing.pytest import skip +from sympy.utilities.decorator import conserve_mpmath_dps +from sympy.utilities.exceptions import ignore_warnings +from sympy.external import import_module +from sympy.functions.special.gamma_functions import uppergamma, lowergamma + + +import sympy + + +MutableDenseMatrix = Matrix + +numpy = import_module('numpy') +scipy = import_module('scipy', import_kwargs={'fromlist': ['sparse']}) +numexpr = import_module('numexpr') +tensorflow = import_module('tensorflow') +cupy = import_module('cupy') +jax = import_module('jax') +numba = import_module('numba') + +if tensorflow: + # Hide Tensorflow warnings + import os + os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' + +w, x, y, z = symbols('w,x,y,z') + +#================== Test different arguments ======================= + + +def test_no_args(): + f = lambdify([], 1) + raises(TypeError, lambda: f(-1)) + assert f() == 1 + + +def test_single_arg(): + f = lambdify(x, 2*x) + assert f(1) == 2 + + +def test_list_args(): + f = lambdify([x, y], x + y) + assert f(1, 2) == 3 + + +def test_nested_args(): + f1 = lambdify([[w, x]], [w, x]) + assert f1([91, 2]) == [91, 2] + raises(TypeError, lambda: f1(1, 2)) + + f2 = lambdify([(w, x), (y, z)], [w, x, y, z]) + assert f2((18, 12), (73, 4)) == [18, 12, 73, 4] + raises(TypeError, lambda: f2(3, 4)) + + f3 = 
lambdify([w, [[[x]], y], z], [w, x, y, z]) + assert f3(10, [[[52]], 31], 44) == [10, 52, 31, 44] + + +def test_str_args(): + f = lambdify('x,y,z', 'z,y,x') + assert f(3, 2, 1) == (1, 2, 3) + assert f(1.0, 2.0, 3.0) == (3.0, 2.0, 1.0) + # make sure correct number of args required + raises(TypeError, lambda: f(0)) + + +def test_own_namespace_1(): + myfunc = lambda x: 1 + f = lambdify(x, sin(x), {"sin": myfunc}) + assert f(0.1) == 1 + assert f(100) == 1 + + +def test_own_namespace_2(): + def myfunc(x): + return 1 + f = lambdify(x, sin(x), {'sin': myfunc}) + assert f(0.1) == 1 + assert f(100) == 1 + + +def test_own_module(): + f = lambdify(x, sin(x), math) + assert f(0) == 0.0 + + p, q, r = symbols("p q r", real=True) + ae = abs(exp(p+UnevaluatedExpr(q+r))) + f = lambdify([p, q, r], [ae, ae], modules=math) + results = f(1.0, 1e18, -1e18) + refvals = [math.exp(1.0)]*2 + for res, ref in zip(results, refvals): + assert abs((res-ref)/ref) < 1e-15 + + +def test_bad_args(): + # no vargs given + raises(TypeError, lambda: lambdify(1)) + # same with vector exprs + raises(TypeError, lambda: lambdify([1, 2])) + + +def test_atoms(): + # Non-Symbol atoms should not be pulled out from the expression namespace + f = lambdify(x, pi + x, {"pi": 3.14}) + assert f(0) == 3.14 + f = lambdify(x, I + x, {"I": 1j}) + assert f(1) == 1 + 1j + +#================== Test different modules ========================= + +# high precision output of sin(0.2*pi) is used to detect if precision is lost unwanted + + +@conserve_mpmath_dps +def test_sympy_lambda(): + mpmath.mp.dps = 50 + sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020") + f = lambdify(x, sin(x), "sympy") + assert f(x) == sin(x) + prec = 1e-15 + assert -prec < f(Rational(1, 5)).evalf() - Float(str(sin02)) < prec + # arctan is in numpy module and should not be available + # The arctan below gives NameError. What is this supposed to test? 
+ # raises(NameError, lambda: lambdify(x, arctan(x), "sympy")) + + +@conserve_mpmath_dps +def test_math_lambda(): + mpmath.mp.dps = 50 + sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020") + f = lambdify(x, sin(x), "math") + prec = 1e-15 + assert -prec < f(0.2) - sin02 < prec + raises(TypeError, lambda: f(x)) + # if this succeeds, it can't be a Python math function + + +@conserve_mpmath_dps +def test_mpmath_lambda(): + mpmath.mp.dps = 50 + sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020") + f = lambdify(x, sin(x), "mpmath") + prec = 1e-49 # mpmath precision is around 50 decimal places + assert -prec < f(mpmath.mpf("0.2")) - sin02 < prec + raises(TypeError, lambda: f(x)) + # if this succeeds, it can't be a mpmath function + + ref2 = (mpmath.mpf("1e-30") + - mpmath.mpf("1e-45")/2 + + 5*mpmath.mpf("1e-60")/6 + - 3*mpmath.mpf("1e-75")/4 + + 33*mpmath.mpf("1e-90")/40 + ) + f2a = lambdify((x, y), x**y - 1, "mpmath") + f2b = lambdify((x, y), powm1(x, y), "mpmath") + f2c = lambdify((x,), expm1(x*log1p(x)), "mpmath") + ans2a = f2a(mpmath.mpf("1")+mpmath.mpf("1e-15"), mpmath.mpf("1e-15")) + ans2b = f2b(mpmath.mpf("1")+mpmath.mpf("1e-15"), mpmath.mpf("1e-15")) + ans2c = f2c(mpmath.mpf("1e-15")) + assert abs(ans2a - ref2) < 1e-51 + assert abs(ans2b - ref2) < 1e-67 + assert abs(ans2c - ref2) < 1e-80 + + +@conserve_mpmath_dps +def test_number_precision(): + mpmath.mp.dps = 50 + sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020") + f = lambdify(x, sin02, "mpmath") + prec = 1e-49 # mpmath precision is around 50 decimal places + assert -prec < f(0) - sin02 < prec + +@conserve_mpmath_dps +def test_mpmath_precision(): + mpmath.mp.dps = 100 + assert str(lambdify((), pi.evalf(100), 'mpmath')()) == str(pi.evalf(100)) + +#================== Test Translations ============================== +# We can only check if all translated functions are valid. It has to be checked +# by hand if they are complete. 
+ + +def test_math_transl(): + from sympy.utilities.lambdify import MATH_TRANSLATIONS + for sym, mat in MATH_TRANSLATIONS.items(): + assert sym in sympy.__dict__ + assert mat in math.__dict__ + + +def test_mpmath_transl(): + from sympy.utilities.lambdify import MPMATH_TRANSLATIONS + for sym, mat in MPMATH_TRANSLATIONS.items(): + assert sym in sympy.__dict__ or sym == 'Matrix' + assert mat in mpmath.__dict__ + + +def test_numpy_transl(): + if not numpy: + skip("numpy not installed.") + + from sympy.utilities.lambdify import NUMPY_TRANSLATIONS + for sym, nump in NUMPY_TRANSLATIONS.items(): + assert sym in sympy.__dict__ + assert nump in numpy.__dict__ + + +def test_scipy_transl(): + if not scipy: + skip("scipy not installed.") + + from sympy.utilities.lambdify import SCIPY_TRANSLATIONS + for sym, scip in SCIPY_TRANSLATIONS.items(): + assert sym in sympy.__dict__ + assert scip in scipy.__dict__ or scip in scipy.special.__dict__ + + +def test_numpy_translation_abs(): + if not numpy: + skip("numpy not installed.") + + f = lambdify(x, Abs(x), "numpy") + assert f(-1) == 1 + assert f(1) == 1 + + +def test_numexpr_printer(): + if not numexpr: + skip("numexpr not installed.") + + # if translation/printing is done incorrectly then evaluating + # a lambdified numexpr expression will throw an exception + from sympy.printing.lambdarepr import NumExprPrinter + + blacklist = ('where', 'complex', 'contains') + arg_tuple = (x, y, z) # some functions take more than one argument + for sym in NumExprPrinter._numexpr_functions.keys(): + if sym in blacklist: + continue + ssym = S(sym) + if hasattr(ssym, '_nargs'): + nargs = ssym._nargs[0] + else: + nargs = 1 + args = arg_tuple[:nargs] + f = lambdify(args, ssym(*args), modules='numexpr') + assert f(*(1, )*nargs) is not None + + +def test_issue_9334(): + if not numexpr: + skip("numexpr not installed.") + if not numpy: + skip("numpy not installed.") + expr = S('b*a - sqrt(a**2)') + a, b = sorted(expr.free_symbols, key=lambda s: s.name) + func_numexpr = lambdify((a,b), expr, modules=[numexpr], dummify=False) + foo, bar = numpy.random.random((2, 4)) + func_numexpr(foo, bar) + + +def test_issue_12984(): + if not numexpr: + skip("numexpr not installed.") + func_numexpr = lambdify((x,y,z), Piecewise((y, x >= 0), (z, x > -1)), numexpr) + with ignore_warnings(RuntimeWarning): + assert func_numexpr(1, 24, 42) == 24 + assert str(func_numexpr(-1, 24, 42)) == 'nan' + + +def test_empty_modules(): + x, y = symbols('x y') + expr = -(x % y) + + no_modules = lambdify([x, y], expr) + empty_modules = lambdify([x, y], expr, modules=[]) + assert no_modules(3, 7) == empty_modules(3, 7) + assert no_modules(3, 7) == -3 + + +def test_exponentiation(): + f = lambdify(x, x**2) + assert f(-1) == 1 + assert f(0) == 0 + assert f(1) == 1 + assert f(-2) == 4 + assert f(2) == 4 + assert f(2.5) == 6.25 + + +def test_sqrt(): + f = lambdify(x, sqrt(x)) + assert f(0) == 0.0 + assert f(1) == 1.0 + assert f(4) == 2.0 + assert abs(f(2) - 1.414) < 0.001 + assert f(6.25) == 2.5 + + +def test_trig(): + f = lambdify([x], [cos(x), sin(x)], 'math') + d = f(pi) + prec = 1e-11 + assert -prec < d[0] + 1 < prec + assert -prec < d[1] < prec + d = f(3.14159) + prec = 1e-5 + assert -prec < d[0] + 1 < prec + assert -prec < d[1] < prec + + +def test_integral(): + if numpy and not scipy: + skip("scipy not installed.") + f = Lambda(x, exp(-x**2)) + l = lambdify(y, Integral(f(x), (x, y, oo))) + d = l(-oo) + assert 1.77245385 < d < 1.772453851 + + +def test_double_integral(): + if numpy and not scipy: + skip("scipy not 
installed.") + # example from http://mpmath.org/doc/current/calculus/integration.html + i = Integral(1/(1 - x**2*y**2), (x, 0, 1), (y, 0, z)) + l = lambdify([z], i) + d = l(1) + assert 1.23370055 < d < 1.233700551 + + +#================== Test vectors =================================== + + +def test_vector_simple(): + f = lambdify((x, y, z), (z, y, x)) + assert f(3, 2, 1) == (1, 2, 3) + assert f(1.0, 2.0, 3.0) == (3.0, 2.0, 1.0) + # make sure correct number of args required + raises(TypeError, lambda: f(0)) + + +def test_vector_discontinuous(): + f = lambdify(x, (-1/x, 1/x)) + raises(ZeroDivisionError, lambda: f(0)) + assert f(1) == (-1.0, 1.0) + assert f(2) == (-0.5, 0.5) + assert f(-2) == (0.5, -0.5) + + +def test_trig_symbolic(): + f = lambdify([x], [cos(x), sin(x)], 'math') + d = f(pi) + assert abs(d[0] + 1) < 0.0001 + assert abs(d[1] - 0) < 0.0001 + + +def test_trig_float(): + f = lambdify([x], [cos(x), sin(x)]) + d = f(3.14159) + assert abs(d[0] + 1) < 0.0001 + assert abs(d[1] - 0) < 0.0001 + + +def test_docs(): + f = lambdify(x, x**2) + assert f(2) == 4 + f = lambdify([x, y, z], [z, y, x]) + assert f(1, 2, 3) == [3, 2, 1] + f = lambdify(x, sqrt(x)) + assert f(4) == 2.0 + f = lambdify((x, y), sin(x*y)**2) + assert f(0, 5) == 0 + + +def test_math(): + f = lambdify((x, y), sin(x), modules="math") + assert f(0, 5) == 0 + + +def test_sin(): + f = lambdify(x, sin(x)**2) + assert isinstance(f(2), float) + f = lambdify(x, sin(x)**2, modules="math") + assert isinstance(f(2), float) + + +def test_matrix(): + A = Matrix([[x, x*y], [sin(z) + 4, x**z]]) + sol = Matrix([[1, 2], [sin(3) + 4, 1]]) + f = lambdify((x, y, z), A, modules="sympy") + assert f(1, 2, 3) == sol + f = lambdify((x, y, z), (A, [A]), modules="sympy") + assert f(1, 2, 3) == (sol, [sol]) + J = Matrix((x, x + y)).jacobian((x, y)) + v = Matrix((x, y)) + sol = Matrix([[1, 0], [1, 1]]) + assert lambdify(v, J, modules='sympy')(1, 2) == sol + assert lambdify(v.T, J, modules='sympy')(1, 2) == sol + + +def test_numpy_matrix(): + if not numpy: + skip("numpy not installed.") + A = Matrix([[x, x*y], [sin(z) + 4, x**z]]) + sol_arr = numpy.array([[1, 2], [numpy.sin(3) + 4, 1]]) + #Lambdify array first, to ensure return to array as default + f = lambdify((x, y, z), A, ['numpy']) + numpy.testing.assert_allclose(f(1, 2, 3), sol_arr) + #Check that the types are arrays and matrices + assert isinstance(f(1, 2, 3), numpy.ndarray) + + # gh-15071 + class dot(Function): + pass + x_dot_mtx = dot(x, Matrix([[2], [1], [0]])) + f_dot1 = lambdify(x, x_dot_mtx) + inp = numpy.zeros((17, 3)) + assert numpy.all(f_dot1(inp) == 0) + + strict_kw = {"allow_unknown_functions": False, "inline": True, "fully_qualified_modules": False} + p2 = NumPyPrinter(dict(user_functions={'dot': 'dot'}, **strict_kw)) + f_dot2 = lambdify(x, x_dot_mtx, printer=p2) + assert numpy.all(f_dot2(inp) == 0) + + p3 = NumPyPrinter(strict_kw) + # The line below should probably fail upon construction (before calling with "(inp)"): + raises(Exception, lambda: lambdify(x, x_dot_mtx, printer=p3)(inp)) + + +def test_numpy_transpose(): + if not numpy: + skip("numpy not installed.") + A = Matrix([[1, x], [0, 1]]) + f = lambdify((x), A.T, modules="numpy") + numpy.testing.assert_array_equal(f(2), numpy.array([[1, 0], [2, 1]])) + + +def test_numpy_dotproduct(): + if not numpy: + skip("numpy not installed") + A = Matrix([x, y, z]) + f1 = lambdify([x, y, z], DotProduct(A, A), modules='numpy') + f2 = lambdify([x, y, z], DotProduct(A, A.T), modules='numpy') + f3 = lambdify([x, y, z], DotProduct(A.T, A), 
modules='numpy') + f4 = lambdify([x, y, z], DotProduct(A, A.T), modules='numpy') + + assert f1(1, 2, 3) == \ + f2(1, 2, 3) == \ + f3(1, 2, 3) == \ + f4(1, 2, 3) == \ + numpy.array([14]) + + +def test_numpy_inverse(): + if not numpy: + skip("numpy not installed.") + A = Matrix([[1, x], [0, 1]]) + f = lambdify((x), A**-1, modules="numpy") + numpy.testing.assert_array_equal(f(2), numpy.array([[1, -2], [0, 1]])) + + +def test_numpy_old_matrix(): + if not numpy: + skip("numpy not installed.") + A = Matrix([[x, x*y], [sin(z) + 4, x**z]]) + sol_arr = numpy.array([[1, 2], [numpy.sin(3) + 4, 1]]) + f = lambdify((x, y, z), A, [{'ImmutableDenseMatrix': numpy.matrix}, 'numpy']) + with ignore_warnings(PendingDeprecationWarning): + numpy.testing.assert_allclose(f(1, 2, 3), sol_arr) + assert isinstance(f(1, 2, 3), numpy.matrix) + + +def test_scipy_sparse_matrix(): + if not scipy: + skip("scipy not installed.") + A = SparseMatrix([[x, 0], [0, y]]) + f = lambdify((x, y), A, modules="scipy") + B = f(1, 2) + assert isinstance(B, scipy.sparse.coo_matrix) + + +def test_python_div_zero_issue_11306(): + if not numpy: + skip("numpy not installed.") + p = Piecewise((1 / x, y < -1), (x, y < 1), (1 / x, True)) + f = lambdify([x, y], p, modules='numpy') + numpy.seterr(divide='ignore') + assert float(f(numpy.array([0]),numpy.array([0.5]))) == 0 + assert str(float(f(numpy.array([0]),numpy.array([1])))) == 'inf' + numpy.seterr(divide='warn') + + +def test_issue9474(): + mods = [None, 'math'] + if numpy: + mods.append('numpy') + if mpmath: + mods.append('mpmath') + for mod in mods: + f = lambdify(x, S.One/x, modules=mod) + assert f(2) == 0.5 + f = lambdify(x, floor(S.One/x), modules=mod) + assert f(2) == 0 + + for absfunc, modules in product([Abs, abs], mods): + f = lambdify(x, absfunc(x), modules=modules) + assert f(-1) == 1 + assert f(1) == 1 + assert f(3+4j) == 5 + + +def test_issue_9871(): + if not numexpr: + skip("numexpr not installed.") + if not numpy: + skip("numpy not installed.") + + r = sqrt(x**2 + y**2) + expr = diff(1/r, x) + + xn = yn = numpy.linspace(1, 10, 16) + # expr(xn, xn) = -xn/(sqrt(2)*xn)^3 + fv_exact = -numpy.sqrt(2.)**-3 * xn**-2 + + fv_numpy = lambdify((x, y), expr, modules='numpy')(xn, yn) + fv_numexpr = lambdify((x, y), expr, modules='numexpr')(xn, yn) + numpy.testing.assert_allclose(fv_numpy, fv_exact, rtol=1e-10) + numpy.testing.assert_allclose(fv_numexpr, fv_exact, rtol=1e-10) + + +def test_numpy_piecewise(): + if not numpy: + skip("numpy not installed.") + pieces = Piecewise((x, x < 3), (x**2, x > 5), (0, True)) + f = lambdify(x, pieces, modules="numpy") + numpy.testing.assert_array_equal(f(numpy.arange(10)), + numpy.array([0, 1, 2, 0, 0, 0, 36, 49, 64, 81])) + # If we evaluate somewhere all conditions are False, we should get back NaN + nodef_func = lambdify(x, Piecewise((x, x > 0), (-x, x < 0))) + numpy.testing.assert_array_equal(nodef_func(numpy.array([-1, 0, 1])), + numpy.array([1, numpy.nan, 1])) + + +def test_numpy_logical_ops(): + if not numpy: + skip("numpy not installed.") + and_func = lambdify((x, y), And(x, y), modules="numpy") + and_func_3 = lambdify((x, y, z), And(x, y, z), modules="numpy") + or_func = lambdify((x, y), Or(x, y), modules="numpy") + or_func_3 = lambdify((x, y, z), Or(x, y, z), modules="numpy") + not_func = lambdify((x), Not(x), modules="numpy") + arr1 = numpy.array([True, True]) + arr2 = numpy.array([False, True]) + arr3 = numpy.array([True, False]) + numpy.testing.assert_array_equal(and_func(arr1, arr2), numpy.array([False, True])) + 
numpy.testing.assert_array_equal(and_func_3(arr1, arr2, arr3), numpy.array([False, False])) + numpy.testing.assert_array_equal(or_func(arr1, arr2), numpy.array([True, True])) + numpy.testing.assert_array_equal(or_func_3(arr1, arr2, arr3), numpy.array([True, True])) + numpy.testing.assert_array_equal(not_func(arr2), numpy.array([True, False])) + + +def test_numpy_matmul(): + if not numpy: + skip("numpy not installed.") + xmat = Matrix([[x, y], [z, 1+z]]) + ymat = Matrix([[x**2], [Abs(x)]]) + mat_func = lambdify((x, y, z), xmat*ymat, modules="numpy") + numpy.testing.assert_array_equal(mat_func(0.5, 3, 4), numpy.array([[1.625], [3.5]])) + numpy.testing.assert_array_equal(mat_func(-0.5, 3, 4), numpy.array([[1.375], [3.5]])) + # Multiple matrices chained together in multiplication + f = lambdify((x, y, z), xmat*xmat*xmat, modules="numpy") + numpy.testing.assert_array_equal(f(0.5, 3, 4), numpy.array([[72.125, 119.25], + [159, 251]])) + + +def test_numpy_numexpr(): + if not numpy: + skip("numpy not installed.") + if not numexpr: + skip("numexpr not installed.") + a, b, c = numpy.random.randn(3, 128, 128) + # ensure that numpy and numexpr return same value for complicated expression + expr = sin(x) + cos(y) + tan(z)**2 + Abs(z-y)*acos(sin(y*z)) + \ + Abs(y-z)*acosh(2+exp(y-x))- sqrt(x**2+I*y**2) + npfunc = lambdify((x, y, z), expr, modules='numpy') + nefunc = lambdify((x, y, z), expr, modules='numexpr') + assert numpy.allclose(npfunc(a, b, c), nefunc(a, b, c)) + + +def test_numexpr_userfunctions(): + if not numpy: + skip("numpy not installed.") + if not numexpr: + skip("numexpr not installed.") + a, b = numpy.random.randn(2, 10) + uf = type('uf', (Function, ), + {'eval' : classmethod(lambda x, y : y**2+1)}) + func = lambdify(x, 1-uf(x), modules='numexpr') + assert numpy.allclose(func(a), -(a**2)) + + uf = implemented_function(Function('uf'), lambda x, y : 2*x*y+1) + func = lambdify((x, y), uf(x, y), modules='numexpr') + assert numpy.allclose(func(a, b), 2*a*b+1) + + +def test_tensorflow_basic_math(): + if not tensorflow: + skip("tensorflow not installed.") + expr = Max(sin(x), Abs(1/(x+2))) + func = lambdify(x, expr, modules="tensorflow") + + with tensorflow.compat.v1.Session() as s: + a = tensorflow.constant(0, dtype=tensorflow.float32) + assert func(a).eval(session=s) == 0.5 + + +def test_tensorflow_placeholders(): + if not tensorflow: + skip("tensorflow not installed.") + expr = Max(sin(x), Abs(1/(x+2))) + func = lambdify(x, expr, modules="tensorflow") + + with tensorflow.compat.v1.Session() as s: + a = tensorflow.compat.v1.placeholder(dtype=tensorflow.float32) + assert func(a).eval(session=s, feed_dict={a: 0}) == 0.5 + + +def test_tensorflow_variables(): + if not tensorflow: + skip("tensorflow not installed.") + expr = Max(sin(x), Abs(1/(x+2))) + func = lambdify(x, expr, modules="tensorflow") + + with tensorflow.compat.v1.Session() as s: + a = tensorflow.Variable(0, dtype=tensorflow.float32) + s.run(a.initializer) + assert func(a).eval(session=s, feed_dict={a: 0}) == 0.5 + + +def test_tensorflow_logical_operations(): + if not tensorflow: + skip("tensorflow not installed.") + expr = Not(And(Or(x, y), y)) + func = lambdify([x, y], expr, modules="tensorflow") + + with tensorflow.compat.v1.Session() as s: + assert func(False, True).eval(session=s) == False + + +def test_tensorflow_piecewise(): + if not tensorflow: + skip("tensorflow not installed.") + expr = Piecewise((0, Eq(x,0)), (-1, x < 0), (1, x > 0)) + func = lambdify(x, expr, modules="tensorflow") + + with tensorflow.compat.v1.Session() as 
s: + assert func(-1).eval(session=s) == -1 + assert func(0).eval(session=s) == 0 + assert func(1).eval(session=s) == 1 + + +def test_tensorflow_multi_max(): + if not tensorflow: + skip("tensorflow not installed.") + expr = Max(x, -x, x**2) + func = lambdify(x, expr, modules="tensorflow") + + with tensorflow.compat.v1.Session() as s: + assert func(-2).eval(session=s) == 4 + + +def test_tensorflow_multi_min(): + if not tensorflow: + skip("tensorflow not installed.") + expr = Min(x, -x, x**2) + func = lambdify(x, expr, modules="tensorflow") + + with tensorflow.compat.v1.Session() as s: + assert func(-2).eval(session=s) == -2 + + +def test_tensorflow_relational(): + if not tensorflow: + skip("tensorflow not installed.") + expr = x >= 0 + func = lambdify(x, expr, modules="tensorflow") + + with tensorflow.compat.v1.Session() as s: + assert func(1).eval(session=s) == True + + +def test_tensorflow_complexes(): + if not tensorflow: + skip("tensorflow not installed") + + func1 = lambdify(x, re(x), modules="tensorflow") + func2 = lambdify(x, im(x), modules="tensorflow") + func3 = lambdify(x, Abs(x), modules="tensorflow") + func4 = lambdify(x, arg(x), modules="tensorflow") + + with tensorflow.compat.v1.Session() as s: + # For versions before + # https://github.com/tensorflow/tensorflow/issues/30029 + # resolved, using Python numeric types may not work + a = tensorflow.constant(1+2j) + assert func1(a).eval(session=s) == 1 + assert func2(a).eval(session=s) == 2 + + tensorflow_result = func3(a).eval(session=s) + sympy_result = Abs(1 + 2j).evalf() + assert abs(tensorflow_result-sympy_result) < 10**-6 + + tensorflow_result = func4(a).eval(session=s) + sympy_result = arg(1 + 2j).evalf() + assert abs(tensorflow_result-sympy_result) < 10**-6 + + +def test_tensorflow_array_arg(): + # Test for issue 14655 (tensorflow part) + if not tensorflow: + skip("tensorflow not installed.") + + f = lambdify([[x, y]], x*x + y, 'tensorflow') + + with tensorflow.compat.v1.Session() as s: + fcall = f(tensorflow.constant([2.0, 1.0])) + assert fcall.eval(session=s) == 5.0 + + +#================== Test symbolic ================================== + + +def test_sym_single_arg(): + f = lambdify(x, x * y) + assert f(z) == z * y + + +def test_sym_list_args(): + f = lambdify([x, y], x + y + z) + assert f(1, 2) == 3 + z + + +def test_sym_integral(): + f = Lambda(x, exp(-x**2)) + l = lambdify(x, Integral(f(x), (x, -oo, oo)), modules="sympy") + assert l(y) == Integral(exp(-y**2), (y, -oo, oo)) + assert l(y).doit() == sqrt(pi) + + +def test_namespace_order(): + # lambdify had a bug, such that module dictionaries or cached module + # dictionaries would pull earlier namespaces into themselves. + # Because the module dictionaries form the namespace of the + # generated lambda, this meant that the behavior of a previously + # generated lambda function could change as a result of later calls + # to lambdify. 
+ n1 = {'f': lambda x: 'first f'} + n2 = {'f': lambda x: 'second f', + 'g': lambda x: 'function g'} + f = sympy.Function('f') + g = sympy.Function('g') + if1 = lambdify(x, f(x), modules=(n1, "sympy")) + assert if1(1) == 'first f' + if2 = lambdify(x, g(x), modules=(n2, "sympy")) + # previously gave 'second f' + assert if1(1) == 'first f' + + assert if2(1) == 'function g' + + +def test_imps(): + # Here we check if the default returned functions are anonymous - in + # the sense that we can have more than one function with the same name + f = implemented_function('f', lambda x: 2*x) + g = implemented_function('f', lambda x: math.sqrt(x)) + l1 = lambdify(x, f(x)) + l2 = lambdify(x, g(x)) + assert str(f(x)) == str(g(x)) + assert l1(3) == 6 + assert l2(3) == math.sqrt(3) + # check that we can pass in a Function as input + func = sympy.Function('myfunc') + assert not hasattr(func, '_imp_') + my_f = implemented_function(func, lambda x: 2*x) + assert hasattr(my_f, '_imp_') + # Error for functions with same name and different implementation + f2 = implemented_function("f", lambda x: x + 101) + raises(ValueError, lambda: lambdify(x, f(f2(x)))) + + +def test_imps_errors(): + # Test errors that implemented functions can return, and still be able to + # form expressions. + # See: https://github.com/sympy/sympy/issues/10810 + # + # XXX: Removed AttributeError here. This test was added due to issue 10810 + # but that issue was about ValueError. It doesn't seem reasonable to + # "support" catching AttributeError in the same context... + for val, error_class in product((0, 0., 2, 2.0), (TypeError, ValueError)): + + def myfunc(a): + if a == 0: + raise error_class + return 1 + + f = implemented_function('f', myfunc) + expr = f(val) + assert expr == f(val) + + +def test_imps_wrong_args(): + raises(ValueError, lambda: implemented_function(sin, lambda x: x)) + + +def test_lambdify_imps(): + # Test lambdify with implemented functions + # first test basic (sympy) lambdify + f = sympy.cos + assert lambdify(x, f(x))(0) == 1 + assert lambdify(x, 1 + f(x))(0) == 2 + assert lambdify((x, y), y + f(x))(0, 1) == 2 + # make an implemented function and test + f = implemented_function("f", lambda x: x + 100) + assert lambdify(x, f(x))(0) == 100 + assert lambdify(x, 1 + f(x))(0) == 101 + assert lambdify((x, y), y + f(x))(0, 1) == 101 + # Can also handle tuples, lists, dicts as expressions + lam = lambdify(x, (f(x), x)) + assert lam(3) == (103, 3) + lam = lambdify(x, [f(x), x]) + assert lam(3) == [103, 3] + lam = lambdify(x, [f(x), (f(x), x)]) + assert lam(3) == [103, (103, 3)] + lam = lambdify(x, {f(x): x}) + assert lam(3) == {103: 3} + lam = lambdify(x, {f(x): x}) + assert lam(3) == {103: 3} + lam = lambdify(x, {x: f(x)}) + assert lam(3) == {3: 103} + # Check that imp preferred to other namespaces by default + d = {'f': lambda x: x + 99} + lam = lambdify(x, f(x), d) + assert lam(3) == 103 + # Unless flag passed + lam = lambdify(x, f(x), d, use_imps=False) + assert lam(3) == 102 + + +def test_dummification(): + t = symbols('t') + F = Function('F') + G = Function('G') + #"\alpha" is not a valid Python variable name + #lambdify should sub in a dummy for it, and return + #without a syntax error + alpha = symbols(r'\alpha') + some_expr = 2 * F(t)**2 / G(t) + lam = lambdify((F(t), G(t)), some_expr) + assert lam(3, 9) == 2 + lam = lambdify(sin(t), 2 * sin(t)**2) + assert lam(F(t)) == 2 * F(t)**2 + #Test that \alpha was properly dummified + lam = lambdify((alpha, t), 2*alpha + t) + assert lam(2, 1) == 5 + raises(SyntaxError, lambda: 
lambdify(F(t) * G(t), F(t) * G(t) + 5)) + raises(SyntaxError, lambda: lambdify(2 * F(t), 2 * F(t) + 5)) + raises(SyntaxError, lambda: lambdify(2 * F(t), 4 * F(t) + 5)) + + +def test_curly_matrix_symbol(): + # Issue #15009 + curlyv = sympy.MatrixSymbol("{v}", 2, 1) + lam = lambdify(curlyv, curlyv) + assert lam(1)==1 + lam = lambdify(curlyv, curlyv, dummify=True) + assert lam(1)==1 + + +def test_python_keywords(): + # Test for issue 7452. The automatic dummification should ensure use of + # Python reserved keywords as symbol names will create valid lambda + # functions. This is an additional regression test. + python_if = symbols('if') + expr = python_if / 2 + f = lambdify(python_if, expr) + assert f(4.0) == 2.0 + + +def test_lambdify_docstring(): + func = lambdify((w, x, y, z), w + x + y + z) + ref = ( + "Created with lambdify. Signature:\n\n" + "func(w, x, y, z)\n\n" + "Expression:\n\n" + "w + x + y + z" + ).splitlines() + assert func.__doc__.splitlines()[:len(ref)] == ref + syms = symbols('a1:26') + func = lambdify(syms, sum(syms)) + ref = ( + "Created with lambdify. Signature:\n\n" + "func(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15,\n" + " a16, a17, a18, a19, a20, a21, a22, a23, a24, a25)\n\n" + "Expression:\n\n" + "a1 + a10 + a11 + a12 + a13 + a14 + a15 + a16 + a17 + a18 + a19 + a2 + a20 +..." + ).splitlines() + assert func.__doc__.splitlines()[:len(ref)] == ref + + +#================== Test special printers ========================== + + +def test_special_printers(): + from sympy.printing.lambdarepr import IntervalPrinter + + def intervalrepr(expr): + return IntervalPrinter().doprint(expr) + + expr = sqrt(sqrt(2) + sqrt(3)) + S.Half + + func0 = lambdify((), expr, modules="mpmath", printer=intervalrepr) + func1 = lambdify((), expr, modules="mpmath", printer=IntervalPrinter) + func2 = lambdify((), expr, modules="mpmath", printer=IntervalPrinter()) + + mpi = type(mpmath.mpi(1, 2)) + + assert isinstance(func0(), mpi) + assert isinstance(func1(), mpi) + assert isinstance(func2(), mpi) + + # To check Is lambdify loggamma works for mpmath or not + exp1 = lambdify(x, loggamma(x), 'mpmath')(5) + exp2 = lambdify(x, loggamma(x), 'mpmath')(1.8) + exp3 = lambdify(x, loggamma(x), 'mpmath')(15) + exp_ls = [exp1, exp2, exp3] + + sol1 = mpmath.loggamma(5) + sol2 = mpmath.loggamma(1.8) + sol3 = mpmath.loggamma(15) + sol_ls = [sol1, sol2, sol3] + + assert exp_ls == sol_ls + + +def test_true_false(): + # We want exact is comparison here, not just == + assert lambdify([], true)() is True + assert lambdify([], false)() is False + + +def test_issue_2790(): + assert lambdify((x, (y, z)), x + y)(1, (2, 4)) == 3 + assert lambdify((x, (y, (w, z))), w + x + y + z)(1, (2, (3, 4))) == 10 + assert lambdify(x, x + 1, dummify=False)(1) == 2 + + +def test_issue_12092(): + f = implemented_function('f', lambda x: x**2) + assert f(f(2)).evalf() == Float(16) + + +def test_issue_14911(): + class Variable(sympy.Symbol): + def _sympystr(self, printer): + return printer.doprint(self.name) + + _lambdacode = _sympystr + _numpycode = _sympystr + + x = Variable('x') + y = 2 * x + code = LambdaPrinter().doprint(y) + assert code.replace(' ', '') == '2*x' + + +def test_ITE(): + assert lambdify((x, y, z), ITE(x, y, z))(True, 5, 3) == 5 + assert lambdify((x, y, z), ITE(x, y, z))(False, 5, 3) == 3 + + +def test_Min_Max(): + # see gh-10375 + assert lambdify((x, y, z), Min(x, y, z))(1, 2, 3) == 1 + assert lambdify((x, y, z), Max(x, y, z))(1, 2, 3) == 3 + + +def test_Indexed(): + # Issue #10934 + if not numpy: + 
skip("numpy not installed") + + a = IndexedBase('a') + i, j = symbols('i j') + b = numpy.array([[1, 2], [3, 4]]) + assert lambdify(a, Sum(a[x, y], (x, 0, 1), (y, 0, 1)))(b) == 10 + + +def test_issue_12173(): + #test for issue 12173 + expr1 = lambdify((x, y), uppergamma(x, y),"mpmath")(1, 2) + expr2 = lambdify((x, y), lowergamma(x, y),"mpmath")(1, 2) + assert expr1 == uppergamma(1, 2).evalf() + assert expr2 == lowergamma(1, 2).evalf() + + +def test_issue_13642(): + if not numpy: + skip("numpy not installed") + f = lambdify(x, sinc(x)) + assert Abs(f(1) - sinc(1)).n() < 1e-15 + + +def test_sinc_mpmath(): + f = lambdify(x, sinc(x), "mpmath") + assert Abs(f(1) - sinc(1)).n() < 1e-15 + + +def test_lambdify_dummy_arg(): + d1 = Dummy() + f1 = lambdify(d1, d1 + 1, dummify=False) + assert f1(2) == 3 + f1b = lambdify(d1, d1 + 1) + assert f1b(2) == 3 + d2 = Dummy('x') + f2 = lambdify(d2, d2 + 1) + assert f2(2) == 3 + f3 = lambdify([[d2]], d2 + 1) + assert f3([2]) == 3 + + +def test_lambdify_mixed_symbol_dummy_args(): + d = Dummy() + # Contrived example of name clash + dsym = symbols(str(d)) + f = lambdify([d, dsym], d - dsym) + assert f(4, 1) == 3 + + +def test_numpy_array_arg(): + # Test for issue 14655 (numpy part) + if not numpy: + skip("numpy not installed") + + f = lambdify([[x, y]], x*x + y, 'numpy') + + assert f(numpy.array([2.0, 1.0])) == 5 + + +def test_scipy_fns(): + if not scipy: + skip("scipy not installed") + + single_arg_sympy_fns = [Ei, erf, erfc, factorial, gamma, loggamma, digamma, Si, Ci] + single_arg_scipy_fns = [scipy.special.expi, scipy.special.erf, scipy.special.erfc, + scipy.special.factorial, scipy.special.gamma, scipy.special.gammaln, + scipy.special.psi, scipy.special.sici, scipy.special.sici] + numpy.random.seed(0) + for (sympy_fn, scipy_fn) in zip(single_arg_sympy_fns, single_arg_scipy_fns): + f = lambdify(x, sympy_fn(x), modules="scipy") + for i in range(20): + tv = numpy.random.uniform(-10, 10) + 1j*numpy.random.uniform(-5, 5) + # SciPy thinks that factorial(z) is 0 when re(z) < 0 and + # does not support complex numbers. + # SymPy does not think so. 
+ if sympy_fn == factorial: + tv = numpy.abs(tv) + # SciPy supports gammaln for real arguments only, + # and there is also a branch cut along the negative real axis + if sympy_fn == loggamma: + tv = numpy.abs(tv) + # SymPy's digamma evaluates as polygamma(0, z) + # which SciPy supports for real arguments only + if sympy_fn == digamma: + tv = numpy.real(tv) + sympy_result = sympy_fn(tv).evalf() + scipy_result = scipy_fn(tv) + # SciPy's sici returns a tuple with both Si and Ci present in it + # which needs to be unpacked + if sympy_fn == Si: + scipy_result = scipy_fn(tv)[0] + if sympy_fn == Ci: + scipy_result = scipy_fn(tv)[1] + assert abs(f(tv) - sympy_result) < 1e-13*(1 + abs(sympy_result)) + assert abs(f(tv) - scipy_result) < 1e-13*(1 + abs(sympy_result)) + + double_arg_sympy_fns = [RisingFactorial, besselj, bessely, besseli, + besselk, polygamma] + double_arg_scipy_fns = [scipy.special.poch, scipy.special.jv, + scipy.special.yv, scipy.special.iv, scipy.special.kv, scipy.special.polygamma] + for (sympy_fn, scipy_fn) in zip(double_arg_sympy_fns, double_arg_scipy_fns): + f = lambdify((x, y), sympy_fn(x, y), modules="scipy") + for i in range(20): + # SciPy supports only real orders of Bessel functions + tv1 = numpy.random.uniform(-10, 10) + tv2 = numpy.random.uniform(-10, 10) + 1j*numpy.random.uniform(-5, 5) + # SciPy requires a real valued 2nd argument for: poch, polygamma + if sympy_fn in (RisingFactorial, polygamma): + tv2 = numpy.real(tv2) + if sympy_fn == polygamma: + tv1 = abs(int(tv1)) # first argument to polygamma must be a non-negative integral. + sympy_result = sympy_fn(tv1, tv2).evalf() + assert abs(f(tv1, tv2) - sympy_result) < 1e-13*(1 + abs(sympy_result)) + assert abs(f(tv1, tv2) - scipy_fn(tv1, tv2)) < 1e-13*(1 + abs(sympy_result)) + + +def test_scipy_polys(): + if not scipy: + skip("scipy not installed") + numpy.random.seed(0) + + params = symbols('n k a b') + # list polynomials with the number of parameters + polys = [ + (chebyshevt, 1), + (chebyshevu, 1), + (legendre, 1), + (hermite, 1), + (laguerre, 1), + (gegenbauer, 2), + (assoc_legendre, 2), + (assoc_laguerre, 2), + (jacobi, 3) + ] + + msg = \ + "The random test of the function {func} with the arguments " \ + "{args} had failed because the SymPy result {sympy_result} " \ + "and SciPy result {scipy_result} had failed to converge " \ + "within the tolerance {tol} " \ + "(Actual absolute difference : {diff})" + + for sympy_fn, num_params in polys: + args = params[:num_params] + (x,) + f = lambdify(args, sympy_fn(*args)) + for _ in range(10): + tn = numpy.random.randint(3, 10) + tparams = tuple(numpy.random.uniform(0, 5, size=num_params-1)) + tv = numpy.random.uniform(-10, 10) + 1j*numpy.random.uniform(-5, 5) + # SciPy supports hermite for real arguments only + if sympy_fn == hermite: + tv = numpy.real(tv) + # assoc_legendre needs x in (-1, 1) and integer param at most n + if sympy_fn == assoc_legendre: + tv = numpy.random.uniform(-1, 1) + tparams = tuple(numpy.random.randint(1, tn, size=1)) + + vals = (tn,) + tparams + (tv,) + scipy_result = f(*vals) + sympy_result = sympy_fn(*vals).evalf() + atol = 1e-9*(1 + abs(sympy_result)) + diff = abs(scipy_result - sympy_result) + try: + assert diff < atol + except TypeError: + raise AssertionError( + msg.format( + func=repr(sympy_fn), + args=repr(vals), + sympy_result=repr(sympy_result), + scipy_result=repr(scipy_result), + diff=diff, + tol=atol) + ) + + +def test_lambdify_inspect(): + f = lambdify(x, x**2) + # Test that inspect.getsource works but don't hard-code implementation + 
# details + assert 'x**2' in inspect.getsource(f) + + +def test_issue_14941(): + x, y = Dummy(), Dummy() + + # test dict + f1 = lambdify([x, y], {x: 3, y: 3}, 'sympy') + assert f1(2, 3) == {2: 3, 3: 3} + + # test tuple + f2 = lambdify([x, y], (y, x), 'sympy') + assert f2(2, 3) == (3, 2) + f2b = lambdify([], (1,)) # gh-23224 + assert f2b() == (1,) + + # test list + f3 = lambdify([x, y], [y, x], 'sympy') + assert f3(2, 3) == [3, 2] + + +def test_lambdify_Derivative_arg_issue_16468(): + f = Function('f')(x) + fx = f.diff() + assert lambdify((f, fx), f + fx)(10, 5) == 15 + assert eval(lambdastr((f, fx), f/fx))(10, 5) == 2 + raises(SyntaxError, lambda: + eval(lambdastr((f, fx), f/fx, dummify=False))) + assert eval(lambdastr((f, fx), f/fx, dummify=True))(10, 5) == 2 + assert eval(lambdastr((fx, f), f/fx, dummify=True))(S(10), 5) == S.Half + assert lambdify(fx, 1 + fx)(41) == 42 + assert eval(lambdastr(fx, 1 + fx, dummify=True))(41) == 42 + + +def test_imag_real(): + f_re = lambdify([z], sympy.re(z)) + val = 3+2j + assert f_re(val) == val.real + + f_im = lambdify([z], sympy.im(z)) # see #15400 + assert f_im(val) == val.imag + + +def test_MatrixSymbol_issue_15578(): + if not numpy: + skip("numpy not installed") + A = MatrixSymbol('A', 2, 2) + A0 = numpy.array([[1, 2], [3, 4]]) + f = lambdify(A, A**(-1)) + assert numpy.allclose(f(A0), numpy.array([[-2., 1.], [1.5, -0.5]])) + g = lambdify(A, A**3) + assert numpy.allclose(g(A0), numpy.array([[37, 54], [81, 118]])) + + +def test_issue_15654(): + if not scipy: + skip("scipy not installed") + from sympy.abc import n, l, r, Z + from sympy.physics import hydrogen + nv, lv, rv, Zv = 1, 0, 3, 1 + sympy_value = hydrogen.R_nl(nv, lv, rv, Zv).evalf() + f = lambdify((n, l, r, Z), hydrogen.R_nl(n, l, r, Z)) + scipy_value = f(nv, lv, rv, Zv) + assert abs(sympy_value - scipy_value) < 1e-15 + + +def test_issue_15827(): + if not numpy: + skip("numpy not installed") + A = MatrixSymbol("A", 3, 3) + B = MatrixSymbol("B", 2, 3) + C = MatrixSymbol("C", 3, 4) + D = MatrixSymbol("D", 4, 5) + k=symbols("k") + f = lambdify(A, (2*k)*A) + g = lambdify(A, (2+k)*A) + h = lambdify(A, 2*A) + i = lambdify((B, C, D), 2*B*C*D) + assert numpy.array_equal(f(numpy.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]])), \ + numpy.array([[2*k, 4*k, 6*k], [2*k, 4*k, 6*k], [2*k, 4*k, 6*k]], dtype=object)) + + assert numpy.array_equal(g(numpy.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]])), \ + numpy.array([[k + 2, 2*k + 4, 3*k + 6], [k + 2, 2*k + 4, 3*k + 6], \ + [k + 2, 2*k + 4, 3*k + 6]], dtype=object)) + + assert numpy.array_equal(h(numpy.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]])), \ + numpy.array([[2, 4, 6], [2, 4, 6], [2, 4, 6]])) + + assert numpy.array_equal(i(numpy.array([[1, 2, 3], [1, 2, 3]]), numpy.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]), \ + numpy.array([[1, 2, 3, 4, 5], [1, 2, 3, 4, 5], [1, 2, 3, 4, 5], [1, 2, 3, 4, 5]])), numpy.array([[ 120, 240, 360, 480, 600], \ + [ 120, 240, 360, 480, 600]])) + + +def test_issue_16930(): + if not scipy: + skip("scipy not installed") + + x = symbols("x") + f = lambda x: S.GoldenRatio * x**2 + f_ = lambdify(x, f(x), modules='scipy') + assert f_(1) == scipy.constants.golden_ratio + +def test_issue_17898(): + if not scipy: + skip("scipy not installed") + x = symbols("x") + f_ = lambdify([x], sympy.LambertW(x,-1), modules='scipy') + assert f_(0.1) == mpmath.lambertw(0.1, -1) + +def test_issue_13167_21411(): + if not numpy: + skip("numpy not installed") + f1 = lambdify(x, sympy.Heaviside(x)) + f2 = lambdify(x, sympy.Heaviside(x, 1)) + res1 = f1([-1, 0, 1]) 
+ res2 = f2([-1, 0, 1]) + assert Abs(res1[0]).n() < 1e-15 # First functionality: only one argument passed + assert Abs(res1[1] - 1/2).n() < 1e-15 + assert Abs(res1[2] - 1).n() < 1e-15 + assert Abs(res2[0]).n() < 1e-15 # Second functionality: two arguments passed + assert Abs(res2[1] - 1).n() < 1e-15 + assert Abs(res2[2] - 1).n() < 1e-15 + +def test_single_e(): + f = lambdify(x, E) + assert f(23) == exp(1.0) + +def test_issue_16536(): + if not scipy: + skip("scipy not installed") + + a = symbols('a') + f1 = lowergamma(a, x) + F = lambdify((a, x), f1, modules='scipy') + assert abs(lowergamma(1, 3) - F(1, 3)) <= 1e-10 + + f2 = uppergamma(a, x) + F = lambdify((a, x), f2, modules='scipy') + assert abs(uppergamma(1, 3) - F(1, 3)) <= 1e-10 + + +def test_issue_22726(): + if not numpy: + skip("numpy not installed") + + x1, x2 = symbols('x1 x2') + f = Max(S.Zero, Min(x1, x2)) + g = derive_by_array(f, (x1, x2)) + G = lambdify((x1, x2), g, modules='numpy') + point = {x1: 1, x2: 2} + assert (abs(g.subs(point) - G(*point.values())) <= 1e-10).all() + + +def test_issue_22739(): + if not numpy: + skip("numpy not installed") + + x1, x2 = symbols('x1 x2') + f = Heaviside(Min(x1, x2)) + F = lambdify((x1, x2), f, modules='numpy') + point = {x1: 1, x2: 2} + assert abs(f.subs(point) - F(*point.values())) <= 1e-10 + + +def test_issue_22992(): + if not numpy: + skip("numpy not installed") + + a, t = symbols('a t') + expr = a*(log(cot(t/2)) - cos(t)) + F = lambdify([a, t], expr, 'numpy') + + point = {a: 10, t: 2} + + assert abs(expr.subs(point) - F(*point.values())) <= 1e-10 + + # Standard math + F = lambdify([a, t], expr) + + assert abs(expr.subs(point) - F(*point.values())) <= 1e-10 + + +def test_issue_19764(): + if not numpy: + skip("numpy not installed") + + expr = Array([x, x**2]) + f = lambdify(x, expr, 'numpy') + + assert f(1).__class__ == numpy.ndarray + +def test_issue_20070(): + if not numba: + skip("numba not installed") + + f = lambdify(x, sin(x), 'numpy') + assert numba.jit(f)(1)==0.8414709848078965 + + +def test_fresnel_integrals_scipy(): + if not scipy: + skip("scipy not installed") + + f1 = fresnelc(x) + f2 = fresnels(x) + F1 = lambdify(x, f1, modules='scipy') + F2 = lambdify(x, f2, modules='scipy') + + assert abs(fresnelc(1.3) - F1(1.3)) <= 1e-10 + assert abs(fresnels(1.3) - F2(1.3)) <= 1e-10 + + +def test_beta_scipy(): + if not scipy: + skip("scipy not installed") + + f = beta(x, y) + F = lambdify((x, y), f, modules='scipy') + + assert abs(beta(1.3, 2.3) - F(1.3, 2.3)) <= 1e-10 + + +def test_beta_math(): + f = beta(x, y) + F = lambdify((x, y), f, modules='math') + + assert abs(beta(1.3, 2.3) - F(1.3, 2.3)) <= 1e-10 + + +def test_betainc_scipy(): + if not scipy: + skip("scipy not installed") + + f = betainc(w, x, y, z) + F = lambdify((w, x, y, z), f, modules='scipy') + + assert abs(betainc(1.4, 3.1, 0.1, 0.5) - F(1.4, 3.1, 0.1, 0.5)) <= 1e-10 + + +def test_betainc_regularized_scipy(): + if not scipy: + skip("scipy not installed") + + f = betainc_regularized(w, x, y, z) + F = lambdify((w, x, y, z), f, modules='scipy') + + assert abs(betainc_regularized(0.2, 3.5, 0.1, 1) - F(0.2, 3.5, 0.1, 1)) <= 1e-10 + + +def test_numpy_special_math(): + if not numpy: + skip("numpy not installed") + + funcs = [expm1, log1p, exp2, log2, log10, hypot, logaddexp, logaddexp2] + for func in funcs: + if 2 in func.nargs: + expr = func(x, y) + args = (x, y) + num_args = (0.3, 0.4) + elif 1 in func.nargs: + expr = func(x) + args = (x,) + num_args = (0.3,) + else: + raise NotImplementedError("Need to handle other than unary 
& binary functions in test") + f = lambdify(args, expr) + result = f(*num_args) + reference = expr.subs(dict(zip(args, num_args))).evalf() + assert numpy.allclose(result, float(reference)) + + lae2 = lambdify((x, y), logaddexp2(log2(x), log2(y))) + assert abs(2.0**lae2(1e-50, 2.5e-50) - 3.5e-50) < 1e-62 # from NumPy's docstring + + +def test_scipy_special_math(): + if not scipy: + skip("scipy not installed") + + cm1 = lambdify((x,), cosm1(x), modules='scipy') + assert abs(cm1(1e-20) + 5e-41) < 1e-200 + + have_scipy_1_10plus = tuple(map(int, scipy.version.version.split('.')[:2])) >= (1, 10) + + if have_scipy_1_10plus: + cm2 = lambdify((x, y), powm1(x, y), modules='scipy') + assert abs(cm2(1.2, 1e-9) - 1.82321557e-10) < 1e-17 + + +def test_scipy_bernoulli(): + if not scipy: + skip("scipy not installed") + + bern = lambdify((x,), bernoulli(x), modules='scipy') + assert bern(1) == 0.5 + + +def test_scipy_harmonic(): + if not scipy: + skip("scipy not installed") + + hn = lambdify((x,), harmonic(x), modules='scipy') + assert hn(2) == 1.5 + hnm = lambdify((x, y), harmonic(x, y), modules='scipy') + assert hnm(2, 2) == 1.25 + + +def test_cupy_array_arg(): + if not cupy: + skip("CuPy not installed") + + f = lambdify([[x, y]], x*x + y, 'cupy') + result = f(cupy.array([2.0, 1.0])) + assert result == 5 + assert "cupy" in str(type(result)) + + +def test_cupy_array_arg_using_numpy(): + # numpy functions can be run on cupy arrays + # unclear if we can "officially" support this, + # depends on numpy __array_function__ support + if not cupy: + skip("CuPy not installed") + + f = lambdify([[x, y]], x*x + y, 'numpy') + result = f(cupy.array([2.0, 1.0])) + assert result == 5 + assert "cupy" in str(type(result)) + +def test_cupy_dotproduct(): + if not cupy: + skip("CuPy not installed") + + A = Matrix([x, y, z]) + f1 = lambdify([x, y, z], DotProduct(A, A), modules='cupy') + f2 = lambdify([x, y, z], DotProduct(A, A.T), modules='cupy') + f3 = lambdify([x, y, z], DotProduct(A.T, A), modules='cupy') + f4 = lambdify([x, y, z], DotProduct(A, A.T), modules='cupy') + + assert f1(1, 2, 3) == \ + f2(1, 2, 3) == \ + f3(1, 2, 3) == \ + f4(1, 2, 3) == \ + cupy.array([14]) + + +def test_jax_array_arg(): + if not jax: + skip("JAX not installed") + + f = lambdify([[x, y]], x*x + y, 'jax') + result = f(jax.numpy.array([2.0, 1.0])) + assert result == 5 + assert "jax" in str(type(result)) + + +def test_jax_array_arg_using_numpy(): + if not jax: + skip("JAX not installed") + + f = lambdify([[x, y]], x*x + y, 'numpy') + result = f(jax.numpy.array([2.0, 1.0])) + assert result == 5 + assert "jax" in str(type(result)) + + +def test_jax_dotproduct(): + if not jax: + skip("JAX not installed") + + A = Matrix([x, y, z]) + f1 = lambdify([x, y, z], DotProduct(A, A), modules='jax') + f2 = lambdify([x, y, z], DotProduct(A, A.T), modules='jax') + f3 = lambdify([x, y, z], DotProduct(A.T, A), modules='jax') + f4 = lambdify([x, y, z], DotProduct(A, A.T), modules='jax') + + assert f1(1, 2, 3) == \ + f2(1, 2, 3) == \ + f3(1, 2, 3) == \ + f4(1, 2, 3) == \ + jax.numpy.array([14]) + + +def test_lambdify_cse(): + def dummy_cse(exprs): + return (), exprs + + def minmem(exprs): + from sympy.simplify.cse_main import cse_release_variables, cse + return cse(exprs, postprocess=cse_release_variables) + + class Case: + def __init__(self, *, args, exprs, num_args, requires_numpy=False): + self.args = args + self.exprs = exprs + self.num_args = num_args + subs_dict = dict(zip(self.args, self.num_args)) + self.ref = [e.subs(subs_dict).evalf() for e in exprs] + 
self.requires_numpy = requires_numpy + + def lambdify(self, *, cse): + return lambdify(self.args, self.exprs, cse=cse) + + def assertAllClose(self, result, *, abstol=1e-15, reltol=1e-15): + if self.requires_numpy: + assert all(numpy.allclose(result[i], numpy.asarray(r, dtype=float), + rtol=reltol, atol=abstol) + for i, r in enumerate(self.ref)) + return + + for i, r in enumerate(self.ref): + abs_err = abs(result[i] - r) + if r == 0: + assert abs_err < abstol + else: + assert abs_err/abs(r) < reltol + + cases = [ + Case( + args=(x, y, z), + exprs=[ + x + y + z, + x + y - z, + 2*x + 2*y - z, + (x+y)**2 + (y+z)**2, + ], + num_args=(2., 3., 4.) + ), + Case( + args=(x, y, z), + exprs=[ + x + sympy.Heaviside(x), + y + sympy.Heaviside(x), + z + sympy.Heaviside(x, 1), + z/sympy.Heaviside(x, 1) + ], + num_args=(0., 3., 4.) + ), + Case( + args=(x, y, z), + exprs=[ + x + sinc(y), + y + sinc(y), + z - sinc(y) + ], + num_args=(0.1, 0.2, 0.3) + ), + Case( + args=(x, y, z), + exprs=[ + Matrix([[x, x*y], [sin(z) + 4, x**z]]), + x*y+sin(z)-x**z, + Matrix([x*x, sin(z), x**z]) + ], + num_args=(1.,2.,3.), + requires_numpy=True + ), + Case( + args=(x, y), + exprs=[(x + y - 1)**2, x, x + y, + (x + y)/(2*x + 1) + (x + y - 1)**2, (2*x + 1)**(x + y)], + num_args=(1,2) + ) + ] + for case in cases: + if not numpy and case.requires_numpy: + continue + for cse in [False, True, minmem, dummy_cse]: + f = case.lambdify(cse=cse) + result = f(*case.num_args) + case.assertAllClose(result) + +def test_deprecated_set(): + with warns_deprecated_sympy(): + lambdify({x, y}, x + y) + +def test_issue_13881(): + if not numpy: + skip("numpy not installed.") + + X = MatrixSymbol('X', 3, 1) + + f = lambdify(X, X.T*X, 'numpy') + assert f(numpy.array([1, 2, 3])) == 14 + assert f(numpy.array([3, 2, 1])) == 14 + + f = lambdify(X, X*X.T, 'numpy') + assert f(numpy.array([1, 2, 3])) == 14 + assert f(numpy.array([3, 2, 1])) == 14 + + f = lambdify(X, (X*X.T)*X, 'numpy') + arr1 = numpy.array([[1], [2], [3]]) + arr2 = numpy.array([[14],[28],[42]]) + + assert numpy.array_equal(f(arr1), arr2) + + +def test_23536_lambdify_cse_dummy(): + + f = Function('x')(y) + g = Function('w')(y) + expr = z + (f**4 + g**5)*(f**3 + (g*f)**3) + expr = expr.expand() + eval_expr = lambdify(((f, g), z), expr, cse=True) + ans = eval_expr((1.0, 2.0), 3.0) # shouldn't raise NameError + assert ans == 300.0 # not a list and value is 300 + + +class LambdifyDocstringTestCase: + SIGNATURE = None + EXPR = None + SRC = None + + def __init__(self, docstring_limit, expected_redacted): + self.docstring_limit = docstring_limit + self.expected_redacted = expected_redacted + + @property + def expected_expr(self): + expr_redacted_msg = 'EXPRESSION REDACTED DUE TO LENGTH' + return self.EXPR if not self.expected_redacted else expr_redacted_msg + + @property + def expected_src(self): + src_redacted_msg = 'SOURCE CODE REDACTED DUE TO LENGTH' + return self.SRC if not self.expected_redacted else src_redacted_msg + + @property + def expected_docstring(self): + expected_docstring = ( + f'Created with lambdify. 
Signature:\n\n' + f'func({self.SIGNATURE})\n\n' + f'Expression:\n\n' + f'{self.expected_expr}\n\n' + f'Source code:\n\n' + f'{self.expected_src}\n\n' + f'Imported modules:\n\n' + ) + return expected_docstring + + def __len__(self): + return len(self.expected_docstring) + + def __repr__(self): + return ( + f'{self.__class__.__name__}(' + f'docstring_limit={self.docstring_limit}, ' + f'expected_redacted={self.expected_redacted})' + ) + + +def test_lambdify_docstring_size_limit_simple_symbol(): + + class SimpleSymbolTestCase(LambdifyDocstringTestCase): + SIGNATURE = 'x' + EXPR = 'x' + SRC = ( + 'def _lambdifygenerated(x):\n' + ' return x\n' + ) + + x = symbols('x') + + test_cases = ( + SimpleSymbolTestCase(docstring_limit=None, expected_redacted=False), + SimpleSymbolTestCase(docstring_limit=100, expected_redacted=False), + SimpleSymbolTestCase(docstring_limit=1, expected_redacted=False), + SimpleSymbolTestCase(docstring_limit=0, expected_redacted=True), + SimpleSymbolTestCase(docstring_limit=-1, expected_redacted=True), + ) + for test_case in test_cases: + lambdified_expr = lambdify( + [x], + x, + 'sympy', + docstring_limit=test_case.docstring_limit, + ) + assert lambdified_expr.__doc__ == test_case.expected_docstring + + +def test_lambdify_docstring_size_limit_nested_expr(): + + class ExprListTestCase(LambdifyDocstringTestCase): + SIGNATURE = 'x, y, z' + EXPR = ( + '[x, [y], z, x**3 + 3*x**2*y + 3*x**2*z + 3*x*y**2 + 6*x*y*z ' + '+ 3*x*z**2 +...' + ) + SRC = ( + 'def _lambdifygenerated(x, y, z):\n' + ' return [x, [y], z, x**3 + 3*x**2*y + 3*x**2*z + 3*x*y**2 ' + '+ 6*x*y*z + 3*x*z**2 + y**3 + 3*y**2*z + 3*y*z**2 + z**3]\n' + ) + + x, y, z = symbols('x, y, z') + expr = [x, [y], z, ((x + y + z)**3).expand()] + + test_cases = ( + ExprListTestCase(docstring_limit=None, expected_redacted=False), + ExprListTestCase(docstring_limit=200, expected_redacted=False), + ExprListTestCase(docstring_limit=50, expected_redacted=True), + ExprListTestCase(docstring_limit=0, expected_redacted=True), + ExprListTestCase(docstring_limit=-1, expected_redacted=True), + ) + for test_case in test_cases: + lambdified_expr = lambdify( + [x, y, z], + expr, + 'sympy', + docstring_limit=test_case.docstring_limit, + ) + assert lambdified_expr.__doc__ == test_case.expected_docstring + + +def test_lambdify_docstring_size_limit_matrix(): + + class MatrixTestCase(LambdifyDocstringTestCase): + SIGNATURE = 'x, y, z' + EXPR = ( + 'Matrix([[0, x], [x + y + z, x**3 + 3*x**2*y + 3*x**2*z + 3*x*y**2 ' + '+ 6*x*y*z...' 
+ ) + SRC = ( + 'def _lambdifygenerated(x, y, z):\n' + ' return ImmutableDenseMatrix([[0, x], [x + y + z, x**3 ' + '+ 3*x**2*y + 3*x**2*z + 3*x*y**2 + 6*x*y*z + 3*x*z**2 + y**3 ' + '+ 3*y**2*z + 3*y*z**2 + z**3]])\n' + ) + + x, y, z = symbols('x, y, z') + expr = Matrix([[S.Zero, x], [x + y + z, ((x + y + z)**3).expand()]]) + + test_cases = ( + MatrixTestCase(docstring_limit=None, expected_redacted=False), + MatrixTestCase(docstring_limit=200, expected_redacted=False), + MatrixTestCase(docstring_limit=50, expected_redacted=True), + MatrixTestCase(docstring_limit=0, expected_redacted=True), + MatrixTestCase(docstring_limit=-1, expected_redacted=True), + ) + for test_case in test_cases: + lambdified_expr = lambdify( + [x, y, z], + expr, + 'sympy', + docstring_limit=test_case.docstring_limit, + ) + assert lambdified_expr.__doc__ == test_case.expected_docstring diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_matchpy_connector.py b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_matchpy_connector.py new file mode 100644 index 0000000000000000000000000000000000000000..6f244b4279f1e44e3f76d82ab9336a12b437a375 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_matchpy_connector.py @@ -0,0 +1,153 @@ +import pickle + +from sympy.core.relational import (Eq, Ne) +from sympy.core.singleton import S +from sympy.core.symbol import symbols +from sympy.functions.elementary.miscellaneous import sqrt +from sympy.functions.elementary.trigonometric import (cos, sin) +from sympy.external import import_module +from sympy.testing.pytest import skip +from sympy.utilities.matchpy_connector import WildDot, WildPlus, WildStar, Replacer + +matchpy = import_module("matchpy") + +x, y, z = symbols("x y z") + + +def _get_first_match(expr, pattern): + from matchpy import ManyToOneMatcher, Pattern + + matcher = ManyToOneMatcher() + matcher.add(Pattern(pattern)) + return next(iter(matcher.match(expr))) + + +def test_matchpy_connector(): + if matchpy is None: + skip("matchpy not installed") + + from multiset import Multiset + from matchpy import Pattern, Substitution + + w_ = WildDot("w_") + w__ = WildPlus("w__") + w___ = WildStar("w___") + + expr = x + y + pattern = x + w_ + p, subst = _get_first_match(expr, pattern) + assert p == Pattern(pattern) + assert subst == Substitution({'w_': y}) + + expr = x + y + z + pattern = x + w__ + p, subst = _get_first_match(expr, pattern) + assert p == Pattern(pattern) + assert subst == Substitution({'w__': Multiset([y, z])}) + + expr = x + y + z + pattern = x + y + z + w___ + p, subst = _get_first_match(expr, pattern) + assert p == Pattern(pattern) + assert subst == Substitution({'w___': Multiset()}) + + +def test_matchpy_optional(): + if matchpy is None: + skip("matchpy not installed") + + from matchpy import Pattern, Substitution + from matchpy import ManyToOneReplacer, ReplacementRule + + p = WildDot("p", optional=1) + q = WildDot("q", optional=0) + + pattern = p*x + q + + expr1 = 2*x + pa, subst = _get_first_match(expr1, pattern) + assert pa == Pattern(pattern) + assert subst == Substitution({'p': 2, 'q': 0}) + + expr2 = x + 3 + pa, subst = _get_first_match(expr2, pattern) + assert pa == Pattern(pattern) + assert subst == Substitution({'p': 1, 'q': 3}) + + expr3 = x + pa, subst = _get_first_match(expr3, pattern) + assert pa == Pattern(pattern) + assert subst == Substitution({'p': 1, 'q': 0}) + + expr4 = x*y + z + pa, subst = _get_first_match(expr4, pattern) + assert pa == Pattern(pattern) + assert subst == 
Substitution({'p': y, 'q': z}) + + replacer = ManyToOneReplacer() + replacer.add(ReplacementRule(Pattern(pattern), lambda p, q: sin(p)*cos(q))) + assert replacer.replace(expr1) == sin(2)*cos(0) + assert replacer.replace(expr2) == sin(1)*cos(3) + assert replacer.replace(expr3) == sin(1)*cos(0) + assert replacer.replace(expr4) == sin(y)*cos(z) + + +def test_replacer(): + if matchpy is None: + skip("matchpy not installed") + + x1_ = WildDot("x1_") + x2_ = WildDot("x2_") + + a_ = WildDot("a_", optional=S.One) + b_ = WildDot("b_", optional=S.One) + c_ = WildDot("c_", optional=S.Zero) + + replacer = Replacer(common_constraints=[ + matchpy.CustomConstraint(lambda a_: not a_.has(x)), + matchpy.CustomConstraint(lambda b_: not b_.has(x)), + matchpy.CustomConstraint(lambda c_: not c_.has(x)), + ]) + + # Rewrite the equation into implicit form, unless it's already solved: + replacer.add(Eq(x1_, x2_), Eq(x1_ - x2_, 0), conditions_nonfalse=[Ne(x2_, 0), Ne(x1_, 0), Ne(x1_, x), Ne(x2_, x)]) + + # Simple equation solver for real numbers: + replacer.add(Eq(a_*x + b_, 0), Eq(x, -b_/a_)) + disc = b_**2 - 4*a_*c_ + replacer.add( + Eq(a_*x**2 + b_*x + c_, 0), + Eq(x, (-b_ - sqrt(disc))/(2*a_)) | Eq(x, (-b_ + sqrt(disc))/(2*a_)), + conditions_nonfalse=[disc >= 0] + ) + replacer.add( + Eq(a_*x**2 + c_, 0), + Eq(x, sqrt(-c_/a_)) | Eq(x, -sqrt(-c_/a_)), + conditions_nonfalse=[-c_*a_ > 0] + ) + + assert replacer.replace(Eq(3*x, y)) == Eq(x, y/3) + assert replacer.replace(Eq(x**2 + 1, 0)) == Eq(x**2 + 1, 0) + assert replacer.replace(Eq(x**2, 4)) == (Eq(x, 2) | Eq(x, -2)) + assert replacer.replace(Eq(x**2 + 4*y*x + 4*y**2, 0)) == Eq(x, -2*y) + + +def test_matchpy_object_pickle(): + if matchpy is None: + return + + a1 = WildDot("a") + a2 = pickle.loads(pickle.dumps(a1)) + assert a1 == a2 + + a1 = WildDot("a", S(1)) + a2 = pickle.loads(pickle.dumps(a1)) + assert a1 == a2 + + a1 = WildPlus("a", S(1)) + a2 = pickle.loads(pickle.dumps(a1)) + assert a1 == a2 + + a1 = WildStar("a", S(1)) + a2 = pickle.loads(pickle.dumps(a1)) + assert a1 == a2 diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_mathml.py b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_mathml.py new file mode 100644 index 0000000000000000000000000000000000000000..e4a7598f175be34f8bb34c0bd9c003d1c0238c7b --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_mathml.py @@ -0,0 +1,33 @@ +import os +from textwrap import dedent +from sympy.external import import_module +from sympy.testing.pytest import skip +from sympy.utilities.mathml import apply_xsl + + + +lxml = import_module('lxml') + +path = os.path.abspath(os.path.join(os.path.dirname(__file__), "test_xxe.py")) + + +def test_xxe(): + assert os.path.isfile(path) + if not lxml: + skip("lxml not installed.") + + mml = dedent( + rf""" + + ]> + + John + &ent; + + """ + ) + xsl = 'mathml/data/simple_mmlctop.xsl' + + res = apply_xsl(mml, xsl) + assert res == \ + '\n\nJohn\n\n\n' diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_misc.py b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_misc.py new file mode 100644 index 0000000000000000000000000000000000000000..f9f61ee6c84def5388ba9cd206851f36950aa2c5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_misc.py @@ -0,0 +1,151 @@ +from textwrap import dedent +import sys +from subprocess import Popen, PIPE +import os + +from sympy.core.singleton import S +from sympy.testing.pytest import (raises, warns_deprecated_sympy, + skip_under_pyodide) 
+from sympy.utilities.misc import (translate, replace, ordinal, rawlines, + strlines, as_int, find_executable) +from sympy.external import import_module + +pyodide_js = import_module('pyodide_js') + + +def test_translate(): + abc = 'abc' + assert translate(abc, None, 'a') == 'bc' + assert translate(abc, None, '') == 'abc' + assert translate(abc, {'a': 'x'}, 'c') == 'xb' + assert translate(abc, {'a': 'bc'}, 'c') == 'bcb' + assert translate(abc, {'ab': 'x'}, 'c') == 'x' + assert translate(abc, {'ab': ''}, 'c') == '' + assert translate(abc, {'bc': 'x'}, 'c') == 'ab' + assert translate(abc, {'abc': 'x', 'a': 'y'}) == 'x' + u = chr(4096) + assert translate(abc, 'a', 'x', u) == 'xbc' + assert (u in translate(abc, 'a', u, u)) is True + + +def test_replace(): + assert replace('abc', ('a', 'b')) == 'bbc' + assert replace('abc', {'a': 'Aa'}) == 'Aabc' + assert replace('abc', ('a', 'b'), ('c', 'C')) == 'bbC' + + +def test_ordinal(): + assert ordinal(-1) == '-1st' + assert ordinal(0) == '0th' + assert ordinal(1) == '1st' + assert ordinal(2) == '2nd' + assert ordinal(3) == '3rd' + assert all(ordinal(i).endswith('th') for i in range(4, 21)) + assert ordinal(100) == '100th' + assert ordinal(101) == '101st' + assert ordinal(102) == '102nd' + assert ordinal(103) == '103rd' + assert ordinal(104) == '104th' + assert ordinal(200) == '200th' + assert all(ordinal(i) == str(i) + 'th' for i in range(-220, -203)) + + +def test_rawlines(): + assert rawlines('a a\na') == "dedent('''\\\n a a\n a''')" + assert rawlines('a a') == "'a a'" + assert rawlines(strlines('\\le"ft')) == ( + '(\n' + " '(\\n'\n" + ' \'r\\\'\\\\le"ft\\\'\\n\'\n' + " ')'\n" + ')') + + +def test_strlines(): + q = 'this quote (") is in the middle' + # the following assert rhs was prepared with + # print(rawlines(strlines(q, 10))) + assert strlines(q, 10) == dedent('''\ + ( + 'this quo' + 'te (") i' + 's in the' + ' middle' + )''') + assert q == ( + 'this quo' + 'te (") i' + 's in the' + ' middle' + ) + q = "this quote (') is in the middle" + assert strlines(q, 20) == dedent('''\ + ( + "this quote (') is " + "in the middle" + )''') + assert strlines('\\left') == ( + '(\n' + "r'\\left'\n" + ')') + assert strlines('\\left', short=True) == r"r'\left'" + assert strlines('\\le"ft') == ( + '(\n' + 'r\'\\le"ft\'\n' + ')') + q = 'this\nother line' + assert strlines(q) == rawlines(q) + + +def test_translate_args(): + try: + translate(None, None, None, 'not_none') + except ValueError: + pass # Exception raised successfully + else: + assert False + + assert translate('s', None, None, None) == 's' + + try: + translate('s', 'a', 'bc') + except ValueError: + pass # Exception raised successfully + else: + assert False + + +@skip_under_pyodide("Cannot create subprocess under pyodide.") +def test_debug_output(): + env = os.environ.copy() + env['SYMPY_DEBUG'] = 'True' + cmd = 'from sympy import *; x = Symbol("x"); print(integrate((1-cos(x))/x, x))' + cmdline = [sys.executable, '-c', cmd] + proc = Popen(cmdline, env=env, stdout=PIPE, stderr=PIPE) + out, err = proc.communicate() + out = out.decode('ascii') # utf-8? 
+ err = err.decode('ascii') + expected = 'substituted: -x*(1 - cos(x)), u: 1/x, u_var: _u' + assert expected in err, err + + +def test_as_int(): + raises(ValueError, lambda : as_int(True)) + raises(ValueError, lambda : as_int(1.1)) + raises(ValueError, lambda : as_int([])) + raises(ValueError, lambda : as_int(S.NaN)) + raises(ValueError, lambda : as_int(S.Infinity)) + raises(ValueError, lambda : as_int(S.NegativeInfinity)) + raises(ValueError, lambda : as_int(S.ComplexInfinity)) + # for the following, limited precision makes int(arg) == arg + # but the int value is not necessarily what a user might have + # expected; Q.prime is more nuanced in its response for + # expressions which might be complex representations of an + # integer. This is not -- by design -- as_ints role. + raises(ValueError, lambda : as_int(1e23)) + raises(ValueError, lambda : as_int(S('1.'+'0'*20+'1'))) + assert as_int(True, strict=False) == 1 + +def test_deprecated_find_executable(): + with warns_deprecated_sympy(): + find_executable('python') diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_pickling.py b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_pickling.py new file mode 100644 index 0000000000000000000000000000000000000000..9cfa9af74f3f563429d4147e5e0aa630808f81d7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_pickling.py @@ -0,0 +1,709 @@ +import inspect +import copy +import pickle + +from sympy.physics.units import meter + +from sympy.testing.pytest import XFAIL, raises + +from sympy.core.basic import Atom, Basic +from sympy.core.singleton import SingletonRegistry +from sympy.core.symbol import Str, Dummy, Symbol, Wild +from sympy.core.numbers import (E, I, pi, oo, zoo, nan, Integer, + Rational, Float, AlgebraicNumber) +from sympy.core.relational import (Equality, GreaterThan, LessThan, Relational, + StrictGreaterThan, StrictLessThan, Unequality) +from sympy.core.add import Add +from sympy.core.mul import Mul +from sympy.core.power import Pow +from sympy.core.function import Derivative, Function, FunctionClass, Lambda, \ + WildFunction +from sympy.sets.sets import Interval +from sympy.core.multidimensional import vectorize + +from sympy.external.gmpy import HAS_GMPY +from sympy.utilities.exceptions import SymPyDeprecationWarning + +from sympy.core.singleton import S +from sympy.core.symbol import symbols + +from sympy.external import import_module +cloudpickle = import_module('cloudpickle') + +excluded_attrs = { + '_assumptions', # This is a local cache that isn't automatically filled on creation + '_mhash', # Cached after __hash__ is called but set to None after creation + 'is_EmptySet', # Deprecated from SymPy 1.5. This can be removed when is_EmptySet is removed. + 'expr_free_symbols', # Deprecated from SymPy 1.9. This can be removed when exr_free_symbols is removed. + '_mat', # Deprecated from SymPy 1.9. This can be removed when Matrix._mat is removed + '_smat', # Deprecated from SymPy 1.9. This can be removed when SparseMatrix._smat is removed + } + + +def check(a, exclude=[], check_attr=True): + """ Check that pickling and copying round-trips. 
+ """ + # Pickling with protocols 0 and 1 is disabled for Basic instances: + if isinstance(a, Basic): + for protocol in [0, 1]: + raises(NotImplementedError, lambda: pickle.dumps(a, protocol)) + + protocols = [2, copy.copy, copy.deepcopy, 3, 4] + if cloudpickle: + protocols.extend([cloudpickle]) + + for protocol in protocols: + if protocol in exclude: + continue + + if callable(protocol): + if isinstance(a, type): + # Classes can't be copied, but that's okay. + continue + b = protocol(a) + elif inspect.ismodule(protocol): + b = protocol.loads(protocol.dumps(a)) + else: + b = pickle.loads(pickle.dumps(a, protocol)) + + d1 = dir(a) + d2 = dir(b) + assert set(d1) == set(d2) + + if not check_attr: + continue + + def c(a, b, d): + for i in d: + if i in excluded_attrs: + continue + if not hasattr(a, i): + continue + attr = getattr(a, i) + if not hasattr(attr, "__call__"): + assert hasattr(b, i), i + assert getattr(b, i) == attr, "%s != %s, protocol: %s" % (getattr(b, i), attr, protocol) + + c(a, b, d1) + c(b, a, d2) + + + +#================== core ========================= + + +def test_core_basic(): + for c in (Atom, Atom(), Basic, Basic(), SingletonRegistry, S): + check(c) + +def test_core_Str(): + check(Str('x')) + +def test_core_symbol(): + # make the Symbol a unique name that doesn't class with any other + # testing variable in this file since after this test the symbol + # having the same name will be cached as noncommutative + for c in (Dummy, Dummy("x", commutative=False), Symbol, + Symbol("_issue_3130", commutative=False), Wild, Wild("x")): + check(c) + + +def test_core_numbers(): + for c in (Integer(2), Rational(2, 3), Float("1.2")): + check(c) + for c in (AlgebraicNumber, AlgebraicNumber(sqrt(3))): + check(c, check_attr=False) + + +def test_core_float_copy(): + # See gh-7457 + y = Symbol("x") + 1.0 + check(y) # does not raise TypeError ("argument is not an mpz") + + +def test_core_relational(): + x = Symbol("x") + y = Symbol("y") + for c in (Equality, Equality(x, y), GreaterThan, GreaterThan(x, y), + LessThan, LessThan(x, y), Relational, Relational(x, y), + StrictGreaterThan, StrictGreaterThan(x, y), StrictLessThan, + StrictLessThan(x, y), Unequality, Unequality(x, y)): + check(c) + + +def test_core_add(): + x = Symbol("x") + for c in (Add, Add(x, 4)): + check(c) + + +def test_core_mul(): + x = Symbol("x") + for c in (Mul, Mul(x, 4)): + check(c) + + +def test_core_power(): + x = Symbol("x") + for c in (Pow, Pow(x, 4)): + check(c) + + +def test_core_function(): + x = Symbol("x") + for f in (Derivative, Derivative(x), Function, FunctionClass, Lambda, + WildFunction): + check(f) + + +def test_core_undefinedfunctions(): + f = Function("f") + # Full XFAILed test below + exclude = list(range(5)) + # https://github.com/cloudpipe/cloudpickle/issues/65 + # https://github.com/cloudpipe/cloudpickle/issues/190 + exclude.append(cloudpickle) + check(f, exclude=exclude) + +@XFAIL +def test_core_undefinedfunctions_fail(): + # This fails because f is assumed to be a class at sympy.basic.function.f + f = Function("f") + check(f) + + +def test_core_interval(): + for c in (Interval, Interval(0, 2)): + check(c) + + +def test_core_multidimensional(): + for c in (vectorize, vectorize(0)): + check(c) + + +def test_Singletons(): + protocols = [0, 1, 2, 3, 4] + copiers = [copy.copy, copy.deepcopy] + copiers += [lambda x: pickle.loads(pickle.dumps(x, proto)) + for proto in protocols] + if cloudpickle: + copiers += [lambda x: cloudpickle.loads(cloudpickle.dumps(x))] + + for obj in (Integer(-1), Integer(0), 
Integer(1), Rational(1, 2), pi, E, I, + oo, -oo, zoo, nan, S.GoldenRatio, S.TribonacciConstant, + S.EulerGamma, S.Catalan, S.EmptySet, S.IdentityFunction): + for func in copiers: + assert func(obj) is obj + + +#================== functions =================== +from sympy.functions import (Piecewise, lowergamma, acosh, chebyshevu, + chebyshevt, ln, chebyshevt_root, legendre, Heaviside, bernoulli, coth, + tanh, assoc_legendre, sign, arg, asin, DiracDelta, re, rf, Abs, + uppergamma, binomial, sinh, cos, cot, acos, acot, gamma, bell, + hermite, harmonic, LambertW, zeta, log, factorial, asinh, acoth, cosh, + dirichlet_eta, Eijk, loggamma, erf, ceiling, im, fibonacci, + tribonacci, conjugate, tan, chebyshevu_root, floor, atanh, sqrt, sin, + atan, ff, lucas, atan2, polygamma, exp) + + +def test_functions(): + one_var = (acosh, ln, Heaviside, factorial, bernoulli, coth, tanh, + sign, arg, asin, DiracDelta, re, Abs, sinh, cos, cot, acos, acot, + gamma, bell, harmonic, LambertW, zeta, log, factorial, asinh, + acoth, cosh, dirichlet_eta, loggamma, erf, ceiling, im, fibonacci, + tribonacci, conjugate, tan, floor, atanh, sin, atan, lucas, exp) + two_var = (rf, ff, lowergamma, chebyshevu, chebyshevt, binomial, + atan2, polygamma, hermite, legendre, uppergamma) + x, y, z = symbols("x,y,z") + others = (chebyshevt_root, chebyshevu_root, Eijk(x, y, z), + Piecewise( (0, x < -1), (x**2, x <= 1), (x**3, True)), + assoc_legendre) + for cls in one_var: + check(cls) + c = cls(x) + check(c) + for cls in two_var: + check(cls) + c = cls(x, y) + check(c) + for cls in others: + check(cls) + +#================== geometry ==================== +from sympy.geometry.entity import GeometryEntity +from sympy.geometry.point import Point +from sympy.geometry.ellipse import Circle, Ellipse +from sympy.geometry.line import Line, LinearEntity, Ray, Segment +from sympy.geometry.polygon import Polygon, RegularPolygon, Triangle + + +def test_geometry(): + p1 = Point(1, 2) + p2 = Point(2, 3) + p3 = Point(0, 0) + p4 = Point(0, 1) + for c in ( + GeometryEntity, GeometryEntity(), Point, p1, Circle, Circle(p1, 2), + Ellipse, Ellipse(p1, 3, 4), Line, Line(p1, p2), LinearEntity, + LinearEntity(p1, p2), Ray, Ray(p1, p2), Segment, Segment(p1, p2), + Polygon, Polygon(p1, p2, p3, p4), RegularPolygon, + RegularPolygon(p1, 4, 5), Triangle, Triangle(p1, p2, p3)): + check(c, check_attr=False) + +#================== integrals ==================== +from sympy.integrals.integrals import Integral + + +def test_integrals(): + x = Symbol("x") + for c in (Integral, Integral(x)): + check(c) + +#==================== logic ===================== +from sympy.core.logic import Logic + + +def test_logic(): + for c in (Logic, Logic(1)): + check(c) + +#================== matrices ==================== +from sympy.matrices import Matrix, SparseMatrix + + +def test_matrices(): + for c in (Matrix, Matrix([1, 2, 3]), SparseMatrix, SparseMatrix([[1, 2], [3, 4]])): + check(c) + +#================== ntheory ===================== +from sympy.ntheory.generate import Sieve + + +def test_ntheory(): + for c in (Sieve, Sieve()): + check(c) + +#================== physics ===================== +from sympy.physics.paulialgebra import Pauli +from sympy.physics.units import Unit + + +def test_physics(): + for c in (Unit, meter, Pauli, Pauli(1)): + check(c) + +#================== plotting ==================== +# XXX: These tests are not complete, so XFAIL them + + +@XFAIL +def test_plotting(): + from sympy.plotting.pygletplot.color_scheme import ColorGradient, ColorScheme + from 
sympy.plotting.pygletplot.managed_window import ManagedWindow + from sympy.plotting.plot import Plot, ScreenShot + from sympy.plotting.pygletplot.plot_axes import PlotAxes, PlotAxesBase, PlotAxesFrame, PlotAxesOrdinate + from sympy.plotting.pygletplot.plot_camera import PlotCamera + from sympy.plotting.pygletplot.plot_controller import PlotController + from sympy.plotting.pygletplot.plot_curve import PlotCurve + from sympy.plotting.pygletplot.plot_interval import PlotInterval + from sympy.plotting.pygletplot.plot_mode import PlotMode + from sympy.plotting.pygletplot.plot_modes import Cartesian2D, Cartesian3D, Cylindrical, \ + ParametricCurve2D, ParametricCurve3D, ParametricSurface, Polar, Spherical + from sympy.plotting.pygletplot.plot_object import PlotObject + from sympy.plotting.pygletplot.plot_surface import PlotSurface + from sympy.plotting.pygletplot.plot_window import PlotWindow + for c in ( + ColorGradient, ColorGradient(0.2, 0.4), ColorScheme, ManagedWindow, + ManagedWindow, Plot, ScreenShot, PlotAxes, PlotAxesBase, + PlotAxesFrame, PlotAxesOrdinate, PlotCamera, PlotController, + PlotCurve, PlotInterval, PlotMode, Cartesian2D, Cartesian3D, + Cylindrical, ParametricCurve2D, ParametricCurve3D, + ParametricSurface, Polar, Spherical, PlotObject, PlotSurface, + PlotWindow): + check(c) + + +@XFAIL +def test_plotting2(): + #from sympy.plotting.color_scheme import ColorGradient + from sympy.plotting.pygletplot.color_scheme import ColorScheme + #from sympy.plotting.managed_window import ManagedWindow + from sympy.plotting.plot import Plot + #from sympy.plotting.plot import ScreenShot + from sympy.plotting.pygletplot.plot_axes import PlotAxes + #from sympy.plotting.plot_axes import PlotAxesBase, PlotAxesFrame, PlotAxesOrdinate + #from sympy.plotting.plot_camera import PlotCamera + #from sympy.plotting.plot_controller import PlotController + #from sympy.plotting.plot_curve import PlotCurve + #from sympy.plotting.plot_interval import PlotInterval + #from sympy.plotting.plot_mode import PlotMode + #from sympy.plotting.plot_modes import Cartesian2D, Cartesian3D, Cylindrical, \ + # ParametricCurve2D, ParametricCurve3D, ParametricSurface, Polar, Spherical + #from sympy.plotting.plot_object import PlotObject + #from sympy.plotting.plot_surface import PlotSurface + # from sympy.plotting.plot_window import PlotWindow + check(ColorScheme("rainbow")) + check(Plot(1, visible=False)) + check(PlotAxes()) + +#================== polys ======================= +from sympy.polys.domains.integerring import ZZ +from sympy.polys.domains.rationalfield import QQ +from sympy.polys.orderings import lex +from sympy.polys.polytools import Poly + +def test_pickling_polys_polytools(): + from sympy.polys.polytools import PurePoly + # from sympy.polys.polytools import GroebnerBasis + x = Symbol('x') + + for c in (Poly, Poly(x, x)): + check(c) + + for c in (PurePoly, PurePoly(x)): + check(c) + + # TODO: fix pickling of Options class (see GroebnerBasis._options) + # for c in (GroebnerBasis, GroebnerBasis([x**2 - 1], x, order=lex)): + # check(c) + +def test_pickling_polys_polyclasses(): + from sympy.polys.polyclasses import DMP, DMF, ANP + + for c in (DMP, DMP([[ZZ(1)], [ZZ(2)], [ZZ(3)]], ZZ)): + check(c) + for c in (DMF, DMF(([ZZ(1), ZZ(2)], [ZZ(1), ZZ(3)]), ZZ)): + check(c) + for c in (ANP, ANP([QQ(1), QQ(2)], [QQ(1), QQ(2), QQ(3)], QQ)): + check(c) + +@XFAIL +def test_pickling_polys_rings(): + # NOTE: can't use protocols < 2 because we have to execute __new__ to + # make sure caching of rings works properly. 
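+    # (Concretely, and assuming the default pickle machinery: protocols 0 and 1
+    # rebuild instances through copyreg._reconstructor, which calls
+    # object.__new__ directly and so never reaches PolyRing.__new__, where the
+    # ring cache lives; protocol 2 and later emit a NEWOBJ opcode that does go
+    # through cls.__new__, so cached rings round-trip correctly.)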
+ + from sympy.polys.rings import PolyRing + + ring = PolyRing("x,y,z", ZZ, lex) + + for c in (PolyRing, ring): + check(c, exclude=[0, 1]) + + for c in (ring.dtype, ring.one): + check(c, exclude=[0, 1], check_attr=False) # TODO: Py3k + +def test_pickling_polys_fields(): + pass + # NOTE: can't use protocols < 2 because we have to execute __new__ to + # make sure caching of fields works properly. + + # from sympy.polys.fields import FracField + + # field = FracField("x,y,z", ZZ, lex) + + # TODO: AssertionError: assert id(obj) not in self.memo + # for c in (FracField, field): + # check(c, exclude=[0, 1]) + + # TODO: AssertionError: assert id(obj) not in self.memo + # for c in (field.dtype, field.one): + # check(c, exclude=[0, 1]) + +def test_pickling_polys_elements(): + from sympy.polys.domains.pythonrational import PythonRational + #from sympy.polys.domains.pythonfinitefield import PythonFiniteField + #from sympy.polys.domains.mpelements import MPContext + + for c in (PythonRational, PythonRational(1, 7)): + check(c) + + #gf = PythonFiniteField(17) + + # TODO: fix pickling of ModularInteger + # for c in (gf.dtype, gf(5)): + # check(c) + + #mp = MPContext() + + # TODO: fix pickling of RealElement + # for c in (mp.mpf, mp.mpf(1.0)): + # check(c) + + # TODO: fix pickling of ComplexElement + # for c in (mp.mpc, mp.mpc(1.0, -1.5)): + # check(c) + +def test_pickling_polys_domains(): + # from sympy.polys.domains.pythonfinitefield import PythonFiniteField + from sympy.polys.domains.pythonintegerring import PythonIntegerRing + from sympy.polys.domains.pythonrationalfield import PythonRationalField + + # TODO: fix pickling of ModularInteger + # for c in (PythonFiniteField, PythonFiniteField(17)): + # check(c) + + for c in (PythonIntegerRing, PythonIntegerRing()): + check(c, check_attr=False) + + for c in (PythonRationalField, PythonRationalField()): + check(c, check_attr=False) + + if HAS_GMPY: + # from sympy.polys.domains.gmpyfinitefield import GMPYFiniteField + from sympy.polys.domains.gmpyintegerring import GMPYIntegerRing + from sympy.polys.domains.gmpyrationalfield import GMPYRationalField + + # TODO: fix pickling of ModularInteger + # for c in (GMPYFiniteField, GMPYFiniteField(17)): + # check(c) + + for c in (GMPYIntegerRing, GMPYIntegerRing()): + check(c, check_attr=False) + + for c in (GMPYRationalField, GMPYRationalField()): + check(c, check_attr=False) + + #from sympy.polys.domains.realfield import RealField + #from sympy.polys.domains.complexfield import ComplexField + from sympy.polys.domains.algebraicfield import AlgebraicField + #from sympy.polys.domains.polynomialring import PolynomialRing + #from sympy.polys.domains.fractionfield import FractionField + from sympy.polys.domains.expressiondomain import ExpressionDomain + + # TODO: fix pickling of RealElement + # for c in (RealField, RealField(100)): + # check(c) + + # TODO: fix pickling of ComplexElement + # for c in (ComplexField, ComplexField(100)): + # check(c) + + for c in (AlgebraicField, AlgebraicField(QQ, sqrt(3))): + check(c, check_attr=False) + + # TODO: AssertionError + # for c in (PolynomialRing, PolynomialRing(ZZ, "x,y,z")): + # check(c) + + # TODO: AttributeError: 'PolyElement' object has no attribute 'ring' + # for c in (FractionField, FractionField(ZZ, "x,y,z")): + # check(c) + + for c in (ExpressionDomain, ExpressionDomain()): + check(c, check_attr=False) + + +def test_pickling_polys_orderings(): + from sympy.polys.orderings import (LexOrder, GradedLexOrder, + ReversedGradedLexOrder, InverseOrder) + # from 
sympy.polys.orderings import ProductOrder + + for c in (LexOrder, LexOrder()): + check(c) + + for c in (GradedLexOrder, GradedLexOrder()): + check(c) + + for c in (ReversedGradedLexOrder, ReversedGradedLexOrder()): + check(c) + + # TODO: Argh, Python is so naive. No lambdas nor inner function support in + # pickling module. Maybe someone could figure out what to do with this. + # + # for c in (ProductOrder, ProductOrder((LexOrder(), lambda m: m[:2]), + # (GradedLexOrder(), lambda m: m[2:]))): + # check(c) + + for c in (InverseOrder, InverseOrder(LexOrder())): + check(c) + +def test_pickling_polys_monomials(): + from sympy.polys.monomials import MonomialOps, Monomial + x, y, z = symbols("x,y,z") + + for c in (MonomialOps, MonomialOps(3)): + check(c) + + for c in (Monomial, Monomial((1, 2, 3), (x, y, z))): + check(c) + +def test_pickling_polys_errors(): + from sympy.polys.polyerrors import (HeuristicGCDFailed, + HomomorphismFailed, IsomorphismFailed, ExtraneousFactors, + EvaluationFailed, RefinementFailed, CoercionFailed, NotInvertible, + NotReversible, NotAlgebraic, DomainError, PolynomialError, + UnificationFailed, GeneratorsError, GeneratorsNeeded, + UnivariatePolynomialError, MultivariatePolynomialError, OptionError, + FlagError) + # from sympy.polys.polyerrors import (ExactQuotientFailed, + # OperationNotSupported, ComputationFailed, PolificationFailed) + + # x = Symbol('x') + + # TODO: TypeError: __init__() takes at least 3 arguments (1 given) + # for c in (ExactQuotientFailed, ExactQuotientFailed(x, 3*x, ZZ)): + # check(c) + + # TODO: TypeError: can't pickle instancemethod objects + # for c in (OperationNotSupported, OperationNotSupported(Poly(x), Poly.gcd)): + # check(c) + + for c in (HeuristicGCDFailed, HeuristicGCDFailed()): + check(c) + + for c in (HomomorphismFailed, HomomorphismFailed()): + check(c) + + for c in (IsomorphismFailed, IsomorphismFailed()): + check(c) + + for c in (ExtraneousFactors, ExtraneousFactors()): + check(c) + + for c in (EvaluationFailed, EvaluationFailed()): + check(c) + + for c in (RefinementFailed, RefinementFailed()): + check(c) + + for c in (CoercionFailed, CoercionFailed()): + check(c) + + for c in (NotInvertible, NotInvertible()): + check(c) + + for c in (NotReversible, NotReversible()): + check(c) + + for c in (NotAlgebraic, NotAlgebraic()): + check(c) + + for c in (DomainError, DomainError()): + check(c) + + for c in (PolynomialError, PolynomialError()): + check(c) + + for c in (UnificationFailed, UnificationFailed()): + check(c) + + for c in (GeneratorsError, GeneratorsError()): + check(c) + + for c in (GeneratorsNeeded, GeneratorsNeeded()): + check(c) + + # TODO: PicklingError: Can't pickle at 0x38578c0>: it's not found as __main__. 
+ # for c in (ComputationFailed, ComputationFailed(lambda t: t, 3, None)): + # check(c) + + for c in (UnivariatePolynomialError, UnivariatePolynomialError()): + check(c) + + for c in (MultivariatePolynomialError, MultivariatePolynomialError()): + check(c) + + # TODO: TypeError: __init__() takes at least 3 arguments (1 given) + # for c in (PolificationFailed, PolificationFailed({}, x, x, False)): + # check(c) + + for c in (OptionError, OptionError()): + check(c) + + for c in (FlagError, FlagError()): + check(c) + +#def test_pickling_polys_options(): + #from sympy.polys.polyoptions import Options + + # TODO: fix pickling of `symbols' flag + # for c in (Options, Options((), dict(domain='ZZ', polys=False))): + # check(c) + +# TODO: def test_pickling_polys_rootisolation(): +# RealInterval +# ComplexInterval + +def test_pickling_polys_rootoftools(): + from sympy.polys.rootoftools import CRootOf, RootSum + + x = Symbol('x') + f = x**3 + x + 3 + + for c in (CRootOf, CRootOf(f, 0)): + check(c) + + for c in (RootSum, RootSum(f, exp)): + check(c) + +#================== printing ==================== +from sympy.printing.latex import LatexPrinter +from sympy.printing.mathml import MathMLContentPrinter, MathMLPresentationPrinter +from sympy.printing.pretty.pretty import PrettyPrinter +from sympy.printing.pretty.stringpict import prettyForm, stringPict +from sympy.printing.printer import Printer +from sympy.printing.python import PythonPrinter + + +def test_printing(): + for c in (LatexPrinter, LatexPrinter(), MathMLContentPrinter, + MathMLPresentationPrinter, PrettyPrinter, prettyForm, stringPict, + stringPict("a"), Printer, Printer(), PythonPrinter, + PythonPrinter()): + check(c) + + +@XFAIL +def test_printing1(): + check(MathMLContentPrinter()) + + +@XFAIL +def test_printing2(): + check(MathMLPresentationPrinter()) + + +@XFAIL +def test_printing3(): + check(PrettyPrinter()) + +#================== series ====================== +from sympy.series.limits import Limit +from sympy.series.order import Order + + +def test_series(): + e = Symbol("e") + x = Symbol("x") + for c in (Limit, Limit(e, x, 1), Order, Order(e)): + check(c) + +#================== concrete ================== +from sympy.concrete.products import Product +from sympy.concrete.summations import Sum + + +def test_concrete(): + x = Symbol("x") + for c in (Product, Product(x, (x, 2, 4)), Sum, Sum(x, (x, 2, 4))): + check(c) + +def test_deprecation_warning(): + w = SymPyDeprecationWarning("message", deprecated_since_version='1.0', active_deprecations_target="active-deprecations") + check(w) + +def test_issue_18438(): + assert pickle.loads(pickle.dumps(S.Half)) == S.Half + + +#================= old pickles ================= +def test_unpickle_from_older_versions(): + data = ( + b'\x80\x04\x95^\x00\x00\x00\x00\x00\x00\x00\x8c\x10sympy.core.power' + b'\x94\x8c\x03Pow\x94\x93\x94\x8c\x12sympy.core.numbers\x94\x8c' + b'\x07Integer\x94\x93\x94K\x02\x85\x94R\x94}\x94bh\x03\x8c\x04Half' + b'\x94\x93\x94)R\x94}\x94b\x86\x94R\x94}\x94b.' 
+ ) + assert pickle.loads(data) == sqrt(2) diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_source.py b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_source.py new file mode 100644 index 0000000000000000000000000000000000000000..468185bc579fc325aee21024dfa15ebf14287b5f --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_source.py @@ -0,0 +1,11 @@ +from sympy.utilities.source import get_mod_func, get_class + + +def test_get_mod_func(): + assert get_mod_func( + 'sympy.core.basic.Basic') == ('sympy.core.basic', 'Basic') + + +def test_get_class(): + _basic = get_class('sympy.core.basic.Basic') + assert _basic.__name__ == 'Basic' diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_timeutils.py b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_timeutils.py new file mode 100644 index 0000000000000000000000000000000000000000..14edfd089c7315ee9f39a4298af0289f8919da6b --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_timeutils.py @@ -0,0 +1,10 @@ +"""Tests for simple tools for timing functions' execution. """ + +from sympy.utilities.timeutils import timed + +def test_timed(): + result = timed(lambda: 1 + 1, limit=100000) + assert result[0] == 100000 and result[3] == "ns", str(result) + + result = timed("1 + 1", limit=100000) + assert result[0] == 100000 and result[3] == "ns" diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_wester.py b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_wester.py new file mode 100644 index 0000000000000000000000000000000000000000..20aefd9154016aac8313a6e849a4b549db706155 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_wester.py @@ -0,0 +1,3099 @@ +""" Tests from Michael Wester's 1999 paper "Review of CAS mathematical +capabilities". + +http://www.math.unm.edu/~wester/cas/book/Wester.pdf +See also http://math.unm.edu/~wester/cas_review.html for detailed output of +each tested system. 
+""" + +from sympy.assumptions.ask import Q, ask +from sympy.assumptions.refine import refine +from sympy.concrete.products import product +from sympy.core import EulerGamma +from sympy.core.evalf import N +from sympy.core.function import (Derivative, Function, Lambda, Subs, + diff, expand, expand_func) +from sympy.core.mul import Mul +from sympy.core.numbers import (AlgebraicNumber, E, I, Rational, igcd, + nan, oo, pi, zoo) +from sympy.core.relational import Eq, Lt +from sympy.core.singleton import S +from sympy.core.symbol import Dummy, Symbol, symbols +from sympy.functions.combinatorial.factorials import (rf, binomial, + factorial, factorial2) +from sympy.functions.combinatorial.numbers import bernoulli, fibonacci +from sympy.functions.elementary.complexes import (conjugate, im, re, + sign) +from sympy.functions.elementary.exponential import LambertW, exp, log +from sympy.functions.elementary.hyperbolic import (asinh, cosh, sinh, + tanh) +from sympy.functions.elementary.integers import ceiling, floor +from sympy.functions.elementary.miscellaneous import Max, Min, sqrt +from sympy.functions.elementary.piecewise import Piecewise +from sympy.functions.elementary.trigonometric import (acos, acot, asin, + atan, cos, cot, csc, sec, sin, tan) +from sympy.functions.special.bessel import besselj +from sympy.functions.special.delta_functions import DiracDelta +from sympy.functions.special.elliptic_integrals import (elliptic_e, + elliptic_f) +from sympy.functions.special.gamma_functions import gamma, polygamma +from sympy.functions.special.hyper import hyper +from sympy.functions.special.polynomials import (assoc_legendre, + chebyshevt) +from sympy.functions.special.zeta_functions import polylog +from sympy.geometry.util import idiff +from sympy.logic.boolalg import And +from sympy.matrices.dense import hessian, wronskian +from sympy.matrices.expressions.matmul import MatMul +from sympy.ntheory.continued_fraction import ( + continued_fraction_convergents as cf_c, + continued_fraction_iterator as cf_i, continued_fraction_periodic as + cf_p, continued_fraction_reduce as cf_r) +from sympy.ntheory.factor_ import factorint, totient +from sympy.ntheory.generate import primerange +from sympy.ntheory.partitions_ import npartitions +from sympy.polys.domains.integerring import ZZ +from sympy.polys.orthopolys import legendre_poly +from sympy.polys.partfrac import apart +from sympy.polys.polytools import Poly, factor, gcd, resultant +from sympy.series.limits import limit +from sympy.series.order import O +from sympy.series.residues import residue +from sympy.series.series import series +from sympy.sets.fancysets import ImageSet +from sympy.sets.sets import FiniteSet, Intersection, Interval, Union +from sympy.simplify.combsimp import combsimp +from sympy.simplify.hyperexpand import hyperexpand +from sympy.simplify.powsimp import powdenest, powsimp +from sympy.simplify.radsimp import radsimp +from sympy.simplify.simplify import logcombine, simplify +from sympy.simplify.sqrtdenest import sqrtdenest +from sympy.simplify.trigsimp import trigsimp +from sympy.solvers.solvers import solve + +import mpmath +from sympy.functions.combinatorial.numbers import stirling +from sympy.functions.special.delta_functions import Heaviside +from sympy.functions.special.error_functions import Ci, Si, erf +from sympy.functions.special.zeta_functions import zeta +from sympy.testing.pytest import (XFAIL, slow, SKIP, skip, ON_CI, + raises) +from sympy.utilities.iterables import partitions +from mpmath import mpi, mpc +from 
sympy.matrices import Matrix, GramSchmidt, eye +from sympy.matrices.expressions.blockmatrix import BlockMatrix, block_collapse +from sympy.matrices.expressions import MatrixSymbol, ZeroMatrix +from sympy.physics.quantum import Commutator +from sympy.polys.rings import PolyRing +from sympy.polys.fields import FracField +from sympy.polys.solvers import solve_lin_sys +from sympy.concrete import Sum +from sympy.concrete.products import Product +from sympy.integrals import integrate +from sympy.integrals.transforms import laplace_transform,\ + inverse_laplace_transform, LaplaceTransform, fourier_transform,\ + mellin_transform +from sympy.solvers.recurr import rsolve +from sympy.solvers.solveset import solveset, solveset_real, linsolve +from sympy.solvers.ode import dsolve +from sympy.core.relational import Equality +from itertools import islice, takewhile +from sympy.series.formal import fps +from sympy.series.fourier import fourier_series +from sympy.calculus.util import minimum + + +EmptySet = S.EmptySet +R = Rational +x, y, z = symbols('x y z') +i, j, k, l, m, n = symbols('i j k l m n', integer=True) +f = Function('f') +g = Function('g') + +# A. Boolean Logic and Quantifier Elimination +# Not implemented. + +# B. Set Theory + + +def test_B1(): + assert (FiniteSet(i, j, j, k, k, k) | FiniteSet(l, k, j) | + FiniteSet(j, m, j)) == FiniteSet(i, j, k, l, m) + + +def test_B2(): + assert (FiniteSet(i, j, j, k, k, k) & FiniteSet(l, k, j) & + FiniteSet(j, m, j)) == Intersection({j, m}, {i, j, k}, {j, k, l}) + # Previous output below. Not sure why that should be the expected output. + # There should probably be a way to rewrite Intersections that way but I + # don't see why an Intersection should evaluate like that: + # + # == Union({j}, Intersection({m}, Union({j, k}, Intersection({i}, {l})))) + + +def test_B3(): + assert (FiniteSet(i, j, k, l, m) - FiniteSet(j) == + FiniteSet(i, k, l, m)) + + +def test_B4(): + assert (FiniteSet(*(FiniteSet(i, j)*FiniteSet(k, l))) == + FiniteSet((i, k), (i, l), (j, k), (j, l))) + + +# C. Numbers + + +def test_C1(): + assert (factorial(50) == + 30414093201713378043612608166064768844377641568960512000000000000) + + +def test_C2(): + assert (factorint(factorial(50)) == {2: 47, 3: 22, 5: 12, 7: 8, + 11: 4, 13: 3, 17: 2, 19: 2, 23: 2, 29: 1, 31: 1, 37: 1, + 41: 1, 43: 1, 47: 1}) + + +def test_C3(): + assert (factorial2(10), factorial2(9)) == (3840, 945) + + +# Base conversions; not really implemented by SymPy +# Whatever. Take credit! +def test_C4(): + assert 0xABC == 2748 + + +def test_C5(): + assert 123 == int('234', 7) + + +def test_C6(): + assert int('677', 8) == int('1BF', 16) == 447 + + +def test_C7(): + assert log(32768, 8) == 5 + + +def test_C8(): + # Modular multiplicative inverse. Would be nice if divmod could do this. 
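+    # (As a quick cross-check outside SymPy: since Python 3.8 the builtin pow
+    # accepts a negative exponent together with a modulus, e.g. pow(5, -1, 7) == 3
+    # and pow(5, -1, 6) == 5.)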
+ assert ZZ.invert(5, 7) == 3 + assert ZZ.invert(5, 6) == 5 + + +def test_C9(): + assert igcd(igcd(1776, 1554), 5698) == 74 + + +def test_C10(): + x = 0 + for n in range(2, 11): + x += R(1, n) + assert x == R(4861, 2520) + + +def test_C11(): + assert R(1, 7) == S('0.[142857]') + + +def test_C12(): + assert R(7, 11) * R(22, 7) == 2 + + +def test_C13(): + test = R(10, 7) * (1 + R(29, 1000)) ** R(1, 3) + good = 3 ** R(1, 3) + assert test == good + + +def test_C14(): + assert sqrtdenest(sqrt(2*sqrt(3) + 4)) == 1 + sqrt(3) + + +def test_C15(): + test = sqrtdenest(sqrt(14 + 3*sqrt(3 + 2*sqrt(5 - 12*sqrt(3 - 2*sqrt(2)))))) + good = sqrt(2) + 3 + assert test == good + + +def test_C16(): + test = sqrtdenest(sqrt(10 + 2*sqrt(6) + 2*sqrt(10) + 2*sqrt(15))) + good = sqrt(2) + sqrt(3) + sqrt(5) + assert test == good + + +def test_C17(): + test = radsimp((sqrt(3) + sqrt(2)) / (sqrt(3) - sqrt(2))) + good = 5 + 2*sqrt(6) + assert test == good + + +def test_C18(): + assert simplify((sqrt(-2 + sqrt(-5)) * sqrt(-2 - sqrt(-5))).expand(complex=True)) == 3 + + +@XFAIL +def test_C19(): + assert radsimp(simplify((90 + 34*sqrt(7)) ** R(1, 3))) == 3 + sqrt(7) + + +def test_C20(): + inside = (135 + 78*sqrt(3)) + test = AlgebraicNumber((inside**R(2, 3) + 3) * sqrt(3) / inside**R(1, 3)) + assert simplify(test) == AlgebraicNumber(12) + + +def test_C21(): + assert simplify(AlgebraicNumber((41 + 29*sqrt(2)) ** R(1, 5))) == \ + AlgebraicNumber(1 + sqrt(2)) + + +@XFAIL +def test_C22(): + test = simplify(((6 - 4*sqrt(2))*log(3 - 2*sqrt(2)) + (3 - 2*sqrt(2))*log(17 + - 12*sqrt(2)) + 32 - 24*sqrt(2)) / (48*sqrt(2) - 72)) + good = sqrt(2)/3 - log(sqrt(2) - 1)/3 + assert test == good + + +def test_C23(): + assert 2 * oo - 3 is oo + + +@XFAIL +def test_C24(): + raise NotImplementedError("2**aleph_null == aleph_1") + +# D. Numerical Analysis + + +def test_D1(): + assert 0.0 / sqrt(2) == 0.0 + + +def test_D2(): + assert str(exp(-1000000).evalf()) == '3.29683147808856e-434295' + + +def test_D3(): + assert exp(pi*sqrt(163)).evalf(50).num.ae(262537412640768744) + + +def test_D4(): + assert floor(R(-5, 3)) == -2 + assert ceiling(R(-5, 3)) == -1 + + +@XFAIL +def test_D5(): + raise NotImplementedError("cubic_spline([1, 2, 4, 5], [1, 4, 2, 3], x)(3) == 27/8") + + +@XFAIL +def test_D6(): + raise NotImplementedError("translate sum(a[i]*x**i, (i,1,n)) to FORTRAN") + + +@XFAIL +def test_D7(): + raise NotImplementedError("translate sum(a[i]*x**i, (i,1,n)) to C") + + +@XFAIL +def test_D8(): + # One way is to cheat by converting the sum to a string, + # and replacing the '[' and ']' with ''. + # E.g., horner(S(str(_).replace('[','').replace(']',''))) + raise NotImplementedError("apply Horner's rule to sum(a[i]*x**i, (i,1,5))") + + +@XFAIL +def test_D9(): + raise NotImplementedError("translate D8 to FORTRAN") + + +@XFAIL +def test_D10(): + raise NotImplementedError("translate D8 to C") + + +@XFAIL +def test_D11(): + #Is there a way to use count_ops? + raise NotImplementedError("flops(sum(product(f[i][k], (i,1,k)), (k,1,n)))") + + +@XFAIL +def test_D12(): + assert (mpi(-4, 2) * x + mpi(1, 3)) ** 2 == mpi(-8, 16)*x**2 + mpi(-24, 12)*x + mpi(1, 9) + + +@XFAIL +def test_D13(): + raise NotImplementedError("discretize a PDE: diff(f(x,t),t) == diff(diff(f(x,t),x),x)") + +# E. Statistics +# See scipy; all of this is numerical. + +# F. Combinatorial Theory. 
+ + +def test_F1(): + assert rf(x, 3) == x*(1 + x)*(2 + x) + + +def test_F2(): + assert expand_func(binomial(n, 3)) == n*(n - 1)*(n - 2)/6 + + +@XFAIL +def test_F3(): + assert combsimp(2**n * factorial(n) * factorial2(2*n - 1)) == factorial(2*n) + + +@XFAIL +def test_F4(): + assert combsimp(2**n * factorial(n) * product(2*k - 1, (k, 1, n))) == factorial(2*n) + + +@XFAIL +def test_F5(): + assert gamma(n + R(1, 2)) / sqrt(pi) / factorial(n) == factorial(2*n)/2**(2*n)/factorial(n)**2 + + +def test_F6(): + partTest = [p.copy() for p in partitions(4)] + partDesired = [{4: 1}, {1: 1, 3: 1}, {2: 2}, {1: 2, 2:1}, {1: 4}] + assert partTest == partDesired + + +def test_F7(): + assert npartitions(4) == 5 + + +def test_F8(): + assert stirling(5, 2, signed=True) == -50 # if signed, then kind=1 + + +def test_F9(): + assert totient(1776) == 576 + +# G. Number Theory + + +def test_G1(): + assert list(primerange(999983, 1000004)) == [999983, 1000003] + + +@XFAIL +def test_G2(): + raise NotImplementedError("find the primitive root of 191 == 19") + + +@XFAIL +def test_G3(): + raise NotImplementedError("(a+b)**p mod p == a**p + b**p mod p; p prime") + +# ... G14 Modular equations are not implemented. + +def test_G15(): + assert Rational(sqrt(3).evalf()).limit_denominator(15) == R(26, 15) + assert list(takewhile(lambda x: x.q <= 15, cf_c(cf_i(sqrt(3)))))[-1] == \ + R(26, 15) + + +def test_G16(): + assert list(islice(cf_i(pi),10)) == [3, 7, 15, 1, 292, 1, 1, 1, 2, 1] + + +def test_G17(): + assert cf_p(0, 1, 23) == [4, [1, 3, 1, 8]] + + +def test_G18(): + assert cf_p(1, 2, 5) == [[1]] + assert cf_r([[1]]).expand() == S.Half + sqrt(5)/2 + + +@XFAIL +def test_G19(): + s = symbols('s', integer=True, positive=True) + it = cf_i((exp(1/s) - 1)/(exp(1/s) + 1)) + assert list(islice(it, 5)) == [0, 2*s, 6*s, 10*s, 14*s] + + +def test_G20(): + s = symbols('s', integer=True, positive=True) + # Wester erroneously has this as -s + sqrt(s**2 + 1) + assert cf_r([[2*s]]) == s + sqrt(s**2 + 1) + + +@XFAIL +def test_G20b(): + s = symbols('s', integer=True, positive=True) + assert cf_p(s, 1, s**2 + 1) == [[2*s]] + + +# H. 
Algebra + + +def test_H1(): + assert simplify(2*2**n) == simplify(2**(n + 1)) + assert powdenest(2*2**n) == simplify(2**(n + 1)) + + +def test_H2(): + assert powsimp(4 * 2**n) == 2**(n + 2) + + +def test_H3(): + assert (-1)**(n*(n + 1)) == 1 + + +def test_H4(): + expr = factor(6*x - 10) + assert type(expr) is Mul + assert expr.args[0] == 2 + assert expr.args[1] == 3*x - 5 + +p1 = 64*x**34 - 21*x**47 - 126*x**8 - 46*x**5 - 16*x**60 - 81 +p2 = 72*x**60 - 25*x**25 - 19*x**23 - 22*x**39 - 83*x**52 + 54*x**10 + 81 +q = 34*x**19 - 25*x**16 + 70*x**7 + 20*x**3 - 91*x - 86 + + +def test_H5(): + assert gcd(p1, p2, x) == 1 + + +def test_H6(): + assert gcd(expand(p1 * q), expand(p2 * q)) == q + + +def test_H7(): + p1 = 24*x*y**19*z**8 - 47*x**17*y**5*z**8 + 6*x**15*y**9*z**2 - 3*x**22 + 5 + p2 = 34*x**5*y**8*z**13 + 20*x**7*y**7*z**7 + 12*x**9*y**16*z**4 + 80*y**14*z + assert gcd(p1, p2, x, y, z) == 1 + + +def test_H8(): + p1 = 24*x*y**19*z**8 - 47*x**17*y**5*z**8 + 6*x**15*y**9*z**2 - 3*x**22 + 5 + p2 = 34*x**5*y**8*z**13 + 20*x**7*y**7*z**7 + 12*x**9*y**16*z**4 + 80*y**14*z + q = 11*x**12*y**7*z**13 - 23*x**2*y**8*z**10 + 47*x**17*y**5*z**8 + assert gcd(p1 * q, p2 * q, x, y, z) == q + + +def test_H9(): + x = Symbol('x', zero=False) + p1 = 2*x**(n + 4) - x**(n + 2) + p2 = 4*x**(n + 1) + 3*x**n + assert gcd(p1, p2) == x**n + + +def test_H10(): + p1 = 3*x**4 + 3*x**3 + x**2 - x - 2 + p2 = x**3 - 3*x**2 + x + 5 + assert resultant(p1, p2, x) == 0 + + +def test_H11(): + assert resultant(p1 * q, p2 * q, x) == 0 + + +def test_H12(): + num = x**2 - 4 + den = x**2 + 4*x + 4 + assert simplify(num/den) == (x - 2)/(x + 2) + + +@XFAIL +def test_H13(): + assert simplify((exp(x) - 1) / (exp(x/2) + 1)) == exp(x/2) - 1 + + +def test_H14(): + p = (x + 1) ** 20 + ep = expand(p) + assert ep == (1 + 20*x + 190*x**2 + 1140*x**3 + 4845*x**4 + 15504*x**5 + + 38760*x**6 + 77520*x**7 + 125970*x**8 + 167960*x**9 + 184756*x**10 + + 167960*x**11 + 125970*x**12 + 77520*x**13 + 38760*x**14 + 15504*x**15 + + 4845*x**16 + 1140*x**17 + 190*x**18 + 20*x**19 + x**20) + dep = diff(ep, x) + assert dep == (20 + 380*x + 3420*x**2 + 19380*x**3 + 77520*x**4 + + 232560*x**5 + 542640*x**6 + 1007760*x**7 + 1511640*x**8 + 1847560*x**9 + + 1847560*x**10 + 1511640*x**11 + 1007760*x**12 + 542640*x**13 + + 232560*x**14 + 77520*x**15 + 19380*x**16 + 3420*x**17 + 380*x**18 + + 20*x**19) + assert factor(dep) == 20*(1 + x)**19 + + +def test_H15(): + assert simplify(Mul(*[x - r for r in solveset(x**3 + x**2 - 7)])) == x**3 + x**2 - 7 + + +def test_H16(): + assert factor(x**100 - 1) == ((x - 1)*(x + 1)*(x**2 + 1)*(x**4 - x**3 + + x**2 - x + 1)*(x**4 + x**3 + x**2 + x + 1)*(x**8 - x**6 + x**4 + - x**2 + 1)*(x**20 - x**15 + x**10 - x**5 + 1)*(x**20 + x**15 + x**10 + + x**5 + 1)*(x**40 - x**30 + x**20 - x**10 + 1)) + + +def test_H17(): + assert simplify(factor(expand(p1 * p2)) - p1*p2) == 0 + + +@XFAIL +def test_H18(): + # Factor over complex rationals. + test = factor(4*x**4 + 8*x**3 + 77*x**2 + 18*x + 153) + good = (2*x + 3*I)*(2*x - 3*I)*(x + 1 - 4*I)*(x + 1 + 4*I) + assert test == good + + +def test_H19(): + a = symbols('a') + # The idea is to let a**2 == 2, then solve 1/(a-1). Answer is a+1") + assert Poly(a - 1).invert(Poly(a**2 - 2)) == a + 1 + + +@XFAIL +def test_H20(): + raise NotImplementedError("let a**2==2; (x**3 + (a-2)*x**2 - " + + "(2*a+3)*x - 3*a) / (x**2-2) = (x**2 - 2*x - 3) / (x-a)") + + +@XFAIL +def test_H21(): + raise NotImplementedError("evaluate (b+c)**4 assuming b**3==2, c**2==3. 
\ + Answer is 2*b + 8*c + 18*b**2 + 12*b*c + 9") + + +def test_H22(): + assert factor(x**4 - 3*x**2 + 1, modulus=5) == (x - 2)**2 * (x + 2)**2 + + +def test_H23(): + f = x**11 + x + 1 + g = (x**2 + x + 1) * (x**9 - x**8 + x**6 - x**5 + x**3 - x**2 + 1) + assert factor(f, modulus=65537) == g + + +def test_H24(): + phi = AlgebraicNumber(S.GoldenRatio.expand(func=True), alias='phi') + assert factor(x**4 - 3*x**2 + 1, extension=phi) == \ + (x - phi)*(x + 1 - phi)*(x - 1 + phi)*(x + phi) + + +def test_H25(): + e = (x - 2*y**2 + 3*z**3) ** 20 + assert factor(expand(e)) == e + + +def test_H26(): + g = expand((sin(x) - 2*cos(y)**2 + 3*tan(z)**3)**20) + assert factor(g, expand=False) == (-sin(x) + 2*cos(y)**2 - 3*tan(z)**3)**20 + + +def test_H27(): + f = 24*x*y**19*z**8 - 47*x**17*y**5*z**8 + 6*x**15*y**9*z**2 - 3*x**22 + 5 + g = 34*x**5*y**8*z**13 + 20*x**7*y**7*z**7 + 12*x**9*y**16*z**4 + 80*y**14*z + h = -2*z*y**7 \ + *(6*x**9*y**9*z**3 + 10*x**7*z**6 + 17*y*x**5*z**12 + 40*y**7) \ + *(3*x**22 + 47*x**17*y**5*z**8 - 6*x**15*y**9*z**2 - 24*x*y**19*z**8 - 5) + assert factor(expand(f*g)) == h + + +@XFAIL +def test_H28(): + raise NotImplementedError("expand ((1 - c**2)**5 * (1 - s**2)**5 * " + + "(c**2 + s**2)**10) with c**2 + s**2 = 1. Answer is c**10*s**10.") + + +@XFAIL +def test_H29(): + assert factor(4*x**2 - 21*x*y + 20*y**2, modulus=3) == (x + y)*(x - y) + + +def test_H30(): + test = factor(x**3 + y**3, extension=sqrt(-3)) + answer = (x + y)*(x + y*(-R(1, 2) - sqrt(3)/2*I))*(x + y*(-R(1, 2) + sqrt(3)/2*I)) + assert answer == test + + +def test_H31(): + f = (x**2 + 2*x + 3)/(x**3 + 4*x**2 + 5*x + 2) + g = 2 / (x + 1)**2 - 2 / (x + 1) + 3 / (x + 2) + assert apart(f) == g + + +@XFAIL +def test_H32(): # issue 6558 + raise NotImplementedError("[A*B*C - (A*B*C)**(-1)]*A*C*B (product \ + of a non-commuting product and its inverse)") + + +def test_H33(): + A, B, C = symbols('A, B, C', commutative=False) + assert (Commutator(A, Commutator(B, C)) + + Commutator(B, Commutator(C, A)) + + Commutator(C, Commutator(A, B))).doit().expand() == 0 + + +# I. 
Trigonometry + +def test_I1(): + assert tan(pi*R(7, 10)) == -sqrt(1 + 2/sqrt(5)) + + +@XFAIL +def test_I2(): + assert sqrt((1 + cos(6))/2) == -cos(3) + + +def test_I3(): + assert cos(n*pi) + sin((4*n - 1)*pi/2) == (-1)**n - 1 + + +def test_I4(): + assert refine(cos(pi*cos(n*pi)) + sin(pi/2*cos(n*pi)), Q.integer(n)) == (-1)**n - 1 + + +@XFAIL +def test_I5(): + assert sin((n**5/5 + n**4/2 + n**3/3 - n/30) * pi) == 0 + + +@XFAIL +def test_I6(): + raise NotImplementedError("assuming -3*pi pi**E) + + +@XFAIL +def test_N2(): + x = symbols('x', real=True) + assert ask(x**4 - x + 1 > 0) is True + assert ask(x**4 - x + 1 > 1) is False + + +@XFAIL +def test_N3(): + x = symbols('x', real=True) + assert ask(And(Lt(-1, x), Lt(x, 1)), abs(x) < 1 ) + +@XFAIL +def test_N4(): + x, y = symbols('x y', real=True) + assert ask(2*x**2 > 2*y**2, (x > y) & (y > 0)) is True + + +@XFAIL +def test_N5(): + x, y, k = symbols('x y k', real=True) + assert ask(k*x**2 > k*y**2, (x > y) & (y > 0) & (k > 0)) is True + + +@slow +@XFAIL +def test_N6(): + x, y, k, n = symbols('x y k n', real=True) + assert ask(k*x**n > k*y**n, (x > y) & (y > 0) & (k > 0) & (n > 0)) is True + + +@XFAIL +def test_N7(): + x, y = symbols('x y', real=True) + assert ask(y > 0, (x > 1) & (y >= x - 1)) is True + + +@XFAIL +@slow +def test_N8(): + x, y, z = symbols('x y z', real=True) + assert ask(Eq(x, y) & Eq(y, z), + (x >= y) & (y >= z) & (z >= x)) + + +def test_N9(): + x = Symbol('x') + assert solveset(abs(x - 1) > 2, domain=S.Reals) == Union(Interval(-oo, -1, False, True), + Interval(3, oo, True)) + + +def test_N10(): + x = Symbol('x') + p = (x - 1)*(x - 2)*(x - 3)*(x - 4)*(x - 5) + assert solveset(expand(p) < 0, domain=S.Reals) == Union(Interval(-oo, 1, True, True), + Interval(2, 3, True, True), + Interval(4, 5, True, True)) + + +def test_N11(): + x = Symbol('x') + assert solveset(6/(x - 3) <= 3, domain=S.Reals) == Union(Interval(-oo, 3, True, True), Interval(5, oo)) + + +def test_N12(): + x = Symbol('x') + assert solveset(sqrt(x) < 2, domain=S.Reals) == Interval(0, 4, False, True) + + +def test_N13(): + x = Symbol('x') + assert solveset(sin(x) < 2, domain=S.Reals) == S.Reals + + +@XFAIL +def test_N14(): + x = Symbol('x') + # Gives 'Union(Interval(Integer(0), Mul(Rational(1, 2), pi), false, true), + # Interval(Mul(Rational(1, 2), pi), Mul(Integer(2), pi), true, false))' + # which is not the correct answer, but the provided also seems wrong. 
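+    # (For reference, the mathematically correct set is all reals except the
+    # points where sin(x) == 1, i.e. S.Reals minus {pi/2 + 2*pi*n : n integer}.)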
+ assert solveset(sin(x) < 1, x, domain=S.Reals) == Union(Interval(-oo, pi/2, True, True), + Interval(pi/2, oo, True, True)) + + +def test_N15(): + r, t = symbols('r t') + # raises NotImplementedError: only univariate inequalities are supported + solveset(abs(2*r*(cos(t) - 1) + 1) <= 1, r, S.Reals) + + +def test_N16(): + r, t = symbols('r t') + solveset((r**2)*((cos(t) - 4)**2)*sin(t)**2 < 9, r, S.Reals) + + +@XFAIL +def test_N17(): + # currently only univariate inequalities are supported + assert solveset((x + y > 0, x - y < 0), (x, y)) == (abs(x) < y) + + +def test_O1(): + M = Matrix((1 + I, -2, 3*I)) + assert sqrt(expand(M.dot(M.H))) == sqrt(15) + + +def test_O2(): + assert Matrix((2, 2, -3)).cross(Matrix((1, 3, 1))) == Matrix([[11], + [-5], + [4]]) + +# The vector module has no way of representing vectors symbolically (without +# respect to a basis) +@XFAIL +def test_O3(): + # assert (va ^ vb) | (vc ^ vd) == -(va | vc)*(vb | vd) + (va | vd)*(vb | vc) + raise NotImplementedError("""The vector module has no way of representing + vectors symbolically (without respect to a basis)""") + +def test_O4(): + from sympy.vector import CoordSys3D, Del + N = CoordSys3D("N") + delop = Del() + i, j, k = N.base_vectors() + x, y, z = N.base_scalars() + F = i*(x*y*z) + j*((x*y*z)**2) + k*((y**2)*(z**3)) + assert delop.cross(F).doit() == (-2*x**2*y**2*z + 2*y*z**3)*i + x*y*j + (2*x*y**2*z**2 - x*z)*k + +@XFAIL +def test_O5(): + #assert grad|(f^g)-g|(grad^f)+f|(grad^g) == 0 + raise NotImplementedError("""The vector module has no way of representing + vectors symbolically (without respect to a basis)""") + +#testO8-O9 MISSING!! + + +def test_O10(): + L = [Matrix([2, 3, 5]), Matrix([3, 6, 2]), Matrix([8, 3, 6])] + assert GramSchmidt(L) == [Matrix([ + [2], + [3], + [5]]), + Matrix([ + [R(23, 19)], + [R(63, 19)], + [R(-47, 19)]]), + Matrix([ + [R(1692, 353)], + [R(-1551, 706)], + [R(-423, 706)]])] + + +def test_P1(): + assert Matrix(3, 3, lambda i, j: j - i).diagonal(-1) == Matrix( + 1, 2, [-1, -1]) + + +def test_P2(): + M = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + M.row_del(1) + M.col_del(2) + assert M == Matrix([[1, 2], + [7, 8]]) + + +def test_P3(): + A = Matrix([ + [11, 12, 13, 14], + [21, 22, 23, 24], + [31, 32, 33, 34], + [41, 42, 43, 44]]) + + A11 = A[0:3, 1:4] + A12 = A[(0, 1, 3), (2, 0, 3)] + A21 = A + A221 = -A[0:2, 2:4] + A222 = -A[(3, 0), (2, 1)] + A22 = BlockMatrix([[A221, A222]]).T + rows = [[-A11, A12], [A21, A22]] + raises(ValueError, lambda: BlockMatrix(rows)) + B = Matrix(rows) + assert B == Matrix([ + [-12, -13, -14, 13, 11, 14], + [-22, -23, -24, 23, 21, 24], + [-32, -33, -34, 43, 41, 44], + [11, 12, 13, 14, -13, -23], + [21, 22, 23, 24, -14, -24], + [31, 32, 33, 34, -43, -13], + [41, 42, 43, 44, -42, -12]]) + + +@XFAIL +def test_P4(): + raise NotImplementedError("Block matrix diagonalization not supported") + + +def test_P5(): + M = Matrix([[7, 11], + [3, 8]]) + assert M % 2 == Matrix([[1, 1], + [1, 0]]) + + +def test_P6(): + M = Matrix([[cos(x), sin(x)], + [-sin(x), cos(x)]]) + assert M.diff(x, 2) == Matrix([[-cos(x), -sin(x)], + [sin(x), -cos(x)]]) + + +def test_P7(): + M = Matrix([[x, y]])*( + z*Matrix([[1, 3, 5], + [2, 4, 6]]) + Matrix([[7, -9, 11], + [-8, 10, -12]])) + assert M == Matrix([[x*(z + 7) + y*(2*z - 8), x*(3*z - 9) + y*(4*z + 10), + x*(5*z + 11) + y*(6*z - 12)]]) + + +def test_P8(): + M = Matrix([[1, -2*I], + [-3*I, 4]]) + assert M.norm(ord=S.Infinity) == 7 + + +def test_P9(): + a, b, c = symbols('a b c', nonzero=True) + M = Matrix([[a/(b*c), 1/c, 1/b], + [1/c, b/(a*c), 
1/a], + [1/b, 1/a, c/(a*b)]]) + assert factor(M.norm('fro')) == (a**2 + b**2 + c**2)/(abs(a)*abs(b)*abs(c)) + + +@XFAIL +def test_P10(): + M = Matrix([[1, 2 + 3*I], + [f(4 - 5*I), 6]]) + # conjugate(f(4 - 5*i)) is not simplified to f(4+5*I) + assert M.H == Matrix([[1, f(4 + 5*I)], + [2 + 3*I, 6]]) + + +@XFAIL +def test_P11(): + # raises NotImplementedError("Matrix([[x,y],[1,x*y]]).inv() + # not simplifying to extract common factor") + assert Matrix([[x, y], + [1, x*y]]).inv() == (1/(x**2 - 1))*Matrix([[x, -1], + [-1/y, x/y]]) + + +def test_P11_workaround(): + # This test was changed to inverse method ADJ because it depended on the + # specific form of inverse returned from the 'GE' method which has changed. + M = Matrix([[x, y], [1, x*y]]).inv('ADJ') + c = gcd(tuple(M)) + assert MatMul(c, M/c, evaluate=False) == MatMul(c, Matrix([ + [x*y, -y], + [ -1, x]]), evaluate=False) + + +def test_P12(): + A11 = MatrixSymbol('A11', n, n) + A12 = MatrixSymbol('A12', n, n) + A22 = MatrixSymbol('A22', n, n) + B = BlockMatrix([[A11, A12], + [ZeroMatrix(n, n), A22]]) + assert block_collapse(B.I) == BlockMatrix([[A11.I, (-1)*A11.I*A12*A22.I], + [ZeroMatrix(n, n), A22.I]]) + + +def test_P13(): + M = Matrix([[1, x - 2, x - 3], + [x - 1, x**2 - 3*x + 6, x**2 - 3*x - 2], + [x - 2, x**2 - 8, 2*(x**2) - 12*x + 14]]) + L, U, _ = M.LUdecomposition() + assert simplify(L) == Matrix([[1, 0, 0], + [x - 1, 1, 0], + [x - 2, x - 3, 1]]) + assert simplify(U) == Matrix([[1, x - 2, x - 3], + [0, 4, x - 5], + [0, 0, x - 7]]) + + +def test_P14(): + M = Matrix([[1, 2, 3, 1, 3], + [3, 2, 1, 1, 7], + [0, 2, 4, 1, 1], + [1, 1, 1, 1, 4]]) + R, _ = M.rref() + assert R == Matrix([[1, 0, -1, 0, 2], + [0, 1, 2, 0, -1], + [0, 0, 0, 1, 3], + [0, 0, 0, 0, 0]]) + + +def test_P15(): + M = Matrix([[-1, 3, 7, -5], + [4, -2, 1, 3], + [2, 4, 15, -7]]) + assert M.rank() == 2 + + +def test_P16(): + M = Matrix([[2*sqrt(2), 8], + [6*sqrt(6), 24*sqrt(3)]]) + assert M.rank() == 1 + + +def test_P17(): + t = symbols('t', real=True) + M=Matrix([ + [sin(2*t), cos(2*t)], + [2*(1 - (cos(t)**2))*cos(t), (1 - 2*(sin(t)**2))*sin(t)]]) + assert M.rank() == 1 + + +def test_P18(): + M = Matrix([[1, 0, -2, 0], + [-2, 1, 0, 3], + [-1, 2, -6, 6]]) + assert M.nullspace() == [Matrix([[2], + [4], + [1], + [0]]), + Matrix([[0], + [-3], + [0], + [1]])] + + +def test_P19(): + w = symbols('w') + M = Matrix([[1, 1, 1, 1], + [w, x, y, z], + [w**2, x**2, y**2, z**2], + [w**3, x**3, y**3, z**3]]) + assert M.det() == (w**3*x**2*y - w**3*x**2*z - w**3*x*y**2 + w**3*x*z**2 + + w**3*y**2*z - w**3*y*z**2 - w**2*x**3*y + w**2*x**3*z + + w**2*x*y**3 - w**2*x*z**3 - w**2*y**3*z + w**2*y*z**3 + + w*x**3*y**2 - w*x**3*z**2 - w*x**2*y**3 + w*x**2*z**3 + + w*y**3*z**2 - w*y**2*z**3 - x**3*y**2*z + x**3*y*z**2 + + x**2*y**3*z - x**2*y*z**3 - x*y**3*z**2 + x*y**2*z**3 + ) + + +@XFAIL +def test_P20(): + raise NotImplementedError("Matrix minimal polynomial not supported") + + +def test_P21(): + M = Matrix([[5, -3, -7], + [-2, 1, 2], + [2, -3, -4]]) + assert M.charpoly(x).as_expr() == x**3 - 2*x**2 - 5*x + 6 + + +def test_P22(): + d = 100 + M = (2 - x)*eye(d) + assert M.eigenvals() == {-x + 2: d} + + +def test_P23(): + M = Matrix([ + [2, 1, 0, 0, 0], + [1, 2, 1, 0, 0], + [0, 1, 2, 1, 0], + [0, 0, 1, 2, 1], + [0, 0, 0, 1, 2]]) + assert M.eigenvals() == { + S('1'): 1, + S('2'): 1, + S('3'): 1, + S('sqrt(3) + 2'): 1, + S('-sqrt(3) + 2'): 1} + + +def test_P24(): + M = Matrix([[611, 196, -192, 407, -8, -52, -49, 29], + [196, 899, 113, -192, -71, -43, -8, -44], + [-192, 113, 899, 196, 61, 49, 8, 
52], + [ 407, -192, 196, 611, 8, 44, 59, -23], + [ -8, -71, 61, 8, 411, -599, 208, 208], + [ -52, -43, 49, 44, -599, 411, 208, 208], + [ -49, -8, 8, 59, 208, 208, 99, -911], + [ 29, -44, 52, -23, 208, 208, -911, 99]]) + assert M.eigenvals() == { + S('0'): 1, + S('10*sqrt(10405)'): 1, + S('100*sqrt(26) + 510'): 1, + S('1000'): 2, + S('-100*sqrt(26) + 510'): 1, + S('-10*sqrt(10405)'): 1, + S('1020'): 1} + + +def test_P25(): + MF = N(Matrix([[ 611, 196, -192, 407, -8, -52, -49, 29], + [ 196, 899, 113, -192, -71, -43, -8, -44], + [-192, 113, 899, 196, 61, 49, 8, 52], + [ 407, -192, 196, 611, 8, 44, 59, -23], + [ -8, -71, 61, 8, 411, -599, 208, 208], + [ -52, -43, 49, 44, -599, 411, 208, 208], + [ -49, -8, 8, 59, 208, 208, 99, -911], + [ 29, -44, 52, -23, 208, 208, -911, 99]])) + + ev_1 = sorted(MF.eigenvals(multiple=True)) + ev_2 = sorted( + [-1020.0490184299969, 0.0, 0.09804864072151699, 1000.0, 1000.0, + 1019.9019513592784, 1020.0, 1020.0490184299969]) + + for x, y in zip(ev_1, ev_2): + assert abs(x - y) < 1e-12 + + +def test_P26(): + a0, a1, a2, a3, a4 = symbols('a0 a1 a2 a3 a4') + M = Matrix([[-a4, -a3, -a2, -a1, -a0, 0, 0, 0, 0], + [ 1, 0, 0, 0, 0, 0, 0, 0, 0], + [ 0, 1, 0, 0, 0, 0, 0, 0, 0], + [ 0, 0, 1, 0, 0, 0, 0, 0, 0], + [ 0, 0, 0, 1, 0, 0, 0, 0, 0], + [ 0, 0, 0, 0, 0, -1, -1, 0, 0], + [ 0, 0, 0, 0, 0, 1, 0, 0, 0], + [ 0, 0, 0, 0, 0, 0, 1, -1, -1], + [ 0, 0, 0, 0, 0, 0, 0, 1, 0]]) + assert M.eigenvals(error_when_incomplete=False) == { + S('-1/2 - sqrt(3)*I/2'): 2, + S('-1/2 + sqrt(3)*I/2'): 2} + + +def test_P27(): + a = symbols('a') + M = Matrix([[a, 0, 0, 0, 0], + [0, 0, 0, 0, 1], + [0, 0, a, 0, 0], + [0, 0, 0, a, 0], + [0, -2, 0, 0, 2]]) + + assert M.eigenvects() == [ + (a, 3, [ + Matrix([1, 0, 0, 0, 0]), + Matrix([0, 0, 1, 0, 0]), + Matrix([0, 0, 0, 1, 0]) + ]), + (1 - I, 1, [ + Matrix([0, (1 + I)/2, 0, 0, 1]) + ]), + (1 + I, 1, [ + Matrix([0, (1 - I)/2, 0, 0, 1]) + ]), + ] + + +@XFAIL +def test_P28(): + raise NotImplementedError("Generalized eigenvectors not supported \ +https://github.com/sympy/sympy/issues/5293") + + +@XFAIL +def test_P29(): + raise NotImplementedError("Generalized eigenvectors not supported \ +https://github.com/sympy/sympy/issues/5293") + + +def test_P30(): + M = Matrix([[1, 0, 0, 1, -1], + [0, 1, -2, 3, -3], + [0, 0, -1, 2, -2], + [1, -1, 1, 0, 1], + [1, -1, 1, -1, 2]]) + _, J = M.jordan_form() + assert J == Matrix([[-1, 0, 0, 0, 0], + [0, 1, 1, 0, 0], + [0, 0, 1, 0, 0], + [0, 0, 0, 1, 1], + [0, 0, 0, 0, 1]]) + + +@XFAIL +def test_P31(): + raise NotImplementedError("Smith normal form not implemented") + + +def test_P32(): + M = Matrix([[1, -2], + [2, 1]]) + assert exp(M).rewrite(cos).simplify() == Matrix([[E*cos(2), -E*sin(2)], + [E*sin(2), E*cos(2)]]) + + +def test_P33(): + w, t = symbols('w t') + M = Matrix([[0, 1, 0, 0], + [0, 0, 0, 2*w], + [0, 0, 0, 1], + [0, -2*w, 3*w**2, 0]]) + assert exp(M*t).rewrite(cos).expand() == Matrix([ + [1, -3*t + 4*sin(t*w)/w, 6*t*w - 6*sin(t*w), -2*cos(t*w)/w + 2/w], + [0, 4*cos(t*w) - 3, -6*w*cos(t*w) + 6*w, 2*sin(t*w)], + [0, 2*cos(t*w)/w - 2/w, -3*cos(t*w) + 4, sin(t*w)/w], + [0, -2*sin(t*w), 3*w*sin(t*w), cos(t*w)]]) + + +@XFAIL +def test_P34(): + a, b, c = symbols('a b c', real=True) + M = Matrix([[a, 1, 0, 0, 0, 0], + [0, a, 0, 0, 0, 0], + [0, 0, b, 0, 0, 0], + [0, 0, 0, c, 1, 0], + [0, 0, 0, 0, c, 1], + [0, 0, 0, 0, 0, c]]) + # raises exception, sin(M) not supported. 
exp(M*I) also not supported + # https://github.com/sympy/sympy/issues/6218 + assert sin(M) == Matrix([[sin(a), cos(a), 0, 0, 0, 0], + [0, sin(a), 0, 0, 0, 0], + [0, 0, sin(b), 0, 0, 0], + [0, 0, 0, sin(c), cos(c), -sin(c)/2], + [0, 0, 0, 0, sin(c), cos(c)], + [0, 0, 0, 0, 0, sin(c)]]) + + +@XFAIL +def test_P35(): + M = pi/2*Matrix([[2, 1, 1], + [2, 3, 2], + [1, 1, 2]]) + # raises exception, sin(M) not supported. exp(M*I) also not supported + # https://github.com/sympy/sympy/issues/6218 + assert sin(M) == eye(3) + + +@XFAIL +def test_P36(): + M = Matrix([[10, 7], + [7, 17]]) + assert sqrt(M) == Matrix([[3, 1], + [1, 4]]) + + +def test_P37(): + M = Matrix([[1, 1, 0], + [0, 1, 0], + [0, 0, 1]]) + assert M**S.Half == Matrix([[1, R(1, 2), 0], + [0, 1, 0], + [0, 0, 1]]) + + +@XFAIL +def test_P38(): + M=Matrix([[0, 1, 0], + [0, 0, 0], + [0, 0, 0]]) + + with raises(AssertionError): + # raises ValueError: Matrix det == 0; not invertible + M**S.Half + # if it doesn't raise then this assertion will be + # raised and the test will be flagged as not XFAILing + assert None + +@XFAIL +def test_P39(): + """ + M=Matrix([ + [1, 1], + [2, 2], + [3, 3]]) + M.SVD() + """ + raise NotImplementedError("Singular value decomposition not implemented") + + +def test_P40(): + r, t = symbols('r t', real=True) + M = Matrix([r*cos(t), r*sin(t)]) + assert M.jacobian(Matrix([r, t])) == Matrix([[cos(t), -r*sin(t)], + [sin(t), r*cos(t)]]) + + +def test_P41(): + r, t = symbols('r t', real=True) + assert hessian(r**2*sin(t),(r,t)) == Matrix([[ 2*sin(t), 2*r*cos(t)], + [2*r*cos(t), -r**2*sin(t)]]) + + +def test_P42(): + assert wronskian([cos(x), sin(x)], x).simplify() == 1 + + +def test_P43(): + def __my_jacobian(M, Y): + return Matrix([M.diff(v).T for v in Y]).T + r, t = symbols('r t', real=True) + M = Matrix([r*cos(t), r*sin(t)]) + assert __my_jacobian(M,[r,t]) == Matrix([[cos(t), -r*sin(t)], + [sin(t), r*cos(t)]]) + + +def test_P44(): + def __my_hessian(f, Y): + V = Matrix([diff(f, v) for v in Y]) + return Matrix([V.T.diff(v) for v in Y]) + r, t = symbols('r t', real=True) + assert __my_hessian(r**2*sin(t), (r, t)) == Matrix([ + [ 2*sin(t), 2*r*cos(t)], + [2*r*cos(t), -r**2*sin(t)]]) + + +def test_P45(): + def __my_wronskian(Y, v): + M = Matrix([Matrix(Y).T.diff(x, n) for n in range(0, len(Y))]) + return M.det() + assert __my_wronskian([cos(x), sin(x)], x).simplify() == 1 + +# Q1-Q6 Tensor tests missing + + +@XFAIL +def test_R1(): + i, j, n = symbols('i j n', integer=True, positive=True) + xn = MatrixSymbol('xn', n, 1) + Sm = Sum((xn[i, 0] - Sum(xn[j, 0], (j, 0, n - 1))/n)**2, (i, 0, n - 1)) + # sum does not calculate + # Unknown result + Sm.doit() + raise NotImplementedError('Unknown result') + +@XFAIL +def test_R2(): + m, b = symbols('m b') + i, n = symbols('i n', integer=True, positive=True) + xn = MatrixSymbol('xn', n, 1) + yn = MatrixSymbol('yn', n, 1) + f = Sum((yn[i, 0] - m*xn[i, 0] - b)**2, (i, 0, n - 1)) + f1 = diff(f, m) + f2 = diff(f, b) + # raises TypeError: solveset() takes at most 2 arguments (3 given) + solveset((f1, f2), (m, b), domain=S.Reals) + + +@XFAIL +def test_R3(): + n, k = symbols('n k', integer=True, positive=True) + sk = ((-1)**k) * (binomial(2*n, k))**2 + Sm = Sum(sk, (k, 1, oo)) + T = Sm.doit() + T2 = T.combsimp() + # returns -((-1)**n*factorial(2*n) + # - (factorial(n))**2)*exp_polar(-I*pi)/(factorial(n))**2 + assert T2 == (-1)**n*binomial(2*n, n) + + +@XFAIL +def test_R4(): +# Macsyma indefinite sum test case: +#(c15) /* Check whether the full Gosper algorithm is implemented +# => 1/2^(n + 1) 
binomial(n, k - 1) */ +#closedform(indefsum(binomial(n, k)/2^n - binomial(n + 1, k)/2^(n + 1), k)); +#Time= 2690 msecs +# (- n + k - 1) binomial(n + 1, k) +#(d15) - -------------------------------- +# n +# 2 2 (n + 1) +# +#(c16) factcomb(makefact(%)); +#Time= 220 msecs +# n! +#(d16) ---------------- +# n +# 2 k! 2 (n - k)! +# Might be possible after fixing https://github.com/sympy/sympy/pull/1879 + raise NotImplementedError("Indefinite sum not supported") + + +@XFAIL +def test_R5(): + a, b, c, n, k = symbols('a b c n k', integer=True, positive=True) + sk = ((-1)**k)*(binomial(a + b, a + k) + *binomial(b + c, b + k)*binomial(c + a, c + k)) + Sm = Sum(sk, (k, 1, oo)) + T = Sm.doit() # hypergeometric series not calculated + assert T == factorial(a+b+c)/(factorial(a)*factorial(b)*factorial(c)) + + +def test_R6(): + n, k = symbols('n k', integer=True, positive=True) + gn = MatrixSymbol('gn', n + 2, 1) + Sm = Sum(gn[k, 0] - gn[k - 1, 0], (k, 1, n + 1)) + assert Sm.doit() == -gn[0, 0] + gn[n + 1, 0] + + +def test_R7(): + n, k = symbols('n k', integer=True, positive=True) + T = Sum(k**3,(k,1,n)).doit() + assert T.factor() == n**2*(n + 1)**2/4 + +@XFAIL +def test_R8(): + n, k = symbols('n k', integer=True, positive=True) + Sm = Sum(k**2*binomial(n, k), (k, 1, n)) + T = Sm.doit() #returns Piecewise function + assert T.combsimp() == n*(n + 1)*2**(n - 2) + + +def test_R9(): + n, k = symbols('n k', integer=True, positive=True) + Sm = Sum(binomial(n, k - 1)/k, (k, 1, n + 1)) + assert Sm.doit().simplify() == (2**(n + 1) - 1)/(n + 1) + + +@XFAIL +def test_R10(): + n, m, r, k = symbols('n m r k', integer=True, positive=True) + Sm = Sum(binomial(n, k)*binomial(m, r - k), (k, 0, r)) + T = Sm.doit() + T2 = T.combsimp().rewrite(factorial) + assert T2 == factorial(m + n)/(factorial(r)*factorial(m + n - r)) + assert T2 == binomial(m + n, r).rewrite(factorial) + # rewrite(binomial) is not working. 
+ # https://github.com/sympy/sympy/issues/7135 + T3 = T2.rewrite(binomial) + assert T3 == binomial(m + n, r) + + +@XFAIL +def test_R11(): + n, k = symbols('n k', integer=True, positive=True) + sk = binomial(n, k)*fibonacci(k) + Sm = Sum(sk, (k, 0, n)) + T = Sm.doit() + # Fibonacci simplification not implemented + # https://github.com/sympy/sympy/issues/7134 + assert T == fibonacci(2*n) + + +@XFAIL +def test_R12(): + n, k = symbols('n k', integer=True, positive=True) + Sm = Sum(fibonacci(k)**2, (k, 0, n)) + T = Sm.doit() + assert T == fibonacci(n)*fibonacci(n + 1) + + +@XFAIL +def test_R13(): + n, k = symbols('n k', integer=True, positive=True) + Sm = Sum(sin(k*x), (k, 1, n)) + T = Sm.doit() # Sum is not calculated + assert T.simplify() == cot(x/2)/2 - cos(x*(2*n + 1)/2)/(2*sin(x/2)) + + +@XFAIL +def test_R14(): + n, k = symbols('n k', integer=True, positive=True) + Sm = Sum(sin((2*k - 1)*x), (k, 1, n)) + T = Sm.doit() # Sum is not calculated + assert T.simplify() == sin(n*x)**2/sin(x) + + +@XFAIL +def test_R15(): + n, k = symbols('n k', integer=True, positive=True) + Sm = Sum(binomial(n - k, k), (k, 0, floor(n/2))) + T = Sm.doit() # Sum is not calculated + assert T.simplify() == fibonacci(n + 1) + + +def test_R16(): + k = symbols('k', integer=True, positive=True) + Sm = Sum(1/k**2 + 1/k**3, (k, 1, oo)) + assert Sm.doit() == zeta(3) + pi**2/6 + + +def test_R17(): + k = symbols('k', integer=True, positive=True) + assert abs(float(Sum(1/k**2 + 1/k**3, (k, 1, oo))) + - 2.8469909700078206) < 1e-15 + + +def test_R18(): + k = symbols('k', integer=True, positive=True) + Sm = Sum(1/(2**k*k**2), (k, 1, oo)) + T = Sm.doit() + assert T.simplify() == -log(2)**2/2 + pi**2/12 + + +@slow +@XFAIL +def test_R19(): + k = symbols('k', integer=True, positive=True) + Sm = Sum(1/((3*k + 1)*(3*k + 2)*(3*k + 3)), (k, 0, oo)) + T = Sm.doit() + # assert fails, T not simplified + assert T.simplify() == -log(3)/4 + sqrt(3)*pi/12 + + +@XFAIL +def test_R20(): + n, k = symbols('n k', integer=True, positive=True) + Sm = Sum(binomial(n, 4*k), (k, 0, oo)) + T = Sm.doit() + # assert fails, T not simplified + assert T.simplify() == 2**(n/2)*cos(pi*n/4)/2 + 2**(n - 1)/2 + + +@XFAIL +def test_R21(): + k = symbols('k', integer=True, positive=True) + Sm = Sum(1/(sqrt(k*(k + 1)) * (sqrt(k) + sqrt(k + 1))), (k, 1, oo)) + T = Sm.doit() # Sum not calculated + assert T.simplify() == 1 + + +# test_R22 answer not available in Wester samples +# Sum(Sum(binomial(n, k)*binomial(n - k, n - 2*k)*x**n*y**(n - 2*k), +# (k, 0, floor(n/2))), (n, 0, oo)) with abs(x*y)<1? + + +@XFAIL +def test_R23(): + n, k = symbols('n k', integer=True, positive=True) + Sm = Sum(Sum((factorial(n)/(factorial(k)**2*factorial(n - 2*k)))* + (x/y)**k*(x*y)**(n - k), (n, 2*k, oo)), (k, 0, oo)) + # Missing how to express constraint abs(x*y)<1? 
+ T = Sm.doit() # Sum not calculated + assert T == -1/sqrt(x**2*y**2 - 4*x**2 - 2*x*y + 1) + + +def test_R24(): + m, k = symbols('m k', integer=True, positive=True) + Sm = Sum(Product(k/(2*k - 1), (k, 1, m)), (m, 2, oo)) + assert Sm.doit() == pi/2 + + +def test_S1(): + k = symbols('k', integer=True, positive=True) + Pr = Product(gamma(k/3), (k, 1, 8)) + assert Pr.doit().simplify() == 640*sqrt(3)*pi**3/6561 + + +def test_S2(): + n, k = symbols('n k', integer=True, positive=True) + assert Product(k, (k, 1, n)).doit() == factorial(n) + + +def test_S3(): + n, k = symbols('n k', integer=True, positive=True) + assert Product(x**k, (k, 1, n)).doit().simplify() == x**(n*(n + 1)/2) + + +def test_S4(): + n, k = symbols('n k', integer=True, positive=True) + assert Product(1 + 1/k, (k, 1, n -1)).doit().simplify() == n + + +def test_S5(): + n, k = symbols('n k', integer=True, positive=True) + assert (Product((2*k - 1)/(2*k), (k, 1, n)).doit().gammasimp() == + gamma(n + S.Half)/(sqrt(pi)*gamma(n + 1))) + + +@XFAIL +def test_S6(): + n, k = symbols('n k', integer=True, positive=True) + # Product does not evaluate + assert (Product(x**2 -2*x*cos(k*pi/n) + 1, (k, 1, n - 1)).doit().simplify() + == (x**(2*n) - 1)/(x**2 - 1)) + + +@XFAIL +def test_S7(): + k = symbols('k', integer=True, positive=True) + Pr = Product((k**3 - 1)/(k**3 + 1), (k, 2, oo)) + T = Pr.doit() # Product does not evaluate + assert T.simplify() == R(2, 3) + + +@XFAIL +def test_S8(): + k = symbols('k', integer=True, positive=True) + Pr = Product(1 - 1/(2*k)**2, (k, 1, oo)) + T = Pr.doit() + # Product does not evaluate + assert T.simplify() == 2/pi + + +@XFAIL +def test_S9(): + k = symbols('k', integer=True, positive=True) + Pr = Product(1 + (-1)**(k + 1)/(2*k - 1), (k, 1, oo)) + T = Pr.doit() + # Product produces 0 + # https://github.com/sympy/sympy/issues/7133 + assert T.simplify() == sqrt(2) + + +@XFAIL +def test_S10(): + k = symbols('k', integer=True, positive=True) + Pr = Product((k*(k + 1) + 1 + I)/(k*(k + 1) + 1 - I), (k, 0, oo)) + T = Pr.doit() + # Product does not evaluate + assert T.simplify() == -1 + + +def test_T1(): + assert limit((1 + 1/n)**n, n, oo) == E + assert limit((1 - cos(x))/x**2, x, 0) == S.Half + + +def test_T2(): + assert limit((3**x + 5**x)**(1/x), x, oo) == 5 + + +def test_T3(): + assert limit(log(x)/(log(x) + sin(x)), x, oo) == 1 + + +def test_T4(): + assert limit((exp(x*exp(-x)/(exp(-x) + exp(-2*x**2/(x + 1)))) + - exp(x))/x, x, oo) == -exp(2) + + +def test_T5(): + assert limit(x*log(x)*log(x*exp(x) - x**2)**2/log(log(x**2 + + 2*exp(exp(3*x**3*log(x))))), x, oo) == R(1, 3) + + +def test_T6(): + assert limit(1/n * factorial(n)**(1/n), n, oo) == exp(-1) + + +def test_T7(): + limit(1/n * gamma(n + 1)**(1/n), n, oo) + + +def test_T8(): + a, z = symbols('a z', positive=True) + assert limit(gamma(z + a)/gamma(z)*exp(-a*log(z)), z, oo) == 1 + + +@XFAIL +def test_T9(): + z, k = symbols('z k', positive=True) + # raises NotImplementedError: + # Don't know how to calculate the mrv of '(1, k)' + assert limit(hyper((1, k), (1,), z/k), k, oo) == exp(z) + + +@XFAIL +def test_T10(): + # No longer raises PoleError, but should return euler-mascheroni constant + assert limit(zeta(x) - 1/(x - 1), x, 1) == integrate(-1/x + 1/floor(x), (x, 1, oo)) + +@XFAIL +def test_T11(): + n, k = symbols('n k', integer=True, positive=True) + # evaluates to 0 + assert limit(n**x/(x*product((1 + x/k), (k, 1, n))), n, oo) == gamma(x) + + +def test_T12(): + x, t = symbols('x t', real=True) + # Does not evaluate the limit but returns an expression with 
erf + assert limit(x * integrate(exp(-t**2), (t, 0, x))/(1 - exp(-x**2)), + x, 0) == 1 + + +def test_T13(): + x = symbols('x', real=True) + assert [limit(x/abs(x), x, 0, dir='-'), + limit(x/abs(x), x, 0, dir='+')] == [-1, 1] + + +def test_T14(): + x = symbols('x', real=True) + assert limit(atan(-log(x)), x, 0, dir='+') == pi/2 + + +def test_U1(): + x = symbols('x', real=True) + assert diff(abs(x), x) == sign(x) + + +def test_U2(): + f = Lambda(x, Piecewise((-x, x < 0), (x, x >= 0))) + assert diff(f(x), x) == Piecewise((-1, x < 0), (1, x >= 0)) + + +def test_U3(): + f = Lambda(x, Piecewise((x**2 - 1, x == 1), (x**3, x != 1))) + f1 = Lambda(x, diff(f(x), x)) + assert f1(x) == 3*x**2 + assert f1(1) == 3 + + +@XFAIL +def test_U4(): + n = symbols('n', integer=True, positive=True) + x = symbols('x', real=True) + d = diff(x**n, x, n) + assert d.rewrite(factorial) == factorial(n) + + +def test_U5(): + # issue 6681 + t = symbols('t') + ans = ( + Derivative(f(g(t)), g(t))*Derivative(g(t), (t, 2)) + + Derivative(f(g(t)), (g(t), 2))*Derivative(g(t), t)**2) + assert f(g(t)).diff(t, 2) == ans + assert ans.doit() == ans + + +def test_U6(): + h = Function('h') + T = integrate(f(y), (y, h(x), g(x))) + assert T.diff(x) == ( + f(g(x))*Derivative(g(x), x) - f(h(x))*Derivative(h(x), x)) + + +@XFAIL +def test_U7(): + p, t = symbols('p t', real=True) + # Exact differential => d(V(P, T)) => dV/dP DP + dV/dT DT + # raises ValueError: Since there is more than one variable in the + # expression, the variable(s) of differentiation must be supplied to + # differentiate f(p,t) + diff(f(p, t)) + + +def test_U8(): + x, y = symbols('x y', real=True) + eq = cos(x*y) + x + # If SymPy had implicit_diff() function this hack could be avoided + # TODO: Replace solve with solveset, current test fails for solveset + assert idiff(y - eq, y, x) == (-y*sin(x*y) + 1)/(x*sin(x*y) + 1) + + +def test_U9(): + # Wester sample case for Maple: + # O29 := diff(f(x, y), x) + diff(f(x, y), y); + # /d \ /d \ + # |-- f(x, y)| + |-- f(x, y)| + # \dx / \dy / + # + # O30 := factor(subs(f(x, y) = g(x^2 + y^2), %)); + # 2 2 + # 2 D(g)(x + y ) (x + y) + x, y = symbols('x y', real=True) + su = diff(f(x, y), x) + diff(f(x, y), y) + s2 = su.subs(f(x, y), g(x**2 + y**2)) + s3 = s2.doit().factor() + # Subs not performed, s3 = 2*(x + y)*Subs(Derivative( + # g(_xi_1), _xi_1), _xi_1, x**2 + y**2) + # Derivative(g(x*2 + y**2), x**2 + y**2) is not valid in SymPy, + # and probably will remain that way. You can take derivatives with respect + # to other expressions only if they are atomic, like a symbol or a + # function. + # D operator should be added to SymPy + # See https://github.com/sympy/sympy/issues/4719. 
+ assert s3 == (x + y)*Subs(Derivative(g(x), x), x, x**2 + y**2)*2 + + +def test_U10(): + # see issue 2519: + assert residue((z**3 + 5)/((z**4 - 1)*(z + 1)), z, -1) == R(-9, 4) + +@XFAIL +def test_U11(): + # assert (2*dx + dz) ^ (3*dx + dy + dz) ^ (dx + dy + 4*dz) == 8*dx ^ dy ^dz + raise NotImplementedError + + +@XFAIL +def test_U12(): + # Wester sample case: + # (c41) /* d(3 x^5 dy /\ dz + 5 x y^2 dz /\ dx + 8 z dx /\ dy) + # => (15 x^4 + 10 x y + 8) dx /\ dy /\ dz */ + # factor(ext_diff(3*x^5 * dy ~ dz + 5*x*y^2 * dz ~ dx + 8*z * dx ~ dy)); + # 4 + # (d41) (10 x y + 15 x + 8) dx dy dz + raise NotImplementedError( + "External diff of differential form not supported") + + +def test_U13(): + assert minimum(x**4 - x + 1, x) == -3*2**R(1,3)/8 + 1 + + +@XFAIL +def test_U14(): + #f = 1/(x**2 + y**2 + 1) + #assert [minimize(f), maximize(f)] == [0,1] + raise NotImplementedError("minimize(), maximize() not supported") + + +@XFAIL +def test_U15(): + raise NotImplementedError("minimize() not supported and also solve does \ +not support multivariate inequalities") + + +@XFAIL +def test_U16(): + raise NotImplementedError("minimize() not supported in SymPy and also \ +solve does not support multivariate inequalities") + + +@XFAIL +def test_U17(): + raise NotImplementedError("Linear programming, symbolic simplex not \ +supported in SymPy") + + +def test_V1(): + x = symbols('x', real=True) + assert integrate(abs(x), x) == Piecewise((-x**2/2, x <= 0), (x**2/2, True)) + + +def test_V2(): + assert integrate(Piecewise((-x, x < 0), (x, x >= 0)), x + ) == Piecewise((-x**2/2, x < 0), (x**2/2, True)) + + +def test_V3(): + assert integrate(1/(x**3 + 2),x).diff().simplify() == 1/(x**3 + 2) + + +def test_V4(): + assert integrate(2**x/sqrt(1 + 4**x), x) == asinh(2**x)/log(2) + + +@XFAIL +def test_V5(): + # Returns (-45*x**2 + 80*x - 41)/(5*sqrt(2*x - 1)*(4*x**2 - 4*x + 1)) + assert (integrate((3*x - 5)**2/(2*x - 1)**R(7, 2), x).simplify() == + (-41 + 80*x - 45*x**2)/(5*(2*x - 1)**R(5, 2))) + + +@XFAIL +def test_V6(): + # returns RootSum(40*_z**2 - 1, Lambda(_i, _i*log(-4*_i + exp(-m*x))))/m + assert (integrate(1/(2*exp(m*x) - 5*exp(-m*x)), x) == sqrt(10)*( + log(2*exp(m*x) - sqrt(10)) - log(2*exp(m*x) + sqrt(10)))/(20*m)) + + +def test_V7(): + r1 = integrate(sinh(x)**4/cosh(x)**2) + assert r1.simplify() == x*R(-3, 2) + sinh(x)**3/(2*cosh(x)) + 3*tanh(x)/2 + + +@XFAIL +def test_V8_V9(): +#Macsyma test case: +#(c27) /* This example involves several symbolic parameters +# => 1/sqrt(b^2 - a^2) log([sqrt(b^2 - a^2) tan(x/2) + a + b]/ +# [sqrt(b^2 - a^2) tan(x/2) - a - b]) (a^2 < b^2) +# [Gradshteyn and Ryzhik 2.553(3)] */ +#assume(b^2 > a^2)$ +#(c28) integrate(1/(a + b*cos(x)), x); +#(c29) trigsimp(ratsimp(diff(%, x))); +# 1 +#(d29) ------------ +# b cos(x) + a + raise NotImplementedError( + "Integrate with assumption not supported") + + +def test_V10(): + assert integrate(1/(3 + 3*cos(x) + 4*sin(x)), x) == log(4*tan(x/2) + 3)/4 + + +def test_V11(): + r1 = integrate(1/(4 + 3*cos(x) + 4*sin(x)), x) + r2 = factor(r1) + assert (logcombine(r2, force=True) == + log(((tan(x/2) + 1)/(tan(x/2) + 7))**R(1, 3))) + + +def test_V12(): + r1 = integrate(1/(5 + 3*cos(x) + 4*sin(x)), x) + assert r1 == -1/(tan(x/2) + 2) + + +@XFAIL +def test_V13(): + r1 = integrate(1/(6 + 3*cos(x) + 4*sin(x)), x) + # expression not simplified, returns: -sqrt(11)*I*log(tan(x/2) + 4/3 + # - sqrt(11)*I/3)/11 + sqrt(11)*I*log(tan(x/2) + 4/3 + sqrt(11)*I/3)/11 + assert r1.simplify() == 2*sqrt(11)*atan(sqrt(11)*(3*tan(x/2) + 4)/11)/11 + + +@slow +@XFAIL +def 
test_V14(): + r1 = integrate(log(abs(x**2 - y**2)), x) + # Piecewise result does not simplify to the desired result. + assert (r1.simplify() == x*log(abs(x**2 - y**2)) + + y*log(x + y) - y*log(x - y) - 2*x) + + +def test_V15(): + r1 = integrate(x*acot(x/y), x) + assert simplify(r1 - (x*y + (x**2 + y**2)*acot(x/y))/2) == 0 + + +@XFAIL +def test_V16(): + # Integral not calculated + assert integrate(cos(5*x)*Ci(2*x), x) == Ci(2*x)*sin(5*x)/5 - (Si(3*x) + Si(7*x))/10 + +@XFAIL +def test_V17(): + r1 = integrate((diff(f(x), x)*g(x) + - f(x)*diff(g(x), x))/(f(x)**2 - g(x)**2), x) + # integral not calculated + assert simplify(r1 - (f(x) - g(x))/(f(x) + g(x))/2) == 0 + + +@XFAIL +def test_W1(): + # The function has a pole at y. + # The integral has a Cauchy principal value of zero but SymPy returns -I*pi + # https://github.com/sympy/sympy/issues/7159 + assert integrate(1/(x - y), (x, y - 1, y + 1)) == 0 + + +@XFAIL +def test_W2(): + # The function has a pole at y. + # The integral is divergent but SymPy returns -2 + # https://github.com/sympy/sympy/issues/7160 + # Test case in Macsyma: + # (c6) errcatch(integrate(1/(x - a)^2, x, a - 1, a + 1)); + # Integral is divergent + assert integrate(1/(x - y)**2, (x, y - 1, y + 1)) is zoo + + +@XFAIL +@slow +def test_W3(): + # integral is not calculated + # https://github.com/sympy/sympy/issues/7161 + assert integrate(sqrt(x + 1/x - 2), (x, 0, 1)) == R(4, 3) + + +@XFAIL +@slow +def test_W4(): + # integral is not calculated + assert integrate(sqrt(x + 1/x - 2), (x, 1, 2)) == -2*sqrt(2)/3 + R(4, 3) + + +@XFAIL +@slow +def test_W5(): + # integral is not calculated + assert integrate(sqrt(x + 1/x - 2), (x, 0, 2)) == -2*sqrt(2)/3 + R(8, 3) + + +@XFAIL +@slow +def test_W6(): + # integral is not calculated + assert integrate(sqrt(2 - 2*cos(2*x))/2, (x, pi*R(-3, 4), -pi/4)) == sqrt(2) + + +def test_W7(): + a = symbols('a', positive=True) + r1 = integrate(cos(x)/(x**2 + a**2), (x, -oo, oo)) + assert r1.simplify() == pi*exp(-a)/a + + +@XFAIL +def test_W8(): + # Test case in Mathematica: + # In[19]:= Integrate[t^(a - 1)/(1 + t), {t, 0, Infinity}, + # Assumptions -> 0 < a < 1] + # Out[19]= Pi Csc[a Pi] + raise NotImplementedError( + "Integrate with assumption 0 < a < 1 not supported") + + +@XFAIL +@slow +def test_W9(): + # Integrand with a residue at infinity => -2 pi [sin(pi/5) + sin(2pi/5)] + # (principal value) [Levinson and Redheffer, p. 234] *) + r1 = integrate(5*x**3/(1 + x + x**2 + x**3 + x**4), (x, -oo, oo)) + r2 = r1.doit() + assert r2 == -2*pi*(sqrt(-sqrt(5)/8 + 5/8) + sqrt(sqrt(5)/8 + 5/8)) + + +@XFAIL +def test_W10(): + # integrate(1/[1 + x + x^2 + ... + x^(2 n)], x = -infinity..infinity) = + # 2 pi/(2 n + 1) [1 + cos(pi/[2 n + 1])] csc(2 pi/[2 n + 1]) + # [Levinson and Redheffer, p. 255] => 2 pi/5 [1 + cos(pi/5)] csc(2 pi/5) */ + r1 = integrate(x/(1 + x + x**2 + x**4), (x, -oo, oo)) + r2 = r1.doit() + assert r2 == 2*pi*(sqrt(5)/4 + 5/4)*csc(pi*R(2, 5))/5 + + +@XFAIL +def test_W11(): + # integral not calculated + assert (integrate(sqrt(1 - x**2)/(1 + x**2), (x, -1, 1)) == + pi*(-1 + sqrt(2))) + + +def test_W12(): + p = symbols('p', positive=True) + q = symbols('q', real=True) + r1 = integrate(x*exp(-p*x**2 + 2*q*x), (x, -oo, oo)) + assert r1.simplify() == sqrt(pi)*q*exp(q**2/p)/p**R(3, 2) + + +@XFAIL +def test_W13(): + # Integral not calculated. 
Expected result is 2*(Euler_mascheroni_constant) + r1 = integrate(1/log(x) + 1/(1 - x) - log(log(1/x)), (x, 0, 1)) + assert r1 == 2*EulerGamma + + +def test_W14(): + assert integrate(sin(x)/x*exp(2*I*x), (x, -oo, oo)) == 0 + + +@XFAIL +def test_W15(): + # integral not calculated + assert integrate(log(gamma(x))*cos(6*pi*x), (x, 0, 1)) == R(1, 12) + + +def test_W16(): + assert integrate((1 + x)**3*legendre_poly(1, x)*legendre_poly(2, x), + (x, -1, 1)) == R(36, 35) + + +def test_W17(): + a, b = symbols('a b', positive=True) + assert integrate(exp(-a*x)*besselj(0, b*x), + (x, 0, oo)) == 1/(b*sqrt(a**2/b**2 + 1)) + + +def test_W18(): + assert integrate((besselj(1, x)/x)**2, (x, 0, oo)) == 4/(3*pi) + + +@XFAIL +def test_W19(): + # Integral not calculated + # Expected result is (cos 7 - 1)/7 [Gradshteyn and Ryzhik 6.782(3)] + assert integrate(Ci(x)*besselj(0, 2*sqrt(7*x)), (x, 0, oo)) == (cos(7) - 1)/7 + + +@XFAIL +def test_W20(): + # integral not calculated + assert (integrate(x**2*polylog(3, 1/(x + 1)), (x, 0, 1)) == + -pi**2/36 - R(17, 108) + zeta(3)/4 + + (-pi**2/2 - 4*log(2) + log(2)**2 + 35/3)*log(2)/9) + + +def test_W21(): + assert abs(N(integrate(x**2*polylog(3, 1/(x + 1)), (x, 0, 1))) + - 0.210882859565594) < 1e-15 + + +def test_W22(): + t, u = symbols('t u', real=True) + s = Lambda(x, Piecewise((1, And(x >= 1, x <= 2)), (0, True))) + assert integrate(s(t)*cos(t), (t, 0, u)) == Piecewise( + (0, u < 0), + (-sin(Min(1, u)) + sin(Min(2, u)), True)) + + +@slow +def test_W23(): + a, b = symbols('a b', positive=True) + r1 = integrate(integrate(x/(x**2 + y**2), (x, a, b)), (y, -oo, oo)) + assert r1.collect(pi).cancel() == -pi*a + pi*b + + +def test_W23b(): + # like W23 but limits are reversed + a, b = symbols('a b', positive=True) + r2 = integrate(integrate(x/(x**2 + y**2), (y, -oo, oo)), (x, a, b)) + assert r2.collect(pi) == pi*(-a + b) + + +@XFAIL +@slow +def test_W24(): + if ON_CI: + skip("Too slow for CI.") + # Not that slow, but does not fully evaluate so simplify is slow. 
+ # Maybe also require doit() + x, y = symbols('x y', real=True) + r1 = integrate(integrate(sqrt(x**2 + y**2), (x, 0, 1)), (y, 0, 1)) + assert (r1 - (sqrt(2) + asinh(1))/3).simplify() == 0 + + +@XFAIL +@slow +def test_W25(): + if ON_CI: + skip("Too slow for CI.") + a, x, y = symbols('a x y', real=True) + i1 = integrate( + sin(a)*sin(y)/sqrt(1 - sin(a)**2*sin(x)**2*sin(y)**2), + (x, 0, pi/2)) + i2 = integrate(i1, (y, 0, pi/2)) + assert (i2 - pi*a/2).simplify() == 0 + + +def test_W26(): + x, y = symbols('x y', real=True) + assert integrate(integrate(abs(y - x**2), (y, 0, 2)), + (x, -1, 1)) == R(46, 15) + + +def test_W27(): + a, b, c = symbols('a b c') + assert integrate(integrate(integrate(1, (z, 0, c*(1 - x/a - y/b))), + (y, 0, b*(1 - x/a))), + (x, 0, a)) == a*b*c/6 + + +def test_X1(): + v, c = symbols('v c', real=True) + assert (series(1/sqrt(1 - (v/c)**2), v, x0=0, n=8) == + 5*v**6/(16*c**6) + 3*v**4/(8*c**4) + v**2/(2*c**2) + 1 + O(v**8)) + + +def test_X2(): + v, c = symbols('v c', real=True) + s1 = series(1/sqrt(1 - (v/c)**2), v, x0=0, n=8) + assert (1/s1**2).series(v, x0=0, n=8) == -v**2/c**2 + 1 + O(v**8) + + +def test_X3(): + s1 = (sin(x).series()/cos(x).series()).series() + s2 = tan(x).series() + assert s2 == x + x**3/3 + 2*x**5/15 + O(x**6) + assert s1 == s2 + + +def test_X4(): + s1 = log(sin(x)/x).series() + assert s1 == -x**2/6 - x**4/180 + O(x**6) + assert log(series(sin(x)/x)).series() == s1 + + +@XFAIL +def test_X5(): + # test case in Mathematica syntax: + # In[21]:= (* => [a f'(a d) + g(b d) + integrate(h(c y), y = 0..d)] + # + [a^2 f''(a d) + b g'(b d) + h(c d)] (x - d) *) + # In[22]:= D[f[a*x], x] + g[b*x] + Integrate[h[c*y], {y, 0, x}] + # Out[22]= g[b x] + Integrate[h[c y], {y, 0, x}] + a f'[a x] + # In[23]:= Series[%, {x, d, 1}] + # Out[23]= (g[b d] + Integrate[h[c y], {y, 0, d}] + a f'[a d]) + + # 2 2 + # (h[c d] + b g'[b d] + a f''[a d]) (-d + x) + O[-d + x] + h = Function('h') + a, b, c, d = symbols('a b c d', real=True) + # series() raises NotImplementedError: + # The _eval_nseries method should be added to to give terms up to O(x**n) at x=0 + series(diff(f(a*x), x) + g(b*x) + integrate(h(c*y), (y, 0, x)), + x, x0=d, n=2) + # assert missing, until exception is removed + + +def test_X6(): + # Taylor series of nonscalar objects (noncommutative multiplication) + # expected result => (B A - A B) t^2/2 + O(t^3) [Stanly Steinberg] + a, b = symbols('a b', commutative=False, scalar=False) + assert (series(exp((a + b)*x) - exp(a*x) * exp(b*x), x, x0=0, n=3) == + x**2*(-a*b/2 + b*a/2) + O(x**3)) + + +def test_X7(): + # => sum( Bernoulli[k]/k! x^(k - 2), k = 1..infinity ) + # = 1/x^2 - 1/(2 x) + 1/12 - x^2/720 + x^4/30240 + O(x^6) + # [Levinson and Redheffer, p. 
173] + assert (series(1/(x*(exp(x) - 1)), x, 0, 7) == x**(-2) - 1/(2*x) + + R(1, 12) - x**2/720 + x**4/30240 - x**6/1209600 + O(x**7)) + + +def test_X8(): + # Puiseux series (terms with fractional degree): + # => 1/sqrt(x - 3/2 pi) + (x - 3/2 pi)^(3/2) / 12 + O([x - 3/2 pi]^(7/2)) + + # see issue 7167: + x = symbols('x', real=True) + assert (series(sqrt(sec(x)), x, x0=pi*3/2, n=4) == + 1/sqrt(x - pi*R(3, 2)) + (x - pi*R(3, 2))**R(3, 2)/12 + + (x - pi*R(3, 2))**R(7, 2)/160 + O((x - pi*R(3, 2))**4, (x, pi*R(3, 2)))) + + +def test_X9(): + assert (series(x**x, x, x0=0, n=4) == 1 + x*log(x) + x**2*log(x)**2/2 + + x**3*log(x)**3/6 + O(x**4*log(x)**4)) + + +def test_X10(): + z, w = symbols('z w') + assert (series(log(sinh(z)) + log(cosh(z + w)), z, x0=0, n=2) == + log(cosh(w)) + log(z) + z*sinh(w)/cosh(w) + O(z**2)) + + +def test_X11(): + z, w = symbols('z w') + assert (series(log(sinh(z) * cosh(z + w)), z, x0=0, n=2) == + log(cosh(w)) + log(z) + z*sinh(w)/cosh(w) + O(z**2)) + + +@XFAIL +def test_X12(): + # Look at the generalized Taylor series around x = 1 + # Result => (x - 1)^a/e^b [1 - (a + 2 b) (x - 1) / 2 + O((x - 1)^2)] + a, b, x = symbols('a b x', real=True) + # series returns O(log(x-1)**2) + # https://github.com/sympy/sympy/issues/7168 + assert (series(log(x)**a*exp(-b*x), x, x0=1, n=2) == + (x - 1)**a/exp(b)*(1 - (a + 2*b)*(x - 1)/2 + O((x - 1)**2))) + + +def test_X13(): + assert series(sqrt(2*x**2 + 1), x, x0=oo, n=1) == sqrt(2)*x + O(1/x, (x, oo)) + + +@XFAIL +def test_X14(): + # Wallis' product => 1/sqrt(pi n) + ... [Knopp, p. 385] + assert series(1/2**(2*n)*binomial(2*n, n), + n, x==oo, n=1) == 1/(sqrt(pi)*sqrt(n)) + O(1/x, (x, oo)) + + +@SKIP("https://github.com/sympy/sympy/issues/7164") +def test_X15(): + # => 0!/x - 1!/x^2 + 2!/x^3 - 3!/x^4 + O(1/x^5) [Knopp, p. 544] + x, t = symbols('x t', real=True) + # raises RuntimeError: maximum recursion depth exceeded + # https://github.com/sympy/sympy/issues/7164 + # 2019-02-17: Raises + # PoleError: + # Asymptotic expansion of Ei around [-oo] is not implemented. + e1 = integrate(exp(-t)/t, (t, x, oo)) + assert (series(e1, x, x0=oo, n=5) == + 6/x**4 + 2/x**3 - 1/x**2 + 1/x + O(x**(-5), (x, oo))) + + +def test_X16(): + # Multivariate Taylor series expansion => 1 - (x^2 + 2 x y + y^2)/2 + O(x^4) + assert (series(cos(x + y), x + y, x0=0, n=4) == 1 - (x + y)**2/2 + + O(x**4 + x**3*y + x**2*y**2 + x*y**3 + y**4, x, y)) + + +@XFAIL +def test_X17(): + # Power series (compute the general formula) + # (c41) powerseries(log(sin(x)/x), x, 0); + # /aquarius/data2/opt/local/macsyma_422/library1/trgred.so being loaded. + # inf + # ==== i1 2 i1 2 i1 + # \ (- 1) 2 bern(2 i1) x + # (d41) > ------------------------------ + # / 2 i1 (2 i1)! + # ==== + # i1 = 1 + # fps does not calculate + assert fps(log(sin(x)/x)) == \ + Sum((-1)**k*2**(2*k - 1)*bernoulli(2*k)*x**(2*k)/(k*factorial(2*k)), (k, 1, oo)) + + +@XFAIL +def test_X18(): + # Power series (compute the general formula). Maple FPS: + # > FormalPowerSeries(exp(-x)*sin(x), x = 0); + # infinity + # ----- (1/2 k) k + # \ 2 sin(3/4 k Pi) x + # ) ------------------------- + # / k! + # ----- + # + # Now, SymPy returns + # oo + # _____ + # \ ` + # \ / k k\ + # \ k |I*(-1 - I) I*(-1 + I) | + # \ x *|----------- - -----------| + # / \ 2 2 / + # / ------------------------------ + # / k! 
+ # /____, + # k = 0 + k = Dummy('k') + assert fps(exp(-x)*sin(x)) == \ + Sum(2**(S.Half*k)*sin(R(3, 4)*k*pi)*x**k/factorial(k), (k, 0, oo)) + + +@XFAIL +def test_X19(): + # (c45) /* Derive an explicit Taylor series solution of y as a function of + # x from the following implicit relation: + # y = x - 1 + (x - 1)^2/2 + 2/3 (x - 1)^3 + (x - 1)^4 + + # 17/10 (x - 1)^5 + ... + # */ + # x = sin(y) + cos(y); + # Time= 0 msecs + # (d45) x = sin(y) + cos(y) + # + # (c46) taylor_revert(%, y, 7); + raise NotImplementedError("Solve using series not supported. \ +Inverse Taylor series expansion also not supported") + + +@XFAIL +def test_X20(): + # Pade (rational function) approximation => (2 - x)/(2 + x) + # > numapprox[pade](exp(-x), x = 0, [1, 1]); + # bytes used=9019816, alloc=3669344, time=13.12 + # 1 - 1/2 x + # --------- + # 1 + 1/2 x + # mpmath support numeric Pade approximant but there is + # no symbolic implementation in SymPy + # https://en.wikipedia.org/wiki/Pad%C3%A9_approximant + raise NotImplementedError("Symbolic Pade approximant not supported") + + +def test_X21(): + """ + Test whether `fourier_series` of x periodical on the [-p, p] interval equals + `- (2 p / pi) sum( (-1)^n / n sin(n pi x / p), n = 1..infinity )`. + """ + p = symbols('p', positive=True) + n = symbols('n', positive=True, integer=True) + s = fourier_series(x, (x, -p, p)) + + # All cosine coefficients are equal to 0 + assert s.an.formula == 0 + + # Check for sine coefficients + assert s.bn.formula.subs(s.bn.variables[0], 0) == 0 + assert s.bn.formula.subs(s.bn.variables[0], n) == \ + -2*p/pi * (-1)**n / n * sin(n*pi*x/p) + + +@XFAIL +def test_X22(): + # (c52) /* => p / 2 + # - (2 p / pi^2) sum( [1 - (-1)^n] cos(n pi x / p) / n^2, + # n = 1..infinity ) */ + # fourier_series(abs(x), x, p); + # p + # (e52) a = - + # 0 2 + # + # %nn + # (2 (- 1) - 2) p + # (e53) a = ------------------ + # %nn 2 2 + # %pi %nn + # + # (e54) b = 0 + # %nn + # + # Time= 5290 msecs + # inf %nn %pi %nn x + # ==== (2 (- 1) - 2) cos(---------) + # \ p + # p > ------------------------------- + # / 2 + # ==== %nn + # %nn = 1 p + # (d54) ----------------------------------------- + - + # 2 2 + # %pi + raise NotImplementedError("Fourier series not supported") + + +def test_Y1(): + t = symbols('t', positive=True) + w = symbols('w', real=True) + s = symbols('s') + F, _, _ = laplace_transform(cos((w - 1)*t), t, s) + assert F == s/(s**2 + (w - 1)**2) + + +def test_Y2(): + t = symbols('t', positive=True) + w = symbols('w', real=True) + s = symbols('s') + f = inverse_laplace_transform(s/(s**2 + (w - 1)**2), s, t, simplify=True) + assert f == cos(t*(w - 1)) + + +def test_Y3(): + t = symbols('t', positive=True) + w = symbols('w', real=True) + s = symbols('s') + F, _, _ = laplace_transform(sinh(w*t)*cosh(w*t), t, s, simplify=True) + assert F == w/(s**2 - 4*w**2) + + +def test_Y4(): + t = symbols('t', positive=True) + s = symbols('s') + F, _, _ = laplace_transform(erf(3/sqrt(t)), t, s, simplify=True) + assert F == 1/s - exp(-6*sqrt(s))/s + + +def test_Y5_Y6(): +# Solve y'' + y = 4 [H(t - 1) - H(t - 2)], y(0) = 1, y'(0) = 0 where H is the +# Heaviside (unit step) function (the RHS describes a pulse of magnitude 4 and +# duration 1). See David A. Sanchez, Richard C. Allen, Jr. and Walter T. +# Kyner, _Differential Equations: An Introduction_, Addison-Wesley Publishing +# Company, 1983, p. 211. 
First, take the Laplace transform of the ODE +# => s^2 Y(s) - s + Y(s) = 4/s [e^(-s) - e^(-2 s)] +# where Y(s) is the Laplace transform of y(t) + t = symbols('t', positive=True) + s = symbols('s') + y = Function('y') + F, _, _ = laplace_transform(diff(y(t), t, 2) + y(t) + - 4*(Heaviside(t - 1) - Heaviside(t - 2)), + t, s, simplify=True) + D = (F - (s**2*LaplaceTransform(y(t), t, s) - s*y(0) + + LaplaceTransform(y(t), t, s) - Subs(Derivative(y(t), t), t, 0) + + 4*(1 - exp(s))*exp(-2*s)/s)).simplify(doit=False) + assert D == 0 +# TODO implement second part of test case +# Now, solve for Y(s) and then take the inverse Laplace transform +# => Y(s) = s/(s^2 + 1) + 4 [1/s - s/(s^2 + 1)] [e^(-s) - e^(-2 s)] +# => y(t) = cos t + 4 {[1 - cos(t - 1)] H(t - 1) - [1 - cos(t - 2)] H(t - 2)} + + +@XFAIL +def test_Y7(): + # What is the Laplace transform of an infinite square wave? + # => 1/s + 2 sum( (-1)^n e^(- s n a)/s, n = 1..infinity ) + # [Sanchez, Allen and Kyner, p. 213] + t = symbols('t', positive=True) + a = symbols('a', real=True) + s = symbols('s') + F, _, _ = laplace_transform(1 + 2*Sum((-1)**n*Heaviside(t - n*a), + (n, 1, oo)), t, s) + # returns 2*LaplaceTransform(Sum((-1)**n*Heaviside(-a*n + t), + # (n, 1, oo)), t, s) + 1/s + # https://github.com/sympy/sympy/issues/7177 + assert F == 2*Sum((-1)**n*exp(-a*n*s)/s, (n, 1, oo)) + 1/s + + +@XFAIL +def test_Y8(): + assert fourier_transform(1, x, z) == DiracDelta(z) + + +def test_Y9(): + assert (fourier_transform(exp(-9*x**2), x, z) == + sqrt(pi)*exp(-pi**2*z**2/9)/3) + + +def test_Y10(): + assert (fourier_transform(abs(x)*exp(-3*abs(x)), x, z).cancel() == + (-8*pi**2*z**2 + 18)/(16*pi**4*z**4 + 72*pi**2*z**2 + 81)) + + +@SKIP("https://github.com/sympy/sympy/issues/7181") +@slow +def test_Y11(): + # => pi cot(pi s) (0 < Re s < 1) [Gradshteyn and Ryzhik 17.43(5)] + x, s = symbols('x s') + # raises RuntimeError: maximum recursion depth exceeded + # https://github.com/sympy/sympy/issues/7181 + # Update 2019-02-17 raises: + # TypeError: cannot unpack non-iterable MellinTransform object + F, _, _ = mellin_transform(1/(1 - x), x, s) + assert F == pi*cot(pi*s) + + +@XFAIL +def test_Y12(): + # => 2^(s - 4) gamma(s/2)/gamma(4 - s/2) (0 < Re s < 1) + # [Gradshteyn and Ryzhik 17.43(16)] + x, s = symbols('x s') + # returns Wrong value -2**(s - 4)*gamma(s/2 - 3)/gamma(-s/2 + 1) + # https://github.com/sympy/sympy/issues/7182 + F, _, _ = mellin_transform(besselj(3, x)/x**3, x, s) + assert F == -2**(s - 4)*gamma(s/2)/gamma(-s/2 + 4) + + +@XFAIL +def test_Y13(): +# Z[H(t - m T)] => z/[z^m (z - 1)] (H is the Heaviside (unit step) function) z + raise NotImplementedError("z-transform not supported") + + +@XFAIL +def test_Y14(): +# Z[H(t - m T)] => z/[z^m (z - 1)] (H is the Heaviside (unit step) function) + raise NotImplementedError("z-transform not supported") + + +def test_Z1(): + r = Function('r') + assert (rsolve(r(n + 2) - 2*r(n + 1) + r(n) - 2, r(n), + {r(0): 1, r(1): m}).simplify() == n**2 + n*(m - 2) + 1) + + +def test_Z2(): + r = Function('r') + assert (rsolve(r(n) - (5*r(n - 1) - 6*r(n - 2)), r(n), {r(0): 0, r(1): 1}) + == -2**n + 3**n) + + +def test_Z3(): + # => r(n) = Fibonacci[n + 1] [Cohen, p. 
83] + r = Function('r') + # recurrence solution is correct, Wester expects it to be simplified to + # fibonacci(n+1), but that is quite hard + expected = ((S(1)/2 - sqrt(5)/2)**n*(S(1)/2 - sqrt(5)/10) + + (S(1)/2 + sqrt(5)/2)**n*(sqrt(5)/10 + S(1)/2)) + sol = rsolve(r(n) - (r(n - 1) + r(n - 2)), r(n), {r(1): 1, r(2): 2}) + assert sol == expected + + +@XFAIL +def test_Z4(): +# => [c^(n+1) [c^(n+1) - 2 c - 2] + (n+1) c^2 + 2 c - n] / [(c-1)^3 (c+1)] +# [Joan Z. Yu and Robert Israel in sci.math.symbolic] + r = Function('r') + c = symbols('c') + # raises ValueError: Polynomial or rational function expected, + # got '(c**2 - c**n)/(c - c**n) + s = rsolve(r(n) - ((1 + c - c**(n-1) - c**(n+1))/(1 - c**n)*r(n - 1) + - c*(1 - c**(n-2))/(1 - c**(n-1))*r(n - 2) + 1), + r(n), {r(1): 1, r(2): (2 + 2*c + c**2)/(1 + c)}) + assert (s - (c*(n + 1)*(c*(n + 1) - 2*c - 2) + + (n + 1)*c**2 + 2*c - n)/((c-1)**3*(c+1)) == 0) + + +@XFAIL +def test_Z5(): + # Second order ODE with initial conditions---solve directly + # transform: f(t) = sin(2 t)/8 - t cos(2 t)/4 + C1, C2 = symbols('C1 C2') + # initial conditions not supported, this is a manual workaround + # https://github.com/sympy/sympy/issues/4720 + eq = Derivative(f(x), x, 2) + 4*f(x) - sin(2*x) + sol = dsolve(eq, f(x)) + f0 = Lambda(x, sol.rhs) + assert f0(x) == C2*sin(2*x) + (C1 - x/4)*cos(2*x) + f1 = Lambda(x, diff(f0(x), x)) + # TODO: Replace solve with solveset, when it works for solveset + const_dict = solve((f0(0), f1(0))) + result = f0(x).subs(C1, const_dict[C1]).subs(C2, const_dict[C2]) + assert result == -x*cos(2*x)/4 + sin(2*x)/8 + # Result is OK, but ODE solving with initial conditions should be + # supported without all this manual work + raise NotImplementedError('ODE solving with initial conditions \ +not supported') + + +@XFAIL +def test_Z6(): + # Second order ODE with initial conditions---solve using Laplace + # transform: f(t) = sin(2 t)/8 - t cos(2 t)/4 + t = symbols('t', positive=True) + s = symbols('s') + eq = Derivative(f(t), t, 2) + 4*f(t) - sin(2*t) + F, _, _ = laplace_transform(eq, t, s) + # Laplace transform for diff() not calculated + # https://github.com/sympy/sympy/issues/7176 + assert (F == s**2*LaplaceTransform(f(t), t, s) + + 4*LaplaceTransform(f(t), t, s) - 2/(s**2 + 4)) + # rest of test case not implemented diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_xxe.py b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_xxe.py new file mode 100644 index 0000000000000000000000000000000000000000..3936e8aa135dde5f22c71548e2f90ed58ac25cb8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/tests/test_xxe.py @@ -0,0 +1,3 @@ +# A test file for XXE injection +# Username: Test +# Password: Test diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/timeutils.py b/venv/lib/python3.10/site-packages/sympy/utilities/timeutils.py new file mode 100644 index 0000000000000000000000000000000000000000..55fc4b6fc832b6721907929a19949d9b17476315 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/timeutils.py @@ -0,0 +1,76 @@ +"""Simple tools for timing functions' execution, when IPython is not available. """ + + +import timeit +import math + + +_scales = [1e0, 1e3, 1e6, 1e9] +_units = ['s', 'ms', '\N{GREEK SMALL LETTER MU}s', 'ns'] + + +def timed(func, setup="pass", limit=None): + """Adaptively measure execution time of a function. 
""" + timer = timeit.Timer(func, setup=setup) + repeat, number = 3, 1 + + for i in range(1, 10): + if timer.timeit(number) >= 0.2: + break + elif limit is not None and number >= limit: + break + else: + number *= 10 + + time = min(timer.repeat(repeat, number)) / number + + if time > 0.0: + order = min(-int(math.floor(math.log10(time)) // 3), 3) + else: + order = 3 + + return (number, time, time*_scales[order], _units[order]) + + +# Code for doing inline timings of recursive algorithms. + +def __do_timings(): + import os + res = os.getenv('SYMPY_TIMINGS', '') + res = [x.strip() for x in res.split(',')] + return set(res) + +_do_timings = __do_timings() +_timestack = None + + +def _print_timestack(stack, level=1): + print('-'*level, '%.2f %s%s' % (stack[2], stack[0], stack[3])) + for s in stack[1]: + _print_timestack(s, level + 1) + + +def timethis(name): + def decorator(func): + global _do_timings + if name not in _do_timings: + return func + + def wrapper(*args, **kwargs): + from time import time + global _timestack + oldtimestack = _timestack + _timestack = [func.func_name, [], 0, args] + t1 = time() + r = func(*args, **kwargs) + t2 = time() + _timestack[2] = t2 - t1 + if oldtimestack is not None: + oldtimestack[1].append(_timestack) + _timestack = oldtimestack + else: + _print_timestack(_timestack) + _timestack = None + return r + return wrapper + return decorator diff --git a/venv/lib/python3.10/site-packages/sympy/utilities/tmpfiles.py b/venv/lib/python3.10/site-packages/sympy/utilities/tmpfiles.py new file mode 100644 index 0000000000000000000000000000000000000000..f09216aac76e2fc29e556a488875a2e3c6975f7c --- /dev/null +++ b/venv/lib/python3.10/site-packages/sympy/utilities/tmpfiles.py @@ -0,0 +1,12 @@ +""" +.. deprecated:: 1.6 + + sympy.utilities.tmpfiles has been renamed to sympy.testing.tmpfiles. +""" +from sympy.utilities.exceptions import sympy_deprecation_warning + +sympy_deprecation_warning("The sympy.utilities.tmpfiles submodule is deprecated. Use sympy.testing.tmpfiles instead.", + deprecated_since_version="1.6", + active_deprecations_target="deprecated-sympy-utilities-submodules") + +from sympy.testing.tmpfiles import * # noqa:F401