index
int64
0
731k
package
stringlengths
2
98
name
stringlengths
1
76
docstring
stringlengths
0
281k
code
stringlengths
4
1.07M
signature
stringlengths
2
42.8k
43,718
sapero_math.lcm
REP
null
def REP(x):
    """Return lcm(1, 2, ..., x), the least common multiple of the first x integers.

    The original built a list of 1..x and folded an external LCM helper over
    it; it raised IndexError for x < 2 (empty/short list) and carried a
    redundant length check. This version folds ``math.gcd`` directly and
    returns 1 for x <= 1 (the empty-product convention, matching the old
    x == 1 result).
    """
    result = 1
    for i in range(2, x + 1):
        # lcm(a, b) == a * b // gcd(a, b); dividing before multiplying is
        # unnecessary in Python since ints are arbitrary precision.
        result = result * i // math.gcd(result, i)
    return result
(x)
43,720
sapero_math.chebyshev
chebyshev_1
null
def chebyshev_1(x):
    """First Chebyshev function: theta(x) = sum of log(p) over primes p <= x.

    Relies on the sibling ``primelist`` helper. ``math.log(p)`` is the
    natural log, identical in value to the original ``math.log(p, math.e)``.
    Returns 0 when there are no primes <= x.
    """
    return sum(math.log(p) for p in primelist(x))
(x)
43,721
sapero_math.chebyshev
chebyshev_2
null
def chebyshev_2(x):
    """Second Chebyshev function: psi(x) = sum of Lambda(k) for 1 <= k <= x.

    Relies on the sibling ``vonmangoldt`` helper; the summation order matches
    the original loop exactly, so float results are bit-identical.
    """
    return sum(vonmangoldt(k) for k in range(1, x + 1))
(x)
43,722
sapero_math.chebyshev
chebyshev_2lcm
null
def chebyshev_2lcm(x):
    """Compute psi(x) via the identity psi(x) = log(lcm(1, ..., x)).

    Delegates the lcm computation to the sibling ``REP`` helper.
    """
    lcm_value = REP(x)
    return math.log(lcm_value, math.e)
(x)
43,723
sapero_math.chebyshev
e_psi
null
def e_psi(x):
    """Return exp(psi(x)), the exponential of the second Chebyshev function."""
    psi = chebyshev_2(x)
    return math.exp(psi)
(x)
43,725
sapero_math.primegen
pk
null
def pk(n):
    """Return (p, k) when n == p**k for a prime p and k >= 1, else (0, 0).

    The original repeatedly float-divided n by each prime and compared the
    remainder against 1/p with ``==`` — a floating-point equality test that
    breaks for large n. This version uses exact integer arithmetic: the
    smallest divisor >= 2 of n is necessarily prime; divide it out and n is a
    prime power iff nothing remains. It also no longer needs ``primelist``.
    """
    if n < 2:
        return (0, 0)
    # Find the smallest divisor d >= 2 of n; it is always prime.
    d = 2
    while d * d <= n and n % d:
        d += 1
    if d * d > n:
        d = n  # n itself is prime
    k = 0
    m = n
    while m % d == 0:
        m //= d
        k += 1
    # n is a pure prime power exactly when dividing out d exhausts it.
    return (d, k) if m == 1 else (0, 0)
(n)
43,726
sapero_math.primegen
primecheck
null
def primecheck(x):
    """Return x if x is prime, otherwise 0.

    Fixes two defects in the original: primecheck(1) returned 1 (1 is not
    prime) and negative inputs were returned unchanged (treated as prime),
    because the trial-division range was empty. Trial division now stops at
    isqrt(x) instead of x//2 — a divisor above the square root implies one
    below it, so the result is unchanged and the loop is much shorter.
    """
    if x < 2:
        return 0  # 0, 1 and negatives are not prime
    for d in range(2, math.isqrt(x) + 1):
        if x % d == 0:
            return 0
    return x
(x)
43,728
sapero_math.primegen
primelist
null
def primelist(x):
    """Return the list of all primes <= x (empty list for x < 2).

    Replaces per-candidate trial division (O(n*sqrt(n)) via the sibling
    ``primecheck``) with a sieve of Eratosthenes, which is O(n log log n)
    and has no dependency on other helpers. Output is identical: ascending
    primes up to and including x.
    """
    if x < 2:
        return []
    # sieve[i] is 1 while i is still presumed prime.
    sieve = bytearray([1]) * (x + 1)
    sieve[0] = sieve[1] = 0
    for i in range(2, math.isqrt(x) + 1):
        if sieve[i]:
            # Cross off multiples starting at i*i; smaller multiples were
            # already handled by smaller prime factors.
            sieve[i * i :: i] = bytearray(len(range(i * i, x + 1, i)))
    return [i for i in range(2, x + 1) if sieve[i]]
(x)
43,729
sapero_math.chebyshev
vonmangoldt
null
def vonmangoldt(n):
    """Von Mangoldt function Lambda(n): log(p) when n == p**k (k >= 1), else 0.0.

    Delegates prime-power detection to the sibling ``pk`` helper.
    """
    base, exponent = pk(n)
    if (base, exponent) != (0, 0) and exponent >= 1:
        return math.log(base, math.e)
    return float(0)
(n)
43,730
cppy
CppyBuildExt
A custom build extension enforcing c++11 standard on all platforms. On Windows, FH4 Exception Handling can be disabled by setting the CPPY_DISABLE_FH4 environment variable. This avoids requiring VCRUNTIME140_1.dll
class CppyBuildExt(build_ext):
    """A custom build extension enforcing c++11 standard on all platforms.

    On Windows, FH4 Exception Handling can be disabled by setting the
    CPPY_DISABLE_FH4 environment variable. This avoids requiring
    VCRUNTIME140_1.dll
    """

    # MSVC has no dedicated c++11 flag and already defaults to c++14.
    c_opts = {"msvc": ["/EHsc"], "unix": ["-std=c++11"]}

    def build_extensions(self):
        compiler_type = self.compiler.compiler_type
        flags = self.c_opts.get(compiler_type, [])
        include_dir = get_include()
        for extension in self.extensions:
            extension.include_dirs.insert(0, include_dir)
            extension.extra_compile_args = flags
            if sys.platform == "darwin":
                # `compiler_so` exists on Unix-style compilers, which is what
                # macOS uses, so it is safe to read here.
                compiler_cmd = self.compiler.compiler_so
                # Detect Clang even when invoked through an absolute path.
                if compiler_cmd is not None and 'clang' in compiler_cmd[0]:
                    # Ensure a recent enough C++ standard library is linked.
                    extension.extra_compile_args += ["-stdlib=libc++"]
                    extension.extra_link_args += ["-stdlib=libc++"]
            if compiler_type == "msvc" and os.environ.get("CPPY_DISABLE_FH4"):
                # Opt out of the FH4 exception-handling runtime so that
                # VCRUNTIME140_1.dll is not required. For details, see:
                # https://devblogs.microsoft.com/cppblog/making-cpp-exception-handling-smaller-x64/
                # https://github.com/joerick/cibuildwheel/issues/423#issuecomment-677763904
                extension.extra_compile_args.append("/d2FH4-")
        build_ext.build_extensions(self)
(dist, **kw)
43,731
distutils.cmd
__getattr__
null
def __getattr__(self, attr):
    """For 'dry_run', fall back to self._dry_run, then to the distribution."""
    if attr != 'dry_run':
        raise AttributeError(attr)
    value = getattr(self, "_" + attr)
    if value is None:
        # Unset on the command itself: defer to the Distribution object.
        return getattr(self.distribution, attr)
    return value
(self, attr)
43,732
setuptools
__init__
Construct the command for dist, updating vars(self) with any keyword parameters.
def __init__(self, dist, **kw):
    """
    Construct the command for dist, updating
    vars(self) with any keyword parameters.
    """
    super().__init__(dist)
    # vars(self) is self.__dict__; merge all keyword overrides in one go.
    self.__dict__.update(kw)
(self, dist, **kw)
43,733
setuptools.command.build_ext
__get_output_extensions
null
def __get_output_extensions(self):
    """Yield the filename suffixes produced for a stub loader."""
    for suffix in ('.py', '.pyc'):
        yield suffix
    # '.pyo' is only produced when build_py runs with optimization enabled.
    if self.get_finalized_command('build_py').optimize:
        yield '.pyo'
(self)
43,734
setuptools.command.build_ext
__get_stubs_outputs
null
def __get_stubs_outputs(self):
    """Return every output file name of every extension needing a stub."""
    # Base path under build_lib for each stubbed extension.
    stub_bases = (
        os.path.join(self.build_lib, *ext._full_name.split('.'))
        for ext in self.extensions
        if ext._needs_stub
    )
    # Pair each base with every produced suffix.
    return [
        base + suffix
        for base, suffix in itertools.product(
            stub_bases, self.__get_output_extensions()
        )
    ]
(self)
43,735
distutils.command.build_ext
_build_extensions_parallel
null
def _build_extensions_parallel(self):
    """Build all extensions concurrently, falling back to serial builds."""
    workers = self.parallel
    if self.parallel is True:
        workers = os.cpu_count()  # may return None
    try:
        from concurrent.futures import ThreadPoolExecutor
    except ImportError:
        workers = None

    if workers is None:
        # No usable worker count: build one extension at a time.
        self._build_extensions_serial()
        return

    with ThreadPoolExecutor(max_workers=workers) as executor:
        pending = [
            executor.submit(self.build_extension, ext) for ext in self.extensions
        ]
        # Collect results in submission order so error filtering matches
        # each extension with its own future.
        for ext, future in zip(self.extensions, pending):
            with self._filter_build_errors(ext):
                future.result()
(self)
43,736
distutils.command.build_ext
_build_extensions_serial
null
def _build_extensions_serial(self):
    """Build the extensions one at a time, filtering per-extension errors."""
    for extension in self.extensions:
        with self._filter_build_errors(extension):
            self.build_extension(extension)
(self)
43,737
setuptools.command.build_ext
_compile_and_remove_stub
null
def _compile_and_remove_stub(self, stub_file: str):
    """Byte-compile the stub loader, then delete its .py source."""
    from distutils.util import byte_compile

    # Always produce the unoptimized .pyc.
    byte_compile([stub_file], optimize=0, force=True, dry_run=self.dry_run)
    opt_level = self.get_finalized_command('install_lib').optimize
    if opt_level > 0:
        # Additionally produce the optimized variant requested by install_lib.
        byte_compile(
            [stub_file], optimize=opt_level, force=True, dry_run=self.dry_run
        )
    if os.path.exists(stub_file) and not self.dry_run:
        os.unlink(stub_file)
(self, stub_file: str)
43,738
setuptools
_ensure_stringlike
null
def _ensure_stringlike(self, option, what, default=None):
    """Return the value of 'option' as a string, storing 'default' if unset.

    Raises DistutilsOptionError when the value is set but not a string.
    """
    value = getattr(self, option)
    if value is None:
        setattr(self, option, default)
        return default
    if not isinstance(value, str):
        raise DistutilsOptionError(
            "'%s' must be a %s (got `%s`)" % (option, what, value)
        )
    return value
(self, option, what, default=None)
43,739
distutils.cmd
_ensure_tested_string
null
def _ensure_tested_string(self, option, tester, what, error_fmt, default=None):
    """Validate a string option with 'tester', raising on failure."""
    value = self._ensure_stringlike(option, what, default)
    if value is None or tester(value):
        return
    raise DistutilsOptionError(
        ("error in '%s' option: " + error_fmt) % (option, value)
    )
(self, option, tester, what, error_fmt, default=None)
43,740
distutils.command.build_ext
_filter_build_errors
null
def finalize_options(self):  # noqa: C901
    """Resolve all build_ext options, normalizing paths and lists."""
    from distutils import sysconfig

    self.set_undefined_options(
        'build',
        ('build_lib', 'build_lib'),
        ('build_temp', 'build_temp'),
        ('compiler', 'compiler'),
        ('debug', 'debug'),
        ('force', 'force'),
        ('parallel', 'parallel'),
        ('plat_name', 'plat_name'),
    )

    if self.package is None:
        self.package = self.distribution.ext_package

    self.extensions = self.distribution.ext_modules

    # Make sure Python's include directories (for Python.h, pyconfig.h,
    # etc.) are in the include search path.
    py_include = sysconfig.get_python_inc()
    plat_py_include = sysconfig.get_python_inc(plat_specific=1)
    if self.include_dirs is None:
        self.include_dirs = self.distribution.include_dirs or []
    if isinstance(self.include_dirs, str):
        self.include_dirs = self.include_dirs.split(os.pathsep)

    # If in a virtualenv, add its include directory
    # Issue 16116
    if sys.exec_prefix != sys.base_exec_prefix:
        self.include_dirs.append(os.path.join(sys.exec_prefix, 'include'))

    # Put the Python "system" include dir at the end, so that
    # any local include dirs take precedence.
    self.include_dirs.extend(py_include.split(os.path.pathsep))
    if plat_py_include != py_include:
        self.include_dirs.extend(plat_py_include.split(os.path.pathsep))

    self.ensure_string_list('libraries')
    self.ensure_string_list('link_objects')

    # Life is easier if we're not forever checking for None, so
    # simplify these options to empty lists if unset
    if self.libraries is None:
        self.libraries = []
    if self.library_dirs is None:
        self.library_dirs = []
    elif isinstance(self.library_dirs, str):
        self.library_dirs = self.library_dirs.split(os.pathsep)

    if self.rpath is None:
        self.rpath = []
    elif isinstance(self.rpath, str):
        self.rpath = self.rpath.split(os.pathsep)

    # for extensions under windows use different directories
    # for Release and Debug builds.
    # also Python's library directory must be appended to library_dirs
    if os.name == 'nt':
        # the 'libs' directory is for binary installs - we assume that
        # must be the *native* platform.  But we don't really support
        # cross-compiling via a binary install anyway, so we let it go.
        self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs'))
        if sys.base_exec_prefix != sys.prefix:  # Issue 16116
            self.library_dirs.append(os.path.join(sys.base_exec_prefix, 'libs'))
        if self.debug:
            self.build_temp = os.path.join(self.build_temp, "Debug")
        else:
            self.build_temp = os.path.join(self.build_temp, "Release")

        # Append the source distribution include and library directories,
        # this allows distutils on windows to work in the source tree
        self.include_dirs.append(os.path.dirname(get_config_h_filename()))
        self.library_dirs.append(sys.base_exec_prefix)

        # Use the .lib files for the correct architecture
        if self.plat_name == 'win32':
            suffix = 'win32'
        else:
            # win-amd64
            suffix = self.plat_name[4:]
        new_lib = os.path.join(sys.exec_prefix, 'PCbuild')
        if suffix:
            new_lib = os.path.join(new_lib, suffix)
        self.library_dirs.append(new_lib)

    # For extensions under Cygwin, Python's library directory must be
    # appended to library_dirs
    if sys.platform[:6] == 'cygwin':
        if not sysconfig.python_build:
            # building third party extensions
            self.library_dirs.append(
                os.path.join(
                    sys.prefix, "lib", "python" + get_python_version(), "config"
                )
            )
        else:
            # building python standard extensions
            self.library_dirs.append('.')

    # For building extensions with a shared Python library,
    # Python's library directory must be appended to library_dirs
    # See Issues: #1600860, #4366
    if sysconfig.get_config_var('Py_ENABLE_SHARED'):
        if not sysconfig.python_build:
            # building third party extensions
            self.library_dirs.append(sysconfig.get_config_var('LIBDIR'))
        else:
            # building python standard extensions
            self.library_dirs.append('.')

    # The argument parsing will result in self.define being a string, but
    # it has to be a list of 2-tuples. All the preprocessor symbols
    # specified by the 'define' option will be set to '1'. Multiple
    # symbols can be separated with commas.
    if self.define:
        defines = self.define.split(',')
        self.define = [(symbol, '1') for symbol in defines]

    # The option for macros to undefine is also a string from the
    # option parsing, but has to be a list. Multiple symbols can also
    # be separated with commas here.
    if self.undef:
        self.undef = self.undef.split(',')

    if self.swig_opts is None:
        self.swig_opts = []
    else:
        self.swig_opts = self.swig_opts.split(' ')

    # Finally add the user include and library directories if requested
    if self.user:
        user_include = os.path.join(USER_BASE, "include")
        user_lib = os.path.join(USER_BASE, "lib")
        if os.path.isdir(user_include):
            self.include_dirs.append(user_include)
        if os.path.isdir(user_lib):
            self.library_dirs.append(user_lib)
            self.rpath.append(user_lib)

    if isinstance(self.parallel, str):
        try:
            self.parallel = int(self.parallel)
        except ValueError:
            raise DistutilsOptionError("parallel should be an integer")
(self, ext)
43,741
setuptools.command.build_ext
_get_equivalent_stub
null
def _get_equivalent_stub(self, ext: Extension, output_file: str) -> str:
    """Return the path of the .py stub matching an extension output file."""
    directory = os.path.dirname(output_file)
    # Only the last dotted component names the module file.
    name = ext.name.rpartition(".")[2]
    return f"{os.path.join(directory, name)}.py"
(self, ext: setuptools.extension.Extension, output_file: str) -> str
43,742
setuptools.command.build_ext
_get_inplace_equivalent
null
def _get_inplace_equivalent(self, build_py, ext: Extension) -> Tuple[str, str]:
    """Return the (inplace_file, regular_file) pair for an extension."""
    fullname = self.get_ext_fullname(ext.name)
    filename = self.get_ext_filename(fullname)
    parts = fullname.split('.')
    # Everything but the last component is the containing package.
    package_dir = build_py.get_package_dir('.'.join(parts[:-1]))
    inplace_file = os.path.join(package_dir, os.path.basename(filename))
    regular_file = os.path.join(self.build_lib, filename)
    return (inplace_file, regular_file)
(self, build_py, ext: setuptools.extension.Extension) -> Tuple[str, str]
43,743
setuptools.command.build_ext
_get_output_mapping
null
def _get_output_mapping(self) -> Iterator[Tuple[str, str]]:
    """Yield (built_file, inplace_file) pairs for an in-place build."""
    if not self.inplace:
        return

    build_py = self.get_finalized_command('build_py')
    opt = self.get_finalized_command('install_lib').optimize or ""

    for ext in self.extensions:
        inplace_file, regular_file = self._get_inplace_equivalent(build_py, ext)
        yield (regular_file, inplace_file)

        if ext._needs_stub:
            # This version of `build_ext` always builds artifacts in another dir,
            # when "inplace=True" is given it just copies them back.
            # This is done in the `copy_extensions_to_source` function, which
            # always compile stub files via `_compile_and_remove_stub`.
            # At the end of the process, a `.pyc` stub file is created without the
            # corresponding `.py`.
            inplace_stub = self._get_equivalent_stub(ext, inplace_file)
            regular_stub = self._get_equivalent_stub(ext, regular_file)
            inplace_cache = _compiled_file_name(inplace_stub, optimization=opt)
            output_cache = _compiled_file_name(regular_stub, optimization=opt)
            yield (output_cache, inplace_cache)
(self) -> Iterator[Tuple[str, str]]
43,744
setuptools.command.build_ext
_write_stub_file
null
def _write_stub_file(self, stub_file: str, ext: Extension, compile=False):
    """Write the bootstrap loader stub for a dynamically linked extension.

    NOTE(review): the interior indentation of the generated stub lines was
    lost when this source was flattened; it is reconstructed here from the
    upstream setuptools implementation — verify against the original file.
    """
    log.info("writing stub loader for %s to %s", ext._full_name, stub_file)
    if compile and os.path.exists(stub_file):
        raise BaseError(stub_file + " already exists! Please delete.")
    if not self.dry_run:
        f = open(stub_file, 'w')
        f.write(
            '\n'.join([
                "def __bootstrap__():",
                "   global __bootstrap__, __file__, __loader__",
                "   import sys, os, pkg_resources, importlib.util" + if_dl(", dl"),
                "   __file__ = pkg_resources.resource_filename"
                "(__name__,%r)" % os.path.basename(ext._file_name),
                "   del __bootstrap__",
                "   if '__loader__' in globals():",
                "       del __loader__",
                if_dl("   old_flags = sys.getdlopenflags()"),
                "   old_dir = os.getcwd()",
                "   try:",
                "     os.chdir(os.path.dirname(__file__))",
                if_dl("     sys.setdlopenflags(dl.RTLD_NOW)"),
                "     spec = importlib.util.spec_from_file_location(",
                "                __name__, __file__)",
                "     mod = importlib.util.module_from_spec(spec)",
                "     spec.loader.exec_module(mod)",
                "   finally:",
                if_dl("     sys.setdlopenflags(old_flags)"),
                "     os.chdir(old_dir)",
                "__bootstrap__()",
                ""  # terminal \n
            ])
        )
        f.close()
    if compile:
        self._compile_and_remove_stub(stub_file)
(self, stub_file: str, ext: setuptools.extension.Extension, compile=False)
43,745
distutils.cmd
announce
If the current verbosity level is greater than or equal to 'level', print 'msg' to stdout.
def announce(self, msg, level=1):
    """Print 'msg' to stdout when the current verbosity level is at least
    'level'.
    """
    log.log(level, msg)
(self, msg, level=1)
43,746
setuptools.command.build_ext
build_extension
null
def build_extension(self, ext):
    """Build one extension, swapping in the shlib compiler for Libraries."""
    ext._convert_pyx_sources_to_lang()
    saved_compiler = self.compiler
    try:
        if isinstance(ext, Library):
            self.compiler = self.shlib_compiler
        _build_ext.build_extension(self, ext)
        if ext._needs_stub:
            build_lib = self.get_finalized_command('build_py').build_lib
            self.write_stub(build_lib, ext)
    finally:
        # Always restore the regular compiler, even on failure.
        self.compiler = saved_compiler
(self, ext)
43,747
cppy
build_extensions
null
def build_extensions(self):
    """Inject cppy's include dir and per-platform C++ flags, then build."""
    compiler_type = self.compiler.compiler_type
    flags = self.c_opts.get(compiler_type, [])
    include_dir = get_include()
    for extension in self.extensions:
        extension.include_dirs.insert(0, include_dir)
        extension.extra_compile_args = flags
        if sys.platform == "darwin":
            # `compiler_so` exists on Unix-style compilers, which is what
            # macOS uses, so it is safe to read here.
            compiler_cmd = self.compiler.compiler_so
            # Detect Clang even when invoked through an absolute path.
            if compiler_cmd is not None and 'clang' in compiler_cmd[0]:
                # Ensure a recent enough C++ standard library is linked.
                extension.extra_compile_args += ["-stdlib=libc++"]
                extension.extra_link_args += ["-stdlib=libc++"]
        if compiler_type == "msvc" and os.environ.get("CPPY_DISABLE_FH4"):
            # Opt out of the FH4 exception-handling runtime so that
            # VCRUNTIME140_1.dll is not required. For details, see:
            # https://devblogs.microsoft.com/cppblog/making-cpp-exception-handling-smaller-x64/
            # https://github.com/joerick/cibuildwheel/issues/423#issuecomment-677763904
            extension.extra_compile_args.append("/d2FH4-")
    build_ext.build_extensions(self)
(self)
43,748
distutils.command.build_ext
check_extensions_list
Ensure that the list of extensions (presumably provided as a command option 'extensions') is valid, i.e. it is a list of Extension objects. We also support the old-style list of 2-tuples, where the tuples are (ext_name, build_info), which are converted to Extension instances here. Raise DistutilsSetupError if the structure is invalid anywhere; just returns otherwise.
def check_extensions_list(self, extensions):  # noqa: C901
    """Ensure that the list of extensions (presumably provided as a
    command option 'extensions') is valid, i.e. it is a list of
    Extension objects.  We also support the old-style list of 2-tuples,
    where the tuples are (ext_name, build_info), which are converted to
    Extension instances here.

    Raise DistutilsSetupError if the structure is invalid anywhere;
    just returns otherwise.
    """
    if not isinstance(extensions, list):
        raise DistutilsSetupError(
            "'ext_modules' option must be a list of Extension instances"
        )

    for i, ext in enumerate(extensions):
        if isinstance(ext, Extension):
            continue  # OK! (assume type-checking done
            # by Extension constructor)

        if not isinstance(ext, tuple) or len(ext) != 2:
            raise DistutilsSetupError(
                "each element of 'ext_modules' option must be an "
                "Extension instance or 2-tuple"
            )

        ext_name, build_info = ext

        log.warn(
            "old-style (ext_name, build_info) tuple found in "
            "ext_modules for extension '%s' "
            "-- please convert to Extension instance",
            ext_name,
        )

        if not (isinstance(ext_name, str) and extension_name_re.match(ext_name)):
            raise DistutilsSetupError(
                "first element of each tuple in 'ext_modules' "
                "must be the extension name (a string)"
            )

        if not isinstance(build_info, dict):
            raise DistutilsSetupError(
                "second element of each tuple in 'ext_modules' "
                "must be a dictionary (build info)"
            )

        # OK, the (ext_name, build_info) dict is type-safe: convert it
        # to an Extension instance.
        ext = Extension(ext_name, build_info['sources'])

        # Easy stuff: one-to-one mapping from dict elements to
        # instance attributes.
        for key in (
            'include_dirs',
            'library_dirs',
            'libraries',
            'extra_objects',
            'extra_compile_args',
            'extra_link_args',
        ):
            val = build_info.get(key)
            if val is not None:
                setattr(ext, key, val)

        # Medium-easy stuff: same syntax/semantics, different names.
        ext.runtime_library_dirs = build_info.get('rpath')
        if 'def_file' in build_info:
            log.warn("'def_file' element of build info dict " "no longer supported")

        # Non-trivial stuff: 'macros' split into 'define_macros'
        # and 'undef_macros'.
        macros = build_info.get('macros')
        if macros:
            ext.define_macros = []
            ext.undef_macros = []
            for macro in macros:
                if not (isinstance(macro, tuple) and len(macro) in (1, 2)):
                    raise DistutilsSetupError(
                        "'macros' element of build info dict " "must be 1- or 2-tuple"
                    )
                if len(macro) == 1:
                    ext.undef_macros.append(macro[0])
                elif len(macro) == 2:
                    ext.define_macros.append(macro)

        extensions[i] = ext
(self, extensions)
43,749
setuptools.command.build_ext
copy_extensions_to_source
null
def copy_extensions_to_source(self):
    """Copy built extensions (and stub loaders) back into the source tree."""
    build_py = self.get_finalized_command('build_py')
    for ext in self.extensions:
        inplace_file, regular_file = self._get_inplace_equivalent(build_py, ext)

        # Always copy, even if source is older than destination, to ensure
        # that the right extensions for the current Python/platform are
        # used.
        if os.path.exists(regular_file) or not ext.optional:
            self.copy_file(regular_file, inplace_file, level=self.verbose)

        if ext._needs_stub:
            inplace_stub = self._get_equivalent_stub(ext, inplace_file)
            self._write_stub_file(inplace_stub, ext, compile=True)
            # Always compile stub and remove the original (leave the cache behind)
            # (this behaviour was observed in previous iterations of the code)
(self)
43,750
distutils.cmd
copy_file
Copy a file respecting verbose, dry-run and force flags. (The former two default to whatever is in the Distribution object, and the latter defaults to false for commands that don't define it.)
def copy_file(
    self, infile, outfile, preserve_mode=1, preserve_times=1, link=None, level=1
):
    """Copy a file respecting verbose, dry-run and force flags.  (The
    former two default to whatever is in the Distribution object, and
    the latter defaults to false for commands that don't define it.)"""
    return file_util.copy_file(
        infile,
        outfile,
        preserve_mode,
        preserve_times,
        not self.force,
        link,
        dry_run=self.dry_run,
    )
(self, infile, outfile, preserve_mode=1, preserve_times=1, link=None, level=1)
43,751
distutils.cmd
copy_tree
Copy an entire directory tree respecting verbose, dry-run, and force flags.
def copy_tree(
    self,
    infile,
    outfile,
    preserve_mode=1,
    preserve_times=1,
    preserve_symlinks=0,
    level=1,
):
    """Copy an entire directory tree respecting verbose, dry-run,
    and force flags.
    """
    return dir_util.copy_tree(
        infile,
        outfile,
        preserve_mode,
        preserve_times,
        preserve_symlinks,
        not self.force,
        dry_run=self.dry_run,
    )
(self, infile, outfile, preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1)
43,752
distutils.cmd
debug_print
Print 'msg' to stdout if the global DEBUG (taken from the DISTUTILS_DEBUG environment variable) flag is true.
def debug_print(self, msg):
    """Print 'msg' to stdout if the global DEBUG (taken from the
    DISTUTILS_DEBUG environment variable) flag is true.
    """
    from distutils.debug import DEBUG

    if DEBUG:
        print(msg)
        # Flush immediately so debug output interleaves correctly.
        sys.stdout.flush()
(self, msg)
43,753
distutils.cmd
dump_options
null
def dump_options(self, header=None, indent=""):
    """Announce every user option of this command at INFO level.

    NOTE(review): the width of the per-option indent string was collapsed by
    flattening; two spaces (the upstream distutils value) is used here —
    verify against the original file.
    """
    from distutils.fancy_getopt import longopt_xlate

    if header is None:
        header = "command options for '%s':" % self.get_command_name()
    self.announce(indent + header, level=log.INFO)
    indent = indent + "  "
    for option, _, _ in self.user_options:
        option = option.translate(longopt_xlate)
        # Strip the trailing '=' marker of options taking an argument.
        if option[-1] == "=":
            option = option[:-1]
        value = getattr(self, option)
        self.announce(indent + "{} = {}".format(option, value), level=log.INFO)
(self, header=None, indent='')
43,754
distutils.cmd
ensure_dirname
null
def ensure_dirname(self, option):
    """Ensure that 'option' names an existing directory."""
    self._ensure_tested_string(
        option,
        os.path.isdir,
        "directory name",
        "'%s' does not exist or is not a directory",
    )
(self, option)
43,755
distutils.cmd
ensure_filename
Ensure that 'option' is the name of an existing file.
def ensure_filename(self, option):
    """Ensure that 'option' is the name of an existing file."""
    self._ensure_tested_string(
        option, os.path.isfile, "filename", "'%s' does not exist or is not a file"
    )
(self, option)
43,756
distutils.cmd
ensure_finalized
null
def ensure_finalized(self):
    """Run finalize_options() exactly once, the first time it is needed."""
    if not self.finalized:
        self.finalize_options()
    # Mark the command finalized regardless of the branch taken.
    self.finalized = 1
(self)
43,757
distutils.cmd
ensure_string
Ensure that 'option' is a string; if not defined, set it to 'default'.
def ensure_string(self, option, default=None):
    """Ensure that 'option' is a string; if not defined, set it to
    'default'.
    """
    self._ensure_stringlike(option, "string", default)
(self, option, default=None)
43,758
setuptools
ensure_string_list
Ensure that 'option' is a list of strings. If 'option' is currently a string, we split it either on /,\s*/ or /\s+/, so "foo bar baz", "foo,bar,baz", and "foo, bar baz" all become ["foo", "bar", "baz"]. .. TODO: This method seems to be similar to the one in ``distutils.cmd`` Probably it is just here for backward compatibility with old Python versions? :meta private:
def ensure_string_list(self, option):
    r"""Ensure that 'option' is a list of strings.  If 'option' is
    currently a string, we split it either on /,\s*/ or /\s+/, so
    "foo bar baz", "foo,bar,baz", and "foo, bar baz" all become
    ["foo", "bar", "baz"].

    .. TODO: This method seems to be similar to the one in ``distutils.cmd``
       Probably it is just here for backward compatibility with old Python versions?

    :meta private:
    """
    value = getattr(self, option)
    if value is None:
        return
    if isinstance(value, str):
        # Split on commas (with optional whitespace) or bare whitespace.
        setattr(self, option, re.split(r',\s*|\s+', value))
        return
    ok = isinstance(value, list) and all(isinstance(v, str) for v in value)
    if not ok:
        raise DistutilsOptionError(
            "'%s' must be a list of strings (got %r)" % (option, value)
        )
(self, option)
43,759
distutils.cmd
execute
null
def execute(self, func, args, msg=None, level=1):
    """Run 'func(*args)' through distutils' execute, honouring dry-run."""
    util.execute(func, args, msg, dry_run=self.dry_run)
(self, func, args, msg=None, level=1)
43,760
setuptools.command.build_ext
finalize_options
null
def finalize_options(self):
    """Finalize options, then wire up shared-library and stub bookkeeping."""
    _build_ext.finalize_options(self)
    self.extensions = self.extensions or []
    self.check_extensions_list(self.extensions)
    self.shlibs = [ext for ext in self.extensions if isinstance(ext, Library)]
    if self.shlibs:
        self.setup_shlib_compiler()

    # First pass: resolve every extension's fully qualified name.
    for ext in self.extensions:
        ext._full_name = self.get_ext_fullname(ext.name)

    # Second pass: register in the extension map and compute link/stub flags.
    for ext in self.extensions:
        fullname = ext._full_name
        self.ext_map[fullname] = ext

        # distutils 3.1 will also ask for module names
        # XXX what to do with conflicts?
        self.ext_map[fullname.split('.')[-1]] = ext

        ltd = self.shlibs and self.links_to_dynamic(ext) or False
        ns = ltd and use_stubs and not isinstance(ext, Library)
        ext._links_to_dynamic = ltd
        ext._needs_stub = ns
        filename = ext._file_name = self.get_ext_filename(fullname)
        libdir = os.path.dirname(os.path.join(self.build_lib, filename))
        if ltd and libdir not in ext.library_dirs:
            ext.library_dirs.append(libdir)
        if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs:
            ext.runtime_library_dirs.append(os.curdir)

    if self.editable_mode:
        self.inplace = True
(self)
43,761
distutils.command.build_ext
find_swig
Return the name of the SWIG executable. On Unix, this is just "swig" -- it should be in the PATH. Tries a bit harder on Windows.
def find_swig(self):
    """Return the name of the SWIG executable.  On Unix, this is
    just "swig" -- it should be in the PATH.  Tries a bit harder on
    Windows.
    """
    if os.name == "posix":
        return "swig"
    if os.name == "nt":
        # Look for SWIG in its standard installation directory on
        # Windows (or so I presume!).  If we find it there, great;
        # if not, act like Unix and assume it's in the PATH.
        for vers in ("1.3", "1.2", "1.1"):
            fn = os.path.join("c:\\swig%s" % vers, "swig.exe")
            if os.path.isfile(fn):
                return fn
        return "swig.exe"
    raise DistutilsPlatformError(
        "I don't know how to find (much less run) SWIG "
        "on platform '%s'" % os.name
    )
(self)
43,762
distutils.cmd
get_command_name
null
def get_command_name(self):
    """Return 'command_name' when set, otherwise the class name."""
    try:
        return self.command_name
    except AttributeError:
        return self.__class__.__name__
(self)
43,763
setuptools.command.build_ext
get_export_symbols
null
def get_export_symbols(self, ext):
    """Library objects carry their own export symbols; defer otherwise."""
    if isinstance(ext, Library):
        return ext.export_symbols
    return _build_ext.get_export_symbols(self, ext)
(self, ext)
43,764
setuptools.command.build_ext
get_ext_filename
null
def get_ext_filename(self, fullname):
    """Compute the output filename, honouring SETUPTOOLS_EXT_SUFFIX,
    abi3 builds, Library objects and stubbed dynamic links."""
    so_ext = os.getenv('SETUPTOOLS_EXT_SUFFIX')
    if so_ext:
        filename = os.path.join(*fullname.split('.')) + so_ext
    else:
        filename = _build_ext.get_ext_filename(self, fullname)
        so_ext = get_config_var('EXT_SUFFIX')

    if fullname in self.ext_map:
        ext = self.ext_map[fullname]
        use_abi3 = getattr(ext, 'py_limited_api') and get_abi3_suffix()
        if use_abi3:
            # Swap the platform-specific suffix for the stable-ABI one.
            filename = filename[: -len(so_ext)]
            so_ext = get_abi3_suffix()
            filename = filename + so_ext
        if isinstance(ext, Library):
            fn, ext = os.path.splitext(filename)
            return self.shlib_compiler.library_filename(fn, libtype)
        elif use_stubs and ext._links_to_dynamic:
            d, fn = os.path.split(filename)
            return os.path.join(d, 'dl-' + fn)
    return filename
(self, fullname)
43,765
distutils.command.build_ext
get_ext_fullname
Returns the fullname of a given extension name. Adds the `package.` prefix
def get_ext_fullname(self, ext_name):
    """Returns the fullname of a given extension name.

    Adds the `package.` prefix"""
    if self.package is None:
        return ext_name
    return self.package + '.' + ext_name
(self, ext_name)
43,766
distutils.command.build_ext
get_ext_fullpath
Returns the path of the filename for a given extension. The file is located in `build_lib` or directly in the package (inplace option).
def get_ext_fullpath(self, ext_name):
    """Returns the path of the filename for a given extension.

    The file is located in `build_lib` or directly in the package
    (inplace option).
    """
    fullname = self.get_ext_fullname(ext_name)
    modpath = fullname.split('.')
    filename = self.get_ext_filename(modpath[-1])

    if not self.inplace:
        # no further work needed
        # returning :
        #   build_dir/package/path/filename
        filename = os.path.join(*modpath[:-1] + [filename])
        return os.path.join(self.build_lib, filename)

    # the inplace option requires to find the package directory
    # using the build_py command for that
    package = '.'.join(modpath[0:-1])
    build_py = self.get_finalized_command('build_py')
    package_dir = os.path.abspath(build_py.get_package_dir(package))

    # returning
    #   package_dir/filename
    return os.path.join(package_dir, filename)
(self, ext_name)
43,767
distutils.cmd
get_finalized_command
Wrapper around Distribution's 'get_command_obj()' method: find (create if necessary and 'create' is true) the command object for 'command', call its 'ensure_finalized()' method, and return the finalized command object.
def get_finalized_command(self, command, create=1):
    """Wrapper around Distribution's 'get_command_obj()' method: find
    (create if necessary and 'create' is true) the command object for
    'command', call its 'ensure_finalized()' method, and return the
    finalized command object.
    """
    command_obj = self.distribution.get_command_obj(command, create)
    command_obj.ensure_finalized()
    return command_obj
(self, command, create=1)
43,768
distutils.command.build_ext
get_libraries
Return the list of libraries to link against when building a shared extension. On most platforms, this is just 'ext.libraries'; on Windows, we add the Python library (eg. python20.dll).
def get_libraries(self, ext):  # noqa: C901
    """Return the list of libraries to link against when building a
    shared extension.  On most platforms, this is just 'ext.libraries';
    on Windows, we add the Python library (eg. python20.dll).
    """
    # The python library is always needed on Windows.  For MSVC, this
    # is redundant, since the library is mentioned in a pragma in
    # pyconfig.h that MSVC groks.  The other Windows compilers all seem
    # to need it mentioned explicitly, though, so that's what we do.
    # Append '_d' to the python import library on debug builds.
    if sys.platform == "win32":
        from distutils._msvccompiler import MSVCCompiler

        if not isinstance(self.compiler, MSVCCompiler):
            template = "python%d%d"
            if self.debug:
                template = template + '_d'
            pythonlib = template % (
                sys.hexversion >> 24,
                (sys.hexversion >> 16) & 0xFF,
            )
            # don't extend ext.libraries, it may be shared with other
            # extensions, it is a reference to the original list
            return ext.libraries + [pythonlib]
    else:
        # On Android only the main executable and LD_PRELOADs are considered
        # to be RTLD_GLOBAL, all the dependencies of the main executable
        # remain RTLD_LOCAL and so the shared libraries must be linked with
        # libpython when python is built with a shared python library (issue
        # bpo-21536).
        # On Cygwin (and if required, other POSIX-like platforms based on
        # Windows like MinGW) it is simply necessary that all symbols in
        # shared libraries are resolved at link time.
        from distutils.sysconfig import get_config_var

        link_libpython = False
        if get_config_var('Py_ENABLE_SHARED'):
            # A native build on an Android device or on Cygwin
            if hasattr(sys, 'getandroidapilevel'):
                link_libpython = True
            elif sys.platform == 'cygwin':
                link_libpython = True
            elif '_PYTHON_HOST_PLATFORM' in os.environ:
                # We are cross-compiling for one of the relevant platforms
                if get_config_var('ANDROID_API_LEVEL') != 0:
                    link_libpython = True
                elif get_config_var('MACHDEP') == 'cygwin':
                    link_libpython = True

        if link_libpython:
            ldversion = get_config_var('LDVERSION')
            return ext.libraries + ['python' + ldversion]

    return ext.libraries + py37compat.pythonlib()
(self, ext)
43,769
setuptools.command.build_ext
get_output_mapping
See :class:`setuptools.commands.build.SubCommand`
def get_output_mapping(self) -> Dict[str, str]: """See :class:`setuptools.commands.build.SubCommand`""" mapping = self._get_output_mapping() return dict(sorted(mapping, key=lambda x: x[0]))
(self) -> Dict[str, str]
43,770
setuptools.command.build_ext
get_outputs
null
def get_outputs(self) -> List[str]: if self.inplace: return list(self.get_output_mapping().keys()) return sorted(_build_ext.get_outputs(self) + self.__get_stubs_outputs())
(self) -> List[str]
43,771
distutils.command.build_ext
get_source_files
null
def get_source_files(self): self.check_extensions_list(self.extensions) filenames = [] # Wouldn't it be neat if we knew the names of header files too... for ext in self.extensions: filenames.extend(ext.sources) return filenames
(self)
43,772
distutils.cmd
get_sub_commands
Determine the sub-commands that are relevant in the current distribution (ie., that need to be run). This is based on the 'sub_commands' class attribute: each tuple in that list may include a method that we call to determine if the subcommand needs to be run for the current distribution. Return a list of command names.
def get_sub_commands(self): """Determine the sub-commands that are relevant in the current distribution (ie., that need to be run). This is based on the 'sub_commands' class attribute: each tuple in that list may include a method that we call to determine if the subcommand needs to be run for the current distribution. Return a list of command names. """ commands = [] for (cmd_name, method) in self.sub_commands: if method is None or method(self): commands.append(cmd_name) return commands
(self)
43,773
setuptools.command.build_ext
initialize_options
null
def initialize_options(self): _build_ext.initialize_options(self) self.shlib_compiler = None self.shlibs = [] self.ext_map = {} self.editable_mode = False
(self)
43,774
setuptools.command.build_ext
links_to_dynamic
Return true if 'ext' links to a dynamic lib in the same package
def links_to_dynamic(self, ext): """Return true if 'ext' links to a dynamic lib in the same package""" # XXX this should check to ensure the lib is actually being built # XXX as dynamic, and not just using a locally-found version or a # XXX static-compiled version libnames = dict.fromkeys([lib._full_name for lib in self.shlibs]) pkg = '.'.join(ext._full_name.split('.')[:-1] + ['']) return any(pkg + libname in libnames for libname in ext.libraries)
(self, ext)
43,775
distutils.cmd
make_archive
null
def make_archive( self, base_name, format, root_dir=None, base_dir=None, owner=None, group=None ): return archive_util.make_archive( base_name, format, root_dir, base_dir, dry_run=self.dry_run, owner=owner, group=group, )
(self, base_name, format, root_dir=None, base_dir=None, owner=None, group=None)
43,776
distutils.cmd
make_file
Special case of 'execute()' for operations that process one or more input files and generate one output file. Works just like 'execute()', except the operation is skipped and a different message printed if 'outfile' already exists and is newer than all files listed in 'infiles'. If the command defined 'self.force', and it is true, then the command is unconditionally run -- does no timestamp checks.
def make_file( self, infiles, outfile, func, args, exec_msg=None, skip_msg=None, level=1 ): """Special case of 'execute()' for operations that process one or more input files and generate one output file. Works just like 'execute()', except the operation is skipped and a different message printed if 'outfile' already exists and is newer than all files listed in 'infiles'. If the command defined 'self.force', and it is true, then the command is unconditionally run -- does no timestamp checks. """ if skip_msg is None: skip_msg = "skipping %s (inputs unchanged)" % outfile # Allow 'infiles' to be a single string if isinstance(infiles, str): infiles = (infiles,) elif not isinstance(infiles, (list, tuple)): raise TypeError("'infiles' must be a string, or a list or tuple of strings") if exec_msg is None: exec_msg = "generating {} from {}".format(outfile, ', '.join(infiles)) # If 'outfile' must be regenerated (either because it doesn't # exist, is out-of-date, or the 'force' flag is true) then # perform the action that presumably regenerates it if self.force or dep_util.newer_group(infiles, outfile): self.execute(func, args, exec_msg, level) # Otherwise, print the "skip" message else: log.debug(skip_msg)
(self, infiles, outfile, func, args, exec_msg=None, skip_msg=None, level=1)
43,777
distutils.cmd
mkpath
null
def mkpath(self, name, mode=0o777): dir_util.mkpath(name, mode, dry_run=self.dry_run)
(self, name, mode=511)
43,778
distutils.cmd
move_file
Move a file respecting dry-run flag.
def move_file(self, src, dst, level=1): """Move a file respecting dry-run flag.""" return file_util.move_file(src, dst, dry_run=self.dry_run)
(self, src, dst, level=1)
43,779
setuptools
reinitialize_command
null
def reinitialize_command(self, command, reinit_subcommands=0, **kw): cmd = _Command.reinitialize_command(self, command, reinit_subcommands) vars(cmd).update(kw) return cmd
(self, command, reinit_subcommands=0, **kw)
43,780
setuptools.command.build_ext
run
Build extensions in build directory, then copy if --inplace
def run(self): """Build extensions in build directory, then copy if --inplace""" old_inplace, self.inplace = self.inplace, 0 _build_ext.run(self) self.inplace = old_inplace if old_inplace: self.copy_extensions_to_source()
(self)
43,781
distutils.cmd
run_command
Run some other command: uses the 'run_command()' method of Distribution, which creates and finalizes the command object if necessary and then invokes its 'run()' method.
def run_command(self, command): """Run some other command: uses the 'run_command()' method of Distribution, which creates and finalizes the command object if necessary and then invokes its 'run()' method. """ self.distribution.run_command(command)
(self, command)
43,782
distutils.cmd
set_undefined_options
Set the values of any "undefined" options from corresponding option values in some other command object. "Undefined" here means "is None", which is the convention used to indicate that an option has not been changed between 'initialize_options()' and 'finalize_options()'. Usually called from 'finalize_options()' for options that depend on some other command rather than another option of the same command. 'src_cmd' is the other command from which option values will be taken (a command object will be created for it if necessary); the remaining arguments are '(src_option,dst_option)' tuples which mean "take the value of 'src_option' in the 'src_cmd' command object, and copy it to 'dst_option' in the current command object".
def set_undefined_options(self, src_cmd, *option_pairs): """Set the values of any "undefined" options from corresponding option values in some other command object. "Undefined" here means "is None", which is the convention used to indicate that an option has not been changed between 'initialize_options()' and 'finalize_options()'. Usually called from 'finalize_options()' for options that depend on some other command rather than another option of the same command. 'src_cmd' is the other command from which option values will be taken (a command object will be created for it if necessary); the remaining arguments are '(src_option,dst_option)' tuples which mean "take the value of 'src_option' in the 'src_cmd' command object, and copy it to 'dst_option' in the current command object". """ # Option_pairs: list of (src_option, dst_option) tuples src_cmd_obj = self.distribution.get_command_obj(src_cmd) src_cmd_obj.ensure_finalized() for (src_option, dst_option) in option_pairs: if getattr(self, dst_option) is None: setattr(self, dst_option, getattr(src_cmd_obj, src_option))
(self, src_cmd, *option_pairs)
43,783
setuptools.command.build_ext
setup_shlib_compiler
null
def setup_shlib_compiler(self): compiler = self.shlib_compiler = new_compiler( compiler=self.compiler, dry_run=self.dry_run, force=self.force ) _customize_compiler_for_shlib(compiler) if self.include_dirs is not None: compiler.set_include_dirs(self.include_dirs) if self.define is not None: # 'define' option is a list of (name,value) tuples for (name, value) in self.define: compiler.define_macro(name, value) if self.undef is not None: for macro in self.undef: compiler.undefine_macro(macro) if self.libraries is not None: compiler.set_libraries(self.libraries) if self.library_dirs is not None: compiler.set_library_dirs(self.library_dirs) if self.rpath is not None: compiler.set_runtime_library_dirs(self.rpath) if self.link_objects is not None: compiler.set_link_objects(self.link_objects) # hack so distutils' build_extension() builds a library instead compiler.link_shared_object = link_shared_object.__get__(compiler)
(self)
43,784
distutils.cmd
spawn
Spawn an external command respecting dry-run flag.
def spawn(self, cmd, search_path=1, level=1): """Spawn an external command respecting dry-run flag.""" from distutils.spawn import spawn spawn(cmd, search_path, dry_run=self.dry_run)
(self, cmd, search_path=1, level=1)
43,785
distutils.command.build_ext
swig_sources
Walk the list of source files in 'sources', looking for SWIG interface (.i) files. Run SWIG on all that are found, and return a modified 'sources' list with SWIG source files replaced by the generated C (or C++) files.
def swig_sources(self, sources, extension): """Walk the list of source files in 'sources', looking for SWIG interface (.i) files. Run SWIG on all that are found, and return a modified 'sources' list with SWIG source files replaced by the generated C (or C++) files. """ new_sources = [] swig_sources = [] swig_targets = {} # XXX this drops generated C/C++ files into the source tree, which # is fine for developers who want to distribute the generated # source -- but there should be an option to put SWIG output in # the temp dir. if self.swig_cpp: log.warn("--swig-cpp is deprecated - use --swig-opts=-c++") if ( self.swig_cpp or ('-c++' in self.swig_opts) or ('-c++' in extension.swig_opts) ): target_ext = '.cpp' else: target_ext = '.c' for source in sources: (base, ext) = os.path.splitext(source) if ext == ".i": # SWIG interface file new_sources.append(base + '_wrap' + target_ext) swig_sources.append(source) swig_targets[source] = new_sources[-1] else: new_sources.append(source) if not swig_sources: return new_sources swig = self.swig or self.find_swig() swig_cmd = [swig, "-python"] swig_cmd.extend(self.swig_opts) if self.swig_cpp: swig_cmd.append("-c++") # Do not override commandline arguments if not self.swig_opts: for o in extension.swig_opts: swig_cmd.append(o) for source in swig_sources: target = swig_targets[source] log.info("swigging %s to %s", source, target) self.spawn(swig_cmd + ["-o", target, source]) return new_sources
(self, sources, extension)
43,786
distutils.cmd
warn
null
def warn(self, msg): log.warn("warning: %s: %s\n", self.get_command_name(), msg)
(self, msg)
43,787
setuptools.command.build_ext
write_stub
null
def write_stub(self, output_dir, ext, compile=False): stub_file = os.path.join(output_dir, *ext._full_name.split('.')) + '.py' self._write_stub_file(stub_file, ext, compile)
(self, output_dir, ext, compile=False)
43,788
setuptools.command.build_ext
build_ext
null
class build_ext(_build_ext): editable_mode: bool = False inplace: bool = False def run(self): """Build extensions in build directory, then copy if --inplace""" old_inplace, self.inplace = self.inplace, 0 _build_ext.run(self) self.inplace = old_inplace if old_inplace: self.copy_extensions_to_source() def _get_inplace_equivalent(self, build_py, ext: Extension) -> Tuple[str, str]: fullname = self.get_ext_fullname(ext.name) filename = self.get_ext_filename(fullname) modpath = fullname.split('.') package = '.'.join(modpath[:-1]) package_dir = build_py.get_package_dir(package) inplace_file = os.path.join(package_dir, os.path.basename(filename)) regular_file = os.path.join(self.build_lib, filename) return (inplace_file, regular_file) def copy_extensions_to_source(self): build_py = self.get_finalized_command('build_py') for ext in self.extensions: inplace_file, regular_file = self._get_inplace_equivalent(build_py, ext) # Always copy, even if source is older than destination, to ensure # that the right extensions for the current Python/platform are # used. 
if os.path.exists(regular_file) or not ext.optional: self.copy_file(regular_file, inplace_file, level=self.verbose) if ext._needs_stub: inplace_stub = self._get_equivalent_stub(ext, inplace_file) self._write_stub_file(inplace_stub, ext, compile=True) # Always compile stub and remove the original (leave the cache behind) # (this behaviour was observed in previous iterations of the code) def _get_equivalent_stub(self, ext: Extension, output_file: str) -> str: dir_ = os.path.dirname(output_file) _, _, name = ext.name.rpartition(".") return f"{os.path.join(dir_, name)}.py" def _get_output_mapping(self) -> Iterator[Tuple[str, str]]: if not self.inplace: return build_py = self.get_finalized_command('build_py') opt = self.get_finalized_command('install_lib').optimize or "" for ext in self.extensions: inplace_file, regular_file = self._get_inplace_equivalent(build_py, ext) yield (regular_file, inplace_file) if ext._needs_stub: # This version of `build_ext` always builds artifacts in another dir, # when "inplace=True" is given it just copies them back. # This is done in the `copy_extensions_to_source` function, which # always compile stub files via `_compile_and_remove_stub`. # At the end of the process, a `.pyc` stub file is created without the # corresponding `.py`. 
inplace_stub = self._get_equivalent_stub(ext, inplace_file) regular_stub = self._get_equivalent_stub(ext, regular_file) inplace_cache = _compiled_file_name(inplace_stub, optimization=opt) output_cache = _compiled_file_name(regular_stub, optimization=opt) yield (output_cache, inplace_cache) def get_ext_filename(self, fullname): so_ext = os.getenv('SETUPTOOLS_EXT_SUFFIX') if so_ext: filename = os.path.join(*fullname.split('.')) + so_ext else: filename = _build_ext.get_ext_filename(self, fullname) so_ext = get_config_var('EXT_SUFFIX') if fullname in self.ext_map: ext = self.ext_map[fullname] use_abi3 = getattr(ext, 'py_limited_api') and get_abi3_suffix() if use_abi3: filename = filename[:-len(so_ext)] so_ext = get_abi3_suffix() filename = filename + so_ext if isinstance(ext, Library): fn, ext = os.path.splitext(filename) return self.shlib_compiler.library_filename(fn, libtype) elif use_stubs and ext._links_to_dynamic: d, fn = os.path.split(filename) return os.path.join(d, 'dl-' + fn) return filename def initialize_options(self): _build_ext.initialize_options(self) self.shlib_compiler = None self.shlibs = [] self.ext_map = {} self.editable_mode = False def finalize_options(self): _build_ext.finalize_options(self) self.extensions = self.extensions or [] self.check_extensions_list(self.extensions) self.shlibs = [ext for ext in self.extensions if isinstance(ext, Library)] if self.shlibs: self.setup_shlib_compiler() for ext in self.extensions: ext._full_name = self.get_ext_fullname(ext.name) for ext in self.extensions: fullname = ext._full_name self.ext_map[fullname] = ext # distutils 3.1 will also ask for module names # XXX what to do with conflicts? 
self.ext_map[fullname.split('.')[-1]] = ext ltd = self.shlibs and self.links_to_dynamic(ext) or False ns = ltd and use_stubs and not isinstance(ext, Library) ext._links_to_dynamic = ltd ext._needs_stub = ns filename = ext._file_name = self.get_ext_filename(fullname) libdir = os.path.dirname(os.path.join(self.build_lib, filename)) if ltd and libdir not in ext.library_dirs: ext.library_dirs.append(libdir) if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs: ext.runtime_library_dirs.append(os.curdir) if self.editable_mode: self.inplace = True def setup_shlib_compiler(self): compiler = self.shlib_compiler = new_compiler( compiler=self.compiler, dry_run=self.dry_run, force=self.force ) _customize_compiler_for_shlib(compiler) if self.include_dirs is not None: compiler.set_include_dirs(self.include_dirs) if self.define is not None: # 'define' option is a list of (name,value) tuples for (name, value) in self.define: compiler.define_macro(name, value) if self.undef is not None: for macro in self.undef: compiler.undefine_macro(macro) if self.libraries is not None: compiler.set_libraries(self.libraries) if self.library_dirs is not None: compiler.set_library_dirs(self.library_dirs) if self.rpath is not None: compiler.set_runtime_library_dirs(self.rpath) if self.link_objects is not None: compiler.set_link_objects(self.link_objects) # hack so distutils' build_extension() builds a library instead compiler.link_shared_object = link_shared_object.__get__(compiler) def get_export_symbols(self, ext): if isinstance(ext, Library): return ext.export_symbols return _build_ext.get_export_symbols(self, ext) def build_extension(self, ext): ext._convert_pyx_sources_to_lang() _compiler = self.compiler try: if isinstance(ext, Library): self.compiler = self.shlib_compiler _build_ext.build_extension(self, ext) if ext._needs_stub: build_lib = self.get_finalized_command('build_py').build_lib self.write_stub(build_lib, ext) finally: self.compiler = _compiler def 
links_to_dynamic(self, ext): """Return true if 'ext' links to a dynamic lib in the same package""" # XXX this should check to ensure the lib is actually being built # XXX as dynamic, and not just using a locally-found version or a # XXX static-compiled version libnames = dict.fromkeys([lib._full_name for lib in self.shlibs]) pkg = '.'.join(ext._full_name.split('.')[:-1] + ['']) return any(pkg + libname in libnames for libname in ext.libraries) def get_outputs(self) -> List[str]: if self.inplace: return list(self.get_output_mapping().keys()) return sorted(_build_ext.get_outputs(self) + self.__get_stubs_outputs()) def get_output_mapping(self) -> Dict[str, str]: """See :class:`setuptools.commands.build.SubCommand`""" mapping = self._get_output_mapping() return dict(sorted(mapping, key=lambda x: x[0])) def __get_stubs_outputs(self): # assemble the base name for each extension that needs a stub ns_ext_bases = ( os.path.join(self.build_lib, *ext._full_name.split('.')) for ext in self.extensions if ext._needs_stub ) # pair each base with the extension pairs = itertools.product(ns_ext_bases, self.__get_output_extensions()) return list(base + fnext for base, fnext in pairs) def __get_output_extensions(self): yield '.py' yield '.pyc' if self.get_finalized_command('build_py').optimize: yield '.pyo' def write_stub(self, output_dir, ext, compile=False): stub_file = os.path.join(output_dir, *ext._full_name.split('.')) + '.py' self._write_stub_file(stub_file, ext, compile) def _write_stub_file(self, stub_file: str, ext: Extension, compile=False): log.info("writing stub loader for %s to %s", ext._full_name, stub_file) if compile and os.path.exists(stub_file): raise BaseError(stub_file + " already exists! 
Please delete.") if not self.dry_run: f = open(stub_file, 'w') f.write( '\n'.join([ "def __bootstrap__():", " global __bootstrap__, __file__, __loader__", " import sys, os, pkg_resources, importlib.util" + if_dl(", dl"), " __file__ = pkg_resources.resource_filename" "(__name__,%r)" % os.path.basename(ext._file_name), " del __bootstrap__", " if '__loader__' in globals():", " del __loader__", if_dl(" old_flags = sys.getdlopenflags()"), " old_dir = os.getcwd()", " try:", " os.chdir(os.path.dirname(__file__))", if_dl(" sys.setdlopenflags(dl.RTLD_NOW)"), " spec = importlib.util.spec_from_file_location(", " __name__, __file__)", " mod = importlib.util.module_from_spec(spec)", " spec.loader.exec_module(mod)", " finally:", if_dl(" sys.setdlopenflags(old_flags)"), " os.chdir(old_dir)", "__bootstrap__()", "" # terminal \n ]) ) f.close() if compile: self._compile_and_remove_stub(stub_file) def _compile_and_remove_stub(self, stub_file: str): from distutils.util import byte_compile byte_compile([stub_file], optimize=0, force=True, dry_run=self.dry_run) optimize = self.get_finalized_command('install_lib').optimize if optimize > 0: byte_compile([stub_file], optimize=optimize, force=True, dry_run=self.dry_run) if os.path.exists(stub_file) and not self.dry_run: os.unlink(stub_file)
(dist, **kw)
43,805
distutils.command.build_ext
build_extensions
null
def build_extensions(self): # First, sanity-check the 'extensions' list self.check_extensions_list(self.extensions) if self.parallel: self._build_extensions_parallel() else: self._build_extensions_serial()
(self)
43,846
cppy
get_include
null
def get_include(): import os return os.path.join(os.path.dirname(__file__), 'include')
()
43,850
pdf2docx.converter
Converter
The ``PDF`` to ``docx`` converter. * Read PDF file with ``PyMuPDF`` to get raw layout data page by page, including text, image, drawing and its properties, e.g. boundary box, font, size, image width, height. * Analyze layout in document level, e.g. page header, footer and margin. * Parse page layout to docx structure, e.g. paragraph and its properties like indentation, spacing, text alignment; table and its properties like border, shading, merging. * Finally, generate docx with ``python-docx``.
class Converter:
    '''The ``PDF`` to ``docx`` converter.

    * Read PDF file with ``PyMuPDF`` to get raw layout data page by page, including text,
      image, drawing and its properties, e.g. boundary box, font, size, image width, height.
    * Analyze layout in document level, e.g. page header, footer and margin.
    * Parse page layout to docx structure, e.g. paragraph and its properties like
      indentation, spacing, text alignment; table and its properties like border,
      shading, merging.
    * Finally, generate docx with ``python-docx``.
    '''

    def __init__(
        self, pdf_file: str = None, password: str = None, stream: bytes = None
    ):
        '''Initialize fitz object with given pdf file path.

        Args:
            pdf_file (str): pdf file path.
            stream (bytes): pdf file in memory.
            password (str): Password for encrypted pdf. Default to None if not encrypted.

        Raises:
            ValueError: neither ``pdf_file`` nor ``stream`` is given.
        '''
        # fitz object
        self.filename_pdf = pdf_file
        self.password = str(password or "")
        if not pdf_file and not stream:
            raise ValueError("Either pdf_file or stream must be given.")
        # stream takes precedence over the file path
        if stream:
            self._fitz_doc = fitz.Document(stream=stream)
        else:
            self._fitz_doc = fitz.Document(pdf_file)

        # initialize empty pages container
        self._pages = Pages()

    @property
    def fitz_doc(self):
        # the underlying fitz (PyMuPDF) document
        return self._fitz_doc

    @property
    def pages(self):
        # the parsed pages container
        return self._pages

    def close(self):
        '''Close the underlying fitz document.'''
        self._fitz_doc.close()

    @property
    def default_settings(self):
        '''Default parsing parameters.'''
        return {
            'debug'                          : False,  # plot layout if True
            'ocr'                            : 0,      # ocr status: 0 - no ocr; 1 - to do ocr; 2 - ocr-ed pdf
            'ignore_page_error'              : True,   # not break the conversion process due to failure of a certain page if True
            'multi_processing'               : False,  # convert pages with multi-processing if True
            'cpu_count'                      : 0,      # working cpu count when convert pages with multi-processing
            'min_section_height'             : 20.0,   # The minimum height of a valid section.
            'connected_border_tolerance'     : 0.5,    # two borders are intersected if the gap lower than this value
            'max_border_width'               : 6.0,    # max border width
            'min_border_clearance'           : 2.0,    # the minimum allowable clearance of two borders
            'float_image_ignorable_gap'      : 5.0,    # float image if the intersection exceeds this value
            'page_margin_factor_top'         : 0.5,    # [0,1] reduce top margin by factor
            'page_margin_factor_bottom'      : 0.5,    # [0,1] reduce bottom margin by factor
            'shape_min_dimension'            : 2.0,    # ignore shape if both width and height is lower than this value
            'max_line_spacing_ratio'         : 1.5,    # maximum line spacing ratio: line spacing / line height
            'line_overlap_threshold'         : 0.9,    # [0,1] delete line if the intersection to other lines exceeds this value
            'line_break_width_ratio'         : 0.5,    # break line if the ratio of line width to entire layout bbox is lower than this value
            'line_break_free_space_ratio'    : 0.1,    # break line if the ratio of free space to entire line exceeds this value
            'line_separate_threshold'        : 5.0,    # two separate lines if the x-distance exceeds this value
            'new_paragraph_free_space_ratio' : 0.85,   # new paragraph if the ratio of free space to line height exceeds this value
            'lines_left_aligned_threshold'   : 1.0,    # left aligned if d_x0 of two lines is lower than this value (Pt)
            'lines_right_aligned_threshold'  : 1.0,    # right aligned if d_x1 of two lines is lower than this value (Pt)
            'lines_center_aligned_threshold' : 2.0,    # center aligned if delta center of two lines is lower than this value
            'clip_image_res_ratio'           : 4.0,    # resolution ratio (to 72dpi) when clipping page image
            'min_svg_gap_dx'                 : 15.0,   # merge adjacent vector graphics if the horizontal gap is less than this value
            'min_svg_gap_dy'                 : 2.0,    # merge adjacent vector graphics if the vertical gap is less than this value
            'min_svg_w'                      : 2.0,    # ignore vector graphics if the bbox width is less than this value
            'min_svg_h'                      : 2.0,    # ignore vector graphics if the bbox height is less than this value
            'extract_stream_table'           : False,  # don't consider stream table when extracting tables
            'parse_lattice_table'            : True,   # whether parse lattice table or not; may destroy the layout if set False
            'parse_stream_table'             : True,   # whether parse stream table or not; may destroy the layout if set False
            'delete_end_line_hyphen'         : False   # delete hyphen at the end of a line
        }

    # -----------------------------------------------------------------------
    # Parsing process: load -> analyze document -> parse pages -> make docx
    # -----------------------------------------------------------------------
    def parse(self, start:int=0, end:int=None, pages:list=None, **kwargs):
        '''Parse pages in three steps:

        * open PDF file with ``PyMuPDF``
        * analyze whole document, e.g. page section, header/footer and margin
        * parse specified pages, e.g. paragraph, image and table

        Args:
            start (int, optional): First page to process. Defaults to 0, the first page.
            end (int, optional): Last page to process. Defaults to None, the last page.
            pages (list, optional): Range of page indexes to parse. Defaults to None.
            kwargs (dict, optional): Configuration parameters.
        '''
        return self.load_pages(start, end, pages) \
                    .parse_document(**kwargs) \
                    .parse_pages(**kwargs)

    def load_pages(self, start:int=0, end:int=None, pages:list=None):
        '''Step 1 of converting process: open PDF file with ``PyMuPDF``,
        especially for password encrypted file.

        Args:
            start (int, optional): First page to process. Defaults to 0, the first page.
            end (int, optional): Last page to process. Defaults to None, the last page.
            pages (list, optional): Range of page indexes to parse. Defaults to None.
        '''
        logging.info(self._color_output('[1/4] Opening document...'))

        # encrypted pdf ?
        if self._fitz_doc.needs_pass:
            if not self.password:
                raise ConversionException(f'Require password for {self.filename_pdf}.')
            elif not self._fitz_doc.authenticate(self.password):
                raise ConversionException('Incorrect password.')

        # initialize empty pages: all marked skip-parsing until selected below
        num = len(self._fitz_doc)
        self._pages.reset([Page(id=i, skip_parsing=True) for i in range(num)])

        # set pages to parse
        page_indexes = self._page_indexes(start, end, pages, num)
        for i in page_indexes:
            self._pages[i].skip_parsing = False

        return self

    def parse_document(self, **kwargs):
        '''Step 2 of converting process: analyze whole document, e.g. page section,
        header/footer and margin.'''
        logging.info(self._color_output('[2/4] Analyzing document...'))

        self._pages.parse(self.fitz_doc, **kwargs)
        return self

    def parse_pages(self, **kwargs):
        '''Step 3 of converting process: parse pages, e.g. paragraph, image and table.'''
        logging.info(self._color_output('[3/4] Parsing pages...'))

        pages = [page for page in self._pages if not page.skip_parsing]
        num_pages = len(pages)
        for i, page in enumerate(pages, start=1):
            pid = page.id + 1
            logging.info('(%d/%d) Page %d', i, num_pages, pid)
            try:
                page.parse(**kwargs)
            except Exception as e:
                # NOTE(review): assumes 'debug' and 'ignore_page_error' are present,
                # which holds when called via convert() (defaults merged) — a direct
                # call without them raises KeyError here; confirm intended
                if not kwargs['debug'] and kwargs['ignore_page_error']:
                    logging.error('Ignore page %d due to parsing page error: %s', pid, e)
                else:
                    raise ConversionException(f'Error when parsing page {pid}: {e}')

        return self

    def make_docx(self, filename_or_stream=None, **kwargs):
        '''Step 4 of converting process: create docx file with converted pages.

        Args:
            filename_or_stream (str, file-like): docx file to write.
            kwargs (dict, optional): Configuration parameters.
        '''
        logging.info(self._color_output('[4/4] Creating pages...'))

        # check parsed pages
        parsed_pages = list(filter(
            lambda page: page.finalized, self._pages
        ))
        if not parsed_pages:
            raise ConversionException('No parsed pages. Please parse page first.')

        # if no target given, derive "<pdf name>.docx" from the source path
        if not filename_or_stream:
            if self.filename_pdf:
                filename_or_stream = f'{self.filename_pdf[0:-len(".pdf")]}.docx'
                # remove existing file
                if os.path.exists(filename_or_stream):
                    os.remove(filename_or_stream)
            else:
                raise ConversionException("Please specify a docx file name or a file-like object to write.")

        # create page by page
        docx_file = Document()
        num_pages = len(parsed_pages)
        for i, page in enumerate(parsed_pages, start=1):
            if not page.finalized: continue  # ignore unparsed pages
            pid = page.id + 1
            logging.info('(%d/%d) Page %d', i, num_pages, pid)
            try:
                page.make_docx(docx_file)
            except Exception as e:
                if not kwargs['debug'] and kwargs['ignore_page_error']:
                    logging.error('Ignore page %d due to making page error: %s', pid, e)
                else:
                    raise MakedocxException(f'Error when make page {pid}: {e}')

        # save docx
        docx_file.save(filename_or_stream)

    # -----------------------------------------------------------------------
    # Store / restore parsed results
    # -----------------------------------------------------------------------
    def store(self):
        '''Store parsed pages in dict format.'''
        return {
            'filename': os.path.basename(self.filename_pdf),
            'page_cnt': len(self._pages),  # count of all pages
            'pages'   : [page.store() for page in self._pages if page.finalized],  # parsed pages only
        }

    def restore(self, data:dict):
        '''Restore pages from parsed results.'''
        # init empty pages if necessary
        if not self._pages:
            num = data.get('page_cnt', 100)
            self._pages.reset([Page(id=i, skip_parsing=True) for i in range(num)])

        # restore pages
        for raw_page in data.get('pages', []):
            idx = raw_page.get('id', -1)
            self._pages[idx].restore(raw_page)

    def serialize(self, filename:str):
        '''Write parsed pages to specified JSON file.'''
        with open(filename, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.store(), indent=4))

    def deserialize(self, filename:str):
        '''Load parsed pages from specified JSON file.'''
        # NOTE(review): no explicit encoding here, unlike serialize() which
        # writes utf-8 — may mis-decode on platforms with a non-utf-8 default
        with open(filename, 'r') as f:
            data = json.load(f)
        self.restore(data)

    # -----------------------------------------------------------------------
    # high level methods, e.g. convert, extract table
    # -----------------------------------------------------------------------
    def debug_page(self, i:int, docx_filename:str=None, debug_pdf:str=None, layout_file:str=None, **kwargs):
        '''Parse, create and plot single page for debug purpose.

        Args:
            i (int): Page index to convert.
            docx_filename (str): docx filename to write to.
            debug_pdf (str): New pdf file storing layout information. Default to add prefix ``debug_``.
            layout_file (str): New json file storing parsed layout data. Default to ``layout.json``.
        '''
        # include debug information
        # fitz object in debug mode: plot page layout
        # file path for this debug pdf: demo.pdf -> debug_demo.pdf
        path, filename = os.path.split(self.filename_pdf)
        if not debug_pdf:
            # NOTE(review): this f-string has no placeholder, so the default is a
            # constant name and 'filename' above is unused — likely intended to
            # be f'debug_{filename}' per the docstring; confirm
            debug_pdf = os.path.join(path, f'debug_(unknown)')
        if not layout_file:
            layout_file = os.path.join(path, 'layout.json')
        kwargs.update({
            'debug'         : True,
            'debug_doc'     : fitz.Document(),
            'debug_filename': debug_pdf
        })

        # parse and create docx
        self.convert(docx_filename, pages=[i], **kwargs)

        # layout information for debugging
        self.serialize(layout_file)

    def convert(self, docx_filename: Union[str, IO[AnyStr]] = None, start: int = 0,
                end: int = None, pages: list = None, **kwargs):
        """Convert specified PDF pages to docx file.

        Args:
            docx_filename (str, file-like, optional): docx file to write. Defaults to None.
            start (int, optional): First page to process. Defaults to 0, the first page.
            end (int, optional): Last page to process. Defaults to None, the last page.
            pages (list, optional): Range of page indexes. Defaults to None.
            kwargs (dict, optional): Configuration parameters. Defaults to None.

        Refer to :py:meth:`~pdf2docx.converter.Converter.default_settings` for
        detail of configuration parameters.

        .. note::
            Change extension from ``pdf`` to ``docx`` if ``docx_file`` is None.

        .. note::
            * ``start`` and ``end`` is counted from zero if ``--zero_based_index=True`` (by default).
            * Start from the first page if ``start`` is omitted.
            * End with the last page if ``end`` is omitted.

        .. note::
            ``pages`` has a higher priority than ``start`` and ``end``. ``start``
            and ``end`` works only if ``pages`` is omitted.

        .. note::
            Multi-processing works only for continuous pages specified by ``start``
            and ``end`` only.
        """
        t0 = perf_counter()
        logging.info('Start to convert %s', self.filename_pdf)
        settings = self.default_settings
        settings.update(kwargs)

        # input check: multi-processing segments by (start, end) only
        if pages and settings['multi_processing']:
            raise ConversionException('Multi-processing works for continuous pages '
                                      'specified by "start" and "end" only.')

        # convert page by page
        if settings['multi_processing']:
            self._convert_with_multi_processing(docx_filename, start, end, **settings)
        else:
            self.parse(start, end, pages, **settings).make_docx(docx_filename, **settings)

        logging.info('Terminated in %.2fs.', perf_counter()-t0)

    def extract_tables(self, start:int=0, end:int=None, pages:list=None, **kwargs):
        '''Extract table contents from specified PDF pages.

        Args:
            start (int, optional): First page to process. Defaults to 0, the first page.
            end (int, optional): Last page to process. Defaults to None, the last page.
            pages (list, optional): Range of page indexes. Defaults to None.
            kwargs (dict, optional): Configuration parameters. Defaults to None.

        Returns:
            list: A list of parsed table content.
        '''
        # parsing pages first
        settings = self.default_settings
        settings.update(kwargs)
        self.parse(start, end, pages, **settings)

        # get parsed tables
        tables = []
        for page in self._pages:
            if page.finalized:
                tables.extend(page.extract_tables(**settings))
        return tables

    def _convert_with_multi_processing(self, docx_filename:str, start:int, end:int, **kwargs):
        '''Parse and create pages based on page indexes with multi-processing.

        Reference: https://pymupdf.readthedocs.io/en/latest/faq.html#multiprocessing
        '''
        # make vectors of arguments for the processes
        cpu = min(kwargs['cpu_count'], cpu_count()) if kwargs['cpu_count'] else cpu_count()
        prefix = 'pages'  # json file writing parsed pages per process
        vectors = [(i, cpu, start, end, self.filename_pdf, self.password, kwargs,
                    f'{prefix}-{i}.json') for i in range(cpu)]

        # start parsing processes
        # NOTE(review): the Pool is never closed/joined — confirm whether it
        # should be wrapped in a context manager
        pool = Pool()
        pool.map(self._parse_pages_per_cpu, vectors, 1)

        # restore parsed page data
        for i in range(cpu):
            filename = f'{prefix}-{i}.json'
            if not os.path.exists(filename): continue

            self.deserialize(filename)
            os.remove(filename)

        # create docx file
        self.make_docx(docx_filename, **kwargs)

    @staticmethod
    def _parse_pages_per_cpu(vector):
        '''Render a page range of a document.

        Args:
            vector (list): A list containing required parameters.

                * 0  : segment number for current process
                * 1  : count of CPUs
                * 2,3: whole pages range to process
                * 4  : pdf filename
                * 5  : password for encrypted pdf
                * 6  : configuration parameters
                * 7  : json filename storing parsed results
        '''
        # recreate the arguments
        idx, cpu, s, e, pdf_filename, password, kwargs, json_filename = vector

        # open pdf to get page count: all pages are marked to parse temporarily
        # since don't know which pages to parse for this moment
        cv = Converter(pdf_filename, password)
        cv.load_pages()

        # the specified pages to process
        e = e or len(cv.fitz_doc)
        all_indexes = range(s, e)
        num_pages = len(all_indexes)

        # page segment processed by this cpu: the first (num_pages % cpu)
        # workers each take one extra page
        m = int(num_pages/cpu)
        n = num_pages % cpu
        seg_size = m + int(idx<n)
        seg_from = (m+1)*idx + min(n-idx, 0)
        seg_to = min(seg_from + seg_size, num_pages)
        page_indexes = [all_indexes[i] for i in range(seg_from, seg_to)]

        # now, mark the right pages
        for page in cv.pages:
            page.skip_parsing = True
        for i in page_indexes:
            cv.pages[i].skip_parsing = False

        # parse pages and serialize data for further processing
        cv.parse_document(**kwargs) \
          .parse_pages(**kwargs) \
          .serialize(json_filename)
        cv.close()

    @staticmethod
    def _page_indexes(start, end, pages, pdf_len):
        '''Parsing arguments.'''
        if pages:
            indexes = [int(x) for x in pages]
        else:
            end = end or pdf_len
            s = slice(int(start), int(end))
            indexes = range(pdf_len)[s]
        return indexes

    @staticmethod
    def _color_output(msg):
        # wrap message in ANSI bold-cyan escape codes for terminal emphasis
        return f'\033[1;36m{msg}\033[0m'
(pdf_file: str = None, password: str = None, stream: bytes = None)
43,851
pdf2docx.converter
__init__
Initialize fitz object with given pdf file path. Args: pdf_file (str): pdf file path. stream (bytes): pdf file in memory. password (str): Password for encrypted pdf. Default to None if not encrypted.
def __init__(
    self, pdf_file: str = None, password: str = None, stream: bytes = None
):
    '''Initialize fitz object with given pdf file path.

    Args:
        pdf_file (str): pdf file path.
        stream (bytes): pdf file in memory.
        password (str): Password for encrypted pdf. Default to None if not encrypted.

    Raises:
        ValueError: neither ``pdf_file`` nor ``stream`` is supplied.
    '''
    # remember the source path and credentials
    self.filename_pdf = pdf_file
    self.password = str(password or "")

    if not pdf_file and not stream:
        raise ValueError("Either pdf_file or stream must be given.")

    # open with PyMuPDF; an in-memory stream takes precedence over a path
    self._fitz_doc = fitz.Document(stream=stream) if stream \
        else fitz.Document(pdf_file)

    # parsed pages container, populated later by load_pages()
    self._pages = Pages()
(self, pdf_file: Optional[str] = None, password: Optional[str] = None, stream: Optional[bytes] = None)
43,852
pdf2docx.converter
_color_output
null
@staticmethod
def _color_output(msg):
    '''Return *msg* wrapped in ANSI bold-cyan escape codes.'''
    bold_cyan = '\033[1;36m'
    reset = '\033[0m'
    return f'{bold_cyan}{msg}{reset}'
(msg)
43,853
pdf2docx.converter
_convert_with_multi_processing
Parse and create pages based on page indexes with multi-processing. Reference: https://pymupdf.readthedocs.io/en/latest/faq.html#multiprocessing
def _convert_with_multi_processing(self, docx_filename:str, start:int, end:int, **kwargs):
    '''Parse and create pages based on page indexes with multi-processing.

    Reference: https://pymupdf.readthedocs.io/en/latest/faq.html#multiprocessing

    Args:
        docx_filename (str): docx file to write.
        start (int): First page to process.
        end (int): Last page to process.
        kwargs (dict): Configuration parameters.
    '''
    # make vectors of arguments for the processes
    cpu = min(kwargs['cpu_count'], cpu_count()) if kwargs['cpu_count'] else cpu_count()
    prefix = 'pages'  # json file writing parsed pages per process
    vectors = [(i, cpu, start, end, self.filename_pdf, self.password, kwargs,
                f'{prefix}-{i}.json') for i in range(cpu)]

    # start parsing processes; the context manager terminates the worker
    # pool deterministically instead of leaking it as a bare Pool() did
    with Pool() as pool:
        pool.map(self._parse_pages_per_cpu, vectors, 1)

    # restore parsed page data written by each worker, then clean up
    for i in range(cpu):
        filename = f'{prefix}-{i}.json'
        if not os.path.exists(filename): continue

        self.deserialize(filename)
        os.remove(filename)

    # create docx file
    self.make_docx(docx_filename, **kwargs)
(self, docx_filename: str, start: int, end: int, **kwargs)
43,854
pdf2docx.converter
_page_indexes
Parsing arguments.
@staticmethod
def _page_indexes(start, end, pages, pdf_len):
    '''Resolve the page indexes to process.

    An explicit ``pages`` list wins; otherwise slice ``[start, end)`` out of
    the whole document, with ``end`` defaulting to the page count.
    '''
    if pages:
        return [int(p) for p in pages]
    stop = end or pdf_len
    return range(pdf_len)[int(start):int(stop)]
(start, end, pages, pdf_len)
43,855
pdf2docx.converter
_parse_pages_per_cpu
Render a page range of a document. Args: vector (list): A list containing required parameters. * 0 : segment number for current process * 1 : count of CPUs * 2,3: whole pages range to process * 4 : pdf filename * 5 : password for encrypted pdf * 6 : configuration parameters * 7 : json filename storing parsed results
@staticmethod
def _parse_pages_per_cpu(vector):
    '''Render a page range of a document.

    Args:
        vector (list): A list containing required parameters.

            * 0  : segment number for current process
            * 1  : count of CPUs
            * 2,3: whole pages range to process
            * 4  : pdf filename
            * 5  : password for encrypted pdf
            * 6  : configuration parameters
            * 7  : json filename storing parsed results
    '''
    # recreate the arguments
    idx, cpu, s, e, pdf_filename, password, kwargs, json_filename = vector

    # open pdf to get page count: all pages are marked to parse temporarily
    # since don't know which pages to parse for this moment
    cv = Converter(pdf_filename, password)
    cv.load_pages()

    # the specified pages to process
    e = e or len(cv.fitz_doc)
    all_indexes = range(s, e)
    num_pages = len(all_indexes)

    # page segment processed by this cpu: pages are split as evenly as
    # possible, the first (num_pages % cpu) workers taking one extra page
    m = int(num_pages/cpu)
    n = num_pages % cpu
    seg_size = m + int(idx<n)
    seg_from = (m+1)*idx + min(n-idx, 0)
    seg_to = min(seg_from + seg_size, num_pages)
    page_indexes = [all_indexes[i] for i in range(seg_from, seg_to)]

    # now, mark the right pages: only this worker's segment is parsed
    for page in cv.pages:
        page.skip_parsing = True
    for i in page_indexes:
        cv.pages[i].skip_parsing = False

    # parse pages and serialize data for further processing
    cv.parse_document(**kwargs) \
      .parse_pages(**kwargs) \
      .serialize(json_filename)
    cv.close()
(vector)
43,856
pdf2docx.converter
close
null
def close(self):
    '''Close the underlying fitz (PyMuPDF) document and release its resources.'''
    self._fitz_doc.close()
(self)
43,857
pdf2docx.converter
convert
Convert specified PDF pages to docx file. Args: docx_filename (str, file-like, optional): docx file to write. Defaults to None. start (int, optional): First page to process. Defaults to 0, the first page. end (int, optional): Last page to process. Defaults to None, the last page. pages (list, optional): Range of page indexes. Defaults to None. kwargs (dict, optional): Configuration parameters. Defaults to None. Refer to :py:meth:`~pdf2docx.converter.Converter.default_settings` for detail of configuration parameters. .. note:: Change extension from ``pdf`` to ``docx`` if ``docx_file`` is None. .. note:: * ``start`` and ``end`` is counted from zero if ``--zero_based_index=True`` (by default). * Start from the first page if ``start`` is omitted. * End with the last page if ``end`` is omitted. .. note:: ``pages`` has a higher priority than ``start`` and ``end``. ``start`` and ``end`` works only if ``pages`` is omitted. .. note:: Multi-processing works only for continuous pages specified by ``start`` and ``end`` only.
def convert(self, docx_filename: Union[str, IO[AnyStr]] = None, start: int = 0,
            end: int = None, pages: list = None, **kwargs):
    """Convert specified PDF pages to docx file.

    Args:
        docx_filename (str, file-like, optional): docx file to write. Defaults to None.
        start (int, optional): First page to process. Defaults to 0, the first page.
        end (int, optional): Last page to process. Defaults to None, the last page.
        pages (list, optional): Range of page indexes. Defaults to None.
        kwargs (dict, optional): Configuration parameters. Defaults to None.

    Refer to :py:meth:`~pdf2docx.converter.Converter.default_settings` for detail
    of configuration parameters.

    .. note::
        Change extension from ``pdf`` to ``docx`` if ``docx_file`` is None.

    .. note::
        * ``start`` and ``end`` is counted from zero if ``--zero_based_index=True`` (by default).
        * Start from the first page if ``start`` is omitted.
        * End with the last page if ``end`` is omitted.

    .. note::
        ``pages`` has a higher priority than ``start`` and ``end``. ``start``
        and ``end`` works only if ``pages`` is omitted.

    .. note::
        Multi-processing works only for continuous pages specified by ``start``
        and ``end`` only.
    """
    t0 = perf_counter()
    logging.info('Start to convert %s', self.filename_pdf)
    # merge user options over the defaults
    settings = self.default_settings
    settings.update(kwargs)

    # input check: multi-processing can only segment a contiguous range
    if pages and settings['multi_processing']:
        raise ConversionException('Multi-processing works for continuous pages '
                                  'specified by "start" and "end" only.')

    # convert page by page
    if settings['multi_processing']:
        self._convert_with_multi_processing(docx_filename, start, end, **settings)
    else:
        self.parse(start, end, pages, **settings).make_docx(docx_filename, **settings)

    logging.info('Terminated in %.2fs.', perf_counter()-t0)
(self, docx_filename: Union[str, IO[~AnyStr], NoneType] = None, start: int = 0, end: Optional[int] = None, pages: Optional[list] = None, **kwargs)
43,858
pdf2docx.converter
debug_page
Parse, create and plot single page for debug purpose. Args: i (int): Page index to convert. docx_filename (str): docx filename to write to. debug_pdf (str): New pdf file storing layout information. Default to add prefix ``debug_``. layout_file (str): New json file storing parsed layout data. Default to ``layout.json``.
def debug_page(self, i:int, docx_filename:str=None, debug_pdf:str=None, layout_file:str=None, **kwargs):
    '''Parse, create and plot single page for debug purpose.

    Args:
        i (int): Page index to convert.
        docx_filename (str): docx filename to write to.
        debug_pdf (str): New pdf file storing layout information. Default to add prefix ``debug_``.
        layout_file (str): New json file storing parsed layout data. Default to ``layout.json``.
    '''
    # default debug artifacts live next to the source pdf:
    # demo.pdf -> debug_demo.pdf and layout.json
    path, filename = os.path.split(self.filename_pdf)
    if not debug_pdf:
        # BUG FIX: prefix the actual source filename; the previous literal had
        # no placeholder, producing a constant name and leaving 'filename' unused
        debug_pdf = os.path.join(path, f'debug_{filename}')
    if not layout_file:
        layout_file = os.path.join(path, 'layout.json')

    # enable debug mode: plot page layout into a dedicated fitz document
    kwargs.update({
        'debug'         : True,
        'debug_doc'     : fitz.Document(),
        'debug_filename': debug_pdf
    })

    # parse and create docx
    self.convert(docx_filename, pages=[i], **kwargs)

    # layout information for debugging
    self.serialize(layout_file)
(self, i: int, docx_filename: Optional[str] = None, debug_pdf: Optional[str] = None, layout_file: Optional[str] = None, **kwargs)
43,859
pdf2docx.converter
deserialize
Load parsed pages from specified JSON file.
def deserialize(self, filename:str):
    '''Load parsed pages from specified JSON file.

    Args:
        filename (str): path of the JSON file written by :py:meth:`serialize`.
    '''
    # read as utf-8 explicitly to mirror serialize(), which writes utf-8;
    # relying on the platform default encoding breaks on e.g. cp125x locales
    with open(filename, 'r', encoding='utf-8') as f:
        data = json.load(f)
    self.restore(data)
(self, filename: str)
43,860
pdf2docx.converter
extract_tables
Extract table contents from specified PDF pages. Args: start (int, optional): First page to process. Defaults to 0, the first page. end (int, optional): Last page to process. Defaults to None, the last page. pages (list, optional): Range of page indexes. Defaults to None. kwargs (dict, optional): Configuration parameters. Defaults to None. Returns: list: A list of parsed table content.
def extract_tables(self, start:int=0, end:int=None, pages:list=None, **kwargs):
    '''Extract table contents from specified PDF pages.

    Args:
        start (int, optional): First page to process. Defaults to 0, the first page.
        end (int, optional): Last page to process. Defaults to None, the last page.
        pages (list, optional): Range of page indexes. Defaults to None.
        kwargs (dict, optional): Configuration parameters. Defaults to None.

    Returns:
        list: A list of parsed table content.
    '''
    # merge user options over the defaults, then parse the requested pages
    settings = self.default_settings
    settings.update(kwargs)
    self.parse(start, end, pages, **settings)

    # collect tables from every finalized page
    tables = []
    for parsed in filter(lambda page: page.finalized, self._pages):
        tables.extend(parsed.extract_tables(**settings))
    return tables
(self, start: int = 0, end: Optional[int] = None, pages: Optional[list] = None, **kwargs)
43,861
pdf2docx.converter
load_pages
Step 1 of converting process: open PDF file with ``PyMuPDF``, especially for password encrypted file. Args: start (int, optional): First page to process. Defaults to 0, the first page. end (int, optional): Last page to process. Defaults to None, the last page. pages (list, optional): Range of page indexes to parse. Defaults to None.
def load_pages(self, start:int=0, end:int=None, pages:list=None):
    '''Step 1 of converting process: open PDF file with ``PyMuPDF``,
    especially for password encrypted file.

    Args:
        start (int, optional): First page to process. Defaults to 0, the first page.
        end (int, optional): Last page to process. Defaults to None, the last page.
        pages (list, optional): Range of page indexes to parse. Defaults to None.
    '''
    logging.info(self._color_output('[1/4] Opening document...'))

    # authenticate first when the document is password protected
    doc = self._fitz_doc
    if doc.needs_pass:
        if not self.password:
            raise ConversionException(f'Require password for {self.filename_pdf}.')
        elif not doc.authenticate(self.password):
            raise ConversionException('Incorrect password.')

    # create one placeholder Page per pdf page, all skipped by default
    num = len(doc)
    self._pages.reset([Page(id=i, skip_parsing=True) for i in range(num)])

    # then un-skip exactly the pages selected for parsing
    for i in self._page_indexes(start, end, pages, num):
        self._pages[i].skip_parsing = False

    return self
(self, start: int = 0, end: Optional[int] = None, pages: Optional[list] = None)
43,862
pdf2docx.converter
make_docx
Step 4 of converting process: create docx file with converted pages. Args: filename_or_stream (str, file-like): docx file to write. kwargs (dict, optional): Configuration parameters.
def make_docx(self, filename_or_stream=None, **kwargs):
    '''Step 4 of converting process: create docx file with converted pages.

    Args:
        filename_or_stream (str, file-like): docx file to write.
        kwargs (dict, optional): Configuration parameters.
    '''
    logging.info(self._color_output('[4/4] Creating pages...'))

    # check parsed pages
    parsed_pages = list(filter(
        lambda page: page.finalized, self._pages
    ))
    if not parsed_pages:
        raise ConversionException('No parsed pages. Please parse page first.')

    # if no target given, derive "<pdf name>.docx" from the source path
    if not filename_or_stream:
        if self.filename_pdf:
            filename_or_stream = f'{self.filename_pdf[0:-len(".pdf")]}.docx'
            # remove existing file
            if os.path.exists(filename_or_stream):
                os.remove(filename_or_stream)
        else:
            raise ConversionException("Please specify a docx file name or a file-like object to write.")

    # create page by page
    docx_file = Document()
    num_pages = len(parsed_pages)
    for i, page in enumerate(parsed_pages, start=1):
        if not page.finalized: continue  # ignore unparsed pages
        pid = page.id + 1
        logging.info('(%d/%d) Page %d', i, num_pages, pid)
        try:
            page.make_docx(docx_file)
        except Exception as e:
            # NOTE(review): assumes 'debug' and 'ignore_page_error' keys exist —
            # true when called via convert() which merges default_settings; a
            # direct call without them raises KeyError here; confirm intended
            if not kwargs['debug'] and kwargs['ignore_page_error']:
                logging.error('Ignore page %d due to making page error: %s', pid, e)
            else:
                raise MakedocxException(f'Error when make page {pid}: {e}')

    # save docx
    docx_file.save(filename_or_stream)
(self, filename_or_stream=None, **kwargs)
43,863
pdf2docx.converter
parse
Parse pages in three steps: * open PDF file with ``PyMuPDF`` * analyze whole document, e.g. page section, header/footer and margin * parse specified pages, e.g. paragraph, image and table Args: start (int, optional): First page to process. Defaults to 0, the first page. end (int, optional): Last page to process. Defaults to None, the last page. pages (list, optional): Range of page indexes to parse. Defaults to None. kwargs (dict, optional): Configuration parameters.
def parse(self, start:int=0, end:int=None, pages:list=None, **kwargs):
    '''Parse pages in three steps:

    * open PDF file with ``PyMuPDF``
    * analyze whole document, e.g. page section, header/footer and margin
    * parse specified pages, e.g. paragraph, image and table

    Args:
        start (int, optional): First page to process. Defaults to 0, the first page.
        end (int, optional): Last page to process. Defaults to None, the last page.
        pages (list, optional): Range of page indexes to parse. Defaults to None.
        kwargs (dict, optional): Configuration parameters.

    Returns:
        Converter: self, so the call chains into :py:meth:`make_docx`.
    '''
    # each step returns self, allowing the fluent chain below
    return self.load_pages(start, end, pages) \
                .parse_document(**kwargs) \
                .parse_pages(**kwargs)
(self, start: int = 0, end: Optional[int] = None, pages: Optional[list] = None, **kwargs)
43,864
pdf2docx.converter
parse_document
Step 2 of converting process: analyze whole document, e.g. page section, header/footer and margin.
def parse_document(self, **kwargs):
    '''Step 2 of converting process: analyze whole document, e.g. page section,
    header/footer and margin.

    Returns:
        Converter: self, for call chaining.
    '''
    logging.info(self._color_output('[2/4] Analyzing document...'))

    # document-level analysis is delegated to the pages collection
    self._pages.parse(self.fitz_doc, **kwargs)
    return self
(self, **kwargs)
43,865
pdf2docx.converter
parse_pages
Step 3 of converting process: parse pages, e.g. paragraph, image and table.
def parse_pages(self, **kwargs):
    '''Step 3 of converting process: parse pages, e.g. paragraph, image and table.

    Args:
        kwargs (dict, optional): Configuration parameters.

    Returns:
        Converter: self, for call chaining.
    '''
    logging.info(self._color_output('[3/4] Parsing pages...'))

    # only pages selected by load_pages() are parsed
    pages = [page for page in self._pages if not page.skip_parsing]
    num_pages = len(pages)
    for i, page in enumerate(pages, start=1):
        pid = page.id + 1
        logging.info('(%d/%d) Page %d', i, num_pages, pid)
        try:
            page.parse(**kwargs)
        except Exception as e:
            # use the documented defaults so a direct call without the full
            # settings dict no longer raises KeyError while handling a failure
            if not kwargs.get('debug', False) and kwargs.get('ignore_page_error', True):
                logging.error('Ignore page %d due to parsing page error: %s', pid, e)
            else:
                raise ConversionException(f'Error when parsing page {pid}: {e}')

    return self
(self, **kwargs)
43,866
pdf2docx.converter
restore
Restore pages from parsed results.
def restore(self, data:dict):
    '''Restore pages from parsed results.'''
    # create placeholder pages first when this converter has none yet
    if not self._pages:
        count = data.get('page_cnt', 100)
        self._pages.reset([Page(id=i, skip_parsing=True) for i in range(count)])

    # apply each stored page onto the slot identified by its id
    for raw in data.get('pages', []):
        self._pages[raw.get('id', -1)].restore(raw)
(self, data: dict)
43,867
pdf2docx.converter
serialize
Write parsed pages to specified JSON file.
def serialize(self, filename:str):
    '''Write parsed pages to specified JSON file.'''
    # dump straight into the file handle; output is identical to
    # writing json.dumps(..., indent=4) manually
    with open(filename, 'w', encoding='utf-8') as f:
        json.dump(self.store(), f, indent=4)
(self, filename: str)
43,868
pdf2docx.converter
store
Store parsed pages in dict format.
def store(self): '''Store parsed pages in dict format.''' return { 'filename': os.path.basename(self.filename_pdf), 'page_cnt': len(self._pages), # count of all pages 'pages' : [page.store() for page in self._pages if page.finalized], # parsed pages only }
(self)
43,869
pdf2docx.page.Page
Page
Object representing the whole page, e.g. margins, sections.
class Page(BasePage): '''Object representing the whole page, e.g. margins, sections.''' def __init__(self, id:int=-1, skip_parsing:bool=True, width:float=0.0, height:float=0.0, header:str=None, footer:str=None, margin:tuple=None, sections:Sections=None, float_images:BaseCollection=None): '''Initialize page layout. Args: id (int, optional): Page index. Defaults to -1. skip_parsing (bool, optional): Don't parse page if True. Defaults to True. width (float, optional): Page width. Defaults to 0.0. height (float, optional): Page height. Defaults to 0.0. header (str, optional): Page header. Defaults to None. footer (str, optional): Page footer. Defaults to None. margin (tuple, optional): Page margin. Defaults to None. sections (Sections, optional): Page contents. Defaults to None. float_images (BaseCollection, optional): Float images in th is page. Defaults to None. ''' # page index self.id = id self.skip_parsing = skip_parsing # page size and margin super().__init__(width=width, height=height, margin=margin) # flow structure: # Section -> Column -> Blocks -> TextBlock/TableBlock # TableBlock -> Row -> Cell -> Blocks self.sections = sections or Sections(parent=self) # page header, footer self.header = header or '' self.footer = footer or '' # floating images are separate node under page self.float_images = float_images or BaseCollection() self._finalized = False @property def finalized(self): return self._finalized def store(self): '''Store parsed layout in dict format.''' res = { 'id' : self.id, 'width' : self.width, 'height' : self.height, 'margin' : self.margin, 'sections': self.sections.store(), 'header' : self.header, 'footer' : self.footer, 'floats' : self.float_images.store() } return res def restore(self, data:dict): '''Restore Layout from parsed results.''' # page id self.id = data.get('id', -1) # page width/height self.width = data.get('width', 0.0) self.height = data.get('height', 0.0) self.margin = data.get('margin', (0,) * 4) # parsed layout 
self.sections.restore(data.get('sections', [])) self.header = data.get('header', '') self.footer = data.get('footer', '') # float images self._restore_float_images(data.get('floats', [])) # Suppose layout is finalized when restored; otherwise, set False explicitly # out of this method. self._finalized = True return self @debug_plot('Final Layout') def parse(self, **settings): '''Parse page layout.''' self.sections.parse(**settings) self._finalized = True return self.sections # for debug plot def extract_tables(self, **settings): '''Extract content from tables (top layout only). .. note:: Before running this method, the page layout must be either parsed from source page or restored from parsed data. ''' # table blocks collections = [] for section in self.sections: for column in section: if settings['extract_stream_table']: collections.extend(column.blocks.table_blocks) else: collections.extend(column.blocks.lattice_table_blocks) # check table tables = [] # type: list[ list[list[str]] ] for table_block in collections: tables.append(table_block.text) return tables def make_docx(self, doc): '''Set page size, margin, and create page. .. note:: Before running this method, the page layout must be either parsed from source page or restored from parsed data. 
Args: doc (Document): ``python-docx`` document object ''' # new page if doc.paragraphs: section = doc.add_section(WD_SECTION.NEW_PAGE) else: section = doc.sections[0] # a default section is there when opening docx # page size section.page_width = Pt(self.width) section.page_height = Pt(self.height) # page margin left,right,top,bottom = self.margin section.left_margin = Pt(left) section.right_margin = Pt(right) section.top_margin = Pt(top) section.bottom_margin = Pt(bottom) # create flow layout: sections self.sections.make_docx(doc) def _restore_float_images(self, raws:list): '''Restore float images.''' self.float_images.reset() for raw in raws: image = ImageBlock(raw) image.set_float_image_block() self.float_images.append(image)
(id: int = -1, skip_parsing: bool = True, width: float = 0.0, height: float = 0.0, header: str = None, footer: str = None, margin: tuple = None, sections: pdf2docx.layout.Sections.Sections = None, float_images: pdf2docx.common.Collection.BaseCollection = None)
43,870
pdf2docx.page.Page
__init__
Initialize page layout. Args: id (int, optional): Page index. Defaults to -1. skip_parsing (bool, optional): Don't parse page if True. Defaults to True. width (float, optional): Page width. Defaults to 0.0. height (float, optional): Page height. Defaults to 0.0. header (str, optional): Page header. Defaults to None. footer (str, optional): Page footer. Defaults to None. margin (tuple, optional): Page margin. Defaults to None. sections (Sections, optional): Page contents. Defaults to None. float_images (BaseCollection, optional): Float images in th is page. Defaults to None.
def __init__(self, id:int=-1, skip_parsing:bool=True, width:float=0.0, height:float=0.0, header:str=None, footer:str=None, margin:tuple=None, sections:Sections=None, float_images:BaseCollection=None): '''Initialize page layout. Args: id (int, optional): Page index. Defaults to -1. skip_parsing (bool, optional): Don't parse page if True. Defaults to True. width (float, optional): Page width. Defaults to 0.0. height (float, optional): Page height. Defaults to 0.0. header (str, optional): Page header. Defaults to None. footer (str, optional): Page footer. Defaults to None. margin (tuple, optional): Page margin. Defaults to None. sections (Sections, optional): Page contents. Defaults to None. float_images (BaseCollection, optional): Float images in th is page. Defaults to None. ''' # page index self.id = id self.skip_parsing = skip_parsing # page size and margin super().__init__(width=width, height=height, margin=margin) # flow structure: # Section -> Column -> Blocks -> TextBlock/TableBlock # TableBlock -> Row -> Cell -> Blocks self.sections = sections or Sections(parent=self) # page header, footer self.header = header or '' self.footer = footer or '' # floating images are separate node under page self.float_images = float_images or BaseCollection() self._finalized = False
(self, id: int = -1, skip_parsing: bool = True, width: float = 0.0, height: float = 0.0, header: Optional[str] = None, footer: Optional[str] = None, margin: Optional[tuple] = None, sections: Optional[pdf2docx.layout.Sections.Sections] = None, float_images: Optional[pdf2docx.common.Collection.BaseCollection] = None)
43,871
pdf2docx.page.Page
_restore_float_images
Restore float images.
def _restore_float_images(self, raws:list): '''Restore float images.''' self.float_images.reset() for raw in raws: image = ImageBlock(raw) image.set_float_image_block() self.float_images.append(image)
(self, raws: list)
43,872
pdf2docx.page.Page
extract_tables
Extract content from tables (top layout only). .. note:: Before running this method, the page layout must be either parsed from source page or restored from parsed data.
def extract_tables(self, **settings): '''Extract content from tables (top layout only). .. note:: Before running this method, the page layout must be either parsed from source page or restored from parsed data. ''' # table blocks collections = [] for section in self.sections: for column in section: if settings['extract_stream_table']: collections.extend(column.blocks.table_blocks) else: collections.extend(column.blocks.lattice_table_blocks) # check table tables = [] # type: list[ list[list[str]] ] for table_block in collections: tables.append(table_block.text) return tables
(self, **settings)
43,873
pdf2docx.page.Page
make_docx
Set page size, margin, and create page. .. note:: Before running this method, the page layout must be either parsed from source page or restored from parsed data. Args: doc (Document): ``python-docx`` document object
def make_docx(self, doc): '''Set page size, margin, and create page. .. note:: Before running this method, the page layout must be either parsed from source page or restored from parsed data. Args: doc (Document): ``python-docx`` document object ''' # new page if doc.paragraphs: section = doc.add_section(WD_SECTION.NEW_PAGE) else: section = doc.sections[0] # a default section is there when opening docx # page size section.page_width = Pt(self.width) section.page_height = Pt(self.height) # page margin left,right,top,bottom = self.margin section.left_margin = Pt(left) section.right_margin = Pt(right) section.top_margin = Pt(top) section.bottom_margin = Pt(bottom) # create flow layout: sections self.sections.make_docx(doc)
(self, doc)
43,874
pdf2docx.common.share
inner
null
def debug_plot(title:str, show=True): '''Plot the returned objects of inner function. Args: title (str): Page title. show (bool, optional): Don't plot if show==False. Default to True. .. note:: Prerequisite of the inner function: - the first argument is a :py:class:`~pdf2docx.page.BasePage` instance. - the last argument is configuration parameters in ``dict`` type. ''' def wrapper(func): def inner(*args, **kwargs): # execute function objects = func(*args, **kwargs) # check if plot page page = args[0] # BasePage object debug = kwargs.get('debug', False) doc = kwargs.get('debug_doc', None) filename = kwargs.get('debug_filename', None) if show and objects and debug and doc is not None: # create a new page debug_page = new_page(doc, page.width, page.height, title) # plot objects, e.g. text blocks, shapes, tables... objects.plot(debug_page) doc.save(filename) return objects return inner return wrapper
(*args, **kwargs)
43,875
pdf2docx.page.Page
restore
Restore Layout from parsed results.
def restore(self, data:dict): '''Restore Layout from parsed results.''' # page id self.id = data.get('id', -1) # page width/height self.width = data.get('width', 0.0) self.height = data.get('height', 0.0) self.margin = data.get('margin', (0,) * 4) # parsed layout self.sections.restore(data.get('sections', [])) self.header = data.get('header', '') self.footer = data.get('footer', '') # float images self._restore_float_images(data.get('floats', [])) # Suppose layout is finalized when restored; otherwise, set False explicitly # out of this method. self._finalized = True return self
(self, data: dict)
43,876
pdf2docx.page.Page
store
Store parsed layout in dict format.
def store(self): '''Store parsed layout in dict format.''' res = { 'id' : self.id, 'width' : self.width, 'height' : self.height, 'margin' : self.margin, 'sections': self.sections.store(), 'header' : self.header, 'footer' : self.footer, 'floats' : self.float_images.store() } return res
(self)
43,884
pdf2docx.main
convert
Convert pdf file to docx file. Args: pdf_file (str) : PDF filename to read from. docx_file (str, optional): docx filename to write to. Defaults to None. password (str): Password for encrypted pdf. Default to None if not encrypted. start (int, optional): First page to process. Defaults to 0. end (int, optional): Last page to process. Defaults to None. pages (list, optional): Range of pages, e.g. --pages=1,3,5. Defaults to None. kwargs (dict) : Configuration parameters. .. note:: Refer to :py:meth:`~pdf2docx.converter.Converter.convert` for detailed description on above arguments.
@staticmethod def convert(pdf_file:str, docx_file:str=None, password:str=None, start:int=0, end:int=None, pages:list=None, **kwargs): '''Convert pdf file to docx file. Args: pdf_file (str) : PDF filename to read from. docx_file (str, optional): docx filename to write to. Defaults to None. password (str): Password for encrypted pdf. Default to None if not encrypted. start (int, optional): First page to process. Defaults to 0. end (int, optional): Last page to process. Defaults to None. pages (list, optional): Range of pages, e.g. --pages=1,3,5. Defaults to None. kwargs (dict) : Configuration parameters. .. note:: Refer to :py:meth:`~pdf2docx.converter.Converter.convert` for detailed description on above arguments. ''' # index starts from zero or one if isinstance(pages, int): pages = [pages] # in case --pages=1 if not kwargs.get('zero_based_index', True): start = max(start-1, 0) if end: end -= 1 if pages: pages = [i-1 for i in pages] cv = Converter(pdf_file, password) try: cv.convert(docx_file, start, end, pages, **kwargs) except Exception as e: logging.error(e) finally: cv.close()
(pdf_file: str, docx_file: Optional[str] = None, password: Optional[str] = None, start: int = 0, end: Optional[int] = None, pages: Optional[list] = None, **kwargs)
43,888
pybtex
Engine
null
class Engine(object): def make_bibliography(self, aux_filename, style=None, output_encoding=None, bib_format=None, **kwargs): """ Read the given ``.aux`` file and produce a formatted bibliography using :py:meth:`~.Engine.format_from_files`. :param style: If not ``None``, use this style instead of specified in the ``.aux`` file. """ from pybtex import auxfile if bib_format is None: from pybtex.database.input.bibtex import Parser as bib_format aux_data = auxfile.parse_file(aux_filename, output_encoding) if style is None: style = aux_data.style base_filename = path.splitext(aux_filename)[0] bib_filenames = [filename + bib_format.default_suffix for filename in aux_data.data] return self.format_from_files( bib_filenames, style=aux_data.style, citations=aux_data.citations, output_encoding=output_encoding, output_filename=base_filename, add_output_suffix=True, **kwargs ) def format_from_string(self, bib_string, *args, **kwargs): """ Parse the bigliography data from the given string and produce a formated bibliography using :py:meth:`~.Engine.format_from_files`. This is a convenience method that calls :py:meth:`~.Engine.format_from_strings` with a single string. """ return self.format_from_strings([bib_string], *args, **kwargs) def format_from_strings(self, bib_strings, *args, **kwargs): """ Parse the bigliography data from the given strings and produce a formated bibliography. This is a convenience method that wraps each string into a StringIO, then calls :py:meth:`~.Engine.format_from_files`. """ from io import StringIO inputs = [StringIO(bib_string) for bib_string in bib_strings] return self.format_from_files(inputs, *args, **kwargs) def format_from_file(self, filename, *args, **kwargs): """ Read the bigliography data from the given file and produce a formated bibliography. This is a convenience method that calls :py:meth:`~.Engine.format_from_files` with a single file. All extra arguments are passed to :py:meth:`~.Engine.format_from_files`. 
""" return self.format_from_files([filename], *args, **kwargs) def format_from_files(*args, **kwargs): """ Read the bigliography data from the given files and produce a formated bibliography. This is an abstract method overridden by both :py:class:`pybtex.PybtexEngine` and :py:class:`pybtex.bibtex.BibTeXEngine`. """ raise NotImplementedError
()
43,889
pybtex
format_from_file
Read the bigliography data from the given file and produce a formated bibliography. This is a convenience method that calls :py:meth:`~.Engine.format_from_files` with a single file. All extra arguments are passed to :py:meth:`~.Engine.format_from_files`.
def format_from_file(self, filename, *args, **kwargs): """ Read the bigliography data from the given file and produce a formated bibliography. This is a convenience method that calls :py:meth:`~.Engine.format_from_files` with a single file. All extra arguments are passed to :py:meth:`~.Engine.format_from_files`. """ return self.format_from_files([filename], *args, **kwargs)
(self, filename, *args, **kwargs)