code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
def mount(self):
    """Mounts the base image on a temporary location using the mount method stored in :attr:`method`.

    If mounting was successful, :attr:`mountpoint` is set to the temporary mountpoint. If
    :attr:`read_write` is enabled, a temporary read-write cache is also created and stored
    in :attr:`rwpath`.

    :return: whether the mounting was successful
    :rtype: bool
    """
    if self.parser.casename:
        self.mountpoint = tempfile.mkdtemp(prefix='image_mounter_', suffix='_' + self.parser.casename)
    else:
        self.mountpoint = tempfile.mkdtemp(prefix='image_mounter_')

    if self.read_write:
        self.rwpath = tempfile.mkstemp(prefix="image_mounter_rw_cache_")[1]

    disk_type = self.get_disk_type()
    methods = self._get_mount_methods(disk_type)

    commands = []
    for method in methods:
        if method == 'avfs':  # avfs does not participate in the fallback stuff, unfortunately
            self._mount_avfs()
            self.disk_mounter = method
            self.was_mounted = True
            self.is_mounted = True
            return
        elif method == 'dummy':
            os.rmdir(self.mountpoint)
            self.mountpoint = ""
            logger.debug("Raw path to dummy is {}".format(self.get_raw_path()))
            self.disk_mounter = method
            self.was_mounted = True
            self.is_mounted = True
            return
        elif method == 'xmount':
            commands.append(['xmount', ])
            if self.read_write:
                commands[-1].extend(['--cache', self.rwpath])
            commands[-1].extend(['--in', 'ewf' if disk_type == 'encase' else 'dd'])
            commands[-1].extend(self.paths)  # specify all paths, xmount needs this :(
            commands[-1].append(self.mountpoint)
        elif method == 'affuse':
            commands.extend([['affuse', '-o', 'allow_other', self.paths[0], self.mountpoint],
                             ['affuse', self.paths[0], self.mountpoint]])
        elif method == 'ewfmount':
            commands.extend([['ewfmount', '-X', 'allow_other', self.paths[0], self.mountpoint],
                             ['ewfmount', self.paths[0], self.mountpoint]])
        elif method == 'vmware-mount':
            commands.append(['vmware-mount', '-r', '-f', self.paths[0], self.mountpoint])
        elif method == 'qemu-nbd':
            _util.check_output_(['modprobe', 'nbd', 'max_part=63'])  # Load nbd driver
            try:
                self._paths['nbd'] = _util.get_free_nbd_device()  # Get free nbd device
            except NoNetworkBlockAvailableError:
                logger.warning("No free network block device found.", exc_info=True)
                raise
            commands.extend([['qemu-nbd', '--read-only', '-c', self._paths['nbd'], self.paths[0]]])
        else:
            raise ArgumentError("Unknown mount method {0}".format(self.disk_mounter))

    # Try each candidate command until one mounts successfully.
    for command in commands:
        # noinspection PyBroadException
        try:
            _util.check_call_(command, stdout=subprocess.PIPE)
            # mounting does not seem to be instant, add a timer here
            time.sleep(.1)
        except Exception:
            logger.warning('Could not mount {0}, trying other method'.format(self.paths[0]), exc_info=True)
            continue
        else:
            raw_path = self.get_raw_path()
            logger.debug("Raw path to disk is {}".format(raw_path))
            self.disk_mounter = command[0]
            if raw_path is None:
                raise MountpointEmptyError()
            self.was_mounted = True
            self.is_mounted = True
            return

    # All candidates failed: clean up the temporary mountpoint and bail out.
    logger.error('Unable to mount {0}'.format(self.paths[0]))
    os.rmdir(self.mountpoint)
    self.mountpoint = ""
    raise MountError()
3.423876
3.300155
1.037489
def get_raw_path(self):
    """Returns the raw path to the mounted disk image, i.e. the raw :file:`.dd`, :file:`.raw`
    or :file:`ewf1` file.

    :rtype: str
    """
    if self.disk_mounter == 'dummy':
        return self.paths[0]

    if self.disk_mounter == 'avfs' and os.path.isdir(os.path.join(self.mountpoint, 'avfs')):
        logger.debug("AVFS mounted as a directory, will look in directory for (random) file.")
        # there is no support for disks inside disks, so this will fail to work for zips containing
        # E01 files or so.
        searchdirs = (os.path.join(self.mountpoint, 'avfs'), self.mountpoint)
    else:
        searchdirs = (self.mountpoint, )

    candidates = []
    if self._paths.get('nbd'):
        candidates.append(self._paths['nbd'])
    for searchdir in searchdirs:
        # avfs: apparently it is not a dir
        for pattern in ['*.dd', '*.iso', '*.raw', '*.dmg', 'ewf1', 'flat', 'avfs']:
            candidates.extend(glob.glob(os.path.join(searchdir, pattern)))

    if not candidates:
        logger.warning("No viable mount file found in {}.".format(searchdirs))
        return None
    return candidates[0]
5.801458
5.158538
1.124632
def detect_volumes(self, single=None):
    """Generator that detects the volumes from the Disk, using one of two methods:

    * Single volume: the entire Disk is a single volume
    * Multiple volumes: the Disk is a volume system

    :param single: If *single* is :const:`True`, this method will call :func:`init_single_volumes`.
        If *single* is False, only :func:`init_multiple_volumes` is called. If *single* is None,
        :func:`init_multiple_volumes` is always called, being followed by :func:`init_single_volume`
        if no volumes were detected.
    """
    # prevent adding the same volumes twice
    if self.volumes.has_detected:
        for volume in self.volumes:
            yield volume
        return

    if single:
        for volume in self.volumes.detect_volumes(method='single'):
            yield volume
        return

    # if single == False or single == None, loop over all volumes
    amount = 0
    try:
        for volume in self.volumes.detect_volumes():
            amount += 1
            yield volume
    except ImageMounterError:
        pass  # ignore and continue to single mount

    # if single == None and no volumes were mounted, use single_volume
    if single is None and amount == 0:
        logger.info("Detecting as single volume instead")
        for volume in self.volumes.detect_volumes(method='single', force=True):
            yield volume
5.774888
5.28632
1.092421
def init(self, single=None, only_mount=None, skip_mount=None, swallow_exceptions=True):
    """Calls several methods required to perform a full initialisation: :func:`mount` and
    :func:`mount_volumes`, and yields all detected volumes.

    :param bool|None single: indicates whether the disk should be mounted as a single disk,
        not as a single disk, or whether both should be tried (defaults to :const:`None`)
    :param list only_mount: If set, must be a list of volume indexes that are only mounted.
    :param list skip_mount: If set, must be a list of volume indexes that should not be mounted.
    :param bool swallow_exceptions: If True, Exceptions are not raised but rather set on the instance.
    :rtype: generator
    """
    self.mount()
    self.volumes.preload_volume_data()

    # delegate the actual volume handling to init_volumes
    for volume in self.init_volumes(single, only_mount=only_mount, skip_mount=skip_mount,
                                    swallow_exceptions=swallow_exceptions):
        yield volume
3.92217
4.230772
0.927058
def init_volumes(self, single=None, only_mount=None, skip_mount=None, swallow_exceptions=True):
    """Generator that detects and mounts all volumes in the disk.

    :param single: If *single* is :const:`True`, this method will call :func:`init_single_volumes`.
        If *single* is False, only :func:`init_multiple_volumes` is called. If *single* is None,
        :func:`init_multiple_volumes` is always called, followed by :func:`init_single_volume`
        if no volumes were detected.
    :param list only_mount: If set, must be a list of volume indexes that are only mounted.
    :param list skip_mount: If set, must be a list of volume indexes that should not be mounted.
    :param bool swallow_exceptions: If True, Exceptions are not raised but rather set on the instance.
    """
    for detected in self.detect_volumes(single=single):
        for initialized in detected.init(only_mount=only_mount, skip_mount=skip_mount,
                                         swallow_exceptions=swallow_exceptions):
            yield initialized
3.104377
3.008183
1.031978
def get_volumes(self):
    """Gets a list of all volumes in this disk, including volumes that are contained in other volumes."""
    # flatten the nested volume trees into a single list
    return [nested for volume in self.volumes for nested in volume.get_volumes()]
4.627476
4.17642
1.108001
def unmount(self, remove_rw=False, allow_lazy=False):
    """Removes all ties of this disk to the filesystem, so the image can be unmounted successfully.

    :raises SubsystemError: when one of the underlying commands fails. Some are swallowed.
    :raises CleanupError: when actual cleanup fails. Some are swallowed.
    """
    # Unmount deepest mountpoints first by sorting on mountpoint in reverse order.
    for volume in sorted(self.volumes, key=lambda v: v.mountpoint or "", reverse=True):
        try:
            volume.unmount(allow_lazy=allow_lazy)
        except ImageMounterError:
            logger.warning("Error unmounting volume {0}".format(volume.mountpoint))

    if self._paths.get('nbd'):
        _util.clean_unmount(['qemu-nbd', '-d'], self._paths['nbd'], rmdir=False)

    if self.mountpoint:
        try:
            _util.clean_unmount(['fusermount', '-u'], self.mountpoint)
        except SubsystemError:
            if not allow_lazy:
                raise
            _util.clean_unmount(['fusermount', '-uz'], self.mountpoint)

    if self._paths.get('avfs'):
        try:
            _util.clean_unmount(['fusermount', '-u'], self._paths['avfs'])
        except SubsystemError:
            if not allow_lazy:
                raise
            _util.clean_unmount(['fusermount', '-uz'], self._paths['avfs'])

    if self.rw_active() and remove_rw:
        os.remove(self.rwpath)

    self.is_mounted = False
3.04306
2.938385
1.035623
def _make_argparser(self):
    """Makes a new argument parser."""
    self.argparser = ShellArgumentParser(prog='')
    subparsers = self.argparser.add_subparsers()

    # every parser_* method registers a subcommand; arg_* is the matching handler
    for name in self.get_names():
        if name.startswith('parser_'):
            command = name[7:]
            parser = subparsers.add_parser(command)
            parser.set_defaults(func=getattr(self, 'arg_' + command))
            getattr(self, name)(parser)

    self.argparser_completer = None
    try:
        import argcomplete
    except ImportError:
        pass
    else:
        os.environ.setdefault("_ARGCOMPLETE_COMP_WORDBREAKS", " \t\"'")
        self.argparser_completer = argcomplete.CompletionFinder(self.argparser)
3.547855
3.463702
1.024296
def complete(self, text, state):
    """Overridden to reset the argument parser after every completion (argcomplete fails :()"""
    result = cmd.Cmd.complete(self, text, state)
    if self.argparser_completer:
        # argparser screws up with internal states, this is the best way to fix it for now
        self._make_argparser()
    return result
11.51729
8.716505
1.32132
def default(self, line):
    """Overriding default to get access to any argparse commands we have specified."""
    if not any(line.startswith(x) for x in self.argparse_names()):
        cmd.Cmd.default(self, line)
        return
    try:
        args = self.argparser.parse_args(shlex.split(line))
    except Exception:  # intentionally catches also other errors in argparser
        pass
    else:
        args.func(args)
5.17459
4.224021
1.225039
def completedefault(self, text, line, begidx, endidx):
    """Accessing the argcompleter if available."""
    if not (self.argparser_completer and any(line.startswith(x) for x in self.argparse_names())):
        return []
    self.argparser_completer.rl_complete(line, 0)
    # strip the part before begidx so readline can splice the match in
    return [match[begidx:] for match in self.argparser_completer._rl_matches]
5.41195
4.655491
1.162488
def completenames(self, text, *ignored):
    """Patched to also return argparse commands"""
    names = cmd.Cmd.completenames(self, text, *ignored) + self.argparse_names(text)
    return sorted(names)
7.6627
4.902021
1.563171
def do_help(self, arg):
    """Patched to show help for arparse commands"""
    if arg and arg in self.argparse_names():
        # argparse prints its own help and raises SystemExit; swallow that
        try:
            self.argparser.parse_args([arg, '--help'])
        except Exception:
            pass
    else:
        cmd.Cmd.do_help(self, arg)
4.237355
3.769278
1.124182
def print_topics(self, header, cmds, cmdlen, maxcol):
    """Patched to show all argparse commands as being documented"""
    if header == self.doc_header:
        cmds.extend(self.argparse_names())
    cmd.Cmd.print_topics(self, header, sorted(cmds), cmdlen, maxcol)
8.286711
5.556773
1.491281
def preloop(self):
    """if the parser is not already set, loads the parser."""
    if self.parser:
        return
    self.stdout.write("Welcome to imagemounter {version}".format(version=__version__))
    self.stdout.write("\n")
    self.parser = ImageParser()
    for path in self.args.paths:
        self.onecmd('disk "{}"'.format(path))
8.774527
6.981205
1.256879
def onecmd(self, line):
    """Do not crash the entire program when a single command fails."""
    try:
        return cmd.Cmd.onecmd(self, line)
    except Exception as e:
        print("Critical error.", e)
5.804163
5.061087
1.146821
if self.parser: return [v.index for v in self.parser.get_volumes()] + [d.index for d in self.parser.disks] else: return None
def _get_all_indexes(self)
Returns all indexes available in the parser
6.837224
5.173821
1.321504
def _get_by_index(self, index):
    """Returns a volume,disk tuple for the specified index"""
    volume_or_disk = self.parser.get_by_index(index)
    if isinstance(volume_or_disk, Disk):
        return None, volume_or_disk
    return volume_or_disk, None
3.847314
2.711652
1.418808
def do_quit(self, arg):
    """Quits the program."""
    if self.saved:
        self.save()
    else:
        self.parser.clean()
    return True
10.44766
10.647324
0.981248
def preview_unmount(self):
    """Returns a list of all commands that would be executed if the :func:`unmount` method would be called.

    Note: any system changes between calling this method and calling :func:`unmount` aren't
    listed by this command.
    """
    commands = []
    for mountpoint in self.find_bindmounts():
        commands.append('umount {0}'.format(mountpoint))
    for mountpoint in self.find_mounts():
        commands.append('umount {0}'.format(mountpoint))
        commands.append('rm -Rf {0}'.format(mountpoint))
    for vgname, pvname in self.find_volume_groups():
        commands.append('lvchange -a n {0}'.format(vgname))
        commands.append('losetup -d {0}'.format(pvname))
    for device in self.find_loopbacks():
        commands.append('losetup -d {0}'.format(device))
    for mountpoint in self.find_base_images():
        commands.append('fusermount -u {0}'.format(mountpoint))
        commands.append('rm -Rf {0}'.format(mountpoint))
    for folder in self.find_clean_dirs():
        command = 'rm -Rf {0}'.format(folder)
        # avoid duplicating removals already scheduled above
        if command not in commands:
            commands.append(command)
    return commands
2.395839
2.337148
1.025112
def unmount(self):
    """Calls all unmount methods in the correct order."""
    # order matters: bind mounts first, temporary directories last
    self.unmount_bindmounts()
    self.unmount_mounts()
    self.unmount_volume_groups()
    self.unmount_loopbacks()
    self.unmount_base_images()
    self.clean_dirs()
5.322516
4.012751
1.326401
# find all mountponits self.mountpoints = {} # noinspection PyBroadException try: result = _util.check_output_(['mount']) for line in result.splitlines(): m = re.match(r'(.+) on (.+) type (.+) \((.+)\)', line) if m: self.mountpoints[m.group(2)] = (m.group(1), m.group(3), m.group(4)) except Exception: pass
def _index_mountpoints(self)
Finds all mountpoints and stores them in :attr:`mountpoints`
3.309305
3.002944
1.10202
self.loopbacks = {} try: result = _util.check_output_(['losetup', '-a']) for line in result.splitlines(): m = re.match(r'(.+): (.+) \((.+)\).*', line) if m: self.loopbacks[m.group(1)] = m.group(3) except Exception: pass
def _index_loopbacks(self)
Finds all loopbacks and stores them in :attr:`loopbacks`
3.712945
3.166213
1.172677
def find_bindmounts(self):
    """Finds all bind mountpoints that are inside mounts that match the :attr:`re_pattern`"""
    for mountpoint, (source, fstype, options) in self.mountpoints.items():
        if 'bind' in options and re.match(self.re_pattern, mountpoint):
            yield mountpoint
6.072991
3.918559
1.549802
def find_mounts(self):
    """Finds all mountpoints that are mounted to a directory matching :attr:`re_pattern` or
    originate from a directory matching :attr:`orig_re_pattern`.
    """
    for mountpoint, (source, fstype, options) in self.mountpoints.items():
        if 'bind' in options:
            continue
        if re.match(self.orig_re_pattern, source) or \
                (self.be_greedy and re.match(self.re_pattern, mountpoint)):
            yield mountpoint
6.160474
4.28007
1.43934
def find_base_images(self):
    """Finds all mountpoints that are mounted to a directory matching :attr:`orig_re_pattern`."""
    # iterate the keys directly instead of .items() — the values were discarded anyway
    for mountpoint in self.mountpoints:
        if re.match(self.orig_re_pattern, mountpoint):
            yield mountpoint
6.941245
2.645754
2.623541
def find_volume_groups(self):
    """Finds all volume groups that are mounted through a loopback originating from
    :attr:`orig_re_pattern`.

    Generator yields tuples of vgname, pvname
    """
    os.environ['LVM_SUPPRESS_FD_WARNINGS'] = '1'

    # find volume groups by parsing pvdisplay output
    try:
        output = _util.check_output_(['pvdisplay'])
        pvname = vgname = None
        for line in output.splitlines():
            if '--- Physical volume ---' in line:
                pvname = vgname = None
            elif "PV Name" in line:
                pvname = line.replace("PV Name", "").strip()
            elif "VG Name" in line:
                vgname = line.replace("VG Name", "").strip()

            if pvname and vgname:
                try:
                    # unmount volume groups with a physical volume originating from a disk image
                    if re.match(self.orig_re_pattern, self.loopbacks[pvname]):
                        yield vgname, pvname
                except Exception:
                    pass
                pvname = vgname = None
    except Exception:
        pass
4.682392
3.82539
1.22403
def find_loopbacks(self):
    """Finds all loopbacks originating from :attr:`orig_re_pattern`.

    Generator yields device names
    """
    for device, backing_file in self.loopbacks.items():
        if re.match(self.orig_re_pattern, backing_file):
            yield device
10.05457
3.672692
2.737657
def unmount_bindmounts(self):
    """Unmounts all bind mounts identified by :func:`find_bindmounts`"""
    for mountpoint in self.find_bindmounts():
        _util.clean_unmount(['umount'], mountpoint, rmdir=False)
15.563695
12.424787
1.252633
def unmount_volume_groups(self):
    """Unmounts all volume groups and related loopback devices as identified by
    :func:`find_volume_groups`
    """
    for vgname, pvname in self.find_volume_groups():
        # deactivate the group first, then detach its loopback
        _util.check_output_(['lvchange', '-a', 'n', vgname])
        _util.check_output_(['losetup', '-d', pvname])
7.210212
6.009315
1.199839
def unmount_loopbacks(self):
    """Unmounts all loopback devices as identified by :func:`find_loopbacks`"""
    # re-index loopback devices
    self._index_loopbacks()
    for device in self.find_loopbacks():
        _util.check_output_(['losetup', '-d', device])
9.783625
7.921793
1.235027
def find_clean_dirs(self):
    """Finds all (temporary) directories according to the glob and re patterns that should be cleaned."""
    for folder in glob.glob(self.glob_pattern):
        if re.match(self.re_pattern, folder):
            yield folder
    for folder in glob.glob(self.orig_glob_pattern):
        if re.match(self.orig_re_pattern, folder):
            yield folder
3.120698
2.46238
1.267351
def expand_path(path):
    """Expand the given path to either an Encase image or a dd image

    i.e. if path is '/path/to/image.E01' then the result of this method will be
    '/path/to/image.E*' and if path is '/path/to/image.001' then the result of this method
    will be '/path/to/image.[0-9][0-9]?'
    """
    if is_encase(path):
        return glob.glob(path[:-2] + '??') or [path]

    ext_match = re.match(r'^.*\.(\d{2,})$', path)
    if ext_match is None:
        return [path]
    # glob for sibling segments with the same numeric-extension width
    ext_size = len(ext_match.groups()[-1])
    return glob.glob(path[:-ext_size] + '[0-9]' * ext_size) or [path]
3.801884
3.199363
1.188326
def add_disk(self, path, force_disk_indexes=True, **args):
    """Adds a disk specified by the path to the ImageParser.

    :param path: The path to the disk volume
    :param force_disk_indexes: If true, always uses disk indexes. If False, only uses disk
        indexes if this is the second volume you add. If you plan on using this method,
        always leave this True. If you add a second disk when the previous disk has no
        index, an error is raised.
    :param args: Arguments to pass to the constructor of the Disk.
    """
    if self.disks and self.disks[0].index is None:
        raise DiskIndexError("First disk has no index.")

    if force_disk_indexes or self.disks:
        index = len(self.disks) + 1
    else:
        index = None

    disk = Disk(self, path, index=str(index) if index else None, **args)
    self.disks.append(disk)
    return disk
2.968215
3.022512
0.982036
def init(self, single=None, swallow_exceptions=True):
    """Handles all important disk-mounting tasks, i.e. calls the :func:`Disk.init` function
    on all underlying disks. It yields every volume that is encountered, including volumes
    that have not been mounted.

    :param single: indicates whether the :class:`Disk` should be mounted as a single disk,
        not as a single disk or whether it should try both (defaults to :const:`None`)
    :type single: bool|None
    :param swallow_exceptions: specify whether you want the init calls to swallow exceptions
    :rtype: generator
    """
    for disk in self.disks:
        for volume in disk.init(single, swallow_exceptions=swallow_exceptions):
            yield volume
4.715405
3.672955
1.283818
def mount_disks(self):
    """Mounts all disks in the parser, i.e. calling :func:`Disk.mount` on all underlying disks.
    You probably want to use :func:`init` instead.

    :return: whether all mounts have succeeded
    :rtype: bool
    """
    all_succeeded = True
    for disk in self.disks:
        # call mount() unconditionally so every disk gets a chance, then fold the result
        all_succeeded = disk.mount() and all_succeeded
    return all_succeeded
5.503546
4.81517
1.14296
def rw_active(self):
    """Indicates whether a read-write cache is active in any of the disks.

    :rtype: bool
    """
    any_active = False
    for disk in self.disks:
        # query every disk, even after one reports active
        any_active = disk.rw_active() or any_active
    return any_active
6.054905
6.271598
0.965449
def init_volumes(self, single=None, only_mount=None, skip_mount=None, swallow_exceptions=True):
    """Detects volumes (as volume system or as single volume) in all disks and yields the volumes.
    This calls :func:`Disk.init_volumes` on all disks and should be called after :func:`mount_disks`.

    :rtype: generator
    """
    for disk in self.disks:
        logger.info("Mounting volumes in {0}".format(disk))
        for volume in disk.init_volumes(single, only_mount, skip_mount,
                                        swallow_exceptions=swallow_exceptions):
            yield volume
3.247103
2.694889
1.204912
def get_by_index(self, index):
    """Returns a Volume or Disk by its index."""
    try:
        return self[index]
    except KeyError:
        # fall back to scanning all volumes for a matching (string) index
        for volume in self.get_volumes():
            if volume.index == str(index):
                return volume
        raise KeyError(index)
5.131561
3.750006
1.368414
def get_volumes(self):
    """Gets a list of all volumes of all disks, concatenating :func:`Disk.get_volumes` of all disks.

    :rtype: list
    """
    return [volume for disk in self.disks for volume in disk.get_volumes()]
4.525379
3.848653
1.175834
def clean(self, remove_rw=False, allow_lazy=False):
    """Cleans all volumes of all disks (:func:`Volume.unmount`) and all disks (:func:`Disk.unmount`).
    Volume errors are ignored, but returns immediately on disk unmount error.

    :param bool remove_rw: indicates whether a read-write cache should be removed
    :param bool allow_lazy: indicates whether lazy unmounting is allowed
    :raises SubsystemError: when one of the underlying commands fails. Some are swallowed.
    :raises CleanupError: when actual cleanup fails. Some are swallowed.
    """
    # To ensure clean unmount after reconstruct, we sort across all volumes in all our disks
    # to provide a proper order
    ordered = sorted(self.get_volumes(), key=lambda v: v.mountpoint or "", reverse=True)
    for volume in ordered:
        try:
            volume.unmount(allow_lazy=allow_lazy)
        except ImageMounterError:
            logger.error("Error unmounting volume {0}".format(volume.mountpoint))

    # Now just clean the rest.
    for disk in self.disks:
        disk.unmount(remove_rw, allow_lazy=allow_lazy)
6.002317
5.383475
1.114952
def force_clean(self, remove_rw=False, allow_lazy=False, retries=5, sleep_interval=0.5):
    """Attempts to call the clean method, but will retry automatically if an error is raised.
    When the attempts run out, it will raise the last error. Note that the method will only
    catch :class:`ImageMounterError` exceptions.

    :param bool remove_rw: indicates whether a read-write cache should be removed
    :param bool allow_lazy: indicates whether lazy unmounting is allowed
    :param retries: Maximum amount of retries while unmounting
    :param sleep_interval: The sleep interval between attempts.
    :raises SubsystemError: when one of the underlying commands fails. Some are swallowed.
    :raises CleanupError: when actual cleanup fails. Some are swallowed.
    """
    while True:
        try:
            self.clean(remove_rw=remove_rw, allow_lazy=allow_lazy)
        except ImageMounterError:
            if retries == 0:
                raise
            retries -= 1
            time.sleep(sleep_interval)
        else:
            return
2.601292
2.056216
1.265087
def reconstruct(self):
    """Reconstructs the filesystem of all volumes mounted by the parser by inspecting the
    last mount point and bind mounting everything.

    :raises: NoRootFoundError if no root could be found
    :return: the root :class:`Volume`
    """
    candidates = (v for v in self.get_volumes() if v.mountpoint and v.info.get('lastmountpoint'))
    volumes = sorted(candidates, key=lambda v: v.numeric_index)

    try:
        root = next(x for x in volumes if x.info.get('lastmountpoint') == '/')
    except StopIteration:
        logger.error("Could not find / while reconstructing, aborting!")
        raise NoRootFoundError()

    volumes.remove(root)
    for volume in volumes:
        if volume.info.get('lastmountpoint') == root.info.get('lastmountpoint'):
            logger.debug("Skipping volume %s as it has the same root as %s", volume, root)
            continue
        # strip the leading '/' so the join lands inside the root mountpoint
        volume.bindmount(os.path.join(root.mountpoint, volume.info.get('lastmountpoint')[1:]))
    return root
3.926831
3.266293
1.202229
def _make_subvolume(self, **args):
    """Creates a subvolume, adds it to this class and returns it."""
    from imagemounter.volume import Volume

    # vstype is not passed down, let it decide for itself.
    subvolume = Volume(disk=self.disk, parent=self.parent,
                       volume_detector=self.volume_detector, **args)
    self.volumes.append(subvolume)
    return subvolume
11.052683
10.279642
1.075201
if only_one and self.volumes: return self.volumes[0] if self.parent.index is None: index = '0' else: index = '{0}.0'.format(self.parent.index) volume = self._make_subvolume(index=index, **args) return volume
def _make_single_subvolume(self, only_one=True, **args)
Creates a subvolume, adds it to this class, sets the volume index to 0 and returns it. :param bool only_one: if this volume system already has at least one volume, it is returned instead.
3.232161
3.08889
1.046383
def detect_volumes(self, vstype=None, method=None, force=False):
    """Iterator for detecting volumes within this volume system.

    :param str vstype: The volume system type to use. If None, uses :attr:`vstype`
    :param str method: The detection method to use. If None, uses :attr:`detection`
    :param bool force: Specify if you want to force running the detection if has_detected is True.
    """
    if self.has_detected and not force:
        logger.warning("Detection already ran.")
        return

    if vstype is None:
        vstype = self.vstype
    if method is None:
        method = self.volume_detector
    if method == 'auto':
        method = VolumeSystem._determine_auto_detection_method()

    if method in ALL_VOLUME_SYSTEM_DETECTORS:
        for volume in ALL_VOLUME_SYSTEM_DETECTORS[method].detect(self, vstype):
            yield volume
    else:
        logger.error("No viable detection method found")
        raise ArgumentError("No viable detection method found")
    self.has_detected = True
3.770405
3.373407
1.117684
def _determine_auto_detection_method():
    """Return the detection method to use when the detection method is 'auto'"""
    # preference order: pytsk3, then mmls, then parted
    if dependencies.pytsk3.is_available:
        return 'pytsk3'
    if dependencies.mmls.is_available:
        return 'mmls'
    if dependencies.parted.is_available:
        return 'parted'
    raise PrerequisiteFailedError("No valid detection method is installed.")
5.390986
5.414923
0.995579
def _load_disktype_data(self):
    """Calls the :command:`disktype` command and obtains the disk GUID from GPT volume systems.
    As we are running the tool anyway, the label is also extracted from the tool if it is not
    yet set. The disktype data is only loaded and not assigned to volumes yet.
    """
    if not _util.command_exists('disktype'):
        logger.warning("disktype not installed, could not detect volume type")
        return None

    disktype = _util.check_output_(['disktype', self.parent.get_raw_path()]).strip()

    current_partition = None
    for line in disktype.splitlines():
        if not line:
            continue
        # noinspection PyBroadException
        try:
            line = line.strip()
            find_partition_nr = re.match(r"^Partition (\d+):", line)
            if find_partition_nr:
                current_partition = int(find_partition_nr.group(1))
            elif current_partition is not None:
                if line.startswith("Type ") and "GUID" in line:
                    # output is between ()
                    self._disktype[current_partition]['guid'] = \
                        line[line.index('GUID') + 5:-1].strip()
                elif line.startswith("Partition Name "):
                    # output is between ""
                    self._disktype[current_partition]['label'] = \
                        line[line.index('Name ') + 6:-1].strip()
        except Exception:
            logger.exception("Error while parsing disktype output")
    return
3.790559
3.405648
1.113021
if slot is None: slot = volume.slot if slot in self._disktype: data = self._disktype[slot] if not volume.info.get('guid') and 'guid' in data: volume.info['guid'] = data['guid'] if not volume.info.get('label') and 'label' in data: volume.info['label'] = data['label']
def _assign_disktype_data(self, volume, slot=None)
Assigns cached disktype data to a volume.
2.432521
2.316483
1.050092
if volume_system.parent.index is not None: return '{0}.{1}'.format(volume_system.parent.index, idx) else: return str(idx)
def _format_index(self, volume_system, idx)
Returns a formatted index given the disk index idx.
3.489008
3.099797
1.12556
def detect(self, volume_system, vstype='detect'):
    """Detects' a single volume. It should not be called other than from a :class:`Disk`."""
    volume = volume_system._make_single_subvolume(offset=0)
    raw_path = volume_system.parent.get_raw_path()

    if os.path.isdir(raw_path):
        # directory source: use du to obtain the total size
        filesize = _util.check_output_(['du', '-scDb', raw_path]).strip()
        if filesize:
            volume.size = int(filesize.splitlines()[-1].split()[0])
    else:
        description = _util.check_output_(['file', '-sL', raw_path]).strip()
        if description:
            # description is the part after the :, until the first comma
            volume.info['fsdescription'] = description.split(': ', 1)[1].split(',', 1)[0].strip()
            if 'size' in description:
                volume.size = int(re.findall(r'size:? (\d+)', description)[0])
            else:
                # NOTE(review): flattened source is ambiguous here; this else is assumed to
                # bind to the 'size' check — confirm against upstream
                volume.size = os.path.getsize(raw_path)

    volume.flag = 'alloc'
    volume_system.volume_source = 'single'
    volume_system._assign_disktype_data(volume)
    yield volume
4.743931
4.489628
1.056642
def _find_volumes(self, volume_system, vstype='detect'):
    """Finds all volumes based on the pytsk3 library."""
    try:
        # noinspection PyUnresolvedReferences
        import pytsk3
    except ImportError:
        logger.error("pytsk3 not installed, could not detect volumes")
        raise ModuleNotFoundError("pytsk3")

    baseimage = None
    try:
        # ewf raw image is now available on base mountpoint
        # either as ewf1 file or as .dd file
        raw_path = volume_system.parent.get_raw_path()
        # noinspection PyBroadException
        try:
            baseimage = pytsk3.Img_Info(raw_path)
        except Exception:
            logger.error("Failed retrieving image info (possible empty image).", exc_info=True)
            return []

        try:
            volumes = pytsk3.Volume_Info(baseimage,
                                         getattr(pytsk3, 'TSK_VS_TYPE_' + vstype.upper()),
                                         volume_system.parent.offset // volume_system.disk.block_size)
            volume_system.volume_source = 'multi'
            return volumes
        except Exception as e:
            # some bug in sleuthkit makes detection sometimes difficult, so we hack around it:
            if "(GPT or DOS at 0)" in str(e) and vstype != 'gpt':
                volume_system.vstype = 'gpt'
                # noinspection PyBroadException
                try:
                    logger.warning("Error in retrieving volume info: TSK couldn't decide between GPT and DOS, "
                                   "choosing GPT for you. Use --vstype=dos to force DOS.", exc_info=True)
                    volumes = pytsk3.Volume_Info(baseimage, getattr(pytsk3, 'TSK_VS_TYPE_GPT'))
                    volume_system.volume_source = 'multi'
                    return volumes
                except Exception as e:
                    logger.exception("Failed retrieving image info (possible empty image).")
                    raise SubsystemError(e)
            else:
                logger.exception("Failed retrieving image info (possible empty image).")
                raise SubsystemError(e)
    finally:
        if baseimage:
            baseimage.close()
            del baseimage
4.27192
4.185639
1.020614
def detect(self, volume_system, vstype='detect'):
    """Generator that mounts every partition of this image and yields the mountpoint."""
    # Loop over all volumes in image.
    for p in self._find_volumes(volume_system, vstype):
        import pytsk3

        volume = volume_system._make_subvolume(
            index=self._format_index(volume_system, p.addr),
            offset=p.start * volume_system.disk.block_size,
            size=p.len * volume_system.disk.block_size
        )
        # Fill volume with more information
        volume.info['fsdescription'] = p.desc.strip().decode('utf-8')

        if p.flags == pytsk3.TSK_VS_PART_FLAG_ALLOC:
            volume.flag = 'alloc'
            volume.slot = _util.determine_slot(p.table_num, p.slot_num)
            volume_system._assign_disktype_data(volume)
            logger.info("Found allocated {2}: block offset: {0}, length: {1} "
                        .format(p.start, p.len, volume.info['fsdescription']))
        elif p.flags == pytsk3.TSK_VS_PART_FLAG_UNALLOC:
            volume.flag = 'unalloc'
            logger.info("Found unallocated space: block offset: {0}, length: {1} ".format(p.start, p.len))
        elif p.flags == pytsk3.TSK_VS_PART_FLAG_META:
            volume.flag = 'meta'
            logger.info("Found meta volume: block offset: {0}, length: {1} ".format(p.start, p.len))

        yield volume
3.45285
3.369584
1.024711
def detect(self, volume_system, vstype='detect'):
    """Finds and mounts all volumes based on parted.

    :param VolumeSystem volume_system: The volume system.
    """
    # for some reason, parted does not properly return extended volume types in its machine
    # output, so we need to execute it twice.
    meta_volumes = []
    # noinspection PyBroadException
    try:
        output = _util.check_output_(['parted', volume_system.parent.get_raw_path(), 'print'],
                                     stdin=subprocess.PIPE)
        for line in output.splitlines():
            if 'extended' in line:
                meta_volumes.append(int(line.split()[0]))
    except Exception:
        logger.exception("Failed executing parted command.")
        # skip detection of meta volumes

    # noinspection PyBroadException
    try:
        # parted does not support passing in the vstype. It either works, or it doesn't.
        cmd = ['parted', volume_system.parent.get_raw_path(), '-sm', 'unit s', 'print free']
        output = _util.check_output_(cmd, stdin=subprocess.PIPE)
        volume_system.volume_source = 'multi'
    except Exception as e:
        logger.exception("Failed executing parted command")
        raise SubsystemError(e)

    num = 0
    for line in output.splitlines():
        # NOTE(review): 'self.parent' looks suspicious here (elsewhere this block uses
        # volume_system.parent) — preserved as-is to keep behavior identical
        if line.startswith("Warning") or not line or ':' not in line \
                or line.startswith(self.parent.get_raw_path()):
            continue
        line = line[:-1]  # remove last ;
        try:
            slot, start, end, length, description = line.split(':', 4)
            if ':' in description:
                description, label, flags = description.split(':', 2)
            else:
                description, label, flags = description, '', ''

            try:
                slot = int(slot)
            except ValueError:
                continue

            volume = volume_system._make_subvolume(
                index=self._format_index(volume_system, num),
                offset=int(start[:-1]) * volume_system.disk.block_size,  # remove last s
                size=int(length[:-1]) * volume_system.disk.block_size)
            volume.info['fsdescription'] = description
            if label:
                volume.info['label'] = label
            if flags:
                volume.info['parted_flags'] = flags

            # TODO: detection of meta volumes
            if description == 'free':
                volume.flag = 'unalloc'
                logger.info("Found unallocated space: block offset: {0}, length: {1}"
                            .format(start[:-1], length[:-1]))
            elif slot in meta_volumes:
                volume.flag = 'meta'
                volume.slot = slot
                logger.info("Found meta volume: block offset: {0}, length: {1}"
                            .format(start[:-1], length[:-1]))
            else:
                volume.flag = 'alloc'
                volume.slot = slot
                volume_system._assign_disktype_data(volume)
                logger.info("Found allocated {2}: block offset: {0}, length: {1} "
                            .format(start[:-1], length[:-1], volume.info['fsdescription']))
        except AttributeError:
            logger.exception("Error while parsing parted output")
            continue

        num += 1
        yield volume
def detect(self, volume_system, vstype='detect')
Finds and mounts all volumes based on parted. :param VolumeSystem volume_system: The volume system.
3.893568
3.844548
1.01275
def detect(self, volume_system, vstype='detect'):
    """Finds all volumes of the volume system using the mmls command.

    :param VolumeSystem volume_system: The volume system to detect volumes in.
    :param vstype: the volume system type to pass to mmls, or 'detect' to let mmls decide
    :return: generator yielding :class:`Volume` objects
    :raises SubsystemError: when the mmls command fails
    """
    try:
        cmd = ['mmls']
        if volume_system.parent.offset:
            cmd.extend(['-o', str(volume_system.parent.offset // volume_system.disk.block_size)])
        if vstype in ('dos', 'mac', 'bsd', 'sun', 'gpt'):
            cmd.extend(['-t', vstype])
        cmd.append(volume_system.parent.get_raw_path())
        output = _util.check_output_(cmd, stderr=subprocess.STDOUT)
        volume_system.volume_source = 'multi'
    except Exception as e:
        # some bug in sleuthkit makes detection sometimes difficult, so we hack around it:
        if hasattr(e, 'output') and "(GPT or DOS at 0)" in e.output.decode() and vstype != 'gpt':
            volume_system.vstype = 'gpt'
            # noinspection PyBroadException
            try:
                logger.warning("Error in retrieving volume info: mmls couldn't decide between GPT and DOS, "
                               "choosing GPT for you. Use --vstype=dos to force DOS.", exc_info=True)
                # BUG FIX: previously used self.parent, which is not defined on the detector;
                # all other references in this method use volume_system.parent.
                cmd = ['mmls', '-t', 'gpt', volume_system.parent.get_raw_path()]
                output = _util.check_output_(cmd, stderr=subprocess.STDOUT)
                volume_system.volume_source = 'multi'
            except Exception as e:
                logger.exception("Failed executing mmls command")
                raise SubsystemError(e)
        else:
            logger.exception("Failed executing mmls command")
            raise SubsystemError(e)

    output = output.split("Description", 1)[-1]
    for line in output.splitlines():
        if not line:
            continue
        # noinspection PyBroadException
        try:
            values = line.split(None, 5)

            # sometimes there are only 5 elements available
            description = ''
            index, slot, start, end, length = values[0:5]
            if len(values) > 5:
                description = values[5]

            volume = volume_system._make_subvolume(
                index=self._format_index(volume_system, int(index[:-1])),
                offset=int(start) * volume_system.disk.block_size,
                size=int(length) * volume_system.disk.block_size
            )
            volume.info['fsdescription'] = description
        except Exception:
            logger.exception("Error while parsing mmls output")
            continue

        if slot.lower() == 'meta':
            volume.flag = 'meta'
            logger.info("Found meta volume: block offset: {0}, length: {1}".format(start, length))
        elif slot.lower().startswith('-----'):
            volume.flag = 'unalloc'
            logger.info("Found unallocated space: block offset: {0}, length: {1}".format(start, length))
        else:
            volume.flag = 'alloc'
            if ":" in slot:
                volume.slot = _util.determine_slot(*slot.split(':'))
            else:
                volume.slot = _util.determine_slot(-1, slot)
            volume_system._assign_disktype_data(volume)
            logger.info("Found allocated {2}: block offset: {0}, length: {1} "
                        .format(start, length, volume.info['fsdescription']))

        yield volume
3.86207
3.780603
1.021549
def detect(self, volume_system, vstype='detect'):
    """Detect volume shadow copy stores in the vshadowmount path of the parent volume.

    :param VolumeSystem volume_system: The volume system to detect volumes in.
    :param vstype: ignored for VSS detection
    :return: the list of detected :class:`Volume` objects
    :raises SubsystemError: when the vshadowinfo command fails
    """
    path = volume_system.parent._paths['vss']

    try:
        volume_info = _util.check_output_(["vshadowinfo", "-o", str(volume_system.parent.offset),
                                           volume_system.parent.get_raw_path()])
    except Exception as e:
        logger.exception("Failed obtaining info from the volume shadow copies.")
        raise SubsystemError(e)

    current_store = None
    for line in volume_info.splitlines():
        line = line.strip()
        if line.startswith("Store:"):
            idx = line.split(":")[-1].strip()
            current_store = volume_system._make_subvolume(
                index=self._format_index(volume_system, idx), flag='alloc', offset=0
            )
            current_store._paths['vss_store'] = os.path.join(path, 'vss' + idx)
            current_store.info['fsdescription'] = 'VSS Store'
        elif line.startswith("Volume size"):
            current_store.size = int(line.split(":")[-1].strip().split()[0])
        elif line.startswith("Creation time"):
            # BUG FIX: timestamps themselves contain colons, so splitting on the *last*
            # colon kept only the seconds fragment; split once on the first colon instead.
            current_store.info['creation_time'] = line.split(":", 1)[-1].strip()

    return volume_system.volumes
4.679695
4.322089
1.082739
def detect(self, volume_system, vstype='detect'):
    """Detects logical volumes inside an LVM volume group, gathering their label, size and raw path.

    :param VolumeSystem volume_system: The volume system to detect volumes in.
    :param vstype: ignored for LVM detection
    :return: the list of detected :class:`Volume` objects
    """
    volume_group = volume_system.parent.info.get('volume_group')
    result = _util.check_output_(["lvm", "lvdisplay", volume_group])

    unit_multipliers = {'KiB': 1024, 'MiB': 1024 ** 2, 'GiB': 1024 ** 3, 'TiB': 1024 ** 4}
    current = None
    for line in result.splitlines():
        if "--- Logical volume ---" in line:
            # each section of the lvdisplay output describes one logical volume
            current = volume_system._make_subvolume(
                index=self._format_index(volume_system, len(volume_system)), flag='alloc'
            )
            current.info['fsdescription'] = 'Logical Volume'
        if "LV Name" in line:
            current.info['label'] = line.replace("LV Name", "").strip()
        if "LV Size" in line:
            size, unit = line.replace("LV Size", "").strip().split(" ", 1)
            # lvdisplay may print a decimal comma depending on locale
            current.size = int(float(size.replace(',', '.')) * unit_multipliers.get(unit, 1))
        if "LV Path" in line:
            current._paths['lv'] = line.replace("LV Path", "").strip()
            current.offset = 0

    logger.info("{0} volumes found".format(len(volume_system)))
    volume_system.volume_source = 'multi'
    return volume_system.volumes
3.840878
3.695256
1.039408
def _get_fstype_from_parser(self, fstype=None):
    """Loads the fstype for this volume from the parser's fstype configuration.

    Stores the result in :attr:`fstype` as a :class:`FileSystemType` instance, or as the
    string 'volumesystem' when the configured type is a volume system type.
    """
    if not fstype:
        fstypes = self.disk.parser.fstypes
        if self.index in fstypes:
            fstype = fstypes[self.index]
        elif '*' in fstypes:
            fstype = fstypes['*']
        elif '?' in fstypes and fstypes['?'] is not None:
            fstype = "?" + fstypes['?']
        else:
            fstype = ""
    self.fstype = fstype

    if self.fstype in VOLUME_SYSTEM_TYPES:
        self.volumes.vstype = self.fstype
        self.fstype = 'volumesystem'

    # convert fstype from string to a FileSystemType object
    if not isinstance(self.fstype, filesystems.FileSystemType):
        if self.fstype.startswith("?"):
            # a '?'-prefixed type is only a fallback, used when detection fails
            fallback = FILE_SYSTEM_TYPES[self.fstype[1:]]
            self.fstype = filesystems.FallbackFileSystemType(fallback)
        else:
            self.fstype = FILE_SYSTEM_TYPES[self.fstype]
2.607216
2.5332
1.029218
def get_description(self, with_size=True, with_index=True):
    """Builds a generic description of the volume: file system type, index, label and NTFS version.

    :param with_size: include the human-readable volume size as a prefix, when known
    :param with_index: include the volume index before the file system type
    :return: the description string
    """
    fs_name = self.info.get('statfstype') or self.info.get('fsdescription') or '-'

    pieces = []
    if with_size and self.size:
        pieces.append('{0} '.format(self.get_formatted_size()))
    if with_index:
        pieces.append('{0}:{1}'.format(self.index, fs_name))
    else:
        pieces.append(fs_name)

    label = self.info.get('label')
    if label:
        pieces.append(' {0}'.format(label))
    version = self.info.get('version')
    if version:  # NTFS
        pieces.append(' [{0}]'.format(version))

    return ''.join(pieces)
3.339628
3.097229
1.078263
def get_formatted_size(self):
    """Returns the volume size in a human-readable format (B, KiB, MiB, GiB or TiB).

    Returns :attr:`size` unchanged (e.g. ``None``) when it is not set.
    """
    if self.size is None:
        return self.size
    if self.size < 1024:
        return "{0} B".format(self.size)
    # pick the largest unit the size does not reach the next power of
    for power, unit in ((1, "KiB"), (2, "MiB"), (3, "GiB")):
        if self.size < 1024 ** (power + 1):
            return "{0} {1}".format(round(self.size / 1024 ** power, 2), unit)
    return "{0} TiB".format(round(self.size / 1024 ** 4, 2))
1.382214
1.320823
1.04648
def _get_blkid_type(self):
    """Retrieves the file system type from the blkid command.

    Stores the parsed key/value output in ``info['blkid_data']``.

    :return: the TYPE reported by blkid (or PTTYPE if only a partition table was found),
             or None on any failure
    """
    try:
        output = _util.check_output_(['blkid', '-p', '-O', str(self.offset), self.get_raw_path()])
        if not output:
            return None

        # noinspection PyTypeChecker
        data = dict(re.findall(r'([A-Z]+)="(.+?)"', output))
        self.info['blkid_data'] = data

        # a PTTYPE without a TYPE means blkid saw a partition table, not a filesystem
        if 'PTTYPE' in data and 'TYPE' not in data:
            return data.get('PTTYPE')
        return data.get('TYPE')
    except Exception:
        return None
3.947992
3.724161
1.060102
def _get_magic_type(self):
    """Checks the volume for its magic bytes and returns the detected magic string.

    Reads up to the first 4 KiB of the volume and feeds it to whichever python ``magic``
    module is installed; two incompatible modules share that import name.

    :return: the magic description string, or None when reading or detection fails
    """
    try:
        with io.open(self.disk.get_fs_path(), "rb") as file:
            file.seek(self.offset)
            # never read beyond the volume, and at most 4 KiB
            fheader = file.read(min(self.size, 4096) if self.size else 4096)
    except IOError:
        logger.exception("Failed reading first 4K bytes from volume.")
        return None

    # TODO fallback to img-cat image -s blocknum | file -
    # if we were able to load the module magic
    try:
        # noinspection PyUnresolvedReferences
        import magic

        if hasattr(magic, 'from_buffer'):
            # using https://github.com/ahupp/python-magic
            logger.debug("Using python-magic Python package for file type magic")
            # NOTE(review): newer python-magic versions return str from from_buffer, which
            # would make .decode() raise AttributeError (caught below) — confirm the
            # supported python-magic version.
            result = magic.from_buffer(fheader).decode()
            self.info['magic_data'] = result
            return result
        elif hasattr(magic, 'open'):
            # using Magic file extensions by Rueben Thomas (Ubuntu python-magic module)
            logger.debug("Using python-magic system package for file type magic")
            ms = magic.open(magic.NONE)
            ms.load()
            result = ms.buffer(fheader)
            ms.close()
            self.info['magic_data'] = result
            return result
        else:
            # some unrelated module also called 'magic' is installed
            logger.warning("The python-magic module is not available, but another module named magic was found.")
    except ImportError:
        logger.warning("The python-magic module is not available.")
    except AttributeError:
        logger.warning("The python-magic module is not available, but another module named magic was found.")
    return None
5.210046
4.87388
1.068973
def get_raw_path(self, include_self=False):
    """Retrieves the base mount path of this volume, used as the source for mount calls.

    Typically equal to :func:`Disk.get_fs_path`, but may also be the path of a logical
    volume, LUKS/BDE mapping, MD device or VSS store provided by an ancestor. By default
    paths that cover this volume itself (lv, vss_store) are only considered on the
    parents, so that offsets stay consistent; pass include_self=True to disable that.

    :param include_self: whether this volume's own lv/vss_store paths may be returned
    :return: the path to use as mount source
    """
    node = self
    if not include_self:
        # lv / vss_store are exceptions, as they cover the volume itself, not the child volume
        if node._paths.get('lv'):
            return node._paths['lv']
        if node._paths.get('vss_store'):
            return node._paths['vss_store']
        if not (node.parent and node.parent != self.disk):
            return self.disk.get_fs_path()
        node = node.parent

    while True:
        for key, template in (('lv', '{0}'), ('bde', '{0}/bde1'), ('luks', '/dev/mapper/{0}'),
                              ('md', '{0}'), ('vss_store', '{0}')):
            if node._paths.get(key):
                return template.format(node._paths[key])
        # Only walk up while the parent is a volume; stop at the disk itself.
        if node.parent and node.parent != self.disk:
            node = node.parent
        else:
            return self.disk.get_fs_path()
3.393597
2.972541
1.141649
def get_safe_label(self):
    """Returns a label that is safe to use in a mountpoint path for this volume.

    Slashes, spaces and parentheses are collapsed into underscores; a label of '/' maps
    to 'root'.
    """
    label = self.info.get('label')
    if label == '/':
        return 'root'
    if not label:
        return ""

    safe = re.sub(r"[/ \(\)]+", "_", label)
    if safe.startswith('_'):
        safe = safe[1:]
    if len(safe) > 2 and safe.endswith('_'):
        safe = safe[:-1]
    return safe
3.784231
3.339215
1.13327
def carve(self, freespace=True):
    """Call this method to carve the free space of the volume for (deleted) files.

    Note that photorec has its own interface that temporarily takes over the shell.

    :param freespace: indicates whether the entire volume should be carved (False)
                      or only the free space (True)
    :type freespace: bool
    :return: string to the path where carved data is available
    :raises CommandNotFoundError: if the underlying command does not exist
    :raises SubsystemError: if the underlying command fails
    :raises NoMountpointAvailableError: if there is no mountpoint available
    :raises NoLoopbackAvailableError: if there is no loopback available (only when the
                                      volume has no slot number)
    """
    self._make_mountpoint(var_name='carve', suffix="carve", in_paths=True)

    # if no slot, we need to make a loopback that we can use to carve the volume
    loopback_was_created_for_carving = False
    if not self.slot:
        if not self.loopback:
            # Can't carve if volume has no slot number and can't be mounted on loopback.
            self._find_loopback()
            loopback_was_created_for_carving = True

        # noinspection PyBroadException
        try:
            _util.check_call_(["photorec", "/d", self._paths['carve'] + os.sep, "/cmd", self.loopback,
                               ("freespace," if freespace else "") + "search"])

            # clean out the loop device if we created it specifically for carving
            if loopback_was_created_for_carving:
                # noinspection PyBroadException
                try:
                    _util.check_call_(['losetup', '-d', self.loopback])
                except Exception:
                    pass
                else:
                    # only forget the loopback when detaching actually succeeded
                    self.loopback = ""

            return self._paths['carve']
        except Exception as e:
            logger.exception("Failed carving the volume.")
            raise SubsystemError(e)
    else:
        # with a slot number, photorec can address the partition inside the raw image directly
        # noinspection PyBroadException
        try:
            _util.check_call_(["photorec", "/d", self._paths['carve'] + os.sep, "/cmd", self.get_raw_path(),
                               str(self.slot) + (",freespace" if freespace else "") + ",search"])
            return self._paths['carve']
        except Exception as e:
            logger.exception("Failed carving the volume.")
            raise SubsystemError(e)
4.378223
3.78976
1.155277
def detect_volume_shadow_copies(self):
    """Method to call vshadowmount and mount NTFS volume shadow copies.

    :return: iterable with the :class:`Volume` objects of the VSS
    :raises CommandNotFoundError: if the underlying command does not exist
    :raises SubsystemError: if the underlying command fails
    :raises NoMountpointAvailableError: if there is no mountpoint available
    """
    self._make_mountpoint(var_name='vss', suffix="vss", in_paths=True)

    cmd = ["vshadowmount", "-o", str(self.offset), self.get_raw_path(), self._paths['vss']]
    try:
        _util.check_call_(cmd)
    except Exception as e:
        logger.exception("Failed mounting the volume shadow copies.")
        raise SubsystemError(e)

    return self.volumes.detect_volumes(vstype='vss')
11.09962
8.600528
1.290574
om = only_mount is None or \ self.index in only_mount or \ self.info.get('lastmountpoint') in only_mount or \ self.info.get('label') in only_mount sm = skip_mount is None or \ (self.index not in skip_mount and self.info.get('lastmountpoint') not in skip_mount and self.info.get('label') not in skip_mount) return om and sm
def _should_mount(self, only_mount=None, skip_mount=None)
Indicates whether this volume should be mounted. Internal method, used by imount.py
2.772496
2.642601
1.049154
def init(self, only_mount=None, skip_mount=None, swallow_exceptions=True):
    """Generator that mounts this volume and either yields itself or recursively
    generates its subvolumes.

    Calls :func:`init_volume` (which mounts and detects the mountpoint), after which
    ``self`` is yielded, or the result of the :func:`init` call on each subvolume is
    yielded.

    :param only_mount: if specified, only volume indexes in this list are mounted.
                       Volume indexes are strings.
    :param skip_mount: if specified, volume indexes in this list are not mounted.
    :param swallow_exceptions: if True, any error occurring when mounting the volume is
                               swallowed and stored as an :attr:`exception` attribute on
                               the yielded objects.
    """
    if swallow_exceptions:
        # reset a previously stored exception before attempting a new mount
        self.exception = None

    try:
        if not self._should_mount(only_mount, skip_mount):
            yield self
            return

        if not self.init_volume():
            yield self
            return

    except ImageMounterError as e:
        if swallow_exceptions:
            self.exception = e
        else:
            raise

    if not self.volumes:
        yield self
    else:
        # recurse into the subvolumes that mounting may have produced
        for v in self.volumes:
            for s in v.init(only_mount, skip_mount, swallow_exceptions):
                yield s
2.89233
2.933365
0.986011
def init_volume(self, fstype=None):
    """Initializes a single volume: performs sanity checks, then mounts it and detects
    its mountpoint. Use this instead of :func:`mount` to get those checks.

    :param fstype: the file system type to use, or None to auto-detect
    :return: whether the volume was actually mounted
    """
    logger.debug("Initializing volume {0}".format(self))

    # guard clauses: filtered out, not allocated, RAID not ready, or already mounted
    if not self._should_mount():
        return False
    if self.flag != 'alloc':
        return False
    if self.info.get('raid_status') == 'waiting':
        logger.info("RAID array %s not ready for mounting", self)
        return False
    if self.is_mounted:
        logger.info("%s is currently mounted, not mounting it again", self)
        return False

    logger.info("Mounting volume {0}".format(self))
    self.mount(fstype=fstype)
    self.detect_mountpoint()
    return True
4.31311
4.30521
1.001835
parser = self.disk.parser if parser.mountdir and not os.path.exists(parser.mountdir): os.makedirs(parser.mountdir) if parser.pretty: md = parser.mountdir or tempfile.gettempdir() case_name = casename or self.disk.parser.casename or \ ".".join(os.path.basename(self.disk.paths[0]).split('.')[0:-1]) or \ os.path.basename(self.disk.paths[0]) if self.disk.parser.casename == case_name: # the casename is already in the path in this case pretty_label = "{0}-{1}".format(self.index, self.get_safe_label() or self.fstype or 'volume') else: pretty_label = "{0}-{1}-{2}".format(case_name, self.index, self.get_safe_label() or self.fstype or 'volume') if suffix: pretty_label += "-" + suffix path = os.path.join(md, pretty_label) # check if path already exists, otherwise try to find another nice path if os.path.exists(path): for i in range(2, 100): path = os.path.join(md, pretty_label + "-" + str(i)) if not os.path.exists(path): break else: logger.error("Could not find free mountdir.") raise NoMountpointAvailableError() # noinspection PyBroadException try: os.mkdir(path, 777) if in_paths: self._paths[var_name] = path else: setattr(self, var_name, path) return path except Exception: logger.exception("Could not create mountdir.") raise NoMountpointAvailableError() else: t = tempfile.mkdtemp(prefix='im_' + self.index + '_', suffix='_' + self.get_safe_label() + ("_" + suffix if suffix else ""), dir=parser.mountdir) if in_paths: self._paths[var_name] = t else: setattr(self, var_name, t) return t
def _make_mountpoint(self, casename=None, var_name='mountpoint', suffix='', in_paths=False)
Creates a directory that can be used as a mountpoint. The directory is stored in :attr:`mountpoint`, or the varname as specified by the argument. If in_paths is True, the path is stored in the :attr:`_paths` attribute instead. :returns: the mountpoint path :raises NoMountpointAvailableError: if no mountpoint could be made
2.909034
2.830109
1.027888
if self.mountpoint: os.rmdir(self.mountpoint) self.mountpoint = ""
def _clear_mountpoint(self)
Clears a created mountpoint. Does not unmount it, merely deletes it.
3.499061
3.043404
1.14972
def _find_loopback(self, use_loopback=True, var_name='loopback'):
    """Finds a free loopback device that can be used, storing it in :attr:`loopback`
    (or the attribute named by *var_name*).

    :param use_loopback: if True, the volume is also attached to the loopback directly
    :param var_name: the attribute to store the loopback device path in
    :returns: the loopback address
    :raises NoLoopbackAvailableError: if no loopback could be found, or attaching failed
    """
    # noinspection PyBroadException
    try:
        loopback = _util.check_output_(['losetup', '-f']).strip()
        setattr(self, var_name, loopback)
    except Exception:
        logger.warning("No free loopback device found.", exc_info=True)
        raise NoLoopbackAvailableError()

    if use_loopback:
        # noinspection PyBroadException
        try:
            cmd = ['losetup', '-o', str(self.offset), '--sizelimit', str(self.size),
                   loopback, self.get_raw_path()]
            if not self.disk.read_write:
                cmd.insert(1, '-r')  # attach read-only unless the disk is writable
            _util.check_call_(cmd, stdout=subprocess.PIPE)
        except Exception:
            logger.exception("Loopback device could not be mounted.")
            raise NoLoopbackAvailableError()
    return loopback
4.008101
3.59362
1.115338
def determine_fs_type(self):
    """Determines the FS type for this partition, used to decide which mount system to use.

    Several sources (parser description, GUID, blkid, magic bytes) are consulted in order;
    each :class:`FileSystemType` reports a cumulative certainty score per source. The
    first type that reaches a unique score of at least 50 wins; otherwise the highest
    positive score, otherwise the configured fallback.

    Does not do anything if :attr:`fstype` is already a concrete FileSystemType.

    :return: the determined :class:`FileSystemType` (also stored in :attr:`fstype`)
    """
    fstype_fallback = None
    if isinstance(self.fstype, filesystems.FallbackFileSystemType):
        # only use the fallback when detection does not produce anything better
        fstype_fallback = self.fstype.fallback
    elif isinstance(self.fstype, filesystems.FileSystemType):
        # already determined; nothing to do
        return self.fstype

    result = collections.Counter()
    # NOTE(review): the source key 'blikid' looks like a typo for 'blkid'; detectors only
    # special-case the 'guid' source, so behavior is unaffected — confirm before renaming.
    for source, description in (('fsdescription', self.info.get('fsdescription')),
                                ('guid', self.info.get('guid')),
                                ('blikid', self._get_blkid_type),
                                ('magic', self._get_magic_type)):
        # For efficiency reasons, not all functions are called instantly.
        if callable(description):
            description = description()

        logger.debug("Trying to determine fs type from {} '{}'".format(source, description))
        if not description:
            continue

        # Iterate over all results and update the certainty of all FS types
        for type in FILE_SYSTEM_TYPES.values():  # note: shadows the builtin 'type'
            result.update(type.detect(source, description))

        # Now sort the results by their certainty
        logger.debug("Current certainty levels: {}".format(result))

        # If we have not found any candidates, we continue
        if not result:
            continue

        # If we have candidates of which we are not entirely certain, we just continue
        max_res = result.most_common(1)[0][1]
        if max_res < 50:
            logger.debug("Highest certainty item is lower than 50, continuing...")
        # If we have multiple candidates with the same score, we just continue
        elif len([True for type, certainty in result.items() if certainty == max_res]) > 1:
            logger.debug("Multiple items with highest certainty level, so continuing...")
        else:
            self.fstype = result.most_common(1)[0][0]
            return self.fstype

    # Now be more lax with the fallback:
    if result:
        max_res = result.most_common(1)[0][1]
        if max_res > 0:
            self.fstype = result.most_common(1)[0][0]
            return self.fstype

    if fstype_fallback:
        self.fstype = fstype_fallback
    return self.fstype
4.224788
4.096986
1.031194
def mount(self, fstype=None):
    """Mounts this volume using the mount logic of its (detected) file system type.

    :param fstype: the file system type to mount as, or None to run detection
    :raises NotMountedError: if the parent volume/disk is not mounted
    :raises NoMountpointAvailableError: if no mountpoint was found
    :raises NoLoopbackAvailableError: if no loopback device was found
    :raises UnsupportedFilesystemError: if the fstype is not supported for mounting
    :raises SubsystemError: if one of the underlying commands failed
    """
    if not self.parent.is_mounted:
        raise NotMountedError(self.parent)

    if fstype is None:
        fstype = self.determine_fs_type()
    self._load_fsstat_data()

    # Prepare mount command
    try:
        fstype.mount(self)
        self.was_mounted = True
        self.is_mounted = True
        self.fstype = fstype
    except Exception as e:
        logger.exception("Execution failed due to {} {}".format(type(e), e), exc_info=True)
        # re-raise our own errors as-is; wrap everything else
        if isinstance(e, ImageMounterError):
            raise
        raise SubsystemError(e)
4.530112
3.833626
1.181678
def bindmount(self, mountpoint):
    """Bind mounts the volume to another mountpoint. Only works if the volume is
    already mounted.

    :param mountpoint: the target path to bind the existing mountpoint to
    :raises NotMountedError: when the volume is not yet mounted
    :raises SubsystemError: when the underlying command failed
    """
    if not self.mountpoint:
        raise NotMountedError(self)

    try:
        _util.check_call_(['mount', '--bind', self.mountpoint, mountpoint], stdout=subprocess.PIPE)
        # remember all bind mounts so unmount() can clean them up again
        self._paths.setdefault('bindmounts', []).append(mountpoint)
        return True
    except Exception as e:
        logger.exception("Error bind mounting {0}.".format(self))
        raise SubsystemError(e)
3.493511
3.029454
1.153182
def get_volumes(self):
    """Recursively builds a flat list of all subvolumes, ending with this volume itself."""
    if not self.volumes:
        return [self]
    collected = []
    for child in self.volumes:
        collected.extend(child.get_volumes())
    collected.append(self)
    return collected
3.148147
2.60864
1.206815
def _load_fsstat_data(self, timeout=3):
    """Using :command:`fsstat`, adds some additional information of the volume to the Volume.

    fsstat is run in a separate thread so its process can be killed after *timeout*
    seconds; some fsstat invocations can hang for a long time on large images.

    :param timeout: number of seconds after which the fsstat process is terminated
    """
    def stats_thread():
        try:
            cmd = ['fsstat', self.get_raw_path(), '-o', str(self.offset // self.disk.block_size)]
            # Setting the fstype explicitly makes fsstat much faster and more reliable
            # In some versions, the auto-detect yaffs2 check takes ages for large images
            # NOTE(review): self.fstype may be a FileSystemType instance rather than a plain
            # string at this point, in which case this lookup falls through to None — confirm.
            fstype = {
                "ntfs": "ntfs",
                "fat": "fat",
                "ext": "ext",
                "iso": "iso9660",
                "hfs+": "hfs",
                "ufs": "ufs",
                "swap": "swap",
                "exfat": "exfat",
            }.get(self.fstype, None)
            if fstype:
                cmd.extend(["-f", fstype])
            logger.debug('$ {0}'.format(' '.join(cmd)))
            # the process handle is stored on the function object so the outer scope
            # can terminate it when the timeout expires
            stats_thread.process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            for line in iter(stats_thread.process.stdout.readline, b''):
                line = line.decode('utf-8')
                logger.debug('< {0}'.format(line))
                if line.startswith("File System Type:"):
                    self.info['statfstype'] = line[line.index(':') + 2:].strip()
                elif line.startswith("Last Mount Point:") or line.startswith("Last mounted on:"):
                    self.info['lastmountpoint'] = line[line.index(':') + 2:].strip().replace("//", "/")
                elif line.startswith("Volume Name:") and not self.info.get('label'):
                    self.info['label'] = line[line.index(':') + 2:].strip()
                elif line.startswith("Version:"):
                    self.info['version'] = line[line.index(':') + 2:].strip()
                elif line.startswith("Source OS:"):
                    self.info['version'] = line[line.index(':') + 2:].strip()
                elif 'CYLINDER GROUP INFORMATION' in line or 'BLOCK GROUP INFORMATION' in line:
                    # the group information dump is not interesting; stop fsstat early
                    # noinspection PyBroadException
                    try:
                        stats_thread.process.terminate()
                        logger.debug("Terminated fsstat at cylinder/block group information.")
                    except Exception:
                        pass
                    break

            # combine the detected mountpoint and label into one descriptive label
            if self.info.get('lastmountpoint') and self.info.get('label'):
                self.info['label'] = "{0} ({1})".format(self.info['lastmountpoint'], self.info['label'])
            elif self.info.get('lastmountpoint') and not self.info.get('label'):
                self.info['label'] = self.info['lastmountpoint']
            elif not self.info.get('lastmountpoint') and self.info.get('label') and \
                    self.info['label'].startswith("/"):  # e.g. /boot1
                if self.info['label'].endswith("1"):
                    self.info['lastmountpoint'] = self.info['label'][:-1]
                else:
                    self.info['lastmountpoint'] = self.info['label']
        except Exception:
            # ignore any exceptions here.
            logger.exception("Error while obtaining stats.")

    stats_thread.process = None

    thread = threading.Thread(target=stats_thread)
    thread.start()
    thread.join(timeout)
    if thread.is_alive():
        # fsstat did not finish in time; kill the process so the thread can exit
        # noinspection PyBroadException
        try:
            stats_thread.process.terminate()
        except Exception:
            pass
        thread.join()
        logger.debug("Killed fsstat after {0}s".format(timeout))
2.711638
2.685838
1.009606
def detect_mountpoint(self):
    """Attempts to detect the previous mountpoint of this volume by inspecting the files
    present in it. Only used when fsstat did not already determine it.

    :return: the detected mountpoint, or None
    """
    if self.info.get('lastmountpoint'):
        return self.info.get('lastmountpoint')
    if not self.mountpoint:
        return None

    entries = os.listdir(self.mountpoint)
    result = None
    # heuristics based on well-known top-level directories of Linux filesystems
    if 'grub' in entries:
        result = '/boot'
    elif 'usr' in entries and 'var' in entries and 'root' in entries:
        result = '/'
    elif 'bin' in entries and 'lib' in entries and 'local' in entries and 'src' in entries and 'usr' not in entries:
        result = '/usr'
    elif 'bin' in entries and 'lib' in entries and 'local' not in entries and 'src' in entries and 'usr' not in entries:
        result = '/usr/local'
    elif 'lib' in entries and 'local' in entries and 'tmp' in entries and 'var' not in entries:
        result = '/var'

    if result:
        self.info['lastmountpoint'] = result
        if not self.info.get('label'):
            self.info['label'] = self.info['lastmountpoint']
        logger.info("Detected mountpoint as {0} based on files in volume".format(self.info['lastmountpoint']))

    return result
2.225791
2.14032
1.039934
def unmount(self, allow_lazy=False):
    """Unmounts the volume from the filesystem, together with all of its subvolumes.

    :param allow_lazy: if True, a lazy unmount (fusermount -uz) is attempted when the
                       normal unmount of a FUSE mount fails
    :raises SubsystemError: if one of the underlying processes fails
    :raises CleanupError: if the cleanup fails
    """
    # unmount all subvolumes first; swallow their errors so we still clean up as much as possible
    for volume in self.volumes:
        try:
            volume.unmount(allow_lazy=allow_lazy)
        except ImageMounterError:
            pass

    if self.is_mounted:
        logger.info("Unmounting volume %s", self)

    # deactivate the LVM volume group, if any
    if self.loopback and self.info.get('volume_group'):
        _util.check_call_(["lvm", 'vgchange', '-a', 'n', self.info['volume_group']],
                          wrap_error=True, stdout=subprocess.PIPE)
        self.info['volume_group'] = ""

    # close the LUKS mapping, if any
    if self.loopback and self._paths.get('luks'):
        _util.check_call_(['cryptsetup', 'luksClose', self._paths['luks']],
                          wrap_error=True, stdout=subprocess.PIPE)
        del self._paths['luks']

    # BDE (BitLocker) is a FUSE mount
    if self._paths.get('bde'):
        try:
            _util.clean_unmount(['fusermount', '-u'], self._paths['bde'])
        except SubsystemError:
            if not allow_lazy:
                raise
            _util.clean_unmount(['fusermount', '-uz'], self._paths['bde'])
        del self._paths['bde']

    if self._paths.get('md'):
        md_path = self._paths['md']
        # removing it here to ensure we do not enter an infinite loop, will add it back later
        del self._paths['md']

        # MD arrays are a bit complicated, we also check all other volumes that are part of this array and
        # unmount them as well.
        logger.debug("All other volumes that use %s as well will also be unmounted", md_path)
        for v in self.disk.get_volumes():
            if v != self and v._paths.get('md') == md_path:
                v.unmount(allow_lazy=allow_lazy)

        try:
            _util.check_output_(["mdadm", '--stop', md_path], stderr=subprocess.STDOUT)
        except Exception as e:
            # stopping failed: restore the md path so a later retry is still possible
            self._paths['md'] = md_path
            raise SubsystemError(e)

    # VSS is a FUSE mount as well
    if self._paths.get('vss'):
        try:
            _util.clean_unmount(['fusermount', '-u'], self._paths['vss'])
        except SubsystemError:
            if not allow_lazy:
                raise
            _util.clean_unmount(['fusermount', '-uz'], self._paths['vss'])
        del self._paths['vss']

    if self.loopback:
        _util.check_call_(['losetup', '-d', self.loopback], wrap_error=True)
        self.loopback = ""

    if self._paths.get('bindmounts'):
        for mp in self._paths['bindmounts']:
            _util.clean_unmount(['umount'], mp, rmdir=False)
        del self._paths['bindmounts']

    if self.mountpoint:
        _util.clean_unmount(['umount'], self.mountpoint)
        self.mountpoint = ""

    # carved data lives in a plain directory, not a mount
    if self._paths.get('carve'):
        try:
            shutil.rmtree(self._paths['carve'])
        except OSError as e:
            raise SubsystemError(e)
        else:
            del self._paths['carve']

    self.is_mounted = False
2.771661
2.719945
1.019014
def require(*requirements, **kwargs):
    """Decorator that can be used to require requirements.

    :param requirements: List of requirements that should be verified
    :param none_on_failure: If true, does not raise a PrerequisiteFailedError, but
                            instead makes the decorated function return None
    """
    # TODO: require(*requirements, none_on_failure=False) is not supported by Python 2
    none_on_failure = kwargs.get('none_on_failure', False)

    def inner(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            for requirement in requirements:
                if none_on_failure:
                    if not requirement.is_available:
                        return None
                else:
                    # raises when the requirement is not satisfied
                    requirement.require()
            return f(*args, **kwargs)
        return wrapper
    return inner
2.99337
3.053273
0.980381
def status_message(self):
    """Returns a format-string template describing whether the dependency is installed.

    :rtype: str
    """
    if self.is_available:
        return "INSTALLED {0!s}"

    prefix = "MISSING {0!s:<20}"
    if self.why and self.package:
        return prefix + "needed for {0.why}, part of the {0.package} package"
    if self.why:
        return prefix + "needed for {0.why}"
    if self.package:
        return prefix + "part of the {0.package} package"
    return prefix
3.660285
3.423104
1.069288
if source == "guid" and description in self.guids: return {self: 100} description = description.lower() if description == self.type: return {self: 100} elif re.search(r"\b" + self.type + r"\b", description): return {self: 80} elif any((re.search(r"\b" + alias + r"\b", description) for alias in self.aliases)): return {self: 70} return {}
def detect(self, source, description)
Detects the type of a volume based on the provided information. It returns the plausibility for all file system types as a dict. Although it is only responsible for returning its own plausibility, it is possible that one type of filesystem is more likely than another, e.g. when NTFS detects it is likely to be NTFS, it can also update the plausibility of exFAT to indicate it is less likely. All scores are cumulative. When multiple sources are used, it is also cumulative. For instance, if run 1 is 25 certain, and run 2 is 25 certain as well, it will become 50 certain. :meth:`Volume.detect_fs_type` will return immediately if the score is higher than 50 and there is only 1 FS type with the highest score. Otherwise, it will continue with the next run. If at the end of all runs no viable FS type was found, it will return the highest scoring FS type (if it is > 0), otherwise it will return the FS type fallback. :param source: The source of the description :param description: The description to detect with :return: Dict with mapping of FsType() objects to scores
3.164706
3.066262
1.032105
def mount(self, volume):
    """Mounts the given volume on a freshly created mountpoint. The default
    implementation simply calls mount.

    :param Volume volume: The volume to be mounted
    :raises UnsupportedFilesystemError: when the volume system type can not be mounted.
    """
    mount_type = self._mount_type or self.type
    volume._make_mountpoint()
    try:
        self._call_mount(volume, volume.mountpoint, mount_type, self._mount_opts)
    except Exception:
        # Mounting failed: roll back the mountpoint we just created before
        # propagating the error.
        volume._clear_mountpoint()
        raise
def mount(self, volume)
Mounts the given volume on the provided mountpoint. The default implementation simply calls mount. :param Volume volume: The volume to be mounted :param mountpoint: The file system path to mount the filesystem on. :raises UnsupportedFilesystemError: when the volume system type can not be mounted.
5.867062
6.185219
0.948562
def _call_mount(self, volume, mountpoint, type=None, opts=""):
    """Calls the mount command, specifying the mount type and mount options."""
    # Append the loop/offset/sizelimit options after any caller-supplied
    # options, making sure they are comma-separated.
    if opts and not opts.endswith(','):
        opts += ","
    opts += 'loop,offset=' + str(volume.offset) + ',sizelimit=' + str(volume.size)

    cmd = ['mount', volume.get_raw_path(), mountpoint, '-o', opts]

    # Mount read-only unless the disk was opened read-write.
    if not volume.disk.read_write:
        cmd[-1] += ',ro'

    # Let mount autodetect the filesystem unless a type was given.
    if type is not None:
        cmd.extend(['-t', type])

    _util.check_output_(cmd, stderr=subprocess.STDOUT)
def _call_mount(self, volume, mountpoint, type=None, opts="")
Calls the mount command, specifying the mount type and mount options.
4.805867
4.80627
0.999916
# we have to make a ram-device to store the image, we keep 20% overhead size_in_kb = int((volume.size / 1024) * 1.2) _util.check_call_(['modprobe', '-v', 'mtd']) _util.check_call_(['modprobe', '-v', 'jffs2']) _util.check_call_(['modprobe', '-v', 'mtdram', 'total_size={}'.format(size_in_kb), 'erase_size=256']) _util.check_call_(['modprobe', '-v', 'mtdblock']) _util.check_call_(['dd', 'if=' + volume.get_raw_path(), 'of=/dev/mtd0']) _util.check_call_(['mount', '-t', 'jffs2', '/dev/mtdblock0', volume.mountpoint])
def mount(self, volume)
Perform specific operations to mount a JFFS2 image. This kind of image is sometimes used for things like BIOS images, so external tools are required, but given this method you don't have to memorize anything and it works fast and easy. Note that this module might not yet work while mounting multiple images at the same time.
3.855253
3.865314
0.997397
def mount(self, volume):
    """Command that is an alternative to the :func:`mount` command that opens a
    LUKS container. The opened volume is added to the subvolume set of this
    volume. Requires the user to enter the key manually.

    TODO: add support for :attr:`keys`

    :return: the Volume contained in the LUKS container, or None on failure.
    :raises NoLoopbackAvailableError: when no free loopback could be found
    :raises IncorrectFilesystemError: when this is not a LUKS volume
    :raises SubsystemError: when the underlying command fails
    """
    # Open a loopback device
    volume._find_loopback()

    # Check if this is a LUKS device
    # noinspection PyBroadException
    try:
        _util.check_call_(["cryptsetup", "isLuks", volume.loopback], stderr=subprocess.STDOUT)
        # ret = 0 if isLuks
    except Exception:
        logger.warning("Not a LUKS volume")
        # clean the loopback device, we want this method to be clean as possible
        # noinspection PyBroadException
        try:
            volume._free_loopback()
        except Exception:
            pass
        raise IncorrectFilesystemError()

    try:
        extra_args = []
        key = None
        # key material is stored as "<type>:<value>", e.g. "p:mypassphrase"
        if volume.key:
            t, v = volume.key.split(':', 1)
            if t == 'p':  # passphrase
                key = v
            elif t == 'f':  # key-file
                extra_args = ['--key-file', v]
            elif t == 'm':  # master-key-file
                extra_args = ['--master-key-file', v]
        else:
            logger.warning("No key material provided for %s", volume)
    except ValueError:
        logger.exception("Invalid key material provided (%s) for %s. Expecting [arg]:[value]",
                         volume.key, volume)
        volume._free_loopback()
        raise ArgumentError()

    # Open the LUKS container
    volume._paths['luks'] = 'image_mounter_luks_' + str(random.randint(10000, 99999))

    # noinspection PyBroadException
    try:
        cmd = ["cryptsetup", "luksOpen", volume.loopback, volume._paths['luks']]
        cmd.extend(extra_args)
        if not volume.disk.read_write:
            cmd.insert(1, '-r')

        if key is not None:
            logger.debug('$ {0}'.format(' '.join(cmd)))
            # for py 3.2+, we could have used input=, but that doesn't exist in py2.7.
            p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            p.communicate(key.encode("utf-8"))
            p.wait()
            retcode = p.poll()
            # non-zero exit from luksOpen with a passphrase means the key was wrong
            if retcode:
                raise KeyInvalidError()
        else:
            _util.check_call_(cmd)
    except ImageMounterError:
        # undo the bookkeeping and free the loopback before re-raising
        del volume._paths['luks']
        volume._free_loopback()
        raise
    except Exception as e:
        del volume._paths['luks']
        volume._free_loopback()
        raise SubsystemError(e)

    size = None
    # noinspection PyBroadException
    try:
        # parse the opened container's size from "cryptsetup status" output;
        # the size line reports sectors, so convert to bytes via the disk block size
        result = _util.check_output_(["cryptsetup", "status", volume._paths['luks']])
        for l in result.splitlines():
            if "size:" in l and "key" not in l:
                size = int(l.replace("size:", "").replace("sectors", "").strip()) * volume.disk.block_size
    except Exception:
        pass

    container = volume.volumes._make_single_subvolume(flag='alloc', offset=0, size=size)
    container.info['fsdescription'] = 'LUKS Volume'

    return container
def mount(self, volume)
Command that is an alternative to the :func:`mount` command that opens a LUKS container. The opened volume is added to the subvolume set of this volume. Requires the user to enter the key manually. TODO: add support for :attr:`keys` :return: the Volume contained in the LUKS container, or None on failure. :raises NoLoopbackAvailableError: when no free loopback could be found :raises IncorrectFilesystemError: when this is not a LUKS volume :raises SubsystemError: when the underlying command fails
3.846443
3.537161
1.087438
def mount(self, volume):
    """Mounts a BDE container. Uses key material provided by the :attr:`keys`
    attribute. The key material should be provided in the same format as to
    :cmd:`bdemount`, used as follows:

    k:full volume encryption and tweak key
    p:passphrase
    r:recovery password
    s:file to startup key (.bek)

    :return: the Volume contained in the BDE container
    :raises ArgumentError: if the keys argument is invalid
    :raises SubsystemError: when the underlying command fails
    """
    volume._paths['bde'] = tempfile.mkdtemp(prefix='image_mounter_bde_')

    try:
        if volume.key:
            # key material is stored as "<type>:<value>", e.g. "p:passphrase";
            # bdemount takes it as "-<type> <value>"
            t, v = volume.key.split(':', 1)
            key = ['-' + t, v]
        else:
            logger.warning("No key material provided for %s", volume)
            key = []
    except ValueError:
        logger.exception("Invalid key material provided (%s) for %s. Expecting [arg]:[value]", volume.key, volume)
        raise ArgumentError()

    # noinspection PyBroadException
    try:
        cmd = ["bdemount", volume.get_raw_path(), volume._paths['bde'], '-o', str(volume.offset)]
        cmd.extend(key)
        _util.check_call_(cmd)
    except Exception as e:
        # forget the mount path before propagating the failure
        del volume._paths['bde']
        logger.exception("Failed mounting BDE volume %s.", volume)
        raise SubsystemError(e)

    container = volume.volumes._make_single_subvolume(flag='alloc', offset=0, size=volume.size)
    container.info['fsdescription'] = 'BDE Volume'

    return container
def mount(self, volume)
Mounts a BDE container. Uses key material provided by the :attr:`keys` attribute. The key material should be provided in the same format as to :cmd:`bdemount`, used as follows: k:full volume encryption and tweak key p:passphrase r:recovery password s:file to startup key (.bek) :return: the Volume contained in the BDE container :raises ArgumentError: if the keys argument is invalid :raises SubsystemError: when the underlying command fails
6.21613
5.102345
1.218289
os.environ['LVM_SUPPRESS_FD_WARNINGS'] = '1' # find free loopback device volume._find_loopback() time.sleep(0.2) try: # Scan for new lvm volumes result = _util.check_output_(["lvm", "pvscan"]) for l in result.splitlines(): if volume.loopback in l or (volume.offset == 0 and volume.get_raw_path() in l): for vg in re.findall(r'VG (\S+)', l): volume.info['volume_group'] = vg if not volume.info.get('volume_group'): logger.warning("Volume is not a volume group. (Searching for %s)", volume.loopback) raise IncorrectFilesystemError() # Enable lvm volumes _util.check_call_(["lvm", "vgchange", "-a", "y", volume.info['volume_group']], stdout=subprocess.PIPE) except Exception: volume._free_loopback() raise volume.volumes.vstype = 'lvm' # fills it up. for _ in volume.volumes.detect_volumes('lvm'): pass
def mount(self, volume)
Performs mount actions on a LVM. Scans for active volume groups from the loopback device, activates it and fills :attr:`volumes` with the logical volumes. :raises NoLoopbackAvailableError: when no loopback was available :raises IncorrectFilesystemError: when the volume is not a volume group
6.882528
5.829298
1.180679
def mount(self, volume):
    """Add the volume to a RAID system. The RAID array is activated as soon as
    the array can be activated.

    :raises NoLoopbackAvailableError: if no loopback device was found
    """
    volume._find_loopback()

    raid_status = None
    try:
        # use mdadm to mount the loopback to a md device
        # incremental and run as soon as available
        output = _util.check_output_(['mdadm', '-IR', volume.loopback], stderr=subprocess.STDOUT)

        match = re.findall(r"attached to ([^ ,]+)", output)
        if match:
            volume._paths['md'] = os.path.realpath(match[0])
            if 'which is already active' in output:
                logger.info("RAID is already active in other volume, using %s", volume._paths['md'])
                raid_status = 'active'
            elif 'not enough to start' in output:
                # normalize /dev/md/X to /dev/mdX for the waiting device
                volume._paths['md'] = volume._paths['md'].replace("/dev/md/", "/dev/md")
                logger.info("RAID volume added, but not enough to start %s", volume._paths['md'])
                raid_status = 'waiting'
            else:
                logger.info("RAID started at {0}".format(volume._paths['md']))
                raid_status = 'active'
    except Exception as e:
        logger.exception("Failed mounting RAID.")
        volume._free_loopback()
        raise SubsystemError(e)

    # search for the RAID volume
    for v in volume.disk.parser.get_volumes():
        if v._paths.get("md") == volume._paths['md'] and v.volumes:
            logger.debug("Adding existing volume %s to volume %s", v.volumes[0], volume)
            v.volumes[0].info['raid_status'] = raid_status
            volume.volumes.volumes.append(v.volumes[0])
            return v.volumes[0]
    else:
        # NOTE: the loop contains no break, so this else always runs when no
        # existing RAID volume was found and returned above
        logger.debug("Creating RAID volume for %s", self)
        container = volume.volumes._make_single_subvolume(flag='alloc', offset=0, size=volume.size)
        container.info['fsdescription'] = 'RAID Volume'
        container.info['raid_status'] = raid_status
        return container
def mount(self, volume)
Add the volume to a RAID system. The RAID array is activated as soon as the array can be activated. :raises NoLoopbackAvailableError: if no loopback device was found
4.900849
4.868093
1.006729
def escape(s, fold_newlines=True):
    """Escapes a string to make it usable in LaTeX text mode. Will replace
    special characters as well as newlines.

    :param s: The string to escape.
    :param fold_newlines: If true, multiple newlines will be reduced to just a
                          single ``\\\\``. Otherwise, whitespace is kept intact
                          by adding multiple ``[n\\baselineskip]``.
    """
    def replace(match):
        ch = match.group()
        if ch in CHAR_ESCAPE:
            return CHAR_ESCAPE[ch]
        if ch.isspace():
            # A whitespace match here is a run of newlines (assumes ESCAPE_RE
            # only matches escapable chars or newline runs — confirm): either
            # fold it into one line break or preserve the vertical space.
            if fold_newlines:
                return r'\\'
            return r'\\[{}\baselineskip]'.format(len(ch))
    return ESCAPE_RE.sub(replace, s)
def escape(s, fold_newlines=True)
Escapes a string to make it usable in LaTeX text mode. Will replace special characters as well as newlines. Some problematic characters like ``[`` and ``]`` are escaped into groups (e.g. ``{[}``), because they tend to cause problems when mixed with ``\\`` newlines otherwise. :param s: The string to escape. :param fold_newlines: If true, multiple newlines will be reduced to just a single ``\\``. Otherwise, whitespace is kept intact by adding multiple ``[n\baselineskip]``.
4.699237
3.554607
1.322013
def build_pdf(source, texinputs=[], builder=None):
    """Builds a LaTeX source to PDF.

    Will automatically instantiate an available builder (or raise a
    :class:`exceptions.RuntimeError` if none are available) and build the
    supplied source with it.

    Parameters are passed on to the builder's
    :meth:`~latex.build.LatexBuilder.build_pdf` function.

    :param builder: Specify which builder should be used - ``latexmk``,
                    ``pdflatex`` or ``xelatexmk``.
    """
    if builder is None:
        candidates = PREFERRED_BUILDERS
    elif builder not in BUILDERS:
        raise RuntimeError('Invalid Builder specified')
    else:
        candidates = (builder, )

    # Try each candidate in preference order; the first one that is actually
    # installed wins.
    for candidate in candidates:
        instance = BUILDERS[candidate]()
        if not instance.is_available():
            continue
        return instance.build_pdf(source, texinputs)

    raise RuntimeError('No available builder could be instantiated. '
                       'Please make sure LaTeX is installed.')
def build_pdf(source, texinputs=[], builder=None)
Builds a LaTeX source to PDF. Will automatically instantiate an available builder (or raise a :class:`exceptions.RuntimeError` if none are available) and build the supplied source with it. Parameters are passed on to the builder's :meth:`~latex.build.LatexBuilder.build_pdf` function. :param builder: Specify which builder should be used - ``latexmk``, ``pdflatex`` or ``xelatexmk``.
3.515213
3.194221
1.100492
def parse_log(log, context_size=3):
    """Parses latex log output and tries to extract error messages.

    Requires ``-file-line-error`` to be active.

    :param log: The contents of the logfile as a string.
    :param context_size: Number of lines to keep as context, including the
                         original error line.
    :return: A list of dicts, each with ``line`` (line number, an int),
             ``error`` (the error message), ``filename`` (name of the
             temporary file used for building) and ``context`` (list of
             lines, starting with the error line).
    """
    lines = log.splitlines()
    errors = []
    for idx, line in enumerate(lines):
        match = LATEX_ERR_RE.match(line)
        if not match:
            continue
        info = dict(match.groupdict())
        # keep the error line itself plus the following context lines
        info['context'] = lines[idx:idx + context_size]
        try:
            info['line'] = int(info['line'])
        except TypeError:
            pass  # ignore invalid int conversion
        errors.append(info)
    return errors
def parse_log(log, context_size=3)
Parses latex log output and tries to extract error messages. Requires ``-file-line-error`` to be active. :param log: The contents of the logfile as a string. :param context_size: Number of lines to keep as context, including the original error line. :return: A list of dictionaries, each containing ``line`` (line number, an int), ``error`` (the error message), ``filename`` (name of the temporary file used for building) and ``context`` (list of lines, starting with the error line).
3.224306
3.036336
1.061907
def make_env(*args, **kwargs):
    """Creates an :py:class:`~jinja2.Environment` with different defaults.

    Any arguments will be passed on to the :py:class:`~jinja2.Environment`
    constructor and override the defaults from ``ENV_ARGS``. The ``|e``,
    ``|escape`` and ``|forceescape`` filters are all replaced with
    :func:`latex.escape`.
    """
    env_options = ENV_ARGS.copy()
    env_options.update(kwargs)
    env = Environment(*args, **env_options)
    # FIXME: this is a bug — 'forceescape' is mapped to the same plain
    # escape as 'e' and 'escape'.
    for filter_name in ('e', 'escape', 'forceescape'):
        env.filters[filter_name] = LatexMarkup.escape
    return env
def make_env(*args, **kwargs)
Creates an :py:class:`~jinja2.Environment` with different defaults. Per default, ``autoescape`` will be disabled and ``trim_blocks`` enabled. All start/end/prefix strings will be changed for a more LaTeX-friendly version (see the docs for details). Any arguments will be passed on to the :py:class:`~jinja2.Environment` constructor and override new values. Finally, the ``|e``, ``|escape`` and ``|forceescape`` filters will be replaced with a call to :func:`latex.escape`.
6.369131
4.525861
1.407275
def split_metadata_params(headers):
    """Given a dict of headers for s3, separates those that are boto3
    parameters from those that must be passed along as metadata.

    :return: tuple of (metadata dict, params dict)
    """
    params = {}
    metadata = {}
    for name, value in headers.items():
        normalized = name.lower()
        if normalized in header_mapping:
            # known boto3 parameter: translate the header name
            params[header_mapping[normalized]] = value
        else:
            # everything else travels as user metadata, original casing kept
            metadata[name] = value
    return metadata, params
def split_metadata_params(headers)
Given a dict of headers for s3, separates those that are boto3 parameters and those that must be metadata
2.257584
2.251557
1.002677
hasher = hashlib.sha1() with open(filename, 'rb') as f: buf = f.read(65536) while len(buf) > 0: hasher.update(buf) buf = f.read(65536) return hasher.hexdigest()
def hash_file(filename)
Generate a hash for the contents of a file
1.430043
1.46083
0.978925
def _get_bucket_name(**values):
    """Generates the bucket name for url_for.

    :return: tuple of (bucket path, remaining url values)
    """
    app = current_app

    # these special keyword arguments have no meaning for static urls; drop them
    values.pop('_external', False)
    values.pop('_anchor', None)
    values.pop('_method', None)

    url_style = get_setting('FLASKS3_URL_STYLE', app)
    if url_style == 'host':
        url_format = '{bucket_name}.{bucket_domain}'
    elif url_style == 'path':
        url_format = '{bucket_domain}/{bucket_name}'
    else:
        raise ValueError('Invalid S3 URL style: "{}"'.format(url_style))

    # a configured CDN domain takes precedence over the bucket address
    cdn_domain = get_setting('FLASKS3_CDN_DOMAIN', app)
    if cdn_domain:
        bucket_path = '{}'.format(cdn_domain)
    else:
        bucket_path = url_format.format(
            bucket_name=get_setting('FLASKS3_BUCKET_NAME', app),
            bucket_domain=get_setting('FLASKS3_BUCKET_DOMAIN', app),
        )

    bucket_path += _get_statics_prefix(app).rstrip('/')
    return bucket_path, values
def _get_bucket_name(**values)
Generates the bucket name for url_for.
3.531531
3.39083
1.041495
def url_for(endpoint, **values):
    """Generates a URL to the given endpoint.

    If the endpoint is for a static resource then an Amazon S3 URL is
    generated, otherwise the call is passed on to `flask.url_for`.

    Because this function is set as a jinja environment variable when
    `FlaskS3.init_app` is invoked, this function replaces `flask.url_for` in
    templates automatically. It is unlikely that this function will need to be
    directly called from within your application code, unless you need to
    refer to static assets outside of your templates.
    """
    app = current_app
    # under TESTING we fall straight back to flask, unless explicitly overridden
    if app.config.get('TESTING', False) and not app.config.get('FLASKS3_OVERRIDE_TESTING', True):
        return flask_url_for(endpoint, **values)

    if 'FLASKS3_BUCKET_NAME' not in app.config:
        raise ValueError("FLASKS3_BUCKET_NAME not found in app configuration.")

    # anything that is not a static endpoint is flask's business
    if endpoint != 'static' and not endpoint.endswith('.static'):
        return flask_url_for(endpoint, **values)

    scheme = 'https' if app.config.get("FLASKS3_USE_HTTPS", True) else 'http'
    # allow per url override for scheme
    scheme = values.pop('_scheme', scheme)

    bucket_path, values = _get_bucket_name(**values)

    urls = app.url_map.bind(bucket_path, url_scheme=scheme)
    return urls.build(endpoint, values=values, force_external=True)
def url_for(endpoint, **values)
Generates a URL to the given endpoint. If the endpoint is for a static resource then an Amazon S3 URL is generated, otherwise the call is passed on to `flask.url_for`. Because this function is set as a jinja environment variable when `FlaskS3.init_app` is invoked, this function replaces `flask.url_for` in templates automatically. It is unlikely that this function will need to be directly called from within your application code, unless you need to refer to static assets outside of your templates.
3.226475
3.448186
0.935702
def _bp_static_url(blueprint):
    """Builds the absolute url path for a blueprint's static folder."""
    prefix = blueprint.url_prefix or ''
    static_path = blueprint.static_url_path or ''
    return six.u('%s%s' % (prefix, static_path))
def _bp_static_url(blueprint)
builds the absolute url path for a blueprint's static folder
4.694875
4.985202
0.941762
def _gather_files(app, hidden, filepath_filter_regex=None):
    """Gets all files in static folders and returns them in a dict.

    :param app: the Flask application (its blueprints are scanned too)
    :param hidden: if true, also include files whose name starts with a dot
    :param filepath_filter_regex: optional regex; only files whose path
        relative to their static folder matches it are included
    :return: dict mapping (static_folder, static_url_path) tuples to lists
        of absolute file paths
    """
    dirs = [(six.text_type(app.static_folder), app.static_url_path)]
    if hasattr(app, 'blueprints'):
        # include every blueprint that declares its own static folder
        dirs.extend((bp.static_folder, _bp_static_url(bp))
                    for bp in app.blueprints.values() if bp.static_folder)

    valid_files = defaultdict(list)
    for static_folder, static_url_loc in dirs:
        if not os.path.isdir(static_folder):
            logger.warning("WARNING - [%s does not exist]" % static_folder)
            continue
        logger.debug("Checking static folder: %s" % static_folder)
        for root, _, files in os.walk(static_folder):
            relative_folder = re.sub(r'^/', '', root.replace(static_folder, ''))

            files = [os.path.join(root, x) for x in files if (
                (hidden or x[0] != '.') and
                # Skip this file if the filter regex is
                # defined, and this file's path is a
                # negative match.
                (filepath_filter_regex is None or re.search(
                    filepath_filter_regex,
                    os.path.join(relative_folder, x))))]

            if files:
                valid_files[(static_folder, static_url_loc)].extend(files)
    return valid_files
def _gather_files(app, hidden, filepath_filter_regex=None)
Gets all files in static folders and returns in dict.
3.279936
3.145839
1.042627
# first get the asset path relative to the static folder. # static_asset is not simply a filename because it could be # sub-directory then file etc. if not static_asset.startswith(static_folder): raise ValueError("%s static asset must be under %s static folder" % (static_asset, static_folder)) rel_asset = static_asset[len(static_folder):] # Now bolt the static url path and the relative asset location together return '%s/%s' % (static_url.rstrip('/'), rel_asset.lstrip('/'))
def _static_folder_path(static_url, static_folder, static_asset)
Returns a path to a file based on the static folder, and not on the filesystem holding the file. Returns a path relative to static_url for static_asset
4.935503
5.180254
0.952753