code
string | signature
string | docstring
string | loss_without_docstring
float64 | loss_with_docstring
float64 | factor
float64 |
---|---|---|---|---|---|
def FileEntryExistsByPathSpec(self, path_spec):
  """Determines if a file entry for a path specification exists.

  Args:
    path_spec (PathSpec): path specification.

  Returns:
    bool: True if the file entry exists.
  """
  # SQLiteBlobFile performs all path specification validation, so the
  # existence check is simply whether the file object can be opened.
  try:
    file_object = resolver.Resolver.OpenFileObject(
        path_spec, resolver_context=self._resolver_context)
  except (IOError, ValueError, errors.AccessError, errors.PathSpecError):
    return False
  else:
    file_object.close()
    return True
def GetFileEntryByPathSpec(self, path_spec):
  """Retrieves a file entry for a path specification.

  Args:
    path_spec (PathSpec): path specification.

  Returns:
    FileEntry: a file entry or None.
  """
  has_row_reference = (
      getattr(path_spec, 'row_index', None) is not None or
      getattr(path_spec, 'row_condition', None) is not None)

  if has_row_reference:
    return sqlite_blob_file_entry.SQLiteBlobFileEntry(
        self._resolver_context, self, path_spec)

  # Without a row index or row condition the path specification refers to
  # the virtual root directory.
  return sqlite_blob_file_entry.SQLiteBlobFileEntry(
      self._resolver_context, self, path_spec, is_root=True, is_virtual=True)
def GetRootFileEntry(self):
  """Retrieves the root file entry.

  Returns:
    FileEntry: a file entry or None.
  """
  root_path_spec = sqlite_blob_path_spec.SQLiteBlobPathSpec(
      table_name=self._path_spec.table_name,
      column_name=self._path_spec.column_name,
      parent=self._path_spec.parent)
  return self.GetFileEntryByPathSpec(root_path_spec)
def comparable(self):
  """str: comparable representation of the path specification."""
  sub_comparable_parts = [
      'table name: {0:s}'.format(self.table_name),
      'column name: {0:s}'.format(self.column_name)]

  if self.row_condition is not None:
    row_condition_string = ' '.join([
        '{0!s}'.format(value) for value in self.row_condition])
    sub_comparable_parts.append(
        'row condition: "{0:s}"'.format(row_condition_string))

  if self.row_index is not None:
    sub_comparable_parts.append('row index: {0:d}'.format(self.row_index))

  return self._GetComparable(
      sub_comparable_string=', '.join(sub_comparable_parts))
def _EntriesGenerator(self):
  """Retrieves directory entries.

  Since a directory can contain a vast number of entries using
  a generator is more memory efficient.

  Yields:
    TSKPartitionPathSpec: a path specification.
  """
  location = getattr(self.path_spec, 'location', None)
  part_index = getattr(self.path_spec, 'part_index', None)
  start_offset = getattr(self.path_spec, 'start_offset', None)

  # Only the virtual root file has directory entries.
  if (part_index is not None or start_offset is not None or
      location is None or location != self._file_system.LOCATION_ROOT):
    return

  tsk_volume = self._file_system.GetTSKVolume()
  bytes_per_sector = tsk_partition.TSKVolumeGetBytesPerSector(tsk_volume)

  part_index = 0
  partition_index = 0

  # pytsk3 does not handle the Volume_Info iterator correctly therefore
  # the explicit list is needed to prevent the iterator terminating too
  # soon or looping forever.
  for tsk_vs_part in list(tsk_volume):
    # Only set keyword arguments that are used, since the path
    # specification base class raises on unused keyword arguments.
    kwargs = {'part_index': part_index}
    part_index += 1

    if tsk_partition.TSKVsPartIsAllocated(tsk_vs_part):
      partition_index += 1
      kwargs['location'] = '/p{0:d}'.format(partition_index)

    start_sector = tsk_partition.TSKVsPartGetStartSector(tsk_vs_part)
    if start_sector is not None:
      kwargs['start_offset'] = start_sector * bytes_per_sector

    kwargs['parent'] = self.path_spec.parent

    yield tsk_partition_path_spec.TSKPartitionPathSpec(**kwargs)
def _GetDirectory(self):
  """Retrieves a directory.

  Returns:
    TSKPartitionDirectory: a directory or None if not available.
  """
  if self.entry_type == definitions.FILE_ENTRY_TYPE_DIRECTORY:
    return TSKPartitionDirectory(self._file_system, self.path_spec)
  return None
def _GetStat(self):
  """Retrieves the stat object.

  Returns:
    VFSStat: stat object.
  """
  stat = super(TSKPartitionFileEntry, self)._GetStat()

  sector_size = tsk_partition.TSKVolumeGetBytesPerSector(self._tsk_volume)

  # File data stat information.
  if self._tsk_vs_part is not None:
    number_of_sectors = tsk_partition.TSKVsPartGetNumberOfSectors(
        self._tsk_vs_part)
    if number_of_sectors:
      stat.size = number_of_sectors * sector_size

  # Date and time, ownership and permissions stat information are not
  # available for a partition.
  # The root file entry is virtual and should have type directory.
  if not self._is_virtual:
    stat.is_allocated = tsk_partition.TSKVsPartIsAllocated(self._tsk_vs_part)

  return stat
def name(self):
  """str: name of the file entry, which does not include the full path."""
  if self._name is None:
    location = getattr(self.path_spec, 'location', None)
    # Directory entries without a location in the path specification
    # are not given a name for now.
    if location is None:
      self._name = ''
    else:
      self._name = self._file_system.BasenamePath(location)

  return self._name
def comparable(self):
  """str: comparable representation of the path specification."""
  sub_comparable_parts = []

  if self.location is not None:
    sub_comparable_parts.append('location: {0:s}'.format(self.location))
  if self.store_index is not None:
    sub_comparable_parts.append('store index: {0:d}'.format(self.store_index))

  return self._GetComparable(
      sub_comparable_string=', '.join(sub_comparable_parts))
def _EntriesGenerator(self):
  """Retrieves directory entries.

  Since a directory can contain a vast number of entries using
  a generator is more memory efficient.

  Yields:
    FakePathSpec: a path specification.
  """
  location = getattr(self.path_spec, 'location', None)
  if location is None:
    return

  for path in self._file_system.GetPaths().keys():
    # A path that does not start with the location string refers to
    # a file outside this directory.
    if not path or not path.startswith(location):
      continue

    _, suffix = self._file_system.GetPathSegmentAndSuffix(location, path)

    # Ignore anything that is part of a sub directory or the directory
    # itself.
    if suffix or path == location:
      continue

    path_spec_location = self._file_system.JoinPath([path])
    yield fake_path_spec.FakePathSpec(location=path_spec_location)
def _GetDirectory(self):
  """Retrieves a directory.

  Returns:
    FakeDirectory: a directory or None if not available.
  """
  if self.entry_type == definitions.FILE_ENTRY_TYPE_DIRECTORY:
    return FakeDirectory(self._file_system, self.path_spec)
  return None
def _GetStat(self):
  """Retrieves information about the file entry.

  Returns:
    VFSStat: a stat object.
  """
  stat = super(FakeFileEntry, self)._GetStat()

  location = getattr(self.path_spec, 'location', None)
  if location:
    file_data = self._file_system.GetDataByPath(location)
    if file_data is not None:
      stat.size = len(file_data)

  return stat
if self._directory is None:
self._directory = self._GetDirectory()
if self._directory:
for path_spec in self._directory.entries:
yield self._file_system.GetFileEntryByPathSpec(path_spec) | def _GetSubFileEntries(self) | Retrieves sub file entries.
Yields:
FakeFileEntry: a sub file entry. | 3.412707 | 3.32638 | 1.025952 |
def link(self):
  """str: full path of the linked file entry."""
  if self.IsLink():
    location = getattr(self.path_spec, 'location', None)
    if location is not None:
      # The linked path is stored as the file data of the link entry.
      return self._file_system.GetDataByPath(location)

  return ''
def GetFileObject(self, data_stream_name=''):
  """Retrieves the file-like object.

  Args:
    data_stream_name (Optional[str]): name of the data stream, where an empty
        string represents the default data stream.

  Returns:
    FakeFileIO: a file-like object or None if not available.

  Raises:
    IOError: if the file entry is not a file.
    OSError: if the file entry is not a file.
  """
  if not self.IsFile():
    raise IOError('Cannot open non-file.')

  location = getattr(self.path_spec, 'location', None)
  # Fake file entries only support the default data stream.
  if data_stream_name or location is None:
    return None

  file_data = self._file_system.GetDataByPath(location)
  file_object = fake_file_io.FakeFile(self._resolver_context, file_data)
  file_object.open(path_spec=self.path_spec)
  return file_object
def GetParentFileEntry(self):
  """Retrieves the parent file entry.

  Returns:
    FakeFileEntry: parent file entry or None if not available.
  """
  location = getattr(self.path_spec, 'location', None)
  if location is None:
    return None

  parent_location = self._file_system.DirnamePath(location)
  if parent_location is None:
    return None

  # The parent of a file entry directly under the root is the root itself.
  if parent_location == '':
    parent_location = self._file_system.PATH_SEPARATOR

  path_spec = fake_path_spec.FakePathSpec(location=parent_location)
  return self._file_system.GetFileEntryByPathSpec(path_spec)
def _RawGlobPathSpecWithAlphabeticalSchema(
    file_system, parent_path_spec, segment_format, location, segment_length,
    upper_case=False):
  """Globs for path specifications according to an alphabetical naming schema.

  Args:
    file_system (FileSystem): file system.
    parent_path_spec (PathSpec): parent path specification.
    segment_format (str): naming schema of the segment file location.
    location (str): the base segment file location string.
    segment_length (int): length (number of characters) of the segment
        indicator.
    upper_case (Optional[bool]): True if the segment name is in upper case.

  Returns:
    list[PathSpec]: path specifications that match the glob.
  """
  base_letter = 'A' if upper_case else 'a'

  segment_files = []
  segment_number = 0
  while True:
    # Convert the segment number into a base-26 letter sequence of
    # segment_length characters, least significant letter first, then
    # reverse to form the extension e.g. 0 => 'aa', 1 => 'ab'.
    letters = []
    remaining = segment_number
    for _ in range(segment_length):
      remaining, remainder = divmod(remaining, 26)
      letters.append(chr(ord(base_letter) + remainder))
    segment_letters = ''.join(reversed(letters))

    segment_location = segment_format.format(location, segment_letters)

    # Note that we don't want to set the keyword arguments when not used
    # because the path specification base class will check for unused
    # keyword arguments and raise.
    kwargs = path_spec_factory.Factory.GetProperties(parent_path_spec)
    kwargs['location'] = segment_location
    if parent_path_spec.parent is not None:
      kwargs['parent'] = parent_path_spec.parent

    segment_path_spec = path_spec_factory.Factory.NewPathSpec(
        parent_path_spec.type_indicator, **kwargs)

    if not file_system.FileEntryExistsByPathSpec(segment_path_spec):
      return segment_files

    segment_files.append(segment_path_spec)
    segment_number += 1
def _RawGlobPathSpecWithNumericSchema(
    file_system, parent_path_spec, segment_format, location, segment_number):
  """Globs for path specifications according to a numeric naming schema.

  Args:
    file_system (FileSystem): file system.
    parent_path_spec (PathSpec): parent path specification.
    segment_format (str): naming schema of the segment file location.
    location (str): the base segment file location string.
    segment_number (int): first segment number.

  Returns:
    list[PathSpec]: path specifications that match the glob.
  """
  segment_files = []
  while True:
    segment_location = segment_format.format(location, segment_number)

    # Note that we don't want to set the keyword arguments when not used
    # because the path specification base class will check for unused
    # keyword arguments and raise.
    kwargs = path_spec_factory.Factory.GetProperties(parent_path_spec)
    kwargs['location'] = segment_location
    if parent_path_spec.parent is not None:
      kwargs['parent'] = parent_path_spec.parent

    segment_path_spec = path_spec_factory.Factory.NewPathSpec(
        parent_path_spec.type_indicator, **kwargs)

    if not file_system.FileEntryExistsByPathSpec(segment_path_spec):
      return segment_files

    segment_files.append(segment_path_spec)
    segment_number += 1
def _Open(self, path_spec=None, mode='rb'):
  """Opens the file-like object defined by path specification.

  Args:
    path_spec (PathSpec): path specification.
    mode (Optional[str]): file access mode.

  Raises:
    AccessError: if the access to open the file was denied.
    IOError: if the file-like object could not be opened.
    OSError: if the file-like object could not be opened.
    PathSpecError: if the path specification is incorrect.
    ValueError: if the path specification is invalid.
  """
  if not path_spec:
    raise ValueError('Missing path specification.')

  store_index = vshadow.VShadowPathSpecGetStoreIndex(path_spec)
  if store_index is None:
    raise errors.PathSpecError(
        'Unable to retrieve store index from path specification.')

  self._file_system = resolver.Resolver.OpenFileSystem(
      path_spec, resolver_context=self._resolver_context)
  volume = self._file_system.GetVShadowVolume()

  if store_index < 0 or store_index >= volume.number_of_stores:
    raise errors.PathSpecError((
        'Unable to retrieve VSS store: {0:d} from path '
        'specification.').format(store_index))

  store = volume.get_store(store_index)
  # A store without in-volume data cannot be read directly.
  if not store.has_in_volume_data():
    raise IOError((
        'Unable to open VSS store: {0:d} without in-volume stored '
        'data.').format(store_index))

  self._vshadow_store = store
def read(self, size=None):
  """Reads a byte string from the file-like object at the current offset.

  The function will read a byte string of the specified size or
  all of the remaining data if no size was specified.

  Args:
    size (Optional[int]): number of bytes to read, where None is all
        remaining data.

  Returns:
    bytes: data read.

  Raises:
    IOError: if the read failed.
    OSError: if the read failed.
  """
  if not self._is_open:
    raise IOError('Not opened.')

  # Delegate the read directly to the VSS store.
  return self._vshadow_store.read(size)
def GetSubNodeByLocation(self, location):
  """Retrieves a sub scan node based on the location.

  Args:
    location (str): location that should match the location of the path
        specification of a sub scan node.

  Returns:
    SourceScanNode: sub scan node or None if not available.
  """
  matches = (
      sub_node for sub_node in self.sub_nodes
      if getattr(sub_node.path_spec, 'location', None) == location)
  return next(matches, None)
def GetUnscannedSubNode(self):
  """Retrieves the first unscanned sub node.

  Returns:
    SourceScanNode: sub scan node or None if not available.
  """
  # An unscanned leaf node is itself the result.
  if not self.scanned and not self.sub_nodes:
    return self

  for sub_node in self.sub_nodes:
    unscanned_node = sub_node.GetUnscannedSubNode()
    if unscanned_node:
      return unscanned_node

  return None
def AddScanNode(self, path_spec, parent_scan_node):
  """Adds a scan node for a certain path specification.

  Args:
    path_spec (PathSpec): path specification.
    parent_scan_node (SourceScanNode): parent scan node or None.

  Returns:
    SourceScanNode: scan node.

  Raises:
    KeyError: if the scan node already exists.
    RuntimeError: if the parent scan node is not present.
  """
  if self._scan_nodes.get(path_spec, None):
    raise KeyError('Scan node already exists.')

  new_scan_node = SourceScanNode(path_spec)

  if parent_scan_node:
    if parent_scan_node.path_spec not in self._scan_nodes:
      raise RuntimeError('Parent scan node not present.')
    new_scan_node.parent_node = parent_scan_node
    parent_scan_node.sub_nodes.append(new_scan_node)

  # The first scan node added becomes the root.
  if not self._root_path_spec:
    self._root_path_spec = path_spec

  self._scan_nodes[path_spec] = new_scan_node
  if path_spec.IsFileSystem():
    self._file_system_scan_nodes[path_spec] = new_scan_node

  self.updated = True
  return new_scan_node
def GetUnscannedScanNode(self):
  """Retrieves the first unscanned scan node.

  Returns:
    SourceScanNode: scan node or None if not available.
  """
  root_node = self._scan_nodes.get(self._root_path_spec, None)
  if root_node and root_node.scanned:
    return root_node.GetUnscannedSubNode()

  # A missing or still unscanned root node is returned as-is.
  return root_node
def LockScanNode(self, path_spec):
  """Marks a scan node as locked.

  Args:
    path_spec (PathSpec): path specification.

  Raises:
    KeyError: if the scan node does not exists.
  """
  scan_node = self._scan_nodes.get(path_spec, None)
  if not scan_node:
    raise KeyError('Scan node does not exist.')

  self._locked_scan_nodes[path_spec] = scan_node
def OpenSourcePath(self, source_path):
  """Opens the source path.

  Args:
    source_path (str): source path.
  """
  # The source is always an operating system path specification and
  # becomes the (parentless) root of the scan tree.
  source_path_spec = path_spec_factory.Factory.NewPathSpec(
      definitions.TYPE_INDICATOR_OS, location=source_path)

  self.AddScanNode(source_path_spec, None)
def RemoveScanNode(self, path_spec):
  """Removes a scan node of a certain path specification.

  Args:
    path_spec (PathSpec): path specification.

  Returns:
    SourceScanNode: parent scan node or None if not available.

  Raises:
    RuntimeError: if the scan node has sub nodes.
  """
  scan_node = self._scan_nodes.get(path_spec, None)
  if not scan_node:
    return None

  # Only leaf nodes can be removed.
  if scan_node.sub_nodes:
    raise RuntimeError('Scan node has sub nodes.')

  parent_scan_node = scan_node.parent_node
  if parent_scan_node:
    parent_scan_node.sub_nodes.remove(scan_node)

  if path_spec == self._root_path_spec:
    self._root_path_spec = None

  del self._scan_nodes[path_spec]
  if path_spec.IsFileSystem():
    del self._file_system_scan_nodes[path_spec]

  return parent_scan_node
def UnlockScanNode(self, path_spec):
  """Marks a scan node as unlocked.

  Args:
    path_spec (PathSpec): path specification.

  Raises:
    KeyError: if the scan node does not exists or is not locked.
  """
  if not self.HasScanNode(path_spec):
    raise KeyError('Scan node does not exist.')

  if path_spec not in self._locked_scan_nodes:
    raise KeyError('Scan node is not locked.')

  del self._locked_scan_nodes[path_spec]

  # The scan node needs to be scanned again after it has been unlocked.
  self._scan_nodes[path_spec].scanned = False
def _ScanEncryptedVolumeNode(self, scan_context, scan_node):
  """Scans an encrypted volume node for supported formats.

  Args:
    scan_context (SourceScannerContext): source scanner context.
    scan_node (SourceScanNode): source scan node.

  Raises:
    BackEndError: if the scan node cannot be unlocked.
    ValueError: if the scan context or scan node is invalid.
  """
  if scan_node.type_indicator == definitions.TYPE_INDICATOR_APFS_CONTAINER:
    # TODO: consider changing this when upstream changes have been made.
    # Currently pyfsapfs does not support reading from a volume as a device.
    # Also see: https://github.com/log2timeline/dfvfs/issues/332
    container_file_entry = resolver.Resolver.OpenFileEntry(
        scan_node.path_spec, resolver_context=self._resolver_context)
    fsapfs_volume = container_file_entry.GetAPFSVolume()

    # TODO: unlocking the volume multiple times is inefficient cache volume
    # object in scan node and use is_locked = fsapfs_volume.is_locked()
    try:
      is_locked = not apfs_helper.APFSUnlockVolume(
          fsapfs_volume, scan_node.path_spec, resolver.Resolver.key_chain)
    except IOError as exception:
      raise errors.BackEndError(
          'Unable to unlock APFS volume with error: {0!s}'.format(exception))

  else:
    file_object = resolver.Resolver.OpenFileObject(
        scan_node.path_spec, resolver_context=self._resolver_context)
    is_locked = not file_object or file_object.is_locked
    # Guard the close call: OpenFileObject can return None, which the
    # is_locked expression above anticipates but an unconditional close
    # would raise AttributeError on.
    if file_object:
      file_object.close()

  if is_locked:
    scan_context.LockScanNode(scan_node.path_spec)

    # For BitLocker To Go add a scan node for the unencrypted part of
    # the volume.
    if scan_node.type_indicator == definitions.TYPE_INDICATOR_BDE:
      path_spec = self.ScanForFileSystem(scan_node.path_spec.parent)
      if path_spec:
        scan_context.AddScanNode(path_spec, scan_node.parent_node)
def _ScanVolumeSystemRootNode(
    self, scan_context, scan_node, auto_recurse=True):
  """Scans a volume system root node for supported formats.

  Args:
    scan_context (SourceScannerContext): source scanner context.
    scan_node (SourceScanNode): source scan node.
    auto_recurse (Optional[bool]): True if the scan should automatically
        recurse as far as possible.

  Raises:
    ValueError: if the scan context or scan node is invalid.
  """
  is_vshadow = (
      scan_node.type_indicator == definitions.TYPE_INDICATOR_VSHADOW)

  if is_vshadow:
    # For VSS add a scan node for the current volume.
    path_spec = self.ScanForFileSystem(scan_node.path_spec.parent)
    if path_spec:
      scan_context.AddScanNode(path_spec, scan_node.parent_node)

  # Determine the path specifications of the sub file entries.
  file_entry = resolver.Resolver.OpenFileEntry(
      scan_node.path_spec, resolver_context=self._resolver_context)

  for sub_file_entry in file_entry.sub_file_entries:
    sub_scan_node = scan_context.AddScanNode(
        sub_file_entry.path_spec, scan_node)

    # Since scanning for file systems in VSS snapshot volumes can
    # be expensive we only do this when explicitly asked for.
    if is_vshadow:
      continue

    if auto_recurse or not scan_context.updated:
      self._ScanNode(scan_context, sub_scan_node, auto_recurse=auto_recurse)
def GetVolumeIdentifiers(self, volume_system):
  """Retrieves the volume identifiers.

  Args:
    volume_system (VolumeSystem): volume system.

  Returns:
    list[str]: sorted volume identifiers.
  """
  # Volumes without an identifier are skipped.
  identifiers = (
      getattr(volume, 'identifier', None)
      for volume in volume_system.volumes)
  return sorted(identifier for identifier in identifiers if identifier)
def Scan(self, scan_context, auto_recurse=True, scan_path_spec=None):
  """Scans for supported formats.

  Args:
    scan_context (SourceScannerContext): source scanner context.
    auto_recurse (Optional[bool]): True if the scan should automatically
        recurse as far as possible.
    scan_path_spec (Optional[PathSpec]): path specification to indicate
        where the source scanner should continue scanning, where None
        indicates the scanner will start with the sources.

  Raises:
    ValueError: if the scan context is invalid.
  """
  if not scan_context:
    raise ValueError('Invalid scan context.')

  scan_context.updated = False

  scan_node = (
      scan_context.GetScanNode(scan_path_spec) if scan_path_spec
      else scan_context.GetUnscannedScanNode())

  if scan_node:
    self._ScanNode(scan_context, scan_node, auto_recurse=auto_recurse)
def ScanForFileSystem(self, source_path_spec):
  """Scans the path specification for a supported file system format.

  Args:
    source_path_spec (PathSpec): source path specification.

  Returns:
    PathSpec: file system path specification or None if no supported file
        system type was found.

  Raises:
    BackEndError: if the source cannot be scanned or more than one file
        system type is found.
  """
  if source_path_spec.type_indicator == (
      definitions.TYPE_INDICATOR_APFS_CONTAINER):
    # TODO: consider changing this when upstream changes have been made.
    # Currently pyfsapfs does not support reading from a volume as a device.
    # Also see: https://github.com/log2timeline/dfvfs/issues/332
    return path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_APFS, location='/',
        parent=source_path_spec)

  try:
    type_indicators = analyzer.Analyzer.GetFileSystemTypeIndicators(
        source_path_spec, resolver_context=self._resolver_context)
  except RuntimeError as exception:
    raise errors.BackEndError((
        'Unable to process source path specification with error: '
        '{0!s}').format(exception))

  if not type_indicators:
    return None

  if len(type_indicators) == 1:
    type_indicator = type_indicators[0]
  else:
    # Multiple detections are only tolerated when the preferred NTFS
    # back-end is among them.
    if definitions.PREFERRED_NTFS_BACK_END not in type_indicators:
      raise errors.BackEndError(
          'Unsupported source found more than one file system types.')
    type_indicator = definitions.PREFERRED_NTFS_BACK_END

  # TODO: determine root location from file system or path specification.
  if type_indicator == definitions.TYPE_INDICATOR_NTFS:
    root_location = '\\'
  else:
    root_location = '/'

  file_system_path_spec = path_spec_factory.Factory.NewPathSpec(
      type_indicator, location=root_location, parent=source_path_spec)

  if type_indicator == definitions.TYPE_INDICATOR_TSK:
    # Check if the file system can be opened since the file system by
    # signature detection results in false positives.
    try:
      file_system = resolver.Resolver.OpenFileSystem(
          file_system_path_spec, resolver_context=self._resolver_context)
      file_system.Close()
    except errors.BackEndError:
      file_system_path_spec = None

  return file_system_path_spec
def ScanForStorageMediaImage(self, source_path_spec):
  """Scans the path specification for a supported storage media image format.

  Args:
    source_path_spec (PathSpec): source path specification.

  Returns:
    PathSpec: storage media image path specification or None if no supported
        storage media image type was found.

  Raises:
    BackEndError: if the source cannot be scanned or more than one storage
        media image type is found.
  """
  try:
    type_indicators = analyzer.Analyzer.GetStorageMediaImageTypeIndicators(
        source_path_spec, resolver_context=self._resolver_context)
  except RuntimeError as exception:
    raise errors.BackEndError((
        'Unable to process source path specification with error: '
        '{0!s}').format(exception))

  if type_indicators:
    if len(type_indicators) > 1:
      raise errors.BackEndError(
          'Unsupported source found more than one storage media image types.')

    return path_spec_factory.Factory.NewPathSpec(
        type_indicators[0], parent=source_path_spec)

  # The RAW storage media image type cannot be detected based on
  # a signature so we try to detect it based on common file naming schemas.
  file_system = resolver.Resolver.OpenFileSystem(
      source_path_spec, resolver_context=self._resolver_context)
  raw_path_spec = path_spec_factory.Factory.NewPathSpec(
      definitions.TYPE_INDICATOR_RAW, parent=source_path_spec)

  try:
    # The RAW glob function will raise a PathSpecError if the path
    # specification is unsuitable for globbing.
    glob_results = raw.RawGlobPathSpec(file_system, raw_path_spec)
  except errors.PathSpecError:
    glob_results = None

  file_system.Close()

  if glob_results:
    return raw_path_spec
  return None
def ScanForVolumeSystem(self, source_path_spec):
  """Scans the path specification for a supported volume system format.

  Args:
    source_path_spec (PathSpec): source path specification.

  Returns:
    PathSpec: volume system path specification or None if no supported volume
        system type was found.

  Raises:
    BackEndError: if the source cannot be scanned or more than one volume
        system type is found.
  """
  # It is technically possible to scan for VSS-in-VSS but makes no sense
  # to do so.
  if source_path_spec.type_indicator == definitions.TYPE_INDICATOR_VSHADOW:
    return None

  if source_path_spec.IsVolumeSystemRoot():
    return source_path_spec

  if source_path_spec.type_indicator == (
      definitions.TYPE_INDICATOR_APFS_CONTAINER):
    # TODO: consider changing this when upstream changes have been made.
    # Currently pyfsapfs does not support reading from a volume as a device.
    # Also see: https://github.com/log2timeline/dfvfs/issues/332
    return None

  try:
    type_indicators = analyzer.Analyzer.GetVolumeSystemTypeIndicators(
        source_path_spec, resolver_context=self._resolver_context)
  except (IOError, RuntimeError) as exception:
    raise errors.BackEndError((
        'Unable to process source path specification with error: '
        '{0!s}').format(exception))

  if not type_indicators:
    return None

  if len(type_indicators) > 1:
    raise errors.BackEndError(
        'Unsupported source found more than one volume system types.')

  type_indicator = type_indicators[0]

  # Prevent a TSK partition in TSK partition scan loop.
  if (type_indicator == definitions.TYPE_INDICATOR_TSK_PARTITION and
      source_path_spec.type_indicator == (
          definitions.TYPE_INDICATOR_TSK_PARTITION)):
    return None

  if type_indicator in definitions.VOLUME_SYSTEM_TYPE_INDICATORS:
    return path_spec_factory.Factory.NewPathSpec(
        type_indicator, location='/', parent=source_path_spec)

  return path_spec_factory.Factory.NewPathSpec(
      type_indicator, parent=source_path_spec)
def Unlock(
    self, scan_context, path_spec, credential_identifier, credential_data):
  """Unlocks a locked scan node e.g. the scan node of an encrypted volume.

  Args:
    scan_context (SourceScannerContext): source scanner context.
    path_spec (PathSpec): path specification of the locked scan node.
    credential_identifier (str): credential identifier used to unlock
        the scan node.
    credential_data (bytes): credential data used to unlock the scan node.

  Returns:
    bool: True if the scan node was successfully unlocked.

  Raises:
    BackEndError: if the scan node cannot be unlocked.
    KeyError: if the scan node does not exists or is not locked.
  """
  if not scan_context.HasScanNode(path_spec):
    raise KeyError('Scan node does not exist.')

  if not scan_context.IsLockedScanNode(path_spec):
    raise KeyError('Scan node is not locked.')

  resolver.Resolver.key_chain.SetCredential(
      path_spec, credential_identifier, credential_data)

  if path_spec.type_indicator == definitions.TYPE_INDICATOR_APFS_CONTAINER:
    # TODO: consider changing this when upstream changes have been made.
    # Currently pyfsapfs does not support reading from a volume as a device.
    # Also see: https://github.com/log2timeline/dfvfs/issues/332
    container_file_entry = resolver.Resolver.OpenFileEntry(
        path_spec, resolver_context=self._resolver_context)
    fsapfs_volume = container_file_entry.GetAPFSVolume()

    try:
      is_locked = not apfs_helper.APFSUnlockVolume(
          fsapfs_volume, path_spec, resolver.Resolver.key_chain)
    except IOError as exception:
      raise errors.BackEndError(
          'Unable to unlock APFS volume with error: {0!s}'.format(exception))

  else:
    file_object = resolver.Resolver.OpenFileObject(
        path_spec, resolver_context=self._resolver_context)
    is_locked = not file_object or file_object.is_locked
    # Guard the close call: OpenFileObject can return None, which the
    # is_locked expression above anticipates but an unconditional close
    # would raise AttributeError on.
    if file_object:
      file_object.close()

  if not is_locked:
    scan_context.UnlockScanNode(path_spec)

  return not is_locked
self._fsntfs_data_stream = None
self._fsntfs_file_entry = None
self._file_system.Close()
self._file_system = None | def _Close(self) | Closes the file-like object. | 6.97178 | 7.255281 | 0.960925 |
def _Open(self, path_spec=None, mode='rb'):
  """Opens the file-like object defined by path specification.

  Args:
    path_spec (PathSpec): path specification.
    mode (Optional[str]): file access mode.

  Raises:
    AccessError: if the access to open the file was denied.
    IOError: if the file-like object could not be opened.
    OSError: if the file-like object could not be opened.
    PathSpecError: if the path specification is incorrect.
    ValueError: if the path specification is invalid.
  """
  if not path_spec:
    raise ValueError('Missing path specification.')

  data_stream_name = getattr(path_spec, 'data_stream', None)

  self._file_system = resolver.Resolver.OpenFileSystem(
      path_spec, resolver_context=self._resolver_context)

  file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
  if not file_entry:
    raise IOError('Unable to open file entry.')

  fsntfs_file_entry = file_entry.GetNTFSFileEntry()
  if not fsntfs_file_entry:
    raise IOError('Unable to open NTFS file entry.')

  fsntfs_data_stream = None
  if data_stream_name:
    # A named data stream refers to an alternate data stream (ADS).
    fsntfs_data_stream = fsntfs_file_entry.get_alternate_data_stream_by_name(
        data_stream_name)
    if not fsntfs_data_stream:
      raise IOError('Unable to open data stream: {0:s}.'.format(
          data_stream_name))

  elif not fsntfs_file_entry.has_default_data_stream():
    raise IOError('Missing default data stream.')

  self._fsntfs_data_stream = fsntfs_data_stream
  self._fsntfs_file_entry = fsntfs_file_entry
def read(self, size=None):
  """Reads a byte string from the file-like object at the current offset.

  The function will read a byte string of the specified size or
  all of the remaining data if no size was specified.

  Args:
    size (Optional[int]): number of bytes to read, where None is all
        remaining data.

  Returns:
    bytes: data read.

  Raises:
    IOError: if the read failed.
    OSError: if the read failed.
  """
  if not self._is_open:
    raise IOError('Not opened.')

  # An alternate data stream, when set, takes precedence over the file entry.
  data_source = self._fsntfs_data_stream or self._fsntfs_file_entry
  return data_source.read(size=size)
if not self._is_open:
raise IOError('Not opened.')
if self._fsntfs_data_stream:
self._fsntfs_data_stream.seek(offset, whence)
else:
self._fsntfs_file_entry.seek(offset, whence) | def seek(self, offset, whence=os.SEEK_SET) | Seeks to an offset within the file-like object.
Args:
offset (int): offset to seek to.
whence (Optional(int)): value that indicates whether offset is an absolute
or relative position within the file.
Raises:
IOError: if the seek failed.
OSError: if the seek failed. | 3.876324 | 4.084215 | 0.949099 |
if not self._is_open:
raise IOError('Not opened.')
if self._fsntfs_data_stream:
return self._fsntfs_data_stream.get_offset()
return self._fsntfs_file_entry.get_offset() | def get_offset(self) | Retrieves the current offset into the file-like object.
Returns:
int: current offset into the file-like object.
Raises:
IOError: if the file-like object has not been opened.
OSError: if the file-like object has not been opened. | 5.114954 | 5.07445 | 1.007982 |
def get_size(self):
  """Retrieves the size of the file-like object.

  Returns:
    int: size of the file-like object data.

  Raises:
    IOError: if the file-like object has not been opened.
  """
  if not self._is_open:
    raise IOError('Not opened.')

  # Delegate to the alternate data stream when one was opened.
  source = self._fsntfs_file_entry
  if self._fsntfs_data_stream:
    source = self._fsntfs_data_stream
  return source.get_size()
Returns:
int: size of the file-like object data.
Raises:
IOError: if the file-like object has not been opened.
OSError: if the file-like object has not been opened. | 4.838464 | 4.836249 | 1.000458 |
def _GetStat(self):
  """Retrieves information about the file entry.

  Returns:
    VFSStat: a stat object.
  """
  stat_object = super(GzipFileEntry, self)._GetStat()

  if self._gzip_file:
    stat_object.size = self._gzip_file.uncompressed_data_size

  # Additional gzip metadata that could be exposed in the future:
  # gzip_file.comment, gzip_file.operating_system and
  # gzip_file.original_filename.
  return stat_object
Returns:
VFSStat: a stat object. | 5.613212 | 5.69523 | 0.985599 |
def modification_time(self):
  """dfdatetime.DateTimeValues: modification time or None if not available."""
  timestamps = self._gzip_file.modification_times
  if timestamps:
    return dfdatetime_posix_time.PosixTime(timestamp=timestamps[0])
  return None
def Read(self):
  """Reads a string from the input.

  Returns:
    str: input.
  """
  read_string = self._file_object.readline()
  if not isinstance(read_string, py2to3.BYTES_TYPE):
    return read_string

  try:
    return codecs.decode(read_string, self._encoding, self._errors)
  except UnicodeDecodeError:
    if self._errors == 'strict':
      logging.error(
          'Unable to properly read input due to encoding error. '
          'Switching to error tolerant encoding which can result in '
          'non Basic Latin (C0) characters to be replaced with "?" or '
          '"\\ufffd".')
      # Fall back to a tolerant error handler for this and later reads.
      self._errors = 'replace'

    return codecs.decode(read_string, self._encoding, self._errors)
Returns:
str: input. | 3.542994 | 3.427341 | 1.033744 |
def _WriteRow(self, output_writer, values, in_bold=False):
  """Writes a row of values aligned with the width to the output writer.

  Args:
    output_writer (CLIOutputWriter): output writer.
    values (list[object]): values.
    in_bold (Optional[bool]): True if the row should be written in bold.
  """
  # Pad every value to its column width except the last one, which is
  # written as-is. str.ljust() never truncates, matching the previous
  # behavior for values longer than their column. Slicing also avoids the
  # IndexError the previous implementation raised for an empty values list.
  padded_values = [
      value.ljust(self._column_sizes[value_index])
      for value_index, value in enumerate(values[:-1])]
  padded_values.extend(values[-1:])

  row_string = ''.join(padded_values)
  if in_bold and not win32console:
    # TODO: for win32console get current color and set intensity,
    # write the header separately then reset intensity.
    row_string = '\x1b[1m{0:s}\x1b[0m'.format(row_string)

  output_writer.Write('{0:s}\n'.format(row_string))
Args:
output_writer (CLIOutputWriter): output writer.
values (list[object]): values.
in_bold (Optional[bool]): True if the row should be written in bold. | 3.543506 | 3.823949 | 0.926661 |
def Write(self, output_writer):
  """Writes the table to output writer.

  Args:
    output_writer (CLIOutputWriter): output writer.
  """
  # Round the column sizes up to the next multiple of the tab width so
  # the columns align on tab stops.
  tab_width = self._NUMBER_OF_SPACES_IN_TAB
  self._column_sizes = [
      ((column_size // tab_width) + 1) * tab_width
      for column_size in self._column_sizes]

  if self._columns:
    self._WriteRow(output_writer, self._columns, in_bold=True)

  for row_values in self._rows:
    self._WriteRow(output_writer, row_values)
Args:
output_writer (CLIOutputWriter): output writer. | 3.011477 | 3.0108 | 1.000225 |
def GetVSSStoreIdentifiers(self, volume_system, volume_identifiers):
  """Retrieves VSS store identifiers.

  This method can be used to prompt the user to provide VSS store
  identifiers.

  Args:
    volume_system (VShadowVolumeSystem): volume system.
    volume_identifiers (list[str]): volume identifiers including prefix.

  Returns:
    list[str]: selected volume identifiers including prefix or None.
  """
  write = self._output_writer.Write

  def _WriteWrapped(text):
    # Wrap long prompts to the configured text width.
    write('\n'.join(self._textwrapper.wrap(text)))

  print_header = True
  while True:
    if print_header:
      self._PrintVSSStoreIdentifiersOverview(
          volume_system, volume_identifiers)
      print_header = False

    write('\n')
    _WriteWrapped(self._USER_PROMPT_VSS)
    write('\n\nVSS identifier(s): ')

    try:
      selected_volumes = self._ReadSelectedVolumes(
          volume_system, prefix='vss')
      # Accept an empty selection or a selection that is a subset of the
      # known volume identifiers.
      if (not selected_volumes or
          not set(selected_volumes).difference(volume_identifiers)):
        break
    except ValueError:
      pass

    write('\n')
    _WriteWrapped(
        'Unsupported VSS identifier(s), please try again or abort with '
        'Ctrl^C.')
    write('\n\n')

  return selected_volumes
This method can be used to prompt the user to provide VSS store identifiers.
Args:
volume_system (VShadowVolumeSystem): volume system.
volume_identifiers (list[str]): volume identifiers including prefix.
Returns:
list[str]: selected volume identifiers including prefix or None. | 3.430103 | 3.291494 | 1.042111 |
def UnlockEncryptedVolume(
    self, source_scanner_object, scan_context, locked_scan_node, credentials):
  """Unlocks an encrypted volume.

  This method can be used to prompt the user to provide encrypted volume
  credentials.

  Args:
    source_scanner_object (SourceScanner): source scanner.
    scan_context (SourceScannerContext): source scanner context.
    locked_scan_node (SourceScanNode): locked scan node.
    credentials (Credentials): credentials supported by the locked scan node.

  Returns:
    bool: True if the volume was unlocked.
  """
  # Local import so this fix is self-contained; used for the Python 2 and
  # Python 3 compatible hexadecimal decoding of key data below.
  import codecs

  # TODO: print volume description.
  if locked_scan_node.type_indicator == (
      definitions.TYPE_INDICATOR_APFS_CONTAINER):
    header = 'Found an APFS encrypted volume.'
  elif locked_scan_node.type_indicator == definitions.TYPE_INDICATOR_BDE:
    header = 'Found a BitLocker encrypted volume.'
  elif locked_scan_node.type_indicator == definitions.TYPE_INDICATOR_FVDE:
    header = 'Found a CoreStorage (FVDE) encrypted volume.'
  else:
    header = 'Found an encrypted volume.'

  self._output_writer.Write(header)

  credentials_list = list(credentials.CREDENTIALS)
  credentials_list.append('skip')

  self._output_writer.Write('Supported credentials:\n\n')

  for index, name in enumerate(credentials_list):
    available_credential = ' {0:d}. {1:s}\n'.format(index + 1, name)
    self._output_writer.Write(available_credential)

  self._output_writer.Write('\nNote that you can abort with Ctrl^C.\n\n')

  result = False
  while not result:
    self._output_writer.Write('Select a credential to unlock the volume: ')
    input_line = self._input_reader.Read()
    input_line = input_line.strip()

    if input_line in credentials_list:
      credential_type = input_line
    else:
      try:
        credential_type = int(input_line, 10)
        credential_type = credentials_list[credential_type - 1]
      except (IndexError, ValueError):
        self._output_writer.Write(
            'Unsupported credential: {0:s}\n'.format(input_line))
        continue

    if credential_type == 'skip':
      break

    getpass_string = 'Enter credential data: '
    if sys.platform.startswith('win') and sys.version_info[0] < 3:
      # For Python 2 on Windows getpass (win_getpass) requires an encoded
      # byte string. For Python 3 we need it to be a Unicode string.
      getpass_string = self._EncodeString(getpass_string)

    credential_data = getpass.getpass(getpass_string)
    self._output_writer.Write('\n')

    if credential_type == 'key':
      try:
        # codecs.decode() supports the hexadecimal codec on both Python 2
        # and Python 3; the previous str.decode('hex') only exists on
        # Python 2. Invalid hexadecimal input raises a binascii.Error
        # (a ValueError subclass) on Python 3 and a TypeError on Python 2.
        credential_data = codecs.decode(credential_data, 'hex')
      except (TypeError, ValueError):
        self._output_writer.Write('Unsupported credential data.\n')
        continue

    result = source_scanner_object.Unlock(
        scan_context, locked_scan_node.path_spec, credential_type,
        credential_data)

    if not result:
      self._output_writer.Write('Unable to unlock volume.\n\n')

  return result
self, source_scanner_object, scan_context, locked_scan_node, credentials) | Unlocks an encrypted volume.
This method can be used to prompt the user to provide encrypted volume
credentials.
Args:
source_scanner_object (SourceScanner): source scanner.
scan_context (SourceScannerContext): source scanner context.
locked_scan_node (SourceScanNode): locked scan node.
credentials (Credentials): credentials supported by the locked scan node.
Returns:
bool: True if the volume was unlocked. | 2.387802 | 2.365793 | 1.009303 |
def _EntriesGenerator(self):
  """Retrieves directory entries.

  Since a directory can contain a vast number of entries using
  a generator is more memory efficient.

  Yields:
    TARPathSpec: TAR path specification.
  """
  location = getattr(self.path_spec, 'location', None)
  if (not location or
      not location.startswith(self._file_system.PATH_SEPARATOR)):
    # Without a valid absolute location there is nothing to enumerate.
    # The previous implementation left tar_path unbound in this case,
    # which raised a NameError in the loop below.
    return

  # The TAR info name does not have the leading path separator as
  # the location string does.
  tar_path = location[1:]

  # Set of top level sub directories that have been yielded.
  processed_directories = set()

  tar_file = self._file_system.GetTARFile()
  for tar_info in iter(tar_file.getmembers()):
    path = tar_info.name

    # Determine if the start of the TAR info name is similar to
    # the location string. If not the file TAR info refers to is not in
    # the same directory.
    if not path or not path.startswith(tar_path):
      continue

    # Ignore the directory itself.
    if path == tar_path:
      continue

    path_segment, suffix = self._file_system.GetPathSegmentAndSuffix(
        tar_path, path)
    if not path_segment:
      continue

    # Sometimes the TAR file lacks directories, therefore we will
    # provide virtual ones.
    if suffix:
      path_spec_location = self._file_system.JoinPath([
          location, path_segment])
      is_directory = True
    else:
      path_spec_location = self._file_system.JoinPath([path])
      is_directory = tar_info.isdir()

    if is_directory:
      if path_spec_location in processed_directories:
        continue
      processed_directories.add(path_spec_location)

    yield tar_path_spec.TARPathSpec(
        location=path_spec_location, parent=self.path_spec.parent)
Since a directory can contain a vast number of entries using
a generator is more memory efficient.
Yields:
TARPathSpec: TAR path specification. | 3.676182 | 3.476725 | 1.057369 |
def _GetDirectory(self):
  """Retrieves a directory.

  Returns:
    TARDirectory: a directory or None if not available.
  """
  # Only directory entries have a directory object.
  if self.entry_type == definitions.FILE_ENTRY_TYPE_DIRECTORY:
    return TARDirectory(self._file_system, self.path_spec)
  return None
Returns:
TARDirectory: a directory or None if not available. | 8.22637 | 4.683893 | 1.75631 |
def _GetLink(self):
  """Retrieves the link.

  Returns:
    str: link.
  """
  # Lazily resolve the link name from the TAR info the first time.
  if self._link is None and self._tar_info:
    self._link = self._tar_info.linkname
  return self._link
Returns:
str: link. | 5.381601 | 5.51397 | 0.975994 |
def _GetStat(self):
  """Retrieves the stat object.

  Returns:
    VFSStat: stat object.
  """
  stat_object = super(TARFileEntry, self)._GetStat()

  tar_info = self._tar_info

  # File data stat information.
  stat_object.size = getattr(tar_info, 'size', None)

  # Ownership and permissions stat information.
  stat_object.mode = getattr(tar_info, 'mode', None)
  stat_object.uid = getattr(tar_info, 'uid', None)
  stat_object.gid = getattr(tar_info, 'gid', None)

  # TODO: implement support for:
  # stat_object.uname = getattr(tar_info, 'uname', None)
  # stat_object.gname = getattr(tar_info, 'gname', None)

  # File entry type stat information.

  # The root file entry is virtual and should have type directory.

  # TODO: determine if this covers all the types:
  # REGTYPE, AREGTYPE, LNKTYPE, SYMTYPE, DIRTYPE, FIFOTYPE, CONTTYPE,
  # CHRTYPE, BLKTYPE, GNUTYPE_SPARSE

  # Other stat information.
  # tar_info.pax_headers

  return stat_object
Returns:
VFSStat: stat object. | 3.958059 | 3.939695 | 1.004661 |
def _GetSubFileEntries(self):
  """Retrieves sub file entries.

  Yields:
    TARFileEntry: a sub file entry.
  """
  tar_file = self._file_system.GetTARFile()

  if self._directory is None:
    self._directory = self._GetDirectory()

  if not self._directory or not tar_file:
    return

  for path_spec in self._directory.entries:
    location = getattr(path_spec, 'location', None)
    if location is None:
      continue

    kwargs = {}
    try:
      kwargs['tar_info'] = tar_file.getmember(location[1:])
    except KeyError:
      # Entries without TAR info are virtual directories.
      kwargs['is_virtual'] = True

    yield TARFileEntry(
        self._resolver_context, self._file_system, path_spec, **kwargs)
Yields:
TARFileEntry: a sub file entry. | 3.220749 | 3.063164 | 1.051445 |
def modification_time(self):
  """dfdatetime.DateTimeValues: modification time or None if not available."""
  timestamp = getattr(self._tar_info, 'mtime', None)
  if timestamp is not None:
    return dfdatetime_posix_time.PosixTime(timestamp=timestamp)
  return None
def GetParentFileEntry(self):
  """Retrieves the parent file entry.

  Returns:
    TARFileEntry: parent file entry or None.
  """
  location = getattr(self.path_spec, 'location', None)
  if location is None:
    return None

  parent_location = self._file_system.DirnamePath(location)
  if parent_location is None:
    return None

  # The parent of a top level entry is the virtual root file entry.
  is_root = is_virtual = parent_location == ''
  if is_root:
    parent_location = self._file_system.PATH_SEPARATOR

  parent_path_spec = getattr(self.path_spec, 'parent', None)
  path_spec = tar_path_spec.TARPathSpec(
      location=parent_location, parent=parent_path_spec)
  return TARFileEntry(
      self._resolver_context, self._file_system, path_spec, is_root=is_root,
      is_virtual=is_virtual)
Returns:
TARFileEntry: parent file entry or None. | 2.286611 | 2.105043 | 1.086254 |
def GetTARInfo(self):
  """Retrieves the TAR info.

  Returns:
    tarfile.TARInfo: TAR info or None if it does not exist.

  Raises:
    PathSpecError: if the path specification is incorrect.
  """
  if not self._tar_info:
    location = getattr(self.path_spec, 'location', None)
    if location is None:
      raise errors.PathSpecError('Path specification missing location.')

    if not location.startswith(self._file_system.LOCATION_ROOT):
      raise errors.PathSpecError('Invalid location in path specification.')

    # The virtual root file entry has no TAR info.
    if len(location) == 1:
      return None

    tar_file = self._file_system.GetTARFile()
    try:
      self._tar_info = tar_file.getmember(location[1:])
    except KeyError:
      # Virtual file entries have no TAR info; leave it unset.
      pass

  return self._tar_info
Returns:
tarfile.TARInfo: TAR info or None if it does not exist.
Raises:
PathSpecError: if the path specification is incorrect. | 2.622333 | 2.475283 | 1.059407 |
def _OpenFileObject(self, path_spec):
  """Opens the file-like object defined by path specification.

  Args:
    path_spec (PathSpec): path specification.

  Returns:
    pyvde.volume: BDE volume file-like object.

  Raises:
    PathSpecError: if the path specification is incorrect.
  """
  if not path_spec.HasParent():
    raise errors.PathSpecError(
        'Unsupported path specification without parent.')

  # Make credentials stored in the path specification available to the
  # key chain before opening the volume.
  resolver.Resolver.key_chain.ExtractCredentialsFromPathSpec(path_spec)

  file_object = resolver.Resolver.OpenFileObject(
      path_spec.parent, resolver_context=self._resolver_context)

  bde_volume = pybde.volume()
  bde.BDEVolumeOpen(
      bde_volume, path_spec, file_object, resolver.Resolver.key_chain)
  return bde_volume
Args:
path_spec (PathSpec): path specification.
Returns:
pyvde.volume: BDE volume file-like object.
Raises:
PathSpecError: if the path specification is incorrect. | 3.007892 | 2.653137 | 1.133712 |
def GetDecoder(cls, encoding_method):
  """Retrieves the decoder object for a specific encoding method.

  Args:
    encoding_method (str): encoding method identifier.

  Returns:
    Decoder: decoder or None if the encoding method does not exists.
  """
  # Encoding method identifiers are registered in lower case.
  decoder = cls._decoders.get(encoding_method.lower(), None)
  if not decoder:
    return None
  return decoder()
Args:
encoding_method (str): encoding method identifier.
Returns:
Decoder: decoder or None if the encoding method does not exists. | 2.964771 | 3.760724 | 0.788351 |
def RegisterDecoder(cls, decoder):
  """Registers a decoder for a specific encoding method.

  Args:
    decoder (type): decoder class.

  Raises:
    KeyError: if the corresponding decoder is already set.
  """
  # Encoding method identifiers are stored in lower case.
  encoding_method = decoder.ENCODING_METHOD.lower()
  if encoding_method in cls._decoders:
    raise KeyError(
        'Decoder for encoding method: {0:s} already set.'.format(
            decoder.ENCODING_METHOD))

  cls._decoders[encoding_method] = decoder
Args:
decoder (type): decoder class.
Raises:
KeyError: if the corresponding decoder is already set. | 3.806909 | 3.604863 | 1.056048 |
def _EntriesGenerator(self):
  """Retrieves directory entries.

  Since a directory can contain a vast number of entries using
  a generator is more memory efficient.

  Yields:
    LVMPathSpec: a path specification.
  """
  volume_index = getattr(self.path_spec, 'volume_index', None)
  location = getattr(self.path_spec, 'location', None)

  # Only the virtual root file has directory entries.
  if (volume_index is not None or location is None or
      location != self._file_system.LOCATION_ROOT):
    return

  vslvm_volume_group = self._file_system.GetLVMVolumeGroup()
  for volume_index in range(vslvm_volume_group.number_of_logical_volumes):
    yield lvm_path_spec.LVMPathSpec(
        location='/lvm{0:d}'.format(volume_index + 1),
        parent=self.path_spec.parent, volume_index=volume_index)
Since a directory can contain a vast number of entries using
a generator is more memory efficient.
Yields:
LVMPathSpec: a path specification. | 3.48504 | 3.080173 | 1.131443 |
def _GetDirectory(self):
  """Retrieves the directory.

  Returns:
    LVMDirectory: a directory or None if not available.
  """
  # Only directory entries have a directory object.
  if self.entry_type == definitions.FILE_ENTRY_TYPE_DIRECTORY:
    return LVMDirectory(self._file_system, self.path_spec)
  return None
Returns:
LVMDirectory: a directory or None if not available. | 8.061901 | 4.712835 | 1.710627 |
def _GetStat(self):
  """Retrieves information about the file entry.

  Returns:
    VFSStat: a stat object.
  """
  stat_object = super(LVMFileEntry, self)._GetStat()

  logical_volume = self._vslvm_logical_volume
  if logical_volume is not None:
    stat_object.size = logical_volume.size

  return stat_object
Returns:
VFSStat: a stat object. | 5.10003 | 4.708778 | 1.08309 |
def GetParentFileEntry(self):
  """Retrieves the parent file entry.

  Returns:
    LVMFileEntry: parent file entry or None if not available.
  """
  # Entries without a volume index are the virtual root, which has no
  # parent.
  if lvm.LVMPathSpecGetVolumeIndex(self.path_spec) is None:
    return None
  return self._file_system.GetRootFileEntry()
Returns:
LVMFileEntry: parent file entry or None if not available. | 6.658282 | 5.233127 | 1.272333 |
def GetTSKVsPartByPathSpec(tsk_volume, path_spec):
  """Retrieves the TSK volume system part object from the TSK volume object.

  Args:
    tsk_volume (pytsk3.Volume_Info): TSK volume information.
    path_spec (PathSpec): path specification.

  Returns:
    tuple: contains:

      pytsk3.TSK_VS_PART_INFO: TSK volume system part information or
          None on error.
      int: partition index or None if not available.
  """
  location = getattr(path_spec, 'location', None)
  part_index = getattr(path_spec, 'part_index', None)
  start_offset = getattr(path_spec, 'start_offset', None)

  partition_index = None
  if part_index is None:
    # Derive the partition index from a location of the form "/p1".
    if location is not None:
      if location.startswith('/p'):
        try:
          partition_index = int(location[2:], 10) - 1
        except ValueError:
          pass

      if partition_index is None or partition_index < 0:
        location = None

  if location is None and start_offset is None:
    return None, None

  bytes_per_sector = TSKVolumeGetBytesPerSector(tsk_volume)
  current_part_index = 0
  current_partition_index = 0
  tsk_vs_part = None

  # pytsk3 does not handle the Volume_Info iterator correctly therefore
  # the explicit cast to list is needed to prevent the iterator terminating
  # too soon or looping forever.
  tsk_vs_part_list = list(tsk_volume)
  number_of_tsk_vs_parts = len(tsk_vs_part_list)

  if number_of_tsk_vs_parts > 0:
    if (part_index is not None and
        (part_index < 0 or part_index >= number_of_tsk_vs_parts)):
      return None, None

    for tsk_vs_part in tsk_vs_part_list:
      # Partition indexes only count the allocated parts.
      if TSKVsPartIsAllocated(tsk_vs_part):
        if partition_index is not None:
          if partition_index == current_partition_index:
            break
        current_partition_index += 1

      if part_index is not None and part_index == current_part_index:
        break

      if start_offset is not None:
        start_sector = TSKVsPartGetStartSector(tsk_vs_part)

        if start_sector is not None:
          start_sector *= bytes_per_sector
          if start_sector == start_offset:
            break

      current_part_index += 1

  # Note that here we cannot solely rely on testing if tsk_vs_part is set
  # since the for loop will exit with tsk_vs_part set.
  if tsk_vs_part is None or current_part_index >= number_of_tsk_vs_parts:
    return None, None

  if not TSKVsPartIsAllocated(tsk_vs_part):
    current_partition_index = None
  return tsk_vs_part, current_partition_index
Args:
tsk_volume (pytsk3.Volume_Info): TSK volume information.
path_spec (PathSpec): path specification.
Returns:
tuple: contains:
pytsk3.TSK_VS_PART_INFO: TSK volume system part information or
None on error.
int: partition index or None if not available. | 2.561444 | 2.490539 | 1.02847 |
def TSKVolumeGetBytesPerSector(tsk_volume):
  """Retrieves the number of bytes per sector from a TSK volume object.

  Args:
    tsk_volume (pytsk3.Volume_Info): TSK volume information.

  Returns:
    int: number of bytes per sector or 512 by default.
  """
  # pytsk3.Volume_Info does not explicitly define info, hence the
  # attribute is resolved defensively and 512 is used as fall-back.
  volume_info = getattr(tsk_volume, 'info', None)
  if volume_info is None:
    return 512
  return getattr(volume_info, 'block_size', 512)
Args:
tsk_volume (pytsk3.Volume_Info): TSK volume information.
Returns:
int: number of bytes per sector or 512 by default. | 4.15244 | 3.720816 | 1.116002 |
def _EntriesGenerator(self):
  """Retrieves directory entries.

  Since a directory can contain a vast number of entries using
  a generator is more memory efficient.

  Yields:
    SQLiteBlobPathSpec: a path specification.
  """
  table_name = getattr(self.path_spec, 'table_name', None)
  column_name = getattr(self.path_spec, 'column_name', None)

  if not table_name or not column_name:
    return

  if self._number_of_entries is None:
    # Open the first entry to determine how many entries we have.
    # TODO: change this when there is a move this to a central temp file
    # manager. https://github.com/log2timeline/dfvfs/issues/92
    path_spec = sqlite_blob_path_spec.SQLiteBlobPathSpec(
        table_name=table_name, column_name=column_name, row_index=0,
        parent=self.path_spec.parent)

    sub_file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
    # Note: the previous implementation tested the undefined name
    # "file_entry" here, which raised a NameError.
    if not sub_file_entry:
      self._number_of_entries = 0
    else:
      self._number_of_entries = sub_file_entry.GetNumberOfRows()

  for row_index in range(0, self._number_of_entries):
    yield sqlite_blob_path_spec.SQLiteBlobPathSpec(
        table_name=table_name, column_name=column_name, row_index=row_index,
        parent=self.path_spec.parent)
Since a directory can contain a vast number of entries using
a generator is more memory efficient.
Yields:
SQLiteBlobPathSpec: a path specification.
Raises:
AccessError: if the access to list the directory was denied.
BackEndError: if the directory could not be listed. | 2.635361 | 2.493168 | 1.057033 |
def _GetDirectory(self):
  """Retrieves a directory.

  Returns:
    SQLiteBlobDirectory: a directory or None if not available.
  """
  # Only directory entries have a directory object.
  if self.entry_type == definitions.FILE_ENTRY_TYPE_DIRECTORY:
    return SQLiteBlobDirectory(self._file_system, self.path_spec)
  return None
Returns:
SQLiteBlobDirectory: a directory or None if not available. | 8.856442 | 4.74183 | 1.867727 |
def _GetStat(self):
  """Retrieves the stat object.

  Returns:
    VFSStat: stat object.

  Raises:
    BackEndError: when the SQLite blob file-like object is missing.
  """
  stat_object = super(SQLiteBlobFileEntry, self)._GetStat()

  # The virtual root has no backing blob, hence no size.
  if self._is_virtual:
    return stat_object

  file_object = self.GetFileObject()
  if not file_object:
    raise errors.BackEndError(
        'Unable to retrieve SQLite blob file-like object.')

  try:
    stat_object.size = file_object.get_size()
  finally:
    file_object.close()

  return stat_object
Returns:
VFSStat: stat object.
Raises:
BackEndError: when the SQLite blob file-like object is missing. | 4.026157 | 3.343652 | 1.20412 |
def name(self):
  """str: name of the file entry, which does not include the full path."""
  row_index = getattr(self.path_spec, 'row_index', None)
  if row_index is not None:
    return 'OFFSET {0:d}'.format(row_index)

  row_condition = getattr(self.path_spec, 'row_condition', None)
  if row_condition is not None:
    # Quote the condition value when it is a string.
    if len(row_condition) > 2 and isinstance(
        row_condition[2], py2to3.STRING_TYPES):
      return 'WHERE {0:s} {1:s} \'{2:s}\''.format(*row_condition)
    return 'WHERE {0:s} {1:s} {2!s}'.format(*row_condition)

  # Directory name is full name of column: <table>.<column>
  table_name = getattr(self.path_spec, 'table_name', None)
  column_name = getattr(self.path_spec, 'column_name', None)
  if table_name and column_name:
    return '{0:s}.{1:s}'.format(table_name, column_name)

  return ''
def GetNumberOfRows(self):
  """Retrieves the number of rows in the table.

  Returns:
    int: number of rows.

  Raises:
    BackEndError: when the SQLite blob file-like object is missing.
  """
  blob_file = self.GetFileObject()
  if not blob_file:
    raise errors.BackEndError(
        'Unable to retrieve SQLite blob file-like object.')

  try:
    # TODO: move this function out of SQLiteBlobFile.
    self._number_of_entries = blob_file.GetNumberOfRows()
  finally:
    blob_file.close()

  return self._number_of_entries
Returns:
int: number of rows.
Raises:
BackEndError: when the SQLite blob file-like object is missing. | 5.491116 | 3.41759 | 1.606722 |
def GetParentFileEntry(self):
  """Retrieves the parent file entry.

  Returns:
    SQLiteBlobFileEntry: parent file entry or None if not available.
  """
  # The virtual root (the SQLite blob directory) has no parent.
  if self._is_virtual:
    return None

  path_spec = sqlite_blob_path_spec.SQLiteBlobPathSpec(
      table_name=self.path_spec.table_name,
      column_name=self.path_spec.column_name,
      parent=self.path_spec.parent)
  return SQLiteBlobFileEntry(
      self._resolver_context, self._file_system,
      path_spec, is_root=True, is_virtual=True)
Returns:
SQLiteBlobFileEntry: parent file entry or None if not available. | 3.753832 | 3.183804 | 1.17904 |
def _Close(self):
  """Closes the file-like object."""
  super(VHDIFile, self)._Close()

  # Close the parent image chain and the file objects that back it.
  for vhdi_file in self._parent_vhdi_files:
    vhdi_file.close()

  for file_object in self._sub_file_objects:
    file_object.close()

  self._parent_vhdi_files = []
  self._sub_file_objects = []
def _OpenFileObject(self, path_spec):
  """Opens the file-like object defined by path specification.

  Args:
    path_spec (PathSpec): path specification.

  Returns:
    pyvhdi.file: a file-like object.

  Raises:
    PathSpecError: if the path specification is incorrect.
  """
  if not path_spec.HasParent():
    raise errors.PathSpecError(
        'Unsupported path specification without parent.')

  file_object = resolver.Resolver.OpenFileObject(
      path_spec.parent, resolver_context=self._resolver_context)

  vhdi_file = pyvhdi.file()
  vhdi_file.open_file_object(file_object)

  if vhdi_file.parent_identifier:
    # A parent identifier indicates a differencing image whose parent
    # image chain has to be opened as well.
    file_system = resolver.Resolver.OpenFileSystem(
        path_spec.parent, resolver_context=self._resolver_context)
    try:
      self._OpenParentFile(file_system, path_spec.parent, vhdi_file)
    finally:
      file_system.Close()

  self._sub_file_objects.append(file_object)

  self._parent_vhdi_files.reverse()
  self._sub_file_objects.reverse()
  return vhdi_file
Args:
path_spec (PathSpec): path specification.
Returns:
pyvhdi.file: a file-like object.
Raises:
PathSpecError: if the path specification is incorrect. | 2.472559 | 2.344015 | 1.05484 |
def _OpenParentFile(self, file_system, path_spec, vhdi_file):
  """Opens the parent file.

  Args:
    file_system (FileSystem): file system of the VHDI file.
    path_spec (PathSpec): path specification of the VHDI file.
    vhdi_file (pyvhdi.file): VHDI file.

  Raises:
    PathSpecError: if the path specification is incorrect.
  """
  location = getattr(path_spec, 'location', None)
  if not location:
    raise errors.PathSpecError(
        'Unsupported path specification without location.')

  location_path_segments = file_system.SplitPath(location)

  # The parent filename is stored with a Windows-style path; only its
  # basename is used.
  parent_filename = vhdi_file.parent_filename
  _, _, parent_filename = parent_filename.rpartition('\\')

  # Replace the last path segment so the parent is looked up as a sibling
  # of the current image.
  location_path_segments.pop()
  location_path_segments.append(parent_filename)
  parent_file_location = file_system.JoinPath(location_path_segments)

  # Note that we don't want to set the keyword arguments when not used
  # because the path specification base class will check for unused
  # keyword arguments and raise.
  kwargs = path_spec_factory.Factory.GetProperties(path_spec)

  kwargs['location'] = parent_file_location
  if path_spec.parent is not None:
    kwargs['parent'] = path_spec.parent

  parent_file_path_spec = path_spec_factory.Factory.NewPathSpec(
      path_spec.type_indicator, **kwargs)

  if not file_system.FileEntryExistsByPathSpec(parent_file_path_spec):
    return

  file_object = resolver.Resolver.OpenFileObject(
      parent_file_path_spec, resolver_context=self._resolver_context)

  vhdi_parent_file = pyvhdi.file()
  vhdi_parent_file.open_file_object(file_object)

  if vhdi_parent_file.parent_identifier:
    # Recurse to open the remainder of the parent image chain.
    self._OpenParentFile(
        file_system, parent_file_path_spec, vhdi_parent_file)

  vhdi_file.set_parent(vhdi_parent_file)

  self._parent_vhdi_files.append(vhdi_parent_file)
  self._sub_file_objects.append(file_object)
Args:
file_system (FileSystem): file system of the VHDI file.
path_spec (PathSpec): path specification of the VHDI file.
vhdi_file (pyvhdi.file): VHDI file.
Raises:
PathSpecError: if the path specification is incorrect. | 2.276838 | 2.251448 | 1.011277 |
def GetFileEntryByPathSpec(self, path_spec):
  """Retrieves a file entry for a path specification.

  Args:
    path_spec (PathSpec): path specification.

  Returns:
    GzipFileEntry: a file entry or None if not available.
  """
  # Every path specification maps onto the virtual root file entry.
  file_entry = gzip_file_entry.GzipFileEntry(
      self._resolver_context, self, path_spec, is_root=True, is_virtual=True)
  return file_entry
Args:
path_spec (PathSpec): path specification.
Returns:
GzipFileEntry: a file entry or None if not available. | 3.396157 | 4.331307 | 0.784095 |
def GetRootFileEntry(self):
  """Retrieves the root file entry.

  Returns:
    GzipFileEntry: a file entry or None if not available.
  """
  root_path_spec = gzip_path_spec.GzipPathSpec(parent=self._path_spec.parent)
  return self.GetFileEntryByPathSpec(root_path_spec)
Returns:
GzipFileEntry: a file entry or None if not available. | 3.757309 | 4.395274 | 0.854852 |
def comparable(self):
  """str: comparable representation of the path specification."""
  string_parts = []

  if self.data_stream:
    string_parts.append('data stream: {0:s}'.format(self.data_stream))

  # The remaining values are included whenever they are set.
  for label, value in (
      ('location', self.location),
      ('MFT attribute', self.mft_attribute),
      ('MFT entry', self.mft_entry)):
    if value is not None:
      string_parts.append('{0:s}: {1!s}'.format(label, value))

  return self._GetComparable(sub_comparable_string=', '.join(string_parts))
def _OpenFileObject(self, path_spec):
  """Opens the file-like object defined by path specification.

  Args:
    path_spec (PathSpec): path specification.

  Returns:
    FileIO: a file-like object.

  Raises:
    PathSpecError: if the path specification is incorrect.
  """
  if not path_spec.HasParent():
    raise errors.PathSpecError(
        'Unsupported path specification without parent.')

  # Make credentials stored in the path specification available to the
  # key chain before opening the volume.
  resolver.Resolver.key_chain.ExtractCredentialsFromPathSpec(path_spec)

  file_object = resolver.Resolver.OpenFileObject(
      path_spec.parent, resolver_context=self._resolver_context)

  fvde_volume = pyfvde.volume()
  fvde.FVDEVolumeOpen(
      fvde_volume, path_spec, file_object, resolver.Resolver.key_chain)
  return fvde_volume
Args:
path_spec (PathSpec): path specification.
Returns:
FileIO: a file-like object.
Raises:
PathSpecError: if the path specification is incorrect. | 3.119768 | 3.346168 | 0.932341 |
def DeregisterDecrypter(cls, decrypter):
  """Deregisters a decrypter for a specific encryption method.

  Args:
    decrypter (type): decrypter class.

  Raises:
    KeyError: if the corresponding decrypter is not set.
  """
  # Encryption method identifiers are stored in lower case.
  encryption_method = decrypter.ENCRYPTION_METHOD.lower()
  if encryption_method not in cls._decrypters:
    raise KeyError(
        'Decrypter for encryption method: {0:s} not set.'.format(
            decrypter.ENCRYPTION_METHOD))

  del cls._decrypters[encryption_method]
Args:
decrypter (type): decrypter class.
Raises:
KeyError: if the corresponding decrypter is not set. | 4.115599 | 3.539376 | 1.162804 |
def GetDecrypter(cls, encryption_method, **kwargs):
  """Retrieves the decrypter object for a specific encryption method.

  Args:
    encryption_method (str): encryption method identifier.
    kwargs (dict): keyword arguments depending on the decrypter.

  Returns:
    Decrypter: decrypter or None if the encryption method does not exists.

  Raises:
    CredentialError: if the necessary credentials are missing.
  """
  # Encryption method identifiers are registered in lower case.
  decrypter = cls._decrypters.get(encryption_method.lower(), None)
  if not decrypter:
    return None
  return decrypter(**kwargs)
Args:
encryption_method (str): encryption method identifier.
kwargs (dict): keyword arguments depending on the decrypter.
Returns:
Decrypter: decrypter or None if the encryption method does not exists.
Raises:
CredentialError: if the necessary credentials are missing. | 2.932905 | 3.898368 | 0.752342 |
def _EntriesGenerator(self):
  """Retrieves directory entries.

  Since a directory can contain a vast number of entries using
  a generator is more memory efficient.

  Yields:
    APFSContainerPathSpec: a path specification.
  """
  # Only the virtual root file has directory entries.
  volume_index = apfs_helper.APFSContainerPathSpecGetVolumeIndex(
      self.path_spec)
  if volume_index is not None:
    return

  location = getattr(self.path_spec, 'location', None)
  if location is None or location != self._file_system.LOCATION_ROOT:
    return

  fsapfs_container = self._file_system.GetAPFSContainer()
  for volume_index in range(fsapfs_container.number_of_volumes):
    yield apfs_container_path_spec.APFSContainerPathSpec(
        location='/apfs{0:d}'.format(volume_index + 1),
        volume_index=volume_index, parent=self.path_spec.parent)
Since a directory can contain a vast number of entries using
a generator is more memory efficient.
Yields:
APFSContainerPathSpec: a path specification. | 3.614047 | 3.205992 | 1.127279 |
def _GetDirectory(self):
  """Retrieves a directory.

  Returns:
    APFSContainerDirectory: a directory or None if not available.
  """
  # Only directory entries have a directory object.
  if self.entry_type == definitions.FILE_ENTRY_TYPE_DIRECTORY:
    return APFSContainerDirectory(self._file_system, self.path_spec)
  return None
Returns:
APFSContainerDirectory: a directory or None if not available. | 7.513726 | 4.405992 | 1.705343 |
def _GetStat(self):
  """Retrieves information about the file entry.

  Returns:
    VFSStat: a stat object.
  """
  stat_object = super(APFSContainerFileEntry, self)._GetStat()

  if self._fsapfs_volume is not None:
    # File data stat information.
    # TODO: implement volume size.
    # stat_object.size = self._fsapfs_volume.size
    pass

  # Ownership and permissions stat information.

  # File entry type stat information.

  # The root file entry is virtual and should have type directory.

  return stat_object
Returns:
VFSStat: a stat object. | 9.799583 | 9.229314 | 1.061789 |
def name(self):
  """str: name of the file entry, which does not include the full path."""
  # The name is determined once and cached.
  if self._name is not None:
    return self._name

  location = getattr(self.path_spec, 'location', None)
  if location is not None:
    self._name = self._file_system.BasenamePath(location)
  else:
    volume_index = apfs_helper.APFSContainerPathSpecGetVolumeIndex(
        self.path_spec)
    if volume_index is not None:
      self._name = 'apfs{0:d}'.format(volume_index + 1)
    else:
      self._name = ''

  return self._name
def GetParentFileEntry(self):
  """Retrieves the parent file entry.

  Returns:
    APFSContainerFileEntry: parent file entry or None if not available.
  """
  # Entries without a volume index are the virtual root, which has no
  # parent.
  if apfs_helper.APFSContainerPathSpecGetVolumeIndex(self.path_spec) is None:
    return None
  return self._file_system.GetRootFileEntry()
Returns:
APFSContainerFileEntry: parent file entry or None if not available. | 8.091702 | 6.136488 | 1.318621 |
def _Close(self):
  """Closes the file system.

  Raises:
    IOError: if the close failed.
  """
  # Release the tarfile object before the file object that backs it.
  self._tar_file.close()
  self._tar_file = None

  self._file_object.close()
  self._file_object = None
Raises:
IOError: if the close failed. | 4.593412 | 4.261193 | 1.077964 |
def _Open(self, path_spec, mode='rb'):
  """Opens the file system defined by path specification.

  Args:
    path_spec (PathSpec): path specification.
    mode (Optional[str]): file access mode. The default is 'rb' which
        represents read-only binary.

  Raises:
    PathSpecError: if the path specification is incorrect.
  """
  if not path_spec.HasParent():
    raise errors.PathSpecError(
        'Unsupported path specification without parent.')

  file_object = resolver.Resolver.OpenFileObject(
      path_spec.parent, resolver_context=self._resolver_context)

  try:
    # Set the file offset to 0 because tarfile.open() does not.
    file_object.seek(0, os.SEEK_SET)

    # Explicitly tell tarfile not to use compression. Compression should be
    # handled by the file-like object.
    tar_file = tarfile.open(mode='r:', fileobj=file_object)
  except:
    # Intentionally broad: the file object has to be closed on any failure
    # before the exception is re-raised.
    file_object.close()
    raise

  self._file_object = file_object
  self._tar_file = tar_file
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode. The default is 'rb' which
represents read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid. | 2.495702 | 2.665123 | 0.936431 |
def FileEntryExistsByPathSpec(self, path_spec):
    """Determines if a file entry for a path specification exists.

    Args:
        path_spec (PathSpec): path specification.

    Returns:
        bool: True if the file entry exists.
    """
    location = getattr(path_spec, 'location', None)
    if location is None or not location.startswith(self.LOCATION_ROOT):
        return False

    # The root directory always exists.
    if len(location) == 1:
        return True

    try:
        self._tar_file.getmember(location[1:])
        return True
    except KeyError:
        pass

    # Check if location could be a virtual directory, that is a directory
    # implied by the path of another member without a member entry of its
    # own. Require a path segment boundary so that e.g. "/foo" does not
    # falsely match a member named "foobar".
    prefix = location[1:]
    if not prefix.endswith('/'):
        prefix = ''.join([prefix, '/'])

    # The TAR member names do not have the leading path separator that the
    # location string has.
    return any(name.startswith(prefix) for name in self._tar_file.getnames())
def GetFileEntryByPathSpec(self, path_spec):
    """Retrieves a file entry for a path specification.

    Args:
        path_spec (PathSpec): path specification.

    Returns:
        TARFileEntry: file entry or None.
    """
    if not self.FileEntryExistsByPathSpec(path_spec):
        return None

    location = getattr(path_spec, 'location', None)

    # The root file entry is virtual and has no corresponding TAR member.
    if len(location) == 1:
        return tar_file_entry.TARFileEntry(
            self._resolver_context, self, path_spec, is_root=True,
            is_virtual=True)

    try:
        tar_info = self._tar_file.getmember(location[1:])
    except KeyError:
        # A virtual directory is implied by other members and has no
        # member entry of its own.
        return tar_file_entry.TARFileEntry(
            self._resolver_context, self, path_spec, is_virtual=True)

    return tar_file_entry.TARFileEntry(
        self._resolver_context, self, path_spec, tar_info=tar_info)
def GetRootFileEntry(self):
    """Retrieves the root file entry.

    Returns:
        TARFileEntry: file entry.
    """
    root_path_spec = tar_path_spec.TARPathSpec(
        location=self.LOCATION_ROOT, parent=self._path_spec.parent)
    return self.GetFileEntryByPathSpec(root_path_spec)
def GetTARInfoByPathSpec(self, path_spec):
    """Retrieves the TAR info for a path specification.

    Args:
        path_spec (PathSpec): a path specification.

    Returns:
        tarfile.TARInfo: TAR info or None if it does not exist.

    Raises:
        PathSpecError: if the path specification is incorrect.
    """
    location = getattr(path_spec, 'location', None)
    if location is None:
        raise errors.PathSpecError('Path specification missing location.')

    if not location.startswith(self.LOCATION_ROOT):
        raise errors.PathSpecError('Invalid location in path specification.')

    # The root is virtual and has no TAR info of its own.
    if len(location) == 1:
        return None

    try:
        return self._tar_file.getmember(location[1:])
    except KeyError:
        return None
def Decrypt(self, encrypted_data):
    """Decrypts the encrypted data.

    Only whole Blowfish blocks are decrypted; any trailing bytes that do
    not fill a complete cipher block are handed back as remaining data.

    Args:
        encrypted_data (bytes): encrypted data.

    Returns:
        tuple[bytes,bytes]: decrypted data and remaining encrypted data.
    """
    aligned_size = len(encrypted_data) - (
        len(encrypted_data) % Blowfish.block_size)

    remaining_encrypted_data = encrypted_data[aligned_size:]
    decrypted_data = self._blowfish_cipher.decrypt(
        encrypted_data[:aligned_size])

    return decrypted_data, remaining_encrypted_data
def readline(self, size=None):
    """Reads a single line of text.

    The functions reads one entire line from the file-like object. A trailing
    end-of-line indicator (newline by default) is kept in the string (but may
    be absent when a file ends with an incomplete line). An empty string is
    returned only when end-of-file is encountered immediately.

    Args:
        size (Optional[int]): maximum byte size to read. If present and
            non-negative, it is a maximum byte count (including the trailing
            end-of-line) and an incomplete line may be returned.

    Returns:
        str: line of text.

    Raises:
        UnicodeDecodeError: if a line cannot be decoded.
        ValueError: if the size is smaller than zero or exceeds the maximum
            (as defined by _MAXIMUM_READ_BUFFER_SIZE).
    """
    if size is not None and size < 0:
        raise ValueError('Invalid size value smaller than zero.')

    if size is not None and size > self._MAXIMUM_READ_BUFFER_SIZE:
        raise ValueError('Invalid size value exceeds maximum.')

    if not self._lines:
        # No buffered lines left: refill the line cache from the underlying
        # file object.
        if self._lines_buffer_offset >= self._file_object_size:
            return ''

        # NOTE: a size of 0 is treated the same as None here (full refill).
        read_size = size
        if not read_size:
            read_size = self._MAXIMUM_READ_BUFFER_SIZE

        # Clamp the read so it does not run past the end of the file.
        if self._lines_buffer_offset + read_size > self._file_object_size:
            read_size = self._file_object_size - self._lines_buffer_offset

        self._file_object.seek(self._lines_buffer_offset, os.SEEK_SET)
        read_buffer = self._file_object.read(read_size)

        self._lines_buffer_offset += len(read_buffer)

        self._lines = read_buffer.split(self._end_of_line)
        if self._lines_buffer:
            # Prepend the partial line left over from the previous refill.
            self._lines[0] = b''.join([self._lines_buffer, self._lines[0]])
            self._lines_buffer = b''

        # Move a partial line from the lines list to the lines buffer.
        # NOTE(review): this slice drops the first end-of-line-length bytes
        # instead of taking the last ones (read_buffer[-self._end_of_line_length:]).
        # When the buffer ends exactly on an end-of-line, split() produces a
        # trailing empty element, so popping it leaves an empty (falsy)
        # lines buffer and the net behavior matches the apparent intent —
        # confirm this was deliberate before changing it.
        if read_buffer[self._end_of_line_length:] != self._end_of_line:
            self._lines_buffer = self._lines.pop()

        # Re-append the end-of-line indicator that split() removed.
        for index, line in enumerate(self._lines):
            self._lines[index] = b''.join([line, self._end_of_line])

        if (self._lines_buffer and
            self._lines_buffer_offset >= self._file_object_size):
            # End-of-file: the partial line is the final, unterminated line.
            self._lines.append(self._lines_buffer)
            self._lines_buffer = b''

    if not self._lines:
        line = self._lines_buffer
        self._lines_buffer = b''

    elif not size or size >= len(self._lines[0]):
        line = self._lines.pop(0)

    else:
        # A size limit smaller than the next line: return the first size
        # bytes and keep the remainder buffered for the next call.
        line = self._lines[0]

        self._lines[0] = line[size:]
        line = line[:size]

    last_offset = self._current_offset
    self._current_offset += len(line)

    decoded_line = line.decode(self._encoding)

    # Remove a byte-order mark at the start of the file.
    if last_offset == 0 and decoded_line[0] == '\ufeff':
        decoded_line = decoded_line[1:]

    return decoded_line
self._cpio_archive_file.Close()
self._cpio_archive_file = None
self._file_object.close()
self._file_object = None | def _Close(self) | Closes the file system.
Raises:
IOError: if the close failed. | 4.806403 | 4.406018 | 1.090872 |
def _Open(self, path_spec, mode='rb'):
    """Opens the file system defined by path specification.

    Args:
        path_spec (PathSpec): path specification.
        mode (Optional[str]): file access mode. The default is 'rb' which
            represents read-only binary.

    Raises:
        AccessError: if the access to open the file was denied.
        IOError: if the file system could not be opened.
        PathSpecError: if the path specification is incorrect.
        ValueError: if the path specification is invalid.
    """
    if not path_spec.HasParent():
        raise errors.PathSpecError(
            'Unsupported path specification without parent.')

    file_object = resolver.Resolver.OpenFileObject(
        path_spec.parent, resolver_context=self._resolver_context)

    archive_file = cpio.CPIOArchiveFile()
    try:
        archive_file.Open(file_object)
    except:
        # Release the file object before propagating the error.
        file_object.close()
        raise

    self._file_object = file_object
    self._cpio_archive_file = archive_file
def FileEntryExistsByPathSpec(self, path_spec):
    """Determines if a file entry for a path specification exists.

    Args:
        path_spec (PathSpec): a path specification.

    Returns:
        bool: True if the file entry exists.
    """
    location = getattr(path_spec, 'location', None)

    has_valid_location = (
        location is not None and location.startswith(self.LOCATION_ROOT))
    if not has_valid_location:
        return False

    # The root directory always exists.
    if len(location) == 1:
        return True

    return self._cpio_archive_file.FileEntryExistsByPath(location[1:])
def GetCPIOArchiveFileEntryByPathSpec(self, path_spec):
    """Retrieves the CPIO archive file entry for a path specification.

    Args:
        path_spec (PathSpec): a path specification.

    Returns:
        CPIOArchiveFileEntry: CPIO archive file entry or None if not available.

    Raises:
        PathSpecError: if the path specification is incorrect.
    """
    location = getattr(path_spec, 'location', None)
    if location is None:
        raise errors.PathSpecError('Path specification missing location.')

    if not location.startswith(self.LOCATION_ROOT):
        raise errors.PathSpecError('Invalid location in path specification.')

    # The root is virtual and has no archive file entry of its own.
    path = location[1:]
    if not path:
        return None

    return self._cpio_archive_file.GetFileEntryByPath(path)
def GetFileEntryByPathSpec(self, path_spec):
    """Retrieves a file entry for a path specification.

    Args:
        path_spec (PathSpec): a path specification.

    Returns:
        CPIOFileEntry: a file entry or None if not available.
    """
    location = getattr(path_spec, 'location', None)
    if location is None or not location.startswith(self.LOCATION_ROOT):
        return None

    # The root file entry is virtual and has no archive entry of its own.
    if len(location) == 1:
        return cpio_file_entry.CPIOFileEntry(
            self._resolver_context, self, path_spec, is_root=True,
            is_virtual=True)

    archive_file_entry = self._cpio_archive_file.GetFileEntryByPath(
        location[1:])
    if archive_file_entry is None:
        return None

    return cpio_file_entry.CPIOFileEntry(
        self._resolver_context, self, path_spec,
        cpio_archive_file_entry=archive_file_entry)
def GetRootFileEntry(self):
    """Retrieves the root file entry.

    Returns:
        CPIOFileEntry: a file entry or None if not available.
    """
    root_path_spec = cpio_path_spec.CPIOPathSpec(
        location=self.LOCATION_ROOT, parent=self._path_spec.parent)
    return self.GetFileEntryByPathSpec(root_path_spec)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.