code
string | signature
string | docstring
string | loss_without_docstring
float64 | loss_with_docstring
float64 | factor
float64 |
---|---|---|---|---|---|
if self._is_open:
raise IOError('Already open.')
if range_offset < 0:
raise ValueError(
'Invalid range offset: {0:d} value out of bounds.'.format(
range_offset))
if range_size < 0:
raise ValueError(
'Invalid range size: {0:d} value out of bounds.'.format(
range_size))
self._range_offset = range_offset
self._range_size = range_size
self._current_offset = 0 | def SetRange(self, range_offset, range_size) | Sets the data range (offset and size).
The data range is used to map a range of data within one file
(e.g. a single partition within a full disk image) as a file-like object.
Args:
range_offset (int): start offset of the data range.
range_size (int): size of the data range.
Raises:
IOError: if the file-like object is already open.
OSError: if the file-like object is already open.
ValueError: if the range offset or range size is invalid. | 2.27116 | 1.988501 | 1.142147 |
if not self._is_open:
raise IOError('Not opened.')
if self._range_offset < 0 or self._range_size < 0:
raise IOError('Invalid data range.')
if self._current_offset < 0:
raise IOError(
'Invalid current offset: {0:d} value less than zero.'.format(
self._current_offset))
if self._current_offset >= self._range_size:
return b''
if size is None:
size = self._range_size
if self._current_offset + size > self._range_size:
size = self._range_size - self._current_offset
self._file_object.seek(
self._range_offset + self._current_offset, os.SEEK_SET)
data = self._file_object.read(size)
self._current_offset += len(data)
return data | def read(self, size=None) | Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed. | 2.095748 | 2.074139 | 1.010419 |
if not self._is_open:
raise IOError('Not opened.')
if self._current_offset < 0:
raise IOError(
'Invalid current offset: {0:d} value less than zero.'.format(
self._current_offset))
if whence == os.SEEK_CUR:
offset += self._current_offset
elif whence == os.SEEK_END:
offset += self._range_size
elif whence != os.SEEK_SET:
raise IOError('Unsupported whence.')
if offset < 0:
raise IOError('Invalid offset value less than zero.')
self._current_offset = offset | def seek(self, offset, whence=os.SEEK_SET) | Seeks to an offset within the file-like object.
Args:
offset (int): offset to seek to.
whence (Optional(int)): value that indicates whether offset is an absolute
or relative position within the file.
Raises:
IOError: if the seek failed.
OSError: if the seek failed. | 2.491953 | 2.540616 | 0.980846 |
if not path_spec.HasParent():
raise errors.PathSpecError(
'Unsupported path specification without parent.')
compression_method = getattr(path_spec, 'compression_method', None)
if not compression_method:
raise errors.PathSpecError(
'Unsupported path specification without compression method.')
self._compression_method = compression_method | def _Open(self, path_spec, mode='rb') | Opens the file system defined by path specification.
Args:
path_spec (PathSpec): a path specification.
mode (Optional[str]): file access mode. The default is 'rb' which
represents read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid. | 2.060245 | 2.378963 | 0.866026 |
return compressed_stream_file_entry.CompressedStreamFileEntry(
self._resolver_context, self, path_spec, is_root=True, is_virtual=True) | def GetFileEntryByPathSpec(self, path_spec) | Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
CompressedStreamFileEntry: a file entry or None if not available. | 3.739353 | 3.934773 | 0.950335 |
path_spec = compressed_stream_path_spec.CompressedStreamPathSpec(
compression_method=self._compression_method,
parent=self._path_spec.parent)
return self.GetFileEntryByPathSpec(path_spec) | def GetRootFileEntry(self) | Retrieves the root file entry.
Returns:
CompressedStreamFileEntry: a file entry or None if not available. | 3.564241 | 3.165793 | 1.12586 |
if self._data_streams is None:
if self._directory is None:
self._directory = self._GetDirectory()
self._data_streams = []
# It is assumed that directory and link file entries typically
# do not have data streams.
if not self._directory and not self.link:
data_stream = DataStream()
self._data_streams.append(data_stream)
return self._data_streams | def _GetDataStreams(self) | Retrieves the data streams.
Returns:
list[DataStream]: data streams. | 4.918984 | 4.839001 | 1.016529 |
stat_object = vfs_stat.VFSStat()
# Date and time stat information.
access_time = self.access_time
if access_time:
stat_time, stat_time_nano = access_time.CopyToStatTimeTuple()
if stat_time is not None:
stat_object.atime = stat_time
if stat_time_nano is not None:
stat_object.atime_nano = stat_time_nano
change_time = self.change_time
if change_time:
stat_time, stat_time_nano = change_time.CopyToStatTimeTuple()
if stat_time is not None:
stat_object.ctime = stat_time
if stat_time_nano is not None:
stat_object.ctime_nano = stat_time_nano
creation_time = self.creation_time
if creation_time:
stat_time, stat_time_nano = creation_time.CopyToStatTimeTuple()
if stat_time is not None:
stat_object.crtime = stat_time
if stat_time_nano is not None:
stat_object.crtime_nano = stat_time_nano
modification_time = self.modification_time
if modification_time:
stat_time, stat_time_nano = modification_time.CopyToStatTimeTuple()
if stat_time is not None:
stat_object.mtime = stat_time
if stat_time_nano is not None:
stat_object.mtime_nano = stat_time_nano
# File entry type stat information.
if self.entry_type:
stat_object.type = self.entry_type
return stat_object | def _GetStat(self) | Retrieves information about the file entry.
Returns:
VFSStat: a stat object. | 1.606937 | 1.587692 | 1.012122 |
if self._directory is None:
self._directory = self._GetDirectory()
if self._directory is None:
return 0
# We cannot use len(self._directory.entries) since entries is a generator.
return sum(1 for path_spec in self._directory.entries) | def number_of_sub_file_entries(self) | int: number of sub file entries. | 4.430177 | 4.219081 | 1.050033 |
if not isinstance(name, py2to3.STRING_TYPES):
raise ValueError('Name is not a string.')
name_lower = name.lower()
matching_data_stream = None
for data_stream in self._GetDataStreams():
if data_stream.name == name:
return data_stream
if not case_sensitive and data_stream.name.lower() == name_lower:
if not matching_data_stream:
matching_data_stream = data_stream
return matching_data_stream | def GetDataStream(self, name, case_sensitive=True) | Retrieves a data stream by name.
Args:
name (str): name of the data stream.
case_sensitive (Optional[bool]): True if the name is case sensitive.
Returns:
DataStream: a data stream or None if not available.
Raises:
ValueError: if the name is not string. | 2.584003 | 2.4616 | 1.049725 |
if data_stream_name:
return None
return resolver.Resolver.OpenFileObject(
self.path_spec, resolver_context=self._resolver_context) | def GetFileObject(self, data_stream_name='') | Retrieves the file-like object.
Args:
data_stream_name (Optional[str]): name of the data stream, where an empty
string represents the default data stream.
Returns:
FileIO: a file-like object or None if not available. | 4.566508 | 5.926197 | 0.770563 |
name_lower = name.lower()
matching_sub_file_entry = None
for sub_file_entry in self.sub_file_entries:
if sub_file_entry.name == name:
return sub_file_entry
if not case_sensitive and sub_file_entry.name.lower() == name_lower:
if not matching_sub_file_entry:
matching_sub_file_entry = sub_file_entry
return matching_sub_file_entry | def GetSubFileEntryByName(self, name, case_sensitive=True) | Retrieves a sub file entry by name.
Args:
name (str): name of the file entry.
case_sensitive (Optional[bool]): True if the name is case sensitive.
Returns:
FileEntry: a file entry or None if not available. | 1.847283 | 2.114321 | 0.873701 |
if self._stat_object is None:
self._stat_object = self._GetStat()
return self._stat_object | def GetStat(self) | Retrieves information about the file entry.
Returns:
VFSStat: a stat object or None if not available. | 4.416836 | 4.543361 | 0.972152 |
if not isinstance(name, py2to3.STRING_TYPES):
raise ValueError('Name is not a string.')
name_lower = name.lower()
for data_stream in self._GetDataStreams():
if data_stream.name == name:
return True
if not case_sensitive and data_stream.name.lower() == name_lower:
return True
return False | def HasDataStream(self, name, case_sensitive=True) | Determines if the file entry has specific data stream.
Args:
name (str): name of the data stream.
case_sensitive (Optional[bool]): True if the name is case sensitive.
Returns:
bool: True if the file entry has the data stream.
Raises:
ValueError: if the name is not string. | 2.902776 | 2.769612 | 1.048081 |
if self._stat_object is None:
self._stat_object = self._GetStat()
return self._stat_object and self._stat_object.is_allocated | def IsAllocated(self) | Determines if the file entry is allocated.
Returns:
bool: True if the file entry is allocated. | 4.941375 | 4.245837 | 1.163816 |
if self._stat_object is None:
self._stat_object = self._GetStat()
if self._stat_object is not None:
self.entry_type = self._stat_object.type
return self.entry_type == definitions.FILE_ENTRY_TYPE_DEVICE | def IsDevice(self) | Determines if the file entry is a device.
Returns:
bool: True if the file entry is a device. | 4.196339 | 3.233023 | 1.297961 |
if self._stat_object is None:
self._stat_object = self._GetStat()
if self._stat_object is not None:
self.entry_type = self._stat_object.type
return self.entry_type == definitions.FILE_ENTRY_TYPE_DIRECTORY | def IsDirectory(self) | Determines if the file entry is a directory.
Returns:
bool: True if the file entry is a directory. | 3.732494 | 3.198614 | 1.16691 |
if self._stat_object is None:
self._stat_object = self._GetStat()
if self._stat_object is not None:
self.entry_type = self._stat_object.type
return self.entry_type == definitions.FILE_ENTRY_TYPE_FILE | def IsFile(self) | Determines if the file entry is a file.
Returns:
bool: True if the file entry is a file. | 3.916354 | 3.24775 | 1.205867 |
if self._stat_object is None:
self._stat_object = self._GetStat()
if self._stat_object is not None:
self.entry_type = self._stat_object.type
return self.entry_type == definitions.FILE_ENTRY_TYPE_LINK | def IsLink(self) | Determines if the file entry is a link.
Returns:
bool: True if the file entry is a link. | 4.176638 | 3.268116 | 1.277996 |
if self._stat_object is None:
self._stat_object = self._GetStat()
if self._stat_object is not None:
self.entry_type = self._stat_object.type
return self.entry_type == definitions.FILE_ENTRY_TYPE_PIPE | def IsPipe(self) | Determines if the file entry is a pipe.
Returns:
bool: True if the file entry is a pipe. | 4.066028 | 3.21604 | 1.264296 |
if self._stat_object is None:
self._stat_object = self._GetStat()
if self._stat_object is not None:
self.entry_type = self._stat_object.type
return self.entry_type == definitions.FILE_ENTRY_TYPE_SOCKET | def IsSocket(self) | Determines if the file entry is a socket.
Returns:
bool: True if the file entry is a socket. | 4.18899 | 3.298106 | 1.27012 |
sub_comparable_string = (
'range_offset: 0x{0:08x}, range_size: 0x{1:08x}').format(
self.range_offset, self.range_size)
return self._GetComparable(sub_comparable_string=sub_comparable_string) | def comparable(self) | str: comparable representation of the path specification. | 4.069518 | 3.480439 | 1.169254 |
tsk_vs_part = self._file_entry.GetTSKVsPart()
tsk_addr = getattr(tsk_vs_part, 'addr', None)
if tsk_addr is not None:
address = volume_system.VolumeAttribute('address', tsk_addr)
self._AddAttribute(address)
tsk_desc = getattr(tsk_vs_part, 'desc', None)
if tsk_desc is not None:
# pytsk3 returns an UTF-8 encoded byte string.
try:
tsk_desc = tsk_desc.decode('utf8')
self._AddAttribute(volume_system.VolumeAttribute(
'description', tsk_desc))
except UnicodeError:
pass
start_sector = tsk_partition.TSKVsPartGetStartSector(tsk_vs_part)
number_of_sectors = tsk_partition.TSKVsPartGetNumberOfSectors(tsk_vs_part)
volume_extent = volume_system.VolumeExtent(
start_sector * self._bytes_per_sector,
number_of_sectors * self._bytes_per_sector)
self._extents.append(volume_extent) | def _Parse(self) | Extracts attributes and extents from the volume. | 2.872202 | 2.780641 | 1.032928 |
root_file_entry = self._file_system.GetRootFileEntry()
tsk_volume = self._file_system.GetTSKVolume()
self.bytes_per_sector = tsk_partition.TSKVolumeGetBytesPerSector(tsk_volume)
for sub_file_entry in root_file_entry.sub_file_entries:
tsk_vs_part = sub_file_entry.GetTSKVsPart()
start_sector = tsk_partition.TSKVsPartGetStartSector(tsk_vs_part)
number_of_sectors = tsk_partition.TSKVsPartGetNumberOfSectors(tsk_vs_part)
if start_sector is None or number_of_sectors is None:
continue
if tsk_partition.TSKVsPartIsAllocated(tsk_vs_part):
volume = TSKVolume(sub_file_entry, self.bytes_per_sector)
self._AddVolume(volume)
volume_extent = volume_system.VolumeExtent(
start_sector * self.bytes_per_sector,
number_of_sectors * self.bytes_per_sector)
self._sections.append(volume_extent) | def _Parse(self) | Extracts sections and volumes from the volume system. | 2.619019 | 2.378052 | 1.10133 |
self._file_system = resolver.Resolver.OpenFileSystem(path_spec)
if self._file_system is None:
raise errors.VolumeSystemError('Unable to resolve path specification.')
type_indicator = self._file_system.type_indicator
if type_indicator != definitions.TYPE_INDICATOR_TSK_PARTITION:
raise errors.VolumeSystemError('Unsupported type indicator.') | def Open(self, path_spec) | Opens a volume defined by path specification.
Args:
path_spec (PathSpec): a path specification.
Raises:
VolumeSystemError: if the TSK partition virtual file system could not
be resolved. | 2.352204 | 2.314394 | 1.016337 |
sub_comparable_string = 'encoding_method: {0:s}'.format(
self.encoding_method)
return self._GetComparable(sub_comparable_string=sub_comparable_string) | def comparable(self) | str: comparable representation of the path specification. | 6.910935 | 5.705836 | 1.211205 |
self._vslvm_volume_group = None
self._vslvm_handle.close()
self._vslvm_handle = None
self._file_object.close()
self._file_object = None | def _Close(self) | Closes the file system object.
Raises:
IOError: if the close failed. | 6.800945 | 5.969281 | 1.139324 |
if not path_spec.HasParent():
raise errors.PathSpecError(
'Unsupported path specification without parent.')
file_object = resolver.Resolver.OpenFileObject(
path_spec.parent, resolver_context=self._resolver_context)
try:
vslvm_handle = pyvslvm.handle()
vslvm_handle.open_file_object(file_object)
# TODO: implement multi physical volume support.
vslvm_handle.open_physical_volume_files_as_file_objects([
file_object])
vslvm_volume_group = vslvm_handle.get_volume_group()
except:
file_object.close()
raise
self._file_object = file_object
self._vslvm_handle = vslvm_handle
self._vslvm_volume_group = vslvm_volume_group | def _Open(self, path_spec, mode='rb') | Opens the file system object defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode. The default is 'rb' which
represents read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid. | 2.625243 | 2.789791 | 0.941018 |
volume_index = lvm.LVMPathSpecGetVolumeIndex(path_spec)
# The virtual root file has not corresponding volume index but
# should have a location.
if volume_index is None:
location = getattr(path_spec, 'location', None)
return location is not None and location == self.LOCATION_ROOT
return (
0 <= volume_index < self._vslvm_volume_group.number_of_logical_volumes) | def FileEntryExistsByPathSpec(self, path_spec) | Determines if a file entry for a path specification exists.
Args:
path_spec (PathSpec): path specification.
Returns:
bool: True if the file entry exists. | 5.793641 | 6.43162 | 0.900806 |
volume_index = lvm.LVMPathSpecGetVolumeIndex(path_spec)
# The virtual root file has not corresponding volume index but
# should have a location.
if volume_index is None:
location = getattr(path_spec, 'location', None)
if location is None or location != self.LOCATION_ROOT:
return None
return lvm_file_entry.LVMFileEntry(
self._resolver_context, self, path_spec, is_root=True,
is_virtual=True)
if (volume_index < 0 or
volume_index >= self._vslvm_volume_group.number_of_logical_volumes):
return None
return lvm_file_entry.LVMFileEntry(self._resolver_context, self, path_spec) | def GetFileEntryByPathSpec(self, path_spec) | Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
LVMFileEntry: a file entry or None if not available. | 3.089795 | 3.233862 | 0.955451 |
volume_index = lvm.LVMPathSpecGetVolumeIndex(path_spec)
if volume_index is None:
return None
return self._vslvm_volume_group.get_logical_volume(volume_index) | def GetLVMLogicalVolumeByPathSpec(self, path_spec) | Retrieves a LVM logical volume for a path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
pyvslvm.logical_volume: a LVM logical volume or None if not available. | 4.944995 | 5.105035 | 0.96865 |
path_spec = lvm_path_spec.LVMPathSpec(
location=self.LOCATION_ROOT, parent=self._path_spec.parent)
return self.GetFileEntryByPathSpec(path_spec) | def GetRootFileEntry(self) | Retrieves the root file entry.
Returns:
LVMFileEntry: root file entry or None if not available. | 3.343953 | 3.310987 | 1.009957 |
self._tar_ext_file.close()
self._tar_ext_file = None
self._file_system.Close()
self._file_system = None | def _Close(self) | Closes the file-like object. | 5.624866 | 5.409657 | 1.039782 |
if not path_spec:
raise ValueError('Missing path specification.')
file_system = resolver.Resolver.OpenFileSystem(
path_spec, resolver_context=self._resolver_context)
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
if not file_entry:
file_system.Close()
raise IOError('Unable to retrieve file entry.')
if not file_entry.IsFile():
file_system.Close()
raise IOError('Not a regular file.')
self._file_system = file_system
tar_file = self._file_system.GetTARFile()
tar_info = file_entry.GetTARInfo()
self._tar_ext_file = tar_file.extractfile(tar_info)
self._current_offset = 0
self._size = tar_info.size | def _Open(self, path_spec=None, mode='rb') | Opens the file-like object defined by path specification.
Args:
path_spec (Optional[PathSpec]): path specification.
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object could not be opened.
OSError: if the file-like object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid. | 2.401332 | 2.587678 | 0.927987 |
if not self._is_open:
raise IOError('Not opened.')
if self._current_offset < 0:
raise IOError('Invalid current offset value less than zero.')
if self._current_offset > self._size:
return b''
if size is None or self._current_offset + size > self._size:
size = self._size - self._current_offset
self._tar_ext_file.seek(self._current_offset, os.SEEK_SET)
data = self._tar_ext_file.read(size)
# It is possible the that returned data size is not the same as the
# requested data size. At this layer we don't care and this discrepancy
# should be dealt with on a higher layer if necessary.
self._current_offset += len(data)
return data | def read(self, size=None) | Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed. | 3.807454 | 3.878799 | 0.981606 |
credentials = manager.CredentialsManager.GetCredentials(path_spec)
for identifier in credentials.CREDENTIALS:
value = getattr(path_spec, identifier, None)
if value is None:
continue
self.SetCredential(path_spec, identifier, value) | def ExtractCredentialsFromPathSpec(self, path_spec) | Extracts credentials from a path specification.
Args:
path_spec (PathSpec): path specification to extract credentials from. | 4.572402 | 5.574145 | 0.820288 |
credentials = self._credentials_per_path_spec.get(path_spec.comparable, {})
return credentials.get(identifier, None) | def GetCredential(self, path_spec, identifier) | Retrieves a specific credential from the key chain.
Args:
path_spec (PathSpec): path specification.
identifier (str): credential identifier.
Returns:
object: credential or None if the credential for the path specification
is not set. | 4.163665 | 5.488556 | 0.758608 |
supported_credentials = manager.CredentialsManager.GetCredentials(path_spec)
if identifier not in supported_credentials.CREDENTIALS:
raise KeyError((
'Unsuppored credential: {0:s} for path specification type: '
'{1:s}').format(identifier, path_spec.type_indicator))
credentials = self._credentials_per_path_spec.get(path_spec.comparable, {})
credentials[identifier] = data
self._credentials_per_path_spec[path_spec.comparable] = credentials | def SetCredential(self, path_spec, identifier, data) | Sets a specific credential for the path specification.
Args:
path_spec (PathSpec): path specification.
identifier (str): credential identifier.
data (object): credential data.
Raises:
KeyError: if the credential is not supported by the path specification
type. | 2.827223 | 2.611535 | 1.08259 |
stat_object = super(BDEFileEntry, self)._GetStat()
stat_object.size = self._bde_volume.get_size()
return stat_object | def _GetStat(self) | Retrieves information about the file entry.
Returns:
VFSStat: a stat object. | 8.656819 | 6.900462 | 1.254527 |
timestamp = self._bde_volume.get_creation_time_as_integer()
return dfdatetime_filetime.Filetime(timestamp=timestamp) | def creation_time(self) | dfdatetime.DateTimeValues: creation time or None if not available. | 10.792477 | 5.596128 | 1.928562 |
if not path_spec.HasParent():
raise errors.PathSpecError(
'Unsupported path specification without parent.')
parent_path_spec = path_spec.parent
parent_location = getattr(parent_path_spec, 'location', None)
if not parent_location:
raise errors.PathSpecError(
'Unsupported parent path specification without location.')
parent_location, _, segment_extension = parent_location.rpartition('.')
segment_extension_start = segment_extension[0]
segment_extension_length = len(segment_extension)
if (segment_extension_length not in [3, 4] or
not segment_extension.endswith('01') or (
segment_extension_length == 3 and
segment_extension_start not in ['E', 'e', 's']) or (
segment_extension_length == 4 and
not segment_extension.startswith('Ex'))):
raise errors.PathSpecError((
'Unsupported parent path specification invalid segment file '
'extension: {0:s}').format(segment_extension))
segment_number = 1
segment_files = []
while True:
segment_location = '{0:s}.{1:s}'.format(parent_location, segment_extension)
# Note that we don't want to set the keyword arguments when not used
# because the path specification base class will check for unused
# keyword arguments and raise.
kwargs = path_spec_factory.Factory.GetProperties(parent_path_spec)
kwargs['location'] = segment_location
if parent_path_spec.parent is not None:
kwargs['parent'] = parent_path_spec.parent
segment_path_spec = path_spec_factory.Factory.NewPathSpec(
parent_path_spec.type_indicator, **kwargs)
if not file_system.FileEntryExistsByPathSpec(segment_path_spec):
break
segment_files.append(segment_path_spec)
segment_number += 1
if segment_number <= 99:
if segment_extension_length == 3:
segment_extension = '{0:s}{1:02d}'.format(
segment_extension_start, segment_number)
elif segment_extension_length == 4:
segment_extension = '{0:s}x{1:02d}'.format(
segment_extension_start, segment_number)
else:
segment_index = segment_number - 100
if segment_extension_start in ['e', 's']:
letter_offset = ord('a')
else:
letter_offset = ord('A')
segment_index, remainder = divmod(segment_index, 26)
third_letter = chr(letter_offset + remainder)
segment_index, remainder = divmod(segment_index, 26)
second_letter = chr(letter_offset + remainder)
first_letter = chr(ord(segment_extension_start) + segment_index)
if first_letter in ['[', '{']:
raise RuntimeError('Unsupported number of segment files.')
if segment_extension_length == 3:
segment_extension = '{0:s}{1:s}{2:s}'.format(
first_letter, second_letter, third_letter)
elif segment_extension_length == 4:
segment_extension = '{0:s}x{1:s}{2:s}'.format(
first_letter, second_letter, third_letter)
return segment_files | def EWFGlobPathSpec(file_system, path_spec) | Globs for path specifications according to the EWF naming schema.
Args:
file_system (FileSystem): file system.
path_spec (PathSpec): path specification.
Returns:
list[PathSpec]: path specifications that match the glob.
Raises:
PathSpecError: if the path specification is invalid.
RuntimeError: if the maximum number of supported segment files is
reached. | 2.065887 | 2.040857 | 1.012264 |
path_segments = self.file_system.SplitPath(path)
for segment_index in range(len(path_segments)):
parent_path = self.file_system.JoinPath(path_segments[:segment_index])
file_entry = self.file_system.GetFileEntryByPath(parent_path)
if file_entry and not file_entry.IsDirectory():
raise ValueError(
'Non-directory parent file entry: {0:s} already exists.'.format(
parent_path))
for segment_index in range(len(path_segments)):
parent_path = self.file_system.JoinPath(path_segments[:segment_index])
if not self.file_system.FileEntryExistsByPath(parent_path):
self.file_system.AddFileEntry(
parent_path, file_entry_type=definitions.FILE_ENTRY_TYPE_DIRECTORY) | def _AddParentDirectories(self, path) | Adds the parent directories of a path to the fake file system.
Args:
path (str): path of the file within the fake file system.
Raises:
ValueError: if a parent directory is already set and is not a directory. | 1.938166 | 1.953121 | 0.992343 |
if self.file_system.FileEntryExistsByPath(path):
raise ValueError('Path: {0:s} already set.'.format(path))
self._AddParentDirectories(path)
self.file_system.AddFileEntry(
path, file_entry_type=definitions.FILE_ENTRY_TYPE_DIRECTORY) | def AddDirectory(self, path) | Adds a directory to the fake file system.
Note that this function will create parent directories if needed.
Args:
path (str): path of the directory within the fake file system.
Raises:
ValueError: if the path is already set. | 4.309347 | 3.357585 | 1.283466 |
if self.file_system.FileEntryExistsByPath(path):
raise ValueError('Path: {0:s} already set.'.format(path))
self._AddParentDirectories(path)
self.file_system.AddFileEntry(path, file_data=file_data) | def AddFile(self, path, file_data) | Adds a "regular" file to the fake file system.
Note that this function will create parent directories if needed.
Args:
path (str): path of the file within the fake file system.
file_data (bytes): data of the file.
Raises:
ValueError: if the path is already set. | 4.763951 | 3.627476 | 1.313296 |
if self.file_system.FileEntryExistsByPath(path):
raise ValueError('Path: {0:s} already set.'.format(path))
with open(file_data_path, 'rb') as file_object:
file_data = file_object.read()
self._AddParentDirectories(path)
self.file_system.AddFileEntry(path, file_data=file_data) | def AddFileReadData(self, path, file_data_path) | Adds a "regular" file to the fake file system.
Args:
path (str): path of the file within the fake file system.
file_data_path (str): path of the file to read the file data from.
Raises:
ValueError: if the path is already set. | 3.361884 | 2.807839 | 1.197321 |
if self.file_system.FileEntryExistsByPath(path):
raise ValueError('Path: {0:s} already set.'.format(path))
self._AddParentDirectories(path)
self.file_system.AddFileEntry(
path, file_entry_type=definitions.FILE_ENTRY_TYPE_LINK,
link_data=linked_path) | def AddSymbolicLink(self, path, linked_path) | Adds a symbolic link to the fake file system.
Args:
path (str): path of the symbolic link within the fake file system.
linked_path (str): path that is linked.
Raises:
ValueError: if the path is already set. | 4.324669 | 3.419137 | 1.264842 |
if credentials.type_indicator not in cls._credentials:
raise KeyError(
'Credential object not set for type indicator: {0:s}.'.format(
credentials.type_indicator))
del cls._credentials[credentials.type_indicator] | def DeregisterCredentials(cls, credentials) | Deregisters a path specification credentials.
Args:
credentials (Credentials): credentials.
Raises:
KeyError: if credential object is not set for the corresponding
type indicator. | 4.013862 | 2.427925 | 1.653207 |
if credentials.type_indicator in cls._credentials:
raise KeyError(
'Credentials object already set for type indicator: {0:s}.'.format(
credentials.type_indicator))
cls._credentials[credentials.type_indicator] = credentials | def RegisterCredentials(cls, credentials) | Registers a path specification credentials.
Args:
credentials (Credentials): credentials.
Raises:
KeyError: if credentials object is already set for the corresponding
type indicator. | 3.72792 | 2.196732 | 1.69703 |
file_object.seek(self.last_read, os.SEEK_SET)
read_data = file_object.read(self._MAXIMUM_READ_SIZE)
self.last_read = file_object.get_offset()
compressed_data = b''.join([self._compressed_data, read_data])
decompressed, extra_compressed = self._decompressor.Decompress(
compressed_data)
self._compressed_data = extra_compressed
self.uncompressed_offset += len(decompressed)
return decompressed | def Read(self, file_object) | Reads the next uncompressed data from the gzip stream.
Args:
file_object (FileIO): file object that contains the compressed stream.
Returns:
bytes: next uncompressed data from the compressed stream. | 3.482216 | 3.642174 | 0.956082 |
file_offset = file_object.get_offset()
member_header = self._ReadStructure(
file_object, file_offset, self._MEMBER_HEADER_SIZE,
self._MEMBER_HEADER, 'member header')
if member_header.signature != self._GZIP_SIGNATURE:
raise errors.FileFormatError(
'Unsupported signature: 0x{0:04x}.'.format(member_header.signature))
if member_header.compression_method != self._COMPRESSION_METHOD_DEFLATE:
raise errors.FileFormatError(
'Unsupported compression method: {0:d}.'.format(
member_header.compression_method))
self.modification_time = member_header.modification_time
self.operating_system = member_header.operating_system
if member_header.flags & self._FLAG_FEXTRA:
file_offset = file_object.get_offset()
extra_field_data_size = self._ReadStructure(
file_object, file_offset, self._UINT16LE_SIZE,
self._UINT16LE, 'extra field data size')
file_object.seek(extra_field_data_size, os.SEEK_CUR)
if member_header.flags & self._FLAG_FNAME:
file_offset = file_object.get_offset()
string_value = self._ReadString(
file_object, file_offset, self._CSTRING, 'original filename')
self.original_filename = string_value.rstrip('\x00')
if member_header.flags & self._FLAG_FCOMMENT:
file_offset = file_object.get_offset()
string_value = self._ReadString(
file_object, file_offset, self._CSTRING, 'comment')
self.comment = string_value.rstrip('\x00')
if member_header.flags & self._FLAG_FHCRC:
file_object.read(2) | def _ReadMemberHeader(self, file_object) | Reads a member header.
Args:
file_object (FileIO): file-like object to read from.
Raises:
FileFormatError: if the member header cannot be read. | 1.825324 | 1.849134 | 0.987124 |
file_offset = file_object.get_offset()
member_footer = self._ReadStructure(
file_object, file_offset, self._MEMBER_FOOTER_SIZE,
self._MEMBER_FOOTER, 'member footer')
self.uncompressed_data_size = member_footer.uncompressed_data_size | def _ReadMemberFooter(self, file_object) | Reads a member footer.
Args:
file_object (FileIO): file-like object to read from.
Raises:
FileFormatError: if the member footer cannot be read. | 2.824669 | 3.25554 | 0.86765 |
self._cache = b''
self._cache_start_offset = None
self._cache_end_offset = None
self._ResetDecompressorState() | def FlushCache(self) | Empties the cache that holds cached decompressed data. | 8.239596 | 6.331224 | 1.301422 |
if not self._cache_start_offset or not self._cache_end_offset:
return 0
return self._cache_end_offset - self._cache_start_offset | def GetCacheSize(self) | Determines the size of the uncompressed cached data.
Returns:
int: number of cached bytes. | 3.251878 | 3.183685 | 1.02142 |
if size is not None and size < 0:
raise ValueError('Invalid size value {0!s}'.format(size))
if offset < 0:
raise ValueError('Invalid offset value {0!s}'.format(offset))
if size == 0 or offset >= self.uncompressed_data_size:
return b''
if self._cache_start_offset is None:
self._LoadDataIntoCache(self._file_object, offset)
if offset > self._cache_end_offset or offset < self._cache_start_offset:
self.FlushCache()
self._LoadDataIntoCache(self._file_object, offset)
cache_offset = offset - self._cache_start_offset
if not size:
return self._cache[cache_offset:]
data_end_offset = cache_offset + size
if data_end_offset > self._cache_end_offset:
return self._cache[cache_offset:]
return self._cache[cache_offset:data_end_offset] | def ReadAtOffset(self, offset, size=None) | Reads a byte string from the gzip member at the specified offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
offset (int): offset within the uncompressed data in this member to
read from.
size (Optional[int]): maximum number of bytes to read, where None
represents all remaining data, to a maximum of the uncompressed
cache size.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
ValueError: if a negative read size or offset is specified. | 2.330504 | 2.239369 | 1.040697 |
# Decompression can only be performed from beginning to end of the stream.
# So, if data before the current position of the decompressor in the stream
# is required, it's necessary to throw away the current decompression
# state and start again.
if minimum_offset < self._decompressor_state.uncompressed_offset:
self._ResetDecompressorState()
while not self.IsCacheFull() or read_all_data:
decompressed_data = self._decompressor_state.Read(file_object)
# Note that decompressed_data will be empty if there is no data left
# to read and decompress.
if not decompressed_data:
break
decompressed_data_length = len(decompressed_data)
decompressed_end_offset = self._decompressor_state.uncompressed_offset
decompressed_start_offset = (
decompressed_end_offset - decompressed_data_length)
data_to_add = decompressed_data
added_data_start_offset = decompressed_start_offset
if decompressed_start_offset < minimum_offset:
data_to_add = None
if decompressed_start_offset < minimum_offset < decompressed_end_offset:
data_add_offset = decompressed_end_offset - minimum_offset
data_to_add = decompressed_data[-data_add_offset]
added_data_start_offset = decompressed_end_offset - data_add_offset
if not self.IsCacheFull() and data_to_add:
self._cache = b''.join([self._cache, data_to_add])
if self._cache_start_offset is None:
self._cache_start_offset = added_data_start_offset
if self._cache_end_offset is None:
self._cache_end_offset = self._cache_start_offset + len(data_to_add)
else:
self._cache_end_offset += len(data_to_add)
# If there's no more data in the member, the unused_data value is
# populated in the decompressor. When this situation arises, we rewind
# to the end of the compressed_data section.
unused_data = self._decompressor_state.GetUnusedData()
if unused_data:
seek_offset = -len(unused_data)
file_object.seek(seek_offset, os.SEEK_CUR)
self._ResetDecompressorState()
break | def _LoadDataIntoCache(
self, file_object, minimum_offset, read_all_data=False) | Reads and decompresses the data in the member.
This function already loads as much data as possible in the cache, up to
UNCOMPRESSED_DATA_CACHE_SIZE bytes.
Args:
file_object (FileIO): file-like object.
minimum_offset (int): offset into this member's uncompressed data at
which the cache should start.
read_all_data (bool): True if all the compressed data should be read
from the member. | 2.760409 | 2.713342 | 1.017347 |
# Since every file system implementation can have their own path
# segment separator we are using JoinPath to be platform and file system
# type independent.
full_path = file_system.JoinPath([parent_full_path, file_entry.name])
if not self._list_only_files or file_entry.IsFile():
output_writer.WriteFileEntry(full_path)
for sub_file_entry in file_entry.sub_file_entries:
self._ListFileEntry(file_system, sub_file_entry, full_path, output_writer) | def _ListFileEntry(
self, file_system, file_entry, parent_full_path, output_writer) | Lists a file entry.
Args:
file_system (dfvfs.FileSystem): file system that contains the file entry.
file_entry (dfvfs.FileEntry): file entry to list.
parent_full_path (str): full path of the parent file entry.
output_writer (StdoutWriter): output writer. | 3.367537 | 3.601591 | 0.935014 |
for base_path_spec in base_path_specs:
file_system = resolver.Resolver.OpenFileSystem(base_path_spec)
file_entry = resolver.Resolver.OpenFileEntry(base_path_spec)
if file_entry is None:
logging.warning(
'Unable to open base path specification:\n{0:s}'.format(
base_path_spec.comparable))
return
self._ListFileEntry(file_system, file_entry, '', output_writer) | def ListFileEntries(self, base_path_specs, output_writer) | Lists file entries in the base path specification.
Args:
base_path_specs (list[dfvfs.PathSpec]): source path specification.
output_writer (StdoutWriter): output writer. | 2.041778 | 2.222913 | 0.918515 |
string = '{0:s}\n'.format(path)
encoded_string = self._EncodeString(string)
self._file_object.write(encoded_string) | def WriteFileEntry(self, path) | Writes the file path to file.
Args:
path (str): path of the file. | 4.933566 | 6.147937 | 0.802475 |
if path in self._paths:
raise KeyError('File entry already set for path: {0:s}.'.format(path))
if file_data and file_entry_type != definitions.FILE_ENTRY_TYPE_FILE:
raise ValueError('File data set for non-file file entry type.')
if link_data and file_entry_type != definitions.FILE_ENTRY_TYPE_LINK:
raise ValueError('Link data set for non-link file entry type.')
if file_data is not None:
path_data = file_data
elif link_data is not None:
path_data = link_data
else:
path_data = None
self._paths[path] = (file_entry_type, path_data) | def AddFileEntry(
self, path, file_entry_type=definitions.FILE_ENTRY_TYPE_FILE,
file_data=None, link_data=None) | Adds a fake file entry.
Args:
path (str): path of the file entry.
file_entry_type (Optional[str]): type of the file entry object.
file_data (Optional[bytes]): data of the fake file-like object.
link_data (Optional[bytes]): link data of the fake file entry object.
Raises:
KeyError: if the path already exists.
ValueError: if the file data is set but the file entry type is not a file
or if the link data is set but the file entry type is not a link. | 1.842215 | 1.771817 | 1.039732 |
_, path_data = self._paths.get(path, (None, None))
return path_data | def GetDataByPath(self, path) | Retrieves the data associated to a path.
Args:
path (str): path of the file entry.
Returns:
bytes: data or None if not available. | 7.472669 | 8.392074 | 0.890444 |
if path is None:
return None
file_entry_type, _ = self._paths.get(path, (None, None))
if not file_entry_type:
return None
path_spec = fake_path_spec.FakePathSpec(location=path)
return fake_file_entry.FakeFileEntry(
self._resolver_context, self, path_spec,
file_entry_type=file_entry_type) | def GetFileEntryByPath(self, path) | Retrieves a file entry for a path.
Args:
path (str): path of the file entry.
Returns:
FakeFileEntry: a file entry or None if not available. | 2.661226 | 2.502329 | 1.0635 |
path_spec = fake_path_spec.FakePathSpec(location=self.LOCATION_ROOT)
return self.GetFileEntryByPathSpec(path_spec) | def GetRootFileEntry(self) | Retrieves the root file entry.
Returns:
FakeFileEntry: a file entry or None if not available. | 4.629295 | 4.602963 | 1.005721 |
if path.endswith(self.PATH_SEPARATOR):
path = path[:-1]
_, _, basename = path.rpartition(self.PATH_SEPARATOR)
return basename | def BasenamePath(self, path) | Determines the basename of the path.
Args:
path (str): path.
Returns:
str: basename of the path. | 3.484582 | 4.136712 | 0.842356 |
if not self._is_open:
raise IOError('Not opened.')
if not self._is_cached:
close_file_system = True
elif self._resolver_context.ReleaseFileSystem(self):
self._is_cached = False
close_file_system = True
else:
close_file_system = False
if close_file_system:
self._Close()
self._is_open = False
self._path_spec = None | def Close(self) | Closes the file system.
Raises:
IOError: if the file system object was not opened or the close failed.
OSError: if the file system object was not opened or the close failed. | 4.04558 | 3.372 | 1.199757 |
if path.endswith(self.PATH_SEPARATOR):
path = path[:-1]
if not path:
return None
dirname, _, _ = path.rpartition(self.PATH_SEPARATOR)
return dirname | def DirnamePath(self, path) | Determines the directory name of the path.
The file system root is represented by an empty string.
Args:
path (str): path.
Returns:
str: directory name of the path or None. | 3.554448 | 3.784204 | 0.939285 |
file_entry = self.GetFileEntryByPathSpec(path_spec)
if not file_entry:
return None
data_stream_name = getattr(path_spec, 'data_stream', None)
return file_entry.GetDataStream(data_stream_name) | def GetDataStreamByPathSpec(self, path_spec) | Retrieves a data stream for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
DataStream: a data stream or None if not available. | 2.21691 | 2.543823 | 0.871488 |
file_entry = self.GetFileEntryByPathSpec(path_spec)
if not file_entry:
return None
return file_entry.GetFileObject() | def GetFileObjectByPathSpec(self, path_spec) | Retrieves a file-like object for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
FileIO: a file-like object or None if not available. | 2.373269 | 2.95901 | 0.802048 |
if path is None or base_path is None or not path.startswith(base_path):
return None, None
path_index = len(base_path)
if base_path and not base_path.endswith(self.PATH_SEPARATOR):
path_index += 1
if path_index == len(path):
return '', ''
path_segment, _, suffix = path[path_index:].partition(self.PATH_SEPARATOR)
return path_segment, suffix | def GetPathSegmentAndSuffix(self, base_path, path) | Determines the path segment and suffix of the path.
None is returned if the path does not start with the base path and
an empty string if the path exactly matches the base path.
Args:
base_path (str): base path.
path (str): path.
Returns:
tuple[str, str]: path segment and suffix string. | 2.239922 | 2.357832 | 0.949992 |
# This is an optimized way to combine the path segments into a single path
# and combine multiple successive path separators to one.
# Split all the path segments based on the path (segment) separator.
path_segments = [
segment.split(self.PATH_SEPARATOR) for segment in path_segments]
# Flatten the sublists into one list.
path_segments = [
element for sublist in path_segments for element in sublist]
# Remove empty path segments.
path_segments = list(filter(None, path_segments))
return '{0:s}{1:s}'.format(
self.PATH_SEPARATOR, self.PATH_SEPARATOR.join(path_segments)) | def JoinPath(self, path_segments) | Joins the path segments into a path.
Args:
path_segments (list[str]): path segments.
Returns:
str: joined path segments prefixed with the path separator. | 3.808287 | 3.646614 | 1.044335 |
if self._is_open and not self._is_cached:
raise IOError('Already open.')
if mode != 'rb':
raise ValueError('Unsupported mode: {0:s}.'.format(mode))
if not path_spec:
raise ValueError('Missing path specification.')
if not self._is_open:
self._Open(path_spec, mode=mode)
self._is_open = True
self._path_spec = path_spec
if path_spec and not self._resolver_context.GetFileSystem(path_spec):
self._resolver_context.CacheFileSystem(path_spec, self)
self._is_cached = True
if self._is_cached:
self._resolver_context.GrabFileSystem(path_spec) | def Open(self, path_spec, mode='rb') | Opens the file system object defined by path specification.
Args:
path_spec (PathSpec): a path specification.
mode (Optional[str]): file access mode. The default is 'rb' which
represents read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system object was already opened or the open failed.
OSError: if the file system object was already opened or the open failed.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification or mode is invalid. | 2.628984 | 2.660496 | 0.988156 |
location = getattr(self.path_spec, 'location', None)
if location and location.startswith(self._file_system.PATH_SEPARATOR):
cpio_archive_file = self._file_system.GetCPIOArchiveFile()
for cpio_archive_file_entry in cpio_archive_file.GetFileEntries(
path_prefix=location[1:]):
path = cpio_archive_file_entry.path
if not path:
continue
_, suffix = self._file_system.GetPathSegmentAndSuffix(
location[1:], path)
# Ignore anything that is part of a sub directory or the directory
# itself.
if suffix or path == location:
continue
path_spec_location = self._file_system.JoinPath([path])
yield cpio_path_spec.CPIOPathSpec(
location=path_spec_location, parent=self.path_spec.parent) | def _EntriesGenerator(self) | Retrieves directory entries.
Since a directory can contain a vast number of entries using
a generator is more memory efficient.
Yields:
CPIOPathSpec: path specification. | 3.612944 | 3.325369 | 1.086479 |
if self.entry_type != definitions.FILE_ENTRY_TYPE_DIRECTORY:
return None
return CPIODirectory(self._file_system, self.path_spec) | def _GetDirectory(self) | Retrieves a directory.
Returns:
CPIODirectory: a directory or None if not available. | 7.451351 | 4.744102 | 1.570656 |
if self._link is None:
self._link = ''
if self.entry_type != definitions.FILE_ENTRY_TYPE_LINK:
return self._link
cpio_archive_file = self._file_system.GetCPIOArchiveFile()
link_data = cpio_archive_file.ReadDataAtOffset(
self._cpio_archive_file_entry.data_offset,
self._cpio_archive_file_entry.data_size)
# TODO: should this be ASCII?
self._link = link_data.decode('ascii')
return self._link | def _GetLink(self) | Retrieves the link.
Returns:
str: full path of the linked file entry. | 3.858628 | 3.607386 | 1.069646 |
stat_object = super(CPIOFileEntry, self)._GetStat()
# File data stat information.
stat_object.size = getattr(
self._cpio_archive_file_entry, 'data_size', None)
# Ownership and permissions stat information.
mode = getattr(self._cpio_archive_file_entry, 'mode', 0)
stat_object.mode = stat.S_IMODE(mode)
stat_object.uid = getattr(
self._cpio_archive_file_entry, 'user_identifier', None)
stat_object.gid = getattr(
self._cpio_archive_file_entry, 'group_identifier', None)
return stat_object | def _GetStat(self) | Retrieves information about the file entry.
Returns:
VFSStat: a stat object. | 2.655478 | 2.591187 | 1.024812 |
# Note that the root file entry is virtual and has no
# cpio_archive_file_entry.
if self._cpio_archive_file_entry is None:
return ''
return self._file_system.BasenamePath(self._cpio_archive_file_entry.path) | def name(self) | str: name of the file entry, which does not include the full path. | 7.855337 | 5.896469 | 1.33221 |
timestamp = getattr(
self._cpio_archive_file_entry, 'modification_time', None)
if timestamp is None:
return None
return dfdatetime_posix_time.PosixTime(timestamp=timestamp) | def modification_time(self) | dfdatetime.DateTimeValues: modification time or None if not available. | 5.592237 | 3.690853 | 1.515161 |
location = getattr(self.path_spec, 'location', None)
if location is None:
return None
parent_location = self._file_system.DirnamePath(location)
if parent_location is None:
return None
if parent_location == '':
parent_location = self._file_system.PATH_SEPARATOR
is_root = True
is_virtual = True
else:
is_root = False
is_virtual = False
parent_path_spec = getattr(self.path_spec, 'parent', None)
path_spec = cpio_path_spec.CPIOPathSpec(
location=parent_location, parent=parent_path_spec)
return CPIOFileEntry(
self._resolver_context, self._file_system, path_spec,
is_root=is_root, is_virtual=is_virtual) | def GetParentFileEntry(self) | Retrieves the parent file entry.
Returns:
CPIOFileEntry: parent file entry or None if not available. | 2.304986 | 2.098903 | 1.098186 |
volume_index = getattr(path_spec, 'volume_index', None)
if volume_index is None:
location = getattr(path_spec, 'location', None)
if location is None or not location.startswith('/lvm'):
return None
volume_index = None
try:
volume_index = int(location[4:], 10) - 1
except ValueError:
pass
if volume_index is None or volume_index < 0:
return None
return volume_index | def LVMPathSpecGetVolumeIndex(path_spec) | Retrieves the volume index from the path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
int: volume index or None if not available. | 1.987725 | 2.489337 | 0.798496 |
if not cls._resolver_helpers_manager:
# Delay the import of the resolver helpers manager to prevent circular
# imports.
from dfvfs.resolver_helpers import manager
cls._resolver_helpers_manager = manager.ResolverHelperManager
return cls._resolver_helpers_manager.GetHelper(type_indicator) | def _GetResolverHelper(cls, type_indicator) | Retrieves the path specification resolver helper for the specified type.
Args:
type_indicator (str): type indicator.
Returns:
ResolverHelper: a resolver helper. | 3.835598 | 3.908986 | 0.981226 |
file_system = cls.OpenFileSystem(
path_spec_object, resolver_context=resolver_context)
if resolver_context is None:
resolver_context = cls._resolver_context
file_entry = file_system.GetFileEntryByPathSpec(path_spec_object)
# Release the file system so it will be removed from the cache
# when the file entry is destroyed.
resolver_context.ReleaseFileSystem(file_system)
return file_entry | def OpenFileEntry(cls, path_spec_object, resolver_context=None) | Opens a file entry object defined by path specification.
Args:
path_spec_object (PathSpec): path specification.
resolver_context (Optional[Context]): resolver context, where None
represents the built in context which is not multi process safe.
Returns:
FileEntry: file entry or None if the path specification could not be
resolved. | 2.654193 | 3.24841 | 0.817075 |
if not isinstance(path_spec_object, path_spec.PathSpec):
raise TypeError('Unsupported path specification type.')
if resolver_context is None:
resolver_context = cls._resolver_context
if path_spec_object.type_indicator == definitions.TYPE_INDICATOR_MOUNT:
if path_spec_object.HasParent():
raise errors.PathSpecError(
'Unsupported mount path specification with parent.')
mount_point = getattr(path_spec_object, 'identifier', None)
if not mount_point:
raise errors.PathSpecError(
'Unsupported path specification without mount point identifier.')
path_spec_object = mount_manager.MountPointManager.GetMountPoint(
mount_point)
if not path_spec_object:
raise errors.MountPointError(
'No such mount point: {0:s}'.format(mount_point))
file_object = resolver_context.GetFileObject(path_spec_object)
if not file_object:
resolver_helper = cls._GetResolverHelper(path_spec_object.type_indicator)
file_object = resolver_helper.NewFileObject(resolver_context)
file_object.open(path_spec=path_spec_object)
return file_object | def OpenFileObject(cls, path_spec_object, resolver_context=None) | Opens a file-like object defined by path specification.
Args:
path_spec_object (PathSpec): path specification.
resolver_context (Optional[Context]): resolver context, where None
represents the built in context which is not multi process safe.
Returns:
FileIO: file-like object or None if the path specification could not
be resolved.
Raises:
PathSpecError: if the path specification is incorrect.
TypeError: if the path specification type is unsupported. | 2.116089 | 2.120123 | 0.998097 |
if not isinstance(path_spec_object, path_spec.PathSpec):
raise TypeError('Unsupported path specification type.')
if resolver_context is None:
resolver_context = cls._resolver_context
if path_spec_object.type_indicator == definitions.TYPE_INDICATOR_MOUNT:
if path_spec_object.HasParent():
raise errors.PathSpecError(
'Unsupported mount path specification with parent.')
mount_point = getattr(path_spec_object, 'identifier', None)
if not mount_point:
raise errors.PathSpecError(
'Unsupported path specification without mount point identifier.')
path_spec_object = mount_manager.MountPointManager.GetMountPoint(
mount_point)
if not path_spec_object:
raise errors.MountPointError(
'No such mount point: {0:s}'.format(mount_point))
file_system = resolver_context.GetFileSystem(path_spec_object)
if not file_system:
resolver_helper = cls._GetResolverHelper(path_spec_object.type_indicator)
file_system = resolver_helper.NewFileSystem(resolver_context)
try:
file_system.Open(path_spec_object)
except (IOError, ValueError) as exception:
raise errors.BackEndError(
'Unable to open file system with error: {0!s}'.format(exception))
return file_system | def OpenFileSystem(cls, path_spec_object, resolver_context=None) | Opens a file system object defined by path specification.
Args:
path_spec_object (PathSpec): path specification.
resolver_context (Optional[Context]): resolver context, where None
represents the built in context which is not multi process safe.
Returns:
FileSystem: file system or None if the path specification could not
be resolved or has no file system object.
Raises:
AccessError: if the access to open the file system was denied.
BackEndError: if the file system cannot be opened.
MountPointError: if the mount point specified in the path specification
does not exist.
PathSpecError: if the path specification is incorrect.
TypeError: if the path specification type is unsupported. | 1.96463 | 1.925478 | 1.020333 |
try:
decoded_data = base64.b32decode(encoded_data, casefold=False)
except (TypeError, binascii.Error) as exception:
raise errors.BackEndError(
'Unable to decode base32 stream with error: {0!s}.'.format(
exception))
return decoded_data, b'' | def Decode(self, encoded_data) | Decode the encoded data.
Args:
encoded_data (byte): encoded data.
Returns:
tuple(bytes, bytes): decoded data and remaining encoded data.
Raises:
BackEndError: if the base32 stream cannot be decoded. | 3.711831 | 2.721741 | 1.363771 |
password = key_chain.GetCredential(path_spec, 'password')
if password:
bde_volume.set_password(password)
recovery_password = key_chain.GetCredential(path_spec, 'recovery_password')
if recovery_password:
bde_volume.set_recovery_password(recovery_password)
startup_key = key_chain.GetCredential(path_spec, 'startup_key')
if startup_key:
bde_volume.read_startup_key(startup_key)
bde_volume.open_file_object(file_object) | def BDEVolumeOpen(bde_volume, path_spec, file_object, key_chain) | Opens the BDE volume using the path specification.
Args:
bde_volume (pybde.volume): BDE volume.
path_spec (PathSpec): path specification.
file_object (FileIO): file-like object.
key_chain (KeyChain): key chain. | 2.05885 | 2.397307 | 0.858818 |
compression_method = compression_method.lower()
decompressor = cls._decompressors.get(compression_method, None)
if not decompressor:
return None
return decompressor() | def GetDecompressor(cls, compression_method) | Retrieves the decompressor object for a specific compression method.
Args:
compression_method (str): compression method identifier.
Returns:
Decompressor: decompressor or None if the compression method does
not exists. | 2.751746 | 3.557734 | 0.773455 |
compression_method = decompressor.COMPRESSION_METHOD.lower()
if compression_method in cls._decompressors:
raise KeyError(
'Decompressor for compression method: {0:s} already set.'.format(
decompressor.COMPRESSION_METHOD))
cls._decompressors[compression_method] = decompressor | def RegisterDecompressor(cls, decompressor) | Registers a decompressor for a specific compression method.
Args:
decompressor (type): decompressor class.
Raises:
KeyError: if the corresponding decompressor is already set. | 3.278415 | 3.060742 | 1.071118 |
if self._is_open and not self._is_cached:
raise IOError('Already open.')
if mode != 'rb':
raise ValueError('Unsupported mode: {0:s}.'.format(mode))
if not self._is_open:
self._Open(path_spec=path_spec, mode=mode)
self._is_open = True
if path_spec and not self._resolver_context.GetFileObject(path_spec):
self._resolver_context.CacheFileObject(path_spec, self)
self._is_cached = True
if self._is_cached:
self._resolver_context.GrabFileObject(path_spec) | def open(self, path_spec=None, mode='rb') | Opens the file-like object defined by path specification.
Args:
path_spec (Optional[PathSpec]): path specification.
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object was already opened or the open failed.
OSError: if the file-like object was already opened or the open failed.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification or mode is invalid. | 2.64993 | 2.71977 | 0.974321 |
if not self._is_open:
raise IOError('Not opened.')
if not self._is_cached:
close_file_object = True
elif self._resolver_context.ReleaseFileObject(self):
self._is_cached = False
close_file_object = True
else:
close_file_object = False
if close_file_object:
self._Close()
self._is_open = False | def close(self) | Closes the file-like object.
Raises:
IOError: if the file-like object was not opened or the close failed.
OSError: if the file-like object was not opened or the close failed. | 3.931437 | 3.502436 | 1.122486 |
if not self._file_object_set_in_init:
self._file_object.close()
self._file_object = None
self._decrypter = None
self._decrypted_data = b''
self._encrypted_data = b'' | def _Close(self) | Closes the file-like object.
If the file-like object was passed in the init function
the encrypted stream file-like object does not control
the file-like object and should not actually close it. | 4.90893 | 3.641475 | 1.348061 |
resolver.Resolver.key_chain.ExtractCredentialsFromPathSpec(self._path_spec)
try:
credentials = resolver.Resolver.key_chain.GetCredentials(self._path_spec)
return encryption_manager.EncryptionManager.GetDecrypter(
self._encryption_method, **credentials)
except ValueError as exception:
raise IOError(exception) | def _GetDecrypter(self) | Retrieves a decrypter.
Returns:
Decrypter: decrypter.
Raises:
IOError: if the decrypter cannot be initialized.
OSError: if the decrypter cannot be initialized. | 5.044826 | 6.638383 | 0.759948 |
self._file_object.seek(0, os.SEEK_SET)
self._decrypter = self._GetDecrypter()
self._decrypted_data = b''
encrypted_data_offset = 0
encrypted_data_size = self._file_object.get_size()
decrypted_stream_size = 0
while encrypted_data_offset < encrypted_data_size:
read_count = self._ReadEncryptedData(self._ENCRYPTED_DATA_BUFFER_SIZE)
if read_count == 0:
break
encrypted_data_offset += read_count
decrypted_stream_size += self._decrypted_data_size
return decrypted_stream_size | def _GetDecryptedStreamSize(self) | Retrieves the decrypted stream size.
Returns:
int: decrypted stream size. | 2.40275 | 2.481304 | 0.968342 |
if not self._file_object_set_in_init and not path_spec:
raise ValueError('Missing path specification.')
if not self._file_object_set_in_init:
if not path_spec.HasParent():
raise errors.PathSpecError(
'Unsupported path specification without parent.')
self._encryption_method = getattr(path_spec, 'encryption_method', None)
if self._encryption_method is None:
raise errors.PathSpecError(
'Path specification missing encryption method.')
self._file_object = resolver.Resolver.OpenFileObject(
path_spec.parent, resolver_context=self._resolver_context)
self._path_spec = path_spec | def _Open(self, path_spec=None, mode='rb') | Opens the file-like object.
Args:
path_spec (Optional[PathSpec]): path specification.
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object could not be opened.
OSError: if the file-like object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid. | 2.204388 | 2.251337 | 0.979146 |
self._file_object.seek(0, os.SEEK_SET)
self._decrypter = self._GetDecrypter()
self._decrypted_data = b''
encrypted_data_offset = 0
encrypted_data_size = self._file_object.get_size()
while encrypted_data_offset < encrypted_data_size:
read_count = self._ReadEncryptedData(self._ENCRYPTED_DATA_BUFFER_SIZE)
if read_count == 0:
break
encrypted_data_offset += read_count
if decrypted_data_offset < self._decrypted_data_size:
self._decrypted_data_offset = decrypted_data_offset
break
decrypted_data_offset -= self._decrypted_data_size | def _AlignDecryptedDataOffset(self, decrypted_data_offset) | Aligns the encrypted file with the decrypted data offset.
Args:
decrypted_data_offset (int): decrypted data offset. | 2.301672 | 2.32216 | 0.991177 |
encrypted_data = self._file_object.read(read_size)
read_count = len(encrypted_data)
self._encrypted_data = b''.join([self._encrypted_data, encrypted_data])
self._decrypted_data, self._encrypted_data = (
self._decrypter.Decrypt(self._encrypted_data))
self._decrypted_data_size = len(self._decrypted_data)
return read_count | def _ReadEncryptedData(self, read_size) | Reads encrypted data from the file-like object.
Args:
read_size (int): number of bytes of encrypted data to read.
Returns:
int: number of bytes of encrypted data read. | 2.649379 | 3.001583 | 0.882661 |
if self._is_open:
raise IOError('Already open.')
if decrypted_stream_size < 0:
raise ValueError((
'Invalid decrypted stream size: {0:d} value out of '
'bounds.').format(decrypted_stream_size))
self._decrypted_stream_size = decrypted_stream_size | def SetDecryptedStreamSize(self, decrypted_stream_size) | Sets the decrypted stream size.
This function is used to set the decrypted stream size if it can be
determined separately.
Args:
decrypted_stream_size (int): size of the decrypted stream in bytes.
Raises:
IOError: if the file-like object is already open.
OSError: if the file-like object is already open.
ValueError: if the decrypted stream size is invalid. | 3.285953 | 2.731227 | 1.203105 |
if not self._is_open:
raise IOError('Not opened.')
if self._current_offset < 0:
raise IOError(
'Invalid current offset: {0:d} value less than zero.'.format(
self._current_offset))
if self._decrypted_stream_size is None:
self._decrypted_stream_size = self._GetDecryptedStreamSize()
if self._decrypted_stream_size < 0:
raise IOError('Invalid decrypted stream size.')
if self._current_offset >= self._decrypted_stream_size:
return b''
if self._realign_offset:
self._AlignDecryptedDataOffset(self._current_offset)
self._realign_offset = False
if size is None:
size = self._decrypted_stream_size
if self._current_offset + size > self._decrypted_stream_size:
size = self._decrypted_stream_size - self._current_offset
decrypted_data = b''
if size == 0:
return decrypted_data
while size > self._decrypted_data_size:
decrypted_data = b''.join([
decrypted_data,
self._decrypted_data[self._decrypted_data_offset:]])
remaining_decrypted_data_size = (
self._decrypted_data_size - self._decrypted_data_offset)
self._current_offset += remaining_decrypted_data_size
size -= remaining_decrypted_data_size
if self._current_offset >= self._decrypted_stream_size:
break
read_count = self._ReadEncryptedData(self._ENCRYPTED_DATA_BUFFER_SIZE)
self._decrypted_data_offset = 0
if read_count == 0:
break
if size > 0:
slice_start_offset = self._decrypted_data_offset
slice_end_offset = slice_start_offset + size
decrypted_data = b''.join([
decrypted_data,
self._decrypted_data[slice_start_offset:slice_end_offset]])
self._decrypted_data_offset += size
self._current_offset += size
return decrypted_data | def read(self, size=None) | Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed. | 1.952844 | 1.969188 | 0.9917 |
if not self._is_open:
raise IOError('Not opened.')
if self._current_offset < 0:
raise IOError(
'Invalid current offset: {0:d} value less than zero.'.format(
self._current_offset))
if whence == os.SEEK_CUR:
offset += self._current_offset
elif whence == os.SEEK_END:
if self._decrypted_stream_size is None:
self._decrypted_stream_size = self._GetDecryptedStreamSize()
if self._decrypted_stream_size is None:
raise IOError('Invalid decrypted stream size.')
offset += self._decrypted_stream_size
elif whence != os.SEEK_SET:
raise IOError('Unsupported whence.')
if offset < 0:
raise IOError('Invalid offset value less than zero.')
if offset != self._current_offset:
self._current_offset = offset
self._realign_offset = True | def seek(self, offset, whence=os.SEEK_SET) | Seeks to an offset within the file-like object.
Args:
offset (int): offset to seek.
whence (Optional[int]): value that indicates whether offset is an
absolute or relative position within the file.
Raises:
IOError: if the seek failed.
OSError: if the seek failed. | 2.290006 | 2.321241 | 0.986544 |
if not self._is_open:
raise IOError('Not opened.')
if self._decrypted_stream_size is None:
self._decrypted_stream_size = self._GetDecryptedStreamSize()
return self._decrypted_stream_size | def get_size(self) | Retrieves the size of the file-like object.
Returns:
int: size of the decrypted stream.
Raises:
IOError: if the file-like object has not been opened.
OSError: if the file-like object has not been opened. | 3.994017 | 2.934482 | 1.361064 |
views = registration.getViews(IBrowserRequest)
for v in views:
if v.provided == IViewlet:
# Note that we might have conflicting BrowserView with the same
# name, thus we need to check for provided
if v.name == name:
return v
return None | def getViewletByName(self, name) | Viewlets allow through-the-web customizations.
Through-the-web customization magic is managed by five.customerize.
We need to think of this when looking up viewlets.
@return: Viewlet registration object | 12.326312 | 12.307271 | 1.001547 |
context = aq_inner(self.context)
request = self.request
# Perform viewlet regisration look-up
# from adapters registry
reg = self.getViewletByName(name)
if reg is None:
return None
# factory method is responsible for creating the viewlet instance
factory = reg.factory
# Create viewlet and put it to the acquisition chain
# Viewlet need initialization parameters: context, request, view
try:
viewlet = factory(context, request, self, None).__of__(context)
except TypeError:
# Bad constructor call parameters
raise RuntimeError(
"Unable to initialize viewlet {}. "
"Factory method {} call failed."
.format(name, str(factory)))
return viewlet | def setupViewletByName(self, name) | Constructs a viewlet instance by its name.
Viewlet update() and render() method are not called.
@return: Viewlet instance of None if viewlet with name does not exist | 7.602308 | 8.147762 | 0.933055 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.