index
int64 0
731k
| package
stringlengths 2
98
⌀ | name
stringlengths 1
76
| docstring
stringlengths 0
281k
⌀ | code
stringlengths 4
1.07M
⌀ | signature
stringlengths 2
42.8k
⌀ |
---|---|---|---|---|---|
35,653 | pefile | set_dword_at_offset | Set the double word value at the given file offset. | def set_dword_at_offset(self, offset, dword):
"""Set the double word value at the given file offset."""
return self.set_bytes_at_offset(offset, self.get_data_from_dword(dword))
| (self, offset, dword) |
35,654 | pefile | set_dword_at_rva | Set the double word value at the file offset corresponding to the given RVA. | def set_dword_at_rva(self, rva, dword):
"""Set the double word value at the file offset corresponding to the given RVA."""
return self.set_bytes_at_rva(rva, self.get_data_from_dword(dword))
| (self, rva, dword) |
35,655 | pefile | set_qword_at_offset | Set the quad-word value at the given file offset. | def set_qword_at_offset(self, offset, qword):
"""Set the quad-word value at the given file offset."""
return self.set_bytes_at_offset(offset, self.get_data_from_qword(qword))
| (self, offset, qword) |
35,656 | pefile | set_qword_at_rva | Set the quad-word value at the file offset corresponding to the given RVA. | def set_qword_at_rva(self, rva, qword):
"""Set the quad-word value at the file offset corresponding to the given RVA."""
return self.set_bytes_at_rva(rva, self.get_data_from_qword(qword))
| (self, rva, qword) |
35,657 | pefile | set_word_at_offset | Set the word value at the given file offset. | def set_word_at_offset(self, offset, word):
"""Set the word value at the given file offset."""
return self.set_bytes_at_offset(offset, self.get_data_from_word(word))
| (self, offset, word) |
35,658 | pefile | set_word_at_rva | Set the word value at the file offset corresponding to the given RVA. | def set_word_at_rva(self, rva, word):
"""Set the word value at the file offset corresponding to the given RVA."""
return self.set_bytes_at_rva(rva, self.get_data_from_word(word))
| (self, rva, word) |
35,659 | pefile | show_warnings | Print the list of warnings.
Non-critical problems found when parsing the PE file are
appended to a list of warnings. This method prints the
full list to standard output.
| def show_warnings(self):
"""Print the list of warnings.
Non-critical problems found when parsing the PE file are
appended to a list of warnings. This method prints the
full list to standard output.
"""
for warning in self.__warnings:
print(">", warning)
| (self) |
35,660 | pefile | trim | Return just the data defined by the PE headers, removing any overlaid data. | def trim(self):
    """Return just the data defined by the PE headers, removing any overlaid data."""
overlay_data_offset = self.get_overlay_data_start_offset()
if overlay_data_offset is not None:
return self.__data__[:overlay_data_offset]
return self.__data__[:]
| (self) |
35,661 | pefile | verify_checksum | null | def verify_checksum(self):
return self.OPTIONAL_HEADER.CheckSum == self.generate_checksum()
| (self) |
35,662 | pefile | write | Write the PE file.
This function will process all headers and components
of the PE file and include all changes made (by just
assigning to attributes in the PE objects) and write
the changes back to a file whose name is provided as
an argument. The filename is optional, if not
provided the data will be returned as a 'str' object.
| def write(self, filename=None):
"""Write the PE file.
This function will process all headers and components
of the PE file and include all changes made (by just
assigning to attributes in the PE objects) and write
the changes back to a file whose name is provided as
an argument. The filename is optional, if not
provided the data will be returned as a 'str' object.
"""
file_data = bytearray(self.__data__)
for structure in self.__structures__:
struct_data = bytearray(structure.__pack__())
offset = structure.get_file_offset()
file_data[offset : offset + len(struct_data)] = struct_data
if hasattr(self, "VS_VERSIONINFO"):
if hasattr(self, "FileInfo"):
for finfo in self.FileInfo:
for entry in finfo:
if hasattr(entry, "StringTable"):
for st_entry in entry.StringTable:
for key, entry in list(st_entry.entries.items()):
# Offsets and lengths of the keys and values.
# Each value in the dictionary is a tuple:
# (key length, value length)
# The lengths are in characters, not in bytes.
offsets = st_entry.entries_offsets[key]
lengths = st_entry.entries_lengths[key]
if len(entry) > lengths[1]:
l = entry.decode("utf-8").encode("utf-16le")
file_data[
offsets[1] : offsets[1] + lengths[1] * 2
] = l[: lengths[1] * 2]
else:
encoded_data = entry.decode("utf-8").encode(
"utf-16le"
)
file_data[
offsets[1] : offsets[1] + len(encoded_data)
] = encoded_data
new_file_data = file_data
if not filename:
return new_file_data
f = open(filename, "wb+")
f.write(new_file_data)
f.close()
return
| (self, filename=None) |
35,663 | pefile | PEFormatError | Generic PE format error exception. | class PEFormatError(Exception):
"""Generic PE format error exception."""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
| (value) |
35,664 | pefile | __init__ | null | def __init__(self, value):
self.value = value
| (self, value) |
35,665 | pefile | __str__ | null | def __str__(self):
return repr(self.value)
| (self) |
35,666 | pefile | PrologEpilogOp | Meant as an abstract class representing a generic unwind code.
There is a subclass of PrologEpilogOp for each member of UNWIND_OP_CODES enum.
| class PrologEpilogOp:
"""Meant as an abstract class representing a generic unwind code.
There is a subclass of PrologEpilogOp for each member of UNWIND_OP_CODES enum.
"""
def initialize(self, unw_code, data, unw_info, file_offset):
self.struct = StructureWithBitfields(
self._get_format(unw_code), file_offset=file_offset
)
self.struct.__unpack__(data)
def length_in_code_structures(self, unw_code, unw_info):
"""Computes how many UNWIND_CODE structures UNWIND_CODE occupies.
May be called before initialize() and, for that reason, should not rely on
        the values of instance attributes.
"""
return 1
def is_valid(self):
return True
def _get_format(self, unw_code):
return ("UNWIND_CODE", ("B,CodeOffset", "B:4,UnwindOp", "B:4,OpInfo"))
| () |
35,667 | pefile | _get_format | null | def _get_format(self, unw_code):
return ("UNWIND_CODE", ("B,CodeOffset", "B:4,UnwindOp", "B:4,OpInfo"))
| (self, unw_code) |
35,668 | pefile | initialize | null | def initialize(self, unw_code, data, unw_info, file_offset):
self.struct = StructureWithBitfields(
self._get_format(unw_code), file_offset=file_offset
)
self.struct.__unpack__(data)
| (self, unw_code, data, unw_info, file_offset) |
35,669 | pefile | is_valid | null | def is_valid(self):
return True
| (self) |
35,670 | pefile | length_in_code_structures | Computes how many UNWIND_CODE structures UNWIND_CODE occupies.
May be called before initialize() and, for that reason, should not rely on
the values of instance attributes.
| def length_in_code_structures(self, unw_code, unw_info):
"""Computes how many UNWIND_CODE structures UNWIND_CODE occupies.
May be called before initialize() and, for that reason, should not rely on
    the values of instance attributes.
"""
return 1
| (self, unw_code, unw_info) |
35,671 | pefile | PrologEpilogOpAllocLarge | UWOP_ALLOC_LARGE | class PrologEpilogOpAllocLarge(PrologEpilogOp):
"""UWOP_ALLOC_LARGE"""
def _get_format(self, unw_code):
return (
"UNWIND_CODE_ALLOC_LARGE",
(
"B,CodeOffset",
"B:4,UnwindOp",
"B:4,OpInfo",
"H,AllocSizeInQwords" if unw_code.OpInfo == 0 else "I,AllocSize",
),
)
def length_in_code_structures(self, unw_code, unw_info):
return 2 if unw_code.OpInfo == 0 else 3
def get_alloc_size(self):
return (
self.struct.AllocSizeInQwords * 8
if self.struct.OpInfo == 0
else self.struct.AllocSize
)
def __str__(self):
return ".ALLOCSTACK " + hex(self.get_alloc_size())
| () |
35,672 | pefile | __str__ | null | def __str__(self):
return ".ALLOCSTACK " + hex(self.get_alloc_size())
| (self) |
35,673 | pefile | _get_format | null | def _get_format(self, unw_code):
return (
"UNWIND_CODE_ALLOC_LARGE",
(
"B,CodeOffset",
"B:4,UnwindOp",
"B:4,OpInfo",
"H,AllocSizeInQwords" if unw_code.OpInfo == 0 else "I,AllocSize",
),
)
| (self, unw_code) |
35,674 | pefile | get_alloc_size | null | def get_alloc_size(self):
return (
self.struct.AllocSizeInQwords * 8
if self.struct.OpInfo == 0
else self.struct.AllocSize
)
| (self) |
35,677 | pefile | length_in_code_structures | null | def length_in_code_structures(self, unw_code, unw_info):
return 2 if unw_code.OpInfo == 0 else 3
| (self, unw_code, unw_info) |
35,678 | pefile | PrologEpilogOpAllocSmall | UWOP_ALLOC_SMALL | class PrologEpilogOpAllocSmall(PrologEpilogOp):
"""UWOP_ALLOC_SMALL"""
def _get_format(self, unw_code):
return (
"UNWIND_CODE_ALLOC_SMALL",
("B,CodeOffset", "B:4,UnwindOp", "B:4,AllocSizeInQwordsMinus8"),
)
def get_alloc_size(self):
return self.struct.AllocSizeInQwordsMinus8 * 8 + 8
def __str__(self):
return ".ALLOCSTACK " + hex(self.get_alloc_size())
| () |
35,680 | pefile | _get_format | null | def _get_format(self, unw_code):
return (
"UNWIND_CODE_ALLOC_SMALL",
("B,CodeOffset", "B:4,UnwindOp", "B:4,AllocSizeInQwordsMinus8"),
)
| (self, unw_code) |
35,681 | pefile | get_alloc_size | null | def get_alloc_size(self):
return self.struct.AllocSizeInQwordsMinus8 * 8 + 8
| (self) |
35,685 | pefile | PrologEpilogOpEpilogMarker | UWOP_EPILOG | class PrologEpilogOpEpilogMarker(PrologEpilogOp):
"""UWOP_EPILOG"""
def initialize(self, unw_code, data, unw_info, file_offset):
self._long_offst = True
self._first = not hasattr(unw_info, "SizeOfEpilog")
super(PrologEpilogOpEpilogMarker, self).initialize(
unw_code, data, unw_info, file_offset
)
if self._first:
setattr(unw_info, "SizeOfEpilog", self.struct.Size)
self._long_offst = unw_code.OpInfo & 1 == 0
self._epilog_size = unw_info.SizeOfEpilog
def _get_format(self, unw_code):
# check if it is the first epilog code among encountered; then its record
# will contain size of the epilog
if self._first:
return (
"UNWIND_CODE_EPILOG",
("B,OffsetLow,Size", "B:4,UnwindOp", "B:4,Flags")
if unw_code.OpInfo & 1 == 1
else (
"B,Size",
"B:4,UnwindOp",
"B:4,Flags",
"B,OffsetLow",
"B:4,Unused",
"B:4,OffsetHigh",
),
)
else:
return (
"UNWIND_CODE_EPILOG",
("B,OffsetLow", "B:4,UnwindOp", "B:4,OffsetHigh"),
)
def length_in_code_structures(self, unw_code, unw_info):
return (
2
if not hasattr(unw_info, "SizeOfEpilog") and (unw_code.OpInfo & 1) == 0
else 1
)
def get_offset(self):
return self.struct.OffsetLow | (
self.struct.OffsetHigh << 8 if self._long_offst else 0
)
def is_valid(self):
return self.get_offset() > 0
def __str__(self):
# the EPILOG sequence may have a terminating all-zeros entry
return (
"EPILOG: size="
+ hex(self._epilog_size)
+ ", offset from the end=-"
+ hex(self.get_offset())
if self.get_offset() > 0
else ""
)
| () |
35,686 | pefile | __str__ | null | def __str__(self):
# the EPILOG sequence may have a terminating all-zeros entry
return (
"EPILOG: size="
+ hex(self._epilog_size)
+ ", offset from the end=-"
+ hex(self.get_offset())
if self.get_offset() > 0
else ""
)
| (self) |
35,687 | pefile | _get_format | null | def _get_format(self, unw_code):
# check if it is the first epilog code among encountered; then its record
# will contain size of the epilog
if self._first:
return (
"UNWIND_CODE_EPILOG",
("B,OffsetLow,Size", "B:4,UnwindOp", "B:4,Flags")
if unw_code.OpInfo & 1 == 1
else (
"B,Size",
"B:4,UnwindOp",
"B:4,Flags",
"B,OffsetLow",
"B:4,Unused",
"B:4,OffsetHigh",
),
)
else:
return (
"UNWIND_CODE_EPILOG",
("B,OffsetLow", "B:4,UnwindOp", "B:4,OffsetHigh"),
)
| (self, unw_code) |
35,688 | pefile | get_offset | null | def get_offset(self):
return self.struct.OffsetLow | (
self.struct.OffsetHigh << 8 if self._long_offst else 0
)
| (self) |
35,689 | pefile | initialize | null | def initialize(self, unw_code, data, unw_info, file_offset):
self._long_offst = True
self._first = not hasattr(unw_info, "SizeOfEpilog")
super(PrologEpilogOpEpilogMarker, self).initialize(
unw_code, data, unw_info, file_offset
)
if self._first:
setattr(unw_info, "SizeOfEpilog", self.struct.Size)
self._long_offst = unw_code.OpInfo & 1 == 0
self._epilog_size = unw_info.SizeOfEpilog
| (self, unw_code, data, unw_info, file_offset) |
35,690 | pefile | is_valid | null | def is_valid(self):
return self.get_offset() > 0
| (self) |
35,691 | pefile | length_in_code_structures | null | def length_in_code_structures(self, unw_code, unw_info):
return (
2
if not hasattr(unw_info, "SizeOfEpilog") and (unw_code.OpInfo & 1) == 0
else 1
)
| (self, unw_code, unw_info) |
35,692 | pefile | PrologEpilogOpPushFrame | UWOP_PUSH_MACHFRAME | class PrologEpilogOpPushFrame(PrologEpilogOp):
"""UWOP_PUSH_MACHFRAME"""
def __str__(self):
return ".PUSHFRAME" + (" <code>" if self.struct.OpInfo else "")
| () |
35,693 | pefile | __str__ | null | def __str__(self):
return ".PUSHFRAME" + (" <code>" if self.struct.OpInfo else "")
| (self) |
35,698 | pefile | PrologEpilogOpPushReg | UWOP_PUSH_NONVOL | class PrologEpilogOpPushReg(PrologEpilogOp):
"""UWOP_PUSH_NONVOL"""
def _get_format(self, unw_code):
return ("UNWIND_CODE_PUSH_NONVOL", ("B,CodeOffset", "B:4,UnwindOp", "B:4,Reg"))
def __str__(self):
return ".PUSHREG " + REGISTERS[self.struct.Reg]
| () |
35,699 | pefile | __str__ | null | def __str__(self):
return ".PUSHREG " + REGISTERS[self.struct.Reg]
| (self) |
35,700 | pefile | _get_format | null | def _get_format(self, unw_code):
return ("UNWIND_CODE_PUSH_NONVOL", ("B,CodeOffset", "B:4,UnwindOp", "B:4,Reg"))
| (self, unw_code) |
35,704 | pefile | PrologEpilogOpSaveReg | UWOP_SAVE_NONVOL | class PrologEpilogOpSaveReg(PrologEpilogOp):
"""UWOP_SAVE_NONVOL"""
def length_in_code_structures(self, unwcode, unw_info):
return 2
def get_offset(self):
return self.struct.OffsetInQwords * 8
def _get_format(self, unw_code):
return (
"UNWIND_CODE_SAVE_NONVOL",
("B,CodeOffset", "B:4,UnwindOp", "B:4,Reg", "H,OffsetInQwords"),
)
def __str__(self):
return ".SAVEREG " + REGISTERS[self.struct.Reg] + ", " + hex(self.get_offset())
| () |
35,705 | pefile | __str__ | null | def __str__(self):
return ".SAVEREG " + REGISTERS[self.struct.Reg] + ", " + hex(self.get_offset())
| (self) |
35,706 | pefile | _get_format | null | def _get_format(self, unw_code):
return (
"UNWIND_CODE_SAVE_NONVOL",
("B,CodeOffset", "B:4,UnwindOp", "B:4,Reg", "H,OffsetInQwords"),
)
| (self, unw_code) |
35,707 | pefile | get_offset | null | def get_offset(self):
return self.struct.OffsetInQwords * 8
| (self) |
35,710 | pefile | length_in_code_structures | null | def length_in_code_structures(self, unwcode, unw_info):
return 2
| (self, unwcode, unw_info) |
35,711 | pefile | PrologEpilogOpSaveRegFar | UWOP_SAVE_NONVOL_FAR | class PrologEpilogOpSaveRegFar(PrologEpilogOp):
"""UWOP_SAVE_NONVOL_FAR"""
def length_in_code_structures(self, unw_code, unw_info):
return 3
def get_offset(self):
return self.struct.Offset
def _get_format(self, unw_code):
return (
"UNWIND_CODE_SAVE_NONVOL_FAR",
("B,CodeOffset", "B:4,UnwindOp", "B:4,Reg", "I,Offset"),
)
def __str__(self):
return ".SAVEREG " + REGISTERS[self.struct.Reg] + ", " + hex(self.struct.Offset)
| () |
35,712 | pefile | __str__ | null | def __str__(self):
return ".SAVEREG " + REGISTERS[self.struct.Reg] + ", " + hex(self.struct.Offset)
| (self) |
35,713 | pefile | _get_format | null | def _get_format(self, unw_code):
return (
"UNWIND_CODE_SAVE_NONVOL_FAR",
("B,CodeOffset", "B:4,UnwindOp", "B:4,Reg", "I,Offset"),
)
| (self, unw_code) |
35,714 | pefile | get_offset | null | def get_offset(self):
return self.struct.Offset
| (self) |
35,717 | pefile | length_in_code_structures | null | def length_in_code_structures(self, unw_code, unw_info):
return 3
| (self, unw_code, unw_info) |
35,718 | pefile | PrologEpilogOpSaveXMM | UWOP_SAVE_XMM128 | class PrologEpilogOpSaveXMM(PrologEpilogOp):
"""UWOP_SAVE_XMM128"""
def _get_format(self, unw_code):
return (
"UNWIND_CODE_SAVE_XMM128",
("B,CodeOffset", "B:4,UnwindOp", "B:4,Reg", "H,OffsetIn2Qwords"),
)
def length_in_code_structures(self, unw_code, unw_info):
return 2
def get_offset(self):
return self.struct.OffsetIn2Qwords * 16
def __str__(self):
return ".SAVEXMM128 XMM" + str(self.struct.Reg) + ", " + hex(self.get_offset())
| () |
35,719 | pefile | __str__ | null | def __str__(self):
return ".SAVEXMM128 XMM" + str(self.struct.Reg) + ", " + hex(self.get_offset())
| (self) |
35,720 | pefile | _get_format | null | def _get_format(self, unw_code):
return (
"UNWIND_CODE_SAVE_XMM128",
("B,CodeOffset", "B:4,UnwindOp", "B:4,Reg", "H,OffsetIn2Qwords"),
)
| (self, unw_code) |
35,721 | pefile | get_offset | null | def get_offset(self):
return self.struct.OffsetIn2Qwords * 16
| (self) |
35,724 | pefile | length_in_code_structures | null | def length_in_code_structures(self, unw_code, unw_info):
return 2
| (self, unw_code, unw_info) |
35,725 | pefile | PrologEpilogOpSaveXMMFar | UWOP_SAVE_XMM128_FAR | class PrologEpilogOpSaveXMMFar(PrologEpilogOp):
"""UWOP_SAVE_XMM128_FAR"""
def _get_format(self, unw_code):
return (
"UNWIND_CODE_SAVE_XMM128_FAR",
("B,CodeOffset", "B:4,UnwindOp", "B:4,Reg", "I,Offset"),
)
def length_in_code_structures(self, unw_code, unw_info):
return 3
def get_offset(self):
return self.struct.Offset
def __str__(self):
return ".SAVEXMM128 XMM" + str(self.struct.Reg) + ", " + hex(self.struct.Offset)
| () |
35,726 | pefile | __str__ | null | def __str__(self):
return ".SAVEXMM128 XMM" + str(self.struct.Reg) + ", " + hex(self.struct.Offset)
| (self) |
35,727 | pefile | _get_format | null | def _get_format(self, unw_code):
return (
"UNWIND_CODE_SAVE_XMM128_FAR",
("B,CodeOffset", "B:4,UnwindOp", "B:4,Reg", "I,Offset"),
)
| (self, unw_code) |
35,732 | pefile | PrologEpilogOpSetFP | UWOP_SET_FPREG | class PrologEpilogOpSetFP(PrologEpilogOp):
"""UWOP_SET_FPREG"""
def initialize(self, unw_code, data, unw_info, file_offset):
super(PrologEpilogOpSetFP, self).initialize(
unw_code, data, unw_info, file_offset
)
self._frame_register = unw_info.FrameRegister
self._frame_offset = unw_info.FrameOffset * 16
def __str__(self):
return (
".SETFRAME "
+ REGISTERS[self._frame_register]
+ ", "
+ hex(self._frame_offset)
)
| () |
35,733 | pefile | __str__ | null | def __str__(self):
return (
".SETFRAME "
+ REGISTERS[self._frame_register]
+ ", "
+ hex(self._frame_offset)
)
| (self) |
35,735 | pefile | initialize | null | def initialize(self, unw_code, data, unw_info, file_offset):
super(PrologEpilogOpSetFP, self).initialize(
unw_code, data, unw_info, file_offset
)
self._frame_register = unw_info.FrameRegister
self._frame_offset = unw_info.FrameOffset * 16
| (self, unw_code, data, unw_info, file_offset) |
35,738 | pefile | PrologEpilogOpsFactory | A factory for creating unwind codes based on the value of UnwindOp | class PrologEpilogOpsFactory:
"""A factory for creating unwind codes based on the value of UnwindOp"""
_class_dict = {
UWOP_PUSH_NONVOL: PrologEpilogOpPushReg,
UWOP_ALLOC_LARGE: PrologEpilogOpAllocLarge,
UWOP_ALLOC_SMALL: PrologEpilogOpAllocSmall,
UWOP_SET_FPREG: PrologEpilogOpSetFP,
UWOP_SAVE_NONVOL: PrologEpilogOpSaveReg,
UWOP_SAVE_NONVOL_FAR: PrologEpilogOpSaveRegFar,
UWOP_SAVE_XMM128: PrologEpilogOpSaveXMM,
UWOP_SAVE_XMM128_FAR: PrologEpilogOpSaveXMMFar,
UWOP_PUSH_MACHFRAME: PrologEpilogOpPushFrame,
UWOP_EPILOG: PrologEpilogOpEpilogMarker,
}
@staticmethod
def create(unwcode):
code = unwcode.UnwindOp
return (
PrologEpilogOpsFactory._class_dict[code]()
if code in PrologEpilogOpsFactory._class_dict
else None
)
| () |
35,739 | pefile | create | null | @staticmethod
def create(unwcode):
code = unwcode.UnwindOp
return (
PrologEpilogOpsFactory._class_dict[code]()
if code in PrologEpilogOpsFactory._class_dict
else None
)
| (unwcode) |
35,740 | pefile | RelocationData | Holds relocation information.
type: Type of relocation
The type string can be obtained by
RELOCATION_TYPE[type]
rva: RVA of the relocation
| class RelocationData(DataContainer):
"""Holds relocation information.
type: Type of relocation
The type string can be obtained by
RELOCATION_TYPE[type]
rva: RVA of the relocation
"""
def __setattr__(self, name, val):
# If the instance doesn't yet have a struct attribute
# it's not fully initialized so can't do any of the
# following
#
if hasattr(self, "struct"):
# Get the word containing the type and data
#
word = self.struct.Data
if name == "type":
word = (val << 12) | (word & 0xFFF)
elif name == "rva":
offset = max(val - self.base_rva, 0)
word = (word & 0xF000) | (offset & 0xFFF)
# Store the modified data
#
self.struct.Data = word
self.__dict__[name] = val
| (**args) |
35,742 | pefile | __setattr__ | null | def __setattr__(self, name, val):
# If the instance doesn't yet have a struct attribute
# it's not fully initialized so can't do any of the
# following
#
if hasattr(self, "struct"):
# Get the word containing the type and data
#
word = self.struct.Data
if name == "type":
word = (val << 12) | (word & 0xFFF)
elif name == "rva":
offset = max(val - self.base_rva, 0)
word = (word & 0xF000) | (offset & 0xFFF)
# Store the modified data
#
self.struct.Data = word
self.__dict__[name] = val
| (self, name, val) |
35,743 | pefile | ResourceDataEntryData | Holds resource data entry information.
struct: IMAGE_RESOURCE_DATA_ENTRY structure
lang: Primary language ID
sublang: Sublanguage ID
| class ResourceDataEntryData(DataContainer):
"""Holds resource data entry information.
struct: IMAGE_RESOURCE_DATA_ENTRY structure
lang: Primary language ID
sublang: Sublanguage ID
"""
| (**args) |
35,745 | pefile | ResourceDirData | Holds resource directory information.
struct: IMAGE_RESOURCE_DIRECTORY structure
entries: list of entries (ResourceDirEntryData instances)
| class ResourceDirData(DataContainer):
"""Holds resource directory information.
struct: IMAGE_RESOURCE_DIRECTORY structure
entries: list of entries (ResourceDirEntryData instances)
"""
| (**args) |
35,747 | pefile | ResourceDirEntryData | Holds resource directory entry data.
struct: IMAGE_RESOURCE_DIRECTORY_ENTRY structure
name: If the resource is identified by name this
attribute will contain the name string. None
otherwise. If identified by id, the id is
available at 'struct.Id'
id: the id, also in struct.Id
directory: If this entry has a lower level directory
this attribute will point to the
ResourceDirData instance representing it.
data: If this entry has no further lower directories
and points to the actual resource data, this
attribute will reference the corresponding
ResourceDataEntryData instance.
(Either of the 'directory' or 'data' attribute will exist,
but not both.)
| class ResourceDirEntryData(DataContainer):
"""Holds resource directory entry data.
struct: IMAGE_RESOURCE_DIRECTORY_ENTRY structure
name: If the resource is identified by name this
attribute will contain the name string. None
otherwise. If identified by id, the id is
available at 'struct.Id'
id: the id, also in struct.Id
directory: If this entry has a lower level directory
this attribute will point to the
ResourceDirData instance representing it.
data: If this entry has no further lower directories
and points to the actual resource data, this
attribute will reference the corresponding
ResourceDataEntryData instance.
(Either of the 'directory' or 'data' attribute will exist,
but not both.)
"""
| (**args) |
35,749 | pefile | SectionStructure | Convenience section handling class. | class SectionStructure(Structure):
"""Convenience section handling class."""
def __init__(self, *argl, **argd):
if "pe" in argd:
self.pe = argd["pe"]
del argd["pe"]
self.PointerToRawData = None
self.VirtualAddress = None
self.SizeOfRawData = None
self.Misc_VirtualSize = None
Structure.__init__(self, *argl, **argd)
self.PointerToRawData_adj = None
self.VirtualAddress_adj = None
self.section_min_addr = None
self.section_max_addr = None
def get_PointerToRawData_adj(self):
if self.PointerToRawData_adj is None:
if self.PointerToRawData is not None:
self.PointerToRawData_adj = self.pe.adjust_FileAlignment(
self.PointerToRawData, self.pe.OPTIONAL_HEADER.FileAlignment
)
return self.PointerToRawData_adj
def get_VirtualAddress_adj(self):
if self.VirtualAddress_adj is None:
if self.VirtualAddress is not None:
self.VirtualAddress_adj = self.pe.adjust_SectionAlignment(
self.VirtualAddress,
self.pe.OPTIONAL_HEADER.SectionAlignment,
self.pe.OPTIONAL_HEADER.FileAlignment,
)
return self.VirtualAddress_adj
def get_data(self, start=None, length=None, ignore_padding=False):
"""Get data chunk from a section.
Allows to query data from the section by passing the
addresses where the PE file would be loaded by default.
It is then possible to retrieve code and data by their real
addresses as they would be if loaded.
Note that sections on disk can include padding that would
not be loaded to memory. That is the case if `section.SizeOfRawData`
is greater than `section.Misc_VirtualSize`, and that means
that data past `section.Misc_VirtualSize` is padding.
In case you are not interested in this padding, passing
`ignore_padding=True` will truncate the result in order
not to return the padding (if any).
Returns bytes() under Python 3.x and set() under Python 2.7
"""
if start is None:
offset = self.get_PointerToRawData_adj()
else:
offset = (
start - self.get_VirtualAddress_adj()
) + self.get_PointerToRawData_adj()
if length is not None:
end = offset + length
elif self.SizeOfRawData is not None:
end = offset + self.SizeOfRawData
else:
end = offset
if ignore_padding and end is not None and offset is not None:
end = min(end, offset + self.Misc_VirtualSize)
# PointerToRawData is not adjusted here as we might want to read any possible
# extra bytes that might get cut off by aligning the start (and hence cutting
# something off the end)
if self.PointerToRawData is not None and self.SizeOfRawData is not None:
if end > self.PointerToRawData + self.SizeOfRawData:
end = self.PointerToRawData + self.SizeOfRawData
return self.pe.__data__[offset:end]
def __setattr__(self, name, val):
if name == "Characteristics":
section_flags = retrieve_flags(SECTION_CHARACTERISTICS, "IMAGE_SCN_")
# Set the section's flags according to the Characteristics member
set_flags(self, val, section_flags)
elif "IMAGE_SCN_" in name and hasattr(self, name):
if val:
self.__dict__["Characteristics"] |= SECTION_CHARACTERISTICS[name]
else:
self.__dict__["Characteristics"] ^= SECTION_CHARACTERISTICS[name]
self.__dict__[name] = val
def get_rva_from_offset(self, offset):
return offset - self.get_PointerToRawData_adj() + self.get_VirtualAddress_adj()
def get_offset_from_rva(self, rva):
return rva - self.get_VirtualAddress_adj() + self.get_PointerToRawData_adj()
def contains_offset(self, offset):
"""Check whether the section contains the file offset provided."""
if self.PointerToRawData is None:
# bss and other sections containing only uninitialized data must have 0
# and do not take space in the file
return False
PointerToRawData_adj = self.get_PointerToRawData_adj()
return (
PointerToRawData_adj <= offset < PointerToRawData_adj + self.SizeOfRawData
)
def contains_rva(self, rva):
"""Check whether the section contains the address provided."""
# speedup
if self.section_min_addr is not None and self.section_max_addr is not None:
return self.section_min_addr <= rva < self.section_max_addr
VirtualAddress_adj = self.get_VirtualAddress_adj()
# Check if the SizeOfRawData is realistic. If it's bigger than the size of
# the whole PE file minus the start address of the section it could be
# either truncated or the SizeOfRawData contains a misleading value.
# In either of those cases we take the VirtualSize
#
if len(self.pe.__data__) - self.get_PointerToRawData_adj() < self.SizeOfRawData:
# PECOFF documentation v8 says:
# VirtualSize: The total size of the section when loaded into memory.
# If this value is greater than SizeOfRawData, the section is zero-padded.
# This field is valid only for executable images and should be set to zero
# for object files.
#
size = self.Misc_VirtualSize
else:
size = max(self.SizeOfRawData, self.Misc_VirtualSize)
# Check whether there's any section after the current one that starts before
# the calculated end for the current one. If so, cut the current section's size
# to fit in the range up to where the next section starts.
if (
self.next_section_virtual_address is not None
and self.next_section_virtual_address > self.VirtualAddress
and VirtualAddress_adj + size > self.next_section_virtual_address
):
size = self.next_section_virtual_address - VirtualAddress_adj
self.section_min_addr = VirtualAddress_adj
self.section_max_addr = VirtualAddress_adj + size
return VirtualAddress_adj <= rva < VirtualAddress_adj + size
def contains(self, rva):
return self.contains_rva(rva)
def get_entropy(self):
"""Calculate and return the entropy for the section."""
return self.entropy_H(self.get_data())
def get_hash_sha1(self):
"""Get the SHA-1 hex-digest of the section's data."""
if sha1 is not None:
return sha1(self.get_data()).hexdigest()
def get_hash_sha256(self):
"""Get the SHA-256 hex-digest of the section's data."""
if sha256 is not None:
return sha256(self.get_data()).hexdigest()
def get_hash_sha512(self):
"""Get the SHA-512 hex-digest of the section's data."""
if sha512 is not None:
return sha512(self.get_data()).hexdigest()
def get_hash_md5(self):
"""Get the MD5 hex-digest of the section's data."""
if md5 is not None:
return md5(self.get_data()).hexdigest()
def entropy_H(self, data):
"""Calculate the entropy of a chunk of data."""
if not data:
return 0.0
occurences = Counter(bytearray(data))
entropy = 0
for x in occurences.values():
p_x = float(x) / len(data)
entropy -= p_x * math.log(p_x, 2)
return entropy
| (*argl, **argd) |
35,750 | pefile | __get_format__ | null | def __get_format__(self) -> str:
return self.__format_str__
| (self) -> str |
35,751 | pefile | __init__ | null | def __init__(self, *argl, **argd):
if "pe" in argd:
self.pe = argd["pe"]
del argd["pe"]
self.PointerToRawData = None
self.VirtualAddress = None
self.SizeOfRawData = None
self.Misc_VirtualSize = None
Structure.__init__(self, *argl, **argd)
self.PointerToRawData_adj = None
self.VirtualAddress_adj = None
self.section_min_addr = None
self.section_max_addr = None
| (self, *argl, **argd) |
35,752 | pefile | __pack__ | null | def __pack__(self):
new_values = []
for idx, val in enumerate(self.__unpacked_data_elms__):
new_val = None
for key in self.__keys__[idx]:
new_val = getattr(self, key)
# In the case of unions, when the first changed value
# is picked the loop is exited
if new_val != val:
break
new_values.append(new_val)
return struct.pack(self.__format_str__, *new_values)
| (self) |
35,753 | pefile | __repr__ | null | def __repr__(self):
return "<Structure: %s>" % (
" ".join([" ".join(s.split()) for s in self.dump()])
)
| (self) |
35,754 | pefile | __setattr__ | null | def __setattr__(self, name, val):
if name == "Characteristics":
section_flags = retrieve_flags(SECTION_CHARACTERISTICS, "IMAGE_SCN_")
# Set the section's flags according to the Characteristics member
set_flags(self, val, section_flags)
elif "IMAGE_SCN_" in name and hasattr(self, name):
if val:
self.__dict__["Characteristics"] |= SECTION_CHARACTERISTICS[name]
else:
self.__dict__["Characteristics"] ^= SECTION_CHARACTERISTICS[name]
self.__dict__[name] = val
| (self, name, val) |
35,755 | pefile | __str__ | null | def __str__(self):
return "\n".join(self.dump())
| (self) |
35,756 | pefile | __unpack__ | null | def __unpack__(self, data):
data = b(data)
if len(data) > self.__format_length__:
data = data[: self.__format_length__]
# OC Patch:
# Some malware have incorrect header lengths.
# Fail gracefully if this occurs
# Buggy malware: a29b0118af8b7408444df81701ad5a7f
#
elif len(data) < self.__format_length__:
raise PEFormatError("Data length less than expected header length.")
if count_zeroes(data) == len(data):
self.__all_zeroes__ = True
self.__unpacked_data_elms__ = struct.unpack(self.__format_str__, data)
for idx, val in enumerate(self.__unpacked_data_elms__):
for key in self.__keys__[idx]:
setattr(self, key, val)
| (self, data) |
35,757 | pefile | all_zeroes | Returns true is the unpacked data is all zeros. | def all_zeroes(self):
"""Returns true is the unpacked data is all zeros."""
return self.__all_zeroes__
| (self) |
35,758 | pefile | contains | null | def contains(self, rva):
return self.contains_rva(rva)
| (self, rva) |
35,759 | pefile | contains_offset | Check whether the section contains the file offset provided. | def contains_offset(self, offset):
"""Check whether the section contains the file offset provided."""
if self.PointerToRawData is None:
# bss and other sections containing only uninitialized data must have 0
# and do not take space in the file
return False
PointerToRawData_adj = self.get_PointerToRawData_adj()
return (
PointerToRawData_adj <= offset < PointerToRawData_adj + self.SizeOfRawData
)
| (self, offset) |
35,760 | pefile | contains_rva | Check whether the section contains the address provided. | def contains_rva(self, rva):
"""Check whether the section contains the address provided."""
# speedup
if self.section_min_addr is not None and self.section_max_addr is not None:
return self.section_min_addr <= rva < self.section_max_addr
VirtualAddress_adj = self.get_VirtualAddress_adj()
# Check if the SizeOfRawData is realistic. If it's bigger than the size of
# the whole PE file minus the start address of the section it could be
# either truncated or the SizeOfRawData contains a misleading value.
# In either of those cases we take the VirtualSize
#
if len(self.pe.__data__) - self.get_PointerToRawData_adj() < self.SizeOfRawData:
# PECOFF documentation v8 says:
# VirtualSize: The total size of the section when loaded into memory.
# If this value is greater than SizeOfRawData, the section is zero-padded.
# This field is valid only for executable images and should be set to zero
# for object files.
#
size = self.Misc_VirtualSize
else:
size = max(self.SizeOfRawData, self.Misc_VirtualSize)
# Check whether there's any section after the current one that starts before
# the calculated end for the current one. If so, cut the current section's size
# to fit in the range up to where the next section starts.
if (
self.next_section_virtual_address is not None
and self.next_section_virtual_address > self.VirtualAddress
and VirtualAddress_adj + size > self.next_section_virtual_address
):
size = self.next_section_virtual_address - VirtualAddress_adj
self.section_min_addr = VirtualAddress_adj
self.section_max_addr = VirtualAddress_adj + size
return VirtualAddress_adj <= rva < VirtualAddress_adj + size
| (self, rva) |
35,761 | pefile | dump | Returns a string representation of the structure. | def dump(self, indentation=0):
"""Returns a string representation of the structure."""
dump = []
dump.append("[{0}]".format(self.name))
printable_bytes = [
ord(i) for i in string.printable if i not in string.whitespace
]
# Refer to the __set_format__ method for an explanation
# of the following construct.
for keys in self.__keys__:
for key in keys:
val = getattr(self, key)
if isinstance(val, (int, long)):
if key.startswith("Signature_"):
val_str = "{:<8X}".format(val)
else:
val_str = "0x{:<8X}".format(val)
if key == "TimeDateStamp" or key == "dwTimeStamp":
try:
val_str += " [%s UTC]" % time.asctime(time.gmtime(val))
except ValueError:
val_str += " [INVALID TIME]"
else:
val_str = bytearray(val)
if key.startswith("Signature"):
val_str = "".join(
["{:02X}".format(i) for i in val_str.rstrip(b"\x00")]
)
else:
val_str = "".join(
[
chr(i)
if (i in printable_bytes)
else "\\x{0:02x}".format(i)
for i in val_str.rstrip(b"\x00")
]
)
dump.append(
"0x%-8X 0x%-3X %-30s %s"
% (
self.__field_offsets__[key] + self.__file_offset__,
self.__field_offsets__[key],
key + ":",
val_str,
)
)
return dump
| (self, indentation=0) |
35,762 | pefile | dump_dict | Returns a dictionary representation of the structure. | def dump_dict(self):
"""Returns a dictionary representation of the structure."""
dump_dict = {}
dump_dict["Structure"] = self.name
# Refer to the __set_format__ method for an explanation
# of the following construct.
for keys in self.__keys__:
for key in keys:
val = getattr(self, key)
if isinstance(val, (int, long)):
if key == "TimeDateStamp" or key == "dwTimeStamp":
try:
val = "0x%-8X [%s UTC]" % (
val,
time.asctime(time.gmtime(val)),
)
except ValueError:
val = "0x%-8X [INVALID TIME]" % val
else:
val = "".join(
chr(d) if chr(d) in string.printable else "\\x%02x" % d
for d in [ord(c) if not isinstance(c, int) else c for c in val]
)
dump_dict[key] = {
"FileOffset": self.__field_offsets__[key] + self.__file_offset__,
"Offset": self.__field_offsets__[key],
"Value": val,
}
return dump_dict
| (self) |
35,763 | pefile | entropy_H | Calculate the entropy of a chunk of data. | def entropy_H(self, data):
"""Calculate the entropy of a chunk of data."""
if not data:
return 0.0
occurences = Counter(bytearray(data))
entropy = 0
for x in occurences.values():
p_x = float(x) / len(data)
entropy -= p_x * math.log(p_x, 2)
return entropy
| (self, data) |
35,764 | pefile | get_PointerToRawData_adj | null | def get_PointerToRawData_adj(self):
if self.PointerToRawData_adj is None:
if self.PointerToRawData is not None:
self.PointerToRawData_adj = self.pe.adjust_FileAlignment(
self.PointerToRawData, self.pe.OPTIONAL_HEADER.FileAlignment
)
return self.PointerToRawData_adj
| (self) |
35,765 | pefile | get_VirtualAddress_adj | null | def get_VirtualAddress_adj(self):
if self.VirtualAddress_adj is None:
if self.VirtualAddress is not None:
self.VirtualAddress_adj = self.pe.adjust_SectionAlignment(
self.VirtualAddress,
self.pe.OPTIONAL_HEADER.SectionAlignment,
self.pe.OPTIONAL_HEADER.FileAlignment,
)
return self.VirtualAddress_adj
| (self) |
35,766 | pefile | get_data | Get data chunk from a section.
Allows to query data from the section by passing the
addresses where the PE file would be loaded by default.
It is then possible to retrieve code and data by their real
addresses as they would be if loaded.
Note that sections on disk can include padding that would
not be loaded to memory. That is the case if `section.SizeOfRawData`
is greater than `section.Misc_VirtualSize`, and that means
that data past `section.Misc_VirtualSize` is padding.
In case you are not interested in this padding, passing
`ignore_padding=True` will truncate the result in order
not to return the padding (if any).
Returns bytes() under Python 3.x and set() under Python 2.7
| def get_data(self, start=None, length=None, ignore_padding=False):
"""Get data chunk from a section.
Allows to query data from the section by passing the
addresses where the PE file would be loaded by default.
It is then possible to retrieve code and data by their real
addresses as they would be if loaded.
Note that sections on disk can include padding that would
not be loaded to memory. That is the case if `section.SizeOfRawData`
is greater than `section.Misc_VirtualSize`, and that means
that data past `section.Misc_VirtualSize` is padding.
In case you are not interested in this padding, passing
`ignore_padding=True` will truncate the result in order
not to return the padding (if any).
Returns bytes() under Python 3.x and set() under Python 2.7
"""
if start is None:
offset = self.get_PointerToRawData_adj()
else:
offset = (
start - self.get_VirtualAddress_adj()
) + self.get_PointerToRawData_adj()
if length is not None:
end = offset + length
elif self.SizeOfRawData is not None:
end = offset + self.SizeOfRawData
else:
end = offset
if ignore_padding and end is not None and offset is not None:
end = min(end, offset + self.Misc_VirtualSize)
# PointerToRawData is not adjusted here as we might want to read any possible
# extra bytes that might get cut off by aligning the start (and hence cutting
# something off the end)
if self.PointerToRawData is not None and self.SizeOfRawData is not None:
if end > self.PointerToRawData + self.SizeOfRawData:
end = self.PointerToRawData + self.SizeOfRawData
return self.pe.__data__[offset:end]
| (self, start=None, length=None, ignore_padding=False) |
35,767 | pefile | get_entropy | Calculate and return the entropy for the section. | def get_entropy(self):
"""Calculate and return the entropy for the section."""
return self.entropy_H(self.get_data())
| (self) |
35,768 | pefile | get_field_absolute_offset | Return the offset within the field for the requested field in the structure. | def get_field_absolute_offset(self, field_name):
"""Return the offset within the field for the requested field in the structure."""
return self.__file_offset__ + self.__field_offsets__[field_name]
| (self, field_name) |
35,769 | pefile | get_field_relative_offset | Return the offset within the structure for the requested field. | def get_field_relative_offset(self, field_name):
"""Return the offset within the structure for the requested field."""
return self.__field_offsets__[field_name]
| (self, field_name) |
35,770 | pefile | get_file_offset | null | def get_file_offset(self):
return self.__file_offset__
| (self) |
35,771 | pefile | get_hash_md5 | Get the MD5 hex-digest of the section's data. | def get_hash_md5(self):
"""Get the MD5 hex-digest of the section's data."""
if md5 is not None:
return md5(self.get_data()).hexdigest()
| (self) |
35,772 | pefile | get_hash_sha1 | Get the SHA-1 hex-digest of the section's data. | def get_hash_sha1(self):
"""Get the SHA-1 hex-digest of the section's data."""
if sha1 is not None:
return sha1(self.get_data()).hexdigest()
| (self) |
35,773 | pefile | get_hash_sha256 | Get the SHA-256 hex-digest of the section's data. | def get_hash_sha256(self):
"""Get the SHA-256 hex-digest of the section's data."""
if sha256 is not None:
return sha256(self.get_data()).hexdigest()
| (self) |
35,774 | pefile | get_hash_sha512 | Get the SHA-512 hex-digest of the section's data. | def get_hash_sha512(self):
"""Get the SHA-512 hex-digest of the section's data."""
if sha512 is not None:
return sha512(self.get_data()).hexdigest()
| (self) |
35,775 | pefile | get_offset_from_rva | null | def get_offset_from_rva(self, rva):
return rva - self.get_VirtualAddress_adj() + self.get_PointerToRawData_adj()
| (self, rva) |
35,776 | pefile | get_rva_from_offset | null | def get_rva_from_offset(self, offset):
return offset - self.get_PointerToRawData_adj() + self.get_VirtualAddress_adj()
| (self, offset) |
35,777 | pefile | set_file_offset | null | def set_file_offset(self, offset):
self.__file_offset__ = offset
| (self, offset) |
35,778 | pefile | sizeof | Return size of the structure. | def sizeof(self):
"""Return size of the structure."""
return self.__format_length__
| (self) |
35,779 | pefile | Structure | Prepare structure object to extract members from data.
Format is a list containing definitions for the elements
of the structure.
| class Structure:
"""Prepare structure object to extract members from data.
Format is a list containing definitions for the elements
of the structure.
"""
def __init__(self, format, name=None, file_offset=None):
# Format is forced little endian, for big endian non Intel platforms
self.__format_str__ = "<"
self.__keys__ = []
self.__format_length__ = 0
self.__field_offsets__ = {}
self.__unpacked_data_elms__ = []
d = format[1]
# need a tuple to be hashable in set_format using lru cache
if not isinstance(d, tuple):
d = tuple(d)
(
self.__format_str__,
self.__unpacked_data_elms__,
self.__field_offsets__,
self.__keys__,
self.__format_length__,
) = set_format(d)
self.__all_zeroes__ = False
self.__file_offset__ = file_offset
if name:
self.name = name
else:
self.name = format[0]
def __get_format__(self) -> str:
return self.__format_str__
def get_field_absolute_offset(self, field_name):
"""Return the offset within the field for the requested field in the structure."""
return self.__file_offset__ + self.__field_offsets__[field_name]
def get_field_relative_offset(self, field_name):
"""Return the offset within the structure for the requested field."""
return self.__field_offsets__[field_name]
def get_file_offset(self):
return self.__file_offset__
def set_file_offset(self, offset):
self.__file_offset__ = offset
def all_zeroes(self):
"""Returns true is the unpacked data is all zeros."""
return self.__all_zeroes__
def sizeof(self):
"""Return size of the structure."""
return self.__format_length__
def __unpack__(self, data):
data = b(data)
if len(data) > self.__format_length__:
data = data[: self.__format_length__]
# OC Patch:
# Some malware have incorrect header lengths.
# Fail gracefully if this occurs
# Buggy malware: a29b0118af8b7408444df81701ad5a7f
#
elif len(data) < self.__format_length__:
raise PEFormatError("Data length less than expected header length.")
if count_zeroes(data) == len(data):
self.__all_zeroes__ = True
self.__unpacked_data_elms__ = struct.unpack(self.__format_str__, data)
for idx, val in enumerate(self.__unpacked_data_elms__):
for key in self.__keys__[idx]:
setattr(self, key, val)
def __pack__(self):
new_values = []
for idx, val in enumerate(self.__unpacked_data_elms__):
new_val = None
for key in self.__keys__[idx]:
new_val = getattr(self, key)
# In the case of unions, when the first changed value
# is picked the loop is exited
if new_val != val:
break
new_values.append(new_val)
return struct.pack(self.__format_str__, *new_values)
def __str__(self):
return "\n".join(self.dump())
def __repr__(self):
return "<Structure: %s>" % (
" ".join([" ".join(s.split()) for s in self.dump()])
)
def dump(self, indentation=0):
"""Returns a string representation of the structure."""
dump = []
dump.append("[{0}]".format(self.name))
printable_bytes = [
ord(i) for i in string.printable if i not in string.whitespace
]
# Refer to the __set_format__ method for an explanation
# of the following construct.
for keys in self.__keys__:
for key in keys:
val = getattr(self, key)
if isinstance(val, (int, long)):
if key.startswith("Signature_"):
val_str = "{:<8X}".format(val)
else:
val_str = "0x{:<8X}".format(val)
if key == "TimeDateStamp" or key == "dwTimeStamp":
try:
val_str += " [%s UTC]" % time.asctime(time.gmtime(val))
except ValueError:
val_str += " [INVALID TIME]"
else:
val_str = bytearray(val)
if key.startswith("Signature"):
val_str = "".join(
["{:02X}".format(i) for i in val_str.rstrip(b"\x00")]
)
else:
val_str = "".join(
[
chr(i)
if (i in printable_bytes)
else "\\x{0:02x}".format(i)
for i in val_str.rstrip(b"\x00")
]
)
dump.append(
"0x%-8X 0x%-3X %-30s %s"
% (
self.__field_offsets__[key] + self.__file_offset__,
self.__field_offsets__[key],
key + ":",
val_str,
)
)
return dump
def dump_dict(self):
"""Returns a dictionary representation of the structure."""
dump_dict = {}
dump_dict["Structure"] = self.name
# Refer to the __set_format__ method for an explanation
# of the following construct.
for keys in self.__keys__:
for key in keys:
val = getattr(self, key)
if isinstance(val, (int, long)):
if key == "TimeDateStamp" or key == "dwTimeStamp":
try:
val = "0x%-8X [%s UTC]" % (
val,
time.asctime(time.gmtime(val)),
)
except ValueError:
val = "0x%-8X [INVALID TIME]" % val
else:
val = "".join(
chr(d) if chr(d) in string.printable else "\\x%02x" % d
for d in [ord(c) if not isinstance(c, int) else c for c in val]
)
dump_dict[key] = {
"FileOffset": self.__field_offsets__[key] + self.__file_offset__,
"Offset": self.__field_offsets__[key],
"Value": val,
}
return dump_dict
| (format, name=None, file_offset=None) |
35,781 | pefile | __init__ | null | def __init__(self, format, name=None, file_offset=None):
# Format is forced little endian, for big endian non Intel platforms
self.__format_str__ = "<"
self.__keys__ = []
self.__format_length__ = 0
self.__field_offsets__ = {}
self.__unpacked_data_elms__ = []
d = format[1]
# need a tuple to be hashable in set_format using lru cache
if not isinstance(d, tuple):
d = tuple(d)
(
self.__format_str__,
self.__unpacked_data_elms__,
self.__field_offsets__,
self.__keys__,
self.__format_length__,
) = set_format(d)
self.__all_zeroes__ = False
self.__file_offset__ = file_offset
if name:
self.name = name
else:
self.name = format[0]
| (self, format, name=None, file_offset=None) |
35,794 | pefile | StructureWithBitfields |
Extends Structure's functionality with support for bitfields such as:
('B:4,LowerHalf', 'B:4,UpperHalf')
To this end, two lists are maintained:
* self.__keys__ that contains compound fields, for example
('B,~LowerHalfUpperHalf'), and is used during packing/unpaking
* self.__keys_ext__ containing a separate key for each field (ex., LowerHalf,
UpperHalf) to simplify implementation of dump()
This way the implementation of unpacking/packing and dump() from Structure can be
reused.
In addition, we create a dictionary:
<comound_field_index_in_keys> -->
(data type, [ (subfield name, length in bits)+ ] )
that facilitates bitfield paking and unpacking.
With lru_cache() creating only once instance per format string, the memory
overhead is negligible.
| class StructureWithBitfields(Structure):
"""
Extends Structure's functionality with support for bitfields such as:
('B:4,LowerHalf', 'B:4,UpperHalf')
To this end, two lists are maintained:
* self.__keys__ that contains compound fields, for example
('B,~LowerHalfUpperHalf'), and is used during packing/unpaking
* self.__keys_ext__ containing a separate key for each field (ex., LowerHalf,
UpperHalf) to simplify implementation of dump()
This way the implementation of unpacking/packing and dump() from Structure can be
reused.
In addition, we create a dictionary:
<comound_field_index_in_keys> -->
(data type, [ (subfield name, length in bits)+ ] )
that facilitates bitfield paking and unpacking.
With lru_cache() creating only once instance per format string, the memory
overhead is negligible.
"""
BTF_NAME_IDX = 0
BTF_BITCNT_IDX = 1
CF_TYPE_IDX = 0
CF_SUBFLD_IDX = 1
def __init__(self, format, name=None, file_offset=None):
(
self.__format_str__,
self.__format_length__,
self.__field_offsets__,
self.__keys__,
self.__keys_ext__,
self.__compound_fields__,
) = set_bitfields_format(format)
# create our own unpacked_data_elms to ensure they are not shared among
# StructureWithBitfields instances with the same format string
self.__unpacked_data_elms__ = [None for i in range(self.__format_length__)]
self.__all_zeroes__ = False
self.__file_offset__ = file_offset
self.name = name if name != None else format[0]
def __unpack__(self, data):
# calling the original routine to deal with special cases/spurious data
# structures
super(StructureWithBitfields, self).__unpack__(data)
self._unpack_bitfield_attributes()
def __pack__(self):
self._pack_bitfield_attributes()
try:
data = super(StructureWithBitfields, self).__pack__()
finally:
self._unpack_bitfield_attributes()
return data
def dump(self, indentation=0):
tk = self.__keys__
self.__keys__ = self.__keys_ext__
try:
ret = super(StructureWithBitfields, self).dump(indentation)
finally:
self.__keys__ = tk
return ret
def dump_dict(self):
tk = self.__keys__
self.__keys__ = self.__keys_ext__
try:
ret = super(StructureWithBitfields, self).dump_dict()
finally:
self.__keys__ = tk
return ret
def _unpack_bitfield_attributes(self):
"""Replace compound attributes corresponding to bitfields with separate
sub-fields.
"""
for i in self.__compound_fields__.keys():
cf_name = self.__keys__[i][0]
cval = getattr(self, cf_name)
delattr(self, cf_name)
offst = 0
for sf in self.__compound_fields__[i][StructureWithBitfields.CF_SUBFLD_IDX]:
mask = (1 << sf[StructureWithBitfields.BTF_BITCNT_IDX]) - 1
mask <<= offst
setattr(
self,
sf[StructureWithBitfields.BTF_NAME_IDX],
(cval & mask) >> offst,
)
offst += sf[StructureWithBitfields.BTF_BITCNT_IDX]
def _pack_bitfield_attributes(self):
"""Pack attributes into a compound bitfield"""
for i in self.__compound_fields__.keys():
cf_name = self.__keys__[i][0]
offst, acc_val = 0, 0
for sf in self.__compound_fields__[i][StructureWithBitfields.CF_SUBFLD_IDX]:
mask = (1 << sf[StructureWithBitfields.BTF_BITCNT_IDX]) - 1
field_val = (
getattr(self, sf[StructureWithBitfields.BTF_NAME_IDX]) & mask
)
acc_val |= field_val << offst
offst += sf[StructureWithBitfields.BTF_BITCNT_IDX]
setattr(self, cf_name, acc_val)
| (format, name=None, file_offset=None) |
35,796 | pefile | __init__ | null | def __init__(self, format, name=None, file_offset=None):
(
self.__format_str__,
self.__format_length__,
self.__field_offsets__,
self.__keys__,
self.__keys_ext__,
self.__compound_fields__,
) = set_bitfields_format(format)
# create our own unpacked_data_elms to ensure they are not shared among
# StructureWithBitfields instances with the same format string
self.__unpacked_data_elms__ = [None for i in range(self.__format_length__)]
self.__all_zeroes__ = False
self.__file_offset__ = file_offset
self.name = name if name != None else format[0]
| (self, format, name=None, file_offset=None) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.