Dataset columns (one row per entry below):
index: int64 (0 to 731k)
package: string (2 to 98 chars)
name: string (1 to 76 chars)
docstring: string (0 to 281k chars)
code: string (4 to 1.07M chars)
signature: string (2 to 42.8k chars)
35,512
pefile
BoundImportDescData
Holds bound import descriptor data. This directory entry will provide information on the DLLs this PE file has been bound to (if bound at all). The structure will contain the name and timestamp of the DLL at the time of binding so that the loader can know whether it differs from the one currently present in the system and must, therefore, re-bind the PE's imports. struct: IMAGE_BOUND_IMPORT_DESCRIPTOR structure name: DLL name entries: list of entries (BoundImportRefData instances). The entries will exist if this DLL has forwarded symbols. If so, the destination DLL will have an entry in this list.
class BoundImportDescData(DataContainer):
    """Holds bound import descriptor data.

    This directory entry will provide information on the DLLs this PE file
    has been bound to (if bound at all). The structure will contain the name
    and timestamp of the DLL at the time of binding so that the loader can
    know whether it differs from the one currently present in the system and
    must, therefore, re-bind the PE's imports.

    struct:     IMAGE_BOUND_IMPORT_DESCRIPTOR structure
    name:       DLL name
    entries:    list of entries (BoundImportRefData instances). The entries
                will exist if this DLL has forwarded symbols. If so, the
                destination DLL will have an entry in this list.
    """
(**args)
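A short usage sketch (not part of the dataset) tying this container to the PE attribute named in the PE class docstring further down; "bound_module.dll" is a placeholder for any PE with a bound import directory:

import pefile

pe = pefile.PE("bound_module.dll")  # placeholder file name
# The attribute exists only when the directory is present and parses cleanly.
for desc in getattr(pe, "DIRECTORY_ENTRY_BOUND_IMPORT", []):
    # desc is a BoundImportDescData: struct, name, entries
    print(desc.name, hex(desc.struct.TimeDateStamp))
    for ref in desc.entries:  # BoundImportRefData, one per forwarded DLL
        print("  forwarder ref:", ref.name)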
35,514
pefile
BoundImportRefData
Holds bound import forwarder reference data. Contains the same information as the bound descriptor but for forwarded DLLs, if any. struct: IMAGE_BOUND_FORWARDER_REF structure name: DLL name
class BoundImportRefData(DataContainer):
    """Holds bound import forwarder reference data.

    Contains the same information as the bound descriptor but for forwarded
    DLLs, if any.

    struct:     IMAGE_BOUND_FORWARDER_REF structure
    name:       DLL name
    """
(**args)
35,545
pefile
DataContainer
Generic data container.
class DataContainer:
    """Generic data container."""

    def __init__(self, **args):
        bare_setattr = super(DataContainer, self).__setattr__
        for key, value in args.items():
            bare_setattr(key, value)
(**args)
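Since DataContainer only promotes keyword arguments to attributes, its behavior can be sketched in two lines (the values here are illustrative):

import pefile

c = pefile.DataContainer(name=b"KERNEL32.dll", ordinal=7)
print(c.name, c.ordinal)  # -> b'KERNEL32.dll' 7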
35,547
pefile
DebugData
Holds debug information. struct: IMAGE_DEBUG_DIRECTORY structure entries: list of entries (IMAGE_DEBUG_TYPE instances)
class DebugData(DataContainer):
    """Holds debug information.

    struct:     IMAGE_DEBUG_DIRECTORY structure
    entries:    list of entries (IMAGE_DEBUG_TYPE instances)
    """
(**args)
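A hedged sketch of reading these entries; "module.dll" is the placeholder name reused from the PE class docstring, and the attribute is only set when a debug directory exists:

import pefile

pe = pefile.PE("module.dll")
for dbg in getattr(pe, "DIRECTORY_ENTRY_DEBUG", []):
    # dbg.struct is the unpacked IMAGE_DEBUG_DIRECTORY entry
    print(dbg.struct.Type, dbg.struct.SizeOfData, hex(dbg.struct.PointerToRawData))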
35,549
pefile
Dump
Convenience class for dumping the PE information.
class Dump:
    """Convenience class for dumping the PE information."""

    def __init__(self):
        self.text = []

    def add_lines(self, txt, indent=0):
        """Adds a list of lines.

        The list can be indented with the optional argument 'indent'.
        """
        for line in txt:
            self.add_line(line, indent)

    def add_line(self, txt, indent=0):
        """Adds a line.

        The line can be indented with the optional argument 'indent'.
        """
        self.add(txt + "\n", indent)

    def add(self, txt, indent=0):
        """Adds some text, no newline will be appended.

        The text can be indented with the optional argument 'indent'.
        """
        self.text.append("{0}{1}".format(" " * indent, txt))

    def add_header(self, txt):
        """Adds a header element."""
        self.add_line("{0}{1}{0}\n".format("-" * 10, txt))

    def add_newline(self):
        """Adds a newline."""
        self.text.append("\n")

    def get_text(self):
        """Get the text in its current state."""
        return "".join("{0}".format(b) for b in self.text)
()
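A minimal, self-contained example of the Dump API as defined above:

import pefile

d = pefile.Dump()
d.add_header("Sections")  # renders as ----------Sections----------
d.add_lines(["Name: .text", "Name: .data"], indent=4)
d.add_newline()
print(d.get_text())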
35,550
pefile
__init__
null
def __init__(self):
    self.text = []
(self)
35,551
pefile
add
Adds some text, no newline will be appended. The text can be indented with the optional argument 'indent'.
def add(self, txt, indent=0):
    """Adds some text, no newline will be appended.

    The text can be indented with the optional argument 'indent'.
    """
    self.text.append("{0}{1}".format(" " * indent, txt))
(self, txt, indent=0)
35,552
pefile
add_header
Adds a header element.
def add_header(self, txt):
    """Adds a header element."""
    self.add_line("{0}{1}{0}\n".format("-" * 10, txt))
(self, txt)
35,553
pefile
add_line
Adds a line. The line can be indented with the optional argument 'indent'.
def add_line(self, txt, indent=0):
    """Adds a line.

    The line can be indented with the optional argument 'indent'.
    """
    self.add(txt + "\n", indent)
(self, txt, indent=0)
35,554
pefile
add_lines
Adds a list of lines. The list can be indented with the optional argument 'indent'.
def add_lines(self, txt, indent=0):
    """Adds a list of lines.

    The list can be indented with the optional argument 'indent'.
    """
    for line in txt:
        self.add_line(line, indent)
(self, txt, indent=0)
35,555
pefile
add_newline
Adds a newline.
def add_newline(self):
    """Adds a newline."""
    self.text.append("\n")
(self)
35,556
pefile
get_text
Get the text in its current state.
def get_text(self):
    """Get the text in its current state."""
    return "".join("{0}".format(b) for b in self.text)
(self)
35,557
pefile
DynamicRelocationData
Holds dynamic relocation information. struct: IMAGE_DYNAMIC_RELOCATION structure symbol: Symbol to which dynamic relocations must be applied relocations: List of dynamic relocations for this symbol (BaseRelocationData instances)
class DynamicRelocationData(DataContainer):
    """Holds dynamic relocation information.

    struct:       IMAGE_DYNAMIC_RELOCATION structure
    symbol:       Symbol to which dynamic relocations must be applied
    relocations:  List of dynamic relocations for this symbol
                  (BaseRelocationData instances)
    """
(**args)
35,559
pefile
ExceptionsDirEntryData
Holds the data related to SEH (and stack unwinding, in particular). struct: an instance of RUNTIME_FUNCTION unwindinfo: an instance of UNWIND_INFO
class ExceptionsDirEntryData(DataContainer):
    """Holds the data related to SEH (and stack unwinding, in particular).

    struct:       an instance of RUNTIME_FUNCTION
    unwindinfo:   an instance of UNWIND_INFO
    """
(**args)
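A sketch of walking these entries; per parse_exceptions_directory further down, they are produced for x64/IA64 images only, and "app_x64.exe" is a placeholder:

import pefile

pe = pefile.PE("app_x64.exe")
for entry in getattr(pe, "DIRECTORY_ENTRY_EXCEPTION", []):
    rf = entry.struct  # RUNTIME_FUNCTION: BeginAddress, EndAddress, UnwindData
    print(hex(rf.BeginAddress), hex(rf.EndAddress), entry.unwindinfo is not None)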
35,561
pefile
ExportData
Holds exported symbols' information. ordinal: ordinal of the symbol address: address of the symbol name: name of the symbol (None if the symbol is exported by ordinal only) forwarder: if the symbol is forwarded it will contain the name of the target symbol, None otherwise.
class ExportData(DataContainer):
    """Holds exported symbols' information.

    ordinal:    ordinal of the symbol
    address:    address of the symbol
    name:       name of the symbol (None if the symbol is
                exported by ordinal only)
    forwarder:  if the symbol is forwarded it will contain the name
                of the target symbol, None otherwise.
    """

    def __setattr__(self, name, val):
        # If the instance doesn't yet have an ordinal attribute
        # it's not fully initialized so can't do any of the following
        if (
            hasattr(self, "ordinal")
            and hasattr(self, "address")
            and hasattr(self, "forwarder")
            and hasattr(self, "name")
        ):
            if name == "ordinal":
                self.pe.set_word_at_offset(self.ordinal_offset, val)
            elif name == "address":
                self.pe.set_dword_at_offset(self.address_offset, val)
            elif name == "name":
                # Complain if the length of the new name is longer than the
                # existing one
                if len(val) > len(self.name):
                    raise PEFormatError(
                        "The export name provided is longer than the existing one."
                    )
                self.pe.set_bytes_at_offset(self.name_offset, val)
            elif name == "forwarder":
                # Complain if the length of the new name is longer than the
                # existing one
                if len(val) > len(self.forwarder):
                    raise PEFormatError(
                        "The forwarder name provided is longer than the existing one."
                    )
                self.pe.set_bytes_at_offset(self.forwarder_offset, val)

        self.__dict__[name] = val
(**args)
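A minimal sketch of listing exports through this container ("module.dll" is a placeholder; the attribute exists only for files with an export directory):

import pefile

pe = pefile.PE("module.dll")
if hasattr(pe, "DIRECTORY_ENTRY_EXPORT"):
    for exp in pe.DIRECTORY_ENTRY_EXPORT.symbols:
        # name is None for ordinal-only exports; forwarder is None
        # unless the symbol is forwarded to another DLL.
        print(exp.ordinal, exp.name, exp.forwarder)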
35,563
pefile
__setattr__
null
def __setattr__(self, name, val):
    # If the instance doesn't yet have an ordinal attribute
    # it's not fully initialized so can't do any of the following
    if (
        hasattr(self, "ordinal")
        and hasattr(self, "address")
        and hasattr(self, "forwarder")
        and hasattr(self, "name")
    ):
        if name == "ordinal":
            self.pe.set_word_at_offset(self.ordinal_offset, val)
        elif name == "address":
            self.pe.set_dword_at_offset(self.address_offset, val)
        elif name == "name":
            # Complain if the length of the new name is longer than the
            # existing one
            if len(val) > len(self.name):
                raise PEFormatError(
                    "The export name provided is longer than the existing one."
                )
            self.pe.set_bytes_at_offset(self.name_offset, val)
        elif name == "forwarder":
            # Complain if the length of the new name is longer than the
            # existing one
            if len(val) > len(self.forwarder):
                raise PEFormatError(
                    "The forwarder name provided is longer than the existing one."
                )
            self.pe.set_bytes_at_offset(self.forwarder_offset, val)

    self.__dict__[name] = val
(self, name, val)
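A hedged sketch of what this setter enables: renaming an export in place and writing the result out. The file names are placeholders, and the new name must be no longer than the existing one or PEFormatError is raised:

import pefile

pe = pefile.PE("module.dll")
exp = pe.DIRECTORY_ENTRY_EXPORT.symbols[0]
new_name = b"Patched"
# Only named exports can be renamed, and only to an equal or shorter name.
if exp.name is not None and len(new_name) <= len(exp.name):
    exp.name = new_name  # __setattr__ writes the bytes back into the image
pe.write("module_patched.dll")  # write() emits all modified structures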
35,564
pefile
ExportDirData
Holds export directory information. struct: IMAGE_EXPORT_DIRECTORY structure symbols: list of exported symbols (ExportData instances)
class ExportDirData(DataContainer):
    """Holds export directory information.

    struct:     IMAGE_EXPORT_DIRECTORY structure
    symbols:    list of exported symbols (ExportData instances)
    """
(**args)
35,566
pefile
ImportData
Holds imported symbol's information. ordinal: Ordinal of the symbol name: Name of the symbol bound: If the symbol is bound, this contains the address.
class ImportData(DataContainer):
    """Holds imported symbol's information.

    ordinal:    Ordinal of the symbol
    name:       Name of the symbol
    bound:      If the symbol is bound, this contains the address.
    """

    def __setattr__(self, name, val):
        # If the instance doesn't yet have an ordinal attribute
        # it's not fully initialized so can't do any of the following
        if (
            hasattr(self, "ordinal")
            and hasattr(self, "bound")
            and hasattr(self, "name")
        ):
            if name == "ordinal":
                if self.pe.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
                    ordinal_flag = IMAGE_ORDINAL_FLAG
                elif self.pe.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
                    ordinal_flag = IMAGE_ORDINAL_FLAG64
                # Set the ordinal and flag the entry as importing by ordinal
                self.struct_table.Ordinal = ordinal_flag | (val & 0xFFFF)
                self.struct_table.AddressOfData = self.struct_table.Ordinal
                self.struct_table.Function = self.struct_table.Ordinal
                self.struct_table.ForwarderString = self.struct_table.Ordinal
            elif name == "bound":
                if self.struct_iat is not None:
                    self.struct_iat.AddressOfData = val
                    self.struct_iat.Function = self.struct_iat.AddressOfData
                    self.struct_iat.ForwarderString = self.struct_iat.AddressOfData
            elif name == "address":
                self.struct_table.AddressOfData = val
                self.struct_table.Ordinal = self.struct_table.AddressOfData
                self.struct_table.Function = self.struct_table.AddressOfData
                self.struct_table.ForwarderString = self.struct_table.AddressOfData
            elif name == "name":
                # Make sure we reset the entry in case the import had been set
                # to import by ordinal
                if self.name_offset:
                    name_rva = self.pe.get_rva_from_offset(self.name_offset)
                    self.pe.set_dword_at_offset(
                        self.ordinal_offset, (0 << 31) | name_rva
                    )
                # Complain if the length of the new name is longer than the
                # existing one
                if len(val) > len(self.name):
                    raise PEFormatError(
                        "The import name provided is longer than the existing one."
                    )
                self.pe.set_bytes_at_offset(self.name_offset, val)

        self.__dict__[name] = val
(**args)
35,568
pefile
__setattr__
null
def __setattr__(self, name, val):
    # If the instance doesn't yet have an ordinal attribute
    # it's not fully initialized so can't do any of the following
    if (
        hasattr(self, "ordinal")
        and hasattr(self, "bound")
        and hasattr(self, "name")
    ):
        if name == "ordinal":
            if self.pe.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
                ordinal_flag = IMAGE_ORDINAL_FLAG
            elif self.pe.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
                ordinal_flag = IMAGE_ORDINAL_FLAG64
            # Set the ordinal and flag the entry as importing by ordinal
            self.struct_table.Ordinal = ordinal_flag | (val & 0xFFFF)
            self.struct_table.AddressOfData = self.struct_table.Ordinal
            self.struct_table.Function = self.struct_table.Ordinal
            self.struct_table.ForwarderString = self.struct_table.Ordinal
        elif name == "bound":
            if self.struct_iat is not None:
                self.struct_iat.AddressOfData = val
                self.struct_iat.Function = self.struct_iat.AddressOfData
                self.struct_iat.ForwarderString = self.struct_iat.AddressOfData
        elif name == "address":
            self.struct_table.AddressOfData = val
            self.struct_table.Ordinal = self.struct_table.AddressOfData
            self.struct_table.Function = self.struct_table.AddressOfData
            self.struct_table.ForwarderString = self.struct_table.AddressOfData
        elif name == "name":
            # Make sure we reset the entry in case the import had been set
            # to import by ordinal
            if self.name_offset:
                name_rva = self.pe.get_rva_from_offset(self.name_offset)
                self.pe.set_dword_at_offset(
                    self.ordinal_offset, (0 << 31) | name_rva
                )
            # Complain if the length of the new name is longer than the
            # existing one
            if len(val) > len(self.name):
                raise PEFormatError(
                    "The import name provided is longer than the existing one."
                )
            self.pe.set_bytes_at_offset(self.name_offset, val)

    self.__dict__[name] = val
(self, name, val)
35,569
pefile
ImportDescData
Holds import descriptor information. dll: name of the imported DLL imports: list of imported symbols (ImportData instances) struct: IMAGE_IMPORT_DESCRIPTOR structure
class ImportDescData(DataContainer):
    """Holds import descriptor information.

    dll:        name of the imported DLL
    imports:    list of imported symbols (ImportData instances)
    struct:     IMAGE_IMPORT_DESCRIPTOR structure
    """
(**args)
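A minimal sketch of iterating imports via this container and its ImportData children ("module.dll" is a placeholder):

import pefile

pe = pefile.PE("module.dll")
for desc in getattr(pe, "DIRECTORY_ENTRY_IMPORT", []):
    print(desc.dll)  # name of the imported DLL
    for imp in desc.imports:  # ImportData instances
        print("   ", imp.ordinal if imp.name is None else imp.name)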
35,571
pefile
LoadConfigData
Holds Load Config data. struct: IMAGE_LOAD_CONFIG_DIRECTORY structure name: DLL name dynamic_relocations: dynamic relocation information, if present
class LoadConfigData(DataContainer):
    """Holds Load Config data.

    struct:                 IMAGE_LOAD_CONFIG_DIRECTORY structure
    name:                   DLL name
    dynamic_relocations:    dynamic relocation information, if present
    """
(**args)
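A hedged sketch of reading the load config; "module.dll" is a placeholder, and fields such as SecurityCookie exist only if the directory is large enough to contain them (the parser truncates the format to the directory's declared size):

import pefile

pe = pefile.PE("module.dll")
lc = getattr(pe, "DIRECTORY_ENTRY_LOAD_CONFIG", None)
if lc is not None and hasattr(lc.struct, "SecurityCookie"):
    print(hex(lc.struct.SecurityCookie))
    for dyn in lc.dynamic_relocations or []:
        # DynamicRelocationData: a symbol plus its relocation blocks
        print(dyn.symbol, len(dyn.relocations))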
35,573
pefile
PE
A Portable Executable representation. This class provides access to most of the information in a PE file. It expects to be supplied the name of the file to load or PE data to process and an optional argument 'fast_load' (False by default) which controls whether to load all the directory information, which can be quite time consuming. pe = pefile.PE('module.dll') pe = pefile.PE(name='module.dll') would load 'module.dll' and process it. If the data is already available in a buffer the same can be achieved with: pe = pefile.PE(data=module_dll_data) A default for "fast_load" can be set module-wide, for instance with "pefile.fast_load = True"; subsequent instances will then skip loading the whole PE structure, and the "full_load" method can be used to parse the missing data at a later stage. Basic header information will be available in the attributes: DOS_HEADER NT_HEADERS FILE_HEADER OPTIONAL_HEADER All of them will contain among their attributes the members of the corresponding structures as defined in WINNT.H. The raw data corresponding to the header (from the beginning of the file up to the start of the first section) will be available in the instance's attribute 'header' as a string. The sections will be available as a list in the 'sections' attribute. Each entry will contain as attributes all the structure's members. Directory entries will be available as attributes (if they exist): (no other entries are processed at this point) DIRECTORY_ENTRY_IMPORT (list of ImportDescData instances) DIRECTORY_ENTRY_EXPORT (ExportDirData instance) DIRECTORY_ENTRY_RESOURCE (ResourceDirData instance) DIRECTORY_ENTRY_DEBUG (list of DebugData instances) DIRECTORY_ENTRY_BASERELOC (list of BaseRelocationData instances) DIRECTORY_ENTRY_TLS DIRECTORY_ENTRY_BOUND_IMPORT (list of BoundImportDescData instances) The following dictionary attributes provide ways of mapping different constants. They will accept the numeric value and return the string representation, and the opposite: feed in the string and get the numeric constant: DIRECTORY_ENTRY IMAGE_CHARACTERISTICS SECTION_CHARACTERISTICS DEBUG_TYPE SUBSYSTEM_TYPE MACHINE_TYPE RELOCATION_TYPE RESOURCE_TYPE LANG SUBLANG
class PE: """A Portable Executable representation. This class provides access to most of the information in a PE file. It expects to be supplied the name of the file to load or PE data to process and an optional argument 'fast_load' (False by default) which controls whether to load all the directories information, which can be quite time consuming. pe = pefile.PE('module.dll') pe = pefile.PE(name='module.dll') would load 'module.dll' and process it. If the data is already available in a buffer the same can be achieved with: pe = pefile.PE(data=module_dll_data) The "fast_load" can be set to a default by setting its value in the module itself by means, for instance, of a "pefile.fast_load = True". That will make all the subsequent instances not to load the whole PE structure. The "full_load" method can be used to parse the missing data at a later stage. Basic headers information will be available in the attributes: DOS_HEADER NT_HEADERS FILE_HEADER OPTIONAL_HEADER All of them will contain among their attributes the members of the corresponding structures as defined in WINNT.H The raw data corresponding to the header (from the beginning of the file up to the start of the first section) will be available in the instance's attribute 'header' as a string. The sections will be available as a list in the 'sections' attribute. Each entry will contain as attributes all the structure's members. Directory entries will be available as attributes (if they exist): (no other entries are processed at this point) DIRECTORY_ENTRY_IMPORT (list of ImportDescData instances) DIRECTORY_ENTRY_EXPORT (ExportDirData instance) DIRECTORY_ENTRY_RESOURCE (ResourceDirData instance) DIRECTORY_ENTRY_DEBUG (list of DebugData instances) DIRECTORY_ENTRY_BASERELOC (list of BaseRelocationData instances) DIRECTORY_ENTRY_TLS DIRECTORY_ENTRY_BOUND_IMPORT (list of BoundImportData instances) The following dictionary attributes provide ways of mapping different constants. They will accept the numeric value and return the string representation and the opposite, feed in the string and get the numeric constant: DIRECTORY_ENTRY IMAGE_CHARACTERISTICS SECTION_CHARACTERISTICS DEBUG_TYPE SUBSYSTEM_TYPE MACHINE_TYPE RELOCATION_TYPE RESOURCE_TYPE LANG SUBLANG """ # # Format specifications for PE structures. 
# __IMAGE_DOS_HEADER_format__ = ( "IMAGE_DOS_HEADER", ( "H,e_magic", "H,e_cblp", "H,e_cp", "H,e_crlc", "H,e_cparhdr", "H,e_minalloc", "H,e_maxalloc", "H,e_ss", "H,e_sp", "H,e_csum", "H,e_ip", "H,e_cs", "H,e_lfarlc", "H,e_ovno", "8s,e_res", "H,e_oemid", "H,e_oeminfo", "20s,e_res2", "I,e_lfanew", ), ) __IMAGE_FILE_HEADER_format__ = ( "IMAGE_FILE_HEADER", ( "H,Machine", "H,NumberOfSections", "I,TimeDateStamp", "I,PointerToSymbolTable", "I,NumberOfSymbols", "H,SizeOfOptionalHeader", "H,Characteristics", ), ) __IMAGE_DATA_DIRECTORY_format__ = ( "IMAGE_DATA_DIRECTORY", ("I,VirtualAddress", "I,Size"), ) __IMAGE_OPTIONAL_HEADER_format__ = ( "IMAGE_OPTIONAL_HEADER", ( "H,Magic", "B,MajorLinkerVersion", "B,MinorLinkerVersion", "I,SizeOfCode", "I,SizeOfInitializedData", "I,SizeOfUninitializedData", "I,AddressOfEntryPoint", "I,BaseOfCode", "I,BaseOfData", "I,ImageBase", "I,SectionAlignment", "I,FileAlignment", "H,MajorOperatingSystemVersion", "H,MinorOperatingSystemVersion", "H,MajorImageVersion", "H,MinorImageVersion", "H,MajorSubsystemVersion", "H,MinorSubsystemVersion", "I,Reserved1", "I,SizeOfImage", "I,SizeOfHeaders", "I,CheckSum", "H,Subsystem", "H,DllCharacteristics", "I,SizeOfStackReserve", "I,SizeOfStackCommit", "I,SizeOfHeapReserve", "I,SizeOfHeapCommit", "I,LoaderFlags", "I,NumberOfRvaAndSizes", ), ) __IMAGE_OPTIONAL_HEADER64_format__ = ( "IMAGE_OPTIONAL_HEADER64", ( "H,Magic", "B,MajorLinkerVersion", "B,MinorLinkerVersion", "I,SizeOfCode", "I,SizeOfInitializedData", "I,SizeOfUninitializedData", "I,AddressOfEntryPoint", "I,BaseOfCode", "Q,ImageBase", "I,SectionAlignment", "I,FileAlignment", "H,MajorOperatingSystemVersion", "H,MinorOperatingSystemVersion", "H,MajorImageVersion", "H,MinorImageVersion", "H,MajorSubsystemVersion", "H,MinorSubsystemVersion", "I,Reserved1", "I,SizeOfImage", "I,SizeOfHeaders", "I,CheckSum", "H,Subsystem", "H,DllCharacteristics", "Q,SizeOfStackReserve", "Q,SizeOfStackCommit", "Q,SizeOfHeapReserve", "Q,SizeOfHeapCommit", "I,LoaderFlags", "I,NumberOfRvaAndSizes", ), ) __IMAGE_NT_HEADERS_format__ = ("IMAGE_NT_HEADERS", ("I,Signature",)) __IMAGE_SECTION_HEADER_format__ = ( "IMAGE_SECTION_HEADER", ( "8s,Name", "I,Misc,Misc_PhysicalAddress,Misc_VirtualSize", "I,VirtualAddress", "I,SizeOfRawData", "I,PointerToRawData", "I,PointerToRelocations", "I,PointerToLinenumbers", "H,NumberOfRelocations", "H,NumberOfLinenumbers", "I,Characteristics", ), ) __IMAGE_DELAY_IMPORT_DESCRIPTOR_format__ = ( "IMAGE_DELAY_IMPORT_DESCRIPTOR", ( "I,grAttrs", "I,szName", "I,phmod", "I,pIAT", "I,pINT", "I,pBoundIAT", "I,pUnloadIAT", "I,dwTimeStamp", ), ) __IMAGE_IMPORT_DESCRIPTOR_format__ = ( "IMAGE_IMPORT_DESCRIPTOR", ( "I,OriginalFirstThunk,Characteristics", "I,TimeDateStamp", "I,ForwarderChain", "I,Name", "I,FirstThunk", ), ) __IMAGE_EXPORT_DIRECTORY_format__ = ( "IMAGE_EXPORT_DIRECTORY", ( "I,Characteristics", "I,TimeDateStamp", "H,MajorVersion", "H,MinorVersion", "I,Name", "I,Base", "I,NumberOfFunctions", "I,NumberOfNames", "I,AddressOfFunctions", "I,AddressOfNames", "I,AddressOfNameOrdinals", ), ) __IMAGE_RESOURCE_DIRECTORY_format__ = ( "IMAGE_RESOURCE_DIRECTORY", ( "I,Characteristics", "I,TimeDateStamp", "H,MajorVersion", "H,MinorVersion", "H,NumberOfNamedEntries", "H,NumberOfIdEntries", ), ) __IMAGE_RESOURCE_DIRECTORY_ENTRY_format__ = ( "IMAGE_RESOURCE_DIRECTORY_ENTRY", ("I,Name", "I,OffsetToData"), ) __IMAGE_RESOURCE_DATA_ENTRY_format__ = ( "IMAGE_RESOURCE_DATA_ENTRY", ("I,OffsetToData", "I,Size", "I,CodePage", "I,Reserved"), ) __VS_VERSIONINFO_format__ = ( "VS_VERSIONINFO", ("H,Length", 
"H,ValueLength", "H,Type"), ) __VS_FIXEDFILEINFO_format__ = ( "VS_FIXEDFILEINFO", ( "I,Signature", "I,StrucVersion", "I,FileVersionMS", "I,FileVersionLS", "I,ProductVersionMS", "I,ProductVersionLS", "I,FileFlagsMask", "I,FileFlags", "I,FileOS", "I,FileType", "I,FileSubtype", "I,FileDateMS", "I,FileDateLS", ), ) __StringFileInfo_format__ = ( "StringFileInfo", ("H,Length", "H,ValueLength", "H,Type"), ) __StringTable_format__ = ("StringTable", ("H,Length", "H,ValueLength", "H,Type")) __String_format__ = ("String", ("H,Length", "H,ValueLength", "H,Type")) __Var_format__ = ("Var", ("H,Length", "H,ValueLength", "H,Type")) __IMAGE_THUNK_DATA_format__ = ( "IMAGE_THUNK_DATA", ("I,ForwarderString,Function,Ordinal,AddressOfData",), ) __IMAGE_THUNK_DATA64_format__ = ( "IMAGE_THUNK_DATA", ("Q,ForwarderString,Function,Ordinal,AddressOfData",), ) __IMAGE_DEBUG_DIRECTORY_format__ = ( "IMAGE_DEBUG_DIRECTORY", ( "I,Characteristics", "I,TimeDateStamp", "H,MajorVersion", "H,MinorVersion", "I,Type", "I,SizeOfData", "I,AddressOfRawData", "I,PointerToRawData", ), ) __IMAGE_BASE_RELOCATION_format__ = ( "IMAGE_BASE_RELOCATION", ("I,VirtualAddress", "I,SizeOfBlock"), ) __IMAGE_BASE_RELOCATION_ENTRY_format__ = ( "IMAGE_BASE_RELOCATION_ENTRY", ("H,Data",), ) __IMAGE_IMPORT_CONTROL_TRANSFER_DYNAMIC_RELOCATION_format__ = ( "IMAGE_IMPORT_CONTROL_TRANSFER_DYNAMIC_RELOCATION", ("I:12,PageRelativeOffset", "I:1,IndirectCall", "I:19,IATIndex"), ) __IMAGE_INDIR_CONTROL_TRANSFER_DYNAMIC_RELOCATION_format__ = ( "IMAGE_INDIR_CONTROL_TRANSFER_DYNAMIC_RELOCATION", ( "I:12,PageRelativeOffset", "I:1,IndirectCall", "I:1,RexWPrefix", "I:1,CfgCheck", "I:1,Reserved", ), ) __IMAGE_SWITCHTABLE_BRANCH_DYNAMIC_RELOCATION_format__ = ( "IMAGE_SWITCHTABLE_BRANCH_DYNAMIC_RELOCATION", ("I:12,PageRelativeOffset", "I:4,RegisterNumber"), ) __IMAGE_TLS_DIRECTORY_format__ = ( "IMAGE_TLS_DIRECTORY", ( "I,StartAddressOfRawData", "I,EndAddressOfRawData", "I,AddressOfIndex", "I,AddressOfCallBacks", "I,SizeOfZeroFill", "I,Characteristics", ), ) __IMAGE_TLS_DIRECTORY64_format__ = ( "IMAGE_TLS_DIRECTORY", ( "Q,StartAddressOfRawData", "Q,EndAddressOfRawData", "Q,AddressOfIndex", "Q,AddressOfCallBacks", "I,SizeOfZeroFill", "I,Characteristics", ), ) __IMAGE_LOAD_CONFIG_DIRECTORY_format__ = ( "IMAGE_LOAD_CONFIG_DIRECTORY", ( "I,Size", "I,TimeDateStamp", "H,MajorVersion", "H,MinorVersion", "I,GlobalFlagsClear", "I,GlobalFlagsSet", "I,CriticalSectionDefaultTimeout", "I,DeCommitFreeBlockThreshold", "I,DeCommitTotalFreeThreshold", "I,LockPrefixTable", "I,MaximumAllocationSize", "I,VirtualMemoryThreshold", "I,ProcessHeapFlags", "I,ProcessAffinityMask", "H,CSDVersion", "H,Reserved1", "I,EditList", "I,SecurityCookie", "I,SEHandlerTable", "I,SEHandlerCount", "I,GuardCFCheckFunctionPointer", "I,GuardCFDispatchFunctionPointer", "I,GuardCFFunctionTable", "I,GuardCFFunctionCount", "I,GuardFlags", "H,CodeIntegrityFlags", "H,CodeIntegrityCatalog", "I,CodeIntegrityCatalogOffset", "I,CodeIntegrityReserved", "I,GuardAddressTakenIatEntryTable", "I,GuardAddressTakenIatEntryCount", "I,GuardLongJumpTargetTable", "I,GuardLongJumpTargetCount", "I,DynamicValueRelocTable", "I,CHPEMetadataPointer", "I,GuardRFFailureRoutine", "I,GuardRFFailureRoutineFunctionPointer", "I,DynamicValueRelocTableOffset", "H,DynamicValueRelocTableSection", "H,Reserved2", "I,GuardRFVerifyStackPointerFunctionPointer" "I,HotPatchTableOffset", "I,Reserved3", "I,EnclaveConfigurationPointer", ), ) __IMAGE_LOAD_CONFIG_DIRECTORY64_format__ = ( "IMAGE_LOAD_CONFIG_DIRECTORY", ( "I,Size", "I,TimeDateStamp", 
"H,MajorVersion", "H,MinorVersion", "I,GlobalFlagsClear", "I,GlobalFlagsSet", "I,CriticalSectionDefaultTimeout", "Q,DeCommitFreeBlockThreshold", "Q,DeCommitTotalFreeThreshold", "Q,LockPrefixTable", "Q,MaximumAllocationSize", "Q,VirtualMemoryThreshold", "Q,ProcessAffinityMask", "I,ProcessHeapFlags", "H,CSDVersion", "H,Reserved1", "Q,EditList", "Q,SecurityCookie", "Q,SEHandlerTable", "Q,SEHandlerCount", "Q,GuardCFCheckFunctionPointer", "Q,GuardCFDispatchFunctionPointer", "Q,GuardCFFunctionTable", "Q,GuardCFFunctionCount", "I,GuardFlags", "H,CodeIntegrityFlags", "H,CodeIntegrityCatalog", "I,CodeIntegrityCatalogOffset", "I,CodeIntegrityReserved", "Q,GuardAddressTakenIatEntryTable", "Q,GuardAddressTakenIatEntryCount", "Q,GuardLongJumpTargetTable", "Q,GuardLongJumpTargetCount", "Q,DynamicValueRelocTable", "Q,CHPEMetadataPointer", "Q,GuardRFFailureRoutine", "Q,GuardRFFailureRoutineFunctionPointer", "I,DynamicValueRelocTableOffset", "H,DynamicValueRelocTableSection", "H,Reserved2", "Q,GuardRFVerifyStackPointerFunctionPointer", "I,HotPatchTableOffset", "I,Reserved3", "Q,EnclaveConfigurationPointer", ), ) __IMAGE_DYNAMIC_RELOCATION_TABLE_format__ = ( "IMAGE_DYNAMIC_RELOCATION_TABLE", ("I,Version", "I,Size"), ) __IMAGE_DYNAMIC_RELOCATION_format__ = ( "IMAGE_DYNAMIC_RELOCATION", ("I,Symbol", "I,BaseRelocSize"), ) __IMAGE_DYNAMIC_RELOCATION64_format__ = ( "IMAGE_DYNAMIC_RELOCATION64", ("Q,Symbol", "I,BaseRelocSize"), ) __IMAGE_DYNAMIC_RELOCATION_V2_format__ = ( "IMAGE_DYNAMIC_RELOCATION_V2", ("I,HeaderSize", "I,FixupInfoSize", "I,Symbol", "I,SymbolGroup", "I,Flags"), ) __IMAGE_DYNAMIC_RELOCATION64_V2_format__ = ( "IMAGE_DYNAMIC_RELOCATION64_V2", ("I,HeaderSize", "I,FixupInfoSize", "Q,Symbol", "I,SymbolGroup", "I,Flags"), ) __IMAGE_BOUND_IMPORT_DESCRIPTOR_format__ = ( "IMAGE_BOUND_IMPORT_DESCRIPTOR", ("I,TimeDateStamp", "H,OffsetModuleName", "H,NumberOfModuleForwarderRefs"), ) __IMAGE_BOUND_FORWARDER_REF_format__ = ( "IMAGE_BOUND_FORWARDER_REF", ("I,TimeDateStamp", "H,OffsetModuleName", "H,Reserved"), ) __RUNTIME_FUNCTION_format__ = ( "RUNTIME_FUNCTION", ("I,BeginAddress", "I,EndAddress", "I,UnwindData"), ) def __init__( self, name=None, data=None, fast_load=None, max_symbol_exports=MAX_SYMBOL_EXPORT_COUNT, max_repeated_symbol=120, ): self.max_symbol_exports = max_symbol_exports self.max_repeated_symbol = max_repeated_symbol self._get_section_by_rva_last_used = None self.sections = [] self.__warnings = [] self.PE_TYPE = None if name is None and data is None: raise ValueError("Must supply either name or data") # This list will keep track of all the structures created. # That will allow for an easy iteration through the list # in order to save the modifications made self.__structures__ = [] self.__from_file = None # We only want to print these warnings once self.FileAlignment_Warning = False self.SectionAlignment_Warning = False # Count of total resource entries across nested tables self.__total_resource_entries_count = 0 # Sum of the size of all resource entries parsed, which should not # exceed the file size. 
self.__total_resource_bytes = 0 # The number of imports parsed in this file self.__total_import_symbols = 0 self.dynamic_relocation_format_by_symbol = { 3: PE.__IMAGE_IMPORT_CONTROL_TRANSFER_DYNAMIC_RELOCATION_format__, 4: PE.__IMAGE_INDIR_CONTROL_TRANSFER_DYNAMIC_RELOCATION_format__, 5: PE.__IMAGE_SWITCHTABLE_BRANCH_DYNAMIC_RELOCATION_format__, } fast_load = fast_load if fast_load is not None else globals()["fast_load"] try: self.__parse__(name, data, fast_load) except: self.close() raise def __enter__(self): return self def __exit__(self, type, value, traceback): self.close() def close(self): if ( self.__from_file is True and hasattr(self, "__data__") and ( (isinstance(mmap.mmap, type) and isinstance(self.__data__, mmap.mmap)) or "mmap.mmap" in repr(type(self.__data__)) ) ): self.__data__.close() del self.__data__ def __unpack_data__(self, format, data, file_offset): """Apply structure format to raw data. Returns an unpacked structure object if successful, None otherwise. """ structure = Structure(format, file_offset=file_offset) try: structure.__unpack__(data) except PEFormatError as err: self.__warnings.append( 'Corrupt header "{0}" at file offset {1}. Exception: {2}'.format( format[0], file_offset, err ) ) return None self.__structures__.append(structure) return structure def __unpack_data_with_bitfields__(self, format, data, file_offset): """Apply structure format to raw data. Returns an unpacked structure object if successful, None otherwise. """ structure = StructureWithBitfields(format, file_offset=file_offset) try: structure.__unpack__(data) except PEFormatError as err: self.__warnings.append( 'Corrupt header "{0}" at file offset {1}. Exception: {2}'.format( format[0], file_offset, err ) ) return None self.__structures__.append(structure) return structure def __parse__(self, fname, data, fast_load): """Parse a Portable Executable file. Loads a PE file, parsing all its structures and making them available through the instance's attributes. """ if fname is not None: stat = os.stat(fname) if stat.st_size == 0: raise PEFormatError("The file is empty") fd = None try: fd = open(fname, "rb") self.fileno = fd.fileno() if hasattr(mmap, "MAP_PRIVATE"): # Unix self.__data__ = mmap.mmap(self.fileno, 0, mmap.MAP_PRIVATE) else: # Windows self.__data__ = mmap.mmap(self.fileno, 0, access=mmap.ACCESS_READ) self.__from_file = True except IOError as excp: exception_msg = "{0}".format(excp) exception_msg = exception_msg and (": %s" % exception_msg) raise Exception( "Unable to access file '{0}'{1}".format(fname, exception_msg) ) finally: if fd is not None: fd.close() elif data is not None: self.__data__ = data self.__from_file = False # Resources should not overlap each other, so they should not exceed the # file size. self.__resource_size_limit_upperbounds = len(self.__data__) self.__resource_size_limit_reached = False if not fast_load: for byte, byte_count in Counter(bytearray(self.__data__)).items(): # Only report the cases where a byte makes up for more than 50% (if # zero) or 15% (if non-zero) of the file's contents. There are # legitimate PEs where 0x00 bytes are close to 50% of the whole # file's contents. if (byte == 0 and byte_count / len(self.__data__) > 0.5) or ( byte != 0 and byte_count / len(self.__data__) > 0.15 ): self.__warnings.append( ( "Byte 0x{0:02x} makes up {1:.4f}% of the file's contents." " This may indicate truncation / malformation." 
).format(byte, 100.0 * byte_count / len(self.__data__)) ) dos_header_data = self.__data__[:64] if len(dos_header_data) != 64: raise PEFormatError( "Unable to read the DOS Header, possibly a truncated file." ) self.DOS_HEADER = self.__unpack_data__( self.__IMAGE_DOS_HEADER_format__, dos_header_data, file_offset=0 ) if self.DOS_HEADER.e_magic == IMAGE_DOSZM_SIGNATURE: raise PEFormatError("Probably a ZM Executable (not a PE file).") if not self.DOS_HEADER or self.DOS_HEADER.e_magic != IMAGE_DOS_SIGNATURE: raise PEFormatError("DOS Header magic not found.") # OC Patch: # Check for sane value in e_lfanew # if self.DOS_HEADER.e_lfanew > len(self.__data__): raise PEFormatError("Invalid e_lfanew value, probably not a PE file") nt_headers_offset = self.DOS_HEADER.e_lfanew self.NT_HEADERS = self.__unpack_data__( self.__IMAGE_NT_HEADERS_format__, self.__data__[nt_headers_offset : nt_headers_offset + 8], file_offset=nt_headers_offset, ) # We better check the signature right here, before the file screws # around with sections: # OC Patch: # Some malware will cause the Signature value to not exist at all if not self.NT_HEADERS or not self.NT_HEADERS.Signature: raise PEFormatError("NT Headers not found.") if (0xFFFF & self.NT_HEADERS.Signature) == IMAGE_NE_SIGNATURE: raise PEFormatError("Invalid NT Headers signature. Probably a NE file") if (0xFFFF & self.NT_HEADERS.Signature) == IMAGE_LE_SIGNATURE: raise PEFormatError("Invalid NT Headers signature. Probably a LE file") if (0xFFFF & self.NT_HEADERS.Signature) == IMAGE_LX_SIGNATURE: raise PEFormatError("Invalid NT Headers signature. Probably a LX file") if (0xFFFF & self.NT_HEADERS.Signature) == IMAGE_TE_SIGNATURE: raise PEFormatError("Invalid NT Headers signature. Probably a TE file") if self.NT_HEADERS.Signature != IMAGE_NT_SIGNATURE: raise PEFormatError("Invalid NT Headers signature.") self.FILE_HEADER = self.__unpack_data__( self.__IMAGE_FILE_HEADER_format__, self.__data__[nt_headers_offset + 4 : nt_headers_offset + 4 + 32], file_offset=nt_headers_offset + 4, ) image_flags = retrieve_flags(IMAGE_CHARACTERISTICS, "IMAGE_FILE_") if not self.FILE_HEADER: raise PEFormatError("File Header missing") # Set the image's flags according the the Characteristics member set_flags(self.FILE_HEADER, self.FILE_HEADER.Characteristics, image_flags) optional_header_offset = nt_headers_offset + 4 + self.FILE_HEADER.sizeof() # Note: location of sections can be controlled from PE header: sections_offset = optional_header_offset + self.FILE_HEADER.SizeOfOptionalHeader self.OPTIONAL_HEADER = self.__unpack_data__( self.__IMAGE_OPTIONAL_HEADER_format__, # Read up to 256 bytes to allow creating a copy of too much data self.__data__[optional_header_offset : optional_header_offset + 256], file_offset=optional_header_offset, ) # According to solardesigner's findings for his # Tiny PE project, the optional header does not # need fields beyond "Subsystem" in order to be # loadable by the Windows loader (given that zeros # are acceptable values and the header is loaded # in a zeroed memory page) # If trying to parse a full Optional Header fails # we try to parse it again with some 0 padding # MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE = 69 if ( self.OPTIONAL_HEADER is None and len( self.__data__[optional_header_offset : optional_header_offset + 0x200] ) >= MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE ): # Add enough zeros to make up for the unused fields # padding_length = 128 # Create padding # padded_data = self.__data__[ optional_header_offset : optional_header_offset + 0x200 ] + (b"\0" * 
padding_length) self.OPTIONAL_HEADER = self.__unpack_data__( self.__IMAGE_OPTIONAL_HEADER_format__, padded_data, file_offset=optional_header_offset, ) # Check the Magic in the OPTIONAL_HEADER and set the PE file # type accordingly # if self.OPTIONAL_HEADER is not None: if self.OPTIONAL_HEADER.Magic == OPTIONAL_HEADER_MAGIC_PE: self.PE_TYPE = OPTIONAL_HEADER_MAGIC_PE elif self.OPTIONAL_HEADER.Magic == OPTIONAL_HEADER_MAGIC_PE_PLUS: self.PE_TYPE = OPTIONAL_HEADER_MAGIC_PE_PLUS self.OPTIONAL_HEADER = self.__unpack_data__( self.__IMAGE_OPTIONAL_HEADER64_format__, self.__data__[ optional_header_offset : optional_header_offset + 0x200 ], file_offset=optional_header_offset, ) # Again, as explained above, we try to parse # a reduced form of the Optional Header which # is still valid despite not including all # structure members # MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE = 69 + 4 if ( self.OPTIONAL_HEADER is None and len( self.__data__[ optional_header_offset : optional_header_offset + 0x200 ] ) >= MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE ): padding_length = 128 padded_data = self.__data__[ optional_header_offset : optional_header_offset + 0x200 ] + (b"\0" * padding_length) self.OPTIONAL_HEADER = self.__unpack_data__( self.__IMAGE_OPTIONAL_HEADER64_format__, padded_data, file_offset=optional_header_offset, ) if not self.FILE_HEADER: raise PEFormatError("File Header missing") # OC Patch: # Die gracefully if there is no OPTIONAL_HEADER field # 975440f5ad5e2e4a92c4d9a5f22f75c1 if self.OPTIONAL_HEADER is None: raise PEFormatError("No Optional Header found, invalid PE32 or PE32+ file.") if self.PE_TYPE is None: self.__warnings.append( "Invalid type 0x{0:04x} in Optional Header.".format( self.OPTIONAL_HEADER.Magic ) ) dll_characteristics_flags = retrieve_flags( DLL_CHARACTERISTICS, "IMAGE_DLLCHARACTERISTICS_" ) # Set the Dll Characteristics flags according to the DllCharacteristics member set_flags( self.OPTIONAL_HEADER, self.OPTIONAL_HEADER.DllCharacteristics, dll_characteristics_flags, ) self.OPTIONAL_HEADER.DATA_DIRECTORY = [] # offset = (optional_header_offset + self.FILE_HEADER.SizeOfOptionalHeader) offset = optional_header_offset + self.OPTIONAL_HEADER.sizeof() self.NT_HEADERS.FILE_HEADER = self.FILE_HEADER self.NT_HEADERS.OPTIONAL_HEADER = self.OPTIONAL_HEADER # Windows 8 specific check # if ( self.OPTIONAL_HEADER.AddressOfEntryPoint < self.OPTIONAL_HEADER.SizeOfHeaders ): self.__warnings.append( "AddressOfEntryPoint is smaller than SizeOfHeaders: this file " "cannot run under Windows 8." ) # The NumberOfRvaAndSizes is sanitized to stay within # reasonable limits so can be cast to an int # if self.OPTIONAL_HEADER.NumberOfRvaAndSizes > 0x10: self.__warnings.append( "Suspicious NumberOfRvaAndSizes in the Optional Header. 
" "Normal values are never larger than 0x10, the value is: 0x%x" % self.OPTIONAL_HEADER.NumberOfRvaAndSizes ) MAX_ASSUMED_VALID_NUMBER_OF_RVA_AND_SIZES = 0x100 for i in range(int(0x7FFFFFFF & self.OPTIONAL_HEADER.NumberOfRvaAndSizes)): if len(self.__data__) - offset == 0: break if len(self.__data__) - offset < 8: data = self.__data__[offset:] + b"\0" * 8 else: data = self.__data__[ offset : offset + MAX_ASSUMED_VALID_NUMBER_OF_RVA_AND_SIZES ] dir_entry = self.__unpack_data__( self.__IMAGE_DATA_DIRECTORY_format__, data, file_offset=offset ) if dir_entry is None: break # Would fail if missing an entry # 1d4937b2fa4d84ad1bce0309857e70ca offending sample try: dir_entry.name = DIRECTORY_ENTRY[i] except (KeyError, AttributeError): break offset += dir_entry.sizeof() self.OPTIONAL_HEADER.DATA_DIRECTORY.append(dir_entry) # If the offset goes outside the optional header, # the loop is broken, regardless of how many directories # NumberOfRvaAndSizes says there are # # We assume a normally sized optional header, hence that we do # a sizeof() instead of reading SizeOfOptionalHeader. # Then we add a default number of directories times their size, # if we go beyond that, we assume the number of directories # is wrong and stop processing if offset >= ( optional_header_offset + self.OPTIONAL_HEADER.sizeof() + 8 * 16 ): break offset = self.parse_sections(sections_offset) # OC Patch: # There could be a problem if there are no raw data sections # greater than 0 # fc91013eb72529da005110a3403541b6 example # Should this throw an exception in the minimum header offset # can't be found? # rawDataPointers = [ self.adjust_FileAlignment( s.PointerToRawData, self.OPTIONAL_HEADER.FileAlignment ) for s in self.sections if s.PointerToRawData > 0 ] if len(rawDataPointers) > 0: lowest_section_offset = min(rawDataPointers) else: lowest_section_offset = None if not lowest_section_offset or lowest_section_offset < offset: self.header = self.__data__[:offset] else: self.header = self.__data__[:lowest_section_offset] # Check whether the entry point lies within a section # if ( self.get_section_by_rva(self.OPTIONAL_HEADER.AddressOfEntryPoint) is not None ): # Check whether the entry point lies within the file # ep_offset = self.get_offset_from_rva( self.OPTIONAL_HEADER.AddressOfEntryPoint ) if ep_offset > len(self.__data__): self.__warnings.append( "Possibly corrupt file. AddressOfEntryPoint lies outside the" " file. AddressOfEntryPoint: 0x%x" % self.OPTIONAL_HEADER.AddressOfEntryPoint ) else: self.__warnings.append( "AddressOfEntryPoint lies outside the sections' boundaries. " "AddressOfEntryPoint: 0x%x" % self.OPTIONAL_HEADER.AddressOfEntryPoint ) if not fast_load: self.full_load() def parse_rich_header(self): """Parses the rich header see http://www.ntcore.com/files/richsign.htm for more information Structure: 00 DanS ^ checksum, checksum, checksum, checksum 10 Symbol RVA ^ checksum, Symbol size ^ checksum... ... XX Rich, checksum, 0, 0,... """ # Rich Header constants # DANS = 0x536E6144 # 'DanS' as dword RICH = 0x68636952 # 'Rich' as dword rich_index = self.__data__.find( b"Rich", 0x80, self.OPTIONAL_HEADER.get_file_offset() ) if rich_index == -1: return None # Read a block of data try: # The end of the structure is 8 bytes after the start of the Rich # string. rich_data = self.__data__[0x80 : rich_index + 8] # Make the data have length a multiple of 4, otherwise the # subsequent parsing will fail. It's not impossible that we retrieve # truncated data that it's not a multiple. 
rich_data = rich_data[: 4 * int(len(rich_data) / 4)] data = list( struct.unpack("<{0}I".format(int(len(rich_data) / 4)), rich_data) ) if RICH not in data: return None except PEFormatError: return None # get key, raw_data and clear_data key = struct.pack("<L", data[data.index(RICH) + 1]) result = {"key": key} raw_data = rich_data[: rich_data.find(b"Rich")] result["raw_data"] = raw_data ord_ = lambda c: ord(c) if not isinstance(c, int) else c clear_data = bytearray() for idx, val in enumerate(raw_data): clear_data.append((ord_(val) ^ ord_(key[idx % len(key)]))) result["clear_data"] = bytes(clear_data) # the checksum should be present 3 times after the DanS signature # checksum = data[1] if data[0] ^ checksum != DANS or data[2] != checksum or data[3] != checksum: return None result["checksum"] = checksum headervalues = [] result["values"] = headervalues data = data[4:] for i in range(int(len(data) / 2)): # Stop until the Rich footer signature is found # if data[2 * i] == RICH: # it should be followed by the checksum # if data[2 * i + 1] != checksum: self.__warnings.append("Rich Header is malformed") break # header values come by pairs # headervalues += [data[2 * i] ^ checksum, data[2 * i + 1] ^ checksum] return result def get_warnings(self): """Return the list of warnings. Non-critical problems found when parsing the PE file are appended to a list of warnings. This method returns the full list. """ return self.__warnings def show_warnings(self): """Print the list of warnings. Non-critical problems found when parsing the PE file are appended to a list of warnings. This method prints the full list to standard output. """ for warning in self.__warnings: print(">", warning) def full_load(self): """Process the data directories. This method will load the data directories which might not have been loaded if the "fast_load" option was used. """ self.parse_data_directories() class RichHeader: pass rich_header = self.parse_rich_header() if rich_header: self.RICH_HEADER = RichHeader() self.RICH_HEADER.checksum = rich_header.get("checksum", None) self.RICH_HEADER.values = rich_header.get("values", None) self.RICH_HEADER.key = rich_header.get("key", None) self.RICH_HEADER.raw_data = rich_header.get("raw_data", None) self.RICH_HEADER.clear_data = rich_header.get("clear_data", None) else: self.RICH_HEADER = None def write(self, filename=None): """Write the PE file. This function will process all headers and components of the PE file and include all changes made (by just assigning to attributes in the PE objects) and write the changes back to a file whose name is provided as an argument. The filename is optional, if not provided the data will be returned as a 'str' object. """ file_data = bytearray(self.__data__) for structure in self.__structures__: struct_data = bytearray(structure.__pack__()) offset = structure.get_file_offset() file_data[offset : offset + len(struct_data)] = struct_data if hasattr(self, "VS_VERSIONINFO"): if hasattr(self, "FileInfo"): for finfo in self.FileInfo: for entry in finfo: if hasattr(entry, "StringTable"): for st_entry in entry.StringTable: for key, entry in list(st_entry.entries.items()): # Offsets and lengths of the keys and values. # Each value in the dictionary is a tuple: # (key length, value length) # The lengths are in characters, not in bytes. 
offsets = st_entry.entries_offsets[key] lengths = st_entry.entries_lengths[key] if len(entry) > lengths[1]: l = entry.decode("utf-8").encode("utf-16le") file_data[ offsets[1] : offsets[1] + lengths[1] * 2 ] = l[: lengths[1] * 2] else: encoded_data = entry.decode("utf-8").encode( "utf-16le" ) file_data[ offsets[1] : offsets[1] + len(encoded_data) ] = encoded_data new_file_data = file_data if not filename: return new_file_data f = open(filename, "wb+") f.write(new_file_data) f.close() return def parse_sections(self, offset): """Fetch the PE file sections. The sections will be readily available in the "sections" attribute. Its attributes will contain all the section information plus "data" a buffer containing the section's data. The "Characteristics" member will be processed and attributes representing the section characteristics (with the 'IMAGE_SCN_' string trimmed from the constant's names) will be added to the section instance. Refer to the SectionStructure class for additional info. """ self.sections = [] MAX_SIMULTANEOUS_ERRORS = 3 for i in range(self.FILE_HEADER.NumberOfSections): if i >= MAX_SECTIONS: self.__warnings.append( "Too many sections {0} (>={1})".format( self.FILE_HEADER.NumberOfSections, MAX_SECTIONS ) ) break simultaneous_errors = 0 section = SectionStructure(self.__IMAGE_SECTION_HEADER_format__, pe=self) if not section: break section_offset = offset + section.sizeof() * i section.set_file_offset(section_offset) section_data = self.__data__[ section_offset : section_offset + section.sizeof() ] # Check if the section is all nulls and stop if so. if count_zeroes(section_data) == section.sizeof(): self.__warnings.append(f"Invalid section {i}. Contents are null-bytes.") break if not section_data: self.__warnings.append( f"Invalid section {i}. No data in the file (is this corkami's " "virtsectblXP?)." ) break section.__unpack__(section_data) self.__structures__.append(section) if section.SizeOfRawData + section.PointerToRawData > len(self.__data__): simultaneous_errors += 1 self.__warnings.append( f"Error parsing section {i}. SizeOfRawData is larger than file." ) if self.adjust_FileAlignment( section.PointerToRawData, self.OPTIONAL_HEADER.FileAlignment ) > len(self.__data__): simultaneous_errors += 1 self.__warnings.append( f"Error parsing section {i}. PointerToRawData points beyond " "the end of the file." ) if section.Misc_VirtualSize > 0x10000000: simultaneous_errors += 1 self.__warnings.append( f"Suspicious value found parsing section {i}. VirtualSize is " "extremely large > 256MiB." ) if ( self.adjust_SectionAlignment( section.VirtualAddress, self.OPTIONAL_HEADER.SectionAlignment, self.OPTIONAL_HEADER.FileAlignment, ) > 0x10000000 ): simultaneous_errors += 1 self.__warnings.append( f"Suspicious value found parsing section {i}. VirtualAddress is " "beyond 0x10000000." ) if ( self.OPTIONAL_HEADER.FileAlignment != 0 and (section.PointerToRawData % self.OPTIONAL_HEADER.FileAlignment) != 0 ): simultaneous_errors += 1 self.__warnings.append( ( f"Error parsing section {i}. " "PointerToRawData should normally be " "a multiple of FileAlignment, this might imply the file " "is trying to confuse tools which parse this incorrectly." ) ) if simultaneous_errors >= MAX_SIMULTANEOUS_ERRORS: self.__warnings.append("Too many warnings parsing section. 
Aborting.") break section_flags = retrieve_flags(SECTION_CHARACTERISTICS, "IMAGE_SCN_") # Set the section's flags according the the Characteristics member set_flags(section, section.Characteristics, section_flags) if section.__dict__.get( "IMAGE_SCN_MEM_WRITE", False ) and section.__dict__.get("IMAGE_SCN_MEM_EXECUTE", False): if section.Name.rstrip(b"\x00") == b"PAGE" and self.is_driver(): # Drivers can have a PAGE section with those flags set without # implying that it is malicious pass else: self.__warnings.append( f"Suspicious flags set for section {i}. " "Both IMAGE_SCN_MEM_WRITE and IMAGE_SCN_MEM_EXECUTE are set. " "This might indicate a packed executable." ) self.sections.append(section) # Sort the sections by their VirtualAddress and add a field to each of them # with the VirtualAddress of the next section. This will allow to check # for potentially overlapping sections in badly constructed PEs. self.sections.sort(key=lambda a: a.VirtualAddress) for idx, section in enumerate(self.sections): if idx == len(self.sections) - 1: section.next_section_virtual_address = None else: section.next_section_virtual_address = self.sections[ idx + 1 ].VirtualAddress if self.FILE_HEADER.NumberOfSections > 0 and self.sections: return ( offset + self.sections[0].sizeof() * self.FILE_HEADER.NumberOfSections ) else: return offset def parse_data_directories( self, directories=None, forwarded_exports_only=False, import_dllnames_only=False ): """Parse and process the PE file's data directories. If the optional argument 'directories' is given, only the directories at the specified indexes will be parsed. Such functionality allows parsing of areas of interest without the burden of having to parse all others. The directories can then be specified as: For export / import only: directories = [ 0, 1 ] or (more verbosely): directories = [ DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT'], DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_EXPORT'] ] If 'directories' is a list, the ones that are processed will be removed, leaving only the ones that are not present in the image. If `forwarded_exports_only` is True, the IMAGE_DIRECTORY_ENTRY_EXPORT attribute will only contain exports that are forwarded to another DLL. If `import_dllnames_only` is True, symbols will not be parsed from the import table and the entries in the IMAGE_DIRECTORY_ENTRY_IMPORT attribute will not have a `symbols` attribute. 
""" directory_parsing = ( ("IMAGE_DIRECTORY_ENTRY_IMPORT", self.parse_import_directory), ("IMAGE_DIRECTORY_ENTRY_EXPORT", self.parse_export_directory), ("IMAGE_DIRECTORY_ENTRY_RESOURCE", self.parse_resources_directory), ("IMAGE_DIRECTORY_ENTRY_DEBUG", self.parse_debug_directory), ("IMAGE_DIRECTORY_ENTRY_BASERELOC", self.parse_relocations_directory), ("IMAGE_DIRECTORY_ENTRY_TLS", self.parse_directory_tls), ("IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG", self.parse_directory_load_config), ("IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT", self.parse_delay_import_directory), ("IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT", self.parse_directory_bound_imports), ("IMAGE_DIRECTORY_ENTRY_EXCEPTION", self.parse_exceptions_directory), ) if directories is not None: if not isinstance(directories, (tuple, list)): directories = [directories] for entry in directory_parsing: # OC Patch: # try: directory_index = DIRECTORY_ENTRY[entry[0]] dir_entry = self.OPTIONAL_HEADER.DATA_DIRECTORY[directory_index] except IndexError: break # Only process all the directories if no individual ones have # been chosen # if directories is None or directory_index in directories: value = None if dir_entry.VirtualAddress: if ( forwarded_exports_only and entry[0] == "IMAGE_DIRECTORY_ENTRY_EXPORT" ): value = entry[1]( dir_entry.VirtualAddress, dir_entry.Size, forwarded_only=True, ) elif ( import_dllnames_only and entry[0] == "IMAGE_DIRECTORY_ENTRY_IMPORT" ): value = entry[1]( dir_entry.VirtualAddress, dir_entry.Size, dllnames_only=True ) else: try: value = entry[1](dir_entry.VirtualAddress, dir_entry.Size) except PEFormatError as excp: self.__warnings.append( f'Failed to process directoty "{entry[0]}": {excp}' ) if value: setattr(self, entry[0][6:], value) if ( (directories is not None) and isinstance(directories, list) and (entry[0] in directories) ): directories.remove(directory_index) def parse_exceptions_directory(self, rva, size): """Parses exception directory All the code related to handling exception directories is documented in https://auscitte.github.io/systems%20blog/Exception-Directory-pefile#implementation-details """ # "For x64 and Itanium platforms; the format is different for other platforms" if ( self.FILE_HEADER.Machine != MACHINE_TYPE["IMAGE_FILE_MACHINE_AMD64"] and self.FILE_HEADER.Machine != MACHINE_TYPE["IMAGE_FILE_MACHINE_IA64"] ): return None rf = Structure(self.__RUNTIME_FUNCTION_format__) rf_size = rf.sizeof() rva2rt = {} rt_funcs = [] rva2infos = {} for _ in range(size // rf_size): rf = self.__unpack_data__( self.__RUNTIME_FUNCTION_format__, self.get_data(rva, rf_size), file_offset=self.get_offset_from_rva(rva), ) if rf is None: break ui = None if (rf.UnwindData & 0x1) == 0: # according to "Improving Automated Analysis of Windows x64 Binaries", # if the lowest bit is set, (UnwindData & ~0x1) should point to the # chained RUNTIME_FUNCTION instead of UNWIND_INFO if ( rf.UnwindData in rva2infos ): # unwind info data structures can be shared among functions ui = rva2infos[rf.UnwindData] else: ui = UnwindInfo(file_offset=self.get_offset_from_rva(rf.UnwindData)) rva2infos[rf.UnwindData] = ui ws = ui.unpack_in_stages(self.get_data(rf.UnwindData, ui.sizeof())) if ws != None: self.__warnings.append(ws) break ws = ui.unpack_in_stages(self.get_data(rf.UnwindData, ui.sizeof())) if ws != None: self.__warnings.append(ws) break self.__structures__.append(ui) entry = ExceptionsDirEntryData(struct=rf, unwindinfo=ui) rt_funcs.append(entry) rva2rt[rf.BeginAddress] = entry rva += rf_size # each chained function entry holds a reference to the function 
first in chain for rf in rt_funcs: if rf.unwindinfo is None: # TODO: have not encountered such a binary yet; # in theory, (UnwindData & ~0x1) should point to the chained # RUNTIME_FUNCTION which could be used to locate the corresponding # ExceptionsDirEntryData and set_chained_function_entry() continue if not hasattr(rf.unwindinfo, "FunctionEntry"): continue if not rf.unwindinfo.FunctionEntry in rva2rt: self.__warnings.append( f"FunctionEntry of UNWIND_INFO at {rf.struct.get_file_offset():x}" " points to an entry that does not exist" ) continue try: rf.unwindinfo.set_chained_function_entry( rva2rt[rf.unwindinfo.FunctionEntry] ) except PEFormatError as excp: self.__warnings.append( "Failed parsing FunctionEntry of UNWIND_INFO at " f"{rf.struct.get_file_offset():x}: {excp}" ) continue return rt_funcs def parse_directory_bound_imports(self, rva, size): """""" bnd_descr = Structure(self.__IMAGE_BOUND_IMPORT_DESCRIPTOR_format__) bnd_descr_size = bnd_descr.sizeof() start = rva bound_imports = [] while True: bnd_descr = self.__unpack_data__( self.__IMAGE_BOUND_IMPORT_DESCRIPTOR_format__, self.__data__[rva : rva + bnd_descr_size], file_offset=rva, ) if bnd_descr is None: # If can't parse directory then silently return. # This directory does not necessarily have to be valid to # still have a valid PE file self.__warnings.append( "The Bound Imports directory exists but can't be parsed." ) return if bnd_descr.all_zeroes(): break rva += bnd_descr.sizeof() section = self.get_section_by_offset(rva) file_offset = self.get_offset_from_rva(rva) if section is None: safety_boundary = len(self.__data__) - file_offset sections_after_offset = [ s.PointerToRawData for s in self.sections if s.PointerToRawData > file_offset ] if sections_after_offset: # Find the first section starting at a later offset than that # specified by 'rva' first_section_after_offset = min(sections_after_offset) section = self.get_section_by_offset(first_section_after_offset) if section is not None: safety_boundary = section.PointerToRawData - file_offset else: safety_boundary = ( section.PointerToRawData + len(section.get_data()) - file_offset ) if not section: self.__warnings.append( ( "RVA of IMAGE_BOUND_IMPORT_DESCRIPTOR points " "to an invalid address: {0:x}" ).format(rva) ) return forwarder_refs = [] # 8 is the size of __IMAGE_BOUND_IMPORT_DESCRIPTOR_format__ for _ in range( min(bnd_descr.NumberOfModuleForwarderRefs, int(safety_boundary / 8)) ): # Both structures IMAGE_BOUND_IMPORT_DESCRIPTOR and # IMAGE_BOUND_FORWARDER_REF have the same size. bnd_frwd_ref = self.__unpack_data__( self.__IMAGE_BOUND_FORWARDER_REF_format__, self.__data__[rva : rva + bnd_descr_size], file_offset=rva, ) # OC Patch: if not bnd_frwd_ref: raise PEFormatError("IMAGE_BOUND_FORWARDER_REF cannot be read") rva += bnd_frwd_ref.sizeof() offset = start + bnd_frwd_ref.OffsetModuleName name_str = self.get_string_from_data( 0, self.__data__[offset : offset + MAX_STRING_LENGTH] ) # OffsetModuleName points to a DLL name. These shouldn't be too long. # Anything longer than a safety length of 128 will be taken to indicate # a corrupt entry and abort the processing of these entries. # Names shorter than 4 characters will be taken as invalid as well. 
if name_str: invalid_chars = [ c for c in bytearray(name_str) if chr(c) not in string.printable ] if len(name_str) > 256 or invalid_chars: break forwarder_refs.append( BoundImportRefData(struct=bnd_frwd_ref, name=name_str) ) offset = start + bnd_descr.OffsetModuleName name_str = self.get_string_from_data( 0, self.__data__[offset : offset + MAX_STRING_LENGTH] ) if name_str: invalid_chars = [ c for c in bytearray(name_str) if chr(c) not in string.printable ] if len(name_str) > 256 or invalid_chars: break if not name_str: break bound_imports.append( BoundImportDescData( struct=bnd_descr, name=name_str, entries=forwarder_refs ) ) return bound_imports def parse_directory_tls(self, rva, size): """""" # By default let's pretend the format is a 32-bit PE. It may help # produce some output for files where the Magic in the Optional Header # is incorrect. format = self.__IMAGE_TLS_DIRECTORY_format__ if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS: format = self.__IMAGE_TLS_DIRECTORY64_format__ try: tls_struct = self.__unpack_data__( format, self.get_data(rva, Structure(format).sizeof()), file_offset=self.get_offset_from_rva(rva), ) except PEFormatError: self.__warnings.append( "Invalid TLS information. Can't read " "data at RVA: 0x%x" % rva ) tls_struct = None if not tls_struct: return None return TlsData(struct=tls_struct) def parse_directory_load_config(self, rva, size): """""" if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE: load_config_dir_sz = self.get_dword_at_rva(rva) format = self.__IMAGE_LOAD_CONFIG_DIRECTORY_format__ elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS: load_config_dir_sz = self.get_dword_at_rva(rva) format = self.__IMAGE_LOAD_CONFIG_DIRECTORY64_format__ else: self.__warnings.append( "Don't know how to parse LOAD_CONFIG information for non-PE32/" "PE32+ file" ) return None # load config directory size can be less than represented by 'format' variable, # generate truncated format which correspond load config directory size fields_counter = 0 cumulative_sz = 0 for field in format[1]: fields_counter += 1 cumulative_sz += STRUCT_SIZEOF_TYPES[field.split(",")[0]] if cumulative_sz == load_config_dir_sz: break format = (format[0], format[1][:fields_counter]) load_config_struct = None try: load_config_struct = self.__unpack_data__( format, self.get_data(rva, Structure(format).sizeof()), file_offset=self.get_offset_from_rva(rva), ) except PEFormatError: self.__warnings.append( "Invalid LOAD_CONFIG information. 
Can't read " "data at RVA: 0x%x" % rva ) if not load_config_struct: return None dynamic_relocations = None if fields_counter > 35: dynamic_relocations = self.parse_dynamic_relocations( load_config_struct.DynamicValueRelocTableOffset, load_config_struct.DynamicValueRelocTableSection, ) return LoadConfigData( struct=load_config_struct, dynamic_relocations=dynamic_relocations ) def parse_dynamic_relocations( self, dynamic_value_reloc_table_offset, dynamic_value_reloc_table_section ): if not dynamic_value_reloc_table_offset: return None if not dynamic_value_reloc_table_section: return None if dynamic_value_reloc_table_section > len(self.sections): return None section = self.sections[dynamic_value_reloc_table_section - 1] rva = section.VirtualAddress + dynamic_value_reloc_table_offset image_dynamic_reloc_table_struct = None reloc_table_size = Structure( self.__IMAGE_DYNAMIC_RELOCATION_TABLE_format__ ).sizeof() try: image_dynamic_reloc_table_struct = self.__unpack_data__( self.__IMAGE_DYNAMIC_RELOCATION_TABLE_format__, self.get_data(rva, reloc_table_size), file_offset=self.get_offset_from_rva(rva), ) except PEFormatError: self.__warnings.append( "Invalid IMAGE_DYNAMIC_RELOCATION_TABLE information. Can't read " "data at RVA: 0x%x" % rva ) if image_dynamic_reloc_table_struct.Version != 1: self.__warnings.append( "No pasring available for IMAGE_DYNAMIC_RELOCATION_TABLE.Version = %d", image_dynamic_reloc_table_struct.Version, ) return None rva += reloc_table_size end = rva + image_dynamic_reloc_table_struct.Size dynamic_relocations = [] while rva < end: format = self.__IMAGE_DYNAMIC_RELOCATION_format__ if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS: format = self.__IMAGE_DYNAMIC_RELOCATION64_format__ rlc_size = Structure(format).sizeof() try: dynamic_rlc = self.__unpack_data__( format, self.get_data(rva, rlc_size), file_offset=self.get_offset_from_rva(rva), ) except PEFormatError: self.__warnings.append( "Invalid relocation information. Can't read " "data at RVA: 0x%x" % rva ) dynamic_rlc = None if not dynamic_rlc: break rva += rlc_size symbol = dynamic_rlc.Symbol size = dynamic_rlc.BaseRelocSize if 3 <= symbol <= 5: relocations = self.parse_image_base_relocation_list( rva, size, self.dynamic_relocation_format_by_symbol[symbol] ) dynamic_relocations.append( DynamicRelocationData( struct=dynamic_rlc, symbol=symbol, relocations=relocations ) ) if symbol > 5: relocations = self.parse_image_base_relocation_list(rva, size) dynamic_relocations.append( DynamicRelocationData( struct=dynamic_rlc, symbol=symbol, relocations=relocations ) ) rva += size return dynamic_relocations def parse_relocations_directory(self, rva, size): """""" return self.parse_image_base_relocation_list(rva, size) def parse_image_base_relocation_list(self, rva, size, fmt=None): rlc_size = Structure(self.__IMAGE_BASE_RELOCATION_format__).sizeof() end = rva + size relocations = [] while rva < end: # OC Patch: # Malware that has bad RVA entries will cause an error. # Just continue on after an exception # try: rlc = self.__unpack_data__( self.__IMAGE_BASE_RELOCATION_format__, self.get_data(rva, rlc_size), file_offset=self.get_offset_from_rva(rva), ) except PEFormatError: self.__warnings.append( "Invalid relocation information. Can't read " "data at RVA: 0x%x" % rva ) rlc = None if not rlc: break # rlc.VirtualAddress must lie within the Image if rlc.VirtualAddress > self.OPTIONAL_HEADER.SizeOfImage: self.__warnings.append( "Invalid relocation information. 
VirtualAddress outside" " of Image: 0x%x" % rlc.VirtualAddress ) break # rlc.SizeOfBlock must be less or equal than the size of the image # (It's a rather loose sanity test) if rlc.SizeOfBlock > self.OPTIONAL_HEADER.SizeOfImage: self.__warnings.append( "Invalid relocation information. SizeOfBlock too large" ": %d" % rlc.SizeOfBlock ) break if fmt is None: reloc_entries = self.parse_relocations( rva + rlc_size, rlc.VirtualAddress, rlc.SizeOfBlock - rlc_size ) else: reloc_entries = self.parse_relocations_with_format( rva + rlc_size, rlc.VirtualAddress, rlc.SizeOfBlock - rlc_size, fmt ) relocations.append(BaseRelocationData(struct=rlc, entries=reloc_entries)) if not rlc.SizeOfBlock: break rva += rlc.SizeOfBlock return relocations def parse_relocations(self, data_rva, rva, size): """""" try: data = self.get_data(data_rva, size) file_offset = self.get_offset_from_rva(data_rva) except PEFormatError: self.__warnings.append(f"Bad RVA in relocation data: 0x{data_rva:x}") return [] entries = [] offsets_and_type = set() for idx in range(int(len(data) / 2)): entry = self.__unpack_data__( self.__IMAGE_BASE_RELOCATION_ENTRY_format__, data[idx * 2 : (idx + 1) * 2], file_offset=file_offset, ) if not entry: break word = entry.Data reloc_type = word >> 12 reloc_offset = word & 0x0FFF if (reloc_offset, reloc_type) in offsets_and_type: self.__warnings.append( "Overlapping offsets in relocation data " "at RVA: 0x%x" % (reloc_offset + rva) ) break offsets_and_type.add((reloc_offset, reloc_type)) entries.append( RelocationData( struct=entry, type=reloc_type, base_rva=rva, rva=reloc_offset + rva ) ) file_offset += entry.sizeof() return entries def parse_relocations_with_format(self, data_rva, rva, size, format): """""" try: data = self.get_data(data_rva, size) file_offset = self.get_offset_from_rva(data_rva) except PEFormatError: self.__warnings.append(f"Bad RVA in relocation data: 0x{data_rva:x}") return [] entry_size = StructureWithBitfields(format).sizeof() entries = [] offsets = set() for idx in range(int(len(data) / entry_size)): entry = self.__unpack_data_with_bitfields__( format, data[idx * entry_size : (idx + 1) * entry_size], file_offset=file_offset, ) if not entry: break reloc_offset = entry.PageRelativeOffset if reloc_offset in offsets: self.__warnings.append( "Overlapping offsets in relocation data " "at RVA: 0x%x" % (reloc_offset + rva) ) break offsets.add(reloc_offset) entries.append( RelocationData(struct=entry, base_rva=rva, rva=reloc_offset + rva) ) file_offset += entry_size return entries def parse_debug_directory(self, rva, size): """""" dbg_size = Structure(self.__IMAGE_DEBUG_DIRECTORY_format__).sizeof() debug = [] for idx in range(int(size / dbg_size)): try: data = self.get_data(rva + dbg_size * idx, dbg_size) except PEFormatError: self.__warnings.append( "Invalid debug information. 
Can't read " "data at RVA: 0x%x" % rva ) return None dbg = self.__unpack_data__( self.__IMAGE_DEBUG_DIRECTORY_format__, data, file_offset=self.get_offset_from_rva(rva + dbg_size * idx), ) if not dbg: return None # apply structure according to DEBUG_TYPE # http://www.debuginfo.com/articles/debuginfomatch.html # dbg_type = None if dbg.Type == 1: # IMAGE_DEBUG_TYPE_COFF pass elif dbg.Type == 2: # if IMAGE_DEBUG_TYPE_CODEVIEW dbg_type_offset = dbg.PointerToRawData dbg_type_size = dbg.SizeOfData dbg_type_data = self.__data__[ dbg_type_offset : dbg_type_offset + dbg_type_size ] if dbg_type_data[:4] == b"RSDS": # pdb7.0 __CV_INFO_PDB70_format__ = [ "CV_INFO_PDB70", [ "4s,CvSignature", "I,Signature_Data1", # Signature is of GUID type "H,Signature_Data2", "H,Signature_Data3", "B,Signature_Data4", "B,Signature_Data5", "6s,Signature_Data6", "I,Age", ], ] pdbFileName_size = ( dbg_type_size - Structure(__CV_INFO_PDB70_format__).sizeof() ) # pdbFileName_size can be negative here, as seen in the malware # sample with hash # MD5: 7c297600870d026c014d42596bb9b5fd # SHA256: # 83f4e63681fcba8a9d7bbb1688c71981b1837446514a1773597e0192bba9fac3 # Checking for positive size here to ensure proper parsing. if pdbFileName_size > 0: __CV_INFO_PDB70_format__[1].append( "{0}s,PdbFileName".format(pdbFileName_size) ) dbg_type = self.__unpack_data__( __CV_INFO_PDB70_format__, dbg_type_data, dbg_type_offset ) if dbg_type is not None: dbg_type.Signature_Data6_value = struct.unpack( ">Q", b"\0\0" + dbg_type.Signature_Data6 )[0] dbg_type.Signature_String = ( str( uuid.UUID( fields=( dbg_type.Signature_Data1, dbg_type.Signature_Data2, dbg_type.Signature_Data3, dbg_type.Signature_Data4, dbg_type.Signature_Data5, dbg_type.Signature_Data6_value, ) ) ) .replace("-", "") .upper() + f"{dbg_type.Age:X}" ) elif dbg_type_data[:4] == b"NB10": # pdb2.0 __CV_INFO_PDB20_format__ = [ "CV_INFO_PDB20", [ "I,CvHeaderSignature", "I,CvHeaderOffset", "I,Signature", "I,Age", ], ] pdbFileName_size = ( dbg_type_size - Structure(__CV_INFO_PDB20_format__).sizeof() ) # As with the PDB 7.0 case, ensuring a positive size for # pdbFileName_size to ensure proper parsing. if pdbFileName_size > 0: # Add the last variable-length string field. __CV_INFO_PDB20_format__[1].append( "{0}s,PdbFileName".format(pdbFileName_size) ) dbg_type = self.__unpack_data__( __CV_INFO_PDB20_format__, dbg_type_data, dbg_type_offset ) elif dbg.Type == 4: # IMAGE_DEBUG_TYPE_MISC dbg_type_offset = dbg.PointerToRawData dbg_type_size = dbg.SizeOfData dbg_type_data = self.__data__[ dbg_type_offset : dbg_type_offset + dbg_type_size ] ___IMAGE_DEBUG_MISC_format__ = [ "IMAGE_DEBUG_MISC", [ "I,DataType", "I,Length", "B,Unicode", "B,Reserved1", "H,Reserved2", ], ] dbg_type_partial = self.__unpack_data__( ___IMAGE_DEBUG_MISC_format__, dbg_type_data, dbg_type_offset ) # Need to check that dbg_type_partial contains a correctly unpacked data # structure, as the malware sample with the following hash # MD5: 5e7d6707d693108de5a303045c17d95b # SHA256: # 5dd94a95025f3b6e3dd440d52f7c6d2964fdd1aa119e0ee92e38c7bf83829e5c # contains a value of None for dbg_type_partial after unpacking, # presumably due to a malformed DEBUG entry. if dbg_type_partial: # The Unicode bool should be set to 0 or 1. if dbg_type_partial.Unicode in (0, 1): data_size = ( dbg_type_size - Structure(___IMAGE_DEBUG_MISC_format__).sizeof() ) # As with the PDB case, ensuring a positive size for data_size # here to ensure proper parsing. 
                        if data_size > 0:
                            ___IMAGE_DEBUG_MISC_format__[1].append(
                                "{0}s,Data".format(data_size)
                            )

                            dbg_type = self.__unpack_data__(
                                ___IMAGE_DEBUG_MISC_format__,
                                dbg_type_data,
                                dbg_type_offset,
                            )

            debug.append(DebugData(struct=dbg, entry=dbg_type))

        return debug

    def parse_resources_directory(self, rva, size=0, base_rva=None, level=0, dirs=None):
        """Parse the resources directory.

        Given the RVA of the resources directory, it will process all
        its entries.

        The root will have the corresponding member of its structure,
        IMAGE_RESOURCE_DIRECTORY plus 'entries', a list of all the
        entries in the directory.

        Those entries will have, correspondingly, all the structure's
        members (IMAGE_RESOURCE_DIRECTORY_ENTRY) and an additional one,
        "directory", pointing to the IMAGE_RESOURCE_DIRECTORY structure
        representing upper layers of the tree. This one will also have
        an 'entries' attribute, pointing to the 3rd, and last, level:
        another directory with more entries. Those last entries will
        have a new attribute (either 'leaf' or 'data_entry' can be used
        to access it). This structure finally points to the resource
        data. All the members of this structure, IMAGE_RESOURCE_DATA_ENTRY,
        are available as its attributes.
        """

        # OC Patch:
        if dirs is None:
            dirs = [rva]

        if base_rva is None:
            base_rva = rva

        if level > MAX_RESOURCE_DEPTH:
            self.__warnings.append(
                "Error parsing the resources directory. "
                "Excessively nested table depth %d (>%s)" % (level, MAX_RESOURCE_DEPTH)
            )
            return None

        try:
            # If the RVA is invalid all would blow up. Some EXEs seem to be
            # specially nasty and have an invalid RVA.
            data = self.get_data(
                rva, Structure(self.__IMAGE_RESOURCE_DIRECTORY_format__).sizeof()
            )
        except PEFormatError:
            self.__warnings.append(
                "Invalid resources directory. Can't read "
                "directory data at RVA: 0x%x" % rva
            )
            return None

        # Get the resource directory structure, that is, the header
        # of the table preceding the actual entries
        #
        resource_dir = self.__unpack_data__(
            self.__IMAGE_RESOURCE_DIRECTORY_format__,
            data,
            file_offset=self.get_offset_from_rva(rva),
        )
        if resource_dir is None:
            # If we can't parse the resources directory, silently return.
            # This directory does not necessarily have to be valid to
            # still have a valid PE file
            self.__warnings.append(
                "Invalid resources directory. Can't parse "
                "directory data at RVA: 0x%x" % rva
            )
            return None

        dir_entries = []

        # Advance the RVA to the position immediately following the directory
        # table header and pointing to the first entry in the table
        #
        rva += resource_dir.sizeof()

        number_of_entries = (
            resource_dir.NumberOfNamedEntries + resource_dir.NumberOfIdEntries
        )

        # Set a hard limit on the maximum reasonable number of entries
        MAX_ALLOWED_ENTRIES = 4096
        if number_of_entries > MAX_ALLOWED_ENTRIES:
            self.__warnings.append(
                "Error parsing the resources directory. "
                "The directory contains %d entries (>%s)"
                % (number_of_entries, MAX_ALLOWED_ENTRIES)
            )
            return None

        self.__total_resource_entries_count += number_of_entries
        if self.__total_resource_entries_count > MAX_RESOURCE_ENTRIES:
            self.__warnings.append(
                "Error parsing the resources directory. "
                "The file contains at least %d entries (>%d)"
                % (self.__total_resource_entries_count, MAX_RESOURCE_ENTRIES)
            )
            return None

        strings_to_postprocess = []

        # Keep track of the last name's start and end offsets in order
        # to be able to detect overlapping entries that might suggest
        # an invalid or corrupt directory.
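        # Illustrative sketch of the overlap test performed below (hypothetical
        # offsets): with the previous name spanning (0x100, 0x10a), a new name
        # starting at 0x105 satisfies 0x100 < 0x105 and 0x10a >= 0x105, so it
        # is flagged as overlapping and the walk stops.
        #
        #     last_name_begin_end = (0x100, 0x10a)
        #     ustr_offset = 0x105
        #     overlaps = (last_name_begin_end[0] < ustr_offset
        #                 and last_name_begin_end[1] >= ustr_offset)  # True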
last_name_begin_end = None for idx in range(number_of_entries): if ( not self.__resource_size_limit_reached and self.__total_resource_bytes > self.__resource_size_limit_upperbounds ): self.__resource_size_limit_reached = True self.__warnings.append( "Resource size 0x%x exceeds file size 0x%x, overlapping " "resources found." % ( self.__total_resource_bytes, self.__resource_size_limit_upperbounds, ) ) res = self.parse_resource_entry(rva) if res is None: self.__warnings.append( "Error parsing the resources directory, " "Entry %d is invalid, RVA = 0x%x. " % (idx, rva) ) break entry_name = None entry_id = None name_is_string = (res.Name & 0x80000000) >> 31 if not name_is_string: entry_id = res.Name else: ustr_offset = base_rva + res.NameOffset try: entry_name = UnicodeStringWrapperPostProcessor(self, ustr_offset) self.__total_resource_bytes += entry_name.get_pascal_16_length() # If the last entry's offset points before the current's but its end # is past the current's beginning, assume the overlap indicates a # corrupt name. if last_name_begin_end and ( last_name_begin_end[0] < ustr_offset and last_name_begin_end[1] >= ustr_offset ): # Remove the previous overlapping entry as it's likely to be # already corrupt data. strings_to_postprocess.pop() self.__warnings.append( "Error parsing the resources directory, " "attempting to read entry name. " "Entry names overlap 0x%x" % (ustr_offset) ) break last_name_begin_end = ( ustr_offset, ustr_offset + entry_name.get_pascal_16_length(), ) strings_to_postprocess.append(entry_name) except PEFormatError: self.__warnings.append( "Error parsing the resources directory, " "attempting to read entry name. " "Can't read unicode string at offset 0x%x" % (ustr_offset) ) if res.DataIsDirectory: # OC Patch: # # One trick malware can do is to recursively reference # the next directory. This causes hilarity to ensue when # trying to parse everything correctly. # If the original RVA given to this function is equal to # the next one to parse, we assume that it's a trick. # Instead of raising a PEFormatError this would skip some # reasonable data so we just break. 
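                # Illustrative example of this guard (hypothetical RVAs): if
                # the directory at 0x100 has an entry whose target resolves
                # back to 0x100, that RVA is already in 'dirs' and the walk
                # stops instead of recursing forever.
                #
                #     dirs = [0x100]
                #     next_dir = 0x100  # base_rva + res.OffsetToDirectory
                #     next_dir in dirs  # True -> break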
# # 9ee4d0a0caf095314fd7041a3e4404dc is the offending sample if base_rva + res.OffsetToDirectory in dirs: break entry_directory = self.parse_resources_directory( base_rva + res.OffsetToDirectory, size - (rva - base_rva), # size base_rva=base_rva, level=level + 1, dirs=dirs + [base_rva + res.OffsetToDirectory], ) if not entry_directory: break # Ange Albertini's code to process resources' strings # strings = None if entry_id == RESOURCE_TYPE["RT_STRING"]: strings = {} for resource_id in entry_directory.entries: if hasattr(resource_id, "directory"): resource_strings = {} for resource_lang in resource_id.directory.entries: if ( resource_lang is None or not hasattr(resource_lang, "data") or resource_lang.data.struct.Size is None or resource_id.id is None ): continue string_entry_rva = ( resource_lang.data.struct.OffsetToData ) string_entry_size = resource_lang.data.struct.Size string_entry_id = resource_id.id # XXX: has been raising exceptions preventing parsing try: string_entry_data = self.get_data( string_entry_rva, string_entry_size ) except PEFormatError: self.__warnings.append( f"Error parsing resource of type RT_STRING at " f"RVA 0x{string_entry_rva:x} with " f"size {string_entry_size}" ) continue parse_strings( string_entry_data, (int(string_entry_id) - 1) * 16, resource_strings, ) strings.update(resource_strings) resource_id.directory.strings = resource_strings dir_entries.append( ResourceDirEntryData( struct=res, name=entry_name, id=entry_id, directory=entry_directory, ) ) else: struct = self.parse_resource_data_entry( base_rva + res.OffsetToDirectory ) if struct: self.__total_resource_bytes += struct.Size entry_data = ResourceDataEntryData( struct=struct, lang=res.Name & 0x3FF, sublang=res.Name >> 10 ) dir_entries.append( ResourceDirEntryData( struct=res, name=entry_name, id=entry_id, data=entry_data ) ) else: break # Check if this entry contains version information # if level == 0 and res.Id == RESOURCE_TYPE["RT_VERSION"]: if dir_entries: last_entry = dir_entries[-1] try: version_entries = last_entry.directory.entries[0].directory.entries except: # Maybe a malformed directory structure...? # Let's ignore it pass else: for version_entry in version_entries: rt_version_struct = None try: rt_version_struct = version_entry.data.struct except: # Maybe a malformed directory structure...? # Let's ignore it pass if rt_version_struct is not None: self.parse_version_information(rt_version_struct) rva += res.sizeof() string_rvas = [s.get_rva() for s in strings_to_postprocess] string_rvas.sort() for idx, s in enumerate(strings_to_postprocess): s.render_pascal_16() resource_directory_data = ResourceDirData( struct=resource_dir, entries=dir_entries ) return resource_directory_data def parse_resource_data_entry(self, rva): """Parse a data entry from the resources directory.""" try: # If the RVA is invalid all would blow up. Some EXEs seem to be # specially nasty and have an invalid RVA. 
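            # Example traversal of the three-level tree built above
            # (illustrative; 'pe' is an already-parsed PE instance and the
            # hasattr checks mirror the attribute layout described in
            # parse_resources_directory's docstring):
            #
            #     for rtype in pe.DIRECTORY_ENTRY_RESOURCE.entries:
            #         if not hasattr(rtype, "directory"):
            #             continue
            #         for rid in rtype.directory.entries:
            #             if not hasattr(rid, "directory"):
            #                 continue
            #             for rlang in rid.directory.entries:
            #                 if hasattr(rlang, "data"):
            #                     blob = pe.get_data(
            #                         rlang.data.struct.OffsetToData,
            #                         rlang.data.struct.Size)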
            data = self.get_data(
                rva, Structure(self.__IMAGE_RESOURCE_DATA_ENTRY_format__).sizeof()
            )
        except PEFormatError:
            self.__warnings.append(
                "Error parsing a resource directory data entry, "
                "the RVA is invalid: 0x%x" % (rva)
            )
            return None

        data_entry = self.__unpack_data__(
            self.__IMAGE_RESOURCE_DATA_ENTRY_format__,
            data,
            file_offset=self.get_offset_from_rva(rva),
        )

        return data_entry

    def parse_resource_entry(self, rva):
        """Parse a directory entry from the resources directory."""

        try:
            data = self.get_data(
                rva, Structure(self.__IMAGE_RESOURCE_DIRECTORY_ENTRY_format__).sizeof()
            )
        except PEFormatError:
            # A warning will be added by the caller if this method returns None
            return None

        resource = self.__unpack_data__(
            self.__IMAGE_RESOURCE_DIRECTORY_ENTRY_format__,
            data,
            file_offset=self.get_offset_from_rva(rva),
        )

        if resource is None:
            return None

        # resource.NameIsString = (resource.Name & 0x80000000L) >> 31
        resource.NameOffset = resource.Name & 0x7FFFFFFF

        resource.__pad = resource.Name & 0xFFFF0000
        resource.Id = resource.Name & 0x0000FFFF

        resource.DataIsDirectory = (resource.OffsetToData & 0x80000000) >> 31
        resource.OffsetToDirectory = resource.OffsetToData & 0x7FFFFFFF

        return resource

    def parse_version_information(self, version_struct):
        """Parse version information structure.

        The data will be made available in three attributes of the PE object.

        VS_VERSIONINFO will contain the first three fields of the main structure:
            'Length', 'ValueLength', and 'Type'

        VS_FIXEDFILEINFO will hold the rest of the fields, accessible as
        sub-attributes:
            'Signature', 'StrucVersion', 'FileVersionMS', 'FileVersionLS',
            'ProductVersionMS', 'ProductVersionLS', 'FileFlagsMask', 'FileFlags',
            'FileOS', 'FileType', 'FileSubtype', 'FileDateMS', 'FileDateLS'

        FileInfo is a list of all StringFileInfo and VarFileInfo structures.

        StringFileInfo structures will have a list as an attribute named
        'StringTable' containing all the StringTable structures. Each of those
        structures contains a dictionary 'entries' with all the key/value
        version information string pairs.

        VarFileInfo structures will have a list as an attribute named 'Var'
        containing all Var structures. Each Var structure will have a
        dictionary as an attribute named 'entry' which will contain the name
        and value of the Var.
        """

        # Retrieve the data for the version info resource
        #
        try:
            start_offset = self.get_offset_from_rva(version_struct.OffsetToData)
        except PEFormatError:
            self.__warnings.append(
                "Error parsing the version information, "
                "attempting to read OffsetToData with RVA: 0x{:x}".format(
                    version_struct.OffsetToData
                )
            )
            return

        raw_data = self.__data__[start_offset : start_offset + version_struct.Size]

        # Map the main structure and the subsequent string
        #
        versioninfo_struct = self.__unpack_data__(
            self.__VS_VERSIONINFO_format__, raw_data, file_offset=start_offset
        )

        if versioninfo_struct is None:
            return

        ustr_offset = version_struct.OffsetToData + versioninfo_struct.sizeof()
        section = self.get_section_by_rva(ustr_offset)
        section_end = None
        if section:
            section_end = section.VirtualAddress + max(
                section.SizeOfRawData, section.Misc_VirtualSize
            )

        versioninfo_string = None
        # These should return 'ascii' decoded data. For the case when it's
        # garbled data the ascii string will retain the byte values while
        # encoding it to something else may yield values that don't match the
        # file's contents.
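        # Example of consuming the results once parsing succeeds (illustrative;
        # 'pe' is a parsed PE instance, attribute names as documented above):
        #
        #     ffi = pe.VS_FIXEDFILEINFO[0]
        #     major = ffi.FileVersionMS >> 16
        #     minor = ffi.FileVersionMS & 0xFFFF
        #     for finfo in pe.FileInfo:
        #         for entry in finfo:
        #             if hasattr(entry, "StringTable"):
        #                 for st in entry.StringTable:
        #                     product = st.entries.get(b"ProductName")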
try: if section_end is None: versioninfo_string = self.get_string_u_at_rva( ustr_offset, encoding="ascii" ) else: versioninfo_string = self.get_string_u_at_rva( ustr_offset, (section_end - ustr_offset) >> 1, encoding="ascii" ) except PEFormatError: self.__warnings.append( "Error parsing the version information, " "attempting to read VS_VERSION_INFO string. Can't " "read unicode string at offset 0x%x" % (ustr_offset) ) if versioninfo_string is None: self.__warnings.append( "Invalid VS_VERSION_INFO block: {0}".format(versioninfo_string) ) return # If the structure does not contain the expected name, it's assumed to # be invalid if versioninfo_string is not None and versioninfo_string != b"VS_VERSION_INFO": if len(versioninfo_string) > 128: excerpt = versioninfo_string[:128].decode("ascii") # Don't leave any half-escaped characters excerpt = excerpt[: excerpt.rfind("\\u")] versioninfo_string = b( "{0} ... ({1} bytes, too long to display)".format( excerpt, len(versioninfo_string) ) ) self.__warnings.append( "Invalid VS_VERSION_INFO block: {0}".format( versioninfo_string.decode("ascii").replace("\00", "\\00") ) ) return if not hasattr(self, "VS_VERSIONINFO"): self.VS_VERSIONINFO = [] # Set the PE object's VS_VERSIONINFO to this one vinfo = versioninfo_struct # Set the Key attribute to point to the unicode string identifying the structure vinfo.Key = versioninfo_string self.VS_VERSIONINFO.append(vinfo) if versioninfo_string is None: versioninfo_string = "" # Process the fixed version information, get the offset and structure fixedfileinfo_offset = self.dword_align( versioninfo_struct.sizeof() + 2 * (len(versioninfo_string) + 1), version_struct.OffsetToData, ) fixedfileinfo_struct = self.__unpack_data__( self.__VS_FIXEDFILEINFO_format__, raw_data[fixedfileinfo_offset:], file_offset=start_offset + fixedfileinfo_offset, ) if not fixedfileinfo_struct: return if not hasattr(self, "VS_FIXEDFILEINFO"): self.VS_FIXEDFILEINFO = [] # Set the PE object's VS_FIXEDFILEINFO to this one self.VS_FIXEDFILEINFO.append(fixedfileinfo_struct) # Start parsing all the StringFileInfo and VarFileInfo structures # Get the first one stringfileinfo_offset = self.dword_align( fixedfileinfo_offset + fixedfileinfo_struct.sizeof(), version_struct.OffsetToData, ) # Set the PE object's attribute that will contain them all. if not hasattr(self, "FileInfo"): self.FileInfo = [] finfo = [] while True: # Process the StringFileInfo/VarFileInfo structure stringfileinfo_struct = self.__unpack_data__( self.__StringFileInfo_format__, raw_data[stringfileinfo_offset:], file_offset=start_offset + stringfileinfo_offset, ) if stringfileinfo_struct is None: self.__warnings.append( "Error parsing StringFileInfo/VarFileInfo struct" ) return None # Get the subsequent string defining the structure. ustr_offset = ( version_struct.OffsetToData + stringfileinfo_offset + versioninfo_struct.sizeof() ) try: stringfileinfo_string = self.get_string_u_at_rva(ustr_offset) except PEFormatError: self.__warnings.append( "Error parsing the version information, " "attempting to read StringFileInfo string. 
Can't " "read unicode string at offset 0x{0:x}".format(ustr_offset) ) break # Set such string as the Key attribute stringfileinfo_struct.Key = stringfileinfo_string # Append the structure to the PE object's list finfo.append(stringfileinfo_struct) # Parse a StringFileInfo entry if stringfileinfo_string and stringfileinfo_string.startswith( b"StringFileInfo" ): if ( stringfileinfo_struct.Type in (0, 1) and stringfileinfo_struct.ValueLength == 0 ): stringtable_offset = self.dword_align( stringfileinfo_offset + stringfileinfo_struct.sizeof() + 2 * (len(stringfileinfo_string) + 1), version_struct.OffsetToData, ) stringfileinfo_struct.StringTable = [] # Process the String Table entries while True: stringtable_struct = self.__unpack_data__( self.__StringTable_format__, raw_data[stringtable_offset:], file_offset=start_offset + stringtable_offset, ) if not stringtable_struct: break ustr_offset = ( version_struct.OffsetToData + stringtable_offset + stringtable_struct.sizeof() ) try: stringtable_string = self.get_string_u_at_rva(ustr_offset) except PEFormatError: self.__warnings.append( "Error parsing the version information, " "attempting to read StringTable string. Can't " "read unicode string at offset 0x{0:x}".format( ustr_offset ) ) break stringtable_struct.LangID = stringtable_string stringtable_struct.entries = {} stringtable_struct.entries_offsets = {} stringtable_struct.entries_lengths = {} stringfileinfo_struct.StringTable.append(stringtable_struct) entry_offset = self.dword_align( stringtable_offset + stringtable_struct.sizeof() + 2 * (len(stringtable_string) + 1), version_struct.OffsetToData, ) # Process all entries in the string table while ( entry_offset < stringtable_offset + stringtable_struct.Length ): string_struct = self.__unpack_data__( self.__String_format__, raw_data[entry_offset:], file_offset=start_offset + entry_offset, ) if not string_struct: break ustr_offset = ( version_struct.OffsetToData + entry_offset + string_struct.sizeof() ) try: key = self.get_string_u_at_rva(ustr_offset) key_offset = self.get_offset_from_rva(ustr_offset) except PEFormatError: self.__warnings.append( "Error parsing the version information, " "attempting to read StringTable Key string. Can't " "read unicode string at offset 0x{0:x}".format( ustr_offset ) ) break value_offset = self.dword_align( 2 * (len(key) + 1) + entry_offset + string_struct.sizeof(), version_struct.OffsetToData, ) ustr_offset = version_struct.OffsetToData + value_offset try: value = self.get_string_u_at_rva( ustr_offset, max_length=string_struct.ValueLength ) value_offset = self.get_offset_from_rva(ustr_offset) except PEFormatError: self.__warnings.append( "Error parsing the version information, attempting " "to read StringTable Value string. Can't read " f"unicode string at offset 0x{ustr_offset:x}" ) break if string_struct.Length == 0: entry_offset = ( stringtable_offset + stringtable_struct.Length ) else: entry_offset = self.dword_align( string_struct.Length + entry_offset, version_struct.OffsetToData, ) stringtable_struct.entries[key] = value stringtable_struct.entries_offsets[key] = ( key_offset, value_offset, ) stringtable_struct.entries_lengths[key] = ( len(key), len(value), ) new_stringtable_offset = self.dword_align( stringtable_struct.Length + stringtable_offset, version_struct.OffsetToData, ) # Check if the entry is crafted in a way that would lead # to an infinite loop and break if so. 
if new_stringtable_offset == stringtable_offset: break stringtable_offset = new_stringtable_offset if stringtable_offset >= stringfileinfo_struct.Length: break # Parse a VarFileInfo entry elif stringfileinfo_string and stringfileinfo_string.startswith( b"VarFileInfo" ): varfileinfo_struct = stringfileinfo_struct varfileinfo_struct.name = "VarFileInfo" if ( varfileinfo_struct.Type in (0, 1) and varfileinfo_struct.ValueLength == 0 ): var_offset = self.dword_align( stringfileinfo_offset + varfileinfo_struct.sizeof() + 2 * (len(stringfileinfo_string) + 1), version_struct.OffsetToData, ) varfileinfo_struct.Var = [] # Process all entries while True: var_struct = self.__unpack_data__( self.__Var_format__, raw_data[var_offset:], file_offset=start_offset + var_offset, ) if not var_struct: break ustr_offset = ( version_struct.OffsetToData + var_offset + var_struct.sizeof() ) try: var_string = self.get_string_u_at_rva(ustr_offset) except PEFormatError: self.__warnings.append( "Error parsing the version information, " "attempting to read VarFileInfo Var string. " "Can't read unicode string at offset 0x{0:x}".format( ustr_offset ) ) break if var_string is None: break varfileinfo_struct.Var.append(var_struct) varword_offset = self.dword_align( 2 * (len(var_string) + 1) + var_offset + var_struct.sizeof(), version_struct.OffsetToData, ) orig_varword_offset = varword_offset while ( varword_offset < orig_varword_offset + var_struct.ValueLength ): word1 = self.get_word_from_data( raw_data[varword_offset : varword_offset + 2], 0 ) word2 = self.get_word_from_data( raw_data[varword_offset + 2 : varword_offset + 4], 0 ) varword_offset += 4 if isinstance(word1, int) and isinstance(word2, int): var_struct.entry = { var_string: "0x%04x 0x%04x" % (word1, word2) } var_offset = self.dword_align( var_offset + var_struct.Length, version_struct.OffsetToData ) if var_offset <= var_offset + var_struct.Length: break # Increment and align the offset stringfileinfo_offset = self.dword_align( stringfileinfo_struct.Length + stringfileinfo_offset, version_struct.OffsetToData, ) # Check if all the StringFileInfo and VarFileInfo items have been processed if ( stringfileinfo_struct.Length == 0 or stringfileinfo_offset >= versioninfo_struct.Length ): break self.FileInfo.append(finfo) def parse_export_directory(self, rva, size, forwarded_only=False): """Parse the export directory. Given the RVA of the export directory, it will process all its entries. The exports will be made available as a list of ExportData instances in the 'IMAGE_DIRECTORY_ENTRY_EXPORT' PE attribute. """ try: export_dir = self.__unpack_data__( self.__IMAGE_EXPORT_DIRECTORY_format__, self.get_data( rva, Structure(self.__IMAGE_EXPORT_DIRECTORY_format__).sizeof() ), file_offset=self.get_offset_from_rva(rva), ) except PEFormatError: self.__warnings.append( "Error parsing export directory at RVA: 0x%x" % (rva) ) return if not export_dir: return # We keep track of the bytes left in the file and use it to set a upper # bound in the number of items that can be read from the different # arrays. 
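        # For instance (hypothetical sizes, illustrative only): in a 1 MiB file
        # where AddressOfNames maps to file offset 0xFFF00, only 0x100 bytes
        # remain, so a claimed NumberOfNames * 4 larger than that is clamped to
        # 0x100 by the min() calls below.
        #
        #     claimed = 0x10000 * 4                          # NumberOfNames * 4
        #     read_size = min(0x100000 - 0xFFF00, claimed)   # -> 0x100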
def length_until_eof(rva): return len(self.__data__) - self.get_offset_from_rva(rva) try: address_of_names = self.get_data( export_dir.AddressOfNames, min( length_until_eof(export_dir.AddressOfNames), export_dir.NumberOfNames * 4, ), ) address_of_name_ordinals = self.get_data( export_dir.AddressOfNameOrdinals, min( length_until_eof(export_dir.AddressOfNameOrdinals), export_dir.NumberOfNames * 4, ), ) address_of_functions = self.get_data( export_dir.AddressOfFunctions, min( length_until_eof(export_dir.AddressOfFunctions), export_dir.NumberOfFunctions * 4, ), ) except PEFormatError: self.__warnings.append( "Error parsing export directory at RVA: 0x%x" % (rva) ) return exports = [] max_failed_entries_before_giving_up = 10 section = self.get_section_by_rva(export_dir.AddressOfNames) # Overly generous upper bound safety_boundary = len(self.__data__) if section: safety_boundary = ( section.VirtualAddress + len(section.get_data()) - export_dir.AddressOfNames ) symbol_counts = collections.defaultdict(int) export_parsing_loop_completed_normally = True for i in range(min(export_dir.NumberOfNames, int(safety_boundary / 4))): symbol_ordinal = self.get_word_from_data(address_of_name_ordinals, i) if symbol_ordinal is not None and symbol_ordinal * 4 < len( address_of_functions ): symbol_address = self.get_dword_from_data( address_of_functions, symbol_ordinal ) else: # Corrupt? a bad pointer... we assume it's all # useless, no exports return None if symbol_address is None or symbol_address == 0: continue # If the function's RVA points within the export directory # it will point to a string with the forwarded symbol's string # instead of pointing the the function start address. if symbol_address >= rva and symbol_address < rva + size: forwarder_str = self.get_string_at_rva(symbol_address) try: forwarder_offset = self.get_offset_from_rva(symbol_address) except PEFormatError: continue else: if forwarded_only: continue forwarder_str = None forwarder_offset = None symbol_name_address = self.get_dword_from_data(address_of_names, i) if symbol_name_address is None: max_failed_entries_before_giving_up -= 1 if max_failed_entries_before_giving_up <= 0: export_parsing_loop_completed_normally = False break symbol_name = self.get_string_at_rva( symbol_name_address, MAX_SYMBOL_NAME_LENGTH ) if not is_valid_function_name(symbol_name, relax_allowed_characters=True): export_parsing_loop_completed_normally = False break try: symbol_name_offset = self.get_offset_from_rva(symbol_name_address) except PEFormatError: max_failed_entries_before_giving_up -= 1 if max_failed_entries_before_giving_up <= 0: export_parsing_loop_completed_normally = False break try: symbol_name_offset = self.get_offset_from_rva(symbol_name_address) except PEFormatError: max_failed_entries_before_giving_up -= 1 if max_failed_entries_before_giving_up <= 0: export_parsing_loop_completed_normally = False break continue # File 0b1d3d3664915577ab9a32188d29bbf3542b86c7b9ce333e245496c3018819f1 # was being parsed as potentially containing millions of exports. # Checking for duplicates addresses the issue. symbol_counts[(symbol_name, symbol_address)] += 1 if symbol_counts[(symbol_name, symbol_address)] > 10: self.__warnings.append( f"Export directory contains more than 10 repeated entries " f"({symbol_name}, {symbol_address:#02x}). Assuming corrupt." ) break elif len(symbol_counts) > self.max_symbol_exports: self.__warnings.append( "Export directory contains more than {} symbol entries. 
" "Assuming corrupt.".format(self.max_symbol_exports) ) break exports.append( ExportData( pe=self, ordinal=export_dir.Base + symbol_ordinal, ordinal_offset=self.get_offset_from_rva( export_dir.AddressOfNameOrdinals + 2 * i ), address=symbol_address, address_offset=self.get_offset_from_rva( export_dir.AddressOfFunctions + 4 * symbol_ordinal ), name=symbol_name, name_offset=symbol_name_offset, forwarder=forwarder_str, forwarder_offset=forwarder_offset, ) ) if not export_parsing_loop_completed_normally: self.__warnings.append( f"RVA AddressOfNames in the export directory points to an invalid " f"address: {export_dir.AddressOfNames:x}" ) ordinals = {exp.ordinal for exp in exports} max_failed_entries_before_giving_up = 10 section = self.get_section_by_rva(export_dir.AddressOfFunctions) # Overly generous upper bound safety_boundary = len(self.__data__) if section: safety_boundary = ( section.VirtualAddress + len(section.get_data()) - export_dir.AddressOfFunctions ) symbol_counts = collections.defaultdict(int) export_parsing_loop_completed_normally = True for idx in range(min(export_dir.NumberOfFunctions, int(safety_boundary / 4))): if not idx + export_dir.Base in ordinals: try: symbol_address = self.get_dword_from_data(address_of_functions, idx) except PEFormatError: symbol_address = None if symbol_address is None: max_failed_entries_before_giving_up -= 1 if max_failed_entries_before_giving_up <= 0: export_parsing_loop_completed_normally = False break if symbol_address == 0: continue # Checking for forwarder again. if ( symbol_address is not None and symbol_address >= rva and symbol_address < rva + size ): forwarder_str = self.get_string_at_rva(symbol_address) else: forwarder_str = None # File 0b1d3d3664915577ab9a32188d29bbf3542b86c7b9ce333e245496c3018819f1 # was being parsed as potentially containing millions of exports. # Checking for duplicates addresses the issue. symbol_counts[symbol_address] += 1 if symbol_counts[symbol_address] > self.max_repeated_symbol: # if most_common and most_common[0][1] > 10: self.__warnings.append( "Export directory contains more than {} repeated " "ordinal entries (0x{:x}). Assuming corrupt.".format( self.max_repeated_symbol, symbol_address ) ) break elif len(symbol_counts) > self.max_symbol_exports: self.__warnings.append( "Export directory contains more than " f"{self.max_symbol_exports} ordinal entries. Assuming corrupt." ) break exports.append( ExportData( ordinal=export_dir.Base + idx, address=symbol_address, name=None, forwarder=forwarder_str, ) ) if not export_parsing_loop_completed_normally: self.__warnings.append( "RVA AddressOfFunctions in the export directory points to an invalid " f"address: {export_dir.AddressOfFunctions:x}" ) return if not exports and export_dir.all_zeroes(): return None return ExportDirData( struct=export_dir, symbols=exports, name=self.get_string_at_rva(export_dir.Name), ) def dword_align(self, offset, base): return ((offset + base + 3) & 0xFFFFFFFC) - (base & 0xFFFFFFFC) def normalize_import_va(self, va): # Setup image range begin_of_image = self.OPTIONAL_HEADER.ImageBase end_of_image = self.OPTIONAL_HEADER.ImageBase + self.OPTIONAL_HEADER.SizeOfImage # Try to avoid bogus VAs, which are out of the image. # This also filters out entries that are zero if begin_of_image <= va and va < end_of_image: va -= begin_of_image return va def parse_delay_import_directory(self, rva, size): """Walk and parse the delay import directory.""" import_descs = [] error_count = 0 while True: try: # If the RVA is invalid all would blow up. 
Some PEs seem to be
                # specially nasty and have an invalid RVA.
                data = self.get_data(
                    rva,
                    Structure(self.__IMAGE_DELAY_IMPORT_DESCRIPTOR_format__).sizeof(),
                )
            except PEFormatError:
                self.__warnings.append(
                    "Error parsing the Delay import directory at RVA: 0x%x" % (rva)
                )
                break

            file_offset = self.get_offset_from_rva(rva)
            import_desc = self.__unpack_data__(
                self.__IMAGE_DELAY_IMPORT_DESCRIPTOR_format__,
                data,
                file_offset=file_offset,
            )

            # If the structure is all zeros, we reached the end of the list
            if not import_desc or import_desc.all_zeroes():
                break

            contains_addresses = False

            # Handle old import descriptors that have Virtual Addresses instead
            # of RVAs. This version of the import descriptor is created by old
            # Visual Studio versions (pre 6.0).
            # Can only be present in 32-bit binaries (no 64-bit compiler existed
            # at the time)
            # Sample: e8d3bff0c1a9a6955993f7a441121a2692261421e82fdfadaaded45d3bea9980
            if (
                import_desc.grAttrs == 0
                and self.FILE_HEADER.Machine == MACHINE_TYPE["IMAGE_FILE_MACHINE_I386"]
            ):
                import_desc.pBoundIAT = self.normalize_import_va(import_desc.pBoundIAT)
                import_desc.pIAT = self.normalize_import_va(import_desc.pIAT)
                import_desc.pINT = self.normalize_import_va(import_desc.pINT)
                import_desc.pUnloadIAT = self.normalize_import_va(
                    import_desc.pUnloadIAT
                )
                import_desc.phmod = self.normalize_import_va(import_desc.phmod)
                import_desc.szName = self.normalize_import_va(import_desc.szName)
                contains_addresses = True

            rva += import_desc.sizeof()

            # If the array of thunks is somewhere earlier than the import
            # descriptor we can set a maximum length for the array. Otherwise
            # just set a maximum length of the size of the file
            max_len = len(self.__data__) - file_offset
            if rva > import_desc.pINT or rva > import_desc.pIAT:
                max_len = max(rva - import_desc.pINT, rva - import_desc.pIAT)

            import_data = []
            try:
                import_data = self.parse_imports(
                    import_desc.pINT,
                    import_desc.pIAT,
                    None,
                    max_len,
                    contains_addresses,
                )
            except PEFormatError as excp:
                self.__warnings.append(
                    "Error parsing the Delay import directory. "
                    "Invalid import data at RVA: 0x{0:x} ({1})".format(rva, excp.value)
                )

            if error_count > 5:
                self.__warnings.append(
                    "Too many errors parsing the Delay import directory. "
                    "Invalid import data at RVA: 0x{0:x}".format(rva)
                )
                break

            if not import_data:
                error_count += 1
                continue

            if self.__total_import_symbols > MAX_IMPORT_SYMBOLS:
                self.__warnings.append(
                    "Error, too many imported symbols %d (>%s)"
                    % (self.__total_import_symbols, MAX_IMPORT_SYMBOLS)
                )
                break

            dll = self.get_string_at_rva(import_desc.szName, MAX_DLL_LENGTH)
            if not is_valid_dos_filename(dll):
                dll = b("*invalid*")

            if dll:
                for symbol in import_data:
                    if symbol.name is None:
                        funcname = ordlookup.ordLookup(dll.lower(), symbol.ordinal)
                        if funcname:
                            symbol.name = funcname
                import_descs.append(
                    ImportDescData(struct=import_desc, imports=import_data, dll=dll)
                )

        return import_descs

    def get_rich_header_hash(self, algorithm="md5"):
        if not hasattr(self, "RICH_HEADER") or self.RICH_HEADER is None:
            return ""

        if algorithm == "md5":
            return md5(self.RICH_HEADER.clear_data).hexdigest()
        elif algorithm == "sha1":
            return sha1(self.RICH_HEADER.clear_data).hexdigest()
        elif algorithm == "sha256":
            return sha256(self.RICH_HEADER.clear_data).hexdigest()
        elif algorithm == "sha512":
            return sha512(self.RICH_HEADER.clear_data).hexdigest()

        raise Exception("Invalid hashing algorithm specified")

    def get_imphash(self):
        """Return the imphash of the PE file.
        Creates a hash based on imported symbol names and their specific order
        within the executable:
        https://www.mandiant.com/resources/blog/tracking-malware-import-hashing

        Returns:
            the hexdigest of the MD5 hash of the imported symbols.
        """

        impstrs = []
        exts = ["ocx", "sys", "dll"]
        if not hasattr(self, "DIRECTORY_ENTRY_IMPORT"):
            return ""
        for entry in self.DIRECTORY_ENTRY_IMPORT:
            if isinstance(entry.dll, bytes):
                libname = entry.dll.decode().lower()
            else:
                libname = entry.dll.lower()
            parts = libname.rsplit(".", 1)
            if len(parts) > 1 and parts[1] in exts:
                libname = parts[0]

            entry_dll_lower = entry.dll.lower()
            for imp in entry.imports:
                funcname = None
                if not imp.name:
                    funcname = ordlookup.ordLookup(
                        entry_dll_lower, imp.ordinal, make_name=True
                    )
                    if not funcname:
                        raise PEFormatError(
                            f"Unable to look up ordinal {entry.dll}:{imp.ordinal:04x}"
                        )
                else:
                    funcname = imp.name

                if not funcname:
                    continue

                if isinstance(funcname, bytes):
                    funcname = funcname.decode()
                impstrs.append("%s.%s" % (libname.lower(), funcname.lower()))

        return md5(",".join(impstrs).encode()).hexdigest()

    def get_exphash(self):
        """Return the exphash of the PE file.

        Similar to imphash, but based on exported symbol names and their
        specific order.

        Returns:
            the hexdigest of the SHA256 hash of the exported symbols.
        """

        if not hasattr(self, "DIRECTORY_ENTRY_EXPORT"):
            return ""

        if not hasattr(self.DIRECTORY_ENTRY_EXPORT, "symbols"):
            return ""

        export_list = [
            e.name.decode().lower()
            for e in self.DIRECTORY_ENTRY_EXPORT.symbols
            if e and e.name is not None
        ]
        if len(export_list) == 0:
            return ""

        return sha256(",".join(export_list).encode()).hexdigest()

    def parse_import_directory(self, rva, size, dllnames_only=False):
        """Walk and parse the import directory."""

        import_descs = []
        error_count = 0

        image_import_descriptor_size = Structure(
            self.__IMAGE_IMPORT_DESCRIPTOR_format__
        ).sizeof()

        while True:
            try:
                # If the RVA is invalid all would blow up. Some EXEs seem to be
                # specially nasty and have an invalid RVA.
                data = self.get_data(rva, image_import_descriptor_size)
            except PEFormatError:
                self.__warnings.append(
                    f"Error parsing the import directory at RVA: 0x{rva:x}"
                )
                break

            file_offset = self.get_offset_from_rva(rva)
            import_desc = self.__unpack_data__(
                self.__IMAGE_IMPORT_DESCRIPTOR_format__, data, file_offset=file_offset
            )

            # If the structure is all zeros, we reached the end of the list
            if not import_desc or import_desc.all_zeroes():
                break

            rva += import_desc.sizeof()

            # If the array of thunks is somewhere earlier than the import
            # descriptor we can set a maximum length for the array. Otherwise
            # just set a maximum length of the size of the file
            max_len = len(self.__data__) - file_offset
            if rva > import_desc.OriginalFirstThunk or rva > import_desc.FirstThunk:
                max_len = max(
                    rva - import_desc.OriginalFirstThunk, rva - import_desc.FirstThunk
                )

            import_data = []
            if not dllnames_only:
                try:
                    import_data = self.parse_imports(
                        import_desc.OriginalFirstThunk,
                        import_desc.FirstThunk,
                        import_desc.ForwarderChain,
                        max_length=max_len,
                    )
                except PEFormatError as e:
                    self.__warnings.append(
                        "Error parsing the import directory. "
                        f"Invalid Import data at RVA: 0x{rva:x} ({e.value})"
                    )

                if error_count > 5:
                    self.__warnings.append(
                        "Too many errors parsing the import directory. 
" f"Invalid import data at RVA: 0x{rva:x}" ) break if not import_data: error_count += 1 # TODO: do not continue here continue dll = self.get_string_at_rva(import_desc.Name, MAX_DLL_LENGTH) if not is_valid_dos_filename(dll): dll = b("*invalid*") if dll: for symbol in import_data: if symbol.name is None: funcname = ordlookup.ordLookup(dll.lower(), symbol.ordinal) if funcname: symbol.name = funcname import_descs.append( ImportDescData(struct=import_desc, imports=import_data, dll=dll) ) if not dllnames_only: suspicious_imports = set(["LoadLibrary", "GetProcAddress"]) suspicious_imports_count = 0 total_symbols = 0 for imp_dll in import_descs: for symbol in imp_dll.imports: for suspicious_symbol in suspicious_imports: if not symbol or not symbol.name: continue name = symbol.name if type(symbol.name) == bytes: name = symbol.name.decode("utf-8") if name.startswith(suspicious_symbol): suspicious_imports_count += 1 break total_symbols += 1 if ( suspicious_imports_count == len(suspicious_imports) and total_symbols < 20 ): self.__warnings.append( "Imported symbols contain entries typical of packed executables." ) return import_descs def parse_imports( self, original_first_thunk, first_thunk, forwarder_chain, max_length=None, contains_addresses=False, ): """Parse the imported symbols. It will fill a list, which will be available as the dictionary attribute "imports". Its keys will be the DLL names and the values of all the symbols imported from that object. """ imported_symbols = [] # Import Lookup Table. Contains ordinals or pointers to strings. ilt = self.get_import_table( original_first_thunk, max_length, contains_addresses ) # Import Address Table. May have identical content to ILT if # PE file is not bound. It will contain the address of the # imported symbols once the binary is loaded or if it is already # bound. iat = self.get_import_table(first_thunk, max_length, contains_addresses) # OC Patch: # Would crash if IAT or ILT had None type if (not iat or len(iat) == 0) and (not ilt or len(ilt) == 0): self.__warnings.append( "Damaged Import Table information. " "ILT and/or IAT appear to be broken. " f"OriginalFirstThunk: 0x{original_first_thunk:x} " f"FirstThunk: 0x{first_thunk:x}" ) return [] table = None if ilt: table = ilt elif iat: table = iat else: return None imp_offset = 4 address_mask = 0x7FFFFFFF if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE: ordinal_flag = IMAGE_ORDINAL_FLAG elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS: ordinal_flag = IMAGE_ORDINAL_FLAG64 imp_offset = 8 address_mask = 0x7FFFFFFFFFFFFFFF else: # Some PEs may have an invalid value in the Magic field of the # Optional Header. Just in case the remaining file is parseable # let's pretend it's a 32bit PE32 by default. 
ordinal_flag = IMAGE_ORDINAL_FLAG num_invalid = 0 for idx, tbl_entry in enumerate(table): imp_ord = None imp_hint = None imp_name = None name_offset = None hint_name_table_rva = None import_by_ordinal = False # declare it here first if tbl_entry.AddressOfData: # If imported by ordinal, we will append the ordinal number # if tbl_entry.AddressOfData & ordinal_flag: import_by_ordinal = True imp_ord = tbl_entry.AddressOfData & 0xFFFF imp_name = None name_offset = None else: import_by_ordinal = False try: hint_name_table_rva = tbl_entry.AddressOfData & address_mask data = self.get_data(hint_name_table_rva, 2) # Get the Hint imp_hint = self.get_word_from_data(data, 0) imp_name = self.get_string_at_rva( tbl_entry.AddressOfData + 2, MAX_IMPORT_NAME_LENGTH ) if not is_valid_function_name(imp_name): imp_name = b("*invalid*") name_offset = self.get_offset_from_rva( tbl_entry.AddressOfData + 2 ) except PEFormatError: pass # by nriva: we want the ThunkRVA and ThunkOffset thunk_offset = tbl_entry.get_file_offset() thunk_rva = self.get_rva_from_offset(thunk_offset) imp_address = ( first_thunk + self.OPTIONAL_HEADER.ImageBase + idx * imp_offset ) struct_iat = None try: if iat and ilt and ilt[idx].AddressOfData != iat[idx].AddressOfData: imp_bound = iat[idx].AddressOfData struct_iat = iat[idx] else: imp_bound = None except IndexError: imp_bound = None # The file with hashes: # # MD5: bfe97192e8107d52dd7b4010d12b2924 # SHA256: 3d22f8b001423cb460811ab4f4789f277b35838d45c62ec0454c877e7c82c7f5 # # has an invalid table built in a way that it's parseable but contains # invalid entries that lead pefile to take extremely long amounts of time to # parse. It also leads to extreme memory consumption. # To prevent similar cases, if invalid entries are found in the middle of a # table the parsing will be aborted # if imp_ord is None and imp_name is None: raise PEFormatError("Invalid entries, aborting parsing.") # Some PEs appear to interleave valid and invalid imports. Instead of # aborting the parsing altogether we will simply skip the invalid entries. # Although if we see 1000 invalid entries and no legit ones, we abort. if imp_name == b("*invalid*"): if num_invalid > 1000 and num_invalid == idx: raise PEFormatError("Too many invalid names, aborting parsing.") num_invalid += 1 continue if imp_ord or imp_name: imported_symbols.append( ImportData( pe=self, struct_table=tbl_entry, struct_iat=struct_iat, # for bound imports if any import_by_ordinal=import_by_ordinal, ordinal=imp_ord, ordinal_offset=tbl_entry.get_file_offset(), hint=imp_hint, name=imp_name, name_offset=name_offset, bound=imp_bound, address=imp_address, hint_name_table_rva=hint_name_table_rva, thunk_offset=thunk_offset, thunk_rva=thunk_rva, ) ) return imported_symbols def get_import_table(self, rva, max_length=None, contains_addresses=False): table = [] # We need the ordinal flag for a simple heuristic # we're implementing within the loop # if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE: ordinal_flag = IMAGE_ORDINAL_FLAG format = self.__IMAGE_THUNK_DATA_format__ elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS: ordinal_flag = IMAGE_ORDINAL_FLAG64 format = self.__IMAGE_THUNK_DATA64_format__ else: # Some PEs may have an invalid value in the Magic field of the # Optional Header. Just in case the remaining file is parseable # let's pretend it's a 32bit PE32 by default. 
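        # Illustrative numbers for the sanity limits below: with
        # MAX_ADDRESS_SPREAD at 128 MiB, a thunk table whose AddressOfData
        # values range from 0x1000 to 0x9001000 spans roughly 144 MiB and is
        # rejected, since legitimate hint/name RVAs stay within one image.
        #
        #     spread = 0x9001000 - 0x1000
        #     spread > 128 * 2**20  # True -> table treated as bogus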
ordinal_flag = IMAGE_ORDINAL_FLAG format = self.__IMAGE_THUNK_DATA_format__ expected_size = Structure(format).sizeof() MAX_ADDRESS_SPREAD = 128 * 2**20 # 128 MB ADDR_4GB = 2**32 MAX_REPEATED_ADDRESSES = 15 repeated_address = 0 addresses_of_data_set_64 = AddressSet() addresses_of_data_set_32 = AddressSet() start_rva = rva while rva: if max_length is not None and rva >= start_rva + max_length: self.__warnings.append( "Error parsing the import table. Entries go beyond bounds." ) break # Enforce an upper bounds on import symbols. if self.__total_import_symbols > MAX_IMPORT_SYMBOLS: self.__warnings.append( "Excessive number of imports %d (>%s)" % (self.__total_import_symbols, MAX_IMPORT_SYMBOLS) ) break self.__total_import_symbols += 1 # if we see too many times the same entry we assume it could be # a table containing bogus data (with malicious intent or otherwise) if repeated_address >= MAX_REPEATED_ADDRESSES: return [] # if the addresses point somewhere but the difference between the highest # and lowest address is larger than MAX_ADDRESS_SPREAD we assume a bogus # table as the addresses should be contained within a module if addresses_of_data_set_32.diff() > MAX_ADDRESS_SPREAD: return [] if addresses_of_data_set_64.diff() > MAX_ADDRESS_SPREAD: return [] failed = False try: data = self.get_data(rva, expected_size) except PEFormatError: failed = True if failed or len(data) != expected_size: self.__warnings.append( "Error parsing the import table. " "Invalid data at RVA: 0x%x" % rva ) return None thunk_data = self.__unpack_data__( format, data, file_offset=self.get_offset_from_rva(rva) ) # If the thunk data contains VAs instead of RVAs, we need to normalize them if contains_addresses: thunk_data.AddressOfData = self.normalize_import_va( thunk_data.AddressOfData ) thunk_data.ForwarderString = self.normalize_import_va( thunk_data.ForwarderString ) thunk_data.Function = self.normalize_import_va(thunk_data.Function) thunk_data.Ordinal = self.normalize_import_va(thunk_data.Ordinal) # Check if the AddressOfData lies within the range of RVAs that it's # being scanned, abort if that is the case, as it is very unlikely # to be legitimate data. # Seen in PE with SHA256: # 5945bb6f0ac879ddf61b1c284f3b8d20c06b228e75ae4f571fa87f5b9512902c if ( thunk_data and thunk_data.AddressOfData >= start_rva and thunk_data.AddressOfData <= rva ): self.__warnings.append( "Error parsing the import table. " "AddressOfData overlaps with THUNK_DATA for " "THUNK at RVA 0x%x" % (rva) ) break if thunk_data and thunk_data.AddressOfData: addr_of_data = thunk_data.AddressOfData # If the entry looks like could be an ordinal... if addr_of_data & ordinal_flag: # but its value is beyond 2^16, we will assume it's a # corrupted and ignore it altogether if addr_of_data & 0x7FFFFFFF > 0xFFFF: return [] # and if it looks like it should be an RVA else: # keep track of the RVAs seen and store them to study their # properties. When certain non-standard features are detected # the parsing will be aborted if addr_of_data >= ADDR_4GB: the_set = addresses_of_data_set_64 else: the_set = addresses_of_data_set_32 if addr_of_data in the_set: repeated_address += 1 the_set.add(addr_of_data) if not thunk_data or thunk_data.all_zeroes(): break rva += thunk_data.sizeof() table.append(thunk_data) return table def get_memory_mapped_image(self, max_virtual_address=0x10000000, ImageBase=None): """Returns the data corresponding to the memory layout of the PE file. 
        The data includes the PE header and the sections loaded at offsets
        corresponding to their relative virtual addresses (the VirtualAddress
        section header member).
        Any offset in this data corresponds to the absolute memory address
        ImageBase+offset.

        The optional argument 'max_virtual_address' provides a means of
        limiting which sections are processed. Any section with a
        VirtualAddress beyond this value will be skipped. Normally, sections
        with values beyond this range are just there to confuse tools. It's a
        common trick to see in packed executables.

        If the 'ImageBase' optional argument is supplied, the file's
        relocations will be applied to the image by calling the
        'relocate_image()' method. Beware that the relocation information is
        applied permanently.
        """

        # Rebase if requested
        #
        if ImageBase is not None:
            # Keep a copy of the image's data before modifying it by rebasing it
            #
            original_data = self.__data__

            self.relocate_image(ImageBase)

        # Collect all sections in one code block
        mapped_data = self.__data__[:]
        for section in self.sections:
            # Miscellaneous integrity tests.
            # Some packers will set these to bogus values to make tools go nuts.
            if section.Misc_VirtualSize == 0 and section.SizeOfRawData == 0:
                continue

            srd = section.SizeOfRawData
            prd = self.adjust_FileAlignment(
                section.PointerToRawData, self.OPTIONAL_HEADER.FileAlignment
            )
            VirtualAddress_adj = self.adjust_SectionAlignment(
                section.VirtualAddress,
                self.OPTIONAL_HEADER.SectionAlignment,
                self.OPTIONAL_HEADER.FileAlignment,
            )

            if (
                srd > len(self.__data__)
                or prd > len(self.__data__)
                or srd + prd > len(self.__data__)
                or VirtualAddress_adj >= max_virtual_address
            ):
                continue

            padding_length = VirtualAddress_adj - len(mapped_data)

            if padding_length > 0:
                mapped_data += b"\0" * padding_length
            elif padding_length < 0:
                mapped_data = mapped_data[:padding_length]

            mapped_data += section.get_data()

        # If the image was rebased, restore it to its original form
        #
        if ImageBase is not None:
            self.__data__ = original_data

        return mapped_data

    def get_resources_strings(self):
        """Returns a list of all the strings found within the resources (if any).

        This method will scan all entries in the resources directory of the PE,
        if there is one, and will return a list with the strings. An empty list
        will be returned otherwise.
        """

        resources_strings = []

        if hasattr(self, "DIRECTORY_ENTRY_RESOURCE"):
            for res_type in self.DIRECTORY_ENTRY_RESOURCE.entries:
                if hasattr(res_type, "directory"):
                    for resource_id in res_type.directory.entries:
                        if hasattr(resource_id, "directory"):
                            if (
                                hasattr(resource_id.directory, "strings")
                                and resource_id.directory.strings
                            ):
                                for res_string in list(
                                    resource_id.directory.strings.values()
                                ):
                                    resources_strings.append(res_string)

        return resources_strings

    def get_data(self, rva=0, length=None):
        """Get data regardless of the section where it lies.

        Given an RVA and the size of the chunk to retrieve, this method
        will find the section where the data lies and return the data.
        """

        s = self.get_section_by_rva(rva)

        if length:
            end = rva + length
        else:
            end = None

        if not s:
            if rva < len(self.header):
                return self.header[rva:end]

            # Before we give up we check whether the file might
            # contain the data anyway. 
There are cases of PE files # without sections that rely on windows loading the first # 8291 bytes into memory and assume the data will be # there # A functional file with these characteristics is: # MD5: 0008892cdfbc3bda5ce047c565e52295 # SHA-1: c7116b9ff950f86af256defb95b5d4859d4752a9 # if rva < len(self.__data__): return self.__data__[rva:end] raise PEFormatError("data at RVA can't be fetched. Corrupt header?") return s.get_data(rva, length) def get_rva_from_offset(self, offset): """Get the RVA corresponding to this file offset.""" s = self.get_section_by_offset(offset) if not s: if self.sections: lowest_rva = min( [ self.adjust_SectionAlignment( s.VirtualAddress, self.OPTIONAL_HEADER.SectionAlignment, self.OPTIONAL_HEADER.FileAlignment, ) for s in self.sections ] ) if offset < lowest_rva: # We will assume that the offset lies within the headers, or # at least points before where the earliest section starts # and we will simply return the offset as the RVA # # The case illustrating this behavior can be found at: # http://corkami.blogspot.com/2010/01/hey-hey-hey-whats-in-your-head.html # where the import table is not contained by any section # hence the RVA needs to be resolved to a raw offset return offset return None else: return offset return s.get_rva_from_offset(offset) def get_offset_from_rva(self, rva): """Get the file offset corresponding to this RVA. Given a RVA , this method will find the section where the data lies and return the offset within the file. """ s = self.get_section_by_rva(rva) if not s: # If not found within a section assume it might # point to overlay data or otherwise data present # but not contained in any section. In those # cases the RVA should equal the offset if rva < len(self.__data__): return rva raise PEFormatError(f"data at RVA 0x{rva:x} can't be fetched") return s.get_offset_from_rva(rva) def get_string_at_rva(self, rva, max_length=MAX_STRING_LENGTH): """Get an ASCII string located at the given address.""" if rva is None: return None s = self.get_section_by_rva(rva) if not s: return self.get_string_from_data(0, self.__data__[rva : rva + max_length]) return self.get_string_from_data(0, s.get_data(rva, length=max_length)) def get_bytes_from_data(self, offset, data): """.""" if offset > len(data): return b"" d = data[offset:] if isinstance(d, bytearray): return bytes(d) return d def get_string_from_data(self, offset, data): """Get an ASCII string from data.""" s = self.get_bytes_from_data(offset, data) end = s.find(b"\0") if end >= 0: s = s[:end] return s def get_string_u_at_rva(self, rva, max_length=2**16, encoding=None): """Get an Unicode string located at the given address.""" if max_length == 0: return b"" # If the RVA is invalid let the exception reach the callers. All # call-sites of get_string_u_at_rva() will handle it. 
data = self.get_data(rva, 2) # max_length is the maximum count of 16bit characters needs to be # doubled to get size in bytes max_length <<= 1 requested = min(max_length, 256) data = self.get_data(rva, requested) # try to find null-termination null_index = -1 while True: null_index = data.find(b"\x00\x00", null_index + 1) if null_index == -1: data_length = len(data) if data_length < requested or data_length == max_length: null_index = len(data) >> 1 break # Request remaining part of data limited by max_length data += self.get_data(rva + data_length, max_length - data_length) null_index = requested - 1 requested = max_length elif null_index % 2 == 0: null_index >>= 1 break # convert selected part of the string to unicode uchrs = struct.unpack("<{:d}H".format(null_index), data[: null_index * 2]) s = "".join(map(chr, uchrs)) if encoding: return b(s.encode(encoding, "backslashreplace_")) return b(s.encode("utf-8", "backslashreplace_")) def get_section_by_offset(self, offset): """Get the section containing the given file offset.""" for section in self.sections: if section.contains_offset(offset): return section return None def get_section_by_rva(self, rva): """Get the section containing the given address.""" # if we look a lot of times at RVA in the same section, "cache" the last used section # to speedup lookups (very useful when parsing import table) if self._get_section_by_rva_last_used is not None: if self._get_section_by_rva_last_used.contains_rva(rva): return self._get_section_by_rva_last_used for section in self.sections: if section.contains_rva(rva): self._get_section_by_rva_last_used = section return section return None def __str__(self): return self.dump_info() def has_relocs(self): """Checks if the PE file has relocation directory""" return hasattr(self, "DIRECTORY_ENTRY_BASERELOC") def has_dynamic_relocs(self): if hasattr(self, "DIRECTORY_ENTRY_LOAD_CONFIG"): if self.DIRECTORY_ENTRY_LOAD_CONFIG.dynamic_relocations: return True return False def print_info(self, encoding="utf-8"): """Print all the PE header information in a human readable from.""" print(self.dump_info(encoding=encoding)) def dump_info(self, dump=None, encoding="ascii"): """Dump all the PE header information into human readable string.""" if dump is None: dump = Dump() warnings = self.get_warnings() if warnings: dump.add_header("Parsing Warnings") for warning in warnings: dump.add_line(warning) dump.add_newline() dump.add_header("DOS_HEADER") dump.add_lines(self.DOS_HEADER.dump()) dump.add_newline() dump.add_header("NT_HEADERS") dump.add_lines(self.NT_HEADERS.dump()) dump.add_newline() dump.add_header("FILE_HEADER") dump.add_lines(self.FILE_HEADER.dump()) image_flags = retrieve_flags(IMAGE_CHARACTERISTICS, "IMAGE_FILE_") dump.add("Flags: ") flags = [] for flag in sorted(image_flags): if getattr(self.FILE_HEADER, flag[0]): flags.append(flag[0]) dump.add_line(", ".join(flags)) dump.add_newline() if hasattr(self, "OPTIONAL_HEADER") and self.OPTIONAL_HEADER is not None: dump.add_header("OPTIONAL_HEADER") dump.add_lines(self.OPTIONAL_HEADER.dump()) dll_characteristics_flags = retrieve_flags( DLL_CHARACTERISTICS, "IMAGE_DLLCHARACTERISTICS_" ) dump.add("DllCharacteristics: ") flags = [] for flag in sorted(dll_characteristics_flags): if getattr(self.OPTIONAL_HEADER, flag[0]): flags.append(flag[0]) dump.add_line(", ".join(flags)) dump.add_newline() dump.add_header("PE Sections") section_flags = retrieve_flags(SECTION_CHARACTERISTICS, "IMAGE_SCN_") for section in self.sections: dump.add_lines(section.dump()) dump.add("Flags: ") 
flags = [] for flag in sorted(section_flags): if getattr(section, flag[0]): flags.append(flag[0]) dump.add_line(", ".join(flags)) dump.add_line( "Entropy: {0:f} (Min=0.0, Max=8.0)".format(section.get_entropy()) ) if md5 is not None: dump.add_line("MD5 hash: {0}".format(section.get_hash_md5())) if sha1 is not None: dump.add_line("SHA-1 hash: %s" % section.get_hash_sha1()) if sha256 is not None: dump.add_line("SHA-256 hash: %s" % section.get_hash_sha256()) if sha512 is not None: dump.add_line("SHA-512 hash: %s" % section.get_hash_sha512()) dump.add_newline() if hasattr(self, "OPTIONAL_HEADER") and hasattr( self.OPTIONAL_HEADER, "DATA_DIRECTORY" ): dump.add_header("Directories") for directory in self.OPTIONAL_HEADER.DATA_DIRECTORY: if directory is not None: dump.add_lines(directory.dump()) dump.add_newline() if hasattr(self, "VS_VERSIONINFO"): for idx, vinfo_entry in enumerate(self.VS_VERSIONINFO): if len(self.VS_VERSIONINFO) > 1: dump.add_header(f"Version Information {idx + 1}") else: dump.add_header("Version Information") if vinfo_entry is not None: dump.add_lines(vinfo_entry.dump()) dump.add_newline() if hasattr(self, "VS_FIXEDFILEINFO"): dump.add_lines(self.VS_FIXEDFILEINFO[idx].dump()) dump.add_newline() if hasattr(self, "FileInfo") and len(self.FileInfo) > idx: for entry in self.FileInfo[idx]: dump.add_lines(entry.dump()) dump.add_newline() if hasattr(entry, "StringTable"): for st_entry in entry.StringTable: [dump.add_line(" " + line) for line in st_entry.dump()] dump.add_line( " LangID: {0}".format( st_entry.LangID.decode( encoding, "backslashreplace_" ) ) ) dump.add_newline() for str_entry in sorted(list(st_entry.entries.items())): # try: dump.add_line( " {0}: {1}".format( str_entry[0].decode( encoding, "backslashreplace_" ), str_entry[1].decode( encoding, "backslashreplace_" ), ) ) dump.add_newline() elif hasattr(entry, "Var"): for var_entry in entry.Var: if hasattr(var_entry, "entry"): [ dump.add_line(" " + line) for line in var_entry.dump() ] dump.add_line( " {0}: {1}".format( list(var_entry.entry.keys())[0].decode( "utf-8", "backslashreplace_" ), list(var_entry.entry.values())[0], ) ) dump.add_newline() if hasattr(self, "DIRECTORY_ENTRY_EXPORT"): dump.add_header("Exported symbols") dump.add_lines(self.DIRECTORY_ENTRY_EXPORT.struct.dump()) dump.add_newline() dump.add_line("%-10s %-10s %s" % ("Ordinal", "RVA", "Name")) for export in self.DIRECTORY_ENTRY_EXPORT.symbols: if export.address is not None: name = b("None") if export.name: name = export.name dump.add( "%-10d 0x%08X %s" % (export.ordinal, export.address, name.decode(encoding)) ) if export.forwarder: dump.add_line( " forwarder: {0}".format( export.forwarder.decode(encoding, "backslashreplace_") ) ) else: dump.add_newline() dump.add_newline() if hasattr(self, "DIRECTORY_ENTRY_IMPORT"): dump.add_header("Imported symbols") for module in self.DIRECTORY_ENTRY_IMPORT: dump.add_lines(module.struct.dump()) # Print the name of the DLL if there are no imports. 
if not module.imports: dump.add( " Name -> {0}".format( self.get_string_at_rva(module.struct.Name).decode( encoding, "backslashreplace_" ) ) ) dump.add_newline() dump.add_newline() for symbol in module.imports: if symbol.import_by_ordinal is True: if symbol.name is not None: dump.add( "{0}.{1} Ordinal[{2}] (Imported by Ordinal)".format( module.dll.decode("utf-8"), symbol.name.decode("utf-8"), symbol.ordinal, ) ) else: dump.add( "{0} Ordinal[{1}] (Imported by Ordinal)".format( module.dll.decode("utf-8"), symbol.ordinal ) ) else: dump.add( "{0}.{1} Hint[{2:d}]".format( module.dll.decode(encoding, "backslashreplace_"), symbol.name.decode(encoding, "backslashreplace_"), symbol.hint, ) ) if symbol.bound: dump.add_line(" Bound: 0x{0:08X}".format(symbol.bound)) else: dump.add_newline() dump.add_newline() if hasattr(self, "DIRECTORY_ENTRY_BOUND_IMPORT"): dump.add_header("Bound imports") for bound_imp_desc in self.DIRECTORY_ENTRY_BOUND_IMPORT: dump.add_lines(bound_imp_desc.struct.dump()) dump.add_line( "DLL: {0}".format( bound_imp_desc.name.decode(encoding, "backslashreplace_") ) ) dump.add_newline() for bound_imp_ref in bound_imp_desc.entries: dump.add_lines(bound_imp_ref.struct.dump(), 4) dump.add_line( "DLL: {0}".format( bound_imp_ref.name.decode(encoding, "backslashreplace_") ), 4, ) dump.add_newline() if hasattr(self, "DIRECTORY_ENTRY_DELAY_IMPORT"): dump.add_header("Delay Imported symbols") for module in self.DIRECTORY_ENTRY_DELAY_IMPORT: dump.add_lines(module.struct.dump()) dump.add_newline() for symbol in module.imports: if symbol.import_by_ordinal is True: dump.add( "{0} Ordinal[{1:d}] (Imported by Ordinal)".format( module.dll.decode(encoding, "backslashreplace_"), symbol.ordinal, ) ) else: dump.add( "{0}.{1} Hint[{2}]".format( module.dll.decode(encoding, "backslashreplace_"), symbol.name.decode(encoding, "backslashreplace_"), symbol.hint, ) ) if symbol.bound: dump.add_line(" Bound: 0x{0:08X}".format(symbol.bound)) else: dump.add_newline() dump.add_newline() if hasattr(self, "DIRECTORY_ENTRY_RESOURCE"): dump.add_header("Resource directory") dump.add_lines(self.DIRECTORY_ENTRY_RESOURCE.struct.dump()) for res_type in self.DIRECTORY_ENTRY_RESOURCE.entries: if res_type.name is not None: name = res_type.name.decode(encoding, "backslashreplace_") dump.add_line( f"Name: [{name}]", 2, ) else: res_type_id = RESOURCE_TYPE.get(res_type.struct.Id, "-") dump.add_line( f"Id: [0x{res_type.struct.Id:X}] ({res_type_id})", 2, ) dump.add_lines(res_type.struct.dump(), 2) if hasattr(res_type, "directory"): dump.add_lines(res_type.directory.struct.dump(), 4) for resource_id in res_type.directory.entries: if resource_id.name is not None: name = resource_id.name.decode("utf-8", "backslashreplace_") dump.add_line( f"Name: [{name}]", 6, ) else: dump.add_line(f"Id: [0x{resource_id.struct.Id:X}]", 6) dump.add_lines(resource_id.struct.dump(), 6) if hasattr(resource_id, "directory"): dump.add_lines(resource_id.directory.struct.dump(), 8) for resource_lang in resource_id.directory.entries: if hasattr(resource_lang, "data"): dump.add_line( "\\--- LANG [%d,%d][%s,%s]" % ( resource_lang.data.lang, resource_lang.data.sublang, LANG.get( resource_lang.data.lang, "*unknown*" ), get_sublang_name_for_lang( resource_lang.data.lang, resource_lang.data.sublang, ), ), 8, ) dump.add_lines(resource_lang.struct.dump(), 10) dump.add_lines(resource_lang.data.struct.dump(), 12) if ( hasattr(resource_id.directory, "strings") and resource_id.directory.strings ): dump.add_line("[STRINGS]", 10) for idx, res_string in list( 
sorted(resource_id.directory.strings.items()) ): dump.add_line( "{0:6d}: {1}".format( idx, res_string.encode( "unicode-escape", "backslashreplace" ).decode("ascii"), ), 12, ) dump.add_newline() dump.add_newline() if ( hasattr(self, "DIRECTORY_ENTRY_TLS") and self.DIRECTORY_ENTRY_TLS and self.DIRECTORY_ENTRY_TLS.struct ): dump.add_header("TLS") dump.add_lines(self.DIRECTORY_ENTRY_TLS.struct.dump()) dump.add_newline() if ( hasattr(self, "DIRECTORY_ENTRY_LOAD_CONFIG") and self.DIRECTORY_ENTRY_LOAD_CONFIG and self.DIRECTORY_ENTRY_LOAD_CONFIG.struct ): dump.add_header("LOAD_CONFIG") dump.add_lines(self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.dump()) dump.add_newline() if hasattr(self, "DIRECTORY_ENTRY_DEBUG"): dump.add_header("Debug information") for dbg in self.DIRECTORY_ENTRY_DEBUG: dump.add_lines(dbg.struct.dump()) try: dump.add_line("Type: " + DEBUG_TYPE[dbg.struct.Type]) except KeyError: dump.add_line("Type: 0x{0:x}(Unknown)".format(dbg.struct.Type)) dump.add_newline() if dbg.entry: dump.add_lines(dbg.entry.dump(), 4) dump.add_newline() if self.has_relocs(): dump.add_header("Base relocations") for base_reloc in self.DIRECTORY_ENTRY_BASERELOC: dump.add_lines(base_reloc.struct.dump()) for reloc in base_reloc.entries: try: dump.add_line( "%08Xh %s" % (reloc.rva, RELOCATION_TYPE[reloc.type][16:]), 4, ) except KeyError: dump.add_line( "0x%08X 0x%x(Unknown)" % (reloc.rva, reloc.type), 4 ) dump.add_newline() if ( hasattr(self, "DIRECTORY_ENTRY_EXCEPTION") and len(self.DIRECTORY_ENTRY_EXCEPTION) > 0 ): dump.add_header("Unwind data for exception handling") for rf in self.DIRECTORY_ENTRY_EXCEPTION: dump.add_lines(rf.struct.dump()) if hasattr(rf, "unwindinfo") and rf.unwindinfo is not None: dump.add_lines(rf.unwindinfo.dump(), 4) return dump.get_text() def dump_dict(self): """Dump all the PE header information into a dictionary.""" dump_dict = {} warnings = self.get_warnings() if warnings: dump_dict["Parsing Warnings"] = warnings dump_dict["DOS_HEADER"] = self.DOS_HEADER.dump_dict() dump_dict["NT_HEADERS"] = self.NT_HEADERS.dump_dict() dump_dict["FILE_HEADER"] = self.FILE_HEADER.dump_dict() image_flags = retrieve_flags(IMAGE_CHARACTERISTICS, "IMAGE_FILE_") dump_dict["Flags"] = [] for flag in image_flags: if getattr(self.FILE_HEADER, flag[0]): dump_dict["Flags"].append(flag[0]) if hasattr(self, "OPTIONAL_HEADER") and self.OPTIONAL_HEADER is not None: dump_dict["OPTIONAL_HEADER"] = self.OPTIONAL_HEADER.dump_dict() dll_characteristics_flags = retrieve_flags( DLL_CHARACTERISTICS, "IMAGE_DLLCHARACTERISTICS_" ) dump_dict["DllCharacteristics"] = [] for flag in dll_characteristics_flags: if getattr(self.OPTIONAL_HEADER, flag[0]): dump_dict["DllCharacteristics"].append(flag[0]) dump_dict["PE Sections"] = [] section_flags = retrieve_flags(SECTION_CHARACTERISTICS, "IMAGE_SCN_") for section in self.sections: section_dict = section.dump_dict() dump_dict["PE Sections"].append(section_dict) section_dict["Flags"] = [] for flag in section_flags: if getattr(section, flag[0]): section_dict["Flags"].append(flag[0]) section_dict["Entropy"] = section.get_entropy() if md5 is not None: section_dict["MD5"] = section.get_hash_md5() if sha1 is not None: section_dict["SHA1"] = section.get_hash_sha1() if sha256 is not None: section_dict["SHA256"] = section.get_hash_sha256() if sha512 is not None: section_dict["SHA512"] = section.get_hash_sha512() if hasattr(self, "OPTIONAL_HEADER") and hasattr( self.OPTIONAL_HEADER, "DATA_DIRECTORY" ): dump_dict["Directories"] = [] for idx, directory in enumerate(self.OPTIONAL_HEADER.DATA_DIRECTORY): 
if directory is not None: dump_dict["Directories"].append(directory.dump_dict()) if hasattr(self, "VS_VERSIONINFO"): dump_dict["Version Information"] = [] for idx, vs_vinfo in enumerate(self.VS_VERSIONINFO): version_info_list = [] version_info_list.append(vs_vinfo.dump_dict()) if hasattr(self, "VS_FIXEDFILEINFO"): version_info_list.append(self.VS_FIXEDFILEINFO[idx].dump_dict()) if hasattr(self, "FileInfo") and len(self.FileInfo) > idx: fileinfo_list = [] version_info_list.append(fileinfo_list) for entry in self.FileInfo[idx]: fileinfo_list.append(entry.dump_dict()) if hasattr(entry, "StringTable"): stringtable_dict = {} for st_entry in entry.StringTable: fileinfo_list.extend(st_entry.dump_dict()) stringtable_dict["LangID"] = st_entry.LangID for str_entry in list(st_entry.entries.items()): stringtable_dict[str_entry[0]] = str_entry[1] fileinfo_list.append(stringtable_dict) elif hasattr(entry, "Var"): for var_entry in entry.Var: var_dict = {} if hasattr(var_entry, "entry"): fileinfo_list.extend(var_entry.dump_dict()) var_dict[list(var_entry.entry.keys())[0]] = list( var_entry.entry.values() )[0] fileinfo_list.append(var_dict) dump_dict["Version Information"].append(version_info_list) if hasattr(self, "DIRECTORY_ENTRY_EXPORT"): dump_dict["Exported symbols"] = [] dump_dict["Exported symbols"].append( self.DIRECTORY_ENTRY_EXPORT.struct.dump_dict() ) for export in self.DIRECTORY_ENTRY_EXPORT.symbols: export_dict = {} if export.address is not None: export_dict.update( { "Ordinal": export.ordinal, "RVA": export.address, "Name": export.name, } ) if export.forwarder: export_dict["forwarder"] = export.forwarder dump_dict["Exported symbols"].append(export_dict) if hasattr(self, "DIRECTORY_ENTRY_IMPORT"): dump_dict["Imported symbols"] = [] for module in self.DIRECTORY_ENTRY_IMPORT: import_list = [] dump_dict["Imported symbols"].append(import_list) import_list.append(module.struct.dump_dict()) for symbol in module.imports: symbol_dict = {} if symbol.import_by_ordinal is True: symbol_dict["DLL"] = module.dll symbol_dict["Ordinal"] = symbol.ordinal else: symbol_dict["DLL"] = module.dll symbol_dict["Name"] = symbol.name symbol_dict["Hint"] = symbol.hint if symbol.bound: symbol_dict["Bound"] = symbol.bound import_list.append(symbol_dict) if hasattr(self, "DIRECTORY_ENTRY_BOUND_IMPORT"): dump_dict["Bound imports"] = [] for bound_imp_desc in self.DIRECTORY_ENTRY_BOUND_IMPORT: bound_imp_desc_dict = {} dump_dict["Bound imports"].append(bound_imp_desc_dict) bound_imp_desc_dict.update(bound_imp_desc.struct.dump_dict()) bound_imp_desc_dict["DLL"] = bound_imp_desc.name for bound_imp_ref in bound_imp_desc.entries: bound_imp_ref_dict = {} bound_imp_ref_dict.update(bound_imp_ref.struct.dump_dict()) bound_imp_ref_dict["DLL"] = bound_imp_ref.name if hasattr(self, "DIRECTORY_ENTRY_DELAY_IMPORT"): dump_dict["Delay Imported symbols"] = [] for module in self.DIRECTORY_ENTRY_DELAY_IMPORT: module_list = [] dump_dict["Delay Imported symbols"].append(module_list) module_list.append(module.struct.dump_dict()) for symbol in module.imports: symbol_dict = {} if symbol.import_by_ordinal is True: symbol_dict["DLL"] = module.dll symbol_dict["Ordinal"] = symbol.ordinal else: symbol_dict["DLL"] = module.dll symbol_dict["Name"] = symbol.name symbol_dict["Hint"] = symbol.hint if symbol.bound: symbol_dict["Bound"] = symbol.bound module_list.append(symbol_dict) if hasattr(self, "DIRECTORY_ENTRY_RESOURCE"): dump_dict["Resource directory"] = [] dump_dict["Resource directory"].append( self.DIRECTORY_ENTRY_RESOURCE.struct.dump_dict() ) for 
res_type in self.DIRECTORY_ENTRY_RESOURCE.entries: resource_type_dict = {} if res_type.name is not None: resource_type_dict["Name"] = res_type.name else: resource_type_dict["Id"] = ( res_type.struct.Id, RESOURCE_TYPE.get(res_type.struct.Id, "-"), ) resource_type_dict.update(res_type.struct.dump_dict()) dump_dict["Resource directory"].append(resource_type_dict) if hasattr(res_type, "directory"): directory_list = [] directory_list.append(res_type.directory.struct.dump_dict()) dump_dict["Resource directory"].append(directory_list) for resource_id in res_type.directory.entries: resource_id_dict = {} if resource_id.name is not None: resource_id_dict["Name"] = resource_id.name else: resource_id_dict["Id"] = resource_id.struct.Id resource_id_dict.update(resource_id.struct.dump_dict()) directory_list.append(resource_id_dict) if hasattr(resource_id, "directory"): resource_id_list = [] resource_id_list.append( resource_id.directory.struct.dump_dict() ) directory_list.append(resource_id_list) for resource_lang in resource_id.directory.entries: if hasattr(resource_lang, "data"): resource_lang_dict = {} resource_lang_dict["LANG"] = resource_lang.data.lang resource_lang_dict[ "SUBLANG" ] = resource_lang.data.sublang resource_lang_dict["LANG_NAME"] = LANG.get( resource_lang.data.lang, "*unknown*" ) resource_lang_dict[ "SUBLANG_NAME" ] = get_sublang_name_for_lang( resource_lang.data.lang, resource_lang.data.sublang, ) resource_lang_dict.update( resource_lang.struct.dump_dict() ) resource_lang_dict.update( resource_lang.data.struct.dump_dict() ) resource_id_list.append(resource_lang_dict) if ( hasattr(resource_id.directory, "strings") and resource_id.directory.strings ): for idx, res_string in list( resource_id.directory.strings.items() ): resource_id_list.append( res_string.encode( "unicode-escape", "backslashreplace" ).decode("ascii") ) if ( hasattr(self, "DIRECTORY_ENTRY_TLS") and self.DIRECTORY_ENTRY_TLS and self.DIRECTORY_ENTRY_TLS.struct ): dump_dict["TLS"] = self.DIRECTORY_ENTRY_TLS.struct.dump_dict() if ( hasattr(self, "DIRECTORY_ENTRY_LOAD_CONFIG") and self.DIRECTORY_ENTRY_LOAD_CONFIG and self.DIRECTORY_ENTRY_LOAD_CONFIG.struct ): dump_dict[ "LOAD_CONFIG" ] = self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.dump_dict() if hasattr(self, "DIRECTORY_ENTRY_DEBUG"): dump_dict["Debug information"] = [] for dbg in self.DIRECTORY_ENTRY_DEBUG: dbg_dict = {} dump_dict["Debug information"].append(dbg_dict) dbg_dict.update(dbg.struct.dump_dict()) dbg_dict["Type"] = DEBUG_TYPE.get(dbg.struct.Type, dbg.struct.Type) if self.has_relocs(): dump_dict["Base relocations"] = [] for base_reloc in self.DIRECTORY_ENTRY_BASERELOC: base_reloc_list = [] dump_dict["Base relocations"].append(base_reloc_list) base_reloc_list.append(base_reloc.struct.dump_dict()) for reloc in base_reloc.entries: reloc_dict = {} base_reloc_list.append(reloc_dict) reloc_dict["RVA"] = reloc.rva try: reloc_dict["Type"] = RELOCATION_TYPE[reloc.type][16:] except KeyError: reloc_dict["Type"] = reloc.type return dump_dict # OC Patch def get_physical_by_rva(self, rva): """Gets the physical address in the PE file from an RVA value.""" try: return self.get_offset_from_rva(rva) except Exception: return None ## # Double-Word get / set ## def get_data_from_dword(self, dword): """Return a four byte string representing the double word value (little endian).""" return struct.pack("<L", dword & 0xFFFFFFFF) def get_dword_from_data(self, data, offset): """Convert four bytes of data to a double word (little endian) 'offset' is assumed to index into a dword array. 
So setting it to N will return a dword out of the data starting at offset N*4. Returns None if the data can't be turned into a double word. """ if (offset + 1) * 4 > len(data): return None return struct.unpack("<I", data[offset * 4 : (offset + 1) * 4])[0] def get_dword_at_rva(self, rva): """Return the double word value at the given RVA. Returns None if the value can't be read, i.e. the RVA can't be mapped to a file offset. """ try: return self.get_dword_from_data(self.get_data(rva, 4), 0) except PEFormatError: return None def get_dword_from_offset(self, offset): """Return the double word value at the given file offset. (little endian)""" if offset + 4 > len(self.__data__): return None return self.get_dword_from_data(self.__data__[offset : offset + 4], 0) def set_dword_at_rva(self, rva, dword): """Set the double word value at the file offset corresponding to the given RVA.""" return self.set_bytes_at_rva(rva, self.get_data_from_dword(dword)) def set_dword_at_offset(self, offset, dword): """Set the double word value at the given file offset.""" return self.set_bytes_at_offset(offset, self.get_data_from_dword(dword)) ## # Word get / set ## def get_data_from_word(self, word): """Return a two byte string representing the word value. (little endian).""" return struct.pack("<H", word) def get_word_from_data(self, data, offset): """Convert two bytes of data to a word (little endian) 'offset' is assumed to index into a word array. So setting it to N will return a dword out of the data starting at offset N*2. Returns None if the data can't be turned into a word. """ if (offset + 1) * 2 > len(data): return None return struct.unpack("<H", data[offset * 2 : (offset + 1) * 2])[0] def get_word_at_rva(self, rva): """Return the word value at the given RVA. Returns None if the value can't be read, i.e. the RVA can't be mapped to a file offset. """ try: return self.get_word_from_data(self.get_data(rva)[:2], 0) except PEFormatError: return None def get_word_from_offset(self, offset): """Return the word value at the given file offset. (little endian)""" if offset + 2 > len(self.__data__): return None return self.get_word_from_data(self.__data__[offset : offset + 2], 0) def set_word_at_rva(self, rva, word): """Set the word value at the file offset corresponding to the given RVA.""" return self.set_bytes_at_rva(rva, self.get_data_from_word(word)) def set_word_at_offset(self, offset, word): """Set the word value at the given file offset.""" return self.set_bytes_at_offset(offset, self.get_data_from_word(word)) ## # Quad-Word get / set ## def get_data_from_qword(self, word): """Return an eight byte string representing the quad-word value (little endian).""" return struct.pack("<Q", word) def get_qword_from_data(self, data, offset): """Convert eight bytes of data to a word (little endian) 'offset' is assumed to index into a word array. So setting it to N will return a dword out of the data starting at offset N*8. Returns None if the data can't be turned into a quad word. """ if (offset + 1) * 8 > len(data): return None return struct.unpack("<Q", data[offset * 8 : (offset + 1) * 8])[0] def get_qword_at_rva(self, rva): """Return the quad-word value at the given RVA. Returns None if the value can't be read, i.e. the RVA can't be mapped to a file offset. """ try: return self.get_qword_from_data(self.get_data(rva)[:8], 0) except PEFormatError: return None def get_qword_from_offset(self, offset): """Return the quad-word value at the given file offset. 
(little endian)""" if offset + 8 > len(self.__data__): return None return self.get_qword_from_data(self.__data__[offset : offset + 8], 0) def set_qword_at_rva(self, rva, qword): """Set the quad-word value at the file offset corresponding to the given RVA.""" return self.set_bytes_at_rva(rva, self.get_data_from_qword(qword)) def set_qword_at_offset(self, offset, qword): """Set the quad-word value at the given file offset.""" return self.set_bytes_at_offset(offset, self.get_data_from_qword(qword)) ## # Set bytes ## def set_bytes_at_rva(self, rva, data): """Overwrite, with the given string, the bytes at the file offset corresponding to the given RVA. Return True if successful, False otherwise. It can fail if the offset is outside the file's boundaries. """ if not isinstance(data, bytes): raise TypeError("data should be of type: bytes") offset = self.get_physical_by_rva(rva) if not offset: return False return self.set_bytes_at_offset(offset, data) def set_bytes_at_offset(self, offset, data): """Overwrite the bytes at the given file offset with the given string. Return True if successful, False otherwise. It can fail if the offset is outside the file's boundaries. """ if not isinstance(data, bytes): raise TypeError("data should be of type: bytes") if 0 <= offset < len(self.__data__): self.set_data_bytes(offset, data) else: return False return True def set_data_bytes(self, offset: int, data: bytes): if not isinstance(self.__data__, bytearray): self.__data__ = bytearray(self.__data__) self.__data__[offset : offset + len(data)] = data def merge_modified_section_data(self): """Update the PE image content with any individual section data that has been modified. """ for section in self.sections: section_data_start = self.adjust_FileAlignment( section.PointerToRawData, self.OPTIONAL_HEADER.FileAlignment ) section_data_end = section_data_start + section.SizeOfRawData if section_data_start < len(self.__data__) and section_data_end < len( self.__data__ ): self.set_data_bytes(section_data_start, section.get_data()) def relocate_image(self, new_ImageBase): """Apply the relocation information to the image using the provided image base. This method will apply the relocation information to the image. Given the new base, all the relocations will be processed and both the raw data and the section's data will be fixed accordingly. The resulting image can be retrieved as well through the method: get_memory_mapped_image() In order to get something that would more closely match what could be found in memory once the Windows loader finished its work. 
""" relocation_difference = new_ImageBase - self.OPTIONAL_HEADER.ImageBase if ( len(self.OPTIONAL_HEADER.DATA_DIRECTORY) >= 6 and self.OPTIONAL_HEADER.DATA_DIRECTORY[5].Size ): if not hasattr(self, "DIRECTORY_ENTRY_BASERELOC"): self.parse_data_directories( directories=[DIRECTORY_ENTRY["IMAGE_DIRECTORY_ENTRY_BASERELOC"]] ) if not hasattr(self, "DIRECTORY_ENTRY_BASERELOC"): self.__warnings.append( "Relocating image but PE does not have (or pefile cannot " "parse) a DIRECTORY_ENTRY_BASERELOC" ) else: for reloc in self.DIRECTORY_ENTRY_BASERELOC: # We iterate with an index because if the relocation is of type # IMAGE_REL_BASED_HIGHADJ we need to also process the next entry # at once and skip it for the next iteration # entry_idx = 0 while entry_idx < len(reloc.entries): entry = reloc.entries[entry_idx] entry_idx += 1 if entry.type == RELOCATION_TYPE["IMAGE_REL_BASED_ABSOLUTE"]: # Nothing to do for this type of relocation pass elif entry.type == RELOCATION_TYPE["IMAGE_REL_BASED_HIGH"]: # Fix the high 16-bits of a relocation # # Add high 16-bits of relocation_difference to the # 16-bit value at RVA=entry.rva self.set_word_at_rva( entry.rva, ( self.get_word_at_rva(entry.rva) + relocation_difference >> 16 ) & 0xFFFF, ) elif entry.type == RELOCATION_TYPE["IMAGE_REL_BASED_LOW"]: # Fix the low 16-bits of a relocation # # Add low 16 bits of relocation_difference to the 16-bit # value at RVA=entry.rva self.set_word_at_rva( entry.rva, ( self.get_word_at_rva(entry.rva) + relocation_difference ) & 0xFFFF, ) elif entry.type == RELOCATION_TYPE["IMAGE_REL_BASED_HIGHLOW"]: # Handle all high and low parts of a 32-bit relocation # # Add relocation_difference to the value at RVA=entry.rva self.set_dword_at_rva( entry.rva, self.get_dword_at_rva(entry.rva) + relocation_difference, ) elif entry.type == RELOCATION_TYPE["IMAGE_REL_BASED_HIGHADJ"]: # Fix the high 16-bits of a relocation and adjust # # Add high 16-bits of relocation_difference to the 32-bit # value composed from the (16-bit value at # RVA=entry.rva)<<16 plus the 16-bit value at the next # relocation entry. # If the next entry is beyond the array's limits, # abort... 
the table is corrupt if entry_idx == len(reloc.entries): break next_entry = reloc.entries[entry_idx] entry_idx += 1 self.set_word_at_rva( entry.rva, ( (self.get_word_at_rva(entry.rva) << 16) + next_entry.rva + relocation_difference & 0xFFFF0000 ) >> 16, ) elif entry.type == RELOCATION_TYPE["IMAGE_REL_BASED_DIR64"]: # Apply the difference to the 64-bit value at the offset # RVA=entry.rva self.set_qword_at_rva( entry.rva, self.get_qword_at_rva(entry.rva) + relocation_difference, ) self.OPTIONAL_HEADER.ImageBase = new_ImageBase # correct VAs(virtual addresses) occurrences in directory information if hasattr(self, "DIRECTORY_ENTRY_IMPORT"): for dll in self.DIRECTORY_ENTRY_IMPORT: for func in dll.imports: func.address += relocation_difference if hasattr(self, "DIRECTORY_ENTRY_TLS"): self.DIRECTORY_ENTRY_TLS.struct.StartAddressOfRawData += ( relocation_difference ) self.DIRECTORY_ENTRY_TLS.struct.EndAddressOfRawData += ( relocation_difference ) self.DIRECTORY_ENTRY_TLS.struct.AddressOfIndex += relocation_difference self.DIRECTORY_ENTRY_TLS.struct.AddressOfCallBacks += ( relocation_difference ) if hasattr(self, "DIRECTORY_ENTRY_LOAD_CONFIG"): load_config = self.DIRECTORY_ENTRY_LOAD_CONFIG.struct if ( hasattr(load_config, "LockPrefixTable") and load_config.LockPrefixTable ): load_config.LockPrefixTable += relocation_difference if hasattr(load_config, "EditList") and load_config.EditList: load_config.EditList += relocation_difference if ( hasattr(load_config, "SecurityCookie") and load_config.SecurityCookie ): load_config.SecurityCookie += relocation_difference if ( hasattr(load_config, "SEHandlerTable") and load_config.SEHandlerTable ): load_config.SEHandlerTable += relocation_difference if ( hasattr(load_config, "GuardCFCheckFunctionPointer") and load_config.GuardCFCheckFunctionPointer ): load_config.GuardCFCheckFunctionPointer += relocation_difference if ( hasattr(load_config, "GuardCFDispatchFunctionPointer") and load_config.GuardCFDispatchFunctionPointer ): load_config.GuardCFDispatchFunctionPointer += relocation_difference if ( hasattr(load_config, "GuardCFFunctionTable") and load_config.GuardCFFunctionTable ): load_config.GuardCFFunctionTable += relocation_difference if ( hasattr(load_config, "GuardAddressTakenIatEntryTable") and load_config.GuardAddressTakenIatEntryTable ): load_config.GuardAddressTakenIatEntryTable += relocation_difference if ( hasattr(load_config, "GuardLongJumpTargetTable") and load_config.GuardLongJumpTargetTable ): load_config.GuardLongJumpTargetTable += relocation_difference if ( hasattr(load_config, "DynamicValueRelocTable") and load_config.DynamicValueRelocTable ): load_config.DynamicValueRelocTable += relocation_difference if ( self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS and hasattr(load_config, "CHPEMetadataPointer") and load_config.CHPEMetadataPointer ): load_config.CHPEMetadataPointer += relocation_difference if ( hasattr(load_config, "GuardRFFailureRoutine") and load_config.GuardRFFailureRoutine ): load_config.GuardRFFailureRoutine += relocation_difference if ( hasattr(load_config, "GuardRFFailureRoutineFunctionPointer") and load_config.GuardRFFailureRoutineFunctionPointer ): load_config.GuardRFVerifyStackPointerFunctionPointer += ( relocation_difference ) if ( hasattr(load_config, "GuardRFVerifyStackPointerFunctionPointer") and load_config.GuardRFVerifyStackPointerFunctionPointer ): load_config.GuardRFVerifyStackPointerFunctionPointer += ( relocation_difference ) if ( hasattr(load_config, "EnclaveConfigurationPointer") and 
load_config.EnclaveConfigurationPointer ): load_config.EnclaveConfigurationPointer += relocation_difference def verify_checksum(self): return self.OPTIONAL_HEADER.CheckSum == self.generate_checksum() def generate_checksum(self): # This will make sure that the data representing the PE image # is updated with any changes that might have been made by # assigning values to header fields as those are not automatically # updated upon assignment. # # data = self.write() # print('{0}'.format(len(data))) # for idx, b in enumerate(data): # if b != ord(self.__data__[idx]) or (idx > 1244440 and idx < 1244460): # print('Idx: {0} G {1:02x} {3} B {2:02x}'.format( # idx, ord(self.__data__[idx]), b, # self.__data__[idx], chr(b))) self.__data__ = self.write() # Get the offset to the CheckSum field in the OptionalHeader # (The offset is the same in PE32 and PE32+) checksum_offset = self.OPTIONAL_HEADER.get_file_offset() + 0x40 # 64 checksum = 0 # Verify the data is dword-aligned. Add padding if needed # remainder = len(self.__data__) % 4 data_len = len(self.__data__) + ((4 - remainder) * (remainder != 0)) for i in range(int(data_len / 4)): # Skip the checksum field if i == int(checksum_offset / 4): continue if i + 1 == (int(data_len / 4)) and remainder: dword = struct.unpack( "I", self.__data__[i * 4 :] + (b"\0" * (4 - remainder)) )[0] else: dword = struct.unpack("I", self.__data__[i * 4 : i * 4 + 4])[0] # Optimized the calculation (thanks to Emmanuel Bourg for pointing it out!) checksum += dword if checksum >= 2**32: checksum = (checksum & 0xFFFFFFFF) + (checksum >> 32) checksum = (checksum & 0xFFFF) + (checksum >> 16) checksum = (checksum) + (checksum >> 16) checksum = checksum & 0xFFFF # The length is the one of the original data, not the padded one # return checksum + len(self.__data__) def is_exe(self): """Check whether the file is a standard executable. This will return true only if the file has the IMAGE_FILE_EXECUTABLE_IMAGE flag set and the IMAGE_FILE_DLL not set and the file does not appear to be a driver either. """ EXE_flag = IMAGE_CHARACTERISTICS["IMAGE_FILE_EXECUTABLE_IMAGE"] if ( (not self.is_dll()) and (not self.is_driver()) and (EXE_flag & self.FILE_HEADER.Characteristics) == EXE_flag ): return True return False def is_dll(self): """Check whether the file is a standard DLL. This will return true only if the image has the IMAGE_FILE_DLL flag set. """ DLL_flag = IMAGE_CHARACTERISTICS["IMAGE_FILE_DLL"] if (DLL_flag & self.FILE_HEADER.Characteristics) == DLL_flag: return True return False def is_driver(self): """Check whether the file is a Windows driver. This will return true only if there are reliable indicators of the image being a driver. """ # Checking that the ImageBase field of the OptionalHeader is above or # equal to 0x80000000 (that is, whether it lies in the upper 2GB of # the address space, normally belonging to the kernel) is not a # reliable enough indicator. For instance, PEs that play the invalid # ImageBase trick to get relocated could be incorrectly assumed to be # drivers. # This is not reliable either... # # if any((section.Characteristics & # SECTION_CHARACTERISTICS['IMAGE_SCN_MEM_NOT_PAGED']) for # section in self.sections ): # return True # If the import directory was not parsed (fast_load = True); do it now. if not hasattr(self, "DIRECTORY_ENTRY_IMPORT"): self.parse_data_directories( directories=[DIRECTORY_ENTRY["IMAGE_DIRECTORY_ENTRY_IMPORT"]] ) # If there's still no import directory (the PE doesn't have one or it's # malformed), give up. 
if not hasattr(self, "DIRECTORY_ENTRY_IMPORT"): return False # self.DIRECTORY_ENTRY_IMPORT will now exist, although it may be empty. # If it imports from "ntoskrnl.exe" or other kernel components it should # be a driver # system_DLLs = set( (b"ntoskrnl.exe", b"hal.dll", b"ndis.sys", b"bootvid.dll", b"kdcom.dll") ) if system_DLLs.intersection( [imp.dll.lower() for imp in self.DIRECTORY_ENTRY_IMPORT] ): return True driver_like_section_names = set((b"page", b"paged")) if driver_like_section_names.intersection( [section.Name.lower().rstrip(b"\x00") for section in self.sections] ) and ( self.OPTIONAL_HEADER.Subsystem in ( SUBSYSTEM_TYPE["IMAGE_SUBSYSTEM_NATIVE"], SUBSYSTEM_TYPE["IMAGE_SUBSYSTEM_NATIVE_WINDOWS"], ) ): return True return False def get_overlay_data_start_offset(self): """Get the offset of data appended to the file and not contained within the area described in the headers.""" largest_offset_and_size = (0, 0) def update_if_sum_is_larger_and_within_file( offset_and_size, file_size=len(self.__data__) ): if sum(offset_and_size) <= file_size and sum(offset_and_size) > sum( largest_offset_and_size ): return offset_and_size return largest_offset_and_size if hasattr(self, "OPTIONAL_HEADER"): largest_offset_and_size = update_if_sum_is_larger_and_within_file( ( self.OPTIONAL_HEADER.get_file_offset(), self.FILE_HEADER.SizeOfOptionalHeader, ) ) for section in self.sections: largest_offset_and_size = update_if_sum_is_larger_and_within_file( (section.PointerToRawData, section.SizeOfRawData) ) skip_directories = [DIRECTORY_ENTRY["IMAGE_DIRECTORY_ENTRY_SECURITY"]] for idx, directory in enumerate(self.OPTIONAL_HEADER.DATA_DIRECTORY): if idx in skip_directories: continue try: largest_offset_and_size = update_if_sum_is_larger_and_within_file( (self.get_offset_from_rva(directory.VirtualAddress), directory.Size) ) # Ignore directories with RVA out of file except PEFormatError: continue if len(self.__data__) > sum(largest_offset_and_size): return sum(largest_offset_and_size) return None def get_overlay(self): """Get the data appended to the file and not contained within the area described in the headers.""" overlay_data_offset = self.get_overlay_data_start_offset() if overlay_data_offset is not None: return self.__data__[overlay_data_offset:] return None def trim(self): """Return the just data defined by the PE headers, removing any overlaid data.""" overlay_data_offset = self.get_overlay_data_start_offset() if overlay_data_offset is not None: return self.__data__[:overlay_data_offset] return self.__data__[:] # According to http://corkami.blogspot.com/2010/01/parce-que-la-planche-aura-brule.html # if PointerToRawData is less that 0x200 it's rounded to zero. Loading the test file # in a debugger it's easy to verify that the PointerToRawData value of 1 is rounded # to zero. Hence we reproduce the behavior # # According to the document: # [ Microsoft Portable Executable and Common Object File Format Specification ] # "The alignment factor (in bytes) that is used to align the raw data of sections in # the image file. The value should be a power of 2 between 512 and 64 K, inclusive. # The default is 512. If the SectionAlignment is less than the architecture's page # size, then FileAlignment must match SectionAlignment." 
# # The following is a hard-coded constant if the Windows loader def adjust_FileAlignment(self, val, file_alignment): if file_alignment > FILE_ALIGNMENT_HARDCODED_VALUE: # If it's not a power of two, report it: if self.FileAlignment_Warning is False and not power_of_two(file_alignment): self.__warnings.append( "If FileAlignment > 0x200 it should be a power of 2. Value: %x" % (file_alignment) ) self.FileAlignment_Warning = True return cache_adjust_FileAlignment(val, file_alignment) # According to the document: # [ Microsoft Portable Executable and Common Object File Format Specification ] # "The alignment (in bytes) of sections when they are loaded into memory. It must be # greater than or equal to FileAlignment. The default is the page size for the # architecture." # def adjust_SectionAlignment(self, val, section_alignment, file_alignment): if file_alignment < FILE_ALIGNMENT_HARDCODED_VALUE: if ( file_alignment != section_alignment and self.SectionAlignment_Warning is False ): self.__warnings.append( "If FileAlignment(%x) < 0x200 it should equal SectionAlignment(%x)" % (file_alignment, section_alignment) ) self.SectionAlignment_Warning = True return cache_adjust_SectionAlignment(val, section_alignment, file_alignment)
(name=None, data=None, fast_load=None, max_symbol_exports=8192, max_repeated_symbol=120)
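Given the loader-mimicking logic above, a minimal usage sketch ties the pieces together (the path is a placeholder; any PE file on disk works the same way). All calls shown appear in the code above:

import pefile

pe = pefile.PE("C:/Windows/System32/kernel32.dll")  # hypothetical path

# Offsets into this buffer correspond to ImageBase + offset.
image = pe.get_memory_mapped_image()

# Read the first dword at the entry point via the RVA helpers.
ep = pe.OPTIONAL_HEADER.AddressOfEntryPoint
print(hex(pe.get_dword_at_rva(ep)))

pe.close()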
35,575
pefile
__exit__
null
def __exit__(self, type, value, traceback):
    self.close()
(self, type, value, traceback)
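Since __exit__ simply calls close(), PE instances can back a with-statement; a short sketch, assuming the matching __enter__ (returning self) that such a method implies:

import pefile

with open("sample.exe", "rb") as fh:       # hypothetical file
    with pefile.PE(data=fh.read()) as pe:  # close() runs on exit
        print(hex(pe.OPTIONAL_HEADER.ImageBase))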
35,576
pefile
__init__
null
def __init__(
    self,
    name=None,
    data=None,
    fast_load=None,
    max_symbol_exports=MAX_SYMBOL_EXPORT_COUNT,
    max_repeated_symbol=120,
):
    self.max_symbol_exports = max_symbol_exports
    self.max_repeated_symbol = max_repeated_symbol
    self._get_section_by_rva_last_used = None

    self.sections = []
    self.__warnings = []
    self.PE_TYPE = None

    if name is None and data is None:
        raise ValueError("Must supply either name or data")

    # This list will keep track of all the structures created.
    # That will allow for an easy iteration through the list
    # in order to save the modifications made
    self.__structures__ = []
    self.__from_file = None

    # We only want to print these warnings once
    self.FileAlignment_Warning = False
    self.SectionAlignment_Warning = False

    # Count of total resource entries across nested tables
    self.__total_resource_entries_count = 0
    # Sum of the size of all resource entries parsed, which should not
    # exceed the file size.
    self.__total_resource_bytes = 0
    # The number of imports parsed in this file
    self.__total_import_symbols = 0

    self.dynamic_relocation_format_by_symbol = {
        3: PE.__IMAGE_IMPORT_CONTROL_TRANSFER_DYNAMIC_RELOCATION_format__,
        4: PE.__IMAGE_INDIR_CONTROL_TRANSFER_DYNAMIC_RELOCATION_format__,
        5: PE.__IMAGE_SWITCHTABLE_BRANCH_DYNAMIC_RELOCATION_format__,
    }

    fast_load = fast_load if fast_load is not None else globals()["fast_load"]
    try:
        self.__parse__(name, data, fast_load)
    except:
        self.close()
        raise
(self, name=None, data=None, fast_load=None, max_symbol_exports=8192, max_repeated_symbol=120)
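A sketch of the two construction paths and the fast_load switch (the file name is a placeholder; full_load() is the deferred-parsing counterpart invoked at the end of __parse__ below):

import pefile

raw = open("sample.exe", "rb").read()     # hypothetical file
pe = pefile.PE(data=raw, fast_load=True)  # headers and sections only
pe.full_load()                            # parse data directories later
print(len(pe.sections))

Passing neither name nor data raises ValueError, as the constructor shows.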
35,577
pefile
__parse__
Parse a Portable Executable file. Loads a PE file, parsing all its structures and making them available through the instance's attributes.
def __parse__(self, fname, data, fast_load): """Parse a Portable Executable file. Loads a PE file, parsing all its structures and making them available through the instance's attributes. """ if fname is not None: stat = os.stat(fname) if stat.st_size == 0: raise PEFormatError("The file is empty") fd = None try: fd = open(fname, "rb") self.fileno = fd.fileno() if hasattr(mmap, "MAP_PRIVATE"): # Unix self.__data__ = mmap.mmap(self.fileno, 0, mmap.MAP_PRIVATE) else: # Windows self.__data__ = mmap.mmap(self.fileno, 0, access=mmap.ACCESS_READ) self.__from_file = True except IOError as excp: exception_msg = "{0}".format(excp) exception_msg = exception_msg and (": %s" % exception_msg) raise Exception( "Unable to access file '{0}'{1}".format(fname, exception_msg) ) finally: if fd is not None: fd.close() elif data is not None: self.__data__ = data self.__from_file = False # Resources should not overlap each other, so they should not exceed the # file size. self.__resource_size_limit_upperbounds = len(self.__data__) self.__resource_size_limit_reached = False if not fast_load: for byte, byte_count in Counter(bytearray(self.__data__)).items(): # Only report the cases where a byte makes up for more than 50% (if # zero) or 15% (if non-zero) of the file's contents. There are # legitimate PEs where 0x00 bytes are close to 50% of the whole # file's contents. if (byte == 0 and byte_count / len(self.__data__) > 0.5) or ( byte != 0 and byte_count / len(self.__data__) > 0.15 ): self.__warnings.append( ( "Byte 0x{0:02x} makes up {1:.4f}% of the file's contents." " This may indicate truncation / malformation." ).format(byte, 100.0 * byte_count / len(self.__data__)) ) dos_header_data = self.__data__[:64] if len(dos_header_data) != 64: raise PEFormatError( "Unable to read the DOS Header, possibly a truncated file." ) self.DOS_HEADER = self.__unpack_data__( self.__IMAGE_DOS_HEADER_format__, dos_header_data, file_offset=0 ) if self.DOS_HEADER.e_magic == IMAGE_DOSZM_SIGNATURE: raise PEFormatError("Probably a ZM Executable (not a PE file).") if not self.DOS_HEADER or self.DOS_HEADER.e_magic != IMAGE_DOS_SIGNATURE: raise PEFormatError("DOS Header magic not found.") # OC Patch: # Check for sane value in e_lfanew # if self.DOS_HEADER.e_lfanew > len(self.__data__): raise PEFormatError("Invalid e_lfanew value, probably not a PE file") nt_headers_offset = self.DOS_HEADER.e_lfanew self.NT_HEADERS = self.__unpack_data__( self.__IMAGE_NT_HEADERS_format__, self.__data__[nt_headers_offset : nt_headers_offset + 8], file_offset=nt_headers_offset, ) # We better check the signature right here, before the file screws # around with sections: # OC Patch: # Some malware will cause the Signature value to not exist at all if not self.NT_HEADERS or not self.NT_HEADERS.Signature: raise PEFormatError("NT Headers not found.") if (0xFFFF & self.NT_HEADERS.Signature) == IMAGE_NE_SIGNATURE: raise PEFormatError("Invalid NT Headers signature. Probably a NE file") if (0xFFFF & self.NT_HEADERS.Signature) == IMAGE_LE_SIGNATURE: raise PEFormatError("Invalid NT Headers signature. Probably a LE file") if (0xFFFF & self.NT_HEADERS.Signature) == IMAGE_LX_SIGNATURE: raise PEFormatError("Invalid NT Headers signature. Probably a LX file") if (0xFFFF & self.NT_HEADERS.Signature) == IMAGE_TE_SIGNATURE: raise PEFormatError("Invalid NT Headers signature. 
Probably a TE file") if self.NT_HEADERS.Signature != IMAGE_NT_SIGNATURE: raise PEFormatError("Invalid NT Headers signature.") self.FILE_HEADER = self.__unpack_data__( self.__IMAGE_FILE_HEADER_format__, self.__data__[nt_headers_offset + 4 : nt_headers_offset + 4 + 32], file_offset=nt_headers_offset + 4, ) image_flags = retrieve_flags(IMAGE_CHARACTERISTICS, "IMAGE_FILE_") if not self.FILE_HEADER: raise PEFormatError("File Header missing") # Set the image's flags according the the Characteristics member set_flags(self.FILE_HEADER, self.FILE_HEADER.Characteristics, image_flags) optional_header_offset = nt_headers_offset + 4 + self.FILE_HEADER.sizeof() # Note: location of sections can be controlled from PE header: sections_offset = optional_header_offset + self.FILE_HEADER.SizeOfOptionalHeader self.OPTIONAL_HEADER = self.__unpack_data__( self.__IMAGE_OPTIONAL_HEADER_format__, # Read up to 256 bytes to allow creating a copy of too much data self.__data__[optional_header_offset : optional_header_offset + 256], file_offset=optional_header_offset, ) # According to solardesigner's findings for his # Tiny PE project, the optional header does not # need fields beyond "Subsystem" in order to be # loadable by the Windows loader (given that zeros # are acceptable values and the header is loaded # in a zeroed memory page) # If trying to parse a full Optional Header fails # we try to parse it again with some 0 padding # MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE = 69 if ( self.OPTIONAL_HEADER is None and len( self.__data__[optional_header_offset : optional_header_offset + 0x200] ) >= MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE ): # Add enough zeros to make up for the unused fields # padding_length = 128 # Create padding # padded_data = self.__data__[ optional_header_offset : optional_header_offset + 0x200 ] + (b"\0" * padding_length) self.OPTIONAL_HEADER = self.__unpack_data__( self.__IMAGE_OPTIONAL_HEADER_format__, padded_data, file_offset=optional_header_offset, ) # Check the Magic in the OPTIONAL_HEADER and set the PE file # type accordingly # if self.OPTIONAL_HEADER is not None: if self.OPTIONAL_HEADER.Magic == OPTIONAL_HEADER_MAGIC_PE: self.PE_TYPE = OPTIONAL_HEADER_MAGIC_PE elif self.OPTIONAL_HEADER.Magic == OPTIONAL_HEADER_MAGIC_PE_PLUS: self.PE_TYPE = OPTIONAL_HEADER_MAGIC_PE_PLUS self.OPTIONAL_HEADER = self.__unpack_data__( self.__IMAGE_OPTIONAL_HEADER64_format__, self.__data__[ optional_header_offset : optional_header_offset + 0x200 ], file_offset=optional_header_offset, ) # Again, as explained above, we try to parse # a reduced form of the Optional Header which # is still valid despite not including all # structure members # MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE = 69 + 4 if ( self.OPTIONAL_HEADER is None and len( self.__data__[ optional_header_offset : optional_header_offset + 0x200 ] ) >= MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE ): padding_length = 128 padded_data = self.__data__[ optional_header_offset : optional_header_offset + 0x200 ] + (b"\0" * padding_length) self.OPTIONAL_HEADER = self.__unpack_data__( self.__IMAGE_OPTIONAL_HEADER64_format__, padded_data, file_offset=optional_header_offset, ) if not self.FILE_HEADER: raise PEFormatError("File Header missing") # OC Patch: # Die gracefully if there is no OPTIONAL_HEADER field # 975440f5ad5e2e4a92c4d9a5f22f75c1 if self.OPTIONAL_HEADER is None: raise PEFormatError("No Optional Header found, invalid PE32 or PE32+ file.") if self.PE_TYPE is None: self.__warnings.append( "Invalid type 0x{0:04x} in Optional Header.".format( self.OPTIONAL_HEADER.Magic ) ) 
dll_characteristics_flags = retrieve_flags( DLL_CHARACTERISTICS, "IMAGE_DLLCHARACTERISTICS_" ) # Set the Dll Characteristics flags according the the DllCharacteristics member set_flags( self.OPTIONAL_HEADER, self.OPTIONAL_HEADER.DllCharacteristics, dll_characteristics_flags, ) self.OPTIONAL_HEADER.DATA_DIRECTORY = [] # offset = (optional_header_offset + self.FILE_HEADER.SizeOfOptionalHeader) offset = optional_header_offset + self.OPTIONAL_HEADER.sizeof() self.NT_HEADERS.FILE_HEADER = self.FILE_HEADER self.NT_HEADERS.OPTIONAL_HEADER = self.OPTIONAL_HEADER # Windows 8 specific check # if ( self.OPTIONAL_HEADER.AddressOfEntryPoint < self.OPTIONAL_HEADER.SizeOfHeaders ): self.__warnings.append( "SizeOfHeaders is smaller than AddressOfEntryPoint: this file " "cannot run under Windows 8." ) # The NumberOfRvaAndSizes is sanitized to stay within # reasonable limits so can be casted to an int # if self.OPTIONAL_HEADER.NumberOfRvaAndSizes > 0x10: self.__warnings.append( "Suspicious NumberOfRvaAndSizes in the Optional Header. " "Normal values are never larger than 0x10, the value is: 0x%x" % self.OPTIONAL_HEADER.NumberOfRvaAndSizes ) MAX_ASSUMED_VALID_NUMBER_OF_RVA_AND_SIZES = 0x100 for i in range(int(0x7FFFFFFF & self.OPTIONAL_HEADER.NumberOfRvaAndSizes)): if len(self.__data__) - offset == 0: break if len(self.__data__) - offset < 8: data = self.__data__[offset:] + b"\0" * 8 else: data = self.__data__[ offset : offset + MAX_ASSUMED_VALID_NUMBER_OF_RVA_AND_SIZES ] dir_entry = self.__unpack_data__( self.__IMAGE_DATA_DIRECTORY_format__, data, file_offset=offset ) if dir_entry is None: break # Would fail if missing an entry # 1d4937b2fa4d84ad1bce0309857e70ca offending sample try: dir_entry.name = DIRECTORY_ENTRY[i] except (KeyError, AttributeError): break offset += dir_entry.sizeof() self.OPTIONAL_HEADER.DATA_DIRECTORY.append(dir_entry) # If the offset goes outside the optional header, # the loop is broken, regardless of how many directories # NumberOfRvaAndSizes says there are # # We assume a normally sized optional header, hence that we do # a sizeof() instead of reading SizeOfOptionalHeader. # Then we add a default number of directories times their size, # if we go beyond that, we assume the number of directories # is wrong and stop processing if offset >= ( optional_header_offset + self.OPTIONAL_HEADER.sizeof() + 8 * 16 ): break offset = self.parse_sections(sections_offset) # OC Patch: # There could be a problem if there are no raw data sections # greater than 0 # fc91013eb72529da005110a3403541b6 example # Should this throw an exception in the minimum header offset # can't be found? # rawDataPointers = [ self.adjust_FileAlignment( s.PointerToRawData, self.OPTIONAL_HEADER.FileAlignment ) for s in self.sections if s.PointerToRawData > 0 ] if len(rawDataPointers) > 0: lowest_section_offset = min(rawDataPointers) else: lowest_section_offset = None if not lowest_section_offset or lowest_section_offset < offset: self.header = self.__data__[:offset] else: self.header = self.__data__[:lowest_section_offset] # Check whether the entry point lies within a section # if ( self.get_section_by_rva(self.OPTIONAL_HEADER.AddressOfEntryPoint) is not None ): # Check whether the entry point lies within the file # ep_offset = self.get_offset_from_rva( self.OPTIONAL_HEADER.AddressOfEntryPoint ) if ep_offset > len(self.__data__): self.__warnings.append( "Possibly corrupt file. AddressOfEntryPoint lies outside the" " file. 
AddressOfEntryPoint: 0x%x" % self.OPTIONAL_HEADER.AddressOfEntryPoint ) else: self.__warnings.append( "AddressOfEntryPoint lies outside the sections' boundaries. " "AddressOfEntryPoint: 0x%x" % self.OPTIONAL_HEADER.AddressOfEntryPoint ) if not fast_load: self.full_load()
(self, fname, data, fast_load)
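The early signature checks are easy to exercise; a sketch feeding __parse__ (via the constructor) a buffer that carries a DOS header but no PE\0\0 signature:

import pefile

try:
    pefile.PE(data=b"MZ" + b"\x00" * 62)  # 64-byte DOS stub, e_lfanew = 0
except pefile.PEFormatError as exc:
    print("rejected:", exc)               # invalid NT Headers signature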
35,578
pefile
__str__
null
def __str__(self):
    return self.dump_info()
(self)
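Hence printing a PE object renders the same human-readable report as dump_info(); a sketch with a placeholder file:

import pefile

pe = pefile.PE("sample.exe")              # hypothetical file
print(str(pe).splitlines()[0])            # first line of the dump_info() report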
35,579
pefile
__unpack_data__
Apply structure format to raw data. Returns an unpacked structure object if successful, None otherwise.
def __unpack_data__(self, format, data, file_offset):
    """Apply structure format to raw data.

    Returns an unpacked structure object if successful, None otherwise.
    """
    structure = Structure(format, file_offset=file_offset)
    try:
        structure.__unpack__(data)
    except PEFormatError as err:
        self.__warnings.append(
            'Corrupt header "{0}" at file offset {1}. Exception: {2}'.format(
                format[0], file_offset, err
            )
        )
        return None
    self.__structures__.append(structure)
    return structure
(self, format, data, file_offset)
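The Structure machinery this wraps can also be driven directly; a sketch with a reduced, hypothetical format tuple in pefile's (name, fields) style:

from pefile import Structure

fmt = ("IMAGE_DOS_HEADER_MINI", ("H,e_magic", "H,e_cblp"))  # illustrative only
st = Structure(fmt, file_offset=0)
st.__unpack__(b"MZ\x90\x00")
print(hex(st.e_magic))                    # 0x5a4d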
35,580
pefile
__unpack_data_with_bitfields__
Apply structure format to raw data. Returns an unpacked structure object if successful, None otherwise.
def __unpack_data_with_bitfields__(self, format, data, file_offset):
    """Apply structure format to raw data.

    Returns an unpacked structure object if successful, None otherwise.
    """
    structure = StructureWithBitfields(format, file_offset=file_offset)
    try:
        structure.__unpack__(data)
    except PEFormatError as err:
        self.__warnings.append(
            'Corrupt header "{0}" at file offset {1}. Exception: {2}'.format(
                format[0], file_offset, err
            )
        )
        return None
    self.__structures__.append(structure)
    return structure
(self, format, data, file_offset)
35,581
pefile
adjust_FileAlignment
null
def adjust_FileAlignment(self, val, file_alignment): if file_alignment > FILE_ALIGNMENT_HARDCODED_VALUE: # If it's not a power of two, report it: if self.FileAlignment_Warning is False and not power_of_two(file_alignment): self.__warnings.append( "If FileAlignment > 0x200 it should be a power of 2. Value: %x" % (file_alignment) ) self.FileAlignment_Warning = True return cache_adjust_FileAlignment(val, file_alignment)
(self, val, file_alignment)
35,582
pefile
adjust_SectionAlignment
null
def adjust_SectionAlignment(self, val, section_alignment, file_alignment): if file_alignment < FILE_ALIGNMENT_HARDCODED_VALUE: if ( file_alignment != section_alignment and self.SectionAlignment_Warning is False ): self.__warnings.append( "If FileAlignment(%x) < 0x200 it should equal SectionAlignment(%x)" % (file_alignment, section_alignment) ) self.SectionAlignment_Warning = True return cache_adjust_SectionAlignment(val, section_alignment, file_alignment)
(self, val, section_alignment, file_alignment)
35,583
pefile
close
null
def close(self): if ( self.__from_file is True and hasattr(self, "__data__") and ( (isinstance(mmap.mmap, type) and isinstance(self.__data__, mmap.mmap)) or "mmap.mmap" in repr(type(self.__data__)) ) ): self.__data__.close() del self.__data__
(self)
35,584
pefile
dump_dict
Dump all the PE header information into a dictionary.
def dump_dict(self): """Dump all the PE header information into a dictionary.""" dump_dict = {} warnings = self.get_warnings() if warnings: dump_dict["Parsing Warnings"] = warnings dump_dict["DOS_HEADER"] = self.DOS_HEADER.dump_dict() dump_dict["NT_HEADERS"] = self.NT_HEADERS.dump_dict() dump_dict["FILE_HEADER"] = self.FILE_HEADER.dump_dict() image_flags = retrieve_flags(IMAGE_CHARACTERISTICS, "IMAGE_FILE_") dump_dict["Flags"] = [] for flag in image_flags: if getattr(self.FILE_HEADER, flag[0]): dump_dict["Flags"].append(flag[0]) if hasattr(self, "OPTIONAL_HEADER") and self.OPTIONAL_HEADER is not None: dump_dict["OPTIONAL_HEADER"] = self.OPTIONAL_HEADER.dump_dict() dll_characteristics_flags = retrieve_flags( DLL_CHARACTERISTICS, "IMAGE_DLLCHARACTERISTICS_" ) dump_dict["DllCharacteristics"] = [] for flag in dll_characteristics_flags: if getattr(self.OPTIONAL_HEADER, flag[0]): dump_dict["DllCharacteristics"].append(flag[0]) dump_dict["PE Sections"] = [] section_flags = retrieve_flags(SECTION_CHARACTERISTICS, "IMAGE_SCN_") for section in self.sections: section_dict = section.dump_dict() dump_dict["PE Sections"].append(section_dict) section_dict["Flags"] = [] for flag in section_flags: if getattr(section, flag[0]): section_dict["Flags"].append(flag[0]) section_dict["Entropy"] = section.get_entropy() if md5 is not None: section_dict["MD5"] = section.get_hash_md5() if sha1 is not None: section_dict["SHA1"] = section.get_hash_sha1() if sha256 is not None: section_dict["SHA256"] = section.get_hash_sha256() if sha512 is not None: section_dict["SHA512"] = section.get_hash_sha512() if hasattr(self, "OPTIONAL_HEADER") and hasattr( self.OPTIONAL_HEADER, "DATA_DIRECTORY" ): dump_dict["Directories"] = [] for idx, directory in enumerate(self.OPTIONAL_HEADER.DATA_DIRECTORY): if directory is not None: dump_dict["Directories"].append(directory.dump_dict()) if hasattr(self, "VS_VERSIONINFO"): dump_dict["Version Information"] = [] for idx, vs_vinfo in enumerate(self.VS_VERSIONINFO): version_info_list = [] version_info_list.append(vs_vinfo.dump_dict()) if hasattr(self, "VS_FIXEDFILEINFO"): version_info_list.append(self.VS_FIXEDFILEINFO[idx].dump_dict()) if hasattr(self, "FileInfo") and len(self.FileInfo) > idx: fileinfo_list = [] version_info_list.append(fileinfo_list) for entry in self.FileInfo[idx]: fileinfo_list.append(entry.dump_dict()) if hasattr(entry, "StringTable"): stringtable_dict = {} for st_entry in entry.StringTable: fileinfo_list.extend(st_entry.dump_dict()) stringtable_dict["LangID"] = st_entry.LangID for str_entry in list(st_entry.entries.items()): stringtable_dict[str_entry[0]] = str_entry[1] fileinfo_list.append(stringtable_dict) elif hasattr(entry, "Var"): for var_entry in entry.Var: var_dict = {} if hasattr(var_entry, "entry"): fileinfo_list.extend(var_entry.dump_dict()) var_dict[list(var_entry.entry.keys())[0]] = list( var_entry.entry.values() )[0] fileinfo_list.append(var_dict) dump_dict["Version Information"].append(version_info_list) if hasattr(self, "DIRECTORY_ENTRY_EXPORT"): dump_dict["Exported symbols"] = [] dump_dict["Exported symbols"].append( self.DIRECTORY_ENTRY_EXPORT.struct.dump_dict() ) for export in self.DIRECTORY_ENTRY_EXPORT.symbols: export_dict = {} if export.address is not None: export_dict.update( { "Ordinal": export.ordinal, "RVA": export.address, "Name": export.name, } ) if export.forwarder: export_dict["forwarder"] = export.forwarder dump_dict["Exported symbols"].append(export_dict) if hasattr(self, "DIRECTORY_ENTRY_IMPORT"): dump_dict["Imported symbols"] = [] for 
module in self.DIRECTORY_ENTRY_IMPORT: import_list = [] dump_dict["Imported symbols"].append(import_list) import_list.append(module.struct.dump_dict()) for symbol in module.imports: symbol_dict = {} if symbol.import_by_ordinal is True: symbol_dict["DLL"] = module.dll symbol_dict["Ordinal"] = symbol.ordinal else: symbol_dict["DLL"] = module.dll symbol_dict["Name"] = symbol.name symbol_dict["Hint"] = symbol.hint if symbol.bound: symbol_dict["Bound"] = symbol.bound import_list.append(symbol_dict) if hasattr(self, "DIRECTORY_ENTRY_BOUND_IMPORT"): dump_dict["Bound imports"] = [] for bound_imp_desc in self.DIRECTORY_ENTRY_BOUND_IMPORT: bound_imp_desc_dict = {} dump_dict["Bound imports"].append(bound_imp_desc_dict) bound_imp_desc_dict.update(bound_imp_desc.struct.dump_dict()) bound_imp_desc_dict["DLL"] = bound_imp_desc.name for bound_imp_ref in bound_imp_desc.entries: bound_imp_ref_dict = {} bound_imp_ref_dict.update(bound_imp_ref.struct.dump_dict()) bound_imp_ref_dict["DLL"] = bound_imp_ref.name if hasattr(self, "DIRECTORY_ENTRY_DELAY_IMPORT"): dump_dict["Delay Imported symbols"] = [] for module in self.DIRECTORY_ENTRY_DELAY_IMPORT: module_list = [] dump_dict["Delay Imported symbols"].append(module_list) module_list.append(module.struct.dump_dict()) for symbol in module.imports: symbol_dict = {} if symbol.import_by_ordinal is True: symbol_dict["DLL"] = module.dll symbol_dict["Ordinal"] = symbol.ordinal else: symbol_dict["DLL"] = module.dll symbol_dict["Name"] = symbol.name symbol_dict["Hint"] = symbol.hint if symbol.bound: symbol_dict["Bound"] = symbol.bound module_list.append(symbol_dict) if hasattr(self, "DIRECTORY_ENTRY_RESOURCE"): dump_dict["Resource directory"] = [] dump_dict["Resource directory"].append( self.DIRECTORY_ENTRY_RESOURCE.struct.dump_dict() ) for res_type in self.DIRECTORY_ENTRY_RESOURCE.entries: resource_type_dict = {} if res_type.name is not None: resource_type_dict["Name"] = res_type.name else: resource_type_dict["Id"] = ( res_type.struct.Id, RESOURCE_TYPE.get(res_type.struct.Id, "-"), ) resource_type_dict.update(res_type.struct.dump_dict()) dump_dict["Resource directory"].append(resource_type_dict) if hasattr(res_type, "directory"): directory_list = [] directory_list.append(res_type.directory.struct.dump_dict()) dump_dict["Resource directory"].append(directory_list) for resource_id in res_type.directory.entries: resource_id_dict = {} if resource_id.name is not None: resource_id_dict["Name"] = resource_id.name else: resource_id_dict["Id"] = resource_id.struct.Id resource_id_dict.update(resource_id.struct.dump_dict()) directory_list.append(resource_id_dict) if hasattr(resource_id, "directory"): resource_id_list = [] resource_id_list.append( resource_id.directory.struct.dump_dict() ) directory_list.append(resource_id_list) for resource_lang in resource_id.directory.entries: if hasattr(resource_lang, "data"): resource_lang_dict = {} resource_lang_dict["LANG"] = resource_lang.data.lang resource_lang_dict[ "SUBLANG" ] = resource_lang.data.sublang resource_lang_dict["LANG_NAME"] = LANG.get( resource_lang.data.lang, "*unknown*" ) resource_lang_dict[ "SUBLANG_NAME" ] = get_sublang_name_for_lang( resource_lang.data.lang, resource_lang.data.sublang, ) resource_lang_dict.update( resource_lang.struct.dump_dict() ) resource_lang_dict.update( resource_lang.data.struct.dump_dict() ) resource_id_list.append(resource_lang_dict) if ( hasattr(resource_id.directory, "strings") and resource_id.directory.strings ): for idx, res_string in list( resource_id.directory.strings.items() ): 
resource_id_list.append( res_string.encode( "unicode-escape", "backslashreplace" ).decode("ascii") ) if ( hasattr(self, "DIRECTORY_ENTRY_TLS") and self.DIRECTORY_ENTRY_TLS and self.DIRECTORY_ENTRY_TLS.struct ): dump_dict["TLS"] = self.DIRECTORY_ENTRY_TLS.struct.dump_dict() if ( hasattr(self, "DIRECTORY_ENTRY_LOAD_CONFIG") and self.DIRECTORY_ENTRY_LOAD_CONFIG and self.DIRECTORY_ENTRY_LOAD_CONFIG.struct ): dump_dict[ "LOAD_CONFIG" ] = self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.dump_dict() if hasattr(self, "DIRECTORY_ENTRY_DEBUG"): dump_dict["Debug information"] = [] for dbg in self.DIRECTORY_ENTRY_DEBUG: dbg_dict = {} dump_dict["Debug information"].append(dbg_dict) dbg_dict.update(dbg.struct.dump_dict()) dbg_dict["Type"] = DEBUG_TYPE.get(dbg.struct.Type, dbg.struct.Type) if self.has_relocs(): dump_dict["Base relocations"] = [] for base_reloc in self.DIRECTORY_ENTRY_BASERELOC: base_reloc_list = [] dump_dict["Base relocations"].append(base_reloc_list) base_reloc_list.append(base_reloc.struct.dump_dict()) for reloc in base_reloc.entries: reloc_dict = {} base_reloc_list.append(reloc_dict) reloc_dict["RVA"] = reloc.rva try: reloc_dict["Type"] = RELOCATION_TYPE[reloc.type][16:] except KeyError: reloc_dict["Type"] = reloc.type return dump_dict
(self)
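A minimal sketch of consuming dump_dict(); note that some values are bytes objects, so JSON serialization needs a fallback encoder (the file name is illustrative):

    import json
    import pefile

    pe = pefile.PE("sample.exe")
    info = pe.dump_dict()
    # bytes values (names, version strings) are not JSON-serializable by default
    print(json.dumps(info, default=repr, indent=2)[:400])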
35,585
pefile
dump_info
Dump all the PE header information into a human-readable string.
def dump_info(self, dump=None, encoding="ascii"): """Dump all the PE header information into human readable string.""" if dump is None: dump = Dump() warnings = self.get_warnings() if warnings: dump.add_header("Parsing Warnings") for warning in warnings: dump.add_line(warning) dump.add_newline() dump.add_header("DOS_HEADER") dump.add_lines(self.DOS_HEADER.dump()) dump.add_newline() dump.add_header("NT_HEADERS") dump.add_lines(self.NT_HEADERS.dump()) dump.add_newline() dump.add_header("FILE_HEADER") dump.add_lines(self.FILE_HEADER.dump()) image_flags = retrieve_flags(IMAGE_CHARACTERISTICS, "IMAGE_FILE_") dump.add("Flags: ") flags = [] for flag in sorted(image_flags): if getattr(self.FILE_HEADER, flag[0]): flags.append(flag[0]) dump.add_line(", ".join(flags)) dump.add_newline() if hasattr(self, "OPTIONAL_HEADER") and self.OPTIONAL_HEADER is not None: dump.add_header("OPTIONAL_HEADER") dump.add_lines(self.OPTIONAL_HEADER.dump()) dll_characteristics_flags = retrieve_flags( DLL_CHARACTERISTICS, "IMAGE_DLLCHARACTERISTICS_" ) dump.add("DllCharacteristics: ") flags = [] for flag in sorted(dll_characteristics_flags): if getattr(self.OPTIONAL_HEADER, flag[0]): flags.append(flag[0]) dump.add_line(", ".join(flags)) dump.add_newline() dump.add_header("PE Sections") section_flags = retrieve_flags(SECTION_CHARACTERISTICS, "IMAGE_SCN_") for section in self.sections: dump.add_lines(section.dump()) dump.add("Flags: ") flags = [] for flag in sorted(section_flags): if getattr(section, flag[0]): flags.append(flag[0]) dump.add_line(", ".join(flags)) dump.add_line( "Entropy: {0:f} (Min=0.0, Max=8.0)".format(section.get_entropy()) ) if md5 is not None: dump.add_line("MD5 hash: {0}".format(section.get_hash_md5())) if sha1 is not None: dump.add_line("SHA-1 hash: %s" % section.get_hash_sha1()) if sha256 is not None: dump.add_line("SHA-256 hash: %s" % section.get_hash_sha256()) if sha512 is not None: dump.add_line("SHA-512 hash: %s" % section.get_hash_sha512()) dump.add_newline() if hasattr(self, "OPTIONAL_HEADER") and hasattr( self.OPTIONAL_HEADER, "DATA_DIRECTORY" ): dump.add_header("Directories") for directory in self.OPTIONAL_HEADER.DATA_DIRECTORY: if directory is not None: dump.add_lines(directory.dump()) dump.add_newline() if hasattr(self, "VS_VERSIONINFO"): for idx, vinfo_entry in enumerate(self.VS_VERSIONINFO): if len(self.VS_VERSIONINFO) > 1: dump.add_header(f"Version Information {idx + 1}") else: dump.add_header("Version Information") if vinfo_entry is not None: dump.add_lines(vinfo_entry.dump()) dump.add_newline() if hasattr(self, "VS_FIXEDFILEINFO"): dump.add_lines(self.VS_FIXEDFILEINFO[idx].dump()) dump.add_newline() if hasattr(self, "FileInfo") and len(self.FileInfo) > idx: for entry in self.FileInfo[idx]: dump.add_lines(entry.dump()) dump.add_newline() if hasattr(entry, "StringTable"): for st_entry in entry.StringTable: [dump.add_line(" " + line) for line in st_entry.dump()] dump.add_line( " LangID: {0}".format( st_entry.LangID.decode( encoding, "backslashreplace_" ) ) ) dump.add_newline() for str_entry in sorted(list(st_entry.entries.items())): # try: dump.add_line( " {0}: {1}".format( str_entry[0].decode( encoding, "backslashreplace_" ), str_entry[1].decode( encoding, "backslashreplace_" ), ) ) dump.add_newline() elif hasattr(entry, "Var"): for var_entry in entry.Var: if hasattr(var_entry, "entry"): [ dump.add_line(" " + line) for line in var_entry.dump() ] dump.add_line( " {0}: {1}".format( list(var_entry.entry.keys())[0].decode( "utf-8", "backslashreplace_" ), list(var_entry.entry.values())[0], ) ) 
dump.add_newline() if hasattr(self, "DIRECTORY_ENTRY_EXPORT"): dump.add_header("Exported symbols") dump.add_lines(self.DIRECTORY_ENTRY_EXPORT.struct.dump()) dump.add_newline() dump.add_line("%-10s %-10s %s" % ("Ordinal", "RVA", "Name")) for export in self.DIRECTORY_ENTRY_EXPORT.symbols: if export.address is not None: name = b("None") if export.name: name = export.name dump.add( "%-10d 0x%08X %s" % (export.ordinal, export.address, name.decode(encoding)) ) if export.forwarder: dump.add_line( " forwarder: {0}".format( export.forwarder.decode(encoding, "backslashreplace_") ) ) else: dump.add_newline() dump.add_newline() if hasattr(self, "DIRECTORY_ENTRY_IMPORT"): dump.add_header("Imported symbols") for module in self.DIRECTORY_ENTRY_IMPORT: dump.add_lines(module.struct.dump()) # Print the name of the DLL if there are no imports. if not module.imports: dump.add( " Name -> {0}".format( self.get_string_at_rva(module.struct.Name).decode( encoding, "backslashreplace_" ) ) ) dump.add_newline() dump.add_newline() for symbol in module.imports: if symbol.import_by_ordinal is True: if symbol.name is not None: dump.add( "{0}.{1} Ordinal[{2}] (Imported by Ordinal)".format( module.dll.decode("utf-8"), symbol.name.decode("utf-8"), symbol.ordinal, ) ) else: dump.add( "{0} Ordinal[{1}] (Imported by Ordinal)".format( module.dll.decode("utf-8"), symbol.ordinal ) ) else: dump.add( "{0}.{1} Hint[{2:d}]".format( module.dll.decode(encoding, "backslashreplace_"), symbol.name.decode(encoding, "backslashreplace_"), symbol.hint, ) ) if symbol.bound: dump.add_line(" Bound: 0x{0:08X}".format(symbol.bound)) else: dump.add_newline() dump.add_newline() if hasattr(self, "DIRECTORY_ENTRY_BOUND_IMPORT"): dump.add_header("Bound imports") for bound_imp_desc in self.DIRECTORY_ENTRY_BOUND_IMPORT: dump.add_lines(bound_imp_desc.struct.dump()) dump.add_line( "DLL: {0}".format( bound_imp_desc.name.decode(encoding, "backslashreplace_") ) ) dump.add_newline() for bound_imp_ref in bound_imp_desc.entries: dump.add_lines(bound_imp_ref.struct.dump(), 4) dump.add_line( "DLL: {0}".format( bound_imp_ref.name.decode(encoding, "backslashreplace_") ), 4, ) dump.add_newline() if hasattr(self, "DIRECTORY_ENTRY_DELAY_IMPORT"): dump.add_header("Delay Imported symbols") for module in self.DIRECTORY_ENTRY_DELAY_IMPORT: dump.add_lines(module.struct.dump()) dump.add_newline() for symbol in module.imports: if symbol.import_by_ordinal is True: dump.add( "{0} Ordinal[{1:d}] (Imported by Ordinal)".format( module.dll.decode(encoding, "backslashreplace_"), symbol.ordinal, ) ) else: dump.add( "{0}.{1} Hint[{2}]".format( module.dll.decode(encoding, "backslashreplace_"), symbol.name.decode(encoding, "backslashreplace_"), symbol.hint, ) ) if symbol.bound: dump.add_line(" Bound: 0x{0:08X}".format(symbol.bound)) else: dump.add_newline() dump.add_newline() if hasattr(self, "DIRECTORY_ENTRY_RESOURCE"): dump.add_header("Resource directory") dump.add_lines(self.DIRECTORY_ENTRY_RESOURCE.struct.dump()) for res_type in self.DIRECTORY_ENTRY_RESOURCE.entries: if res_type.name is not None: name = res_type.name.decode(encoding, "backslashreplace_") dump.add_line( f"Name: [{name}]", 2, ) else: res_type_id = RESOURCE_TYPE.get(res_type.struct.Id, "-") dump.add_line( f"Id: [0x{res_type.struct.Id:X}] ({res_type_id})", 2, ) dump.add_lines(res_type.struct.dump(), 2) if hasattr(res_type, "directory"): dump.add_lines(res_type.directory.struct.dump(), 4) for resource_id in res_type.directory.entries: if resource_id.name is not None: name = resource_id.name.decode("utf-8", 
"backslashreplace_") dump.add_line( f"Name: [{name}]", 6, ) else: dump.add_line(f"Id: [0x{resource_id.struct.Id:X}]", 6) dump.add_lines(resource_id.struct.dump(), 6) if hasattr(resource_id, "directory"): dump.add_lines(resource_id.directory.struct.dump(), 8) for resource_lang in resource_id.directory.entries: if hasattr(resource_lang, "data"): dump.add_line( "\\--- LANG [%d,%d][%s,%s]" % ( resource_lang.data.lang, resource_lang.data.sublang, LANG.get( resource_lang.data.lang, "*unknown*" ), get_sublang_name_for_lang( resource_lang.data.lang, resource_lang.data.sublang, ), ), 8, ) dump.add_lines(resource_lang.struct.dump(), 10) dump.add_lines(resource_lang.data.struct.dump(), 12) if ( hasattr(resource_id.directory, "strings") and resource_id.directory.strings ): dump.add_line("[STRINGS]", 10) for idx, res_string in list( sorted(resource_id.directory.strings.items()) ): dump.add_line( "{0:6d}: {1}".format( idx, res_string.encode( "unicode-escape", "backslashreplace" ).decode("ascii"), ), 12, ) dump.add_newline() dump.add_newline() if ( hasattr(self, "DIRECTORY_ENTRY_TLS") and self.DIRECTORY_ENTRY_TLS and self.DIRECTORY_ENTRY_TLS.struct ): dump.add_header("TLS") dump.add_lines(self.DIRECTORY_ENTRY_TLS.struct.dump()) dump.add_newline() if ( hasattr(self, "DIRECTORY_ENTRY_LOAD_CONFIG") and self.DIRECTORY_ENTRY_LOAD_CONFIG and self.DIRECTORY_ENTRY_LOAD_CONFIG.struct ): dump.add_header("LOAD_CONFIG") dump.add_lines(self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.dump()) dump.add_newline() if hasattr(self, "DIRECTORY_ENTRY_DEBUG"): dump.add_header("Debug information") for dbg in self.DIRECTORY_ENTRY_DEBUG: dump.add_lines(dbg.struct.dump()) try: dump.add_line("Type: " + DEBUG_TYPE[dbg.struct.Type]) except KeyError: dump.add_line("Type: 0x{0:x}(Unknown)".format(dbg.struct.Type)) dump.add_newline() if dbg.entry: dump.add_lines(dbg.entry.dump(), 4) dump.add_newline() if self.has_relocs(): dump.add_header("Base relocations") for base_reloc in self.DIRECTORY_ENTRY_BASERELOC: dump.add_lines(base_reloc.struct.dump()) for reloc in base_reloc.entries: try: dump.add_line( "%08Xh %s" % (reloc.rva, RELOCATION_TYPE[reloc.type][16:]), 4, ) except KeyError: dump.add_line( "0x%08X 0x%x(Unknown)" % (reloc.rva, reloc.type), 4 ) dump.add_newline() if ( hasattr(self, "DIRECTORY_ENTRY_EXCEPTION") and len(self.DIRECTORY_ENTRY_EXCEPTION) > 0 ): dump.add_header("Unwind data for exception handling") for rf in self.DIRECTORY_ENTRY_EXCEPTION: dump.add_lines(rf.struct.dump()) if hasattr(rf, "unwindinfo") and rf.unwindinfo is not None: dump.add_lines(rf.unwindinfo.dump(), 4) return dump.get_text()
(self, dump=None, encoding='ascii')
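Typical usage is simply to print or save the rendered report (the path is illustrative):

    import pefile

    pe = pefile.PE("sample.exe")
    report = pe.dump_info()  # same text returned by str(pe)
    with open("report.txt", "w", encoding="ascii", errors="replace") as f:
        f.write(report)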
35,586
pefile
dword_align
null
def dword_align(self, offset, base): return ((offset + base + 3) & 0xFFFFFFFC) - (base & 0xFFFFFFFC)
(self, offset, base)
35,587
pefile
full_load
Process the data directories. This method will load the data directories which might not have been loaded if the "fast_load" option was used.
def full_load(self): """Process the data directories. This method will load the data directories which might not have been loaded if the "fast_load" option was used. """ self.parse_data_directories() class RichHeader: pass rich_header = self.parse_rich_header() if rich_header: self.RICH_HEADER = RichHeader() self.RICH_HEADER.checksum = rich_header.get("checksum", None) self.RICH_HEADER.values = rich_header.get("values", None) self.RICH_HEADER.key = rich_header.get("key", None) self.RICH_HEADER.raw_data = rich_header.get("raw_data", None) self.RICH_HEADER.clear_data = rich_header.get("clear_data", None) else: self.RICH_HEADER = None
(self)
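A sketch of the two-phase loading pattern this method supports (path illustrative):

    import pefile

    # fast_load skips the data directories; headers and sections only
    pe = pefile.PE("sample.exe", fast_load=True)
    print(hex(pe.OPTIONAL_HEADER.AddressOfEntryPoint))

    pe.full_load()  # now parse data directories and the Rich header
    print(pe.RICH_HEADER is not None)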
35,588
pefile
generate_checksum
null
def generate_checksum(self): # This will make sure that the data representing the PE image # is updated with any changes that might have been made by # assigning values to header fields as those are not automatically # updated upon assignment. # # data = self.write() # print('{0}'.format(len(data))) # for idx, b in enumerate(data): # if b != ord(self.__data__[idx]) or (idx > 1244440 and idx < 1244460): # print('Idx: {0} G {1:02x} {3} B {2:02x}'.format( # idx, ord(self.__data__[idx]), b, # self.__data__[idx], chr(b))) self.__data__ = self.write() # Get the offset to the CheckSum field in the OptionalHeader # (The offset is the same in PE32 and PE32+) checksum_offset = self.OPTIONAL_HEADER.get_file_offset() + 0x40 # 64 checksum = 0 # Verify the data is dword-aligned. Add padding if needed # remainder = len(self.__data__) % 4 data_len = len(self.__data__) + ((4 - remainder) * (remainder != 0)) for i in range(int(data_len / 4)): # Skip the checksum field if i == int(checksum_offset / 4): continue if i + 1 == (int(data_len / 4)) and remainder: dword = struct.unpack( "I", self.__data__[i * 4 :] + (b"\0" * (4 - remainder)) )[0] else: dword = struct.unpack("I", self.__data__[i * 4 : i * 4 + 4])[0] # Optimized the calculation (thanks to Emmanuel Bourg for pointing it out!) checksum += dword if checksum >= 2**32: checksum = (checksum & 0xFFFFFFFF) + (checksum >> 32) checksum = (checksum & 0xFFFF) + (checksum >> 16) checksum = (checksum) + (checksum >> 16) checksum = checksum & 0xFFFF # The length is the one of the original data, not the padded one # return checksum + len(self.__data__)
(self)
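A sketch for validating the header checksum against the recomputed value; note that generate_checksum() refreshes the in-memory image via write() first (path illustrative):

    import pefile

    pe = pefile.PE("sample.exe")
    stored = pe.OPTIONAL_HEADER.CheckSum
    computed = pe.generate_checksum()
    print("checksum ok:", stored == computed)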
35,589
pefile
get_bytes_from_data
Return the bytes in 'data' starting at the given offset. Returns empty bytes if the offset lies beyond the data.
def get_bytes_from_data(self, offset, data): """Return the bytes in 'data' starting at the given offset.""" if offset > len(data): return b"" d = data[offset:] if isinstance(d, bytearray): return bytes(d) return d
(self, offset, data)
35,590
pefile
get_data
Get data regardless of the section it lies in. Given an RVA and the size of the chunk to retrieve, this method will find the section where the data lies and return the data.
def get_data(self, rva=0, length=None): """Get data regardless of the section where it lies on. Given a RVA and the size of the chunk to retrieve, this method will find the section where the data lies and return the data. """ s = self.get_section_by_rva(rva) if length: end = rva + length else: end = None if not s: if rva < len(self.header): return self.header[rva:end] # Before we give up we check whether the file might # contain the data anyway. There are cases of PE files # without sections that rely on windows loading the first # 8291 bytes into memory and assume the data will be # there # A functional file with these characteristics is: # MD5: 0008892cdfbc3bda5ce047c565e52295 # SHA-1: c7116b9ff950f86af256defb95b5d4859d4752a9 # if rva < len(self.__data__): return self.__data__[rva:end] raise PEFormatError("data at RVA can't be fetched. Corrupt header?") return s.get_data(rva, length)
(self, rva=0, length=None)
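For instance, reading the first bytes at the entry point RVA, whatever the backing section is (path illustrative):

    import pefile

    pe = pefile.PE("sample.exe")
    ep = pe.OPTIONAL_HEADER.AddressOfEntryPoint
    print(pe.get_data(ep, 16).hex())  # first 16 bytes of code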
35,591
pefile
get_data_from_dword
Return a four byte string representing the double word value (little endian).
def get_data_from_dword(self, dword): """Return a four byte string representing the double word value (little endian).""" return struct.pack("<L", dword & 0xFFFFFFFF)
(self, dword)
35,592
pefile
get_data_from_qword
Return an eight byte string representing the quad-word value (little endian).
def get_data_from_qword(self, word): """Return an eight byte string representing the quad-word value (little endian).""" return struct.pack("<Q", word)
(self, word)
35,593
pefile
get_data_from_word
Return a two byte string representing the word value (little endian).
def get_data_from_word(self, word): """Return a two byte string representing the word value (little endian).""" return struct.pack("<H", word)
(self, word)
35,594
pefile
get_dword_at_rva
Return the double word value at the given RVA. Returns None if the value can't be read, i.e. the RVA can't be mapped to a file offset.
def get_dword_at_rva(self, rva): """Return the double word value at the given RVA. Returns None if the value can't be read, i.e. the RVA can't be mapped to a file offset. """ try: return self.get_dword_from_data(self.get_data(rva, 4), 0) except PEFormatError: return None
(self, rva)
35,595
pefile
get_dword_from_data
Convert four bytes of data to a double word (little endian). 'offset' is assumed to index into a dword array. So setting it to N will return a dword out of the data starting at offset N*4. Returns None if the data can't be turned into a double word.
def get_dword_from_data(self, data, offset): """Convert four bytes of data to a double word (little endian). 'offset' is assumed to index into a dword array. So setting it to N will return a dword out of the data starting at offset N*4. Returns None if the data can't be turned into a double word. """ if (offset + 1) * 4 > len(data): return None return struct.unpack("<I", data[offset * 4 : (offset + 1) * 4])[0]
(self, data, offset)
35,596
pefile
get_dword_from_offset
Return the double word value at the given file offset. (little endian)
def get_dword_from_offset(self, offset): """Return the double word value at the given file offset. (little endian)""" if offset + 4 > len(self.__data__): return None return self.get_dword_from_data(self.__data__[offset : offset + 4], 0)
(self, offset)
35,597
pefile
get_exphash
Return the exphash of the PE file. Similar to imphash, but based on exported symbol names and their specific order. Returns: the hexdigest of the SHA256 hash of the exported symbols.
def get_exphash(self): """Return the exphash of the PE file. Similar to imphash, but based on exported symbol names and their specific order. Returns: the hexdigest of the SHA256 hash of the exported symbols. """ if not hasattr(self, "DIRECTORY_ENTRY_EXPORT"): return "" if not hasattr(self.DIRECTORY_ENTRY_EXPORT, "symbols"): return "" export_list = [ e.name.decode().lower() for e in self.DIRECTORY_ENTRY_EXPORT.symbols if e and e.name is not None ] if len(export_list) == 0: return "" return sha256(",".join(export_list).encode()).hexdigest()
(self)
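Usage sketch; the method returns the empty string when there is no export directory (path illustrative):

    import pefile

    pe = pefile.PE("sample.dll")
    print(pe.get_exphash() or "<no exports>")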
35,598
pefile
get_imphash
Return the imphash of the PE file. Creates a hash based on imported symbol names and their specific order within the executable: https://www.mandiant.com/resources/blog/tracking-malware-import-hashing Returns: the hexdigest of the MD5 hash of the imported symbols.
def get_imphash(self): """Return the imphash of the PE file. Creates a hash based on imported symbol names and their specific order within the executable: https://www.mandiant.com/resources/blog/tracking-malware-import-hashing Returns: the hexdigest of the MD5 hash of the imported symbols. """ impstrs = [] exts = ["ocx", "sys", "dll"] if not hasattr(self, "DIRECTORY_ENTRY_IMPORT"): return "" for entry in self.DIRECTORY_ENTRY_IMPORT: if isinstance(entry.dll, bytes): libname = entry.dll.decode().lower() else: libname = entry.dll.lower() parts = libname.rsplit(".", 1) if len(parts) > 1 and parts[1] in exts: libname = parts[0] entry_dll_lower = entry.dll.lower() for imp in entry.imports: funcname = None if not imp.name: funcname = ordlookup.ordLookup( entry_dll_lower, imp.ordinal, make_name=True ) if not funcname: raise PEFormatError( f"Unable to look up ordinal {entry.dll}:{imp.ordinal:04x}" ) else: funcname = imp.name if not funcname: continue if isinstance(funcname, bytes): funcname = funcname.decode() impstrs.append("%s.%s" % (libname.lower(), funcname.lower())) return md5(",".join(impstrs).encode()).hexdigest()
(self)
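Usage sketch; the value matches other imphash implementations because the "dll.function" strings are joined in import-table order (path illustrative):

    import pefile

    pe = pefile.PE("sample.exe")
    print(pe.get_imphash())  # 32 hex chars, or "" when there are no imports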
35,599
pefile
get_import_table
null
def get_import_table(self, rva, max_length=None, contains_addresses=False): table = [] # We need the ordinal flag for a simple heuristic # we're implementing within the loop # if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE: ordinal_flag = IMAGE_ORDINAL_FLAG format = self.__IMAGE_THUNK_DATA_format__ elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS: ordinal_flag = IMAGE_ORDINAL_FLAG64 format = self.__IMAGE_THUNK_DATA64_format__ else: # Some PEs may have an invalid value in the Magic field of the # Optional Header. Just in case the remaining file is parseable # let's pretend it's a 32bit PE32 by default. ordinal_flag = IMAGE_ORDINAL_FLAG format = self.__IMAGE_THUNK_DATA_format__ expected_size = Structure(format).sizeof() MAX_ADDRESS_SPREAD = 128 * 2**20 # 128 MB ADDR_4GB = 2**32 MAX_REPEATED_ADDRESSES = 15 repeated_address = 0 addresses_of_data_set_64 = AddressSet() addresses_of_data_set_32 = AddressSet() start_rva = rva while rva: if max_length is not None and rva >= start_rva + max_length: self.__warnings.append( "Error parsing the import table. Entries go beyond bounds." ) break # Enforce an upper bounds on import symbols. if self.__total_import_symbols > MAX_IMPORT_SYMBOLS: self.__warnings.append( "Excessive number of imports %d (>%s)" % (self.__total_import_symbols, MAX_IMPORT_SYMBOLS) ) break self.__total_import_symbols += 1 # if we see too many times the same entry we assume it could be # a table containing bogus data (with malicious intent or otherwise) if repeated_address >= MAX_REPEATED_ADDRESSES: return [] # if the addresses point somewhere but the difference between the highest # and lowest address is larger than MAX_ADDRESS_SPREAD we assume a bogus # table as the addresses should be contained within a module if addresses_of_data_set_32.diff() > MAX_ADDRESS_SPREAD: return [] if addresses_of_data_set_64.diff() > MAX_ADDRESS_SPREAD: return [] failed = False try: data = self.get_data(rva, expected_size) except PEFormatError: failed = True if failed or len(data) != expected_size: self.__warnings.append( "Error parsing the import table. " "Invalid data at RVA: 0x%x" % rva ) return None thunk_data = self.__unpack_data__( format, data, file_offset=self.get_offset_from_rva(rva) ) # If the thunk data contains VAs instead of RVAs, we need to normalize them if contains_addresses: thunk_data.AddressOfData = self.normalize_import_va( thunk_data.AddressOfData ) thunk_data.ForwarderString = self.normalize_import_va( thunk_data.ForwarderString ) thunk_data.Function = self.normalize_import_va(thunk_data.Function) thunk_data.Ordinal = self.normalize_import_va(thunk_data.Ordinal) # Check if the AddressOfData lies within the range of RVAs that it's # being scanned, abort if that is the case, as it is very unlikely # to be legitimate data. # Seen in PE with SHA256: # 5945bb6f0ac879ddf61b1c284f3b8d20c06b228e75ae4f571fa87f5b9512902c if ( thunk_data and thunk_data.AddressOfData >= start_rva and thunk_data.AddressOfData <= rva ): self.__warnings.append( "Error parsing the import table. " "AddressOfData overlaps with THUNK_DATA for " "THUNK at RVA 0x%x" % (rva) ) break if thunk_data and thunk_data.AddressOfData: addr_of_data = thunk_data.AddressOfData # If the entry looks like could be an ordinal... if addr_of_data & ordinal_flag: # but its value is beyond 2^16, we will assume it's a # corrupted and ignore it altogether if addr_of_data & 0x7FFFFFFF > 0xFFFF: return [] # and if it looks like it should be an RVA else: # keep track of the RVAs seen and store them to study their # properties. 
When certain non-standard features are detected # the parsing will be aborted if addr_of_data >= ADDR_4GB: the_set = addresses_of_data_set_64 else: the_set = addresses_of_data_set_32 if addr_of_data in the_set: repeated_address += 1 the_set.add(addr_of_data) if not thunk_data or thunk_data.all_zeroes(): break rva += thunk_data.sizeof() table.append(thunk_data) return table
(self, rva, max_length=None, contains_addresses=False)
35,600
pefile
get_memory_mapped_image
Returns the data corresponding to the memory layout of the PE file. The data includes the PE header and the sections loaded at offsets corresponding to their relative virtual addresses (the VirtualAddress section header member). Any offset in this data corresponds to the absolute memory address ImageBase+offset. The optional argument 'max_virtual_address' provides a means of limiting which sections are processed. Any section whose VirtualAddress lies beyond this value will be skipped. Normally, sections with values beyond this range are there only to confuse tools; it's a common trick in packed executables. If the 'ImageBase' optional argument is supplied, the file's relocations will be applied to the image by calling the 'relocate_image()' method. Beware that the relocation information is applied permanently.
def get_memory_mapped_image(self, max_virtual_address=0x10000000, ImageBase=None): """Returns the data corresponding to the memory layout of the PE file. The data includes the PE header and the sections loaded at offsets corresponding to their relative virtual addresses. (the VirtualAddress section header member). Any offset in this data corresponds to the absolute memory address ImageBase+offset. The optional argument 'max_virtual_address' provides with means of limiting which sections are processed. Any section with their VirtualAddress beyond this value will be skipped. Normally, sections with values beyond this range are just there to confuse tools. It's a common trick to see in packed executables. If the 'ImageBase' optional argument is supplied, the file's relocations will be applied to the image by calling the 'relocate_image()' method. Beware that the relocation information is applied permanently. """ # Rebase if requested # if ImageBase is not None: # Keep a copy of the image's data before modifying it by rebasing it # original_data = self.__data__ self.relocate_image(ImageBase) # Collect all sections in one code block mapped_data = self.__data__[:] for section in self.sections: # Miscellaneous integrity tests. # Some packer will set these to bogus values to make tools go nuts. if section.Misc_VirtualSize == 0 and section.SizeOfRawData == 0: continue srd = section.SizeOfRawData prd = self.adjust_FileAlignment( section.PointerToRawData, self.OPTIONAL_HEADER.FileAlignment ) VirtualAddress_adj = self.adjust_SectionAlignment( section.VirtualAddress, self.OPTIONAL_HEADER.SectionAlignment, self.OPTIONAL_HEADER.FileAlignment, ) if ( srd > len(self.__data__) or prd > len(self.__data__) or srd + prd > len(self.__data__) or VirtualAddress_adj >= max_virtual_address ): continue padding_length = VirtualAddress_adj - len(mapped_data) if padding_length > 0: mapped_data += b"\0" * padding_length elif padding_length < 0: mapped_data = mapped_data[:padding_length] mapped_data += section.get_data() # If the image was rebased, restore it to its original form # if ImageBase is not None: self.__data__ = original_data return mapped_data
(self, max_virtual_address=268435456, ImageBase=None)
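A sketch using the mapped layout, where RVAs index directly into the returned buffer (path illustrative):

    import pefile

    pe = pefile.PE("sample.exe")
    img = pe.get_memory_mapped_image()  # header + sections laid out by RVA
    ep = pe.OPTIONAL_HEADER.AddressOfEntryPoint
    print(img[ep : ep + 16].hex())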
35,601
pefile
get_offset_from_rva
Get the file offset corresponding to this RVA. Given an RVA, this method will find the section where the data lies and return the offset within the file.
def get_offset_from_rva(self, rva): """Get the file offset corresponding to this RVA. Given a RVA , this method will find the section where the data lies and return the offset within the file. """ s = self.get_section_by_rva(rva) if not s: # If not found within a section assume it might # point to overlay data or otherwise data present # but not contained in any section. In those # cases the RVA should equal the offset if rva < len(self.__data__): return rva raise PEFormatError(f"data at RVA 0x{rva:x} can't be fetched") return s.get_offset_from_rva(rva)
(self, rva)
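This conversion pairs with get_rva_from_offset() (listed further down); a round-trip sketch for an RVA that falls inside a section (path illustrative):

    import pefile

    pe = pefile.PE("sample.exe")
    rva = pe.OPTIONAL_HEADER.AddressOfEntryPoint
    off = pe.get_offset_from_rva(rva)
    assert pe.get_rva_from_offset(off) == rva  # holds for section-backed RVAs
    print(hex(rva), "->", hex(off))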
35,602
pefile
get_overlay
Get the data appended to the file and not contained within the area described in the headers.
def get_overlay(self): """Get the data appended to the file and not contained within the area described in the headers.""" overlay_data_offset = self.get_overlay_data_start_offset() if overlay_data_offset is not None: return self.__data__[overlay_data_offset:] return None
(self)
35,603
pefile
get_overlay_data_start_offset
Get the offset of data appended to the file and not contained within the area described in the headers.
def get_overlay_data_start_offset(self): """Get the offset of data appended to the file and not contained within the area described in the headers.""" largest_offset_and_size = (0, 0) def update_if_sum_is_larger_and_within_file( offset_and_size, file_size=len(self.__data__) ): if sum(offset_and_size) <= file_size and sum(offset_and_size) > sum( largest_offset_and_size ): return offset_and_size return largest_offset_and_size if hasattr(self, "OPTIONAL_HEADER"): largest_offset_and_size = update_if_sum_is_larger_and_within_file( ( self.OPTIONAL_HEADER.get_file_offset(), self.FILE_HEADER.SizeOfOptionalHeader, ) ) for section in self.sections: largest_offset_and_size = update_if_sum_is_larger_and_within_file( (section.PointerToRawData, section.SizeOfRawData) ) skip_directories = [DIRECTORY_ENTRY["IMAGE_DIRECTORY_ENTRY_SECURITY"]] for idx, directory in enumerate(self.OPTIONAL_HEADER.DATA_DIRECTORY): if idx in skip_directories: continue try: largest_offset_and_size = update_if_sum_is_larger_and_within_file( (self.get_offset_from_rva(directory.VirtualAddress), directory.Size) ) # Ignore directories with RVA out of file except PEFormatError: continue if len(self.__data__) > sum(largest_offset_and_size): return sum(largest_offset_and_size) return None
(self)
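A sketch extracting appended (overlay) data using the two methods above (path illustrative):

    import pefile

    pe = pefile.PE("installer.exe")
    start = pe.get_overlay_data_start_offset()
    if start is not None:
        overlay = pe.get_overlay()  # bytes from 'start' to end of file
        print("overlay at 0x%x, %d bytes" % (start, len(overlay)))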
35,604
pefile
get_physical_by_rva
Gets the physical address in the PE file from an RVA value.
def get_physical_by_rva(self, rva): """Gets the physical address in the PE file from an RVA value.""" try: return self.get_offset_from_rva(rva) except Exception: return None
(self, rva)
35,605
pefile
get_qword_at_rva
Return the quad-word value at the given RVA. Returns None if the value can't be read, i.e. the RVA can't be mapped to a file offset.
def get_qword_at_rva(self, rva): """Return the quad-word value at the given RVA. Returns None if the value can't be read, i.e. the RVA can't be mapped to a file offset. """ try: return self.get_qword_from_data(self.get_data(rva)[:8], 0) except PEFormatError: return None
(self, rva)
35,606
pefile
get_qword_from_data
Convert eight bytes of data to a quad-word (little endian). 'offset' is assumed to index into a qword array. So setting it to N will return a qword out of the data starting at offset N*8. Returns None if the data can't be turned into a quad word.
def get_qword_from_data(self, data, offset): """Convert eight bytes of data to a quad-word (little endian). 'offset' is assumed to index into a qword array. So setting it to N will return a qword out of the data starting at offset N*8. Returns None if the data can't be turned into a quad word. """ if (offset + 1) * 8 > len(data): return None return struct.unpack("<Q", data[offset * 8 : (offset + 1) * 8])[0]
(self, data, offset)
35,607
pefile
get_qword_from_offset
Return the quad-word value at the given file offset. (little endian)
def get_qword_from_offset(self, offset): """Return the quad-word value at the given file offset. (little endian)""" if offset + 8 > len(self.__data__): return None return self.get_qword_from_data(self.__data__[offset : offset + 8], 0)
(self, offset)
35,608
pefile
get_resources_strings
Returns a list of all the strings found within the resources (if any). This method will scan all entries in the resources directory of the PE, if there is one, and will return a list with the strings. An empty list will be returned otherwise.
def get_resources_strings(self): """Returns a list of all the strings found within the resources (if any). This method will scan all entries in the resources directory of the PE, if there is one, and will return a list with the strings. An empty list will be returned otherwise. """ resources_strings = [] if hasattr(self, "DIRECTORY_ENTRY_RESOURCE"): for res_type in self.DIRECTORY_ENTRY_RESOURCE.entries: if hasattr(res_type, "directory"): for resource_id in res_type.directory.entries: if hasattr(resource_id, "directory"): if ( hasattr(resource_id.directory, "strings") and resource_id.directory.strings ): for res_string in list( resource_id.directory.strings.values() ): resources_strings.append(res_string) return resources_strings
(self)
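Usage sketch; the resource directory must have been parsed, so do not combine with fast_load unless full_load() is called first (path illustrative):

    import pefile

    pe = pefile.PE("sample.exe")
    for s in pe.get_resources_strings():
        print(s)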
35,609
pefile
get_rich_header_hash
null
def get_rich_header_hash(self, algorithm="md5"): if not hasattr(self, "RICH_HEADER") or self.RICH_HEADER is None: return "" if algorithm == "md5": return md5(self.RICH_HEADER.clear_data).hexdigest() elif algorithm == "sha1": return sha1(self.RICH_HEADER.clear_data).hexdigest() elif algorithm == "sha256": return sha256(self.RICH_HEADER.clear_data).hexdigest() elif algorithm == "sha512": return sha512(self.RICH_HEADER.clear_data).hexdigest() raise Exception("Invalid hashing algorithm specified")
(self, algorithm='md5')
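Usage sketch; the hash is computed over the decrypted ("clear") Rich header data and the method returns "" when no Rich header was found (path illustrative):

    import pefile

    pe = pefile.PE("sample.exe")
    print(pe.get_rich_header_hash())                    # MD5 by default
    print(pe.get_rich_header_hash(algorithm="sha256"))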
35,610
pefile
get_rva_from_offset
Get the RVA corresponding to this file offset.
def get_rva_from_offset(self, offset): """Get the RVA corresponding to this file offset.""" s = self.get_section_by_offset(offset) if not s: if self.sections: lowest_rva = min( [ self.adjust_SectionAlignment( s.VirtualAddress, self.OPTIONAL_HEADER.SectionAlignment, self.OPTIONAL_HEADER.FileAlignment, ) for s in self.sections ] ) if offset < lowest_rva: # We will assume that the offset lies within the headers, or # at least points before where the earliest section starts # and we will simply return the offset as the RVA # # The case illustrating this behavior can be found at: # http://corkami.blogspot.com/2010/01/hey-hey-hey-whats-in-your-head.html # where the import table is not contained by any section # hence the RVA needs to be resolved to a raw offset return offset return None else: return offset return s.get_rva_from_offset(offset)
(self, offset)
35,611
pefile
get_section_by_offset
Get the section containing the given file offset.
def get_section_by_offset(self, offset): """Get the section containing the given file offset.""" for section in self.sections: if section.contains_offset(offset): return section return None
(self, offset)
35,612
pefile
get_section_by_rva
Get the section containing the given address.
def get_section_by_rva(self, rva): """Get the section containing the given address.""" # if we repeatedly look up RVAs within the same section, "cache" the last used section # to speed up lookups (very useful when parsing the import table) if self._get_section_by_rva_last_used is not None: if self._get_section_by_rva_last_used.contains_rva(rva): return self._get_section_by_rva_last_used for section in self.sections: if section.contains_rva(rva): self._get_section_by_rva_last_used = section return section return None
(self, rva)
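For example, resolving which section contains the entry point (path illustrative):

    import pefile

    pe = pefile.PE("sample.exe")
    sec = pe.get_section_by_rva(pe.OPTIONAL_HEADER.AddressOfEntryPoint)
    if sec is not None:
        print(sec.Name.rstrip(b"\x00").decode())  # e.g. ".text"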
35,613
pefile
get_string_at_rva
Get an ASCII string located at the given address.
def get_string_at_rva(self, rva, max_length=MAX_STRING_LENGTH): """Get an ASCII string located at the given address.""" if rva is None: return None s = self.get_section_by_rva(rva) if not s: return self.get_string_from_data(0, self.__data__[rva : rva + max_length]) return self.get_string_from_data(0, s.get_data(rva, length=max_length))
(self, rva, max_length=1048576)
35,614
pefile
get_string_from_data
Get an ASCII string from data.
def get_string_from_data(self, offset, data): """Get an ASCII string from data.""" s = self.get_bytes_from_data(offset, data) end = s.find(b"\0") if end >= 0: s = s[:end] return s
(self, offset, data)
35,615
pefile
get_string_u_at_rva
Get a Unicode string located at the given address.
def get_string_u_at_rva(self, rva, max_length=2**16, encoding=None): """Get an Unicode string located at the given address.""" if max_length == 0: return b"" # If the RVA is invalid let the exception reach the callers. All # call-sites of get_string_u_at_rva() will handle it. data = self.get_data(rva, 2) # max_length is the maximum count of 16bit characters needs to be # doubled to get size in bytes max_length <<= 1 requested = min(max_length, 256) data = self.get_data(rva, requested) # try to find null-termination null_index = -1 while True: null_index = data.find(b"\x00\x00", null_index + 1) if null_index == -1: data_length = len(data) if data_length < requested or data_length == max_length: null_index = len(data) >> 1 break # Request remaining part of data limited by max_length data += self.get_data(rva + data_length, max_length - data_length) null_index = requested - 1 requested = max_length elif null_index % 2 == 0: null_index >>= 1 break # convert selected part of the string to unicode uchrs = struct.unpack("<{:d}H".format(null_index), data[: null_index * 2]) s = "".join(map(chr, uchrs)) if encoding: return b(s.encode(encoding, "backslashreplace_")) return b(s.encode("utf-8", "backslashreplace_"))
(self, rva, max_length=65536, encoding=None)
35,616
pefile
get_warnings
Return the list of warnings. Non-critical problems found when parsing the PE file are appended to a list of warnings. This method returns the full list.
def get_warnings(self): """Return the list of warnings. Non-critical problems found when parsing the PE file are appended to a list of warnings. This method returns the full list. """ return self.__warnings
(self)
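Checking the warning list after parsing is a cheap way to spot malformed files (path illustrative):

    import pefile

    pe = pefile.PE("sample.exe")
    for w in pe.get_warnings():
        print("warning:", w)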
35,617
pefile
get_word_at_rva
Return the word value at the given RVA. Returns None if the value can't be read, i.e. the RVA can't be mapped to a file offset.
def get_word_at_rva(self, rva): """Return the word value at the given RVA. Returns None if the value can't be read, i.e. the RVA can't be mapped to a file offset. """ try: return self.get_word_from_data(self.get_data(rva)[:2], 0) except PEFormatError: return None
(self, rva)
35,618
pefile
get_word_from_data
Convert two bytes of data to a word (little endian). 'offset' is assumed to index into a word array. So setting it to N will return a word out of the data starting at offset N*2. Returns None if the data can't be turned into a word.
def get_word_from_data(self, data, offset): """Convert two bytes of data to a word (little endian). 'offset' is assumed to index into a word array. So setting it to N will return a word out of the data starting at offset N*2. Returns None if the data can't be turned into a word. """ if (offset + 1) * 2 > len(data): return None return struct.unpack("<H", data[offset * 2 : (offset + 1) * 2])[0]
(self, data, offset)
35,619
pefile
get_word_from_offset
Return the word value at the given file offset. (little endian)
def get_word_from_offset(self, offset): """Return the word value at the given file offset. (little endian)""" if offset + 2 > len(self.__data__): return None return self.get_word_from_data(self.__data__[offset : offset + 2], 0)
(self, offset)
35,620
pefile
has_dynamic_relocs
null
def has_dynamic_relocs(self): if hasattr(self, "DIRECTORY_ENTRY_LOAD_CONFIG"): if self.DIRECTORY_ENTRY_LOAD_CONFIG.dynamic_relocations: return True return False
(self)
35,621
pefile
has_relocs
Checks if the PE file has relocation directory
def has_relocs(self): """Checks if the PE file has relocation directory""" return hasattr(self, "DIRECTORY_ENTRY_BASERELOC")
(self)
35,622
pefile
is_dll
Check whether the file is a standard DLL. This will return true only if the image has the IMAGE_FILE_DLL flag set.
def is_dll(self): """Check whether the file is a standard DLL. This will return true only if the image has the IMAGE_FILE_DLL flag set. """ DLL_flag = IMAGE_CHARACTERISTICS["IMAGE_FILE_DLL"] if (DLL_flag & self.FILE_HEADER.Characteristics) == DLL_flag: return True return False
(self)
35,623
pefile
is_driver
Check whether the file is a Windows driver. This will return true only if there are reliable indicators of the image being a driver.
def is_driver(self): """Check whether the file is a Windows driver. This will return true only if there are reliable indicators of the image being a driver. """ # Checking that the ImageBase field of the OptionalHeader is above or # equal to 0x80000000 (that is, whether it lies in the upper 2GB of # the address space, normally belonging to the kernel) is not a # reliable enough indicator. For instance, PEs that play the invalid # ImageBase trick to get relocated could be incorrectly assumed to be # drivers. # This is not reliable either... # # if any((section.Characteristics & # SECTION_CHARACTERISTICS['IMAGE_SCN_MEM_NOT_PAGED']) for # section in self.sections ): # return True # If the import directory was not parsed (fast_load = True); do it now. if not hasattr(self, "DIRECTORY_ENTRY_IMPORT"): self.parse_data_directories( directories=[DIRECTORY_ENTRY["IMAGE_DIRECTORY_ENTRY_IMPORT"]] ) # If there's still no import directory (the PE doesn't have one or it's # malformed), give up. if not hasattr(self, "DIRECTORY_ENTRY_IMPORT"): return False # self.DIRECTORY_ENTRY_IMPORT will now exist, although it may be empty. # If it imports from "ntoskrnl.exe" or other kernel components it should # be a driver # system_DLLs = set( (b"ntoskrnl.exe", b"hal.dll", b"ndis.sys", b"bootvid.dll", b"kdcom.dll") ) if system_DLLs.intersection( [imp.dll.lower() for imp in self.DIRECTORY_ENTRY_IMPORT] ): return True driver_like_section_names = set((b"page", b"paged")) if driver_like_section_names.intersection( [section.Name.lower().rstrip(b"\x00") for section in self.sections] ) and ( self.OPTIONAL_HEADER.Subsystem in ( SUBSYSTEM_TYPE["IMAGE_SUBSYSTEM_NATIVE"], SUBSYSTEM_TYPE["IMAGE_SUBSYSTEM_NATIVE_WINDOWS"], ) ): return True return False
(self)
35,624
pefile
is_exe
Check whether the file is a standard executable. This will return true only if the file has the IMAGE_FILE_EXECUTABLE_IMAGE flag set and the IMAGE_FILE_DLL not set and the file does not appear to be a driver either.
def is_exe(self): """Check whether the file is a standard executable. This will return true only if the file has the IMAGE_FILE_EXECUTABLE_IMAGE flag set and the IMAGE_FILE_DLL not set and the file does not appear to be a driver either. """ EXE_flag = IMAGE_CHARACTERISTICS["IMAGE_FILE_EXECUTABLE_IMAGE"] if ( (not self.is_dll()) and (not self.is_driver()) and (EXE_flag & self.FILE_HEADER.Characteristics) == EXE_flag ): return True return False
(self)
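A sketch exercising the three classification predicates above, is_dll(), is_driver(), and is_exe() (the paths are illustrative):

    import pefile

    for path in ("a.exe", "b.dll", "c.sys"):
        pe = pefile.PE(path)
        if pe.is_dll():
            kind = "dll"
        elif pe.is_driver():
            kind = "driver"
        elif pe.is_exe():
            kind = "exe"
        else:
            kind = "other"
        print(path, kind)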
35,625
pefile
merge_modified_section_data
Update the PE image content with any individual section data that has been modified.
def merge_modified_section_data(self): """Update the PE image content with any individual section data that has been modified. """ for section in self.sections: section_data_start = self.adjust_FileAlignment( section.PointerToRawData, self.OPTIONAL_HEADER.FileAlignment ) section_data_end = section_data_start + section.SizeOfRawData if section_data_start < len(self.__data__) and section_data_end < len( self.__data__ ): self.set_data_bytes(section_data_start, section.get_data())
(self)
35,626
pefile
normalize_import_va
null
def normalize_import_va(self, va): # Setup image range begin_of_image = self.OPTIONAL_HEADER.ImageBase end_of_image = self.OPTIONAL_HEADER.ImageBase + self.OPTIONAL_HEADER.SizeOfImage # Try to avoid bogus VAs, which are out of the image. # This also filters out entries that are zero if begin_of_image <= va and va < end_of_image: va -= begin_of_image return va
(self, va)
35,627
pefile
parse_data_directories
Parse and process the PE file's data directories. If the optional argument 'directories' is given, only the directories at the specified indexes will be parsed. Such functionality allows parsing of areas of interest without the burden of having to parse all others. The directories can then be specified as: For export / import only: directories = [ 0, 1 ] or (more verbosely): directories = [ DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT'], DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_EXPORT'] ] If 'directories' is a list, the ones that are processed will be removed, leaving only the ones that are not present in the image. If `forwarded_exports_only` is True, the IMAGE_DIRECTORY_ENTRY_EXPORT attribute will only contain exports that are forwarded to another DLL. If `import_dllnames_only` is True, symbols will not be parsed from the import table and the entries in the IMAGE_DIRECTORY_ENTRY_IMPORT attribute will not have a `symbols` attribute.
def parse_data_directories( self, directories=None, forwarded_exports_only=False, import_dllnames_only=False ): """Parse and process the PE file's data directories. If the optional argument 'directories' is given, only the directories at the specified indexes will be parsed. Such functionality allows parsing of areas of interest without the burden of having to parse all others. The directories can then be specified as: For export / import only: directories = [ 0, 1 ] or (more verbosely): directories = [ DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT'], DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_EXPORT'] ] If 'directories' is a list, the ones that are processed will be removed, leaving only the ones that are not present in the image. If `forwarded_exports_only` is True, the IMAGE_DIRECTORY_ENTRY_EXPORT attribute will only contain exports that are forwarded to another DLL. If `import_dllnames_only` is True, symbols will not be parsed from the import table and the entries in the IMAGE_DIRECTORY_ENTRY_IMPORT attribute will not have a `symbols` attribute. """ directory_parsing = ( ("IMAGE_DIRECTORY_ENTRY_IMPORT", self.parse_import_directory), ("IMAGE_DIRECTORY_ENTRY_EXPORT", self.parse_export_directory), ("IMAGE_DIRECTORY_ENTRY_RESOURCE", self.parse_resources_directory), ("IMAGE_DIRECTORY_ENTRY_DEBUG", self.parse_debug_directory), ("IMAGE_DIRECTORY_ENTRY_BASERELOC", self.parse_relocations_directory), ("IMAGE_DIRECTORY_ENTRY_TLS", self.parse_directory_tls), ("IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG", self.parse_directory_load_config), ("IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT", self.parse_delay_import_directory), ("IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT", self.parse_directory_bound_imports), ("IMAGE_DIRECTORY_ENTRY_EXCEPTION", self.parse_exceptions_directory), ) if directories is not None: if not isinstance(directories, (tuple, list)): directories = [directories] for entry in directory_parsing: # OC Patch: # try: directory_index = DIRECTORY_ENTRY[entry[0]] dir_entry = self.OPTIONAL_HEADER.DATA_DIRECTORY[directory_index] except IndexError: break # Only process all the directories if no individual ones have # been chosen # if directories is None or directory_index in directories: value = None if dir_entry.VirtualAddress: if ( forwarded_exports_only and entry[0] == "IMAGE_DIRECTORY_ENTRY_EXPORT" ): value = entry[1]( dir_entry.VirtualAddress, dir_entry.Size, forwarded_only=True, ) elif ( import_dllnames_only and entry[0] == "IMAGE_DIRECTORY_ENTRY_IMPORT" ): value = entry[1]( dir_entry.VirtualAddress, dir_entry.Size, dllnames_only=True ) else: try: value = entry[1](dir_entry.VirtualAddress, dir_entry.Size) except PEFormatError as excp: self.__warnings.append( f'Failed to process directory "{entry[0]}": {excp}' ) if value: setattr(self, entry[0][6:], value) if ( (directories is not None) and isinstance(directories, list) and (entry[0] in directories) ): directories.remove(directory_index)
(self, directories=None, forwarded_exports_only=False, import_dllnames_only=False)
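A sketch of selective parsing with fast_load, as described in the docstring (path illustrative):

    import pefile

    pe = pefile.PE("sample.exe", fast_load=True)
    pe.parse_data_directories(directories=[
        pefile.DIRECTORY_ENTRY["IMAGE_DIRECTORY_ENTRY_IMPORT"],
        pefile.DIRECTORY_ENTRY["IMAGE_DIRECTORY_ENTRY_EXPORT"],
    ])
    for entry in getattr(pe, "DIRECTORY_ENTRY_IMPORT", []):
        print(entry.dll)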
35,628
pefile
parse_debug_directory
def parse_debug_directory(self, rva, size): """""" dbg_size = Structure(self.__IMAGE_DEBUG_DIRECTORY_format__).sizeof() debug = [] for idx in range(int(size / dbg_size)): try: data = self.get_data(rva + dbg_size * idx, dbg_size) except PEFormatError: self.__warnings.append( "Invalid debug information. Can't read " "data at RVA: 0x%x" % rva ) return None dbg = self.__unpack_data__( self.__IMAGE_DEBUG_DIRECTORY_format__, data, file_offset=self.get_offset_from_rva(rva + dbg_size * idx), ) if not dbg: return None # apply structure according to DEBUG_TYPE # http://www.debuginfo.com/articles/debuginfomatch.html # dbg_type = None if dbg.Type == 1: # IMAGE_DEBUG_TYPE_COFF pass elif dbg.Type == 2: # if IMAGE_DEBUG_TYPE_CODEVIEW dbg_type_offset = dbg.PointerToRawData dbg_type_size = dbg.SizeOfData dbg_type_data = self.__data__[ dbg_type_offset : dbg_type_offset + dbg_type_size ] if dbg_type_data[:4] == b"RSDS": # pdb7.0 __CV_INFO_PDB70_format__ = [ "CV_INFO_PDB70", [ "4s,CvSignature", "I,Signature_Data1", # Signature is of GUID type "H,Signature_Data2", "H,Signature_Data3", "B,Signature_Data4", "B,Signature_Data5", "6s,Signature_Data6", "I,Age", ], ] pdbFileName_size = ( dbg_type_size - Structure(__CV_INFO_PDB70_format__).sizeof() ) # pdbFileName_size can be negative here, as seen in the malware # sample with hash # MD5: 7c297600870d026c014d42596bb9b5fd # SHA256: # 83f4e63681fcba8a9d7bbb1688c71981b1837446514a1773597e0192bba9fac3 # Checking for positive size here to ensure proper parsing. if pdbFileName_size > 0: __CV_INFO_PDB70_format__[1].append( "{0}s,PdbFileName".format(pdbFileName_size) ) dbg_type = self.__unpack_data__( __CV_INFO_PDB70_format__, dbg_type_data, dbg_type_offset ) if dbg_type is not None: dbg_type.Signature_Data6_value = struct.unpack( ">Q", b"\0\0" + dbg_type.Signature_Data6 )[0] dbg_type.Signature_String = ( str( uuid.UUID( fields=( dbg_type.Signature_Data1, dbg_type.Signature_Data2, dbg_type.Signature_Data3, dbg_type.Signature_Data4, dbg_type.Signature_Data5, dbg_type.Signature_Data6_value, ) ) ) .replace("-", "") .upper() + f"{dbg_type.Age:X}" ) elif dbg_type_data[:4] == b"NB10": # pdb2.0 __CV_INFO_PDB20_format__ = [ "CV_INFO_PDB20", [ "I,CvHeaderSignature", "I,CvHeaderOffset", "I,Signature", "I,Age", ], ] pdbFileName_size = ( dbg_type_size - Structure(__CV_INFO_PDB20_format__).sizeof() ) # As with the PDB 7.0 case, ensuring a positive size for # pdbFileName_size to ensure proper parsing. if pdbFileName_size > 0: # Add the last variable-length string field. __CV_INFO_PDB20_format__[1].append( "{0}s,PdbFileName".format(pdbFileName_size) ) dbg_type = self.__unpack_data__( __CV_INFO_PDB20_format__, dbg_type_data, dbg_type_offset ) elif dbg.Type == 4: # IMAGE_DEBUG_TYPE_MISC dbg_type_offset = dbg.PointerToRawData dbg_type_size = dbg.SizeOfData dbg_type_data = self.__data__[ dbg_type_offset : dbg_type_offset + dbg_type_size ] ___IMAGE_DEBUG_MISC_format__ = [ "IMAGE_DEBUG_MISC", [ "I,DataType", "I,Length", "B,Unicode", "B,Reserved1", "H,Reserved2", ], ] dbg_type_partial = self.__unpack_data__( ___IMAGE_DEBUG_MISC_format__, dbg_type_data, dbg_type_offset ) # Need to check that dbg_type_partial contains a correctly unpacked data # structure, as the malware sample with the following hash # MD5: 5e7d6707d693108de5a303045c17d95b # SHA256: # 5dd94a95025f3b6e3dd440d52f7c6d2964fdd1aa119e0ee92e38c7bf83829e5c # contains a value of None for dbg_type_partial after unpacking, # presumably due to a malformed DEBUG entry. if dbg_type_partial: # The Unicode bool should be set to 0 or 1. 
if dbg_type_partial.Unicode in (0, 1): data_size = ( dbg_type_size - Structure(___IMAGE_DEBUG_MISC_format__).sizeof() ) # As with the PDB case, ensuring a positive size for data_size # here to ensure proper parsing. if data_size > 0: ___IMAGE_DEBUG_MISC_format__[1].append( "{0}s,Data".format(data_size) ) dbg_type = self.__unpack_data__( ___IMAGE_DEBUG_MISC_format__, dbg_type_data, dbg_type_offset ) debug.append(DebugData(struct=dbg, entry=dbg_type)) return debug
(self, rva, size)
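A minimal usage sketch for the records produced above, assuming pefile's standard DIRECTORY_ENTRY_DEBUG accessor; "sample.exe" is a hypothetical input path:

import pefile

pe = pefile.PE("sample.exe")  # hypothetical input file
for dbg in getattr(pe, "DIRECTORY_ENTRY_DEBUG", []):
    # dbg.struct is the IMAGE_DEBUG_DIRECTORY row, dbg.entry the
    # type-specific payload (e.g. CV_INFO_PDB70 for CodeView records).
    if dbg.struct.Type == 2 and dbg.entry is not None:  # IMAGE_DEBUG_TYPE_CODEVIEW
        if hasattr(dbg.entry, "Signature_String"):  # PDB 7.0 (RSDS) records only
            print("GUID+Age:", dbg.entry.Signature_String)
        if hasattr(dbg.entry, "PdbFileName"):
            print("PDB path:", dbg.entry.PdbFileName.rstrip(b"\x00"))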
35,629
pefile
parse_delay_import_directory
Walk and parse the delay import directory.
def parse_delay_import_directory(self, rva, size): """Walk and parse the delay import directory.""" import_descs = [] error_count = 0 while True: try: # If the RVA is invalid all would blow up. Some PEs seem to be # specially nasty and have an invalid RVA. data = self.get_data( rva, Structure(self.__IMAGE_DELAY_IMPORT_DESCRIPTOR_format__).sizeof(), ) except PEFormatError: self.__warnings.append( "Error parsing the Delay import directory at RVA: 0x%x" % (rva) ) break file_offset = self.get_offset_from_rva(rva) import_desc = self.__unpack_data__( self.__IMAGE_DELAY_IMPORT_DESCRIPTOR_format__, data, file_offset=file_offset, ) # If the structure is all zeros, we reached the end of the list if not import_desc or import_desc.all_zeroes(): break contains_addresses = False # Handle old import descriptor that has Virtual Addresses instead of RVAs # This version of import descriptor is created by old Visual Studio versions # (pre 6.0) # Can only be present in 32-bit binaries (no 64-bit compiler existed at the # time) # Sample: e8d3bff0c1a9a6955993f7a441121a2692261421e82fdfadaaded45d3bea9980 if ( import_desc.grAttrs == 0 and self.FILE_HEADER.Machine == MACHINE_TYPE["IMAGE_FILE_MACHINE_I386"] ): import_desc.pBoundIAT = self.normalize_import_va(import_desc.pBoundIAT) import_desc.pIAT = self.normalize_import_va(import_desc.pIAT) import_desc.pINT = self.normalize_import_va(import_desc.pINT) import_desc.pUnloadIAT = self.normalize_import_va( import_desc.pUnloadIAT ) import_desc.phmod = self.normalize_import_va(import_desc.pUnloadIAT) import_desc.szName = self.normalize_import_va(import_desc.szName) contains_addresses = True rva += import_desc.sizeof() # If the array of thunks is somewhere earlier than the import # descriptor we can set a maximum length for the array. Otherwise # just set a maximum length of the size of the file max_len = len(self.__data__) - file_offset if rva > import_desc.pINT or rva > import_desc.pIAT: max_len = max(rva - import_desc.pINT, rva - import_desc.pIAT) import_data = [] try: import_data = self.parse_imports( import_desc.pINT, import_desc.pIAT, None, max_len, contains_addresses, ) except PEFormatError as excp: self.__warnings.append( "Error parsing the Delay import directory. " "Invalid import data at RVA: 0x{0:x} ({1})".format(rva, excp.value) ) if error_count > 5: self.__warnings.append( "Too many errors parsing the Delay import directory. " "Invalid import data at RVA: 0x{0:x}".format(rva) ) break if not import_data: error_count += 1 continue if self.__total_import_symbols > MAX_IMPORT_SYMBOLS: self.__warnings.append( "Error, too many imported symbols %d (>%s)" % (self.__total_import_symbols, MAX_IMPORT_SYMBOLS) ) break dll = self.get_string_at_rva(import_desc.szName, MAX_DLL_LENGTH) if not is_valid_dos_filename(dll): dll = b("*invalid*") if dll: for symbol in import_data: if symbol.name is None: funcname = ordlookup.ordLookup(dll.lower(), symbol.ordinal) if funcname: symbol.name = funcname import_descs.append( ImportDescData(struct=import_desc, imports=import_data, dll=dll) ) return import_descs
(self, rva, size)
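A sketch of walking the resulting list, assuming the standard DIRECTORY_ENTRY_DELAY_IMPORT accessor and a hypothetical "sample.exe":

import pefile

pe = pefile.PE("sample.exe")  # hypothetical input file
for delay_dll in getattr(pe, "DIRECTORY_ENTRY_DELAY_IMPORT", []):
    # Each item is an ImportDescData with .struct, .dll and .imports.
    print(delay_dll.dll)
    for imp in delay_dll.imports:
        # Ordinal-only imports keep name=None unless ordlookup resolved it.
        print("  0x%08x" % imp.address, imp.name or "ord(%d)" % imp.ordinal)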
35,630
pefile
parse_directory_bound_imports
def parse_directory_bound_imports(self, rva, size): """""" bnd_descr = Structure(self.__IMAGE_BOUND_IMPORT_DESCRIPTOR_format__) bnd_descr_size = bnd_descr.sizeof() start = rva bound_imports = [] while True: bnd_descr = self.__unpack_data__( self.__IMAGE_BOUND_IMPORT_DESCRIPTOR_format__, self.__data__[rva : rva + bnd_descr_size], file_offset=rva, ) if bnd_descr is None: # If the directory can't be parsed then silently return. # This directory does not necessarily have to be valid to # still have a valid PE file self.__warnings.append( "The Bound Imports directory exists but can't be parsed." ) return if bnd_descr.all_zeroes(): break rva += bnd_descr.sizeof() section = self.get_section_by_offset(rva) file_offset = self.get_offset_from_rva(rva) if section is None: safety_boundary = len(self.__data__) - file_offset sections_after_offset = [ s.PointerToRawData for s in self.sections if s.PointerToRawData > file_offset ] if sections_after_offset: # Find the first section starting at a later offset than that # specified by 'rva' first_section_after_offset = min(sections_after_offset) section = self.get_section_by_offset(first_section_after_offset) if section is not None: safety_boundary = section.PointerToRawData - file_offset else: safety_boundary = ( section.PointerToRawData + len(section.get_data()) - file_offset ) if not section: self.__warnings.append( ( "RVA of IMAGE_BOUND_IMPORT_DESCRIPTOR points " "to an invalid address: {0:x}" ).format(rva) ) return forwarder_refs = [] # 8 is the size of __IMAGE_BOUND_IMPORT_DESCRIPTOR_format__ for _ in range( min(bnd_descr.NumberOfModuleForwarderRefs, int(safety_boundary / 8)) ): # Both structures IMAGE_BOUND_IMPORT_DESCRIPTOR and # IMAGE_BOUND_FORWARDER_REF have the same size. bnd_frwd_ref = self.__unpack_data__( self.__IMAGE_BOUND_FORWARDER_REF_format__, self.__data__[rva : rva + bnd_descr_size], file_offset=rva, ) # OC Patch: if not bnd_frwd_ref: raise PEFormatError("IMAGE_BOUND_FORWARDER_REF cannot be read") rva += bnd_frwd_ref.sizeof() offset = start + bnd_frwd_ref.OffsetModuleName name_str = self.get_string_from_data( 0, self.__data__[offset : offset + MAX_STRING_LENGTH] ) # OffsetModuleName points to a DLL name. These shouldn't be too long. # Anything longer than a safety length of 256 will be taken to indicate # a corrupt entry and abort the processing of these entries. # Names shorter than 4 characters will be taken as invalid as well. if name_str: invalid_chars = [ c for c in bytearray(name_str) if chr(c) not in string.printable ] if len(name_str) > 256 or invalid_chars: break forwarder_refs.append( BoundImportRefData(struct=bnd_frwd_ref, name=name_str) ) offset = start + bnd_descr.OffsetModuleName name_str = self.get_string_from_data( 0, self.__data__[offset : offset + MAX_STRING_LENGTH] ) if name_str: invalid_chars = [ c for c in bytearray(name_str) if chr(c) not in string.printable ] if len(name_str) > 256 or invalid_chars: break if not name_str: break bound_imports.append( BoundImportDescData( struct=bnd_descr, name=name_str, entries=forwarder_refs ) ) return bound_imports
(self, rva, size)
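A sketch of reading the bound-import records, assuming the standard DIRECTORY_ENTRY_BOUND_IMPORT accessor and a hypothetical "sample.exe":

import pefile

pe = pefile.PE("sample.exe")  # hypothetical input file
for bound in getattr(pe, "DIRECTORY_ENTRY_BOUND_IMPORT", []):
    # TimeDateStamp records the stamp of the DLL the image was bound to;
    # a mismatch with the on-disk DLL forces the loader to re-bind.
    print(bound.name, hex(bound.struct.TimeDateStamp))
    for ref in bound.entries:
        print("  forwarder:", ref.name)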
35,631
pefile
parse_directory_load_config
def parse_directory_load_config(self, rva, size): """""" if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE: load_config_dir_sz = self.get_dword_at_rva(rva) format = self.__IMAGE_LOAD_CONFIG_DIRECTORY_format__ elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS: load_config_dir_sz = self.get_dword_at_rva(rva) format = self.__IMAGE_LOAD_CONFIG_DIRECTORY64_format__ else: self.__warnings.append( "Don't know how to parse LOAD_CONFIG information for non-PE32/" "PE32+ file" ) return None # The load config directory size can be less than that represented by the # 'format' variable; generate a truncated format corresponding to the load # config directory size fields_counter = 0 cumulative_sz = 0 for field in format[1]: fields_counter += 1 cumulative_sz += STRUCT_SIZEOF_TYPES[field.split(",")[0]] if cumulative_sz == load_config_dir_sz: break format = (format[0], format[1][:fields_counter]) load_config_struct = None try: load_config_struct = self.__unpack_data__( format, self.get_data(rva, Structure(format).sizeof()), file_offset=self.get_offset_from_rva(rva), ) except PEFormatError: self.__warnings.append( "Invalid LOAD_CONFIG information. Can't read " "data at RVA: 0x%x" % rva ) if not load_config_struct: return None dynamic_relocations = None if fields_counter > 35: dynamic_relocations = self.parse_dynamic_relocations( load_config_struct.DynamicValueRelocTableOffset, load_config_struct.DynamicValueRelocTableSection, ) return LoadConfigData( struct=load_config_struct, dynamic_relocations=dynamic_relocations )
(self, rva, size)
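Because the format is truncated to the directory's declared size, later fields may simply be absent on older binaries. A minimal sketch, assuming the standard DIRECTORY_ENTRY_LOAD_CONFIG accessor and a hypothetical "sample.exe":

import pefile

pe = pefile.PE("sample.exe")  # hypothetical input file
lc = getattr(pe, "DIRECTORY_ENTRY_LOAD_CONFIG", None)
if lc is not None:
    # Probe optional fields with getattr, since the unpacked struct only
    # has the fields covered by the directory's declared size.
    print("SecurityCookie:", hex(getattr(lc.struct, "SecurityCookie", 0)))
    print("has dynamic reloc tables:", lc.dynamic_relocations is not None)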
35,632
pefile
parse_directory_tls
def parse_directory_tls(self, rva, size): """""" # By default let's pretend the format is a 32-bit PE. It may help # produce some output for files where the Magic in the Optional Header # is incorrect. format = self.__IMAGE_TLS_DIRECTORY_format__ if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS: format = self.__IMAGE_TLS_DIRECTORY64_format__ try: tls_struct = self.__unpack_data__( format, self.get_data(rva, Structure(format).sizeof()), file_offset=self.get_offset_from_rva(rva), ) except PEFormatError: self.__warnings.append( "Invalid TLS information. Can't read " "data at RVA: 0x%x" % rva ) tls_struct = None if not tls_struct: return None return TlsData(struct=tls_struct)
(self, rva, size)
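A sketch of enumerating TLS callbacks from the parsed structure; this is a 32-bit-only illustration (PE32+ stores 8-byte callback pointers), assuming the standard DIRECTORY_ENTRY_TLS accessor and a hypothetical "sample.exe":

import pefile

pe = pefile.PE("sample.exe")  # hypothetical 32-bit input file
tls = getattr(pe, "DIRECTORY_ENTRY_TLS", None)
if tls is not None and tls.struct.AddressOfCallBacks:
    # AddressOfCallBacks is a VA pointing to a NULL-terminated array of
    # callback VAs (4 bytes each here).
    va = tls.struct.AddressOfCallBacks
    while True:
        cb = pe.get_dword_at_rva(va - pe.OPTIONAL_HEADER.ImageBase)
        if not cb:
            break
        print("TLS callback at VA 0x%x" % cb)
        va += 4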
35,633
pefile
parse_dynamic_relocations
null
def parse_dynamic_relocations( self, dynamic_value_reloc_table_offset, dynamic_value_reloc_table_section ): if not dynamic_value_reloc_table_offset: return None if not dynamic_value_reloc_table_section: return None if dynamic_value_reloc_table_section > len(self.sections): return None section = self.sections[dynamic_value_reloc_table_section - 1] rva = section.VirtualAddress + dynamic_value_reloc_table_offset image_dynamic_reloc_table_struct = None reloc_table_size = Structure( self.__IMAGE_DYNAMIC_RELOCATION_TABLE_format__ ).sizeof() try: image_dynamic_reloc_table_struct = self.__unpack_data__( self.__IMAGE_DYNAMIC_RELOCATION_TABLE_format__, self.get_data(rva, reloc_table_size), file_offset=self.get_offset_from_rva(rva), ) except PEFormatError: self.__warnings.append( "Invalid IMAGE_DYNAMIC_RELOCATION_TABLE information. Can't read " "data at RVA: 0x%x" % rva ) return None # Guard against a failed unpack before dereferencing the structure. if image_dynamic_reloc_table_struct is None: return None if image_dynamic_reloc_table_struct.Version != 1: self.__warnings.append( "No parsing available for IMAGE_DYNAMIC_RELOCATION_TABLE.Version = %d" % image_dynamic_reloc_table_struct.Version ) return None rva += reloc_table_size end = rva + image_dynamic_reloc_table_struct.Size dynamic_relocations = [] while rva < end: format = self.__IMAGE_DYNAMIC_RELOCATION_format__ if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS: format = self.__IMAGE_DYNAMIC_RELOCATION64_format__ rlc_size = Structure(format).sizeof() try: dynamic_rlc = self.__unpack_data__( format, self.get_data(rva, rlc_size), file_offset=self.get_offset_from_rva(rva), ) except PEFormatError: self.__warnings.append( "Invalid relocation information. Can't read " "data at RVA: 0x%x" % rva ) dynamic_rlc = None if not dynamic_rlc: break rva += rlc_size symbol = dynamic_rlc.Symbol size = dynamic_rlc.BaseRelocSize if 3 <= symbol <= 5: relocations = self.parse_image_base_relocation_list( rva, size, self.dynamic_relocation_format_by_symbol[symbol] ) dynamic_relocations.append( DynamicRelocationData( struct=dynamic_rlc, symbol=symbol, relocations=relocations ) ) if symbol > 5: relocations = self.parse_image_base_relocation_list(rva, size) dynamic_relocations.append( DynamicRelocationData( struct=dynamic_rlc, symbol=symbol, relocations=relocations ) ) rva += size return dynamic_relocations
(self, dynamic_value_reloc_table_offset, dynamic_value_reloc_table_section)
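The results surface through the load config data built by parse_directory_load_config above. A minimal sketch, assuming the same accessors and a hypothetical "sample.exe":

import pefile

pe = pefile.PE("sample.exe")  # hypothetical input file
lc = getattr(pe, "DIRECTORY_ENTRY_LOAD_CONFIG", None)
if lc is not None and lc.dynamic_relocations:
    for dyn in lc.dynamic_relocations:
        # Symbols 3-5 use the bitfield formats selected via
        # dynamic_relocation_format_by_symbol; higher symbol values fall
        # back to the plain IMAGE_BASE_RELOCATION layout.
        blocks = dyn.relocations or []
        print("symbol", dyn.symbol, "-", len(blocks), "relocation blocks")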
35,634
pefile
parse_exceptions_directory
Parses the exception directory. All the code related to handling exception directories is documented at https://auscitte.github.io/systems%20blog/Exception-Directory-pefile#implementation-details
def parse_exceptions_directory(self, rva, size): """Parses exception directory All the code related to handling exception directories is documented in https://auscitte.github.io/systems%20blog/Exception-Directory-pefile#implementation-details """ # "For x64 and Itanium platforms; the format is different for other platforms" if ( self.FILE_HEADER.Machine != MACHINE_TYPE["IMAGE_FILE_MACHINE_AMD64"] and self.FILE_HEADER.Machine != MACHINE_TYPE["IMAGE_FILE_MACHINE_IA64"] ): return None rf = Structure(self.__RUNTIME_FUNCTION_format__) rf_size = rf.sizeof() rva2rt = {} rt_funcs = [] rva2infos = {} for _ in range(size // rf_size): rf = self.__unpack_data__( self.__RUNTIME_FUNCTION_format__, self.get_data(rva, rf_size), file_offset=self.get_offset_from_rva(rva), ) if rf is None: break ui = None if (rf.UnwindData & 0x1) == 0: # according to "Improving Automated Analysis of Windows x64 Binaries", # if the lowest bit is set, (UnwindData & ~0x1) should point to the # chained RUNTIME_FUNCTION instead of UNWIND_INFO if ( rf.UnwindData in rva2infos ): # unwind info data structures can be shared among functions ui = rva2infos[rf.UnwindData] else: ui = UnwindInfo(file_offset=self.get_offset_from_rva(rf.UnwindData)) rva2infos[rf.UnwindData] = ui ws = ui.unpack_in_stages(self.get_data(rf.UnwindData, ui.sizeof())) if ws != None: self.__warnings.append(ws) break ws = ui.unpack_in_stages(self.get_data(rf.UnwindData, ui.sizeof())) if ws != None: self.__warnings.append(ws) break self.__structures__.append(ui) entry = ExceptionsDirEntryData(struct=rf, unwindinfo=ui) rt_funcs.append(entry) rva2rt[rf.BeginAddress] = entry rva += rf_size # each chained function entry holds a reference to the function first in chain for rf in rt_funcs: if rf.unwindinfo is None: # TODO: have not encountered such a binary yet; # in theory, (UnwindData & ~0x1) should point to the chained # RUNTIME_FUNCTION which could be used to locate the corresponding # ExceptionsDirEntryData and set_chained_function_entry() continue if not hasattr(rf.unwindinfo, "FunctionEntry"): continue if not rf.unwindinfo.FunctionEntry in rva2rt: self.__warnings.append( f"FunctionEntry of UNWIND_INFO at {rf.struct.get_file_offset():x}" " points to an entry that does not exist" ) continue try: rf.unwindinfo.set_chained_function_entry( rva2rt[rf.unwindinfo.FunctionEntry] ) except PEFormatError as excp: self.__warnings.append( "Failed parsing FunctionEntry of UNWIND_INFO at " f"{rf.struct.get_file_offset():x}: {excp}" ) continue return rt_funcs
(self, rva, size)
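A sketch of reading the parsed entries, assuming the standard DIRECTORY_ENTRY_EXCEPTION accessor and the usual x64 RUNTIME_FUNCTION fields; "sample.exe" is a hypothetical input path:

import pefile

pe = pefile.PE("sample.exe")  # hypothetical x64 input file
for entry in getattr(pe, "DIRECTORY_ENTRY_EXCEPTION", []):
    rf = entry.struct
    line = "function 0x%x-0x%x" % (rf.BeginAddress, rf.EndAddress)
    # Chained entries reference the function first in the chain.
    if entry.unwindinfo is not None and hasattr(entry.unwindinfo, "FunctionEntry"):
        line += " (chained to 0x%x)" % entry.unwindinfo.FunctionEntry
    print(line)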
35,635
pefile
parse_export_directory
Parse the export directory. Given the RVA of the export directory, it will process all its entries. The exports will be made available as a list of ExportData instances in the 'IMAGE_DIRECTORY_ENTRY_EXPORT' PE attribute.
def parse_export_directory(self, rva, size, forwarded_only=False): """Parse the export directory. Given the RVA of the export directory, it will process all its entries. The exports will be made available as a list of ExportData instances in the 'IMAGE_DIRECTORY_ENTRY_EXPORT' PE attribute. """ try: export_dir = self.__unpack_data__( self.__IMAGE_EXPORT_DIRECTORY_format__, self.get_data( rva, Structure(self.__IMAGE_EXPORT_DIRECTORY_format__).sizeof() ), file_offset=self.get_offset_from_rva(rva), ) except PEFormatError: self.__warnings.append( "Error parsing export directory at RVA: 0x%x" % (rva) ) return if not export_dir: return # We keep track of the bytes left in the file and use it to set an upper # bound in the number of items that can be read from the different # arrays. def length_until_eof(rva): return len(self.__data__) - self.get_offset_from_rva(rva) try: address_of_names = self.get_data( export_dir.AddressOfNames, min( length_until_eof(export_dir.AddressOfNames), export_dir.NumberOfNames * 4, ), ) address_of_name_ordinals = self.get_data( export_dir.AddressOfNameOrdinals, min( length_until_eof(export_dir.AddressOfNameOrdinals), export_dir.NumberOfNames * 4, ), ) address_of_functions = self.get_data( export_dir.AddressOfFunctions, min( length_until_eof(export_dir.AddressOfFunctions), export_dir.NumberOfFunctions * 4, ), ) except PEFormatError: self.__warnings.append( "Error parsing export directory at RVA: 0x%x" % (rva) ) return exports = [] max_failed_entries_before_giving_up = 10 section = self.get_section_by_rva(export_dir.AddressOfNames) # Overly generous upper bound safety_boundary = len(self.__data__) if section: safety_boundary = ( section.VirtualAddress + len(section.get_data()) - export_dir.AddressOfNames ) symbol_counts = collections.defaultdict(int) export_parsing_loop_completed_normally = True for i in range(min(export_dir.NumberOfNames, int(safety_boundary / 4))): symbol_ordinal = self.get_word_from_data(address_of_name_ordinals, i) if symbol_ordinal is not None and symbol_ordinal * 4 < len( address_of_functions ): symbol_address = self.get_dword_from_data( address_of_functions, symbol_ordinal ) else: # Corrupt? a bad pointer... we assume it's all # useless, no exports return None if symbol_address is None or symbol_address == 0: continue # If the function's RVA points within the export directory # it will point to a string with the forwarded symbol's string # instead of pointing to the function start address.
if symbol_address >= rva and symbol_address < rva + size: forwarder_str = self.get_string_at_rva(symbol_address) try: forwarder_offset = self.get_offset_from_rva(symbol_address) except PEFormatError: continue else: if forwarded_only: continue forwarder_str = None forwarder_offset = None symbol_name_address = self.get_dword_from_data(address_of_names, i) if symbol_name_address is None: max_failed_entries_before_giving_up -= 1 if max_failed_entries_before_giving_up <= 0: export_parsing_loop_completed_normally = False break symbol_name = self.get_string_at_rva( symbol_name_address, MAX_SYMBOL_NAME_LENGTH ) if not is_valid_function_name(symbol_name, relax_allowed_characters=True): export_parsing_loop_completed_normally = False break try: symbol_name_offset = self.get_offset_from_rva(symbol_name_address) except PEFormatError: max_failed_entries_before_giving_up -= 1 if max_failed_entries_before_giving_up <= 0: export_parsing_loop_completed_normally = False break try: symbol_name_offset = self.get_offset_from_rva(symbol_name_address) except PEFormatError: max_failed_entries_before_giving_up -= 1 if max_failed_entries_before_giving_up <= 0: export_parsing_loop_completed_normally = False break continue # File 0b1d3d3664915577ab9a32188d29bbf3542b86c7b9ce333e245496c3018819f1 # was being parsed as potentially containing millions of exports. # Checking for duplicates addresses the issue. symbol_counts[(symbol_name, symbol_address)] += 1 if symbol_counts[(symbol_name, symbol_address)] > 10: self.__warnings.append( f"Export directory contains more than 10 repeated entries " f"({symbol_name}, {symbol_address:#02x}). Assuming corrupt." ) break elif len(symbol_counts) > self.max_symbol_exports: self.__warnings.append( "Export directory contains more than {} symbol entries. " "Assuming corrupt.".format(self.max_symbol_exports) ) break exports.append( ExportData( pe=self, ordinal=export_dir.Base + symbol_ordinal, ordinal_offset=self.get_offset_from_rva( export_dir.AddressOfNameOrdinals + 2 * i ), address=symbol_address, address_offset=self.get_offset_from_rva( export_dir.AddressOfFunctions + 4 * symbol_ordinal ), name=symbol_name, name_offset=symbol_name_offset, forwarder=forwarder_str, forwarder_offset=forwarder_offset, ) ) if not export_parsing_loop_completed_normally: self.__warnings.append( f"RVA AddressOfNames in the export directory points to an invalid " f"address: {export_dir.AddressOfNames:x}" ) ordinals = {exp.ordinal for exp in exports} max_failed_entries_before_giving_up = 10 section = self.get_section_by_rva(export_dir.AddressOfFunctions) # Overly generous upper bound safety_boundary = len(self.__data__) if section: safety_boundary = ( section.VirtualAddress + len(section.get_data()) - export_dir.AddressOfFunctions ) symbol_counts = collections.defaultdict(int) export_parsing_loop_completed_normally = True for idx in range(min(export_dir.NumberOfFunctions, int(safety_boundary / 4))): if not idx + export_dir.Base in ordinals: try: symbol_address = self.get_dword_from_data(address_of_functions, idx) except PEFormatError: symbol_address = None if symbol_address is None: max_failed_entries_before_giving_up -= 1 if max_failed_entries_before_giving_up <= 0: export_parsing_loop_completed_normally = False break if symbol_address == 0: continue # Checking for forwarder again. 
if ( symbol_address is not None and symbol_address >= rva and symbol_address < rva + size ): forwarder_str = self.get_string_at_rva(symbol_address) else: forwarder_str = None # File 0b1d3d3664915577ab9a32188d29bbf3542b86c7b9ce333e245496c3018819f1 # was being parsed as potentially containing millions of exports. # Checking for duplicates addresses the issue. symbol_counts[symbol_address] += 1 if symbol_counts[symbol_address] > self.max_repeated_symbol: # if most_common and most_common[0][1] > 10: self.__warnings.append( "Export directory contains more than {} repeated " "ordinal entries (0x{:x}). Assuming corrupt.".format( self.max_repeated_symbol, symbol_address ) ) break elif len(symbol_counts) > self.max_symbol_exports: self.__warnings.append( "Export directory contains more than " f"{self.max_symbol_exports} ordinal entries. Assuming corrupt." ) break exports.append( ExportData( ordinal=export_dir.Base + idx, address=symbol_address, name=None, forwarder=forwarder_str, ) ) if not export_parsing_loop_completed_normally: self.__warnings.append( "RVA AddressOfFunctions in the export directory points to an invalid " f"address: {export_dir.AddressOfFunctions:x}" ) return if not exports and export_dir.all_zeroes(): return None return ExportDirData( struct=export_dir, symbols=exports, name=self.get_string_at_rva(export_dir.Name), )
(self, rva, size, forwarded_only=False)
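A sketch of iterating the parsed exports, assuming the standard DIRECTORY_ENTRY_EXPORT accessor; "sample.dll" is a hypothetical input path:

import pefile

pe = pefile.PE("sample.dll")  # hypothetical input file
exports = getattr(pe, "DIRECTORY_ENTRY_EXPORT", None)
if exports is not None:
    print("module:", exports.name)
    for sym in exports.symbols:
        if sym.forwarder:
            # Forwarded exports store a "TargetDll.Symbol" string in place
            # of a function address.
            print("%5d %s -> %s" % (sym.ordinal, sym.name, sym.forwarder))
        else:
            print("%5d 0x%08x %s" % (sym.ordinal, sym.address, sym.name))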
35,636
pefile
parse_image_base_relocation_list
null
def parse_image_base_relocation_list(self, rva, size, fmt=None): rlc_size = Structure(self.__IMAGE_BASE_RELOCATION_format__).sizeof() end = rva + size relocations = [] while rva < end: # OC Patch: # Malware that has bad RVA entries will cause an error. # Just continue on after an exception # try: rlc = self.__unpack_data__( self.__IMAGE_BASE_RELOCATION_format__, self.get_data(rva, rlc_size), file_offset=self.get_offset_from_rva(rva), ) except PEFormatError: self.__warnings.append( "Invalid relocation information. Can't read " "data at RVA: 0x%x" % rva ) rlc = None if not rlc: break # rlc.VirtualAddress must lie within the Image if rlc.VirtualAddress > self.OPTIONAL_HEADER.SizeOfImage: self.__warnings.append( "Invalid relocation information. VirtualAddress outside" " of Image: 0x%x" % rlc.VirtualAddress ) break # rlc.SizeOfBlock must be less than or equal to the size of the image # (It's a rather loose sanity test) if rlc.SizeOfBlock > self.OPTIONAL_HEADER.SizeOfImage: self.__warnings.append( "Invalid relocation information. SizeOfBlock too large" ": %d" % rlc.SizeOfBlock ) break if fmt is None: reloc_entries = self.parse_relocations( rva + rlc_size, rlc.VirtualAddress, rlc.SizeOfBlock - rlc_size ) else: reloc_entries = self.parse_relocations_with_format( rva + rlc_size, rlc.VirtualAddress, rlc.SizeOfBlock - rlc_size, fmt ) relocations.append(BaseRelocationData(struct=rlc, entries=reloc_entries)) if not rlc.SizeOfBlock: break rva += rlc.SizeOfBlock return relocations
(self, rva, size, fmt=None)
35,637
pefile
parse_import_directory
Walk and parse the import directory.
def parse_import_directory(self, rva, size, dllnames_only=False): """Walk and parse the import directory.""" import_descs = [] error_count = 0 image_import_descriptor_size = Structure( self.__IMAGE_IMPORT_DESCRIPTOR_format__ ).sizeof() while True: try: # If the RVA is invalid all would blow up. Some EXEs seem to be # specially nasty and have an invalid RVA. data = self.get_data(rva, image_import_descriptor_size) except PEFormatError: self.__warnings.append( f"Error parsing the import directory at RVA: 0x{rva:x}" ) break file_offset = self.get_offset_from_rva(rva) import_desc = self.__unpack_data__( self.__IMAGE_IMPORT_DESCRIPTOR_format__, data, file_offset=file_offset ) # If the structure is all zeros, we reached the end of the list if not import_desc or import_desc.all_zeroes(): break rva += import_desc.sizeof() # If the array of thunks is somewhere earlier than the import # descriptor we can set a maximum length for the array. Otherwise # just set a maximum length of the size of the file max_len = len(self.__data__) - file_offset if rva > import_desc.OriginalFirstThunk or rva > import_desc.FirstThunk: max_len = max( rva - import_desc.OriginalFirstThunk, rva - import_desc.FirstThunk ) import_data = [] if not dllnames_only: try: import_data = self.parse_imports( import_desc.OriginalFirstThunk, import_desc.FirstThunk, import_desc.ForwarderChain, max_length=max_len, ) except PEFormatError as e: self.__warnings.append( "Error parsing the import directory. " f"Invalid Import data at RVA: 0x{rva:x} ({e.value})" ) if error_count > 5: self.__warnings.append( "Too many errors parsing the import directory. " f"Invalid import data at RVA: 0x{rva:x}" ) break if not import_data: error_count += 1 # TODO: do not continue here continue dll = self.get_string_at_rva(import_desc.Name, MAX_DLL_LENGTH) if not is_valid_dos_filename(dll): dll = b("*invalid*") if dll: for symbol in import_data: if symbol.name is None: funcname = ordlookup.ordLookup(dll.lower(), symbol.ordinal) if funcname: symbol.name = funcname import_descs.append( ImportDescData(struct=import_desc, imports=import_data, dll=dll) ) if not dllnames_only: suspicious_imports = set(["LoadLibrary", "GetProcAddress"]) suspicious_imports_count = 0 total_symbols = 0 for imp_dll in import_descs: for symbol in imp_dll.imports: for suspicious_symbol in suspicious_imports: if not symbol or not symbol.name: continue name = symbol.name if type(symbol.name) == bytes: name = symbol.name.decode("utf-8") if name.startswith(suspicious_symbol): suspicious_imports_count += 1 break total_symbols += 1 if ( suspicious_imports_count == len(suspicious_imports) and total_symbols < 20 ): self.__warnings.append( "Imported symbols contain entries typical of packed executables." ) return import_descs
(self, rva, size, dllnames_only=False)
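A sketch of iterating the parsed imports, assuming the standard DIRECTORY_ENTRY_IMPORT accessor and a hypothetical "sample.exe":

import pefile

pe = pefile.PE("sample.exe")  # hypothetical input file
for entry in getattr(pe, "DIRECTORY_ENTRY_IMPORT", []):
    print(entry.dll)
    for imp in entry.imports:
        if imp.import_by_ordinal:
            print("  ord(%d)" % imp.ordinal)
        else:
            print("  %s (hint %s) at 0x%08x" % (imp.name, imp.hint, imp.address))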
35,638
pefile
parse_imports
Parse the imported symbols. It will fill a list, which will be available as the dictionary attribute "imports". Its keys will be the DLL names and its values the lists of symbols imported from each DLL.
def parse_imports( self, original_first_thunk, first_thunk, forwarder_chain, max_length=None, contains_addresses=False, ): """Parse the imported symbols. It will fill a list, which will be available as the dictionary attribute "imports". Its keys will be the DLL names and the values of all the symbols imported from that object. """ imported_symbols = [] # Import Lookup Table. Contains ordinals or pointers to strings. ilt = self.get_import_table( original_first_thunk, max_length, contains_addresses ) # Import Address Table. May have identical content to ILT if # PE file is not bound. It will contain the address of the # imported symbols once the binary is loaded or if it is already # bound. iat = self.get_import_table(first_thunk, max_length, contains_addresses) # OC Patch: # Would crash if IAT or ILT had None type if (not iat or len(iat) == 0) and (not ilt or len(ilt) == 0): self.__warnings.append( "Damaged Import Table information. " "ILT and/or IAT appear to be broken. " f"OriginalFirstThunk: 0x{original_first_thunk:x} " f"FirstThunk: 0x{first_thunk:x}" ) return [] table = None if ilt: table = ilt elif iat: table = iat else: return None imp_offset = 4 address_mask = 0x7FFFFFFF if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE: ordinal_flag = IMAGE_ORDINAL_FLAG elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS: ordinal_flag = IMAGE_ORDINAL_FLAG64 imp_offset = 8 address_mask = 0x7FFFFFFFFFFFFFFF else: # Some PEs may have an invalid value in the Magic field of the # Optional Header. Just in case the remaining file is parseable # let's pretend it's a 32bit PE32 by default. ordinal_flag = IMAGE_ORDINAL_FLAG num_invalid = 0 for idx, tbl_entry in enumerate(table): imp_ord = None imp_hint = None imp_name = None name_offset = None hint_name_table_rva = None import_by_ordinal = False # declare it here first if tbl_entry.AddressOfData: # If imported by ordinal, we will append the ordinal number # if tbl_entry.AddressOfData & ordinal_flag: import_by_ordinal = True imp_ord = tbl_entry.AddressOfData & 0xFFFF imp_name = None name_offset = None else: import_by_ordinal = False try: hint_name_table_rva = tbl_entry.AddressOfData & address_mask data = self.get_data(hint_name_table_rva, 2) # Get the Hint imp_hint = self.get_word_from_data(data, 0) imp_name = self.get_string_at_rva( tbl_entry.AddressOfData + 2, MAX_IMPORT_NAME_LENGTH ) if not is_valid_function_name(imp_name): imp_name = b("*invalid*") name_offset = self.get_offset_from_rva( tbl_entry.AddressOfData + 2 ) except PEFormatError: pass # by nriva: we want the ThunkRVA and ThunkOffset thunk_offset = tbl_entry.get_file_offset() thunk_rva = self.get_rva_from_offset(thunk_offset) imp_address = ( first_thunk + self.OPTIONAL_HEADER.ImageBase + idx * imp_offset ) struct_iat = None try: if iat and ilt and ilt[idx].AddressOfData != iat[idx].AddressOfData: imp_bound = iat[idx].AddressOfData struct_iat = iat[idx] else: imp_bound = None except IndexError: imp_bound = None # The file with hashes: # # MD5: bfe97192e8107d52dd7b4010d12b2924 # SHA256: 3d22f8b001423cb460811ab4f4789f277b35838d45c62ec0454c877e7c82c7f5 # # has an invalid table built in a way that it's parseable but contains # invalid entries that lead pefile to take extremely long amounts of time to # parse. It also leads to extreme memory consumption. 
# To prevent similar cases, if invalid entries are found in the middle of a # table the parsing will be aborted # if imp_ord is None and imp_name is None: raise PEFormatError("Invalid entries, aborting parsing.") # Some PEs appear to interleave valid and invalid imports. Instead of # aborting the parsing altogether we will simply skip the invalid entries. # Although if we see 1000 invalid entries and no legit ones, we abort. if imp_name == b("*invalid*"): if num_invalid > 1000 and num_invalid == idx: raise PEFormatError("Too many invalid names, aborting parsing.") num_invalid += 1 continue if imp_ord or imp_name: imported_symbols.append( ImportData( pe=self, struct_table=tbl_entry, struct_iat=struct_iat, # for bound imports if any import_by_ordinal=import_by_ordinal, ordinal=imp_ord, ordinal_offset=tbl_entry.get_file_offset(), hint=imp_hint, name=imp_name, name_offset=name_offset, bound=imp_bound, address=imp_address, hint_name_table_rva=hint_name_table_rva, thunk_offset=thunk_offset, thunk_rva=thunk_rva, ) ) return imported_symbols
(self, original_first_thunk, first_thunk, forwarder_chain, max_length=None, contains_addresses=False)
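The ordinal-versus-name decision above boils down to a single bit test. A standalone restatement of that rule (not pefile API, just the masks used above):

def decode_thunk(value, is_pe_plus):
    # Top bit set -> import by ordinal (low 16 bits); otherwise the value,
    # masked to 31/63 bits, is the RVA of a hint/name table entry.
    flag = 1 << (63 if is_pe_plus else 31)
    mask = flag - 1
    if value & flag:
        return ("ordinal", value & 0xFFFF)
    return ("hint_name_rva", value & mask)

assert decode_thunk(0x80000000 | 42, False) == ("ordinal", 42)
assert decode_thunk(0x3000, False) == ("hint_name_rva", 0x3000)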
35,639
pefile
parse_relocations
def parse_relocations(self, data_rva, rva, size): """""" try: data = self.get_data(data_rva, size) file_offset = self.get_offset_from_rva(data_rva) except PEFormatError: self.__warnings.append(f"Bad RVA in relocation data: 0x{data_rva:x}") return [] entries = [] offsets_and_type = set() for idx in range(int(len(data) / 2)): entry = self.__unpack_data__( self.__IMAGE_BASE_RELOCATION_ENTRY_format__, data[idx * 2 : (idx + 1) * 2], file_offset=file_offset, ) if not entry: break word = entry.Data reloc_type = word >> 12 reloc_offset = word & 0x0FFF if (reloc_offset, reloc_type) in offsets_and_type: self.__warnings.append( "Overlapping offsets in relocation data " "at RVA: 0x%x" % (reloc_offset + rva) ) break offsets_and_type.add((reloc_offset, reloc_type)) entries.append( RelocationData( struct=entry, type=reloc_type, base_rva=rva, rva=reloc_offset + rva ) ) file_offset += entry.sizeof() return entries
(self, data_rva, rva, size)
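The per-entry word layout used above, restated as a standalone helper (not pefile API):

def decode_reloc_word(word, page_rva):
    # High 4 bits: relocation type; low 12 bits: offset inside the 4 KiB
    # page covered by the enclosing IMAGE_BASE_RELOCATION block.
    return word >> 12, page_rva + (word & 0x0FFF)

assert decode_reloc_word(0x3A24, 0x1000) == (3, 0x1A24)  # IMAGE_REL_BASED_HIGHLOW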
35,640
pefile
parse_relocations_directory
def parse_relocations_directory(self, rva, size): """""" return self.parse_image_base_relocation_list(rva, size)
(self, rva, size)
35,641
pefile
parse_relocations_with_format
def parse_relocations_with_format(self, data_rva, rva, size, format): """""" try: data = self.get_data(data_rva, size) file_offset = self.get_offset_from_rva(data_rva) except PEFormatError: self.__warnings.append(f"Bad RVA in relocation data: 0x{data_rva:x}") return [] entry_size = StructureWithBitfields(format).sizeof() entries = [] offsets = set() for idx in range(int(len(data) / entry_size)): entry = self.__unpack_data_with_bitfields__( format, data[idx * entry_size : (idx + 1) * entry_size], file_offset=file_offset, ) if not entry: break reloc_offset = entry.PageRelativeOffset if reloc_offset in offsets: self.__warnings.append( "Overlapping offsets in relocation data " "at RVA: 0x%x" % (reloc_offset + rva) ) break offsets.add(reloc_offset) entries.append( RelocationData(struct=entry, base_rva=rva, rva=reloc_offset + rva) ) file_offset += entry_size return entries
(self, data_rva, rva, size, format)
35,642
pefile
parse_resource_data_entry
Parse a data entry from the resources directory.
def parse_resource_data_entry(self, rva): """Parse a data entry from the resources directory.""" try: # If the RVA is invalid all would blow up. Some EXEs seem to be # specially nasty and have an invalid RVA. data = self.get_data( rva, Structure(self.__IMAGE_RESOURCE_DATA_ENTRY_format__).sizeof() ) except PEFormatError: self.__warnings.append( "Error parsing a resource directory data entry, " "the RVA is invalid: 0x%x" % (rva) ) return None data_entry = self.__unpack_data__( self.__IMAGE_RESOURCE_DATA_ENTRY_format__, data, file_offset=self.get_offset_from_rva(rva), ) return data_entry
(self, rva)
35,643
pefile
parse_resource_entry
Parse a directory entry from the resources directory.
def parse_resource_entry(self, rva): """Parse a directory entry from the resources directory.""" try: data = self.get_data( rva, Structure(self.__IMAGE_RESOURCE_DIRECTORY_ENTRY_format__).sizeof() ) except PEFormatError: # A warning will be added by the caller if this method returns None return None resource = self.__unpack_data__( self.__IMAGE_RESOURCE_DIRECTORY_ENTRY_format__, data, file_offset=self.get_offset_from_rva(rva), ) if resource is None: return None # resource.NameIsString = (resource.Name & 0x80000000L) >> 31 resource.NameOffset = resource.Name & 0x7FFFFFFF resource.__pad = resource.Name & 0xFFFF0000 resource.Id = resource.Name & 0x0000FFFF resource.DataIsDirectory = (resource.OffsetToData & 0x80000000) >> 31 resource.OffsetToDirectory = resource.OffsetToData & 0x7FFFFFFF return resource
(self, rva)
35,644
pefile
parse_resources_directory
Parse the resources directory. Given the RVA of the resources directory, it will process all its entries. The root will have the corresponding members of its structure, IMAGE_RESOURCE_DIRECTORY, plus 'entries', a list of all the entries in the directory. Those entries will have, correspondingly, all the structure's members (IMAGE_RESOURCE_DIRECTORY_ENTRY) and an additional one, "directory", pointing to the IMAGE_RESOURCE_DIRECTORY structure representing deeper layers of the tree. This one will also have an 'entries' attribute, pointing to the third, and last, level: another directory with more entries. Those last entries will have a new attribute (either 'leaf' or 'data_entry' can be used to access it). This structure finally points to the resource data. All the members of this structure, IMAGE_RESOURCE_DATA_ENTRY, are available as its attributes.
def parse_resources_directory(self, rva, size=0, base_rva=None, level=0, dirs=None): """Parse the resources directory. Given the RVA of the resources directory, it will process all its entries. The root will have the corresponding members of its structure, IMAGE_RESOURCE_DIRECTORY, plus 'entries', a list of all the entries in the directory. Those entries will have, correspondingly, all the structure's members (IMAGE_RESOURCE_DIRECTORY_ENTRY) and an additional one, "directory", pointing to the IMAGE_RESOURCE_DIRECTORY structure representing deeper layers of the tree. This one will also have an 'entries' attribute, pointing to the third, and last, level: another directory with more entries. Those last entries will have a new attribute (either 'leaf' or 'data_entry' can be used to access it). This structure finally points to the resource data. All the members of this structure, IMAGE_RESOURCE_DATA_ENTRY, are available as its attributes. """ # OC Patch: if dirs is None: dirs = [rva] if base_rva is None: base_rva = rva if level > MAX_RESOURCE_DEPTH: self.__warnings.append( "Error parsing the resources directory. " "Excessively nested table depth %d (>%s)" % (level, MAX_RESOURCE_DEPTH) ) return None try: # If the RVA is invalid all would blow up. Some EXEs seem to be # specially nasty and have an invalid RVA. data = self.get_data( rva, Structure(self.__IMAGE_RESOURCE_DIRECTORY_format__).sizeof() ) except PEFormatError: self.__warnings.append( "Invalid resources directory. Can't read " "directory data at RVA: 0x%x" % rva ) return None # Get the resource directory structure, that is, the header # of the table preceding the actual entries # resource_dir = self.__unpack_data__( self.__IMAGE_RESOURCE_DIRECTORY_format__, data, file_offset=self.get_offset_from_rva(rva), ) if resource_dir is None: # If we can't parse resources directory then silently return. # This directory does not necessarily have to be valid to # still have a valid PE file self.__warnings.append( "Invalid resources directory. Can't parse " "directory data at RVA: 0x%x" % rva ) return None dir_entries = [] # Advance the RVA to the position immediately following the directory # table header and pointing to the first entry in the table # rva += resource_dir.sizeof() number_of_entries = ( resource_dir.NumberOfNamedEntries + resource_dir.NumberOfIdEntries ) # Set a hard limit on the maximum reasonable number of entries MAX_ALLOWED_ENTRIES = 4096 if number_of_entries > MAX_ALLOWED_ENTRIES: self.__warnings.append( "Error parsing the resources directory. " "The directory contains %d entries (>%s)" % (number_of_entries, MAX_ALLOWED_ENTRIES) ) return None self.__total_resource_entries_count += number_of_entries if self.__total_resource_entries_count > MAX_RESOURCE_ENTRIES: self.__warnings.append( "Error parsing the resources directory. " "The file contains at least %d entries (>%d)" % (self.__total_resource_entries_count, MAX_RESOURCE_ENTRIES) ) return None strings_to_postprocess = [] # Keep track of the last name's start and end offsets in order # to be able to detect overlapping entries that might suggest # an invalid or corrupt directory. last_name_begin_end = None for idx in range(number_of_entries): if ( not self.__resource_size_limit_reached and self.__total_resource_bytes > self.__resource_size_limit_upperbounds ): self.__resource_size_limit_reached = True self.__warnings.append( "Resource size 0x%x exceeds file size 0x%x, overlapping " "resources found."
% ( self.__total_resource_bytes, self.__resource_size_limit_upperbounds, ) ) res = self.parse_resource_entry(rva) if res is None: self.__warnings.append( "Error parsing the resources directory, " "Entry %d is invalid, RVA = 0x%x. " % (idx, rva) ) break entry_name = None entry_id = None name_is_string = (res.Name & 0x80000000) >> 31 if not name_is_string: entry_id = res.Name else: ustr_offset = base_rva + res.NameOffset try: entry_name = UnicodeStringWrapperPostProcessor(self, ustr_offset) self.__total_resource_bytes += entry_name.get_pascal_16_length() # If the last entry's offset points before the current's but its end # is past the current's beginning, assume the overlap indicates a # corrupt name. if last_name_begin_end and ( last_name_begin_end[0] < ustr_offset and last_name_begin_end[1] >= ustr_offset ): # Remove the previous overlapping entry as it's likely to be # already corrupt data. strings_to_postprocess.pop() self.__warnings.append( "Error parsing the resources directory, " "attempting to read entry name. " "Entry names overlap 0x%x" % (ustr_offset) ) break last_name_begin_end = ( ustr_offset, ustr_offset + entry_name.get_pascal_16_length(), ) strings_to_postprocess.append(entry_name) except PEFormatError: self.__warnings.append( "Error parsing the resources directory, " "attempting to read entry name. " "Can't read unicode string at offset 0x%x" % (ustr_offset) ) if res.DataIsDirectory: # OC Patch: # # One trick malware can do is to recursively reference # the next directory. This causes hilarity to ensue when # trying to parse everything correctly. # If the original RVA given to this function is equal to # the next one to parse, we assume that it's a trick. # Instead of raising a PEFormatError this would skip some # reasonable data so we just break. 
# # 9ee4d0a0caf095314fd7041a3e4404dc is the offending sample if base_rva + res.OffsetToDirectory in dirs: break entry_directory = self.parse_resources_directory( base_rva + res.OffsetToDirectory, size - (rva - base_rva), # size base_rva=base_rva, level=level + 1, dirs=dirs + [base_rva + res.OffsetToDirectory], ) if not entry_directory: break # Ange Albertini's code to process resources' strings # strings = None if entry_id == RESOURCE_TYPE["RT_STRING"]: strings = {} for resource_id in entry_directory.entries: if hasattr(resource_id, "directory"): resource_strings = {} for resource_lang in resource_id.directory.entries: if ( resource_lang is None or not hasattr(resource_lang, "data") or resource_lang.data.struct.Size is None or resource_id.id is None ): continue string_entry_rva = ( resource_lang.data.struct.OffsetToData ) string_entry_size = resource_lang.data.struct.Size string_entry_id = resource_id.id # XXX: has been raising exceptions preventing parsing try: string_entry_data = self.get_data( string_entry_rva, string_entry_size ) except PEFormatError: self.__warnings.append( f"Error parsing resource of type RT_STRING at " f"RVA 0x{string_entry_rva:x} with " f"size {string_entry_size}" ) continue parse_strings( string_entry_data, (int(string_entry_id) - 1) * 16, resource_strings, ) strings.update(resource_strings) resource_id.directory.strings = resource_strings dir_entries.append( ResourceDirEntryData( struct=res, name=entry_name, id=entry_id, directory=entry_directory, ) ) else: struct = self.parse_resource_data_entry( base_rva + res.OffsetToDirectory ) if struct: self.__total_resource_bytes += struct.Size entry_data = ResourceDataEntryData( struct=struct, lang=res.Name & 0x3FF, sublang=res.Name >> 10 ) dir_entries.append( ResourceDirEntryData( struct=res, name=entry_name, id=entry_id, data=entry_data ) ) else: break # Check if this entry contains version information # if level == 0 and res.Id == RESOURCE_TYPE["RT_VERSION"]: if dir_entries: last_entry = dir_entries[-1] try: version_entries = last_entry.directory.entries[0].directory.entries except: # Maybe a malformed directory structure...? # Let's ignore it pass else: for version_entry in version_entries: rt_version_struct = None try: rt_version_struct = version_entry.data.struct except: # Maybe a malformed directory structure...? # Let's ignore it pass if rt_version_struct is not None: self.parse_version_information(rt_version_struct) rva += res.sizeof() string_rvas = [s.get_rva() for s in strings_to_postprocess] string_rvas.sort() for idx, s in enumerate(strings_to_postprocess): s.render_pascal_16() resource_directory_data = ResourceDirData( struct=resource_dir, entries=dir_entries ) return resource_directory_data
(self, rva, size=0, base_rva=None, level=0, dirs=None)
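A sketch of walking the resulting tree, assuming the standard DIRECTORY_ENTRY_RESOURCE accessor and a hypothetical "sample.exe":

import pefile

pe = pefile.PE("sample.exe")  # hypothetical input file

def walk(entry, level=0):
    # Intermediate nodes carry a 'directory' attribute, leaves a 'data'
    # attribute wrapping IMAGE_RESOURCE_DATA_ENTRY.
    print("  " * level, entry.name if entry.name is not None else entry.id)
    if hasattr(entry, "directory"):
        for child in entry.directory.entries:
            walk(child, level + 1)
    elif hasattr(entry, "data"):
        raw = pe.get_data(entry.data.struct.OffsetToData, entry.data.struct.Size)
        print("  " * (level + 1), len(raw), "bytes")

if hasattr(pe, "DIRECTORY_ENTRY_RESOURCE"):
    for entry in pe.DIRECTORY_ENTRY_RESOURCE.entries:
        walk(entry)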
35,645
pefile
parse_rich_header
Parses the Rich header. See http://www.ntcore.com/files/richsign.htm for more information. Structure: 00 DanS ^ checksum, checksum, checksum, checksum 10 Symbol RVA ^ checksum, Symbol size ^ checksum... ... XX Rich, checksum, 0, 0,...
def parse_rich_header(self): """Parses the Rich header. See http://www.ntcore.com/files/richsign.htm for more information. Structure: 00 DanS ^ checksum, checksum, checksum, checksum 10 Symbol RVA ^ checksum, Symbol size ^ checksum... ... XX Rich, checksum, 0, 0,... """ # Rich Header constants # DANS = 0x536E6144 # 'DanS' as dword RICH = 0x68636952 # 'Rich' as dword rich_index = self.__data__.find( b"Rich", 0x80, self.OPTIONAL_HEADER.get_file_offset() ) if rich_index == -1: return None # Read a block of data try: # The end of the structure is 8 bytes after the start of the Rich # string. rich_data = self.__data__[0x80 : rich_index + 8] # Make the data have length a multiple of 4, otherwise the # subsequent parsing will fail. It's not impossible that we retrieve # truncated data that is not a multiple. rich_data = rich_data[: 4 * int(len(rich_data) / 4)] data = list( struct.unpack("<{0}I".format(int(len(rich_data) / 4)), rich_data) ) if RICH not in data: return None except PEFormatError: return None # get key, raw_data and clear_data key = struct.pack("<L", data[data.index(RICH) + 1]) result = {"key": key} raw_data = rich_data[: rich_data.find(b"Rich")] result["raw_data"] = raw_data ord_ = lambda c: ord(c) if not isinstance(c, int) else c clear_data = bytearray() for idx, val in enumerate(raw_data): clear_data.append((ord_(val) ^ ord_(key[idx % len(key)]))) result["clear_data"] = bytes(clear_data) # the checksum should be present 3 times after the DanS signature # checksum = data[1] if data[0] ^ checksum != DANS or data[2] != checksum or data[3] != checksum: return None result["checksum"] = checksum headervalues = [] result["values"] = headervalues data = data[4:] for i in range(int(len(data) / 2)): # Stop once the Rich footer signature is found # if data[2 * i] == RICH: # it should be followed by the checksum # if data[2 * i + 1] != checksum: self.__warnings.append("Rich Header is malformed") break # header values come by pairs # headervalues += [data[2 * i] ^ checksum, data[2 * i + 1] ^ checksum] return result
(self)
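A sketch of decoding the returned value pairs; the split of each id into a product id and build number is the commonly assumed convention, not something the parser itself enforces:

import pefile

pe = pefile.PE("sample.exe")  # hypothetical input file
rich = pe.parse_rich_header()
if rich:
    values = rich["values"]  # flat list: [comp_id, count, comp_id, count, ...]
    for comp_id, count in zip(values[::2], values[1::2]):
        # Conventionally the high word is the product id and the low word
        # the build number of the producing tool.
        print("prodid=%d build=%d count=%d" % (comp_id >> 16, comp_id & 0xFFFF, count))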
35,646
pefile
parse_sections
Fetch the PE file sections. The sections will be readily available in the "sections" attribute. Its attributes will contain all the section information plus "data", a buffer containing the section's data. The "Characteristics" member will be processed and attributes representing the section characteristics (with the 'IMAGE_SCN_' string trimmed from the constant's names) will be added to the section instance. Refer to the SectionStructure class for additional info.
def parse_sections(self, offset): """Fetch the PE file sections. The sections will be readily available in the "sections" attribute. Its attributes will contain all the section information plus "data", a buffer containing the section's data. The "Characteristics" member will be processed and attributes representing the section characteristics (with the 'IMAGE_SCN_' string trimmed from the constant's names) will be added to the section instance. Refer to the SectionStructure class for additional info. """ self.sections = [] MAX_SIMULTANEOUS_ERRORS = 3 for i in range(self.FILE_HEADER.NumberOfSections): if i >= MAX_SECTIONS: self.__warnings.append( "Too many sections {0} (>={1})".format( self.FILE_HEADER.NumberOfSections, MAX_SECTIONS ) ) break simultaneous_errors = 0 section = SectionStructure(self.__IMAGE_SECTION_HEADER_format__, pe=self) if not section: break section_offset = offset + section.sizeof() * i section.set_file_offset(section_offset) section_data = self.__data__[ section_offset : section_offset + section.sizeof() ] # Check if the section is all nulls and stop if so. if count_zeroes(section_data) == section.sizeof(): self.__warnings.append(f"Invalid section {i}. Contents are null-bytes.") break if not section_data: self.__warnings.append( f"Invalid section {i}. No data in the file (is this corkami's " "virtsectblXP?)." ) break section.__unpack__(section_data) self.__structures__.append(section) if section.SizeOfRawData + section.PointerToRawData > len(self.__data__): simultaneous_errors += 1 self.__warnings.append( f"Error parsing section {i}. SizeOfRawData is larger than file." ) if self.adjust_FileAlignment( section.PointerToRawData, self.OPTIONAL_HEADER.FileAlignment ) > len(self.__data__): simultaneous_errors += 1 self.__warnings.append( f"Error parsing section {i}. PointerToRawData points beyond " "the end of the file." ) if section.Misc_VirtualSize > 0x10000000: simultaneous_errors += 1 self.__warnings.append( f"Suspicious value found parsing section {i}. VirtualSize is " "extremely large > 256MiB." ) if ( self.adjust_SectionAlignment( section.VirtualAddress, self.OPTIONAL_HEADER.SectionAlignment, self.OPTIONAL_HEADER.FileAlignment, ) > 0x10000000 ): simultaneous_errors += 1 self.__warnings.append( f"Suspicious value found parsing section {i}. VirtualAddress is " "beyond 0x10000000." ) if ( self.OPTIONAL_HEADER.FileAlignment != 0 and (section.PointerToRawData % self.OPTIONAL_HEADER.FileAlignment) != 0 ): simultaneous_errors += 1 self.__warnings.append( ( f"Error parsing section {i}. " "PointerToRawData should normally be " "a multiple of FileAlignment, this might imply the file " "is trying to confuse tools which parse this incorrectly." ) ) if simultaneous_errors >= MAX_SIMULTANEOUS_ERRORS: self.__warnings.append("Too many warnings parsing section. Aborting.") break section_flags = retrieve_flags(SECTION_CHARACTERISTICS, "IMAGE_SCN_") # Set the section's flags according to the Characteristics member set_flags(section, section.Characteristics, section_flags) if section.__dict__.get( "IMAGE_SCN_MEM_WRITE", False ) and section.__dict__.get("IMAGE_SCN_MEM_EXECUTE", False): if section.Name.rstrip(b"\x00") == b"PAGE" and self.is_driver(): # Drivers can have a PAGE section with those flags set without # implying that it is malicious pass else: self.__warnings.append( f"Suspicious flags set for section {i}. " "Both IMAGE_SCN_MEM_WRITE and IMAGE_SCN_MEM_EXECUTE are set. " "This might indicate a packed executable."
) self.sections.append(section) # Sort the sections by their VirtualAddress and add a field to each of them # with the VirtualAddress of the next section. This will allow to check # for potentially overlapping sections in badly constructed PEs. self.sections.sort(key=lambda a: a.VirtualAddress) for idx, section in enumerate(self.sections): if idx == len(self.sections) - 1: section.next_section_virtual_address = None else: section.next_section_virtual_address = self.sections[ idx + 1 ].VirtualAddress if self.FILE_HEADER.NumberOfSections > 0 and self.sections: return ( offset + self.sections[0].sizeof() * self.FILE_HEADER.NumberOfSections ) else: return offset
(self, offset)
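A sketch of listing the parsed sections with the flag attributes set above; "sample.exe" is a hypothetical input path:

import pefile

pe = pefile.PE("sample.exe")  # hypothetical input file
for section in pe.sections:
    # Characteristics flags are exposed as attributes after set_flags().
    print(
        section.Name.rstrip(b"\x00").decode(errors="replace"),
        hex(section.VirtualAddress),
        hex(section.SizeOfRawData),
        "W" if getattr(section, "IMAGE_SCN_MEM_WRITE", False) else "-",
        "X" if getattr(section, "IMAGE_SCN_MEM_EXECUTE", False) else "-",
    )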
35,647
pefile
parse_version_information
Parse version information structure. The data will be made available in three attributes of the PE object. VS_VERSIONINFO will contain the first three fields of the main structure: 'Length', 'ValueLength', and 'Type'. VS_FIXEDFILEINFO will hold the rest of the fields, accessible as sub-attributes: 'Signature', 'StrucVersion', 'FileVersionMS', 'FileVersionLS', 'ProductVersionMS', 'ProductVersionLS', 'FileFlagsMask', 'FileFlags', 'FileOS', 'FileType', 'FileSubtype', 'FileDateMS', 'FileDateLS'. FileInfo is a list of all StringFileInfo and VarFileInfo structures. StringFileInfo structures will have a list as an attribute named 'StringTable' containing all the StringTable structures. Each of those structures contains a dictionary 'entries' with all the key / value version information string pairs. VarFileInfo structures will have a list as an attribute named 'Var' containing all Var structures. Each Var structure will have a dictionary as an attribute named 'entry' which will contain the name and value of the Var.
def parse_version_information(self, version_struct): """Parse version information structure. The data will be made available in three attributes of the PE object. VS_VERSIONINFO will contain the first three fields of the main structure: 'Length', 'ValueLength', and 'Type'. VS_FIXEDFILEINFO will hold the rest of the fields, accessible as sub-attributes: 'Signature', 'StrucVersion', 'FileVersionMS', 'FileVersionLS', 'ProductVersionMS', 'ProductVersionLS', 'FileFlagsMask', 'FileFlags', 'FileOS', 'FileType', 'FileSubtype', 'FileDateMS', 'FileDateLS'. FileInfo is a list of all StringFileInfo and VarFileInfo structures. StringFileInfo structures will have a list as an attribute named 'StringTable' containing all the StringTable structures. Each of those structures contains a dictionary 'entries' with all the key / value version information string pairs. VarFileInfo structures will have a list as an attribute named 'Var' containing all Var structures. Each Var structure will have a dictionary as an attribute named 'entry' which will contain the name and value of the Var. """ # Retrieve the data for the version info resource # try: start_offset = self.get_offset_from_rva(version_struct.OffsetToData) except PEFormatError: self.__warnings.append( "Error parsing the version information, " "attempting to read OffsetToData with RVA: 0x{:x}".format( version_struct.OffsetToData ) ) return raw_data = self.__data__[start_offset : start_offset + version_struct.Size] # Map the main structure and the subsequent string # versioninfo_struct = self.__unpack_data__( self.__VS_VERSIONINFO_format__, raw_data, file_offset=start_offset ) if versioninfo_struct is None: return ustr_offset = version_struct.OffsetToData + versioninfo_struct.sizeof() section = self.get_section_by_rva(ustr_offset) section_end = None if section: section_end = section.VirtualAddress + max( section.SizeOfRawData, section.Misc_VirtualSize ) versioninfo_string = None # These should return 'ascii' decoded data. For the case when it's # garbled data the ascii string will retain the byte values while # encoding it to something else may yield values that don't match the # file's contents. try: if section_end is None: versioninfo_string = self.get_string_u_at_rva( ustr_offset, encoding="ascii" ) else: versioninfo_string = self.get_string_u_at_rva( ustr_offset, (section_end - ustr_offset) >> 1, encoding="ascii" ) except PEFormatError: self.__warnings.append( "Error parsing the version information, " "attempting to read VS_VERSION_INFO string. Can't " "read unicode string at offset 0x%x" % (ustr_offset) ) if versioninfo_string is None: self.__warnings.append( "Invalid VS_VERSION_INFO block: {0}".format(versioninfo_string) ) return # If the structure does not contain the expected name, it's assumed to # be invalid if versioninfo_string is not None and versioninfo_string != b"VS_VERSION_INFO": if len(versioninfo_string) > 128: excerpt = versioninfo_string[:128].decode("ascii") # Don't leave any half-escaped characters excerpt = excerpt[: excerpt.rfind("\\u")] versioninfo_string = b( "{0} ... ({1} bytes, too long to display)".format( excerpt, len(versioninfo_string) ) ) self.__warnings.append( "Invalid VS_VERSION_INFO block: {0}".format( versioninfo_string.decode("ascii").replace("\00", "\\00") ) ) return if not hasattr(self, "VS_VERSIONINFO"): self.VS_VERSIONINFO = [] # Set the PE object's VS_VERSIONINFO to this one vinfo = versioninfo_struct # Set the Key attribute to point to the unicode string identifying the structure vinfo.Key = versioninfo_string self.VS_VERSIONINFO.append(vinfo) if versioninfo_string is None: versioninfo_string = "" # Process the fixed version information, get the offset and structure fixedfileinfo_offset = self.dword_align( versioninfo_struct.sizeof() + 2 * (len(versioninfo_string) + 1), version_struct.OffsetToData, ) fixedfileinfo_struct = self.__unpack_data__( self.__VS_FIXEDFILEINFO_format__, raw_data[fixedfileinfo_offset:], file_offset=start_offset + fixedfileinfo_offset, ) if not fixedfileinfo_struct: return if not hasattr(self, "VS_FIXEDFILEINFO"): self.VS_FIXEDFILEINFO = [] # Set the PE object's VS_FIXEDFILEINFO to this one self.VS_FIXEDFILEINFO.append(fixedfileinfo_struct) # Start parsing all the StringFileInfo and VarFileInfo structures # Get the first one stringfileinfo_offset = self.dword_align( fixedfileinfo_offset + fixedfileinfo_struct.sizeof(), version_struct.OffsetToData, ) # Set the PE object's attribute that will contain them all. if not hasattr(self, "FileInfo"): self.FileInfo = [] finfo = [] while True: # Process the StringFileInfo/VarFileInfo structure stringfileinfo_struct = self.__unpack_data__( self.__StringFileInfo_format__, raw_data[stringfileinfo_offset:], file_offset=start_offset + stringfileinfo_offset, ) if stringfileinfo_struct is None: self.__warnings.append( "Error parsing StringFileInfo/VarFileInfo struct" ) return None # Get the subsequent string defining the structure. ustr_offset = ( version_struct.OffsetToData + stringfileinfo_offset + versioninfo_struct.sizeof() ) try: stringfileinfo_string = self.get_string_u_at_rva(ustr_offset) except PEFormatError: self.__warnings.append( "Error parsing the version information, " "attempting to read StringFileInfo string. Can't " "read unicode string at offset 0x{0:x}".format(ustr_offset) ) break # Set such string as the Key attribute stringfileinfo_struct.Key = stringfileinfo_string # Append the structure to the PE object's list finfo.append(stringfileinfo_struct) # Parse a StringFileInfo entry if stringfileinfo_string and stringfileinfo_string.startswith( b"StringFileInfo" ): if ( stringfileinfo_struct.Type in (0, 1) and stringfileinfo_struct.ValueLength == 0 ): stringtable_offset = self.dword_align( stringfileinfo_offset + stringfileinfo_struct.sizeof() + 2 * (len(stringfileinfo_string) + 1), version_struct.OffsetToData, ) stringfileinfo_struct.StringTable = [] # Process the String Table entries while True: stringtable_struct = self.__unpack_data__( self.__StringTable_format__, raw_data[stringtable_offset:], file_offset=start_offset + stringtable_offset, ) if not stringtable_struct: break ustr_offset = ( version_struct.OffsetToData + stringtable_offset + stringtable_struct.sizeof() ) try: stringtable_string = self.get_string_u_at_rva(ustr_offset) except PEFormatError: self.__warnings.append( "Error parsing the version information, " "attempting to read StringTable string. Can't " "read unicode string at offset 0x{0:x}".format( ustr_offset ) ) break stringtable_struct.LangID = stringtable_string stringtable_struct.entries = {} stringtable_struct.entries_offsets = {} stringtable_struct.entries_lengths = {} stringfileinfo_struct.StringTable.append(stringtable_struct) entry_offset = self.dword_align( stringtable_offset + stringtable_struct.sizeof() + 2 * (len(stringtable_string) + 1), version_struct.OffsetToData, ) # Process all entries in the string table while ( entry_offset < stringtable_offset + stringtable_struct.Length ): string_struct = self.__unpack_data__( self.__String_format__, raw_data[entry_offset:], file_offset=start_offset + entry_offset, ) if not string_struct: break ustr_offset = ( version_struct.OffsetToData + entry_offset + string_struct.sizeof() ) try: key = self.get_string_u_at_rva(ustr_offset) key_offset = self.get_offset_from_rva(ustr_offset) except PEFormatError: self.__warnings.append( "Error parsing the version information, " "attempting to read StringTable Key string. Can't " "read unicode string at offset 0x{0:x}".format( ustr_offset ) ) break value_offset = self.dword_align( 2 * (len(key) + 1) + entry_offset + string_struct.sizeof(), version_struct.OffsetToData, ) ustr_offset = version_struct.OffsetToData + value_offset try: value = self.get_string_u_at_rva( ustr_offset, max_length=string_struct.ValueLength ) value_offset = self.get_offset_from_rva(ustr_offset) except PEFormatError: self.__warnings.append( "Error parsing the version information, attempting " "to read StringTable Value string. Can't read " f"unicode string at offset 0x{ustr_offset:x}" ) break if string_struct.Length == 0: entry_offset = ( stringtable_offset + stringtable_struct.Length ) else: entry_offset = self.dword_align( string_struct.Length + entry_offset, version_struct.OffsetToData, ) stringtable_struct.entries[key] = value stringtable_struct.entries_offsets[key] = ( key_offset, value_offset, ) stringtable_struct.entries_lengths[key] = ( len(key), len(value), ) new_stringtable_offset = self.dword_align( stringtable_struct.Length + stringtable_offset, version_struct.OffsetToData, ) # Check if the entry is crafted in a way that would lead # to an infinite loop and break if so. if new_stringtable_offset == stringtable_offset: break stringtable_offset = new_stringtable_offset if stringtable_offset >= stringfileinfo_struct.Length: break # Parse a VarFileInfo entry elif stringfileinfo_string and stringfileinfo_string.startswith( b"VarFileInfo" ): varfileinfo_struct = stringfileinfo_struct varfileinfo_struct.name = "VarFileInfo" if ( varfileinfo_struct.Type in (0, 1) and varfileinfo_struct.ValueLength == 0 ): var_offset = self.dword_align( stringfileinfo_offset + varfileinfo_struct.sizeof() + 2 * (len(stringfileinfo_string) + 1), version_struct.OffsetToData, ) varfileinfo_struct.Var = [] # Process all entries while True: var_struct = self.__unpack_data__( self.__Var_format__, raw_data[var_offset:], file_offset=start_offset + var_offset, ) if not var_struct: break ustr_offset = ( version_struct.OffsetToData + var_offset + var_struct.sizeof() ) try: var_string = self.get_string_u_at_rva(ustr_offset) except PEFormatError: self.__warnings.append( "Error parsing the version information, " "attempting to read VarFileInfo Var string. " "Can't read unicode string at offset 0x{0:x}".format( ustr_offset ) ) break if var_string is None: break varfileinfo_struct.Var.append(var_struct) varword_offset = self.dword_align( 2 * (len(var_string) + 1) + var_offset + var_struct.sizeof(), version_struct.OffsetToData, ) orig_varword_offset = varword_offset while ( varword_offset < orig_varword_offset + var_struct.ValueLength ): word1 = self.get_word_from_data( raw_data[varword_offset : varword_offset + 2], 0 ) word2 = self.get_word_from_data( raw_data[varword_offset + 2 : varword_offset + 4], 0 ) varword_offset += 4 if isinstance(word1, int) and isinstance(word2, int): var_struct.entry = { var_string: "0x%04x 0x%04x" % (word1, word2) } var_offset = self.dword_align( var_offset + var_struct.Length, version_struct.OffsetToData ) if var_offset <= var_offset + var_struct.Length: break # Increment and align the offset stringfileinfo_offset = self.dword_align( stringfileinfo_struct.Length + stringfileinfo_offset, version_struct.OffsetToData, ) # Check if all the StringFileInfo and VarFileInfo items have been processed if ( stringfileinfo_struct.Length == 0 or stringfileinfo_offset >= versioninfo_struct.Length ): break self.FileInfo.append(finfo)
(self, version_struct)
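For context, a minimal usage sketch of the structures this method populates. The path "sample.exe" is hypothetical, and the attributes only exist when the file carries a version resource, so the code probes for them defensively:

import pefile

pe = pefile.PE("sample.exe")  # hypothetical input; must contain a version resource
# parse_version_information runs internally while the resource directory is
# parsed, so its results are exposed as attributes of the PE object.
if hasattr(pe, "VS_FIXEDFILEINFO"):
    ffi = pe.VS_FIXEDFILEINFO[0]
    print("File version: %d.%d.%d.%d" % (
        ffi.FileVersionMS >> 16, ffi.FileVersionMS & 0xFFFF,
        ffi.FileVersionLS >> 16, ffi.FileVersionLS & 0xFFFF,
    ))
for finfo in getattr(pe, "FileInfo", []):
    for entry in finfo:
        if hasattr(entry, "StringTable"):
            for st in entry.StringTable:
                for key, value in st.entries.items():
                    # keys and values are bytes, e.g. b'CompanyName'
                    print(key.decode(), "=", value.decode())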
35,648
pefile
print_info
Print all the PE header information in a human-readable form.
def print_info(self, encoding="utf-8"):
    """Print all the PE header information in a human-readable form."""
    print(self.dump_info(encoding=encoding))
(self, encoding='utf-8')
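A short hedged example of how this is typically invoked; the input path is hypothetical:

import pefile

pe = pefile.PE("sample.exe")  # hypothetical input file
pe.print_info()               # dumps headers, sections, imports, etc. to stdout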
35,649
pefile
relocate_image
Apply the relocation information to the image using the provided image base. Given the new base, all the relocations will be processed and both the raw data and the sections' data will be fixed accordingly. The resulting image can then be retrieved through the method get_memory_mapped_image(), which yields something that more closely matches what would be found in memory once the Windows loader has finished its work.
def relocate_image(self, new_ImageBase):
    """Apply the relocation information to the image using the provided image base.

    Given the new base, all the relocations will be processed and both the
    raw data and the sections' data will be fixed accordingly. The resulting
    image can then be retrieved through the method get_memory_mapped_image(),
    which yields something that more closely matches what would be found in
    memory once the Windows loader has finished its work.
    """

    relocation_difference = new_ImageBase - self.OPTIONAL_HEADER.ImageBase

    if (
        len(self.OPTIONAL_HEADER.DATA_DIRECTORY) >= 6
        and self.OPTIONAL_HEADER.DATA_DIRECTORY[5].Size
    ):
        if not hasattr(self, "DIRECTORY_ENTRY_BASERELOC"):
            self.parse_data_directories(
                directories=[DIRECTORY_ENTRY["IMAGE_DIRECTORY_ENTRY_BASERELOC"]]
            )
        if not hasattr(self, "DIRECTORY_ENTRY_BASERELOC"):
            self.__warnings.append(
                "Relocating image but PE does not have (or pefile cannot "
                "parse) a DIRECTORY_ENTRY_BASERELOC"
            )
        else:
            for reloc in self.DIRECTORY_ENTRY_BASERELOC:

                # We iterate with an index because if the relocation is of type
                # IMAGE_REL_BASED_HIGHADJ we need to also process the next entry
                # at once and skip it for the next iteration
                #
                entry_idx = 0
                while entry_idx < len(reloc.entries):

                    entry = reloc.entries[entry_idx]
                    entry_idx += 1

                    if entry.type == RELOCATION_TYPE["IMAGE_REL_BASED_ABSOLUTE"]:
                        # Nothing to do for this type of relocation
                        pass

                    elif entry.type == RELOCATION_TYPE["IMAGE_REL_BASED_HIGH"]:
                        # Fix the high 16-bits of a relocation
                        #
                        # Add high 16-bits of relocation_difference to the
                        # 16-bit value at RVA=entry.rva
                        self.set_word_at_rva(
                            entry.rva,
                            (
                                self.get_word_at_rva(entry.rva)
                                + relocation_difference >> 16
                            )
                            & 0xFFFF,
                        )

                    elif entry.type == RELOCATION_TYPE["IMAGE_REL_BASED_LOW"]:
                        # Fix the low 16-bits of a relocation
                        #
                        # Add low 16 bits of relocation_difference to the 16-bit
                        # value at RVA=entry.rva
                        self.set_word_at_rva(
                            entry.rva,
                            (
                                self.get_word_at_rva(entry.rva)
                                + relocation_difference
                            )
                            & 0xFFFF,
                        )

                    elif entry.type == RELOCATION_TYPE["IMAGE_REL_BASED_HIGHLOW"]:
                        # Handle all high and low parts of a 32-bit relocation
                        #
                        # Add relocation_difference to the value at RVA=entry.rva
                        self.set_dword_at_rva(
                            entry.rva,
                            self.get_dword_at_rva(entry.rva)
                            + relocation_difference,
                        )

                    elif entry.type == RELOCATION_TYPE["IMAGE_REL_BASED_HIGHADJ"]:
                        # Fix the high 16-bits of a relocation and adjust
                        #
                        # Add high 16-bits of relocation_difference to the 32-bit
                        # value composed from the (16-bit value at
                        # RVA=entry.rva)<<16 plus the 16-bit value at the next
                        # relocation entry.

                        # If the next entry is beyond the array's limits,
                        # abort... the table is corrupt
                        if entry_idx == len(reloc.entries):
                            break

                        next_entry = reloc.entries[entry_idx]
                        entry_idx += 1
                        self.set_word_at_rva(
                            entry.rva,
                            (
                                (self.get_word_at_rva(entry.rva) << 16)
                                + next_entry.rva
                                + relocation_difference
                                & 0xFFFF0000
                            )
                            >> 16,
                        )

                    elif entry.type == RELOCATION_TYPE["IMAGE_REL_BASED_DIR64"]:
                        # Apply the difference to the 64-bit value at the offset
                        # RVA=entry.rva
                        self.set_qword_at_rva(
                            entry.rva,
                            self.get_qword_at_rva(entry.rva)
                            + relocation_difference,
                        )

    self.OPTIONAL_HEADER.ImageBase = new_ImageBase

    # correct VAs (virtual addresses) occurrences in directory information
    if hasattr(self, "DIRECTORY_ENTRY_IMPORT"):
        for dll in self.DIRECTORY_ENTRY_IMPORT:
            for func in dll.imports:
                func.address += relocation_difference
    if hasattr(self, "DIRECTORY_ENTRY_TLS"):
        self.DIRECTORY_ENTRY_TLS.struct.StartAddressOfRawData += (
            relocation_difference
        )
        self.DIRECTORY_ENTRY_TLS.struct.EndAddressOfRawData += (
            relocation_difference
        )
        self.DIRECTORY_ENTRY_TLS.struct.AddressOfIndex += relocation_difference
        self.DIRECTORY_ENTRY_TLS.struct.AddressOfCallBacks += (
            relocation_difference
        )
    if hasattr(self, "DIRECTORY_ENTRY_LOAD_CONFIG"):
        load_config = self.DIRECTORY_ENTRY_LOAD_CONFIG.struct
        if hasattr(load_config, "LockPrefixTable") and load_config.LockPrefixTable:
            load_config.LockPrefixTable += relocation_difference
        if hasattr(load_config, "EditList") and load_config.EditList:
            load_config.EditList += relocation_difference
        if hasattr(load_config, "SecurityCookie") and load_config.SecurityCookie:
            load_config.SecurityCookie += relocation_difference
        if hasattr(load_config, "SEHandlerTable") and load_config.SEHandlerTable:
            load_config.SEHandlerTable += relocation_difference
        if (
            hasattr(load_config, "GuardCFCheckFunctionPointer")
            and load_config.GuardCFCheckFunctionPointer
        ):
            load_config.GuardCFCheckFunctionPointer += relocation_difference
        if (
            hasattr(load_config, "GuardCFDispatchFunctionPointer")
            and load_config.GuardCFDispatchFunctionPointer
        ):
            load_config.GuardCFDispatchFunctionPointer += relocation_difference
        if (
            hasattr(load_config, "GuardCFFunctionTable")
            and load_config.GuardCFFunctionTable
        ):
            load_config.GuardCFFunctionTable += relocation_difference
        if (
            hasattr(load_config, "GuardAddressTakenIatEntryTable")
            and load_config.GuardAddressTakenIatEntryTable
        ):
            load_config.GuardAddressTakenIatEntryTable += relocation_difference
        if (
            hasattr(load_config, "GuardLongJumpTargetTable")
            and load_config.GuardLongJumpTargetTable
        ):
            load_config.GuardLongJumpTargetTable += relocation_difference
        if (
            hasattr(load_config, "DynamicValueRelocTable")
            and load_config.DynamicValueRelocTable
        ):
            load_config.DynamicValueRelocTable += relocation_difference
        if (
            self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS
            and hasattr(load_config, "CHPEMetadataPointer")
            and load_config.CHPEMetadataPointer
        ):
            load_config.CHPEMetadataPointer += relocation_difference
        if (
            hasattr(load_config, "GuardRFFailureRoutine")
            and load_config.GuardRFFailureRoutine
        ):
            load_config.GuardRFFailureRoutine += relocation_difference
        if (
            hasattr(load_config, "GuardRFFailureRoutineFunctionPointer")
            and load_config.GuardRFFailureRoutineFunctionPointer
        ):
            load_config.GuardRFFailureRoutineFunctionPointer += (
                relocation_difference
            )
        if (
            hasattr(load_config, "GuardRFVerifyStackPointerFunctionPointer")
            and load_config.GuardRFVerifyStackPointerFunctionPointer
        ):
            load_config.GuardRFVerifyStackPointerFunctionPointer += (
                relocation_difference
            )
        if (
            hasattr(load_config, "EnclaveConfigurationPointer")
            and load_config.EnclaveConfigurationPointer
        ):
            load_config.EnclaveConfigurationPointer += relocation_difference
(self, new_ImageBase)
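A sketch of rebasing an image and retrieving the relocated, memory-mapped view; the path and the new base address are illustrative, and the file is assumed to carry a .reloc section:

import pefile

pe = pefile.PE("sample.dll")           # hypothetical DLL with relocation data
pe.relocate_image(0x10000000)          # rebase the image as the loader would
mapped = pe.get_memory_mapped_image()  # bytes laid out as if loaded at the new base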
35,650
pefile
set_bytes_at_offset
Overwrite the bytes at the given file offset with the given data. Return True if successful, False otherwise. It can fail if the offset is outside the file's boundaries.
def set_bytes_at_offset(self, offset, data):
    """Overwrite the bytes at the given file offset with the given data.

    Return True if successful, False otherwise. It can fail if the
    offset is outside the file's boundaries.
    """
    if not isinstance(data, bytes):
        raise TypeError("data should be of type: bytes")
    if 0 <= offset < len(self.__data__):
        self.set_data_bytes(offset, data)
    else:
        return False
    return True
(self, offset, data)
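A minimal patching sketch, assuming the offset lies inside the file image; the path, offset, and byte values are illustrative:

import pefile

pe = pefile.PE("sample.exe")  # hypothetical input file
ok = pe.set_bytes_at_offset(0x400, b"\x90\x90")  # illustrative offset and bytes
print(ok)  # False if 0x400 were outside the file's boundaries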
35,651
pefile
set_bytes_at_rva
Overwrite the bytes at the file offset corresponding to the given RVA with the given data. Return True if successful, False otherwise. It can fail if the offset is outside the file's boundaries.
def set_bytes_at_rva(self, rva, data):
    """Overwrite the bytes at the file offset corresponding to the given RVA
    with the given data.

    Return True if successful, False otherwise. It can fail if the
    offset is outside the file's boundaries.
    """
    if not isinstance(data, bytes):
        raise TypeError("data should be of type: bytes")
    offset = self.get_physical_by_rva(rva)
    if not offset:
        return False
    return self.set_bytes_at_offset(offset, data)
(self, rva, data)
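The RVA variant resolves the address to a physical offset first; a hedged sketch using the entry point as the target, with a hypothetical output path:

import pefile

pe = pefile.PE("sample.exe")  # hypothetical input file
ep = pe.OPTIONAL_HEADER.AddressOfEntryPoint
if pe.set_bytes_at_rva(ep, b"\xc3"):  # overwrite the first byte at the entry point
    pe.write("patched.exe")           # hypothetical output path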
35,652
pefile
set_data_bytes
null
def set_data_bytes(self, offset: int, data: bytes):
    # Promote the immutable bytes buffer to a bytearray on the first write
    # so the in-memory file image can be patched in place.
    if not isinstance(self.__data__, bytearray):
        self.__data__ = bytearray(self.__data__)
    self.__data__[offset : offset + len(data)] = data
(self, offset: int, data: bytes)
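The bytearray promotion above is what makes the setters copy-on-write: the original bytes buffer stays untouched until the first patch. A standalone sketch of the same idea, independent of pefile:

data = b"\x00" * 8        # immutable buffer, as when a file is first read
buf = bytearray(data)     # promote once, then patch in place
buf[2:4] = b"\xde\xad"    # slice assignment mutates the buffer directly
print(buf.hex())          # 0000dead00000000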