code
string | signature
string | docstring
string | loss_without_docstring
float64 | loss_with_docstring
float64 | factor
float64 |
---|---|---|---|---|---|
try:
# won't work before calling CreateProcess or DebugActiveProcess
win32.DebugSetProcessKillOnExit(bKillOnExit)
except (AttributeError, WindowsError):
return False
return True | def set_kill_on_exit_mode(bKillOnExit = False) | Defines the behavior of the debugged processes when the debugging
thread dies. This method only affects the calling thread.
Works on the following platforms:
- Microsoft Windows XP and above.
- Wine (Windows Emulator).
Fails on the following platforms:
- Microsoft Windows 2000 and below.
- ReactOS.
@type bKillOnExit: bool
@param bKillOnExit: C{True} to automatically kill processes when the
debugger thread dies. C{False} to automatically detach from
processes when the debugger thread dies.
@rtype: bool
@return: C{True} on success, C{False} on error.
@note:
This call will fail if a debug port was not created. That is, if
the debugger isn't attached to at least one process. For more info
see: U{http://msdn.microsoft.com/en-us/library/ms679307.aspx} | 7.935698 | 8.559233 | 0.927151 |
if win32.arch not in (win32.ARCH_I386, win32.ARCH_AMD64):
raise NotImplementedError(
"MSR reading is only supported on i386 or amd64 processors.")
msr = win32.SYSDBG_MSR()
msr.Address = address
msr.Data = 0
win32.NtSystemDebugControl(win32.SysDbgReadMsr,
InputBuffer = msr,
OutputBuffer = msr)
return msr.Data | def read_msr(address) | Read the contents of the specified MSR (Machine Specific Register).
@type address: int
@param address: MSR to read.
@rtype: int
@return: Value of the specified MSR.
@raise WindowsError:
Raises an exception on error.
@raise NotImplementedError:
Current architecture is not C{i386} or C{amd64}.
@warning:
It could potentially brick your machine.
It works on my machine, but your mileage may vary. | 4.203738 | 4.228814 | 0.99407 |
if win32.arch not in (win32.ARCH_I386, win32.ARCH_AMD64):
raise NotImplementedError(
"MSR writing is only supported on i386 or amd64 processors.")
msr = win32.SYSDBG_MSR()
msr.Address = address
msr.Data = value
win32.NtSystemDebugControl(win32.SysDbgWriteMsr, InputBuffer = msr) | def write_msr(address, value) | Set the contents of the specified MSR (Machine Specific Register).
@type address: int
@param address: MSR to write.
@type value: int
@param value: Contents to write on the MSR.
@raise WindowsError:
Raises an exception on error.
@raise NotImplementedError:
Current architecture is not C{i386} or C{amd64}.
@warning:
It could potentially brick your machine.
It works on my machine, but your mileage may vary. | 4.56775 | 4.260389 | 1.072144 |
cls.write_msr(DebugRegister.DebugCtlMSR,
DebugRegister.BranchTrapFlag | DebugRegister.LastBranchRecord) | def enable_step_on_branch_mode(cls) | When tracing, call this on every single step event
for step on branch mode.
@raise WindowsError:
Raises C{ERROR_DEBUGGER_INACTIVE} if the debugger is not attached
to at least one process.
@raise NotImplementedError:
Current architecture is not C{i386} or C{amd64}.
@warning:
This method uses the processor's machine specific registers (MSR).
It could potentially brick your machine.
It works on my machine, but your mileage may vary.
@note:
It doesn't seem to work in VMWare or VirtualBox machines.
Maybe it fails in other virtualization/emulation environments,
no extensive testing was made so far. | 55.491356 | 47.064068 | 1.17906 |
LastBranchFromIP = cls.read_msr(DebugRegister.LastBranchFromIP)
LastBranchToIP = cls.read_msr(DebugRegister.LastBranchToIP)
return ( LastBranchFromIP, LastBranchToIP ) | def get_last_branch_location(cls) | Returns the source and destination addresses of the last taken branch.
@rtype: tuple( int, int )
@return: Source and destination addresses of the last taken branch.
@raise WindowsError:
Raises an exception on error.
@raise NotImplementedError:
Current architecture is not C{i386} or C{amd64}.
@warning:
This method uses the processor's machine specific registers (MSR).
It could potentially brick your machine.
It works on my machine, but your mileage may vary.
@note:
It doesn't seem to work in VMWare or VirtualBox machines.
Maybe it fails in other virtualization/emulation environments,
no extensive testing was made so far. | 5.906154 | 4.636833 | 1.273747 |
if bits is None:
bits = cls.bits
elif bits not in (32, 64):
raise NotImplementedError("Unknown architecture (%r bits)" % bits)
if bits == 32 and cls.bits == 64:
keyname = 'HKLM\\SOFTWARE\\Wow6432Node\\Microsoft\\Windows NT\\CurrentVersion\\AeDebug'
else:
keyname = 'HKLM\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\AeDebug'
key = cls.registry[keyname]
debugger = key.get('Debugger')
auto = key.get('Auto')
hotkey = key.get('UserDebuggerHotkey')
if auto is not None:
auto = bool(auto)
return (debugger, auto, hotkey) | def get_postmortem_debugger(cls, bits = None) | Returns the postmortem debugging settings from the Registry.
@see: L{set_postmortem_debugger}
@type bits: int
@param bits: Set to C{32} for the 32 bits debugger, or C{64} for the
64 bits debugger. Set to C{None} for the default (L{System.bits}).
@rtype: tuple( str, bool, int )
@return: A tuple containing the command line string to the postmortem
debugger, a boolean specifying if user interaction is allowed
before attaching, and an integer specifying a user defined hotkey.
Any member of the tuple may be C{None}.
See L{set_postmortem_debugger} for more details.
@raise WindowsError:
Raises an exception on error. | 3.425394 | 3.10294 | 1.103919 |
if bits is None:
bits = cls.bits
elif bits not in (32, 64):
raise NotImplementedError("Unknown architecture (%r bits)" % bits)
if bits == 32 and cls.bits == 64:
keyname = 'HKLM\\SOFTWARE\\Wow6432Node\\Microsoft\\Windows NT\\CurrentVersion\\AeDebug\\AutoExclusionList'
else:
keyname = 'HKLM\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\AeDebug\\AutoExclusionList'
try:
key = cls.registry[keyname]
except KeyError:
return []
return [name for (name, enabled) in key.items() if enabled] | def get_postmortem_exclusion_list(cls, bits = None) | Returns the exclusion list for the postmortem debugger.
@see: L{get_postmortem_debugger}
@type bits: int
@param bits: Set to C{32} for the 32 bits debugger, or C{64} for the
64 bits debugger. Set to C{None} for the default (L{System.bits}).
@rtype: list( str )
@return: List of excluded application filenames.
@raise WindowsError:
Raises an exception on error. | 3.206183 | 3.086022 | 1.038937 |
if bits is None:
bits = cls.bits
elif bits not in (32, 64):
raise NotImplementedError("Unknown architecture (%r bits)" % bits)
if bits == 32 and cls.bits == 64:
keyname = 'HKLM\\SOFTWARE\\Wow6432Node\\Microsoft\\Windows NT\\CurrentVersion\\AeDebug'
else:
keyname = 'HKLM\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\AeDebug'
key = cls.registry[keyname]
if cmdline is not None:
key['Debugger'] = cmdline
if auto is not None:
key['Auto'] = int(bool(auto))
if hotkey is not None:
key['UserDebuggerHotkey'] = int(hotkey) | def set_postmortem_debugger(cls, cmdline,
auto = None, hotkey = None, bits = None) | Sets the postmortem debugging settings in the Registry.
@warning: This method requires administrative rights.
@see: L{get_postmortem_debugger}
@type cmdline: str
@param cmdline: Command line to the new postmortem debugger.
When the debugger is invoked, the first "%ld" is replaced with the
process ID and the second "%ld" is replaced with the event handle.
Don't forget to enclose the program filename in double quotes if
the path contains spaces.
@type auto: bool
@param auto: Set to C{True} if no user interaction is allowed, C{False}
to prompt a confirmation dialog before attaching.
Use C{None} to leave this value unchanged.
@type hotkey: int
@param hotkey: Virtual key scan code for the user defined hotkey.
Use C{0} to disable the hotkey.
Use C{None} to leave this value unchanged.
@type bits: int
@param bits: Set to C{32} for the 32 bits debugger, or C{64} for the
64 bits debugger. Set to C{None} for the default (L{System.bits}).
@rtype: tuple( str, bool, int )
@return: Previously defined command line and auto flag.
@raise WindowsError:
Raises an exception on error. | 2.968997 | 3.107447 | 0.955445 |
if bits is None:
bits = cls.bits
elif bits not in (32, 64):
raise NotImplementedError("Unknown architecture (%r bits)" % bits)
if bits == 32 and cls.bits == 64:
keyname = 'HKLM\\SOFTWARE\\Wow6432Node\\Microsoft\\Windows NT\\CurrentVersion\\AeDebug\\AutoExclusionList'
else:
keyname = 'HKLM\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\AeDebug\\AutoExclusionList'
try:
key = cls.registry[keyname]
except KeyError:
key = cls.registry.create(keyname)
key[pathname] = 1 | def add_to_postmortem_exclusion_list(cls, pathname, bits = None) | Adds the given filename to the exclusion list for postmortem debugging.
@warning: This method requires administrative rights.
@see: L{get_postmortem_exclusion_list}
@type pathname: str
@param pathname:
Application pathname to exclude from postmortem debugging.
@type bits: int
@param bits: Set to C{32} for the 32 bits debugger, or C{64} for the
64 bits debugger. Set to C{None} for the default (L{System.bits}).
@raise WindowsError:
Raises an exception on error. | 3.125082 | 2.987724 | 1.045974 |
if bits is None:
bits = cls.bits
elif bits not in (32, 64):
raise NotImplementedError("Unknown architecture (%r bits)" % bits)
if bits == 32 and cls.bits == 64:
keyname = 'HKLM\\SOFTWARE\\Wow6432Node\\Microsoft\\Windows NT\\CurrentVersion\\AeDebug\\AutoExclusionList'
else:
keyname = 'HKLM\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\AeDebug\\AutoExclusionList'
try:
key = cls.registry[keyname]
except KeyError:
return
try:
del key[pathname]
except KeyError:
return | def remove_from_postmortem_exclusion_list(cls, pathname, bits = None) | Removes the given filename from the exclusion list for postmortem
debugging from the Registry.
@warning: This method requires administrative rights.
@warning: Don't ever delete entries you haven't created yourself!
Some entries are set by default for your version of Windows.
Deleting them might deadlock your system under some circumstances.
For more details see:
U{http://msdn.microsoft.com/en-us/library/bb204634(v=vs.85).aspx}
@see: L{get_postmortem_exclusion_list}
@type pathname: str
@param pathname: Application pathname to remove from the postmortem
debugging exclusion list.
@type bits: int
@param bits: Set to C{32} for the 32 bits debugger, or C{64} for the
64 bits debugger. Set to C{None} for the default (L{System.bits}).
@raise WindowsError:
Raises an exception on error. | 3.102891 | 2.908957 | 1.066668 |
with win32.OpenSCManager(
dwDesiredAccess = win32.SC_MANAGER_ENUMERATE_SERVICE
) as hSCManager:
try:
return win32.EnumServicesStatusEx(hSCManager)
except AttributeError:
return win32.EnumServicesStatus(hSCManager) | def get_services() | Retrieve a list of all system services.
@see: L{get_active_services},
L{start_service}, L{stop_service},
L{pause_service}, L{resume_service}
@rtype: list( L{win32.ServiceStatusProcessEntry} )
@return: List of service status descriptors. | 3.59812 | 4.026629 | 0.893581 |
with win32.OpenSCManager(
dwDesiredAccess = win32.SC_MANAGER_ENUMERATE_SERVICE
) as hSCManager:
return [ entry for entry in win32.EnumServicesStatusEx(hSCManager,
dwServiceType = win32.SERVICE_WIN32,
dwServiceState = win32.SERVICE_ACTIVE) \
if entry.ProcessId ] | def get_active_services() | Retrieve a list of all active system services.
@see: L{get_services},
L{start_service}, L{stop_service},
L{pause_service}, L{resume_service}
@rtype: list( L{win32.ServiceStatusProcessEntry} )
@return: List of service status descriptors. | 4.160636 | 4.12843 | 1.007801 |
with win32.OpenSCManager(
dwDesiredAccess = win32.SC_MANAGER_ENUMERATE_SERVICE
) as hSCManager:
with win32.OpenService(hSCManager, name,
dwDesiredAccess = win32.SERVICE_QUERY_STATUS
) as hService:
try:
return win32.QueryServiceStatusEx(hService)
except AttributeError:
return win32.QueryServiceStatus(hService) | def get_service(name) | Get the service descriptor for the given service name.
@see: L{start_service}, L{stop_service},
L{pause_service}, L{resume_service}
@type name: str
@param name: Service unique name. You can get this value from the
C{ServiceName} member of the service descriptors returned by
L{get_services} or L{get_active_services}.
@rtype: L{win32.ServiceStatusProcess}
@return: Service status descriptor. | 2.596155 | 2.625175 | 0.988946 |
with win32.OpenSCManager(
dwDesiredAccess = win32.SC_MANAGER_ENUMERATE_SERVICE
) as hSCManager:
return win32.GetServiceDisplayName(hSCManager, name) | def get_service_display_name(name) | Get the service display name for the given service name.
@see: L{get_service}
@type name: str
@param name: Service unique name. You can get this value from the
C{ServiceName} member of the service descriptors returned by
L{get_services} or L{get_active_services}.
@rtype: str
@return: Service display name. | 4.507514 | 5.574769 | 0.808556 |
with win32.OpenSCManager(
dwDesiredAccess = win32.SC_MANAGER_ENUMERATE_SERVICE
) as hSCManager:
return win32.GetServiceKeyName(hSCManager, displayName) | def get_service_from_display_name(displayName) | Get the service unique name given its display name.
@see: L{get_service}
@type displayName: str
@param displayName: Service display name. You can get this value from
the C{DisplayName} member of the service descriptors returned by
L{get_services} or L{get_active_services}.
@rtype: str
@return: Service unique name. | 4.736645 | 6.351656 | 0.745734 |
with win32.OpenSCManager(
dwDesiredAccess = win32.SC_MANAGER_CONNECT
) as hSCManager:
with win32.OpenService(hSCManager, name,
dwDesiredAccess = win32.SERVICE_START
) as hService:
win32.StartService(hService) | def start_service(name, argv = None) | Start the service given by name.
@warning: This method requires UAC elevation in Windows Vista and above.
@see: L{stop_service}, L{pause_service}, L{resume_service}
@type name: str
@param name: Service unique name. You can get this value from the
C{ServiceName} member of the service descriptors returned by
L{get_services} or L{get_active_services}. | 3.488517 | 4.265662 | 0.817814 |
with win32.OpenSCManager(
dwDesiredAccess = win32.SC_MANAGER_CONNECT
) as hSCManager:
with win32.OpenService(hSCManager, name,
dwDesiredAccess = win32.SERVICE_STOP
) as hService:
win32.ControlService(hService, win32.SERVICE_CONTROL_STOP) | def stop_service(name) | Stop the service given by name.
@warning: This method requires UAC elevation in Windows Vista and above.
@see: L{get_services}, L{get_active_services},
L{start_service}, L{pause_service}, L{resume_service} | 3.195681 | 3.528264 | 0.905737 |
with win32.OpenSCManager(
dwDesiredAccess = win32.SC_MANAGER_CONNECT
) as hSCManager:
with win32.OpenService(hSCManager, name,
dwDesiredAccess = win32.SERVICE_PAUSE_CONTINUE
) as hService:
win32.ControlService(hService, win32.SERVICE_CONTROL_PAUSE) | def pause_service(name) | Pause the service given by name.
@warning: This method requires UAC elevation in Windows Vista and above.
@note: Not all services support this.
@see: L{get_services}, L{get_active_services},
L{start_service}, L{stop_service}, L{resume_service} | 3.12656 | 3.706441 | 0.843548 |
with win32.OpenSCManager(
dwDesiredAccess = win32.SC_MANAGER_CONNECT
) as hSCManager:
with win32.OpenService(hSCManager, name,
dwDesiredAccess = win32.SERVICE_PAUSE_CONTINUE
) as hService:
win32.ControlService(hService, win32.SERVICE_CONTROL_CONTINUE) | def resume_service(name) | Resume the service given by name.
@warning: This method requires UAC elevation in Windows Vista and above.
@note: Not all services support this.
@see: L{get_services}, L{get_active_services},
L{start_service}, L{stop_service}, L{pause_service} | 3.182473 | 3.698695 | 0.860431 |
# This works on Linux ('' or '-a'), Tru64 ('-av'), but not all Unixes.
for args in ('', '-a', '-av'):
mac = _find_mac('ifconfig', args, ['hwaddr', 'ether'], lambda i: i+1)
if mac:
return mac
import socket
ip_addr = socket.gethostbyname(socket.gethostname())
# Try getting the MAC addr from arp based on our IP address (Solaris).
mac = _find_mac('arp', '-an', [ip_addr], lambda i: -1)
if mac:
return mac
# This might work on HP-UX.
mac = _find_mac('lanscan', '-ai', ['lan0'], lambda i: 0)
if mac:
return mac
return None | def _ifconfig_getnode() | Get the hardware address on Unix by running ifconfig. | 6.021395 | 5.754632 | 1.046356 |
import os, re
dirs = ['', r'c:\windows\system32', r'c:\winnt\system32']
try:
import ctypes
buffer = ctypes.create_string_buffer(300)
ctypes.windll.kernel32.GetSystemDirectoryA(buffer, 300) # @UndefinedVariable
dirs.insert(0, buffer.value.decode('mbcs'))
except:
pass
for dir in dirs:
try:
pipe = os.popen(os.path.join(dir, 'ipconfig') + ' /all')
except IOError:
continue
for line in pipe:
value = line.split(':')[-1].strip().lower()
if re.match('([0-9a-f][0-9a-f]-){5}[0-9a-f][0-9a-f]', value):
return int(value.replace('-', ''), 16) | def _ipconfig_getnode() | Get the hardware address on Windows by running ipconfig.exe. | 2.534945 | 2.444228 | 1.037115 |
import win32wnet, netbios
ncb = netbios.NCB()
ncb.Command = netbios.NCBENUM
ncb.Buffer = adapters = netbios.LANA_ENUM()
adapters._pack()
if win32wnet.Netbios(ncb) != 0:
return
adapters._unpack()
for i in range(adapters.length):
ncb.Reset()
ncb.Command = netbios.NCBRESET
ncb.Lana_num = ord(adapters.lana[i])
if win32wnet.Netbios(ncb) != 0:
continue
ncb.Reset()
ncb.Command = netbios.NCBASTAT
ncb.Lana_num = ord(adapters.lana[i])
ncb.Callname = '*'.ljust(16)
ncb.Buffer = status = netbios.ADAPTER_STATUS()
if win32wnet.Netbios(ncb) != 0:
continue
status._unpack()
bytes = map(ord, status.adapter_address)
return ((bytes[0]<<40L) + (bytes[1]<<32L) + (bytes[2]<<24L) +
(bytes[3]<<16L) + (bytes[4]<<8L) + bytes[5]) | def _netbios_getnode() | Get the hardware address on Windows using NetBIOS calls.
See http://support.microsoft.com/kb/118623 for details. | 3.099181 | 3.096049 | 1.001012 |
global _node
if _node is not None:
return _node
import sys
if sys.platform == 'win32':
getters = [_windll_getnode, _netbios_getnode, _ipconfig_getnode]
else:
getters = [_unixdll_getnode, _ifconfig_getnode]
for getter in getters + [_random_getnode]:
try:
_node = getter()
except:
continue
if _node is not None:
return _node | def getnode() | Get the hardware address as a 48-bit positive integer.
The first time this runs, it may launch a separate program, which could
be quite slow. If all attempts to obtain the hardware address fail, we
choose a random 48-bit number with its eighth bit set to 1 as recommended
in RFC 4122. | 3.537219 | 3.394346 | 1.042091 |
# When the system provides a version-1 UUID generator, use it (but don't
# use UuidCreate here because its UUIDs don't conform to RFC 4122).
if _uuid_generate_time and node is clock_seq is None:
_uuid_generate_time(_buffer)
return UUID(bytes=_buffer.raw)
global _last_timestamp
import time
nanoseconds = int(time.time() * 1e9)
# 0x01b21dd213814000 is the number of 100-ns intervals between the
# UUID epoch 1582-10-15 00:00:00 and the Unix epoch 1970-01-01 00:00:00.
timestamp = int(nanoseconds/100) + 0x01b21dd213814000L
if timestamp <= _last_timestamp:
timestamp = _last_timestamp + 1
_last_timestamp = timestamp
if clock_seq is None:
import random
clock_seq = random.randrange(1<<14L) # instead of stable storage
time_low = timestamp & 0xffffffffL
time_mid = (timestamp >> 32L) & 0xffffL
time_hi_version = (timestamp >> 48L) & 0x0fffL
clock_seq_low = clock_seq & 0xffL
clock_seq_hi_variant = (clock_seq >> 8L) & 0x3fL
if node is None:
node = getnode()
return UUID(fields=(time_low, time_mid, time_hi_version,
clock_seq_hi_variant, clock_seq_low, node), version=1) | def uuid1(node=None, clock_seq=None) | Generate a UUID from a host ID, sequence number, and the current time.
If 'node' is not given, getnode() is used to obtain the hardware
address. If 'clock_seq' is given, it is used as the sequence number;
otherwise a random 14-bit sequence number is chosen. | 2.724974 | 2.647128 | 1.029408 |
import md5
hash = md5.md5(namespace.bytes + name).digest()
return UUID(bytes=hash[:16], version=3) | def uuid3(namespace, name) | Generate a UUID from the MD5 hash of a namespace UUID and a name. | 5.210209 | 4.021977 | 1.295435 |
# When the system provides a version-4 UUID generator, use it.
if _uuid_generate_random:
_uuid_generate_random(_buffer)
return UUID(bytes=_buffer.raw)
# Otherwise, get randomness from urandom or the 'random' module.
try:
import os
return UUID(bytes=os.urandom(16), version=4)
except:
import random
bytes = [chr(random.randrange(256)) for i in range(16)]
return UUID(bytes=bytes, version=4) | def uuid4() | Generate a random UUID. | 4.200197 | 3.997188 | 1.050788 |
import sha
hash = sha.sha(namespace.bytes + name).digest()
return UUID(bytes=hash[:16], version=5) | def uuid5(namespace, name) | Generate a UUID from the SHA-1 hash of a namespace UUID and a name. | 6.343993 | 4.870277 | 1.302594 |
'''
Return True if this frame should be traced, False if tracing should be blocked.
'''
# First, check whether this code object has a cached value
ignored_lines = _filename_to_ignored_lines.get(filename)
if ignored_lines is None:
# Now, look up that line of code and check for a @DontTrace
# preceding or on the same line as the method.
# E.g.:
# #@DontTrace
# def test():
# pass
# ... or ...
# def test(): #@DontTrace
# pass
ignored_lines = {}
lines = linecache.getlines(filename)
for i_line, line in enumerate(lines):
j = line.find('#')
if j >= 0:
comment = line[j:]
if DONT_TRACE_TAG in comment:
ignored_lines[i_line] = 1
#Note: when it's found in the comment, mark it up and down for the decorator lines found.
k = i_line - 1
while k >= 0:
if RE_DECORATOR.match(lines[k]):
ignored_lines[k] = 1
k -= 1
else:
break
k = i_line + 1
while k <= len(lines):
if RE_DECORATOR.match(lines[k]):
ignored_lines[k] = 1
k += 1
else:
break
_filename_to_ignored_lines[filename] = ignored_lines
func_line = frame.f_code.co_firstlineno - 1 # co_firstlineno is 1-based, so -1 is needed
return not (
func_line - 1 in ignored_lines or #-1 to get line before method
func_line in ignored_lines) | def default_should_trace_hook(frame, filename) | Return True if this frame should be traced, False if tracing should be blocked. | 4.312209 | 4.031567 | 1.069611 |
'''
Clear the trace filter cache.
Call this after reloading.
'''
global should_trace_hook
try:
# Need to temporarily disable a hook because otherwise
# _filename_to_ignored_lines.clear() will never complete.
old_hook = should_trace_hook
should_trace_hook = None
# Clear the linecache
linecache.clearcache()
_filename_to_ignored_lines.clear()
finally:
should_trace_hook = old_hook | def clear_trace_filter_cache() | Clear the trace filter cache.
Call this after reloading. | 6.60215 | 5.735111 | 1.151181 |
'''
Set the trace filter mode.
mode: Whether to enable the trace hook.
True: Trace filtering on (skipping methods tagged @DontTrace)
False: Trace filtering off (trace methods tagged @DontTrace)
None/default: Toggle trace filtering.
'''
global should_trace_hook
if mode is None:
mode = should_trace_hook is None
if mode:
should_trace_hook = default_should_trace_hook
else:
should_trace_hook = None
return mode | def trace_filter(mode) | Set the trace filter mode.
mode: Whether to enable the trace hook.
True: Trace filtering on (skipping methods tagged @DontTrace)
False: Trace filtering off (trace methods tagged @DontTrace)
None/default: Toggle trace filtering. | 6.490001 | 2.275057 | 2.852676 |
".symfix - Set the default Microsoft Symbol Store settings if missing"
self.debug.system.fix_symbol_store_path(remote = True, force = False) | def do(self, arg) | .symfix - Set the default Microsoft Symbol Store settings if missing | 78.383751 | 12.098248 | 6.478934 |
with open(os.devnull, 'w') as devnull:
# Suppress isort messages
sys.stdout = devnull
if SortImports(path, check=True).incorrectly_sorted:
return [{
'lnum': 0,
'col': 0,
'text': 'Incorrectly sorted imports.',
'type': 'ISORT'
}]
else:
return [] | def run(self, path, **meta) | Lint the file. Return an array of error dicts if appropriate. | 5.977693 | 5.112288 | 1.169279 |
f = open(filename, "wb")
pickle.dump(self.__dict__, f, 2)
f.close() | def dump(self, filename) | Dump the grammar tables to a pickle file. | 2.75569 | 2.687111 | 1.025522 |
f = open(filename, "rb")
d = pickle.load(f)
f.close()
self.__dict__.update(d) | def load(self, filename) | Load the grammar tables from a pickle file. | 2.518416 | 2.406187 | 1.046642 |
new = self.__class__()
for dict_attr in ("symbol2number", "number2symbol", "dfas", "keywords",
"tokens", "symbol2label"):
setattr(new, dict_attr, getattr(self, dict_attr).copy())
new.labels = self.labels[:]
new.states = self.states[:]
new.start = self.start
return new | def copy(self) | Copy the grammar. | 4.819281 | 3.989083 | 1.208118 |
from pprint import pprint
print "s2n"
pprint(self.symbol2number)
print "n2s"
pprint(self.number2symbol)
print "states"
pprint(self.states)
print "dfas"
pprint(self.dfas)
print "labels"
pprint(self.labels)
print "start", self.start | def report(self) | Dump the grammar tables to standard output, for debugging. | 3.359112 | 2.675007 | 1.255739 |
suffix = dyld_image_suffix(env)
if suffix is None:
return iterator
def _inject(iterator=iterator, suffix=suffix):
for path in iterator:
if path.endswith('.dylib'):
yield path[:-len('.dylib')] + suffix + '.dylib'
else:
yield path + suffix
yield path
return _inject() | def dyld_image_suffix_search(iterator, env=None) | For a potential path iterator, add DYLD_IMAGE_SUFFIX semantics | 3.535763 | 3.255933 | 1.085945 |
'''
Meant to be used as
class B:
@overrides(A.m1)
def m1(self):
pass
'''
def wrapper(func):
if func.__name__ != method.__name__:
msg = "Wrong @override: %r expected, but overwriting %r."
msg = msg % (func.__name__, method.__name__)
raise AssertionError(msg)
if func.__doc__ is None:
func.__doc__ = method.__doc__
return func
return wrapper | def overrides(method) | Meant to be used as
class B:
@overrides(A.m1)
def m1(self):
pass | 3.975498 | 3.058854 | 1.299669 |
pending = [names]
while pending:
node = pending.pop()
if node.type == token.NAME:
yield node.value
elif node.type == syms.dotted_name:
yield "".join([ch.value for ch in node.children])
elif node.type == syms.dotted_as_name:
pending.append(node.children[0])
elif node.type == syms.dotted_as_names:
pending.extend(node.children[::-2])
else:
raise AssertionError("unkown node type") | def traverse_imports(names) | Walks over all the names imported in a dotted_as_names node. | 2.63378 | 2.506276 | 1.050874 |
class UnmodifiedIsInstance(type):
if sys.version_info[0] == 2 and sys.version_info[1] <= 6:
@classmethod
def __instancecheck__(cls, instance):
if cls.__name__ in (str(base.__name__) for base in bases):
return isinstance(instance, bases)
subclass = getattr(instance, '__class__', None)
subtype = type(instance)
instance_type = getattr(abc, '_InstanceType', None)
if not instance_type:
class test_object:
pass
instance_type = type(test_object)
if subtype is instance_type:
subtype = subclass
if subtype is subclass or subclass is None:
return cls.__subclasscheck__(subtype)
return (cls.__subclasscheck__(subclass) or cls.__subclasscheck__(subtype))
else:
@classmethod
def __instancecheck__(cls, instance):
if cls.__name__ in (str(base.__name__) for base in bases):
return isinstance(instance, bases)
return type.__instancecheck__(cls, instance)
return with_metaclass(UnmodifiedIsInstance, *bases) | def unmodified_isinstance(*bases) | When called in the form
MyOverrideClass(unmodified_isinstance(BuiltInClass))
it allows calls against passed in built in instances to pass even if there not a subclass | 2.714799 | 2.837194 | 0.956861 |
".exchain - Show the SEH chain"
thread = self.get_thread_from_prefix()
print "Exception handlers for thread %d" % thread.get_tid()
print
table = Table()
table.addRow("Block", "Function")
bits = thread.get_bits()
for (seh, seh_func) in thread.get_seh_chain():
if seh is not None:
seh = HexDump.address(seh, bits)
if seh_func is not None:
seh_func = HexDump.address(seh_func, bits)
table.addRow(seh, seh_func)
print table.getOutput() | def do(self, arg) | .exchain - Show the SEH chain | 5.4976 | 4.257031 | 1.291416 |
options = {}
if not opts:
return options
for opt in opts.split(';'):
try:
key, value = opt.split('=')
except ValueError:
continue
try:
options[key] = DEBUG_OPTIONS_PARSER[key](value)
except KeyError:
continue
return options | def _parse_debug_options(opts) | Debug options are semicolon separated key=value pairs
WAIT_ON_ABNORMAL_EXIT=True|False
WAIT_ON_NORMAL_EXIT=True|False
REDIRECT_OUTPUT=True|False
VERSION=string
INTERPRETER_OPTIONS=string
WEB_BROWSER_URL=string url
DJANGO_DEBUG=True|False
CLIENT_OS_TYPE=WINDOWS|UNIX
DEBUG_STDLIB=True|False | 2.792217 | 2.727412 | 1.023761 |
if not element.length or element.length == 'max':
return "TEXT"
else:
return compiler.visit_VARCHAR(element, **kw) | def _compile_varchar_mysql(element, compiler, **kw) | MySQL hack to avoid the "VARCHAR requires a length" error. | 4.249999 | 3.557525 | 1.19465 |
return self._transactional(fn, *argv, **argd) | def Transactional(fn, self, *argv, **argd) | Decorator that wraps DAO methods to handle transactions automatically.
It may only work with subclasses of L{BaseDAO}. | 5.007961 | 6.241146 | 0.80241 |
try:
cursor = dbapi_connection.cursor()
try:
cursor.execute("PRAGMA foreign_keys = ON;")
cursor.execute("PRAGMA foreign_keys;")
if cursor.fetchone()[0] != 1:
raise Exception()
finally:
cursor.close()
except Exception:
dbapi_connection.close()
raise sqlite3.Error() | def connect(dbapi_connection, connection_record) | Called once by SQLAlchemy for each new SQLite DB-API connection.
Here is where we issue some PRAGMA statements to configure how we're
going to access the SQLite database.
@param dbapi_connection:
A newly connected raw SQLite DB-API connection.
@param connection_record:
Unused by this method. | 2.242326 | 2.360697 | 0.949858 |
self._session.begin(subtransactions = True)
try:
result = method(self, *argv, **argd)
self._session.commit()
return result
except:
self._session.rollback()
raise | def _transactional(self, method, *argv, **argd) | Begins a transaction and calls the given DAO method.
If the method executes successfully the transaction is commited.
If the method fails, the transaction is rolled back.
@type method: callable
@param method: Bound method of this class or one of its subclasses.
The first argument will always be C{self}.
@return: The return value of the method call.
@raise Exception: Any exception raised by the method. | 2.242261 | 2.604307 | 0.860982 |
mbi = win32.MemoryBasicInformation()
mbi.BaseAddress = self.address
mbi.RegionSize = self.size
mbi.State = self._parse_state(self.state)
mbi.Protect = self._parse_access(self.access)
mbi.Type = self._parse_type(self.type)
if self.alloc_base is not None:
mbi.AllocationBase = self.alloc_base
else:
mbi.AllocationBase = mbi.BaseAddress
if self.alloc_access is not None:
mbi.AllocationProtect = self._parse_access(self.alloc_access)
else:
mbi.AllocationProtect = mbi.Protect
if self.filename is not None:
mbi.filename = self.filename
if getMemoryDump and self.content is not None:
mbi.content = self.content
return mbi | def toMBI(self, getMemoryDump = False) | Returns a L{win32.MemoryBasicInformation} object using the data
retrieved from the database.
@type getMemoryDump: bool
@param getMemoryDump: (Optional) If C{True} retrieve the memory dump.
Defaults to C{False} since this may be a costly operation.
@rtype: L{win32.MemoryBasicInformation}
@return: Memory block information. | 2.165955 | 2.115423 | 1.023887 |
crash = Marshaller.loads(str(self.data))
if not isinstance(crash, Crash):
raise TypeError(
"Expected Crash instance, got %s instead" % type(crash))
crash._rowid = self.id
if not crash.memoryMap:
memory = getattr(self, "memory", [])
if memory:
crash.memoryMap = [dto.toMBI(getMemoryDump) for dto in memory]
return crash | def toCrash(self, getMemoryDump = False) | Returns a L{Crash} object using the data retrieved from the database.
@type getMemoryDump: bool
@param getMemoryDump: If C{True} retrieve the memory dump.
Defaults to C{False} since this may be a costly operation.
@rtype: L{Crash}
@return: Crash object. | 6.269375 | 7.234988 | 0.866536 |
# Filter out duplicated crashes, if requested.
if not allow_duplicates:
signature = pickle.dumps(crash.signature, protocol = 0)
if self._session.query(CrashDTO.id) \
.filter_by(signature = signature) \
.count() > 0:
return
# Fill out a new row for the crashes table.
crash_id = self.__add_crash(crash)
# Fill out new rows for the memory dump.
self.__add_memory(crash_id, crash.memoryMap)
# On success set the row ID for the Crash object.
# WARNING: In nested calls, make sure to delete
# this property before a session rollback!
crash._rowid = crash_id | def add(self, crash, allow_duplicates = True) | Add a new crash dump to the database, optionally filtering them by
signature to avoid duplicates.
@type crash: L{Crash}
@param crash: Crash object.
@type allow_duplicates: bool
@param allow_duplicates: (Optional)
C{True} to always add the new crash dump.
C{False} to only add the crash dump if no other crash with the
same signature is found in the database.
Sometimes, your fuzzer turns out to be I{too} good. Then you find
youself browsing through gigabytes of crash dumps, only to find
a handful of actual bugs in them. This simple heuristic filter
saves you the trouble by discarding crashes that seem to be similar
to another one you've already found. | 7.269676 | 7.251105 | 1.002561 |
# Validate the parameters.
if since and until and since > until:
warnings.warn("CrashDAO.find() got the 'since' and 'until'"
" arguments reversed, corrected automatically.")
since, until = until, since
if limit is not None and not limit:
warnings.warn("CrashDAO.find() was set a limit of 0 results,"
" returning without executing a query.")
return []
# Build the SQL query.
query = self._session.query(CrashDTO)
if signature is not None:
sig_pickled = pickle.dumps(signature, protocol = 0)
query = query.filter(CrashDTO.signature == sig_pickled)
if since:
query = query.filter(CrashDTO.timestamp >= since)
if until:
query = query.filter(CrashDTO.timestamp < until)
if order:
if order > 0:
query = query.order_by(asc(CrashDTO.timestamp))
else:
query = query.order_by(desc(CrashDTO.timestamp))
else:
# Default ordering is by row ID, to get consistent results.
# Also some database engines require ordering when using offsets.
query = query.order_by(asc(CrashDTO.id))
if offset:
query = query.offset(offset)
if limit:
query = query.limit(limit)
# Execute the SQL query and convert the results.
try:
return [dto.toCrash() for dto in query.all()]
except NoResultFound:
return [] | def find(self,
signature = None, order = 0,
since = None, until = None,
offset = None, limit = None) | Retrieve all crash dumps in the database, optionally filtering them by
signature and timestamp, and/or sorting them by timestamp.
Results can be paged to avoid consuming too much memory if the database
is large.
@see: L{find_by_example}
@type signature: object
@param signature: (Optional) Return only through crashes matching
this signature. See L{Crash.signature} for more details.
@type order: int
@param order: (Optional) Sort by timestamp.
If C{== 0}, results are not sorted.
If C{> 0}, results are sorted from older to newer.
If C{< 0}, results are sorted from newer to older.
@type since: datetime
@param since: (Optional) Return only the crashes after and
including this date and time.
@type until: datetime
@param until: (Optional) Return only the crashes before this date
and time, not including it.
@type offset: int
@param offset: (Optional) Skip the first I{offset} results.
@type limit: int
@param limit: (Optional) Return at most I{limit} results.
@rtype: list(L{Crash})
@return: List of Crash objects. | 2.981143 | 2.91951 | 1.021111 |
# Validate the parameters.
if limit is not None and not limit:
warnings.warn("CrashDAO.find_by_example() was set a limit of 0"
" results, returning without executing a query.")
return []
# Build the query.
query = self._session.query(CrashDTO)
# Order by row ID to get consistent results.
# Also some database engines require ordering when using offsets.
query = query.asc(CrashDTO.id)
# Build a CrashDTO from the Crash object.
dto = CrashDTO(crash)
# Filter all the fields in the crashes table that are present in the
# CrashDTO object and not set to None, except for the row ID.
for name, column in compat.iteritems(CrashDTO.__dict__):
if not name.startswith('__') and name not in ('id',
'signature',
'data'):
if isinstance(column, Column):
value = getattr(dto, name, None)
if value is not None:
query = query.filter(column == value)
# Page the query.
if offset:
query = query.offset(offset)
if limit:
query = query.limit(limit)
# Execute the SQL query and convert the results.
try:
return [dto.toCrash() for dto in query.all()]
except NoResultFound:
return [] | def find_by_example(self, crash, offset = None, limit = None) | Find all crash dumps that have common properties with the crash dump
provided.
Results can be paged to avoid consuming too much memory if the database
is large.
@see: L{find}
@type crash: L{Crash}
@param crash: Crash object to compare with. Fields set to C{None} are
ignored, all other fields but the signature are used in the
comparison.
To search for signature instead use the L{find} method.
@type offset: int
@param offset: (Optional) Skip the first I{offset} results.
@type limit: int
@param limit: (Optional) Return at most I{limit} results.
@rtype: list(L{Crash})
@return: List of similar crash dumps found. | 4.754256 | 4.476459 | 1.062057 |
query = self._session.query(CrashDTO.id)
if signature:
sig_pickled = pickle.dumps(signature, protocol = 0)
query = query.filter_by(signature = sig_pickled)
return query.count() | def count(self, signature = None) | Counts how many crash dumps have been stored in this database.
Optionally filters the count by heuristic signature.
@type signature: object
@param signature: (Optional) Count only the crashes that match
this signature. See L{Crash.signature} for more details.
@rtype: int
@return: Count of crash dumps stored in this database. | 4.737669 | 4.662131 | 1.016202 |
query = self._session.query(CrashDTO).filter_by(id = crash._rowid)
query.delete(synchronize_session = False)
del crash._rowid | def delete(self, crash) | Remove the given crash dump from the database.
@type crash: L{Crash}
@param crash: Crash dump to remove. | 5.76651 | 7.506353 | 0.768217 |
'''Returns an iterable of the parts in the final repr string.'''
try:
obj_repr = type(obj).__repr__
except Exception:
obj_repr = None
def has_obj_repr(t):
r = t.__repr__
try:
return obj_repr == r
except Exception:
return obj_repr is r
for t, prefix, suffix, comma in self.collection_types:
if isinstance(obj, t) and has_obj_repr(t):
return self._repr_iter(obj, level, prefix, suffix, comma)
for t, prefix, suffix, item_prefix, item_sep, item_suffix in self.dict_types: # noqa
if isinstance(obj, t) and has_obj_repr(t):
return self._repr_dict(obj, level, prefix, suffix,
item_prefix, item_sep, item_suffix)
for t in self.string_types:
if isinstance(obj, t) and has_obj_repr(t):
return self._repr_str(obj, level)
if self._is_long_iter(obj):
return self._repr_long_iter(obj)
return self._repr_other(obj, level) | def _repr(self, obj, level) | Returns an iterable of the parts in the final repr string. | 2.757989 | 2.401572 | 1.14841 |
if not isinstance(subpatterns, list):
return subpatterns
if len(subpatterns)==1:
return subpatterns[0]
# first pick out the ones containing variable names
subpatterns_with_names = []
subpatterns_with_common_names = []
common_names = ['in', 'for', 'if' , 'not', 'None']
subpatterns_with_common_chars = []
common_chars = "[]().,:"
for subpattern in subpatterns:
if any(rec_test(subpattern, lambda x: type(x) is str)):
if any(rec_test(subpattern,
lambda x: isinstance(x, str) and x in common_chars)):
subpatterns_with_common_chars.append(subpattern)
elif any(rec_test(subpattern,
lambda x: isinstance(x, str) and x in common_names)):
subpatterns_with_common_names.append(subpattern)
else:
subpatterns_with_names.append(subpattern)
if subpatterns_with_names:
subpatterns = subpatterns_with_names
elif subpatterns_with_common_names:
subpatterns = subpatterns_with_common_names
elif subpatterns_with_common_chars:
subpatterns = subpatterns_with_common_chars
# of the remaining subpatterns pick out the longest one
return max(subpatterns, key=len) | def get_characteristic_subpattern(subpatterns) | Picks the most characteristic from a list of linear patterns
Current order used is:
names > common_names > common_chars | 2.596369 | 2.466125 | 1.052813 |
node = self
subp = []
while node:
if node.type == TYPE_ALTERNATIVES:
node.alternatives.append(subp)
if len(node.alternatives) == len(node.children):
#last alternative
subp = [tuple(node.alternatives)]
node.alternatives = []
node = node.parent
continue
else:
node = node.parent
subp = None
break
if node.type == TYPE_GROUP:
node.group.append(subp)
#probably should check the number of leaves
if len(node.group) == len(node.children):
subp = get_characteristic_subpattern(node.group)
node.group = []
node = node.parent
continue
else:
node = node.parent
subp = None
break
if node.type == token_labels.NAME and node.name:
#in case of type=name, use the name instead
subp.append(node.name)
else:
subp.append(node.type)
node = node.parent
return subp | def leaf_to_root(self) | Internal method. Returns a characteristic path of the
pattern tree. This method must be run for all leaves until the
linear subpatterns are merged into a single | 3.629758 | 3.629757 | 1 |
for l in self.leaves():
subp = l.leaf_to_root()
if subp:
return subp | def get_linear_subpattern(self) | Drives the leaf_to_root method. The reason that
leaf_to_root must be run multiple times is because we need to
reject 'group' matches; for example the alternative form
(a | b c) creates a group [b c] that needs to be matched. Since
matching multiple linear patterns overcomes the automaton's
capabilities, leaf_to_root merges each group into a single
choice based on 'characteristic'ity,
i.e. (a|b c) -> (a|b) if b more characteristic than c
Returns: The most 'characteristic'(as defined by
get_characteristic_subpattern) path for the compiled pattern
tree. | 9.460382 | 4.830835 | 1.958333 |
"Generator that returns the leaves of the tree"
for child in self.children:
for x in child.leaves():
yield x
if not self.children:
yield self | def leaves(self) | Generator that returns the leaves of the tree | 4.294895 | 4.12104 | 1.042187 |
skip = set((token.NEWLINE, token.INDENT, token.DEDENT))
tokens = tokenize.generate_tokens(StringIO.StringIO(input).readline)
for quintuple in tokens:
type, value, start, end, line_text = quintuple
if type not in skip:
yield quintuple | def tokenize_wrapper(input) | Tokenizes a string suppressing significant whitespace. | 3.678239 | 3.380832 | 1.087969 |
type, value, context, children = raw_node_info
if children or type in grammar.number2symbol:
return pytree.Node(type, children, context=context)
else:
return pytree.Leaf(type, value, context=context) | def pattern_convert(grammar, raw_node_info) | Converts raw node information to a Node or Leaf instance. | 5.243523 | 4.861432 | 1.078596 |
tokens = tokenize_wrapper(input)
try:
root = self.driver.parse_tokens(tokens, debug=debug)
except parse.ParseError as e:
raise PatternSyntaxError(str(e))
if with_tree:
return self.compile_node(root), root
else:
return self.compile_node(root) | def compile_pattern(self, input, debug=False, with_tree=False) | Compiles a pattern string to a nested pytree.*Pattern object. | 3.589343 | 3.651819 | 0.982892 |
# XXX Optimize certain Wildcard-containing-Wildcard patterns
# that can be merged
if node.type == self.syms.Matcher:
node = node.children[0] # Avoid unneeded recursion
if node.type == self.syms.Alternatives:
# Skip the odd children since they are just '|' tokens
alts = [self.compile_node(ch) for ch in node.children[::2]]
if len(alts) == 1:
return alts[0]
p = pytree.WildcardPattern([[a] for a in alts], min=1, max=1)
return p.optimize()
if node.type == self.syms.Alternative:
units = [self.compile_node(ch) for ch in node.children]
if len(units) == 1:
return units[0]
p = pytree.WildcardPattern([units], min=1, max=1)
return p.optimize()
if node.type == self.syms.NegatedUnit:
pattern = self.compile_basic(node.children[1:])
p = pytree.NegatedPattern(pattern)
return p.optimize()
assert node.type == self.syms.Unit
name = None
nodes = node.children
if len(nodes) >= 3 and nodes[1].type == token.EQUAL:
name = nodes[0].value
nodes = nodes[2:]
repeat = None
if len(nodes) >= 2 and nodes[-1].type == self.syms.Repeater:
repeat = nodes[-1]
nodes = nodes[:-1]
# Now we've reduced it to: STRING | NAME [Details] | (...) | [...]
pattern = self.compile_basic(nodes, repeat)
if repeat is not None:
assert repeat.type == self.syms.Repeater
children = repeat.children
child = children[0]
if child.type == token.STAR:
min = 0
max = pytree.HUGE
elif child.type == token.PLUS:
min = 1
max = pytree.HUGE
elif child.type == token.LBRACE:
assert children[-1].type == token.RBRACE
assert len(children) in (3, 5)
min = max = self.get_int(children[1])
if len(children) == 5:
max = self.get_int(children[3])
else:
assert False
if min != 1 or max != 1:
pattern = pattern.optimize()
pattern = pytree.WildcardPattern([[pattern]], min=min, max=max)
if name is not None:
pattern.name = name
return pattern.optimize() | def compile_node(self, node) | Compiles a node, recursively.
This is one big switch on the node type. | 3.16119 | 3.145673 | 1.004933 |
wParam = ctypes.cast(wParam, LPVOID).value
if wParam is None:
wParam = 0
return wParam | def MAKE_WPARAM(wParam) | Convert arguments to the WPARAM type.
Used automatically by SendMessage, PostMessage, etc.
You shouldn't need to call this function. | 5.04706 | 4.95755 | 1.018055 |
return MapWindowPoints(hWndFrom, hWndTo, [self]) | def translate(self, hWndFrom = HWND_DESKTOP, hWndTo = HWND_DESKTOP) | Translate coordinates from one window to another.
@note: To translate multiple points it's more efficient to use the
L{MapWindowPoints} function instead.
@see: L{client_to_screen}, L{screen_to_client}
@type hWndFrom: int or L{HWND} or L{system.Window}
@param hWndFrom: Window handle to translate from.
Use C{HWND_DESKTOP} for screen coordinates.
@type hWndTo: int or L{HWND} or L{system.Window}
@param hWndTo: Window handle to translate to.
Use C{HWND_DESKTOP} for screen coordinates.
@rtype: L{Point}
@return: New object containing the translated coordinates. | 15.006954 | 25.338129 | 0.592268 |
return RECT(self.left, self.top, self.right, self.bottom) | def _as_parameter_(self) | Compatibility with ctypes.
Allows passing transparently a Point object to an API call. | 5.233282 | 3.956614 | 1.322667 |
topleft = ScreenToClient(hWnd, (self.left, self.top))
bottomright = ScreenToClient(hWnd, (self.bottom, self.right))
return Rect( topleft.x, topleft.y, bottomright.x, bottomright.y ) | def screen_to_client(self, hWnd) | Translates window screen coordinates to client coordinates.
@see: L{client_to_screen}, L{translate}
@type hWnd: int or L{HWND} or L{system.Window}
@param hWnd: Window handle.
@rtype: L{Rect}
@return: New object containing the translated coordinates. | 3.183853 | 3.710417 | 0.858085 |
topleft = ClientToScreen(hWnd, (self.left, self.top))
bottomright = ClientToScreen(hWnd, (self.bottom, self.right))
return Rect( topleft.x, topleft.y, bottomright.x, bottomright.y ) | def client_to_screen(self, hWnd) | Translates window client coordinates to screen coordinates.
@see: L{screen_to_client}, L{translate}
@type hWnd: int or L{HWND} or L{system.Window}
@param hWnd: Window handle.
@rtype: L{Rect}
@return: New object containing the translated coordinates. | 3.160034 | 3.721949 | 0.849027 |
points = [ (self.left, self.top), (self.right, self.bottom) ]
return MapWindowPoints(hWndFrom, hWndTo, points) | def translate(self, hWndFrom = HWND_DESKTOP, hWndTo = HWND_DESKTOP) | Translate coordinates from one window to another.
@see: L{client_to_screen}, L{screen_to_client}
@type hWndFrom: int or L{HWND} or L{system.Window}
@param hWndFrom: Window handle to translate from.
Use C{HWND_DESKTOP} for screen coordinates.
@type hWndTo: int or L{HWND} or L{system.Window}
@param hWndTo: Window handle to translate to.
Use C{HWND_DESKTOP} for screen coordinates.
@rtype: L{Rect}
@return: New object containing the translated coordinates. | 4.260007 | 6.033062 | 0.70611 |
wp = WINDOWPLACEMENT()
wp.length = sizeof(wp)
wp.flags = self.flags
wp.showCmd = self.showCmd
wp.ptMinPosition.x = self.ptMinPosition.x
wp.ptMinPosition.y = self.ptMinPosition.y
wp.ptMaxPosition.x = self.ptMaxPosition.x
wp.ptMaxPosition.y = self.ptMaxPosition.y
wp.rcNormalPosition.left = self.rcNormalPosition.left
wp.rcNormalPosition.top = self.rcNormalPosition.top
wp.rcNormalPosition.right = self.rcNormalPosition.right
wp.rcNormalPosition.bottom = self.rcNormalPosition.bottom
return wp | def _as_parameter_(self) | Compatibility with ctypes.
Allows passing transparently a Point object to an API call. | 2.33468 | 2.102823 | 1.11026 |
'''
Note: there's a copy of this method in interpreterInfo.py
'''
try:
ret = sys.getfilesystemencoding()
if not ret:
raise RuntimeError('Unable to get encoding.')
return ret
except:
try:
#Handle Jython
from java.lang import System # @UnresolvedImport
env = System.getProperty("os.name").lower()
if env.find('win') != -1:
return 'ISO-8859-1' #mbcs does not work on Jython, so, use a (hopefully) suitable replacement
return 'utf-8'
except:
pass
#Only available from 2.3 onwards.
if sys.platform == 'win32':
return 'mbcs'
return 'utf-8' | def __getfilesystemencoding() | Note: there's a copy of this method in interpreterInfo.py | 4.824618 | 3.780766 | 1.276095 |
handle = self.handle
index = 0
while 1:
subkey = win32.RegEnumKey(handle, index)
if subkey is None:
break
yield self.child(subkey)
index += 1 | def iterchildren(self) | Iterates the subkeys for this Registry key.
@rtype: iter of L{RegistryKey}
@return: Iterator of subkeys. | 3.877644 | 3.582757 | 1.082307 |
# return list(self.iterchildren()) # that can't be optimized by psyco
handle = self.handle
result = []
index = 0
while 1:
subkey = win32.RegEnumKey(handle, index)
if subkey is None:
break
result.append( self.child(subkey) )
index += 1
return result | def children(self) | Returns a list of subkeys for this Registry key.
@rtype: list(L{RegistryKey})
@return: List of subkeys. | 5.365046 | 5.021453 | 1.068425 |
path = self._path + '\\' + subkey
handle = win32.RegOpenKey(self.handle, subkey)
return RegistryKey(path, handle) | def child(self, subkey) | Retrieves a subkey for this Registry key, given its name.
@type subkey: str
@param subkey: Name of the subkey.
@rtype: L{RegistryKey}
@return: Subkey. | 5.392796 | 5.380393 | 1.002305 |
if '\\' in path:
p = path.find('\\')
hive = path[:p]
path = path[p+1:]
else:
hive = path
path = None
handle = self._hives_by_name[ hive.upper() ]
return handle, path | def _split_path(self, path) | Splits a Registry path and returns the hive and key.
@type path: str
@param path: Registry path.
@rtype: tuple( int, str )
@return: Tuple containing the hive handle and the subkey path.
The hive handle is always one of the following integer constants:
- L{win32.HKEY_CLASSES_ROOT}
- L{win32.HKEY_CURRENT_USER}
- L{win32.HKEY_LOCAL_MACHINE}
- L{win32.HKEY_USERS}
- L{win32.HKEY_PERFORMANCE_DATA}
- L{win32.HKEY_CURRENT_CONFIG} | 4.285941 | 4.003141 | 1.070645 |
handle, path = self._split_path(path)
if self._machine is not None:
handle = self._connect_hive(handle)
return handle, path | def _parse_path(self, path) | Parses a Registry path and returns the hive and key.
@type path: str
@param path: Registry path.
@rtype: tuple( int, str )
@return: Tuple containing the hive handle and the subkey path.
For a local Registry, the hive handle is an integer.
For a remote Registry, the hive handle is a L{RegistryKeyHandle}. | 8.612229 | 6.516703 | 1.321562 |
path = self._hives_by_value[hive]
if subkey:
path = path + '\\' + subkey
return path | def _join_path(self, hive, subkey) | Joins the hive and key to make a Registry path.
@type hive: int
@param hive: Registry hive handle.
The hive handle must be one of the following integer constants:
- L{win32.HKEY_CLASSES_ROOT}
- L{win32.HKEY_CURRENT_USER}
- L{win32.HKEY_LOCAL_MACHINE}
- L{win32.HKEY_USERS}
- L{win32.HKEY_PERFORMANCE_DATA}
- L{win32.HKEY_CURRENT_CONFIG}
@type subkey: str
@param subkey: Subkey path.
@rtype: str
@return: Registry path. | 5.266762 | 6.660064 | 0.790798 |
try:
handle = self._remote_hives[hive]
except KeyError:
handle = win32.RegConnectRegistry(self._machine, hive)
self._remote_hives[hive] = handle
return handle | def _connect_hive(self, hive) | Connect to the specified hive of a remote Registry.
@note: The connection will be cached, to close all connections and
erase this cache call the L{close} method.
@type hive: int
@param hive: Hive to connect to.
@rtype: L{win32.RegistryKeyHandle}
@return: Open handle to the remote Registry hive. | 4.120654 | 2.965263 | 1.389642 |
while self._remote_hives:
hive = self._remote_hives.popitem()[1]
try:
hive.close()
except Exception:
try:
e = sys.exc_info()[1]
msg = "Cannot close registry hive handle %s, reason: %s"
msg %= (hive.value, str(e))
warnings.warn(msg)
except Exception:
pass | def close(self) | Closes all open connections to the remote Registry.
No exceptions are raised, even if an error occurs.
This method has no effect when opening the local Registry.
The remote Registry will still be accessible after calling this method
(new connections will be opened automatically on access). | 4.087908 | 3.833304 | 1.066419 |
path = self._sanitize_path(path)
hive, subpath = self._parse_path(path)
handle = win32.RegCreateKey(hive, subpath)
return RegistryKey(path, handle) | def create(self, path) | Creates a new Registry key.
@type path: str
@param path: Registry key path.
@rtype: L{RegistryKey}
@return: The newly created Registry key. | 5.461438 | 4.924703 | 1.108988 |
result = list()
hive, subpath = self._parse_path(path)
with win32.RegOpenKey(hive, subpath) as handle:
index = 0
while 1:
name = win32.RegEnumKey(handle, index)
if name is None:
break
result.append(name)
index += 1
return result | def subkeys(self, path) | Returns a list of subkeys for the given Registry key.
@type path: str
@param path: Registry key path.
@rtype: list(str)
@return: List of subkey names. | 3.083064 | 2.996906 | 1.028749 |
if path.endswith('\\'):
path = path[:-1]
if not self.has_key(path):
raise KeyError(path)
stack = collections.deque()
stack.appendleft(path)
return self.__iterate(stack) | def iterate(self, path) | Returns a recursive iterator on the specified key and its subkeys.
@type path: str
@param path: Registry key path.
@rtype: iterator
@return: Recursive iterator that returns Registry key paths.
@raise KeyError: The specified path does not exist. | 4.034529 | 3.649825 | 1.105404 |
stack = collections.deque(self._hives)
stack.reverse()
return self.__iterate(stack) | def iterkeys(self) | Returns an iterator that crawls the entire Windows Registry. | 16.232414 | 9.536345 | 1.702163 |
'''
:param ide_os: 'WINDOWS' or 'UNIX'
:param breakpoints_by: 'ID' or 'LINE'
'''
if breakpoints_by == 'ID':
py_db._set_breakpoints_with_id = True
else:
py_db._set_breakpoints_with_id = False
pydevd_file_utils.set_ide_os(ide_os)
return py_db.cmd_factory.make_version_message(seq) | def set_ide_os_and_breakpoints_by(self, py_db, seq, ide_os, breakpoints_by) | :param ide_os: 'WINDOWS' or 'UNIX'
:param breakpoints_by: 'ID' or 'LINE' | 4.305896 | 3.354272 | 1.283705 |
'''
:param scope: 'FRAME' or 'GLOBAL'
'''
py_db.post_method_as_internal_command(
thread_id, internal_change_variable, seq, thread_id, frame_id, scope, attr, value) | def request_change_variable(self, py_db, seq, thread_id, frame_id, scope, attr, value) | :param scope: 'FRAME' or 'GLOBAL' | 7.898219 | 5.408708 | 1.460278 |
'''
:param scope: 'FRAME' or 'GLOBAL'
'''
int_cmd = InternalGetVariable(seq, thread_id, frame_id, scope, attrs)
py_db.post_internal_command(int_cmd, thread_id) | def request_get_variable(self, py_db, seq, thread_id, frame_id, scope, attrs) | :param scope: 'FRAME' or 'GLOBAL' | 5.701104 | 4.125348 | 1.381969 |
'''
In py2 converts a unicode to str (bytes) using utf-8.
-- in py3 raises an error if it's not str already.
'''
if s.__class__ != str:
if not IS_PY3K:
s = s.encode('utf-8')
else:
raise AssertionError('Expected to have str on Python 3. Found: %s (%s)' % (s, s.__class__))
return s | def to_str(self, s) | In py2 converts a unicode to str (bytes) using utf-8.
-- in py3 raises an error if it's not str already. | 6.254378 | 3.0063 | 2.080424 |
'''
In py2 converts a unicode to str (bytes) using the file system encoding.
-- in py3 raises an error if it's not str already.
'''
if filename.__class__ != str:
if not IS_PY3K:
filename = filename.encode(file_system_encoding)
else:
raise AssertionError('Expected to have str on Python 3. Found: %s (%s)' % (filename, filename.__class__))
return filename | def filename_to_str(self, filename) | In py2 converts a unicode to str (bytes) using the file system encoding.
-- in py3 raises an error if it's not str already. | 6.974648 | 2.984231 | 2.337168 |
'''
Removes all the breakpoints from a given file or from all files if filename == '*'.
'''
changed = False
lst = [
py_db.file_to_id_to_line_breakpoint,
py_db.file_to_id_to_plugin_breakpoint,
py_db.breakpoints
]
if hasattr(py_db, 'django_breakpoints'):
lst.append(py_db.django_breakpoints)
if hasattr(py_db, 'jinja2_breakpoints'):
lst.append(py_db.jinja2_breakpoints)
for file_to_id_to_breakpoint in lst:
if filename == '*':
if file_to_id_to_breakpoint:
file_to_id_to_breakpoint.clear()
changed = True
else:
if filename in file_to_id_to_breakpoint:
del file_to_id_to_breakpoint[filename]
changed = True
if changed:
py_db.on_breakpoints_changed(removed=True) | def remove_all_breakpoints(self, py_db, filename) | Removes all the breakpoints from a given file or from all files if filename == '*'. | 2.585335 | 2.309565 | 1.119404 |
'''
:param str filename:
Note: must be already translated for the server.
:param str breakpoint_type:
One of: 'python-line', 'django-line', 'jinja2-line'.
:param int breakpoint_id:
'''
file_to_id_to_breakpoint = None
if breakpoint_type == 'python-line':
breakpoints = py_db.breakpoints
file_to_id_to_breakpoint = py_db.file_to_id_to_line_breakpoint
elif py_db.plugin is not None:
result = py_db.plugin.get_breakpoints(py_db, breakpoint_type)
if result is not None:
file_to_id_to_breakpoint = py_db.file_to_id_to_plugin_breakpoint
breakpoints = result
if file_to_id_to_breakpoint is None:
pydev_log.critical('Error removing breakpoint. Cannot handle breakpoint of type %s', breakpoint_type)
else:
try:
id_to_pybreakpoint = file_to_id_to_breakpoint.get(filename, {})
if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0:
existing = id_to_pybreakpoint[breakpoint_id]
pydev_log.info('Removed breakpoint:%s - line:%s - func_name:%s (id: %s)\n' % (
filename, existing.line, existing.func_name.encode('utf-8'), breakpoint_id))
del id_to_pybreakpoint[breakpoint_id]
py_db.consolidate_breakpoints(filename, id_to_pybreakpoint, breakpoints)
if py_db.plugin is not None:
py_db.has_plugin_line_breaks = py_db.plugin.has_line_breaks()
except KeyError:
pydev_log.info("Error removing breakpoint: Breakpoint id not found: %s id: %s. Available ids: %s\n",
filename, breakpoint_id, dict_keys(id_to_pybreakpoint))
py_db.on_breakpoints_changed(removed=True) | def remove_breakpoint(self, py_db, filename, breakpoint_type, breakpoint_id) | :param str filename:
Note: must be already translated for the server.
:param str breakpoint_type:
One of: 'python-line', 'django-line', 'jinja2-line'.
:param int breakpoint_id: | 3.348633 | 2.881766 | 1.162007 |
'''
:param str filename:
Note: must be already translated for the server.
'''
try:
assert filename.__class__ == str # i.e.: bytes on py2 and str on py3
with open(filename, 'r') as stream:
source = stream.read()
cmd = py_db.cmd_factory.make_load_source_message(seq, source)
except:
cmd = py_db.cmd_factory.make_error_message(seq, get_exception_traceback_str())
py_db.writer.add_command(cmd) | def request_load_source(self, py_db, seq, filename) | :param str filename:
Note: must be already translated for the server. | 4.922337 | 3.523501 | 1.397002 |
'''
:param VariablesRequest request:
'''
py_db.post_method_as_internal_command(
thread_id, internal_get_variable_json, request) | def request_get_variable_json(self, py_db, request, thread_id) | :param VariablesRequest request: | 13.168398 | 8.815404 | 1.493794 |
'''
:param SetVariableRequest request:
'''
py_db.post_method_as_internal_command(
thread_id, internal_change_variable_json, request) | def request_change_variable_json(self, py_db, request, thread_id) | :param SetVariableRequest request: | 12.355237 | 8.122445 | 1.521123 |
'''
:param dict setup:
A dict previously gotten from process_command_line.
:note: does not handle --file nor --DEBUG.
'''
ret = [get_pydevd_file()]
for handler in ACCEPTED_ARG_HANDLERS:
if handler.arg_name in setup:
handler.to_argv(ret, setup)
return ret | def setup_to_argv(setup) | :param dict setup:
A dict previously gotten from process_command_line.
:note: does not handle --file nor --DEBUG. | 11.102363 | 4.238618 | 2.619335 |
setup = {}
for handler in ACCEPTED_ARG_HANDLERS:
setup[handler.arg_name] = handler.default_val
setup['file'] = ''
setup['qt-support'] = ''
i = 0
del argv[0]
while i < len(argv):
handler = ARGV_REP_TO_HANDLER.get(argv[i])
if handler is not None:
handler.handle_argv(argv, i, setup)
elif argv[i].startswith('--qt-support'):
# The --qt-support is special because we want to keep backward compatibility:
# Previously, just passing '--qt-support' meant that we should use the auto-discovery mode
# whereas now, if --qt-support is passed, it should be passed as --qt-support=<mode>, where
# mode can be one of 'auto', 'none', 'pyqt5', 'pyqt4', 'pyside'.
if argv[i] == '--qt-support':
setup['qt-support'] = 'auto'
elif argv[i].startswith('--qt-support='):
qt_support = argv[i][len('--qt-support='):]
valid_modes = ('none', 'auto', 'pyqt5', 'pyqt4', 'pyside')
if qt_support not in valid_modes:
raise ValueError("qt-support mode invalid: " + qt_support)
if qt_support == 'none':
# On none, actually set an empty string to evaluate to False.
setup['qt-support'] = ''
else:
setup['qt-support'] = qt_support
else:
raise ValueError("Unexpected definition for qt-support flag: " + argv[i])
del argv[i]
elif argv[i] == '--file':
# --file is special because it's the last one (so, no handler for it).
del argv[i]
setup['file'] = argv[i]
i = len(argv) # pop out, file is our last argument
elif argv[i] == '--DEBUG':
from pydevd import set_debug
del argv[i]
set_debug(setup)
else:
raise ValueError("Unexpected option: " + argv[i])
return setup | def process_command_line(argv) | parses the arguments.
removes our arguments from the command line | 3.711828 | 3.769548 | 0.984688 |
'''
:return tuple(names, used___dict__), where used___dict__ means we have to access
using obj.__dict__[name] instead of getattr(obj, name)
'''
# TODO: Those should be options (would fix https://github.com/Microsoft/ptvsd/issues/66).
filter_private = False
filter_special = True
filter_function = True
filter_builtin = True
if not names:
names, used___dict__ = self.get_names(var)
d = {}
# Be aware that the order in which the filters are applied attempts to
# optimize the operation by removing as many items as possible in the
# first filters, leaving fewer items for later filters
if filter_builtin or filter_function:
for name in names:
try:
name_as_str = name
if name_as_str.__class__ != str:
name_as_str = '%r' % (name_as_str,)
if filter_special:
if name_as_str.startswith('__') and name_as_str.endswith('__'):
continue
if filter_private:
if name_as_str.startswith('_') or name_as_str.endswith('__'):
continue
if not used___dict__:
attr = getattr(var, name)
else:
attr = var.__dict__[name]
# filter builtins?
if filter_builtin:
if inspect.isbuiltin(attr):
continue
# filter functions?
if filter_function:
if inspect.isroutine(attr) or isinstance(attr, MethodWrapperType):
continue
except:
# if some error occurs getting it, let's put it to the user.
strIO = StringIO.StringIO()
traceback.print_exc(file=strIO)
attr = strIO.getvalue()
d[name_as_str] = attr
return d, used___dict__ | def _get_py_dictionary(self, var, names=None, used___dict__=False) | :return tuple(names, used___dict__), where used___dict__ means we have to access
using obj.__dict__[name] instead of getattr(obj, name) | 3.950548 | 3.354478 | 1.177694 |
'''
This method is to be used in the case where the variables are all saved by its id (and as
such don't need to have the `resolve` method called later on, so, keys don't need to
embed the reference in the key).
Note that the return should be ordered.
:return list(tuple(name:str, value:object, evaluateName:str))
'''
ret = []
i = 0
for key, val in dict_iter_items(dct):
i += 1
key_as_str = self.key_to_str(key, fmt)
eval_key_str = self.key_to_str(key) # do not format the key
ret.append((key_as_str, val, '[%s]' % (eval_key_str,)))
if i > MAX_ITEMS_TO_HANDLE:
ret.append((TOO_LARGE_ATTR, TOO_LARGE_MSG, None))
break
ret.append(('__len__', len(dct), partial(_apply_evaluate_name, evaluate_name='len(%s)')))
# in case the class extends built-in type and has some additional fields
from_default_resolver = defaultResolver.get_contents_debug_adapter_protocol(dct, fmt)
if from_default_resolver:
ret = from_default_resolver + ret
return sorted(ret, key=lambda tup: sorted_attributes_key(tup[0])) | def get_contents_debug_adapter_protocol(self, dct, fmt=None) | This method is to be used in the case where the variables are all saved by its id (and as
such don't need to have the `resolve` method called later on, so, keys don't need to
embed the reference in the key).
Note that the return should be ordered.
:return list(tuple(name:str, value:object, evaluateName:str)) | 7.643821 | 3.628504 | 2.106604 |
'''
This method is to be used in the case where the variables are all saved by its id (and as
such don't need to have the `resolve` method called later on, so, keys don't need to
embed the reference in the key).
Note that the return should be ordered.
:return list(tuple(name:str, value:object, evaluateName:str))
'''
l = len(lst)
ret = []
format_str = '%0' + str(int(len(str(l - 1)))) + 'd'
if fmt is not None and fmt.get('hex', False):
format_str = '0x%0' + str(int(len(hex(l).lstrip('0x')))) + 'x'
for i, item in enumerate(lst):
ret.append((format_str % i, item, '[%s]' % i))
if i > MAX_ITEMS_TO_HANDLE:
ret.append((TOO_LARGE_ATTR, TOO_LARGE_MSG, None))
break
ret.append(('__len__', len(lst), partial(_apply_evaluate_name, evaluate_name='len(%s)')))
# Needed in case the class extends the built-in type and has some additional fields.
from_default_resolver = defaultResolver.get_contents_debug_adapter_protocol(lst, fmt=fmt)
if from_default_resolver:
ret = from_default_resolver + ret
return ret | def get_contents_debug_adapter_protocol(self, lst, fmt=None) | This method is to be used in the case where the variables are all saved by its id (and as
such don't need to have the `resolve` method called later on, so, keys don't need to
embed the reference in the key).
Note that the return should be ordered.
:return list(tuple(name:str, value:object, evaluateName:str)) | 6.961733 | 3.312335 | 2.10176 |
try:
return getattr(typ, _OTHER_ENDIAN)
except AttributeError:
if type(typ) == _array_type:
return _other_endian(typ._type_) * typ._length_
raise TypeError("This type does not support other endian: %s" % typ) | def _other_endian(typ) | Return the type with the 'other' byte order. Simple types like
c_int and so on already have __ctype_be__ and __ctype_le__
attributes which contain the types, for more complicated types
only arrays are supported. | 4.386366 | 4.268835 | 1.027532 |
if scope == 'BY_ID':
if thread_id != get_current_thread_id(threading.currentThread()):
raise VariableError("getVariable: must execute on same thread")
try:
import gc
objects = gc.get_objects()
except:
pass # Not all python variants have it.
else:
frame_id = int(frame_id)
for var in objects:
if id(var) == frame_id:
if attrs is not None:
attrList = attrs.split('\t')
for k in attrList:
_type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, k)
return var
# If it didn't return previously, we coudn't find it by id (i.e.: alrceady garbage collected).
sys.stderr.write('Unable to find object with id: %s\n' % (frame_id,))
return None
frame = dbg.find_frame(thread_id, frame_id)
if frame is None:
return {}
if attrs is not None:
attrList = attrs.split('\t')
else:
attrList = []
for attr in attrList:
attr.replace("@_@TAB_CHAR@_@", '\t')
if scope == 'EXPRESSION':
for count in xrange(len(attrList)):
if count == 0:
# An Expression can be in any scope (globals/locals), therefore it needs to evaluated as an expression
var = evaluate_expression(dbg, frame, attrList[count], False)
else:
_type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, attrList[count])
else:
if scope == "GLOBAL":
var = frame.f_globals
del attrList[0] # globals are special, and they get a single dummy unused attribute
else:
# in a frame access both locals and globals as Python does
var = {}
var.update(frame.f_globals)
var.update(frame.f_locals)
for k in attrList:
_type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, k)
return var | def getVariable(dbg, thread_id, frame_id, scope, attrs) | returns the value of a variable
:scope: can be BY_ID, EXPRESSION, GLOBAL, LOCAL, FRAME
BY_ID means we'll traverse the list of all objects alive to get the object.
:attrs: after reaching the proper scope, we have to get the attributes until we find
the proper location (i.e.: obj\tattr1\tattr2)
:note: when BY_ID is used, the frame_id is considered the id of the object to find and
not the frame (as we don't care about the frame in this case). | 4.809515 | 4.509456 | 1.06654 |
var = getVariable(dbg, thread_id, frame_id, scope, attrs)
try:
_type, _typeName, resolver = get_type(var)
return _typeName, resolver.get_dictionary(var)
except:
pydev_log.exception('Error evaluating: thread_id: %s\nframe_id: %s\nscope: %s\nattrs: %s.',
thread_id, frame_id, scope, attrs) | def resolve_compound_variable_fields(dbg, thread_id, frame_id, scope, attrs) | Resolve compound variable in debugger scopes by its name and attributes
:param thread_id: id of the variable's thread
:param frame_id: id of the variable's frame
:param scope: can be BY_ID, EXPRESSION, GLOBAL, LOCAL, FRAME
:param attrs: after reaching the proper scope, we have to get the attributes until we find
the proper location (i.e.: obj\tattr1\tattr2)
:return: a dictionary of variables's fields | 5.072242 | 5.236456 | 0.96864 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.