code
string | signature
string | docstring
string | loss_without_docstring
float64 | loss_with_docstring
float64 | factor
float64 |
---|---|---|---|---|---|
''' Get completions and write back to the client
'''
try:
frame = dbg.find_frame(self.thread_id, self.frame_id)
completions_xml = pydevd_console.get_completions(frame, self.act_tok)
cmd = dbg.cmd_factory.make_send_console_message(self.sequence, completions_xml)
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error in fetching completions" + exc)
dbg.writer.add_command(cmd) | def do_it(self, dbg) | Get completions and write back to the client | 4.647596 | 3.857646 | 1.204775 |
''' Converts request into python variable '''
try:
try:
# don't trace new threads created by console command
disable_trace_thread_modules()
result = pydevconsole.console_exec(self.thread_id, self.frame_id, self.expression, dbg)
xml = "<xml>"
xml += pydevd_xml.var_to_xml(result, "")
xml += "</xml>"
cmd = dbg.cmd_factory.make_evaluate_expression_message(self.sequence, xml)
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating console expression " + exc)
dbg.writer.add_command(cmd)
finally:
enable_trace_thread_modules()
sys.stderr.flush()
sys.stdout.flush() | def do_it(self, dbg) | Converts request into python variable | 5.093088 | 4.732234 | 1.076255 |
'''Starts a thread that will load values asynchronously'''
try:
var_objects = []
for variable in self.vars:
variable = variable.strip()
if len(variable) > 0:
if '\t' in variable: # there are attributes beyond scope
scope, attrs = variable.split('\t', 1)
name = attrs[0]
else:
scope, attrs = (variable, None)
name = scope
var_obj = pydevd_vars.getVariable(dbg, self.thread_id, self.frame_id, scope, attrs)
var_objects.append((var_obj, name))
t = GetValueAsyncThreadDebug(dbg, self.sequence, var_objects)
t.start()
except:
exc = get_exception_traceback_str()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating variable %s " % exc)
dbg.writer.add_command(cmd) | def do_it(self, dbg) | Starts a thread that will load values asynchronously | 4.974616 | 4.516438 | 1.101447 |
code = GetLastError()
if code != ERROR_SUCCESS:
raise ctypes.WinError(code)
return result | def RaiseIfLastError(result, func = None, arguments = ()) | Error checking for Win32 API calls with no error-specific return value.
Regardless of the return value, the function calls GetLastError(). If the
code is not C{ERROR_SUCCESS} then a C{WindowsError} exception is raised.
For this to work, the user MUST call SetLastError(ERROR_SUCCESS) prior to
calling the API. Otherwise an exception may be raised even on success,
since most API calls don't clear the error status code. | 3.798711 | 5.220104 | 0.727708 |
_Wow64EnableWow64FsRedirection = windll.kernel32.Wow64EnableWow64FsRedirection
_Wow64EnableWow64FsRedirection.argtypes = [BOOLEAN]
_Wow64EnableWow64FsRedirection.restype = BOOLEAN
_Wow64EnableWow64FsRedirection.errcheck = RaiseIfZero | def Wow64EnableWow64FsRedirection(Wow64FsEnableRedirection) | This function may not work reliably when there are nested calls. Therefore,
this function has been replaced by the L{Wow64DisableWow64FsRedirection}
and L{Wow64RevertWow64FsRedirection} functions.
@see: U{http://msdn.microsoft.com/en-us/library/windows/desktop/aa365744(v=vs.85).aspx} | 1.540948 | 1.680687 | 0.916856 |
if self.bOwnership and self.value not in (None, INVALID_HANDLE_VALUE):
if Handle.__bLeakDetection: # XXX DEBUG
print("CLOSE HANDLE (%d) %r" % (self.value, self))
try:
self._close()
finally:
self._value = None | def close(self) | Closes the Win32 handle. | 12.804081 | 11.327833 | 1.13032 |
if hasattr(value, 'value'):
value = value.value
if value is not None:
value = long(value)
return value | def _normalize(value) | Normalize handle values. | 4.096873 | 3.676937 | 1.114208 |
if self.value is None:
raise ValueError("Handle is already closed!")
if dwMilliseconds is None:
dwMilliseconds = INFINITE
r = WaitForSingleObject(self.value, dwMilliseconds)
if r != WAIT_OBJECT_0:
raise ctypes.WinError(r) | def wait(self, dwMilliseconds = None) | Wait for the Win32 object to be signaled.
@type dwMilliseconds: int
@param dwMilliseconds: (Optional) Timeout value in milliseconds.
Use C{INFINITE} or C{None} for no timeout. | 3.377237 | 3.475324 | 0.971776 |
self.fixers.append(fixer)
tree = reduce_tree(fixer.pattern_tree)
linear = tree.get_linear_subpattern()
match_nodes = self.add(linear, start=self.root)
for match_node in match_nodes:
match_node.fixers.append(fixer) | def add_fixer(self, fixer) | Reduces a fixer's pattern tree to a linear path and adds it
to the matcher(a common Aho-Corasick automaton). The fixer is
appended on the matching states and called when they are
reached | 5.613872 | 4.147364 | 1.3536 |
"Recursively adds a linear pattern to the AC automaton"
#print("adding pattern", pattern, "to", start)
if not pattern:
#print("empty pattern")
return [start]
if isinstance(pattern[0], tuple):
#alternatives
#print("alternatives")
match_nodes = []
for alternative in pattern[0]:
#add all alternatives, and add the rest of the pattern
#to each end node
end_nodes = self.add(alternative, start=start)
for end in end_nodes:
match_nodes.extend(self.add(pattern[1:], end))
return match_nodes
else:
#single token
#not last
if pattern[0] not in start.transition_table:
#transition did not exist, create new
next_node = BMNode()
start.transition_table[pattern[0]] = next_node
else:
#transition exists already, follow
next_node = start.transition_table[pattern[0]]
if pattern[1:]:
end_nodes = self.add(pattern[1:], start=next_node)
else:
end_nodes = [next_node]
return end_nodes | def add(self, pattern, start) | Recursively adds a linear pattern to the AC automaton | 3.681453 | 3.310982 | 1.111892 |
current_ac_node = self.root
results = defaultdict(list)
for leaf in leaves:
current_ast_node = leaf
while current_ast_node:
current_ast_node.was_checked = True
for child in current_ast_node.children:
# multiple statements, recheck
if isinstance(child, pytree.Leaf) and child.value == u";":
current_ast_node.was_checked = False
break
if current_ast_node.type == 1:
#name
node_token = current_ast_node.value
else:
node_token = current_ast_node.type
if node_token in current_ac_node.transition_table:
#token matches
current_ac_node = current_ac_node.transition_table[node_token]
for fixer in current_ac_node.fixers:
if not fixer in results:
results[fixer] = []
results[fixer].append(current_ast_node)
else:
#matching failed, reset automaton
current_ac_node = self.root
if (current_ast_node.parent is not None
and current_ast_node.parent.was_checked):
#the rest of the tree upwards has been checked, next leaf
break
#recheck the rejected node once from the root
if node_token in current_ac_node.transition_table:
#token matches
current_ac_node = current_ac_node.transition_table[node_token]
for fixer in current_ac_node.fixers:
if not fixer in results.keys():
results[fixer] = []
results[fixer].append(current_ast_node)
current_ast_node = current_ast_node.parent
return results | def run(self, leaves) | The main interface with the bottom matcher. The tree is
traversed from the bottom using the constructed
automaton. Nodes are only checked once as the tree is
retraversed. When the automaton fails, we give it one more
shot(in case the above tree matches as a whole with the
rejected leaf), then we break for the next leaf. There is the
special case of multiple arguments(see code comments) where we
recheck the nodes
Args:
The leaves of the AST tree to be matched
Returns:
A dictionary of node matches with fixers as the keys | 3.091098 | 2.78075 | 1.111606 |
"Prints a graphviz diagram of the BM automaton(for debugging)"
print("digraph g{")
def print_node(node):
for subnode_key in node.transition_table.keys():
subnode = node.transition_table[subnode_key]
print("%d -> %d [label=%s] //%s" %
(node.id, subnode.id, type_repr(subnode_key), str(subnode.fixers)))
if subnode_key == 1:
print(subnode.content)
print_node(subnode)
print_node(self.root)
print("}") | def print_ac(self) | Prints a graphviz diagram of the BM automaton(for debugging) | 5.299638 | 3.864891 | 1.371226 |
if _get_globals_callback is not None:
return _get_globals_callback()
else:
try:
from __main__ import __dict__ as namespace
except ImportError:
try:
# The import fails on IronPython
import __main__
namespace = __main__.__dict__
except:
namespace
shell = namespace.get('__ipythonshell__')
if shell is not None and hasattr(shell, 'user_ns'):
# IPython 0.12+ kernel
return shell.user_ns
else:
# Python interpreter
return namespace
return namespace | def _get_globals() | Return current Python interpreter globals namespace | 3.93937 | 3.680902 | 1.070219 |
try:
if hasattr(filename, 'decode'):
filename = filename.decode('utf-8')
except (UnicodeError, TypeError):
pass
global __umd__
if os.environ.get("PYDEV_UMD_ENABLED", "").lower() == "true":
if __umd__ is None:
namelist = os.environ.get("PYDEV_UMD_NAMELIST", None)
if namelist is not None:
namelist = namelist.split(',')
__umd__ = UserModuleDeleter(namelist=namelist)
else:
verbose = os.environ.get("PYDEV_UMD_VERBOSE", "").lower() == "true"
__umd__.run(verbose=verbose)
if args is not None and not isinstance(args, basestring):
raise TypeError("expected a character buffer object")
if namespace is None:
namespace = _get_globals()
if '__file__' in namespace:
old_file = namespace['__file__']
else:
old_file = None
namespace['__file__'] = filename
sys.argv = [filename]
if args is not None:
for arg in args.split():
sys.argv.append(arg)
if wdir is not None:
try:
if hasattr(wdir, 'decode'):
wdir = wdir.decode('utf-8')
except (UnicodeError, TypeError):
pass
os.chdir(wdir)
execfile(filename, namespace)
sys.argv = ['']
if old_file is None:
del namespace['__file__']
else:
namespace['__file__'] = old_file | def runfile(filename, args=None, wdir=None, namespace=None) | Run filename
args: command line arguments (string)
wdir: working directory | 2.30673 | 2.326133 | 0.991659 |
log = []
modules_copy = dict(sys.modules)
for modname, module in modules_copy.items():
if modname == 'aaaaa':
print(modname, module)
print(self.previous_modules)
if modname not in self.previous_modules:
modpath = getattr(module, '__file__', None)
if modpath is None:
# *module* is a C module that is statically linked into the
# interpreter. There is no way to know its path, so we
# choose to ignore it.
continue
if not self.is_module_blacklisted(modname, modpath):
log.append(modname)
del sys.modules[modname]
if verbose and log:
print("\x1b[4;33m%s\x1b[24m%s\x1b[0m" % ("UMD has deleted",
": " + ", ".join(log))) | def run(self, verbose=False) | Del user modules to force Python to deeply reload them
Do not del modules which are considered as system modules, i.e.
modules installed in subdirectories of Python interpreter's binary
Do not del C modules | 4.451091 | 4.062027 | 1.095781 |
if sys.version_info >= (2, 7):
import sysconfig
return sysconfig.get_paths()['stdlib']
else:
return os.path.join(sys.prefix, 'lib') | def get_stdlib_path() | Returns the path to the standard lib for the current path installation.
This function can be dropped and "sysconfig.get_paths()" used directly once Python 2.6 support is dropped. | 2.757732 | 2.199167 | 1.253989 |
result = os.path.exists(path)
if sys.platform.startswith('win') and result:
directory, basename = os.path.split(path)
result = basename in os.listdir(directory)
return result | def exists_case_sensitive(path) | Returns if the given path exists and also matches the case on Windows.
When finding files that can be imported, it is important for the cases to match because while
file os.path.exists("module.py") and os.path.exists("MODULE.py") both return True on Windows, Python
can only import using the case of the real file. | 2.769833 | 3.168462 | 0.874188 |
lines = copy.copy(lines)
while lines and lines[0].startswith("#"):
lines = lines[1:]
return "\n".join(lines) | def _strip_top_comments(lines) | Strips # comments that exist at the top of the given lines | 3.035533 | 2.731745 | 1.111207 |
for forced_separate in self.config['forced_separate']:
# Ensure all forced_separate patterns will match to end of string
path_glob = forced_separate
if not forced_separate.endswith('*'):
path_glob = '%s*' % forced_separate
if fnmatch(module_name, path_glob) or fnmatch(module_name, '.' + path_glob):
return forced_separate
if module_name.startswith("."):
return self.sections.LOCALFOLDER
# Try to find most specific placement instruction match (if any)
parts = module_name.split('.')
module_names_to_check = ['.'.join(parts[:first_k]) for first_k in range(len(parts), 0, -1)]
for module_name_to_check in module_names_to_check:
for pattern, placement in self.known_patterns:
if pattern.match(module_name_to_check):
return placement
# Use a copy of sys.path to avoid any unintended modifications
# to it - e.g. `+=` used below will change paths in place and
# if not copied, consequently sys.path, which will grow unbounded
# with duplicates on every call to this method.
paths = list(sys.path)
virtual_env = self.config.get('virtual_env') or os.environ.get('VIRTUAL_ENV')
virtual_env_src = False
if virtual_env:
paths += [path for path in glob('{0}/lib/python*/site-packages'.format(virtual_env))
if path not in paths]
paths += [path for path in glob('{0}/src/*'.format(virtual_env)) if os.path.isdir(path)]
virtual_env_src = '{0}/src/'.format(virtual_env)
# handle case-insensitive paths on windows
stdlib_lib_prefix = os.path.normcase(get_stdlib_path())
for prefix in paths:
module_path = "/".join((prefix, module_name.replace(".", "/")))
package_path = "/".join((prefix, module_name.split(".")[0]))
is_module = (exists_case_sensitive(module_path + ".py") or
exists_case_sensitive(module_path + ".so"))
is_package = exists_case_sensitive(package_path) and os.path.isdir(package_path)
if is_module or is_package:
if ('site-packages' in prefix or 'dist-packages' in prefix or
(virtual_env and virtual_env_src in prefix)):
return self.sections.THIRDPARTY
elif os.path.normcase(prefix).startswith(stdlib_lib_prefix):
return self.sections.STDLIB
else:
return self.config['default_section']
return self.config['default_section'] | def place_module(self, module_name) | Tries to determine if a module is a python std import, third party import, or project code:
if it can't determine - it assumes it is project code | 3.827552 | 3.77435 | 1.014096 |
return comments and "{0} # {1}".format(self._strip_comments(original_string)[0],
"; ".join(comments)) or original_string | def _add_comments(self, comments, original_string="") | Returns a string with comments added | 7.735901 | 7.880767 | 0.981618 |
if comments is None:
comments = []
new_comments = False
comment_start = line.find("#")
if comment_start != -1:
comments.append(line[comment_start + 1:].strip())
new_comments = True
line = line[:comment_start]
return line, comments, new_comments | def _strip_comments(line, comments=None) | Removes comments from import line. | 2.314918 | 2.208221 | 1.048318 |
'''Sets the tracing function with the pydev debug function and initializes needed facilities.
@param host: the user may specify another host, if the debug server is not in the same machine (default is the local
host)
@param stdoutToServer: when this is true, the stdout is passed to the debug server
@param stderrToServer: when this is true, the stderr is passed to the debug server
so that they are printed in its console and not in this process console.
@param port: specifies which port to use for communicating with the server (note that the server must be started
in the same port). @note: currently it's hard-coded at 5678 in the client
@param suspend: whether a breakpoint should be emulated as soon as this function is called.
@param trace_only_current_thread: determines if only the current thread will be traced or all current and future
threads will also have the tracing enabled.
@param overwrite_prev_trace: deprecated
@param patch_multiprocessing: if True we'll patch the functions which create new processes so that launched
processes are debugged.
@param stop_at_frame: if passed it'll stop at the given frame, otherwise it'll stop in the function which
called this method.
'''
_set_trace_lock.acquire()
try:
_locked_settrace(
host,
stdoutToServer,
stderrToServer,
port,
suspend,
trace_only_current_thread,
patch_multiprocessing,
stop_at_frame,
)
finally:
_set_trace_lock.release() | def settrace(
host=None,
stdoutToServer=False,
stderrToServer=False,
port=5678,
suspend=True,
trace_only_current_thread=False,
overwrite_prev_trace=False,
patch_multiprocessing=False,
stop_at_frame=None,
) | Sets the tracing function with the pydev debug function and initializes needed facilities.
@param host: the user may specify another host, if the debug server is not in the same machine (default is the local
host)
@param stdoutToServer: when this is true, the stdout is passed to the debug server
@param stderrToServer: when this is true, the stderr is passed to the debug server
so that they are printed in its console and not in this process console.
@param port: specifies which port to use for communicating with the server (note that the server must be started
in the same port). @note: currently it's hard-coded at 5678 in the client
@param suspend: whether a breakpoint should be emulated as soon as this function is called.
@param trace_only_current_thread: determines if only the current thread will be traced or all current and future
threads will also have the tracing enabled.
@param overwrite_prev_trace: deprecated
@param patch_multiprocessing: if True we'll patch the functions which create new processes so that launched
processes are debugged.
@param stop_at_frame: if passed it'll stop at the given frame, otherwise it'll stop in the function which
called this method. | 4.901727 | 1.245914 | 3.934242 |
'''
When creating a fork from a process in the debugger, we need to reset the whole debugger environment!
'''
from _pydevd_bundle.pydevd_constants import GlobalDebuggerHolder
GlobalDebuggerHolder.global_dbg = None
threading.current_thread().additional_info = None
from _pydevd_frame_eval.pydevd_frame_eval_main import clear_thread_local_info
host, port = dispatch()
import pydevd_tracing
pydevd_tracing.restore_sys_set_trace_func()
if port is not None:
global connected
connected = False
global forked
forked = True
custom_frames_container_init()
if clear_thread_local_info is not None:
clear_thread_local_info()
settrace(
host,
port=port,
suspend=False,
trace_only_current_thread=False,
overwrite_prev_trace=True,
patch_multiprocessing=True,
) | def settrace_forked() | When creating a fork from a process in the debugger, we need to reset the whole debugger environment! | 5.845376 | 4.506875 | 1.296991 |
'''
:param abs_real_path_and_basename:
The result from get_abs_path_real_path_and_base_from_file or
get_abs_path_real_path_and_base_from_frame.
:return
_pydevd_bundle.pydevd_dont_trace_files.PYDEV_FILE:
If it's a file internal to the debugger which shouldn't be
traced nor shown to the user.
_pydevd_bundle.pydevd_dont_trace_files.LIB_FILE:
If it's a file in a library which shouldn't be traced.
None:
If it's a regular user file which should be traced.
'''
try:
return _cache_file_type[abs_real_path_and_basename[0]]
except:
file_type = self._internal_get_file_type(abs_real_path_and_basename)
if file_type is None:
file_type = PYDEV_FILE if self.dont_trace_external_files(abs_real_path_and_basename[0]) else None
_cache_file_type[abs_real_path_and_basename[0]] = file_type
return file_type | def get_file_type(self, abs_real_path_and_basename, _cache_file_type=_CACHE_FILE_TYPE) | :param abs_real_path_and_basename:
The result from get_abs_path_real_path_and_base_from_file or
get_abs_path_real_path_and_base_from_frame.
:return
_pydevd_bundle.pydevd_dont_trace_files.PYDEV_FILE:
If it's a file internal to the debugger which shouldn't be
traced nor shown to the user.
_pydevd_bundle.pydevd_dont_trace_files.LIB_FILE:
If it's a file in a library which shouldn't be traced.
None:
If it's a regular user file which should be traced. | 3.112996 | 1.448175 | 2.1496 |
'''
Enables tracing.
If in regular mode (tracing), will set the tracing function to the tracing
function for this thread -- by default it's `PyDB.trace_dispatch`, but after
`PyDB.enable_tracing` is called with a `thread_trace_func`, the given function will
be the default for the given thread.
'''
if self.frame_eval_func is not None:
self.frame_eval_func()
pydevd_tracing.SetTrace(self.dummy_trace_dispatch)
return
if thread_trace_func is None:
thread_trace_func = self.get_thread_local_trace_func()
else:
self._local_thread_trace_func.thread_trace_func = thread_trace_func
pydevd_tracing.SetTrace(thread_trace_func) | def enable_tracing(self, thread_trace_func=None) | Enables tracing.
If in regular mode (tracing), will set the tracing function to the tracing
function for this thread -- by default it's `PyDB.trace_dispatch`, but after
`PyDB.enable_tracing` is called with a `thread_trace_func`, the given function will
be the default for the given thread. | 4.918681 | 2.197959 | 2.23784 |
'''
When breakpoints change, we have to re-evaluate all the assumptions we've made so far.
'''
if not self.ready_to_run:
# No need to do anything if we're still not running.
return
self.mtime += 1
if not removed:
# When removing breakpoints we can leave tracing as was, but if a breakpoint was added
# we have to reset the tracing for the existing functions to be re-evaluated.
self.set_tracing_for_untraced_contexts() | def on_breakpoints_changed(self, removed=False) | When breakpoints change, we have to re-evaluate all the assumptions we've made so far. | 9.220956 | 6.692783 | 1.377746 |
'''
Note: in general this method should not be used (apply_files_filter should be used
in most cases as it also handles the project scope check).
'''
try:
return self._in_project_scope_cache[filename]
except KeyError:
cache = self._in_project_scope_cache
abs_real_path_and_basename = get_abs_path_real_path_and_base_from_file(filename)
# pydevd files are never considered to be in the project scope.
if self.get_file_type(abs_real_path_and_basename) == self.PYDEV_FILE:
cache[filename] = False
else:
cache[filename] = self._files_filtering.in_project_roots(filename)
return cache[filename] | def in_project_scope(self, filename) | Note: in general this method should not be used (apply_files_filter should be used
in most cases as it also handles the project scope check). | 5.587152 | 3.438545 | 1.624859 |
'''
:param str filename:
The filename to filter.
:return: True if it should be excluded, False if it should be included and None
if no rule matched the given file.
'''
try:
return self._exclude_by_filter_cache[filename]
except KeyError:
cache = self._exclude_by_filter_cache
abs_real_path_and_basename = get_abs_path_real_path_and_base_from_file(filename)
# pydevd files are always filtered out
if self.get_file_type(abs_real_path_and_basename) == self.PYDEV_FILE:
cache[filename] = True
else:
module_name = None
if self._files_filtering.require_module:
module_name = frame.f_globals.get('__name__')
cache[filename] = self._files_filtering.exclude_by_filter(filename, module_name)
return cache[filename] | def _exclude_by_filter(self, frame, filename) | :param str filename:
The filename to filter.
:return: True if it should be excluded, False if it should be included and None
if no rule matched the given file. | 4.021332 | 3.164146 | 1.270906 |
'''
Should only be called if `self.is_files_filter_enabled == True`.
Note that it covers both the filter by specific paths includes/excludes as well
as the check which filters out libraries if not in the project scope.
:param force_check_project_scope:
Check that the file is in the project scope even if the global setting
is off.
:return bool:
True if it should be excluded when stepping and False if it should be
included.
'''
cache_key = (frame.f_code.co_firstlineno, frame.f_code.co_name, filename, force_check_project_scope)
try:
return self._apply_filter_cache[cache_key]
except KeyError:
if self.plugin is not None and (self.has_plugin_line_breaks or self.has_plugin_exception_breaks):
# If it's explicitly needed by some plugin, we can't skip it.
if not self.plugin.can_skip(self, frame):
# print('include (include by plugins): %s' % filename)
self._apply_filter_cache[cache_key] = False
return False
if self._exclude_filters_enabled:
exclude_by_filter = self._exclude_by_filter(frame, filename)
if exclude_by_filter is not None:
if exclude_by_filter:
# ignore files matching stepping filters
# print('exclude (filtered out): %s' % filename)
self._apply_filter_cache[cache_key] = True
return True
else:
# print('include (explicitly included): %s' % filename)
self._apply_filter_cache[cache_key] = False
return False
if (self._is_libraries_filter_enabled or force_check_project_scope) and not self.in_project_scope(filename):
# print('exclude (not on project): %s' % filename)
# ignore library files while stepping
self._apply_filter_cache[cache_key] = True
return True
# print('include (on project): %s' % filename)
self._apply_filter_cache[cache_key] = False
return False | def apply_files_filter(self, frame, filename, force_check_project_scope) | Should only be called if `self.is_files_filter_enabled == True`.
Note that it covers both the filter by specific paths includes/excludes as well
as the check which filters out libraries if not in the project scope.
:param force_check_project_scope:
Check that the file is in the project scope even if the global setting
is off.
:return bool:
True if it should be excluded when stepping and False if it should be
included. | 3.645703 | 2.353951 | 1.548759 |
if thread_id.startswith('__frame__'):
thread_id = thread_id[thread_id.rfind('|') + 1:]
return self._cmd_queue[thread_id] | def get_internal_queue(self, thread_id) | returns internal command queue for a given thread.
if new queue is created, notify the RDB about it | 5.045266 | 4.593048 | 1.098457 |
queue = self.get_internal_queue(thread_id)
queue.put(int_cmd) | def post_internal_command(self, int_cmd, thread_id) | if thread_id is *, post to the '*' queue | 4.103518 | 3.36187 | 1.220606 |
if self.writer is None:
return
with self._lock_running_thread_ids if use_lock else NULL:
if not self._enable_thread_notifications:
return
thread = self._running_thread_ids.pop(thread_id, None)
if thread is None:
return
was_notified = thread.additional_info.pydev_notify_kill
if not was_notified:
thread.additional_info.pydev_notify_kill = True
self.writer.add_command(self.cmd_factory.make_thread_killed_message(thread_id)) | def notify_thread_not_alive(self, thread_id, use_lock=True) | if thread is not alive, cancel trace_dispatch processing | 4.391347 | 4.260439 | 1.030727 |
'''This function processes internal commands
'''
with self._main_lock:
self.check_output_redirect()
program_threads_alive = {}
all_threads = threadingEnumerate()
program_threads_dead = []
with self._lock_running_thread_ids:
reset_cache = not self._running_thread_ids
for t in all_threads:
if getattr(t, 'is_pydev_daemon_thread', False):
pass # I.e.: skip the DummyThreads created from pydev daemon threads
elif isinstance(t, PyDBDaemonThread):
pydev_log.error_once('Error in debugger: Found PyDBDaemonThread not marked with is_pydev_daemon_thread=True.')
elif is_thread_alive(t):
if reset_cache:
# Fix multiprocessing debug with breakpoints in both main and child processes
# (https://youtrack.jetbrains.com/issue/PY-17092) When the new process is created, the main
# thread in the new process already has the attribute 'pydevd_id', so the new thread doesn't
# get new id with its process number and the debugger loses access to both threads.
# Therefore we should update thread_id for every main thread in the new process.
clear_cached_thread_id(t)
thread_id = get_thread_id(t)
program_threads_alive[thread_id] = t
self.notify_thread_created(thread_id, t, use_lock=False)
# Compute and notify about threads which are no longer alive.
thread_ids = list(self._running_thread_ids.keys())
for thread_id in thread_ids:
if thread_id not in program_threads_alive:
program_threads_dead.append(thread_id)
for thread_id in program_threads_dead:
self.notify_thread_not_alive(thread_id, use_lock=False)
# Without self._lock_running_thread_ids
if len(program_threads_alive) == 0:
self.finish_debugging_session()
for t in all_threads:
if hasattr(t, 'do_kill_pydev_thread'):
t.do_kill_pydev_thread()
else:
# Actually process the commands now (make sure we don't have a lock for _lock_running_thread_ids
# acquired at this point as it could lead to a deadlock if some command evaluated tried to
# create a thread and wait for it -- which would try to notify about it getting that lock).
curr_thread_id = get_current_thread_id(threadingCurrentThread())
for thread_id in (curr_thread_id, '*'):
queue = self.get_internal_queue(thread_id)
# some commands must be processed by the thread itself... if that's the case,
# we will re-add the commands to the queue after executing.
cmds_to_add_back = []
try:
while True:
int_cmd = queue.get(False)
if not self.mpl_hooks_in_debug_console and isinstance(int_cmd, InternalConsoleExec):
# add import hooks for matplotlib patches if only debug console was started
try:
self.init_matplotlib_in_debug_console()
self.mpl_in_use = True
except:
pydev_log.debug("Matplotlib support in debug console failed", traceback.format_exc())
self.mpl_hooks_in_debug_console = True
if int_cmd.can_be_executed_by(curr_thread_id):
pydev_log.verbose("processing internal command ", int_cmd)
int_cmd.do_it(self)
else:
pydev_log.verbose("NOT processing internal command ", int_cmd)
cmds_to_add_back.append(int_cmd)
except _queue.Empty: # @UndefinedVariable
# this is how we exit
for int_cmd in cmds_to_add_back:
queue.put(int_cmd) | def process_internal_commands(self) | This function processes internal commands | 5.06001 | 5.025196 | 1.006928 |
'''
:param thread:
The thread which should be suspended.
:param stop_reason:
Reason why the thread was suspended.
:param suspend_other_threads:
Whether to force other threads to be suspended (i.e.: when hitting a breakpoint
with a suspend all threads policy).
:param is_pause:
If this is a pause to suspend all threads, any thread can be considered as the 'main'
thread paused.
'''
self._threads_suspended_single_notification.increment_suspend_time()
if is_pause:
self._threads_suspended_single_notification.on_pause()
info = self._mark_suspend(thread, stop_reason)
if is_pause:
# Must set tracing after setting the state to suspend.
frame = info.get_topmost_frame(thread)
if frame is not None:
try:
self.set_trace_for_frame_and_parents(frame)
finally:
frame = None
# If conditional breakpoint raises any exception during evaluation send the details to the client.
if stop_reason == CMD_SET_BREAK and info.conditional_breakpoint_exception is not None:
conditional_breakpoint_exception_tuple = info.conditional_breakpoint_exception
info.conditional_breakpoint_exception = None
self._send_breakpoint_condition_exception(thread, conditional_breakpoint_exception_tuple)
if not suspend_other_threads and self.multi_threads_single_notification:
# In the mode which gives a single notification when all threads are
# stopped, stop all threads whenever a set_suspend is issued.
suspend_other_threads = True
if suspend_other_threads:
# Suspend all other threads.
all_threads = pydevd_utils.get_non_pydevd_threads()
for t in all_threads:
if getattr(t, 'pydev_do_not_trace', None):
pass # skip some other threads, i.e. ipython history saving thread from debug console
else:
if t is thread:
continue
info = self._mark_suspend(t, CMD_THREAD_SUSPEND)
frame = info.get_topmost_frame(t)
# Reset the time as in this case this was not the main thread suspended.
if frame is not None:
try:
self.set_trace_for_frame_and_parents(frame)
finally:
frame = None | def set_suspend(self, thread, stop_reason, suspend_other_threads=False, is_pause=False) | :param thread:
The thread which should be suspended.
:param stop_reason:
Reason why the thread was suspended.
:param suspend_other_threads:
Whether to force other threads to be suspended (i.e.: when hitting a breakpoint
with a suspend all threads policy).
:param is_pause:
If this is a pause to suspend all threads, any thread can be considered as the 'main'
thread paused. | 4.926497 | 3.710921 | 1.327567 |
thread_id = get_thread_id(thread)
# conditional_breakpoint_exception_tuple - should contain 2 values (exception_type, stacktrace)
if conditional_breakpoint_exception_tuple and len(conditional_breakpoint_exception_tuple) == 2:
exc_type, stacktrace = conditional_breakpoint_exception_tuple
int_cmd = InternalGetBreakpointException(thread_id, exc_type, stacktrace)
self.post_internal_command(int_cmd, thread_id) | def _send_breakpoint_condition_exception(self, thread, conditional_breakpoint_exception_tuple) | If conditional breakpoint raises an exception during evaluation
send exception details to java | 3.731052 | 3.591836 | 1.038759 |
thread_id = get_thread_id(thread)
int_cmd = InternalSendCurrExceptionTrace(thread_id, arg, curr_frame_id)
self.post_internal_command(int_cmd, thread_id) | def send_caught_exception_stack(self, thread, arg, curr_frame_id) | Sends details on the exception which was caught (and where we stopped) to the java side.
arg is: exception type, description, traceback object | 6.034667 | 7.012891 | 0.860511 |
thread_id = get_thread_id(thread)
int_cmd = InternalSendCurrExceptionTraceProceeded(thread_id)
self.post_internal_command(int_cmd, thread_id)
self.process_internal_commands() | def send_caught_exception_stack_proceeded(self, thread) | Sends that some thread was resumed and is no longer showing an exception trace. | 7.668965 | 6.626757 | 1.157273 |
cmd = self.cmd_factory.make_process_created_message()
self.writer.add_command(cmd) | def send_process_created_message(self) | Sends a message that a new process has been created. | 6.687088 | 5.594803 | 1.195232 |
# arg must be the exception info (tuple(exc_type, exc, traceback))
tb = arg[2]
while tb is not None:
frame_id_to_lineno[id(tb.tb_frame)] = tb.tb_lineno
tb = tb.tb_next
with self.suspended_frames_manager.track_frames(self) as frames_tracker:
frames_tracker.track(thread_id, frame, frame_id_to_lineno)
cmd = frames_tracker.create_thread_suspend_command(thread_id, stop_reason, message, suspend_type)
self.writer.add_command(cmd)
with CustomFramesContainer.custom_frames_lock: # @UndefinedVariable
from_this_thread = []
for frame_custom_thread_id, custom_frame in dict_iter_items(CustomFramesContainer.custom_frames):
if custom_frame.thread_id == thread.ident:
frames_tracker.track(thread_id, custom_frame.frame, frame_id_to_lineno, frame_custom_thread_id=frame_custom_thread_id)
# print('Frame created as thread: %s' % (frame_custom_thread_id,))
self.writer.add_command(self.cmd_factory.make_custom_frame_created_message(
frame_custom_thread_id, custom_frame.name))
self.writer.add_command(
frames_tracker.create_thread_suspend_command(frame_custom_thread_id, CMD_THREAD_SUSPEND, "", suspend_type))
from_this_thread.append(frame_custom_thread_id)
with self._threads_suspended_single_notification.notify_thread_suspended(thread_id, stop_reason):
keep_suspended = self._do_wait_suspend(thread, frame, event, arg, suspend_type, from_this_thread, frames_tracker)
if keep_suspended:
# This means that we should pause again after a set next statement.
self._threads_suspended_single_notification.increment_suspend_time()
self.do_wait_suspend(thread, frame, event, arg, is_unhandled_exception) | def do_wait_suspend(self, thread, frame, event, arg, is_unhandled_exception=False): # @UnusedVariable
# print('do_wait_suspend %s %s %s %s' % (frame.f_lineno, frame.f_code.co_name, frame.f_code.co_filename, event))
self.process_internal_commands()
thread_id = get_current_thread_id(thread)
# Send the suspend message
message = thread.additional_info.pydev_message
suspend_type = thread.additional_info.trace_suspend_type
thread.additional_info.trace_suspend_type = 'trace' # Reset to trace mode for next call.
frame_id_to_lineno = {}
stop_reason = thread.stop_reason
if is_unhandled_exception | busy waits until the thread state changes to RUN
it expects thread's state as attributes of the thread.
Upon running, processes any outstanding Stepping commands.
:param is_unhandled_exception:
If True we should use the line of the exception instead of the current line in the frame
as the paused location on the top-level frame (exception info must be passed on 'arg'). | 3.578057 | 3.553244 | 1.006983 |
'''
This function should have frames tracked by unhandled exceptions (the `_exec` name is important).
'''
if not is_module:
pydev_imports.execfile(file, globals, locals) # execute the script
else:
# treat ':' as a separator between module and entry point function
# if there is no entry point we run we same as with -m switch. Otherwise we perform
# an import and execute the entry point
if entry_point_fn:
mod = __import__(module_name, level=0, fromlist=[entry_point_fn], globals=globals, locals=locals)
func = getattr(mod, entry_point_fn)
func()
else:
# Run with the -m switch
import runpy
if hasattr(runpy, '_run_module_as_main'):
# Newer versions of Python actually use this when the -m switch is used.
if sys.version_info[:2] <= (2, 6):
runpy._run_module_as_main(module_name, set_argv0=False)
else:
runpy._run_module_as_main(module_name, alter_argv=False)
else:
runpy.run_module(module_name)
return globals | def _exec(self, is_module, entry_point_fn, module_name, file, globals, locals) | This function should have frames tracked by unhandled exceptions (the `_exec` name is important). | 4.484659 | 3.318631 | 1.351358 |
try:
si = GetNativeSystemInfo()
except Exception:
si = GetSystemInfo()
try:
return _arch_map[si.id.w.wProcessorArchitecture]
except KeyError:
return ARCH_UNKNOWN | def _get_arch() | Determines the current processor architecture.
@rtype: str
@return:
On error, returns:
- L{ARCH_UNKNOWN} (C{"unknown"}) meaning the architecture could not be detected or is not known to WinAppDbg.
On success, returns one of the following values:
- L{ARCH_I386} (C{"i386"}) for Intel 32-bit x86 processor or compatible.
- L{ARCH_AMD64} (C{"amd64"}) for Intel 64-bit x86_64 processor or compatible.
May also return one of the following values if you get both Python and
WinAppDbg to work in such machines... let me know if you do! :)
- L{ARCH_MIPS} (C{"mips"}) for MIPS compatible processors.
- L{ARCH_ALPHA} (C{"alpha"}) for Alpha processors.
- L{ARCH_PPC} (C{"ppc"}) for PowerPC compatible processors.
- L{ARCH_SHX} (C{"shx"}) for Hitachi SH processors.
- L{ARCH_ARM} (C{"arm"}) for ARM compatible processors.
- L{ARCH_IA64} (C{"ia64"}) for Intel Itanium processor or compatible.
- L{ARCH_ALPHA64} (C{"alpha64"}) for Alpha64 processors.
- L{ARCH_MSIL} (C{"msil"}) for the .NET virtual machine.
- L{ARCH_SPARC} (C{"sparc"}) for Sun Sparc processors.
Probably IronPython returns C{ARCH_MSIL} but I haven't tried it. Python
on Windows CE and Windows Mobile should return C{ARCH_ARM}. Python on
Solaris using Wine would return C{ARCH_SPARC}. Python in an Itanium
machine should return C{ARCH_IA64} both on Wine and proper Windows.
All other values should only be returned on Linux using Wine. | 7.986921 | 7.849977 | 1.017445 |
# Try to determine if the debugger itself is running on WOW64.
# On error assume False.
if bits == 64:
wow64 = False
else:
try:
wow64 = IsWow64Process( GetCurrentProcess() )
except Exception:
wow64 = False
return wow64 | def _get_wow64() | Determines if the current process is running in Windows-On-Windows 64 bits.
@rtype: bool
@return: C{True} of the current process is a 32 bit program running in a
64 bit version of Windows, C{False} if it's either a 32 bit program
in a 32 bit Windows or a 64 bit program in a 64 bit Windows. | 5.116559 | 5.181659 | 0.987436 |
if not osvi:
osvi = GetVersionEx()
ntddi = 0
ntddi += (osvi.dwMajorVersion & 0xFF) << 24
ntddi += (osvi.dwMinorVersion & 0xFF) << 16
ntddi += (osvi.wServicePackMajor & 0xFF) << 8
ntddi += (osvi.wServicePackMinor & 0xFF)
return ntddi | def _get_ntddi(osvi) | Determines the current operating system.
This function allows you to quickly tell apart major OS differences.
For more detailed information call L{kernel32.GetVersionEx} instead.
@note:
Wine reports itself as Windows XP 32 bits
(even if the Linux host is 64 bits).
ReactOS may report itself as Windows 2000 or Windows XP,
depending on the version of ReactOS.
@type osvi: L{OSVERSIONINFOEXA}
@param osvi: Optional. The return value from L{kernel32.GetVersionEx}.
@rtype: int
@return: NTDDI version number. | 2.330928 | 2.069716 | 1.126207 |
'''
To be used to temporarily change the logging settings.
'''
original_trace_level = DebugInfoHolder.DEBUG_TRACE_LEVEL
original_stream = DebugInfoHolder.DEBUG_STREAM
DebugInfoHolder.DEBUG_TRACE_LEVEL = trace_level
DebugInfoHolder.DEBUG_STREAM = stream
try:
yield
finally:
DebugInfoHolder.DEBUG_TRACE_LEVEL = original_trace_level
DebugInfoHolder.DEBUG_STREAM = original_stream | def log_context(trace_level, stream) | To be used to temporarily change the logging settings. | 2.561523 | 2.008519 | 1.275329 |
'''
Levels are:
0 most serious warnings/errors (always printed)
1 warnings/significant events
2 informational trace
3 verbose mode
'''
if level <= DebugInfoHolder.DEBUG_TRACE_LEVEL:
# yes, we can have errors printing if the console of the program has been finished (and we're still trying to print something)
try:
try:
if args:
msg = msg % args
except:
msg = '%s - %s' % (msg, args)
DebugInfoHolder.DEBUG_STREAM.write('%s\n' % (msg,))
DebugInfoHolder.DEBUG_STREAM.flush()
except:
pass
return True | def _pydevd_log(level, msg, *args) | Levels are:
0 most serious warnings/errors (always printed)
1 warnings/significant events
2 informational trace
3 verbose mode | 7.888815 | 4.424337 | 1.78305 |
'''
Note: don't call directly. Use PyDb.in_project_scope (no caching here).
'''
project_roots = self._get_project_roots()
if not filename.endswith('>'):
filename = self._normpath(filename)
found_in_project = []
for root in project_roots:
if root and filename.startswith(root):
found_in_project.append(root)
found_in_library = []
library_roots = self._get_library_roots()
for root in library_roots:
if root and filename.startswith(root):
found_in_library.append(root)
if not project_roots:
# If we have no project roots configured, consider it being in the project
# roots if it's not found in site-packages (because we have defaults for those
# and not the other way around).
if filename.endswith('>'):
# This is a dummy filename that is usually used for eval or exec. Assume
# that it is user code, with one exception: <frozen ...> is used in the
# standard library.
in_project = not filename.startswith('<frozen ')
else:
in_project = not found_in_library
else:
in_project = False
if found_in_project:
if not found_in_library:
in_project = True
else:
# Found in both, let's see which one has the bigger path matched.
if max(len(x) for x in found_in_project) > max(len(x) for x in found_in_library):
in_project = True
return in_project | def in_project_roots(self, filename) | Note: don't call directly. Use PyDb.in_project_scope (no caching here). | 4.129684 | 3.463382 | 1.192385 |
'''
:return: True if it should be excluded, False if it should be included and None
if no rule matched the given file.
'''
for exclude_filter in self._exclude_filters: # : :type exclude_filter: ExcludeFilter
if exclude_filter.is_path:
if glob_matches_path(filename, exclude_filter.name):
if exclude_filter.exclude:
pydev_log.debug("File %s ignored by filter %s" % (filename, exclude_filter.name))
return exclude_filter.exclude
else:
# Module filter.
if exclude_filter.name == module_name or module_name.startswith(exclude_filter.name + '.'):
return exclude_filter.exclude
return None | def exclude_by_filter(self, filename, module_name) | :return: True if it should be excluded, False if it should be included and None
if no rule matched the given file. | 3.880848 | 2.953616 | 1.313931 |
'''
:param list(ExcludeFilter) exclude_filters:
'''
self._exclude_filters = exclude_filters
self.require_module = False
for exclude_filter in exclude_filters:
if not exclude_filter.is_path:
self.require_module = True
break | def set_exclude_filters(self, exclude_filters) | :param list(ExcludeFilter) exclude_filters: | 3.872037 | 3.171125 | 1.221029 |
if isinstance(init, (str, unicode)):
if size is None:
size = len(init) + 1
buftype = c_char * size
buf = buftype()
buf.value = init
return buf
elif isinstance(init, (int, long)):
buftype = c_char * init
buf = buftype()
return buf
raise TypeError, init | def create_string_buffer(init, size=None) | create_string_buffer(aString) -> character array
create_string_buffer(anInteger) -> character array
create_string_buffer(aString, anInteger) -> character array | 2.059632 | 2.221114 | 0.927297 |
try:
return _c_functype_cache[(restype, argtypes)]
except KeyError:
class CFunctionType(_CFuncPtr):
_argtypes_ = argtypes
_restype_ = restype
_flags_ = _FUNCFLAG_CDECL
_c_functype_cache[(restype, argtypes)] = CFunctionType
return CFunctionType | def CFUNCTYPE(restype, *argtypes) | CFUNCTYPE(restype, *argtypes) -> function prototype.
restype: the result type
argtypes: a sequence specifying the argument types
The function prototype can be called in three ways to create a
callable object:
prototype(integer address) -> foreign function
prototype(callable) -> create and return a C callable function from callable
prototype(integer index, method name[, paramflags]) -> foreign function calling a COM method
prototype((ordinal number, dll object)[, paramflags]) -> foreign function exported by ordinal
prototype((function name, dll object)[, paramflags]) -> foreign function exported by name | 2.274912 | 2.882975 | 0.789085 |
jump_instructions = (
'jmp', 'jecxz', 'jcxz',
'ja', 'jnbe', 'jae', 'jnb', 'jb', 'jnae', 'jbe', 'jna', 'jc', 'je',
'jz', 'jnc', 'jne', 'jnz', 'jnp', 'jpo', 'jp', 'jpe', 'jg', 'jnle',
'jge', 'jnl', 'jl', 'jnge', 'jle', 'jng', 'jno', 'jns', 'jo', 'js'
)
call_instructions = ( 'call', 'ret', 'retn' )
loop_instructions = ( 'loop', 'loopz', 'loopnz', 'loope', 'loopne' )
control_flow_instructions = call_instructions + loop_instructions + \
jump_instructions
isControlFlow = False
instruction = None
if self.pc is not None and self.faultDisasm:
for disasm in self.faultDisasm:
if disasm[0] == self.pc:
instruction = disasm[2].lower().strip()
break
if instruction:
for x in control_flow_instructions:
if x in instruction:
isControlFlow = True
break
return isControlFlow | def __is_control_flow(self) | Private method to tell if the instruction pointed to by the program
counter is a control flow instruction.
Currently only works for x86 and amd64 architectures. | 2.793469 | 2.764318 | 1.010546 |
block_data_move_instructions = ('movs', 'stos', 'lods')
isBlockDataMove = False
instruction = None
if self.pc is not None and self.faultDisasm:
for disasm in self.faultDisasm:
if disasm[0] == self.pc:
instruction = disasm[2].lower().strip()
break
if instruction:
for x in block_data_move_instructions:
if x in instruction:
isBlockDataMove = True
break
return isBlockDataMove | def __is_block_data_move(self) | Private method to tell if the instruction pointed to by the program
counter is a block data move instruction.
Currently only works for x86 and amd64 architectures. | 3.74701 | 3.454317 | 1.084733 |
msg = self.briefReport()
msg += '\n'
if self.bits == 32:
width = 16
else:
width = 8
if self.eventCode == win32.EXCEPTION_DEBUG_EVENT:
(exploitability, expcode, expdescription) = self.isExploitable()
msg += '\nSecurity risk level: %s\n' % exploitability
msg += ' %s\n' % expdescription
if bShowNotes and self.notes:
msg += '\nNotes:\n'
msg += self.notesReport()
if self.commandLine:
msg += '\nCommand line: %s\n' % self.commandLine
if self.environment:
msg += '\nEnvironment:\n'
msg += self.environmentReport()
if not self.labelPC:
base = HexDump.address(self.lpBaseOfDll, self.bits)
if self.modFileName:
fn = PathOperations.pathname_to_filename(self.modFileName)
msg += '\nRunning in %s (%s)\n' % (fn, base)
else:
msg += '\nRunning in module at %s\n' % base
if self.registers:
msg += '\nRegisters:\n'
msg += CrashDump.dump_registers(self.registers)
if self.registersPeek:
msg += '\n'
msg += CrashDump.dump_registers_peek(self.registers,
self.registersPeek,
width = width)
if self.faultDisasm:
msg += '\nCode disassembly:\n'
msg += CrashDump.dump_code(self.faultDisasm, self.pc,
bits = self.bits)
if self.stackTrace:
msg += '\nStack trace:\n'
if self.stackTracePretty:
msg += CrashDump.dump_stack_trace_with_labels(
self.stackTracePretty,
bits = self.bits)
else:
msg += CrashDump.dump_stack_trace(self.stackTrace,
bits = self.bits)
if self.stackFrame:
if self.stackPeek:
msg += '\nStack pointers:\n'
msg += CrashDump.dump_stack_peek(self.stackPeek, width = width)
msg += '\nStack dump:\n'
msg += HexDump.hexblock(self.stackFrame, self.sp,
bits = self.bits, width = width)
if self.faultCode and not self.modFileName:
msg += '\nCode dump:\n'
msg += HexDump.hexblock(self.faultCode, self.pc,
bits = self.bits, width = width)
if self.faultMem:
if self.faultPeek:
msg += '\nException address pointers:\n'
msg += CrashDump.dump_data_peek(self.faultPeek,
self.exceptionAddress,
bits = self.bits,
width = width)
msg += '\nException address dump:\n'
msg += HexDump.hexblock(self.faultMem, self.exceptionAddress,
bits = self.bits, width = width)
if self.memoryMap:
msg += '\nMemory map:\n'
mappedFileNames = dict()
for mbi in self.memoryMap:
if hasattr(mbi, 'filename') and mbi.filename:
mappedFileNames[mbi.BaseAddress] = mbi.filename
msg += CrashDump.dump_memory_map(self.memoryMap, mappedFileNames,
bits = self.bits)
if not msg.endswith('\n\n'):
if not msg.endswith('\n'):
msg += '\n'
msg += '\n'
return msg | def fullReport(self, bShowNotes = True) | @type bShowNotes: bool
@param bShowNotes: C{True} to show the user notes, C{False} otherwise.
@rtype: str
@return: Long description of the event. | 2.730659 | 2.761506 | 0.98883 |
if key in self.__keys:
return self.__keys[key]
skey = pickle.dumps(key, protocol = 0)
if self.compressKeys:
skey = zlib.compress(skey, zlib.Z_BEST_COMPRESSION)
if self.escapeKeys:
skey = skey.encode('hex')
if self.binaryKeys:
skey = buffer(skey)
self.__keys[key] = skey
return skey | def marshall_key(self, key) | Marshalls a Crash key to be used in the database.
@see: L{__init__}
@type key: L{Crash} key.
@param key: Key to convert.
@rtype: str or buffer
@return: Converted key. | 2.863786 | 2.989365 | 0.957992 |
key = str(key)
if self.escapeKeys:
key = key.decode('hex')
if self.compressKeys:
key = zlib.decompress(key)
key = pickle.loads(key)
return key | def unmarshall_key(self, key) | Unmarshalls a Crash key read from the database.
@type key: str or buffer
@param key: Key to convert.
@rtype: L{Crash} key.
@return: Converted key. | 3.641973 | 4.435093 | 0.821172 |
if hasattr(value, 'memoryMap'):
crash = value
memoryMap = crash.memoryMap
try:
crash.memoryMap = None
if storeMemoryMap and memoryMap is not None:
# convert the generator to a list
crash.memoryMap = list(memoryMap)
if self.optimizeValues:
value = pickle.dumps(crash, protocol = HIGHEST_PROTOCOL)
value = optimize(value)
else:
value = pickle.dumps(crash, protocol = 0)
finally:
crash.memoryMap = memoryMap
del memoryMap
del crash
if self.compressValues:
value = zlib.compress(value, zlib.Z_BEST_COMPRESSION)
if self.escapeValues:
value = value.encode('hex')
if self.binaryValues:
value = buffer(value)
return value | def marshall_value(self, value, storeMemoryMap = False) | Marshalls a Crash object to be used in the database.
By default the C{memoryMap} member is B{NOT} stored here.
@warning: Setting the C{storeMemoryMap} argument to C{True} can lead to
a severe performance penalty!
@type value: L{Crash}
@param value: Object to convert.
@type storeMemoryMap: bool
@param storeMemoryMap: C{True} to store the memory map, C{False}
otherwise.
@rtype: str
@return: Converted object. | 3.354664 | 3.347235 | 1.002219 |
value = str(value)
if self.escapeValues:
value = value.decode('hex')
if self.compressValues:
value = zlib.decompress(value)
value = pickle.loads(value)
return value | def unmarshall_value(self, value) | Unmarshalls a Crash object read from the database.
@type value: str
@param value: Object to convert.
@rtype: L{Crash}
@return: Converted object. | 4.066543 | 4.995083 | 0.814109 |
if crash not in self:
key = crash.key()
skey = self.marshall_key(key)
data = self.marshall_value(crash, storeMemoryMap = True)
self.__db[skey] = data | def add(self, crash) | Adds a new crash to the container.
If the crash appears to be already known, it's ignored.
@see: L{Crash.key}
@type crash: L{Crash}
@param crash: Crash object to add. | 9.111738 | 9.352784 | 0.974227 |
skey = self.marshall_key(key)
data = self.__db[skey]
crash = self.unmarshall_value(data)
return crash | def get(self, key) | Retrieves a crash from the container.
@type key: L{Crash} unique key.
@param key: Key of the crash to get.
@rtype: L{Crash} object.
@return: Crash matching the given key.
@see: L{iterkeys}
@warning: A B{copy} of each object is returned,
so any changes made to them will be lost.
To preserve changes do the following:
1. Keep a reference to the object.
2. Delete the object from the set.
3. Modify the object and add it again. | 7.779871 | 8.983549 | 0.866013 |
found = self._dao.find(signature=key, limit=1, order=-1)
if not found:
raise KeyError(key)
return found[0] | def get(self, key) | Retrieves a crash from the container.
@type key: L{Crash} signature.
@param key: Heuristic signature of the crash to get.
@rtype: L{Crash} object.
@return: Crash matching the given signature. If more than one is found,
retrieve the newest one.
@see: L{iterkeys}
@warning: A B{copy} of each object is returned,
so any changes made to them will be lost.
To preserve changes do the following:
1. Keep a reference to the object.
2. Delete the object from the set.
3. Modify the object and add it again. | 7.043251 | 8.088119 | 0.870815 |
self.__keys.add( crash.signature )
self.__count += 1 | def add(self, crash) | Adds a new crash to the container.
@note:
When the C{allowRepeatedKeys} parameter of the constructor
is set to C{False}, duplicated crashes are ignored.
@see: L{Crash.key}
@type crash: L{Crash}
@param crash: Crash object to add. | 14.419532 | 25.874846 | 0.55728 |
'''
Called when a context is stopped or a new context is made runnable.
'''
try:
if not prev and not next:
return
current_frame = sys._getframe()
if next:
register_tasklet_info(next)
# Ok, making next runnable: set the tracing facility in it.
debugger = get_global_debugger()
if debugger is not None:
next.trace_function = debugger.get_thread_local_trace_func()
frame = next.frame
if frame is current_frame:
frame = frame.f_back
if hasattr(frame, 'f_trace'): # Note: can be None (but hasattr should cover for that too).
frame.f_trace = debugger.get_thread_local_trace_func()
debugger = None
if prev:
register_tasklet_info(prev)
try:
for tasklet_ref, tasklet_info in dict_items(_weak_tasklet_registered_to_info): # Make sure it's a copy!
tasklet = tasklet_ref()
if tasklet is None or not tasklet.alive:
# Garbage-collected already!
try:
del _weak_tasklet_registered_to_info[tasklet_ref]
except KeyError:
pass
if tasklet_info.frame_id is not None:
remove_custom_frame(tasklet_info.frame_id)
else:
is_running = stackless.get_thread_info(tasklet.thread_id)[1] is tasklet
if tasklet is prev or (tasklet is not next and not is_running):
# the tasklet won't run after this scheduler action:
# - the tasklet is the previous tasklet
# - it is not the next tasklet and it is not an already running tasklet
frame = tasklet.frame
if frame is current_frame:
frame = frame.f_back
if frame is not None:
abs_real_path_and_base = get_abs_path_real_path_and_base_from_frame(frame)
# print >>sys.stderr, "SchedCB: %r, %d, '%s', '%s'" % (tasklet, frame.f_lineno, _filename, base)
if debugger.get_file_type(abs_real_path_and_base) is None:
tasklet_info.update_name()
if tasklet_info.frame_id is None:
tasklet_info.frame_id = add_custom_frame(frame, tasklet_info.tasklet_name, tasklet.thread_id)
else:
update_custom_frame(tasklet_info.frame_id, frame, tasklet.thread_id, name=tasklet_info.tasklet_name)
elif tasklet is next or is_running:
if tasklet_info.frame_id is not None:
# Remove info about stackless suspended when it starts to run.
remove_custom_frame(tasklet_info.frame_id)
tasklet_info.frame_id = None
finally:
tasklet = None
tasklet_info = None
frame = None
except:
pydev_log.exception()
if _application_set_schedule_callback is not None:
return _application_set_schedule_callback(prev, next) | def _schedule_callback(prev, next) | Called when a context is stopped or a new context is made runnable. | 3.92913 | 3.706296 | 1.060123 |
'''
This function should be called to patch the stackless module so that new tasklets are properly tracked in the
debugger.
'''
global _application_set_schedule_callback
_application_set_schedule_callback = stackless.set_schedule_callback(_schedule_callback)
def set_schedule_callback(callable):
global _application_set_schedule_callback
old = _application_set_schedule_callback
_application_set_schedule_callback = callable
return old
def get_schedule_callback():
global _application_set_schedule_callback
return _application_set_schedule_callback
set_schedule_callback.__doc__ = stackless.set_schedule_callback.__doc__
if hasattr(stackless, "get_schedule_callback"):
get_schedule_callback.__doc__ = stackless.get_schedule_callback.__doc__
stackless.set_schedule_callback = set_schedule_callback
stackless.get_schedule_callback = get_schedule_callback
if not hasattr(stackless.tasklet, "trace_function"):
# Older versions of Stackless, released before 2014
__call__.__doc__ = stackless.tasklet.__call__.__doc__
stackless.tasklet.__call__ = __call__
setup.__doc__ = stackless.tasklet.setup.__doc__
stackless.tasklet.setup = setup
run.__doc__ = stackless.run.__doc__
stackless.run = run | def patch_stackless() | This function should be called to patch the stackless module so that new tasklets are properly tracked in the
debugger. | 2.395071 | 2.016962 | 1.187465 |
# Get the Process object from the snapshot,
# if missing create a new one.
try:
aProcess = self.system.get_process(dwProcessId)
except KeyError:
aProcess = Process(dwProcessId)
# Warn when mixing 32 and 64 bits.
# This also allows the user to stop attaching altogether,
# depending on how the warnings are configured.
if System.bits != aProcess.get_bits():
msg = "Mixture of 32 and 64 bits is considered experimental." \
" Use at your own risk!"
warnings.warn(msg, MixedBitsWarning)
# Attach to the process.
win32.DebugActiveProcess(dwProcessId)
# Add the new PID to the set of debugees.
self.__attachedDebugees.add(dwProcessId)
# Match the system kill-on-exit flag to our own.
self.__setSystemKillOnExitMode()
# If the Process object was not in the snapshot, add it now.
if not self.system.has_process(dwProcessId):
self.system._add_process(aProcess)
# Scan the process threads and loaded modules.
# This is prefered because the thread and library events do not
# properly give some information, like the filename for each module.
aProcess.scan_threads()
aProcess.scan_modules()
# Return the Process object, like the execv() and execl() methods.
return aProcess | def attach(self, dwProcessId) | Attaches to an existing process for debugging.
@see: L{detach}, L{execv}, L{execl}
@type dwProcessId: int
@param dwProcessId: Global ID of a process to attach to.
@rtype: L{Process}
@return: A new Process object. Normally you don't need to use it now,
it's best to interact with the process from the event handler.
@raise WindowsError: Raises an exception on error.
Depending on the circumstances, the debugger may or may not have
attached to the target process. | 6.431426 | 5.970711 | 1.077163 |
if type(argv) in (str, compat.unicode):
raise TypeError("Debug.execv expects a list, not a string")
lpCmdLine = self.system.argv_to_cmdline(argv)
return self.execl(lpCmdLine, **kwargs) | def execv(self, argv, **kwargs) | Starts a new process for debugging.
This method uses a list of arguments. To use a command line string
instead, use L{execl}.
@see: L{attach}, L{detach}
@type argv: list( str... )
@param argv: List of command line arguments to pass to the debugee.
The first element must be the debugee executable filename.
@type bBreakOnEntryPoint: bool
@keyword bBreakOnEntryPoint: C{True} to automatically set a breakpoint
at the program entry point.
@type bConsole: bool
@keyword bConsole: True to inherit the console of the debugger.
Defaults to C{False}.
@type bFollow: bool
@keyword bFollow: C{True} to automatically attach to child processes.
Defaults to C{False}.
@type bInheritHandles: bool
@keyword bInheritHandles: C{True} if the new process should inherit
it's parent process' handles. Defaults to C{False}.
@type bSuspended: bool
@keyword bSuspended: C{True} to suspend the main thread before any code
is executed in the debugee. Defaults to C{False}.
@keyword dwParentProcessId: C{None} or C{0} if the debugger process
should be the parent process (default), or a process ID to
forcefully set as the debugee's parent (only available for Windows
Vista and above).
In hostile mode, the default is not the debugger process but the
process ID for "explorer.exe".
@type iTrustLevel: int or None
@keyword iTrustLevel: Trust level.
Must be one of the following values:
- 0: B{No trust}. May not access certain resources, such as
cryptographic keys and credentials. Only available since
Windows XP and 2003, desktop editions. This is the default
in hostile mode.
- 1: B{Normal trust}. Run with the same privileges as a normal
user, that is, one that doesn't have the I{Administrator} or
I{Power User} user rights. Only available since Windows XP
and 2003, desktop editions.
- 2: B{Full trust}. Run with the exact same privileges as the
current user. This is the default in normal mode.
@type bAllowElevation: bool
@keyword bAllowElevation: C{True} to allow the child process to keep
UAC elevation, if the debugger itself is running elevated. C{False}
to ensure the child process doesn't run with elevation. Defaults to
C{True}.
This flag is only meaningful on Windows Vista and above, and if the
debugger itself is running with elevation. It can be used to make
sure the child processes don't run elevated as well.
This flag DOES NOT force an elevation prompt when the debugger is
not running with elevation.
Note that running the debugger with elevation (or the Python
interpreter at all for that matter) is not normally required.
You should only need to if the target program requires elevation
to work properly (for example if you try to debug an installer).
@rtype: L{Process}
@return: A new Process object. Normally you don't need to use it now,
it's best to interact with the process from the event handler.
@raise WindowsError: Raises an exception on error. | 7.965952 | 10.200554 | 0.780933 |
def add_existing_session(self, dwProcessId, bStarted = False):
    """
    Register with the debugger a process that for some reason was attached
    to outside of WinAppDbg (for example when integrating with other tools).

    You don't normally need to call this method. Most users should call
    L{attach}, L{execv} or L{execl} instead.

    @type  dwProcessId: int
    @param dwProcessId: Global process ID.

    @type  bStarted: bool
    @param bStarted: C{True} if the process was started by the debugger,
        or C{False} if the process was attached to instead.

    @raise WindowsError: The target process does not exist, is not attached
        to the debugger anymore.
    """
    # Make sure the system snapshot knows about this process.
    if self.system.has_process(dwProcessId):
        aProcess = self.system.get_process(dwProcessId)
    else:
        aProcess = Process(dwProcessId)
        self.system._add_process(aProcess)

    # Getting a handle tests for debug privileges on the target process.
    # Raises WindowsException on error.
    aProcess.get_handle()

    # Remember the process ID as a debugee.
    # NOTE(review): this branch looks inverted with respect to the set
    # names (bStarted -> attached set) -- confirm against the class.
    if bStarted:
        self.__attachedDebugees.add(dwProcessId)
    else:
        self.__startedDebugees.add(dwProcessId)

    # Propagate our kill-on-exit flag to the system.
    self.__setSystemKillOnExitMode()

    # Scan threads and modules now. This is prefered because the thread
    # and library events do not properly give some information, like the
    # filename for each module.
    aProcess.scan_threads()
    aProcess.scan_modules()
def __cleanup_process(self, dwProcessId, bIgnoreExceptions = False):
    """
    Perform the necessary cleanup of a process about to be killed or
    detached from. Called internally by L{kill} and L{detach}.

    @type  dwProcessId: int
    @param dwProcessId: Global ID of a process to kill.

    @type  bIgnoreExceptions: bool
    @param bIgnoreExceptions: C{True} to ignore any exceptions that may be
        raised when killing the process.

    @raise WindowsError: Raises an exception on error, unless
        C{bIgnoreExceptions} is C{True}.
    """

    # Either re-raise the active exception or degrade it to a warning,
    # depending on the bIgnoreExceptions flag.
    def warn_or_raise():
        if not bIgnoreExceptions:
            raise
        warnings.warn(str(sys.exc_info()[1]), RuntimeWarning)

    # Nothing to clean up if the process isn't being debugged.
    if not self.is_debugee(dwProcessId):
        return

    # Make sure a Process object exists or the following calls fail.
    if not self.system.has_process(dwProcessId):
        aProcess = Process(dwProcessId)
        try:
            aProcess.get_handle()
        except WindowsError:
            pass    # fails later on with more specific reason
        self.system._add_process(aProcess)

    # Erase all breakpoints in the process.
    try:
        self.erase_process_breakpoints(dwProcessId)
    except Exception:
        warn_or_raise()

    # Stop tracing all threads in the process.
    try:
        self.stop_tracing_process(dwProcessId)
    except Exception:
        warn_or_raise()

    # The process is no longer a debugee.
    try:
        self.__attachedDebugees.discard(dwProcessId)
        self.__startedDebugees.discard(dwProcessId)
    except Exception:
        warn_or_raise()

    # Clear and remove the process from the snapshot.
    # If the user wants to do something with it after detaching
    # a new Process instance should be created.
    try:
        if self.system.has_process(dwProcessId):
            try:
                self.system.get_process(dwProcessId).clear()
            finally:
                self.system._del_process(dwProcessId)
    except Exception:
        warn_or_raise()

    # If the last debugging event is related to this process, forget it.
    try:
        if self.lastEvent and self.lastEvent.get_pid() == dwProcessId:
            self.lastEvent = None
    except Exception:
        warn_or_raise()
def kill(self, dwProcessId, bIgnoreExceptions = False):
    """
    Kills a process currently being debugged.

    @see: L{detach}

    @type  dwProcessId: int
    @param dwProcessId: Global ID of a process to kill.

    @type  bIgnoreExceptions: bool
    @param bIgnoreExceptions: C{True} to ignore any exceptions that may be
        raised when killing the process.

    @raise WindowsError: Raises an exception on error, unless
        C{bIgnoreExceptions} is C{True}.
    """
    def warn_or_raise():
        if not bIgnoreExceptions:
            raise
        warnings.warn(str(sys.exc_info()[1]), RuntimeWarning)

    # Keep a reference to the process. We'll need it later.
    try:
        aProcess = self.system.get_process(dwProcessId)
    except KeyError:
        aProcess = Process(dwProcessId)

    # Cleanup all data referring to the process.
    self.__cleanup_process(dwProcessId,
                           bIgnoreExceptions = bIgnoreExceptions)

    # Suspend, detach, then kill. The nested try/finally blocks make
    # sure the process gets killed even if detaching fails.
    try:
        try:
            if self.is_debugee(dwProcessId):
                try:
                    if aProcess.is_alive():
                        aProcess.suspend()
                finally:
                    self.detach(dwProcessId,
                                bIgnoreExceptions = bIgnoreExceptions)
        finally:
            aProcess.kill()
    except Exception:
        warn_or_raise()

    # Cleanup what remains of the process data.
    try:
        aProcess.clear()
    except Exception:
        warn_or_raise()
def kill_all(self, bIgnoreExceptions = False):
    """
    Kills all processes currently being debugged.

    @type  bIgnoreExceptions: bool
    @param bIgnoreExceptions: C{True} to ignore any exceptions that may be
        raised when killing each process. C{False} to stop and raise an
        exception when encountering an error.

    @raise WindowsError: Raises an exception on error, unless
        C{bIgnoreExceptions} is C{True}.
    """
    # Kill every debugee, one at a time.
    for dwProcessId in self.get_debugee_pids():
        self.kill(dwProcessId, bIgnoreExceptions = bIgnoreExceptions)
def detach(self, dwProcessId, bIgnoreExceptions = False):
    """
    Detaches from a process currently being debugged.

    @note: On Windows 2000 and below the process is killed.

    @see: L{attach}, L{detach_from_all}

    @type  dwProcessId: int
    @param dwProcessId: Global ID of a process to detach from.

    @type  bIgnoreExceptions: bool
    @param bIgnoreExceptions: C{True} to ignore any exceptions that may be
        raised when detaching. C{False} to stop and raise an exception when
        encountering an error.

    @raise WindowsError: Raises an exception on error, unless
        C{bIgnoreExceptions} is C{True}.
    """
    def warn_or_raise():
        if not bIgnoreExceptions:
            raise
        warnings.warn(str(sys.exc_info()[1]), RuntimeWarning)

    # Keep a reference to the process. We'll need it later.
    try:
        aProcess = self.system.get_process(dwProcessId)
    except KeyError:
        aProcess = Process(dwProcessId)

    # Determine if there is support for detaching.
    # This check should only fail on Windows 2000 and older.
    can_detach = hasattr(win32, 'DebugActiveProcessStop')

    # Continue the last event before detaching.
    # XXX not sure about this...
    try:
        if can_detach and self.lastEvent and \
                                self.lastEvent.get_pid() == dwProcessId:
            self.cont(self.lastEvent)
    except Exception:
        warn_or_raise()

    # Cleanup all data referring to the process.
    self.__cleanup_process(dwProcessId,
                           bIgnoreExceptions = bIgnoreExceptions)

    try:
        # Detach from the process.
        # On Windows 2000 and before, kill the process.
        if can_detach:
            try:
                win32.DebugActiveProcessStop(dwProcessId)
            except Exception:
                warn_or_raise()
        else:
            try:
                aProcess.kill()
            except Exception:
                warn_or_raise()
    finally:
        # Cleanup what remains of the process data.
        aProcess.clear()
def detach_from_all(self, bIgnoreExceptions = False):
    """
    Detaches from all processes currently being debugged.

    @note: To better handle last debugging event, call L{stop} instead.

    @type  bIgnoreExceptions: bool
    @param bIgnoreExceptions: C{True} to ignore any exceptions that may be
        raised when detaching.

    @raise WindowsError: Raises an exception on error, unless
        C{bIgnoreExceptions} is C{True}.
    """
    # Detach from every debugee, one at a time.
    for dwProcessId in self.get_debugee_pids():
        self.detach(dwProcessId, bIgnoreExceptions = bIgnoreExceptions)
def wait(self, dwMilliseconds = None):
    """
    Waits for the next debug event.

    @see: L{cont}, L{dispatch}, L{loop}

    @type  dwMilliseconds: int
    @param dwMilliseconds: (Optional) Timeout in milliseconds.
        Use C{INFINITE} or C{None} for no timeout.

    @rtype:  L{Event}
    @return: An event that occured in one of the debugees.

    @raise WindowsError: Raises an exception on error.
        If no target processes are left to debug,
        the error code is L{win32.ERROR_INVALID_HANDLE}.
    """
    # Block until the OS reports a debug event, wrap it in an Event
    # object, and remember it as the last event before returning it.
    raw = win32.WaitForDebugEvent(dwMilliseconds)
    self.lastEvent = event = EventFactory.get(self, raw)
    return event
def dispatch(self, event = None):
    """
    Calls the debug event notify callbacks.

    @see: L{cont}, L{loop}, L{wait}

    @type  event: L{Event}
    @param event: (Optional) Event object returned by L{wait}.

    @raise WindowsError: Raises an exception on error.
    """
    # Default to the last event; ignore dummy events.
    if event is None:
        event = self.lastEvent
    if not event:
        return

    # Determine the default continue status for this event.
    # XXX HACK
    # Some undocumented flags are used, but as far as I know in those
    # versions of Windows that don't support them they should behave
    # like DGB_CONTINUE.
    code = event.get_event_code()
    if code == win32.EXCEPTION_DEBUG_EVENT:
        # At this point, by default some exception types are swallowed by
        # the debugger, because we don't know yet if it was caused by the
        # debugger itself or the debugged process.
        #
        # Later on (see breakpoint.py) if we determined the exception was
        # not caused directly by the debugger itself, we set the default
        # back to passing the exception to the debugee.
        #
        # The "invalid handle" exception is also swallowed by the debugger
        # because it's not normally generated by the debugee. But in
        # hostile mode we want to pass it to the debugee, as it may be the
        # result of an anti-debug trick. In that case it's best to disable
        # bad handles detection with Microsoft's gflags.exe utility. See:
        # http://msdn.microsoft.com/en-us/library/windows/hardware/ff549557(v=vs.85).aspx
        exc_code = event.get_exception_code()
        if exc_code in (
                win32.EXCEPTION_BREAKPOINT,
                win32.EXCEPTION_WX86_BREAKPOINT,
                win32.EXCEPTION_SINGLE_STEP,
                win32.EXCEPTION_GUARD_PAGE,
                ):
            event.continueStatus = win32.DBG_CONTINUE
        elif exc_code == win32.EXCEPTION_INVALID_HANDLE:
            if self.__bHostileCode:
                event.continueStatus = win32.DBG_EXCEPTION_NOT_HANDLED
            else:
                event.continueStatus = win32.DBG_CONTINUE
        else:
            event.continueStatus = win32.DBG_EXCEPTION_NOT_HANDLED
    elif code == win32.RIP_EVENT and \
                            event.get_rip_type() == win32.SLE_ERROR:
        # RIP events that signal fatal events should kill the process.
        event.continueStatus = win32.DBG_TERMINATE_PROCESS
    else:
        # Other events need this continue code.
        # Sometimes other codes can be used and are ignored, sometimes not.
        # For example, when using the DBG_EXCEPTION_NOT_HANDLED code,
        # debug strings are sent twice (!)
        event.continueStatus = win32.DBG_CONTINUE

    # Dispatch the debug event.
    return EventDispatcher.dispatch(self, event)
def cont(self, event = None):
    """
    Resumes execution after processing a debug event.

    @see: dispatch(), loop(), wait()

    @type  event: L{Event}
    @param event: (Optional) Event object returned by L{wait}.

    @raise WindowsError: Raises an exception on error.
    """
    # Default to the last event; ignore dummy events.
    if event is None:
        event = self.lastEvent
    if not event:
        return

    # Get the event continue status information.
    dwProcessId      = event.get_pid()
    dwThreadId       = event.get_tid()
    dwContinueStatus = event.continueStatus

    # Only continue if the process is still being debugged.
    if self.is_debugee(dwProcessId):

        # Try to flush the instruction cache.
        try:
            if self.system.has_process(dwProcessId):
                aProcess = self.system.get_process(dwProcessId)
            else:
                aProcess = Process(dwProcessId)
            aProcess.flush_instruction_cache()
        except WindowsError:
            pass

        # XXX TODO
        #
        # Try to execute the UnhandledExceptionFilter for second chance
        # exceptions, at least when in hostile mode (in normal mode it
        # would be breaking compatibility, as users may actually expect
        # second chance exceptions to be raised again).
        #
        # Reportedly in Windows 7 (maybe in Vista too) this seems to be
        # happening already. In XP and below the UnhandledExceptionFilter
        # was never called for processes being debugged.

        # Continue execution of the debugee.
        win32.ContinueDebugEvent(dwProcessId, dwThreadId, dwContinueStatus)

    # If the event is the last event, forget it.
    if event == self.lastEvent:
        self.lastEvent = None
def stop(self, bIgnoreExceptions = True):
    """
    Stops debugging all processes.

    If the kill on exit mode is on, debugged processes are killed when the
    debugger is stopped. Otherwise when the debugger stops it detaches from
    all debugged processes and leaves them running (default). For more
    details see: L{__init__}

    @note: This method is better than L{detach_from_all} because it can
        gracefully handle the last debugging event before detaching.

    @type  bIgnoreExceptions: bool
    @param bIgnoreExceptions: C{True} to ignore any exceptions that may be
        raised when detaching.
    """
    def warn_or_raise():
        if not bIgnoreExceptions:
            raise
        warnings.warn(str(sys.exc_info()[1]), RuntimeWarning)

    # Determine if we have a last debug event that we need to continue.
    event = None
    try:
        event = self.lastEvent
        has_event = bool(event)
    except Exception:
        warn_or_raise()
        has_event = False

    if has_event:

        # Disable all breakpoints in the process before resuming execution.
        try:
            self.disable_process_breakpoints(event.get_pid())
        except Exception:
            warn_or_raise()

        # Disable all breakpoints in the thread before resuming execution.
        try:
            self.disable_thread_breakpoints(event.get_tid())
        except Exception:
            warn_or_raise()

        # Resume execution.
        try:
            event.continueDebugEvent = win32.DBG_CONTINUE
            self.cont(event)
        except Exception:
            warn_or_raise()

    # Detach from or kill all debugees.
    try:
        if self.__bKillOnExit:
            self.kill_all(bIgnoreExceptions)
        else:
            self.detach_from_all(bIgnoreExceptions)
    except Exception:
        warn_or_raise()

    # Cleanup the process snapshots.
    try:
        self.system.clear()
    except Exception:
        warn_or_raise()

    # Close all Win32 handles the Python garbage collector failed to close.
    self.force_garbage_collection(bIgnoreExceptions)
def next(self):
    """
    Handles the next debug event.

    @see: L{cont}, L{dispatch}, L{wait}, L{stop}

    @raise WindowsError: Raises an exception on error.
        If the wait operation causes an error, debugging is stopped
        (meaning all debugees are either killed or detached from).

        If the event dispatching causes an error, the event is still
        continued before returning. This may happen, for example, if the
        event handler raises an exception nobody catches.
    """
    # If waiting fails, tear down the whole debugging session.
    try:
        self.wait()
    except Exception:
        self.stop()
        raise
    # Always continue the event, even if dispatching raised.
    try:
        self.dispatch()
    finally:
        self.cont()
def interactive(self, bConfirmQuit = True, bShowBanner = True):
    """
    Start an interactive debugging session.

    @type  bConfirmQuit: bool
    @param bConfirmQuit: Set to C{True} to ask the user for confirmation
        before closing the session, C{False} otherwise.

    @type  bShowBanner: bool
    @param bShowBanner: Set to C{True} to show a banner before entering
        the session and after leaving it, C{False} otherwise.

    @warn: This will temporarily disable the user-defined event handler!

    This method returns when the user closes the session.
    """
    # BUGFIX: the banners were previously printed unconditionally,
    # ignoring the documented bShowBanner parameter.
    if bShowBanner:
        print('')
        print("-" * 79)
        print("Interactive debugging session started.")
        print("Use the \"help\" command to list all available commands.")
        print("Use the \"quit\" command to close this session.")
        print("-" * 79)
        if self.lastEvent is None:
            print('')

    # Run the console loop, restoring state and saving history even if
    # the loop raises.
    console = ConsoleDebugger()
    console.confirm_quit = bConfirmQuit
    console.load_history()
    try:
        console.start_using_debugger(self)
        console.loop()
    finally:
        console.stop_using_debugger()
        console.save_history()

    if bShowBanner:
        print('')
        print("-" * 79)
        print("Interactive debugging session closed.")
        print("-" * 79)
        print('')
def force_garbage_collection(bIgnoreExceptions = True):
    """
    Close all Win32 handles the Python garbage collector failed to close.

    @type  bIgnoreExceptions: bool
    @param bIgnoreExceptions: C{True} to ignore any exceptions that may be
        raised when detaching.
    """
    try:
        import gc
        gc.collect()
        bRecollect = False
        # Walk a copy of gc.garbage, severing the references that keep
        # each known object type alive, and drop it from the list.
        for obj in list(gc.garbage):
            try:
                if isinstance(obj, win32.Handle):
                    obj.close()
                elif isinstance(obj, Event):
                    obj.debug = None
                elif isinstance(obj, Process):
                    obj.clear()
                elif isinstance(obj, Thread):
                    obj.set_process(None)
                    obj.clear()
                elif isinstance(obj, Module):
                    obj.set_process(None)
                elif isinstance(obj, Window):
                    obj.set_process(None)
                else:
                    continue
                gc.garbage.remove(obj)
                del obj
                bRecollect = True
            except Exception:
                if not bIgnoreExceptions:
                    raise
                warnings.warn(str(sys.exc_info()[1]), RuntimeWarning)
        # Collect again if we managed to free anything.
        if bRecollect:
            gc.collect()
    except Exception:
        if not bIgnoreExceptions:
            raise
        warnings.warn(str(sys.exc_info()[1]), RuntimeWarning)
def _notify_create_process(self, event):
    """
    Notify the creation of a new process.

    @warning: This method is meant to be used internally by the debugger.

    @type  event: L{CreateProcessEvent}
    @param event: Create process event.

    @rtype:  bool
    @return: C{True} to call the user-defined handle, C{False} otherwise.
    """
    # Register the new debugee if we didn't know about it yet.
    dwProcessId = event.get_pid()
    if dwProcessId not in self.__attachedDebugees and \
       dwProcessId not in self.__startedDebugees:
        self.__startedDebugees.add(dwProcessId)

    retval = self.system._notify_create_process(event)

    # Set a breakpoint on the program's entry point if requested.
    # Try not to use the Event object's entry point value, as in some cases
    # it may be wrong. See: http://pferrie.host22.com/misc/lowlevel3.htm
    if dwProcessId in self.__breakOnEP:
        try:
            lpEntryPoint = event.get_process().get_entry_point()
        except Exception:
            lpEntryPoint = event.get_start_address()

        # It'd be best to use a hardware breakpoint instead, at least in
        # hostile mode. But since the main thread's context gets smashed
        # by the loader, I haven't found a way to make it work yet.
        self.break_at(dwProcessId, lpEntryPoint)

    # Defeat isDebuggerPresent by patching PEB->BeingDebugged.
    # When we do this, some debugging APIs cease to work as expected.
    # For example, the system breakpoint isn't hit when we attach.
    # For that reason we need to define a code breakpoint at the
    # code location where a new thread is spawned by the debugging
    # APIs, ntdll!DbgUiRemoteBreakin.
    if self.__bHostileCode:
        aProcess = event.get_process()
        try:
            hProcess = aProcess.get_handle(win32.PROCESS_QUERY_INFORMATION)
            pbi = win32.NtQueryInformationProcess(
                hProcess, win32.ProcessBasicInformation)
            ptr = pbi.PebBaseAddress + 2
            if aProcess.peek(ptr, 1) == '\x01':
                aProcess.poke(ptr, '\x00')
        except WindowsError:
            e = sys.exc_info()[1]
            warnings.warn(
                "Cannot patch PEB->BeingDebugged, reason: %s" % e.strerror)
    return retval
def _notify_load_dll(self, event):
    """
    Notify the load of a new module.

    @warning: This method is meant to be used internally by the debugger.

    @type  event: L{LoadDLLEvent}
    @param event: Load DLL event.

    @rtype:  bool
    @return: C{True} to call the user-defined handle, C{False} otherwise.
    """
    # Pass the event to the breakpoint container, then to the process
    # where the DLL was loaded.
    bCallHandler = _BreakpointContainer._notify_load_dll(self, event)
    aProcess = event.get_process()
    bCallHandler = aProcess._notify_load_dll(event) and bCallHandler

    # Anti-anti-debugging tricks on ntdll.dll.
    if self.__bHostileCode and event.get_module().match_name('ntdll.dll'):
        # Since we've overwritten the PEB to hide
        # ourselves, we no longer have the system
        # breakpoint when attaching to the process.
        # Set a breakpoint at ntdll!DbgUiRemoteBreakin
        # instead (that's where the debug API spawns
        # it's auxiliary threads). This also defeats
        # a simple anti-debugging trick: the hostile
        # process could have overwritten the int3
        # instruction at the system breakpoint.
        self.break_at(aProcess.get_pid(),
                      aProcess.resolve_label('ntdll!DbgUiRemoteBreakin'))

    return bCallHandler
def _notify_exit_process(self, event):
    """
    Notify the termination of a process.

    @warning: This method is meant to be used internally by the debugger.

    @type  event: L{ExitProcessEvent}
    @param event: Exit process event.

    @rtype:  bool
    @return: C{True} to call the user-defined handle, C{False} otherwise.
    """
    bCallHandler1 = _BreakpointContainer._notify_exit_process(self, event)
    bCallHandler2 = self.system._notify_exit_process(event)

    # Detach from the dead process; some failures are expected here.
    try:
        self.detach( event.get_pid() )
    except WindowsError:
        # ERROR_INVALID_PARAMETER just means the process was already gone.
        e = sys.exc_info()[1]
        if e.winerror != win32.ERROR_INVALID_PARAMETER:
            warnings.warn(
                "Failed to detach from dead process, reason: %s" % str(e),
                RuntimeWarning)
    except Exception:
        e = sys.exc_info()[1]
        warnings.warn(
            "Failed to detach from dead process, reason: %s" % str(e),
            RuntimeWarning)

    return bCallHandler1 and bCallHandler2
def _notify_exit_thread(self, event):
    """
    Notify the termination of a thread.

    @warning: This method is meant to be used internally by the debugger.

    @type  event: L{ExitThreadEvent}
    @param event: Exit thread event.

    @rtype:  bool
    @return: C{True} to call the user-defined handle, C{False} otherwise.
    """
    # Both notifications must run; the user handler is called only when
    # both agree.
    bContainer = _BreakpointContainer._notify_exit_thread(self, event)
    bProcess   = event.get_process()._notify_exit_thread(event)
    return bContainer and bProcess
def _notify_unload_dll(self, event):
    """
    Notify the unload of a module.

    @warning: This method is meant to be used internally by the debugger.

    @type  event: L{UnloadDLLEvent}
    @param event: Unload DLL event.

    @rtype:  bool
    @return: C{True} to call the user-defined handle, C{False} otherwise.
    """
    # Both notifications must run; the user handler is called only when
    # both agree.
    bContainer = _BreakpointContainer._notify_unload_dll(self, event)
    bProcess   = event.get_process()._notify_unload_dll(event)
    return bContainer and bProcess
def _notify_debug_control_c(self, event):
    """
    Notify of a Debug Ctrl-C exception.

    @warning: This method is meant to be used internally by the debugger.

    @note: This exception is only raised when a debugger is attached, and
        applications are not supposed to handle it, so we need to handle it
        ourselves or the application may crash.

    @see: U{http://msdn.microsoft.com/en-us/library/aa363082(VS.85).aspx}

    @type  event: L{ExceptionEvent}
    @param event: Debug Ctrl-C exception event.

    @rtype:  bool
    @return: C{True} to call the user-defined handle, C{False} otherwise.
    """
    # Mark first chance exceptions as handled so the debugee doesn't
    # see them; always let the user handler run.
    if event.is_first_chance():
        event.continueStatus = win32.DBG_EXCEPTION_HANDLED
    return True
def _notify_ms_vc_exception(self, event):
    """
    Notify of a Microsoft Visual C exception.

    @warning: This method is meant to be used internally by the debugger.

    @note: This allows the debugger to understand the
        Microsoft Visual C thread naming convention.

    @see: U{http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx}

    @type  event: L{ExceptionEvent}
    @param event: Microsoft Visual C exception event.

    @rtype:  bool
    @return: C{True} to call the user-defined handle, C{False} otherwise.
    """
    dwType = event.get_exception_information(0)
    # 0x1000 identifies the "set thread name" convention.
    if dwType == 0x1000:
        pszName    = event.get_exception_information(1)
        dwThreadId = event.get_exception_information(2)
        dwFlags    = event.get_exception_information(3)  # unused

        aProcess = event.get_process()
        szName = aProcess.peek_string(pszName, fUnicode = False)
        if szName:

            # -1 means "the current thread".
            if dwThreadId == -1:
                dwThreadId = event.get_tid()

            if aProcess.has_thread(dwThreadId):
                aThread = aProcess.get_thread(dwThreadId)
            else:
                aThread = Thread(dwThreadId)
                aProcess._add_thread(aThread)

            ## if aThread.get_name() is None:
            ##     aThread.set_name(szName)
            aThread.set_name(szName)

    return True
def load_grammar(gt="Grammar.txt", gp=None,
                 save=True, force=False, logger=None):
    """Load the grammar (maybe from a pickle).

    gt is the path to the grammar text file; gp the path to the pickle
    cache (derived from gt and the interpreter version when not given,
    since pickles are not portable across Python versions). When the
    pickle is missing or older than the text file (or force is true),
    the grammar is regenerated and, if save is true, re-pickled.
    Returns a grammar.Grammar instance.
    """
    if logger is None:
        logger = logging.getLogger()
    if gp is None:
        head, tail = os.path.splitext(gt)
        if tail == ".txt":
            tail = ""
        gp = head + tail + ".".join(map(str, sys.version_info)) + ".pickle"
    if force or not _newer(gp, gt):
        logger.info("Generating grammar tables from %s", gt)
        g = pgen.generate_grammar(gt)
        if save:
            logger.info("Writing grammar tables to %s", gp)
            try:
                g.dump(gp)
            # BUGFIX: "except IOError, e:" is Python-2-only syntax; the
            # "as" form works on Python 2.6+ and Python 3.
            except IOError as e:
                # Failure to cache the pickle is not fatal.
                logger.info("Writing failed:"+str(e))
    else:
        g = grammar.Grammar()
        g.load(gp)
    return g
if not os.path.exists(a):
return False
if not os.path.exists(b):
return True
return os.path.getmtime(a) >= os.path.getmtime(b) | def _newer(a, b) | Inquire whether file a was written since file b. | 1.937139 | 1.69715 | 1.141407 |
def main(*args):
    """Main program, when run as a script: produce grammar pickle files.

    Calls load_grammar for each argument, a path to a grammar text file.
    """
    # Fall back to the command line arguments when none are given.
    args = args or sys.argv[1:]
    logging.basicConfig(level=logging.INFO, stream=sys.stdout,
                        format='%(message)s')
    for gt in args:
        load_grammar(gt, save=True, force=True)
    return True
def parse_tokens(self, tokens, debug=False):
    """Parse a series of tokens and return the syntax tree."""
    # XXX Move the prefix computation into a wrapper around tokenize.
    p = parse.Parser(self.grammar, self.convert)
    p.setup()
    lineno = 1
    column = 0
    tok_type = value = start = end = line_text = None
    prefix = u""
    for tok_type, value, start, end, line_text in tokens:
        # Accumulate any skipped text (whitespace) into the prefix.
        if start != (lineno, column):
            assert (lineno, column) <= start, ((lineno, column), start)
            s_lineno, s_column = start
            if lineno < s_lineno:
                prefix += "\n" * (s_lineno - lineno)
                lineno = s_lineno
                column = 0
            if column < s_column:
                prefix += line_text[column:s_column]
                column = s_column
        # Comments and non-logical newlines become part of the prefix.
        if tok_type in (tokenize.COMMENT, tokenize.NL):
            prefix += value
            lineno, column = end
            if value.endswith("\n"):
                lineno += 1
                column = 0
            continue
        # Map generic OP tokens to their specific grammar token.
        if tok_type == token.OP:
            tok_type = grammar.opmap[value]
        if debug:
            self.logger.debug("%s %r (prefix=%r)",
                              token.tok_name[tok_type], value, prefix)
        if p.addtoken(tok_type, value, (prefix, start)):
            if debug:
                self.logger.debug("Stop.")
            break
        prefix = ""
        lineno, column = end
        if value.endswith("\n"):
            lineno += 1
            column = 0
    else:
        # We never broke out -- EOF is too soon (how can this happen???)
        raise parse.ParseError("incomplete input",
                               tok_type, value, (prefix, start))
    return p.rootnode
def parse_stream_raw(self, stream, debug=False):
    """Parse a stream and return the syntax tree."""
    # Tokenize lazily off the stream's readline and hand off to the
    # token-level parser.
    return self.parse_tokens(
        tokenize.generate_tokens(stream.readline), debug)
def parse_file(self, filename, encoding=None, debug=False):
    """Parse a file and return the syntax tree."""
    # codecs.open streams support the context manager protocol, which
    # guarantees the file is closed even when parsing raises.
    with codecs.open(filename, "r", encoding) as stream:
        return self.parse_stream(stream, debug)
def parse_string(self, text, debug=False):
    """Parse a string and return the syntax tree."""
    # Wrap the text in a file-like object so it can be tokenized.
    readline = StringIO.StringIO(text).readline
    return self.parse_tokens(tokenize.generate_tokens(readline), debug)
def dump_threads(stream=None):
    '''
    Helper to dump thread info.
    '''
    if stream is None:
        stream = sys.stderr

    # Map thread idents to a descriptive name (best-effort only).
    thread_id_to_name = {}
    try:
        for t in threading.enumerate():
            thread_id_to_name[t.ident] = '%s (daemon: %s, pydevd thread: %s)' % (
                t.name, t.daemon, getattr(t, 'is_pydev_daemon_thread', False))
    except:
        pass

    from _pydevd_bundle.pydevd_additional_thread_info_regular import _current_frames

    stream.write('===============================================================================\n')
    stream.write('Threads running\n')
    stream.write('================================= Thread Dump =================================\n')
    stream.flush()

    for thread_id, frame in _current_frames().items():
        stream.write('\n-------------------------------------------------------------------------------\n')
        stream.write(" Thread %s" % thread_id_to_name.get(thread_id, thread_id))
        stream.write('\n\n')

        for i, (filename, lineno, name, line) in enumerate(traceback.extract_stack(frame)):
            stream.write(' File "%s", line %d, in %s\n' % (filename, lineno, name))
            if line:
                stream.write("   %s\n" % (line.strip()))

            # For the outermost frame, also show str(self) when available.
            if i == 0 and 'self' in frame.f_locals:
                stream.write('   self: ')
                try:
                    stream.write(str(frame.f_locals['self']))
                except:
                    stream.write('Unable to get str of: %s' % (type(frame.f_locals['self']),))
                stream.write('\n')
        stream.flush()

    stream.write('\n=============================== END Thread Dump ===============================')
    stream.flush()
def find_window(className = None, windowName = None):
    """
    Find the first top-level window in the current desktop to match the
    given class name and/or window name. If neither are provided any
    top-level window will match.

    @see: L{get_window_at}

    @type  className: str
    @param className: (Optional) Class name of the window to find.
        If C{None} or not used any class name will match the search.

    @type  windowName: str
    @param windowName: (Optional) Caption text of the window to find.
        If C{None} or not used any caption text will match the search.

    @rtype:  L{Window} or None
    @return: A window that matches the request. There may be more matching
        windows, but this method only returns one. If no matching window
        is found, the return value is C{None}.

    @raise WindowsError: An error occured while processing this request.
    """
    # I'd love to reverse the order of the parameters
    # but that might create some confusion. :(
    hWnd = win32.FindWindow(className, windowName)
    return Window(hWnd) if hWnd else None
def request_debug_privileges(cls, bIgnoreExceptions = False):
    """
    Requests debug privileges.

    This may be needed to debug processes running as SYSTEM
    (such as services) since Windows XP.

    @type  bIgnoreExceptions: bool
    @param bIgnoreExceptions: C{True} to ignore any exceptions that may be
        raised when requesting debug privileges.

    @rtype:  bool
    @return: C{True} on success, C{False} on failure.

    @raise WindowsError: Raises an exception on error, unless
        C{bIgnoreExceptions} is C{True}.
    """
    try:
        cls.request_privileges(win32.SE_DEBUG_NAME)
    except Exception:
        if not bIgnoreExceptions:
            raise
        return False
    return True
def drop_debug_privileges(cls, bIgnoreExceptions = False):
    """
    Drops debug privileges.

    This may be needed to avoid being detected
    by certain anti-debug tricks.

    @type  bIgnoreExceptions: bool
    @param bIgnoreExceptions: C{True} to ignore any exceptions that may be
        raised when dropping debug privileges.

    @rtype:  bool
    @return: C{True} on success, C{False} on failure.

    @raise WindowsError: Raises an exception on error, unless
        C{bIgnoreExceptions} is C{True}.
    """
    try:
        cls.drop_privileges(win32.SE_DEBUG_NAME)
    except Exception:
        if not bIgnoreExceptions:
            raise
        return False
    return True
def adjust_privileges(state, privileges):
    """
    Requests or drops privileges.

    @type  state: bool
    @param state: C{True} to request, C{False} to drop.

    @type  privileges: list(int)
    @param privileges: Privileges to request or drop.

    @raise WindowsError: Raises an exception on error.
    """
    # Open our own token with adjust rights and apply the new state
    # to every requested privilege.
    hProcess = win32.GetCurrentProcess()
    with win32.OpenProcessToken(hProcess,
                                win32.TOKEN_ADJUST_PRIVILEGES) as hToken:
        NewState = [(priv, state) for priv in privileges]
        win32.AdjustTokenPrivileges(hToken, NewState)
def get_file_version_info(cls, filename):
    """
    Get the program version from an executable file, if available.

    @type  filename: str
    @param filename: Pathname to the executable file to query.

    @rtype: tuple(str, str, bool, bool, str, str)
    @return: Tuple with version information extracted from the executable
        file metadata: file version, product version, debug build flag,
        legacy OS build flag, binary file type, and binary creation
        timestamp. Any of the fields may be C{None} if not available.

    @raise WindowsError: Raises an exception on error.
    """
    # Get the file version info structure and validate it.
    pBlock = win32.GetFileVersionInfo(filename)
    pBuffer, dwLen = win32.VerQueryValue(pBlock, "\\")
    if dwLen != ctypes.sizeof(win32.VS_FIXEDFILEINFO):
        raise ctypes.WinError(win32.ERROR_BAD_LENGTH)
    pVersionInfo = ctypes.cast(pBuffer,
                               ctypes.POINTER(win32.VS_FIXEDFILEINFO))
    vi = pVersionInfo.contents
    if vi.dwSignature != 0xFEEF04BD:
        raise ctypes.WinError(win32.ERROR_BAD_ARGUMENTS)

    # File and product versions.
    FileVersion    = "%d.%d" % (vi.dwFileVersionMS, vi.dwFileVersionLS)
    ProductVersion = "%d.%d" % (vi.dwProductVersionMS, vi.dwProductVersionLS)

    # Debug build? (Only meaningful when the mask declares the flag.)
    if vi.dwFileFlagsMask & win32.VS_FF_DEBUG:
        DebugBuild = (vi.dwFileFlags & win32.VS_FF_DEBUG) != 0
    else:
        DebugBuild = None

    # Legacy OS build?
    LegacyBuild = (vi.dwFileOS != win32.VOS_NT_WINDOWS32)

    # File type; drivers and fonts have their own subtype tables.
    FileType = cls.__binary_types.get(vi.dwFileType)
    if vi.dwFileType == win32.VFT_DRV:
        FileType = cls.__driver_types.get(vi.dwFileSubtype)
    elif vi.dwFileType == win32.VFT_FONT:
        FileType = cls.__font_types.get(vi.dwFileSubtype)

    # Timestamp, ex: "Monday, July 7, 2013 (12:20:50.126)".
    # FIXME: how do we know the time zone?
    FileDate = (vi.dwFileDateMS << 32) + vi.dwFileDateLS
    if FileDate:
        CreationTime = win32.FileTimeToSystemTime(FileDate)
        CreationTimestamp = "%s, %s %d, %d (%d:%d:%d.%d)" % (
            cls.__days_of_the_week[CreationTime.wDayOfWeek],
            cls.__months[CreationTime.wMonth],
            CreationTime.wDay,
            CreationTime.wYear,
            CreationTime.wHour,
            CreationTime.wMinute,
            CreationTime.wSecond,
            CreationTime.wMilliseconds,
        )
    else:
        CreationTimestamp = None

    # Return the file version info.
    return (
        FileVersion,
        ProductVersion,
        DebugBuild,
        LegacyBuild,
        FileType,
        CreationTimestamp,
    )
def load_dbghelp(cls, pathname = None):
    """
    Load the specified version of the C{dbghelp.dll} library.

    This library is shipped with the Debugging Tools for Windows, and it's
    required to load debug symbols. Normally you don't need to call this
    method, as WinAppDbg already tries to load the latest version
    automatically - but it may come in handy if the Debugging Tools are
    installed in a non standard folder.

    @see: U{http://msdn.microsoft.com/en-us/library/ms679294(VS.85).aspx}

    @type  pathname: str
    @param pathname: (Optional) Full pathname to the C{dbghelp.dll}
        library. If not provided this method will try to autodetect it.

    @rtype:  ctypes.WinDLL
    @return: Loaded instance of C{dbghelp.dll}.

    @raise NotImplementedError: This feature was not implemented for the
        current architecture.
    @raise WindowsError: An error occured while processing this request.
    """
    # If an explicit pathname was not given, search for the library.
    if not pathname:

        # Under WOW64 we'll treat AMD64 as I386.
        arch = win32.arch
        if arch == win32.ARCH_AMD64 and win32.bits == 32:
            arch = win32.ARCH_I386

        # Check if the architecture is supported.
        if not arch in cls.__dbghelp_locations:
            msg = "Architecture %s is not currently supported."
            raise NotImplementedError(msg % arch)

        # Grab all versions of the library we can find.
        found = []
        for pathname in cls.__dbghelp_locations[arch]:
            if path.isfile(pathname):
                try:
                    f_ver, p_ver = cls.get_file_version_info(pathname)[:2]
                except WindowsError:
                    msg = "Failed to parse file version metadata for: %s"
                    warnings.warn(msg % pathname)
                    # BUGFIX: without this, f_ver/p_ver below would be
                    # unbound (NameError) or stale from a previous
                    # iteration. Skip files we can't get a version for.
                    continue
                # Prefer the product version when it's newer (or when the
                # file version is missing altogether).
                if not f_ver:
                    f_ver = p_ver
                elif p_ver and p_ver > f_ver:
                    f_ver = p_ver
                found.append( (f_ver, pathname) )

        # If we found any, use the newest version.
        if found:
            found.sort()
            pathname = found.pop()[1]

        # If we didn't find any, trust the default DLL search algorithm.
        else:
            pathname = "dbghelp.dll"

    # Load the library.
    dbghelp = ctypes.windll.LoadLibrary(pathname)

    # Set it globally as the library to be used.
    ctypes.windll.dbghelp = dbghelp

    # Return the library.
    return dbghelp
def fix_symbol_store_path(symbol_store_path = None,
                          remote = True,
                          force = False):
    """
    Fix the symbol store path. Equivalent to the C{.symfix} command in
    Microsoft WinDbg.

    If the symbol store path environment variable hasn't been set, this
    method will provide a default one.

    @type  symbol_store_path: str or None
    @param symbol_store_path: (Optional) Symbol store path to set.

    @type  remote: bool
    @param remote: (Optional) Selects the default used when
        C{symbol_store_path} is C{None}: if C{True} the Microsoft symbol
        server is used and debug symbols are downloaded through HTTP
        (best results, but slow); if C{False} only the local cache is
        used (fast, but symbols may be missing). Ignored entirely when
        an explicit C{symbol_store_path} is given.

    @type  force: bool
    @param force: (Optional) If C{True} the new symbol store path is
        always set. If C{False} it's only set when missing, so any
        configuration the user already has is left untouched.

    @rtype:  str or None
    @return: The previously set symbol store path if any,
        otherwise returns C{None}.
    """
    try:
        # Pick a sensible default when the caller didn't give us a path.
        if symbol_store_path is None:

            # Use the first local symbol folder that exists,
            # falling back to the current working directory.
            for local_path in ("C:\\SYMBOLS", "C:\\Windows\\Symbols"):
                if path.isdir(local_path):
                    break
            else:
                local_path = path.abspath(".")

            # Cache locally; optionally pull missing symbols from the
            # Microsoft symbol server over HTTP.
            symbol_store_path = "cache*;SRV*" + local_path
            if remote:
                symbol_store_path += (
                    "*http://msdl.microsoft.com/download/symbols")

        # Remember the old value, and only clobber it when it's either
        # missing or we were explicitly told to overwrite it.
        previous = os.environ.get("_NT_SYMBOL_PATH", None)
        if force or not previous:
            os.environ["_NT_SYMBOL_PATH"] = symbol_store_path
        return previous

    except Exception:
        # Best effort only: never let this helper crash the caller.
        e = sys.exc_info()[1]
        warnings.warn("Cannot fix symbol path, reason: %s" % str(e),
                      RuntimeWarning)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.