code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64)
---|---|---|---|---|---
return sum([self.counters[key]
for key in self.messages if key.startswith(prefix)]) | def get_count(self, prefix='') | Return the total count of errors and warnings. | 8.974235 | 6.570931 | 1.365748 |
return ['%-7s %s %s' % (self.counters[key], key, self.messages[key])
for key in sorted(self.messages) if key.startswith(prefix)] | def get_statistics(self, prefix='') | Get statistics for message codes that start with the prefix.
prefix='' matches all errors and warnings
prefix='E' matches all errors
prefix='W' matches all warnings
prefix='E4' matches all errors that have to do with imports | 5.436575 | 4.814425 | 1.129226 |
print('%-7.2f %s' % (self.elapsed, 'seconds elapsed'))
if self.elapsed:
for key in self._benchmark_keys:
print('%-7d %s per second (%d total)' %
(self.counters[key] / self.elapsed, key,
self.counters[key])) | def print_benchmark(self) | Print benchmark numbers. | 5.267221 | 4.771887 | 1.103803 |
self._deferred_print = []
return super(StandardReport, self).init_file(
filename, lines, expected, line_offset) | def init_file(self, filename, lines, expected, line_offset) | Signal a new file. | 6.60777 | 6.540308 | 1.010315 |
code = super(StandardReport, self).error(line_number, offset,
text, check)
if code and (self.counters[code] == 1 or self._repeat):
self._deferred_print.append(
(line_number, offset, code, text[5:], check.__doc__))
return code | def error(self, line_number, offset, text, check) | Report an error, according to options. | 6.77175 | 6.323921 | 1.070815 |
self._deferred_print.sort()
for line_number, offset, code, text, doc in self._deferred_print:
print(self._fmt % {
'path': self.filename,
'row': self.line_offset + line_number, 'col': offset + 1,
'code': code, 'text': text,
})
if self._show_source:
if line_number > len(self.lines):
line = ''
else:
line = self.lines[line_number - 1]
print(line.rstrip())
print(re.sub(r'\S', ' ', line[:offset]) + '^')
if self._show_pep8 and doc:
print(' ' + doc.strip())
# stdout is block buffered when not stdout.isatty().
# A line can be broken at a buffer boundary because other processes
# write to the same file.
# flush() after print() to avoid breaking output at a buffer boundary.
# The typical buffer size is 8192; a line is written safely when
# len(line) < 8192.
sys.stdout.flush()
return self.file_errors | def get_file_results(self) | Print the result and return the overall count for this file. | 6.173759 | 5.906509 | 1.045247 |
self.options.report = (reporter or self.options.reporter)(self.options)
return self.options.report | def init_report(self, reporter=None) | Initialize the report instance. | 5.68323 | 4.977983 | 1.141673 |
if paths is None:
paths = self.paths
report = self.options.report
runner = self.runner
report.start()
try:
for path in paths:
if os.path.isdir(path):
self.input_dir(path)
elif not self.excluded(path):
runner(path)
except KeyboardInterrupt:
print('... stopped')
report.stop()
return report | def check_files(self, paths=None) | Run all checks on the paths. | 3.921873 | 3.736043 | 1.04974 |
if self.options.verbose:
print('checking %s' % filename)
fchecker = self.checker_class(
filename, lines=lines, options=self.options)
return fchecker.check_all(expected=expected, line_offset=line_offset) | def input_file(self, filename, lines=None, expected=None, line_offset=0) | Run all checks on a Python source file. | 3.784238 | 3.37305 | 1.121904 |
dirname = dirname.rstrip('/')
if self.excluded(dirname):
return 0
counters = self.options.report.counters
verbose = self.options.verbose
filepatterns = self.options.filename
runner = self.runner
for root, dirs, files in os.walk(dirname):
if verbose:
print('directory ' + root)
counters['directories'] += 1
for subdir in sorted(dirs):
if self.excluded(subdir, root):
dirs.remove(subdir)
for filename in sorted(files):
# does the filename match one of the file patterns?
if ((filename_match(filename, filepatterns) and
not self.excluded(filename, root))):
runner(os.path.join(root, filename)) | def input_dir(self, dirname) | Check all files in this directory and all subdirectories. | 4.52662 | 4.449329 | 1.017371 |
if not self.options.exclude:
return False
basename = os.path.basename(filename)
if filename_match(basename, self.options.exclude):
return True
if parent:
filename = os.path.join(parent, filename)
filename = os.path.abspath(filename)
return filename_match(filename, self.options.exclude) | def excluded(self, filename, parent=None) | Check if the file should be excluded.
Check if 'options.exclude' contains a pattern that matches filename. | 2.408444 | 2.241351 | 1.07455 |
if len(code) < 4 and any(s.startswith(code)
for s in self.options.select):
return False
return (code.startswith(self.options.ignore) and
not code.startswith(self.options.select)) | def ignore_code(self, code) | Check if the error code should be ignored.
If 'options.select' contains a prefix of the error code,
return False. Else, if 'options.ignore' contains a prefix of
the error code, return True. | 5.303097 | 3.476019 | 1.525624 |
checks = []
for check, attrs in _checks[argument_name].items():
(codes, args) = attrs
if any(not (code and self.ignore_code(code)) for code in codes):
checks.append((check.__name__, check, args))
return sorted(checks) | def get_checks(self, argument_name) | Get all the checks for this category.
Find all globally visible functions where the first argument name
starts with argument_name and which contain selected tests. | 6.282911 | 6.620252 | 0.949044 |
if use_datetime and not datetime:
raise ValueError("the datetime module is not available")
if FastParser and FastUnmarshaller:
if use_datetime:
mkdatetime = _datetime_type
else:
mkdatetime = _datetime
target = FastUnmarshaller(True, False, _binary, mkdatetime, Fault)
parser = FastParser(target)
else:
target = Unmarshaller(use_datetime=use_datetime)
if FastParser:
parser = FastParser(target)
elif SgmlopParser:
parser = SgmlopParser(target)
elif ExpatParser:
parser = ExpatParser(target)
else:
parser = SlowParser(target)
return parser, target | def getparser(use_datetime=0) | getparser() -> parser, unmarshaller
Create an instance of the fastest available parser, and attach it
to an unmarshalling object. Return both objects. | 4.568836 | 4.028685 | 1.134076 |
p, u = getparser(use_datetime=use_datetime)
p.feed(data)
p.close()
return u.close(), u.getmethodname() | def loads(data, use_datetime=0) | data -> unmarshalled data, method name
Convert an XML-RPC packet to unmarshalled data plus a method
name (None if not present).
If the XML-RPC packet represents a fault condition, this function
raises a Fault exception. | 6.906059 | 6.317891 | 1.093096 |
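A brief round-trip sketch tying together the `getparser`/`loads` rows above; it assumes only the standard-library `xmlrpclib` module (Python 2 spelling, matching the code), whose `dumps()` helper produces the packet that `loads()` decodes.

```python
import xmlrpclib  # Python 3: from xmlrpc import client as xmlrpclib

# Encode a request packet, then unmarshal it with loads() as documented above.
packet = xmlrpclib.dumps((1, 2.0, "three"), methodname="demo.echo")
params, method = xmlrpclib.loads(packet)
print(method)  # demo.echo
print(params)  # (1, 2.0, 'three')
```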
import_mod = results.get("module")
pref = import_mod.prefix
names = []
# create a Node list of the replacement modules
for name in MAPPING[import_mod.value][:-1]:
names.extend([Name(name[0], prefix=pref), Comma()])
names.append(Name(MAPPING[import_mod.value][-1][0], prefix=pref))
import_mod.replace(names) | def transform_import(self, node, results) | Transform for the basic import case. Replaces the old
import name with a comma separated list of its
replacements. | 5.91541 | 5.643104 | 1.048255 |
mod_member = results.get("mod_member")
pref = mod_member.prefix
member = results.get("member")
# Simple case with only a single member being imported
if member:
# this may be a list of length one, or just a node
if isinstance(member, list):
member = member[0]
new_name = None
for change in MAPPING[mod_member.value]:
if member.value in change[1]:
new_name = change[0]
break
if new_name:
mod_member.replace(Name(new_name, prefix=pref))
else:
self.cannot_convert(node, "This is an invalid module element")
# Multiple members being imported
else:
# a dictionary for replacements, order matters
modules = []
mod_dict = {}
members = results["members"]
for member in members:
# we only care about the actual members
if member.type == syms.import_as_name:
as_name = member.children[2].value
member_name = member.children[0].value
else:
member_name = member.value
as_name = None
if member_name != u",":
for change in MAPPING[mod_member.value]:
if member_name in change[1]:
if change[0] not in mod_dict:
modules.append(change[0])
mod_dict.setdefault(change[0], []).append(member)
new_nodes = []
indentation = find_indentation(node)
first = True
def handle_name(name, prefix):
if name.type == syms.import_as_name:
kids = [Name(name.children[0].value, prefix=prefix),
name.children[1].clone(),
name.children[2].clone()]
return [Node(syms.import_as_name, kids)]
return [Name(name.value, prefix=prefix)]
for module in modules:
elts = mod_dict[module]
names = []
for elt in elts[:-1]:
names.extend(handle_name(elt, pref))
names.append(Comma())
names.extend(handle_name(elts[-1], pref))
new = FromImport(module, names)
if not first or node.parent.prefix.endswith(indentation):
new.prefix = indentation
new_nodes.append(new)
first = False
if new_nodes:
nodes = []
for new_node in new_nodes[:-1]:
nodes.extend([new_node, Newline()])
nodes.append(new_nodes[-1])
node.replace(nodes)
else:
self.cannot_convert(node, "All module elements are invalid") | def transform_member(self, node, results) | Transform for imports of specific module elements. Replaces
the module to be imported from with the appropriate new
module. | 3.068382 | 2.982442 | 1.028815 |
module_dot = results.get("bare_with_attr")
member = results.get("member")
new_name = None
if isinstance(member, list):
member = member[0]
for change in MAPPING[module_dot.value]:
if member.value in change[1]:
new_name = change[0]
break
if new_name:
module_dot.replace(Name(new_name,
prefix=module_dot.prefix))
else:
self.cannot_convert(node, "This is an invalid module element") | def transform_dot(self, node, results) | Transform for calls to module members in code. | 5.218267 | 4.649691 | 1.122283 |
patched = ['threading', 'thread', '_thread', 'time', 'socket', 'Queue', 'queue', 'select',
'xmlrpclib', 'SimpleXMLRPCServer', 'BaseHTTPServer', 'SocketServer',
'xmlrpc.client', 'xmlrpc.server', 'http.server', 'socketserver']
for name in patched:
try:
__import__(name)
except:
pass
patched_modules = dict([(k, v) for k, v in sys.modules.items()
if k in patched])
for name in patched_modules:
del sys.modules[name]
# import for side effects
import _pydev_imps._pydev_saved_modules
for name in patched_modules:
sys.modules[name] = patched_modules[name] | def protect_libraries_from_patching() | In this function we delete some modules from `sys.modules` dictionary and import them again inside
`_pydev_saved_modules` in order to save their original copies there. After that we can use these
saved modules within the debugger to protect them from patching by external libraries (e.g. gevent). | 3.171526 | 2.961223 | 1.071019 |
'''
Note: the difference from get_current_thread_id to get_thread_id is that
for the current thread we can get the thread id while the thread.ident
is still not set in the Thread instance.
'''
try:
# Fast path without getting lock.
tid = thread.__pydevd_id__
if tid is None:
# Fix for https://www.brainwy.com/tracker/PyDev/645
# if __pydevd_id__ is None, recalculate it... also, use an heuristic
# that gives us always the same id for the thread (using thread.ident or id(thread)).
raise AttributeError()
except AttributeError:
tid = _get_or_compute_thread_id_with_lock(thread, is_current_thread=True)
return tid | def get_current_thread_id(thread) | Note: the difference from get_current_thread_id to get_thread_id is that
for the current thread we can get the thread id while the thread.ident
is still not set in the Thread instance. | 9.103081 | 5.566024 | 1.635473 |
'''
To be used as a decorator
@call_only_once
def func():
print 'Calling func only this time'
Actually, in PyDev it must be called as:
func = call_only_once(func) to support older versions of Python.
'''
def new_func(*args, **kwargs):
if not new_func._called:
new_func._called = True
return func(*args, **kwargs)
new_func._called = False
return new_func | def call_only_once(func) | To be used as a decorator
@call_only_once
def func():
print 'Calling func only this time'
Actually, in PyDev it must be called as:
func = call_only_once(func) to support older versions of Python. | 4.187099 | 1.850786 | 2.262335 |
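A minimal usage sketch for the `call_only_once` decorator above; `setup()` is a hypothetical function and only illustrates that the wrapped body runs a single time.

```python
@call_only_once
def setup():
    print('running setup')

setup()  # prints 'running setup'
setup()  # no-op: new_func._called is already True
```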
pkg = __import__(fixer_pkg, [], [], ["*"])
fixer_dir = os.path.dirname(pkg.__file__)
fix_names = []
for name in sorted(os.listdir(fixer_dir)):
if name.startswith("fix_") and name.endswith(".py"):
if remove_prefix:
name = name[4:]
fix_names.append(name[:-3])
return fix_names | def get_all_fix_names(fixer_pkg, remove_prefix=True) | Return a sorted list of all available fix names in the given package. | 2.09791 | 1.971956 | 1.063873 |
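An illustrative call against the stock `lib2to3.fixes` package (an assumption; any package laid out as `fix_*.py` modules works the same way).

```python
names = get_all_fix_names("lib2to3.fixes")
print(names)  # e.g. ['apply', 'basestring', ...] with the "fix_" prefix stripped
```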
if isinstance(pat, (pytree.NodePattern, pytree.LeafPattern)):
# NodePatterns must either have no type and no content
# or a type and content -- so they don't get any further.
# Always return leaves.
if pat.type is None:
raise _EveryNode
return set([pat.type])
if isinstance(pat, pytree.NegatedPattern):
if pat.content:
return _get_head_types(pat.content)
raise _EveryNode # Negated Patterns don't have a type
if isinstance(pat, pytree.WildcardPattern):
# Recurse on each node in content
r = set()
for p in pat.content:
for x in p:
r.update(_get_head_types(x))
return r
raise Exception("Oh no! I don't understand pattern %s" %(pat)) | def _get_head_types(pat) | Accepts a pytree Pattern Node and returns a set
of the pattern types which will match first. | 5.529945 | 5.243286 | 1.054672 |
head_nodes = collections.defaultdict(list)
every = []
for fixer in fixer_list:
if fixer.pattern:
try:
heads = _get_head_types(fixer.pattern)
except _EveryNode:
every.append(fixer)
else:
for node_type in heads:
head_nodes[node_type].append(fixer)
else:
if fixer._accept_type is not None:
head_nodes[fixer._accept_type].append(fixer)
else:
every.append(fixer)
for node_type in chain(pygram.python_grammar.symbol2number.itervalues(),
pygram.python_grammar.tokens):
head_nodes[node_type].extend(every)
return dict(head_nodes) | def _get_headnode_dict(fixer_list) | Accepts a list of fixers and returns a dictionary
of head node type --> fixer list. | 2.928421 | 2.820871 | 1.038126 |
pre_order_fixers = []
post_order_fixers = []
for fix_mod_path in self.fixers:
mod = __import__(fix_mod_path, {}, {}, ["*"])
fix_name = fix_mod_path.rsplit(".", 1)[-1]
if fix_name.startswith(self.FILE_PREFIX):
fix_name = fix_name[len(self.FILE_PREFIX):]
parts = fix_name.split("_")
class_name = self.CLASS_PREFIX + "".join([p.title() for p in parts])
try:
fix_class = getattr(mod, class_name)
except AttributeError:
raise FixerError("Can't find %s.%s" % (fix_name, class_name))
fixer = fix_class(self.options, self.fixer_log)
if fixer.explicit and self.explicit is not True and \
fix_mod_path not in self.explicit:
self.log_message("Skipping implicit fixer: %s", fix_name)
continue
self.log_debug("Adding transformation: %s", fix_name)
if fixer.order == "pre":
pre_order_fixers.append(fixer)
elif fixer.order == "post":
post_order_fixers.append(fixer)
else:
raise FixerError("Illegal fixer order: %r" % fixer.order)
key_func = operator.attrgetter("run_order")
pre_order_fixers.sort(key=key_func)
post_order_fixers.sort(key=key_func)
return (pre_order_fixers, post_order_fixers) | def get_fixers(self) | Inspects the options to load the requested patterns and handlers.
Returns:
(pre_order, post_order), where pre_order is the list of fixers that
want a pre-order AST traversal, and post_order is the list that want
post-order traversal. | 2.321453 | 2.22692 | 1.04245 |
if args:
msg = msg % args
self.logger.info(msg) | def log_message(self, msg, *args) | Hook to log a message. | 3.746071 | 3.704981 | 1.011091 |
for dir_or_file in items:
if os.path.isdir(dir_or_file):
self.refactor_dir(dir_or_file, write, doctests_only)
else:
self.refactor_file(dir_or_file, write, doctests_only) | def refactor(self, items, write=False, doctests_only=False) | Refactor a list of files and directories. | 1.822135 | 1.71957 | 1.059646 |
py_ext = os.extsep + "py"
for dirpath, dirnames, filenames in os.walk(dir_name):
self.log_debug("Descending into %s", dirpath)
dirnames.sort()
filenames.sort()
for name in filenames:
if (not name.startswith(".") and
os.path.splitext(name)[1] == py_ext):
fullname = os.path.join(dirpath, name)
self.refactor_file(fullname, write, doctests_only)
# Modify dirnames in-place to remove subdirs with leading dots
dirnames[:] = [dn for dn in dirnames if not dn.startswith(".")] | def refactor_dir(self, dir_name, write=False, doctests_only=False) | Descends down a directory and refactor every Python file found.
Python files are assumed to have a .py extension.
Files and subdirectories starting with '.' are skipped. | 2.724752 | 2.67408 | 1.018949 |
try:
f = open(filename, "rb")
except IOError as err:
self.log_error("Can't open %s: %s", filename, err)
return None, None
try:
encoding = tokenize.detect_encoding(f.readline)[0]
finally:
f.close()
with _open_with_encoding(filename, "r", encoding=encoding) as f:
return _from_system_newlines(f.read()), encoding | def _read_python_source(self, filename) | Do our best to decode a Python source file correctly. | 3.203938 | 3.029844 | 1.05746 |
input, encoding = self._read_python_source(filename)
if input is None:
# Reading the file failed.
return
input += u"\n" # Silence certain parse errors
if doctests_only:
self.log_debug("Refactoring doctests in %s", filename)
output = self.refactor_docstring(input, filename)
if self.write_unchanged_files or output != input:
self.processed_file(output, filename, input, write, encoding)
else:
self.log_debug("No doctest changes in %s", filename)
else:
tree = self.refactor_string(input, filename)
if self.write_unchanged_files or (tree and tree.was_changed):
# The [:-1] is to take off the \n we added earlier
self.processed_file(unicode(tree)[:-1], filename,
write=write, encoding=encoding)
else:
self.log_debug("No changes in %s", filename) | def refactor_file(self, filename, write=False, doctests_only=False) | Refactors a file. | 3.743681 | 3.738926 | 1.001272 |
features = _detect_future_features(data)
if "print_function" in features:
self.driver.grammar = pygram.python_grammar_no_print_statement
try:
tree = self.driver.parse_string(data)
except Exception as err:
self.log_error("Can't parse %s: %s: %s",
name, err.__class__.__name__, err)
return
finally:
self.driver.grammar = self.grammar
tree.future_features = features
self.log_debug("Refactoring %s", name)
self.refactor_tree(tree, name)
return tree | def refactor_string(self, data, name) | Refactor a given input string.
Args:
data: a string holding the code to be refactored.
name: a human-readable name for use in error/log messages.
Returns:
An AST corresponding to the refactored input stream; None if
there were errors during the parse. | 4.507891 | 3.856506 | 1.168905 |
for fixer in chain(self.pre_order, self.post_order):
fixer.start_tree(tree, name)
#use traditional matching for the incompatible fixers
self.traverse_by(self.bmi_pre_order_heads, tree.pre_order())
self.traverse_by(self.bmi_post_order_heads, tree.post_order())
# obtain a set of candidate nodes
match_set = self.BM.run(tree.leaves())
while any(match_set.values()):
for fixer in self.BM.fixers:
if fixer in match_set and match_set[fixer]:
#sort by depth; apply fixers from bottom(of the AST) to top
match_set[fixer].sort(key=pytree.Base.depth, reverse=True)
if fixer.keep_line_order:
#some fixers(eg fix_imports) must be applied
#with the original file's line order
match_set[fixer].sort(key=pytree.Base.get_lineno)
for node in list(match_set[fixer]):
if node in match_set[fixer]:
match_set[fixer].remove(node)
try:
find_root(node)
except ValueError:
# this node has been cut off from a
# previous transformation ; skip
continue
if node.fixers_applied and fixer in node.fixers_applied:
# do not apply the same fixer again
continue
results = fixer.match(node)
if results:
new = fixer.transform(node, results)
if new is not None:
node.replace(new)
#new.fixers_applied.append(fixer)
for node in new.post_order():
# do not apply the fixer again to
# this or any subnode
if not node.fixers_applied:
node.fixers_applied = []
node.fixers_applied.append(fixer)
# update the original match set for
# the added code
new_matches = self.BM.run(new.leaves())
for fxr in new_matches:
if not fxr in match_set:
match_set[fxr]=[]
match_set[fxr].extend(new_matches[fxr])
for fixer in chain(self.pre_order, self.post_order):
fixer.finish_tree(tree, name)
return tree.was_changed | def refactor_tree(self, tree, name) | Refactors a parse tree (modifying the tree in place).
For compatible patterns the bottom matcher module is
used. Otherwise the tree is traversed node-to-node for
matches.
Args:
tree: a pytree.Node instance representing the root of the tree
to be refactored.
name: a human-readable name for this tree.
Returns:
True if the tree was modified, False otherwise. | 4.154745 | 4.076144 | 1.019283 |
if not fixers:
return
for node in traversal:
for fixer in fixers[node.type]:
results = fixer.match(node)
if results:
new = fixer.transform(node, results)
if new is not None:
node.replace(new)
node = new | def traverse_by(self, fixers, traversal) | Traverse an AST, applying a set of fixers to each node.
This is a helper method for refactor_tree().
Args:
fixers: a list of fixer instances.
traversal: a generator that yields AST nodes.
Returns:
None | 3.376168 | 3.807328 | 0.886755 |
self.files.append(filename)
if old_text is None:
old_text = self._read_python_source(filename)[0]
if old_text is None:
return
equal = old_text == new_text
self.print_output(old_text, new_text, filename, equal)
if equal:
self.log_debug("No changes to %s", filename)
if not self.write_unchanged_files:
return
if write:
self.write_file(new_text, filename, old_text, encoding)
else:
self.log_debug("Not writing changes to %s", filename) | def processed_file(self, new_text, filename, old_text=None, write=False,
encoding=None) | Called when a file has been refactored and there may be changes. | 2.969044 | 2.861391 | 1.037623 |
try:
f = _open_with_encoding(filename, "w", encoding=encoding)
except os.error as err:
self.log_error("Can't create %s: %s", filename, err)
return
try:
f.write(_to_system_newlines(new_text))
except os.error as err:
self.log_error("Can't write %s: %s", filename, err)
finally:
f.close()
self.log_debug("Wrote changes to %s", filename)
self.wrote = True | def write_file(self, new_text, filename, old_text, encoding=None) | Writes a string to a file.
It first shows a unified diff between the old text and the new text, and
then rewrites the file; the latter is only done if the write option is
set. | 2.552113 | 2.736359 | 0.932668 |
result = []
block = None
block_lineno = None
indent = None
lineno = 0
for line in input.splitlines(True):
lineno += 1
if line.lstrip().startswith(self.PS1):
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
block_lineno = lineno
block = [line]
i = line.find(self.PS1)
indent = line[:i]
elif (indent is not None and
(line.startswith(indent + self.PS2) or
line == indent + self.PS2.rstrip() + u"\n")):
block.append(line)
else:
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
block = None
indent = None
result.append(line)
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
return u"".join(result) | def refactor_docstring(self, input, filename) | Refactors a docstring, looking for doctests.
This returns a modified version of the input string. It looks
for doctests, which start with a ">>>" prompt, and may be
continued with "..." prompts, as long as the "..." is indented
the same as the ">>>".
(Unfortunately we can't use the doctest module's parser,
since, like most parsers, it is not geared towards preserving
the original source.) | 2.10301 | 2.085226 | 1.008528 |
tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))
tree.future_features = frozenset()
return tree | def parse_block(self, block, lineno, indent) | Parses a block into a tree.
This is necessary to get correct line number / offset information
in the parser diagnostics and embedded into the parse tree. | 13.485756 | 13.407404 | 1.005844 |
tokens = tokenize.generate_tokens(self.gen_lines(block, indent).next)
for type, value, (line0, col0), (line1, col1), line_text in tokens:
line0 += lineno - 1
line1 += lineno - 1
# Don't bother updating the columns; this is too complicated
# since line_text would also have to be updated and it would
# still break for tokens spanning lines. Let the user guess
# that the column numbers for doctests are relative to the
# end of the prompt string (PS1 or PS2).
yield type, value, (line0, col0), (line1, col1), line_text | def wrap_toks(self, block, lineno, indent) | Wraps a tokenize stream to systematically modify start/end. | 6.614462 | 6.361721 | 1.039728 |
prefix1 = indent + self.PS1
prefix2 = indent + self.PS2
prefix = prefix1
for line in block:
if line.startswith(prefix):
yield line[len(prefix):]
elif line == prefix.rstrip() + u"\n":
yield u"\n"
else:
raise AssertionError("line=%r, prefix=%r" % (line, prefix))
prefix = prefix2
while True:
yield "" | def gen_lines(self, block, indent) | Generates lines as expected by tokenize from a list of lines.
This strips the first len(indent + self.PS1) characters off each line. | 3.69033 | 2.893183 | 1.275526 |
xml = ""
keys = dict_keys(frame_f_locals)
if hasattr(keys, 'sort'):
keys.sort() # Python 3.0 does not have it
else:
keys = sorted(keys) # Jython 2.1 does not have it
return_values_xml = ''
for k in keys:
try:
v = frame_f_locals[k]
eval_full_val = should_evaluate_full_value(v)
if k == RETURN_VALUES_DICT:
for name, val in dict_iter_items(v):
return_values_xml += var_to_xml(val, name, additional_in_xml=' isRetVal="True"')
else:
if hidden_ns is not None and k in hidden_ns:
xml += var_to_xml(v, str(k), additional_in_xml=' isIPythonHidden="True"',
evaluate_full_value=eval_full_val)
else:
xml += var_to_xml(v, str(k), evaluate_full_value=eval_full_val)
except Exception:
pydev_log.exception("Unexpected error, recovered safely.")
# Show return values as the first entry.
return return_values_xml + xml | def frame_vars_to_xml(frame_f_locals, hidden_ns=None) | dumps frame variables to XML
<var name="var_name" scope="local" type="type" value="value"/> | 3.958286 | 3.934541 | 1.006035 |
type_name, type_qualifier, is_exception_on_eval, resolver, value = get_variable_details(
val, evaluate_full_value)
try:
name = quote(name, '/>_= ') # TODO: Fix PY-5834 without using quote
except:
pass
xml = '<var name="%s" type="%s" ' % (make_valid_xml_value(name), make_valid_xml_value(type_name))
if type_qualifier:
xml_qualifier = 'qualifier="%s"' % make_valid_xml_value(type_qualifier)
else:
xml_qualifier = ''
if value:
# cannot be too big... communication may not handle it.
if len(value) > MAXIMUM_VARIABLE_REPRESENTATION_SIZE and trim_if_too_big:
value = value[0:MAXIMUM_VARIABLE_REPRESENTATION_SIZE]
value += '...'
xml_value = ' value="%s"' % (make_valid_xml_value(quote(value, '/>_= ')))
else:
xml_value = ''
if is_exception_on_eval:
xml_container = ' isErrorOnEval="True"'
else:
if resolver is not None:
xml_container = ' isContainer="True"'
else:
xml_container = ''
return ''.join((xml, xml_qualifier, xml_value, xml_container, additional_in_xml, ' />\n')) | def var_to_xml(val, name, trim_if_too_big=True, additional_in_xml='', evaluate_full_value=True) | single variable or dictionary to xml representation | 3.542504 | 3.533328 | 1.002597 |
".exploitable - Determine the approximate exploitability rating"
from winappdbg import Crash
event = self.debug.lastEvent
crash = Crash(event)
crash.fetch_extra_data(event)
status, rule, description = crash.isExploitable()
print "-" * 79
print "Exploitability: %s" % status
print "Matched rule: %s" % rule
print "Description: %s" % description
print "-" * 79 | def do(self, arg) | .exploitable - Determine the approximate exploitability rating | 9.202699 | 6.166508 | 1.492368 |
self.device = None
self.doc = None
self.parser = None
self.resmgr = None
self.interpreter = None | def _cleanup(self) | Frees lots of non-textual information, such as the fonts
and images and the objects that were needed to parse the
PDF. | 9.33401 | 6.698015 | 1.393549 |
if clean:
return utils.normalise_whitespace(''.join(self).replace('\n', ' '))
else:
return ''.join(self) | def text(self, clean=True) | Returns the text of the PDF as a single string.
Options:
:clean:
Removes misc cruft, like lots of whitespace. | 4.321243 | 5.168847 | 0.836017 |
_underscorer1 = re.compile(r'(.)([A-Z][a-z]+)')
_underscorer2 = re.compile('([a-z0-9])([A-Z])')
subbed = _underscorer1.sub(r'\1_\2', s)
return _underscorer2.sub(r'\1_\2', subbed).lower() | def camelToSnake(s) | https://gist.github.com/jaytaylor/3660565
Is it ironic that this function is written in camel case, yet it
converts to snake case? hmm.. | 1.600899 | 1.583867 | 1.010753 |
type_ = locate(app_name)
if inspect.isclass(type_):
return type_.name
return app_name | def get_app_name(app_name) | Returns the app name from the new-style app config if it
is a class, or the same app name if it is not a class. | 5.324897 | 5.273578 | 1.009731 |
if user and user.is_superuser:
return True
if not isinstance(roles, list):
roles = [roles]
normalized_roles = []
for role in roles:
if not inspect.isclass(role):
role = RolesManager.retrieve_role(role)
normalized_roles.append(role)
user_roles = get_user_roles(user)
return any([role in user_roles for role in normalized_roles]) | def has_role(user, roles) | Check if a user has any of the given roles. | 2.660582 | 2.631712 | 1.01097 |
if user and user.is_superuser:
return True
return permission_name in available_perm_names(user) | def has_permission(user, permission_name) | Check if a user has a given permission. | 6.507121 | 6.426465 | 1.012551 |
if user and user.is_superuser:
return True
checker = PermissionsManager.retrieve_checker(checker_name)
user_roles = get_user_roles(user)
if not user_roles:
user_roles = [None]
return any([checker(user_role, user, obj) for user_role in user_roles]) | def has_object_permission(checker_name, user, obj) | Check if a user has permission to perform an action on an object. | 3.308292 | 3.141878 | 1.052966 |
user_ct = ContentType.objects.get_for_model(get_user_model())
return Permission.objects.get_or_create(content_type=user_ct, codename=codename,
defaults={'name': name(codename) if callable(name) else name}) | def get_or_create_permission(codename, name=camel_or_snake_to_title) | Get a Permission object from a permission name.
@:param codename: permission code name
@:param name: human-readable permissions name (str) or callable that takes codename as
argument and returns str | 2.35345 | 2.452985 | 0.959423 |
if user:
groups = user.groups.all() # Important! all() query may be cached on User with prefetch_related.
roles = (RolesManager.retrieve_role(group.name) for group in groups if group.name in RolesManager.get_roles_names())
return sorted(roles, key=lambda r: r.get_name() )
else:
return [] | def get_user_roles(user) | Get a list of a users's roles. | 6.04478 | 6.179418 | 0.978212 |
roles = get_user_roles(user)
for role in roles:
role.remove_role_from_user(user)
return roles | def clear_roles(user) | Remove all roles from a user. | 3.684204 | 3.758567 | 0.980215 |
roles = get_user_roles(user)
permission_hash = {}
for role in roles:
permission_names = role.permission_names_list()
for permission_name in permission_names:
permission_hash[permission_name] = get_permission(
permission_name) in user.user_permissions.all()
return permission_hash | def available_perm_status(user) | Get a boolean map of the permissions available to a user
based on that user's roles. | 3.331395 | 3.054186 | 1.090764 |
roles = get_user_roles(user)
perm_names = set(p for role in roles for p in role.permission_names_list())
return [p.codename for p in user.user_permissions.all() if p.codename in perm_names] \
if roles else [] | def available_perm_names(user) | Return a list of permissions codenames available to a user, based on that user's roles.
i.e., keys for all "True" permissions from available_perm_status(user):
Assert: set(available_perm_names(user)) == set(perm for perm,has_perm in available_perm_status(user) if has_perm)
Query efficient; especially when prefetch_related('group', 'user_permissions') on user object.
No side-effects; permissions are not created in DB as side-effect. | 4.359984 | 4.256854 | 1.024227 |
roles = get_user_roles(user)
for role in roles:
if permission_name in role.permission_names_list():
permission = get_permission(permission_name)
user.user_permissions.add(permission)
return
raise RolePermissionScopeException(
"This permission isn't in the scope of "
"any of this user's roles.") | def grant_permission(user, permission_name) | Grant a user a specified permission.
Permissions are only granted if they are in the scope any of the
user's roles. If the permission is out of scope,
a RolePermissionScopeException is raised. | 4.754414 | 3.404034 | 1.3967 |
roles = get_user_roles(user)
for role in roles:
if permission_name in role.permission_names_list():
permission = get_permission(permission_name)
user.user_permissions.remove(permission)
return
raise RolePermissionScopeException(
"This permission isn't in the scope of "
"any of this user's roles.") | def revoke_permission(user, permission_name) | Revoke a specified permission from a user.
Permissions are only revoked if they are in the scope any of the user's
roles. If the permission is out of scope, a RolePermissionScopeException
is raised. | 4.74712 | 3.511772 | 1.351774 |
if dataset_name in classification_dataset_names:
data_type = 'classification'
elif dataset_name in regression_dataset_names:
data_type = 'regression'
else:
raise ValueError('Dataset not found in PMLB.')
dataset_url = '{GITHUB_URL}/{DATA_TYPE}/{DATASET_NAME}/{DATASET_NAME}{SUFFIX}'.format(
GITHUB_URL=GITHUB_URL,
DATA_TYPE=data_type,
DATASET_NAME=dataset_name,
SUFFIX=suffix
)
if local_cache_dir is None:
dataset = pd.read_csv(dataset_url, sep='\t', compression='gzip')
else:
dataset_path = os.path.join(local_cache_dir, dataset_name) + suffix
# Use the local cache if the file already exists there
if os.path.exists(dataset_path):
dataset = pd.read_csv(dataset_path, sep='\t', compression='gzip')
# Download the data to the local cache if it is not already there
else:
dataset = pd.read_csv(dataset_url, sep='\t', compression='gzip')
dataset.to_csv(dataset_path, sep='\t', compression='gzip', index=False)
if return_X_y:
X = dataset.drop('target', axis=1).values
y = dataset['target'].values
return (X, y)
else:
return dataset | def fetch_data(dataset_name, return_X_y=False, local_cache_dir=None) | Download a data set from the PMLB, (optionally) store it locally, and return the data set.
You must be connected to the internet if you are fetching a data set that is not cached locally.
Parameters
----------
dataset_name: str
The name of the data set to load from PMLB.
return_X_y: bool (default: False)
Whether to return the data in scikit-learn format, with the features and labels stored in separate NumPy arrays.
local_cache_dir: str (default: None)
The directory on your local machine to store the data files.
If None, then the local data cache will not be used.
Returns
----------
dataset: pd.DataFrame or (array-like, array-like)
if return_X_y == False: A pandas DataFrame containing the fetched data set.
if return_X_y == True: A tuple of NumPy arrays containing (features, labels) | 1.953696 | 1.95656 | 0.998536 |
if not data:
return 0
# imb - a measure of imbalance within the dataset
imb = 0
num_classes=float(len(Counter(data)))
for x in Counter(data).values():
p_x = float(x)/len(data)
if p_x > 0:
imb += (p_x - 1/num_classes)*(p_x - 1/num_classes)
# worst-case scenario: all but one exemplar in the 1st class, the remaining one in the 2nd class
worst_case=(num_classes-1)*pow(1/num_classes,2) + pow(1-1/num_classes,2)
return (num_classes,imb/worst_case) | def imbalance_metrics(data) | Computes imbalance metric for a given dataset.
The imbalance metric is equal to 0 when a dataset is perfectly balanced (i.e. the number of examples in each class is equal).
:param data : pandas.DataFrame
A dataset in a pandas data frame
:returns (int, float)
A tuple (number of classes, imbalance metric), where zero means that the dataset is perfectly balanced and the higher the value, the more imbalanced the dataset. | 4.814025 | 4.718541 | 1.020236 |
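Two worked calls against `imbalance_metrics` above to make the scaling concrete; the values follow directly from the formula in the code.

```python
imbalance_metrics([0, 0, 1, 1])        # (2.0, 0.0)   -- perfectly balanced
imbalance_metrics([0, 0, 0, 0, 0, 1])  # (2.0, ~0.44) -- skewed towards class 0
```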
counter={k.name: v for k, v in features.columns.to_series().groupby(features.dtypes).groups.items()}
if(len(features.groupby('class').apply(list))==2):
return('binary')
if ('float64' in counter):
return ('float')
return ('integer') | def determine_endpoint_type(features) | Determines the type of an endpoint
:param features: pandas.DataFrame
A dataset in a pandas data frame
:returns string
A string with the endpoint type ('binary', 'float', or 'integer') | 6.480418 | 7.198895 | 0.900196 |
counter={k.name: v for k, v in features.columns.to_series().groupby(features.dtypes)}
binary=0
if ('int64' in counter):
binary=len(set(features.loc[:, (features<=1).all(axis=0)].columns.values)
& set(features.loc[:, (features>=0).all(axis=0)].columns.values)
& set(counter['int64']))
return (binary,len(counter['int64'])-binary if 'int64' in counter else 0,len(counter['float64']) if 'float64' in counter else 0) | def count_features_type(features) | Counts three different types of features (float, integer, binary).
:param features: pandas.DataFrame
A dataset in a pandas data frame
:returns a tuple (binary, integer, float) | 3.85414 | 3.87371 | 0.994948 |
assert (local_cache_dir!=None)
readme_file = open(os.path.join(local_cache_dir,'datasets',dataset_name,'README.md'), 'wt')
try:
df = fetch_data(dataset_name)
fnames = [col for col in df.columns if col!='class']
#determine all required values
types = get_types(df.ix[:, df.columns != 'class'])
feat=count_features_type(df.ix[:, df.columns != 'class'])
endpoint=determine_endpoint_type(df.ix[:, df.columns == 'class'])
mse=imbalance_metrics(df['class'].tolist())
#proceed with writing
readme_file.write('# %s\n\n' % dataset_name)
readme_file.write('## Summary Stats\n\n')
readme_file.write('#instances: %s\n\n' % str(len(df.axes[0])))
readme_file.write("#features: %s\n\n" % str(len(df.axes[1])-1))
readme_file.write(" #binary_features: %s\n\n" % feat[0])
readme_file.write(" #integer_features: %s\n\n" % feat[1])
readme_file.write(" #float_features: %s\n\n" % feat[2])
readme_file.write("Endpoint type: %s\n\n" % endpoint)
readme_file.write("#Classes: %s\n\n" % int(mse[0]))
readme_file.write("Imbalance metric: %s\n\n" % mse[1])
readme_file.write('## Feature Types\n\n %s\n\n' % '\n\n'.join([f + ':' + t for f,t in
zip(fnames,types)]))
except IOError as err:
print(err)
finally:
readme_file.close() | def generate_description(dataset_name, local_cache_dir=None) | Generates a description for a given dataset in its README.md file under the dataset's local_cache_dir directory.
:param dataset_name: str
The name of the data set to load from PMLB.
:param local_cache_dir: str (required)
The directory on your local machine to store the data files.
If None, then the local data cache will not be used. | 3.187881 | 3.162606 | 1.007992 |
report_filename = open(os.path.join(local_cache_dir, 'report.csv'), 'wt')
assert (local_cache_dir!=None)
try:
writer = csv.writer(report_filename, delimiter='\t')
writer.writerow(['Dataset','#instances','#features','#binary_features','#integer_features','#float_features',\
'Endpoint_type','#classes','Imbalance_metric'])
for dataset in dataset_names:
df=fetch_data(dataset)
print( "Dataset:", dataset)
assert 'class' in df.columns, "no class column"
#removing class column
print( "SIZE: "+ str(len(df.axes[0]))+ " x " + str(len(df.axes[1])-1))
feat=count_features_type(df.ix[:, df.columns != 'class'])
endpoint=determine_endpoint_type(df.ix[:, df.columns == 'class'])
mse=imbalance_metrics(df['class'].tolist())
#writer.writerow([file,str(len(df.axes[0])),str(len(df.axes[1])-1),feat[0],feat[1],feat[2],endpoint,mse[0],mse[1],mse[2]])
writer.writerow([dataset,str(len(df.axes[0])),str(len(df.axes[1])-1),feat[0],feat[1],feat[2],endpoint,int(mse[0]),mse[1]])
finally:
report_filename.close() | def generate_pmlb_summary(local_cache_dir=None) | Generates a summary report for all datasets in PMLB
:param local_cache_dir: str (required)
The directory on your local machine to store the data files. | 3.815614 | 3.835926 | 0.994705 |
response = requests.post(
url=target,
data=json.dumps(payload, cls=DjangoJSONEncoder),
headers={'Content-Type': 'application/json'}
)
if response.status_code == 410 and hook_id:
HookModel = get_hook_model()
hook = HookModel.objects.get(id=hook_id)
hook.delete() | def run(self, target, payload, instance=None, hook_id=None, **kwargs) | target: the url to receive the payload.
payload: a python primitive data structure
instance: a possibly null "trigger" instance
hook: the defining Hook object (useful for removing) | 2.732375 | 2.735111 | 0.999 |
opts = get_opts(instance)
model = '.'.join([opts.app_label, opts.object_name])
action = 'created' if created else 'updated'
distill_model_event(instance, model, action) | def model_saved(sender, instance,
created,
raw,
using,
**kwargs) | Automatically triggers "created" and "updated" actions. | 4.475577 | 4.230914 | 1.057828 |
opts = get_opts(instance)
model = '.'.join([opts.app_label, opts.object_name])
distill_model_event(instance, model, 'deleted') | def model_deleted(sender, instance,
using,
**kwargs) | Automatically triggers "deleted" actions. | 5.973723 | 5.712378 | 1.045751 |
opts = get_opts(instance)
model = '.'.join([opts.app_label, opts.object_name])
distill_model_event(instance, model, action, user_override=user) | def custom_action(sender, action,
instance,
user=None,
**kwargs) | Manually trigger a custom action (or even a standard action). | 6.766157 | 6.632455 | 1.020159 |
HookModel = get_hook_model()
hooks = HookModel.objects.filter(user=user, event=event_name)
for hook in hooks:
new_payload = payload
if send_hook_meta:
new_payload = {
'hook': hook.dict(),
'data': payload
}
hook.deliver_hook(instance, payload_override=new_payload) | def raw_custom_event(sender, event_name,
payload,
user,
send_hook_meta=True,
instance=None,
**kwargs) | Give a full payload | 3.53952 | 3.630169 | 0.975029 |
if self.event not in HOOK_EVENTS.keys():
raise ValidationError(
"Invalid hook event {evt}.".format(evt=self.event)
) | def clean(self) | Validation for events. | 8.275844 | 6.313243 | 1.31087 |
if getattr(instance, 'serialize_hook', None) and callable(instance.serialize_hook):
return instance.serialize_hook(hook=self)
if getattr(settings, 'HOOK_SERIALIZER', None):
serializer = get_module(settings.HOOK_SERIALIZER)
return serializer(instance, hook=self)
# if no user defined serializers, fallback to the django builtin!
data = serializers.serialize('python', [instance])[0]
for k, v in data.items():
if isinstance(v, OrderedDict):
data[k] = dict(v)
if isinstance(data, OrderedDict):
data = dict(data)
return {
'hook': self.dict(),
'data': data,
} | def serialize_hook(self, instance) | Serialize the object down to Python primitives.
By default it uses Django's built in serializer. | 3.313737 | 3.248226 | 1.020168 |
payload = payload_override or self.serialize_hook(instance)
if getattr(settings, 'HOOK_DELIVERER', None):
deliverer = get_module(settings.HOOK_DELIVERER)
deliverer(self.target, payload, instance=instance, hook=self)
else:
client.post(
url=self.target,
data=json.dumps(payload, cls=DjangoJSONEncoder),
headers={'Content-Type': 'application/json'}
)
signals.hook_sent_event.send_robust(sender=self.__class__, payload=payload, instance=instance, hook=self)
return None | def deliver_hook(self, instance, payload_override=None) | Deliver the payload to the target URL.
By default it serializes to JSON and POSTs. | 2.84285 | 2.676115 | 1.062305 |
try:
from importlib import import_module
except ImportError as e:
from django.utils.importlib import import_module
try:
mod_name, func_name = path.rsplit('.', 1)
mod = import_module(mod_name)
except ImportError as e:
raise ImportError(
'Error importing alert function {0}: "{1}"'.format(mod_name, e))
try:
func = getattr(mod, func_name)
except AttributeError:
raise ImportError(
('Module "{0}" does not define a "{1}" function'
).format(mod_name, func_name))
return func | def get_module(path) | A modified duplicate from Django's built in backend
retriever.
slugify = get_module('django.template.defaultfilters.slugify') | 2.101686 | 2.052624 | 1.023902 |
from rest_hooks.models import Hook
HookModel = Hook
if getattr(settings, 'HOOK_CUSTOM_MODEL', None):
HookModel = get_module(settings.HOOK_CUSTOM_MODEL)
return HookModel | def get_hook_model() | Returns the Custom Hook model if defined in settings,
otherwise the default Hook model. | 3.758198 | 3.300079 | 1.13882 |
try:
from django.contrib.auth import get_user_model
User = get_user_model()
except ImportError:
from django.contrib.auth.models import User
from rest_hooks.models import HOOK_EVENTS
if not event_name in HOOK_EVENTS.keys():
raise Exception(
'"{}" does not exist in `settings.HOOK_EVENTS`.'.format(event_name)
)
filters = {'event': event_name}
# Ignore the user if the user_override is False
if user_override is not False:
if user_override:
filters['user'] = user_override
elif hasattr(instance, 'user'):
filters['user'] = instance.user
elif isinstance(instance, User):
filters['user'] = instance
else:
raise Exception(
'{} has no `user` property. REST Hooks needs this.'.format(repr(instance))
)
# NOTE: This is probably up for discussion, but I think, in this
# case, instead of raising an error, we should fire the hook for
# all users/accounts it is subscribed to. That would be a genuine
# usecase rather than erroring because no user is associated with
# this event.
HookModel = get_hook_model()
hooks = HookModel.objects.filter(**filters)
for hook in hooks:
hook.deliver_hook(instance) | def find_and_fire_hook(event_name, instance, user_override=None) | Look up Hooks that apply | 3.873348 | 3.906233 | 0.991581 |
from rest_hooks.models import HOOK_EVENTS
event_name = None
for maybe_event_name, auto in HOOK_EVENTS.items():
if auto:
# break auto into App.Model, Action
maybe_model, maybe_action = auto.rsplit('.', 1)
maybe_action = maybe_action.rsplit('+', 1)
if model == maybe_model and action == maybe_action[0]:
event_name = maybe_event_name
if len(maybe_action) == 2:
user_override = False
if event_name:
finder = find_and_fire_hook
if getattr(settings, 'HOOK_FINDER', None):
finder = get_module(settings.HOOK_FINDER)
finder(event_name, instance, user_override=user_override) | def distill_model_event(instance, model, action, user_override=None) | Take created, updated and deleted actions for built-in
app/model mappings, convert to the defined event.name
and let hooks fly.
If that model isn't represented, we just quit silently. | 4.059921 | 3.789904 | 1.071246 |
out = data
if isinstance(data, PHATE):
out = data.transform()
else:
try:
if isinstance(data, anndata.AnnData):
try:
out = data.obsm['X_phate']
except KeyError:
raise RuntimeError(
"data.obsm['X_phate'] not found. "
"Please run `sc.tl.phate(adata)` before plotting.")
except NameError:
# anndata not installed
pass
if ndim is not None and out[0].shape[0] < ndim:
if isinstance(data, PHATE):
data.set_params(n_components=ndim)
out = data.transform()
else:
raise ValueError(
"Expected at least {}-dimensional data, got {}".format(
ndim, out[0].shape[0]))
return out | def _get_plot_data(data, ndim=None) | Get plot data out of an input object
Parameters
----------
data : array-like, `phate.PHATE` or `scanpy.AnnData`
ndim : int, optional (default: None)
Minimum number of dimensions | 3.112899 | 2.917491 | 1.066978 |
warnings.warn("`phate.plot.rotate_scatter3d` is deprecated. "
"Use `scprep.plot.rotate_scatter3d` instead.",
FutureWarning)
return scprep.plot.rotate_scatter3d(data,
filename=filename,
elev=elev,
rotation_speed=rotation_speed,
fps=fps,
ax=ax,
figsize=figsize,
dpi=dpi,
ipython_html=ipython_html,
**kwargs) | def rotate_scatter3d(data,
filename=None,
elev=30,
rotation_speed=30,
fps=10,
ax=None,
figsize=None,
dpi=None,
ipython_html="jshtml",
**kwargs) | Create a rotating 3D scatter plot
Builds upon `matplotlib.pyplot.scatter` with nice defaults
and handles categorical colors / legends better.
Parameters
----------
data : array-like, `phate.PHATE` or `scanpy.AnnData`
Input data. Only the first three dimensions are used.
filename : str, optional (default: None)
If not None, saves a .gif or .mp4 with the output
elev : float, optional (default: 30)
Elevation of viewpoint from horizontal, in degrees
rotation_speed : float, optional (default: 30)
Speed of axis rotation, in degrees per second
fps : int, optional (default: 10)
Frames per second. Increase this for a smoother animation
ax : `matplotlib.Axes` or None, optional (default: None)
axis on which to plot. If None, an axis is created
figsize : tuple, optional (default: None)
Tuple of floats for creation of new `matplotlib` figure. Only used if
`ax` is None.
dpi : number, optional (default: None)
Controls the dots per inch for the movie frames. This combined with
the figure's size in inches controls the size of the movie.
If None, defaults to rcParams["savefig.dpi"]
ipython_html : {'html5', 'jshtml'}
which html writer to use if using a Jupyter Notebook
**kwargs : keyword arguments
See :~func:`phate.plot.scatter3d`.
Returns
-------
ani : `matplotlib.animation.FuncAnimation`
animation object
Examples
--------
>>> import phate
>>> import matplotlib.pyplot as plt
>>> tree_data, tree_clusters = phate.tree.gen_dla(n_dim=100, n_branch=20,
... branch_length=100)
>>> tree_data.shape
(2000, 100)
>>> phate_operator = phate.PHATE(n_components=3, k=5, a=20, t=150)
>>> tree_phate = phate_operator.fit_transform(tree_data)
>>> tree_phate.shape
(2000, 2)
>>> phate.plot.rotate_scatter3d(tree_phate, c=tree_clusters) | 1.825353 | 2.215875 | 0.823762 |
if phate_op.graph is not None:
diff_potential = phate_op.calculate_potential()
if isinstance(phate_op.graph, graphtools.graphs.LandmarkGraph):
diff_potential = phate_op.graph.interpolate(diff_potential)
return cluster.KMeans(k, random_state=random_state).fit_predict(diff_potential)
else:
raise exceptions.NotFittedError(
"This PHATE instance is not fitted yet. Call "
"'fit' with appropriate arguments before "
"using this method.") | def kmeans(phate_op, k=8, random_state=None) | KMeans on the PHATE potential
Clustering on the PHATE operator as introduced in Moon et al.
This is similar to spectral clustering.
Parameters
----------
phate_op : phate.PHATE
Fitted PHATE operator
k : int, optional (default: 8)
Number of clusters
random_state : int or None, optional (default: None)
Random seed for k-means
Returns
-------
clusters : np.ndarray
Integer array of cluster assignments | 3.659527 | 3.765407 | 0.971881 |
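A hedged usage sketch for the `kmeans` helper above; `data` is a hypothetical (n_samples, n_features) array and the PHATE operator is assumed to be fitted first.

```python
import phate

phate_op = phate.PHATE().fit(data)                # fit PHATE on the hypothetical `data`
clusters = kmeans(phate_op, k=6, random_state=42)
print(clusters.shape)                             # one integer label per sample
```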
tasklogger.log_debug("Performing classic MDS on {} of shape {}...".format(
type(D).__name__, D.shape))
D = D**2
D = D - D.mean(axis=0)[None, :]
D = D - D.mean(axis=1)[:, None]
pca = PCA(n_components=ndim, svd_solver='randomized')
Y = pca.fit_transform(D)
return Y | def cmdscale_fast(D, ndim) | Fast CMDS using random SVD
Parameters
----------
D : array-like, input data [n_samples, n_dimensions]
ndim : int, number of dimensions in which to embed `D`
Returns
-------
Y : array-like, embedded data [n_sample, ndim] | 3.726873 | 3.805647 | 0.979301 |
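A small sketch of `cmdscale_fast` on a random pairwise-distance matrix, assuming numpy and scipy are available alongside the function above.

```python
import numpy as np
from scipy.spatial.distance import pdist, squareform

X = np.random.normal(size=(100, 10))
D = squareform(pdist(X))        # symmetric (100, 100) distance matrix
Y = cmdscale_fast(D, ndim=2)
print(Y.shape)                  # (100, 2)
```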
if how not in ['classic', 'metric', 'nonmetric']:
raise ValueError("Allowable 'how' values for MDS: 'classic', "
"'metric', or 'nonmetric'. "
"'{}' was passed.".format(how))
# MDS embeddings, each gives a different output.
X_dist = squareform(pdist(X, distance_metric))
# initialize all by CMDS
Y = cmdscale_fast(X_dist, ndim)
if how in ['metric', 'nonmetric']:
tasklogger.log_debug("Performing metric MDS on "
"{} of shape {}...".format(type(X_dist),
X_dist.shape))
# Metric MDS from sklearn
Y, _ = smacof(X_dist, n_components=ndim, metric=True, max_iter=3000,
eps=1e-6, random_state=seed, n_jobs=n_jobs,
n_init=1, init=Y, verbose=verbose)
if how == 'nonmetric':
tasklogger.log_debug(
"Performing non-metric MDS on "
"{} of shape {}...".format(type(X_dist),
X_dist.shape))
# Nonmetric MDS from sklearn using metric MDS as an initialization
Y, _ = smacof(X_dist, n_components=ndim, metric=True, max_iter=3000,
eps=1e-6, random_state=seed, n_jobs=n_jobs,
n_init=1, init=Y, verbose=verbose)
return Y | def embed_MDS(X, ndim=2, how='metric', distance_metric='euclidean',
n_jobs=1, seed=None, verbose=0) | Performs classic, metric, and non-metric MDS
Metric MDS is initialized using classic MDS,
non-metric MDS is initialized using metric MDS.
Parameters
----------
X: ndarray [n_samples, n_samples]
2 dimensional input data array with n_samples
embed_MDS does not check for matrix squareness,
but this is necessary for PHATE
n_dim : int, optional, default: 2
number of dimensions in which the data will be embedded
how : string, optional, default: 'classic'
choose from ['classic', 'metric', 'nonmetric']
which MDS algorithm is used for dimensionality reduction
distance_metric : string, optional, default: 'euclidean'
choose from ['cosine', 'euclidean']
distance metric for MDS
n_jobs : integer, optional, default: 1
The number of jobs to use for the computation.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging.
For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for
n_jobs = -2, all CPUs but one are used
seed: integer or numpy.RandomState, optional
The generator used to initialize SMACOF (metric, nonmetric) MDS
If an integer is given, it fixes the seed
Defaults to the global numpy random number generator
Returns
-------
Y : ndarray [n_samples, n_dim]
low dimensional embedding of X using MDS | 3.208234 | 3.231682 | 0.992744 |
_, eigenvalues, _ = svd(data)
entropy = []
eigenvalues_t = np.copy(eigenvalues)
for _ in range(t_max):
prob = eigenvalues_t / np.sum(eigenvalues_t)
prob = prob + np.finfo(float).eps
entropy.append(-np.sum(prob * np.log(prob)))
eigenvalues_t = eigenvalues_t * eigenvalues
entropy = np.array(entropy)
return np.array(entropy) | def compute_von_neumann_entropy(data, t_max=100) | Determines the Von Neumann entropy of data
at varying matrix powers. The user should select a value of t
around the "knee" of the entropy curve.
Parameters
----------
t_max : int, default: 100
Maximum value of t to test
Returns
-------
entropy : array, shape=[t_max]
The entropy of the diffusion affinities for each value of t
Examples
--------
>>> import numpy as np
>>> import phate
>>> X = np.eye(10)
>>> X[0,0] = 5
>>> X[3,2] = 4
>>> h = phate.vne.compute_von_neumann_entropy(X)
>>> phate.vne.find_knee_point(h)
23 | 2.652746 | 3.511005 | 0.755552 |
try:
y.shape
except AttributeError:
y = np.array(y)
if len(y) < 3:
raise ValueError("Cannot find knee point on vector of length 3")
elif len(y.shape) > 1:
raise ValueError("y must be 1-dimensional")
if x is None:
x = np.arange(len(y))
else:
try:
x.shape
except AttributeError:
x = np.array(x)
if not x.shape == y.shape:
raise ValueError("x and y must be the same shape")
else:
# ensure x is sorted float
idx = np.argsort(x)
x = x[idx]
y = y[idx]
n = np.arange(2, len(y) + 1).astype(np.float32)
# figure out the m and b (in the y=mx+b sense) for the "left-of-knee"
sigma_xy = np.cumsum(x * y)[1:]
sigma_x = np.cumsum(x)[1:]
sigma_y = np.cumsum(y)[1:]
sigma_xx = np.cumsum(x * x)[1:]
det = (n * sigma_xx - sigma_x * sigma_x)
mfwd = (n * sigma_xy - sigma_x * sigma_y) / det
bfwd = -(sigma_x * sigma_xy - sigma_xx * sigma_y) / det
# figure out the m and b (in the y=mx+b sense) for the "right-of-knee"
sigma_xy = np.cumsum(x[::-1] * y[::-1])[1:]
sigma_x = np.cumsum(x[::-1])[1:]
sigma_y = np.cumsum(y[::-1])[1:]
sigma_xx = np.cumsum(x[::-1] * x[::-1])[1:]
det = (n * sigma_xx - sigma_x * sigma_x)
mbck = ((n * sigma_xy - sigma_x * sigma_y) / det)[::-1]
bbck = (-(sigma_x * sigma_xy - sigma_xx * sigma_y) / det)[::-1]
# figure out the sum of per-point errors for left- and right- of-knee fits
error_curve = np.full_like(y, np.float('nan'))
for breakpt in np.arange(1, len(y) - 1):
delsfwd = (mfwd[breakpt - 1] * x[:breakpt + 1] +
bfwd[breakpt - 1]) - y[:breakpt + 1]
delsbck = (mbck[breakpt - 1] * x[breakpt:] +
bbck[breakpt - 1]) - y[breakpt:]
error_curve[breakpt] = np.sum(np.abs(delsfwd)) + \
np.sum(np.abs(delsbck))
# find location of the min of the error curve
loc = np.argmin(error_curve[1:-1]) + 1
knee_point = x[loc]
return knee_point | def find_knee_point(y, x=None) | Returns the x-location of a (single) knee of curve y=f(x)
Parameters
----------
y : array, shape=[n]
data for which to find the knee point
x : array, optional, shape=[n], default=np.arange(len(y))
indices of the data points of y,
if these are not in order and evenly spaced
Returns
-------
knee_point : int
The index (or x value) of the knee point on y
Examples
--------
>>> import numpy as np
>>> import phate
>>> x = np.arange(20)
>>> y = np.exp(-x/10)
>>> phate.vne.find_knee_point(y,x)
8 | 2.230846 | 2.285104 | 0.976256 |
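A brute-force sketch of the same idea (assumed equivalent in spirit, not the library's implementation): least-squares-fit a line to each side of every candidate breakpoint and keep the split with the smallest summed absolute residuals.

import numpy as np

def knee_bruteforce(y, x=None):
    # hypothetical reference implementation, for comparison only
    y = np.asarray(y, dtype=float)
    x = np.arange(len(y), dtype=float) if x is None else np.asarray(x, dtype=float)
    errors = np.full(len(y), np.inf)
    for b in range(1, len(y) - 1):
        err = 0.0
        for seg in (slice(None, b + 1), slice(b, None)):
            m, c = np.polyfit(x[seg], y[seg], 1)            # least-squares line on the segment
            err += np.sum(np.abs(m * x[seg] + c - y[seg]))  # absolute residuals, as above
        errors[b] = err
    return x[np.argmin(errors[1:-1]) + 1]

x = np.arange(20); y = np.exp(-x / 10)
print(knee_bruteforce(y, x))   # expected to agree with find_knee_point(y, x), i.e. 8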
for p in params:
if not isinstance(params[p], numbers.Number) or params[p] <= 0:
raise ValueError(
"Expected {} > 0, got {}".format(p, params[p])) | def check_positive(**params) | Check that parameters are positive as expected
Raises
------
ValueError : unacceptable choice of parameters | 2.568053 | 3.027161 | 0.848337 |
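A usage sketch (argument names are illustrative; the import path is assumed to be phate.utils, as used by _check_params further down):

from phate import utils
utils.check_positive(knn=5, decay=40.0)   # passes silently
utils.check_positive(knn=0)               # raises ValueError: Expected knn > 0, got 0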
for p in params:
if not isinstance(params[p], numbers.Integral):
raise ValueError(
"Expected {} integer, got {}".format(p, params[p])) | def check_int(**params) | Check that parameters are integers as expected
Raises
------
ValueError : unacceptable choice of parameters | 3.716935 | 4.035227 | 0.921122 |
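Similarly for integer checks (illustrative names, assumed phate.utils import path):

from phate import utils
utils.check_int(n_jobs=1, t=10)   # passes silently
utils.check_int(t=10.5)           # raises ValueError: Expected t integer, got 10.5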
for p in params:
if params[p] is not x and params[p] != x:
[check(**{p: params[p]}) for check in checks] | def check_if_not(x, *checks, **params) | Run checks only if parameters are not equal to a specified value
Parameters
----------
x : excepted value
Checks not run if parameters equal x
checks : function
Unnamed arguments, check functions to be run
params : object
Named arguments, parameters to be checked
Raises
------
ValueError : unacceptable choice of parameters | 5.241136 | 4.973336 | 1.053847 |
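A usage sketch mirroring how _check_params below skips validation for a None sentinel (assumed phate.utils import path):

from phate import utils
utils.check_if_not(None, utils.check_positive, utils.check_int, n_landmark=None)   # no checks run
utils.check_if_not(None, utils.check_positive, utils.check_int, n_landmark=2000)   # both checks run and pass
utils.check_if_not(None, utils.check_positive, utils.check_int, n_landmark=-5)     # raises ValueError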
for p in params:
if params[p] not in choices:
raise ValueError(
"{} value {} not recognized. Choose from {}".format(
p, params[p], choices)) | def check_in(choices, **params) | Checks parameters are in a list of allowed parameters
Parameters
----------
choices : array-like, accepted values
params : object
Named arguments, parameters to be checked
Raises
------
ValueError : unacceptable choice of parameters | 3.709836 | 4.045786 | 0.916963 |
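A usage sketch (choices and names are illustrative, assumed phate.utils import path):

from phate import utils
utils.check_in(['classic', 'metric', 'nonmetric'], mds='metric')   # passes
utils.check_in(['classic', 'metric', 'nonmetric'], mds='mmds')     # raises ValueError listing the choices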
for p in params:
if params[p] < v_min or params[p] > v_max:
raise ValueError("Expected {} between {} and {}, "
"got {}".format(p, v_min, v_max, params[p])) | def check_between(v_min, v_max, **params) | Checks parameters are in a specified range
Parameters
----------
v_min : float, minimum allowed value (inclusive)
v_max : float, maximum allowed value (inclusive)
params : object
Named arguments, parameters to be checked
Raises
------
ValueError : unacceptable choice of parameters | 2.366982 | 2.626124 | 0.901321 |
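A usage sketch (bounds and names are illustrative, assumed phate.utils import path):

from phate import utils
utils.check_between(-1, 1, gamma=0.5)   # passes: -1 <= 0.5 <= 1
utils.check_between(-1, 1, gamma=2.0)   # raises ValueError: Expected gamma between -1 and 1, got 2.0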
return X is Y or (isinstance(X, Y.__class__) and X.shape == Y.shape and
np.sum((X != Y).sum()) == 0) | def matrix_is_equivalent(X, Y) | Checks matrix equivalence with numpy, scipy and pandas | 4.845682 | 5.117363 | 0.94691 |
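A usage sketch with dense NumPy inputs (assumed phate.utils import path):

import numpy as np
from phate import utils

A = np.arange(6).reshape(3, 2)
print(utils.matrix_is_equivalent(A, A))          # True: same object
print(utils.matrix_is_equivalent(A, A.copy()))   # True: same class, shape and entries
print(utils.matrix_is_equivalent(A, A + 1))      # False: entries differ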
if self.graph is not None:
if isinstance(self.graph, graphtools.graphs.LandmarkGraph):
diff_op = self.graph.landmark_op
else:
diff_op = self.graph.diff_op
if sparse.issparse(diff_op):
diff_op = diff_op.toarray()
return diff_op
else:
raise NotFittedError("This PHATE instance is not fitted yet. Call "
"'fit' with appropriate arguments before "
"using this method.") | def diff_op(self) | The diffusion operator calculated from the data | 3.310131 | 3.224094 | 1.026685 |
utils.check_positive(n_components=self.n_components,
k=self.knn)
utils.check_int(n_components=self.n_components,
k=self.knn,
n_jobs=self.n_jobs)
utils.check_between(-1, 1, gamma=self.gamma)
utils.check_if_not(None, utils.check_positive, a=self.decay)
utils.check_if_not(None, utils.check_positive, utils.check_int,
n_landmark=self.n_landmark,
n_pca=self.n_pca)
utils.check_if_not('auto', utils.check_positive, utils.check_int,
t=self.t)
if not callable(self.knn_dist):
utils.check_in(['euclidean', 'precomputed', 'cosine',
'correlation', 'cityblock', 'l1', 'l2',
'manhattan', 'braycurtis', 'canberra',
'chebyshev', 'dice', 'hamming', 'jaccard',
'kulsinski', 'mahalanobis', 'matching',
'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule',
'precomputed_affinity', 'precomputed_distance'],
knn_dist=self.knn_dist)
if not callable(self.mds_dist):
utils.check_in(['euclidean', 'cosine', 'correlation', 'braycurtis',
'canberra', 'chebyshev', 'cityblock', 'dice',
'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule'],
mds_dist=self.mds_dist)
utils.check_in(['classic', 'metric', 'nonmetric'],
mds=self.mds) | def _check_params(self) | Check PHATE parameters
This allows us to fail early - otherwise certain unacceptable
parameter choices, such as mds='mmds', would only fail after
minutes of runtime.
Raises
------
ValueError : unacceptable choice of parameters | 2.130252 | 2.080254 | 1.024035 |
X, n_pca, precomputed, update_graph = self._parse_input(X)
if precomputed is None:
tasklogger.log_info(
"Running PHATE on {} cells and {} genes.".format(
X.shape[0], X.shape[1]))
else:
tasklogger.log_info(
"Running PHATE on precomputed {} matrix with {} cells.".format(
precomputed, X.shape[0]))
if self.n_landmark is None or X.shape[0] <= self.n_landmark:
n_landmark = None
else:
n_landmark = self.n_landmark
if self.graph is not None and update_graph:
self._update_graph(X, precomputed, n_pca, n_landmark)
self.X = X
if self.graph is None:
tasklogger.log_start("graph and diffusion operator")
self.graph = graphtools.Graph(
X,
n_pca=n_pca,
n_landmark=n_landmark,
distance=self.knn_dist,
precomputed=precomputed,
knn=self.knn,
decay=self.decay,
thresh=1e-4,
n_jobs=self.n_jobs,
verbose=self.verbose,
random_state=self.random_state,
**(self.kwargs))
tasklogger.log_complete("graph and diffusion operator")
# landmark op doesn't build unless forced
self.diff_op
return self | def fit(self, X) | Computes the diffusion operator
Parameters
----------
X : array, shape=[n_samples, n_features]
input data with `n_samples` samples and `n_dimensions`
dimensions. Accepted data types: `numpy.ndarray`,
`scipy.sparse.spmatrix`, `pd.DataFrame`, `anndata.AnnData`. If
`knn_dist` is 'precomputed', `data` should be a n_samples x
n_samples distance or affinity matrix
Returns
-------
phate_operator : PHATE
The estimator object | 3.679681 | 3.579739 | 1.027919 |
if self.graph is None:
raise NotFittedError("This PHATE instance is not fitted yet. Call "
"'fit' with appropriate arguments before "
"using this method.")
elif X is not None and not utils.matrix_is_equivalent(X, self.X):
# fit to external data
warnings.warn("Pre-fit PHATE cannot be used to transform a "
"new data matrix. Please fit PHATE to the new"
" data by running 'fit' with the new data.",
RuntimeWarning)
if isinstance(self.graph, graphtools.graphs.TraditionalGraph) and \
self.graph.precomputed is not None:
raise ValueError("Cannot transform additional data using a "
"precomputed distance matrix.")
else:
transitions = self.graph.extend_to_data(X)
return self.graph.interpolate(self.embedding,
transitions)
else:
diff_potential = self.calculate_potential(
t_max=t_max, plot_optimal_t=plot_optimal_t, ax=ax)
if self.embedding is None:
tasklogger.log_start("{} MDS".format(self.mds))
self.embedding = mds.embed_MDS(
diff_potential, ndim=self.n_components, how=self.mds,
distance_metric=self.mds_dist, n_jobs=self.n_jobs,
seed=self.random_state, verbose=max(self.verbose - 1, 0))
tasklogger.log_complete("{} MDS".format(self.mds))
if isinstance(self.graph, graphtools.graphs.LandmarkGraph):
tasklogger.log_debug("Extending to original data...")
return self.graph.interpolate(self.embedding)
else:
return self.embedding | def transform(self, X=None, t_max=100, plot_optimal_t=False, ax=None) | Computes the position of the cells in the embedding space
Parameters
----------
X : array, optional, shape=[n_samples, n_features]
input data with `n_samples` samples and `n_dimensions`
dimensions. Not required, since PHATE does not currently embed
cells not given in the input matrix to `PHATE.fit()`.
Accepted data types: `numpy.ndarray`,
`scipy.sparse.spmatrix`, `pd.DataFrame`, `anndata.AnnData`. If
`knn_dist` is 'precomputed', `data` should be a n_samples x
n_samples distance or affinity matrix
t_max : int, optional, default: 100
maximum t to test if `t` is set to 'auto'
plot_optimal_t : boolean, optional, default: False
If true and `t` is set to 'auto', plot the Von Neumann
entropy used to select t
ax : matplotlib.axes.Axes, optional
If given and `plot_optimal_t` is true, plot will be drawn
on the given axis.
Returns
-------
embedding : array, shape=[n_samples, n_dimensions]
The cells embedded in a lower dimensional space using PHATE | 4.475434 | 4.333272 | 1.032807 |
tasklogger.log_start('PHATE')
self.fit(X)
embedding = self.transform(**kwargs)
tasklogger.log_complete('PHATE')
return embedding | def fit_transform(self, X, **kwargs) | Computes the diffusion operator and the position of the cells in the
embedding space
Parameters
----------
X : array, shape=[n_samples, n_features]
input data with `n_samples` samples and `n_dimensions`
dimensions. Accepted data types: `numpy.ndarray`,
`scipy.sparse.spmatrix`, `pd.DataFrame`, `anndata.AnnData` If
`knn_dist` is 'precomputed', `data` should be a n_samples x
n_samples distance or affinity matrix
kwargs : further arguments for `PHATE.transform()`
Keyword arguments as specified in :func:`~phate.PHATE.transform`
Returns
-------
embedding : array, shape=[n_samples, n_dimensions]
The cells embedded in a lower dimensional space using PHATE | 9.178271 | 7.331231 | 1.251941 |
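A usage sketch of the full pipeline on synthetic data (parameters are illustrative):

import numpy as np
import phate

data = np.random.normal(size=(200, 100))
op = phate.PHATE(n_components=2, knn=5, t='auto', verbose=0)
embedding = op.fit_transform(data)
print(embedding.shape)   # (200, 2)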
if t is None:
t = self.t
if self.diff_potential is None:
if t == 'auto':
t = self.optimal_t(t_max=t_max, plot=plot_optimal_t, ax=ax)
else:
t = self.t
tasklogger.log_start("diffusion potential")
# diffused diffusion operator
diff_op_t = np.linalg.matrix_power(self.diff_op, t)
if self.gamma == 1:
# handling small values
diff_op_t = diff_op_t + 1e-7
self.diff_potential = -1 * np.log(diff_op_t)
elif self.gamma == -1:
self.diff_potential = diff_op_t
else:
c = (1 - self.gamma) / 2
self.diff_potential = ((diff_op_t)**c) / c
tasklogger.log_complete("diffusion potential")
elif plot_optimal_t:
self.optimal_t(t_max=t_max, plot=plot_optimal_t, ax=ax)
return self.diff_potential | def calculate_potential(self, t=None,
t_max=100, plot_optimal_t=False, ax=None) | Calculates the diffusion potential
Parameters
----------
t : int
power to which the diffusion operator is powered
sets the level of diffusion
t_max : int, default: 100
Maximum value of `t` to test
plot_optimal_t : boolean, default: False
If true, plots the Von Neumann Entropy and knee point
ax : matplotlib.Axes, default: None
If plot=True and ax is not None, plots the VNE on the given axis
Otherwise, creates a new axis and displays the plot
Returns
-------
diff_potential : array-like, shape=[n_samples, n_samples]
The diffusion potential fit on the input data | 3.075042 | 3.03757 | 1.012336 |
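A standalone sketch of the potential transform applied to an already-powered operator, mirroring the three gamma branches above (a sketch, not the library's API):

import numpy as np

def potential_from_powered_operator(diff_op_t, gamma):
    # diff_op_t is assumed dense and row-stochastic, with entries in [0, 1]
    if gamma == 1:
        return -np.log(diff_op_t + 1e-7)   # log-potential, guarded against log(0)
    if gamma == -1:
        return diff_op_t                   # raw transition probabilities
    c = (1 - gamma) / 2
    return diff_op_t ** c / c              # power-transform interpolation between the two cases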
t = np.arange(t_max)
return t, vne.compute_von_neumann_entropy(self.diff_op, t_max=t_max) | def von_neumann_entropy(self, t_max=100) | Calculate Von Neumann Entropy
Determines the Von Neumann entropy of the diffusion affinities
at varying levels of `t`. The user should select a value of `t`
around the "knee" of the entropy curve.
We require that 'fit' stores the value of `PHATE.diff_op`
in order to calculate the Von Neumann entropy.
Parameters
----------
t_max : int, default: 100
Maximum value of `t` to test
Returns
-------
entropy : array, shape=[t_max]
The entropy of the diffusion affinities for each value of `t` | 5.151746 | 4.661196 | 1.105241 |
tasklogger.log_start("optimal t")
t, h = self.von_neumann_entropy(t_max=t_max)
t_opt = vne.find_knee_point(y=h, x=t)
tasklogger.log_info("Automatically selected t = {}".format(t_opt))
tasklogger.log_complete("optimal t")
if plot:
if ax is None:
fig, ax = plt.subplots()
show = True
else:
show = False
ax.plot(t, h)
ax.scatter(t_opt, h[t == t_opt], marker='*', c='k', s=50)
ax.set_xlabel("t")
ax.set_ylabel("Von Neumann Entropy")
ax.set_title("Optimal t = {}".format(t_opt))
if show:
plt.show()
return t_opt | def optimal_t(self, t_max=100, plot=False, ax=None) | Find the optimal value of t
Selects the optimal value of t based on the knee point of the
Von Neumann Entropy of the diffusion operator.
Parameters
----------
t_max : int, default: 100
Maximum value of t to test
plot : boolean, default: False
If true, plots the Von Neumann Entropy and knee point
ax : matplotlib.Axes, default: None
If plot=True and ax is not None, plots the VNE on the given axis
Otherwise, creates a new axis and displays the plot
Returns
-------
t_opt : int
The optimal value of t | 3.002853 | 2.607102 | 1.151797 |
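A usage sketch (assumes a fitted phate.PHATE estimator as above):

import numpy as np
import phate

op = phate.PHATE(verbose=0).fit(np.random.normal(size=(150, 60)))
print(op.optimal_t(t_max=100, plot=False))   # knee of the Von Neumann entropy curve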
t = Default()
for question_type, settings in dict_theme.items():
if question_type not in vars(t):
raise ThemeError('Error while parsing theme. Question type '
'`{}` not found or not customizable.'
.format(question_type))
# calculating fields of namedtuple, hence the filtering
question_fields = list(filter(lambda x: not x.startswith('_'),
vars(getattr(t, question_type))))
for field, value in settings.items():
if field not in question_fields:
raise ThemeError('Error while parsing theme. Field '
'`{}` invalid for question type `{}`'
.format(field, question_type))
actual_value = getattr(term, value) or value
setattr(getattr(t, question_type), field, actual_value)
return t | def load_theme_from_dict(dict_theme) | Load a theme from a dict.
Expected format:
{
"Question": {
"mark_color": "yellow",
"brackets_color": "normal",
...
},
"List": {
"selection_color": "bold_blue",
"selection_cursor": "->"
}
}
Color values should be strings representing valid blessings.Terminal colors. | 4.396076 | 4.245023 | 1.035584 |
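A usage sketch with the dict format from the docstring (assuming the loader is exposed from inquirer.themes):

from inquirer.themes import load_theme_from_dict   # assumed import location

theme = load_theme_from_dict({
    "Question": {"mark_color": "yellow", "brackets_color": "normal"},
    "List": {"selection_color": "bold_blue", "selection_cursor": "->"},
})
# An unknown question type or an invalid field raises ThemeError.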
data = json.loads(question_json)
if isinstance(data, list):
return load_from_list(data)
if isinstance(data, dict):
return load_from_dict(data)
raise TypeError(
'Json contained a %s variable when a dict or list was expected'
% type(data)) | def load_from_json(question_json) | Load Questions from a JSON string.
:return: A list of Question objects with associated data if the JSON
contains a list or a Question if the JSON contains a dict.
:return type: List or Dict | 3.556481 | 3.407125 | 1.043837 |
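A usage sketch (the question fields shown are illustrative of the inquirer JSON format; the import location is assumed):

import inquirer   # assumed to expose load_from_json at package level

questions = inquirer.load_from_json(
    '[{"kind": "text", "name": "user", "message": "What is your name?"}]'
)
# A JSON object (dict) returns a single Question; any other top-level type raises TypeError.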
return bin(reduce(lambda x, y: (x << 8) + y, (ord(c) for c in chars), 1))[3:] | def a2bits(chars: str) -> str | Converts a string to its bits representation as a string of 0's and 1's.
>>> a2bits("Hello World!")
'010010000110010101101100011011000110111100100000010101110110111101110010011011000110010000100001' | 5.242221 | 6.711741 | 0.781052 |
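A round-trip sketch: the leading 1 seeded into reduce becomes the '0b1' prefix that the [3:] slice strips, so leading zero bits of the first character are preserved; the decoder below is illustrative and not part of the source.

def bits2a(bits: str) -> str:
    # split the 0/1 string into 8-bit groups and map each back to a character
    return "".join(chr(int(bits[i:i + 8], 2)) for i in range(0, len(bits), 8))

assert bits2a(a2bits("Hello World!")) == "Hello World!"   # assumes a2bits from above is in scope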
return [bin(ord(x))[2:].rjust(ENCODINGS[encoding], "0") for x in chars] | def a2bits_list(chars: str, encoding: str = "UTF-8") -> List[str] | Convert a string to its bits representation as a list of 0's and 1's.
>>> a2bits_list("Hello World!")
['01001000',
'01100101',
'01101100',
'01101100',
'01101111',
'00100000',
'01010111',
'01101111',
'01110010',
'01101100',
'01100100',
'00100001']
>>> "".join(a2bits_list("Hello World!"))
'010010000110010101101100011011000110111100100000010101110110111101110010011011000110010000100001' | 4.582512 | 5.861503 | 0.781798 |
return str(s) if s <= 1 else bs(s >> 1) + str(s & 1) | def bs(s: int) -> str | Converts an int to its bits representation as a string of 0's and 1's. | 3.669572 | 2.617251 | 1.402071 |