Dataset schema (each record below repeats these fields in this order):
  index      int64   -- values 0 to 731k
  package    string  -- 2 to 98 characters
  name       string  -- 1 to 76 characters
  docstring  string  -- 0 to 281k characters
  code       string  -- 4 to 1.07M characters
  signature  string  -- 2 to 42.8k characters
36,728
pytest_cache
_getvaluepath
null
def _getvaluepath(self, key): p = self._getpath("v/" + key) p.dirpath().ensure(dir=1) return p
(self, key)
36,729
pytest_cache
get
return cached value for the given key. If no value was yet cached or the value cannot be read, the specified default is returned. :param key: must be a ``/`` separated value. Usually the first name is the name of your plugin or your application. :param default: must be provided in case of a cache-miss or invalid cache values.
def get(self, key, default): """ return cached value for the given key. If no value was yet cached or the value cannot be read, the specified default is returned. :param key: must be a ``/`` separated value. Usually the first name is the name of your plugin or your application. :param default: must be provided in case of a cache-miss or invalid cache values. """ from execnet import loads, DataFormatError path = self._getvaluepath(key) if path.check(): f = path.open("rb") try: try: return loads(f.read()) finally: f.close() except DataFormatError: self.trace("cache-invalid at %s" % (key,)) return default
(self, key, default)
36,730
pytest_cache
makedir
return a directory path object with the given name. If the directory does not yet exist, it will be created. You can use it to manage files, e.g. to store/retrieve database dumps across test sessions. :param name: must be a string not containing a ``/`` separator. Make sure the name contains your plugin or application identifiers to prevent clashes with other cache users.
def makedir(self, name): """ return a directory path object with the given name. If the directory does not yet exist, it will be created. You can use it to manage files likes e. g. store/retrieve database dumps across test sessions. :param name: must be a string not containing a ``/`` separator. Make sure the name contains your plugin or application identifiers to prevent clashes with other cache users. """ if name.count("/") != 0: raise ValueError("name is not allowed to contain '/'") p = self._cachedir.join("d/" + name) p.ensure(dir=1) return p
(self, name)
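A brief usage sketch of the makedir API described above, assuming a pytest run with this cache plugin active; the hook body and the names "myplugin_dumps" and "db.dump" are illustrative only. makedir returns a py.path.local object, so the usual join/check/write calls apply.

def pytest_sessionstart(session):
    # Hypothetical hook in a user plugin: obtain (or create) a cache-backed
    # directory named after the plugin and keep a dump file inside it.
    dumps = session.config.cache.makedir("myplugin_dumps")
    dump_file = dumps.join("db.dump")
    if not dump_file.check():
        dump_file.write("initial dump")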
36,731
pytest_cache
set
save value for the given key. :param key: must be a ``/`` separated value. Usually the first name is the name of your plugin or your application. :param value: must be any combination of basic Python types, including nested types such as lists of dictionaries.
def set(self, key, value): """ save value for the given key. :param key: must be a ``/`` separated value. Usually the first name is the name of your plugin or your application. :param value: must be of any combination of basic python types, including nested types like e. g. lists of dictionaries. """ from execnet import dumps, DataFormatError path = self._getvaluepath(key) f = path.open("wb") try: try: self.trace("cache-write %s: %r" % (key, value,)) return f.write(dumps(value)) finally: f.close() except DataFormatError: raise ValueError("cannot serialize a builtin python type")
(self, key, value)
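A minimal sketch of the get/set pair documented above, assuming the same plugin context; the key "myplugin/run_count" is illustrative. Keys are ``/``-separated and values must be basic Python types.

def pytest_configure(config):
    # Hypothetical plugin hook: read the previously cached value (or the
    # default 0), then store the updated counter for the next run.
    runs = config.cache.get("myplugin/run_count", 0)
    config.cache.set("myplugin/run_count", runs + 1)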
36,732
pytest_cache
LFPlugin
Plugin which implements the --lf (run last-failing) option
class LFPlugin: """ Plugin which implements the --lf (run last-failing) option """ def __init__(self, config): self.config = config self.active = config.getvalue("lf") or config.getvalue("failedfirst") if self.active: self.lastfailed = config.cache.get("cache/lastfailed", set()) else: self.lastfailed = set() def pytest_report_header(self): if self.active: if not self.lastfailed: mode = "run all (no recorded failures)" else: mode = "rerun last %d failures%s" % ( len(self.lastfailed), " first" if self.config.getvalue("failedfirst") else "") return "run-last-failure: %s" % mode def pytest_runtest_logreport(self, report): if report.failed and "xfail" not in report.keywords: self.lastfailed.add(report.nodeid) elif not report.failed: if report.when == "call": try: self.lastfailed.remove(report.nodeid) except KeyError: pass def pytest_collection_modifyitems(self, session, config, items): if self.active and self.lastfailed: previously_failed = [] previously_passed = [] for item in items: if item.nodeid in self.lastfailed: previously_failed.append(item) else: previously_passed.append(item) if self.config.getvalue("failedfirst"): items[:] = previously_failed + previously_passed else: items[:] = previously_failed config.hook.pytest_deselected(items=previously_passed) def pytest_sessionfinish(self, session): config = self.config if config.getvalue("showcache") or hasattr(config, "slaveinput"): return config.cache.set("cache/lastfailed", self.lastfailed)
(config)
36,733
pytest_cache
__init__
null
def __init__(self, config): self.config = config self.active = config.getvalue("lf") or config.getvalue("failedfirst") if self.active: self.lastfailed = config.cache.get("cache/lastfailed", set()) else: self.lastfailed = set()
(self, config)
36,734
pytest_cache
pytest_collection_modifyitems
null
def pytest_collection_modifyitems(self, session, config, items): if self.active and self.lastfailed: previously_failed = [] previously_passed = [] for item in items: if item.nodeid in self.lastfailed: previously_failed.append(item) else: previously_passed.append(item) if self.config.getvalue("failedfirst"): items[:] = previously_failed + previously_passed else: items[:] = previously_failed config.hook.pytest_deselected(items=previously_passed)
(self, session, config, items)
36,735
pytest_cache
pytest_report_header
null
def pytest_report_header(self): if self.active: if not self.lastfailed: mode = "run all (no recorded failures)" else: mode = "rerun last %d failures%s" % ( len(self.lastfailed), " first" if self.config.getvalue("failedfirst") else "") return "run-last-failure: %s" % mode
(self)
36,736
pytest_cache
pytest_runtest_logreport
null
def pytest_runtest_logreport(self, report): if report.failed and "xfail" not in report.keywords: self.lastfailed.add(report.nodeid) elif not report.failed: if report.when == "call": try: self.lastfailed.remove(report.nodeid) except KeyError: pass
(self, report)
36,737
pytest_cache
pytest_sessionfinish
null
def pytest_sessionfinish(self, session): config = self.config if config.getvalue("showcache") or hasattr(config, "slaveinput"): return config.cache.set("cache/lastfailed", self.lastfailed)
(self, session)
36,738
pytest_cache
getroot
null
def getroot(args, inibasenames): args = [x for x in args if not str(x).startswith("-")] if not args: args = [py.path.local()] for arg in args: arg = py.path.local(arg) for base in arg.parts(reverse=True): for inibasename in inibasenames: p = base.join(inibasename) if p.check(): yield p
(args, inibasenames)
36,739
pytest_cache
getrootdir
return a best-effort root subdir for this test run. Starting from the files specified on the command line (or the cwd), the search proceeds upward for the first "tox.ini", "pytest.ini", "setup.cfg" or "setup.py" file. The first directory containing such a file is used to return a named subdirectory (py.path.local object).
def getrootdir(config, name): """ return a best-effort root subdir for this test run. Starting from files specified at the command line (or cwd) search starts upward for the first "tox.ini", "pytest.ini", "setup.cfg" or "setup.py" file. The first directory containing such a file will be used to return a named subdirectory (py.path.local object). """ if config.inicfg: p = py.path.local(config.inicfg.config.path).dirpath() else: inibasenames = ["setup.py", "setup.cfg", "tox.ini", "pytest.ini"] for x in getroot(config.args, inibasenames): p = x.dirpath() break else: p = py.path.local() config.trace.get("warn")("no rootdir found, using %s" % p) subdir = p.join(name) config.trace("root %s: %s" % (name, subdir)) return subdir
(config, name)
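A small sketch of the upward search described above, assuming the pytest_cache module is importable and the working directory sits somewhere below a project root containing one of the listed files; the import path is taken from the package column of these records.

import py
from pytest_cache import getroot   # assumption: module-level helper shown above

# Walk upward from the current directory and report the closest marker file.
for ini in getroot(["."], ["tox.ini", "pytest.ini", "setup.cfg", "setup.py"]):
    print("rootdir marker:", ini)
    break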
36,742
pytest_cache
pytest_addoption
null
def pytest_addoption(parser): group = parser.getgroup("general") group.addoption('--lf', action='store_true', dest="lf", help="rerun only the tests that failed at the last run (or all if none failed)") group.addoption('--ff', action='store_true', dest="failedfirst", help="run all tests but run the last failures first. This may re-order " "tests and thus lead to repeated fixture setup/teardown") group.addoption('--cache', action='store_true', dest="showcache", help="show cache contents, don't perform collection or tests") group.addoption('--clearcache', action='store_true', dest="clearcache", help="remove all cache contents at start of test run.")
(parser)
36,743
pytest_cache
pytest_cmdline_main
null
def pytest_cmdline_main(config): if config.option.showcache: from _pytest.main import wrap_session return wrap_session(config, showcache)
(config)
36,744
pytest_cache
pytest_configure
null
@pytest.mark.tryfirst def pytest_configure(config): config.cache = cache = Cache(config) config.pluginmanager.register(LFPlugin(config), "lfplugin")
(config)
36,745
pytest_cache
pytest_report_header
null
def pytest_report_header(config): if config.option.verbose: relpath = py.path.local().bestrelpath(config.cache._cachedir) return "cachedir: %s" % config.cache._cachedir
(config)
36,746
pytest_cache
showcache
null
def showcache(config, session): from pprint import pprint tw = py.io.TerminalWriter() tw.line("cachedir: " + str(config.cache._cachedir)) if not config.cache._cachedir.check(): tw.line("cache is empty") return 0 dummy = object() basedir = config.cache._cachedir vdir = basedir.join("v") tw.sep("-", "cache values") for valpath in vdir.visit(lambda x: x.check(file=1)): key = valpath.relto(vdir).replace(valpath.sep, "/") val = config.cache.get(key, dummy) if val is dummy: tw.line("%s contains unreadable content, " "will be ignored" % key) else: tw.line("%s contains:" % key) stream = py.io.TextIO() pprint(val, stream=stream) for line in stream.getvalue().splitlines(): tw.line(" " + line) ddir = basedir.join("d") if ddir.check(dir=1) and ddir.listdir(): tw.sep("-", "cache directories") for p in basedir.join("d").visit(): #if p.check(dir=1): # print("%s/" % p.relto(basedir)) if p.check(file=1): key = p.relto(basedir) tw.line("%s is a file of length %d" % ( key, p.size()))
(config, session)
36,747
lizard
AllResult
null
class AllResult(object): def __init__(self, result): self.result = list(file_info for file_info in result if file_info) self.all_fun = list(itertools.chain(*(file_info.function_list for file_info in self.result))) def function_count(self): return len(self.all_fun) or 1 def nloc_in_functions(self): return sum([f.nloc for f in self.all_fun]) or 1 def as_fileinfo(self): return FileInformation( "", sum([f.nloc for f in self.result]), self.all_fun)
(result)
36,748
lizard
__init__
null
def __init__(self, result): self.result = list(file_info for file_info in result if file_info) self.all_fun = list(itertools.chain(*(file_info.function_list for file_info in self.result)))
(self, result)
36,749
lizard
as_fileinfo
null
def as_fileinfo(self): return FileInformation( "", sum([f.nloc for f in self.result]), self.all_fun)
(self)
36,750
lizard
function_count
null
def function_count(self): return len(self.all_fun) or 1
(self)
36,751
lizard
nloc_in_functions
null
def nloc_in_functions(self): return sum([f.nloc for f in self.all_fun]) or 1
(self)
36,752
lizard_languages.clike
CLikeReader
This is the reader for C, C++ and Java.
class CLikeReader(CodeReader, CCppCommentsMixin): ''' This is the reader for C, C++ and Java. ''' ext = ["c", "cpp", "cc", "mm", "cxx", "h", "hpp"] language_names = ['cpp', 'c'] macro_pattern = re.compile(r"#\s*(\w+)\s*(.*)", re.M | re.S) def __init__(self, context): super(CLikeReader, self).__init__(context) self.parallel_states = ( CLikeStates(context), CLikeNestingStackStates(context), CppRValueRefStates(context)) def preprocess(self, tokens): tilde = False for token in tokens: if token == '~': tilde = True elif tilde: tilde = False yield "~" + token elif not token.isspace() or token == '\n': macro = self.macro_pattern.match(token) if macro: if macro.group(1) in ('if', 'ifdef', 'elif'): self.context.add_condition() elif macro.group(1) == 'include': yield "#include" yield macro.group(2) or "\"\"" for _ in macro.group(2).splitlines()[1:]: yield '\n' else: yield token
(context)
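A short sketch of how a reader such as CLikeReader is selected, assuming lizard is installed; get_reader_for and languages() appear further down in this listing, and the PythonReader expectation is an assumption based on that languages() list.

from lizard_languages import get_reader_for
from lizard_languages.clike import CLikeReader

# get_reader_for() walks languages() and returns the first reader class whose
# extensions match the filename; CLikeReader claims .c/.cpp/.h style files.
assert get_reader_for("example.cpp") is CLikeReader
print(get_reader_for("example.py").__name__)   # expected: PythonReader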
36,753
lizard_languages.code_reader
__call__
null
def __call__(self, tokens, reader): self.context = reader.context for token in tokens: for state in self.parallel_states: state(token) yield token for state in self.parallel_states: state.statemachine_before_return() self.eof()
(self, tokens, reader)
36,754
lizard_languages.clike
__init__
null
def __init__(self, context): super(CLikeReader, self).__init__(context) self.parallel_states = ( CLikeStates(context), CLikeNestingStackStates(context), CppRValueRefStates(context))
(self, context)
36,755
lizard_languages.code_reader
eof
null
def eof(self): pass
(self)
36,756
lizard_languages.code_reader
generate_tokens
null
@staticmethod def generate_tokens(source_code, addition='', token_class=None): def create_token(match): return match.group(0) if not token_class: token_class = create_token def _generate_tokens(source, add): # DO NOT put any sub groups in the regex. Good for performance _until_end = r"(?:\\\n|[^\n])*" combined_symbols = ["<<=", ">>=", "||", "&&", "===", "!==", "==", "!=", "<=", ">=", "->", "=>", "++", "--", '+=', '-=', "+", "-", '*', '/', '*=', '/=', '^=', '&=', '|=', "..."] token_pattern = re.compile( r"(?:" + r"\/\*.*?\*\/" + add + r"|(?:\d+\')+\d+" + r"|\w+" + r"|\"(?:\\.|[^\"\\])*\"" + r"|\'(?:\\.|[^\'\\])*?\'" + r"|\/\/" + _until_end + r"|\#" + r"|:=|::|\*\*" + r"|\<\s*\?(?:\s*extends\s+\w+)?\s*\>" + r"|" + r"|".join(re.escape(s) for s in combined_symbols) + r"|\\\n" + r"|\n" + r"|[^\S\n]+" + r"|.)", re.M | re.S) macro = "" for match in token_pattern.finditer(source): token = token_class(match) if macro: if "\\\n" in token or "\n" not in token: macro += token else: yield macro yield token macro = "" elif token == "#": macro = token else: yield token if macro: yield macro return _generate_tokens(source_code, addition)
(source_code, addition='', token_class=None)
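A small illustration of the tokenizer above, assuming lizard is installed; the exact token boundaries follow the regular expression shown, so treat the printed list as indicative.

from lizard_languages.code_reader import CodeReader

# generate_tokens yields identifiers, operators, strings, comments, newlines
# and runs of whitespace as separate tokens; drop the whitespace for display.
code = 'int a = 1; // counter\n'
tokens = [t for t in CodeReader.generate_tokens(code) if not t.isspace()]
print(tokens)   # expected: ['int', 'a', '=', '1', ';', '// counter']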
36,757
lizard_languages.clike
get_comment_from_token
null
@staticmethod def get_comment_from_token(token): if token.startswith("/*") or token.startswith("//"): return token[2:]
(token)
36,758
lizard_languages.clike
preprocess
null
def preprocess(self, tokens): tilde = False for token in tokens: if token == '~': tilde = True elif tilde: tilde = False yield "~" + token elif not token.isspace() or token == '\n': macro = self.macro_pattern.match(token) if macro: if macro.group(1) in ('if', 'ifdef', 'elif'): self.context.add_condition() elif macro.group(1) == 'include': yield "#include" yield macro.group(2) or "\"\"" for _ in macro.group(2).splitlines()[1:]: yield '\n' else: yield token
(self, tokens)
36,759
lizard
FileAnalyzer
null
class FileAnalyzer(object): # pylint: disable=R0903 def __init__(self, extensions): self.processors = extensions def __call__(self, filename): try: return self.analyze_source_code( filename, auto_read(filename)) except UnicodeDecodeError: sys.stderr.write("Error: doesn't support none utf encoding '%s'\n" % filename) except IOError: sys.stderr.write("Error: Fail to read source file '%s'\n" % filename) except IndexError: sys.stderr.write("Error: Fail to parse file '%s'\n" % filename) raise return FileInformation(filename, 0, []) def analyze_source_code(self, filename, code): context = FileInfoBuilder(filename) reader = (get_reader_for(filename) or CLikeReader)(context) tokens = reader.generate_tokens(code) try: for processor in self.processors: tokens = processor(tokens, reader) for _ in reader(tokens, reader): pass except RecursionError as e: sys.stderr.write("[skip] fail to process '%s' with RecursionError - %s\n" % (filename, e)) return context.fileinfo
(extensions)
36,760
lizard
__call__
null
def __call__(self, filename): try: return self.analyze_source_code( filename, auto_read(filename)) except UnicodeDecodeError: sys.stderr.write("Error: doesn't support none utf encoding '%s'\n" % filename) except IOError: sys.stderr.write("Error: Fail to read source file '%s'\n" % filename) except IndexError: sys.stderr.write("Error: Fail to parse file '%s'\n" % filename) raise return FileInformation(filename, 0, [])
(self, filename)
36,761
lizard
__init__
null
def __init__(self, extensions): self.processors = extensions
(self, extensions)
36,762
lizard
analyze_source_code
null
def analyze_source_code(self, filename, code): context = FileInfoBuilder(filename) reader = (get_reader_for(filename) or CLikeReader)(context) tokens = reader.generate_tokens(code) try: for processor in self.processors: tokens = processor(tokens, reader) for _ in reader(tokens, reader): pass except RecursionError as e: sys.stderr.write("[skip] fail to process '%s' with RecursionError - %s\n" % (filename, e)) return context.fileinfo
(self, filename, code)
36,763
lizard
FileInfoBuilder
The builder is also referred to as the "context" in the code, because each language reader uses this builder to build source file and function information, and the builder keeps the context information needed for the building.
class FileInfoBuilder(object): ''' The builder is also referred as "context" in the code, because each language readers use this builder to build source file and function information and the builder keep the context information that's needed for the building. ''' def __init__(self, filename): self.fileinfo = FileInformation(filename, 0) self.current_line = 0 self.forgive = False self.newline = True self.global_pseudo_function = FunctionInfo('*global*', filename, 0) self.current_function = self.global_pseudo_function self.stacked_functions = [] self._nesting_stack = NestingStack() def __getattr__(self, attr): # delegating to _nesting_stack return getattr(self._nesting_stack, attr) def decorate_nesting_stack(self, decorate_class): self._nesting_stack = decorate_class(self._nesting_stack) return self._nesting_stack def pop_nesting(self): nest = self._nesting_stack.pop_nesting() if isinstance(nest, FunctionInfo): endline = self.current_function.end_line self.end_of_function() self.current_function = ( self._nesting_stack.last_function or self.global_pseudo_function) self.current_function.end_line = endline def add_nloc(self, count): self.fileinfo.nloc += count self.current_function.nloc += count self.current_function.end_line = self.current_line self.newline = count > 0 def try_new_function(self, name): self.current_function = FunctionInfo( self.with_namespace(name), self.fileinfo.filename, self.current_line) self.current_function.top_nesting_level = self.current_nesting_level def confirm_new_function(self): self.start_new_function_nesting(self.current_function) self.current_function.cyclomatic_complexity = 1 def restart_new_function(self, name): self.try_new_function(name) self.confirm_new_function() def push_new_function(self, name): self.stacked_functions.append(self.current_function) self.restart_new_function(name) def add_condition(self, inc=1): self.current_function.cyclomatic_complexity += inc def add_to_long_function_name(self, app): self.current_function.add_to_long_name(app) def add_to_function_name(self, app): self.current_function.add_to_function_name(app) def parameter(self, token): self.current_function.add_parameter(token) def end_of_function(self): if not self.forgive: self.fileinfo.function_list.append(self.current_function) self.forgive = False if self.stacked_functions: self.current_function = self.stacked_functions.pop() else: self.current_function = self.global_pseudo_function
(filename)
36,764
lizard
__getattr__
null
def __getattr__(self, attr): # delegating to _nesting_stack return getattr(self._nesting_stack, attr)
(self, attr)
36,765
lizard
__init__
null
def __init__(self, filename): self.fileinfo = FileInformation(filename, 0) self.current_line = 0 self.forgive = False self.newline = True self.global_pseudo_function = FunctionInfo('*global*', filename, 0) self.current_function = self.global_pseudo_function self.stacked_functions = [] self._nesting_stack = NestingStack()
(self, filename)
36,766
lizard
add_condition
null
def add_condition(self, inc=1): self.current_function.cyclomatic_complexity += inc
(self, inc=1)
36,767
lizard
add_nloc
null
def add_nloc(self, count): self.fileinfo.nloc += count self.current_function.nloc += count self.current_function.end_line = self.current_line self.newline = count > 0
(self, count)
36,768
lizard
add_to_function_name
null
def add_to_function_name(self, app): self.current_function.add_to_function_name(app)
(self, app)
36,769
lizard
add_to_long_function_name
null
def add_to_long_function_name(self, app): self.current_function.add_to_long_name(app)
(self, app)
36,770
lizard
confirm_new_function
null
def confirm_new_function(self): self.start_new_function_nesting(self.current_function) self.current_function.cyclomatic_complexity = 1
(self)
36,771
lizard
decorate_nesting_stack
null
def decorate_nesting_stack(self, decorate_class): self._nesting_stack = decorate_class(self._nesting_stack) return self._nesting_stack
(self, decorate_class)
36,772
lizard
end_of_function
null
def end_of_function(self): if not self.forgive: self.fileinfo.function_list.append(self.current_function) self.forgive = False if self.stacked_functions: self.current_function = self.stacked_functions.pop() else: self.current_function = self.global_pseudo_function
(self)
36,773
lizard
parameter
null
def parameter(self, token): self.current_function.add_parameter(token)
(self, token)
36,774
lizard
pop_nesting
null
def pop_nesting(self): nest = self._nesting_stack.pop_nesting() if isinstance(nest, FunctionInfo): endline = self.current_function.end_line self.end_of_function() self.current_function = ( self._nesting_stack.last_function or self.global_pseudo_function) self.current_function.end_line = endline
(self)
36,775
lizard
push_new_function
null
def push_new_function(self, name): self.stacked_functions.append(self.current_function) self.restart_new_function(name)
(self, name)
36,776
lizard
restart_new_function
null
def restart_new_function(self, name): self.try_new_function(name) self.confirm_new_function()
(self, name)
36,777
lizard
try_new_function
null
def try_new_function(self, name): self.current_function = FunctionInfo( self.with_namespace(name), self.fileinfo.filename, self.current_line) self.current_function.top_nesting_level = self.current_nesting_level
(self, name)
36,778
lizard
FileInformation
null
class FileInformation(object): # pylint: disable=R0903 def __init__(self, filename, nloc, function_list=None): self.filename = filename self.nloc = nloc self.function_list = function_list or [] self.token_count = 0 average_nloc = property(lambda self: self.functions_average("nloc")) average_token_count = property( lambda self: self.functions_average("token_count")) average_cyclomatic_complexity = property( lambda self: self.functions_average("cyclomatic_complexity")) CCN = property( lambda self: sum(fun.cyclomatic_complexity for fun in self.function_list)) ND = property( # pylint: disable=C0103 lambda self: sum(fun.max_nesting_depth for fun in self.function_list)) def functions_average(self, att): summary = sum(getattr(fun, att) for fun in self.function_list) return summary / len(self.function_list) if self.function_list else 0
(filename, nloc, function_list=None)
36,779
lizard
__init__
null
def __init__(self, filename, nloc, function_list=None): self.filename = filename self.nloc = nloc self.function_list = function_list or [] self.token_count = 0
(self, filename, nloc, function_list=None)
36,780
lizard
functions_average
null
def functions_average(self, att): summary = sum(getattr(fun, att) for fun in self.function_list) return summary / len(self.function_list) if self.function_list else 0
(self, att)
36,781
lizard
FunctionInfo
null
class FunctionInfo(Nesting): # pylint: disable=R0902 def __init__(self, name, filename, start_line=0, ccn=1): self.cyclomatic_complexity = ccn self.nloc = 1 self.token_count = 1 # the first token self.name = name self.long_name = name self.start_line = start_line self.end_line = start_line self.full_parameters = [] self.filename = filename self.top_nesting_level = -1 self.fan_in = 0 self.fan_out = 0 self.general_fan_out = 0 @property def name_in_space(self): return self.name + "." @property def unqualified_name(self): ''' name without qualification like namespaces or classes. Just the bare name without '::'. ''' return self.name.split('::')[-1] location = property(lambda self: " %(name)s@%(start_line)s-%(end_line)s@%(filename)s" % self.__dict__) parameter_count = property(lambda self: len(self.full_parameters)) @property def parameters(self): matches = [re.search(r'(\w+)(\s=.*)?$', f) for f in self.full_parameters] return [m.group(1) for m in matches if m] @property def length(self): return self.end_line - self.start_line + 1 def add_to_function_name(self, app): self.name += app self.long_name += app def add_to_long_name(self, app): if self.long_name: if self.long_name[-1].isalpha() and app[0].isalpha(): self.long_name += ' ' self.long_name += app def add_parameter(self, token): self.add_to_long_name(" " + token) if not self.full_parameters: self.full_parameters.append(token) elif token == ",": self.full_parameters.append('') else: self.full_parameters[-1] += " " + token
(name, filename, start_line=0, ccn=1)
36,782
lizard
__init__
null
def __init__(self, name, filename, start_line=0, ccn=1): self.cyclomatic_complexity = ccn self.nloc = 1 self.token_count = 1 # the first token self.name = name self.long_name = name self.start_line = start_line self.end_line = start_line self.full_parameters = [] self.filename = filename self.top_nesting_level = -1 self.fan_in = 0 self.fan_out = 0 self.general_fan_out = 0
(self, name, filename, start_line=0, ccn=1)
36,783
lizard
add_parameter
null
def add_parameter(self, token): self.add_to_long_name(" " + token) if not self.full_parameters: self.full_parameters.append(token) elif token == ",": self.full_parameters.append('') else: self.full_parameters[-1] += " " + token
(self, token)
36,784
lizard
add_to_function_name
null
def add_to_function_name(self, app): self.name += app self.long_name += app
(self, app)
36,785
lizard
add_to_long_name
null
def add_to_long_name(self, app): if self.long_name: if self.long_name[-1].isalpha() and app[0].isalpha(): self.long_name += ' ' self.long_name += app
(self, app)
36,786
lizard
Namespace
null
class Namespace(Nesting): # pylint: disable=R0903 def __init__(self, name): self.name = name @property def name_in_space(self): return self.name + "::" if self.name else ''
(name)
36,787
lizard
__init__
null
def __init__(self, name): self.name = name
(self, name)
36,788
lizard
Nesting
Nesting represents one level of nesting in any programming language.
class Nesting(object): # pylint: disable=R0903 ''' Nesting represent one level of nesting in any programming language. ''' @property def name_in_space(self): return ''
()
36,789
lizard
NestingStack
null
class NestingStack(object): def __init__(self): self.nesting_stack = [] self.pending_function = None self.function_stack = [] def with_namespace(self, name): return ''.join([x.name_in_space for x in self.nesting_stack] + [name]) def add_bare_nesting(self): self.nesting_stack.append(self._create_nesting()) def add_namespace(self, token): self.pending_function = None self.nesting_stack.append(Namespace(token)) def start_new_function_nesting(self, function): self.pending_function = function def _create_nesting(self): tmp = self.pending_function self.pending_function = None if tmp: return tmp return BARE_NESTING def pop_nesting(self): self.pending_function = None if self.nesting_stack: return self.nesting_stack.pop() @property def current_nesting_level(self): return len(self.nesting_stack) @property def last_function(self): funs = [f for f in self.nesting_stack if isinstance(f, FunctionInfo)] return funs[-1] if funs else None
()
36,790
lizard
__init__
null
def __init__(self): self.nesting_stack = [] self.pending_function = None self.function_stack = []
(self)
36,791
lizard
_create_nesting
null
def _create_nesting(self): tmp = self.pending_function self.pending_function = None if tmp: return tmp return BARE_NESTING
(self)
36,792
lizard
add_bare_nesting
null
def add_bare_nesting(self): self.nesting_stack.append(self._create_nesting())
(self)
36,793
lizard
add_namespace
null
def add_namespace(self, token): self.pending_function = None self.nesting_stack.append(Namespace(token))
(self, token)
36,794
lizard
pop_nesting
null
def pop_nesting(self): self.pending_function = None if self.nesting_stack: return self.nesting_stack.pop()
(self)
36,795
lizard
start_new_function_nesting
null
def start_new_function_nesting(self, function): self.pending_function = function
(self, function)
36,796
lizard
with_namespace
null
def with_namespace(self, name): return ''.join([x.name_in_space for x in self.nesting_stack] + [name])
(self, name)
36,797
lizard
OutputScheme
Collect the schema of the data columns. Each extension can define additional data columns on the FunctionInfo structure, or even add properties to the FileInformation structure. In any extension class, define a class-level variable: FUNCTION_INFO = { 'column_name': { 'caption': 'if defined, will show the column in the result', 'average_caption': 'if defined, will add an average property to FileInformation and show it in the end result' } }
class OutputScheme(object): ''' Collect the schema of the data columns. Each extension can define some additional data columns to the FunctionInfo structure, or even add properties to the FileInformation structure. In any extension class, define a class level variable: FUNCTION_INFO = { 'column_name' : { 'caption': 'if defined, will show the column in result', 'average_caption': 'if defined, will add average function to FileInformation and show in the end result. } } ''' def __init__(self, ext): self.extensions = ext self.items = [ { 'caption': " NLOC ", 'value': "nloc", 'avg_caption': ' Avg.NLOC '}, { 'caption': " CCN ", 'value': "cyclomatic_complexity", 'avg_caption': ' AvgCCN '}, { 'caption': " token ", 'value': "token_count", 'avg_caption': ' Avg.token '}, {'caption': " PARAM ", 'value': "parameter_count"}, {'caption': " length ", 'value': "length"}, ] + [ { 'caption': caption, 'value': part, 'avg_caption': average } for caption, part, average in self._ext_member_info()] self.items.append({'caption': " location ", 'value': 'location'}) def patch_for_extensions(self): def _patch(name): setattr(FileInformation, "average_" + name, property(lambda self: self.functions_average(name))) for item in self.items: if 'avg_caption' in item: _patch(item["value"]) def any_silent(self): return any(hasattr(ex, 'silent_all_others') for ex in self.extensions) def value_columns(self): return [item['value'] for item in self.items] def _ext_member_info(self): for ext in self.extensions: if hasattr(ext, "FUNCTION_INFO"): for key in ext.FUNCTION_INFO: yield ( ext.FUNCTION_INFO[key].get("caption", None), key, ext.FUNCTION_INFO[key].get("average_caption", None)) def captions(self): caps = [item.get('caption') for item in self.items] return "".join(caption for caption in caps if caption) @staticmethod def _head(captions): return "\n".join(("=" * len(captions), captions, "-" * len(captions))) def function_info_head(self): return self._head(self.captions()) def function_info(self, fun): return ''.join( str(getattr(fun, item['value'])).rjust(len(item['caption'])) for item in self.items if item['caption']) def average_captions(self): return "".join([ e['avg_caption'] for e in self.items if e.get("avg_caption", None)]) def average_formatter(self): return "".join([ "{{module.average_{ext[value]}:{size}.1f}}" .format(ext=e, size=len(e['avg_caption'])) for e in self.items if e.get("avg_caption", None)]) def clang_warning_format(self): return ( "{f.filename}:{f.start_line}: warning: {f.name} has " + ", ".join([ "{{f.{ext[value]}}} {caption}" .format(ext=e, caption=e['caption'].strip()) for e in self.items[:-1] ])) def msvs_warning_format(self): return ( "{f.filename}({f.start_line}): warning: {f.name} has " + ", ".join([ "{{f.{ext[value]}}} {caption}" .format(ext=e, caption=e['caption'].strip()) for e in self.items[:-1] ]))
(ext)
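A hypothetical extension class (a sketch, not part of lizard) showing the FUNCTION_INFO contract described in the docstring above: _ext_member_info picks up 'caption' for the per-function table and 'average_caption' for the per-file averages. The class name LizardExtension matches what get_extensions (further down) imports from an extension module; the column used here, fan_in, already exists on FunctionInfo, so no extra bookkeeping is needed.

class LizardExtension(object):   # hypothetical example extension
    FUNCTION_INFO = {
        'fan_in': {
            'caption': '  fan_in  ',
            'average_caption': ' Avg.fan_in '}}

    def __call__(self, tokens, reader):
        # A processor receives the token stream plus the reader and must
        # yield the tokens onward; this one passes them through unchanged.
        for token in tokens:
            yield token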
36,798
lizard
__init__
null
def __init__(self, ext): self.extensions = ext self.items = [ { 'caption': " NLOC ", 'value': "nloc", 'avg_caption': ' Avg.NLOC '}, { 'caption': " CCN ", 'value': "cyclomatic_complexity", 'avg_caption': ' AvgCCN '}, { 'caption': " token ", 'value': "token_count", 'avg_caption': ' Avg.token '}, {'caption': " PARAM ", 'value': "parameter_count"}, {'caption': " length ", 'value': "length"}, ] + [ { 'caption': caption, 'value': part, 'avg_caption': average } for caption, part, average in self._ext_member_info()] self.items.append({'caption': " location ", 'value': 'location'})
(self, ext)
36,799
lizard
_ext_member_info
null
def _ext_member_info(self): for ext in self.extensions: if hasattr(ext, "FUNCTION_INFO"): for key in ext.FUNCTION_INFO: yield ( ext.FUNCTION_INFO[key].get("caption", None), key, ext.FUNCTION_INFO[key].get("average_caption", None))
(self)
36,800
lizard
_head
null
@staticmethod def _head(captions): return "\n".join(("=" * len(captions), captions, "-" * len(captions)))
(captions)
36,801
lizard
any_silent
null
def any_silent(self): return any(hasattr(ex, 'silent_all_others') for ex in self.extensions)
(self)
36,802
lizard
average_captions
null
def average_captions(self): return "".join([ e['avg_caption'] for e in self.items if e.get("avg_caption", None)])
(self)
36,803
lizard
average_formatter
null
def average_formatter(self): return "".join([ "{{module.average_{ext[value]}:{size}.1f}}" .format(ext=e, size=len(e['avg_caption'])) for e in self.items if e.get("avg_caption", None)])
(self)
36,804
lizard
captions
null
def captions(self): caps = [item.get('caption') for item in self.items] return "".join(caption for caption in caps if caption)
(self)
36,805
lizard
clang_warning_format
null
def clang_warning_format(self): return ( "{f.filename}:{f.start_line}: warning: {f.name} has " + ", ".join([ "{{f.{ext[value]}}} {caption}" .format(ext=e, caption=e['caption'].strip()) for e in self.items[:-1] ]))
(self)
36,806
lizard
function_info
null
def function_info(self, fun): return ''.join( str(getattr(fun, item['value'])).rjust(len(item['caption'])) for item in self.items if item['caption'])
(self, fun)
36,807
lizard
function_info_head
null
def function_info_head(self): return self._head(self.captions())
(self)
36,808
lizard
msvs_warning_format
null
def msvs_warning_format(self): return ( "{f.filename}({f.start_line}): warning: {f.name} has " + ", ".join([ "{{f.{ext[value]}}} {caption}" .format(ext=e, caption=e['caption'].strip()) for e in self.items[:-1] ]))
(self)
36,809
lizard
patch_for_extensions
null
def patch_for_extensions(self): def _patch(name): setattr(FileInformation, "average_" + name, property(lambda self: self.functions_average(name))) for item in self.items: if 'avg_caption' in item: _patch(item["value"])
(self)
36,810
lizard
value_columns
null
def value_columns(self): return [item['value'] for item in self.items]
(self)
36,811
lizard
_extension_arg
null
def _extension_arg(parser): parser.add_argument("-E", "--extension", help='''User the extensions. The available extensions are: -Ecpre: it will ignore code in the #else branch. -Ewordcount: count word frequencies and generate tag cloud. -Eoutside: include the global code as one function. -EIgnoreAssert: to ignore all code in assert. -ENS: count nested control structures.''', action="append", dest="extensions", default=[])
(parser)
36,812
lizard
analyze
returns an iterator of file information that contains function statistics.
def analyze(paths, exclude_pattern=None, threads=1, exts=None, lans=None): ''' returns an iterator of file information that contains function statistics. ''' exclude_pattern = exclude_pattern or [] files = get_all_source_files(paths, exclude_pattern, lans) return analyze_files(files, threads, exts)
(paths, exclude_pattern=None, threads=1, exts=None, lans=None)
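A minimal usage sketch of the analyze() entry point above, assuming lizard is installed and the current directory contains Python sources; the attribute names come from the FileInformation and FunctionInfo records in this listing.

import lizard

for file_info in lizard.analyze(["."], lans=["python"]):
    print(file_info.filename, "NLOC=%d" % file_info.nloc)
    for fun in file_info.function_list:
        print("  %s CCN=%d length=%d" % (
            fun.name, fun.cyclomatic_complexity, fun.length))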
36,813
lizard
analyze_files
null
def analyze_files(files, threads=1, exts=None): extensions = exts or get_extensions([]) file_analyzer = FileAnalyzer(extensions) result = map_files_to_analyzer(files, file_analyzer, threads) for extension in extensions: if hasattr(extension, 'cross_file_process'): result = extension.cross_file_process(result) return result
(files, threads=1, exts=None)
36,814
lizard
arg_parser
null
def arg_parser(prog=None): from argparse import ArgumentParser, Action, ArgumentError class DictAction(Action): # pylint: disable=R0903 def __init__(self, option_strings, dest, nargs=None, **kwargs): super(DictAction, self).__init__(option_strings, dest, **kwargs) def __call__(self, parser, namespace, value, option_string=None): if not re.match(r"\s*\w+\s*=\s*\d+", value): raise ArgumentError(self, "should be like nloc=20") k, val = value.split("=", 2) getattr(namespace, self.dest)[k.strip()] = int(val.strip()) parser = ArgumentParser(prog=prog) parser.add_argument('paths', nargs='*', default=['.'], help='list of the filename/paths.') parser.add_argument('--version', action='version', version=version) parser.add_argument("-l", "--languages", help='''List the programming languages you want to analyze. if left empty, it'll search for all languages it knows. `lizard -l cpp -l java`searches for C++ and Java code. The available languages are: ''' + ', '.join(x.language_names[0] for x in languages()), action="append", dest="languages", default=[]) parser.add_argument("-V", "--verbose", help="Output in verbose mode (long function name)", action="store_true", dest="verbose", default=False) parser.add_argument("-C", "--CCN", help='''Threshold for cyclomatic complexity number warning. The default value is %d. Functions with CCN bigger than it will generate warning ''' % DEFAULT_CCN_THRESHOLD, type=int, dest="CCN", default=DEFAULT_CCN_THRESHOLD) parser.add_argument("-f", "--input_file", help='''get a list of filenames from the given file ''', type=str, dest="input_file") parser.add_argument("-o", "--output_file", help='''Output file. The output format is inferred from the file extension (e.g. .html), unless it is explicitly specified (e.g. using --xml). ''', type=str, dest="output_file") parser.add_argument("-L", "--length", help='''Threshold for maximum function length warning. The default value is %d. Functions length bigger than it will generate warning ''' % DEFAULT_MAX_FUNC_LENGTH, type=int, dest="length", default=DEFAULT_MAX_FUNC_LENGTH) parser.add_argument("-a", "--arguments", help="Limit for number of parameters", type=int, dest="arguments", default=100) parser.add_argument("-w", "--warnings_only", help='''Show warnings only, using clang/gcc's warning format for printing warnings. http://clang.llvm.org/docs/UsersManual.html#cmdoption-fdiagnostics-format ''', action="store_const", const=print_clang_style_warning, dest="printer") parser.add_argument("--warning-msvs", help='''Show warnings only, using Visual Studio's warning format for printing warnings. https://msdn.microsoft.com/en-us/library/yxkt8b26.aspx ''', action="store_const", const=print_msvs_style_warning, dest="printer") parser.add_argument("-i", "--ignore_warnings", help='''If the number of warnings is equal or less than the number, the tool will exit normally; otherwise, it will generate error. If the number is negative, the tool exits normally regardless of the number of warnings. Useful in makefile for legacy code.''', type=int, dest="number", default=0) parser.add_argument("-x", "--exclude", help='''Exclude files that match the pattern. * matches everything, ? matches any single character, "./folder/*" exclude everything in the folder recursively. Multiple patterns can be specified. Don't forget to add "" around the pattern.''', action="append", dest="exclude", default=[]) parser.add_argument("-t", "--working_threads", help='''number of working threads. The default value is 1. 
Using a bigger number can fully utilize the CPU and often faster.''', type=int, dest="working_threads", default=1) parser.add_argument("-X", "--xml", help='''Generate XML in cppncss style instead of the tabular output. Useful to generate report in Jenkins server''', action="store_const", const=print_xml, dest="printer") parser.add_argument("--csv", help='''Generate CSV output as a transform of the default output''', action="store_const", const=print_csv, dest="printer") parser.add_argument("-H", "--html", help='''Output HTML report''', action="store_const", const=html_output, dest="printer") parser.add_argument("-m", "--modified", help='''Calculate modified cyclomatic complexity number , which count a switch/case with multiple cases as one CCN.''', action="append_const", const="modified", dest="extensions", default=[]) _extension_arg(parser) parser.add_argument("-s", "--sort", help='''Sort the warning with field. The field can be nloc, cyclomatic_complexity, token_count, parameter_count, etc. Or an customized field.''', action="append", dest="sorting", default=[]) parser.add_argument("-T", "--Threshold", help='''Set the limit for a field. The field can be nloc, cyclomatic_complexity, token_count, parameter_count, etc. Or an customized file. Lizard will report warning if a function exceed the limit''', action=DictAction, dest="thresholds", default={}) parser.add_argument("-W", "--whitelist", help='''The path and file name to the whitelist file. It's './whitelizard.txt' by default. Find more information in README.''', type=str, dest="whitelist", default=DEFAULT_WHITELIST) parser.usage = '''lizard [options] [PATH or FILE] [PATH] ...''' parser.description = __doc__ return parser
(prog=None)
36,815
lizard_ext.auto_open
auto_open
Ok. I believe a big can of worms has just been opened. The codec of a text file is very hard to detect. So far lizard doesn't include any other dependencies, so I'm not comfortable introducing the first dependency (chardet) only for this, and it wouldn't be a perfect solution anyway. Let's see how far we can go by just patching for new requests. So far it can handle: UTF-8 with BOM
def auto_open(*args, **kwargs): ''' Ok. I believe a big can of worms has just been opened. Codecs of text file is very hard to detect. So far lizard hasn't include any other dependencies, so I'm not too comfortable to introduce the first dependency (chardet) only for this. And it won't be a perfect solution any way. Let's see how far we can go by just patching for new requests. So far it can handle: UTF-8 With BOM ''' size = min(32, os.path.getsize(args[0])) with io.open(args[0], 'rb') as binary: if binary.read(size).startswith(codecs.BOM_UTF8): kwargs["encoding"] = 'utf-8-sig' return io.open(*args, **kwargs) return io.open(*args, **kwargs)
(*args, **kwargs)
36,816
lizard_ext.auto_open
auto_read
null
def auto_read(filename): try: with auto_open(filename, 'r') as current_file: return current_file.read() except UnicodeDecodeError: with open(filename, 'rb') as current_file: return current_file.read().decode('utf8', 'ignore')
(filename)
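A small check of the BOM handling described in auto_open's docstring, assuming lizard_ext is importable; the temporary file name is illustrative only.

import codecs
from lizard_ext.auto_open import auto_read

# Write a UTF-8 file with a BOM, then read it back; auto_open detects the
# BOM and reopens the file as utf-8-sig, so the returned text starts at 'def'.
with open("bom_sample.py", "wb") as f:
    f.write(codecs.BOM_UTF8 + b"def f():\n    return 1\n")
print(auto_read("bom_sample.py")[:3])   # expected: def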
36,818
lizard
comment_counter
null
def comment_counter(tokens, reader): for token in tokens: comment = reader.get_comment_from_token(token) if comment is not None: for _ in comment.splitlines()[1:]: yield '\n' if comment.strip().startswith("#lizard forgive"): reader.context.forgive = True if "GENERATED CODE" in comment: return else: yield token
(tokens, reader)
36,819
lizard
condition_counter
null
def condition_counter(tokens, reader): conditions = reader.conditions for token in tokens: if token in conditions: reader.context.add_condition() yield token
(tokens, reader)
36,820
fnmatch
fnmatch
Test whether FILENAME matches PATTERN. Patterns are Unix shell style: * matches everything ? matches any single character [seq] matches any character in seq [!seq] matches any char not in seq An initial period in FILENAME is not special. Both FILENAME and PATTERN are first case-normalized if the operating system requires it. If you don't want this, use fnmatchcase(FILENAME, PATTERN).
def fnmatch(name, pat): """Test whether FILENAME matches PATTERN. Patterns are Unix shell style: * matches everything ? matches any single character [seq] matches any character in seq [!seq] matches any char not in seq An initial period in FILENAME is not special. Both FILENAME and PATTERN are first case-normalized if the operating system requires it. If you don't want this, use fnmatchcase(FILENAME, PATTERN). """ name = os.path.normcase(name) pat = os.path.normcase(pat) return fnmatchcase(name, pat)
(name, pat)
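A few illustrative matches for the shell-style patterns documented above (this is the standard-library fnmatch that the --exclude handling in get_all_source_files, in the next record, relies on).

from fnmatch import fnmatch

assert fnmatch("src/module.c", "*.c")          # * matches everything, including '/'
assert fnmatch("build/temp.o", "build/*")
assert fnmatch("test_1.py", "test_?.py")       # ? matches any single character
assert not fnmatch("main.cpp", "*.c")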
36,821
lizard
get_all_source_files
The function computes an md5 hash for each candidate file and checks that it is not a duplicate, using a set of hashes of previously seen files.
def get_all_source_files(paths, exclude_patterns, lans): ''' Function counts md5 hash for the given file and checks if it isn't a duplicate using set of hashes for previous files ''' hash_set = set() def _support(reader): return not lans or set(lans).intersection( reader.language_names) def _validate_file(pathname): return ( pathname in paths or ( get_reader_for(pathname) and _support(get_reader_for(pathname)) and all(not fnmatch(pathname, p) for p in exclude_patterns) and _not_duplicate(pathname))) def _not_duplicate(full_path_name): fhash = md5_hash_file(full_path_name) if not fhash or fhash not in hash_set: hash_set.add(fhash) return True def all_listed_files(paths): for path in paths: if os.path.isfile(path): yield path else: for root, _, files in os.walk(path, topdown=False): for filename in files: yield os.path.join(root, filename) return filter(_validate_file, all_listed_files(paths))
(paths, exclude_patterns, lans)
36,822
lizard
get_extensions
null
def get_extensions(extension_names): from importlib import import_module as im def expand_extensions(existing): for name in extension_names: ext = ( im('lizard_ext.lizard' + name.lower()) .LizardExtension() if isinstance(name, str) else name) existing.insert( len(existing) if not hasattr(ext, "ordering_index") else ext.ordering_index, ext) return existing return expand_extensions([ preprocessing, comment_counter, line_counter, token_counter, condition_counter, ])
(extension_names)
36,823
lizard
get_map_method
null
def get_map_method(working_threads): try: if working_threads == 1: raise ImportError import multiprocessing pool = multiprocessing.Pool(processes=working_threads) return pool.imap_unordered except ImportError: return map
(working_threads)
36,824
lizard_languages
get_reader_for
null
def get_reader_for(filename): for lan in languages(): if lan.match_filename(filename): return lan
(filename)
36,825
lizard
get_warnings
null
def get_warnings(code_infos, option): warnings = whitelist_filter(warning_filter(option, code_infos), whitelist=option.whitelist) if isinstance(option.sorting, list) and option.sorting: warnings = sorted(warnings, reverse=True, key=lambda x: getattr( x, option.sorting[0])) return warnings
(code_infos, option)
36,827
lizard_ext.htmloutput
html_output
null
def html_output(result, options, *_): try: from jinja2 import Template except ImportError: sys.stderr.write( "HTML Output depends on jinja2. `pip install jinja2` first") sys.exit(2) file_list = [] for source_file in result: if source_file: source_file_dict = {"filename": source_file.filename} func_list = [] for source_function in source_file.function_list: if source_function: source_function_dict = _create_dict(source_function) func_list.append(source_function_dict) source_file_dict["functions"] = func_list file_list.append(source_file_dict) output = Template(TEMPLATE).render( title='Lizard code complexity report', date=datetime.datetime.now().strftime('%Y-%m-%d %H:%M'), thresholds=options.thresholds, files=file_list) print(output) return 0
(result, options, *_)
36,828
lizard
infer_printer_from_file_ext
null
def infer_printer_from_file_ext(path): mapping = { '.csv': print_csv, '.htm': html_output, '.html': html_output, '.xml': print_xml } _, ext = os.path.splitext(path) printer = mapping.get(ext) return printer
(path)
36,830
lizard_languages
languages
null
def languages(): return [ CLikeReader, JavaReader, CSharpReader, JavaScriptReader, PythonReader, ObjCReader, TTCNReader, RubyReader, PHPReader, SwiftReader, ScalaReader, GDScriptReader, GoReader, LuaReader, RustReader, TypeScriptReader, FortranReader, KotlinReader ]
()
36,831
lizard
line_counter
null
def line_counter(tokens, reader): context = reader.context context.current_line = 1 newline = 1 for token in tokens: if token != "\n": count = token.count('\n') context.current_line += count context.add_nloc(count + newline) newline = 0 yield token else: context.current_line += 1 newline = 1
(tokens, reader)
36,832
lizard
main
Command-line entrance to Lizard. Args: argv: Arguments vector; if None, sys.argv by default.
def main(argv=None): """Command-line entrance to Lizard. Args: argv: Arguments vector; if None, sys.argv by default. """ options = parse_args(argv or sys.argv) printer = options.printer or print_result schema = OutputScheme(options.extensions) if schema.any_silent(): printer = silent_printer schema.patch_for_extensions() if options.input_file: options.paths = auto_read(options.input_file).splitlines() original_stdout = sys.stdout output_file = None if options.output_file: output_file = open_output_file(options.output_file) sys.stdout = output_file result = analyze( options.paths, options.exclude, options.working_threads, options.extensions, options.languages) warning_count = printer(result, options, schema, AllResult) print_extension_results(options.extensions) list(result) if output_file: sys.stdout = original_stdout output_file.close() if 0 <= options.number < warning_count: sys.exit(1)
(argv=None)