code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
'Convert a structure into a Python native type.' ctx = Context() ContextFlags = self.ContextFlags ctx['ContextFlags'] = ContextFlags if (ContextFlags & CONTEXT_DEBUG_REGISTERS) == CONTEXT_DEBUG_REGISTERS: for key in self._ctx_debug: ctx[key] = getattr(self, key) if (ContextFlags & CONTEXT_FLOATING_POINT) == CONTEXT_FLOATING_POINT: ctx['FloatSave'] = self.FloatSave.to_dict() if (ContextFlags & CONTEXT_SEGMENTS) == CONTEXT_SEGMENTS: for key in self._ctx_segs: ctx[key] = getattr(self, key) if (ContextFlags & CONTEXT_INTEGER) == CONTEXT_INTEGER: for key in self._ctx_int: ctx[key] = getattr(self, key) if (ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL: for key in self._ctx_ctrl: ctx[key] = getattr(self, key) if (ContextFlags & CONTEXT_EXTENDED_REGISTERS) == CONTEXT_EXTENDED_REGISTERS: er = [ self.ExtendedRegisters[index] for index in compat.xrange(0, MAXIMUM_SUPPORTED_EXTENSION) ] er = tuple(er) ctx['ExtendedRegisters'] = er return ctx
def to_dict(self)
Convert a structure into a Python native type.
2.519536
2.416372
1.042694
# Only care about the first 12 characters. enc = orig_enc[:12].lower().replace("_", "-") if enc == "utf-8" or enc.startswith("utf-8-"): return "utf-8" if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \ enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")): return "iso-8859-1" return orig_enc
def _get_normal_name(orig_enc)
Imitates get_normal_name in tokenizer.c.
2.18862
2.261822
0.967636
bom_found = False encoding = None default = 'utf-8' def read_or_stop(): try: return readline() except StopIteration: return bytes() def find_cookie(line): try: line_string = line.decode('ascii') except UnicodeDecodeError: return None match = cookie_re.match(line_string) if not match: return None encoding = _get_normal_name(match.group(1)) try: codec = lookup(encoding) except LookupError: # This behaviour mimics the Python interpreter raise SyntaxError("unknown encoding: " + encoding) if bom_found: if codec.name != 'utf-8': # This behaviour mimics the Python interpreter raise SyntaxError('encoding problem: utf-8') encoding += '-sig' return encoding first = read_or_stop() if first.startswith(BOM_UTF8): bom_found = True first = first[3:] default = 'utf-8-sig' if not first: return default, [] encoding = find_cookie(first) if encoding: return encoding, [first] second = read_or_stop() if not second: return default, [first] encoding = find_cookie(second) if encoding: return encoding, [first, second] return default, [first, second]
def detect_encoding(readline)
The detect_encoding() function is used to detect the encoding that should be used to decode a Python source file. It requires one argument, readline, in the same way as the tokenize() generator. It will call readline a maximum of twice, and return the encoding used (as a string) and a list of any lines (left as bytes) it has read in. It detects the encoding from the presence of a utf-8 bom or an encoding cookie as specified in pep-0263. If both a bom and a cookie are present, but disagree, a SyntaxError will be raised. If the encoding cookie is an invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, 'utf-8-sig' is returned. If no encoding is specified, then the default of 'utf-8' will be returned.
2.677598
2.533142
1.057026
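The record above is easier to follow with a concrete call. A minimal sketch of driving detect_encoding() with an in-memory file; the io buffer and the sample source bytes are mine, not part of the record:

import io

source = b'# -*- coding: latin-1 -*-\nx = 1\n'
encoding, consumed = detect_encoding(io.BytesIO(source).readline)
# The cookie is normalized by _get_normal_name, so 'latin-1'
# comes back as 'iso-8859-1'; consumed holds the raw lines read.
print(encoding)  # 'iso-8859-1'
print(consumed)  # [b'# -*- coding: latin-1 -*-\n']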
stdout = get_output(command) return [line.strip().decode('utf-8') for line in stdout.splitlines()]
def get_lines(command)
Run a command and return lines of output :param str command: the command to run :returns: list of whitespace-stripped lines output by command
3.571795
5.144831
0.694249
# Get list of files modified and staged diff_cmd = "git diff-index --cached --name-only --diff-filter=ACMRTUXB HEAD" files_modified = get_lines(diff_cmd) errors = 0 for filename in files_modified: if filename.endswith('.py'): # Get the staged contents of the file staged_cmd = "git show :%s" % filename staged_contents = get_output(staged_cmd) sort = SortImports( file_path=filename, file_contents=staged_contents.decode(), check=True ) if sort.incorrectly_sorted: errors += 1 return errors if strict else 0
def git_hook(strict=False)
Git pre-commit hook to check staged files for isort errors :param bool strict: if True, return the number of errors on exit, causing the hook to fail; if False, return zero so it acts only as a warning. :returns: number of errors if in strict mode, 0 otherwise.
3.858832
3.251586
1.186754
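In practice a hook like this is wired in from .git/hooks/pre-commit. A sketch of the wrapper script isort documents for this purpose; the isort.hooks module path matches released isort, but treat it as an assumption for the vendored copy above:

#!/usr/bin/env python
import sys
from isort.hooks import git_hook

# Exit non-zero (and block the commit) only in strict mode.
if __name__ == '__main__':
    sys.exit(git_hook(strict=True))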
''' Used so that it can work with the multiprocess plugin. Monkeypatched because nose seems a bit unsupported at this time (ideally the plugin would have this support by default). ''' ret = original(self, result, batch_result) parent_frame = sys._getframe().f_back # addr is something as D:\pytesting1\src\mod1\hello.py:TestCase.testMet4 # so, convert it to what report_cond expects addr = parent_frame.f_locals['addr'] i = addr.rindex(':') addr = [addr[:i], addr[i + 1:]] output, testsRun, failures, errors, errorClasses = batch_result if failures or errors: for failure in failures: PYDEV_NOSE_PLUGIN_SINGLETON.report_cond('fail', addr, output, failure) for error in errors: PYDEV_NOSE_PLUGIN_SINGLETON.report_cond('error', addr, output, error) else: PYDEV_NOSE_PLUGIN_SINGLETON.report_cond('ok', addr, output) return ret
def new_consolidate(self, result, batch_result)
Used so that it can work with the multiprocess plugin. Monkeypatched because nose seems a bit unsupported at this time (ideally the plugin would have this support by default).
8.39813
4.888126
1.718068
try: self.raw_requestline = self.rfile.readline(65537) if len(self.raw_requestline) > 65536: self.requestline = '' self.request_version = '' self.command = '' self.send_error(414) return if not self.raw_requestline: self.close_connection = 1 return if not self.parse_request(): # An error code has been sent, just exit return mname = 'do_' + self.command if not hasattr(self, mname): self.send_error(501, "Unsupported method (%r)" % self.command) return method = getattr(self, mname) method() self.wfile.flush() #actually send the response if not already done. except socket.timeout: #a read or a write timed out. Discard this connection self.log_error("Request timed out: %r", sys.exc_info()[1]) self.close_connection = 1 return
def handle_one_request(self)
Handle a single HTTP request. You normally don't need to override this method; see the class __doc__ string for information on how to handle specific HTTP commands such as GET and POST.
1.607247
1.565253
1.026829
try: short, long = self.responses[code] except KeyError: short, long = '???', '???' if message is None: message = short explain = long self.log_error("code %d, message %s", code, message) # using _quote_html to prevent Cross Site Scripting attacks (see bug #1100201) content = (self.error_message_format % {'code': code, 'message': _quote_html(message), 'explain': explain}) self.send_response(code, message) self.send_header("Content-Type", self.error_content_type) self.send_header('Connection', 'close') self.end_headers() if self.command != 'HEAD' and code >= 200 and code not in (204, 304): self.wfile.write(content)
def send_error(self, code, message=None)
Send and log an error reply. Arguments are the error code, and a detailed message. The detailed message defaults to the short entry matching the response code. This sends an error response (so it must be called before any output has been generated), logs the error, and finally sends a piece of HTML explaining the error to the user.
3.026267
2.974406
1.017436
self.log_request(code) if message is None: if code in self.responses: message = self.responses[code][0] else: message = '' if self.request_version != 'HTTP/0.9': self.wfile.write("%s %d %s\r\n" % (self.protocol_version, code, message)) # print (self.protocol_version, code, message) self.send_header('Server', self.version_string()) self.send_header('Date', self.date_time_string())
def send_response(self, code, message=None)
Send the response header and log the response code. Also send two standard headers with the server software version and the current date.
2.130124
1.943384
1.09609
if self.request_version != 'HTTP/0.9': self.wfile.write("%s: %s\r\n" % (keyword, value)) if keyword.lower() == 'connection': if value.lower() == 'close': self.close_connection = 1 elif value.lower() == 'keep-alive': self.close_connection = 0
def send_header(self, keyword, value)
Send a MIME header.
1.842965
1.934557
0.952655
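The three records above (send_error, send_response, send_header) are Python 2-era BaseHTTPServer internals, normally driven from a do_* method. A minimal sketch of that flow using the modern http.server equivalents, which keep the same method names; only the byte handling differs:

from http.server import BaseHTTPRequestHandler, HTTPServer

class PingHandler(BaseHTTPRequestHandler):
    def do_GET(self):
        if self.path != '/ping':
            self.send_error(404)  # logs and emits the HTML error body
            return
        body = b'pong\n'
        self.send_response(200)  # status line plus Server/Date headers
        self.send_header('Content-Type', 'text/plain')
        self.send_header('Content-Length', str(len(body)))
        self.end_headers()
        self.wfile.write(body)

if __name__ == '__main__':
    HTTPServer(('127.0.0.1', 8000), PingHandler).serve_forever()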
a = a.splitlines() b = b.splitlines() return difflib.unified_diff(a, b, filename, filename, "(original)", "(refactored)", lineterm="")
def diff_texts(a, b, filename)
Return a unified diff of two strings.
3.702302
3.241293
1.14223
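A quick sketch of what diff_texts() yields; the sample strings are mine, and it returns a generator of unified-diff lines:

before = "a = 1\nb = 2\n"
after = "a = 1\nb = 3\n"
for line in diff_texts(before, after, "example.py"):
    print(line)
# Prints the two file headers ('--- example.py (original)' and
# '+++ example.py (refactored)'), the '@@ -1,2 +1,2 @@' hunk
# header, then ' a = 1', '-b = 2', '+b = 3'.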
if not isinstance(env_cmd, (list, tuple)): env_cmd = [env_cmd] if not os.path.exists(env_cmd[0]): raise RuntimeError('Error: %s does not exist' % (env_cmd[0],)) # construct the command that will alter the environment env_cmd = subprocess.list2cmdline(env_cmd) # create a tag so we can tell in the output when the proc is done tag = 'Done running command' # construct a cmd.exe command to do accomplish this cmd = 'cmd.exe /s /c "{env_cmd} && echo "{tag}" && set"'.format(**vars()) # launch the process proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=initial) # parse the output sent to stdout lines = proc.stdout # consume whatever output occurs until the tag is reached for line in lines: line = line.decode('utf-8') if 'The specified configuration type is missing.' in line: raise AssertionError('Error executing %s. View http://blog.ionelmc.ro/2014/12/21/compiling-python-extensions-on-windows/ for details.' % (env_cmd)) if tag in line: break if sys.version_info[0] > 2: # define a way to handle each KEY=VALUE line handle_line = lambda l: l.decode('utf-8').rstrip().split('=', 1) else: # define a way to handle each KEY=VALUE line handle_line = lambda l: l.rstrip().split('=', 1) # parse key/values into pairs pairs = map(handle_line, lines) # make sure the pairs are valid valid_pairs = filter(validate_pair, pairs) # construct a dictionary of the pairs result = dict(valid_pairs) # let the process finish proc.communicate() return result
def get_environment_from_batch_command(env_cmd, initial=None)
Take a command (either a single command or list of arguments) and return the environment created after running that command. Note that the command must be a batch file or .cmd file, or the changes to the environment will not be captured. If initial is supplied, it is used as the initial environment passed to the child process.
3.804396
3.796803
1.002
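Typical use is capturing the compiler environment a Visual Studio batch file sets up. The path below is hypothetical; point it at the locally installed toolchain:

import os

# Hypothetical install location; adjust to the local toolchain.
vcvars = r'C:\Program Files (x86)\Microsoft Visual Studio 9.0\VC\vcvarsall.bat'
env = get_environment_from_batch_command([vcvars, 'x86'], initial=os.environ)
# env now holds PATH, INCLUDE, LIB, ... as the batch file left them,
# ready to pass as the env= argument of subprocess.Popen.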
'Instance a new structure from a Python native type.' ctx = Context(ctx) s = cls() ContextFlags = ctx['ContextFlags'] s.ContextFlags = ContextFlags for key in cls._others: if key != 'VectorRegister': setattr(s, key, ctx[key]) else: w = ctx[key] v = (M128A * len(w))() i = 0 for x in w: y = M128A() y.High = x >> 64 y.Low = x & 0xFFFFFFFFFFFFFFFF v[i] = y i += 1 setattr(s, key, v) if (ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL: for key in cls._control: setattr(s, key, ctx[key]) if (ContextFlags & CONTEXT_INTEGER) == CONTEXT_INTEGER: for key in cls._integer: setattr(s, key, ctx[key]) if (ContextFlags & CONTEXT_SEGMENTS) == CONTEXT_SEGMENTS: for key in cls._segments: setattr(s, key, ctx[key]) if (ContextFlags & CONTEXT_DEBUG_REGISTERS) == CONTEXT_DEBUG_REGISTERS: for key in cls._debug: setattr(s, key, ctx[key]) if (ContextFlags & CONTEXT_MMX_REGISTERS) == CONTEXT_MMX_REGISTERS: xmm = s.FltSave.xmm for key in cls._mmx: x = ctx[key] y = M128A() y.High = x >> 64 y.Low = x & 0xFFFFFFFFFFFFFFFF setattr(xmm, key, y) return s
def from_dict(cls, ctx)
Instance a new structure from a Python native type.
2.611917
2.436762
1.07188
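The 128-bit packing in this record is easier to verify in isolation. A sketch of the split used by from_dict (low half masked to 64 bits) and the x.Low + (x.High << 64) join used by to_dict in the next record; the helper names are mine:

MASK64 = (1 << 64) - 1

def split128(x):
    # High and low 64-bit halves, as stored in an M128A.
    return x >> 64, x & MASK64

def join128(high, low):
    return (high << 64) | low

value = 0x0123456789ABCDEF0123456789ABCDEF
assert join128(*split128(value)) == value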
'Convert a structure into a Python dictionary.' ctx = Context() ContextFlags = self.ContextFlags ctx['ContextFlags'] = ContextFlags for key in self._others: if key != 'VectorRegister': ctx[key] = getattr(self, key) else: ctx[key] = tuple([ (x.Low + (x.High << 64)) for x in getattr(self, key) ]) if (ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL: for key in self._control: ctx[key] = getattr(self, key) if (ContextFlags & CONTEXT_INTEGER) == CONTEXT_INTEGER: for key in self._integer: ctx[key] = getattr(self, key) if (ContextFlags & CONTEXT_SEGMENTS) == CONTEXT_SEGMENTS: for key in self._segments: ctx[key] = getattr(self, key) if (ContextFlags & CONTEXT_DEBUG_REGISTERS) == CONTEXT_DEBUG_REGISTERS: for key in self._debug: ctx[key] = getattr(self, key) if (ContextFlags & CONTEXT_MMX_REGISTERS) == CONTEXT_MMX_REGISTERS: xmm = self.FltSave.xmm.to_dict() for key in self._mmx: ctx[key] = xmm.get(key) return ctx
def to_dict(self)
Convert a structure into a Python dictionary.
2.682923
2.658976
1.009006
''' We have to override the exit because calling sys.exit will only actually exit the main thread, and as we're in an XML-RPC server, that won't work. ''' try: import java.lang.System java.lang.System.exit(1) except ImportError: if len(args) == 1: os._exit(args[0]) else: os._exit(0)
def do_exit(*args)
We have to override the exit because calling sys.exit will only actually exit the main thread, and as we're in an XML-RPC server, that won't work.
6.628554
2.326848
2.848727
frame = dbg.find_frame(thread_id, frame_id) is_multiline = expression.count('@LINE@') > 1 expression = str(expression.replace('@LINE@', '\n')) # Not using frame.f_globals because of https://sourceforge.net/tracker2/?func=detail&aid=2541355&group_id=85796&atid=577329 # (Names not resolved in generator expression in method) # See message: http://mail.python.org/pipermail/python-list/2009-January/526522.html updated_globals = {} updated_globals.update(frame.f_globals) updated_globals.update(frame.f_locals) # locals later because it has precedence over the actual globals if IPYTHON: need_more = exec_code(CodeFragment(expression), updated_globals, frame.f_locals, dbg) if not need_more: pydevd_save_locals.save_locals(frame) return need_more interpreter = ConsoleWriter() if not is_multiline: try: code = compile_command(expression) except (OverflowError, SyntaxError, ValueError): # Case 1 interpreter.showsyntaxerror() return False if code is None: # Case 2 return True else: code = expression # Case 3 try: Exec(code, updated_globals, frame.f_locals) except SystemExit: raise except: interpreter.showtraceback() else: pydevd_save_locals.save_locals(frame) return False
def console_exec(thread_id, frame_id, expression, dbg)
Returns 'True' if the expression is incomplete and more input is needed; returns 'False' once the expression has been evaluated or rejected with a syntax error.
4.515952
4.494536
1.004765
# Override for avoid using sys.excepthook PY-12600 type, value, tb = sys.exc_info() sys.last_type = type sys.last_value = value sys.last_traceback = tb if filename and type is SyntaxError: # Work hard to stuff the correct filename in the exception try: msg, (dummy_filename, lineno, offset, line) = value.args except ValueError: # Not the format we expect; leave it alone pass else: # Stuff in the right filename value = SyntaxError(msg, (filename, lineno, offset, line)) sys.last_value = value list = traceback.format_exception_only(type, value) sys.stderr.write(''.join(list))
def showsyntaxerror(self, filename=None)
Display the syntax error that just occurred.
3.462815
3.428156
1.01011
# Override for avoid using sys.excepthook PY-12600 try: type, value, tb = sys.exc_info() sys.last_type = type sys.last_value = value sys.last_traceback = tb tblist = traceback.extract_tb(tb) del tblist[:1] lines = traceback.format_list(tblist) if lines: lines.insert(0, "Traceback (most recent call last):\n") lines.extend(traceback.format_exception_only(type, value)) finally: tblist = tb = None sys.stderr.write(''.join(lines))
def showtraceback(self, *args, **kwargs)
Display the exception that just occurred.
2.877247
2.782787
1.033945
for mbi in memory_map: if condition(mbi): address = mbi.BaseAddress max_addr = address + mbi.RegionSize while address < max_addr: yield address address = address + 1
def CustomAddressIterator(memory_map, condition)
Generator function that iterates through a memory map, filtering memory region blocks by any given condition. @type memory_map: list( L{win32.MemoryBasicInformation} ) @param memory_map: List of memory region information objects. Returned by L{Process.get_memory_map}. @type condition: function @param condition: Callback function that returns C{True} if the memory block should be returned, or C{False} if it should be filtered. @rtype: generator of int @return: Generator object to iterate the addresses within the matching memory blocks.
3.45644
3.889311
0.888702
'x.next() -> the next value, or raise StopIteration' if self.__g_object is None: self.__g_object = self.__g_function( *self.__v_args, **self.__d_args ) try: return self.__g_object.next() except StopIteration: self.__g_object = None raise
def next(self)
x.next() -> the next value, or raise StopIteration
4.036443
3.473567
1.162046
filepart = win32.PathRemoveExtension(pathname) extpart = win32.PathFindExtension(pathname) return (filepart, extpart)
def split_extension(pathname)
@type pathname: str @param pathname: Absolute path. @rtype: tuple( str, str ) @return: Tuple containing the file and extension components of the filename.
6.998876
8.680093
0.806313
filepart = win32.PathFindFileName(pathname) pathpart = win32.PathRemoveFileSpec(pathname) return (pathpart, filepart)
def split_filename(pathname)
@type pathname: str @param pathname: Absolute path. @rtype: tuple( str, str ) @return: Tuple containing the path to the file and the base filename.
9.703165
10.836577
0.895409
components = list() while path: next = win32.PathFindNextComponent(path) if next: prev = path[ : -len(next) ] components.append(prev) path = next return components
def split_path(path)
@see: L{join_path} @type path: str @param path: Absolute or relative path. @rtype: list( str... ) @return: List of path components.
6.335483
6.676151
0.948972
if components: path = components[0] for next in components[1:]: path = win32.PathAppend(path, next) else: path = "" return path
def join_path(*components)
@see: L{split_path} @type components: tuple( str... ) @param components: Path components. @rtype: str @return: Absolute or relative path.
4.352998
5.057655
0.860675
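split_path and join_path wrap shlwapi calls (win32.PathFindNextComponent, win32.PathAppend), so they only run on Windows. A rough portable sketch of the round-trip contract they aim for, using ntpath; this is an illustration under my own component format, not the shlwapi one (which keeps trailing backslashes on intermediate components):

import ntpath

def split_path_portable(path):
    drive, rest = ntpath.splitdrive(path)
    parts = [p for p in rest.split('\\') if p]
    return ([drive + '\\'] if drive else []) + parts

def join_path_portable(*components):
    return ntpath.join(*components) if components else ''

p = r'C:\Windows\System32\notepad.exe'
assert join_path_portable(*split_path_portable(p)) == p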
# XXX TODO # There are probably some native paths that # won't be converted by this naive approach. if name.startswith(compat.b("\\")): if name.startswith(compat.b("\\??\\")): name = name[4:] elif name.startswith(compat.b("\\SystemRoot\\")): system_root_path = os.environ['SYSTEMROOT'] if system_root_path.endswith('\\'): system_root_path = system_root_path[:-1] name = system_root_path + name[11:] else: for drive_number in compat.xrange(ord('A'), ord('Z') + 1): drive_letter = '%c:' % drive_number try: device_native_path = win32.QueryDosDevice(drive_letter) except WindowsError: e = sys.exc_info()[1] if e.winerror in (win32.ERROR_FILE_NOT_FOUND, \ win32.ERROR_PATH_NOT_FOUND): continue raise if not device_native_path.endswith(compat.b('\\')): device_native_path += compat.b('\\') if name.startswith(device_native_path): name = drive_letter + compat.b('\\') + \ name[ len(device_native_path) : ] break return name
def native_to_win32_pathname(name)
@type name: str @param name: Native (NT) absolute pathname. @rtype: str @return: Win32 absolute pathname.
2.909487
2.976624
0.977445
try: try: pageSize = win32.GetSystemInfo().dwPageSize except WindowsError: pageSize = 0x1000 except NameError: pageSize = 0x1000 cls.pageSize = pageSize # now this function won't be called again return pageSize
def pageSize(cls)
Try to get the pageSize value at runtime.
5.323501
4.792019
1.11091
if begin is None: begin = 0 if end is None: end = win32.LPVOID(-1).value # XXX HACK if end < begin: begin, end = end, begin begin = cls.align_address_to_page_start(begin) if end != cls.align_address_to_page_start(end): end = cls.align_address_to_page_end(end) return (begin, end)
def align_address_range(cls, begin, end)
Align the given address range to the start and end of the page(s) it occupies. @type begin: int @param begin: Memory address of the beginning of the buffer. Use C{None} for the first legal address in the address space. @type end: int @param end: Memory address of the end of the buffer. Use C{None} for the last legal address in the address space. @rtype: tuple( int, int ) @return: Aligned memory addresses.
3.111008
3.031818
1.02612
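The align_address_to_page_start/_end helpers this record relies on reduce to standard mask arithmetic. A sketch assuming 4 KiB pages; the helper names come from the records, the mask math is mine:

PAGE = 0x1000  # assumed page size

def align_to_page_start(address):
    return address & ~(PAGE - 1)  # round down

def align_to_page_end(address):
    return (address + PAGE - 1) & ~(PAGE - 1)  # round up, no-op if aligned

begin = align_to_page_start(0x7FFD1234)
end = align_to_page_end(0x7FFD5678)
assert begin % PAGE == 0 and end % PAGE == 0
assert begin <= 0x7FFD1234 and 0x7FFD5678 <= end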
if size < 0: size = -size address = address - size begin, end = cls.align_address_range(address, address + size) # begin and end are page aligned here, so the integer division is exact return (end - begin) // cls.pageSize
def get_buffer_size_in_pages(cls, address, size)
Get the number of pages in use by the given buffer. @type address: int @param address: Aligned memory address. @type size: int @param size: Buffer size. @rtype: int @return: Buffer size in number of pages.
8.787068
10.10724
0.869384
return (old_begin <= begin < old_end) or \ (old_begin < end <= old_end) or \ (begin <= old_begin < end) or \ (begin < old_end <= end)
def do_ranges_intersect(begin, end, old_begin, old_end)
Determine if the two given memory address ranges intersect. @type begin: int @param begin: Start address of the first range. @type end: int @param end: End address of the first range. @type old_begin: int @param old_begin: Start address of the second range. @type old_end: int @param old_end: End address of the second range. @rtype: bool @return: C{True} if the two ranges intersect, C{False} otherwise.
2.383808
2.866249
0.831682
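The four comparisons in do_ranges_intersect are the classic overlap test for half-open [begin, end) ranges. A few quick checks make the semantics concrete; the addresses are arbitrary:

assert do_ranges_intersect(0x1000, 0x2000, 0x1800, 0x2800)      # partial overlap
assert do_ranges_intersect(0x1000, 0x2000, 0x1200, 0x1400)      # containment
assert not do_ranges_intersect(0x1000, 0x2000, 0x2000, 0x3000)  # merely adjacent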
ctx['Dr7'] &= cls.clearMask[register] ctx['Dr%d' % register] = 0
def clear_bp(cls, ctx, register)
Clears a hardware breakpoint. @see: find_slot, set_bp @type ctx: dict( str S{->} int ) @param ctx: Thread context dictionary. @type register: int @param register: Slot (debug register) for hardware breakpoint.
19.687996
23.108784
0.85197
Dr7 = ctx['Dr7'] Dr7 |= cls.enableMask[register] orMask, andMask = cls.triggerMask[register][trigger] Dr7 &= andMask Dr7 |= orMask orMask, andMask = cls.watchMask[register][watch] Dr7 &= andMask Dr7 |= orMask ctx['Dr7'] = Dr7 ctx['Dr%d' % register] = address
def set_bp(cls, ctx, register, address, trigger, watch)
Sets a hardware breakpoint. @see: clear_bp, find_slot @type ctx: dict( str S{->} int ) @param ctx: Thread context dictionary. @type register: int @param register: Slot (debug register). @type address: int @param address: Memory address. @type trigger: int @param trigger: Trigger flag. See L{HardwareBreakpoint.validTriggers}. @type watch: int @param watch: Watch flag. See L{HardwareBreakpoint.validWatchSizes}.
3.721597
4.817595
0.772501
Dr7 = ctx['Dr7'] slot = 0 for m in cls.enableMask: if (Dr7 & m) == 0: return slot slot += 1 return None
def find_slot(cls, ctx)
Finds an empty slot to set a hardware breakpoint. @see: clear_bp, set_bp @type ctx: dict( str S{->} int ) @param ctx: Thread context dictionary. @rtype: int @return: Slot (debug register) for hardware breakpoint.
7.713511
10.819801
0.712907
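The enableMask/clearMask/triggerMask tables consulted by clear_bp, set_bp and find_slot are not shown in these records, but the documented x86 DR7 layout pins them down: the local-enable bit for slot n is bit 2*n, and each slot owns a 4-bit condition/length field at bits 16 + 4*n. A sketch of deriving compatible tables and replaying the find_slot walk; the derivation is mine, not winappdbg's source:

# Local-enable bit for debug register n is bit 2*n of Dr7.
enableMask = tuple(1 << (2 * n) for n in range(4))

# Clearing slot n drops its enable bit and its 4-bit
# condition/length field at bits 16 + 4*n .. 19 + 4*n.
clearMask = tuple(~((1 << (2 * n)) | (0xF << (16 + 4 * n))) for n in range(4))

def find_free_slot(dr7):
    # Same walk as find_slot: first register whose enable bit is clear.
    for slot, mask in enumerate(enableMask):
        if not (dr7 & mask):
            return slot
    return None

assert find_free_slot(0b0101) == 2  # slots 0 and 1 already in use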
self.parse_graminit_h(graminit_h) self.parse_graminit_c(graminit_c) self.finish_off()
def run(self, graminit_h, graminit_c)
Load the grammar tables from the text files written by pgen.
3.539627
3.319126
1.066433
try: f = open(filename) except IOError, err: print "Can't open %s: %s" % (filename, err) return False self.symbol2number = {} self.number2symbol = {} lineno = 0 for line in f: lineno += 1 mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line) if not mo and line.strip(): print "%s(%s): can't parse %s" % (filename, lineno, line.strip()) else: symbol, number = mo.groups() number = int(number) assert symbol not in self.symbol2number assert number not in self.number2symbol self.symbol2number[symbol] = number self.number2symbol[number] = symbol return True
def parse_graminit_h(self, filename)
Parse the .h file written by pgen. (Internal) This file is a sequence of #define statements defining the nonterminals of the grammar as numbers. We build two tables mapping the numbers to names and back.
2.200319
2.14424
1.026154
self.keywords = {} # map from keyword strings to arc labels self.tokens = {} # map from numeric token values to arc labels for ilabel, (type, value) in enumerate(self.labels): if type == token.NAME and value is not None: self.keywords[value] = ilabel elif value is None: self.tokens[type] = ilabel
def finish_off(self)
Create additional useful structures. (Internal).
5.929471
5.192935
1.141834
''' We need to set the IDE os because the host where the code is running may actually be different from the client (and the point is that we want the proper paths to translate from the client to the server). :param os: 'UNIX' or 'WINDOWS' ''' global _ide_os global _normcase_from_client prev = _ide_os if os == 'WIN': # Apparently PyCharm uses 'WIN' (https://github.com/fabioz/PyDev.Debugger/issues/116) os = 'WINDOWS' assert os in ('WINDOWS', 'UNIX') if DEBUG_CLIENT_SERVER_TRANSLATION: print('pydev debugger: client OS: %s' % (os,)) _normcase_from_client = normcase if os == 'WINDOWS': # Client in Windows and server in Unix, we need to normalize the case. if not IS_WINDOWS: _normcase_from_client = _normcase_windows else: # Client in Unix and server in Windows, we can't normalize the case. if IS_WINDOWS: _normcase_from_client = _normcase_linux if prev != os: _ide_os = os # We need to (re)setup how the client <-> server translation works to provide proper separators. setup_client_server_paths(_last_client_server_paths_set)
def set_ide_os(os)
We need to set the IDE os because the host where the code is running may actually be different from the client (and the point is that we want the proper paths to translate from the client to the server). :param os: 'UNIX' or 'WINDOWS'
5.867727
3.840418
1.527888
for path in paths: if os.path.isdir(path): if should_skip(path, config, os.getcwd()): skipped.append(path) continue for dirpath, dirnames, filenames in os.walk(path, topdown=True): for dirname in list(dirnames): if should_skip(dirname, config, dirpath): skipped.append(dirname) dirnames.remove(dirname) for filename in filenames: if filename.endswith('.py'): if should_skip(filename, config, dirpath): skipped.append(filename) else: yield os.path.join(dirpath, filename) else: yield path
def iter_source_code(paths, config, skipped)
Iterate over all Python source files defined in paths.
1.817809
1.81742
1.000214
''' This is necessary so that we get the imports from the same directory where the file we are completing is located. ''' global currDirModule if currDirModule is not None: if len(sys.path) > 0 and sys.path[0] == currDirModule: del sys.path[0] currDirModule = directory sys.path.insert(0, directory)
def complete_from_dir(directory)
This is necessary so that we get the imports from the same directory where the file we are completing is located.
4.626849
2.618431
1.767031
'''Changes the pythonpath, clearing any previous pythonpath first. @param pythonpath: string with paths separated by | ''' split = pythonpath.split('|') sys.path = [] for path in split: path = path.strip() if len(path) > 0: sys.path.append(path)
def change_python_path(pythonpath)
Changes the pythonpath, clearing any previous pythonpath first. @param pythonpath: string with paths separated by |
4.174964
2.434766
1.71473
''' Format the completions suggestions in the following format: @@COMPLETIONS(modFile(token,description),(token,description),(token,description))END@@ ''' compMsg = [] compMsg.append('%s' % defFile) for tup in completionsList: compMsg.append(',') compMsg.append('(') compMsg.append(str(self.remove_invalid_chars(tup[0]))) # token compMsg.append(',') compMsg.append(self.remove_invalid_chars(tup[1])) # description if(len(tup) > 2): compMsg.append(',') compMsg.append(self.remove_invalid_chars(tup[2])) # args - only if function. if(len(tup) > 3): compMsg.append(',') compMsg.append(self.remove_invalid_chars(tup[3])) # TYPE compMsg.append(')') return '%s(%s)%s' % (MSG_COMPLETIONS, ''.join(compMsg), MSG_END)
def format_completion_message(self, defFile, completionsList)
Format the completions suggestions in the following format: @@COMPLETIONS(modFile(token,description),(token,description),(token,description))END@@
3.425508
2.211473
1.548971
''' When we receive this, we have 'token):data' ''' token = '' for c in data: if c != ')': token = token + c else: break return token, data[len(token) + 2:]
def get_token_and_data(self, data)
When we receive this, we have 'token):data'
8.76199
4.040827
2.168365
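get_token_and_data needs to split a 'token):payload' string. str.lstrip() takes a set of characters, not a prefix, so lstrip(token + '):') can also eat the front of the payload; slicing past the separator is the safe form used above. A quick illustration with a payload that happens to begin with the token's letters:

data = 'tok):token starts the payload'
token = 'tok'
# lstrip strips any run of the characters 't', 'o', 'k', ')', ':',
# so it also removes the 't', 'o', 'k' that begin the payload:
assert data.lstrip(token + '):') == 'en starts the payload'
# Slicing past 'token):' keeps the payload intact:
assert data[len(token) + 2:] == 'token starts the payload'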
# Support people who used socket.settimeout() to escape # handle_request before self.timeout was available. timeout = self.socket.gettimeout() if timeout is None: timeout = self.timeout elif self.timeout is not None: timeout = min(timeout, self.timeout) fd_sets = select.select([self], [], [], timeout) if not fd_sets[0]: self.handle_timeout() return self._handle_request_noblock()
def handle_request(self)
Handle one request, possibly blocking. Respects self.timeout.
4.489953
4.043073
1.11053
print '-'*40 print 'Exception happened during processing of request from', print client_address import traceback traceback.print_exc() # XXX But this goes to stderr! print '-'*40
def handle_error(self, request, client_address)
Handle an error gracefully. May be overridden. The default is to print a traceback and continue.
7.511523
7.162993
1.048657
if self.active_children is None: return while len(self.active_children) >= self.max_children: # XXX: This will wait for any child process, not just ones # spawned by this library. This could confuse other # libraries that expect to be able to wait for their own # children. try: pid, status = os.waitpid(0, 0) except os.error: pid = None if pid not in self.active_children: continue self.active_children.remove(pid) # XXX: This loop runs more system calls than it ought # to. There should be a way to put the active_children into a # process group and then use os.waitpid(-pgid) to wait for any # of that set, but I couldn't find a way to allocate pgids # that couldn't collide. for child in self.active_children: try: pid, status = os.waitpid(child, os.WNOHANG) # @UndefinedVariable except os.error: pid = None if not pid: continue try: self.active_children.remove(pid) except ValueError, e: raise ValueError('%s. x=%d and list=%r' % (e.message, pid, self.active_children))
def collect_children(self)
Internal routine to wait for children that have exited.
4.615796
4.336387
1.064433
self.collect_children() pid = os.fork() # @UndefinedVariable if pid: # Parent process if self.active_children is None: self.active_children = [] self.active_children.append(pid) self.close_request(request) #close handle in parent process return else: # Child process. # This must never return, hence os._exit()! try: self.finish_request(request, client_address) self.shutdown_request(request) os._exit(0) except: try: self.handle_error(request, client_address) self.shutdown_request(request) finally: os._exit(1)
def process_request(self, request, client_address)
Fork a new subprocess to process the request.
3.698458
3.393108
1.089991
try: self.finish_request(request, client_address) self.shutdown_request(request) except: self.handle_error(request, client_address) self.shutdown_request(request)
def process_request_thread(self, request, client_address)
Same as in BaseServer but as a thread. In addition, exception handling is done here.
2.21005
2.127422
1.038839
#already imported Qt somewhere. Use that loaded = loaded_api() if loaded is not None: return [loaded] mpl = sys.modules.get('matplotlib', None) if mpl is not None and not check_version(mpl.__version__, '1.0.2'): #1.0.1 only supports PyQt4 v1 return [QT_API_PYQT_DEFAULT] if os.environ.get('QT_API', None) is None: #no ETS variable. Ask mpl, then use either return matplotlib_options(mpl) or [QT_API_PYQT_DEFAULT, QT_API_PYSIDE, QT_API_PYQT5] #ETS variable present. Will fallback to external.qt return None
def get_options()
Return a list of acceptable QT APIs, in decreasing order of preference
9.672668
8.516497
1.135757
indent = INDENT_REGEX.match(physical_line).group(1) for offset, char in enumerate(indent): if char != indent_char: return offset, "E101 indentation contains mixed spaces and tabs"
def tabs_or_spaces(physical_line, indent_char)
r"""Never mix tabs and spaces. The most popular way of indenting Python is with spaces only. The second-most popular way is with tabs only. Code indented with a mixture of tabs and spaces should be converted to using spaces exclusively. When invoking the Python command line interpreter with the -t option, it issues warnings about code that illegally mixes tabs and spaces. When using -tt these warnings become errors. These options are highly recommended! Okay: if a == 0:\n a = 1\n b = 1 E101: if a == 0:\n a = 1\n\tb = 1
6.889033
6.532241
1.05462
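pycodestyle physical-line checks like the one above are plain functions: they take the raw line plus whatever parameters their signature declares, and return an (offset, message) pair or fall through to None. That makes them easy to exercise directly; a sketch, assuming INDENT_REGEX is in scope as in the source module:

# A tab inside space indentation is flagged at the tab's offset.
result = tabs_or_spaces('  \tx = 1\n', indent_char=' ')
assert result == (2, 'E101 indentation contains mixed spaces and tabs')

# Consistent indentation falls through and returns None.
assert tabs_or_spaces('    x = 1\n', indent_char=' ') is None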
indent = INDENT_REGEX.match(physical_line).group(1) if '\t' in indent: return indent.index('\t'), "W191 indentation contains tabs"
def tabs_obsolete(physical_line)
r"""For new projects, spaces-only are strongly recommended over tabs. Okay: if True:\n return W191: if True:\n\treturn
11.050636
6.651592
1.661352
physical_line = physical_line.rstrip('\n') # chr(10), newline physical_line = physical_line.rstrip('\r') # chr(13), carriage return physical_line = physical_line.rstrip('\x0c') # chr(12), form feed, ^L stripped = physical_line.rstrip(' \t\v') if physical_line != stripped: if stripped: return len(stripped), "W291 trailing whitespace" else: return 0, "W293 blank line contains whitespace"
def trailing_whitespace(physical_line)
r"""Trailing whitespace is superfluous. The warning returned varies on whether the line itself is blank, for easier filtering for those who want to indent their blank lines. Okay: spam(1)\n# W291: spam(1) \n# W293: class Foo(object):\n \n bang = 12
3.878755
3.710911
1.04523
if line_number == total_lines: stripped_last_line = physical_line.rstrip() if not stripped_last_line: return 0, "W391 blank line at end of file" if stripped_last_line == physical_line: return len(physical_line), "W292 no newline at end of file"
def trailing_blank_lines(physical_line, lines, line_number, total_lines)
r"""Trailing blank lines are superfluous. Okay: spam(1) W391: spam(1)\n However the last line should end with a new line (warning W292).
3.934124
3.454542
1.138826
line = physical_line.rstrip() length = len(line) if length > max_line_length and not noqa: # Special case for long URLs in multi-line docstrings or comments, # but still report the error when the 72 first chars are whitespaces. chunks = line.split() if ((len(chunks) == 1 and multiline) or (len(chunks) == 2 and chunks[0] == '#')) and \ len(line) - len(chunks[-1]) < max_line_length - 7: return if hasattr(line, 'decode'): # Python 2 # The line could contain multi-byte characters try: length = len(line.decode('utf-8')) except UnicodeError: pass if length > max_line_length: return (max_line_length, "E501 line too long " "(%d > %d characters)" % (length, max_line_length))
def maximum_line_length(physical_line, max_line_length, multiline, noqa)
r"""Limit all lines to a maximum of 79 characters. There are still many devices around that are limited to 80 character lines; plus, limiting windows to 80 characters makes it possible to have several windows side-by-side. The default wrapping on such devices looks ugly. Therefore, please limit all lines to a maximum of 79 characters. For flowing long blocks of text (docstrings or comments), limiting the length to 72 characters is recommended. Reports error E501.
4.127929
3.831217
1.077446
if line_number < 3 and not previous_logical: return # Don't expect blank lines before the first line if previous_logical.startswith('@'): if blank_lines: yield 0, "E304 blank lines found after function decorator" elif blank_lines > 2 or (indent_level and blank_lines == 2): yield 0, "E303 too many blank lines (%d)" % blank_lines elif STARTSWITH_TOP_LEVEL_REGEX.match(logical_line): if indent_level: if not (blank_before or previous_indent_level < indent_level or DOCSTRING_REGEX.match(previous_logical)): ancestor_level = indent_level nested = False # Search backwards for a def ancestor or tree root (top level). for line in lines[line_number - 2::-1]: if line.strip() and expand_indent(line) < ancestor_level: ancestor_level = expand_indent(line) nested = line.lstrip().startswith('def ') if nested or ancestor_level == 0: break if nested: yield 0, "E306 expected 1 blank line before a " \ "nested definition, found 0" else: yield 0, "E301 expected 1 blank line, found 0" elif blank_before != 2: yield 0, "E302 expected 2 blank lines, found %d" % blank_before elif (logical_line and not indent_level and blank_before != 2 and previous_unindented_logical_line.startswith(('def ', 'class '))): yield 0, "E305 expected 2 blank lines after " "class or function definition, found %d" % blank_before
def blank_lines(logical_line, blank_lines, indent_level, line_number, blank_before, previous_logical, previous_unindented_logical_line, previous_indent_level, lines)
r"""Separate top-level function and class definitions with two blank lines. Method definitions inside a class are separated by a single blank line. Extra blank lines may be used (sparingly) to separate groups of related functions. Blank lines may be omitted between a bunch of related one-liners (e.g. a set of dummy implementations). Use blank lines in functions, sparingly, to indicate logical sections. Okay: def a():\n pass\n\n\ndef b():\n pass Okay: def a():\n pass\n\n\nasync def b():\n pass Okay: def a():\n pass\n\n\n# Foo\n# Bar\n\ndef b():\n pass Okay: default = 1\nfoo = 1 Okay: classify = 1\nfoo = 1 E301: class Foo:\n b = 0\n def bar():\n pass E302: def a():\n pass\n\ndef b(n):\n pass E302: def a():\n pass\n\nasync def b(n):\n pass E303: def a():\n pass\n\n\n\ndef b(n):\n pass E303: def a():\n\n\n\n pass E304: @decorator\n\ndef a():\n pass E305: def a():\n pass\na() E306: def a():\n def b():\n pass\n def c():\n pass
3.581564
3.627487
0.98734
line = logical_line for match in EXTRANEOUS_WHITESPACE_REGEX.finditer(line): text = match.group() char = text.strip() found = match.start() if text == char + ' ': # assert char in '([{' yield found + 1, "E201 whitespace after '%s'" % char elif line[found - 1] != ',': code = ('E202' if char in '}])' else 'E203') # if char in ',;:' yield found, "%s whitespace before '%s'" % (code, char)
def extraneous_whitespace(logical_line)
r"""Avoid extraneous whitespace. Avoid extraneous whitespace in these situations: - Immediately inside parentheses, brackets or braces. - Immediately before a comma, semicolon, or colon. Okay: spam(ham[1], {eggs: 2}) E201: spam( ham[1], {eggs: 2}) E201: spam(ham[ 1], {eggs: 2}) E201: spam(ham[1], { eggs: 2}) E202: spam(ham[1], {eggs: 2} ) E202: spam(ham[1 ], {eggs: 2}) E202: spam(ham[1], {eggs: 2 }) E203: if x == 4: print x, y; x, y = y , x E203: if x == 4: print x, y ; x, y = y, x E203: if x == 4 : print x, y; x, y = y, x
5.321491
5.157464
1.031804
for match in KEYWORD_REGEX.finditer(logical_line): before, after = match.groups() if '\t' in before: yield match.start(1), "E274 tab before keyword" elif len(before) > 1: yield match.start(1), "E272 multiple spaces before keyword" if '\t' in after: yield match.start(2), "E273 tab after keyword" elif len(after) > 1: yield match.start(2), "E271 multiple spaces after keyword"
def whitespace_around_keywords(logical_line)
r"""Avoid extraneous whitespace around keywords. Okay: True and False E271: True and False E272: True and False E273: True and\tFalse E274: True\tand False
2.816622
2.572577
1.094864
line = logical_line indicator = ' import(' if line.startswith('from '): found = line.find(indicator) if -1 < found: pos = found + len(indicator) - 1 yield pos, "E275 missing whitespace after keyword"
def missing_whitespace_after_import_keyword(logical_line)
r"""Multiple imports in form from x import (a, b, c) should have space between import statement and parenthesised name list. Okay: from foo import (bar, baz) E275: from foo import(bar, baz) E275: from importable.module import(bar, baz)
9.849836
7.412734
1.328772
line = logical_line for index in range(len(line) - 1): char = line[index] if char in ',;:' and line[index + 1] not in WHITESPACE: before = line[:index] if char == ':' and before.count('[') > before.count(']') and \ before.rfind('{') < before.rfind('['): continue # Slice syntax, no space required if char == ',' and line[index + 1] == ')': continue # Allow tuple with only one element: (3,) yield index, "E231 missing whitespace after '%s'" % char
def missing_whitespace(logical_line)
r"""Each comma, semicolon or colon should be followed by whitespace. Okay: [a, b] Okay: (3,) Okay: a[1:4] Okay: a[:4] Okay: a[1:] Okay: a[1:4:2] E231: ['a','b'] E231: foo(bar,baz) E231: [{'a':'b'}]
5.089118
4.193691
1.213518
c = 0 if logical_line else 3 tmpl = "E11%d %s" if logical_line else "E11%d %s (comment)" if indent_level % 4: yield 0, tmpl % (1 + c, "indentation is not a multiple of four") indent_expect = previous_logical.endswith(':') if indent_expect and indent_level <= previous_indent_level: yield 0, tmpl % (2 + c, "expected an indented block") elif not indent_expect and indent_level > previous_indent_level: yield 0, tmpl % (3 + c, "unexpected indentation")
def indentation(logical_line, previous_logical, indent_char, indent_level, previous_indent_level)
r"""Use 4 spaces per indentation level. For really old code that you don't want to mess up, you can continue to use 8-space tabs. Okay: a = 1 Okay: if a == 0:\n a = 1 E111: a = 1 E114: # a = 1 Okay: for item in items:\n pass E112: for item in items:\npass E115: for item in items:\n# Hi\n pass Okay: a = 1\nb = 2 E113: a = 1\n b = 2 E116: a = 1\n # b = 2
4.233495
5.089162
0.831865
prev_type, prev_text, __, prev_end, __ = tokens[0] for index in range(1, len(tokens)): token_type, text, start, end, __ = tokens[index] if (token_type == tokenize.OP and text in '([' and start != prev_end and (prev_type == tokenize.NAME or prev_text in '}])') and # Syntax "class A (B):" is allowed, but avoid it (index < 2 or tokens[index - 2][1] != 'class') and # Allow "return (a.foo for a in range(5))" not keyword.iskeyword(prev_text)): yield prev_end, "E211 whitespace before '%s'" % text prev_type = token_type prev_text = text prev_end = end
def whitespace_before_parameters(logical_line, tokens)
r"""Avoid extraneous whitespace. Avoid extraneous whitespace in the following situations: - before the open parenthesis that starts the argument list of a function call. - before the open parenthesis that starts an indexing or slicing. Okay: spam(1) E211: spam (1) Okay: dict['key'] = list[index] E211: dict ['key'] = list[index] E211: dict['key'] = list [index]
4.395325
4.536428
0.968895
for match in OPERATOR_REGEX.finditer(logical_line): before, after = match.groups() if '\t' in before: yield match.start(1), "E223 tab before operator" elif len(before) > 1: yield match.start(1), "E221 multiple spaces before operator" if '\t' in after: yield match.start(2), "E224 tab after operator" elif len(after) > 1: yield match.start(2), "E222 multiple spaces after operator"
def whitespace_around_operator(logical_line)
r"""Avoid extraneous whitespace around an operator. Okay: a = 12 + 3 E221: a = 4 + 5 E222: a = 4 + 5 E223: a = 4\t+ 5 E224: a = 4 +\t5
2.716539
2.602749
1.043719
parens = 0 need_space = False prev_type = tokenize.OP prev_text = prev_end = None for token_type, text, start, end, line in tokens: if token_type in SKIP_COMMENTS: continue if text in ('(', 'lambda'): parens += 1 elif text == ')': parens -= 1 if need_space: if start != prev_end: # Found a (probably) needed space if need_space is not True and not need_space[1]: yield (need_space[0], "E225 missing whitespace around operator") need_space = False elif text == '>' and prev_text in ('<', '-'): # Tolerate the "<>" operator, even if running Python 3 # Deal with Python 3's annotated return value "->" pass else: if need_space is True or need_space[1]: # A needed trailing space was not found yield prev_end, "E225 missing whitespace around operator" elif prev_text != '**': code, optype = 'E226', 'arithmetic' if prev_text == '%': code, optype = 'E228', 'modulo' elif prev_text not in ARITHMETIC_OP: code, optype = 'E227', 'bitwise or shift' yield (need_space[0], "%s missing whitespace " "around %s operator" % (code, optype)) need_space = False elif token_type == tokenize.OP and prev_end is not None: if text == '=' and parens: # Allow keyword args or defaults: foo(bar=None). pass elif text in WS_NEEDED_OPERATORS: need_space = True elif text in UNARY_OPERATORS: # Check if the operator is being used as a binary operator # Allow unary operators: -123, -x, +1. # Allow argument unpacking: foo(*args, **kwargs). if (prev_text in '}])' if prev_type == tokenize.OP else prev_text not in KEYWORDS): need_space = None elif text in WS_OPTIONAL_OPERATORS: need_space = None if need_space is None: # Surrounding space is optional, but ensure that # trailing space matches opening space need_space = (prev_end, start != prev_end) elif need_space and start == prev_end: # A needed opening space was not found yield prev_end, "E225 missing whitespace around operator" need_space = False prev_type = token_type prev_text = text prev_end = end
def missing_whitespace_around_operator(logical_line, tokens)
r"""Surround operators with a single space on either side. - Always surround these binary operators with a single space on either side: assignment (=), augmented assignment (+=, -= etc.), comparisons (==, <, >, !=, <=, >=, in, not in, is, is not), Booleans (and, or, not). - If operators with different priorities are used, consider adding whitespace around the operators with the lowest priorities. Okay: i = i + 1 Okay: submitted += 1 Okay: x = x * 2 - 1 Okay: hypot2 = x * x + y * y Okay: c = (a + b) * (a - b) Okay: foo(bar, key='word', *args, **kwargs) Okay: alpha[:-i] E225: i=i+1 E225: submitted +=1 E225: x = x /2 - 1 E225: z = x **y E226: c = (a+b) * (a-b) E226: hypot2 = x*x + y*y E227: c = a|b E228: msg = fmt%(errno, errmsg)
4.439976
4.494655
0.987835
line = logical_line for m in WHITESPACE_AFTER_COMMA_REGEX.finditer(line): found = m.start() + 1 if '\t' in m.group(): yield found, "E242 tab after '%s'" % m.group()[0] else: yield found, "E241 multiple spaces after '%s'" % m.group()[0]
def whitespace_around_comma(logical_line)
r"""Avoid extraneous whitespace after a comma or a colon. Note: these checks are disabled by default Okay: a = (1, 2) E241: a = (1, 2) E242: a = (1,\t2)
5.042008
5.118779
0.985002
parens = 0 no_space = False prev_end = None annotated_func_arg = False in_def = bool(STARTSWITH_DEF_REGEX.match(logical_line)) message = "E251 unexpected spaces around keyword / parameter equals" for token_type, text, start, end, line in tokens: if token_type == tokenize.NL: continue if no_space: no_space = False if start != prev_end: yield (prev_end, message) if token_type == tokenize.OP: if text in '([': parens += 1 elif text in ')]': parens -= 1 elif in_def and text == ':' and parens == 1: annotated_func_arg = True elif parens and text == ',' and parens == 1: annotated_func_arg = False elif parens and text == '=' and not annotated_func_arg: no_space = True if start != prev_end: yield (prev_end, message) if not parens: annotated_func_arg = False prev_end = end
def whitespace_around_named_parameter_equals(logical_line, tokens)
r"""Don't use spaces around the '=' sign in function arguments. Don't use spaces around the '=' sign when used to indicate a keyword argument or a default parameter value. Okay: def complex(real, imag=0.0): Okay: return magic(r=real, i=imag) Okay: boolean(a == b) Okay: boolean(a != b) Okay: boolean(a <= b) Okay: boolean(a >= b) Okay: def foo(arg: int = 42): Okay: async def foo(arg: int = 42): E251: def complex(real, imag = 0.0): E251: return magic(r = real, i = imag)
3.399623
3.56606
0.953327
prev_end = (0, 0) for token_type, text, start, end, line in tokens: if token_type == tokenize.COMMENT: inline_comment = line[:start[1]].strip() if inline_comment: if prev_end[0] == start[0] and start[1] < prev_end[1] + 2: yield (prev_end, "E261 at least two spaces before inline comment") symbol, sp, comment = text.partition(' ') bad_prefix = symbol not in '#:' and (symbol.lstrip('#')[:1] or '#') if inline_comment: if bad_prefix or comment[:1] in WHITESPACE: yield start, "E262 inline comment should start with '# '" elif bad_prefix and (bad_prefix != '!' or start[0] > 1): if bad_prefix != '#': yield start, "E265 block comment should start with '# '" elif comment: yield start, "E266 too many leading '#' for block comment" elif token_type != tokenize.NL: prev_end = end
def whitespace_before_comment(logical_line, tokens)
r"""Separate inline comments by at least two spaces. An inline comment is a comment on the same line as a statement. Inline comments should be separated by at least two spaces from the statement. They should start with a # and a single space. Each line of a block comment starts with a # and a single space (unless it is indented text inside the comment). Okay: x = x + 1 # Increment x Okay: x = x + 1 # Increment x Okay: # Block comment E261: x = x + 1 # Increment x E262: x = x + 1 #Increment x E262: x = x + 1 # Increment x E265: #Block comment E266: ### Block comment
4.533707
4.340089
1.044612
line = logical_line if line.startswith('import '): found = line.find(',') if -1 < found and ';' not in line[:found]: yield found, "E401 multiple imports on one line"
def imports_on_separate_lines(logical_line)
r"""Place imports on separate lines. Okay: import os\nimport sys E401: import sys, os Okay: from subprocess import Popen, PIPE Okay: from myclas import MyClass Okay: from foo.bar.yourclass import YourClass Okay: import myclass Okay: import foo.bar.yourclass
7.503834
7.958257
0.942899
def is_string_literal(line): if line[0] in 'uUbB': line = line[1:] if line and line[0] in 'rR': line = line[1:] return line and (line[0] == '"' or line[0] == "'") allowed_try_keywords = ('try', 'except', 'else', 'finally') if indent_level: # Allow imports in conditional statements or functions return if not logical_line: # Allow empty lines or comments return if noqa: return line = logical_line if line.startswith('import ') or line.startswith('from '): if checker_state.get('seen_non_imports', False): yield 0, "E402 module level import not at top of file" elif re.match(DUNDER_REGEX, line): return elif any(line.startswith(kw) for kw in allowed_try_keywords): # Allow try, except, else, finally keywords intermixed with imports in # order to support conditional importing return elif is_string_literal(line): # The first literal is a docstring, allow it. Otherwise, report error. if checker_state.get('seen_docstring', False): checker_state['seen_non_imports'] = True else: checker_state['seen_docstring'] = True else: checker_state['seen_non_imports'] = True
def module_imports_on_top_of_file(logical_line, indent_level, checker_state, noqa)
r"""Place imports at the top of the file. Always put imports at the top of the file, just after any module comments and docstrings, and before module globals and constants. Okay: import os Okay: # this is a comment\nimport os Okay: '''this is a module docstring'''\nimport os Okay: r'''this is a module docstring'''\nimport os Okay: try:\n\timport x\nexcept ImportError:\n\tpass\nelse:\n\tpass\nimport y Okay: try:\n\timport x\nexcept ImportError:\n\tpass\nfinally:\n\tpass\nimport y E402: a=1\nimport os E402: 'One string'\n"Two string"\nimport os E402: a=1\nfrom sys import x Okay: if x:\n import os
3.737855
3.814434
0.979924
line = logical_line last_char = len(line) - 1 found = line.find(':') prev_found = 0 counts = dict((char, 0) for char in '{}[]()') while -1 < found < last_char: update_counts(line[prev_found:found], counts) if ((counts['{'] <= counts['}'] and # {'a': 1} (dict) counts['['] <= counts[']'] and # [1:2] (slice) counts['('] <= counts[')'])): # (annotation) lambda_kw = LAMBDA_REGEX.search(line, 0, found) if lambda_kw: before = line[:lambda_kw.start()].rstrip() if before[-1:] == '=' and isidentifier(before[:-1].strip()): yield 0, ("E731 do not assign a lambda expression, use a " "def") break if STARTSWITH_DEF_REGEX.match(line): yield 0, "E704 multiple statements on one line (def)" elif STARTSWITH_INDENT_STATEMENT_REGEX.match(line): yield found, "E701 multiple statements on one line (colon)" prev_found = found found = line.find(':', found + 1) found = line.find(';') while -1 < found: if found < last_char: yield found, "E702 multiple statements on one line (semicolon)" else: yield found, "E703 statement ends with a semicolon" found = line.find(';', found + 1)
def compound_statements(logical_line)
r"""Compound statements (on the same line) are generally discouraged. While sometimes it's okay to put an if/for/while with a small body on the same line, never do this for multi-clause statements. Also avoid folding such long lines! Always use a def statement instead of an assignment statement that binds a lambda expression directly to a name. Okay: if foo == 'blah':\n do_blah_thing() Okay: do_one() Okay: do_two() Okay: do_three() E701: if foo == 'blah': do_blah_thing() E701: for x in lst: total += x E701: while t < 10: t = delay() E701: if foo == 'blah': do_blah_thing() E701: else: do_non_blah_thing() E701: try: something() E701: finally: cleanup() E701: if foo == 'blah': one(); two(); three() E702: do_one(); do_two(); do_three() E703: do_four(); # useless semicolon E704: def f(x): return 2*x E731: f = lambda x: 2*x
3.958085
3.724052
1.062844
prev_start = prev_end = parens = 0 comment = False backslash = None for token_type, text, start, end, line in tokens: if token_type == tokenize.COMMENT: comment = True if start[0] != prev_start and parens and backslash and not comment: yield backslash, "E502 the backslash is redundant between brackets" if end[0] != prev_end: if line.rstrip('\r\n').endswith('\\'): backslash = (end[0], len(line.splitlines()[-1]) - 1) else: backslash = None prev_start = prev_end = end[0] else: prev_start = start[0] if token_type == tokenize.OP: if text in '([{': parens += 1 elif text in ')]}': parens -= 1
def explicit_line_join(logical_line, tokens)
r"""Avoid explicit line join between brackets. The preferred way of wrapping long lines is by using Python's implied line continuation inside parentheses, brackets and braces. Long lines can be broken over multiple lines by wrapping expressions in parentheses. These should be used in preference to using a backslash for line continuation. E502: aaa = [123, \\n 123] E502: aaa = ("bbb " \\n "ccc") Okay: aaa = [123,\n 123] Okay: aaa = ("bbb "\n "ccc") Okay: aaa = "bbb " \\n "ccc" Okay: aaa = 123 # \\
3.446091
3.292944
1.046508
def is_binary_operator(token_type, text): # The % character is strictly speaking a binary operator, but the # common usage seems to be to put it next to the format parameters, # after a line break. return ((token_type == tokenize.OP or text in ['and', 'or']) and text not in "()[]{},:.;@=%~") line_break = False unary_context = True # Previous non-newline token types and text previous_token_type = None previous_text = None for token_type, text, start, end, line in tokens: if token_type == tokenize.COMMENT: continue if ('\n' in text or '\r' in text) and token_type != tokenize.STRING: line_break = True else: if (is_binary_operator(token_type, text) and line_break and not unary_context and not is_binary_operator(previous_token_type, previous_text)): yield start, "W503 line break before binary operator" unary_context = text in '([{,;' line_break = False previous_token_type = token_type previous_text = text
def break_around_binary_operator(logical_line, tokens)
r""" Avoid breaks before binary operators. The preferred place to break around a binary operator is after the operator, not before it. W503: (width == 0\n + height == 0) W503: (width == 0\n and height == 0) Okay: (width == 0 +\n height == 0) Okay: foo(\n -x) Okay: foo(x\n []) Okay: x = '''\n''' + '' Okay: foo(x,\n -y) Okay: foo(x, # comment\n -y) Okay: var = (1 &\n ~2) Okay: var = (1 /\n -2) Okay: var = (1 +\n -1 +\n -2)
4.677857
4.343306
1.077027
match = not noqa and COMPARE_SINGLETON_REGEX.search(logical_line) if match: singleton = match.group(1) or match.group(3) same = (match.group(2) == '==') msg = "'if cond is %s:'" % (('' if same else 'not ') + singleton) if singleton in ('None',): code = 'E711' else: code = 'E712' nonzero = ((singleton == 'True' and same) or (singleton == 'False' and not same)) msg += " or 'if %scond:'" % ('' if nonzero else 'not ') yield match.start(2), ("%s comparison to %s should be %s" % (code, singleton, msg))
def comparison_to_singleton(logical_line, noqa)
r"""Comparison to singletons should use "is" or "is not". Comparisons to singletons like None should always be done with "is" or "is not", never the equality operators. Okay: if arg is not None: E711: if arg != None: E711: if None == arg: E712: if arg == True: E712: if False == arg: Also, beware of writing if x when you really mean if x is not None -- e.g. when testing whether a variable or argument that defaults to None was set to some other value. The other value might have a type (such as a container) that could be false in a boolean context!
5.281778
5.142367
1.02711
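Checks that only need the logical line can be called directly. A quick sketch (assuming the function is importable from pycodestyle; the second argument is the noqa flag, False here):

import pycodestyle

for pos, text in pycodestyle.comparison_to_singleton('if arg != None:', False):
    print(pos, text)  # E711 comparison to None should be ...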
match = COMPARE_NEGATIVE_REGEX.search(logical_line)
if match:
    pos = match.start(1)
    if match.group(2) == 'in':
        yield pos, "E713 test for membership should be 'not in'"
    else:
        yield pos, "E714 test for object identity should be 'is not'"
def comparison_negative(logical_line)
r"""Negative comparison should be done using "not in" and "is not". Okay: if x not in y:\n pass Okay: assert (X in Y or X is Z) Okay: if not (X in Y):\n pass Okay: zz = x is not y E713: Z = not X in Y E713: if not X.B in Y:\n pass E714: if not X is Y:\n pass E714: Z = not X.B is Y
5.384499
5.043462
1.06762
match = COMPARE_TYPE_REGEX.search(logical_line)
if match and not noqa:
    inst = match.group(1)
    if inst and isidentifier(inst) and inst not in SINGLETONS:
        return  # Allow comparison for types which are not obvious
    yield match.start(), "E721 do not compare types, use 'isinstance()'"
def comparison_type(logical_line, noqa)
r"""Object type comparisons should always use isinstance(). Do not compare types directly. Okay: if isinstance(obj, int): E721: if type(obj) is type(1): When checking if an object is a string, keep in mind that it might be a unicode string too! In Python 2.3, str and unicode have a common base class, basestring, so you can do: Okay: if isinstance(obj, basestring): Okay: if type(a1) is type(b1):
9.223812
10.105536
0.912748
if noqa:
    return

regex = re.compile(r"except\s*:")
match = regex.match(logical_line)
if match:
    yield match.start(), "E722 do not use bare 'except'"
def bare_except(logical_line, noqa)
r"""When catching exceptions, mention specific exceptions whenever possible. Okay: except Exception: Okay: except BaseException: E722: except:
6.200561
7.618948
0.813834
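Since the check only inspects the logical line, it is easy to exercise directly -- a sketch, assuming pycodestyle exposes it at module level:

import pycodestyle

print(list(pycodestyle.bare_except('except:', False)))
# roughly: [(0, "E722 do not use bare 'except'")]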
idents_to_avoid = ('l', 'O', 'I')
prev_type, prev_text, prev_start, prev_end, __ = tokens[0]
for token_type, text, start, end, line in tokens[1:]:
    ident = pos = None
    # identifiers on the lhs of an assignment operator
    if token_type == tokenize.OP and '=' in text:
        if prev_text in idents_to_avoid:
            ident = prev_text
            pos = prev_start
    # identifiers bound to a value with 'as', 'global', or 'nonlocal'
    if prev_text in ('as', 'global', 'nonlocal'):
        if text in idents_to_avoid:
            ident = text
            pos = start
    if prev_text == 'class':
        if text in idents_to_avoid:
            yield start, "E742 ambiguous class definition '%s'" % text
    if prev_text == 'def':
        if text in idents_to_avoid:
            yield start, "E743 ambiguous function definition '%s'" % text
    if ident:
        yield pos, "E741 ambiguous variable name '%s'" % ident
    prev_text = text
    prev_start = start
def ambiguous_identifier(logical_line, tokens)
r"""Never use the characters 'l', 'O', or 'I' as variable names. In some fonts, these characters are indistinguishable from the numerals one and zero. When tempted to use 'l', use 'L' instead. Okay: L = 0 Okay: o = 123 Okay: i = 42 E741: l = 0 E741: O = 123 E741: I = 42 Variables can be bound in several other contexts, including class and function definitions, 'global' and 'nonlocal' statements, exception handlers, and 'with' statements. Okay: except AttributeError as o: Okay: with lock as L: E741: except AttributeError as O: E741: with lock as l: E741: global I E741: nonlocal l E742: class I(object): E743: def l(x):
3.341382
3.049981
1.095542
match = RAISE_COMMA_REGEX.match(logical_line)
if match and not RERAISE_COMMA_REGEX.match(logical_line):
    yield match.end() - 1, "W602 deprecated form of raising exception"
def python_3000_raise_comma(logical_line)
r"""When raising an exception, use "raise ValueError('message')". The older form is removed in Python 3. Okay: raise DummyError("Message") W602: raise DummyError, "Message"
7.607271
6.382104
1.191969
if '\t' not in line:
    return len(line) - len(line.lstrip())
result = 0
for char in line:
    if char == '\t':
        result = result // 8 * 8 + 8
    elif char == ' ':
        result += 1
    else:
        break
return result
def expand_indent(line)
r"""Return the amount of indentation. Tabs are expanded to the next multiple of 8. >>> expand_indent(' ') 4 >>> expand_indent('\t') 8 >>> expand_indent(' \t') 8 >>> expand_indent(' \t') 16
2.9502
2.701258
1.092158
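A quick sanity check of the expansion rule (tabs advance to the next multiple of 8, spaces count one each), assuming the function is importable from pycodestyle:

import pycodestyle

assert pycodestyle.expand_indent('    ') == 4         # 4 spaces
assert pycodestyle.expand_indent('\t') == 8           # 1 tab
assert pycodestyle.expand_indent('   \t') == 8        # 3 spaces + tab
assert pycodestyle.expand_indent('        \t') == 16  # 8 spaces + tab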
# String modifiers (e.g. u or r)
start = text.index(text[-1]) + 1
end = len(text) - 1
# Triple quotes
if text[-3:] in ('"""', "'''"):
    start += 2
    end -= 2
return text[:start] + 'x' * (end - start) + text[end:]
def mute_string(text)
Replace contents with 'xxx' to prevent syntax matching.

>>> mute_string('"abc"')
'"xxx"'
>>> mute_string("'''abc'''")
"'''xxx'''"
>>> mute_string("r'abc'")
"r'xxx'"
4.616564
4.778709
0.966069
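The index arithmetic keeps the quote characters and any string prefix intact while masking only the payload -- a sketch, assuming module-level import from pycodestyle:

import pycodestyle

assert pycodestyle.mute_string('"abc"') == '"xxx"'
assert pycodestyle.mute_string("'''abc'''") == "'''xxx'''"
assert pycodestyle.mute_string("r'abc'") == "r'xxx'"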
# For each file of the diff, the entry key is the filename,
# and the value is a set of row numbers to consider.
rv = {}
path = nrows = None
for line in diff.splitlines():
    if nrows:
        if line[:1] != '-':
            nrows -= 1
        continue
    if line[:3] == '@@ ':
        hunk_match = HUNK_REGEX.match(line)
        (row, nrows) = [int(g or '1') for g in hunk_match.groups()]
        rv[path].update(range(row, row + nrows))
    elif line[:3] == '+++':
        path = line[4:].split('\t', 1)[0]
        if path[:2] == 'b/':
            path = path[2:]
        rv[path] = set()
return dict([(os.path.join(parent, path), rows)
             for (path, rows) in rv.items()
             if rows and filename_match(path, patterns)])
def parse_udiff(diff, patterns=None, parent='.')
Return a dictionary of matching lines.
4.010731
3.735915
1.073561
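A sketch of feeding it a small unified diff (assumes pycodestyle.parse_udiff is importable; output shown for POSIX paths):

import pycodestyle

diff = '''\
--- a/example.py
+++ b/example.py
@@ -1,2 +1,3 @@
 import os
+x = 1
 print(os.name)
'''
print(pycodestyle.parse_udiff(diff, patterns=['*.py']))
# roughly {'./example.py': {1, 2, 3}} -- every row spanned by the hunk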
if not value:
    return []
if isinstance(value, list):
    return value
paths = []
for path in value.split(','):
    path = path.strip()
    if '/' in path:
        path = os.path.abspath(os.path.join(parent, path))
    paths.append(path.rstrip('/'))
return paths
def normalize_paths(value, parent=os.curdir)
Parse a comma-separated list of paths.

Return a list of absolute paths.
2.203196
2.183758
1.008901
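Note that only entries containing a slash are anchored to parent; bare names pass through unchanged. A sketch (assuming the function is importable from pycodestyle; the absolute path shown is illustrative):

import pycodestyle

print(pycodestyle.normalize_paths('.git, build, sub/dir/'))
# e.g. ['.git', 'build', '/current/dir/sub/dir']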
if not patterns:
    return default
return any(fnmatch(filename, pattern) for pattern in patterns)
def filename_match(filename, patterns, default=True)
Check if patterns contains a pattern that matches filename.

If patterns is unspecified, this always returns True.
2.976383
3.565444
0.834786
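Behaviour in the two branches, sketched with asserts (assuming module-level import; fnmatch's '*' matches across path separators):

import pycodestyle

assert pycodestyle.filename_match('pkg/mod.py', ['*.py'])
assert not pycodestyle.filename_match('notes.txt', ['*.py'])
assert pycodestyle.filename_match('anything', [])  # empty patterns -> default True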
def _add_check(check, kind, codes, args):
    if check in _checks[kind]:
        _checks[kind][check][0].extend(codes or [])
    else:
        _checks[kind][check] = (codes or [''], args)

if inspect.isfunction(check):
    args = _get_parameters(check)
    if args and args[0] in ('physical_line', 'logical_line'):
        if codes is None:
            codes = ERRORCODE_REGEX.findall(check.__doc__ or '')
        _add_check(check, args[0], codes, args)
elif inspect.isclass(check):
    if _get_parameters(check.__init__)[:2] == ['self', 'tree']:
        _add_check(check, 'tree', codes, None)
def register_check(check, codes=None)
Register a new check object.
3.417615
3.485263
0.98059
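Because the first parameter name selects the check kind and codes default to those scraped from the docstring, registering a custom physical-line check is short. A sketch; the check name and 'X100' code are hypothetical:

import pycodestyle

def flag_fixme(physical_line):
    '''Flag lines that still carry a FIXME marker.

    X100: x = 1  # FIXME
    '''
    pos = physical_line.find('FIXME')
    if pos >= 0:
        return pos, 'X100 unresolved FIXME'

pycodestyle.register_check(flag_fixme)  # codes scraped from the docstring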
mod = inspect.getmodule(register_check)
for (name, function) in inspect.getmembers(mod, inspect.isfunction):
    register_check(function)
def init_checks_registry()
Register all globally visible functions.

The first argument name is either 'physical_line' or 'logical_line'.
4.203311
4.267742
0.984903
parser = OptionParser(prog=prog, version=version,
                      usage="%prog [options] input ...")
parser.config_options = [
    'exclude', 'filename', 'select', 'ignore', 'max-line-length',
    'hang-closing', 'count', 'format', 'quiet', 'show-pep8',
    'show-source', 'statistics', 'verbose']
parser.add_option('-v', '--verbose', default=0, action='count',
                  help="print status messages, or debug with -vv")
parser.add_option('-q', '--quiet', default=0, action='count',
                  help="report only file names, or nothing with -qq")
parser.add_option('-r', '--repeat', default=True, action='store_true',
                  help="(obsolete) show all occurrences of the same error")
parser.add_option('--first', action='store_false', dest='repeat',
                  help="show first occurrence of each error")
parser.add_option('--exclude', metavar='patterns', default=DEFAULT_EXCLUDE,
                  help="exclude files or directories which match these "
                       "comma separated patterns (default: %default)")
parser.add_option('--filename', metavar='patterns', default='*.py',
                  help="when parsing directories, only check filenames "
                       "matching these comma separated patterns "
                       "(default: %default)")
parser.add_option('--select', metavar='errors', default='',
                  help="select errors and warnings (e.g. E,W6)")
parser.add_option('--ignore', metavar='errors', default='',
                  help="skip errors and warnings (e.g. E4,W) "
                       "(default: %s)" % DEFAULT_IGNORE)
parser.add_option('--show-source', action='store_true',
                  help="show source code for each error")
parser.add_option('--show-pep8', action='store_true',
                  help="show text of PEP 8 for each error "
                       "(implies --first)")
parser.add_option('--statistics', action='store_true',
                  help="count errors and warnings")
parser.add_option('--count', action='store_true',
                  help="print total number of errors and warnings "
                       "to standard error and set exit code to 1 if "
                       "total is not null")
parser.add_option('--max-line-length', type='int', metavar='n',
                  default=MAX_LINE_LENGTH,
                  help="set maximum allowed line length "
                       "(default: %default)")
parser.add_option('--hang-closing', action='store_true',
                  help="hang closing bracket instead of matching "
                       "indentation of opening bracket's line")
parser.add_option('--format', metavar='format', default='default',
                  help="set the error format [default|pylint|<custom>]")
parser.add_option('--diff', action='store_true',
                  help="report changes only within line number ranges in "
                       "the unified diff received on STDIN")
group = parser.add_option_group("Testing Options")
if os.path.exists(TESTSUITE_PATH):
    group.add_option('--testsuite', metavar='dir',
                     help="run regression tests from dir")
    group.add_option('--doctest', action='store_true',
                     help="run doctest on myself")
group.add_option('--benchmark', action='store_true',
                 help="measure processing speed")
return parser
def get_parser(prog='pycodestyle', version=__version__)
Create the parser for the program.
2.881841
2.879701
1.000743
config = RawConfigParser()

cli_conf = options.config

local_dir = os.curdir

if USER_CONFIG and os.path.isfile(USER_CONFIG):
    if options.verbose:
        print('user configuration: %s' % USER_CONFIG)
    config.read(USER_CONFIG)

parent = tail = args and os.path.abspath(os.path.commonprefix(args))
while tail:
    if config.read(os.path.join(parent, fn) for fn in PROJECT_CONFIG):
        local_dir = parent
        if options.verbose:
            print('local configuration: in %s' % parent)
        break
    (parent, tail) = os.path.split(parent)

if cli_conf and os.path.isfile(cli_conf):
    if options.verbose:
        print('cli configuration: %s' % cli_conf)
    config.read(cli_conf)

pycodestyle_section = None
if config.has_section(parser.prog):
    pycodestyle_section = parser.prog
elif config.has_section('pep8'):
    pycodestyle_section = 'pep8'  # Deprecated
    warnings.warn('[pep8] section is deprecated. Use [pycodestyle].')

if pycodestyle_section:
    option_list = dict([(o.dest, o.type or o.action)
                        for o in parser.option_list])

    # First, read the default values
    (new_options, __) = parser.parse_args([])

    # Second, parse the configuration
    for opt in config.options(pycodestyle_section):
        if opt.replace('_', '-') not in parser.config_options:
            print("  unknown option '%s' ignored" % opt)
            continue
        if options.verbose > 1:
            print("  %s = %s" % (opt,
                                 config.get(pycodestyle_section, opt)))
        normalized_opt = opt.replace('-', '_')
        opt_type = option_list[normalized_opt]
        if opt_type in ('int', 'count'):
            value = config.getint(pycodestyle_section, opt)
        elif opt_type in ('store_true', 'store_false'):
            value = config.getboolean(pycodestyle_section, opt)
        else:
            value = config.get(pycodestyle_section, opt)
            if normalized_opt == 'exclude':
                value = normalize_paths(value, local_dir)
        setattr(new_options, normalized_opt, value)

    # Third, overwrite with the command-line options
    (options, __) = parser.parse_args(arglist, values=new_options)
options.doctest = options.testsuite = False
return options
def read_config(options, args, arglist, parser)
Read and parse configurations.

If a config file is specified on the command line with the
"--config" option, then only it is used for configuration.

Otherwise, the user configuration (~/.config/pycodestyle) and any
local configurations in the current directory or above will be
merged together (in that order) using the read method of
ConfigParser.
2.960046
2.875094
1.029548
if not parser:
    parser = get_parser()
if not parser.has_option('--config'):
    group = parser.add_option_group("Configuration", description=(
        "The project options are read from the [%s] section of the "
        "tox.ini file or the setup.cfg file located in any parent folder "
        "of the path(s) being processed.  Allowed options are: %s." %
        (parser.prog, ', '.join(parser.config_options))))
    group.add_option('--config', metavar='path', default=config_file,
                     help="user config file location")
# Don't read the command line if the module is used as a library.
if not arglist and not parse_argv:
    arglist = []
# If parse_argv is True and arglist is None, arguments are
# parsed from the command line (sys.argv)
(options, args) = parser.parse_args(arglist)
options.reporter = None

if options.ensure_value('testsuite', False):
    args.append(options.testsuite)
elif not options.ensure_value('doctest', False):
    if parse_argv and not args:
        if options.diff or any(os.path.exists(name)
                               for name in PROJECT_CONFIG):
            args = ['.']
        else:
            parser.error('input not specified')
    options = read_config(options, args, arglist, parser)
    options.reporter = parse_argv and options.quiet == 1 and FileReport

options.filename = _parse_multi_options(options.filename)
options.exclude = normalize_paths(options.exclude)
options.select = _parse_multi_options(options.select)
options.ignore = _parse_multi_options(options.ignore)

if options.diff:
    options.reporter = DiffReport
    stdin = stdin_get_value()
    options.selected_lines = parse_udiff(stdin, options.filename, args[0])
    args = sorted(options.selected_lines)

return options, args
def process_options(arglist=None, parse_argv=False, config_file=None, parser=None)
Process options passed either via arglist or via command line args.

Passing in the ``config_file`` parameter allows other tools, such as
flake8 to specify their own options to be processed in pycodestyle.
4.985874
4.93073
1.011184
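Library-style invocation passes an explicit arglist and leaves parse_argv at its default of False, so sys.argv is never consulted. A sketch (assumes pycodestyle.process_options is callable as shown; the exact options object depends on any setup.cfg/tox.ini found on disk):

import pycodestyle

options, args = pycodestyle.process_options(['--max-line-length=100', '.'])
print(options.max_line_length, args)  # 100 ['.']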
if options:
    return [o.strip() for o in options.split(split_token) if o.strip()]
else:
    return options
def _parse_multi_options(options, split_token=',')
r"""Split and strip and discard empties. Turns the following: A, B, into ["A", "B"]
3.969814
4.716464
0.841693
import signal

# Handle "Broken pipe" gracefully
try:
    signal.signal(signal.SIGPIPE, lambda signum, frame: sys.exit(1))
except AttributeError:
    pass  # not supported on Windows

style_guide = StyleGuide(parse_argv=True)
options = style_guide.options

if options.doctest or options.testsuite:
    from testsuite.support import run_tests
    report = run_tests(style_guide)
else:
    report = style_guide.check_files()

if options.statistics:
    report.print_statistics()

if options.benchmark:
    report.print_benchmark()

if options.testsuite and not options.quiet:
    report.print_results()

if report.total_errors:
    if options.count:
        sys.stderr.write(str(report.total_errors) + '\n')
    sys.exit(1)
def _main()
Parse options and run checks on Python source.
3.529545
3.369069
1.047632
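The StyleGuide class used above is also the usual library entry point: it bundles option processing, file discovery, and reporting. A minimal sketch ('src/' is a placeholder path):

import pycodestyle

style = pycodestyle.StyleGuide(quiet=True, max_line_length=100)
report = style.check_files(['src/'])
print(report.total_errors)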
(exc_type, exc) = sys.exc_info()[:2]
if len(exc.args) > 1:
    offset = exc.args[1]
    if len(offset) > 2:
        offset = offset[1:3]
else:
    offset = (1, 0)
self.report_error(offset[0], offset[1] or 0,
                  'E901 %s: %s' % (exc_type.__name__, exc.args[0]),
                  self.report_invalid_syntax)
def report_invalid_syntax(self)
Check if the syntax is valid.
3.61379
3.557021
1.01596
if self.line_number >= self.total_lines:
    return ''
line = self.lines[self.line_number]
self.line_number += 1
if self.indent_char is None and line[:1] in WHITESPACE:
    self.indent_char = line[0]
return line
def readline(self)
Get the next line from the input buffer.
2.903085
2.893096
1.003452
arguments = []
for name in argument_names:
    arguments.append(getattr(self, name))
return check(*arguments)
def run_check(self, check, argument_names)
Run a check plugin.
3.059523
3.028769
1.010154
if 'checker_state' in argument_names:
    self.checker_state = self._checker_states.setdefault(name, {})
def init_checker_state(self, name, argument_names)
Prepare custom state for the specific checker plugin.
4.745984
4.367291
1.086711
self.physical_line = line
for name, check, argument_names in self._physical_checks:
    self.init_checker_state(name, argument_names)
    result = self.run_check(check, argument_names)
    if result is not None:
        (offset, text) = result
        self.report_error(self.line_number, offset, text, check)
        if text[:4] == 'E101':
            self.indent_char = line[0]
def check_physical(self, line)
Run all physical checks on a raw input line.
5.476067
5.164926
1.060241
logical = []
comments = []
length = 0
prev_row = prev_col = mapping = None
for token_type, text, start, end, line in self.tokens:
    if token_type in SKIP_TOKENS:
        continue
    if not mapping:
        mapping = [(0, start)]
    if token_type == tokenize.COMMENT:
        comments.append(text)
        continue
    if token_type == tokenize.STRING:
        text = mute_string(text)
    if prev_row:
        (start_row, start_col) = start
        if prev_row != start_row:    # different row
            prev_text = self.lines[prev_row - 1][prev_col - 1]
            if prev_text == ',' or (prev_text not in '{[(' and
                                    text not in '}])'):
                text = ' ' + text
        elif prev_col != start_col:  # different column
            text = line[prev_col:start_col] + text
    logical.append(text)
    length += len(text)
    mapping.append((length, end))
    (prev_row, prev_col) = end
self.logical_line = ''.join(logical)
self.noqa = comments and noqa(''.join(comments))
return mapping
def build_tokens_line(self)
Build a logical line from tokens.
3.520154
3.341537
1.053453
self.report.increment_logical_line()
mapping = self.build_tokens_line()
if not mapping:
    return

(start_row, start_col) = mapping[0][1]
start_line = self.lines[start_row - 1]
self.indent_level = expand_indent(start_line[:start_col])
if self.blank_before < self.blank_lines:
    self.blank_before = self.blank_lines
if self.verbose >= 2:
    print(self.logical_line[:80].rstrip())
for name, check, argument_names in self._logical_checks:
    if self.verbose >= 4:
        print('   ' + name)
    self.init_checker_state(name, argument_names)
    for offset, text in self.run_check(check, argument_names) or ():
        if not isinstance(offset, tuple):
            for token_offset, pos in mapping:
                if offset <= token_offset:
                    break
            offset = (pos[0], pos[1] + offset - token_offset)
        self.report_error(offset[0], offset[1], text, check)
if self.logical_line:
    self.previous_indent_level = self.indent_level
    self.previous_logical = self.logical_line
    if not self.indent_level:
        self.previous_unindented_logical_line = self.logical_line
self.blank_lines = 0
self.tokens = []
def check_logical(self)
Build a line from tokens and run all logical checks on it.
4.035465
3.739777
1.079066
try:
    tree = compile(''.join(self.lines), '', 'exec', PyCF_ONLY_AST)
except (ValueError, SyntaxError, TypeError):
    return self.report_invalid_syntax()
for name, cls, __ in self._ast_checks:
    checker = cls(tree, self.filename)
    for lineno, offset, text, check in checker.run():
        if not self.lines or not noqa(self.lines[lineno - 1]):
            self.report_error(lineno, offset, text, check)
def check_ast(self)
Build the file's AST and run all AST checks.
5.034952
4.533129
1.110701
if self._io_error:
    self.report_error(1, 0, 'E902 %s' % self._io_error, readlines)
tokengen = tokenize.generate_tokens(self.readline)
try:
    for token in tokengen:
        if token[2][0] > self.total_lines:
            return
        self.noqa = token[4] and noqa(token[4])
        self.maybe_check_physical(token)
        yield token
except (SyntaxError, tokenize.TokenError):
    self.report_invalid_syntax()
def generate_tokens(self)
Tokenize the file, run physical line checks and yield tokens.
7.174519
6.197115
1.157719
# Called after every token, but act only on end of line.
if _is_eol_token(token):
    # Obviously, a newline token ends a single physical line.
    self.check_physical(token[4])
elif token[0] == tokenize.STRING and '\n' in token[1]:
    # Less obviously, a string that contains newlines is a
    # multiline string, either triple-quoted or with internal
    # newlines backslash-escaped. Check every physical line in the
    # string *except* for the last one: its newline is outside of
    # the multiline string, so we consider it a regular physical
    # line, and will check it like any other physical line.
    #
    # Subtleties:
    # - we don't *completely* ignore the last line; if it contains
    #   the magical "# noqa" comment, we disable all physical
    #   checks for the entire multiline string
    # - have to wind self.line_number back because initially it
    #   points to the last line of the string, and we want
    #   check_physical() to give accurate feedback
    if noqa(token[4]):
        return
    self.multiline = True
    self.line_number = token[2][0]
    for line in token[1].split('\n')[:-1]:
        self.check_physical(line + '\n')
        self.line_number += 1
    self.multiline = False
def maybe_check_physical(self, token)
If appropriate (based on token), check current physical line(s).
7.140305
6.902499
1.034452
self.report.init_file(self.filename, self.lines, expected, line_offset)
self.total_lines = len(self.lines)
if self._ast_checks:
    self.check_ast()
self.line_number = 0
self.indent_char = None
self.indent_level = self.previous_indent_level = 0
self.previous_logical = ''
self.previous_unindented_logical_line = ''
self.tokens = []
self.blank_lines = self.blank_before = 0
parens = 0
for token in self.generate_tokens():
    self.tokens.append(token)
    token_type, text = token[0:2]
    if self.verbose >= 3:
        if token[2][0] == token[3][0]:
            pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
        else:
            pos = 'l.%s' % token[3][0]
        print('l.%s\t%s\t%s\t%r' %
              (token[2][0], pos, tokenize.tok_name[token[0]], text))
    if token_type == tokenize.OP:
        if text in '([{':
            parens += 1
        elif text in '}])':
            parens -= 1
    elif not parens:
        if token_type in NEWLINE:
            if token_type == tokenize.NEWLINE:
                self.check_logical()
                self.blank_before = 0
            elif len(self.tokens) == 1:
                # The physical line contains only this token.
                self.blank_lines += 1
                del self.tokens[0]
            else:
                self.check_logical()
        elif COMMENT_WITH_NL and token_type == tokenize.COMMENT:
            if len(self.tokens) == 1:
                # The comment also ends a physical line
                token = list(token)
                token[1] = text.rstrip('\r\n')
                token[3] = (token[2][0], token[2][1] + len(token[1]))
                self.tokens = [tuple(token)]
                self.check_logical()
if self.tokens:
    self.check_physical(self.lines[-1])
    self.check_logical()
return self.report.get_file_results()
def check_all(self, expected=None, line_offset=0)
Run all checks on the input file.
3.194953
3.14818
1.014857
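A sketch of running this loop over one file via the Checker class it belongs to (assumes pycodestyle.Checker is constructible from a filename, as in this version; 'example.py' is a placeholder path):

import pycodestyle

checker = pycodestyle.Checker('example.py', show_source=True)
file_errors = checker.check_all()  # returns the per-file error count
print('%s problem(s) found' % file_errors)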
self.filename = filename
self.lines = lines
self.expected = expected or ()
self.line_offset = line_offset
self.file_errors = 0
self.counters['files'] += 1
self.counters['physical lines'] += len(lines)
def init_file(self, filename, lines, expected, line_offset)
Signal a new file.
3.600369
3.244133
1.109809
code = text[:4]
if self._ignore_code(code):
    return
if code in self.counters:
    self.counters[code] += 1
else:
    self.counters[code] = 1
    self.messages[code] = text[5:]
# Don't care about expected errors or warnings
if code in self.expected:
    return
if self.print_filename and not self.file_errors:
    print(self.filename)
self.file_errors += 1
self.total_errors += 1
return code
def error(self, line_number, offset, text, check)
Report an error, according to options.
4.130691
3.942786
1.047658