code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
if value is None: editor.show_message('tabstop=%i' % editor.tabstop) else: try: value = int(value) if value > 0: editor.tabstop = value else: editor.show_message('Argument must be positive') except ValueError: editor.show_message('Number required after =')
def tab_stop(editor, value)
Set tabstop.
3.424933
3.348348
1.022873
if value is None: editor.show_message('scrolloff=%i' % editor.scroll_offset) else: try: value = int(value) if value >= 0: editor.scroll_offset = value else: editor.show_message('Argument must be positive') except ValueError: editor.show_message('Number required after =')
def set_scroll_offset(editor, value)
Set scroll offset.
3.43106
3.331157
1.02999
b = get_app().current_buffer before_cursor = b.document.current_line_before_cursor return bool(not before_cursor or before_cursor[-1].isspace())
def whitespace_before_cursor_on_line()
Filter which evaluates to True when the characters before the cursor are whitespace, or we are at the start of the line.
5.098751
4.952333
1.029565
def walk(split): for c in split: if isinstance(c, (HSplit, VSplit)): for i in walk(c): yield i elif isinstance(c, Window): yield split, c return walk(self.root)
def _walk_through_windows(self)
Yields (Split, Window) tuples.
5.203825
3.526556
1.475611
def walk(split): for c in split: if isinstance(c, (HSplit, VSplit)): yield split, c for i in walk(c): yield i return walk(self.root)
def _walk_through_splits(self)
Yields (parent_split, child_split) tuples.
5.779079
4.342404
1.330848
if editor_buffer is None:
    editor_buffer = self.active_window.editor_buffer

active_split = self._get_active_split()
index = active_split.index(self.active_window)
new_window = Window(editor_buffer)

if isinstance(active_split, split_cls):
    # Add new window to active split.
    active_split.insert(index, new_window)
else:
    # Split in the other direction.
    active_split[index] = split_cls([active_split[index], new_window])

# Focus new window.
self.active_window = new_window
def _split(self, split_cls, editor_buffer=None)
Split horizontally or vertically. (When editor_buffer is None, show the current buffer in the new window as well.)
2.878739
2.838662
1.014118
assert isinstance(editor_buffer, EditorBuffer) self.active_window.editor_buffer = editor_buffer
def show_editor_buffer(self, editor_buffer)
Open this `EditorBuffer` in the active window.
4.413121
3.383899
1.304152
for split, window in self._walk_through_windows(): if window.editor_buffer == editor_buffer: self._close_window(window)
def close_editor_buffer(self, editor_buffer)
Close all the windows that have this editor buffer open.
6.928471
5.700073
1.215506
if window == self.active_window: self.close_active_window() else: original_active_window = self.active_window self.close_active_window() self.active_window = original_active_window
def _close_window(self, window)
Close this window.
2.872401
2.681717
1.071105
active_split = self._get_active_split()

# First remove the active window from its split.
index = active_split.index(self.active_window)
del active_split[index]

# Move focus.
if len(active_split):
    new_active_window = active_split[max(0, index - 1)]
    while isinstance(new_active_window, (HSplit, VSplit)):
        new_active_window = new_active_window[0]
    self.active_window = new_active_window
else:
    self.active_window = None  # No windows left.

# When there is exactly one item left, move it back into the parent
# split. (We don't want to keep a split with one item around -- except
# for the root.)
if len(active_split) == 1 and active_split != self.root:
    parent = self._get_split_parent(active_split)
    index = parent.index(active_split)
    parent[index] = active_split[0]
def close_active_window(self)
Close active window.
3.688854
3.633982
1.0151
windows = self.windows() new_index = (windows.index(self.active_window) + 1) % len(windows) self.active_window = windows[new_index]
def cycle_focus(self)
Cycle through all windows.
3.497994
2.719848
1.286099
for w in self.windows(): if w.editor_buffer.has_unsaved_changes: return True return False
def has_unsaved_changes(self)
True when any of the visible buffers in this tab has unsaved changes.
5.477178
4.33279
1.264123
if self.active_tab and self.active_tab.active_window: return self.active_tab.active_window.editor_buffer
def active_editor_buffer(self)
The active EditorBuffer or None.
3.780733
3.214528
1.176139
" The active prompt_toolkit layout Window. " if self.active_tab: w = self.active_tab.active_window if w: return w.pt_window
def active_pt_window(self)
The active prompt_toolkit layout Window.
9.093359
5.463953
1.664245
for eb in self.editor_buffers: if eb.location == location: return eb
def get_editor_buffer_for_location(self, location)
Return the `EditorBuffer` for this location. When this file has not yet been loaded, return None.
3.802092
3.41714
1.112653
for eb in self.editor_buffers: if eb.buffer_name == buffer_name: return eb
def get_editor_buffer_for_buffer_name(self, buffer_name)
Return the `EditorBuffer` for this buffer_name. When not found, return None.
2.816665
2.703866
1.041718
if len(self.tab_pages) > 1:  # Cannot close last tab.
    del self.tab_pages[self.active_tab_index]
    self.active_tab_index = max(0, self.active_tab_index - 1)

    # Clean up buffers.
    self._auto_close_new_empty_buffers()
def close_tab(self)
Close active tab.
4.442679
4.115137
1.079595
assert location is None or text is None or new is False  # Don't pass two of them.

if location or text or new:
    editor_buffer = self._get_or_create_editor_buffer(location=location, text=text)
else:
    editor_buffer = None

self.active_tab.hsplit(editor_buffer)
def hsplit(self, location=None, new=False, text=None)
Split horizontally.
5.09231
5.058255
1.006733
self.tab_pages = [TabPage(self.active_tab.active_window)] self.active_tab_index = 0
def keep_only_current_window(self)
Close all other windows, except the current one.
9.477144
8.122109
1.166833
if self.active_editor_buffer:
    # Find the active opened buffer.
    index = self.editor_buffers.index(self.active_editor_buffer)

    # Get index of new buffer.
    if _previous:
        new_index = (len(self.editor_buffers) + index - 1) % len(self.editor_buffers)
    else:
        new_index = (index + 1) % len(self.editor_buffers)

    # Open new buffer in active tab.
    self.active_tab.show_editor_buffer(self.editor_buffers[new_index])

    # Clean up buffers.
    self._auto_close_new_empty_buffers()
def go_to_next_buffer(self, _previous=False)
Open next buffer in active window.
3.068424
2.887423
1.062686
self.active_tab_index = (self.active_tab_index - 1 + len(self.tab_pages)) % len(self.tab_pages)
def go_to_previous_tab(self)
Focus the previous tab.
3.042813
2.792587
1.089604
assert isinstance(buffer_name, string_types) for i, eb in enumerate(self.editor_buffers): if (eb.location == buffer_name or (buffer_name.isdigit() and int(buffer_name) == i)): self.show_editor_buffer(eb) break
def go_to_buffer(self, buffer_name)
Go to one of the open buffers.
4.052685
3.974318
1.019719
assert isinstance(editor_buffer, EditorBuffer) and editor_buffer not in self.editor_buffers

# Add to list of EditorBuffers
eb = self.active_editor_buffer
if eb is None:
    self.editor_buffers.append(editor_buffer)
else:
    # Append right after the currently active one.
    try:
        index = self.editor_buffers.index(self.active_editor_buffer)
    except ValueError:
        index = 0
    self.editor_buffers.insert(index, editor_buffer)

# When there are no tabs/windows yet, create one for this buffer.
if self.tab_pages == []:
    self.tab_pages.append(TabPage(Window(editor_buffer)))
    self.active_tab_index = 0

# To be shown?
if show_in_current_window and self.active_tab:
    self.active_tab.show_editor_buffer(editor_buffer)

# Start reporter.
editor_buffer.run_reporter()
def _add_editor_buffer(self, editor_buffer, show_in_current_window=False)
Insert this new buffer in the list of buffers, right after the active one.
3.196672
3.100291
1.031088
assert location is None or text is None  # Don't pass two of them.
assert location is None or isinstance(location, string_types)

if location is None:
    # Create and add an empty EditorBuffer
    eb = EditorBuffer(self.editor, text=text)
    self._add_editor_buffer(eb)
    return eb
else:
    # When a location is given, first look whether the file was already
    # opened.
    eb = self.get_editor_buffer_for_location(location)

    # Not found? Create one.
    if eb is None:
        # Create and add EditorBuffer
        eb = EditorBuffer(self.editor, location)
        self._add_editor_buffer(eb)
        return eb
    else:
        # Found! Return it.
        return eb
def _get_or_create_editor_buffer(self, location=None, text=None)
Given a location, return the `EditorBuffer` instance that we have if the file is already open, or create a new one. When location is None, this creates a new buffer.
3.181492
3.007336
1.05791
eb = self._get_or_create_editor_buffer(location) if show_in_current_window: self.show_editor_buffer(eb)
def open_buffer(self, location=None, show_in_current_window=False)
Open/create a file, load it, and show it in a new buffer.
3.878756
4.214362
0.920366
# Get all visible EditorBuffers
ebs = set()
for t in self.tab_pages:
    ebs |= set(t.visible_editor_buffers())

# Remove empty/new buffers that are hidden.
for eb in self.editor_buffers[:]:
    if eb.is_new and not eb.location and eb not in ebs and eb.buffer.text == '':
        self.editor_buffers.remove(eb)
def _auto_close_new_empty_buffers(self)
Close new, empty buffers (like those created when the editor starts without any files) once no window shows them anymore. This should be called every time a window is closed, or when the content of a window is replaced by something new.
5.462994
4.903984
1.113991
eb = self.active_editor_buffer

# Remove this buffer.
index = self.editor_buffers.index(eb)
self.editor_buffers.remove(eb)

# Close the active window.
self.active_tab.close_active_window()

# Close all the windows that still have this buffer open.
for i, t in enumerate(self.tab_pages[:]):
    t.close_editor_buffer(eb)

    # Remove tab when there are no windows left.
    if t.window_count() == 0:
        self.tab_pages.remove(t)
        if i >= self.active_tab_index:
            self.active_tab_index = max(0, self.active_tab_index - 1)

# When there are no windows/tabs left, create a new tab.
if len(self.tab_pages) == 0:
    self.active_tab_index = None

    if len(self.editor_buffers) > 0:
        # Open the previous buffer.
        new_index = (len(self.editor_buffers) + index - 1) % len(self.editor_buffers)
        eb = self.editor_buffers[new_index]

        # Create a window for this buffer.
        self.tab_pages.append(TabPage(Window(eb)))
        self.active_tab_index = 0
    else:
        # Create a new buffer. (This will also create the window
        # automatically.)
        eb = self._get_or_create_editor_buffer()
def close_buffer(self)
Close current buffer. When there are other windows showing the same buffer, they are closed as well. When no windows are left, the previous buffer or an empty buffer is shown.
2.97301
2.892749
1.027746
eb = self._get_or_create_editor_buffer(location) self.tab_pages.insert(self.active_tab_index + 1, TabPage(Window(eb))) self.active_tab_index += 1
def create_tab(self, location=None)
Create a new tab page.
5.56155
4.916718
1.131151
active_eb = self.active_editor_buffer visible_ebs = self.active_tab.visible_editor_buffers() def make_info(i, eb): return OpenBufferInfo( index=i, editor_buffer=eb, is_active=(eb == active_eb), is_visible=(eb in visible_ebs)) return [make_info(i, eb) for i, eb in enumerate(self.editor_buffers)]
def list_open_buffers(self)
Return a list of `OpenBufferInfo` objects that gives information about the open buffers.
3.434579
3.160721
1.086644
if not SPHINX_INSTALLED: docutils.parsers.rst.directives.register_directive('code', CodeBlockDirective) docutils.parsers.rst.directives.register_directive('code-block', CodeBlockDirective) docutils.parsers.rst.directives.register_directive('sourcecode', CodeBlockDirective)
def register_code_directive()
Register code directive.
2.599568
2.518607
1.032145
# Do this at call time rather than import time to avoid unnecessarily
# mutating state.
register_code_directive()
ignore_sphinx()

ignore = ignore or {}

try:
    ignore.setdefault('languages', []).extend(
        find_ignored_languages(source)
    )
except Error as error:
    yield (error.line_number, '{}'.format(error))

writer = CheckWriter(source, filename, ignore=ignore)

string_io = io.StringIO()

# This is a hack to avoid false positive from docutils (#23). docutils
# mistakes BOMs for actual visible letters. This results in the "underline
# too short" warning firing.
source = strip_byte_order_mark(source)

try:
    docutils.core.publish_string(
        source, writer=writer,
        source_path=filename,
        settings_overrides={'halt_level': report_level,
                            'report_level': report_level,
                            'warning_stream': string_io})
except docutils.utils.SystemMessage:
    pass
except AttributeError:
    # Sphinx will sometimes throw an exception trying to access
    # "self.state.document.settings.env". Ignore this for now until we
    # figure out a better approach.
    if debug:
        raise

for checker in writer.checkers:
    for error in checker():
        yield error

rst_errors = string_io.getvalue().strip()

if rst_errors:
    for message in rst_errors.splitlines():
        try:
            ignore_regex = ignore.get('messages', '')
            if ignore_regex and re.search(ignore_regex, message):
                continue
            yield parse_gcc_style_error_message(message,
                                                filename=filename,
                                                has_column=False)
        except ValueError:
            continue
def check(source, filename='<string>', report_level=docutils.utils.Reporter.INFO_LEVEL, ignore=None, debug=False)
Yield errors. Use lower report_level for noisier error output. Each yielded error is a tuple of the form: (line_number, message) Line numbers are indexed at 1 and are with respect to the full RST file. Each code block is checked asynchronously in a subprocess. Note that this function mutates state by calling the ``docutils`` ``register_*()`` functions.
5.668774
5.71671
0.991615
for (index, line) in enumerate(source.splitlines()): match = RSTCHECK_COMMENT_RE.match(line) if match: key_and_value = line[match.end():].strip().split('=') if len(key_and_value) != 2: raise Error('Expected "key=value" syntax', line_number=index + 1) if key_and_value[0] == 'ignore-language': for language in key_and_value[1].split(','): yield language.strip()
def find_ignored_languages(source)
Yield ignored languages. Languages are ignored via comment. For example, to ignore C++, JSON, and Python: >>> list(find_ignored_languages(''' ... Example ... ======= ... ... .. rstcheck: ignore-language=cpp,json ... ... .. rstcheck: ignore-language=python ... ''')) ['cpp', 'json', 'python']
3.209118
2.848358
1.126655
(filename, args) = parameters if filename == '-': contents = sys.stdin.read() else: with contextlib.closing( docutils.io.FileInput(source_path=filename) ) as input_file: contents = input_file.read() args = load_configuration_from_file( os.path.dirname(os.path.realpath(filename)), args) ignore_directives_and_roles(args.ignore_directives, args.ignore_roles) for substitution in args.ignore_substitutions: contents = contents.replace('|{}|'.format(substitution), 'None') ignore = { 'languages': args.ignore_language, 'messages': args.ignore_messages, } all_errors = [] for error in check(contents, filename=filename, report_level=args.report, ignore=ignore, debug=args.debug): all_errors.append(error) return (filename, all_errors)
def _check_file(parameters)
Return list of errors.
3.819078
3.787936
1.008221
try: compile(code, '<string>', 'exec') except SyntaxError as exception: yield (int(exception.lineno), exception.msg)
def check_python(code)
Yield errors.
4.168662
3.616647
1.152632
try: json.loads(code) except ValueError as exception: message = '{}'.format(exception) line_number = 0 found = re.search(r': line\s+([0-9]+)[^:]*$', message) if found: line_number = int(found.group(1)) yield (int(line_number), message)
def check_json(code)
Yield errors.
3.883563
3.551723
1.093431
try: xml.etree.ElementTree.fromstring(code) except xml.etree.ElementTree.ParseError as exception: message = '{}'.format(exception) line_number = 0 found = re.search(r': line\s+([0-9]+)[^:]*$', message) if found: line_number = int(found.group(1)) yield (int(line_number), message)
def check_xml(code)
Yield errors.
3.333787
3.105162
1.073628
filename = '<string>' for result in check(code, filename=filename, ignore=ignore): yield result
def check_rst(code, ignore)
Yield errors in nested RST code.
8.889294
7.636069
1.164119
if SPHINX_INSTALLED: sphinx_directives = list(sphinx.domains.std.StandardDomain.directives) sphinx_roles = list(sphinx.domains.std.StandardDomain.roles) for domain in [sphinx.domains.c.CDomain, sphinx.domains.cpp.CPPDomain, sphinx.domains.javascript.JavaScriptDomain, sphinx.domains.python.PythonDomain]: sphinx_directives += list(domain.directives) + [ '{}:{}'.format(domain.name, item) for item in list(domain.directives)] sphinx_roles += list(domain.roles) + [ '{}:{}'.format(domain.name, item) for item in list(domain.roles)] else: sphinx_roles = [ 'abbr', 'command', 'dfn', 'doc', 'download', 'envvar', 'file', 'guilabel', 'kbd', 'keyword', 'mailheader', 'makevar', 'manpage', 'menuselection', 'mimetype', 'newsgroup', 'option', 'program', 'py:func', 'ref', 'regexp', 'samp', 'term', 'token'] sphinx_directives = [ 'autosummary', 'currentmodule', 'centered', 'c:function', 'c:type', 'include', 'deprecated', 'envvar', 'glossary', 'index', 'no-code-block', 'literalinclude', 'hlist', 'option', 'productionlist', 'py:function', 'seealso', 'toctree', 'todo', 'versionadded', 'versionchanged'] return (sphinx_directives, sphinx_roles)
def _get_directives_and_roles_from_sphinx()
Return a tuple of Sphinx directives and roles.
2.834909
2.767654
1.0243
(directives, roles) = _get_directives_and_roles_from_sphinx() directives += [ 'centered', 'include', 'deprecated', 'index', 'no-code-block', 'literalinclude', 'hlist', 'seealso', 'toctree', 'todo', 'versionadded', 'versionchanged'] ext_autosummary = [ 'autosummary', 'currentmodule', ] ignore_directives_and_roles(directives + ext_autosummary, roles + ['ctype'])
def ignore_sphinx()
Register Sphinx directives and roles to ignore.
5.63866
5.113578
1.102684
directory_or_file = os.path.realpath(directory_or_file) if os.path.isfile(directory_or_file): if debug: print('using config file {}'.format(directory_or_file), file=sys.stderr) return directory_or_file directory = directory_or_file while directory: for filename in CONFIG_FILES: candidate = os.path.join(directory, filename) if os.path.exists(candidate): if debug: print('using config file {}'.format(candidate), file=sys.stderr) return candidate parent_directory = os.path.dirname(directory) if parent_directory == directory: break else: directory = parent_directory
def find_config(directory_or_file, debug=False)
Return configuration filename. If `directory_or_file` is a file, return the real-path of that file. If it is a directory, find the configuration (any file name in CONFIG_FILES) in that directory or its ancestors.
1.764118
1.723585
1.023517
args = copy.copy(args) directory_or_file = directory if args.config is not None: directory_or_file = args.config options = _get_options(directory_or_file, debug=args.debug) args.report = options.get('report', args.report) threshold_dictionary = docutils.frontend.OptionParser.thresholds args.report = int(threshold_dictionary.get(args.report, args.report)) args.ignore_language = get_and_split( options, 'ignore_language', args.ignore_language) args.ignore_messages = options.get( 'ignore_messages', args.ignore_messages) args.ignore_directives = get_and_split( options, 'ignore_directives', args.ignore_directives) args.ignore_substitutions = get_and_split( options, 'ignore_substitutions', args.ignore_substitutions) args.ignore_roles = get_and_split( options, 'ignore_roles', args.ignore_roles) return args
def load_configuration_from_file(directory, args)
Return new ``args`` with configuration loaded from file.
2.644992
2.560104
1.033158
for directive in directives: docutils.parsers.rst.directives.register_directive(directive, IgnoredDirective) for role in roles: docutils.parsers.rst.roles.register_local_role(role, _ignore_role)
def ignore_directives_and_roles(directives, roles)
Ignore directives/roles in docutils.
2.903584
2.558339
1.134949
run = run_in_subprocess(code, '.bash', ['bash', '-n'], working_directory=working_directory) def run_check(): result = run() if result: (output, filename) = result prefix = filename + ': line ' for line in output.splitlines(): if not line.startswith(prefix): continue message = line[len(prefix):] split_message = message.split(':', 1) yield (int(split_message[0]) - 1, split_message[1].strip()) return run_check
def bash_checker(code, working_directory)
Return checker.
3.723148
3.741017
0.995224
return gcc_checker(code, '.c', [os.getenv('CC', 'gcc'), '-std=c99'] + INCLUDE_FLAGS, working_directory=working_directory)
def c_checker(code, working_directory)
Return checker.
6.30663
6.523101
0.966815
return gcc_checker(code, '.cpp', [os.getenv('CXX', 'g++'), '-std=c++0x'] + INCLUDE_FLAGS, working_directory=working_directory)
def cpp_checker(code, working_directory)
Return checker.
5.794998
6.066574
0.955234
run = run_in_subprocess(code, filename_suffix, arguments + ['-pedantic', '-fsyntax-only'], working_directory=working_directory) def run_check(): result = run() if result: (output, filename) = result for line in output.splitlines(): try: yield parse_gcc_style_error_message(line, filename=filename) except ValueError: continue return run_check
def gcc_checker(code, filename_suffix, arguments, working_directory)
Return checker.
4.964881
4.942533
1.004522
colons = 2 if has_column else 1 prefix = filename + ':' if not message.startswith(prefix): raise ValueError() message = message[len(prefix):] split_message = message.split(':', colons) line_number = int(split_message[0]) return (line_number, split_message[colons].strip())
def parse_gcc_style_error_message(message, filename, has_column=True)
Parse GCC-style error message. Return (line_number, message). Raise ValueError if message cannot be parsed.
3.001732
2.759191
1.087903
temporary_file = tempfile.NamedTemporaryFile(mode='wb', suffix=filename_suffix) temporary_file.write(code.encode('utf-8')) temporary_file.flush() process = subprocess.Popen(arguments + [temporary_file.name], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=working_directory) def run(): raw_result = process.communicate() if process.returncode != 0: return (raw_result[1].decode(get_encoding()), temporary_file.name) return run
def run_in_subprocess(code, filename_suffix, arguments, working_directory)
Return None on success.
2.700515
2.630801
1.026499
if SPHINX_INSTALLED and not is_code_node:
    delta = len(node.non_default_attributes())
    current_line_contents = full_contents.splitlines()[line_number:]
    blank_lines = next(
        (i for (i, x) in enumerate(current_line_contents) if x), 0)
    return (line_number + delta - 1 + blank_lines - 1 +
            SPHINX_CODE_BLOCK_DELTA)
else:
    lines = full_contents.splitlines()
    code_block_length = len(node.rawsource.splitlines())

    try:
        # Case where there are no extra spaces.
        if lines[line_number - 1].strip():
            return line_number - code_block_length + 1
    except IndexError:
        pass

    # The offsets are wrong if the RST text has multiple blank lines after
    # the code block. This is a workaround.
    for line_number in range(line_number, 1, -1):
        if lines[line_number - 2].strip():
            break

    return line_number - code_block_length
def beginning_of_code_block(node, line_number, full_contents, is_code_node)
Return line number of beginning of code block.
3.988325
3.90963
1.020129
threshold_choices = docutils.frontend.OptionParser.threshold_choices parser = argparse.ArgumentParser( description=__doc__ + (' Sphinx is enabled.' if SPHINX_INSTALLED else ''), prog='rstcheck') parser.add_argument('files', nargs='+', type=decode_filename, help='files to check') parser.add_argument('--config', metavar='CONFIG', default=None, help='location of config file') parser.add_argument('-r', '--recursive', action='store_true', help='run recursively over directories') parser.add_argument('--report', metavar='level', choices=threshold_choices, default='info', help='report system messages at or higher than ' 'level; ' + ', '.join(choice for choice in threshold_choices if not choice.isdigit()) + ' (default: %(default)s)') parser.add_argument('--ignore-language', '--ignore', metavar='language', default='', help='comma-separated list of languages to ignore') parser.add_argument('--ignore-messages', metavar='messages', default='', help='python regex that match the messages to ignore') parser.add_argument('--ignore-directives', metavar='directives', default='', help='comma-separated list of directives to ignore') parser.add_argument('--ignore-substitutions', metavar='substitutions', default='', help='comma-separated list of substitutions to ignore') parser.add_argument('--ignore-roles', metavar='roles', default='', help='comma-separated list of roles to ignore') parser.add_argument('--debug', action='store_true', help='show messages helpful for debugging') parser.add_argument('--version', action='version', version='%(prog)s ' + __version__) args = parser.parse_args() if '-' in args.files: if len(args.files) > 1: parser.error("'-' for standard in can only be checked alone") else: args.files = list(find_files(filenames=args.files, recursive=args.recursive)) return args
def parse_args()
Return parsed command-line arguments.
2.817746
2.770188
1.017168
if file.encoding is None:
    # If the output file does not support Unicode, encode it to a byte
    # string. On some machines, this occurs when Python is redirecting to
    # file (or piping to something like Vim).
    text = text.encode('utf-8')

print(text, file=file)
def output_message(text, file=sys.stderr)
Output message to terminal.
7.839825
7.79145
1.006209
if SPHINX_INSTALLED: srcdir = tempfile.mkdtemp() outdir = os.path.join(srcdir, '_build') try: sphinx.application.Sphinx(srcdir=srcdir, confdir=None, outdir=outdir, doctreedir=outdir, buildername='dummy', status=None) yield finally: shutil.rmtree(srcdir) else: yield
def enable_sphinx_if_possible()
Register Sphinx directives and roles.
2.982249
2.973471
1.002952
base_name = os.path.basename(filename) if base_name.startswith('.'): return False if not os.path.isdir(filename) and not filename.lower().endswith('.rst'): return False return True
def match_file(filename)
Return True if file is okay for modifying/recursing.
3.062505
2.845836
1.076135
args = parse_args()

if not args.files:
    return 0

with enable_sphinx_if_possible():
    status = 0
    pool = multiprocessing.Pool(multiprocessing.cpu_count())
    try:
        if len(args.files) > 1:
            results = pool.map(
                _check_file,
                [(name, args) for name in args.files])
        else:
            # This is for the case where we read from standard in.
            results = [_check_file((args.files[0], args))]

        for (filename, errors) in results:
            for error in errors:
                line_number = error[0]
                message = error[1]

                if not re.match(r'\([A-Z]+/[0-9]+\)', message):
                    message = '(ERROR/3) ' + message

                output_message('{}:{}: {}'.format(filename,
                                                  line_number,
                                                  message))

                status = 1
    except (IOError, UnicodeError) as exception:
        output_message(exception)
        status = 1

    return status
def main()
Return 0 on success.
3.831547
3.704652
1.034253
try: language = self.arguments[0] except IndexError: language = '' code = '\n'.join(self.content) literal = docutils.nodes.literal_block(code, code) literal['classes'].append('code-block') literal['language'] = language return [literal]
def run(self)
Run directive.
4.149233
3.522663
1.177868
# For "..code-block:: language"
language = node.get('language', None)
is_code_node = False
if not language:
    # For "..code:: language"
    is_code_node = True
    classes = node.get('classes')
    if 'code' in classes:
        language = classes[-1]
    else:
        return

if language in self.ignore['languages']:
    return

if language == 'doctest' or (
        language == 'python' and
        node.rawsource.lstrip().startswith('>>> ')):
    self.visit_doctest_block(node)
    raise docutils.nodes.SkipNode

checker = {
    'bash': bash_checker,
    'c': c_checker,
    'cpp': cpp_checker,
    'json': lambda source, _: lambda: check_json(source),
    'xml': lambda source, _: lambda: check_xml(source),
    'python': lambda source, _: lambda: check_python(source),
    'rst': lambda source, _: lambda: check_rst(source, ignore=self.ignore)
}.get(language)

if checker:
    run = checker(node.rawsource, self.working_directory)
    self._add_check(node=node,
                    run=run,
                    language=language,
                    is_code_node=is_code_node)

raise docutils.nodes.SkipNode
def visit_literal_block(self, node)
Check syntax of code block.
3.127264
3.058008
1.022647
find = re.search(r'\[[^\]]+\]\([^\)]+\)', node.rawsource) if find is not None: self.document.reporter.warning( '(rst) Link is formatted in Markdown style.', base_node=node)
def visit_paragraph(self, node)
Check syntax of reStructuredText.
8.263881
6.999199
1.18069
def run_check(): all_results = run() if all_results is not None: if all_results: for result in all_results: error_offset = result[0] - 1 line_number = getattr(node, 'line', None) if line_number is not None: yield ( beginning_of_code_block( node=node, line_number=line_number, full_contents=self.contents, is_code_node=is_code_node) + error_offset, '({}) {}'.format(language, result[1])) else: yield (self.filename, 0, 'unknown error') self.checkers.append(run_check)
def _add_check(self, node, run, language, is_code_node)
Add checker that will be run.
4.40417
4.152634
1.060573
visitor = CheckTranslator(self.document, contents=self.contents, filename=self.filename, ignore=self.ignore) self.document.walkabout(visitor) self.checkers += visitor.checkers
def translate(self)
Run CheckTranslator.
6.767551
5.123044
1.321002
parsed = urlparse(url)
kwargs = parse_qsl(parsed.query)

# TCP redis connection
if parsed.scheme == 'redis':
    details = {'host': parsed.hostname}
    if parsed.port:
        details['port'] = parsed.port
    if parsed.password:
        details['password'] = parsed.password
    db = parsed.path.lstrip('/')
    if db and db.isdigit():
        details['db'] = db

# Unix socket redis connection
elif parsed.scheme == 'redis+socket':
    details = {'unix_socket_path': parsed.path}

else:
    raise ValueError('Unsupported protocol %s' % (parsed.scheme))

# Add kwargs to the details and convert them to the appropriate type, if needed
details.update(kwargs)
if 'socket_timeout' in details:
    details['socket_timeout'] = float(details['socket_timeout'])
if 'db' in details:
    details['db'] = int(details['db'])

return details
def parse_url(url)
Parse the argument url and return the redis connection details. Two patterns of url are supported: * redis://host:port[/db][?options] * redis+socket:///path/to/redis.sock[?options] A ValueError is raised if the URL is not recognized.
2.569855
2.289504
1.122451
acquired = Lock(
    self.redis,
    key,
    timeout=timeout,
    blocking=self.blocking,
    blocking_timeout=self.blocking_timeout
).acquire()

if not acquired:
    # Time remaining in milliseconds
    # https://redis.io/commands/pttl
    ttl = self.redis.pttl(key)
    raise AlreadyQueued(ttl / 1000.)
def raise_or_lock(self, key, timeout)
Checks if the task is locked and raises an exception, else locks the task. By default, the task and the key expire after 60 minutes (meaning the task will not be executed and the lock will clear).
4.009092
4.066173
0.985962
backend_name = config['backend'] path = backend_name.split('.') backend_mod_name, backend_class_name = '.'.join(path[:-1]), path[-1] backend_mod = importlib.import_module(backend_mod_name) backend_class = getattr(backend_mod, backend_class_name) return backend_class(config['settings'])
def import_backend(config)
Imports and initializes the Backend class.
2.029726
2.025751
1.001962
if isinstance(kwargs, dict):
    # Context: https://github.com/cameronmaske/celery-once/issues/58
    # Keep equivalent to string of dict for backwards compatibility.
    return order_dict_to_string(OrderedDict(
        (force_string(key), force_string(value))
        for key, value in items_sorted_by_key(kwargs)
    ))
elif isinstance(kwargs, list):
    return [force_string(element) for element in kwargs]
elif six.PY2 and isinstance(kwargs, unicode):
    return kwargs.encode('utf-8')
return kwargs
def force_string(kwargs)
Force key in dict or list to a string. Fixes: https://github.com/cameronmaske/celery-once/issues/11
4.287628
3.450231
1.242707
kwargs_list = []
# Kwargs are sorted in alphabetic order by their keys.
# Taken from http://www.saltycrane.com/blog/2007/09/how-to-sort-python-dictionary-by-keys/
for k, v in items_sorted_by_key(kwargs):
    kwargs_list.append(str(k) + '-' + str(force_string(v)))
return kwargs_list
def kwargs_to_list(kwargs)
Turns {'a': 1, 'b': 2} into ["a-1", "b-2"]
3.812329
3.314444
1.150217
keys = ['qo', force_string(name)]
# Restrict to only the keys allowed in keys.
if restrict_to is not None:
    restrict_kwargs = {key: kwargs[key] for key in restrict_to}
    keys += kwargs_to_list(restrict_kwargs)
else:
    keys += kwargs_to_list(kwargs)
key = "_".join(keys)
return key
def queue_once_key(name, kwargs, restrict_to=None)
Turns the name of the task, the kwargs, and the allowed keys into a redis key.
4.300143
4.118979
1.043983
lock_path = self._get_lock_path(key)

try:
    # Create lock file, raise exception if it exists
    fd = os.open(lock_path, os.O_CREAT | os.O_EXCL)
except OSError as error:
    if error.errno == errno.EEXIST:
        # File already exists, check its modification time
        mtime = os.path.getmtime(lock_path)
        ttl = mtime + timeout - time.time()
        if ttl > 0:
            raise AlreadyQueued(ttl)
        else:
            # Update modification time if timeout happens
            os.utime(lock_path, None)
            return
    else:
        # Re-raise unexpected OSError
        raise
else:
    os.close(fd)
def raise_or_lock(self, key, timeout)
Check the lock file and create one if it does not exist.
2.66481
2.580145
1.032814
lock_path = self._get_lock_path(key) os.remove(lock_path)
def clear_lock(self, key)
Remove the lock file.
3.392921
2.697414
1.257842
once_options = options.get('once', {}) once_graceful = once_options.get( 'graceful', self.once.get('graceful', False)) once_timeout = once_options.get( 'timeout', self.once.get('timeout', self.default_timeout)) if not options.get('retries'): key = self.get_key(args, kwargs) try: self.once_backend.raise_or_lock(key, timeout=once_timeout) except AlreadyQueued as e: if once_graceful: return EagerResult(None, None, states.REJECTED) raise e return super(QueueOnce, self).apply_async(args, kwargs, **options)
def apply_async(self, args=None, kwargs=None, **options)
Attempts to queue a task. Raises an AlreadyQueued exception if already queued. :param \*args: positional arguments passed on to the task. :param \*\*kwargs: keyword arguments passed on to the task. :keyword \*\*once: (optional) :param: graceful: (optional) If True, doesn't raise an exception if already queued. Instead returns None. :param: timeout: (optional) An `int` number of seconds after which the lock will expire. If not set, defaults to 1 hour. :param: keys: (optional)
3.922744
3.796883
1.033148
restrict_to = self.once.get('keys', None)
args = args or {}
kwargs = kwargs or {}
call_args = getcallargs(
    getattr(self, '_orig_run', self.run), *args, **kwargs)
# Remove the task instance from the kwargs. This only happens when the
# task has the 'bind' attribute set to True. We remove it, as the task
# has a memory pointer in its repr, that will change between the task
# caller and the celery worker
if isinstance(call_args.get('self'), Task):
    del call_args['self']
key = queue_once_key(self.name, call_args, restrict_to)
return key
def get_key(self, args=None, kwargs=None)
Generate the key from the name of the task (e.g. 'tasks.example') and args/kwargs.
7.7092
7.517
1.025569
# Only clear the lock after the task's execution if the
# "unlock_before_run" option is False
if not self.unlock_before_run():
    key = self.get_key(args, kwargs)
    self.once_backend.clear_lock(key)
def after_return(self, status, retval, task_id, args, kwargs, einfo)
After a task has run (whether successfully or with a failure), clear the lock if "unlock_before_run" is False.
7.597231
5.074319
1.497192
logger.info("Saving DataFrame to %s", csv_path) df.to_csv(csv_path, index=False, chunksize=chunksize)
def _write_csv(self, df, csv_path, chunksize=10**5)
Parameters ---------- df : pandas.DataFrame csv_path : str chunksize : int Number of rows to write at a time. Helps to limit memory consumption while writing a CSV.
2.628807
3.347488
0.785308
if not csv_path.endswith(".csv"): raise ValueError("Invalid path '%s', must be a CSV file" % csv_path) if csv_path in self._memory_cache: return self._memory_cache[csv_path] if exists(csv_path) and not self.is_empty(csv_path): df = self._read_csv(csv_path) else: df = compute_fn() if not isinstance(df, pd.DataFrame): raise TypeError( "Expected compute_fn to return DataFrame, got %s : %s" % ( df, type(df))) self._write_csv(df, csv_path) self._memory_cache[csv_path] = df return df
def cached_dataframe(self, csv_path, compute_fn)
If a CSV path is in the _memory_cache, then return that cached value. If we've already saved the DataFrame as a CSV then load it. Otherwise run the provided `compute_fn`, store its result in memory, and save it as a CSV.
2.235965
2.08279
1.073543
if path in self._memory_cache: return self._memory_cache[path] if exists(path) and not self.is_empty(path): obj = load_pickle(path) else: obj = compute_fn() dump_pickle(obj, path) self._memory_cache[path] = obj return obj
def cached_object(self, path, compute_fn)
If `cached_object` has already been called for a value of `path` in this running Python instance, then it should have a cached value in the _memory_cache; return that value. If this function was never called before with a particular value of `path`, then call compute_fn, and pickle it to `path`. If `path` already exists, unpickle it and store that value in _memory_cache.
2.538559
2.565034
0.989679
if type(line) is not binary_type:
    raise TypeError("Expected header line to be of type %s but got %s" % (
        binary_type, type(line)))

if len(line) <= 1:
    raise ValueError("No identifier on FASTA line")

# split line at first space to get the unique identifier for
# this sequence
space_index = line.find(b" ")
if space_index >= 0:
    identifier = line[1:space_index]
else:
    identifier = line[1:]

# annoyingly Ensembl83 reformatted the transcript IDs of its
# cDNA FASTA to include sequence version numbers, e.g.
# "ENST00000448914.1" instead of "ENST00000448914"
# So now we have to parse out the identifier
dot_index = identifier.find(b".")
if dot_index >= 0:
    identifier = identifier[:dot_index]

return identifier.decode("ascii")
def _parse_header_id(line)
Pull the transcript or protein identifier from the header line which starts with '>'
4.525469
4.279696
1.057428
fasta_dictionary = {} for (identifier, sequence) in self.iterate_over_file(fasta_path): fasta_dictionary[identifier] = sequence return fasta_dictionary
def read_file(self, fasta_path)
Read the contents of a FASTA file into a dictionary
3.745932
3.24668
1.153773
with self._open(fasta_path) as f:
    for line in f:
        line = line.rstrip()

        if len(line) == 0:
            continue

        # have to slice into a bytes object or else I get a single integer
        first_char = line[0:1]

        if first_char == b">":
            id_and_seq = self._read_header(line)
            if id_and_seq is not None:
                yield id_and_seq
        elif first_char == b";":
            # semicolons are comment characters
            continue
        else:
            self.current_lines.append(line)

# the last sequence is still in the lines buffer after we're done with
# the file so make sure to yield it
id_and_seq = self._current_entry()
if id_and_seq is not None:
    yield id_and_seq
def iterate_over_file(self, fasta_path)
Generator that yields identifiers paired with sequences.
4.336646
4.219553
1.02775
if fasta_path.endswith("gz") or fasta_path.endswith("gzip"): return GzipFile(fasta_path, 'rb') else: return open(fasta_path, 'rb')
def _open(self, fasta_path)
Open either a text file or compressed gzip file as a stream of bytes.
2.390723
2.106528
1.134912
lower_name = name.strip().lower() for reference in Species._reference_names_to_species.keys(): if reference.lower() == lower_name: return reference raise ValueError("Reference genome '%s' not found" % name)
def normalize_reference_name(name)
Search the dictionary of species-specific references to find a reference name that matches aside from capitalization. If no matching reference is found, raise an exception.
5.170602
4.359942
1.185934
reference_name = normalize_reference_name(reference_name)
species = find_species_by_reference(reference_name)
(min_ensembl_release, max_ensembl_release) = \
    species.reference_assemblies[reference_name]
if allow_older_downloaded_release:
    # go through candidate releases in descending order
    for release in reversed(
            range(min_ensembl_release, max_ensembl_release + 1)):
        # check if release has been locally downloaded
        candidate = EnsemblRelease.cached(release=release, species=species)
        if candidate.required_local_files_exist():
            return candidate
# see if any of the releases between [max, min] are already locally
# available
return EnsemblRelease.cached(release=max_ensembl_release, species=species)
def genome_for_reference_name( reference_name, allow_older_downloaded_release=True)
Given a genome reference name, such as "GRCh38", returns the corresponding Ensembl Release object. If `allow_older_downloaded_release` is True, and some older releases have been downloaded, then return the most recent locally available release. Otherwise, return the newest release of Ensembl (even if its data hasn't already been downloaded).
3.798839
3.825787
0.992956
lower_name = name.lower().strip()
# if given a common name such as "human", look up its latin equivalent
if lower_name in Species._common_names_to_species:
    return Species._common_names_to_species[lower_name].latin_name
return lower_name.replace(" ", "_")
def normalize_species_name(name)
If species name was "Homo sapiens" then replace spaces with underscores and return "homo_sapiens". Also replace common names like "human" with "homo_sapiens".
4.335474
3.68183
1.177532
if isinstance(species_name_or_object, Species): return species_name_or_object elif isinstance(species_name_or_object, str): return find_species_by_name(species_name_or_object) else: raise ValueError("Unexpected type for species: %s : %s" % ( species_name_or_object, type(species_name_or_object)))
def check_species_object(species_name_or_object)
Helper for validating user supplied species names or objects.
1.824526
1.803761
1.011512
species = Species( latin_name=latin_name, synonyms=synonyms, reference_assemblies=reference_assemblies) cls._latin_names_to_species[species.latin_name] = species for synonym in synonyms: if synonym in cls._common_names_to_species: raise ValueError("Can't use synonym '%s' for both %s and %s" % ( synonym, species, cls._common_names_to_species[synonym])) cls._common_names_to_species[synonym] = species for reference_name in reference_assemblies: if reference_name in cls._reference_names_to_species: raise ValueError("Can't use reference '%s' for both %s and %s" % ( reference_name, species, cls._reference_names_to_species[reference_name])) cls._reference_names_to_species[reference_name] = species return species
def register(cls, latin_name, synonyms, reference_assemblies)
Create a Species object from the given arguments and enter it into all the dicts used to look the species up by its fields.
1.73368
1.629249
1.064097
for species_name in cls.all_registered_latin_names(): species = cls._latin_names_to_species[species_name] for _, release_range in species.reference_assemblies.items(): for release in range(release_range[0], release_range[1] + 1): yield species_name, release
def all_species_release_pairs(cls)
Generator which yields (species, release) pairs for all possible combinations.
3.8687
3.974714
0.973328
if position > self.end or position < self.start: raise ValueError( "Position %d outside valid range %d..%d of %s" % ( position, self.start, self.end, self)) elif self.on_forward_strand: return position - self.start else: return self.end - position
def offset(self, position)
Offset of given position from stranded start of this locus. For example, if a Locus goes from 10..20 and is on the negative strand, then the offset of position 13 is 7, whereas if the Locus is on the positive strand, then the offset is 3.
3.328412
2.854088
1.166191
assert start <= end, \ "Locations should always have start < end, got start=%d, end=%d" % ( start, end) if start < self.start or end > self.end: raise ValueError("Range (%d, %d) falls outside %s" % ( start, end, self)) if self.on_forward_strand: return (start - self.start, end - self.start) else: return (self.end - end, self.end - start)
def offset_range(self, start, end)
Database start/end entries are always ordered such that start < end. This makes computing a relative position (e.g. of a stop codon relative to its transcript) complicated since the "end" position of a backwards locus is actually earlier on the strand. This function correctly selects a start vs. end value depending on this locus's strand and determines that position's offset from the earliest position in this locus.
3.01782
2.765773
1.091131
return (self.on_contig(contig) and (strand is None or self.on_strand(strand)))
def can_overlap(self, contig, strand=None)
Is this locus on the same contig and (optionally) on the same strand?
4.483616
3.390565
1.32238
if self.start > end:
    # interval is before this exon
    return self.start - end
elif self.end < start:
    # exon is before the interval
    return start - self.end
else:
    return 0
def distance_to_interval(self, start, end)
Find the distance between this locus and the interval [start, end]. If the intervals overlap then the distance is 0.
3.879766
3.686711
1.052365
return ( self.can_overlap(contig, strand) and self.distance_to_interval(start, end) == 0)
def overlaps(self, contig, start, end, strand=None)
Does this locus overlap with a given range of positions? Since locus position ranges are inclusive, we should make sure that e.g. chr1:10-10 overlaps with chr1:10-10
6.468866
7.138526
0.906191
cache_key_list = []

# hack to get around the unhashability of lists,
# add a special case to convert them to tuples
for arg in args:
    if type(arg) is list:
        cache_key_list.append(tuple(arg))
    else:
        cache_key_list.append(arg)

for (k, v) in sorted(kwargs.items()):
    if type(v) is list:
        cache_key_list.append((k, tuple(v)))
    else:
        cache_key_list.append((k, v))

return tuple(cache_key_list)
def _memoize_cache_key(args, kwargs)
Turn args tuple and kwargs dictionary into a hashable key. Expects that all arguments to a memoized function are either hashable or can be uniquely identified from type(arg) and repr(arg).
2.368812
2.378452
0.995947
cache = {}

@wraps(fn)
def wrapped_fn(*args, **kwargs):
    cache_key = _memoize_cache_key(args, kwargs)
    try:
        return cache[cache_key]
    except KeyError:
        value = fn(*args, **kwargs)
        cache[cache_key] = value
        return value

def clear_cache():
    cache.clear()

# Needed to ensure that EnsemblRelease.clear_cache
# is able to clear memoized values from each of its methods
wrapped_fn.clear_cache = clear_cache

# expose the cache so we can check if an item has already been computed
wrapped_fn.cache = cache

# if we want to check whether an item is in the cache, first need
# to construct the same cache key as used by wrapped_fn
wrapped_fn.make_cache_key = _memoize_cache_key

return wrapped_fn
def memoize(fn)
Simple reset-able memoization decorator for functions and methods, assumes that all arguments to the function can be hashed and compared.
3.459898
3.638202
0.950991
release = check_release_number(release) species = check_species_object(species) return (release, species, server)
def normalize_init_values(cls, release, species, server)
Normalizes the arguments which uniquely specify an EnsemblRelease genome.
5.264882
4.739664
1.110813
init_args_tuple = cls.normalize_init_values(release, species, server) if init_args_tuple in cls._genome_cache: genome = cls._genome_cache[init_args_tuple] else: genome = cls._genome_cache[init_args_tuple] = cls(*init_args_tuple) return genome
def cached( cls, release=MAX_ENSEMBL_RELEASE, species=human, server=ENSEMBL_FTP_SERVER)
Construct EnsemblRelease if it's never been made before, otherwise return an old instance.
3.092169
3.062467
1.009698
if reference_name is None: reference_name = "" if annotation_name is None: annotation_name = "" if annotation_version is None: annotation_version = "" reference_dir = join(CACHE_BASE_SUBDIR, reference_name) annotation_dir = "%s%s" % (annotation_name, annotation_version) return join(reference_dir, annotation_dir)
def cache_subdirectory( reference_name=None, annotation_name=None, annotation_version=None)
Which cache subdirectory to use for a given annotation database over a particular reference. All arguments can be omitted to just get the base subdirectory for all pyensembl cached datasets.
2.062493
2.04071
1.010674
return ( ('reference_name', self.reference_name,), ('annotation_name', self.annotation_name), ('annotation_version', self.annotation_version), ('cache_directory_path', self.cache_directory_path), ('decompress_on_download', self.decompress_on_download), ('copy_local_files_to_cache', self.copy_local_files_to_cache) )
def _fields(self)
Fields used for hashing, string representation, equality comparison
3.401756
3.170291
1.073011
for ext in [".gz", ".gzip", ".zip"]: if filename.endswith(ext): return filename[:-len(ext)] return filename
def _remove_compression_suffix_if_present(self, filename)
If the given filename ends in one of the compression suffixes that datacache knows how to deal with, remove the suffix (since we expect the result of downloading to be a decompressed file)
3.066753
3.299944
0.929335
assert path_or_url, "Expected non-empty string for path_or_url"

remote_filename = split(path_or_url)[1]

if self.is_url_format(path_or_url):
    # passing `decompress=False` since there is logic below
    # for stripping decompression extensions for both local
    # and remote files
    local_filename = datacache.build_local_filename(
        download_url=path_or_url,
        filename=remote_filename,
        decompress=False)
else:
    local_filename = remote_filename

# if we expect the download function to decompress this file then
# we should use its name without the compression extension
if self.decompress_on_download:
    local_filename = self._remove_compression_suffix_if_present(
        local_filename)

if len(local_filename) == 0:
    raise ValueError("Can't determine local filename for %s" % (
        path_or_url,))

return join(self.cache_directory_path, local_filename)
def cached_path(self, path_or_url)
When downloading remote files, the default behavior is to name local files the same as their remote counterparts.
4.950798
4.720345
1.048821
cached_path = self.cached_path(url) missing = not exists(cached_path) if (missing or overwrite) and download_if_missing: logger.info("Fetching %s from URL %s", cached_path, url) datacache.download._download_and_decompress_if_necessary( full_path=cached_path, download_url=url, timeout=3600) elif missing: raise MissingRemoteFile(url) return cached_path
def _download_if_necessary(self, url, download_if_missing, overwrite)
Return local cached path to a remote file, download it if necessary.
4.306239
3.861739
1.115104
local_path = abspath(local_path) if not exists(local_path): raise MissingLocalFile(local_path) elif not self.copy_local_files_to_cache: return local_path else: cached_path = self.cached_path(local_path) if exists(cached_path) and not overwrite: return cached_path copy2(local_path, cached_path) return cached_path
def _copy_if_necessary(self, local_path, overwrite)
Return cached path to local file, copying it to the cache if necessary.
2.46447
2.144148
1.149393
assert path_or_url, "Expected non-empty string for path_or_url" if self.is_url_format(path_or_url): return self._download_if_necessary( path_or_url, download_if_missing, overwrite) else: return self._copy_if_necessary(path_or_url, overwrite)
def download_or_copy_if_necessary( self, path_or_url, download_if_missing=False, overwrite=False)
Get the local path to a possibly remote file. Download it if the file is missing from the cache directory and `download_if_missing` is True. Download even if a local copy exists when both `download_if_missing` and `overwrite` are True. If the file is on the local file system then return its path, unless self.copy_local_files_to_cache is True, in which case copy it to the cache first. Parameters ---------- path_or_url : str download_if_missing : bool, optional Download files if missing from local cache overwrite : bool, optional Overwrite existing copy if it exists
2.341789
3.083736
0.7594
for filename in listdir(self.cache_directory_path): delete = ( any([filename.endswith(ext) for ext in suffixes]) or any([filename.startswith(pre) for pre in prefixes])) if delete: path = join(self.cache_directory_path, filename) logger.info("Deleting %s", path) remove(path)
def delete_cached_files(self, prefixes=[], suffixes=[])
Deletes any cached files matching the prefixes or suffixes given
2.541379
2.578537
0.98559