code                      string
signature                 string
docstring                 string
loss_without_docstring    float64
loss_with_docstring       float64
factor                    float64
if count < 0: return self.find_previous_word_beginning(count=-count, WORD=WORD) regex = _FIND_BIG_WORD_RE if WORD else _FIND_WORD_RE iterator = regex.finditer(self.text_after_cursor) try: for i, match in enumerate(iterator): # Take first match, unless it's the word on which we're right now. if i == 0 and match.start(1) == 0: count += 1 if i + 1 == count: return match.start(1) except StopIteration: pass
def find_next_word_beginning(self, count=1, WORD=False)
Return an index relative to the cursor position pointing to the start of the next word. Return `None` if nothing was found.
3.689782
3.493476
1.056192
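As a usage sketch (not part of the dataset row above): the word-motion methods can be exercised on a prompt_toolkit `Document`; the `prompt_toolkit.document` import path is assumed from prompt_toolkit 1.x.

from prompt_toolkit.document import Document

doc = Document(text='hello world', cursor_position=0)
# Offset of the next word start, relative to the cursor: 'world' begins 6 characters ahead.
print(doc.find_next_word_beginning())                                         # -> 6
# Returns None when there is no further word after the cursor.
print(Document(text='hello', cursor_position=0).find_next_word_beginning())   # -> None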
if count < 0: return self.find_previous_word_ending(count=-count, WORD=WORD) if include_current_position: text = self.text_after_cursor else: text = self.text_after_cursor[1:] regex = _FIND_BIG_WORD_RE if WORD else _FIND_WORD_RE iterable = regex.finditer(text) try: for i, match in enumerate(iterable): if i + 1 == count: value = match.end(1) if include_current_position: return value else: return value + 1 except StopIteration: pass
def find_next_word_ending(self, include_current_position=False, count=1, WORD=False)
Return an index relative to the cursor position pointing to the end of the next word. Return `None` if nothing was found.
2.880383
2.743561
1.04987
if count < 0: return self.find_next_word_beginning(count=-count, WORD=WORD) regex = _FIND_BIG_WORD_RE if WORD else _FIND_WORD_RE iterator = regex.finditer(self.text_before_cursor[::-1]) try: for i, match in enumerate(iterator): if i + 1 == count: return - match.end(1) except StopIteration: pass
def find_previous_word_beginning(self, count=1, WORD=False)
Return an index relative to the cursor position pointing to the start of the previous word. Return `None` if nothing was found.
3.809935
3.394311
1.122447
if count < 0: return self.find_next_word_ending(count=-count, WORD=WORD) text_before_cursor = self.text_after_cursor[:1] + self.text_before_cursor[::-1] regex = _FIND_BIG_WORD_RE if WORD else _FIND_WORD_RE iterator = regex.finditer(text_before_cursor) try: for i, match in enumerate(iterator): # Take first match, unless it's the word on which we're right now. if i == 0 and match.start(1) == 0: count += 1 if i + 1 == count: return -match.start(1) + 1 except StopIteration: pass
def find_previous_word_ending(self, count=1, WORD=False)
Return an index relative to the cursor position pointing to the end of the previous word. Return `None` if nothing was found.
3.856042
3.696038
1.043291
result = None for index, line in enumerate(self.lines[self.cursor_position_row + 1:]): if match_func(line): result = 1 + index count -= 1 if count == 0: break return result
def find_next_matching_line(self, match_func, count=1)
Look downwards for lines matching `match_func`. Return the line index, relative to the current line, or `None` if no match was found.
3.720164
3.175907
1.171371
if count < 0: return self.get_cursor_right_position(-count) return - min(self.cursor_position_col, count)
def get_cursor_left_position(self, count=1)
Relative position for cursor left.
5.701737
5.434639
1.049147
if count < 0: return self.get_cursor_left_position(-count) return min(count, len(self.current_line_after_cursor))
def get_cursor_right_position(self, count=1)
Relative position for cursor right.
5.195759
4.888206
1.062917
assert count >= 1 column = self.cursor_position_col if preferred_column is None else preferred_column return self.translate_row_col_to_index( max(0, self.cursor_position_row - count), column) - self.cursor_position
def get_cursor_up_position(self, count=1, preferred_column=None)
Return the relative cursor position (character index) where we would be if the user pressed the arrow-up button. :param preferred_column: When given, go to this column instead of staying at the current column.
3.424197
4.025118
0.850707
assert count >= 1 column = self.cursor_position_col if preferred_column is None else preferred_column return self.translate_row_col_to_index( self.cursor_position_row + count, column) - self.cursor_position
def get_cursor_down_position(self, count=1, preferred_column=None)
Return the relative cursor position (character index) where we would be if the user pressed the arrow-down button. :param preferred_column: When given, go to this column instead of staying at the current column.
3.413859
3.981929
0.857338
if self.current_char == right_ch: return 0 if end_pos is None: end_pos = len(self.text) else: end_pos = min(len(self.text), end_pos) stack = 1 # Look forward. for i in range(self.cursor_position + 1, end_pos): c = self.text[i] if c == left_ch: stack += 1 elif c == right_ch: stack -= 1 if stack == 0: return i - self.cursor_position
def find_enclosing_bracket_right(self, left_ch, right_ch, end_pos=None)
Find the right bracket enclosing the current position. Return the position relative to the cursor. When `end_pos` is given, don't look past that position.
2.134119
2.005403
1.064185
if self.current_char == left_ch: return 0 if start_pos is None: start_pos = 0 else: start_pos = max(0, start_pos) stack = 1 # Look backward. for i in range(self.cursor_position - 1, start_pos - 1, -1): c = self.text[i] if c == right_ch: stack += 1 elif c == left_ch: stack -= 1 if stack == 0: return i - self.cursor_position
def find_enclosing_bracket_left(self, left_ch, right_ch, start_pos=None)
Find the left bracket enclosing the current position. Return the position relative to the cursor. When `start_pos` is given, don't look past that position.
2.393769
2.254894
1.061588
# Look for a match. for A, B in '()', '[]', '{}', '<>': if self.current_char == A: return self.find_enclosing_bracket_right(A, B, end_pos=end_pos) or 0 elif self.current_char == B: return self.find_enclosing_bracket_left(A, B, start_pos=start_pos) or 0 return 0
def find_matching_bracket_position(self, start_pos=None, end_pos=None)
Return the relative cursor position of the matching [, (, { or < bracket. When `start_pos` or `end_pos` is given, don't look past those positions.
3.555153
3.369244
1.055178
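A minimal illustration of the bracket matching above (import path assumed from prompt_toolkit 1.x):

from prompt_toolkit.document import Document

doc = Document(text='(hello)', cursor_position=0)   # cursor sits on '('
print(doc.find_matching_bracket_position())          # -> 6, the offset of the matching ')'
print(Document(text='plain text', cursor_position=0).find_matching_bracket_position())  # -> 0 (no bracket under the cursor)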
if after_whitespace: current_line = self.current_line return len(current_line) - len(current_line.lstrip()) - self.cursor_position_col else: return - len(self.current_line_before_cursor)
def get_start_of_line_position(self, after_whitespace=False)
Relative position for the start of this line.
3.378475
3.253572
1.03839
line_length = len(self.current_line) current_column = self.cursor_position_col column = max(0, min(line_length, column)) return column - current_column
def get_column_cursor_position(self, column)
Return the relative cursor position for this column on the current line. (The position is clamped to the line boundaries when the column number is too large.)
4.198048
3.462634
1.212386
# XXX: shouldn't this return `None` if there is no selection??? if self.selection: from_, to = sorted([self.cursor_position, self.selection.original_cursor_position]) else: from_, to = self.cursor_position, self.cursor_position return from_, to
def selection_range(self)
Return a (from, to) tuple of the selection. Start and end positions are included. This doesn't take the selection type into account; use `selection_ranges` instead.
5.798335
5.962219
0.972513
if self.selection: from_, to = sorted([self.cursor_position, self.selection.original_cursor_position]) if self.selection.type == SelectionType.BLOCK: from_line, from_column = self.translate_index_to_position(from_) to_line, to_column = self.translate_index_to_position(to) from_column, to_column = sorted([from_column, to_column]) lines = self.lines for l in range(from_line, to_line + 1): line_length = len(lines[l]) if from_column < line_length: yield (self.translate_row_col_to_index(l, from_column), self.translate_row_col_to_index(l, min(line_length - 1, to_column))) else: # In case of a LINES selection, go to the start/end of the lines. if self.selection.type == SelectionType.LINES: from_ = max(0, self.text.rfind('\n', 0, from_) + 1) if self.text.find('\n', to) >= 0: to = self.text.find('\n', to) else: to = len(self.text) - 1 yield from_, to
def selection_ranges(self)
Return a list of (from, to) tuples for the selection, or nothing if nothing was selected. Start and end positions are always included in the selection. In case of a BLOCK selection, this yields several (from, to) tuples.
2.499095
2.381112
1.049549
if self.selection: row_start = self.translate_row_col_to_index(row, 0) row_end = self.translate_row_col_to_index(row, max(0, len(self.lines[row]) - 1)) from_, to = sorted([self.cursor_position, self.selection.original_cursor_position]) # Take the intersection of the current line and the selection. intersection_start = max(row_start, from_) intersection_end = min(row_end, to) if intersection_start <= intersection_end: if self.selection.type == SelectionType.LINES: intersection_start = row_start intersection_end = row_end elif self.selection.type == SelectionType.BLOCK: _, col1 = self.translate_index_to_position(from_) _, col2 = self.translate_index_to_position(to) col1, col2 = sorted([col1, col2]) intersection_start = self.translate_row_col_to_index(row, col1) intersection_end = self.translate_row_col_to_index(row, col2) _, from_column = self.translate_index_to_position(intersection_start) _, to_column = self.translate_index_to_position(intersection_end) return from_column, to_column
def selection_range_at_line(self, row)
If the selection spans a portion of the given line, return a (from, to) tuple. Otherwise, return None.
2.207979
2.068215
1.067577
if self.selection: cut_parts = [] remaining_parts = [] new_cursor_position = self.cursor_position last_to = 0 for from_, to in self.selection_ranges(): if last_to == 0: new_cursor_position = from_ remaining_parts.append(self.text[last_to:from_]) cut_parts.append(self.text[from_:to + 1]) last_to = to + 1 remaining_parts.append(self.text[last_to:]) cut_text = '\n'.join(cut_parts) remaining_text = ''.join(remaining_parts) # In case of a LINES selection, don't include the trailing newline. if self.selection.type == SelectionType.LINES and cut_text.endswith('\n'): cut_text = cut_text[:-1] return (Document(text=remaining_text, cursor_position=new_cursor_position), ClipboardData(cut_text, self.selection.type)) else: return self, ClipboardData('')
def cut_selection(self)
Return a (:class:`.Document`, :class:`.ClipboardData`) tuple, where the document represents the new document when the selection is cut, and the clipboard data represents whatever has to be put on the clipboard.
2.857048
2.545688
1.122309
assert isinstance(data, ClipboardData) assert paste_mode in (PasteMode.VI_BEFORE, PasteMode.VI_AFTER, PasteMode.EMACS) before = (paste_mode == PasteMode.VI_BEFORE) after = (paste_mode == PasteMode.VI_AFTER) if data.type == SelectionType.CHARACTERS: if after: new_text = (self.text[:self.cursor_position + 1] + data.text * count + self.text[self.cursor_position + 1:]) else: new_text = self.text_before_cursor + data.text * count + self.text_after_cursor new_cursor_position = self.cursor_position + len(data.text) * count if before: new_cursor_position -= 1 elif data.type == SelectionType.LINES: l = self.cursor_position_row if before: lines = self.lines[:l] + [data.text] * count + self.lines[l:] new_text = '\n'.join(lines) new_cursor_position = len(''.join(self.lines[:l])) + l else: lines = self.lines[:l + 1] + [data.text] * count + self.lines[l + 1:] new_cursor_position = len(''.join(self.lines[:l + 1])) + l + 1 new_text = '\n'.join(lines) elif data.type == SelectionType.BLOCK: lines = self.lines[:] start_line = self.cursor_position_row start_column = self.cursor_position_col + (0 if before else 1) for i, line in enumerate(data.text.split('\n')): index = i + start_line if index >= len(lines): lines.append('') lines[index] = lines[index].ljust(start_column) lines[index] = lines[index][:start_column] + line * count + lines[index][start_column:] new_text = '\n'.join(lines) new_cursor_position = self.cursor_position + (0 if before else 1) return Document(text=new_text, cursor_position=new_cursor_position)
def paste_clipboard_data(self, data, paste_mode=PasteMode.EMACS, count=1)
Return a new :class:`.Document` instance which contains the result if we would paste this data at the current cursor position. :param paste_mode: Where to paste. (Before/after/emacs.) :param count: When >1, paste multiple times.
1.975575
1.942594
1.016978
count = 0 for line in self.lines[::-1]: if not line or line.isspace(): count += 1 else: break return count
def empty_line_count_at_the_end(self)
Return number of empty lines at the end of the document.
2.97589
2.541543
1.170899
def match_func(text): return not text or text.isspace() line_index = self.find_previous_matching_line(match_func=match_func, count=count) if line_index: add = 0 if before else 1 return min(0, self.get_cursor_up_position(count=-line_index) + add) else: return -self.cursor_position
def start_of_paragraph(self, count=1, before=False)
Return the start of the current paragraph. (Relative cursor position.)
5.551106
5.130406
1.082001
def match_func(text): return not text or text.isspace() line_index = self.find_next_matching_line(match_func=match_func, count=count) if line_index: add = 0 if after else 1 return max(0, self.get_cursor_down_position(count=line_index) - add) else: return len(self.text_after_cursor)
def end_of_paragraph(self, count=1, after=False)
Return the end of the current paragraph. (Relative cursor position.)
5.076111
4.71784
1.07594
return Document( text=self.text + text, cursor_position=self.cursor_position, selection=self.selection)
def insert_after(self, text)
Create a new document, with this text inserted after the buffer. It keeps selection ranges and cursor position in sync.
5.379672
3.955291
1.36012
selection_state = self.selection if selection_state: selection_state = SelectionState( original_cursor_position=selection_state.original_cursor_position + len(text), type=selection_state.type) return Document( text=text + self.text, cursor_position=self.cursor_position + len(text), selection=selection_state)
def insert_before(self, text)
Create a new document, with this text inserted before the buffer. It keeps selection ranges and cursor position in sync.
3.531423
3.25365
1.085373
assert get_search_state is None or callable(get_search_state) # Accept both Filters and booleans as input. enable_abort_and_exit_bindings = to_cli_filter(enable_abort_and_exit_bindings) enable_system_bindings = to_cli_filter(enable_system_bindings) enable_search = to_cli_filter(enable_search) enable_open_in_editor = to_cli_filter(enable_open_in_editor) enable_extra_page_navigation = to_cli_filter(enable_extra_page_navigation) enable_auto_suggest_bindings = to_cli_filter(enable_auto_suggest_bindings) registry = MergedRegistry([ # Load basic bindings. load_basic_bindings(), load_mouse_bindings(), ConditionalRegistry(load_abort_and_exit_bindings(), enable_abort_and_exit_bindings), ConditionalRegistry(load_basic_system_bindings(), enable_system_bindings), # Load emacs bindings. load_emacs_bindings(), ConditionalRegistry(load_emacs_open_in_editor_bindings(), enable_open_in_editor), ConditionalRegistry(load_emacs_search_bindings(get_search_state=get_search_state), enable_search), ConditionalRegistry(load_emacs_system_bindings(), enable_system_bindings), ConditionalRegistry(load_extra_emacs_page_navigation_bindings(), enable_extra_page_navigation), # Load Vi bindings. load_vi_bindings(get_search_state=get_search_state), ConditionalRegistry(load_vi_open_in_editor_bindings(), enable_open_in_editor), ConditionalRegistry(load_vi_search_bindings(get_search_state=get_search_state), enable_search), ConditionalRegistry(load_vi_system_bindings(), enable_system_bindings), ConditionalRegistry(load_extra_vi_page_navigation_bindings(), enable_extra_page_navigation), # Suggestion bindings. # (This has to come at the end, because the Vi bindings also have an # implementation for the "right arrow", but we really want the # suggestion binding when a suggestion is available.) ConditionalRegistry(load_auto_suggestion_bindings(), enable_auto_suggest_bindings), ]) return registry
def load_key_bindings( get_search_state=None, enable_abort_and_exit_bindings=False, enable_system_bindings=False, enable_search=False, enable_open_in_editor=False, enable_extra_page_navigation=False, enable_auto_suggest_bindings=False)
Create a Registry object that contains the default key bindings. :param enable_abort_and_exit_bindings: Filter to enable Ctrl-C and Ctrl-D. :param enable_system_bindings: Filter to enable the system bindings (meta-! prompt and Control-Z suspension.) :param enable_search: Filter to enable the search bindings. :param enable_open_in_editor: Filter to enable open-in-editor. :param enable_extra_page_navigation: Filter for enabling extra page navigation. (Bindings for up/down scrolling through long pages, like in Emacs or Vi.) :param enable_auto_suggest_bindings: Filter to enable fish-style suggestions.
2.051222
2.049133
1.00102
kw.setdefault('enable_abort_and_exit_bindings', True) kw.setdefault('enable_search', True) kw.setdefault('enable_auto_suggest_bindings', True) return load_key_bindings(**kw)
def load_key_bindings_for_prompt(**kw)
Create a ``Registry`` object with the default key bindings for an input prompt. This activates the key bindings for abort/exit (Ctrl-C/Ctrl-D), incremental search and auto suggestions. (Not for full screen applications.)
3.66153
3.332177
1.09884
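A hedged sketch of how such a registry is typically combined with `prompt` in prompt_toolkit 1.x; the `prompt_toolkit.key_binding.defaults` import path and the `key_bindings_registry` parameter name are assumptions based on that version's API.

from prompt_toolkit import prompt
from prompt_toolkit.key_binding.defaults import load_key_bindings_for_prompt  # assumed path
from prompt_toolkit.keys import Keys

registry = load_key_bindings_for_prompt()

@registry.add_binding(Keys.ControlT)
def _(event):
    # Insert some text whenever Ctrl-T is pressed.
    event.cli.current_buffer.insert_text('hello')

text = prompt('> ', key_bindings_registry=registry)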
if is_windows(): from prompt_toolkit.eventloop.win32 import Win32EventLoop as Loop return Loop(inputhook=inputhook, recognize_paste=recognize_win32_paste) else: from prompt_toolkit.eventloop.posix import PosixEventLoop as Loop return Loop(inputhook=inputhook)
def create_eventloop(inputhook=None, recognize_win32_paste=True)
Create and return an :class:`~prompt_toolkit.eventloop.base.EventLoop` instance for a :class:`~prompt_toolkit.interface.CommandLineInterface`.
2.299605
2.268549
1.01369
stdout = stdout or sys.__stdout__ true_color = to_simple_filter(true_color) if is_windows(): if is_conemu_ansi(): return ConEmuOutput(stdout) else: return Win32Output(stdout) else: term = os.environ.get('TERM', '') if PY2: term = term.decode('utf-8') return Vt100_Output.from_pty( stdout, true_color=true_color, ansi_colors_only=ansi_colors_only, term=term)
def create_output(stdout=None, true_color=False, ansi_colors_only=None)
Return an :class:`~prompt_toolkit.output.Output` instance for the command line. :param true_color: When True, use 24bit colors instead of 256 colors. (`bool` or :class:`~prompt_toolkit.filters.SimpleFilter`.) :param ansi_colors_only: When True, restrict to 16 ANSI colors only. (`bool` or :class:`~prompt_toolkit.filters.SimpleFilter`.)
3.936189
3.879672
1.014567
# Inline import, to make sure the rest doesn't break on Python 2. (Where # asyncio is not available.) if is_windows(): from prompt_toolkit.eventloop.asyncio_win32 import Win32AsyncioEventLoop as AsyncioEventLoop else: from prompt_toolkit.eventloop.asyncio_posix import PosixAsyncioEventLoop as AsyncioEventLoop return AsyncioEventLoop(loop)
def create_asyncio_eventloop(loop=None)
Returns an asyncio :class:`~prompt_toolkit.eventloop.EventLoop` instance for usage in a :class:`~prompt_toolkit.interface.CommandLineInterface`. It is a wrapper around an asyncio loop. :param loop: The asyncio event loop (or `None` if the default asyncio loop should be used.)
5.046597
4.425034
1.140465
def has_before_tokens(cli): for token, char in get_prompt_tokens(cli): if '\n' in char: return True return False def before(cli): result = [] found_nl = False for token, char in reversed(explode_tokens(get_prompt_tokens(cli))): if found_nl: result.insert(0, (token, char)) elif char == '\n': found_nl = True return result def first_input_line(cli): result = [] for token, char in reversed(explode_tokens(get_prompt_tokens(cli))): if char == '\n': break else: result.insert(0, (token, char)) return result return has_before_tokens, before, first_input_line
def _split_multiline_prompt(get_prompt_tokens)
Take a `get_prompt_tokens` function and return three new functions instead. One that tells whether this prompt consists of multiple lines; one that returns the tokens to be shown on the lines above the input; and another one with the tokens to be shown at the first line of the input.
2.708334
2.496983
1.084643
patch_stdout = kwargs.pop('patch_stdout', False) return_asyncio_coroutine = kwargs.pop('return_asyncio_coroutine', False) true_color = kwargs.pop('true_color', False) refresh_interval = kwargs.pop('refresh_interval', 0) eventloop = kwargs.pop('eventloop', None) application = create_prompt_application(message, **kwargs) return run_application(application, patch_stdout=patch_stdout, return_asyncio_coroutine=return_asyncio_coroutine, true_color=true_color, refresh_interval=refresh_interval, eventloop=eventloop)
def prompt(message='', **kwargs)
Get input from the user and return it. This is a wrapper around a lot of ``prompt_toolkit`` functionality and can be a replacement for `raw_input` (or GNU readline). If you want to keep your history across several calls, create one :class:`~prompt_toolkit.history.History` instance and pass it every time. This function accepts many keyword arguments. Except for the following, they are a proxy to the arguments of :func:`.create_prompt_application`. :param patch_stdout: Replace ``sys.stdout`` by a proxy that ensures that print statements from other threads won't destroy the prompt. (They will be printed above the prompt instead.) :param return_asyncio_coroutine: When True, return an asyncio coroutine. (Python >3.3) :param true_color: When True, use 24bit colors instead of 256 colors. :param refresh_interval: (number; in seconds) When given, refresh the UI every so many seconds.
2.506736
1.840977
1.361633
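A short usage sketch of `prompt` with a shared history, as the docstring describes (assuming prompt_toolkit 1.x, where `InMemoryHistory` lives in `prompt_toolkit.history`):

from prompt_toolkit import prompt
from prompt_toolkit.history import InMemoryHistory

history = InMemoryHistory()          # reuse the same instance to keep history across calls
while True:
    line = prompt('> ', history=history, patch_stdout=True)
    if line == 'exit':
        break
    print('You said:', line)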
assert isinstance(application, Application) if return_asyncio_coroutine: eventloop = create_asyncio_eventloop() else: eventloop = eventloop or create_eventloop() # Create CommandLineInterface. cli = CommandLineInterface( application=application, eventloop=eventloop, output=create_output(true_color=true_color)) # Set up refresh interval. if refresh_interval: done = [False] def start_refresh_loop(cli): def run(): while not done[0]: time.sleep(refresh_interval) cli.request_redraw() t = threading.Thread(target=run) t.daemon = True t.start() def stop_refresh_loop(cli): done[0] = True cli.on_start += start_refresh_loop cli.on_stop += stop_refresh_loop # Replace stdout. patch_context = cli.patch_stdout_context(raw=True) if patch_stdout else DummyContext() # Read input and return it. if return_asyncio_coroutine: # Create an asyncio coroutine and call it. exec_context = {'patch_context': patch_context, 'cli': cli, 'Document': Document} exec_(textwrap.dedent(''' def prompt_coro(): # Inline import, because it slows down startup when asyncio is not # needed. import asyncio @asyncio.coroutine def run(): with patch_context: result = yield from cli.run_async() if isinstance(result, Document): # Backwards-compatibility. return result.text return result return run() '''), exec_context) return exec_context['prompt_coro']() else: try: with patch_context: result = cli.run() if isinstance(result, Document): # Backwards-compatibility. return result.text return result finally: eventloop.close()
def run_application( application, patch_stdout=False, return_asyncio_coroutine=False, true_color=False, refresh_interval=0, eventloop=None)
Run a prompt toolkit application. :param patch_stdout: Replace ``sys.stdout`` by a proxy that ensures that print statements from other threads won't destroy the prompt. (They will be printed above the prompt instead.) :param return_asyncio_coroutine: When True, return an asyncio coroutine. (Python >3.3) :param true_color: When True, use 24bit colors instead of 256 colors. :param refresh_interval: (number; in seconds) When given, refresh the UI every so many seconds.
3.018895
3.048639
0.990244
registry = Registry() @registry.add_binding('y') @registry.add_binding('Y') def _(event): event.cli.buffers[DEFAULT_BUFFER].text = 'y' event.cli.set_return_value(True) @registry.add_binding('n') @registry.add_binding('N') @registry.add_binding(Keys.ControlC) def _(event): event.cli.buffers[DEFAULT_BUFFER].text = 'n' event.cli.set_return_value(False) return create_prompt_application(message, key_bindings_registry=registry)
def create_confirm_application(message)
Create a confirmation `Application` that returns True/False.
2.92984
2.889116
1.014096
assert isinstance(message, text_type) app = create_confirm_application(message) return run_application(app)
def confirm(message='Confirm (y or n) ')
Display a confirmation prompt.
11.197021
9.772213
1.145802
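Illustrative use of `confirm` (import path assumed from prompt_toolkit 1.x shortcuts):

from prompt_toolkit.shortcuts import confirm   # assumed import path

if confirm('Really delete? (y or n) '):
    print('deleting...')
else:
    print('aborted')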
if style is None: style = DEFAULT_STYLE assert isinstance(style, Style) output = create_output(true_color=true_color, stdout=file) renderer_print_tokens(output, tokens, style)
def print_tokens(tokens, style=None, true_color=False, file=None)
Print a list of (Token, text) tuples in the given style to the output. E.g.:: style = style_from_dict({ Token.Hello: '#ff0066', Token.World: '#884444 italic', }) tokens = [ (Token.Hello, 'Hello'), (Token.World, 'World'), ] print_tokens(tokens, style=style) :param tokens: List of ``(Token, text)`` tuples. :param style: :class:`.Style` instance for the color scheme. :param true_color: When True, use 24bit colors instead of 256 colors. :param file: The output file. This can be `sys.stdout` or `sys.stderr`.
5.00331
7.598765
0.658437
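The docstring's own example, made self-contained; the import locations (`prompt_toolkit.shortcuts`, `prompt_toolkit.styles`, pygments' `Token`) are assumptions based on prompt_toolkit 1.x.

from prompt_toolkit.shortcuts import print_tokens
from prompt_toolkit.styles import style_from_dict
from pygments.token import Token

style = style_from_dict({
    Token.Hello: '#ff0066',
    Token.World: '#884444 italic',
})
# Print styled text followed by a newline.
print_tokens([(Token.Hello, 'Hello '), (Token.World, 'World'), (Token, '\n')], style=style)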
" Create a table that maps the 16 named ansi colors to their Windows code. " return { 'ansidefault': color_cls.BLACK, 'ansiblack': color_cls.BLACK, 'ansidarkgray': color_cls.BLACK | color_cls.INTENSITY, 'ansilightgray': color_cls.GRAY, 'ansiwhite': color_cls.GRAY | color_cls.INTENSITY, # Low intensity. 'ansidarkred': color_cls.RED, 'ansidarkgreen': color_cls.GREEN, 'ansibrown': color_cls.YELLOW, 'ansidarkblue': color_cls.BLUE, 'ansipurple': color_cls.MAGENTA, 'ansiteal': color_cls.CYAN, # High intensity. 'ansired': color_cls.RED | color_cls.INTENSITY, 'ansigreen': color_cls.GREEN | color_cls.INTENSITY, 'ansiyellow': color_cls.YELLOW | color_cls.INTENSITY, 'ansiblue': color_cls.BLUE | color_cls.INTENSITY, 'ansifuchsia': color_cls.MAGENTA | color_cls.INTENSITY, 'ansiturquoise': color_cls.CYAN | color_cls.INTENSITY, }
def _create_ansi_color_dict(color_cls)
Create a table that maps the 16 named ansi colors to their Windows code.
2.040518
1.800092
1.133563
self.flush() if _DEBUG_RENDER_OUTPUT: self.LOG.write(('%r' % func.__name__).encode('utf-8') + b'\n') self.LOG.write(b' ' + ', '.join(['%r' % i for i in a]).encode('utf-8') + b'\n') self.LOG.write(b' ' + ', '.join(['%r' % type(i) for i in a]).encode('utf-8') + b'\n') self.LOG.flush() try: return func(*a, **kw) except ArgumentError as e: if _DEBUG_RENDER_OUTPUT: self.LOG.write((' Error in %r %r %s\n' % (func.__name__, e, e)).encode('utf-8'))
def _winapi(self, func, *a, **kw)
Flush and call win API function.
2.815962
2.68219
1.049874
# NOTE: We don't call the `GetConsoleScreenBufferInfo` API through # `self._winapi`. Doing so causes Python to crash on certain 64bit # Python versions. (Reproduced with 64bit Python 2.7.6, on Windows # 10). It is not clear why. Possibly, it has to do with passing # these objects as an argument, or through *args. # The Python documentation contains the following - possibly related - warning: # ctypes does not support passing unions or structures with # bit-fields to functions by value. While this may work on 32-bit # x86, it's not guaranteed by the library to work in the general # case. Unions and structures with bit-fields should always be # passed to functions by pointer. # Also see: # - https://github.com/ipython/ipython/issues/10070 # - https://github.com/jonathanslenders/python-prompt-toolkit/issues/406 # - https://github.com/jonathanslenders/python-prompt-toolkit/issues/86 self.flush() sbinfo = CONSOLE_SCREEN_BUFFER_INFO() success = windll.kernel32.GetConsoleScreenBufferInfo(self.hconsole, byref(sbinfo)) # success = self._winapi(windll.kernel32.GetConsoleScreenBufferInfo, # self.hconsole, byref(sbinfo)) if success: return sbinfo else: raise NoConsoleScreenBufferError
def get_win32_screen_buffer_info(self)
Return Screen buffer info.
5.365901
5.31709
1.00918
assert isinstance(title, six.text_type) self._winapi(windll.kernel32.SetConsoleTitleW, title)
def set_title(self, title)
Set terminal title.
5.80935
4.546999
1.277623
" Reset the console foreground/background color. " self._winapi(windll.kernel32.SetConsoleTextAttribute, self.hconsole, self.default_attrs)
def reset_attributes(self)
Reset the console foreground/background color.
14.078557
7.589896
1.854908
if not self._buffer: # Only flush stdout buffer. (It could be that Python still has # something in its buffer. -- We want to be sure to print that in # the correct color.) self.stdout.flush() return data = ''.join(self._buffer) if _DEBUG_RENDER_OUTPUT: self.LOG.write(('%r' % data).encode('utf-8') + b'\n') self.LOG.flush() # Print characters one by one. This appears to be the best solution # in order to avoid traces of vertical lines when the completion # menu disappears. for b in data: written = DWORD() retval = windll.kernel32.WriteConsoleW(self.hconsole, b, 1, byref(written), None) assert retval != 0 self._buffer = []
def flush(self)
Write to output stream and flush.
8.514157
8.372643
1.016902
# Get current window size info = self.get_win32_screen_buffer_info() sr = info.srWindow cursor_pos = info.dwCursorPosition result = SMALL_RECT() # Scroll to the left. result.Left = 0 result.Right = sr.Right - sr.Left # Scroll vertical win_height = sr.Bottom - sr.Top if 0 < sr.Bottom - cursor_pos.Y < win_height - 1: # no vertical scroll if cursor already on the screen result.Bottom = sr.Bottom else: result.Bottom = max(win_height, cursor_pos.Y) result.Top = result.Bottom - win_height # Scroll API self._winapi(windll.kernel32.SetConsoleWindowInfo, self.hconsole, True, byref(result))
def scroll_buffer_to_prompt(self)
To be called before drawing the prompt. This should scroll the console to left, with the cursor at the bottom (if possible).
4.803418
4.524713
1.061596
if not self._in_alternate_screen: GENERIC_READ = 0x80000000 GENERIC_WRITE = 0x40000000 # Create a new console buffer and activate that one. handle = self._winapi(windll.kernel32.CreateConsoleScreenBuffer, GENERIC_READ|GENERIC_WRITE, DWORD(0), None, DWORD(1), None) self._winapi(windll.kernel32.SetConsoleActiveScreenBuffer, handle) self.hconsole = handle self._in_alternate_screen = True
def enter_alternate_screen(self)
Go to alternate screen buffer.
3.920999
3.581328
1.094845
if self._in_alternate_screen: stdout = self._winapi(windll.kernel32.GetStdHandle, STD_OUTPUT_HANDLE) self._winapi(windll.kernel32.SetConsoleActiveScreenBuffer, stdout) self._winapi(windll.kernel32.CloseHandle, self.hconsole) self.hconsole = stdout self._in_alternate_screen = False
def quit_alternate_screen(self)
Make stdout again the active buffer.
3.690026
2.983948
1.236626
# Get console handle handle = windll.kernel32.GetConsoleWindow() RDW_INVALIDATE = 0x0001 windll.user32.RedrawWindow(handle, None, None, c_uint(RDW_INVALIDATE))
def win32_refresh_window(cls)
Call win32 API to refresh the whole Window. This is sometimes necessary when the application paints background for completion menus. When the menu disappears, it leaves traces due to a bug in the Windows Console. Sending a repaint request solves it.
5.420332
5.277958
1.026975
FG = FOREGROUND_COLOR BG = BACKROUND_COLOR return [ (0x00, 0x00, 0x00, FG.BLACK, BG.BLACK), (0x00, 0x00, 0xaa, FG.BLUE, BG.BLUE), (0x00, 0xaa, 0x00, FG.GREEN, BG.GREEN), (0x00, 0xaa, 0xaa, FG.CYAN, BG.CYAN), (0xaa, 0x00, 0x00, FG.RED, BG.RED), (0xaa, 0x00, 0xaa, FG.MAGENTA, BG.MAGENTA), (0xaa, 0xaa, 0x00, FG.YELLOW, BG.YELLOW), (0x88, 0x88, 0x88, FG.GRAY, BG.GRAY), (0x44, 0x44, 0xff, FG.BLUE | FG.INTENSITY, BG.BLUE | BG.INTENSITY), (0x44, 0xff, 0x44, FG.GREEN | FG.INTENSITY, BG.GREEN | BG.INTENSITY), (0x44, 0xff, 0xff, FG.CYAN | FG.INTENSITY, BG.CYAN | BG.INTENSITY), (0xff, 0x44, 0x44, FG.RED | FG.INTENSITY, BG.RED | BG.INTENSITY), (0xff, 0x44, 0xff, FG.MAGENTA | FG.INTENSITY, BG.MAGENTA | BG.INTENSITY), (0xff, 0xff, 0x44, FG.YELLOW | FG.INTENSITY, BG.YELLOW | BG.INTENSITY), (0x44, 0x44, 0x44, FG.BLACK | FG.INTENSITY, BG.BLACK | BG.INTENSITY), (0xff, 0xff, 0xff, FG.GRAY | FG.INTENSITY, BG.GRAY | BG.INTENSITY), ]
def _build_color_table()
Build a table for mapping RGB values to the 16 Windows console colors.
1.337616
1.334488
1.002344
# Foreground. if fg_color in FG_ANSI_COLORS: return FG_ANSI_COLORS[fg_color] else: return self._color_indexes(fg_color)[0]
def lookup_fg_color(self, fg_color)
Return the color for use in the `windll.kernel32.SetConsoleTextAttribute` API call. :param fg_color: Foreground as text. E.g. 'ffffff' or 'red'
4.780527
6.260621
0.763587
# Background. if bg_color in BG_ANSI_COLORS: return BG_ANSI_COLORS[bg_color] else: return self._color_indexes(bg_color)[1]
def lookup_bg_color(self, bg_color)
Return the color for use in the `windll.kernel32.SetConsoleTextAttribute` API call. :param bg_color: Background as text. E.g. 'ffffff' or 'red'
6.394527
7.544967
0.847522
if hasattr(array_or_tuple, 'size'): # pytorch tensors use V.size() to get size of tensor return list(array_or_tuple.size()) elif hasattr(array_or_tuple, 'get_shape'): # tensorflow uses V.get_shape() to get size of tensor return array_or_tuple.get_shape().as_list() elif hasattr(array_or_tuple, 'shape'): return array_or_tuple.shape try: # treat object as iterable return [nested_shape(item) for item in list(array_or_tuple)] except TypeError: # object is not actually iterable # LB: Maybe we should throw an error? return []
def nested_shape(array_or_tuple)
Figure out the shape of tensors possibly embedded in tuples, e.g. [0,0] returns (2); ([0,0], [0,0]) returns (2,2); (([0,0], [0,0]), [0,0]) returns ((2,2),2).
3.605197
3.650776
0.987515
log_track[LOG_TRACK_COUNT] += 1 if log_track[LOG_TRACK_COUNT] < log_track[LOG_TRACK_THRESHOLD]: return False log_track[LOG_TRACK_COUNT] = 0 return True
def log_track_update(log_track)
Count (log_track[0]) up to the threshold (log_track[1]); when the threshold is reached, reset the count and return True.
2.474734
2.017188
1.226824
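The log_track value is just a two-element counter list; a self-contained restatement of the pattern (the constants and `log_track_init` here are illustrative, not the wandb internals):

LOG_TRACK_COUNT, LOG_TRACK_THRESHOLD = 0, 1

def log_track_init(log_freq):
    # [current count, threshold]
    return [0, log_freq]

def log_track_update(log_track):
    log_track[LOG_TRACK_COUNT] += 1
    if log_track[LOG_TRACK_COUNT] < log_track[LOG_TRACK_THRESHOLD]:
        return False
    log_track[LOG_TRACK_COUNT] = 0
    return True

track = log_track_init(3)
print([log_track_update(track) for _ in range(6)])   # -> [False, False, True, False, False, True]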
if name is not None: prefix = prefix + name if log_parameters: def parameter_log_hook(module, input_, output, log_track): if not log_track_update(log_track): return for name, parameter in module.named_parameters(): # for pytorch 0.3 Variables if isinstance(parameter, torch.autograd.Variable): data = parameter.data else: data = parameter self.log_tensor_stats( data.cpu(), 'parameters/' + prefix + name) log_track_params = log_track_init(log_freq) module.register_forward_hook( lambda mod, inp, outp: parameter_log_hook(mod, inp, outp, log_track_params)) if log_gradients: for name, parameter in module.named_parameters(): if parameter.requires_grad: log_track_grad = log_track_init(log_freq) self._hook_variable_gradient_stats( parameter, 'gradients/' + prefix + name, log_track_grad)
def add_log_hooks_to_pytorch_module(self, module, name=None, prefix='', log_parameters=True, log_gradients=True, log_freq=0)
This instruments hooks into the pytorch module. log_parameters - log parameters after a forward pass. log_gradients - log gradients after a backward pass. log_freq - log gradients/parameters every N batches.
2.952052
2.990913
0.987007
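In user code this hook machinery is normally reached through the public `wandb.watch` helper rather than by calling the method above directly; a hedged sketch (argument names per recent wandb releases, which may differ from the version this snippet comes from, and the project name is hypothetical):

import wandb
import torch.nn as nn

wandb.init(project='demo')                       # hypothetical project name
model = nn.Linear(10, 2)
wandb.watch(model, log='all', log_freq=100)      # log parameters and gradients every 100 batches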
# TODO Handle the case of duplicate names. if (isinstance(tensor, tuple) or isinstance(tensor, list)): while (isinstance(tensor, tuple) or isinstance(tensor, list)) and (isinstance(tensor[0], tuple) or isinstance(tensor[0], list)): tensor = [item for sublist in tensor for item in sublist] tensor = torch.cat([t.view(-1) for t in tensor]) # checking for inheritance from _TensorBase didn't work for some reason if not hasattr(tensor, 'shape'): cls = type(tensor) raise TypeError('Expected Tensor, not {}.{}'.format( cls.__module__, cls.__name__)) history = self._history() if history is None or not history.compute: return # HalfTensors on cpu do not support view(), upconvert to 32bit if isinstance(tensor, torch.HalfTensor): tensor = tensor.clone().type(torch.FloatTensor).detach() flat = tensor.view(-1) # For pytorch 0.3 we use unoptimized numpy histograms (detach is new in 0.4) if not hasattr(flat, "detach"): tensor = flat.cpu().clone().numpy() history.row.update({ name: wandb.Histogram(tensor) }) return if flat.is_cuda: # TODO(jhr): see if pytorch will accept something upstream to check cuda support for ops # until then, we are going to have to catch a specific exception to check for histc support. if self._is_cuda_histc_supported is None: self._is_cuda_histc_supported = True check = torch.cuda.FloatTensor(1).fill_(0) try: check = flat.histc(bins=self._num_bins) except RuntimeError as e: # Only work around missing support with specific exception if str(e).startswith("_th_histc is not implemented"): self._is_cuda_histc_supported = False if not self._is_cuda_histc_supported: flat = flat.cpu().clone().detach() # As of torch 1.0.1.post2+nightly, float16 cuda summary ops are not supported (convert to float32) if isinstance(flat, torch.cuda.HalfTensor): flat = flat.clone().type(torch.cuda.FloatTensor).detach() if isinstance(flat, torch.HalfTensor): flat = flat.clone().type(torch.FloatTensor).detach() tmin = flat.min().item() tmax = flat.max().item() tensor = flat.histc(bins=self._num_bins, min=tmin, max=tmax) tensor = tensor.cpu().clone().detach() bins = torch.linspace(tmin, tmax, steps=self._num_bins + 1) history.row.update({ name: wandb.Histogram(np_histogram=( tensor.tolist(), bins.tolist())) })
def log_tensor_stats(self, tensor, name)
Add distribution statistics on a tensor's elements to the current History entry
4.301534
4.250516
1.012003
if not isinstance(var, torch.autograd.Variable): cls = type(var) raise TypeError('Expected torch.Variable, not {}.{}'.format( cls.__module__, cls.__name__)) handle = self._hook_handles.get(name) if handle is not None and self._torch_hook_handle_is_valid(handle): raise ValueError( 'A hook has already been set under name "{}"'.format(name)) def _callback(grad, log_track): if not log_track_update(log_track): return self.log_tensor_stats(grad.data, name) handle = var.register_hook(lambda grad: _callback(grad, log_track)) self._hook_handles[name] = handle return handle
def _hook_variable_gradient_stats(self, var, name, log_track)
Logs a Variable's gradient's distribution statistics next time backward() is called on it.
3.401676
3.469118
0.980559
try: return self._line_heights[lineno, width] except KeyError: text = token_list_to_text(self.get_line(lineno)) result = self.get_height_for_text(text, width) # Cache and return self._line_heights[lineno, width] = result return result
def get_height_for_line(self, lineno, width)
Return the height that a given line would need if it is rendered in a space with the given width.
3.326456
3.327628
0.999648
return self._token_cache.get( cli.render_counter, lambda: self.get_tokens(cli))
def _get_tokens_cached(self, cli)
Get tokens, but only retrieve tokens once during one render run. (This function is called several times during one rendering, because we also need those for calculating the dimensions.)
10.728631
5.646701
1.899982
text = token_list_to_text(self._get_tokens_cached(cli)) line_lengths = [get_cwidth(l) for l in text.split('\n')] return max(line_lengths)
def preferred_width(self, cli, max_available_width)
Return the preferred width for this control. That is the width of the longest line.
6.695579
5.943511
1.126536
if self._tokens: # Read the generator. tokens_for_line = list(split_lines(self._tokens)) try: tokens = tokens_for_line[mouse_event.position.y] except IndexError: return NotImplemented else: # Find position in the token list. xpos = mouse_event.position.x # Find mouse handler for this character. count = 0 for item in tokens: count += len(item[1]) if count >= xpos: if len(item) >= 3: # Handler found. Call it. # (Handler can return NotImplemented, so return # that result.) handler = item[2] return handler(cli, mouse_event) else: break # Otherwise, don't handle here. return NotImplemented
def mouse_handler(self, cli, mouse_event)
Handle mouse events. (When the token list contains mouse handlers and the user clicked on any of these, the matching handler is called. This handler can still return `NotImplemented` in case we want the `Window` to handle this particular event.)
5.199069
4.474386
1.161963
# Cache using `document.text`. def get_tokens_for_line(): return self.lexer.lex_document(cli, document) return self._token_cache.get(document.text, get_tokens_for_line)
def _get_tokens_for_line_func(self, cli, document)
Create a function that returns the tokens for a given line.
5.019848
4.806815
1.044319
def transform(lineno, tokens): " Transform the tokens for a given line number. " source_to_display_functions = [] display_to_source_functions = [] # Get cursor position at this line. if document.cursor_position_row == lineno: cursor_column = document.cursor_position_col else: cursor_column = None def source_to_display(i): for f in source_to_display_functions: i = f(i) return i # Apply each processor. for p in self.input_processors: transformation = p.apply_transformation( cli, document, lineno, source_to_display, tokens) tokens = transformation.tokens if cursor_column: cursor_column = transformation.source_to_display(cursor_column) display_to_source_functions.append(transformation.display_to_source) source_to_display_functions.append(transformation.source_to_display) def display_to_source(i): for f in reversed(display_to_source_functions): i = f(i) return i return _ProcessedLine(tokens, source_to_display, display_to_source) def create_func(): get_line = self._get_tokens_for_line_func(cli, document) cache = {} def get_processed_line(i): try: return cache[i] except KeyError: processed_line = transform(i, get_line(i)) cache[i] = processed_line return processed_line return get_processed_line return create_func()
def _create_get_processed_line_func(self, cli, document)
Create a function that takes a line number of the current document and returns a _ProcessedLine(processed_tokens, source_to_display, display_to_source) tuple.
2.703934
2.512995
1.075981
buffer = self._buffer(cli) # Get the document to be shown. If we are currently searching (the # search buffer has focus, and the preview_search filter is enabled), # then use the search document, which has possibly a different # text/cursor position.) def preview_now(): return bool(self.preview_search(cli) and cli.buffers[self.search_buffer_name].text) if preview_now(): if self.get_search_state: ss = self.get_search_state(cli) else: ss = cli.search_state document = buffer.document_for_search(SearchState( text=cli.current_buffer.text, direction=ss.direction, ignore_case=ss.ignore_case)) else: document = buffer.document get_processed_line = self._create_get_processed_line_func(cli, document) self._last_get_processed_line = get_processed_line def translate_rowcol(row, col): " Return the content column for this coordinate. " return Point(y=row, x=get_processed_line(row).source_to_display(col)) def get_line(i): " Return the tokens for a given line number. " tokens = get_processed_line(i).tokens # Add a space at the end, because that is a possible cursor # position. (When inserting after the input.) We should do this on # all the lines, not just the line containing the cursor. (Because # otherwise, line wrapping/scrolling could change when moving the # cursor around.) tokens = tokens + [(self.default_char.token, ' ')] return tokens content = UIContent( get_line=get_line, line_count=document.line_count, cursor_position=translate_rowcol(document.cursor_position_row, document.cursor_position_col), default_char=self.default_char) # If there is an auto completion going on, use that start point for a # pop-up menu position. (But only when this buffer has the focus -- # there is only one place for a menu, determined by the focussed buffer.) if cli.current_buffer_name == self.buffer_name: menu_position = self.menu_position(cli) if self.menu_position else None if menu_position is not None: assert isinstance(menu_position, int) menu_row, menu_col = buffer.document.translate_index_to_position(menu_position) content.menu_position = translate_rowcol(menu_row, menu_col) elif buffer.complete_state: # Position for completion menu. # Note: We use 'min', because the original cursor position could be # behind the input string when the actual completion is for # some reason shorter than the text we had before. (A completion # can change and shorten the input.) menu_row, menu_col = buffer.document.translate_index_to_position( min(buffer.cursor_position, buffer.complete_state.original_document.cursor_position)) content.menu_position = translate_rowcol(menu_row, menu_col) else: content.menu_position = None return content
def create_content(self, cli, width, height)
Create a UIContent.
4.961231
4.907465
1.010956
buffer = self._buffer(cli) position = mouse_event.position # Focus buffer when clicked. if self.has_focus(cli): if self._last_get_processed_line: processed_line = self._last_get_processed_line(position.y) # Translate coordinates back to the cursor position of the # original input. xpos = processed_line.display_to_source(position.x) index = buffer.document.translate_row_col_to_index(position.y, xpos) # Set the cursor position. if mouse_event.event_type == MouseEventType.MOUSE_DOWN: buffer.exit_selection() buffer.cursor_position = index elif mouse_event.event_type == MouseEventType.MOUSE_UP: # When the cursor was moved to another place, select the text. # (The >1 is actually a small but acceptable workaround for # selecting text in Vi navigation mode. In navigation mode, # the cursor can never be after the text, so the cursor # will be repositioned automatically.) if abs(buffer.cursor_position - index) > 1: buffer.start_selection(selection_type=SelectionType.CHARACTERS) buffer.cursor_position = index # Select word around cursor on double click. # Two MOUSE_UP events in a short timespan are considered a double click. double_click = self._last_click_timestamp and time.time() - self._last_click_timestamp < .3 self._last_click_timestamp = time.time() if double_click: start, end = buffer.document.find_boundaries_of_current_word() buffer.cursor_position += start buffer.start_selection(selection_type=SelectionType.CHARACTERS) buffer.cursor_position += end - start else: # Don't handle scroll events here. return NotImplemented # Not focussed, but focussing on click events. else: if self.focus_on_click(cli) and mouse_event.event_type == MouseEventType.MOUSE_UP: # Focus happens on mouseup. (If we did this on mousedown, the # up event will be received at the point where this widget is # focussed and be handled anyway.) cli.focus(self.buffer_name) else: return NotImplemented
def mouse_handler(self, cli, mouse_event)
Mouse handler for this control.
4.74424
4.772035
0.994175
arg = cli.input_processor.arg return [ (Token.Prompt.Arg, '(arg: '), (Token.Prompt.Arg.Text, str(arg)), (Token.Prompt.Arg, ') '), ]
def _get_arg_tokens(cli)
Tokens for the arg-prompt.
7.329983
5.97858
1.226041
assert isinstance(message, text_type) def get_message_tokens(cli): return [(Token.Prompt, message)] return cls(get_message_tokens)
def from_message(cls, message='> ')
Create a default prompt with a static message text.
7.149564
5.627579
1.270451
if not isinstance(data, list): print('warning: malformed json data for file', fname) return with open(fname, 'w') as of: for row in data: # TODO: other malformed cases? if row.strip(): of.write('%s\n' % row.strip())
def write_jsonl_file(fname, data)
Writes a jsonl file. Args: data: list of json encoded data
4.953034
6.104834
0.81133
# Process signal asynchronously, because this handler can write to the # output, and doing this inside the signal handler causes easily # reentrant calls, giving runtime errors. # Further, this has to be thread safe. When the CommandLineInterface # runs not in the main thread, this function still has to be called # from the main thread. (The only place where we can install signal # handlers.) def process_winch(): if self._callbacks: self._callbacks.terminal_size_changed() self.call_from_executor(process_winch)
def received_winch(self)
Notify the event loop that SIGWINCH has been received
15.561491
14.49563
1.07353
# Wait until the main thread is idle. # We start the thread by using `call_from_executor`. The event loop # favours processing input over `calls_from_executor`, so the thread # will not start until there is no more input to process and the main # thread becomes idle for an instant. This is good, because Python # threading favours CPU over I/O -- an autocompletion thread in the # background would cause a significant slowdown of the main thread. # It is mostly noticeable when pasting large portions of text while # having real-time autocompletion while typing. def start_executor(): threading.Thread(target=callback).start() self.call_from_executor(start_executor)
def run_in_executor(self, callback)
Run a long running function in a background thread. (This is recommended for code that could block the event loop.) Similar to Twisted's ``deferToThread``.
13.033876
13.248477
0.983802
assert _max_postpone_until is None or isinstance(_max_postpone_until, float) self._calls_from_executor.append((callback, _max_postpone_until)) if self._schedule_pipe: try: os.write(self._schedule_pipe[1], b'x') except (AttributeError, IndexError, OSError): # Handle race condition. We're in a different thread. # - `_schedule_pipe` could have become None in the meantime. # - We catch `OSError` (actually BrokenPipeError), because the # main thread could have closed the pipe already. pass
def call_from_executor(self, callback, _max_postpone_until=None)
Call this function in the main event loop. Similar to Twisted's ``callFromThread``. :param _max_postpone_until: `None` or `time.time` value. For internal use. If the eventloop is saturated, consider this task to be low priority and postpone it at most until this timestamp. (For instance, repaint is done using low priority.)
4.684231
4.662602
1.004639
" Add read file descriptor to the event loop. " fd = fd_to_int(fd) self._read_fds[fd] = callback self.selector.register(fd)
def add_reader(self, fd, callback)
Add read file descriptor to the event loop.
7.624868
5.41471
1.408177
" Remove read file descriptor from the event loop. " fd = fd_to_int(fd) if fd in self._read_fds: del self._read_fds[fd] self.selector.unregister(fd)
def remove_reader(self, fd)
Remove read file descriptor from the event loop.
4.937876
4.063094
1.215299
cache = SimpleCache(maxsize=maxsize) def decorator(obj): @wraps(obj) def new_callable(*a, **kw): def create_new(): return obj(*a, **kw) key = (a, tuple(kw.items())) return cache.get(key, create_new) return new_callable return decorator
def memoized(maxsize=1024)
Memoization decorator for immutable classes and pure functions.
2.821535
2.744176
1.02819
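A small usage sketch of the decorator; the `prompt_toolkit.cache` import path is an assumption (adjust to wherever `memoized` is defined):

from prompt_toolkit.cache import memoized   # assumed location

@memoized(maxsize=256)
def fib(n):
    # Each distinct n is computed only once thanks to the cache.
    return n if n < 2 else fib(n - 1) + fib(n - 2)

print(fib(30))   # -> 832040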
# Look in cache first. try: return self._data[key] except KeyError: # Not found? Get it. value = getter_func() self._data[key] = value self._keys.append(key) # Remove the oldest key when the size is exceeded. if len(self._data) > self.maxsize: key_to_remove = self._keys.popleft() if key_to_remove in self._data: del self._data[key_to_remove] return value
def get(self, key, getter_func)
Get object from the cache. If not found, call `getter_func` to resolve it, and put that on the top of the cache instead.
2.535686
2.485794
1.020071
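Usage sketch for the cache above (the `prompt_toolkit.cache` import path is assumed):

from prompt_toolkit.cache import SimpleCache   # assumed location

cache = SimpleCache(maxsize=2)
print(cache.get('a', lambda: 1))     # computed -> 1
print(cache.get('a', lambda: 999))   # served from the cache -> 1
cache.get('b', lambda: 2)
cache.get('c', lambda: 3)            # exceeds maxsize, so the oldest key 'a' is evicted
print(cache.get('a', lambda: 42))    # recomputed -> 42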
filter = to_cli_filter(kwargs.pop('filter', True)) eager = to_cli_filter(kwargs.pop('eager', False)) save_before = kwargs.pop('save_before', lambda e: True) to_cli_filter(kwargs.pop('invalidate_ui', True)) # Deprecated! (ignored.) assert not kwargs assert keys assert all(isinstance(k, (Key, text_type)) for k in keys), \ 'Key bindings should consist of Key and string (unicode) instances.' assert callable(save_before) if isinstance(filter, Never): # When a filter is Never, it will always stay disabled, so in that case # don't bother putting it in the registry. It will slow down every key # press otherwise. def decorator(func): return func else: def decorator(func): self.key_bindings.append( _Binding(keys, func, filter=filter, eager=eager, save_before=save_before)) self._clear_cache() return func return decorator
def add_binding(self, *keys, **kwargs)
Decorator for annotating key bindings. :param filter: :class:`~prompt_toolkit.filters.CLIFilter` to determine when this key binding is active. :param eager: :class:`~prompt_toolkit.filters.CLIFilter` or `bool`. When True, ignore potential longer matches when this key binding is hit. E.g. when there is an active eager key binding for Ctrl-X, execute the handler immediately and ignore the key binding for Ctrl-X Ctrl-E of which it is a prefix. :param save_before: Callable that takes an `Event` and returns True if we should save the current buffer, before handling the event. (That's the default.)
5.757151
4.6406
1.240605
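A minimal binding registration sketch (the `Registry` import path is assumed from prompt_toolkit 1.x):

from prompt_toolkit.key_binding.registry import Registry   # assumed path
from prompt_toolkit.keys import Keys

registry = Registry()

@registry.add_binding(Keys.ControlT, eager=True)
def _(event):
    # Runs as soon as Ctrl-T is pressed, ignoring any longer bindings it prefixes.
    event.cli.current_buffer.insert_text('!')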
assert callable(function) for b in self.key_bindings: if b.handler == function: self.key_bindings.remove(b) self._clear_cache() return # No key binding found for this function. Raise ValueError. raise ValueError('Binding not found: %r' % (function, ))
def remove_binding(self, function)
Remove a key binding. This expects a function that was given to `add_binding` method as parameter. Raises `ValueError` when the given function was not registered before.
4.271869
3.857157
1.107517
def get(): result = [] for b in self.key_bindings: if len(keys) == len(b.keys): match = True any_count = 0 for i, j in zip(b.keys, keys): if i != j and i != Keys.Any: match = False break if i == Keys.Any: any_count += 1 if match: result.append((any_count, b)) # Place bindings that have more 'Any' occurrences in them at the end. result = sorted(result, key=lambda item: -item[0]) return [item[1] for item in result] return self._get_bindings_for_keys_cache.get(keys, get)
def get_bindings_for_keys(self, keys)
Return a list of key bindings that can handle these keys. (This also returns inactive bindings, so the `filter` still has to be called to check whether they apply.) :param keys: tuple of keys.
3.231747
3.220073
1.003625
def get(): result = [] for b in self.key_bindings: if len(keys) < len(b.keys): match = True for i, j in zip(b.keys, keys): if i != j and i != Keys.Any: match = False break if match: result.append(b) return result return self._get_bindings_starting_with_keys_cache.get(keys, get)
def get_bindings_starting_with_keys(self, keys)
Return a list of key bindings that handle a key sequence starting with `keys`. (It only returns bindings whose key sequences are longer than `keys`. And like `get_bindings_for_keys`, it also includes inactive bindings.) :param keys: tuple of keys.
2.882615
2.828036
1.019299
" If the original registry was changed. Update our copy version. " expected_version = (self.registry._version, self._extra_registry._version) if self._last_version != expected_version: registry2 = Registry() # Copy all bindings from `self.registry`, adding our condition. for reg in (self.registry, self._extra_registry): for b in reg.key_bindings: registry2.key_bindings.append( _Binding( keys=b.keys, handler=b.handler, filter=self.filter & b.filter, eager=b.eager, save_before=b.save_before)) self._registry2 = registry2 self._last_version = expected_version
def _update_cache(self)
If the original registry was changed, update our copy.
6.09332
4.576965
1.331301
expected_version = ( tuple(r._version for r in self.registries) + (self._extra_registry._version, )) if self._last_version != expected_version: registry2 = Registry() for reg in self.registries: registry2.key_bindings.extend(reg.key_bindings) # Copy all bindings from `self._extra_registry`. registry2.key_bindings.extend(self._extra_registry.key_bindings) self._registry2 = registry2 self._last_version = expected_version
def _update_cache(self)
If one of the original registries was changed, update our merged version.
4.274048
3.739888
1.142828
if not config_dict: config_file = find_config_file(config_path) if not config_file: return cls({}, credstore_env) try: with open(config_file) as f: config_dict = json.load(f) except (IOError, KeyError, ValueError) as e: # Likely missing new Docker config file or it's in an # unknown format, continue to attempt to read old location # and format. log.debug(e) return cls(_load_legacy_config(config_file), credstore_env) res = {} if config_dict.get('auths'): log.debug("Found 'auths' section") res.update({ 'auths': cls.parse_auth( config_dict.pop('auths'), raise_on_error=True ) }) if config_dict.get('credsStore'): log.debug("Found 'credsStore' section") res.update({'credsStore': config_dict.pop('credsStore')}) if config_dict.get('credHelpers'): log.debug("Found 'credHelpers' section") res.update({'credHelpers': config_dict.pop('credHelpers')}) if res: return cls(res, credstore_env) log.debug( "Couldn't find auth-related section ; attempting to interpret " "as auth-only file" ) return cls({'auths': cls.parse_auth(config_dict)}, credstore_env)
def load_config(cls, config_path, config_dict, credstore_env=None)
Loads authentication data from a Docker configuration file in the given root directory, or from the given path if config_path is passed. Lookup priority: explicit config_path parameter > DOCKER_CONFIG environment variable > ~/.docker/config.json > ~/.dockercfg
2.952117
2.860577
1.032
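A quick way to see which branch the loader takes is to feed it a dict shaped like ~/.docker/config.json. The snippet below only sketches the section-detection step on a plain dict; the sample registry entry and credential store name are illustrative, not taken from a real config.

import json

# Hypothetical contents of a new-style ~/.docker/config.json.
raw = json.loads('''
{
  "auths": {
    "https://index.docker.io/v1/": {"auth": "dXNlcjpwYXNz"}
  },
  "credsStore": "desktop"
}
''')

res = {}
for section in ("auths", "credsStore", "credHelpers"):
    if raw.get(section):
        # load_config copies each recognised section into its result dict.
        res[section] = raw.pop(section)

print(sorted(res))   # ['auths', 'credsStore']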
tfutil = util.get_module('tensorflow.python.util')
if tfutil:
    return tfutil.nest.flatten(thing)
else:
    return [thing]
def nest(thing)
Use TensorFlow's nest function if available, otherwise just wrap the object in a list
5.803381
3.349418
1.732654
converted = val
typename = util.get_full_typename(val)

if util.is_matplotlib_typename(typename):
    # This handles plots with images in them because plotly doesn't support it.
    # TODO: should we handle a list of plots?
    val = util.ensure_matplotlib_figure(val)
    if any(len(ax.images) > 0 for ax in val.axes):
        PILImage = util.get_module(
            "PIL.Image", required="Logging plots with images requires pil: pip install pillow")
        buf = six.BytesIO()
        val.savefig(buf)
        val = Image(PILImage.open(buf))
    else:
        converted = util.convert_plots(val)
elif util.is_plotly_typename(typename):
    converted = util.convert_plots(val)

if isinstance(val, IterableMedia):
    val = [val]

if isinstance(val, collections.Sequence) and len(val) > 0:
    is_media = [isinstance(v, IterableMedia) for v in val]
    if all(is_media):
        cwd = wandb.run.dir if wandb.run else "."
        if step is None:
            step = "summary"
        if isinstance(val[0], Image):
            converted = Image.transform(val, cwd, "{}_{}.jpg".format(key, step))
        elif isinstance(val[0], Audio):
            converted = Audio.transform(val, cwd, key, step)
        elif isinstance(val[0], Html):
            converted = Html.transform(val, cwd, key, step)
        elif isinstance(val[0], Object3D):
            converted = Object3D.transform(val, cwd, key, step)
    elif any(is_media):
        raise ValueError(
            "Mixed media types in the same list aren't supported")
elif isinstance(val, Histogram):
    converted = Histogram.transform(val)
elif isinstance(val, Graph):
    if mode == "history":
        raise ValueError("Graphs are only supported in summary")
    converted = Graph.transform(val)
elif isinstance(val, Table):
    converted = Table.transform(val)
return converted
def val_to_json(key, val, mode="summary", step=None)
Converts a wandb datatype to its JSON representation
3.47979
3.405089
1.021938
for key, val in six.iteritems(payload):
    if isinstance(val, dict):
        payload[key] = to_json(val, mode)
    else:
        payload[key] = val_to_json(
            key, val, mode, step=payload.get("_step"))

return payload
def to_json(payload, mode="history")
Converts all values in a potentially nested dict into their JSON representation
3.283061
3.095148
1.060712
# TODO: do we want to support dimensions being at the beginning of the array?
if data.ndim == 2:
    return "L"
elif data.shape[-1] == 3:
    return "RGB"
elif data.shape[-1] == 4:
    return "RGBA"
else:
    raise ValueError(
        "Unsupported shape for image conversion %s" % list(data.shape))
def guess_mode(self, data)
Guess what type of image the np.array is representing
4.376335
3.787055
1.155604
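The mode guess is purely shape-based: 2-D arrays are treated as grayscale, and the size of the last axis decides RGB vs. RGBA. A small stand-alone check with numpy, assuming the same rules as above:

import numpy as np

def guess_mode(data):
    # Same shape rules as above: 2-D means grayscale, last axis 3/4 means RGB/RGBA.
    if data.ndim == 2:
        return "L"
    elif data.shape[-1] == 3:
        return "RGB"
    elif data.shape[-1] == 4:
        return "RGBA"
    raise ValueError("Unsupported shape %s" % list(data.shape))

print(guess_mode(np.zeros((32, 32))))       # L
print(guess_mode(np.zeros((32, 32, 3))))    # RGB
print(guess_mode(np.zeros((32, 32, 4))))    # RGBA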
np = util.get_module(
    "numpy", required="wandb.Image requires numpy if not supplying PIL Images: pip install numpy")

# I think it's better to check the image range vs the data type, since many
# image libraries will return floats between 0 and 255

# some images have range -1...1 or 0-1
dmin = np.min(data)
if dmin < 0:
    data = (data - np.min(data)) / np.ptp(data)
if np.max(data) <= 1.0:
    data = (data * 255).astype(np.int32)

# assert issubclass(data.dtype.type, np.integer), 'Illegal image format.'
return data.clip(0, 255).astype(np.uint8)
def to_uint8(self, data)
Converts floating-point images in the range [0, 1] and integer images in the range [0, 255] to uint8, clipping if necessary.
5.341881
5.328871
1.002441
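The normalisation steps are easy to verify on a small array. This sketch reimplements the same min-shift and rescale logic with plain numpy so the intermediate values are visible; it is a simplified stand-in, not the library function itself.

import numpy as np

def to_uint8(data):
    # Shift images containing negative values into [0, 1] first.
    if np.min(data) < 0:
        data = (data - np.min(data)) / np.ptp(data)
    # Then scale [0, 1] floats up to [0, 255].
    if np.max(data) <= 1.0:
        data = (data * 255).astype(np.int32)
    return np.clip(data, 0, 255).astype(np.uint8)

print(to_uint8(np.array([-1.0, 0.0, 1.0])))   # [  0 127 255]
print(to_uint8(np.array([0.0, 0.5, 1.0])))    # [  0 127 255]
print(to_uint8(np.array([0, 128, 300])))      # [  0 128 255]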
from PIL import Image as PILImage
base = os.path.join(out_dir, "media", "images")
width, height = images[0].image.size
num_images_to_log = len(images)

if num_images_to_log > Image.MAX_IMAGES:
    logging.warn(
        "The maximum number of images to store per step is %i." % Image.MAX_IMAGES)
    num_images_to_log = Image.MAX_IMAGES

if width * num_images_to_log > Image.MAX_DIMENSION:
    max_images_by_dimension = Image.MAX_DIMENSION // width
    logging.warn("The maximum total width for all images in a collection is 65500, or {} images, each with a width of {} pixels. Only logging the first {} images.".format(max_images_by_dimension, width, max_images_by_dimension))
    num_images_to_log = max_images_by_dimension

total_width = width * num_images_to_log
sprite = PILImage.new(
    mode='RGB',
    size=(total_width, height),
    color=(0, 0, 0))

for i, image in enumerate(images[:num_images_to_log]):
    location = width * i
    sprite.paste(image.image, (location, 0))

util.mkdir_exists_ok(base)
sprite.save(os.path.join(base, fname), transparency=0)

meta = {"width": width, "height": height,
        "count": num_images_to_log, "_type": "images"}

# TODO: hacky way to enable image grouping for now
grouping = images[0].grouping
if grouping:
    meta["grouping"] = grouping

captions = Image.captions(images[:num_images_to_log])
if captions:
    meta["captions"] = captions

return meta
def transform(images, out_dir, fname)
Combines a list of images into a single sprite, returning meta information
3.086898
3.024617
1.020591
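The sprite layout is a single horizontal strip: the total width is the tile width times the number of logged images, and each image is pasted at x = width * i. A small PIL-only sketch of that tiling with synthetic solid-colour tiles (no wandb involved):

from PIL import Image as PILImage

tiles = [PILImage.new('RGB', (16, 16), color=c)
         for c in [(255, 0, 0), (0, 255, 0), (0, 0, 255)]]

width, height = tiles[0].size
sprite = PILImage.new('RGB', (width * len(tiles), height), color=(0, 0, 0))

for i, tile in enumerate(tiles):
    # Each tile lands at x = width * i, the same layout as transform() above.
    sprite.paste(tile, (width * i, 0))

print(sprite.size)   # (48, 16)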
kw.setdefault('enable_abort_and_exit_bindings', True)
kw.setdefault('enable_search', True)
kw.setdefault('enable_auto_suggest_bindings', True)

return cls(**kw)
def for_prompt(cls, **kw)
Create a ``KeyBindingManager`` with the defaults for an input prompt. This activates the key bindings for abort/exit (Ctrl-C/Ctrl-D), incremental search and auto suggestions. (Not for full screen applications.)
5.592319
2.901961
1.927083
assert isinstance(app, Application)
assert callback is None or callable(callback)

self.cli = CommandLineInterface(
    application=app,
    eventloop=self.eventloop,
    output=self.vt100_output)

self.callback = callback

# Create a parser, and parser callbacks.
cb = self.cli.create_eventloop_callbacks()
inputstream = InputStream(cb.feed_key)

# Input decoder for stdin. (Required when working with multibyte
# characters, like chinese input.)
stdin_decoder_cls = getincrementaldecoder(self.encoding)
stdin_decoder = [stdin_decoder_cls()]  # nonlocal

# Tell the CLI that it's running. We don't start it through the run()
# call, but will still want _redraw() to work.
self.cli._is_running = True

def data_received(data):
    assert isinstance(data, binary_type)

    try:
        result = stdin_decoder[0].decode(data)
        inputstream.feed(result)
    except UnicodeDecodeError:
        stdin_decoder[0] = stdin_decoder_cls()
        return ''

def size_received(rows, columns):
    self.size = Size(rows=rows, columns=columns)
    cb.terminal_size_changed()

self.parser = TelnetProtocolParser(data_received, size_received)
def set_application(self, app, callback=None)
Set the ``CommandLineInterface`` for this connection, built from the given application. (This can be replaced any time.) :param app: Application instance. :param callback: Callable that takes the result of the CLI.
6.47427
6.502821
0.995609
assert isinstance(data, binary_type)
self.parser.feed(data)

# Render again.
self.cli._redraw()

# When a return value has been set (enter was pressed), handle command.
if self.cli.is_returning:
    try:
        return_value = self.cli.return_value()
    except (EOFError, KeyboardInterrupt) as e:
        # Control-D or Control-C was pressed.
        logger.info('%s, closing connection.', type(e).__name__)
        self.close()
        return

    # Handle CLI command
    self._handle_command(return_value)
def feed(self, data)
Handler for incoming data. (Called by TelnetServer.)
6.322832
5.701617
1.108954
logger.info('Handle command %r', command)

def in_executor():
    self.handling_command = True
    try:
        if self.callback is not None:
            self.callback(self, command)
    finally:
        self.server.call_from_executor(done)

def done():
    self.handling_command = False

    # Reset state and draw again. (If the connection is still open --
    # the application could have called TelnetConnection.close().)
    if not self.closed:
        self.cli.reset()
        self.cli.buffers[DEFAULT_BUFFER].reset()
        self.cli.renderer.request_absolute_cursor_position()
        self.vt100_output.flush()
        self.cli._redraw()

self.server.run_in_executor(in_executor)
def _handle_command(self, command)
Handle command. This will run in a separate thread, in order not to block the event loop.
6.668609
6.458508
1.032531
self.vt100_output.erase_screen()
self.vt100_output.cursor_goto(0, 0)
self.vt100_output.flush()
def erase_screen(self)
Erase output screen.
3.366922
3.096891
1.087194
assert isinstance(data, text_type)

# When data is sent back to the client, we should replace the line
# endings. (We didn't allocate a real pseudo terminal, and the telnet
# connection is raw, so we are responsible for inserting \r.)
self.stdout.write(data.replace('\n', '\r\n'))
self.stdout.flush()
def send(self, data)
Send text to the client.
10.158184
9.479552
1.071589
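Because the connection is raw, bare '\n' has to become '\r\n' before it reaches the client. The replacement is a plain string substitution, which is worth keeping in mind if the text could already contain '\r\n'; the tiny sketch below just demonstrates the substitution itself.

data = "line one\nline two\n"
print(repr(data.replace('\n', '\r\n')))
# 'line one\r\nline two\r\n'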
self.application.client_leaving(self)

self.conn.close()
self.closed = True
def close(self)
Close the connection.
12.858676
10.411287
1.235071
# Flush all the pipe content.
os.read(self._schedule_pipe[0], 1024)

# Process calls from executor.
calls_from_executor, self._calls_from_executor = self._calls_from_executor, []
for c in calls_from_executor:
    c()
def _process_callbacks(self)
Process callbacks from `call_from_executor` in eventloop.
6.590851
5.266951
1.25136
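The `_schedule_pipe` used here is the classic self-pipe trick: writing a byte to a pipe wakes up a blocking `select()` call, so callbacks queued from other threads get run on the event loop thread. A minimal stand-alone sketch of that mechanism on POSIX; the names are illustrative, not the server's internals.

import os
import select
import threading

read_fd, write_fd = os.pipe()
callbacks = []

def call_from_executor(callback):
    # Queue the callback, then write one byte so select() wakes up.
    callbacks.append(callback)
    os.write(write_fd, b'x')

threading.Thread(target=call_from_executor,
                 args=(lambda: print("ran on event loop"),)).start()

ready, _, _ = select.select([read_fd], [], [])
if read_fd in ready:
    os.read(read_fd, 1024)            # flush the wake-up bytes
    pending, callbacks = callbacks, []
    for cb in pending:
        cb()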
listen_socket = self.create_socket(self.host, self.port)
logger.info('Listening for telnet connections on %s port %r', self.host, self.port)

try:
    while True:
        # Remove closed connections.
        self.connections = set([c for c in self.connections if not c.closed])

        # Ignore connections handling commands.
        connections = set([c for c in self.connections if not c.handling_command])

        # Wait for next event.
        read_list = (
            [listen_socket, self._schedule_pipe[0]] +
            [c.conn for c in connections])

        read, _, _ = select.select(read_list, [], [])

        for s in read:
            # When the socket itself is ready, accept a new connection.
            if s == listen_socket:
                self._accept(listen_socket)

            # If we receive something on our "call_from_executor" pipe, process
            # these callbacks in a thread safe way.
            elif s == self._schedule_pipe[0]:
                self._process_callbacks()

            # Handle incoming data on socket.
            else:
                self._handle_incoming_data(s)
finally:
    listen_socket.close()
def run(self)
Run the eventloop for the telnet server.
4.482738
4.221823
1.061801
conn, addr = listen_socket.accept()
connection = TelnetConnection(conn, addr, self.application, self,
                              encoding=self.encoding)
self.connections.add(connection)

logger.info('New connection %r %r', *addr)
def _accept(self, listen_socket)
Accept new incoming connection.
4.608796
4.155015
1.109213
connection = [c for c in self.connections if c.conn == conn][0]
data = conn.recv(1024)

if data:
    connection.feed(data)
else:
    self.connections.remove(connection)
def _handle_incoming_data(self, conn)
Handle incoming data on socket.
2.860238
2.610377
1.095718
try:
    return self.client.execute(*args, **kwargs)
except requests.exceptions.HTTPError as err:
    res = err.response
    logger.error("%s response executing GraphQL." % res.status_code)
    logger.error(res.text)
    self.display_gorilla_error_if_found(res)
    six.reraise(*sys.exc_info())
def execute(self, *args, **kwargs)
Wrapper around execute that logs in cases of failure.
5.066567
4.699701
1.078062
try:
    import pkg_resources

    installed_packages = [d for d in iter(pkg_resources.working_set)]
    installed_packages_list = sorted(
        ["%s==%s" % (i.key, i.version) for i in installed_packages]
    )
    with open(os.path.join(out_dir, 'requirements.txt'), 'w') as f:
        f.write("\n".join(installed_packages_list))
except Exception as e:
    logger.error("Error saving pip packages")
def save_pip(self, out_dir)
Saves the current working set of pip packages to requirements.txt
2.374821
2.27288
1.044851
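The requirements list is just the sorted working set formatted as name==version lines. The same listing can be reproduced interactively, assuming setuptools' pkg_resources is importable (on newer Pythons importlib.metadata is the usual replacement, which is not what the code above uses):

import pkg_resources

installed = sorted(
    "%s==%s" % (dist.key, dist.version)
    for dist in pkg_resources.working_set
)
print("\n".join(installed[:5]))   # first few pinned requirements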
if not self.git.enabled:
    return False

try:
    root = self.git.root
    if self.git.dirty:
        patch_path = os.path.join(out_dir, 'diff.patch')
        if self.git.has_submodule_diff:
            with open(patch_path, 'wb') as patch:
                # we diff against HEAD to ensure we get changes in the index
                subprocess.check_call(
                    ['git', 'diff', '--submodule=diff', 'HEAD'],
                    stdout=patch, cwd=root, timeout=5)
        else:
            with open(patch_path, 'wb') as patch:
                subprocess.check_call(
                    ['git', 'diff', 'HEAD'], stdout=patch, cwd=root, timeout=5)

    upstream_commit = self.git.get_upstream_fork_point()
    if upstream_commit and upstream_commit != self.git.repo.head.commit:
        sha = upstream_commit.hexsha
        upstream_patch_path = os.path.join(
            out_dir, 'upstream_diff_{}.patch'.format(sha))
        if self.git.has_submodule_diff:
            with open(upstream_patch_path, 'wb') as upstream_patch:
                subprocess.check_call(
                    ['git', 'diff', '--submodule=diff', sha],
                    stdout=upstream_patch, cwd=root, timeout=5)
        else:
            with open(upstream_patch_path, 'wb') as upstream_patch:
                subprocess.check_call(
                    ['git', 'diff', sha],
                    stdout=upstream_patch, cwd=root, timeout=5)
except (subprocess.CalledProcessError, subprocess.TimeoutExpired):
    logger.error('Error generating diff')
def save_patches(self, out_dir)
Save the current state of this repository to one or more patches. Makes one patch against HEAD and another one against the most recent commit that occurs in an upstream branch. This way we can be robust to history editing as long as the user never does "push -f" to break history on an upstream branch. Writes the first patch to <out_dir>/diff.patch and the second to <out_dir>/upstream_diff_<commit_id>.patch. Args: out_dir (str): Directory to write the patch files.
2.162389
2.030055
1.065188
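The patches themselves are ordinary `git diff` output redirected to a file. A trimmed-down sketch of the HEAD patch, meant to be run inside any git repository; the output path is illustrative.

import subprocess

# Write uncommitted changes (including staged ones) to diff.patch,
# mirroring the `git diff HEAD` call above.
with open('diff.patch', 'wb') as patch:
    subprocess.check_call(['git', 'diff', 'HEAD'], stdout=patch, timeout=5)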
if not self._settings:
    self._settings = self.default_settings.copy()
    section = section or self._settings['section']
    try:
        if section in self.settings_parser.sections():
            for option in self.settings_parser.options(section):
                self._settings[option] = self.settings_parser.get(
                    section, option)
    except configparser.InterpolationSyntaxError:
        print("WARNING: Unable to parse settings file")
    self._settings["project"] = env.get_project(
        self._settings.get("project"))
    self._settings["entity"] = env.get_entity(
        self._settings.get("entity"))
    self._settings["base_url"] = env.get_base_url(
        self._settings.get("base_url"))
    self._settings["ignore_globs"] = env.get_ignore(
        self._settings.get("ignore_globs")
    )

return self._settings if key is None else self._settings[key]
def settings(self, key=None, section=None)
The settings overridden from the wandb/settings file. Args: key (str, optional): If provided only this setting is returned section (str, optional): If provided this section of the setting file is used, defaults to "default" Returns: A dict with the current settings { "entity": "models", "base_url": "https://api.wandb.ai", "project": None }
2.518799
2.299402
1.095415
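The override order is: built-in defaults, then the chosen section of the settings file, then environment variables. A minimal sketch of that layering with configparser; the section name, option names and WANDB_PROJECT variable are assumptions for illustration, not a claim about wandb's actual file layout.

import configparser
import os

defaults = {"base_url": "https://api.wandb.ai", "project": None, "entity": None}

parser = configparser.ConfigParser()
parser.read_string("[default]\nproject = demo-project\n")

settings = dict(defaults)
if parser.has_section("default"):
    # Settings-file values override the built-in defaults.
    settings.update(parser.items("default"))

# Environment variables override the settings file.
settings["project"] = os.environ.get("WANDB_PROJECT", settings["project"])
print(settings["project"])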
query = gql('''
query Models($entity: String!) {
    models(first: 10, entityName: $entity) {
        edges {
            node {
                id
                name
                description
            }
        }
    }
}
''')
return self._flatten_edges(self.gql(query, variable_values={
    'entity': entity or self.settings('entity')})['models'])
def list_projects(self, entity=None)
Lists projects in W&B scoped by entity. Args: entity (str, optional): The entity to scope this project to. Returns: [{"id","name","description"}]
4.55311
4.49929
1.011962