Columns and dtypes:
code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
output = self.app.output

# WORKAROUND: Due to a bug in Jedi, the current directory is removed
# from sys.path. See: https://github.com/davidhalter/jedi/issues/1148
if '' not in sys.path:
    sys.path.insert(0, '')

def compile_with_flags(code, mode):
    " Compile code with the right compiler flags. "
    return compile(code, '<stdin>', mode,
                   flags=self.get_compiler_flags(),
                   dont_inherit=True)

if line.lstrip().startswith('\x1a'):
    # When the input starts with Ctrl-Z, quit the REPL.
    self.app.exit()
elif line.lstrip().startswith('!'):
    # Run as shell command.
    os.system(line[1:])
else:
    # Try eval first.
    try:
        code = compile_with_flags(line, 'eval')
        result = eval(code, self.get_globals(), self.get_locals())

        locals = self.get_locals()
        locals['_'] = locals['_%i' % self.current_statement_index] = result

        if result is not None:
            out_prompt = self.get_output_prompt()

            try:
                result_str = '%r\n' % (result, )
            except UnicodeDecodeError:
                # In Python 2: `__repr__` should return a bytestring,
                # so to put it in a unicode context could raise an
                # exception that the 'ascii' codec can't decode certain
                # characters. Decode as utf-8 in that case.
                result_str = '%s\n' % repr(result).decode('utf-8')

            # Align every line to the first one.
            line_sep = '\n' + ' ' * fragment_list_width(out_prompt)
            result_str = line_sep.join(result_str.splitlines()) + '\n'

            # Write output tokens.
            if self.enable_syntax_highlighting:
                formatted_output = merge_formatted_text([
                    out_prompt,
                    PygmentsTokens(list(_lex_python_result(result_str))),
                ])
            else:
                formatted_output = FormattedText(
                    out_prompt + [('', result_str)])

            print_formatted_text(
                formatted_output,
                style=self._current_style,
                style_transformation=self.style_transformation,
                include_default_pygments_style=False)

    # If not a valid `eval` expression, run using `exec` instead.
    except SyntaxError:
        code = compile_with_flags(line, 'exec')
        six.exec_(code, self.get_globals(), self.get_locals())

output.flush()
def _execute(self, line)
Evaluate the line and print the result.
4.202765
4.16693
1.0086
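The method above relies on the classic try-eval-then-exec dispatch: compile in 'eval' mode first, and fall back to 'exec' only when the input is not an expression. A minimal standalone sketch of that pattern follows; the `run` helper and `namespace` dict are illustrative, not part of ptpython.

namespace = {}

def run(line):
    try:
        code = compile(line, '<stdin>', 'eval')
    except SyntaxError:
        # Not an expression: fall back to statement execution.
        exec(compile(line, '<stdin>', 'exec'), namespace)
    else:
        result = eval(code, namespace)
        if result is not None:
            namespace['_'] = result
            print(repr(result))

run('x = 21 * 2')   # statement -> exec
run('x + 0')        # expression -> eval, prints 42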
print('You should be able to read and update the "counter[0]" variable from this shell.')
try:
    yield from embed(globals=globals(), return_asyncio_coroutine=True, patch_stdout=True)
except EOFError:
    # Stop the loop when quitting the repl. (Ctrl-D press.)
    loop.stop()
def interactive_shell()
Coroutine that starts a Python REPL from which we can access the global counter variable.
15.154915
11.96568
1.266532
# When the input starts with Ctrl-Z, always accept. This means EOF in a
# Python REPL.
if document.text.startswith('\x1a'):
    return

try:
    if self.get_compiler_flags:
        flags = self.get_compiler_flags()
    else:
        flags = 0

    compile(document.text, '<input>', 'exec', flags=flags, dont_inherit=True)
except SyntaxError as e:
    # Note, the 'or 1' for offset is required because Python 2.7
    # gives `None` as offset in case of '4=4' as input. (This looks
    # fixed in Python 3.)
    index = document.translate_row_col_to_index(e.lineno - 1, (e.offset or 1) - 1)
    raise ValidationError(index, 'Syntax Error')
except TypeError as e:
    # e.g. "compile() expected string without null bytes"
    raise ValidationError(0, str(e))
except ValueError as e:
    # In Python 2, compiling "\x9" (an invalid escape sequence) raises
    # ValueError instead of SyntaxError.
    raise ValidationError(0, 'Syntax Error: %s' % e)
def validate(self, document)
Check input for Python syntax errors.
5.947856
5.72693
1.038577
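The validator above is built entirely on compile() raising SyntaxError with position information. A small sketch of that idea outside prompt_toolkit; the `check` helper is hypothetical.

def check(source):
    try:
        compile(source, '<input>', 'exec')
    except SyntaxError as e:
        return 'line %s, col %s: %s' % (e.lineno, e.offset, e.msg)
    return 'OK'

print(check('x = 1'))   # OK
print(check('4 = 4'))   # reports the position of the syntax error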
loop = asyncio.get_event_loop()

# Namespace exposed in the REPL.
environ = {'hello': 'world'}

# Start SSH server.
def create_server():
    return MySSHServer(lambda: environ)

print('Listening on :%i' % port)
print('To connect, do "ssh localhost -p %i"' % port)

loop.run_until_complete(
    asyncssh.create_server(create_server, '', port,
                           server_host_keys=['/etc/ssh/ssh_host_dsa_key']))

# Run eventloop.
loop.run_forever()
def main(port=8222)
Example that starts the REPL through an SSH server.
4.664809
4.336269
1.075765
if self._chan is None:
    return Size(rows=20, columns=79)
else:
    width, height, pixwidth, pixheight = self._chan.get_terminal_size()
    return Size(rows=height, columns=width)
def _get_size(self)
Callable that returns the current `Size`, required by Vt100_Output.
5.576731
4.731663
1.178599
self._chan = chan

# Run REPL interface.
f = asyncio.ensure_future(self.cli.run_async())

# Close channel when done.
def done(_):
    chan.close()
    self._chan = None

f.add_done_callback(done)
def connection_made(self, chan)
Client connected; run the REPL in a coroutine.
5.430538
4.29355
1.264813
# Pop keyword-only arguments. (We cannot use the syntax from the
# signature. Otherwise, Python2 will give a syntax error message when
# installing.)
sep = kw.pop('sep', ' ')
end = kw.pop('end', '\n')
_ = kw.pop('file', None)
assert not kw, 'Too many keyword-only arguments'

data = sep.join(map(str, data))
self._chan.write(data + end)
def _print(self, *data, **kw)
_print(self, *data, sep=' ', end='\n', file=None) Alternative 'print' function that prints back into the SSH channel.
6.632292
5.913162
1.121615
# Show function signature (bool).
repl.show_signature = True

# Show docstring (bool).
repl.show_docstring = False

# Show the "[Meta+Enter] Execute" message when pressing [Enter] only
# inserts a newline instead of executing the code.
repl.show_meta_enter_message = True

# Show completions. (NONE, POP_UP, MULTI_COLUMN or TOOLBAR)
repl.completion_visualisation = CompletionVisualisation.POP_UP

# When CompletionVisualisation.POP_UP has been chosen, use this
# scroll_offset in the completion menu.
repl.completion_menu_scroll_offset = 0

# Show line numbers (when the input contains multiple lines.)
repl.show_line_numbers = False

# Show status bar.
repl.show_status_bar = True

# When the sidebar is visible, also show the help text.
repl.show_sidebar_help = True

# Highlight matching parenthesis.
repl.highlight_matching_parenthesis = True

# Line wrapping. (Instead of horizontal scrolling.)
repl.wrap_lines = True

# Mouse support.
repl.enable_mouse_support = True

# Complete while typing. (Don't require tab before the
# completion menu is shown.)
repl.complete_while_typing = True

# Vi mode.
repl.vi_mode = False

# Paste mode. (When True, don't insert whitespace after new line.)
repl.paste_mode = False

# Use the classic prompt. (Display '>>>' instead of 'In [1]'.)
repl.prompt_style = 'classic'  # 'classic' or 'ipython'

# Don't insert a blank line after the output.
repl.insert_blank_line_after_output = False

# History Search.
# When True, going back in history will filter the history on the records
# starting with the current input. (Like readline.)
# Note: When enabled, please disable the `complete_while_typing` option;
#       otherwise, when there is a completion available, the arrows will
#       browse through the available completions instead of the history.
repl.enable_history_search = False

# Enable auto suggestions. (Pressing right arrow will complete the input,
# based on the history.)
repl.enable_auto_suggest = False

# Enable open-in-editor. Pressing C-X C-E in emacs mode or 'v' in
# Vi navigation mode will open the input in the current editor.
repl.enable_open_in_editor = True

# Enable system prompt. Pressing meta-! will display the system prompt.
# Also enables Control-Z suspend.
repl.enable_system_bindings = True

# Ask for confirmation on exit.
repl.confirm_exit = True

# Enable input validation. (Don't try to execute when the input contains
# syntax errors.)
repl.enable_input_validation = True

# Use this colorscheme for the code.
repl.use_code_colorscheme('pastie')

# Set color depth (keep in mind that not all terminals support true color).
# repl.color_depth = 'DEPTH_1_BIT'   # Monochrome.
# repl.color_depth = 'DEPTH_4_BIT'   # ANSI colors only.
repl.color_depth = 'DEPTH_8_BIT'     # The default, 256 colors.
# repl.color_depth = 'DEPTH_24_BIT'  # True color.

# Syntax.
repl.enable_syntax_highlighting = True

# Install a custom colorscheme named 'my-colorscheme' and use it.
# Add a custom key binding for PDB.
# Typing ControlE twice should also execute the current command.
# (Alternative for Meta-Enter.)
# Typing 'jj' in Vi Insert mode should send escape. (Go back to navigation
# mode.)
# Custom key binding for some simple autocorrection while typing.
def configure(repl)
Configuration method. This is called during the start-up of ptpython. :param repl: `PythonRepl` instance.
6.052944
6.022914
1.004986
stack = []

# Ignore braces inside strings.
text = re.sub(r'''('[^']*'|"[^"]*")''', '', text)  # XXX: handle escaped quotes!

for c in reversed(text):
    if c in '])}':
        stack.append(c)
    elif c in '[({':
        if stack:
            if ((c == '[' and stack[-1] == ']') or
                    (c == '{' and stack[-1] == '}') or
                    (c == '(' and stack[-1] == ')')):
                stack.pop()
        else:
            # Opening bracket for which we haven't seen a closing one.
            return True

return False
def has_unclosed_brackets(text)
Scan backwards from the end of the string; return True if we find an opening bracket for which we haven't seen a closing one yet.
4.092711
3.548954
1.153216
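A short usage sketch for the bracket scanner above, assuming the function is defined in scope and `re` is imported; the inputs are illustrative.

print(has_unclosed_brackets('foo(bar[1]'))    # True: '(' is never closed
print(has_unclosed_brackets('foo(bar[1])'))   # False: everything balanced
print(has_unclosed_brackets('"(" + x'))       # False: bracket inside a string is ignored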
def ends_in_multiline_string():
    delims = _multiline_string_delims.findall(document.text)
    opening = None
    for delim in delims:
        if opening is None:
            opening = delim
        elif delim == opening:
            opening = None
    return bool(opening)

if '\n' in document.text or ends_in_multiline_string():
    return True

def line_ends_with_colon():
    return document.current_line.rstrip()[-1:] == ':'

# If we just typed a colon, or still have open brackets, always insert a real newline.
if line_ends_with_colon() or \
        (document.is_cursor_at_the_end and
         has_unclosed_brackets(document.text_before_cursor)) or \
        document.text.startswith('@'):
    return True

# If the character before the cursor is a backslash (line continuation
# char), insert a new line.
elif document.text_before_cursor[-1:] == '\\':
    return True

return False
def document_is_multiline_python(document)
Determine whether this is a multiline Python document.
4.572184
4.534418
1.008329
def handle_if_mouse_down(mouse_event):
    if mouse_event.event_type == MouseEventType.MOUSE_DOWN:
        return handler(mouse_event)
    else:
        return NotImplemented

return handle_if_mouse_down
def if_mousedown(handler)
Decorator for mouse handlers. Only handle the event when the user pressed the mouse down. (When applied to a token list, scroll events will bubble up and are handled by the Window.)
3.120546
3.180227
0.981234
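A usage sketch for the decorator above. The fragment tuple follows prompt_toolkit's (style, text, handler) convention; `select_item` and the fragment list are illustrative.

@if_mousedown
def select_item(mouse_event):
    # Runs only for MOUSE_DOWN; scroll events return NotImplemented
    # and bubble up to the surrounding Window.
    print('clicked at', mouse_event.position)

fragments = [('class:label', 'Click me', select_item)]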
assert isinstance(title, six.text_type)
assert isinstance(body, Container)

return Frame(body=body, title=title)
def _create_popup_window(title, body)
Return the layout for a pop-up window. It consists of a title bar showing the `title` text, and a body layout. The window is surrounded by borders.
5.557076
4.882661
1.138124
" Display/hide help. " help_buffer_control = history.history_layout.help_buffer_control if history.app.layout.current_control == help_buffer_control: history.app.layout.focus_previous() else: history.app.layout.current_control = help_buffer_control
def _toggle_help(history)
Display/hide help.
5.04454
4.760245
1.059723
" Toggle focus between left/right window. " current_buffer = history.app.current_buffer layout = history.history_layout.layout if current_buffer == history.history_buffer: layout.current_control = history.history_layout.default_buffer_control elif current_buffer == history.default_buffer: layout.current_control = history.history_layout.history_buffer_control
def _select_other_window(history)
Toggle focus between left/right window.
5.216813
4.164633
1.252646
bindings = KeyBindings()
handle = bindings.add

@handle(' ', filter=has_focus(history.history_buffer))
def _(event):
    b = event.current_buffer
    line_no = b.document.cursor_position_row

    if not history_mapping.history_lines:
        # If we've no history, then nothing to do.
        return

    if line_no in history_mapping.selected_lines:
        # Remove line.
        history_mapping.selected_lines.remove(line_no)
        history_mapping.update_default_buffer()
    else:
        # Add line.
        history_mapping.selected_lines.add(line_no)
        history_mapping.update_default_buffer()

        # Update cursor position.
        default_buffer = history.default_buffer
        default_lineno = sorted(history_mapping.selected_lines).index(line_no) + \
            history_mapping.result_line_offset
        default_buffer.cursor_position = \
            default_buffer.document.translate_row_col_to_index(default_lineno, 0)

    # Also move the cursor to the next line. (This way they can hold
    # space to select a region.)
    b.cursor_position = b.document.translate_row_col_to_index(line_no + 1, 0)

@handle(' ', filter=has_focus(DEFAULT_BUFFER))
@handle('delete', filter=has_focus(DEFAULT_BUFFER))
@handle('c-h', filter=has_focus(DEFAULT_BUFFER))
def _(event):
    b = event.current_buffer
    line_no = b.document.cursor_position_row - history_mapping.result_line_offset

    if line_no >= 0:
        try:
            history_lineno = sorted(history_mapping.selected_lines)[line_no]
        except IndexError:
            pass  # When `selected_lines` is an empty set.
        else:
            history_mapping.selected_lines.remove(history_lineno)
            history_mapping.update_default_buffer()

help_focussed = has_focus(history.help_buffer)
main_buffer_focussed = has_focus(history.history_buffer) | has_focus(history.default_buffer)

@handle('tab', filter=main_buffer_focussed)
@handle('c-x', filter=main_buffer_focussed, eager=True)  # Eager: ignore the Emacs [Ctrl-X Ctrl-X] binding.
@handle('c-w', filter=main_buffer_focussed)
def _(event):
    " Select other window. "
    _select_other_window(history)

@handle('f4')
def _(event):
    " Switch between Emacs/Vi mode. "
    python_input.vi_mode = not python_input.vi_mode

@handle('f1')
def _(event):
    " Display/hide help. "
    _toggle_help(history)

@handle('enter', filter=help_focussed)
@handle('c-c', filter=help_focussed)
@handle('c-g', filter=help_focussed)
@handle('escape', filter=help_focussed)
def _(event):
    " Leave help. "
    event.app.layout.focus_previous()

@handle('q', filter=main_buffer_focussed)
@handle('f3', filter=main_buffer_focussed)
@handle('c-c', filter=main_buffer_focussed)
@handle('c-g', filter=main_buffer_focussed)
def _(event):
    " Cancel and go back. "
    event.app.exit(result=None)

@handle('enter', filter=main_buffer_focussed)
def _(event):
    " Accept input. "
    event.app.exit(result=history.default_buffer.text)

enable_system_bindings = Condition(lambda: python_input.enable_system_bindings)

@handle('c-z', filter=enable_system_bindings)
def _(event):
    " Suspend to background. "
    event.app.suspend_to_background()

return bindings
def create_key_bindings(history, python_input, history_mapping)
Key bindings.
2.570587
2.564332
1.002439
lines = []

# Original text, before cursor.
if self.original_document.text_before_cursor:
    lines.append(self.original_document.text_before_cursor)

# Selected entries from the history.
for line_no in sorted(self.selected_lines):
    lines.append(self.history_lines[line_no])

# Original text, after cursor.
if self.original_document.text_after_cursor:
    lines.append(self.original_document.text_after_cursor)

# Create `Document` with cursor at the right position.
text = '\n'.join(lines)
if cursor_pos is not None and cursor_pos > len(text):
    cursor_pos = len(text)
return Document(text, cursor_pos)
def get_new_document(self, cursor_pos=None)
Create a `Document` instance that contains the resulting text.
2.692167
2.522369
1.067317
# Only when this buffer has the focus.
if self.app.current_buffer == self.default_buffer:
    try:
        line_no = self.default_buffer.document.cursor_position_row - \
            self.history_mapping.result_line_offset

        if line_no < 0:  # When the cursor is above the inserted region.
            raise IndexError

        history_lineno = sorted(self.history_mapping.selected_lines)[line_no]
    except IndexError:
        pass
    else:
        self.history_buffer.cursor_position = \
            self.history_buffer.document.translate_row_col_to_index(history_lineno, 0)
def _default_buffer_pos_changed(self, _)
When the cursor position in the default buffer changes, synchronize with the history buffer.
4.555064
3.908192
1.165517
# Only when this buffer has the focus.
if self.app.current_buffer == self.history_buffer:
    line_no = self.history_buffer.document.cursor_position_row

    if line_no in self.history_mapping.selected_lines:
        default_lineno = sorted(self.history_mapping.selected_lines).index(line_no) + \
            self.history_mapping.result_line_offset

        self.default_buffer.cursor_position = \
            self.default_buffer.document.translate_row_col_to_index(default_lineno, 0)
def _history_buffer_pos_changed(self, _)
When the cursor position in the history buffer changes, synchronize with the default buffer.
3.982614
3.653438
1.0901
def get_text_fragments():
    tokens = []

    def append_category(category):
        tokens.extend([
            ('class:sidebar', ' '),
            ('class:sidebar.title', ' %-36s' % category.title),
            ('class:sidebar', '\n'),
        ])

    def append(index, label, status):
        selected = index == python_input.selected_option_index

        @if_mousedown
        def select_item(mouse_event):
            python_input.selected_option_index = index

        @if_mousedown
        def goto_next(mouse_event):
            " Select item and go to next value. "
            python_input.selected_option_index = index
            option = python_input.selected_option
            option.activate_next()

        sel = ',selected' if selected else ''

        tokens.append(('class:sidebar' + sel, ' >' if selected else ' '))
        tokens.append(('class:sidebar.label' + sel, '%-24s' % label, select_item))
        tokens.append(('class:sidebar.status' + sel, ' ', select_item))
        tokens.append(('class:sidebar.status' + sel, '%s' % status, goto_next))

        if selected:
            tokens.append(('[SetCursorPosition]', ''))

        tokens.append(('class:sidebar.status' + sel, ' ' * (13 - len(status)), goto_next))
        tokens.append(('class:sidebar', '<' if selected else ''))
        tokens.append(('class:sidebar', '\n'))

    i = 0
    for category in python_input.options:
        append_category(category)

        for option in category.options:
            append(i, option.title, '%s' % option.get_current_value())
            i += 1

    tokens.pop()  # Remove last newline.

    return tokens

class Control(FormattedTextControl):
    def move_cursor_down(self):
        python_input.selected_option_index += 1

    def move_cursor_up(self):
        python_input.selected_option_index -= 1

return Window(
    Control(get_text_fragments),
    style='class:sidebar',
    width=Dimension.exact(43),
    height=Dimension(min=3),
    scroll_offsets=ScrollOffsets(top=1, bottom=1))
def python_sidebar(python_input)
Create the `Layout` for the sidebar with the configurable options.
3.159056
3.148055
1.003495
def get_text_fragments():
    tokens = []

    # Show navigation info.
    tokens.extend([
        ('class:sidebar', ' '),
        ('class:sidebar.key', '[Arrows]'),
        ('class:sidebar', ' '),
        ('class:sidebar.description', 'Navigate'),
        ('class:sidebar', ' '),
        ('class:sidebar.key', '[Enter]'),
        ('class:sidebar', ' '),
        ('class:sidebar.description', 'Hide menu'),
    ])

    return tokens

return Window(
    FormattedTextControl(get_text_fragments),
    style='class:sidebar',
    width=Dimension.exact(43),
    height=Dimension.exact(1))
def python_sidebar_navigation(python_input)
Create the `Layout` showing the navigation information for the sidebar.
4.056671
3.963181
1.02359
token = 'class:sidebar.helptext'

def get_current_description():
    i = 0
    for category in python_input.options:
        for option in category.options:
            if i == python_input.selected_option_index:
                return option.description
            i += 1
    return ''

def get_help_text():
    return [(token, get_current_description())]

return ConditionalContainer(
    content=Window(
        FormattedTextControl(get_help_text),
        style=token,
        height=Dimension(min=3)),
    filter=ShowSidebar(python_input) &
        Condition(lambda: python_input.show_sidebar_help) & ~is_done)
def python_sidebar_help(python_input)
Create the `Layout` for the help text for the current item in the sidebar.
5.206222
4.947896
1.052209
def get_text_fragments():
    result = []
    append = result.append
    Signature = 'class:signature-toolbar'

    if python_input.signatures:
        sig = python_input.signatures[0]  # Always take the first one.

        append((Signature, ' '))
        try:
            append((Signature, sig.full_name))
        except IndexError:
            # Workaround for #37: https://github.com/jonathanslenders/python-prompt-toolkit/issues/37
            # See also: https://github.com/davidhalter/jedi/issues/490
            return []

        append((Signature + ',operator', '('))

        try:
            enumerated_params = enumerate(sig.params)
        except AttributeError:
            # Workaround for #136: https://github.com/jonathanslenders/ptpython/issues/136
            # AttributeError: 'Lambda' object has no attribute 'get_subscope_by_name'
            return []

        for i, p in enumerated_params:
            # Workaround for #47: 'p' is None when we hit the '*' in the signature,
            # and sig has no 'index' attribute.
            # See: https://github.com/jonathanslenders/ptpython/issues/47
            #      https://github.com/davidhalter/jedi/issues/598
            description = (p.description if p else '*')  # or '*'
            sig_index = getattr(sig, 'index', 0)

            if i == sig_index:
                # Note: we use `_Param.description` instead of
                #       `_Param.name`, that way we also get the '*' before args.
                append((Signature + ',current-name', str(description)))
            else:
                append((Signature, str(description)))
            append((Signature + ',operator', ', '))

        if sig.params:
            # Pop last comma.
            result.pop()

        append((Signature + ',operator', ')'))
        append((Signature, ' '))
    return result

return ConditionalContainer(
    content=Window(
        FormattedTextControl(get_text_fragments),
        height=Dimension.exact(1)),
    filter=
        # Show only when there is a signature
        HasSignature(python_input) &
        # And there are no completions to be shown. (would cover signature pop-up.)
        ~(has_completions & (show_completions_menu(python_input) |
                             show_multi_column_completions_menu(python_input)))
        # Signature needs to be shown.
        & ShowSignature(python_input) &
        # Not done yet.
        ~is_done)
def signature_toolbar(python_input)
Return the `Layout` for the signature.
4.974069
4.952023
1.004452
TB = 'class:status-toolbar'

@if_mousedown
def toggle_paste_mode(mouse_event):
    python_input.paste_mode = not python_input.paste_mode

@if_mousedown
def enter_history(mouse_event):
    python_input.enter_history()

def get_text_fragments():
    python_buffer = python_input.default_buffer

    result = []
    append = result.append

    append((TB, ' '))
    result.extend(get_inputmode_fragments(python_input))
    append((TB, ' '))

    # Position in history.
    append((TB, '%i/%i ' % (python_buffer.working_index + 1,
                            len(python_buffer._working_lines))))

    # Shortcuts.
    app = get_app()
    if not python_input.vi_mode and app.current_buffer == python_input.search_buffer:
        append((TB, '[Ctrl-G] Cancel search [Enter] Go to this position.'))
    elif bool(app.current_buffer.selection_state) and not python_input.vi_mode:
        # Emacs cut/copy keys.
        append((TB, '[Ctrl-W] Cut [Meta-W] Copy [Ctrl-Y] Paste [Ctrl-G] Cancel'))
    else:
        result.extend([
            (TB + ' class:key', '[F3]', enter_history),
            (TB, ' History ', enter_history),
            (TB + ' class:key', '[F6]', toggle_paste_mode),
            (TB, ' ', toggle_paste_mode),
        ])

        if python_input.paste_mode:
            append((TB + ' class:paste-mode-on', 'Paste mode (on)', toggle_paste_mode))
        else:
            append((TB, 'Paste mode', toggle_paste_mode))

    return result

return ConditionalContainer(
    content=Window(content=FormattedTextControl(get_text_fragments), style=TB),
    filter=~is_done & renderer_height_is_known &
        Condition(lambda: python_input.show_status_bar and
                          not python_input.show_exit_confirmation))
def status_bar(python_input)
Create the `Layout` for the status bar.
4.249987
4.18198
1.016262
app = get_app()

@if_mousedown
def toggle_vi_mode(mouse_event):
    python_input.vi_mode = not python_input.vi_mode

token = 'class:status-toolbar'
input_mode_t = 'class:status-toolbar.input-mode'

mode = app.vi_state.input_mode
result = []
append = result.append

append((input_mode_t, '[F4] ', toggle_vi_mode))

# InputMode
if python_input.vi_mode:
    recording_register = app.vi_state.recording_register
    if recording_register:
        append((token, ' '))
        append((token + ' class:record', 'RECORD({})'.format(recording_register)))
        append((token, ' - '))

    if bool(app.current_buffer.selection_state):
        if app.current_buffer.selection_state.type == SelectionType.LINES:
            append((input_mode_t, 'Vi (VISUAL LINE)', toggle_vi_mode))
        elif app.current_buffer.selection_state.type == SelectionType.CHARACTERS:
            append((input_mode_t, 'Vi (VISUAL)', toggle_vi_mode))
            append((token, ' '))
        elif app.current_buffer.selection_state.type == 'BLOCK':
            append((input_mode_t, 'Vi (VISUAL BLOCK)', toggle_vi_mode))
            append((token, ' '))
    elif mode in (InputMode.INSERT, 'vi-insert-multiple'):
        append((input_mode_t, 'Vi (INSERT)', toggle_vi_mode))
        append((token, ' '))
    elif mode == InputMode.NAVIGATION:
        append((input_mode_t, 'Vi (NAV)', toggle_vi_mode))
        append((token, ' '))
    elif mode == InputMode.REPLACE:
        append((input_mode_t, 'Vi (REPLACE)', toggle_vi_mode))
        append((token, ' '))
else:
    if app.emacs_state.is_recording:
        append((token, ' '))
        append((token + ' class:record', 'RECORD'))
        append((token, ' - '))

    append((input_mode_t, 'Emacs', toggle_vi_mode))
    append((token, ' '))

return result
def get_inputmode_fragments(python_input)
Return current input mode as a list of (token, text) tuples for use in a toolbar.
2.918093
2.873632
1.015472
@if_mousedown
def toggle_sidebar(mouse_event):
    " Click handler for the menu. "
    python_input.show_sidebar = not python_input.show_sidebar

version = sys.version_info
tokens = [
    ('class:status-toolbar.key', '[F2]', toggle_sidebar),
    ('class:status-toolbar', ' Menu', toggle_sidebar),
    ('class:status-toolbar', ' - '),
    ('class:status-toolbar.python-version', '%s %i.%i.%i' % (
        platform.python_implementation(),
        version[0], version[1], version[2])),
    ('class:status-toolbar', ' '),
]
width = fragment_list_width(tokens)

def get_text_fragments():
    # Python version
    return tokens

return ConditionalContainer(
    content=Window(
        FormattedTextControl(get_text_fragments),
        style='class:status-toolbar',
        height=Dimension.exact(1),
        width=Dimension.exact(width)),
    filter=~is_done & renderer_height_is_known &
        Condition(lambda: python_input.show_status_bar and
                          not python_input.show_exit_confirmation))
def show_sidebar_button_info(python_input)
Create `Layout` for the information in the right-bottom corner. (The right part of the status bar.)
5.213252
5.188269
1.004815
def get_text_fragments():
    # Show "Do you really want to exit?"
    return [
        (style, '\n %s ([y]/n)' % python_input.exit_message),
        ('[SetCursorPosition]', ''),
        (style, ' \n'),
    ]

visible = ~is_done & Condition(lambda: python_input.show_exit_confirmation)

return ConditionalContainer(
    content=Window(FormattedTextControl(get_text_fragments), style=style),  # , has_focus=visible))
    filter=visible)
def exit_confirmation(python_input, style='class:exit-confirmation')
Create `Layout` for the exit message.
8.420066
8.057019
1.04506
def get_text_fragments():
    return [('class:accept-message', ' [Meta+Enter] Execute ')]

def extra_condition():
    " Only show when... "
    b = python_input.default_buffer
    return (
        python_input.show_meta_enter_message and
        (not b.document.is_cursor_at_the_end or
         python_input.accept_input_on_enter is None) and
        '\n' in b.text)

visible = ~is_done & has_focus(DEFAULT_BUFFER) & Condition(extra_condition)

return ConditionalContainer(
    content=Window(FormattedTextControl(get_text_fragments)),
    filter=visible)
def meta_enter_message(python_input)
Create the `Layout` for the `Meta+Enter` message.
9.058618
8.223422
1.101563
current = self.get_current_value()
options = sorted(self.values.keys())

# Get current index.
try:
    index = options.index(current)
except ValueError:
    index = 0

# Go to previous/next index.
if _previous:
    index -= 1
else:
    index += 1

# Call handler for this option.
next_option = options[index % len(options)]
self.values[next_option]()
def activate_next(self, _previous=False)
Activate next value.
3.178459
2.914822
1.090447
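The wrap-around behavior above comes from taking the new index modulo the number of options. A tiny standalone sketch; the `options` list is illustrative.

options = ['classic', 'ipython']
index = (options.index('ipython') + 1) % len(options)
print(options[index])  # 'classic': stepping past the end wraps to the start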
" Return the currently selected option. " i = 0 for category in self.options: for o in category.options: if i == self.selected_option_index: return o else: i += 1
def selected_option(self)
Return the currently selected option.
4.326099
4.526778
0.955669
flags = 0

for value in self.get_globals().values():
    if isinstance(value, __future__._Feature):
        flags |= value.compiler_flag

return flags
def get_compiler_flags(self)
Return the current compiler flags, found by looking for `_Feature` instances in the globals.
7.489508
5.058901
1.480461
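This works because `from __future__ import ...` binds a `_Feature` object in the namespace, and each feature carries a `compiler_flag` that can be passed back to compile(). A minimal sketch:

import __future__

# `division` is one of the _Feature instances; its flag makes compile()
# apply true division even on Python 2.
flags = __future__.division.compiler_flag
code = compile('1 / 2', '<stdin>', 'eval', flags=flags, dont_inherit=True)
print(eval(code))  # 0.5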
def add_binding_decorator(*k, **kw):
    return self.extra_key_bindings.add(*k, **kw)
return add_binding_decorator
def add_key_binding(self)
Shortcut for adding new key bindings. (Mostly useful for a .ptpython/config.py file, that receives a PythonInput/Repl instance as input.) ::

    @python_input.add_key_binding(Keys.ControlX, filter=...)
    def handler(event):
        ...
7.010359
7.47083
0.938364
assert isinstance(name, six.text_type)
assert isinstance(style_dict, dict)

self.code_styles[name] = style_dict
def install_code_colorscheme(self, name, style_dict)
Install a new code color scheme.
3.151906
3.216078
0.980046
assert name in self.code_styles

self._current_code_style_name = name
self._current_style = self._generate_style()
def use_code_colorscheme(self, name)
Apply new colorscheme. (By name.)
6.113595
6.137313
0.996136
assert isinstance(name, six.text_type)
assert isinstance(style_dict, dict)

self.ui_styles[name] = style_dict
def install_ui_colorscheme(self, name, style_dict)
Install a new UI color scheme.
3.189935
3.261793
0.97797
assert name in self.ui_styles

self._current_ui_style_name = name
self._current_style = self._generate_style()
def use_ui_colorscheme(self, name)
Apply new colorscheme. (By name.)
5.893028
6.069781
0.97088
return generate_style(self.code_styles[self._current_code_style_name], self.ui_styles[self._current_ui_style_name])
def _generate_style(self)
Create new Style instance. (We don't want to do this on every key press, because each time the renderer receives a new style class, it will redraw everything.)
5.829769
4.462026
1.30653
return Application(
    input=self.input,
    output=self.output,
    layout=self.ptpython_layout.layout,
    key_bindings=merge_key_bindings([
        load_python_bindings(self),
        load_auto_suggest_bindings(),
        load_sidebar_bindings(self),
        load_confirm_exit_bindings(self),
        ConditionalKeyBindings(
            load_open_in_editor_bindings(),
            Condition(lambda: self.enable_open_in_editor)),
        # Extra key bindings should not be active when the sidebar is visible.
        ConditionalKeyBindings(
            self.extra_key_bindings,
            Condition(lambda: not self.show_sidebar))
    ]),
    color_depth=lambda: self.color_depth,
    paste_mode=Condition(lambda: self.paste_mode),
    mouse_support=Condition(lambda: self.enable_mouse_support),
    style=DynamicStyle(lambda: self._current_style),
    style_transformation=self.style_transformation,
    include_default_pygments_style=False,
    reverse_vi_search_direction=True)
def _create_application(self)
Create an `Application` instance.
3.975734
3.904855
1.018151
python_buffer = Buffer(
    name=DEFAULT_BUFFER,
    complete_while_typing=Condition(lambda: self.complete_while_typing),
    enable_history_search=Condition(lambda: self.enable_history_search),
    tempfile_suffix='.py',
    history=self.history,
    completer=ThreadedCompleter(self._completer),
    validator=ConditionalValidator(
        self._validator,
        Condition(lambda: self.enable_input_validation)),
    auto_suggest=ConditionalAutoSuggest(
        ThreadedAutoSuggest(AutoSuggestFromHistory()),
        Condition(lambda: self.enable_auto_suggest)),
    accept_handler=self._accept_handler,
    on_text_changed=self._on_input_timeout)
return python_buffer
def _create_buffer(self)
Create the `Buffer` for the Python input.
4.072925
3.63158
1.12153
assert isinstance(buff, Buffer)
app = self.app

# Never run multiple get-signature threads.
if self._get_signatures_thread_running:
    return
self._get_signatures_thread_running = True

document = buff.document

def run():
    script = get_jedi_script_from_document(document, self.get_locals(), self.get_globals())

    # Show signatures in help text.
    if script:
        try:
            signatures = script.call_signatures()
        except ValueError:
            # e.g. in case of an invalid \x escape.
            signatures = []
        except Exception:
            # Sometimes we still get an exception (TypeError), probably
            # because of bugs in jedi. We can silence them.
            # See: https://github.com/davidhalter/jedi/issues/492
            signatures = []
        else:
            # Try to access the params attribute just once. For Jedi
            # signatures containing the keyword-only argument star,
            # this will crash when retrieving it the first time with
            # AttributeError. Every following time it works.
            # See: https://github.com/jonathanslenders/ptpython/issues/47
            #      https://github.com/davidhalter/jedi/issues/598
            try:
                if signatures:
                    signatures[0].params
            except AttributeError:
                pass
    else:
        signatures = []

    self._get_signatures_thread_running = False

    # Set signatures and redraw if the text didn't change in the
    # meantime. Otherwise request new signatures.
    if buff.text == document.text:
        self.signatures = signatures

        # Set docstring in docstring buffer.
        if signatures:
            string = signatures[0].docstring()
            if not isinstance(string, six.text_type):
                string = string.decode('utf-8')
            self.docstring_buffer.reset(
                document=Document(string, cursor_position=0))
        else:
            self.docstring_buffer.reset()

        app.invalidate()
    else:
        self._on_input_timeout(buff)

get_event_loop().run_in_executor(run)
def _on_input_timeout(self, buff)
When there is no input activity, in another thread, get the signature of the current code.
4.94421
4.828297
1.024007
app = get_app()
app.vi_state.input_mode = InputMode.NAVIGATION

def done(f):
    result = f.result()
    if result is not None:
        self.default_buffer.text = result
    app.vi_state.input_mode = InputMode.INSERT

history = History(self, self.default_buffer.document)

future = run_coroutine_in_terminal(history.app.run_async)
future.add_done_callback(done)
def enter_history(self)
Display the history.
4.699324
4.509868
1.042009
result = dict((name, style_from_pygments_cls(get_style_by_name(name)))
              for name in get_all_styles())
result['win32'] = Style.from_dict(win32_code_style)
return result
def get_all_code_styles()
Return a mapping from style names to their classes.
4.14311
3.731405
1.110335
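The Pygments calls used above are part of its public API; a quick sketch of them in isolation (requires the pygments package):

from pygments.styles import get_all_styles, get_style_by_name

names = list(get_all_styles())
print('pastie' in names)            # True on a stock Pygments install
print(get_style_by_name('pastie'))  # the style class itself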
try:
    iter(extensions)
except TypeError:
    pass  # No extensions found.
else:
    for ext in extensions:
        try:
            shell.extension_manager.load_extension(ext)
        except:
            ipy_utils.warn.warn(
                "Error in loading extension: %s" % ext +
                "\nCheck your config files in %s" %
                ipy_utils.path.get_ipython_dir())
            shell.showtraceback()
def initialize_extensions(shell, extensions)
Partial copy of `InteractiveShellApp.init_extensions` from IPython.
4.448858
4.269228
1.042076
config = kwargs.get('config')
header = kwargs.pop('header', u'')
compile_flags = kwargs.pop('compile_flags', None)

if config is None:
    config = load_default_config()
    config.InteractiveShellEmbed = config.TerminalInteractiveShell
    kwargs['config'] = config

shell = InteractiveShellEmbed.instance(**kwargs)
initialize_extensions(shell, config['InteractiveShellApp']['extensions'])
shell(header=header, stack_depth=2, compile_flags=compile_flags)
def embed(**kwargs)
Copied from `IPython/terminal/embed.py`, but using our `InteractiveShellEmbed` instead.
4.696779
3.607455
1.301965
if self._paths_for_download is None:
    queries = list()

    try:
        for sra in self.gsm.relations['SRA']:
            query = sra.split("=")[-1]
            if 'SRX' not in query:
                raise ValueError(
                    "Sample looks like it is not an SRA: %s" % query)
            logger.info("Query: %s" % query)
            queries.append(query)
    except KeyError:
        raise NoSRARelationException(
            'No relation called SRA for %s' % self.gsm.get_accession())

    # Construction of DataFrame df with paths to download.
    df = DataFrame(columns=['download_path'])
    for query in queries:
        # Retrieve IDs for the given SRX.
        searchdata = Entrez.esearch(db='sra', term=query, usehistory='y',
                                    retmode='json')
        answer = json.loads(searchdata.read())
        ids = answer["esearchresult"]["idlist"]
        if len(ids) != 1:
            raise ValueError(
                "There should be one and only one ID per SRX")

        # Using the ID, fetch the info.
        number_of_trials = 10
        wait_time = 30
        for trial in range(number_of_trials):
            try:
                results = Entrez.efetch(db="sra", id=ids[0],
                                        rettype="runinfo",
                                        retmode="text").read()
                break
            except HTTPError as httperr:
                if "502" in str(httperr):
                    logger.warn(("%s, trial %i out of %i, waiting "
                                 "for %i seconds.") % (
                                     str(httperr), trial, number_of_trials,
                                     wait_time))
                    time.sleep(wait_time)
                elif httperr.code == 429:
                    # This means there are too many requests.
                    try:
                        header_wait_time = int(
                            httperr.headers["Retry-After"])
                    except:
                        header_wait_time = wait_time
                    logger.warn(("%s, trial %i out of %i, waiting "
                                 "for %i seconds.") % (
                                     str(httperr), trial, number_of_trials,
                                     header_wait_time))
                    time.sleep(header_wait_time)
                else:
                    raise httperr

        try:
            df_tmp = DataFrame(
                [i.split(',') for i in results.split('\n') if i != ''][1:],
                columns=[i.split(',') for i in results.split('\n') if i != ''][0])
        except IndexError:
            logger.error(("SRA is empty (ID: %s, query: %s). "
                          "Check if it is publicly available.") %
                         (ids[0], query))
            continue

        # Check it first.
        try:
            df_tmp['download_path']
        except KeyError as e:
            logger.error('KeyError: ' + str(e) + '\n')
            logger.error(str(results) + '\n')

        df = concat([df, df_tmp], sort=True)

    self._paths_for_download = [path for path in df['download_path']]

return self._paths_for_download
def paths_for_download(self)
List of URLs available for downloading.
2.999347
2.959448
1.013482
self.downloaded_paths = list()

for path in self.paths_for_download:
    downloaded_path = list()
    utils.mkdir_p(os.path.abspath(self.directory))

    sra_run = path.split("/")[-1]
    logger.info("Analysing %s" % sra_run)
    url = type(self).FTP_ADDRESS_TPL.format(range_subdir=sra_run[:6],
                                            file_dir=sra_run)
    logger.debug("URL: %s", url)
    filepath = os.path.abspath(
        os.path.join(self.directory, "%s.sra" % sra_run))
    utils.download_from_url(url, filepath, aspera=self.aspera,
                            silent=self.silent, force=self.force)

    if self.filetype in ("fasta", "fastq"):
        if utils.which('fastq-dump') is None:
            logger.error("fastq-dump command not found")

        ftype = ""
        if self.filetype == "fasta":
            ftype = " --fasta "

        cmd = "fastq-dump"
        if utils.which('parallel-fastq-dump') is None:
            cmd += " %s --outdir %s %s"
        else:
            logger.debug("Using parallel fastq-dump")
            cmd = " parallel-fastq-dump --threads %s"
            cmd = cmd % self.threads
            cmd += " %s --outdir %s -s %s"
        cmd = cmd % (ftype, self.directory, filepath)

        for fqoption, fqvalue in iteritems(self.fastq_dump_options):
            if fqvalue:
                cmd += (" --%s %s" % (fqoption, fqvalue))
            elif fqvalue is None:
                cmd += (" --%s" % fqoption)

        logger.debug(cmd)
        process = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)
        logger.info("Converting to %s/%s*.%s.gz\n" % (
            self.directory, sra_run, self.filetype))
        pout, perr = process.communicate()
        downloaded_path = glob.glob(os.path.join(
            self.directory, "%s*.%s.gz" % (sra_run, self.filetype)))
    elif self.filetype == 'sra':
        downloaded_path = glob.glob(os.path.join(
            self.directory, "%s*.%s" % (sra_run, self.filetype)))
    else:
        downloaded_path = glob.glob(os.path.join(
            self.directory, "%s*" % sra_run))
        logger.error("Filetype %s not supported." % self.filetype)

    if not self.keep_sra and self.filetype != 'sra':
        # Delete sra file.
        os.unlink(filepath)

    self.downloaded_paths += downloaded_path

return self.downloaded_paths
def download(self)
Download SRA files. Returns: :obj:`list` of :obj:`str`: List of downloaded files.
2.827763
2.80528
1.008014
logfile_handler = RotatingFileHandler(
    path, maxBytes=50000, backupCount=2)
formatter = logging.Formatter(
    fmt='%(asctime)s %(levelname)s %(module)s - %(message)s',
    datefmt="%d-%b-%Y %H:%M:%S")
logfile_handler.setFormatter(formatter)
geoparse_logger.addHandler(logfile_handler)
def add_log_file(path)
Add log file. Args: path (:obj:`str`): Path to the log file.
2.3139
2.705752
0.855178
gsm = args[0][0]
email = args[0][1]
dirpath = args[0][2]
kwargs = args[0][3]

return (gsm.get_accession(),
        gsm.download_SRA(email, dirpath, **kwargs))
def _sra_download_worker(*args)
A worker to download SRA files. To be used with multiprocessing.
3.725698
3.830533
0.972632
gsm = args[0][0]
download_sra = args[0][1]
email = args[0][2]
dirpath = args[0][3]
sra_kwargs = args[0][4]

return (gsm.get_accession(),
        gsm.download_supplementary_files(directory=dirpath,
                                         download_sra=download_sra,
                                         email=email,
                                         **sra_kwargs))
def _supplementary_files_download_worker(*args)
A worker to download supplementary files. To be used with multiprocessing.
3.357725
3.536678
0.949401
metadata_value = self.metadata.get(metaname, None)
if metadata_value is None:
    raise NoMetadataException(
        "No metadata attribute named %s" % metaname)
if not isinstance(metadata_value, list):
    raise TypeError("Metadata is not a list and it should be.")

if len(metadata_value) > 1:
    return metadata_value
else:
    return metadata_value[0]
def get_metadata_attribute(self, metaname)
Get the metadata attribute by the name. Args: metaname (:obj:`str`): Name of the attribute Returns: :obj:`list` or :obj:`str`: Value(s) of the requested metadata attribute Raises: NoMetadataException: Attribute error TypeError: Metadata should be a list
2.659908
2.535741
1.048967
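The branch logic above unwraps a one-element list and returns longer lists as-is. A plain-Python sketch of that behavior; the `meta` dict is hypothetical sample metadata.

meta = {'organism': ['Homo sapiens'],
        'characteristics': ['age: 5', 'sex: F']}

value = meta['organism']
print(value[0] if len(value) == 1 else value)   # 'Homo sapiens' (unwrapped)

value = meta['characteristics']
print(value[0] if len(value) == 1 else value)   # ['age: 5', 'sex: F'] (kept as list)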
metalist = []
for metaname, meta in iteritems(self.metadata):
    message = "Single value in metadata dictionary should be a list!"
    assert isinstance(meta, list), message
    for data in meta:
        if data:
            metalist.append("!%s_%s = %s" % (self.geotype.capitalize(),
                                             metaname, data))
return "\n".join(metalist)
def _get_metadata_as_string(self)
Get the metadata as SOFT formatted string.
5.864539
5.510955
1.06416
if isinstance(path_or_handle, str):
    if as_gzip:
        with gzip.open(path_or_handle, 'wt') as outfile:
            outfile.write(self._get_object_as_soft())
    else:
        with open(path_or_handle, 'w') as outfile:
            outfile.write(self._get_object_as_soft())
else:
    path_or_handle.write(self._get_object_as_soft())
def to_soft(self, path_or_handle, as_gzip=False)
Save the object in a SOFT format. Args: path_or_handle (:obj:`str` or :obj:`file`): Path or handle to output file as_gzip (:obj:`bool`): Save as gzip
1.791382
1.762982
1.016109
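A usage sketch for to_soft(): it accepts either a path (optionally gzip-compressed) or an already-open handle. The `obj` variable stands in for any GEO object exposing this method; paths are illustrative.

obj.to_soft('out.soft')                    # plain text file
obj.to_soft('out.soft.gz', as_gzip=True)   # gzip-compressed output

with open('out.soft', 'w') as handle:
    obj.to_soft(handle)                    # a pre-opened handle works too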
summary = list()
summary.append("%s %s" % (self.geotype, self.name) + "\n")
summary.append(" - Metadata:" + "\n")
summary.append(
    "\n".join(self._get_metadata_as_string().split("\n")[:5]) + "\n")
summary.append("\n")
summary.append(" - Columns:" + "\n")
summary.append(self.columns.to_string() + "\n")
summary.append("\n")
summary.append(" - Table:" + "\n")
summary.append(
    "\t".join(["Index"] + self.table.columns.tolist()) + "\n")
summary.append(self.table.head().to_string(header=None) + "\n")
summary.append(" " * 40 + "..." + " " * 40 + "\n")
summary.append(" " * 40 + "..." + " " * 40 + "\n")
summary.append(" " * 40 + "..." + " " * 40 + "\n")
summary.append(self.table.tail().to_string(header=None) + "\n")
return "\n".join([str(s) for s in summary])
def head(self)
Return a short, printable description of the object.
2.260587
2.217591
1.019389
soft = ["^%s = %s" % (self.geotype, self.name), self._get_metadata_as_string(), self._get_columns_as_string(), self._get_table_as_string()] return "\n".join(soft)
def _get_object_as_soft(self)
Get the object as SOFT formatted string.
5.486907
4.408001
1.244761
tablelist = []
tablelist.append("!%s_table_begin" % self.geotype.lower())
tablelist.append("\t".join(self.table.columns))
for idx, row in self.table.iterrows():
    tablelist.append("\t".join(map(str, row)))
tablelist.append("!%s_table_end" % self.geotype.lower())
return "\n".join(tablelist)
def _get_table_as_string(self)
Get table as SOFT formatted string.
2.942852
2.778403
1.059188
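The SOFT table framing produced above is just a tab-separated body wrapped in begin/end markers. A tiny self-contained sketch with a toy DataFrame (requires pandas; column names are illustrative):

import pandas as pd

table = pd.DataFrame({'ID_REF': ['p1', 'p2'], 'VALUE': [1.5, 2.0]})
lines = ['!sample_table_begin', '\t'.join(table.columns)]
for _, row in table.iterrows():
    lines.append('\t'.join(map(str, row)))
lines.append('!sample_table_end')
print('\n'.join(lines))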
columnslist = []
for rowidx, row in self.columns.iterrows():
    columnslist.append("#%s = %s" % (rowidx, row.description))
return "\n".join(columnslist)
def _get_columns_as_string(self)
Returns columns as SOFT formatted string.
5.105977
4.380479
1.165621
if isinstance(gpl, GPL):
    annotation_table = gpl.table
elif isinstance(gpl, DataFrame):
    annotation_table = gpl
else:
    raise TypeError("gpl should be a GPL object or a pandas.DataFrame")

# Annotate by merging.
annotated = self.table.merge(
    annotation_table[[gpl_on, annotation_column]],
    left_on=gsm_on, right_on=gpl_on)
del annotated[gpl_on]
if in_place:
    self.table = annotated
    return None
else:
    return annotated
def annotate(self, gpl, annotation_column, gpl_on="ID", gsm_on="ID_REF", in_place=False)
Annotate GSM with provided GPL Args: gpl (:obj:`pandas.DataFrame`): A Platform or DataFrame to annotate with annotation_column (:obj:`str`): Column in a table for annotation gpl_on (:obj:`str`): Use this column in GPL to merge. Defaults to "ID". gsm_on (:obj:`str`): Use this column in GSM to merge. Defaults to "ID_REF". in_place (:obj:`bool`): Substitute table in GSM by new annotated table. Defaults to False. Returns: :obj:`pandas.DataFrame` or :obj:`None`: Annotated table or None Raises: TypeError: GPL should be GPL or pandas.DataFrame
2.822875
2.765684
1.020678
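The annotate-by-merge step above is a plain pandas merge on the probe ID columns. A sketch with toy data (requires pandas; column names mirror the defaults above, the gene symbols are made up):

import pandas as pd

gsm_table = pd.DataFrame({'ID_REF': ['p1', 'p2'], 'VALUE': [1.5, 2.0]})
gpl_table = pd.DataFrame({'ID': ['p1', 'p2'], 'GENE_SYMBOL': ['TP53', 'EGFR']})

annotated = gsm_table.merge(gpl_table[['ID', 'GENE_SYMBOL']],
                            left_on='ID_REF', right_on='ID')
del annotated['ID']  # drop the redundant merge key
print(annotated)     # each VALUE row now carries a GENE_SYMBOL annotation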
if gpl.name != self.metadata['platform_id'][0] and not force:
    raise KeyError("Platforms from GSM (%s) and from GPL (%s)" % (
        gpl.name, self.metadata['platform_id']) +
        " are incompatible. Use force=True to use this GPL.")

if merge_on_column is None and gpl_on is None and gsm_on is None:
    raise Exception("You have to provide one of the two: "
                    "merge_on_column or gpl_on and gsm_on parameters")

if merge_on_column:
    logger.info("merge_on_column is not None. Using this option.")
    tmp_data = self.table.merge(gpl.table, on=merge_on_column, how='outer')
    tmp_data = tmp_data.groupby(group_by_column).mean()[[expression_column]]
else:
    if gpl_on is None or gsm_on is None:
        raise Exception("Please provide both gpl_on and gsm_on or "
                        "provide merge_on_column only")
    tmp_data = self.table.merge(gpl.table, left_on=gsm_on,
                                right_on=gpl_on, how='outer')
    tmp_data = tmp_data.groupby(group_by_column).mean()[[expression_column]]

if rename:
    tmp_data.columns = [self.name]
return tmp_data
def annotate_and_average(self, gpl, expression_column, group_by_column, rename=True, force=False, merge_on_column=None, gsm_on=None, gpl_on=None)
Annotate GSM table with provided GPL. Args: gpl (:obj:`GEOTypes.GPL`): Platform for annotations expression_column (:obj:`str`): Column name which "expressions" are represented group_by_column (:obj:`str`): The data will be grouped and averaged over this column and only this column will be kept rename (:obj:`bool`): Rename output column to the self.name. Defaults to True. force (:obj:`bool`): If the name of the GPL does not match the platform name in GSM proceed anyway. Defaults to False. merge_on_column (:obj:`str`): Column to merge the data on. Defaults to None. gsm_on (:obj:`str`): In the case columns to merge are different in GSM and GPL use this column in GSM. Defaults to None. gpl_on (:obj:`str`): In the case columns to merge are different in GSM and GPL use this column in GPL. Defaults to None. Returns: :obj:`pandas.DataFrame`: Annotated data
2.635067
2.443905
1.07822
directory_path = os.path.abspath(
    os.path.join(directory, "%s_%s_%s" % (
        'Supp',
        self.get_accession(),
        # The directory name cannot contain many of the signs.
        re.sub(r'[\s\*\?\(\),\.;]', '_', self.metadata['title'][0]))))
utils.mkdir_p(os.path.abspath(directory_path))

downloaded_paths = dict()
if sra_kwargs is None:
    sra_kwargs = {}

# Possible erroneous values that could be identified and skipped right
# after.
blacklist = ('NONE',)

for metakey, metavalue in iteritems(self.metadata):
    if 'supplementary_file' in metakey:
        assert len(metavalue) == 1 and metavalue != ''
        if metavalue[0] in blacklist:
            logger.warn("%s value is blacklisted as '%s' - skipping" %
                        (metakey, metavalue[0]))
            continue

        # SRA will be downloaded elsewhere.
        if 'sra' not in metavalue[0]:
            download_path = os.path.abspath(os.path.join(
                directory,
                os.path.join(directory_path, metavalue[0].split("/")[-1])))
            try:
                utils.download_from_url(metavalue[0], download_path)
                downloaded_paths[metavalue[0]] = download_path
            except Exception as err:
                logger.error(
                    "Cannot download %s supplementary file (%s)" % (
                        self.get_accession(), err))

if download_sra:
    try:
        downloaded_files = self.download_SRA(
            email, directory=directory, **sra_kwargs)
        downloaded_paths.update(downloaded_files)
    except Exception as err:
        logger.error("Cannot download %s SRA file (%s)" % (
            self.get_accession(), err))

return downloaded_paths
def download_supplementary_files(self, directory="./", download_sra=True, email=None, sra_kwargs=None)
Download all supplementary data available for the sample. Args: directory (:obj:`str`): Directory to download the data (in this directory function will create new directory with the files). Defaults to "./". download_sra (:obj:`bool`): Indicates whether to download SRA raw data too. Defaults to True. email (:obj:`str`): E-mail that will be provided to the Entrez. It is mandatory if download_sra=True. Defaults to None. sra_kwargs (:obj:`dict`, optional): Kwargs passed to the download_SRA method. Defaults to None. Returns: :obj:`dict`: A key-value pair of name taken from the metadata and paths downloaded, in the case of SRA files the key is ``SRA``.
3.413989
3.449824
0.989612
downloader = SRADownloader(self, email, directory, **kwargs)
return {"SRA": downloader.download()}
def download_SRA(self, email, directory='./', **kwargs)
Download RAW data as SRA file. The files will be downloaded to the sample directory created ad hoc or the directory specified by the parameter. The sample has to come from sequencing eg. mRNA-seq, CLIP etc. An important parameter is a filetype. By default an SRA is accessed by FTP and such a file is downloaded. This does not require additional libraries. However, in order to produce FASTA or FASTQ files one would need to use SRA-Toolkit. Thus, it is assumed that this library is already installed or will be installed in the near future. One can immediately specify the download type to fasta or fastq. To see all possible ``**kwargs`` that could be passed to the function see the description of :class:`~GEOparse.sra_downloader.SRADownloader`. Args: email (:obj:`str`): an email (any) - Required by NCBI for access directory (:obj:`str`, optional): The directory to which download the data. Defaults to "./". **kwargs: Arbitrary keyword arguments, see description Returns: :obj:`dict`: A dictionary containing only one key (``SRA``) with the list of downloaded files. Raises: :obj:`TypeError`: Type to download unknown :obj:`NoSRARelationException`: No SRAToolkit :obj:`Exception`: Wrong e-mail :obj:`HTTPError`: Cannot access or connect to DB
6.598111
6.647816
0.992523
soft = ["^%s = %s" % (self.geotype, self.name), self._get_metadata_as_string()] return "\n".join(soft)
def _get_object_as_soft(self)
Get the object as SOFT formatted string.
9.519135
6.826758
1.394386
soft = []
if self.database is not None:
    soft.append(self.database._get_object_as_soft())
soft += ["^%s = %s" % (self.geotype, self.name),
         self._get_metadata_as_string()]
for subset in self.subsets.values():
    soft.append(subset._get_object_as_soft())
soft += ["^%s = %s" % (self.geotype, self.name),
         self._get_columns_as_string(),
         self._get_table_as_string()]
return "\n".join(soft)
def _get_object_as_soft(self)
Return object as SOFT formatted string.
3.141946
2.886523
1.088488
if self._phenotype_data is None:
    pheno_data = {}
    for gsm_name, gsm in iteritems(self.gsms):
        tmp = {}
        for key, value in iteritems(gsm.metadata):
            if len(value) == 0:
                tmp[key] = np.nan
            elif key.startswith("characteristics_"):
                for i, char in enumerate(value):
                    char = re.split(r":\s+", char)
                    char_type, char_value = [char[0], ": ".join(char[1:])]
                    tmp[key + "." + str(i) + "." + char_type] = char_value
            else:
                tmp[key] = ",".join(value)
        pheno_data[gsm_name] = tmp
    self._phenotype_data = DataFrame(pheno_data).T
return self._phenotype_data
def phenotype_data(self)
Get the phenotype data for each of the samples.
2.88013
2.789845
1.032362
if isinstance(platform, str):
    gpl = self.gpls[platform]
elif isinstance(platform, GPL):
    gpl = platform
else:
    raise ValueError("Platform has to be of type GPL or string with "
                     "key for platform in GSE")

data = []
for gsm in self.gsms.values():
    if gpl.name == gsm.metadata['platform_id'][0]:
        data.append(gsm.annotate_and_average(
            gpl=gpl,
            merge_on_column=merge_on_column,
            expression_column=expression_column,
            group_by_column=group_by_column,
            force=force,
            gpl_on=gpl_on,
            gsm_on=gsm_on))

if len(data) == 0:
    logger.warning("No samples for the platform were found\n")
    return None
elif len(data) == 1:
    return data[0]
else:
    return data[0].join(data[1:])
def merge_and_average(self, platform, expression_column, group_by_column, force=False, merge_on_column=None, gsm_on=None, gpl_on=None)
Merge and average GSE samples. For given platform prepare the DataFrame with all the samples present in the GSE annotated with given column from platform and averaged over the column. Args: platform (:obj:`str` or :obj:`GEOparse.GPL`): GPL platform to use. expression_column (:obj:`str`): Column name in which "expressions" are represented group_by_column (:obj:`str`): The data will be grouped and averaged over this column and only this column will be kept force (:obj:`bool`): If the name of the GPL does not match the platform name in GSM proceed anyway merge_on_column (:obj:`str`): Column to merge the data on - should be present in both GSM and GPL gsm_on (:obj:`str`): In the case columns to merge are different in GSM and GPL use this column in GSM gpl_on (:obj:`str`): In the case columns to merge are different in GSM and GPL use this column in GPL Returns: :obj:`pandas.DataFrame`: Merged and averaged table of results.
2.80128
2.667797
1.050035
data = []
for gsm in self.gsms.values():
    tmp_data = gsm.table.copy()
    tmp_data["name"] = gsm.name
    data.append(tmp_data)
ndf = concat(data).pivot(index=index, values=values, columns="name")
return ndf
def pivot_samples(self, values, index="ID_REF")
Pivot samples by specified column. Construct a table in which columns (names) are the samples, index is a specified column eg. ID_REF and values in the columns are of one specified type. Args: values (:obj:`str`): Column name present in all GSMs. index (:obj:`str`, optional): Column name that will become an index in pivoted table. Defaults to "ID_REF". Returns: :obj:`pandas.DataFrame`: Pivoted data
4.411945
4.071116
1.083719
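The pivot step above turns a long per-sample table into a wide probe-by-sample matrix. A sketch with toy data (requires pandas; sample names are illustrative):

import pandas as pd

data = pd.DataFrame({
    'ID_REF': ['p1', 'p2', 'p1', 'p2'],
    'VALUE':  [1.0, 2.0, 3.0, 4.0],
    'name':   ['GSM1', 'GSM1', 'GSM2', 'GSM2'],
})
print(data.pivot(index='ID_REF', values='VALUE', columns='name'))
# name    GSM1  GSM2
# ID_REF
# p1       1.0   3.0
# p2       2.0   4.0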
if isinstance(gpl, GPL):
    annotation_table = gpl.table
elif isinstance(gpl, DataFrame):
    annotation_table = gpl
else:
    raise TypeError("gpl should be a GPL object or a pandas.DataFrame")

pivoted_samples = self.pivot_samples(values=values, index=gsm_on)
ndf = pivoted_samples.reset_index().merge(
    annotation_table[[gpl_on, annotation_column]],
    left_on=gsm_on, right_on=gpl_on).set_index(gsm_on)
del ndf[gpl_on]
ndf.columns.name = 'name'
return ndf
def pivot_and_annotate(self, values, gpl, annotation_column, gpl_on="ID", gsm_on="ID_REF")
Annotate GSM with provided GPL. Args: values (:obj:`str`): Column to use as values eg. "VALUES" gpl (:obj:`pandas.DataFrame` or :obj:`GEOparse.GPL`): A Platform or DataFrame to annotate with. annotation_column (:obj:`str`): Column in table for annotation. gpl_on (:obj:`str`, optional): Use this column in GPL to merge. Defaults to "ID". gsm_on (:obj:`str`, optional): Use this column in GSM to merge. Defaults to "ID_REF". Returns: pandas.DataFrame: Pivoted and annotated table of results
2.958264
3.06879
0.963984
if sra_kwargs is None:
    sra_kwargs = dict()

if directory == 'series':
    dirpath = os.path.abspath(self.get_accession() + "_Supp")
    utils.mkdir_p(dirpath)
else:
    dirpath = os.path.abspath(directory)
    utils.mkdir_p(dirpath)

downloaded_paths = dict()
if nproc == 1:
    # No need to parallelize, running ordinary download in loop.
    downloaded_paths = dict()
    for gsm in itervalues(self.gsms):
        logger.info("Downloading supplementary files for %s\n" % gsm.name)
        paths = gsm.download_supplementary_files(email=email,
                                                 download_sra=download_sra,
                                                 directory=dirpath,
                                                 sra_kwargs=sra_kwargs)
        downloaded_paths[gsm.name] = paths
elif nproc > 1:
    # Parallelization enabled.
    downloaders = list()
    # Collecting params for Pool.map in a loop.
    for gsm in itervalues(self.gsms):
        downloaders.append([gsm, download_sra, email, dirpath, sra_kwargs])
    p = Pool(nproc)
    results = p.map(_supplementary_files_download_worker, downloaders)
    downloaded_paths = dict(results)
else:
    raise ValueError("Nproc should be non-negative: %s" % str(nproc))

return downloaded_paths
def download_supplementary_files(self, directory='series', download_sra=True, email=None, sra_kwargs=None, nproc=1)
Download supplementary data. .. warning:: Do not use parallel option (nproc > 1) in the interactive shell. For more details see `this issue <https://stackoverflow.com/questions/23641475/multiprocessing-working-in-python-but-not-in-ipython/23641560#23641560>`_ on SO. Args: directory (:obj:`str`, optional): Directory to download the data (in this directory function will create new directory with the files), by default this will be named with the series name + _Supp. download_sra (:obj:`bool`, optional): Indicates whether to download SRA raw data too. Defaults to True. email (:obj:`str`, optional): E-mail that will be provided to the Entrez. Defaults to None. sra_kwargs (:obj:`dict`, optional): Kwargs passed to the GSM.download_SRA method. Defaults to None. nproc (:obj:`int`, optional): Number of processes for SRA download (default is 1, no parallelization). Returns: :obj:`dict`: Downloaded data for each of the GSM
3.004734
3.02878
0.992061
if directory == 'series':
    dirpath = os.path.abspath(self.get_accession() + "_SRA")
    utils.mkdir_p(dirpath)
else:
    dirpath = os.path.abspath(directory)
    utils.mkdir_p(dirpath)

if filterby is not None:
    gsms_to_use = [gsm for gsm in self.gsms.values() if filterby(gsm)]
else:
    gsms_to_use = self.gsms.values()

if nproc == 1:
    # No need to parallelize, running ordinary download in loop.
    downloaded_paths = dict()
    for gsm in gsms_to_use:
        logger.info("Downloading SRA files for %s series\n" % gsm.name)
        downloaded_paths[gsm.name] = gsm.download_SRA(email=email,
                                                      directory=dirpath,
                                                      **kwargs)
elif nproc > 1:
    # Parallelization enabled.
    downloaders = list()
    # Collecting params for Pool.map in a loop.
    for gsm in gsms_to_use:
        downloaders.append([gsm, email, dirpath, kwargs])
    p = Pool(nproc)
    results = p.map(_sra_download_worker, downloaders)
    downloaded_paths = dict(results)
else:
    raise ValueError("Nproc should be non-negative: %s" % str(nproc))

return downloaded_paths
def download_SRA(self, email, directory='series', filterby=None, nproc=1, **kwargs)
Download SRA files for each GSM in series. .. warning:: Do not use parallel option (nproc > 1) in the interactive shell. For more details see `this issue <https://stackoverflow.com/questions/23641475/multiprocessing-working-in-python-but-not-in-ipython/23641560#23641560>`_ on SO. Args: email (:obj:`str`): E-mail that will be provided to Entrez. directory (:obj:`str`, optional): Directory to save the data (defaults to 'series', which saves the data to a directory named after the series with an '_SRA' suffix). Defaults to "series". filterby (:obj:`callable`, optional): Filter GSM objects; the argument is a function that operates on a GSM object and returns bool, e.g. lambda x: "brain" not in x.name. Defaults to None. nproc (:obj:`int`, optional): Number of processes for SRA download (default is 1, no parallelization). **kwargs: Any arbitrary argument passed to the GSM.download_SRA method. See the documentation for more details. Returns: :obj:`dict`: A dictionary containing the output of the ``GSM.download_SRA`` method where each GSM accession ID is the key for the output.
2.932184
2.809166
1.043792
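A hedged sketch of the call; the e-mail address and the filter condition are placeholders:

sra_paths = gse.download_SRA(
    email="user@example.com",   # passed through to Entrez
    directory="./sra",
    filterby=lambda gsm: "brain" not in gsm.name,
    nproc=1,
)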
soft = [] if self.database is not None: soft.append(self.database._get_object_as_soft()) soft += ["^%s = %s" % (self.geotype, self.name), self._get_metadata_as_string()] for gsm in itervalues(self.gsms): soft.append(gsm._get_object_as_soft()) for gpl in itervalues(self.gpls): soft.append(gpl._get_object_as_soft()) return "\n".join(soft)
def _get_object_as_soft(self)
Get object as SOFT formatted string.
3.46682
3.185658
1.088259
return os.path.join(os.path.abspath(self.outdir), self.filename)
def destination(self)
Get the destination path. This property should be computed every time it is used because the user could change the outdir and filename dynamically.
6.092973
3.808283
1.599926
def _download():
    if self.url.startswith("http"):
        self._download_http(silent=silent)
    elif self.url.startswith("ftp"):
        self._download_ftp(silent=silent)
    else:
        raise ValueError("Invalid URL %s" % self.url)
    logger.debug("Moving %s to %s" % (
        self._temp_file_name, self.destination))
    shutil.move(self._temp_file_name, self.destination)
    logger.debug("Successfully downloaded %s" % self.url)

try:
    is_already_downloaded = os.path.isfile(self.destination)
    if is_already_downloaded:
        if force:
            try:
                os.remove(self.destination)
            except Exception:
                logger.error("Cannot delete %s" % self.destination)
            logger.info(
                "Downloading %s to %s" % (self.url, self.destination))
            logger.debug(
                "Downloading %s to %s" % (self.url, self._temp_file_name))
            _download()
        else:
            logger.info(("File %s already exists. Use force=True if you"
                         " would like to overwrite it.") % self.destination)
    else:
        _download()
finally:
    try:
        os.remove(self._temp_file_name)
    except OSError:
        pass
def download(self, force=False, silent=False)
Download from URL.
2.176144
2.151978
1.01123
aspera_home = os.environ.get("ASPERA_HOME", None) if not aspera_home: raise ValueError("environment variable $ASPERA_HOME not set") if not os.path.exists(aspera_home): raise ValueError( "$ASPERA_HOME directory {} does not exist".format(aspera_home)) ascp = os.path.join(aspera_home, "connect/bin/ascp") key = os.path.join(aspera_home, "connect/etc/asperaweb_id_dsa.openssh") if not os.path.exists(ascp): raise ValueError("could not find ascp binary") if not os.path.exists(key): raise ValueError("could not find openssh key") parsed_url = urlparse(self.url) cmd = "{} -i {} -k1 -T -l400m {}@{}:{} {}".format( ascp, key, user, host, parsed_url.path, self._temp_file_name) logger.debug(cmd) try: pr = sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE) stdout, stderr = pr.communicate() if not silent: logger.debug("Aspera stdout: " + str(stdout)) logger.debug("Aspera stderr: " + str(stderr)) if pr.returncode == 0: logger.debug("Moving %s to %s" % ( self._temp_file_name, self.destination)) shutil.move(self._temp_file_name, self.destination) logger.debug("Successfully downloaded %s" % self.url) else: logger.error( "Failed to download %s using Aspera Connect" % self.url) finally: try: os.remove(self._temp_file_name) except OSError: pass
def download_aspera(self, user, host, silent=False)
Download file with Aspera Connect. For details see the documentation of Aspera Connect. Args: user (:obj:`str`): FTP user. host (:obj:`str`): FTP host, e.g. "ftp-trace.ncbi.nlm.nih.gov".
2.246228
2.237122
1.00407
with open(filename, 'rb') as fh: m = hashlib.md5() while True: data = fh.read(blocksize) if not data: break m.update(data) return m.hexdigest()
def md5sum(filename, blocksize=8192)
Get the MD5 checksum of a file.
1.677135
1.67253
1.002754
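For example, verifying a download against a known checksum (both the file name and the digest below are hypothetical):

expected = "9e107d9d372bb6826bd81d3542a419d6"   # hypothetical digest
if md5sum("GSE1563_family.soft.gz") != expected:
    raise IOError("Checksum mismatch - the download may be corrupted.")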
if geo is None and filepath is None: raise Exception("You have to specify filename or GEO accession!") if geo is not None and filepath is not None: raise Exception("You can specify filename or GEO accession - not both!") if silent: logger.setLevel(100) # More than critical if filepath is None: filepath, geotype = get_GEO_file(geo, destdir=destdir, how=how, annotate_gpl=annotate_gpl, include_data=include_data, silent=silent, aspera=aspera) else: if geotype is None: geotype = path.basename(filepath)[:3] logger.info("Parsing %s: " % filepath) if geotype.upper() == "GSM": return parse_GSM(filepath) elif geotype.upper() == "GSE": return parse_GSE(filepath) elif geotype.upper() == 'GPL': return parse_GPL(filepath, partial=partial) elif geotype.upper() == 'GDS': return parse_GDS(filepath) else: raise ValueError(("Unknown GEO type: %s. Available types: GSM, GSE, " "GPL and GDS.") % geotype.upper())
def get_GEO(geo=None, filepath=None, destdir="./", how='full', annotate_gpl=False, geotype=None, include_data=False, silent=False, aspera=False, partial=None)
Get the GEO entry. The GEO entry is taken directly from the GEO database or read from a SOFT file. Args: geo (:obj:`str`): GEO database identifier. filepath (:obj:`str`): Path to local SOFT file. Defaults to None. destdir (:obj:`str`, optional): Directory to download data. Defaults to "./". how (:obj:`str`, optional): GSM download mode. Defaults to "full". annotate_gpl (:obj:`bool`, optional): Download the GPL annotation instead of regular GPL. If not available, fall back to the regular GPL file. Defaults to False. geotype (:obj:`str`, optional): Type of GEO entry. By default it is inferred from the ID or the file name. include_data (:obj:`bool`, optional): Full download of GPLs including series and samples. Defaults to False. silent (:obj:`bool`, optional): Do not print anything. Defaults to False. aspera (:obj:`bool`, optional): EXPERIMENTAL Download using Aspera Connect. Follow Aspera instructions for further details. Defaults to False. partial (:obj:`iterable`, optional): A list of accession IDs of GSMs to be partially extracted from GPL; works only if a file/accession is a GPL. Returns: :obj:`GEOparse.BaseGEO`: A GEO object of given type.
2.439892
2.427423
1.005136
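A minimal usage sketch (GSE1563 is a small public series often used in GEOparse examples):

import GEOparse

gse = GEOparse.get_GEO(geo="GSE1563", destdir="./")
for name, gsm in gse.gsms.items():
    print(name, gsm.metadata.get("title", [""])[0])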
if entry_line.startswith("!"): entry_line = sub(r"!\w*?_", '', entry_line) else: entry_line = entry_line.strip()[1:] try: entry_type, entry_name = [i.strip() for i in entry_line.split("=", 1)] except ValueError: entry_type = [i.strip() for i in entry_line.split("=", 1)][0] entry_name = '' return entry_type, entry_name
def __parse_entry(entry_line)
Parse the SOFT file entry name line that starts with '^', '!' or '#'. Args: entry_line (:obj:`str`): Line from SOFT to be parsed. Returns: :obj:`2-tuple`: Type of entry, value of entry.
2.814725
2.682767
1.049187
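Worked examples of the three entry-line formats this helper accepts (a sketch; the helper is module-private):

__parse_entry("!Series_title = Example study")  # -> ("title", "Example study")
__parse_entry("^SERIES = GSE1563")              # -> ("SERIES", "GSE1563")
__parse_entry("#ID_REF = Probe identifier")     # -> ("ID_REF", "Probe identifier")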
meta = defaultdict(list) for line in lines: line = line.rstrip() if line.startswith("!"): if "_table_begin" in line or "_table_end" in line: continue key, value = __parse_entry(line) meta[key].append(value) return dict(meta)
def parse_metadata(lines)
Parse list of lines with metadata information from SOFT file. Args: lines (:obj:`Iterable`): Iterator over the lines. Returns: :obj:`dict`: Metadata from SOFT file.
3.703981
4.147071
0.893156
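A small worked example; repeated keys accumulate into lists:

lines = [
    "!Series_title = Example study",
    "!Series_platform_id = GPL96",
    "!Series_platform_id = GPL97",
]
parse_metadata(lines)
# -> {'title': ['Example study'], 'platform_id': ['GPL96', 'GPL97']}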
data = [] index = [] for line in lines: line = line.rstrip() if line.startswith("#"): tmp = __parse_entry(line) data.append(tmp[1]) index.append(tmp[0]) return DataFrame(data, index=index, columns=['description'])
def parse_columns(lines)
Parse list of lines with columns description from SOFT file. Args: lines (:obj:`Iterable`): Iterator over the lines. Returns: :obj:`pandas.DataFrame`: Columns description.
3.726921
3.770213
0.988517
data = []
index = []
for line in lines:
    line = line.rstrip()
    if line.startswith("#"):
        tmp = __parse_entry(line)
        data.append(tmp[1])
        index.append(tmp[0])
df = DataFrame(data, index=index, columns=['description'])
subset_ids = defaultdict(dict)
for subsetname, subset in iteritems(subsets):
    for expid in subset.metadata["sample_id"][0].split(","):
        try:
            subset_type = subset.get_type()
            subset_ids[subset_type][expid] = \
                subset.metadata['description'][0]
        except Exception as err:
            logger.error("Error processing subset %s: %s" % (
                subsetname, err))
return df.join(DataFrame(subset_ids))
def parse_GDS_columns(lines, subsets)
Parse a list of lines with columns description from a GDS SOFT file. Args: lines (:obj:`Iterable`): Iterator over the lines. subsets (:obj:`dict` of :obj:`GEOparse.GDSSubset`): Subsets to use. Returns: :obj:`pandas.DataFrame`: Columns description.
3.908511
3.932899
0.993799
# keep only lines that do not start with SOFT control symbols (^, !, #)
data = "\n".join([i.rstrip() for i in lines
                  if not i.startswith(("^", "!", "#")) and i.rstrip()])
if data:
    return read_csv(StringIO(data), index_col=None, sep="\t")
else:
    return DataFrame()
def parse_table_data(lines)
Parse list of lines from SOFT file into DataFrame. Args: lines (:obj:`Iterable`): Iterator over the lines. Returns: :obj:`pandas.DataFrame`: Table data.
5.49668
6.076267
0.904615
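A sketch with a hypothetical two-column table; the bang lines are dropped and the rest is read as tab-separated values:

lines = [
    "!sample_table_begin",
    "ID_REF\tVALUE",
    "A_23_P1\t7.25",
    "A_23_P2\t5.01",
    "!sample_table_end",
]
df = parse_table_data(lines)  # DataFrame with columns ID_REF and VALUE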
if isinstance(filepath, str):
    with utils.smart_open(filepath) as f:
        soft = []
        has_table = False
        for line in f:
            if "_table_begin" in line or (not line.startswith(("^", "!", "#"))):
                has_table = True
            soft.append(line.rstrip())
else:
    soft = []
    has_table = False
    for line in filepath:
        if "_table_begin" in line or (not line.startswith(("^", "!", "#"))):
            has_table = True
        soft.append(line.rstrip())
if entry_name is None:
    sets = [i for i in soft if i.startswith("^")]
    if len(sets) > 1:
        raise Exception("More than one entry in GSM file")
    if len(sets) == 0:
        raise NoEntriesException(
            "No entries found. Check if the accession is correct!")
    entry_name = parse_entry_name(sets[0])
columns = parse_columns(soft)
metadata = parse_metadata(soft)
if has_table:
    table_data = parse_table_data(soft)
else:
    table_data = DataFrame()
gsm = GSM(name=entry_name,
          table=table_data,
          metadata=metadata,
          columns=columns)
return gsm
def parse_GSM(filepath, entry_name=None)
Parse GSM entry from SOFT file. Args: filepath (:obj:`str` or :obj:`Iterable`): Path to file with 1 GSM entry or list of lines representing GSM from GSE file. entry_name (:obj:`str`, optional): Name of the entry. By default it is inferred from the data. Returns: :obj:`GEOparse.GSM`: A GSM object.
2.913631
2.871658
1.014616
gsms = {}
gses = {}
gpl_soft = []
has_table = False
gpl_name = entry_name
database = None
if isinstance(filepath, str):
    with utils.smart_open(filepath) as soft:
        groupper = groupby(soft, lambda x: x.startswith("^"))
        for is_new_entry, group in groupper:
            if is_new_entry:
                entry_type, entry_name = __parse_entry(next(group))
                logger.debug("%s: %s" % (entry_type.upper(), entry_name))
                if entry_type == "SERIES":
                    is_data, data_group = next(groupper)
                    gse_metadata = parse_metadata(data_group)
                    gses[entry_name] = GSE(name=entry_name,
                                           metadata=gse_metadata)
                elif entry_type == "SAMPLE":
                    if partial and entry_name not in partial:
                        continue
                    is_data, data_group = next(groupper)
                    gsms[entry_name] = parse_GSM(data_group, entry_name)
                elif entry_type == "DATABASE":
                    is_data, data_group = next(groupper)
                    database_metadata = parse_metadata(data_group)
                    database = GEODatabase(name=entry_name,
                                           metadata=database_metadata)
                elif entry_type == "PLATFORM" or entry_type == "Annotation":
                    gpl_name = entry_name
                    is_data, data_group = next(groupper)
                    # the name is known unless the entry header carried no value
                    has_gpl_name = bool(gpl_name)
                    for line in data_group:
                        if ("_table_begin" in line
                                or not line.startswith(("^", "!", "#"))):
                            has_table = True
                        if not has_gpl_name:
                            if match(r"!Annotation_platform\s*=\s*", line):
                                gpl_name = split(r"\s*=\s*", line)[-1].strip()
                                has_gpl_name = True
                        gpl_soft.append(line)
                else:
                    raise RuntimeError(
                        "Cannot parse {etype}: unknown entry type for "
                        "GPL.".format(etype=entry_type))
else:
    for line in filepath:
        if "_table_begin" in line or (not line.startswith(("^", "!", "#"))):
            has_table = True
        gpl_soft.append(line.rstrip())
columns = None
try:
    columns = parse_columns(gpl_soft)
except Exception:
    pass
metadata = parse_metadata(gpl_soft)
if has_table:
    table_data = parse_table_data(gpl_soft)
else:
    table_data = DataFrame()
gpl = GPL(name=gpl_name,
          gses=gses,
          gsms=gsms,
          table=table_data,
          metadata=metadata,
          columns=columns,
          database=database)
# link samples to series, if these were present in the GPL soft file
for gse_id, gse in gpl.gses.items():
    for gsm_id in gse.metadata.get("sample_id", []):
        if gsm_id in gpl.gsms:
            gpl.gses[gse_id].gsms[gsm_id] = gpl.gsms[gsm_id]
return gpl
def parse_GPL(filepath, entry_name=None, partial=None)
Parse GPL entry from SOFT file. Args: filepath (:obj:`str` or :obj:`Iterable`): Path to file with 1 GPL entry or list of lines representing GPL from GSE file. entry_name (:obj:`str`, optional): Name of the entry. By default it is inferred from the data. partial (:obj:`iterable`, optional): A list of accession IDs of GSMs to be partially extracted from GPL; works only if a file/accession is a GPL. Returns: :obj:`GEOparse.GPL`: A GPL object.
2.928581
2.86898
1.020774
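A hedged usage sketch; the file name and GSM accessions are placeholders:

gpl = parse_GPL("./GPL96_family.soft.gz",
                partial=["GSM1000", "GSM1001"])  # extract only these samples
print(gpl.name, gpl.table.shape)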
gpls = {} gsms = {} series_counter = 0 database = None metadata = {} gse_name = None with utils.smart_open(filepath) as soft: groupper = groupby(soft, lambda x: x.startswith("^")) for is_new_entry, group in groupper: if is_new_entry: entry_type, entry_name = __parse_entry(next(group)) logger.debug("%s: %s" % (entry_type.upper(), entry_name)) if entry_type == "SERIES": gse_name = entry_name series_counter += 1 if series_counter > 1: raise Exception( "GSE file should contain only one series entry!") is_data, data_group = next(groupper) message = ("The key is not False, probably there is an " "error in the SOFT file") assert not is_data, message metadata = parse_metadata(data_group) elif entry_type == "SAMPLE": is_data, data_group = next(groupper) gsms[entry_name] = parse_GSM(data_group, entry_name) elif entry_type == "PLATFORM": is_data, data_group = next(groupper) gpls[entry_name] = parse_GPL(data_group, entry_name) elif entry_type == "DATABASE": is_data, data_group = next(groupper) database_metadata = parse_metadata(data_group) database = GEODatabase(name=entry_name, metadata=database_metadata) else: logger.error("Cannot recognize type %s" % entry_type) gse = GSE(name=gse_name, metadata=metadata, gpls=gpls, gsms=gsms, database=database) return gse
def parse_GSE(filepath)
Parse GSE SOFT file. Args: filepath (:obj:`str`): Path to GSE SOFT file. Returns: :obj:`GEOparse.GSE`: A GSE object.
2.929906
2.877392
1.018251
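A minimal usage sketch with a hypothetical local file:

gse = parse_GSE("./GSE1563_family.soft.gz")
print(gse.name, len(gse.gsms), len(gse.gpls))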
dataset_lines = [] subsets = {} database = None dataset_name = None with utils.smart_open(filepath) as soft: groupper = groupby(soft, lambda x: x.startswith("^")) for is_new_entry, group in groupper: if is_new_entry: entry_type, entry_name = __parse_entry(next(group)) logger.debug("%s: %s" % (entry_type.upper(), entry_name)) if entry_type == "SUBSET": is_data, data_group = next(groupper) message = ("The key is not False, probably there is an " "error in the SOFT file") assert not is_data, message subset_metadata = parse_metadata(data_group) subsets[entry_name] = GDSSubset(name=entry_name, metadata=subset_metadata) elif entry_type == "DATABASE": is_data, data_group = next(groupper) message = ("The key is not False, probably there is an " "error in the SOFT file") assert not is_data, message database_metadata = parse_metadata(data_group) database = GEODatabase(name=entry_name, metadata=database_metadata) elif entry_type == "DATASET": is_data, data_group = next(groupper) dataset_name = entry_name for line in data_group: dataset_lines.append(line.rstrip()) else: logger.error("Cannot recognize type %s" % entry_type) metadata = parse_metadata(dataset_lines) columns = parse_GDS_columns(dataset_lines, subsets) table = parse_table_data(dataset_lines) return GDS(name=dataset_name, metadata=metadata, columns=columns, table=table, subsets=subsets, database=database)
def parse_GDS(filepath)
Parse GDS SOFT file. Args: filepath (:obj:`str`): Path to GDS SOFT file. Returns: :obj:`GEOparse.GDS`: A GDS object.
2.876534
2.798678
1.027819
try: os.makedirs(path_to_dir) except OSError as e: # Python >2.5 if e.errno == EEXIST and os.path.isdir(path_to_dir): logger.debug( "Directory %s already exists. Skipping." % path_to_dir) else: raise e
def mkdir_p(path_to_dir)
Make directory(ies). This function behaves like mkdir -p. Args: path_to_dir (:obj:`str`): Path to the directory to make.
1.999316
2.217337
0.901674
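For example (a sketch): intermediate directories are created and an existing directory is not an error:

mkdir_p("./data/GSE1563_Supp")
mkdir_p("./data/GSE1563_Supp")  # second call just logs at debug level and returns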
if aspera and url.startswith("http"):
    logger.warning("Aspera Connect allows only FTP servers - falling back "
                   "to normal download")
    aspera = False
try:
    fn = Downloader(
        url,
        outdir=os.path.dirname(destination_path))
    if aspera:
        fn.download_aspera(
            user="anonftp",
            host="ftp-trace.ncbi.nlm.nih.gov",
            silent=silent)
    else:
        fn.download(silent=silent, force=force)
except URLError:
    logger.error("Cannot find file %s" % url)
def download_from_url(url, destination_path, force=False, aspera=False, silent=False)
Download file from remote server. If the file is already downloaded and the ``force`` flag is on, the file will be removed first. Args: url (:obj:`str`): Path to the file on the remote server (including the file name). destination_path (:obj:`str`): Path to the file on the local machine (including the file name). force (:obj:`bool`): If the file exists, force overwrite. Defaults to False. aspera (:obj:`bool`): Download with Aspera Connect. Defaults to False. silent (:obj:`bool`): Do not print any messages. Defaults to False.
4.766472
4.780221
0.997124
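A hedged sketch; the URL follows the usual GEO FTP series layout but is a placeholder:

download_from_url(
    "ftp://ftp.ncbi.nlm.nih.gov/geo/series/GSE1nnn/GSE1563/soft/"
    "GSE1563_family.soft.gz",
    "./GSE1563_family.soft.gz",
    force=False,
)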
if filepath[-2:] == "gz": mode = "rt" fopen = gzip.open else: mode = "r" fopen = open if sys.version_info[0] < 3: fh = fopen(filepath, mode) else: fh = fopen(filepath, mode, errors="ignore") try: yield fh except IOError: fh.close() finally: fh.close()
def smart_open(filepath)
Open file intelligently depending on the source and python version. Args: filepath (:obj:`str`): Path to the file. Yields: Context manager for file handle.
2.163002
2.460742
0.879004
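The same loop works for plain and gzipped SOFT files (a sketch with a hypothetical file):

with smart_open("GSE1563_family.soft.gz") as fh:
    for line in fh:
        if line.startswith("^"):
            print(line.rstrip())  # entry headers only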
print("Tuning with GP tuner for %s iterations" % TUNING_BUDGET_PER_ITER) for i in range(TUNING_BUDGET_PER_ITER): params = tuner.propose() # create model using proposed hyperparams from tuner model = generate_model(params) model.fit(X, y) predicted = model.predict(X_val) score = accuracy_score(predicted, y_val) # record hyper-param combination and score for tuning tuner.add(params, score) print("Final score:", tuner._best_score)
def tune_pipeline(X, y, X_val, y_val, generate_model, tuner)
Tunes a specified pipeline with the specified tuner for TUNING_BUDGET_PER_ITER (3) iterations. Params: X: np.array of X training data y: np.array of y training data X_val: np.array of X validation data y_val: np.array of y validation data generate_model: function that returns an sklearn model to fit tuner: BTB tuner object for tuning hyperparameters
4.018304
3.462165
1.160633
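A hedged sketch of wiring this up with an older-style BTB GP tuner; the import paths and HyperParameter API are assumptions about the BTB version in use, and X_train/y_train/X_val/y_val are assumed to be prepared splits:

from btb import HyperParameter, ParamTypes
from btb.tuning import GP
from sklearn.ensemble import RandomForestClassifier

# one tunable integer hyperparameter in [10, 200]
tunables = [('n_estimators', HyperParameter(ParamTypes.INT, [10, 200]))]
tuner = GP(tunables)

def generate_model(params):
    # params is the dict proposed by tuner.propose()
    return RandomForestClassifier(**params)

tune_pipeline(X_train, y_train, X_val, y_val, generate_model, tuner)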
return max(choice_rewards, key=lambda a: np.mean(choice_rewards[a]))
def bandit(self, choice_rewards)
Return the choice to take next using a multi-armed bandit. Accepts a mapping of choices to rewards which indicate their historical performance, and returns the choice that we should make next in order to maximize expected reward in the long term. The default implementation is to return the arm with the highest average score. Args: choice_rewards (Dict[object, List[float]]): maps choice IDs to lists of rewards. Returns: str: the name of the choice to take next.
5.010492
4.981711
1.005777
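A worked example of the default rule (a sketch, assuming `selector` is a Selector over these choices):

selector.bandit({'svm': [0.60, 0.70], 'rf': [0.66, 0.68]})
# -> 'rf': mean reward 0.67 beats 0.65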
choice_rewards = {} for choice, scores in choice_scores.items(): if choice not in self.choices: continue choice_rewards[choice] = self.compute_rewards(scores) return self.bandit(choice_rewards)
def select(self, choice_scores)
Select the next best choice to make. Args: choice_scores (Dict[object, List[float]]): Mapping of choice to list of scores for each possible choice. The caller is responsible for making sure each choice that is possible at this juncture is represented in the dict, even those with no scores. Score lists should be in ascending chronological order, that is, the score from the earliest trial should be listed first. For example:: { 1: [0.56, 0.61, 0.33, 0.67], 2: [0.25, 0.58], 3: [0.60, 0.65, 0.68], }
3.670487
4.527082
0.810784
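Using the mapping from the docstring above (a sketch, assuming the selector was built with choices [1, 2, 3] and the base class's identity compute_rewards):

selector.select({
    1: [0.56, 0.61, 0.33, 0.67],   # mean 0.5425
    2: [0.25, 0.58],               # mean 0.415
    3: [0.60, 0.65, 0.68],         # mean ~0.643
})
# -> 3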
if len(scores) > self.k: scores = np.copy(scores) inds = np.argsort(scores)[:-self.k] scores[inds] = np.nan return list(scores)
def compute_rewards(self, scores)
Retain the K best scores, and replace the rest with nans
3.714918
2.526042
1.470648
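A worked example with k=2; argsort keeps the indices of the two best scores and NaNs out the rest (the constructor signature is an assumption about this BTB version):

rewarder = BestKReward(choices=['a', 'b'], k=2)  # hypothetical construction
rewarder.compute_rewards([0.5, 0.9, 0.7, 0.8])
# -> [nan, 0.9, nan, 0.8]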
k = self.k m = max(len(scores) - k, 0) best_scores = sorted(scores)[-k - 1:] velocities = np.diff(best_scores) nans = np.full(m, np.nan) return list(velocities) + list(nans)
def compute_rewards(self, scores)
Compute the velocity of the best scores. The velocities are the k distances between the k+1 best scores.
4.606043
3.297251
1.396934
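A worked trace of the arithmetic:

# With k=2 and scores [0.4, 0.6, 0.9, 0.7]:
#   best three sorted ascending: [0.6, 0.7, 0.9] -> velocities [0.1, 0.2]
#   padded with m = 2 NaNs -> [0.1, 0.2, nan, nan]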
# decompose X and generate the rankings of the elements in the
# decomposed matrix
dpp_vector_decomposed = self.mf_model.transform(dpp_vector)
dpp_vector_ranked = stats.rankdata(
    dpp_vector_decomposed,
    method='dense',
)
max_agreement_index = None
max_agreement = -1  # min value of Kendall tau agreement
for i in range(self.dpp_ranked.shape[0]):
    # calculate agreement between current row and X
    agreement, _ = stats.kendalltau(
        dpp_vector_ranked,
        self.dpp_ranked[i, :],
    )
    if agreement > max_agreement:
        max_agreement_index = i
        max_agreement = agreement
if max_agreement_index is None:
    max_agreement_index = np.random.randint(self.dpp_matrix.shape[0])
# store the row with the highest agreement for prediction
self.matching_dataset = self.dpp_matrix[max_agreement_index, :]
def fit(self, dpp_vector)
Find the row of self.dpp_matrix that most closely corresponds to dpp_vector by means of Kendall tau distance. https://en.wikipedia.org/wiki/Kendall_tau_distance Args: dpp_vector (np.array): Array with shape (n_components, )
3.832356
3.514818
1.090343
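The two SciPy calls at the heart of this method, traced on toy data (a sketch):

from scipy import stats

stats.rankdata([0.2, 0.9, 0.5], method='dense')      # -> array([1., 3., 2.])
stats.kendalltau([1, 3, 2], [1, 2, 3]).correlation   # -> ~0.333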
# first, train a gaussian process like normal super(GPEiVelocity, self).fit(X, y) # probability of uniform self.POU = 0 if len(y) >= self.r_minimum: # get the best few scores so far, and compute the average distance # between them. top_y = sorted(y)[-self.N_BEST_Y:] velocities = [top_y[i + 1] - top_y[i] for i in range(len(top_y) - 1)] # the probability of returning random parameters scales inversely with # the "velocity" of top scores. self.POU = np.exp(self.MULTIPLIER * np.mean(velocities))
def fit(self, X, y)
Train a Gaussian process like normal, then compute a "Probability Of Uniform selection" (POU) value.
8.721124
6.681819
1.305202
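A worked trace of the POU arithmetic, assuming the class constant MULTIPLIER is -100 (an assumption about this BTB version):

# top_y = [0.80, 0.81, 0.82] -> velocities [0.01, 0.01], mean 0.01
# POU = exp(-100 * 0.01) = exp(-1) ~ 0.368
# so predict() would fall back to uniform random sampling ~37% of the time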
if np.random.random() < self.POU: # choose params at random to avoid local minima return Uniform(self.tunables).predict(X) return super(GPEiVelocity, self).predict(X)
def predict(self, X)
Use the POU value we computed in fit to choose randomly between GPEi and uniform random selection.
14.615937
9.976964
1.464968
# the list is in ascending chronological order, so the k most recent
# scores are at the end; zero out everything before them
for i in range(len(scores) - self.k):
    scores[i] = 0.
return scores
def compute_rewards(self, scores)
Retain the K most recent scores, and replace the rest with zeros
3.995443
2.799805
1.427043
# if we don't have enough scores to do K-selection, fall back to UCB1
min_num_scores = min([len(s) for s in choice_scores.values()])
if min_num_scores >= K_MIN:
    logger.info('{klass}: using Recent K bandit selection'.format(
        klass=type(self).__name__))
    reward_func = self.compute_rewards
else:
    logger.warning(
        '{klass}: Not enough scores to do K-selection; using plain UCB1'
        .format(klass=type(self).__name__))
    reward_func = super(RecentKReward, self).compute_rewards

choice_rewards = {}
for choice, scores in choice_scores.items():
    if choice not in self.choices:
        continue
    choice_rewards[choice] = reward_func(scores)

return self.bandit(choice_rewards)
def select(self, choice_scores)
Use each learner's k most recent scores as rewards for the bandit calculation
4.396058
4.112538
1.06894
# take the k + 1 most recent scores so we can get k velocities recent_scores = scores[:-self.k - 2:-1] velocities = [recent_scores[i] - recent_scores[i + 1] for i in range(len(recent_scores) - 1)] # pad the list out with zeros, so the length of the list is # maintained zeros = (len(scores) - self.k) * [0] return velocities + zeros
def compute_rewards(self, scores)
Compute the velocity of the k+1 most recent scores. The velocity is the average distance between scores. Return a list with those k velocities padded out with zeros so that the count remains the same.
5.089855
3.537587
1.438793
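A worked trace with k=2:

# scores = [0.1, 0.2, 0.5, 0.9] (chronological)
# recent_scores = scores[:-4:-1] = [0.9, 0.5, 0.2] (newest first)
# velocities = [0.4, 0.3]; padded -> [0.4, 0.3, 0, 0]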
return -1 * ((a - x)**2 + b * (y - x**2)**2)
def rosenbrock(x, y, a=1, b=100)
Bigger is better; global optimum at x=a, y=a**2
3.141162
3.229031
0.972788
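Spot checks of the sign convention (bigger is better):

rosenbrock(1, 1)        # -> 0, the global optimum for a=1
rosenbrock(0, 0)        # -> -1
rosenbrock(2, 4, a=2)   # -> 0, the optimum moves with a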
# choose algorithm using a bandit alg_scores = {} for algorithm, choices in self.by_algorithm.items(): # only make arms for algorithms that have options if not set(choices) & set(choice_scores.keys()): continue # sum up lists to get a list of all the scores from any run of this # algorithm sublists = [choice_scores.get(c, []) for c in choices] alg_scores[algorithm] = sum(sublists, []) best_algorithm = self.bandit(alg_scores) # now use only the frozen sets from the chosen algorithm best_subset = self.by_algorithm[best_algorithm] normal_ucb1 = UCB1(choices=best_subset) return normal_ucb1.select(choice_scores)
def select(self, choice_scores)
Groups the frozen sets by algorithm and first chooses an algorithm based on the traditional UCB1 criteria. Next, from that algorithm's frozen sets, makes the final set choice.
6.015516
5.269899
1.141486
# count the larger of 1 and the total number of arm pulls total_pulls = max(1, sum(len(r) for r in choice_rewards.values())) def ucb1(choice): rewards = choice_rewards[choice] choice_pulls = max(len(rewards), 1) average_reward = np.nanmean(rewards) if len(rewards) else 0 error = np.sqrt(2.0 * np.log(total_pulls) / choice_pulls) return average_reward + error return max(shuffle(choice_rewards), key=ucb1)
def bandit(self, choice_rewards)
Multi-armed bandit method which chooses the arm for which the upper confidence bound (UCB) of expected reward is greatest. If there are multiple arms with the same UCB1 index, then one is chosen at random. An explanation is here: https://www.cs.bham.ac.uk/internal/courses/robotics/lectures/ucb1.pdf
3.96859
3.816442
1.039866
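A worked trace of the UCB1 index with two arms and three total pulls:

# 'a': rewards [0.6, 0.7] -> mean 0.65, bonus sqrt(2*ln(3)/2) ~ 1.048 -> 1.698
# 'b': rewards [0.9]      -> mean 0.90, bonus sqrt(2*ln(3)/1) ~ 1.482 -> 2.382
# 'b' wins: fewer pulls mean a wider confidence bonus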
# get the k + 1 best scores in descending order best_scores = sorted(scores, reverse=True)[:self.k + 1] velocities = [best_scores[i] - best_scores[i + 1] for i in range(len(best_scores) - 1)] # pad the list out with zeros to maintain the length of the list zeros = (len(scores) - self.k) * [0] return velocities + zeros
def compute_rewards(self, scores)
Compute the "velocity" of (average distance between) the k+1 best scores. Return a list with those k velocities padded out with zeros so that the count remains the same.
3.737048
2.661022
1.404366