code | signature | docstring | loss_without_docstring | loss_with_docstring | factor
string | string | string | float64 | float64 | float64
---|---|---|---|---|---|
" Return `False`, or the Digraph symbol to be used. "
if cli.quoted_insert:
return '^'
if cli.vi_state.waiting_for_digraph:
if cli.vi_state.digraph_symbol1:
return cli.vi_state.digraph_symbol1
return '?'
return False | def _get_digraph_char(self, cli) | Return `False`, or the Digraph symbol to be used. | 9.250873 | 6.059017 | 1.526794 |
digraph_char = self._get_digraph_char(cli)
if digraph_char:
cpos = new_screen.cursor_position
new_screen.data_buffer[cpos.y][cpos.x] = \
_CHAR_CACHE[digraph_char, Token.Digraph] | def _highlight_digraph(self, cli, new_screen) | When we are in Vi digraph mode, put a question mark underneath the
cursor. | 4.576814 | 4.533896 | 1.009466 |
key_buffer = cli.input_processor.key_buffer
if key_buffer and _in_insert_mode(cli) and not cli.is_done:
# The textual data for the given key. (Can be a VT100 escape
# sequence.)
data = key_buffer[-1].data
# Display only if this is a 1 cell width character.
if get_cwidth(data) == 1:
cpos = new_screen.cursor_position
new_screen.data_buffer[cpos.y][cpos.x] = \
_CHAR_CACHE[data, Token.PartialKeyBinding] | def _show_input_processor_key_buffer(self, cli, new_screen) | When the user is typing a key binding that consists of several keys,
display the last pressed key if the user is in insert mode and the key
is meaningful to be displayed.
E.g. Some people want to bind 'jj' to escape in Vi insert mode. But the
first 'j' needs to be displayed in order to get some feedback. | 8.166471 | 7.407917 | 1.102398 |
cursor_line_token = (':', ) + self.cursorline_token
cursor_column_token = (':', ) + self.cursorcolumn_token
data_buffer = new_screen.data_buffer
# Highlight cursor line.
if self.cursorline(cli):
row = data_buffer[cpos.y]
for x in range(x, x + width):
original_char = row[x]
row[x] = _CHAR_CACHE[
original_char.char, original_char.token + cursor_line_token]
# Highlight cursor column.
if self.cursorcolumn(cli):
for y2 in range(y, y + height):
row = data_buffer[y2]
original_char = row[cpos.x]
row[cpos.x] = _CHAR_CACHE[
original_char.char, original_char.token + cursor_column_token]
# Highlight color columns
for cc in self.get_colorcolumns(cli):
assert isinstance(cc, ColorColumn)
color_column_token = (':', ) + cc.token
column = cc.position
for y2 in range(y, y + height):
row = data_buffer[y2]
original_char = row[column]
row[column] = _CHAR_CACHE[
original_char.char, original_char.token + color_column_token] | def _highlight_cursorlines(self, cli, new_screen, cpos, x, y, width, height) | Highlight cursor row/column. | 2.673484 | 2.62054 | 1.020203 |
xpos = write_position.xpos + move_x
ypos = write_position.ypos
margin_write_position = WritePosition(xpos, ypos, width, write_position.height)
self._copy_body(cli, lazy_screen, new_screen, margin_write_position, 0, width) | def _copy_margin(self, cli, lazy_screen, new_screen, write_position, move_x, width) | Copy characters from the margin screen to the real screen. | 2.650631 | 2.594526 | 1.021624 |
scroll_offsets_bottom = self.scroll_offsets.bottom
scroll_offsets_top = self.scroll_offsets.top
# We don't have horizontal scrolling.
self.horizontal_scroll = 0
# If the current line consumes more than the whole window height,
# then we have to scroll vertically inside this line. (We don't take
# the scroll offsets into account for this.)
# Also, ignore the scroll offsets in this case. Just set the vertical
# scroll to this line.
if ui_content.get_height_for_line(ui_content.cursor_position.y, width) > height - scroll_offsets_top:
# Calculate the height of the text before the cursor, with the line
# containing the cursor included, and the character below the
# cursor included as well.
line = explode_tokens(ui_content.get_line(ui_content.cursor_position.y))
text_before_cursor = token_list_to_text(line[:ui_content.cursor_position.x + 1])
text_before_height = UIContent.get_height_for_text(text_before_cursor, width)
# Adjust scroll offset.
self.vertical_scroll = ui_content.cursor_position.y
self.vertical_scroll_2 = min(text_before_height - 1, self.vertical_scroll_2)
self.vertical_scroll_2 = max(0, text_before_height - height, self.vertical_scroll_2)
return
else:
self.vertical_scroll_2 = 0
# Current line doesn't consume the whole height. Take scroll offsets into account.
def get_min_vertical_scroll():
# Make sure that the cursor line is not below the bottom.
# (Calculate how many lines can be shown between the cursor and the bottom.)
used_height = 0
prev_lineno = ui_content.cursor_position.y
for lineno in range(ui_content.cursor_position.y, -1, -1):
used_height += ui_content.get_height_for_line(lineno, width)
if used_height > height - scroll_offsets_bottom:
return prev_lineno
else:
prev_lineno = lineno
return 0
def get_max_vertical_scroll():
# Make sure that the cursor line is not above the top.
prev_lineno = ui_content.cursor_position.y
used_height = 0
for lineno in range(ui_content.cursor_position.y - 1, -1, -1):
used_height += ui_content.get_height_for_line(lineno, width)
if used_height > scroll_offsets_top:
return prev_lineno
else:
prev_lineno = lineno
return prev_lineno
def get_topmost_visible():
prev_lineno = ui_content.line_count - 1
used_height = 0
for lineno in range(ui_content.line_count - 1, -1, -1):
used_height += ui_content.get_height_for_line(lineno, width)
if used_height > height:
return prev_lineno
else:
prev_lineno = lineno
return prev_lineno
# Scroll vertically. (Make sure that the whole line which contains the
# cursor is visible.)
topmost_visible = get_topmost_visible()
# Note: the `min(topmost_visible, ...)` is to make sure that we
# don't require scrolling up because of the bottom scroll offset,
# when we are at the end of the document.
self.vertical_scroll = max(self.vertical_scroll, min(topmost_visible, get_min_vertical_scroll()))
self.vertical_scroll = min(self.vertical_scroll, get_max_vertical_scroll())
# Disallow scrolling beyond bottom?
if not self.allow_scroll_beyond_bottom(cli):
self.vertical_scroll = min(self.vertical_scroll, topmost_visible) | def _scroll_when_linewrapping(self, ui_content, width, height, cli) | Scroll to make sure the cursor position is visible and that we maintain
the requested scroll offset.
Set `self.horizontal_scroll/vertical_scroll`. | 2.890732 | 2.842352 | 1.017021 |
cursor_position = ui_content.cursor_position or Point(0, 0)
# Without line wrapping, we will never have to scroll vertically inside
# a single line.
self.vertical_scroll_2 = 0
if ui_content.line_count == 0:
self.vertical_scroll = 0
self.horizontal_scroll = 0
return
else:
current_line_text = token_list_to_text(ui_content.get_line(cursor_position.y))
def do_scroll(current_scroll, scroll_offset_start, scroll_offset_end,
cursor_pos, window_size, content_size):
" Scrolling algorithm. Used for both horizontal and vertical scrolling. "
# Calculate the scroll offset to apply.
# This can obviously never be more than half the screen size. Also, when the
# cursor appears at the top or bottom, we don't apply the offset.
scroll_offset_start = int(min(scroll_offset_start, window_size / 2, cursor_pos))
scroll_offset_end = int(min(scroll_offset_end, window_size / 2,
content_size - 1 - cursor_pos))
# Prevent negative scroll offsets.
if current_scroll < 0:
current_scroll = 0
# Scroll back if we scrolled too much and there's still space to show more of the document.
if (not self.allow_scroll_beyond_bottom(cli) and
current_scroll > content_size - window_size):
current_scroll = max(0, content_size - window_size)
# Scroll up if cursor is before visible part.
if current_scroll > cursor_pos - scroll_offset_start:
current_scroll = max(0, cursor_pos - scroll_offset_start)
# Scroll down if cursor is after visible part.
if current_scroll < (cursor_pos + 1) - window_size + scroll_offset_end:
current_scroll = (cursor_pos + 1) - window_size + scroll_offset_end
return current_scroll
# When a preferred scroll is given, take that first into account.
if self.get_vertical_scroll:
self.vertical_scroll = self.get_vertical_scroll(self)
assert isinstance(self.vertical_scroll, int)
if self.get_horizontal_scroll:
self.horizontal_scroll = self.get_horizontal_scroll(self)
assert isinstance(self.horizontal_scroll, int)
# Update horizontal/vertical scroll to make sure that the cursor
# remains visible.
offsets = self.scroll_offsets
self.vertical_scroll = do_scroll(
current_scroll=self.vertical_scroll,
scroll_offset_start=offsets.top,
scroll_offset_end=offsets.bottom,
cursor_pos=ui_content.cursor_position.y,
window_size=height,
content_size=ui_content.line_count)
self.horizontal_scroll = do_scroll(
current_scroll=self.horizontal_scroll,
scroll_offset_start=offsets.left,
scroll_offset_end=offsets.right,
cursor_pos=get_cwidth(current_line_text[:ui_content.cursor_position.x]),
window_size=width,
# We can only analyse the current line. Calculating the width of
# all the lines is too expensive.
content_size=max(get_cwidth(current_line_text), self.horizontal_scroll + width)) | def _scroll_without_linewrapping(self, ui_content, width, height, cli) | Scroll to make sure the cursor position is visible and that we maintain
the requested scroll offset.
Set `self.horizontal_scroll/vertical_scroll`. | 3.020007 | 2.945931 | 1.025145 |
if mouse_event.event_type == MouseEventType.SCROLL_DOWN:
self._scroll_down(cli)
elif mouse_event.event_type == MouseEventType.SCROLL_UP:
self._scroll_up(cli) | def _mouse_handler(self, cli, mouse_event) | Mouse handler. Called when the UI control doesn't handle this
particular event. | 2.293267 | 2.228006 | 1.029291 |
" Scroll window down. "
info = self.render_info
if self.vertical_scroll < info.content_height - info.window_height:
if info.cursor_position.y <= info.configured_scroll_offsets.top:
self.content.move_cursor_down(cli)
self.vertical_scroll += 1 | def _scroll_down(self, cli) | Scroll window down. | 7.529021 | 6.818451 | 1.104213 |
" Scroll window up. "
info = self.render_info
if info.vertical_scroll > 0:
# TODO: not entirely correct yet in case of line wrapping and long lines.
if info.cursor_position.y >= info.window_height - 1 - info.configured_scroll_offsets.bottom:
self.content.move_cursor_up(cli)
self.vertical_scroll -= 1 | def _scroll_up(self, cli) | Scroll window up. | 10.137622 | 9.575111 | 1.058747 |
if not key_vals:
return
write_items = self._update(key_vals, overwrite)
self._root._root_set(self._path, write_items)
self._root._write(commit=True) | def update(self, key_vals=None, overwrite=True) | Locked keys will be overwritten unless overwrite=False.
Otherwise, written keys will be added to the "locked" list. | 6.682146 | 6.970603 | 0.958618 |
if isinstance(json_value, dict):
if json_value.get("_type") in H5_TYPES:
return self.read_h5(path, json_value)
elif json_value.get("_type") == 'data-frame':
wandb.termerror(
'This data frame was saved via the wandb data API. Contact [email protected] for help.')
return None
# TODO: transform wandb objects and plots
else:
return SummarySubDict(self, path)
else:
return json_value | def _decode(self, path, json_value) | Decode a `dict` encoded by `Summary._encode()`, loading h5 objects.
h5 objects may be very large, so we won't have loaded them automatically. | 7.126941 | 6.372102 | 1.11846 |
# Constructs a new `dict` tree in `json_value` that discards and/or
# encodes objects that aren't JSON serializable.
if isinstance(value, dict):
json_value = {}
for key, value in six.iteritems(value):
json_value[key] = self._encode(value, path_from_root + (key,))
return json_value
else:
path = ".".join(path_from_root)
if util.is_pandas_data_frame(value):
return util.encode_data_frame(path, value, self._run)
else:
friendly_value, converted = util.json_friendly(data_types.val_to_json(path, value))
json_value, compressed = util.maybe_compress_summary(friendly_value, util.get_h5_typename(value))
if compressed:
self.write_h5(path_from_root, friendly_value)
return json_value
| def _encode(self, value, path_from_root) | Normalize, compress, and encode sub-objects for backend storage.
value: Object to encode.
path_from_root: `tuple` of key strings from the top-level summary to the
current `value`.
Returns:
A new tree of dict's with large objects replaced with dictionaries
with "_type" entries that say which type the original data was. | 4.862038 | 4.71911 | 1.030287 |
w = _current_window_for_event(event)
b = event.cli.current_buffer
if w and w.render_info:
info = w.render_info
ui_content = info.ui_content
# Height to scroll.
scroll_height = info.window_height
if half:
scroll_height //= 2
# Calculate how many lines is equivalent to that vertical space.
y = b.document.cursor_position_row + 1
height = 0
while y < ui_content.line_count:
line_height = info.get_height_for_line(y)
if height + line_height < scroll_height:
height += line_height
y += 1
else:
break
b.cursor_position = b.document.translate_row_col_to_index(y, 0) | def scroll_forward(event, half=False) | Scroll window down. | 3.560864 | 3.505084 | 1.015914 |
w = find_window_for_buffer_name(event.cli, event.cli.current_buffer_name)
b = event.cli.current_buffer
if w:
# When the cursor is at the top, move to the next line. (Otherwise, only scroll.)
if w.render_info:
info = w.render_info
if w.vertical_scroll < info.content_height - info.window_height:
if info.cursor_position.y <= info.configured_scroll_offsets.top:
b.cursor_position += b.document.get_cursor_down_position()
w.vertical_scroll += 1 | def scroll_one_line_down(event) | scroll_offset += 1 | 5.181574 | 5.281573 | 0.981066 |
w = find_window_for_buffer_name(event.cli, event.cli.current_buffer_name)
b = event.cli.current_buffer
if w:
# When the cursor is at the bottom, move to the previous line. (Otherwise, only scroll.)
if w.render_info:
info = w.render_info
if w.vertical_scroll > 0:
first_line_height = info.get_height_for_line(info.first_visible_line())
cursor_up = info.cursor_position.y - (info.window_height - 1 - first_line_height -
info.configured_scroll_offsets.bottom)
# Move cursor up, as many steps as the height of the first line.
# TODO: not entirely correct yet, in case of line wrapping and many long lines.
for _ in range(max(0, cursor_up)):
b.cursor_position += b.document.get_cursor_up_position()
# Scroll window
w.vertical_scroll -= 1 | def scroll_one_line_up(event) | scroll_offset -= 1 | 5.961445 | 6.05723 | 0.984187 |
w = _current_window_for_event(event)
b = event.cli.current_buffer
if w and w.render_info:
# Scroll down one page.
line_index = max(w.render_info.last_visible_line(), w.vertical_scroll + 1)
w.vertical_scroll = line_index
b.cursor_position = b.document.translate_row_col_to_index(line_index, 0)
b.cursor_position += b.document.get_start_of_line_position(after_whitespace=True) | def scroll_page_down(event) | Scroll page down. (Prefer the cursor at the top of the page, after scrolling.) | 4.17026 | 4.103548 | 1.016257 |
w = _current_window_for_event(event)
b = event.cli.current_buffer
if w and w.render_info:
# Put cursor at the first visible line. (But make sure that the cursor
# moves at least one line up.)
line_index = max(0, min(w.render_info.first_visible_line(),
b.document.cursor_position_row - 1))
b.cursor_position = b.document.translate_row_col_to_index(line_index, 0)
b.cursor_position += b.document.get_start_of_line_position(after_whitespace=True)
# Set the scroll offset. We can safely set it to zero; the Window will
# make sure that it scrolls at least until the cursor becomes visible.
w.vertical_scroll = 0 | def scroll_page_up(event) | Scroll page up. (Prefer the cursor at the bottom of the page, after scrolling.) | 4.639122 | 4.524185 | 1.025405 |
width = get_cwidth(text)
# When the text is too wide, trim it.
if width > max_width:
# When there are no double width characters, just use slice operation.
if len(text) == width:
trimmed_text = (text[:max(1, max_width-3)] + '...')[:max_width]
return trimmed_text, len(trimmed_text)
# Otherwise, loop until we have the desired width. (Rather
# inefficient, but ok for now.)
else:
trimmed_text = ''
for c in text:
if get_cwidth(trimmed_text + c) <= max_width - 3:
trimmed_text += c
trimmed_text += '...'
return (trimmed_text, get_cwidth(trimmed_text))
else:
return text, width | def _trim_text(text, max_width) | Trim the text to `max_width`, append dots when the text is too long.
Returns (text, width) tuple. | 3.867836 | 3.81333 | 1.014294 |
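A small usage sketch of the trimming behaviour above, assuming `_trim_text` (and its `get_cwidth` helper) are in scope:

```python
# Plain ASCII text wider than max_width takes the fast slice path:
# 'hello world' (11 cells) trimmed to 8 cells becomes 'hello...'.
text, width = _trim_text('hello world', 8)
print(text, width)          # -> hello... 8

# Text that already fits is returned unchanged.
print(_trim_text('hi', 8))  # -> ('hi', 2)
```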
complete_state = cli.current_buffer.complete_state
if complete_state:
completions = complete_state.current_completions
index = complete_state.complete_index # Can be None!
# Calculate width of completions menu.
menu_width = self._get_menu_width(width, complete_state)
menu_meta_width = self._get_menu_meta_width(width - menu_width, complete_state)
show_meta = self._show_meta(complete_state)
def get_line(i):
c = completions[i]
is_current_completion = (i == index)
result = self._get_menu_item_tokens(c, is_current_completion, menu_width)
if show_meta:
result += self._get_menu_item_meta_tokens(c, is_current_completion, menu_meta_width)
return result
return UIContent(get_line=get_line,
cursor_position=Point(x=0, y=index or 0),
line_count=len(completions),
default_char=Char(' ', self.token))
return UIContent() | def create_content(self, cli, width, height) | Create a UIContent object for this control. | 3.641399 | 3.583312 | 1.01621 |
return min(max_width, max(self.MIN_WIDTH, max(get_cwidth(c.display)
for c in complete_state.current_completions) + 2)) | def _get_menu_width(self, max_width, complete_state) | Return the width of the main column. | 7.756499 | 6.820308 | 1.137265 |
if self._show_meta(complete_state):
return min(max_width, max(get_cwidth(c.display_meta)
for c in complete_state.current_completions) + 2)
else:
return 0 | def _get_menu_meta_width(self, max_width, complete_state) | Return the width of the meta column. | 5.971323 | 5.006147 | 1.192798 |
b = cli.current_buffer
if mouse_event.event_type == MouseEventType.MOUSE_UP:
# Select completion.
b.go_to_completion(mouse_event.position.y)
b.complete_state = None
elif mouse_event.event_type == MouseEventType.SCROLL_DOWN:
# Scroll up.
b.complete_next(count=3, disable_wrap_around=True)
elif mouse_event.event_type == MouseEventType.SCROLL_UP:
# Scroll down.
b.complete_previous(count=3, disable_wrap_around=True) | def mouse_handler(self, cli, mouse_event) | Handle mouse events: clicking and scrolling. | 3.10646 | 2.940947 | 1.056279 |
complete_state = cli.current_buffer.complete_state
column_width = self._get_column_width(complete_state)
result = int(column_width * math.ceil(len(complete_state.current_completions) / float(self.min_rows)))
# When the desired width is still more than the maximum available,
# reduce by removing columns until we are less than the available
# width.
while result > column_width and result > max_available_width - self._required_margin:
result -= column_width
return result + self._required_margin | def preferred_width(self, cli, max_available_width) | Preferred width: prefer to use at least min_rows, but otherwise as much
as possible horizontally. | 5.352401 | 4.987311 | 1.073204 |
complete_state = cli.current_buffer.complete_state
column_width = self._get_column_width(complete_state)
column_count = max(1, (width - self._required_margin) // column_width)
return int(math.ceil(len(complete_state.current_completions) / float(column_count))) | def preferred_height(self, cli, width, max_available_height, wrap_lines) | Preferred height: as much as needed in order to display all the completions. | 4.620636 | 3.788617 | 1.21961 |
complete_state = cli.current_buffer.complete_state
column_width = self._get_column_width(complete_state)
self._render_pos_to_completion = {}
def grouper(n, iterable, fillvalue=None):
" grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx "
args = [iter(iterable)] * n
return zip_longest(fillvalue=fillvalue, *args)
def is_current_completion(completion):
" Returns True when this completion is the currently selected one. "
return complete_state.complete_index is not None and completion == complete_state.current_completion
# Space required outside of the regular columns, for displaying the
# left and right arrow.
HORIZONTAL_MARGIN_REQUIRED = 3
if complete_state:
# There should be at least one column, but it cannot be wider than
# the available width.
column_width = min(width - HORIZONTAL_MARGIN_REQUIRED, column_width)
# However, when the columns tend to be very wide, because there are
# some very wide entries, shrink it anyway.
if column_width > self.suggested_max_column_width:
# `column_width` can still be bigger than `suggested_max_column_width`,
# but if there is place for two columns, we divide by two.
column_width //= (column_width // self.suggested_max_column_width)
visible_columns = max(1, (width - self._required_margin) // column_width)
columns_ = list(grouper(height, complete_state.current_completions))
rows_ = list(zip(*columns_))
# Make sure the current completion is always visible: update scroll offset.
selected_column = (complete_state.complete_index or 0) // height
self.scroll = min(selected_column, max(self.scroll, selected_column - visible_columns + 1))
render_left_arrow = self.scroll > 0
render_right_arrow = self.scroll < len(rows_[0]) - visible_columns
# Write completions to screen.
tokens_for_line = []
for row_index, row in enumerate(rows_):
tokens = []
middle_row = row_index == len(rows_) // 2
# Draw left arrow if we have hidden completions on the left.
if render_left_arrow:
tokens += [(Token.Scrollbar, '<' if middle_row else ' ')]
# Draw row content.
for column_index, c in enumerate(row[self.scroll:][:visible_columns]):
if c is not None:
tokens += self._get_menu_item_tokens(c, is_current_completion(c), column_width)
# Remember render position for mouse click handler.
for x in range(column_width):
self._render_pos_to_completion[(column_index * column_width + x, row_index)] = c
else:
tokens += [(self.token.Completion, ' ' * column_width)]
# Draw trailing padding. (_get_menu_item_tokens only returns padding on the left.)
tokens += [(self.token.Completion, ' ')]
# Draw right arrow if we have hidden completions on the right.
if render_right_arrow:
tokens += [(Token.Scrollbar, '>' if middle_row else ' ')]
# Newline.
tokens_for_line.append(tokens)
else:
tokens = []
self._rendered_rows = height
self._rendered_columns = visible_columns
self._total_columns = len(columns_)
self._render_left_arrow = render_left_arrow
self._render_right_arrow = render_right_arrow
self._render_width = column_width * visible_columns + render_left_arrow + render_right_arrow + 1
def get_line(i):
return tokens_for_line[i]
return UIContent(get_line=get_line, line_count=len(rows_)) | def create_content(self, cli, width, height) | Create a UIContent object for this menu. | 3.729002 | 3.6745 | 1.014832 |
return max(get_cwidth(c.display) for c in complete_state.current_completions) + 1 | def _get_column_width(self, complete_state) | Return the width of each column. | 9.766046 | 8.036445 | 1.21522 |
b = cli.current_buffer
def scroll_left():
b.complete_previous(count=self._rendered_rows, disable_wrap_around=True)
self.scroll = max(0, self.scroll - 1)
def scroll_right():
b.complete_next(count=self._rendered_rows, disable_wrap_around=True)
self.scroll = min(self._total_columns - self._rendered_columns, self.scroll + 1)
if mouse_event.event_type == MouseEventType.SCROLL_DOWN:
scroll_right()
elif mouse_event.event_type == MouseEventType.SCROLL_UP:
scroll_left()
elif mouse_event.event_type == MouseEventType.MOUSE_UP:
x = mouse_event.position.x
y = mouse_event.position.y
# Mouse click on left arrow.
if x == 0:
if self._render_left_arrow:
scroll_left()
# Mouse click on right arrow.
elif x == self._render_width - 1:
if self._render_right_arrow:
scroll_right()
# Mouse click on completion.
else:
completion = self._render_pos_to_completion.get((x, y))
if completion:
b.apply_completion(completion) | def mouse_handler(self, cli, mouse_event) | Handle scroll and click events. | 2.520857 | 2.480923 | 1.016096 |
if cli.current_buffer.complete_state:
state = cli.current_buffer.complete_state
return 2 + max(get_cwidth(c.display_meta) for c in state.current_completions)
else:
return 0 | def preferred_width(self, cli, max_available_width) | Report the width of the longest meta text as the preferred width of this control.
It could be that we use less width, but this way, we're sure that the
layout doesn't change when we select another completion (E.g. that
completions are suddenly shown in more or fewer columns.) | 6.874619 | 4.831235 | 1.422953 |
result = []
current = []
for part in token + (':', ):
if part == ':':
if current:
result.append(tuple(current))
current = []
else:
current.append(part)
return result | def split_token_in_parts(token) | Take a Token, and turn it in a list of tokens, by splitting
it on ':' (taking that as a separator.) | 3.572703 | 3.05646 | 1.168903 |
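A quick illustration of the split, assuming `split_token_in_parts` from the row above is in scope; the token here is just an example tuple:

```python
# A token is split on ':' elements into a list of sub-tokens.
token = ('Token', 'Menu', ':', 'Completions', ':', 'Current')
print(split_token_in_parts(token))
# -> [('Token', 'Menu'), ('Completions',), ('Current',)]
```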
result = DEFAULT_ATTRS
for attr in list_of_attrs:
result = Attrs(
color=attr.color or result.color,
bgcolor=attr.bgcolor or result.bgcolor,
bold=attr.bold or result.bold,
underline=attr.underline or result.underline,
italic=attr.italic or result.italic,
blink=attr.blink or result.blink,
reverse=attr.reverse or result.reverse)
return result | def merge_attrs(list_of_attrs) | Take a list of :class:`.Attrs` instances and merge them into one.
Every `Attr` in the list can override the styling of the previous one. | 2.178294 | 1.924171 | 1.132069 |
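A sketch of the override behaviour, assuming the `Attrs` namedtuple referenced above (with exactly the fields used in the body) and `DEFAULT_ATTRS` are in scope:

```python
base = Attrs(color='ansiblue', bgcolor=None, bold=False, underline=False,
             italic=False, blink=False, reverse=False)
override = Attrs(color=None, bgcolor='ansiwhite', bold=True, underline=False,
                 italic=False, blink=False, reverse=False)

merged = merge_attrs([base, override])
# Later Attrs win field by field, but falsy fields fall through to earlier
# ones (and ultimately to DEFAULT_ATTRS): color stays 'ansiblue', while
# bgcolor becomes 'ansiwhite' and bold becomes True.
```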
def set_text(self, text): # Not abstract.
assert isinstance(text, six.string_types)
self.set_data(ClipboardData(text)) | Shortcut for setting plain text on clipboard. | null | null | null |
self.counter += 1
local_counter = self.counter
def timer_timeout():
if self.counter == local_counter and self.running:
self.callback()
self.loop.call_later(self.timeout, timer_timeout) | def reset(self) | Reset the timeout. Starts a new timer. | 5.19278 | 4.271738 | 1.215613 |
sentry_exc(exc)
# this will messily add this "reraise" function to the stack trace
# but hopefully it's not too bad
six.reraise(type(exc), exc, sys.exc_info()[2]) | def sentry_reraise(exc) | Re-raise an exception after logging it to Sentry
Use this for top-level exceptions when you want the user to see the traceback.
Must be called from within an exception handler. | 8.156013 | 9.566568 | 0.852554 |
parent_dir = os.path.abspath(os.path.dirname(__file__))
vendor_dir = os.path.join(parent_dir, 'vendor')
sys.path.insert(1, vendor_dir)
return import_module(name) | def vendor_import(name) | This enables us to use the vendor directory for packages we don't depend on | 2.244321 | 2.103671 | 1.066859 |
if name not in _not_importable:
try:
return import_module(name)
except ImportError:
_not_importable.add(name)
if required:
raise ValueError(required)
except Exception as e:
_not_importable.add(name)
msg = "Error importing optional module {}".format(name)
logger.exception(msg) | def get_module(name, required=None) | Return module or None. Absolute import is required.
:param (str) name: Dot-separated module path. E.g., 'scipy.stats'.
:param (str) required: A string to raise a ValueError if missing
:return: (module|None) If import succeeds, the module will be returned. | 3.104062 | 3.377844 | 0.918948 |
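A usage sketch for the optional-import helper above; the module names are only illustrations:

```python
# Returns the module, or None when it cannot be imported.
pandas = get_module('pandas')
if pandas is None:
    print('pandas not available; data frame features disabled')

# Pass `required` to turn a missing dependency into a ValueError instead.
# get_module('fastparquet', required='fastparquet is required to save data frames')
```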
instance_name = o.__class__.__module__ + "." + o.__class__.__name__
if instance_name in ["builtins.module", "__builtin__.module"]:
return o.__name__
else:
return instance_name | def get_full_typename(o) | We determine types based on type names so we don't have to import
(and therefore depend on) PyTorch, TensorFlow, etc. | 3.319491 | 3.261411 | 1.017808 |
import matplotlib
from matplotlib.figure import Figure
if obj == matplotlib.pyplot:
obj = obj.gcf()
elif not isinstance(obj, Figure):
if hasattr(obj, "figure"):
obj = obj.figure
# Some matplotlib objects have a figure function
if not isinstance(obj, Figure):
raise ValueError(
"Only matplotlib.pyplot or matplotlib.pyplot.Figure objects are accepted.")
if not obj.gca().has_data():
raise ValueError(
"You attempted to log an empty plot, pass a figure directly or ensure the global plot isn't closed.")
return obj | def ensure_matplotlib_figure(obj) | Extract the current figure from a matplotlib object or return the object if it's a figure.
raises ValueError if the object can't be converted. | 4.883218 | 4.700884 | 1.038787 |
converted = True
typename = get_full_typename(obj)
if is_tf_tensor_typename(typename):
obj = obj.eval()
elif is_pytorch_tensor_typename(typename):
try:
if obj.requires_grad:
obj = obj.detach()
except AttributeError:
pass # before 0.4 is only present on variables
try:
obj = obj.data
except RuntimeError:
pass # happens for Tensors before 0.4
if obj.size():
obj = obj.numpy()
else:
return obj.item(), True
if np and isinstance(obj, np.ndarray):
if obj.size == 1:
obj = obj.flatten()[0]
elif obj.size <= 32:
obj = obj.tolist()
elif np and isinstance(obj, np.generic):
obj = obj.item()
elif isinstance(obj, bytes):
obj = obj.decode('utf-8')
elif isinstance(obj, (datetime, date)):
obj = obj.isoformat()
else:
converted = False
if getsizeof(obj) > VALUE_BYTES_LIMIT:
logger.warning("Object %s is %i bytes", obj, getsizeof(obj))
return obj, converted | def json_friendly(obj) | Convert an object into something that's more becoming of JSON | 3.557 | 3.533761 | 1.006576 |
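A few example conversions, assuming `json_friendly` from the row above is in scope and numpy is installed:

```python
import numpy as np
from datetime import datetime

print(json_friendly(np.float32(3.5)))        # -> (3.5, True): numpy scalar -> Python number
print(json_friendly(np.array([1, 2, 3])))    # -> ([1, 2, 3], True): small arrays become lists
print(json_friendly(b'bytes'))               # -> ('bytes', True): bytes -> str
print(json_friendly(datetime(2019, 1, 1)))   # -> ('2019-01-01T00:00:00', True)
print(json_friendly({'a': 1}))               # -> ({'a': 1}, False): left for the caller to encode
```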
_DISPLAY_VARIABLES = ['DISPLAY', 'WAYLAND_DISPLAY', 'MIR_SOCKET']
_WEBBROWSER_NAMES_BLACKLIST = [
'www-browser', 'lynx', 'links', 'elinks', 'w3m']
import webbrowser
launch_browser = attempt_launch_browser
if launch_browser:
if ('linux' in sys.platform and
not any(os.getenv(var) for var in _DISPLAY_VARIABLES)):
launch_browser = False
try:
browser = webbrowser.get()
if (hasattr(browser, 'name')
and browser.name in _WEBBROWSER_NAMES_BLACKLIST):
launch_browser = False
except webbrowser.Error:
launch_browser = False
return launch_browser | def launch_browser(attempt_launch_browser=True) | Decide if we should launch a browser | 3.723557 | 3.548594 | 1.049305 |
if os.getenv("TF_CONFIG"):
try:
return json.loads(os.environ["TF_CONFIG"])
except ValueError:
return False
else:
return False | def parse_tfjob_config() | Attempts to parse TFJob config, returning False if it can't find it | 3.176827 | 2.385846 | 1.331531 |
sagemaker_config = "/opt/ml/input/config/hyperparameters.json"
if os.path.exists(sagemaker_config):
conf = {}
conf["sagemaker_training_job_name"] = os.getenv('TRAINING_JOB_NAME')
# Hyper-parameter searches quote configs...
for k, v in six.iteritems(json.load(open(sagemaker_config))):
cast = v.strip('"')
if os.getenv("WANDB_API_KEY") is None and k == "wandb_api_key":
os.environ["WANDB_API_KEY"] = cast
else:
if re.match(r'^[-\d]+$', cast):
cast = int(cast)
elif re.match(r'^[-.\d]+$', cast):
cast = float(cast)
conf[k] = cast
return conf
else:
return False | def parse_sm_config() | Attempts to parse SageMaker configuration returning False if it can't find it | 3.580028 | 3.307484 | 1.082402 |
pandas = get_module("pandas")
fastparquet = get_module("fastparquet")
if not pandas or not fastparquet:
raise wandb.Error(
"Failed to save data frame: unable to import either pandas or fastparquet.")
data_frame_id = generate_id()
# We have to call this wandb_run_id because that name is treated specially by
# our filtering code
df['wandb_run_id'] = pandas.Series(
[six.text_type(run.name)] * len(df.index), index=df.index)
df['wandb_data_frame_id'] = pandas.Series(
[six.text_type(data_frame_id)] * len(df.index), index=df.index)
frames_dir = os.path.join(run.dir, DATA_FRAMES_SUBDIR)
mkdir_exists_ok(frames_dir)
path = os.path.join(frames_dir, '{}-{}.parquet'.format(name, data_frame_id))
fastparquet.write(path, df)
return {
'id': data_frame_id,
'_type': 'data-frame',
'format': 'parquet',
'current_project_name': run.project_name(), # we don't have the project ID here
'path': path,
} | def encode_data_frame(name, df, run) | Encode a Pandas DataFrame into the JSON/backend format.
Writes the data to a file and returns a dictionary that we use to represent
it in `Summary`'s.
Arguments:
name (str): Name of the DataFrame, eg. the summary key in which it's
stored. This is for convenience, so people exploring the
directory tree can have some idea of what is in the Parquet files.
df (pandas.DataFrame): The DataFrame. Must not have columns named
"wandb_run_id" or "wandb_data_frame_id". They will be added to the
DataFrame here.
run (wandb_run.Run): The Run the DataFrame is associated with. We need
this because the information we store on the DataFrame is derived
from the Run it's in.
Returns:
A dict representing the DataFrame that we can store in summaries or
histories. This is the format:
{
'_type': 'data-frame',
# Magic field that indicates that this object is a data frame as
# opposed to a normal dictionary or anything else.
'id': 'asdf',
# ID for the data frame that is unique to this Run.
'format': 'parquet',
# The file format in which the data frame is stored. Currently can
# only be Parquet.
'current_project_name': 'wfeas',
# (Current) name of the project that this Run is in. It'd be
# better to store the project's ID because we know it'll never
# change but we don't have that here. We store this just in
# case because we use the project name in identifiers on the
# backend.
'path': 'media/data_frames/sdlk.parquet',
# Path to the Parquet file in the Run directory.
} | 3.470538 | 2.812782 | 1.233846 |
if len(key) != 40:
click.secho(
'API-key must be exactly 40 characters long: {} ({} chars)'.format(key, len(key)))
return None
try:
normalized_host = host.split("/")[-1].split(":")[0]
print("Appending key for %s to your netrc file: %s" %
(normalized_host, os.path.expanduser('~/.netrc')))
machine_line = 'machine %s' % normalized_host
path = os.path.expanduser('~/.netrc')
orig_lines = None
try:
with open(path) as f:
orig_lines = f.read().strip().split('\n')
except (IOError, OSError) as e:
pass
with open(path, 'w') as f:
if orig_lines:
# delete this machine from the file if it's already there.
skip = 0
for line in orig_lines:
if machine_line in line:
skip = 2
elif skip:
skip -= 1
else:
f.write('%s\n' % line)
# Standard netrc entry format: machine / login / password.
f.write(textwrap.dedent("""\
machine {host}
  login {entity}
  password {key}
""").format(host=normalized_host, entity=entity, key=key))
os.chmod(os.path.expanduser('~/.netrc'),
stat.S_IRUSR | stat.S_IWUSR)
return True
except IOError as e:
click.secho("Unable to read ~/.netrc", fg="red")
return None | def write_netrc(host, entity, key) | Add our host and key to .netrc | 2.779915 | 2.783083 | 0.998862 |
max_retries = kwargs.pop('max_retries', 30)
sleep = 2
retry_count = 0
while True:
try:
response = func(*args, **kwargs)
response.raise_for_status()
return response
except (requests.exceptions.ConnectionError,
requests.exceptions.HTTPError, # XXX 500s aren't retryable
requests.exceptions.Timeout) as e:
if retry_count == max_retries:
return e
retry_count += 1
delay = sleep + random.random() * 0.25 * sleep
if isinstance(e, requests.exceptions.HTTPError) and e.response.status_code == 429:
logger.info(
"Rate limit exceeded, retrying in %s seconds" % delay)
else:
logger.warning('requests_with_retry encountered retryable exception: %s. args: %s, kwargs: %s',
e, args, kwargs)
time.sleep(delay)
sleep *= 2
if sleep > MAX_SLEEP_SECONDS:
sleep = MAX_SLEEP_SECONDS
except requests.exceptions.RequestException as e:
logger.error(response.json()['error']) # XXX clean this up
logger.exception(
'requests_with_retry encountered unretryable exception: %s', e)
return e | def request_with_retry(func, *args, **kwargs) | Perform a requests http call, retrying with exponential backoff.
Args:
func: An http-requesting function to call, like requests.post
max_retries: Maximum retries before giving up. By default we retry 30 times in ~2 hours before dropping the chunk
*args: passed through to func
**kwargs: passed through to func | 2.639494 | 2.707369 | 0.97493 |
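A hypothetical call site for the retry wrapper above; the URL and payload are placeholders:

```python
import requests

# Connection errors, timeouts and HTTP errors are retried with exponential
# backoff plus jitter; once retries run out the exception is returned, not raised.
resp = request_with_retry(
    requests.post,
    'https://example.com/api/chunks',
    json={'hello': 'world'},
    timeout=10,
    max_retries=5,
)
if isinstance(resp, Exception):
    print('giving up:', resp)
else:
    print('status:', resp.status_code)
```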
if os.path.isfile(program) and not os.access(program, os.X_OK):
# program is a path to a non-executable file
try:
opened = open(program)
except PermissionError:
return None
first_line = opened.readline().strip()
if first_line.startswith('#!'):
return shlex.split(first_line[2:])
if program.endswith('.py'):
return [sys.executable]
return None | def find_runner(program) | Return a command that will run program.
Args:
program: The string name of the program to try to run.
Returns:
commandline list of strings to run the program (eg. with subprocess.call()) or None | 3.031695 | 3.16751 | 0.957122 |
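A usage sketch, assuming `find_runner` from the row above is in scope; the script path is a placeholder:

```python
import subprocess

program = 'train.py'
runner = find_runner(program)   # e.g. [sys.executable] for a non-executable .py file
if runner:
    subprocess.call(runner + [program])
else:
    subprocess.call([program])
```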
assert target_length > 1
values = list(values)
if len(values) < target_length:
return values
ratio = float(len(values) - 1) / (target_length - 1)
result = []
for i in range(target_length):
result.append(values[int(i * ratio)])
return result | def downsample(values, target_length) | Downsamples 1d values to target_length, including start and end.
Algorithm just rounds index down.
Values can be any sequence, including a generator. | 2.233824 | 2.335493 | 0.956468 |
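A worked example of the index-rounding behaviour, assuming `downsample` from the row above is in scope:

```python
# 10 values downsampled to 5: ratio is (10-1)/(5-1) = 2.25, so indices
# 0, 2, 4, 6 and 9 are kept -- the first and last values always survive.
print(downsample(range(10), 5))   # -> [0, 2, 4, 6, 9]

# Inputs shorter than the target are returned unchanged.
print(downsample([1, 2, 3], 5))   # -> [1, 2, 3]
```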
bool_args = ["-t", "--tty", "--rm","--privileged", "--oom-kill-disable","--no-healthcheck", "-i",
"--interactive", "--init", "--help", "--detach", "-d", "--sig-proxy", "-it", "-itd"]
last_flag = -2
last_arg = ""
possible_images = []
if len(args) > 0 and args[0] == "run":
args.pop(0)
for i, arg in enumerate(args):
if arg.startswith("-"):
last_flag = i
last_arg = arg
elif "@sha256:" in arg:
# Because our regex doesn't match digests
possible_images.append(arg)
elif docker_image_regex(arg):
if last_flag == i - 2:
possible_images.append(arg)
elif "=" in last_arg:
possible_images.append(arg)
elif last_arg in bool_args and last_flag == i - 1:
possible_images.append(arg)
most_likely = None
for img in possible_images:
if ":" in img or "@" in img or "/" in img:
most_likely = img
break
if most_likely is None and len(possible_images) > 0:
most_likely = possible_images[0]
return most_likely | def image_from_docker_args(args) | This scans docker run args and attempts to find the most likely docker image argument.
It excludes any arguments that start with a dash, and the argument after it if it isn't a boolean
switch. This can be improved; we currently fall back gracefully when this fails. | 3.323426 | 3.026669 | 1.098048 |
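A sketch of the heuristic on a typical `docker run` invocation, assuming `image_from_docker_args` and its `docker_image_regex` helper are in scope; the image name is a placeholder:

```python
args = ['run', '-e', 'WANDB_API_KEY=xxx', '--rm', '-it',
        'wandb/deepo:pytorch', 'python', 'train.py']
# Flags and their values are skipped; among the remaining candidates the one
# containing ':', '@' or '/' is considered the most likely image.
print(image_from_docker_args(args))   # expected: 'wandb/deepo:pytorch'
```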
if hasattr(yaml, "full_load"):
return yaml.full_load(file)
else:
return yaml.load(file) | def load_yaml(file) | If pyyaml > 5.1 use full_load to avoid warning | 2.58684 | 1.928262 | 1.34154 |
token_path = "/var/run/secrets/kubernetes.io/serviceaccount/token"
if os.path.exists(token_path):
k8s_server = "https://{}:{}/api/v1/namespaces/default/pods/{}".format(
os.getenv("KUBERNETES_SERVICE_HOST"), os.getenv(
"KUBERNETES_PORT_443_TCP_PORT"), os.getenv("HOSTNAME")
)
try:
res = requests.get(k8s_server, verify="/var/run/secrets/kubernetes.io/serviceaccount/ca.crt",
timeout=3, headers={"Authorization": "Bearer {}".format(open(token_path).read())})
res.raise_for_status()
except requests.RequestException:
return None
try:
return res.json()["status"]["containerStatuses"][0]["imageID"].strip("docker-pullable://")
except (ValueError, KeyError, IndexError):
logger.exception("Error checking kubernetes for image id")
return None | def image_id_from_k8s() | Pings the k8s metadata service for the image id | 2.351267 | 2.270996 | 1.035346 |
q = queue.Queue()
def wrapped_target(q, *args, **kwargs):
try:
q.put(target(*args, **kwargs))
except Exception as e:
q.put(e)
def wrapper(*args, **kwargs):
thread = threading.Thread(target=wrapped_target, args=(q,)+args, kwargs=kwargs)
thread.daemon = True
thread.start()
try:
result = q.get(True, timeout)
if isinstance(result, Exception):
six.reraise(type(result), result, sys.exc_info()[2])
return result, thread
except queue.Empty:
return None, thread
return wrapper | def async_call(target, timeout=None) | Accepts a method and optional timeout.
Returns a new method that will call the original with any args, waiting for upto timeout seconds.
This new method blocks on the original and returns the result or None
if timeout was reached, along with the thread.
You can check thread.isAlive() to determine if a timeout was reached.
If an exception is thrown in the thread, we reraise it. | 1.883068 | 1.835844 | 1.025723 |
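A usage sketch for the timeout wrapper above, assuming `async_call` is in scope:

```python
import time

def slow_add(a, b):
    time.sleep(0.1)
    return a + b

add_with_timeout = async_call(slow_add, timeout=1)
result, thread = add_with_timeout(2, 3)
print(result)   # -> 5; would be None if the 1-second timeout had been reached
```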
if six.PY2:
now = time.time()
else:
now = time.monotonic()
return now | def stopwatch_now() | Get a timevalue for interval comparisons
When possible it is a monotonic clock to prevent backwards time issues. | 3.577202 | 3.421542 | 1.045494 |
if len(self.focus_stack) > 1:
try:
return self[self.focus_stack[-2]]
except KeyError:
pass | def previous(self, cli) | Return the previously focussed :class:`.Buffer` or `None`. | 4.800594 | 3.438943 | 1.39595 |
assert isinstance(buffer_name, six.text_type)
self.focus_stack = [buffer_name] | def focus(self, cli, buffer_name) | Focus the buffer with the given name. | 5.218968 | 5.242387 | 0.995533 |
assert isinstance(buffer_name, six.text_type)
self.focus_stack.append(buffer_name) | def push_focus(self, cli, buffer_name) | Push buffer on the focus stack. | 4.071935 | 3.311081 | 1.22979 |
if len(self.focus_stack) > 1:
self.focus_stack.pop()
else:
raise IndexError('Cannot pop last item from the focus stack.') | def pop_focus(self, cli) | Pop buffer from the focus stack. | 4.25233 | 3.469001 | 1.225808 |
@wraps(func)
def wrapper(*args, **kwargs):
message = "Whoa, you found a bug."
try:
return func(*args, **kwargs)
except requests.HTTPError as err:
raise CommError(err.response, err)
except RetryError as err:
if "response" in dir(err.last_exception) and err.last_exception.response is not None:
try:
message = err.last_exception.response.json().get(
'errors', [{'message': message}])[0]['message']
except ValueError:
message = err.last_exception.response.text
else:
message = err.last_exception
six.reraise(CommError, CommError(
message, err.last_exception), sys.exc_info()[2])
except Exception as err:
# gql raises server errors with dict's as strings...
if len(err.args) > 0:
payload = err.args[0]
else:
payload = err
if str(payload).startswith("{"):
message = ast.literal_eval(str(payload))["message"]
else:
message = str(err)
if wandb.env.is_debug():
six.reraise(*sys.exc_info())
else:
six.reraise(CommError, CommError(
message, err), sys.exc_info()[2])
return wrapper | def normalize_exceptions(func) | Function decorator for catching common errors and re-raising as wandb.Error | 2.763816 | 2.716913 | 1.017263 |
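A hypothetical call site for the decorator above; `client` stands in for the same GraphQL client used elsewhere in this module, and the `gql` import is assumed:

```python
@normalize_exceptions
def viewer(client):
    # HTTP errors, RetryErrors and raw gql errors all surface as CommError here.
    return client.execute(gql('query Viewer { viewer { id entity } }'))
```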
bites = self.file.read(size)
self.bytes_read += len(bites)
self.callback(len(bites), self.bytes_read)
return bites | def read(self, size=-1) | Read bytes and call the callback | 3.744584 | 3.45918 | 1.082506 |
"Call watch method to log model topology, gradients & weights"
# Set self.best, method inherited from "TrackerCallback" by "SaveModelCallback"
super().on_train_begin()
# Ensure we don't call "watch" multiple times
if not WandbCallback.watch_called:
WandbCallback.watch_called = True
# Logs model topology and optionally gradients and weights
wandb.watch(self.learn.model, log=self.log) | def on_train_begin(self, **kwargs) | Call watch method to log model topology, gradients & weights | 10.602436 | 6.980559 | 1.518852 |
"Logs training loss, validation loss and custom metrics & log prediction samples & save model"
if self.save_model:
# Adapted from fast.ai "SaveModelCallback"
current = self.get_monitor_value()
if current is not None and self.operator(current, self.best):
print(
f'Better model found at epoch {epoch} with {self.monitor} value: {current}.'
)
self.best = current
# Section modified to save within wandb folder
with self.model_path.open('wb') as model_file:
self.learn.save(model_file)
# Log sample predictions
if self.show_results:
self.learn.show_results() # pyplot display of sample predictions
wandb.log({"Prediction Samples": plt}, commit=False)
# Log losses & metrics
# Adapted from fast.ai "CSVLogger"
logs = {
name: stat
for name, stat in list(
zip(self.learn.recorder.names, [epoch, smooth_loss] +
last_metrics))[1:]
}
wandb.log(logs)
# We can now close results figure
if self.show_results:
plt.close('all') | def on_epoch_end(self, epoch, smooth_loss, last_metrics, **kwargs) | Logs training loss, validation loss and custom metrics & log prediction samples & save model | 5.632961 | 4.565456 | 1.233822 |
"Load the best model."
if self.save_model:
# Adapted from fast.ai "SaveModelCallback"
if self.model_path.is_file():
with self.model_path.open('rb') as model_file:
self.learn.load(model_file, purge=False)
print(f'Loaded best saved model from {self.model_path}') | def on_train_end(self, **kwargs) | Load the best model. | 4.942756 | 4.293619 | 1.151187 |
if self.stream_name != "default":
raise ValueError("Nested streams aren't supported")
if self._streams.get(name) is None:
self._streams[name] = History(self.fname, out_dir=self.out_dir,
add_callback=self._add_callback, stream_name=name)
return self._streams[name] | def stream(self, name) | Stream can be used to record different time series:
run.history.stream("batch").add({"gradients": 1}) | 5.261365 | 5.217969 | 1.008317 |
for row in self.rows:
if key in row:
yield row[key] | def column(self, key) | Iterator over a given column, skipping steps that don't have that key | 4.120788 | 3.290183 | 1.252449 |
if not isinstance(row, collections.Mapping):
raise wandb.Error('history.add expects dict-like object')
if step is None:
self.update(row)
if not self.batched:
self._write()
else:
if not isinstance(step, numbers.Integral):
raise wandb.Error(
"Step must be an integer, not {}".format(step))
elif step < self._steps:
warnings.warn(
"Adding to old History rows isn't currently supported. Dropping.", wandb.WandbWarning)
return
elif step == self._steps:
pass
elif self.batched:
raise wandb.Error(
"Can't log to a particular History step ({}) while in batched mode.".format(step))
else: # step > self._steps
self._write()
self._steps = step
self.update(row) | def add(self, row={}, step=None) | Adds or updates a history step.
If row isn't specified, will write the current state of row.
If step is specified, the row will be written only when add() is called with
a different step value.
run.history.row["duration"] = 1.0
run.history.add({"loss": 1})
=> {"duration": 1.0, "loss": 1} | 4.145606 | 4.095282 | 1.012288 |
for k, v in six.iteritems(new_vals):
k = k.strip()
if k in self.row:
warnings.warn("Adding history key ({}) that is already set in this step".format(
k), wandb.WandbWarning)
self.row[k] = v | def update(self, new_vals) | Add a dictionary of values to the current step without writing it to disk. | 6.650027 | 5.504771 | 1.208048 |
if self.batched: # we're already in a context manager
raise wandb.Error("Nested History step contexts aren't supported")
self.batched = True
self.compute = compute
yield self
if compute:
self._write()
self.batched = False
self.compute = True
To reduce the number of conditionals needed, code can check run.history.compute:
with run.history.step(batch_idx % log_interval == 0):
run.history.add({"nice": "ok"})
if run.history.compute:
# Something expensive here | 12.665594 | 11.801453 | 1.073223 |
self.rows.append(row)
self._keys.update(row.keys())
self._steps += 1 | def _index(self, row) | Add a row to the internal list of rows without writing it to disk.
This function should keep the data structure consistent so it's usable
for both adding new rows, and loading pre-existing histories. | 7.267538 | 6.202487 | 1.171713 |
master_fd, slave_fd = pty.openpty()
# raw mode so carriage returns etc. don't get added by the terminal driver,
# bash for windows blows up on this so we catch the error and do nothing
# TODO(adrian): (when) will this be called on windows?
try:
tty.setraw(master_fd)
except termios.error:
pass
if resize:
if SIGWINCH_HANDLER is not None:
SIGWINCH_HANDLER.add_fd(master_fd)
return master_fd, slave_fd | def wandb_pty(resize=True) | Get a PTY set to raw mode and registered to hear about window size changes. | 7.50239 | 7.094633 | 1.057474 |
def _reader_thread():
while True:
out = get_data_fn()
put_data_fn(out)
if not out:
# EOF.
# We've passed this on so things farther down the pipeline will
# know to shut down.
break
t = threading.Thread(target=_reader_thread)
t.daemon = True
t.start()
return t | def spawn_reader_writer(get_data_fn, put_data_fn) | Spawn a thread that reads from a data source and writes to a sink.
The thread will terminate if it receives a Falsey value from the source.
Args:
get_data_fn: Data-reading function. Called repeatedly until it returns
False-y to indicate that the thread should terminate.
put_data_fn: Data-writing function.
Returns: threading.Thread | 4.796604 | 5.418436 | 0.885238 |
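A minimal sketch, using OS pipes, of how the reader/writer thread terminates on an empty read; it assumes `spawn_reader_writer` from the row above is in scope:

```python
import os

src_read, src_write = os.pipe()
dst_read, dst_write = os.pipe()

t = spawn_reader_writer(
    get_data_fn=lambda: os.read(src_read, 1024),
    put_data_fn=lambda data: os.write(dst_write, data) if data else None,
)

os.write(src_write, b'hello')
os.close(src_write)              # next read returns b'', which stops the thread
t.join()
print(os.read(dst_read, 1024))   # -> b'hello'
```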
# NOTE: dup2 makes `self._from_fd` inheritable unconditionally
self.redir_file.flush()
os.dup2(self.orig_file.fileno(), self._from_fd) | def restore(self) | Restore `self.redir_file` to its original state. | 11.289577 | 6.6731 | 1.691804 |
if len(data) == 4:
# NOTE: the first parameter of struct.unpack should be
# a 'str' object. Both on Py2/py3. This crashes on OSX
# otherwise.
columns, rows = struct.unpack(str('!HH'), data)
self.size_received_callback(rows, columns)
else:
logger.warning('Wrong number of NAWS bytes') | def naws(self, data) | Received NAWS. (Window dimensions.) | 10.623449 | 8.781496 | 1.209754 |
command, payload = data[0:1], data[1:]
assert isinstance(command, bytes)
if command == NAWS:
self.naws(payload)
else:
logger.info('Negotiate (%r got bytes)', len(data)) | def negotiate(self, data) | Got negotiate data. | 7.741591 | 7.196687 | 1.075716 |
while True:
d = yield
if d == int2byte(0):
pass # NOP
# Go to state escaped.
elif d == IAC:
d2 = yield
if d2 == IAC:
self.received_data(d2)
# Handle simple commands.
elif d2 in (NOP, DM, BRK, IP, AO, AYT, EC, EL, GA):
self.command_received(d2, None)
# Handle IAC-[DO/DONT/WILL/WONT] commands.
elif d2 in (DO, DONT, WILL, WONT):
d3 = yield
self.command_received(d2, d3)
# Subnegotiation
elif d2 == SB:
# Consume everything until next IAC-SE
data = []
while True:
d3 = yield
if d3 == IAC:
d4 = yield
if d4 == SE:
break
else:
data.append(d4)
else:
data.append(d3)
self.negotiate(b''.join(data))
else:
self.received_data(d) | def _parse_coroutine(self) | Parser state machine.
Every 'yield' expression returns the next byte. | 3.694899 | 3.548018 | 1.041398 |
assert isinstance(data, binary_type)
for b in iterbytes(data):
self._parser.send(int2byte(b)) | def feed(self, data) | Feed data to the parser. | 8.108313 | 6.888176 | 1.177135 |
run = self.settings['run']
project = self.settings['project']
username = self.settings['username']
parts = path.replace("/runs/", "/").split("/")
if ":" in parts[-1]:
run = parts[-1].split(":")[-1]
parts[-1] = parts[-1].split(":")[0]
elif parts[-1]:
run = parts[-1]
if len(parts) > 1:
project = parts[1]
if username and run == project:
project = parts[0]
else:
username = parts[0]
else:
project = parts[0]
return (username, project, run) | def _parse_path(self, path) | Parses paths in the following formats:
url: username/project/runs/run_id
path: username/project/run_id
docker: username/project:run_id
username is optional and will fallback to the current logged in user. | 2.968516 | 2.577348 | 1.151772 |
username, project, run = self._parse_path(path)
if not self._runs.get(path + str(filters) + str(order)):
self._runs[path + str(filters) + str(order)] = Runs(self.client, username, project,
filters=filters, order=order, per_page=per_page)
return self._runs[path + str(filters) + str(order)] | def runs(self, path="", filters={}, order="-created_at", per_page=None) | Return a set of runs from a project that match the filters provided.
You can filter by config.*, summary.*, state, username, createdAt, etc.
The filters use the same query language as MongoDB:
https://docs.mongodb.com/manual/reference/operator/query
Order can be created_at, heartbeat_at, config.*.value, or summary.*. By default
the order is descending, if you prepend order with a + order becomes ascending. | 3.182637 | 3.938823 | 0.808017 |
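A hypothetical query using the MongoDB-style filters described above, assuming the surrounding class is the public `Api` client; entity and project names are placeholders:

```python
api = Api()
finished = api.runs('my-entity/my-project',
                    filters={'state': 'finished', 'config.optimizer': 'adam'},
                    order='-created_at')
for run in finished:
    print(run.name, run.state)
```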
username, project, run = self._parse_path(path)
if not self._runs.get(path):
self._runs[path] = Run(self.client, username, project, run)
return self._runs[path] | def run(self, path="") | Returns a run by parsing path in the form username/project/run, if
defaults were set on the Api, only overrides what's passed. I.E. you can just pass
run_id if you set username and project on the Api | 4.020509 | 3.11039 | 1.292606 |
run_id = run_id or util.generate_id()
project = project or api.settings.get("project")
mutation = gql('''
mutation upsertRun($project: String, $entity: String, $name: String!) {
upsertBucket(input: {modelName: $project, entityName: $entity, name: $name}) {
bucket {
project {
name
entity { name }
}
id
name
}
inserted
}
}
''')
variables = {'entity': username,
'project': project, 'name': run_id}
res = api.client.execute(mutation, variable_values=variables)
res = res['upsertBucket']['bucket']
return Run(api.client, res["project"]["entity"]["name"], res["project"]["name"], res["name"], {
"id": res["id"],
"config": "{}",
"systemMetrics": "{}",
"summaryMetrics": "{}",
"tags": [],
"description": None,
"state": "running"
}) | def create(cls, api, run_id=None, project=None, username=None) | Create a run for the given project | 3.662834 | 3.624429 | 1.010596 |
variables = {'entity': self.username,
'project': self.project, 'name': self.name}
variables.update(kwargs)
return self.client.execute(query, variable_values=variables) | def _exec(self, query, **kwargs) | Execute a query against the cloud backend | 4.817734 | 4.41823 | 1.090422 |
node = "history" if stream == "default" else "events"
query = gql('''
query Run($project: String!, $entity: String!, $name: String!, $samples: Int!) {
project(name: $project, entityName: $entity) {
run(name: $name) { %s(samples: $samples) }
}
}
''' % node)
response = self._exec(query, samples=samples)
lines = [json.loads(line)
for line in response['project']['run'][node]]
if pandas:
pandas = util.get_module("pandas")
if pandas:
lines = pandas.DataFrame.from_records(lines)
else:
print("Unable to load pandas, call history with pandas=False")
return lines | def history(self, samples=500, pandas=True, stream="default") | Return history metrics for a run
Args:
samples (int, optional): The number of samples to return
pandas (bool, optional): Return a pandas dataframe
stream (str, optional): "default" for metrics, "system" for machine metrics | 3.652989 | 4.267503 | 0.856002 |
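A usage sketch reusing the `api` object from the previous example; the run path is a placeholder:

```python
run = api.run('my-entity/my-project/abc123')
metrics = run.history(samples=100)                    # pandas.DataFrame when pandas is installed
system = run.history(stream='system', pandas=False)   # machine metrics as a list of dicts
```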
assert isinstance(exclude, tuple)
# When we have a bit of saturation, avoid the gray-like colors, otherwise,
# too often the distance to the gray color is less.
saturation = abs(r - g) + abs(g - b) + abs(b - r) # Between 0..510
if saturation > 30:
exclude += ('ansilightgray', 'ansidarkgray', 'ansiwhite', 'ansiblack')
# Take the closest color.
# (Thanks to Pygments for this part.)
distance = 257*257*3 # "infinity" (>distance from #000000 to #ffffff)
match = 'ansidefault'
for name, (r2, g2, b2) in ANSI_COLORS_TO_RGB.items():
if name != 'ansidefault' and name not in exclude:
d = (r - r2) ** 2 + (g - g2) ** 2 + (b - b2) ** 2
if d < distance:
match = name
distance = d
return match | def _get_closest_ansi_color(r, g, b, exclude=()) | Find closest ANSI color. Return it by name.
:param r: Red (Between 0 and 255.)
:param g: Green (Between 0 and 255.)
:param b: Blue (Between 0 and 255.)
:param exclude: A tuple of color names to exclude. (E.g. ``('ansired', )``.) | 5.273702 | 4.781965 | 1.102832 |
# Thanks to fabric (fabfile.org), and
# http://sqizit.bartletts.id.au/2011/02/14/pseudo-terminals-in-python/
# Inline imports, because these modules are not available on Windows.
# (This file is used by ConEmuOutput, which is used on Windows.)
import fcntl
import termios
# Buffer for the C call
buf = array.array(b'h' if six.PY2 else u'h', [0, 0, 0, 0])
# Do TIOCGWINSZ (Get)
# Note: We should not pass 'True' as a fourth parameter to 'ioctl'. (True
# is the default.) This causes segmentation faults on some systems.
# See: https://github.com/jonathanslenders/python-prompt-toolkit/pull/364
fcntl.ioctl(fileno, termios.TIOCGWINSZ, buf)
# Return rows, cols
return buf[0], buf[1] | def _get_size(fileno) | Get the size of this pseudo terminal.
:param fileno: stdout.fileno()
:returns: A (rows, cols) tuple. | 6.906413 | 6.871127 | 1.005135 |
key = (value, exclude)
if key not in self:
self[key] = self._get(value, exclude)
return self[key] | def get_code(self, value, exclude=()) | Return a (ansi_code, ansi_name) tuple. (E.g. ``(44, 'ansiblue')``.) for
a given (r,g,b) value. | 3.745693 | 4.637918 | 0.807624 |
" Turn 'ffffff', into (0xff, 0xff, 0xff). "
try:
rgb = int(color, 16)
except ValueError:
raise
else:
r = (rgb >> 16) & 0xff
g = (rgb >> 8) & 0xff
b = rgb & 0xff
return r, g, b | def _color_name_to_rgb(self, color) | Turn 'ffffff', into (0xff, 0xff, 0xff). | 3.145878 | 2.252474 | 1.396632 |
" Return a tuple with the vt100 values that represent this color. "
# When requesting ANSI colors only, and both fg/bg color were converted
# to ANSI, ensure that the foreground and background color are not the
# same. (Unless they were explicitly defined to be the same color.)
fg_ansi = [()]
def get(color, bg):
table = BG_ANSI_COLORS if bg else FG_ANSI_COLORS
if color is None:
return ()
# 16 ANSI colors. (Given by name.)
elif color in table:
return (table[color], )
# RGB colors. (Defined as 'ffffff'.)
else:
try:
rgb = self._color_name_to_rgb(color)
except ValueError:
return ()
# When only 16 colors are supported, use that.
if self.ansi_colors_only():
if bg: # Background.
if fg_color != bg_color:
exclude = (fg_ansi[0], )
else:
exclude = ()
code, name = _16_bg_colors.get_code(rgb, exclude=exclude)
return (code, )
else: # Foreground.
code, name = _16_fg_colors.get_code(rgb)
fg_ansi[0] = name
return (code, )
# True colors. (Only when this feature is enabled.)
elif self.true_color:
r, g, b = rgb
return (48 if bg else 38, 2, r, g, b)
# 256 RGB colors.
else:
return (48 if bg else 38, 5, _256_colors[rgb])
result = []
result.extend(get(fg_color, False))
result.extend(get(bg_color, True))
return map(six.text_type, result) | def _colors_to_code(self, fg_color, bg_color) | Return a tuple with the vt100 values that represent this color. | 4.787328 | 4.339761 | 1.103132 |
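For reference, the codes produced above slot into a standard SGR escape sequence; this sketch builds one by hand and does not go through the package:

# 38;2;r;g;b sets a 24-bit foreground, 48;5;n a 256-color background (xterm convention).
fg = (38, 2, 0x1e, 0x90, 0xff)
bg = (48, 5, 17)
print('\x1b[' + ';'.join(map(str, fg + bg)) + 'mhello\x1b[0m')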
assert stdout.isatty()
def get_size():
rows, columns = _get_size(stdout.fileno())
# If terminal (incorrectly) reports its size as 0, pick a reasonable default.
# See https://github.com/ipython/ipython/issues/10071
return Size(rows=(rows or 24), columns=(columns or 80))
return cls(stdout, get_size, true_color=true_color,
ansi_colors_only=ansi_colors_only, term=term) | def from_pty(cls, stdout, true_color=False, ansi_colors_only=None, term=None) | Create an Output class from a pseudo terminal.
(This will take the dimensions by reading the pseudo
terminal attributes.) | 3.946147 | 3.879339 | 1.017221 |
if self.term not in ('linux', 'eterm-color'): # Not supported by the Linux console.
self.write_raw('\x1b]2;%s\x07' % title.replace('\x1b', '').replace('\x07', '')) | def set_title(self, title) | Set terminal title. | 7.077714 | 5.567605 | 1.271231 |
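The escape written here is the standard xterm OSC 2 "set window title" sequence; a minimal standalone sketch follows (not every terminal honors it):

import sys

title = 'my-session'.replace('\x1b', '').replace('\x07', '')  # strip escape/BEL, as above
sys.stdout.write('\x1b]2;%s\x07' % title)
sys.stdout.flush()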
if self.true_color() and not self.ansi_colors_only():
self.write_raw(self._escape_code_cache_true_color[attrs])
else:
self.write_raw(self._escape_code_cache[attrs]) | def set_attributes(self, attrs) | Create new style and output.
:param attrs: `Attrs` instance. | 7.114988 | 7.763613 | 0.916453 |
if not self._buffer:
return
data = ''.join(self._buffer)
try:
# (We try to encode ourself, because that way we can replace
# characters that don't exist in the character set, avoiding
# UnicodeEncodeError crashes. E.g. u'\xb7' does not appear in 'ascii'.)
# My Arch Linux installation of July 2015 reported 'ANSI_X3.4-1968'
# for sys.stdout.encoding in xterm.
if self.write_binary:
if hasattr(self.stdout, 'buffer'):
out = self.stdout.buffer # Py3.
else:
out = self.stdout
out.write(data.encode(self.stdout.encoding or 'utf-8', 'replace'))
else:
self.stdout.write(data)
self.stdout.flush()
except IOError as e:
if e.args and e.args[0] == errno.EINTR:
# Interrupted system call. Can happen in case of a window
# resize signal. (Just ignore. The resize handler will render
# again anyway.)
pass
elif e.args and e.args[0] == 0:
# This can happen when there is a lot of output and the user
# sends a KeyboardInterrupt by pressing Control-C. E.g. in
# a Python REPL when we execute "while True: print('test')".
# (The `ptpython` REPL uses this `Output` class instead of
# `stdout` directly -- in order to be network transparent.)
# So, just ignore.
pass
else:
raise
self._buffer = [] | def flush(self) | Write to output stream and flush. | 6.061187 | 5.964427 | 1.016223 |
global watch_called
if run is None:
raise ValueError(
"You must call `wandb.init` before calling watch")
if watch_called:
raise ValueError(
"You can only call `wandb.watch` once per process. If you want to watch multiple models, pass them in as a tuple."
)
watch_called = True
log_parameters = False
log_gradients = True
if log == "all":
log_parameters = True
elif log == "parameters":
log_parameters = True
log_gradients = False
elif log is None:
log_gradients = False
if not isinstance(models, (tuple, list)):
models = (models,)
graphs = []
prefix = ''
for idx, model in enumerate(models):
if idx > 0:
prefix = "graph_%i" % idx
run.history.torch.add_log_hooks_to_pytorch_module(
model, log_parameters=log_parameters, log_gradients=log_gradients, prefix=prefix, log_freq=log_freq)
graph = wandb_torch.TorchGraph.hook_torch(model, criterion, graph_idx=idx)
graphs.append(graph)
# NOTE: the graph is set in run.summary by hook_torch on the backward pass
return graphs | def watch(models, criterion=None, log="gradients", log_freq=100) | Hooks into the torch model to collect gradients and the topology. Should be extended
to accept arbitrary ML models.
:param (torch.Module) models: The model to hook, can be a tuple
:param (torch.F) criterion: An optional loss value being optimized
:param (str) log: One of "gradients", "parameters", "all", or None
:param (int) log_freq: log gradients and parameters every N batches
:return: (wandb.Graph) The graph object that will populate after the first backward pass | 3.952817 | 3.737176 | 1.057702 |
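A hedged usage sketch with a toy PyTorch model; the project name and model are hypothetical:

import torch.nn as nn
import wandb

wandb.init(project="demo")                   # hypothetical project
model = nn.Linear(10, 2)
wandb.watch(model, log="all", log_freq=50)   # log gradients and parameters every 50 batches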
from wandb import jupyter
# TODO: Should we log to jupyter?
# global logging had to be disabled because it set the level to debug
# I also disabled run logging because we're rarely using it.
# try_to_set_up_global_logging()
# run.enable_logging()
api = InternalApi()
if not api.api_key:
termerror(
"Not authenticated. Copy a key from https://app.wandb.ai/profile?message=true")
key = getpass.getpass("API Key: ").strip()
if len(key) == 40:
os.environ[env.API_KEY] = key
util.write_netrc(api.api_url, "user", key)
else:
raise ValueError("API Key must be 40 characters long")
# Ensure our api client picks up the new key
api = InternalApi()
os.environ["WANDB_JUPYTER"] = "true"
run.resume = "allow"
api.set_current_run_id(run.id)
print("W&B Run: %s" % run.get_url(api))
print("Call `%%wandb` in the cell containing your training loop to display live results.")
try:
run.save(api=api)
except (CommError, ValueError) as e:
termerror(str(e))
run.set_environment()
run._init_jupyter_agent()
ipython = get_ipython()
ipython.register_magics(jupyter.WandBMagics)
def reset_start():
global START_TIME
START_TIME = time.time()
ipython.events.register("pre_run_cell", reset_start)
ipython.events.register('post_run_cell', run._stop_jupyter_agent) | def _init_jupyter(run) | Asks for user input to configure the machine if it isn't already and creates a new run.
Log pushing and system stats don't start until `wandb.monitor()` is called. | 6.76964 | 6.620679 | 1.022499 |
global _saved_files
if run is None:
raise ValueError(
"You must call `wandb.init` before calling save")
if policy not in ("live", "end"):
raise ValueError(
'Only "live" and "end" policies are currently supported.')
if base_path is None:
base_path = os.path.dirname(glob_str)
if isinstance(glob_str, bytes):
glob_str = glob_str.decode('utf-8')
wandb_glob_str = os.path.relpath(glob_str, base_path)
if "../" in wandb_glob_str:
raise ValueError(
"globs can't walk above base_path")
if (glob_str, base_path, policy) in _saved_files:
return []
if glob_str.startswith("gs://") or glob_str.startswith("s3://"):
termlog(
"%s is a cloud storage url, can't save file to wandb." % glob_str)
run.send_message(
{"save_policy": {"glob": wandb_glob_str, "policy": policy}})
files = []
for path in glob.glob(glob_str):
file_name = os.path.relpath(path, base_path)
abs_path = os.path.abspath(path)
wandb_path = os.path.join(run.dir, file_name)
util.mkdir_exists_ok(os.path.dirname(wandb_path))
# We overwrite existing symlinks because namespaces can change in Tensorboard
if os.path.islink(wandb_path) and abs_path != os.readlink(wandb_path):
os.remove(wandb_path)
os.symlink(abs_path, wandb_path)
elif not os.path.exists(wandb_path):
os.symlink(abs_path, wandb_path)
files.append(wandb_path)
_saved_files.add((glob_str, base_path, policy))
return files | def save(glob_str, base_path=None, policy="live") | Ensure all files matching *glob_str* are synced to wandb with the policy specified.
base_path: the base path to run the glob relative to
policy:
live: upload the file as it changes, overwriting the previous version
end: only upload file when the run ends | 2.590587 | 2.5489 | 1.016355 |
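A hedged usage sketch; the paths and project name are hypothetical:

import wandb

wandb.init(project="demo")                         # hypothetical project
wandb.save("checkpoints/*.pt")                     # "live": re-upload as the files change
wandb.save("results/metrics.json", policy="end")   # upload once, when the run finishes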
if run_path is None and run is None:
raise ValueError(
"You must call `wandb.init` before calling restore or specify a run_path")
api = Api()
api_run = api.run(run_path or run.path)
root = run.dir if run else root
path = os.path.join(root, name)
if os.path.exists(path) and not replace:
return open(path, "r")
files = api_run.files([name])
if len(files) == 0:
return None
return files[0].download(root=root, replace=True) | def restore(name, run_path=None, replace=False, root=".") | Downloads the specified file from cloud storage into the current run directory
if it doesn't already exist.
name: the name of the file
run_path: optional path to a different run to pull files from
replace: whether to download the file even if it already exists locally
root: the directory to download the file to. Defaults to the current
directory or the run directory if wandb.init was called.
returns None if it can't find the file, otherwise a file object open for reading
raises wandb.CommError if it can't find the run | 3.844553 | 3.215237 | 1.195729 |
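A hedged usage sketch; the run path and file name are hypothetical:

import wandb

wandb.init(project="demo")                                        # hypothetical project
f = wandb.restore("model.h5", run_path="my-entity/demo/abc123")   # hypothetical run path
if f is not None:
    print("restored to", f.name)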
try:
from IPython.display import display
except ImportError:
def display(stuff): return None
class Monitor():
def __init__(self, options={}):
if os.getenv("WANDB_JUPYTER"):
display(jupyter.Run())
else:
self.rm = False
termerror(
"wandb.monitor is only functional in Jupyter notebooks")
def __enter__(self):
termlog(
"DEPRECATED: with wandb.monitor(): is deprecated, just call wandb.monitor() to see live results.")
pass
def __exit__(self, *args):
pass
return Monitor(options) | def monitor(options={}) | Starts syncing with W&B if you're in Jupyter. Displays your W&B charts live in a Jupyter notebook.
It's currently a context manager for legacy reasons. | 7.430912 | 6.068969 | 1.224411 |
if run is None:
raise ValueError(
"You must call `wandb.init` in the same process before calling log")
if row is None:
row = {}
if commit:
run.history.add(row, *args, **kargs)
else:
run.history.update(row, *args, **kargs) | def log(row=None, commit=True, *args, **kargs) | Log a dict to the global run's history. If commit is False, enables multiple calls before committing.
Eg.
wandb.log({'train-loss': 0.5, 'accuracy': 0.9}) | 3.814164 | 3.467408 | 1.100004 |
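A short sketch of the commit behaviour; the metric names are hypothetical:

import wandb

wandb.init(project="demo")                       # hypothetical project
wandb.log({"train-loss": 0.5, "accuracy": 0.9})  # committed as one history row
wandb.log({"val-loss": 0.6}, commit=False)       # buffered ...
wandb.log({"val-accuracy": 0.85})                # ... written together with this call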
global run, config, watch_called, _saved_files
run = config = None
watch_called = False
_saved_files = set() | def uninit() | Undo the effects of init(). Useful for testing. | 11.154376 | 11.188682 | 0.996934 |
if os.getenv(env.INITED):
wandb_keys = [key for key in os.environ.keys() if key.startswith(
'WANDB_') and key not in exclude]
for key in wandb_keys:
del os.environ[key]
return True
else:
return False | def reset_env(exclude=[]) | Remove environment variables, used in Jupyter notebooks | 3.406798 | 3.025211 | 1.126136 |
root = logging.getLogger()
root.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s %(levelname)-7s %(threadName)-10s:%(process)d [%(filename)s:%(funcName)s():%(lineno)s] %(message)s')
if env.is_debug():
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
root.addHandler(handler)
try:
handler = logging.FileHandler(GLOBAL_LOG_FNAME, mode='w')
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
root.addHandler(handler)
except IOError as e: # eg. in case wandb directory isn't writable
termerror('Failed to set up logging: {}'.format(e))
return False
return True | def try_to_set_up_global_logging() | Try to set up global W&B debug log that gets re-written by every W&B process.
It may fail (and return False) e.g. if the current directory isn't user-writable | 2.772213 | 2.594253 | 1.068598 |
api_key = overrides.get(env.API_KEY, Api().api_key)
if api_key is None:
raise ValueError(
"Can't find W&B ApiKey, set the WANDB_API_KEY env variable or run `wandb login`")
overrides[env.API_KEY] = api_key
with open(os.path.join(path, "secrets.env"), "w") as file:
for k, v in six.iteritems(overrides):
file.write("{}={}\n".format(k, v)) | def sagemaker_auth(overrides={}, path=".") | Write a secrets.env file with the W&B ApiKey and any additional secrets passed.
Args:
overrides (dict, optional): Additional environment variables to write to secrets.env
path (str, optional): The path to write the secrets file. | 3.677673 | 3.008598 | 1.222388 |
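A hedged usage sketch, assuming the function is exported at the package top level as in the wandb client this snippet comes from; the extra variable is hypothetical and the API key is read from `wandb login` / WANDB_API_KEY as described above:

import wandb

# Writes ./secrets.env containing WANDB_API_KEY plus the override below.
wandb.sagemaker_auth({"WANDB_PROJECT": "demo"}, path=".")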
if text[0:1] == '#':
col = text[1:]
if col in ANSI_COLOR_NAMES:
return col
elif len(col) == 6:
return col
elif len(col) == 3:
return col[0]*2 + col[1]*2 + col[2]*2
elif text == '':
return text
raise ValueError('Wrong color format %r' % text) | def _colorformat(text) | Parse/validate color format.
Like in Pygments, but also support the ANSI color names.
(These will map to the colors of the 16 color palette.) | 3.214916 | 3.128864 | 1.027503 |
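A standalone sketch of the hex branches only (the ANSI-name branch needs the package's ANSI_COLOR_NAMES table, so it is left out here):

def expand_hex(text):
    # '#f80' -> 'ff8800', '#ff8800' -> 'ff8800'; mirrors the 3- and 6-digit branches above.
    col = text[1:] if text.startswith('#') else text
    if len(col) == 3:
        col = col[0] * 2 + col[1] * 2 + col[2] * 2
    return col

assert expand_hex('#f80') == 'ff8800'
assert expand_hex('#ff8800') == 'ff8800'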