code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
if datetime_obj is None:
    return None
tzinfo_match = tz.gettz(tz_string)
return datetime_obj.replace(tzinfo=tzinfo_match)
def _add_tzinfo(self, datetime_obj, tz_string)
Take a naive datetime and add a dateutil.tz.tzinfo object to it.

:param datetime_obj: naive datetime object
:return: datetime object with tzinfo
3.623283
3.977948
0.910842
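For context, a minimal standalone sketch of the same idea, assuming python-dateutil is installed (the method above normally lives on a datefinder class):

from datetime import datetime
from dateutil import tz

def add_tzinfo(datetime_obj, tz_string):
    # Attach the named zone to a naive datetime without shifting the wall-clock time.
    if datetime_obj is None:
        return None
    return datetime_obj.replace(tzinfo=tz.gettz(tz_string))

print(add_tzinfo(datetime(2021, 6, 1, 12, 0), 'Europe/Berlin'))
# 2021-06-01 12:00:00+02:00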
# Try to find ranges first
range_strings = list()
found_range = False
for range_match in RANGE_REGEX.finditer(text):
    # Parse datetime 1 and datetime 2 recursively
    range_captures = range_match.capturesdict()
    dt1 = range_captures.get("dt1", [])
    dt2 = range_captures.get("dt2", [])

    for dt1_str in dt1:
        range_strings.extend(self.extract_date_strings(dt1_str, strict=strict))
    for dt2_str in dt2:
        range_strings.extend(self.extract_date_strings(dt2_str, strict=strict))
    found_range = True

for range_string in range_strings:
    yield range_string

# Try to match regular datetimes if no ranges have been found
if not found_range:
    for match in DATE_REGEX.finditer(text):
        match_str = match.group(0)
        indices = match.span(0)

        ## Get individual group matches
        captures = match.capturesdict()
        # time = captures.get('time')
        digits = captures.get("digits")
        # digits_modifiers = captures.get('digits_modifiers')
        # days = captures.get('days')
        months = captures.get("months")
        # timezones = captures.get('timezones')
        # delimiters = captures.get('delimiters')
        # time_periods = captures.get('time_periods')
        # extra_tokens = captures.get('extra_tokens')

        if strict:
            complete = False
            if len(digits) == 3:  # 12-05-2015
                complete = True
            elif (len(months) == 1) and (len(digits) == 2):  # 19 February 2013 year 09:10
                complete = True
            if not complete:
                continue

        ## sanitize date string
        ## replace unhelpful whitespace characters with single whitespace
        match_str = re.sub(r"[\n\t\s\xa0]+", " ", match_str)
        match_str = match_str.strip(STRIP_CHARS)

        ## Save sanitized source string
        yield match_str, indices, captures
def extract_date_strings(self, text, strict=False)
Scans text for possible datetime strings and extracts them.

:param strict: Strict mode will only return dates sourced with day, month, and year
3.217119
3.212286
1.001505
if sys.version_info[:2] >= (3, 2):
    if value not in FORMAT_STYLE_PATTERNS:
        msg = "Unsupported logging format style! (%r)"
        raise ValueError(msg % value)
elif value != DEFAULT_FORMAT_STYLE:
    msg = "Format string styles other than %r require Python 3.2+!"
    raise ValueError(msg % DEFAULT_FORMAT_STYLE)
return value
def check_style(value)
Validate a logging format style.

:param value: The logging format style to validate (any value).
:returns: The logging format character (a string of one character).
:raises: :exc:`~exceptions.ValueError` when the given style isn't supported.

On Python 3.2+ this function accepts the logging format styles ``%``, ``{``
and ``$`` while on older versions only ``%`` is accepted (because older
Python versions don't support alternative logging format styles).
5.141969
4.645076
1.106972
defined_levels = sorted(set(find_defined_levels().values()))
current_index = defined_levels.index(get_level())
selected_index = max(0, current_index - 1)
set_level(defined_levels[selected_index])
def increase_verbosity()
Increase the verbosity of the root handler by one defined level. Understands custom logging levels like those defined by my ``verboselogs`` module.
3.810575
4.406744
0.864714
defined_levels = sorted(set(find_defined_levels().values()))
current_index = defined_levels.index(get_level())
selected_index = min(current_index + 1, len(defined_levels) - 1)
set_level(defined_levels[selected_index])
def decrease_verbosity()
Decrease the verbosity of the root handler by one defined level. Understands custom logging levels like those defined by my ``verboselogs`` module.
3.176507
3.586258
0.885744
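A hedged standalone illustration of the clamping shared by increase_verbosity() and decrease_verbosity(): both walk a sorted list of level numbers and stop at the ends (the level numbers below are the stock ones, assuming no custom levels):

defined_levels = [0, 10, 20, 30, 40, 50]  # NOTSET .. CRITICAL

def step_verbosity(current_level, more_verbose):
    index = defined_levels.index(current_level)
    # More verbose means a *lower* level number; clamp at both ends.
    index = max(0, index - 1) if more_verbose else min(index + 1, len(defined_levels) - 1)
    return defined_levels[index]

assert step_verbosity(0, more_verbose=True) == 0     # already maximally verbose
assert step_verbosity(20, more_verbose=False) == 30  # INFO -> WARNING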
handler, logger = find_handler(logging.getLogger(), match_stream_handler)
return handler.level if handler else DEFAULT_LOG_LEVEL
def get_level()
Get the logging level of the root handler. :returns: The logging level of the root handler (an integer) or :data:`DEFAULT_LOG_LEVEL` (if no root handler exists).
12.612441
12.790896
0.986048
handler, logger = find_handler(logging.getLogger(), match_stream_handler)
if handler and logger:
    # Change the level of the existing handler.
    handler.setLevel(level_to_number(level))
    # Adjust the level of the selected logger.
    adjust_level(logger, level)
else:
    # Create a new handler with the given level.
    install(level=level)
def set_level(level)
Set the logging level of the root handler. :param level: The logging level to filter on (an integer or string). If no root handler exists yet this automatically calls :func:`install()`.
6.701685
6.625208
1.011543
level = level_to_number(level)
if logger.getEffectiveLevel() > level:
    logger.setLevel(level)
def adjust_level(logger, level)
Increase a logger's verbosity up to the requested level.

:param logger: The logger to change (a :class:`~logging.Logger` object).
:param level: The log level to enable (a string or number).

This function is used by functions like :func:`install()`,
:func:`increase_verbosity()` and :func:`.enable_system_logging()` to adjust a
logger's level so that log messages up to the requested log level are
propagated to the configured output handler(s).

It uses :func:`logging.Logger.getEffectiveLevel()` to check whether `logger`
propagates or swallows log messages of the requested `level` and sets the
logger's level to the requested level if it would otherwise swallow log
messages.

Effectively this function will "widen the scope of logging" when asked to do
so but it will never "narrow the scope of logging". This is because I am
convinced that filtering of log messages should (primarily) be decided by
handlers.
3.799152
5.356546
0.709254
defined_levels = {}
for name in dir(logging):
    if name.isupper():
        value = getattr(logging, name)
        if isinstance(value, int):
            defined_levels[name] = value
return defined_levels
def find_defined_levels()
Find the defined logging levels.

:returns: A dictionary with level names as keys and integers as values.

Here's what the result looks like by default (when no custom levels or level
names have been defined):

>>> find_defined_levels()
{'NOTSET': 0,
 'DEBUG': 10,
 'INFO': 20,
 'WARN': 30,
 'WARNING': 30,
 'ERROR': 40,
 'FATAL': 50,
 'CRITICAL': 50}
2.523218
2.923019
0.863223
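A quick sanity check of the same scan against the stock logging module (assuming no custom levels have been registered):

import logging

levels = {name: getattr(logging, name)
          for name in dir(logging)
          if name.isupper() and isinstance(getattr(logging, name), int)}
assert levels['DEBUG'] == 10
assert levels['WARN'] == levels['WARNING'] == 30
assert sorted(set(levels.values())) == [0, 10, 20, 30, 40, 50]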
if is_string(value):
    try:
        defined_levels = find_defined_levels()
        value = defined_levels[value.upper()]
    except KeyError:
        # Don't fail on unsupported log levels.
        value = DEFAULT_LOG_LEVEL
return value
def level_to_number(value)
Coerce a logging level name to a number. :param value: A logging level (integer or string). :returns: The number of the log level (an integer). This function translates log level names into their numeric values. The :mod:`logging` module does this for us on Python 2.7 and 3.4 but fails to do so on Python 2.6 which :mod:`coloredlogs` still supports.
5.236165
5.234104
1.000394
mapping = collections.defaultdict(list)
for name, value in find_defined_levels().items():
    mapping[value].append(name)
aliases = {}
for value, names in mapping.items():
    if len(names) > 1:
        names = sorted(names, key=lambda n: len(n))
        canonical_name = names.pop()
        for alias in names:
            aliases[alias] = canonical_name
return aliases
def find_level_aliases()
Find log level names which are aliases of each other.

:returns: A dictionary that maps aliases to their canonical name.

.. note:: Canonical names are chosen to be the alias with the longest string
          length so that e.g. ``WARN`` is an alias for ``WARNING`` instead of
          the other way around.

Here's what the result looks like by default (when no custom levels or level
names have been defined):

>>> from coloredlogs import find_level_aliases
>>> find_level_aliases()
{'WARN': 'WARNING', 'FATAL': 'CRITICAL'}
2.630347
3.031603
0.867642
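A hypothetical standalone reproduction of the aliasing rule above: among names sharing a numeric value, the longest name is treated as canonical (the level table here is a hand-picked sample):

import collections

levels = {'WARN': 30, 'WARNING': 30, 'FATAL': 50, 'CRITICAL': 50, 'INFO': 20}
by_value = collections.defaultdict(list)
for name, value in levels.items():
    by_value[value].append(name)

aliases = {}
for names in by_value.values():
    if len(names) > 1:
        names = sorted(names, key=len)
        canonical = names.pop()
        aliases.update((alias, canonical) for alias in names)

assert aliases == {'WARN': 'WARNING', 'FATAL': 'CRITICAL'}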
parsed_styles = {}
for assignment in split(text, ';'):
    name, _, styles = assignment.partition('=')
    target = parsed_styles.setdefault(name, {})
    for token in split(styles, ','):
        # When this code was originally written, setting background colors
        # wasn't supported yet, so there was no need to disambiguate
        # between the text color and background color. This explains why
        # a color name or number implies setting the text color (for
        # backwards compatibility).
        if token.isdigit():
            target['color'] = int(token)
        elif token in ANSI_COLOR_CODES:
            target['color'] = token
        elif '=' in token:
            name, _, value = token.partition('=')
            if name in ('color', 'background'):
                if value.isdigit():
                    target[name] = int(value)
                elif value in ANSI_COLOR_CODES:
                    target[name] = value
        else:
            target[token] = True
return parsed_styles
def parse_encoded_styles(text, normalize_key=None)
Parse text styles encoded in a string into a nested data structure.

:param text: The encoded styles (a string).
:returns: A dictionary in the structure of the :data:`DEFAULT_FIELD_STYLES`
          and :data:`DEFAULT_LEVEL_STYLES` dictionaries.

Here's an example of how this function works (note that :func:`pprint`
prints the dictionary with its keys sorted):

>>> from coloredlogs import parse_encoded_styles
>>> from pprint import pprint
>>> encoded_styles = 'debug=green;warning=yellow;error=red;critical=red,bold'
>>> pprint(parse_encoded_styles(encoded_styles))
{'critical': {'bold': True, 'color': 'red'},
 'debug': {'color': 'green'},
 'error': {'color': 'red'},
 'warning': {'color': 'yellow'}}
4.098689
4.013985
1.021102
for chroot_file in CHROOT_FILES:
    try:
        with open(chroot_file) as handle:
            first_line = next(handle)
            name = first_line.strip()
            if name:
                return name
    except Exception:
        pass
return socket.gethostname()
def find_hostname(use_chroot=True)
Find the host name to include in log messages. :param use_chroot: Use the name of the chroot when inside a chroot? (boolean, defaults to :data:`True`) :returns: A suitable host name (a string). Looks for :data:`CHROOT_FILES` that have a nonempty first line (taken to be the chroot name). If none are found then :func:`socket.gethostname()` is used as a fallback.
3.165184
2.591447
1.221397
# Gotcha: sys.argv[0] is '-c' if Python is started with the -c option.
return ((os.path.basename(sys.argv[0]) if sys.argv and sys.argv[0] != '-c' else '')
        or (os.path.basename(sys.executable) if sys.executable else '')
        or 'python')
def find_program_name()
Select a suitable program name to embed in log messages.

:returns: One of the following strings (in decreasing order of preference):

          1. The base name of the currently running Python program or script
             (based on the value at index zero of :data:`sys.argv`).
          2. The base name of the Python executable (based on
             :data:`sys.executable`).
          3. The string 'python'.
3.79734
4.050921
0.937402
handler, other_logger = find_handler(logger, match_handler)
if handler and other_logger and reconfigure:
    # Remove the existing handler from the logger that it's attached to
    # so that we can install a new handler that behaves differently.
    other_logger.removeHandler(handler)
    # Switch to the logger that the existing handler was attached to so
    # that reconfiguration doesn't narrow the scope of logging.
    logger = other_logger
return handler, logger
def replace_handler(logger, match_handler, reconfigure)
Prepare to replace a handler.

:param logger: Refer to :func:`find_handler()`.
:param match_handler: Refer to :func:`find_handler()`.
:param reconfigure: :data:`True` if an existing handler should be replaced,
                    :data:`False` otherwise.
:returns: A tuple of two values:

          1. The matched :class:`~logging.Handler` object or :data:`None` if
             no handler was matched.
          2. The :class:`~logging.Logger` to which the matched handler was
             attached or the logger given to :func:`replace_handler()`.
5.53428
5.269751
1.050198
for logger in walk_propagation_tree(logger):
    for handler in getattr(logger, 'handlers', []):
        if match_handler(handler):
            return handler, logger
return None, None
def find_handler(logger, match_handler)
Find a (specific type of) handler in the propagation tree of a logger.

:param logger: The logger to check (a :class:`~logging.Logger` object).
:param match_handler: A callable that receives a :class:`~logging.Handler`
                      object and returns :data:`True` to match a handler or
                      :data:`False` to skip that handler and continue
                      searching for a match.
:returns: A tuple of two values:

          1. The matched :class:`~logging.Handler` object or :data:`None` if
             no handler was matched.
          2. The :class:`~logging.Logger` object to which the handler is
             attached or :data:`None` if no handler was matched.

This function finds a logging handler (of the given type) attached to a
logger or one of its parents (see :func:`walk_propagation_tree()`). It uses
the undocumented :class:`~logging.Logger.handlers` attribute to find handlers
attached to a logger, however it won't raise an exception if the attribute
isn't available. The advantages of this approach are:

- This works regardless of whether :mod:`coloredlogs` attached the handler
  or other Python code attached the handler.
- This will correctly recognize the situation where the given logger has no
  handlers but :attr:`~logging.Logger.propagate` is enabled and the logger
  has a parent logger that does have a handler attached.
4.511832
3.737669
1.207125
return (isinstance(handler, logging.StreamHandler)
        and getattr(handler, 'stream') in (streams or (sys.stdout, sys.stderr)))
def match_stream_handler(handler, streams=[])
Identify stream handlers writing to the given stream(s).

:param handler: The :class:`~logging.Handler` object to check.
:param streams: A sequence of streams to match (defaults to matching
                :data:`~sys.stdout` and :data:`~sys.stderr`).
:returns: :data:`True` if the handler is a :class:`~logging.StreamHandler`
          logging to the given stream(s), :data:`False` otherwise.

This function can be used as a callback for :func:`find_handler()`.
4.262878
4.913859
0.867521
while isinstance(logger, logging.Logger):
    # Yield the logger to our caller.
    yield logger
    # Check if the logger has propagation enabled.
    if logger.propagate:
        # Continue with the parent logger. We use getattr() because the
        # `parent' attribute isn't documented so properly speaking we
        # shouldn't break if it's not available.
        logger = getattr(logger, 'parent', None)
    else:
        # The propagation chain stops here.
        logger = None
def walk_propagation_tree(logger)
Walk through the propagation hierarchy of the given logger.

:param logger: The logger whose hierarchy to walk (a
               :class:`~logging.Logger` object).
:returns: A generator of :class:`~logging.Logger` objects.

.. note:: This uses the undocumented :class:`logging.Logger.parent` attribute
          to find higher level loggers, however it won't raise an exception
          if the attribute isn't available.
7.715066
6.681139
1.154753
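A runnable sketch of the walk performed above (the 'app' logger is created explicitly so the chain has an intermediate member; otherwise 'app.db' would attach directly to the root logger):

import logging

def walk(logger):
    # Follow the parent chain for as long as propagation is enabled.
    while isinstance(logger, logging.Logger):
        yield logger
        logger = getattr(logger, 'parent', None) if logger.propagate else None

logging.getLogger('app')            # materialize the intermediate logger
child = logging.getLogger('app.db')
print([item.name for item in walk(child)])  # ['app.db', 'app', 'root']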
# The default value of the following argument is defined here so
# that Sphinx doesn't embed the default value in the generated
# documentation (because the result is awkward to read).
datefmt = datefmt or DEFAULT_DATE_FORMAT
# Replace %f with the value of %(msecs)03d.
if '%f' in datefmt:
    datefmt = datefmt.replace('%f', '%03d' % record.msecs)
# Delegate the actual date/time formatting to the base formatter.
return logging.Formatter.formatTime(self, record, datefmt)
def formatTime(self, record, datefmt=None)
Format the date/time of a log record. :param record: A :class:`~logging.LogRecord` object. :param datefmt: A date/time format string (defaults to :data:`DEFAULT_DATE_FORMAT`). :returns: The formatted date/time (a string). This method overrides :func:`~logging.Formatter.formatTime()` to set `datefmt` to :data:`DEFAULT_DATE_FORMAT` when the caller hasn't specified a date format. When `datefmt` contains the token ``%f`` it will be replaced by the value of ``%(msecs)03d`` (refer to issue `#45`_ for use cases).
5.041488
4.282951
1.177106
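A hedged illustration of the %f handling above: the token is substituted with the pre-formatted milliseconds before strftime() ever sees the format string (the date format here is just an example):

import logging

record = logging.LogRecord('demo', logging.INFO, __file__, 1, 'hi', (), None)
datefmt = '%H:%M:%S.%f'
datefmt = datefmt.replace('%f', '%03d' % record.msecs)
print(logging.Formatter().formatTime(record, datefmt))  # e.g. 14:03:07.421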
result = []
parser = FormatStringParser(style=style)
for group in parser.get_grouped_pairs(fmt):
    applicable_styles = [self.nn.get(self.field_styles, token.name)
                         for token in group if token.name]
    if sum(map(bool, applicable_styles)) == 1:
        # If exactly one (1) field style is available for the group of
        # tokens then all of the tokens will be styled the same way.
        # This provides a limited form of backwards compatibility with
        # the (intended) behavior of coloredlogs before the release of
        # version 10.
        result.append(ansi_wrap(
            ''.join(token.text for token in group),
            **next(s for s in applicable_styles if s)
        ))
    else:
        for token in group:
            text = token.text
            if token.name:
                field_styles = self.nn.get(self.field_styles, token.name)
                if field_styles:
                    text = ansi_wrap(text, **field_styles)
            result.append(text)
return ''.join(result)
def colorize_format(self, fmt, style=DEFAULT_FORMAT_STYLE)
Rewrite a logging format string to inject ANSI escape sequences.

:param fmt: The log format string.
:param style: One of the characters ``%``, ``{`` or ``$`` (defaults to
              :data:`DEFAULT_FORMAT_STYLE`).
:returns: The logging format string with ANSI escape sequences.

This method takes a logging format string like the ones you give to
:class:`logging.Formatter` and processes it as follows:

1. First the logging format string is separated into formatting directives
   versus surrounding text (according to the given `style`).
2. Then formatting directives and surrounding text are grouped based on
   whitespace delimiters (in the surrounding text).
3. For each group styling is selected as follows:

   1. If the group contains a single formatting directive that has a style
      defined then the whole group is styled accordingly.
   2. If the group contains multiple formatting directives that have styles
      defined then each formatting directive is styled individually and
      surrounding text isn't styled.

As an example consider the default log format (:data:`DEFAULT_LOG_FORMAT`)::

    %(asctime)s %(hostname)s %(name)s[%(process)d] %(levelname)s %(message)s

The default field styles (:data:`DEFAULT_FIELD_STYLES`) define a style for
the `name` field but not for the `process` field, however because both
fields are part of the same whitespace delimited token they'll be
highlighted together in the style defined for the `name` field.
4.825675
4.595951
1.049984
style = self.nn.get(self.level_styles, record.levelname)
# After the introduction of the `Empty' class it was reported in issue
# 33 that format() can be called when `Empty' has already been garbage
# collected. This explains the (otherwise rather out of place) `Empty
# is not None' check in the following `if' statement. The reasoning
# here is that it's much better to log a message without formatting
# than to raise an exception ;-).
#
# For more details refer to issue 33 on GitHub:
# https://github.com/xolox/python-coloredlogs/issues/33
if style and Empty is not None:
    # Due to the way that Python's logging module is structured and
    # documented the only (IMHO) clean way to customize its behavior is
    # to change incoming LogRecord objects before they get to the base
    # formatter. However we don't want to break other formatters and
    # handlers, so we copy the log record.
    #
    # In the past this used copy.copy() but as reported in issue 29
    # (which is reproducible) this can cause deadlocks. The following
    # Python voodoo is intended to accomplish the same thing as
    # copy.copy() without all of the generalization and overhead that
    # we don't need for our -very limited- use case.
    #
    # For more details refer to issue 29 on GitHub:
    # https://github.com/xolox/python-coloredlogs/issues/29
    copy = Empty()
    copy.__class__ = (self.log_record_factory()
                      if self.log_record_factory is not None
                      else logging.LogRecord)
    copy.__dict__.update(record.__dict__)
    copy.msg = ansi_wrap(coerce_string(record.msg), **style)
    record = copy
# Delegate the remaining formatting to the base formatter.
return logging.Formatter.format(self, record)
def format(self, record)
Apply level-specific styling to log records. :param record: A :class:`~logging.LogRecord` object. :returns: The result of :func:`logging.Formatter.format()`. This method injects ANSI escape sequences that are specific to the level of each log record (because such logic cannot be expressed in the syntax of a log format string). It works by making a copy of the log record, changing the `msg` field inside the copy and passing the copy into the :func:`~logging.Formatter.format()` method of the base class.
7.941561
7.668186
1.035651
if fmt:
    parser = FormatStringParser(style=style)
    if not parser.contains_field(fmt, 'hostname'):
        return
handler.addFilter(cls(use_chroot))
def install(cls, handler, fmt=None, use_chroot=True, style=DEFAULT_FORMAT_STYLE)
Install the :class:`HostNameFilter` on a log handler (only if needed). :param fmt: The log format string to check for ``%(hostname)``. :param style: One of the characters ``%``, ``{`` or ``$`` (defaults to :data:`DEFAULT_FORMAT_STYLE`). :param handler: The logging handler on which to install the filter. :param use_chroot: Refer to :func:`find_hostname()`. If `fmt` is given the filter will only be installed if `fmt` uses the ``hostname`` field. If `fmt` is not given the filter is installed unconditionally.
10.376443
5.479078
1.89383
if fmt:
    parser = FormatStringParser(style=style)
    if not parser.contains_field(fmt, 'programname'):
        return
handler.addFilter(cls(programname))
def install(cls, handler, fmt, programname=None, style=DEFAULT_FORMAT_STYLE)
Install the :class:`ProgramNameFilter` (only if needed). :param fmt: The log format string to check for ``%(programname)``. :param style: One of the characters ``%``, ``{`` or ``$`` (defaults to :data:`DEFAULT_FORMAT_STYLE`). :param handler: The logging handler on which to install the filter. :param programname: Refer to :func:`__init__()`. If `fmt` is given the filter will only be installed if `fmt` uses the ``programname`` field. If `fmt` is not given the filter is installed unconditionally.
10.30358
5.689188
1.811081
# Step 1: Split simple tokens (without a name) into
# their whitespace parts and non-whitespace parts.
separated = []
pattern = re.compile(r'(\s+)')
for token in self.get_pairs(format_string):
    if token.name:
        separated.append(token)
    else:
        separated.extend(
            FormatStringToken(name=None, text=text)
            for text in pattern.split(token.text) if text
        )
# Step 2: Group tokens together based on whitespace.
current_group = []
grouped_pairs = []
for token in separated:
    if token.text.isspace():
        if current_group:
            grouped_pairs.append(current_group)
        grouped_pairs.append([token])
        current_group = []
    else:
        current_group.append(token)
if current_group:
    grouped_pairs.append(current_group)
return grouped_pairs
def get_grouped_pairs(self, format_string)
Group the results of :func:`get_pairs()` separated by whitespace. :param format_string: The logging format string. :returns: A list of lists of :class:`FormatStringToken` objects.
2.940353
2.735479
1.074895
for token in self.get_tokens(format_string):
    match = self.name_pattern.search(token)
    name = match.group(1) if match else None
    yield FormatStringToken(name=name, text=token)
def get_pairs(self, format_string)
Tokenize a logging format string and extract field names from tokens. :param format_string: The logging format string. :returns: A generator of :class:`FormatStringToken` objects.
3.989458
3.243098
1.230138
return re.compile(self.raw_pattern.replace(r'\w+', field_name), re.VERBOSE)
def get_pattern(self, field_name)
Get a regular expression to match a formatting directive that references the given field name. :param field_name: The name of the field to match (a string). :returns: A compiled regular expression object.
8.057651
8.883037
0.907083
return [t for t in self.tokenize_pattern.split(format_string) if t]
def get_tokens(self, format_string)
Tokenize a logging format string. :param format_string: The logging format string. :returns: A list of strings with formatting directives separated from surrounding text.
5.496972
7.792973
0.705375
name = name.lower()
if name in self.aliases:
    name = self.aliases[name]
return name
def normalize_name(self, name)
Normalize a field or level name.

:param name: The field or level name (a string).
:returns: The normalized name (a string).

Transforms all strings to lowercase and resolves level name aliases (refer
to :func:`find_level_aliases()`) to their canonical name:

>>> from coloredlogs import NameNormalizer
>>> from humanfriendly import format_table
>>> nn = NameNormalizer()
>>> sample_names = ['DEBUG', 'INFO', 'WARN', 'WARNING', 'ERROR', 'FATAL', 'CRITICAL']
>>> print(format_table([(n, nn.normalize_name(n)) for n in sample_names]))
-----------------------
| DEBUG    | debug    |
| INFO     | info     |
| WARN     | warning  |
| WARNING  | warning  |
| ERROR    | error    |
| FATAL    | critical |
| CRITICAL | critical |
-----------------------
3.063823
5.452836
0.561877
return dict((self.normalize_name(k), v) for k, v in value.items())
def normalize_keys(self, value)
Normalize the keys of a dictionary using :func:`normalize_name()`. :param value: The dictionary to normalize. :returns: A dictionary with normalized keys.
3.931324
4.328496
0.908242
# Determine the available logging levels and order them by numeric value.
decorated_levels = []
defined_levels = coloredlogs.find_defined_levels()
normalizer = coloredlogs.NameNormalizer()
for name, level in defined_levels.items():
    if name != 'NOTSET':
        item = (level, normalizer.normalize_name(name))
        if item not in decorated_levels:
            decorated_levels.append(item)
ordered_levels = sorted(decorated_levels)
# Initialize colored output to the terminal, default to the most
# verbose logging level but enable the user to customize it.
coloredlogs.install(level=os.environ.get('COLOREDLOGS_LOG_LEVEL', ordered_levels[0][1]))
# Print some examples with different timestamps.
for level, name in ordered_levels:
    log_method = getattr(logger, name, None)
    if log_method:
        log_method("message with level %s (%i)", name, level)
        time.sleep(DEMO_DELAY)
def demonstrate_colored_logging()
Interactively demonstrate the :mod:`coloredlogs` package.
4.720012
4.667851
1.011175
actions = []
try:
    # Parse the command line arguments.
    options, arguments = getopt.getopt(sys.argv[1:], 'cdh', [
        'convert', 'to-html', 'demo', 'help',
    ])
    # Map command line options to actions.
    for option, value in options:
        if option in ('-c', '--convert', '--to-html'):
            actions.append(functools.partial(convert_command_output, *arguments))
            arguments = []
        elif option in ('-d', '--demo'):
            actions.append(demonstrate_colored_logging)
        elif option in ('-h', '--help'):
            usage(__doc__)
            return
        else:
            assert False, "Programming error: Unhandled option!"
    if not actions:
        usage(__doc__)
        return
except Exception as e:
    warning("Error: %s", e)
    sys.exit(1)
for function in actions:
    function()
def main()
Command line interface for the ``coloredlogs`` program.
3.524998
3.382516
1.042123
captured_output = capture(command)
converted_output = convert(captured_output)
if connected_to_terminal():
    fd, temporary_file = tempfile.mkstemp(suffix='.html')
    with open(temporary_file, 'w') as handle:
        handle.write(converted_output)
    webbrowser.open(temporary_file)
elif captured_output and not captured_output.isspace():
    output(converted_output)
def convert_command_output(*command)
Command line interface for ``coloredlogs --to-html``. Takes a command (and its arguments) and runs the program under ``script`` (emulating an interactive terminal), intercepts the output of the command and converts ANSI escape sequences in the output to HTML.
3.749228
3.78642
0.990178
with open(os.devnull, 'wb') as dev_null:
    # We start by invoking the `script' program in a form that is supported
    # by the Linux implementation [1] but fails command line validation on
    # the MacOS (BSD) implementation [2]: The command is specified using
    # the -c option and the typescript file is /dev/null.
    #
    # [1] http://man7.org/linux/man-pages/man1/script.1.html
    # [2] https://developer.apple.com/legacy/library/documentation/Darwin/Reference/ManPages/man1/script.1.html
    command_line = ['script', '-qc', ' '.join(map(pipes.quote, command)), '/dev/null']
    script = subprocess.Popen(command_line, stdout=subprocess.PIPE, stderr=dev_null)
    stdout, stderr = script.communicate()
    if script.returncode == 0:
        # If `script' succeeded we assume that it understood our command line
        # invocation which means it's the Linux implementation (in this case
        # we can use standard output instead of a temporary file).
        output = stdout.decode(encoding)
    else:
        # If `script' failed we assume that it didn't understand our command
        # line invocation which means it's the MacOS (BSD) implementation
        # (in this case we need a temporary file because the command line
        # interface requires it).
        fd, temporary_file = tempfile.mkstemp(prefix='coloredlogs-', suffix='-capture.txt')
        try:
            command_line = ['script', '-q', temporary_file] + list(command)
            subprocess.Popen(command_line, stdout=dev_null, stderr=dev_null).wait()
            with codecs.open(temporary_file, 'r', encoding) as handle:
                output = handle.read()
        finally:
            os.unlink(temporary_file)
        # On MacOS when standard input is /dev/null I've observed
        # the captured output starting with the characters '^D':
        #
        #   $ script -q capture.txt echo example </dev/null
        #   example
        #   $ xxd capture.txt
        #   00000000: 5e44 0808 6578 616d 706c 650d 0a    ^D..example..
        #
        # I'm not sure why this is here, although I suppose it has to do
        # with ^D in caret notation signifying end-of-file [1]. What I do
        # know is that this is an implementation detail that callers of the
        # capture() function shouldn't be bothered with, so we strip it.
        #
        # [1] https://en.wikipedia.org/wiki/End-of-file
        if output.startswith('^D'):  # `output' is already decoded to text here
            output = output[2:]
    # Clean up backspace and carriage return characters and the 'erase line'
    # ANSI escape sequence and return the output as a Unicode string.
    return u'\n'.join(clean_terminal_output(output))
def capture(command, encoding='UTF-8')
Capture the output of an external command as if it runs in an interactive terminal. :param command: The command name and its arguments (a list of strings). :param encoding: The encoding to use to decode the output (a string). :returns: The output of the command. This function runs an external command under ``script`` (emulating an interactive terminal) to capture the output of the command as if it was running in an interactive terminal (including ANSI escape sequences).
4.582104
4.515411
1.01477
# Convert Windows line endings (CR+LF) to UNIX line endings (LF).
text = text.replace('\r\n', '\n')
# Convert UNIX line endings (LF) to HTML line endings (<br>).
text = text.replace('\n', '<br>\n')
# Convert tabs to spaces.
text = text.expandtabs(tabsize)
# Convert leading spaces (that is to say spaces at the start of the string
# and/or directly after a line ending) into non-breaking spaces, otherwise
# HTML rendering engines will simply ignore these spaces.
text = re.sub(INDENT_PATTERN, encode_whitespace_cb, text)
# The conversion of leading spaces we just did misses a corner case where a
# line starts with an HTML tag but the first visible text is a space. Web
# browsers seem to ignore these spaces, so we need to convert them.
text = re.sub(TAG_INDENT_PATTERN, r'\1&nbsp;', text)
# Convert runs of multiple spaces into non-breaking spaces to avoid HTML
# rendering engines from visually collapsing runs of spaces into a single
# space. We specifically don't replace single spaces for several reasons:
# 1. We'd break the HTML emitted by convert() by replacing spaces
#    inside HTML elements (for example the spaces that separate
#    element names from attribute names).
# 2. If every single space is replaced by a non-breaking space,
#    web browsers perform awkwardly unintuitive word wrapping.
# 3. The HTML output would be bloated for no good reason.
text = re.sub(' {2,}', encode_whitespace_cb, text)
return text
def encode_whitespace(text, tabsize=4)
Encode whitespace so that web browsers properly render it. :param text: The plain text (a string). :param tabsize: Refer to :func:`str.expandtabs()` for details. :returns: The text converted to HTML (a string). The purpose of this function is to encode whitespace in such a way that web browsers render the same whitespace regardless of whether 'preformatted' styling is used (by wrapping the text in a ``<pre>...</pre>`` element). .. note:: While the string manipulation performed by this function is specifically intended not to corrupt the HTML generated by :func:`convert()` it definitely does have the potential to corrupt HTML from other sources. You have been warned :-).
5.859701
5.698136
1.028354
text = text.replace('&', '&amp;')
text = text.replace('<', '&lt;')
text = text.replace('>', '&gt;')
text = text.replace('"', '&quot;')
return text
def html_encode(text)
Encode characters with a special meaning as HTML. :param text: The plain text (a string). :returns: The text converted to HTML (a string).
1.473583
1.90862
0.772067
if value.startswith('#'):
    value = value[1:]
if len(value) == 3:
    return (
        int(value[0] * 2, 16),
        int(value[1] * 2, 16),
        int(value[2] * 2, 16),
    )
elif len(value) == 6:
    return (
        int(value[0:2], 16),
        int(value[2:4], 16),
        int(value[4:6], 16),
    )
else:
    raise ValueError()
def parse_hex_color(value)
Convert a CSS color in hexadecimal notation into its R, G, B components. :param value: A CSS color in hexadecimal notation (a string like '#000000'). :return: A tuple with three integers (with values between 0 and 255) corresponding to the R, G and B components of the color. :raises: :exc:`~exceptions.ValueError` on values that can't be parsed.
1.39489
1.441108
0.967928
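Assuming parse_hex_color() is in scope, a quick check of both supported notations:

assert parse_hex_color('#fff') == (255, 255, 255)   # 3-digit shorthand
assert parse_hex_color('#ff8000') == (255, 128, 0)  # full 6-digit form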
# Provide defaults for omitted arguments.
programname = programname or find_program_name()
logger = logger or logging.getLogger()
fmt = fmt or DEFAULT_LOG_FORMAT
level = level_to_number(kw.get('level', DEFAULT_LOG_LEVEL))
# Check whether system logging is already enabled.
handler, logger = replace_handler(logger, match_syslog_handler, reconfigure)
# Make sure reconfiguration is allowed or not relevant.
if not (handler and not reconfigure):
    # Create a system logging handler.
    handler = connect_to_syslog(**kw)
    # Make sure the handler was successfully created.
    if handler:
        # Enable the use of %(programname)s.
        ProgramNameFilter.install(handler=handler, fmt=fmt, programname=programname)
        # Connect the formatter, handler and logger.
        handler.setFormatter(logging.Formatter(fmt))
        logger.addHandler(handler)
        # Adjust the level of the selected logger.
        adjust_level(logger, level)
return handler
def enable_system_logging(programname=None, fmt=None, logger=None, reconfigure=True, **kw)
Redirect :mod:`logging` messages to the system log (e.g. ``/var/log/syslog``). :param programname: The program name to embed in log messages (a string, defaults to the result of :func:`~coloredlogs.find_program_name()`). :param fmt: The log format for system log messages (a string, defaults to :data:`DEFAULT_LOG_FORMAT`). :param logger: The logger to which the :class:`~logging.handlers.SysLogHandler` should be connected (defaults to the root logger). :param level: The logging level for the :class:`~logging.handlers.SysLogHandler` (defaults to :data:`.DEFAULT_LOG_LEVEL`). This value is coerced using :func:`~coloredlogs.level_to_number()`. :param reconfigure: If :data:`True` (the default) multiple calls to :func:`enable_system_logging()` will each override the previous configuration. :param kw: Refer to :func:`connect_to_syslog()`. :returns: A :class:`~logging.handlers.SysLogHandler` object or :data:`None`. If an existing handler is found and `reconfigure` is :data:`False` the existing handler object is returned. If the connection to the system logging daemon fails :data:`None` is returned. .. note:: When the logger's effective level is too restrictive it is relaxed (refer to `notes about log levels`_ for details).
4.695208
4.236779
1.108202
if not address:
    address = find_syslog_address()
if facility is None:
    facility = logging.handlers.SysLogHandler.LOG_USER
if level is None:
    level = DEFAULT_LOG_LEVEL
for socktype in socket.SOCK_RAW, socket.SOCK_STREAM, None:
    kw = dict(facility=facility, address=address)
    if socktype is not None:
        kw['socktype'] = socktype
    try:
        handler = logging.handlers.SysLogHandler(**kw)
    except (IOError, TypeError):
        # The socktype argument was added in Python 2.7 and its use will raise a
        # TypeError exception on Python 2.6. IOError is a superclass of socket.error
        # (since Python 2.6) which can be raised if the system logging daemon is
        # unavailable.
        pass
    else:
        handler.setLevel(level_to_number(level))
        return handler
def connect_to_syslog(address=None, facility=None, level=None)
Create a :class:`~logging.handlers.SysLogHandler`.

:param address: The device file or network address of the system logging
                daemon (a string or tuple, defaults to the result of
                :func:`find_syslog_address()`).
:param facility: Refer to :class:`~logging.handlers.SysLogHandler`.
                 Defaults to ``LOG_USER``.
:param level: The logging level for the :class:`~logging.handlers.SysLogHandler`
              (defaults to :data:`.DEFAULT_LOG_LEVEL`). This value is coerced
              using :func:`~coloredlogs.level_to_number()`.
:returns: A :class:`~logging.handlers.SysLogHandler` object or :data:`None`
          (if the system logging daemon is unavailable).

The process of connecting to the system logging daemon goes as follows:

- If :class:`~logging.handlers.SysLogHandler` supports the `socktype` option
  (it does since Python 2.7) the following two socket types are tried (in
  decreasing preference):

  1. :data:`~socket.SOCK_RAW` avoids truncation of log messages but may not
     be supported.
  2. :data:`~socket.SOCK_STREAM` (TCP) supports longer messages than the
     default (which is UDP).

- If socket types are not supported Python's (2.6) defaults are used to
  connect to the selected `address`.
3.414818
2.774297
1.230877
if sys.platform == 'darwin' and os.path.exists(LOG_DEVICE_MACOSX):
    return LOG_DEVICE_MACOSX
elif os.path.exists(LOG_DEVICE_UNIX):
    return LOG_DEVICE_UNIX
else:
    return 'localhost', logging.handlers.SYSLOG_UDP_PORT
def find_syslog_address()
Find the most suitable destination for system log messages. :returns: The pathname of a log device (a string) or an address/port tuple as supported by :class:`~logging.handlers.SysLogHandler`. On Mac OS X this prefers :data:`LOG_DEVICE_MACOSX`, after that :data:`LOG_DEVICE_UNIX` is checked for existence. If both of these device files don't exist the default used by :class:`~logging.handlers.SysLogHandler` is returned.
2.946218
1.983519
1.485349
coloredlogs.install(level='debug')
arguments = sys.argv[1:]
if arguments:
    interpret_script(arguments[0])
else:
    logger.notice(compact())
    generate_screenshots()
def main()
Command line interface.
11.333846
11.591307
0.977788
this_script = os.path.abspath(__file__)
this_directory = os.path.dirname(this_script)
repository = os.path.join(this_directory, os.pardir)
examples_directory = os.path.join(repository, 'docs', 'examples')
images_directory = os.path.join(repository, 'docs', 'images')
for shell_script in sorted(glob.glob(os.path.join(examples_directory, '*.sh'))):
    basename, extension = os.path.splitext(os.path.basename(shell_script))
    image_file = os.path.join(images_directory, '%s.png' % basename)
    logger.info("Generating %s by running %s ..",
                format_path(image_file),
                format_path(shell_script))
    command_line = [sys.executable, __file__, shell_script]
    random_title = random_string(25)
    # Generate the urxvt command line.
    urxvt_command = [
        'urxvt',
        # Enforce a default geometry.
        '-geometry', '98x30',
        # Set the text and background color.
        '-fg', TEXT_COLOR,
        '-bg', BACKGROUND_COLOR,
        # Set the font name and pixel size.
        '-fn', 'xft:%s:pixelsize=%i' % (FONT_NAME, FONT_SIZE),
        # Set the window title.
        '-title', random_title,
        # Hide scrollbars.
        '+sb',
    ]
    if which('qtile-run'):
        # I've been using tiling window managers for years now, at the
        # moment 'qtile' is my window manager of choice. It requires the
        # following special handling to enable the 'urxvt' window to float,
        # which in turn enables it to respect the '--geometry' option.
        urxvt_command.insert(0, 'qtile-run')
        urxvt_command.insert(1, '-f')
    # Apply the Ubuntu color scheme to urxvt.
    for index, css_color in enumerate(EIGHT_COLOR_PALETTE):
        urxvt_command.extend(('--color%i' % index, css_color))
    # Add the command that should run inside the terminal.
    urxvt_command.extend(('-e', 'sh', '-c', 'setterm -cursor off; %s' % quote(command_line)))
    # Launch urxvt. (NOTE: `async' became a reserved word in Python 3.7;
    # newer versions of the executor package call this option `asynchronous'.)
    execute(*urxvt_command, async=True)
    # Make sure we close the urxvt window.
    try:
        # Wait for urxvt to start up. If I were to improve this I could
        # instead wait for the creation of a file by interpret_script().
        time.sleep(10)
        # Take a screen shot of the window using ImageMagick.
        execute('import', '-window', random_title, image_file)
        # Auto-trim the screen shot, then give it a 5px border.
        execute('convert', image_file, '-trim',
                '-bordercolor', BACKGROUND_COLOR, '-border', '5', image_file)
    finally:
        execute('wmctrl', '-c', random_title)
def generate_screenshots()
Generate screenshots from shell scripts.
4.322926
4.239976
1.019564
with CaptureOutput() as capturer:
    shell = subprocess.Popen(['bash', '-'], stdin=subprocess.PIPE)
    with open(shell_script) as handle:
        for line in handle:
            sys.stdout.write(ansi_wrap('$', color='green') + ' ' + line)
            sys.stdout.flush()
            shell.stdin.write(line)
            shell.stdin.flush()
    shell.stdin.close()
    time.sleep(12)
    # Get the text that was shown in the terminal.
    captured_output = capturer.get_text()
# Store the text that was shown in the terminal.
filename, extension = os.path.splitext(shell_script)
transcript_file = '%s.txt' % filename
logger.info("Updating %s ..", format_path(transcript_file))
with open(transcript_file, 'w') as handle:
    handle.write(ansi_strip(captured_output))
def interpret_script(shell_script)
Make it appear as if commands are typed into the terminal.
3.603407
3.514902
1.02518
contents = get_contents(*args)
metadata = dict(re.findall('__([a-z]+)__ = [\'"]([^\'"]+)', contents))
return metadata['version']
def get_version(*args)
Extract the version number from a Python module.
3.76578
3.122648
1.205957
install_requires = get_requirements('requirements.txt')
if 'bdist_wheel' not in sys.argv:
    if sys.platform == 'win32':
        install_requires.append('colorama')
return sorted(install_requires)
def get_install_requires()
Add conditional dependencies for Windows (when creating source distributions).
3.160968
2.861424
1.104684
requirements = set()
with open(get_absolute_path(*args)) as handle:
    for line in handle:
        # Strip comments.
        line = re.sub(r'^#.*|\s#.*', '', line)
        # Ignore empty lines
        if line and not line.isspace():
            requirements.add(re.sub(r'\s+', '', line))
return sorted(requirements)
def get_requirements(*args)
Get requirements from pip requirement files.
3.028633
2.916227
1.038545
try:
    from pkg_resources import parse_version
    from setuptools import __version__
    return parse_version(__version__) >= parse_version('0.7.2')
except Exception:
    return False
def have_environment_marker_support()
Check whether setuptools supports PEP-426 environment markers. Based on the ``setup.py`` script of the ``pytest`` package: https://bitbucket.org/pytest-dev/pytest/src/default/setup.py
3.051117
3.063089
0.996091
return ('/' if 'bdist_wheel' in sys.argv
        else os.path.relpath(distutils.sysconfig.get_python_lib(), sys.prefix))
def find_pth_directory()
Determine the correct directory pathname for installing ``*.pth`` files. To install a ``*.pth`` file using a source distribution archive (created when ``python setup.py sdist`` is called) the relative directory pathname ``lib/pythonX.Y/site-packages`` needs to be passed to the ``data_files`` option to ``setup()``. Unfortunately this breaks universal wheel archives (created when ``python setup.py bdist_wheel --universal`` is called) because a specific Python version is now encoded in the pathname of a directory that becomes part of the supposedly universal archive :-). Through trial and error I've found that by passing the directory pathname ``/`` when ``python setup.py bdist_wheel`` is called we can ensure that ``*.pth`` files are installed in the ``lib/pythonX.Y/site-packages`` directory without hard coding its location.
6.271217
5.692594
1.101645
from flask import _app_ctx_stack as stack  # We do not support < Flask 0.9

if stack.top is None:
    raise ExecutedOutsideContext()

g_object_attr = stack.top.app.config['LOG_REQUEST_ID_G_OBJECT_ATTRIBUTE']
return g.get(g_object_attr, None)
def flask_ctx_get_request_id()
Get the request id from Flask's g object.

:return: The id or None if not found.
6.02851
5.599239
1.076666
logger.info(
    '{ip} - - "{method} {path} {status_code}"'.format(
        ip=request.remote_addr,
        method=request.method,
        path=request.path,
        status_code=response.status_code)
)
return response
def _log_http_event(response)
Create a log event like werkzeug's, but at the end of the request so that it holds the request id. Intended usage is as a handler for Flask.after_request.

:return: The same response object
2.692661
2.904706
0.927
if ctx_fetcher not in self.ctx_fetchers:
    self.ctx_fetchers.append(ctx_fetcher)
def register_fetcher(self, ctx_fetcher)
Register another context-specialized fetcher :param Callable ctx_fetcher: A callable that will return the id or raise ExecutedOutsideContext if it was executed outside its context
2.254798
2.961158
0.761458
amazon_request_id = request.headers.get('X-Amzn-Trace-Id', '')
trace_id_params = dict(x.split('=') if '=' in x else (x, None)
                       for x in amazon_request_id.split(';'))
if 'Self' in trace_id_params:
    return trace_id_params['Self']
if 'Root' in trace_id_params:
    return trace_id_params['Root']
return None
def amazon_elb_trace_id()
Get the Amazon ELB trace id from the current Flask request context.

:return: The found Trace-ID or None if not found
:rtype: str | None
2.414121
2.37906
1.014737
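A standalone sketch of the header parsing above, applied to a sample X-Amzn-Trace-Id value (the trace id values are made up):

header = 'Self=1-67891234-aaaabbbbccccdddd;Root=1-67891233-ddddccccbbbbaaaa'
params = dict(x.split('=') if '=' in x else (x, None) for x in header.split(';'))
trace_id = params.get('Self') or params.get('Root')
print(trace_id)  # 1-67891234-aaaabbbbccccdddd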
def parser():
    request_id = request.headers.get(header_name, '').strip()
    if not request_id:
        # If the request id is empty return None
        return None
    return request_id
return parser
def generic_http_header_parser_for(header_name)
A parser factory to extract the request id from an HTTP header :return: A parser that can be used to extract the request id from the current request context :rtype: ()->str|None
5.428391
4.709235
1.152712
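A minimal sketch of the same factory pattern without Flask: the returned closure captures the header name and normalizes empty values to None (the names here are illustrative, not part of the library):

def header_parser_for(headers, header_name):
    def parser():
        value = headers.get(header_name, '').strip()
        return value or None
    return parser

parser = header_parser_for({'X-Request-ID': ' abc-123 '}, 'X-Request-ID')
assert parser() == 'abc-123'
assert header_parser_for({}, 'X-Request-ID')() is None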
for parser in parsers:
    request_id = parser()
    if request_id is not None:
        return request_id
return None
def auto_parser(parsers=(x_request_id, x_correlation_id, amazon_elb_trace_id))
Meta parser that tries all known parsers and returns the first id found.

:param list[Callable] parsers: A list of callable parsers to try to extract the request_id
:return: The request id if it is found or None
:rtype: str|None
2.667173
2.803074
0.951517
logger.info('Called generic_add({}, {})'.format(a, b))
return a + b
def generic_add(a, b)
Simple function to add two numbers
9.563777
8.654011
1.105127
if _CELERY_X_HEADER not in headers:
    request_id = current_request_id()
    headers[_CELERY_X_HEADER] = request_id
    logger.debug("Forwarding request_id '{}' to the task consumer.".format(request_id))
def on_before_publish_insert_request_id_header(headers, **kwargs)
This function is meant to be used as a signal processor for "before_task_publish".

:param Dict headers: The headers of the message
:param kwargs: Any extra keyword arguments
5.136386
5.597118
0.917684
logger.debug('Called generic_add({}, {})'.format(a, b))
return a + b
def generic_add(a, b)
Simple function to add two numbers
4.287084
4.34383
0.986936
# type: (T.Mapping, str, str, datetime.datetime) -> int
date = message[date_key]
time = message[time_key]
hour = time // 100
minute = time % 100
year = date // 10000
month = date // 100 % 100
day = date % 100
data_datetime = datetime.datetime(year, month, day, hour, minute)
# Python 2 compatible timestamp implementation without timezone hurdle
# see: https://docs.python.org/3/library/datetime.html#datetime.datetime.timestamp
return int((data_datetime - epoch).total_seconds())
def from_grib_date_time(message, date_key='dataDate', time_key='dataTime', epoch=DEFAULT_EPOCH)
Return the number of seconds since the ``epoch`` from the values of the
``message`` keys, using datetime.total_seconds().

:param message: the target GRIB message
:param date_key: the date key, defaults to "dataDate"
:param time_key: the time key, defaults to "dataTime"
:param epoch: the reference datetime
2.792869
3.231172
0.864351
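A worked example of the date/time arithmetic above: dataDate=20190102 and dataTime=1230 decode to 2019-01-02 12:30 and, against the Unix epoch, yield the timestamp below:

import datetime

date, time = 20190102, 1230
dt = datetime.datetime(date // 10000, date // 100 % 100, date % 100,
                       time // 100, time % 100)
epoch = datetime.datetime(1970, 1, 1)
assert dt == datetime.datetime(2019, 1, 2, 12, 30)
assert int((dt - epoch).total_seconds()) == 1546432200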
# type: (np.ndarray, np.ndarray) -> T.Tuple[T.Tuple[str, ...], np.ndarray]
step_s = step * 3600
if len(time.shape) == 0 and len(step.shape) == 0:
    data = time + step_s
    dims = ()  # type: T.Tuple[str, ...]
elif len(time.shape) > 0 and len(step.shape) == 0:
    data = time + step_s
    dims = ('time',)
elif len(time.shape) == 0 and len(step.shape) > 0:
    data = time + step_s
    dims = ('step',)
else:
    data = time[:, None] + step_s[None, :]
    dims = ('time', 'step')
return dims, data
def build_valid_time(time, step)
Return dimensions and data of the valid_time corresponding to the given ``time`` and ``step``. The data is seconds from the same epoch as ``time`` and may have one or two dimensions. :param time: given in seconds from an epoch, as returned by ``from_grib_date_time`` :param step: given in hours, as returned by ``from_grib_step``
2.088774
2.138852
0.976587
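The interesting branch above is the last one, where both inputs are arrays; a small numpy sketch of that broadcasting:

import numpy as np

time = np.array([0, 86400])   # seconds since the epoch, one per forecast run
step = np.array([0, 6, 12])   # forecast steps in hours
valid = time[:, None] + (step * 3600)[None, :]
assert valid.shape == (2, 3)
assert valid[1, 2] == 86400 + 12 * 3600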
if 'mode' in kwargs:
    warnings.warn("the `mode` keyword argument is ignored and deprecated", FutureWarning)
    kwargs.pop('mode')
stream = messages.FileStream(path, message_class=cfmessage.CfMessage, errors=grib_errors)
return Dataset(*build_dataset_components(stream, **kwargs))
def open_file(path, grib_errors='warn', **kwargs)
Open a GRIB file as a ``cfgrib.Dataset``.
6.701034
6.287588
1.065756
# type: (xr.DataArray, T.IO[bytes], T.Dict[str, T.Any], T.Dict[str, T.Any], T.Any) -> None
# validate Dataset keys, DataArray names, and attr keys/values
detected_keys, suggested_keys = detect_grib_keys(data_var, default_grib_keys, grib_keys)
merged_grib_keys = merge_grib_keys(grib_keys, detected_keys, suggested_keys)

if 'gridType' not in merged_grib_keys:
    raise ValueError("required grib_key 'gridType' not passed nor auto-detected")

template_message = make_template_message(merged_grib_keys, **kwargs)

coords_names, data_var = expand_dims(data_var)
header_coords_values = [data_var.coords[name].values.tolist() for name in coords_names]
for items in itertools.product(*header_coords_values):
    select = {n: v for n, v in zip(coords_names, items)}
    field_values = data_var.sel(**select).values.flat[:]

    # Missing values handling
    invalid_field_values = np.logical_not(np.isfinite(field_values))

    # There's no need to save a message full of missing values
    if invalid_field_values.all():
        continue

    missing_value = merged_grib_keys.get('missingValue', 9999)
    field_values[invalid_field_values] = missing_value

    message = cfgrib.CfMessage.from_message(template_message)
    for coord_name, coord_value in zip(coords_names, items):
        if coord_name in ALL_TYPE_OF_LEVELS:
            coord_name = 'level'
        message[coord_name] = coord_value

    # OPTIMIZE: convert to list because Message.message_set doesn't support np.ndarray
    message['values'] = field_values.tolist()

    message.write(file)
def canonical_dataarray_to_grib( data_var, file, grib_keys={}, default_grib_keys=DEFAULT_GRIB_KEYS, **kwargs )
Write a ``xr.DataArray`` in *canonical* form to a GRIB file.
4.446032
4.440678
1.001206
# type: (xr.Dataset, str, str, bool, T.Dict[str, T.Any], T.Any) -> None
if not no_warn:
    warnings.warn("GRIB write support is experimental, DO NOT RELY ON IT!", FutureWarning)

# validate Dataset keys, DataArray names, and attr keys/values
xr.backends.api._validate_dataset_names(dataset)
xr.backends.api._validate_attrs(dataset)

real_grib_keys = {k[5:]: v for k, v in dataset.attrs.items() if k[:5] == 'GRIB_'}
real_grib_keys.update(grib_keys)

with open(path, mode=mode) as file:
    for data_var in dataset.data_vars.values():
        canonical_dataarray_to_grib(data_var, file, grib_keys=real_grib_keys, **kwargs)
def canonical_dataset_to_grib(dataset, path, mode='wb', no_warn=False, grib_keys={}, **kwargs)
Write a ``xr.Dataset`` in *canonical* form to a GRIB file.
3.339055
3.394541
0.983654
# type: (str, T.Any) -> xr.Dataset
if 'engine' in kwargs and kwargs['engine'] != 'cfgrib':
    raise ValueError("only engine=='cfgrib' is supported")
kwargs['engine'] = 'cfgrib'
return xr.backends.api.open_dataset(path, **kwargs)
def open_dataset(path, **kwargs)
Return a ``xr.Dataset`` with the requested ``backend_kwargs`` from a GRIB file.
3.385722
3.694892
0.916325
# type: (str, T.Dict[str, T.Any], bool, T.Any) -> T.List[xr.Dataset]
if not no_warn:
    warnings.warn("open_datasets is an experimental API, DO NOT RELY ON IT!", FutureWarning)

fbks = []
datasets = []
try:
    datasets.append(open_dataset(path, backend_kwargs=backend_kwargs, **kwargs))
except DatasetBuildError as ex:
    fbks.extend(ex.args[2])
# NOTE: the recursive call needs to stay out of the exception handler to avoid showing
#   to the user a confusing error message due to exception chaining
for fbk in fbks:
    bks = backend_kwargs.copy()
    bks['filter_by_keys'] = fbk
    datasets.extend(open_datasets(path, backend_kwargs=bks, no_warn=True, **kwargs))
return datasets
def open_datasets(path, backend_kwargs={}, no_warn=False, **kwargs)
Open a GRIB file, grouping incompatible hypercubes into different datasets via simple heuristics.
4.147278
4.245596
0.976842
# type: (cffi.FFI.CData, str) -> int
size = ffi.new('size_t *')
_codes_get_size(handle, key.encode(ENC), size)
return size[0]
def codes_get_size(handle, key)
Get the number of coded values from a key. If several keys of the same name are present, the total sum is returned.

:param bytes key: the keyword to get the size of
:rtype: int
2.773576
4.698785
0.590275
# type: (cffi.FFI.CData, str) -> int
size = ffi.new('size_t *')
_codes_get_length(handle, key.encode(ENC), size)
return size[0]
def codes_get_string_length(handle, key)
Get the length of the string representation of the key. If several keys of the same name are present, the maximum length is returned. :param bytes key: the keyword to get the string representation size of. :rtype: int
3.329835
5.083695
0.655003
# type: (cffi.FFI.CData, str, int) -> T.List[int]
values = ffi.new('unsigned char[]', size)
size_p = ffi.new('size_t *', size)
_codes_get_bytes(handle, key.encode(ENC), values, size_p)
return list(values)
def codes_get_bytes_array(handle, key, size)
Get unsigned chars array values from a key. :param bytes key: the keyword whose value(s) are to be extracted :rtype: List(int)
3.073246
4.475287
0.686715
# type: (cffi.FFI.CData, str, int) -> T.List[int]
values = ffi.new('long[]', size)
size_p = ffi.new('size_t *', size)
_codes_get_long_array(handle, key.encode(ENC), values, size_p)
return list(values)
def codes_get_long_array(handle, key, size)
Get long array values from a key. :param bytes key: the keyword whose value(s) are to be extracted :rtype: List(int)
2.881689
4.337938
0.664299
# type: (cffi.FFI.CData, str, int) -> T.List[float]
values = ffi.new('double[]', size)
size_p = ffi.new('size_t *', size)
_codes_get_double_array(handle, key.encode(ENC), values, size_p)
return list(values)
def codes_get_double_array(handle, key, size)
Get double array values from a key. :param bytes key: the keyword whose value(s) are to be extracted :rtype: T.List(float)
2.920025
3.779931
0.772508
# type: (cffi.FFI.CData, bytes, int, int) -> T.List[bytes]
if length is None:
    length = codes_get_string_length(handle, key)
values_keepalive = [ffi.new('char[]', length) for _ in range(size)]
values = ffi.new('char*[]', values_keepalive)
size_p = ffi.new('size_t *', size)
_codes_get_string_array(handle, key.encode(ENC), values, size_p)
return [ffi.string(values[i]).decode(ENC) for i in range(size_p[0])]
def codes_get_string_array(handle, key, size, length=None)
Get string array values from a key. :param bytes key: the keyword whose value(s) are to be extracted :rtype: T.List[bytes]
2.554139
2.736827
0.933248
# type: (cffi.FFI.CData, str, int) -> str
if length is None:
    length = codes_get_string_length(handle, key)
values = ffi.new('char[]', length)
length_p = ffi.new('size_t *', length)
_codes_get_string = check_return(lib.codes_get_string)
_codes_get_string(handle, key.encode(ENC), values, length_p)
return ffi.string(values, length_p[0]).decode(ENC)
def codes_get_string(handle, key, length=None)
Get a string element from a key. It may or may not fail in case there is more than one key in a message. Outputs the last element.

:param bytes key: the keyword to select the value of
:param bool strict: flag to select if the method should fail in case of more than one key in a single message
:rtype: bytes
2.390458
2.999253
0.797018
ver = lib.codes_get_api_version()
patch = ver % 100
ver = ver // 100
minor = ver % 100
major = ver // 100
return "%d.%d.%d" % (major, minor, patch)
def codes_get_api_version()
Get the API version. Returns the version of the API as a string in the format "major.minor.revision".
2.816547
2.564674
1.098209
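The returned integer packs major/minor/patch in base 100; a worked decode of a plausible value (the version number below is made up):

ver = 21200  # hypothetical lib.codes_get_api_version() result
patch = ver % 100
ver //= 100
minor = ver % 100
major = ver // 100
assert "%d.%d.%d" % (major, minor, patch) == "2.12.0"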
# type: (cffi.FFI.CData, T.BinaryIO) -> None
mess = ffi.new('const void **')
mess_len = ffi.new('size_t*')
codes_get_message = check_return(lib.codes_get_message)
codes_get_message(handle, mess, mess_len)
message = ffi.buffer(mess[0], size=mess_len[0])
outfile.write(message)
def codes_write(handle, outfile)
Write a coded message to a file. If the file does not exist, it is created.

:param outfile: the file-like object (opened in binary mode) to write the message to
3.449117
4.493501
0.767579
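Since the message buffer is raw bytes, the output file must be opened in binary mode. A minimal sketch, assuming `handle` is an open coded-message handle obtained elsewhere:

# 'handle' is assumed to come from a previously decoded GRIB message.
with open('out.grib', 'wb') as outfile:  # binary mode: the buffer is raw bytes
    codes_write(handle, outfile)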
return (
    hasattr(obj, "__iter__")
    and not isinstance(obj, str)
    and not isinstance(obj, tuple)
)
def is_iterable(obj)
Are we being asked to look up a list of things, instead of a single thing? We check for the `__iter__` attribute so that this can cover types that don't have to be known by this module, such as NumPy arrays. Strings, however, should be considered as atomic values to look up, not iterables. The same goes for tuples, since they are immutable and therefore valid entries. We don't need to check for the Python 2 `unicode` type, because it doesn't have an `__iter__` attribute anyway.
2.511816
2.965667
0.846965
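A few illustrative calls showing which values count as iterables under this definition:

assert is_iterable([1, 2, 3])        # containers are looked up element-wise
assert is_iterable({1, 2})           # any object with __iter__ qualifies
assert not is_iterable("abc")        # strings are atomic lookup values
assert not is_iterable((1, 2))       # tuples are atomic (hashable) entries
assert not is_iterable(42)           # scalars have no __iter__ at all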
item_index = None
try:
    for item in sequence:
        item_index = self.add(item)
except TypeError:
    raise ValueError(
        "Argument needs to be an iterable, got %s" % type(sequence)
    )
return item_index
def update(self, sequence)
Update the set with the given iterable sequence, then return the index of the last element inserted. Example: >>> oset = OrderedSet([1, 2, 3]) >>> oset.update([3, 1, 5, 1, 4]) 4 >>> print(oset) OrderedSet([1, 2, 3, 5, 4])
4.153587
4.414253
0.940949
if not self.items:
    raise KeyError("Set is empty")
elem = self.items[-1]
del self.items[-1]
del self.map[elem]
return elem
def pop(self)
Remove and return the last element from the set. Raises KeyError if the set is empty. Example: >>> oset = OrderedSet([1, 2, 3]) >>> oset.pop() 3
4.005393
4.151051
0.964911
cls = self.__class__ if isinstance(self, OrderedSet) else OrderedSet
containers = map(list, it.chain([self], sets))
items = it.chain.from_iterable(containers)
return cls(items)
def union(self, *sets)
Combines all unique items. Each item's order is defined by its first appearance. Example: >>> oset = OrderedSet.union(OrderedSet([3, 1, 4, 1, 5]), [1, 3], [2, 0]) >>> print(oset) OrderedSet([3, 1, 4, 5, 2, 0]) >>> oset.union([8, 9]) OrderedSet([3, 1, 4, 5, 2, 0, 8, 9]) >>> oset | {10} OrderedSet([3, 1, 4, 5, 2, 0, 10])
5.207175
5.018125
1.037673
cls = self.__class__ if isinstance(self, OrderedSet) else OrderedSet
if sets:
    common = set.intersection(*map(set, sets))
    items = (item for item in self if item in common)
else:
    items = self
return cls(items)
def intersection(self, *sets)
Returns elements in common between all sets. Order is defined only by the first set. Example: >>> oset = OrderedSet.intersection(OrderedSet([0, 1, 2, 3]), [1, 2, 3]) >>> print(oset) OrderedSet([1, 2, 3]) >>> oset.intersection([2, 4, 5], [1, 2, 3, 4]) OrderedSet([2]) >>> oset.intersection() OrderedSet([1, 2, 3])
3.579082
3.673405
0.974323
cls = self.__class__
if sets:
    other = set.union(*map(set, sets))
    items = (item for item in self if item not in other)
else:
    items = self
return cls(items)
def difference(self, *sets)
Returns all elements that are in this set but not the others. Example: >>> OrderedSet([1, 2, 3]).difference(OrderedSet([2])) OrderedSet([1, 3]) >>> OrderedSet([1, 2, 3]).difference(OrderedSet([2]), OrderedSet([3])) OrderedSet([1]) >>> OrderedSet([1, 2, 3]) - OrderedSet([2]) OrderedSet([1, 3]) >>> OrderedSet([1, 2, 3]).difference() OrderedSet([1, 2, 3])
3.777915
4.321174
0.87428
if len(self) > len(other):  # Fast check for obvious cases
    return False
return all(item in other for item in self)
def issubset(self, other)
Report whether another set contains this set. Example: >>> OrderedSet([1, 2, 3]).issubset({1, 2}) False >>> OrderedSet([1, 2, 3]).issubset({1, 2, 3, 4}) True >>> OrderedSet([1, 2, 3]).issubset({1, 4, 3, 5}) False
5.369133
7.027368
0.764032
if len(self) < len(other):  # Fast check for obvious cases
    return False
return all(item in self for item in other)
def issuperset(self, other)
Report whether this set contains another set. Example: >>> OrderedSet([1, 2]).issuperset([1, 2, 3]) False >>> OrderedSet([1, 2, 3, 4]).issuperset({1, 2, 3}) True >>> OrderedSet([1, 4, 3, 5]).issuperset({1, 2, 3}) False
6.130391
7.985971
0.767645
cls = self.__class__ if isinstance(self, OrderedSet) else OrderedSet
diff1 = cls(self).difference(other)
diff2 = cls(other).difference(self)
return diff1.union(diff2)
def symmetric_difference(self, other)
Return the symmetric difference of two OrderedSets as a new set. That is, the new set will contain all elements that are in exactly one of the sets. Their order will be preserved, with elements from `self` preceding elements from `other`. Example: >>> this = OrderedSet([1, 4, 3, 5, 7]) >>> other = OrderedSet([9, 7, 1, 3, 2]) >>> this.symmetric_difference(other) OrderedSet([4, 5, 9, 2])
3.321436
3.923267
0.8466
self.items = items
self.map = {item: idx for (idx, item) in enumerate(items)}
def _update_items(self, items)
Replace the 'items' list of this OrderedSet with a new one, updating self.map accordingly.
5.174442
2.894038
1.787966
items_to_remove = set()
for other in sets:
    items_to_remove |= set(other)
self._update_items([item for item in self.items if item not in items_to_remove])
def difference_update(self, *sets)
Update this OrderedSet to remove items from one or more other sets. Example: >>> this = OrderedSet([1, 2, 3]) >>> this.difference_update(OrderedSet([2, 4])) >>> print(this) OrderedSet([1, 3]) >>> this = OrderedSet([1, 2, 3, 4, 5]) >>> this.difference_update(OrderedSet([2, 4]), OrderedSet([1, 4, 6])) >>> print(this) OrderedSet([3, 5])
3.046716
4.314474
0.706162
other = set(other)
self._update_items([item for item in self.items if item in other])
def intersection_update(self, other)
Update this OrderedSet to keep only items in another set, preserving their order in this set. Example: >>> this = OrderedSet([1, 4, 3, 5, 7]) >>> other = OrderedSet([9, 7, 1, 3, 2]) >>> this.intersection_update(other) >>> print(this) OrderedSet([1, 3, 7])
6.629557
11.209324
0.591432
items_to_add = [item for item in other if item not in self]
items_to_remove = set(other)
self._update_items(
    [item for item in self.items if item not in items_to_remove] + items_to_add
)
def symmetric_difference_update(self, other)
Update this OrderedSet to remove items from another set, then add items from the other set that were not present in this set. Example: >>> this = OrderedSet([1, 4, 3, 5, 7]) >>> other = OrderedSet([9, 7, 1, 3, 2]) >>> this.symmetric_difference_update(other) >>> print(this) OrderedSet([4, 5, 9, 2])
3.226207
3.908381
0.825459
if self.is_success():
    return

# Handle the error if we have any information
if self.details:
    error = self.details.get('error', None)
    if error == PushResponse.ERROR_DEVICE_NOT_REGISTERED:
        raise DeviceNotRegisteredError(self)
    elif error == PushResponse.ERROR_MESSAGE_TOO_BIG:
        raise MessageTooBigError(self)
    elif error == PushResponse.ERROR_MESSAGE_RATE_EXCEEDED:
        raise MessageRateExceededError(self)

# No known error information, so let's raise a generic error.
raise PushResponseError(self)
def validate_response(self)
Raises an exception if there was an error; otherwise does nothing. Clients should handle these errors, since they require custom handling to resolve properly.
3.581678
3.328917
1.075929
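A sketch of the intended calling pattern; `response` is assumed to be a PushResponse from a publish call, and deactivate_token is a hypothetical application helper, not part of this API:

try:
    response.validate_response()
except DeviceNotRegisteredError:
    # The token is stale; stop sending to it.
    deactivate_token(response.push_message.to)  # hypothetical helper
except PushResponseError as exc:
    # Generic per-notification failure; the specific subclasses above
    # (message too big, rate exceeded) could be caught the same way.
    print('Push failed: %s' % exc)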
import six
return (
    isinstance(token, six.string_types)
    and token.startswith('ExponentPushToken'))
def is_exponent_push_token(cls, token)
Returns `True` if the token is an Exponent push token
6.971719
5.982603
1.165332
# Delayed import because this file is immediately read on install, and
# the requests library may not be installed yet.
import requests

response = requests.post(
    self.host + self.api_url + '/push/send',
    data=json.dumps([pm.get_payload() for pm in push_messages]),
    headers={
        'accept': 'application/json',
        'accept-encoding': 'gzip, deflate',
        'content-type': 'application/json',
    })

# Let's validate the response format first.
try:
    response_data = response.json()
except ValueError:
    # The response isn't json. First, let's attempt to raise a normal
    # http error. If it's a 200, then we'll raise our own error.
    response.raise_for_status()
    raise PushServerError('Invalid server response', response)

# If there are errors with the entire request, raise an error now.
if 'errors' in response_data:
    raise PushServerError(
        'Request failed',
        response,
        response_data=response_data,
        errors=response_data['errors'])

# We expect the response to have a 'data' field with the responses.
if 'data' not in response_data:
    raise PushServerError(
        'Invalid server response',
        response,
        response_data=response_data)

# Use the requests library's built-in exceptions for any remaining 4xx
# and 5xx errors.
response.raise_for_status()

# Sanity check the response
if len(push_messages) != len(response_data['data']):
    raise PushServerError(
        ('Mismatched response length. Expected %d %s but only '
         'received %d' % (
             len(push_messages),
             'receipt' if len(push_messages) == 1 else 'receipts',
             len(response_data['data']))),
        response,
        response_data=response_data)

# At this point, we know it's a 200 and the response format is correct.
# Now let's parse the responses per push notification.
receipts = []
for i, receipt in enumerate(response_data['data']):
    receipts.append(PushResponse(
        push_message=push_messages[i],
        # If there is no status, assume error.
        status=receipt.get('status', PushResponse.ERROR_STATUS),
        message=receipt.get('message', ''),
        details=receipt.get('details', None)))

return receipts
def _publish_internal(self, push_messages)
Send push notifications The server will validate any type of syntax errors and the client will raise the proper exceptions for the user to handle. Each notification is of the form: { 'to': 'ExponentPushToken[xxx]', 'body': 'This text gets displayed in the notification', 'badge': 1, 'data': {'any': 'json object'}, } Args: push_messages: An array of PushMessage objects.
3.30463
3.250895
1.016529
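The method name is private, so a public publish wrapper presumably exists, but only _publish_internal appears in this record. A hedged sketch where PushClient, its construction, and the PushMessage keyword arguments are assumptions for illustration:

# All names here are assumptions: a client exposing _publish_internal,
# and a PushMessage constructor accepting 'to' and 'body'.
client = PushClient()
messages = [PushMessage(to='ExponentPushToken[xxx]', body='Hello there')]
receipts = client._publish_internal(messages)
for receipt in receipts:
    receipt.validate_response()  # per-message errors, as defined above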
try:
    return line.strip(sep)
except TypeError:
    return line.decode('utf-8').strip(sep)
def strip_line(line, sep=os.linesep)
Strips occurrences of the separator character (sep) from both ends of a line of text
3.435626
3.07614
1.116863
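The EAFP fallback means byte strings are decoded before stripping, so both paths return text. Two illustrative calls using the default separator:

assert strip_line('spam\n') == 'spam'    # str path: strip() succeeds directly
assert strip_line(b'spam\n') == 'spam'   # bytes path: TypeError, then decode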
pairs = dict()
if keys is None:
    keys = "\S+"
    regex = re.compile(r'''
        \n            # all key-value pairs are on separate lines
        \s*           # there might be some leading spaces
        (             # start group to return
        (?:{0}\s*)    # placeholder for tags to detect '\S+' == all
        \s*:*=\s*     # optional spaces, optional colon, = , optional spaces
        .*            # the value
        )             # end group to return
        '''.format(keys), re.VERBOSE)
    validate = False
else:
    keys = [k.strip() for k in keys]
    regex = re.compile(r'''
        \n            # all key-value pairs are on separate lines
        \s*           # there might be some leading spaces
        (             # start group to return
        (?:{0}\s*)    # placeholder for tags to detect '\S+' == all
        \s*:*=\s*     # optional spaces, optional colon, = , optional spaces
        .*            # the value
        )             # end group to return
        '''.format('|'.join(keys)), re.VERBOSE)
    validate = True
    for k in keys:
        pairs[k] = []

matches = regex.findall(buf)
for match in matches:
    key, val = match.split('=', 1)
    # remove colon and leading/trailing whitespace from key
    key = (strip_line(key, ':')).strip()
    # remove newline and leading/trailing whitespace from value
    val = (strip_line(val)).strip()
    if validate and key not in keys:
        msg = "regex produced incorrect match. regex pattern = {0} "\
              "claims key = [{1}] while original set of search keys "\
              "= {2}".format(regex.pattern, key, '|'.join(keys))
        raise AssertionError(msg)
    pairs.setdefault(key, []).append(val)

return pairs
def get_dict_from_buffer(buf, keys=['DISTNAME', 'MAJOR', 'MINOR', 'PATCHLEVEL', 'PYTHON', 'MIN_PYTHON_MAJOR', 'MIN_PYTHON_MINOR', 'MIN_NUMPY_MAJOR', 'MIN_NUMPY_MINOR'])
Parses a string buffer for key-val pairs for the supplied keys. Returns: Python dictionary with all the keys (all keys in buffer if None is passed for keys) with the values being a list corresponding to each key. Note: Return dict will contain all keys supplied (if not None). If any key was not found in the buffer, then the value for that key will be [] such that dict[key] does not produce a KeyError. Slightly modified from: http://stackoverflow.com/questions/5323703/regex-how-to-match-sequence-of-key-value-pairs-at-end-of-string
3.982754
3.947489
1.008933
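A small worked example with a Makefile-like buffer; note that every requested key appears in the result even when absent from the buffer:

buf = "\nMAJOR := 2\nMINOR = 3\nNAME = Corrfunc\n"
pairs = get_dict_from_buffer(buf, keys=['MAJOR', 'MINOR', 'PATCHLEVEL'])
assert pairs['MAJOR'] == ['2']
assert pairs['MINOR'] == ['3']
assert pairs['PATCHLEVEL'] == []   # requested but not found: empty list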
'''
Replaces first line in 'buf' matching 'key' with 'replacement'.
Optionally, writes out this new buffer into 'outfile'.

Returns: Buffer after replacement has been done
'''
regexp = re.compile(r'''
    \n\s*         # there might be some leading spaces
    (             # start group to return
    (?:{0}\s*)    # placeholder for tags to detect '\S+' == all
    \s*:*=\s*     # optional spaces, optional colon, = , optional spaces
    .*            # the value
    )             # end group to return
    '''.format(key), re.VERBOSE)
matches = regexp.findall(buf)
if not matches:
    msg = "Could not find key = {0} in the provided buffer. "\
          "Pattern used = {1}".format(key, regexp.pattern)
    raise ValueError(msg)

# Only replace the first occurrence
newbuf = regexp.sub(replacement, buf, count=1)
if outfile is not None:
    write_text_file(outfile, newbuf)

return newbuf
def replace_first_key_in_makefile(buf, key, replacement, outfile=None)
Replaces first line in 'buf' matching 'key' with 'replacement'. Optionally, writes out this new buffer into 'outfile'. Returns: Buffer after replacement has been done
6.240084
4.659048
1.339347
try:
    with open(filename, 'r', encoding=encoding) as f:
        r = f.read()
except TypeError:
    with open(filename, 'r') as f:
        r = f.read()
return r
def read_text_file(filename, encoding="utf-8")
Reads a file under python3 with encoding (default UTF-8). Also works under python2, without encoding. Uses the EAFP (https://docs.python.org/2/glossary.html#term-eafp) principle.
2.26031
2.261881
0.999305
fd = sys.stderr.fileno()

# assert that Python and C stdio write using the same file descriptor
# assert libc.fileno(ctypes.c_void_p.in_dll(libc, "stderr")) == fd == 1

def _redirect_stderr(to):
    sys.stderr.close()  # + implicit flush()
    os.dup2(to.fileno(), fd)  # fd writes to 'to' file
    sys.stderr = os.fdopen(fd, 'w')  # Python writes to fd

with os.fdopen(os.dup(fd), 'w') as old_stderr:
    with open(to, 'w') as file:
        _redirect_stderr(to=file)
    try:
        yield  # allow code to be run with the redirected stderr
    finally:
        _redirect_stderr(to=old_stderr)
def stderr_redirected(to=os.devnull)
Context manager that redirects stderr at the file-descriptor level, so output from C extensions and child processes is captured too. Example: import os with stderr_redirected(to=filename): print("from Python") os.system("echo non-Python applications are also supported")
2.330165
2.492826
0.934749
dtype = np.dtype([('same_cell', np.int32),
                  ('N1', np.int),
                  ('N2', np.int),
                  ('time', np.float)
                  ])
if pd is not None:
    timings = pd.read_csv(filename, header=None,
                          engine="c",
                          dtype={'same_cell': np.int32,
                                 'N1': np.int,
                                 'N2': np.int,
                                 'time': np.float},
                          index_col=None,
                          names=['same_cell', 'N1', 'N2', 'time'],
                          delim_whitespace=True)
else:
    timings = np.loadtxt(filename, dtype=dtype)
return timings
def read_file(filename)
Reads in the file I created manually (by recompiling and adding timers). Not used any more, but left for historical reasons (the first 'speedup' plots were generated with this function).
2.534338
2.392932
1.059093
args = ['mysql']
db = settings_dict['OPTIONS'].get('db', settings_dict['NAME'])
user = settings_dict['OPTIONS'].get('user', settings_dict['USER'])
passwd = settings_dict['OPTIONS'].get('passwd', settings_dict['PASSWORD'])
host = settings_dict['OPTIONS'].get('host', settings_dict['HOST'])
port = settings_dict['OPTIONS'].get('port', settings_dict['PORT'])
cert = settings_dict['OPTIONS'].get('ssl', {}).get('ca')
defaults_file = settings_dict['OPTIONS'].get('read_default_file')
# Seems to be no good way to set sql_mode with CLI.

if defaults_file:
    args += ["--defaults-file=%s" % defaults_file]
if user:
    args += ["--user=%s" % user]
if passwd:
    args += ["--password=%s" % passwd]
if host:
    if '/' in host:
        args += ["--socket=%s" % host]
    else:
        args += ["--host=%s" % host]
if port:
    args += ["--port=%s" % port]
if cert:
    args += ["--ssl-ca=%s" % cert]
if db:
    args += [db]
return args
def settings_to_cmd_args(settings_dict)
Copied from the Django 1.8 MySQL backend DatabaseClient, where the runshell command-line creation has been extracted and made callable like this.
1.958832
1.815347
1.07904
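A short illustration with a minimal Django-style settings dict (all values made up):

settings_dict = {
    'NAME': 'mydb', 'USER': 'app', 'PASSWORD': 's3cret',
    'HOST': '127.0.0.1', 'PORT': '3306', 'OPTIONS': {},
}
assert settings_to_cmd_args(settings_dict) == [
    'mysql', '--user=app', '--password=s3cret',
    '--host=127.0.0.1', '--port=3306', 'mydb',
]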
if not have_program('pt-fingerprint'):  # pragma: no cover
    raise OSError("pt-fingerprint doesn't appear to be installed")
thread = PTFingerprintThread.get_thread()
thread.in_queue.put(query)
return thread.out_queue.get()
def pt_fingerprint(query)
Takes a query (in a string) and returns its 'fingerprint'
4.935156
5.07116
0.973181
if not len(field_names):
    raise ValueError("At least one field name required")
using = kwargs.pop('using', DEFAULT_DB_ALIAS)
if len(kwargs):
    raise ValueError("The only supported keyword argument is 'using'")

existing_fields = {field.name: field for field in model._meta.fields}
fields = [existing_fields[name] for name in field_names
          if name in existing_fields]
if len(fields) != len(field_names):
    unfound_names = set(field_names) - {field.name for field in fields}
    raise ValueError("Fields do not exist: " + ",".join(unfound_names))
column_names = tuple(field.column for field in fields)
list_sql = get_list_sql(column_names)

with connections[using].cursor() as cursor:
    cursor.execute(
        # NOTE: the SQL string literal was lost in extraction; this query
        # is an assumption reconstructed from the fetch pattern below.
        """SELECT `INDEX_NAME`, `SEQ_IN_INDEX`, `COLUMN_NAME`
           FROM INFORMATION_SCHEMA.STATISTICS
           WHERE TABLE_SCHEMA = DATABASE() AND
                 TABLE_NAME = %s AND
                 COLUMN_NAME IN {list_sql}
           ORDER BY `INDEX_NAME`, `SEQ_IN_INDEX`""".format(list_sql=list_sql),
        (model._meta.db_table,) + column_names,
    )
    indexes = defaultdict(list)
    for index_name, _, column_name in cursor.fetchall():
        indexes[index_name].append(column_name)

indexes_by_columns = {tuple(v): k for k, v in indexes.items()}
try:
    return indexes_by_columns[column_names]
except KeyError:
    raise KeyError("There is no index on (" + ",".join(field_names) + ")")
def index_name(model, *field_names, **kwargs)
Returns the name of the index existing on field_names, or raises KeyError if no such index exists.
2.581397
2.497621
1.033542
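A hedged usage sketch with a hypothetical model; the field names must match an existing composite index exactly, and in order, or KeyError is raised:

# 'Author' is a hypothetical Django model with a composite index on
# (name, birthday); the call returns that index's name.
name = index_name(Author, 'name', 'birthday', using='default')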
if self.avg_n and self.avg_t:
    self.avg_n = (self.avg_n * self.weight) + n
    self.avg_t = (self.avg_t * self.weight) + t
else:
    self.avg_n = n
    self.avg_t = t

new_n = int(self.avg_rate * self.target_t)
return new_n
def update(self, n, t)
Update weighted average rate. Param n is generic; it's how many of whatever the caller is doing (rows, checksums, etc.). Param t is how long this n took, in seconds (hi-res or not). Parameters: n - Number of operations (rows, etc.) t - Amount of time in seconds that n took Returns: n adjusted to meet target_t based on weighted decaying avg rate
2.72793
2.589862
1.053311
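A worked numeric example, assuming weight = 0.75, target_t = 0.5 and that avg_rate is the decayed ratio avg_n / avg_t (that property is not shown in this record):

weight, target_t = 0.75, 0.5      # assumed configuration
avg_n, avg_t = 1000.0, 1.0        # seeded by the first update(1000, 1.0)
avg_n = avg_n * weight + 1000     # second update(1000, 2.0): decay, then add
avg_t = avg_t * weight + 2.0
avg_rate = avg_n / avg_t          # 1750 / 2.75 ~= 636.4 rows/sec (assumed def.)
new_n = int(avg_rate * target_t)
assert new_n == 318               # chunk shrinks to meet the time target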
match = query_start_re.match(sql)
if not match:
    # We don't understand what kind of query this is, don't rewrite it
    return sql
tokens = [match.group('keyword')]

comments = match.group('comments').strip()
if comments:
    tokens.append(comments)
# Inject comments after all existing comments
for comment in add_comments:
    tokens.append('/*{}*/'.format(comment))

# Don't bother with SELECT rewrite rules on non-SELECT queries
if tokens[0] == "SELECT":
    for group_name, hint_set in SELECT_HINTS.items():
        try:
            # Take the last hint we were told to add from this hint_set
            to_add = [hint for hint in add_hints if hint in hint_set][-1]
            tokens.append(to_add)
        except IndexError:
            # We weren't told to add any, so just add any hint from this
            # set that was already there
            existing = match.group(group_name)
            if existing is not None:
                tokens.append(existing.rstrip())

# Maybe rewrite the remainder of the statement for index hints
remainder = sql[match.end():]
if tokens[0] == "SELECT" and add_index_hints:
    for index_hint in add_index_hints:
        remainder = modify_sql_index_hints(remainder, *index_hint)

# Join everything
tokens.append(remainder)
return ' '.join(tokens)
def modify_sql(sql, add_comments, add_hints, add_index_hints)
Parse the start of the SQL, injecting each string in add_comments as an individual SQL comment after the first keyword, and adding the named SELECT hints from add_hints, taking the latest in the list when multiple mutually exclusive hints are given.
4.180974
3.987263
1.048582
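An illustrative call, assuming query_start_re (defined elsewhere) recognizes a bare SELECT and that STRAIGHT_JOIN belongs to one of the SELECT_HINTS groups; exact whitespace in the output may differ:

sql = modify_sql(
    "SELECT * FROM auth_user",
    add_comments=['QueryHint:1'],
    add_hints=['STRAIGHT_JOIN'],
    add_index_hints=[],
)
# Expected shape: SELECT /*QueryHint:1*/ STRAIGHT_JOIN * FROM auth_user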
match = reverse_key_re.match(full_key)
return match.group(3), match.group(1), int(match.group(2))
def default_reverse_key_func(full_key)
Reverse of Django's default_key_func, i.e. undoing: def default_key_func(key, key_prefix, version): return '%s:%s:%s' % (key_prefix, version, key)
3.669749
3.624667
1.012438
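A sketch of the round trip, assuming reverse_key_re (defined elsewhere) captures the prefix, version, and key around the first two colons:

full_key = 'cacheprefix:2:mykey'   # as produced by Django's default_key_func
key, prefix, version = default_reverse_key_func(full_key)
assert (key, prefix, version) == ('mykey', 'cacheprefix', 2)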