code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
def trigger_info(self, trigger=None, dump=False):
    """Get information about a trigger.

    Pass in a raw trigger to find out what file name and line number it
    appeared at. Returns a list of dicts with the keys ``category``
    ('topic' or 'thats'), ``topic``, ``trigger``, ``filename`` and
    ``line``; returns ``None`` when nothing matched.

    Pass a true value for ``dump`` to get the entire syntax tracking
    tree instead.

    :param str trigger: The raw trigger text to look up.
    :param bool dump: Whether to dump the entire syntax tracking tree.
    :return: A list of matching triggers or ``None`` if no matches.
    """
    if dump:
        return self._syntax

    matches = []
    for category, topics in self._syntax.items():
        for topic, triggers in topics.items():
            if trigger not in triggers:
                continue
            # Found it: record where this trigger was defined.
            filename, line_no = triggers[trigger]['trigger']
            matches.append(dict(
                category=category,
                topic=topic,
                trigger=trigger,
                filename=filename,
                line=line_no,
            ))
    # Preserve the original contract: None (not []) when nothing matched.
    return matches or None
3.743914
3.103739
1.206259
def current_user(self):
    """Retrieve the user ID of the current user talking to your bot.

    Mostly useful inside a Python object macro, to get the ID of the
    person who caused the macro to be invoked. Returns ``None`` when
    called outside the context of getting a reply (the value is unset
    at the end of ``reply()``) -- a warning is emitted in that case.
    """
    user_id = self._brain._current_user
    if user_id is None:
        # They're doing it wrong.
        self._warn("current_user() is meant to be used from within a Python object macro!")
    return user_id
13.326059
8.11742
1.641662
def reply(self, user, msg, errors_as_replies=True):
    """Fetch a reply from the RiveScript brain.

    Arguments:
        user (str): A unique user ID for the person requesting a reply
            (used internally to store user variables such as topic and
            history).
        msg (str): The user's message.
        errors_as_replies (bool): When True (the default), errors such
            as deep recursion or no-reply become the reply text; when
            False they raise exceptions instead.

    Returns:
        str: The reply output.
    """
    # Pure delegation: all matching logic lives in the brain object.
    brain = self._brain
    return brain.reply(user, msg, errors_as_replies)
5.514036
8.82148
0.625069
if pattern not in self._regexc[kind]: qm = re.escape(pattern) self._regexc[kind][pattern] = { "qm": qm, "sub1": re.compile(r'^' + qm + r'$'), "sub2": re.compile(r'^' + qm + r'(\W+)'), "sub3": re.compile(r'(\W+)' + qm + r'(\W+)'), "sub4": re.compile(r'(\W+)' + qm + r'$'), }
def _precompile_substitution(self, kind, pattern)
Pre-compile the regexp for a substitution pattern. This will speed up the substitutions that happen at the beginning of the reply fetching process. With the default brain, this took the time for _substitute down from 0.08s to 0.02s :param str kind: One of ``sub``, ``person``. :param str pattern: The substitution pattern.
2.454272
2.77286
0.885105
def _precompile_regexp(self, trigger):
    """Precompile the regexp for most triggers.

    Non-atomic triggers without dynamic tags (``<bot>``, ``<get>``,
    ``<input>``/``<reply>`` or ``@`` redirects) can be compiled once up
    front to save time when matching.

    :param str trigger: The trigger text to attempt to precompile.
    """
    # Atomic triggers are matched with plain string comparison.
    if utils.is_atomic(trigger):
        return
    # Triggers containing dynamic tags can only be compiled at match time.
    if any(tag in trigger for tag in ("@", "<bot", "<get", "<input", "<reply")):
        return
    self._regexc["trigger"][trigger] = self._brain.reply_regexp(None, trigger)
11.210259
7.253312
1.545537
pp = pprint.PrettyPrinter(indent=4) print("=== Variables ===") print("-- Globals --") pp.pprint(self._global) print("-- Bot vars --") pp.pprint(self._var) print("-- Substitutions --") pp.pprint(self._sub) print("-- Person Substitutions --") pp.pprint(self._person) print("-- Arrays --") pp.pprint(self._array) print("=== Topic Structure ===") pp.pprint(self._topics) print("=== %Previous Structure ===") pp.pprint(self._thats) print("=== Includes ===") pp.pprint(self._includes) print("=== Inherits ===") pp.pprint(self._lineage) print("=== Sort Buffer ===") pp.pprint(self._sorted) print("=== Syntax Tree ===") pp.pprint(self._syntax)
def _dump(self)
For debugging, dump the entire data structure.
3.623852
3.479818
1.041391
def dump_to_response(request, app_label=None, exclude=None, filename_prefix=None):
    """Utility function that dumps the given app/model to an HttpResponse.

    On success the serialized data is returned as an attachment whose
    filename is a timestamp (optionally prefixed).  On a CommandError
    from the serializer, an error message is flashed and the user is
    redirected back to the page before the 'dump' segment of the URL.

    :param request: the current request (used for messages and redirect).
    :param app_label: a label or list of labels to serialize (default: all).
    :param exclude: labels to exclude, passed through to the serializer.
    :param filename_prefix: optional prefix for the attachment filename.
    """
    # Fix: removed the dead no-op self-assignment ``exclude = exclude``
    # that was present in the original.
    app_label = app_label or []
    try:
        filename = '%s.%s' % (datetime.now().isoformat(),
                              settings.SMUGGLER_FORMAT)
        if filename_prefix:
            filename = '%s_%s' % (filename_prefix, filename)
        # Normalize a single label into a one-element list.
        if not isinstance(app_label, list):
            app_label = [app_label]
        response = serialize_to_response(app_label, exclude)
        response['Content-Disposition'] = 'attachment; filename=%s' % filename
        return response
    except CommandError as e:
        messages.error(
            request,
            _('An exception occurred while dumping data: %s') % force_text(e))
        return HttpResponseRedirect(request.build_absolute_uri().split('dump')[0])
2.888133
2.930256
0.985625
def dump_data(request):
    """Exports data from the whole project.

    An optional comma-separated ``app_label`` GET parameter narrows the
    dump; project-wide excludes come from settings.
    """
    # Try to grab app_label data from the query string.
    raw_labels = request.GET.get('app_label', [])
    labels = raw_labels.split(',') if raw_labels else raw_labels
    return dump_to_response(request, app_label=labels,
                            exclude=settings.SMUGGLER_EXCLUDE_LIST)
4.893456
4.951038
0.98837
def dump_model_data(request, app_label, model_label):
    """Exports data from a single model as an attachment download."""
    dotted_label = '%s.%s' % (app_label, model_label)
    filename_prefix = '-'.join((app_label, model_label))
    return dump_to_response(request, dotted_label, [], filename_prefix)
4.758761
4.997913
0.95215
def toposort(data):
    """Topologically sort a dependency mapping.

    Dependencies are expressed as a dictionary whose keys are items and
    whose values are sets of dependent items.  Yields sets in
    topological order: the first set has no dependencies, each
    subsequent set depends only on items in preceding sets.

    :param data: dict mapping item -> set of items it depends on.
    :raises ValueError: if cyclic dependencies remain.
    """
    # Special case empty input.
    if not data:
        return
    # Fix: the original only did a shallow ``data.copy()``, so discarding
    # self-dependencies mutated the caller's value sets.  Copy the sets too.
    data = dict((item, set(deps)) for item, deps in data.items())
    # Ignore self dependencies.
    for item, deps in data.items():
        deps.discard(item)
    # Items that appear only as dependencies get an empty dependency set.
    extra_items = functools.reduce(set.union, data.values()) - set(data.keys())
    data.update(dict((item, set()) for item in extra_items))
    while True:
        ready = set(item for item, deps in data.items() if not deps)
        if not ready:
            break
        yield ready
        data = dict(
            (item, deps - ready)
            for item, deps in data.items() if item not in ready
        )
    if data:
        raise ValueError(
            'Cyclic dependencies exist among these items: {}'
            .format(', '.join(repr(x) for x in data.items()))
        )
2.714868
2.492408
1.089255
def reorder_dag(sequence,
                depends_getter=lambda x: x.depends_on,
                name_getter=lambda x: x.app_name,
                impatience_max=100):
    """Reorder a sequence so dependencies come first (DAG ordering).

    Example: with C->B->A dependencies, [C, B, A] becomes [A, B, C].

    parameters:
        :sequence: an iterable of job objects
        :depends_getter: callable extracting the depends-on value(s)
        :name_getter: callable extracting the name
        :impatience_max: retained for interface compatibility

    :raises CircularDAGError: when no root exists or a cycle is found.
    """
    jobs = collections.defaultdict(list)
    map_ = {}
    root_count = 0
    for each in sequence:
        name = name_getter(each)
        depends_on = depends_getter(each)
        # Normalize depends_on into a list.
        if depends_on is None:
            depends_on = []
        elif isinstance(depends_on, tuple):
            depends_on = list(depends_on)
        elif not isinstance(depends_on, list):
            depends_on = [depends_on]
        if not depends_on:
            root_count += 1
        jobs[name] += depends_on
        map_[name] = each
    if not root_count:
        raise CircularDAGError("No job is at the root")
    try:
        jobs = dict((name, set(deps)) for name, deps in jobs.items())
        ordered_jobs = list(toposort_flatten(jobs))
    # Fix: ``except ValueError, e`` is Python-2-only syntax and fails to
    # parse on Python 3; ``as`` works on both.
    except ValueError as e:
        raise CircularDAGError(e)
    return [map_[x] for x in ordered_jobs if x in map_]
2.706104
2.813505
0.961826
def convert_frequency(frequency):
    """Return the number of seconds a frequency string represents.

    For example ``1d`` means one day, i.e. 60 * 60 * 24 seconds.
    Recognized formats: ``10d`` (days), ``12h`` (hours), ``3m`` (minutes).

    :param str frequency: e.g. ``'3m'``.
    :raises FrequencyDefinitionError: for an unrecognized unit.
    """
    # Fix: use raw strings -- '\d' in a plain literal is an invalid
    # escape sequence (DeprecationWarning since Python 3.6, an error in
    # newer versions).
    number = int(re.findall(r'\d+', frequency)[0])
    unit = re.findall(r'[^\d]+', frequency)[0]
    if unit == 'h':
        number *= 60 * 60
    elif unit == 'm':
        number *= 60
    elif unit == 'd':
        number *= 60 * 60 * 24
    elif unit:
        raise FrequencyDefinitionError(unit)
    return number
2.534315
2.459907
1.030249
def timesince(d, now):
    """Return the time between ``d`` and ``now`` as a nice string.

    Taken from django.utils.timesince and simplified.  Returns e.g.
    "10 minutes"; if ``d`` is after ``now``, "0 seconds" is returned.
    Units are years, months, weeks, days, hours and minutes (seconds
    and microseconds are ignored); up to two adjacent units are shown,
    e.g. "2 weeks, 3 days" but never "2 weeks, 3 hours".
    """
    def _plural(singular, plural):
        # Build a formatter choosing the singular or plural template.
        return lambda n: singular % n if n == 1 else plural % n

    chunks = (
        (60 * 60 * 24 * 365, _plural('%d year', '%d years')),
        (60 * 60 * 24 * 30, _plural('%d month', '%d months')),
        (60 * 60 * 24 * 7, _plural('%d week', '%d weeks')),
        (60 * 60 * 24, _plural('%d day', '%d days')),
        (60 * 60, _plural('%d hour', '%d hours')),
        (60, _plural('%d minute', '%d minutes')),
        (0, _plural('%d second', '%d seconds')),
    )
    # Promote plain dates to datetimes so subtraction works.
    if not isinstance(d, datetime.datetime):
        d = datetime.datetime(d.year, d.month, d.day)
    if now and not isinstance(now, datetime.datetime):
        now = datetime.datetime(now.year, now.month, now.day)
    delta = now - d
    # Ignore microseconds.
    since = delta.days * 24 * 60 * 60 + delta.seconds
    if since <= 0:
        # d is in the future compared to now: highest granularity, zero.
        return chunks[-1][1](0)
    for index, (unit_seconds, fmt) in enumerate(chunks):
        if unit_seconds > 0:
            count = since // unit_seconds
            if count != 0:
                break
        else:
            count = since
    result = fmt(count)
    if index + 1 < len(chunks):
        # Append the next-smaller unit when it is non-zero.
        next_seconds, next_fmt = chunks[index + 1]
        if next_seconds > 0:
            remainder = (since - unit_seconds * count) // next_seconds
        else:
            remainder = since - unit_seconds * count
        if remainder != 0:
            result += ', ' + next_fmt(remainder)
    return result
2.110136
2.098289
1.005646
def connection(self, name=None):
    """Return a named connection, creating and pooling it if needed.

    If no name is given, the default connection name (derived from the
    current executing thread) is used.

    parameters:
        name - a name as a string
    """
    key = name or self._get_default_connection_name()
    if key in self.pool:
        return self.pool[key]
    # Not pooled yet: open a fresh connection and cache it under this name.
    new_connection = psycopg2.connect(self.dsn)
    self.pool[key] = new_connection
    return new_connection
2.483235
2.629522
0.944367
def close_connection(self, connection, force=False):
    """Decline to close a connection at the end of a transaction context
    unless ``force`` is true -- this allows connections to be reused.

    :param connection: the connection object to (maybe) close.
    :param force: when true, really close it and drop it from the pool.
    """
    if not force:
        # Pooled connections are reused; do nothing.
        return
    try:
        connection.close()
    except self.operational_exceptions:
        self.config.logger.error('ConnectionFactory - failed closing')
    # Fix: the original broke out of an iteritems() loop and then deleted
    # ``name`` afterwards -- when the connection was not in the pool this
    # deleted an arbitrary entry (or raised NameError on an empty pool).
    # Delete only an exact identity match, over a snapshot of the items.
    for name, pooled in list(self.pool.items()):
        if pooled is connection:
            del self.pool[name]
            break
5.9142
5.98591
0.98802
def respond_to_SIGHUP(signal_number, frame, logger=None):
    """Signal handler: raise KeyboardInterrupt to shut the app down.

    Sets the module-level ``restart`` flag to True so the app rereads
    its configuration, rebuilds its structures and starts running again.
    """
    global restart
    if logger:
        logger.info('detected SIGHUP')
    restart = True
    raise KeyboardInterrupt
7.558013
6.842933
1.104499
def backoff_generator(self):
    """Generate the sleep lengths (ints) used between retries.

    Yields each configured delay in order, then repeats the last delay
    forever -- this generator never raises StopIteration.
    """
    for delay in self.config.backoff_delays:
        yield delay
    # Schedule exhausted: keep yielding the final delay indefinitely.
    while True:
        yield self.config.backoff_delays[-1]
4.036766
3.789641
1.065211
def responsive_sleep(self, seconds, wait_reason=''):
    """Sleep for ``seconds``, one second at a time, calling
    ``self.quit_check()`` between ticks so shutdown stays responsive.

    Logs progress every ``wait_log_interval`` seconds (when that config
    value is set).

    :param seconds: total number of seconds to sleep.
    :param wait_reason: text included in the progress log line.
    """
    # Fix: ``xrange`` is Python-2-only; ``range`` works on both 2 and 3.
    for elapsed in range(int(seconds)):
        if (self.config.wait_log_interval
                and not elapsed % self.config.wait_log_interval):
            self.config.logger.debug(
                '%s: %dsec of %dsec' % (wait_reason, elapsed, seconds)
            )
        self.quit_check()
        time.sleep(1.0)
4.191318
3.695032
1.134312
def as_backfill_cron_app(cls):
    """Class decorator for Crontabber apps: make a CronApp a backfill app.

    Replaces ``main`` with a version that forces the base class to use
    ``once=False`` (so the app does backfill work) and tags the class
    with ``_is_backfill_app``.
    """
    def main(self, function=None):
        # Delegate to the base implementation, pinning once=False.
        return super(cls, self).main(function=function, once=False)

    cls.main = main
    cls._is_backfill_app = True
    return cls
7.099938
4.8677
1.458582
def with_transactional_resource(
    transactional_resource_class,
    resource_name,
    reference_value_from=None
):
    """Class decorator for Crontabber apps: wire in a transactional resource.

    Gives the decorated CronApp access to a resource connection source.
    Configuration is set up automatically and the app gains the
    attributes::

        self.{resource_name}_connection_factory
        self.{resource_name}_transaction_executor

    The RequiredConfig structure gets set up like this::

        config.{resource_name}.{resource_name}_class = \\
            transactional_resource_class
        config.{resource_name}.{resource_name}_transaction_executor_class = \\
            'crontabber.transaction_executor.TransactionExecutor'

    parameters:
        transactional_resource_class - full dotted path of the class that
            represents a connection to the resource, e.g.
            "crontabber.connection_factory.ConnectionFactory".
        resource_name - identifier for this resource within the mixin,
            e.g. 'database'.
        reference_value_from - passed through to configman's add_option.
    """
    def class_decorator(cls):
        # The decorated class must participate in configman configuration.
        if not issubclass(cls, RequiredConfig):
            raise Exception(
                '%s must have RequiredConfig as a base class' % cls
            )
        # Extend the class's required config with a namespace for this
        # resource and two options: the resource class itself and the
        # transaction executor class.
        new_req = cls.get_required_config()
        new_req.namespace(resource_name)
        new_req[resource_name].add_option(
            '%s_class' % resource_name,
            default=transactional_resource_class,
            from_string_converter=class_converter,
            reference_value_from=reference_value_from,
        )
        new_req[resource_name].add_option(
            '%s_transaction_executor_class' % resource_name,
            default='crontabber.transaction_executor.TransactionExecutor',
            doc='a class that will execute transactions',
            from_string_converter=class_converter,
            reference_value_from=reference_value_from
        )
        cls.required_config = new_req

        #------------------------------------------------------------------
        def new__init__(self, *args, **kwargs):
            # instantiate the connection class for the resource
            super(cls, self).__init__(*args, **kwargs)
            setattr(
                self,
                "%s_connection_factory" % resource_name,
                self.config[resource_name]['%s_class' % resource_name](
                    self.config[resource_name]
                )
            )
            # instantiate a transaction executor bound to the
            # resource connection
            setattr(
                self,
                "%s_transaction_executor" % resource_name,
                self.config[resource_name][
                    '%s_transaction_executor_class' % resource_name
                ](
                    self.config[resource_name],
                    getattr(self, "%s_connection_factory" % resource_name)
                )
            )
        # Chain the new initializer in front of any existing __init__ so
        # the resource attributes exist before the class's own setup runs.
        if hasattr(cls, '__init__'):
            original_init = cls.__init__

            def both_inits(self, *args, **kwargs):
                new__init__(self, *args, **kwargs)
                return original_init(self, *args, **kwargs)
            cls.__init__ = both_inits
        else:
            cls.__init__ = new__init__
        return cls
    return class_decorator
2.177986
1.185019
def with_resource_connection_as_argument(resource_name):
    """Class decorator for Crontabber apps.

    Adds a ``_run_proxy`` method that obtains a connection from
    ``self.{resource_name}_connection_factory`` (as a context manager),
    passes it to the app's ``run`` method, and force-closes it when
    ``run`` ends.  Must be used together with the
    ``with_transactional_resource`` decorator (or equivalent), which
    provides the factory attribute.
    """
    factory_attr = '%s_connection_factory' % resource_name

    def class_decorator(cls):
        def _run_proxy(self, *args, **kwargs):
            connection_factory = getattr(self, factory_attr)
            with connection_factory() as conn:
                try:
                    self.run(conn, *args, **kwargs)
                finally:
                    # Hard-close: connections opened here are not reused.
                    connection_factory.close_connection(conn, force=True)
        cls._run_proxy = _run_proxy
        return cls
    return class_decorator
2.874081
2.507461
1.146212
def with_single_transaction(resource_name):
    """Class decorator for Crontabber apps.

    Adds a ``_run_proxy`` method that executes the app's ``run`` method
    through ``self.{resource_name}_transaction_executor``, so the whole
    run happens in one transaction (commit on success, rollback on
    error, per the executor's semantics).  Must be used together with
    ``with_transactional_resource`` (or equivalent), which provides the
    executor attribute.
    """
    executor_attr = "%s_transaction_executor" % resource_name

    def class_decorator(cls):
        def _run_proxy(self, *args, **kwargs):
            executor = getattr(self, executor_attr)
            executor(self.run, *args, **kwargs)
        cls._run_proxy = _run_proxy
        return cls
    return class_decorator
3.743179
3.203236
1.168562
def with_subprocess(cls):
    """Class decorator for Crontabber apps.

    Adds a ``run_process`` helper that runs a shell command and returns
    ``(returncode, stripped stdout, stripped stderr)``.  A list/tuple
    command is quoted and joined into a single shell string.
    """
    def run_process(self, command, input=None):
        if isinstance(command, (tuple, list)):
            quoted = ['"%s"' % part for part in command]
            command = ' '.join(quoted)
        # NOTE(review): shell=True with a string command -- callers must
        # never pass untrusted input here.
        proc = subprocess.Popen(
            command,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        stdout, stderr = proc.communicate(input=input)
        return proc.returncode, stdout.strip(), stderr.strip()

    cls.run_process = run_process
    return cls
2.598838
0.945983
def classes_in_namespaces_converter_with_compression(
    reference_namespace={},
    template_for_namespace="class-%(name)s",
    list_splitter_fn=_default_list_splitter,
    class_extractor=_default_class_extractor,
    extra_extractor=_default_extra_extractor
):
    """Build a configman converter that turns a class-list string into a
    dynamically created RequiredConfig class.

    parameters:
        reference_namespace - options already present here are not
            duplicated into the per-class namespaces ("compression").
        template_for_namespace - template for the namespace names that
            will contain the classes and their required config options.
            Two template variables are available: %(name)s - the class
            name; %(index)d - the sequential index of the namespace.
        list_splitter_fn - splits the string list of classes into a
            sequence of individual elements.
        class_extractor - returns the string classname from one element
            produced by the splitter.
        extra_extractor - returns a Namespace of options created from
            any extra information associated with a class element.

    NOTE(review): ``reference_namespace={}`` is a mutable default
    argument -- shared across calls; it is only read here, but confirm
    no caller mutates it.
    """
    # -------------------------------------------------------------------------
    def class_list_converter(class_list_str):
        if isinstance(class_list_str, basestring):
            class_str_list = list_splitter_fn(class_list_str)
        else:
            raise TypeError('must be derivative of a basestring')

        # =====================================================================
        class InnerClassList(RequiredConfig):
            # we're dynamically creating a class here.  The following block of
            # code is actually adding class level attributes to this new class
            # 1st requirement for configman
            required_config = Namespace()
            # to help the programmer know what Namespaces we added
            subordinate_namespace_names = []
            # save the template for future reference
            namespace_template = template_for_namespace
            # for display
            original_input = class_list_str.replace('\n', '\\n')
            # for each class in the class list
            class_list = []
            for namespace_index, class_list_element in enumerate(
                class_str_list
            ):
                try:
                    a_class = class_converter(
                        class_extractor(class_list_element)
                    )
                except CannotConvertError:
                    raise JobNotFoundError(class_list_element)
                class_list.append((a_class.__name__, a_class))
                # figure out the Namespace name
                namespace_name_dict = {
                    'name': a_class.__name__,
                    'index': namespace_index
                }
                namespace_name = template_for_namespace % namespace_name_dict
                subordinate_namespace_names.append(namespace_name)
                # create the new Namespace
                required_config.namespace(namespace_name)
                a_class_namespace = required_config[namespace_name]
                # add options for the 'extra data'
                try:
                    extra_options = extra_extractor(class_list_element)
                    a_class_namespace.update(extra_options)
                except NotImplementedError:
                    pass
                # add options for the class's required config, skipping any
                # option already present in the reference namespace
                try:
                    for k, v in a_class.get_required_config().iteritems():
                        if k not in reference_namespace:
                            a_class_namespace[k] = v
                except AttributeError:
                    # a_class has no get_required_config
                    pass

            @classmethod
            def to_str(cls):
                # For display: the original (newline-escaped) input string.
                return cls.original_input

        return InnerClassList  # result of class_list_converter
    return class_list_converter
4.868441
4.607664
1.056596
def check_time(value):
    """Validate an HH:MM string such as '03:45' or '1:1'.

    :raises TimeDefinitionError: when ``value`` is not a valid time.
    """
    try:
        hour_str, minute_str = value.split(':')
        hour = int(hour_str)
        minute = int(minute_str)
        # Reuse ValueError so the except clause handles both parse and
        # range failures uniformly.
        if not (0 <= hour < 24):
            raise ValueError
        if not (0 <= minute < 60):
            raise ValueError
    except ValueError:
        raise TimeDefinitionError("Invalid definition of time %r" % value)
2.749581
2.574566
1.067978
def keys(self):
    """Return a list of all app_names."""
    return [app_name for app_name, __ in self.items()]
6.593217
3.648228
1.807238
def items(self):
    """Return all the app_names and their state values as tuples.

    Each tuple is ``(app_name, row_dict)`` where row_dict holds the
    remaining columns for that app.
    """
    # NOTE(review): ``sql = columns = (...)`` binds the column-name tuple
    # to BOTH names, and ``sql`` is then passed to
    # ``execute_query_fetchall`` -- it looks like the actual SQL query
    # text was lost here; confirm against the upstream source.
    sql = columns = (
        'app_name',
        'next_run',
        'first_run',
        'last_run',
        'last_success',
        'depends_on',
        'error_count',
        'last_error'
    )
    items = []
    for record in self.transaction_executor(execute_query_fetchall, sql):
        # Pair each fetched column with its name, then split out app_name.
        row = dict(zip(columns, record))
        items.append((row.pop('app_name'), row))
    return items
5.719155
5.131842
1.114445
def values(self):
    """Return a list of all state values."""
    return [data for __, data in self.items()]
7.302443
6.575048
1.11063
def pop(self, key, default=_marker):
    """Remove the item stored under ``key`` and return its value.

    If ``key`` is missing, return ``default`` when one was given;
    otherwise re-raise the KeyError.
    """
    try:
        value = self[key]
        del self[key]
        return value
    except KeyError:
        # No such key: fall back to the caller-supplied default, if any.
        if default == _marker:
            raise
        return default
2.552719
2.772981
0.920568
def nagios(self, stream=sys.stdout):
    """Nagios-style health check over the job state.

    Returns 0 (OK) when no job has errors; 1 (WARNING) when the only
    problems are backfill apps with exactly one error; 2 (CRITICAL) for
    anything worse.  A one-line report is written to ``stream``.
    """
    warnings = []
    criticals = []
    for class_name, job_class in self.config.crontabber.jobs.class_list:
        app_name = job_class.app_name
        if app_name not in self.job_state_database:
            continue
        info = self.job_state_database.get(app_name)
        if not info.get('error_count', 0):
            continue
        # trouble!
        error_count = info['error_count']
        description = '%s (%s) | %s | %s' % (
            app_name,
            class_name,
            info['last_error']['type'],
            info['last_error']['value'],
        )
        if error_count == 1 and hasattr(job_class, "_is_backfill_app"):
            # A single failure of a backfill app is just a warning.
            warnings.append(description)
        else:
            criticals.append(description)
    if criticals:
        stream.write('CRITICAL - ' + '; '.join(criticals) + '\n')
        return 2
    if warnings:
        stream.write('WARNING - ' + '; '.join(warnings) + '\n')
        return 1
    stream.write('OK - All systems nominal' + '\n')
    return 0
3.607468
3.204964
1.125588
def reset_job(self, description):
    """Remove a job from the state so it starts over from scratch next run.

    ``description`` may be either the job's app_name or its dotted
    ``module.ClassName`` path.

    :raises JobNotFoundError: when no configured job matches.
    """
    class_list = self._reorder_class_list(
        self.config.crontabber.jobs.class_list
    )
    for class_name, job_class in class_list:
        dotted_path = job_class.__module__ + '.' + job_class.__name__
        if description not in (job_class.app_name, dotted_path):
            continue
        if job_class.app_name in self.job_state_database:
            self.config.logger.info('App reset')
            self.job_state_database.pop(job_class.app_name)
        else:
            # Nothing stored for this job -- already reset.
            self.config.logger.warning('App already reset')
        return
    raise JobNotFoundError(description)
4.066773
4.050658
1.003978
def time_to_run(self, class_, time_):
    """Return True when it is time to run the job.

    True when there is no previous information about its last run, or
    when the last run set ``next_run`` to a date that is now past.
    When the job has never run and a ``time_`` (HH:MM) is configured,
    only run once that time of day has passed.
    """
    try:
        info = self.job_state_database[class_.app_name]
    except KeyError:
        # No past information.
        if not time_:
            return True
        # Only run once today's configured HH:MM has been reached.
        hour, minute = (int(part) for part in time_.split(':'))
        now = utc_now()
        return (now.hour, now.minute) >= (hour, minute)
    next_run = info['next_run']
    if not next_run:
        # It has never run before.  A truthy 'ongoing' means a second,
        # near-simultaneous thread set it already -- let it through so it
        # commences and breaks on RowLevelLockError in the state's
        # __setitem__ method.
        return bool(info['ongoing'])
    return next_run < utc_now()
7.192078
6.620088
1.086402
def audit_ghosts(self):
    """Compare the configured jobs with the jobs in the state database
    and print any app_names that exist only in the state ("ghosts").

    Prints a header once, then one tab-indented line per ghost app.
    """
    header_printed = False
    for app_name in self._get_ghosts():
        if not header_printed:
            header_printed = True
            # Fix: converted the Python-2-only ``print`` statements to the
            # print() function so the module parses on Python 3.
            print(
                "Found the following in the state database but not "
                "available as a configured job:"
            )
        print("\t%s" % (app_name,))
7.962033
6.37208
1.249519
def export_js(
        self,
        js_module_name=JS_MODULE_NAME,
        js_template=JS_ES6_IMPORT_EXPORT_TEMPLATE,
        js_indent=JS_INDENTATION):
    '''Export the grammar to a JavaScript file which can be used with the
    js-lrparsing module. Two templates are available:
        Grammar.JS_WINDOW_TEMPLATE
        Grammar.JS_ES6_IMPORT_EXPORT_TEMPLATE (default)
    '''
    language = []
    refs = []
    classes = {'Grammar'}
    indent = 0
    # With the ES6 template ('import ' present) the elements become static
    # class members, so we need the grammar's class name; otherwise None.
    cname = self.__class__.__name__ if 'import ' in js_template else None
    for name in self._order:
        elem = getattr(self, name, None)
        if not isinstance(elem, Element):
            continue
        if not hasattr(elem, '_export_js'):
            continue
        # One declaration line per exportable element.
        language.append('{indent}{var} {name} = {value};'.format(
            indent=js_indent,
            name=name,
            var='static' if cname else 'var',
            value=elem._export_js(js_indent, indent, classes, cname)))
    # Forward references are resolved with .set(...) calls after the fact.
    for name, ref in self._refs.items():
        refs.append(
            '{pre}{name}.set({value});'
            .format(
                pre='{}.'.format(cname) if cname else js_indent,
                name=name,
                value=ref._element._export_js(
                    js_indent,
                    -1 if cname else indent,
                    classes,
                    cname)))
    # 'Rule' is internal and must not appear in the emitted class list.
    if 'Rule' in classes:
        classes.remove('Rule')
    return js_template.format(
        name=self.__class__.__name__,
        indent=js_indent,
        js_module=js_module_name,
        datetime=time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()),
        language='\n'.join(language),
        refs='\n{}'.format('\n'.join(refs)),
        arguments=',\n'.join(map(lambda s: js_indent * 3 + s, classes)),
        # Double the backslashes so the pattern survives inside a JS string.
        re_keywords=self.RE_KEYWORDS.pattern.replace('\\', '\\\\'),
        classes=', '.join(classes),
        constructors=',\n'.join(
            map(lambda s: js_indent + s, [
                '.'.join(['window', js_module_name, n])
                for n in classes])))
4.014184
3.2656
1.229233
def export_py(
        self,
        py_module_name=PY_MODULE_NAME,
        py_template=PY_TEMPLATE,
        py_indent=PY_INDENTATION):
    '''Export the grammar to a python file which can be used with the
    pyleri module. This can be useful when python code is used to
    auto-create a grammar and an export of the final result is required.
    '''
    language = []
    classes = {'Grammar'}
    indent = 0
    for name in self._order:
        elem = getattr(self, name, None)
        if not isinstance(elem, Element):
            continue
        if not hasattr(elem, '_export_py'):
            continue
        # One assignment line per exportable element.
        language.append('{indent}{name} = {value}'.format(
            indent=py_indent,
            name=name,
            value=elem._export_py(py_indent, indent, classes)))
    # Forward references are re-assigned after all elements are defined.
    for name, ref in self._refs.items():
        language.append(
            '{indent}{name} = {value}'
            .format(
                indent=py_indent,
                name=name,
                value=ref._element._export_py(
                    py_indent,
                    indent,
                    classes)))
    return py_template.format(
        name=self.__class__.__name__,
        indent=py_indent,
        py_module=py_module_name,
        datetime=time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()),
        language='\n'.join(language),
        # Double the backslashes so the pattern survives in source form.
        re_keywords=self.RE_KEYWORDS.pattern.replace('\\', '\\\\'),
        # NOTE(review): map(lambda s: s, ...) is an identity pass-through
        # and could be dropped -- left as-is.
        imports='\n'.join(
            map(lambda s: s, [
                ' '.join(['from', py_module_name, 'import', n])
                for n in classes if n != 'Rule'])))
3.848473
2.732591
1.40836
def export_c(self, target=C_TARGET, c_indent=C_INDENTATION, headerf=None):
    '''Export the grammar to a c (source and header) file which can be
    used with the libcleri module.

    Returns a (source_text, header_text) tuple.
    '''
    language = []
    indent = 0
    enums = set()
    for name in self._order:
        elem = getattr(self, name, None)
        if not isinstance(elem, Element):
            continue
        if not hasattr(elem, '_export_c'):
            continue
        # One cleri_t declaration per exportable element.
        language.append(
            '{indent}cleri_t * {name} = {value};'.format(
                indent=c_indent,
                name=name,
                value=elem._export_c(c_indent, indent, enums)))
    # Forward references are wired up with cleri_ref_set afterwards.
    for name, ref in self._refs.items():
        language.append(
            '{indent}cleri_ref_set({name}, {value});'
            .format(
                indent=c_indent,
                name=name,
                value=ref._element._export_c(
                    c_indent,
                    indent,
                    enums,
                    ref)))
    # Double backslashes so the pattern survives inside a C string, and
    # anchor it at the start as libcleri expects.
    pattern = self.RE_KEYWORDS.pattern.replace('\\', '\\\\')
    if not pattern.startswith('^'):
        pattern = '^' + pattern
    enums = ',\n'.join([
        '{}{}'.format(c_indent, gid) for gid in sorted(enums)]) + ','
    header_file = '"{}.h"'.format(target) if headerf is None else headerf
    # Function-name stem derived from the target path.
    fun = target.strip('/').replace('/', '_')
    return (self.__class__.C_TEMPLATE_C.format(
        name=self.__class__.__name__,
        target=target,
        header_file=header_file,
        fun=fun,
        indent=c_indent,
        datetime=time.strftime(
            '%Y-%m-%d %H:%M:%S',
            time.localtime()),
        language='\n'.join(language),
        re_keywords=pattern),
        self.__class__.C_TEMPLATE_H.format(
            name=self.__class__.__name__,
            target=target,
            fun=fun,
            # Include-guard macro built from the target path.
            guard=target.upper().replace('/', '_').replace('\\', '_'),
            datetime=time.strftime(
                '%Y-%m-%d %H:%M:%S',
                time.localtime()),
            language='\n'.join(language),
            enums=enums))
3.316917
2.905909
1.141439
def export_go(
        self,
        go_template=GO_TEMPLATE,
        go_indent=GO_INDENTATION,
        go_package=GO_PACKAGE):
    '''Export the grammar to a Go file which can be used with the
    goleri module.
    '''
    language = []
    enums = set()
    indent = 0
    # Go raw strings cannot contain a backtick; splice them in as quoted
    # strings, and anchor the keyword pattern at the start.
    pattern = self.RE_KEYWORDS.pattern.replace('`', '` + "`" + `')
    if not pattern.startswith('^'):
        pattern = '^' + pattern
    for name in self._order:
        elem = getattr(self, name, None)
        if not isinstance(elem, Element):
            continue
        if not hasattr(elem, '_export_go'):
            continue
        # One short-variable declaration per exportable element,
        # with the name converted to Go camelCase.
        language.append('{indent}{name} := {value}'.format(
            indent=go_indent,
            name=camel_case(name),
            value=elem._export_go(go_indent, indent, enums)))
    # Forward references are wired up with .Set(...) afterwards.
    for name, ref in self._refs.items():
        language.append(
            '{indent}{name}.Set({value})'
            .format(
                indent=go_indent,
                name=camel_case(name),
                value=ref._element._export_go(
                    go_indent,
                    indent,
                    enums)))
    # Emit the gid enum block as a Go iota constant list.
    enums = ' = iota\n'.join([
        '{}{}'.format(go_indent, gid) for gid in sorted(enums)]) + ' = iota'
    return go_template.format(
        name=self.__class__.__name__,
        indent=go_indent,
        package=go_package,
        datetime=time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()),
        language='\n'.join(language),
        re_keywords=pattern,
        enums=enums)
3.866647
3.453638
1.119587
def export_java(
        self,
        java_template=JAVA_TEMPLATE,
        java_indent=JAVA_INDENTATION,
        java_package=JAVA_PACKAGE,
        is_public=True):
    '''Export the grammar to a Java file for use with the jleri module.

    :param java_template: format template for the generated file.
    :param java_indent: indentation string for the generated Java code.
    :param java_package: package name; ``None`` omits the package line.
    :param is_public: whether the generated class is declared public.
    :return: the generated Java source as a string.
    '''
    language = []
    enums = set()
    # Imports always required by the generated class; element exporters
    # add whatever extra jleri classes they use.
    classes = {'jleri.Grammar', 'jleri.Element'}
    refs = []
    indent = 0

    # Escape backslashes for a Java string literal and anchor the
    # keyword regex.
    pattern = self.RE_KEYWORDS.pattern.replace('\\', '\\\\')
    if not pattern.startswith('^'):
        pattern = '^' + pattern

    for name in self._order:
        elem = getattr(self, name, None)
        if not isinstance(elem, Element):
            continue
        if not hasattr(elem, '_export_java'):
            continue
        language.append(
            '{indent}private static final Element {name} = {value};'
            .format(
                indent=java_indent,
                name=name.upper(),
                value=elem._export_java(
                    java_indent, indent, enums, classes)))

    enum_str = ',\n'.join([
        '{indent}{indent}{gid}'.format(
            indent=java_indent, gid=gid)
        for gid in sorted(enums)])

    # Forward references are set after all elements are declared;
    # indent -2 flags the nested-in-method context to the exporter.
    for name, ref in self._refs.items():
        refs.append(
            '{indent}{indent}((Ref) {name}).set({value});'
            .format(
                indent=java_indent,
                name=name.upper(),
                value=ref._element._export_java(
                    java_indent, -2, enums, classes)))

    return java_template.format(
        name=self.__class__.__name__,
        imports='\n'.join(
            map(lambda s: s, [
                'import {};'.format(c)
                for c in sorted(classes) if c != 'Rule'])),
        indent=java_indent,
        package='' if java_package is None
        else 'package {};\n'.format(java_package),
        datetime=time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()),
        language='\n'.join(language),
        re_keywords=pattern,
        refs='' if not refs else '{}\n'.format('\n'.join(refs)),
        enums=enum_str,
        public='public ' if is_public else '')
def parse(self, string):
    '''Parse a string against the grammar.

    Returns a Result with the following attributes:
     - is_valid: True when the string is successfully parsed.
     - pos: position in the string where parsing ended (end of the
       string when is_valid is True).
     - expecting: list of possible elements at position ``pos``.
     - tree: the parse tree for the given string.
    '''
    self._string = string
    self._expecting = Expecting()
    self._cached_kw_match.clear()  # keyword-match cache is per parse run
    self._len_string = len(string)
    self._pos = None
    tree = Node(self._element, string, 0, self._len_string)
    node_res = Result(*self._walk(
        self._element,
        0,
        tree.children,
        self._element,
        True))

    # Anything left after the walk (ignoring whitespace) means the
    # string was not fully consumed.
    rest = self._string[node_res.pos:].lstrip()

    # A successful walk that did not consume the whole input is invalid.
    if node_res.is_valid and rest:
        node_res.is_valid = False

    # When nothing is strictly required yet but input remains,
    # end-of-statement is also a valid expectation at this position.
    if not self._expecting.required and rest:
        self._expecting.set_mode_required(node_res.pos, True)
        self._expecting.update(end_of_statement, node_res.pos)

    node_res.expecting = self._expecting.get_expecting()

    # On failure, report the furthest position that was reached.
    if not node_res.is_valid:
        node_res.pos = self._expecting.pos

    node_res.tree = tree
    return node_res
def figure_buffer(figs):
    '''Extract raw RGB image buffers from matplotlib figures.

    :param figs: non-empty list of figures whose canvases have already
        been drawn; all canvases must share the same width and height.
    :return: ``np.uint8`` array shaped ``[NumFigures, Height, Width, 3]``.
    '''
    assert len(figs) > 0, 'No figure buffers given. Forgot to return from draw call?'
    buffers = []
    w, h = figs[0].canvas.get_width_height()
    for f in figs:
        wf, hf = f.canvas.get_width_height()
        assert wf == w and hf == h, 'All canvas objects need to have same size'
        # np.frombuffer replaces the deprecated (and now removed)
        # np.fromstring for decoding the raw canvas bytes.
        buffers.append(np.frombuffer(
            f.canvas.tostring_rgb(), dtype=np.uint8).reshape(h, w, 3))
    return np.stack(buffers)
def figure_tensor(func, **tf_pyfunc_kwargs):
    '''Decorate matplotlib drawing routines.

    The decorated function must return a matplotlib figure (or an
    iterable of figures). It is invoked while a TensorFlow session
    runs, after all positional tensor arguments have been evaluated;
    tensor arguments are replaced by their values and all other
    arguments pass through unchanged.

    :return: wrapper producing a ``tf.uint8`` tensor of shape
        ``[NumFigures, Height, Width, 3]``.
    '''
    name = tf_pyfunc_kwargs.pop('name', func.__name__)

    @wraps(func)
    def wrapper(*func_args, **func_kwargs):
        # Split tensor from non-tensor positional arguments.
        tf_args = PositionalTensorArgs(func_args)

        def pyfnc_callee(*tensor_values, **unused):
            try:
                # Re-merge evaluated tensor values into their original
                # positions before calling the user's drawing function.
                figs = as_list(func(*tf_args.mix_args(tensor_values), **func_kwargs))
                for f in figs:
                    f.canvas.draw()
                return figure_buffer(figs)
            except Exception:
                # py_func swallows tracebacks; print before re-raising.
                print('-'*5 + 'tfmpl catched exception' + '-'*5)
                print(traceback.format_exc())
                print('-'*20)
                raise
        return tf.py_func(
            pyfnc_callee, tf_args.tensor_args, tf.uint8,
            name=name, **tf_pyfunc_kwargs)
    return wrapper
def blittable_figure_tensor(func, init_func, **tf_pyfunc_kwargs):
    '''Decorate matplotlib drawing routines with blitting support.

    ``init_func(*args, **kwargs)`` must return ``(figures, artists)``;
    it is called once, on the first invocation, to create figures,
    perform one-time drawing and allocate the animated artists.
    ``func(*args, **kwargs)`` must return the updated artists on every
    call. Both receive evaluated tensor values in place of tensor
    arguments; other arguments pass through unchanged.

    :return: wrapper producing a ``tf.uint8`` tensor of shape
        ``[NumFigures, Height, Width, 3]``.
    '''
    name = tf_pyfunc_kwargs.pop('name', func.__name__)
    assert callable(init_func), 'Init function not callable'

    @wraps(func)
    def wrapper(*func_args, **func_kwargs):
        # Lazily initialized on first draw; kept across session runs.
        figs = None
        bgs = None

        tf_args = PositionalTensorArgs(func_args)

        def pyfnc_callee(*tensor_values, **unused):
            try:
                nonlocal figs, bgs
                pos_args = tf_args.mix_args(tensor_values)

                if figs is None:
                    figs, artists = init_func(*pos_args, **func_kwargs)
                    figs = as_list(figs)
                    artists = as_list(artists)
                    # Full draw once, then snapshot the static
                    # background regions used for blitting later.
                    for f in figs:
                        f.canvas.draw()
                    for a in artists:
                        a.set_animated(True)
                    bgs = [f.canvas.copy_from_bbox(f.bbox) for f in figs]

                artists = as_list(func(*pos_args, **func_kwargs))

                # Restore the static background, redraw only the
                # animated artists, then blit the composited result.
                for f, bg in zip(figs, bgs):
                    f.canvas.restore_region(bg)
                for a in artists:
                    a.axes.draw_artist(a)
                for f in figs:
                    f.canvas.blit(f.bbox)

                return figure_buffer(figs)
            except Exception:
                # py_func swallows tracebacks; print before re-raising.
                print('-'*5 + 'tfmpl catched exception' + '-'*5)
                print(traceback.format_exc())
                print('-'*20)
                raise

        return tf.py_func(
            pyfnc_callee, tf_args.tensor_args, tf.uint8,
            name=name, **tf_pyfunc_kwargs)
    return wrapper
def draw_confusion_matrix(matrix):
    '''Render a normalized MNIST confusion-matrix figure and return it.'''
    figure = tfmpl.create_figure(figsize=(7, 7))
    axes = figure.add_subplot(111)
    axes.set_title('Confusion matrix for MNIST classification')
    digit_labels = ['Digit ' + str(d) for d in range(10)]
    tfmpl.plots.confusion_matrix.draw(
        axes, matrix,
        axis_labels=digit_labels,
        normalize=True)
    return figure
def from_labels_and_predictions(labels, predictions, num_classes):
    '''Compute a confusion matrix from labels and predictions.

    A drop-in replacement for tf.confusion_matrix that works on CPU
    data and not tensors.

    Params
    ------
    labels : array-like
        1-D array of real labels for classification
    predictions : array-like
        1-D array of predicted label classes
    num_classes : scalar
        Total number of classes

    Returns
    -------
    matrix : NxN array
        Array of shape [num_classes, num_classes] containing the
        confusion values.
    '''
    # Cast to integer index arrays so plain Python lists are accepted.
    labels = np.asarray(labels, dtype=np.intp)
    predictions = np.asarray(predictions, dtype=np.intp)
    assert len(labels) == len(predictions)
    cm = np.zeros((num_classes, num_classes), dtype=np.int32)
    # One vectorized pass; np.add.at accumulates duplicate
    # (label, prediction) pairs correctly, unlike fancy-index +=.
    np.add.at(cm, (labels, predictions), 1)
    return cm
def draw(ax, cm, axis_labels=None, normalize=False):
    '''Plot a confusion matrix.

    Inspired by
    https://stackoverflow.com/questions/41617463/tensorflow-confusion-matrix-in-tensorboard

    Params
    ------
    ax : axis
        Axis to plot on
    cm : NxN array
        Confusion matrix

    Kwargs
    ------
    axis_labels : array-like
        Array of size N containing axis labels
    normalize : bool
        Whether to plot counts or ratios.
    '''
    cm = np.asarray(cm)
    num_classes = cm.shape[0]

    if normalize:
        # Convert counts to per-row ratios; rows whose total is zero
        # produce NaN from 0/0 and are mapped back to 0.
        with np.errstate(invalid='ignore', divide='ignore'):
            cm = cm / cm.sum(1, keepdims=True)
            cm = np.nan_to_num(cm, copy=True)

    # Temporarily limit float print precision; restored at the end.
    po = np.get_printoptions()
    np.set_printoptions(precision=2)

    ax.imshow(cm, cmap='Oranges')

    ticks = np.arange(num_classes)
    ax.set_xlabel('Predicted')
    ax.set_xticks(ticks)
    ax.xaxis.set_label_position('bottom')
    ax.xaxis.tick_bottom()

    ax.set_ylabel('Actual')
    ax.set_yticks(ticks)
    ax.yaxis.set_label_position('left')
    ax.yaxis.tick_left()

    if axis_labels is not None:
        # Split CamelCase labels into words, then hard-wrap at 20 chars
        # so long class names stay readable on the axes.
        ticklabels = [re.sub(r'([a-z](?=[A-Z])|[A-Z](?=[A-Z][a-z]))', r'\1 ', x)
                      for x in axis_labels]
        ticklabels = ['\n'.join(wrap(l, 20)) for l in ticklabels]
        ax.set_xticklabels(ticklabels, rotation=-90, ha='center')
        ax.set_yticklabels(ticklabels, va='center')

    # Annotate every cell; '.' marks empty cells to reduce visual noise.
    for i, j in product(range(num_classes), range(num_classes)):
        if cm[i, j] == 0:
            txt = '.'
        elif normalize:
            txt = '{:.2f}'.format(cm[i, j])
        else:
            txt = '{}'.format(cm[i, j])
        ax.text(j, i, txt,
                horizontalalignment="center",
                verticalalignment='center',
                color="black",
                fontsize=7)

    np.set_printoptions(**po)
def create_figure(*fig_args, **fig_kwargs):
    '''Create a single figure without touching pyplot.

    All arguments are forwarded to `matplotlib.figure.Figure`. pyplot
    is stateful and not thread safe; since tf-matplotlib drawing
    routines run inside py-funcs on their own threads, figures are
    constructed directly instead.
    '''
    figure = Figure(*fig_args, **fig_kwargs)
    # A canvas must be attached before the figure can be drawn.
    FigureCanvas(figure)
    return figure
def create_figures(n, *fig_args, **fig_kwargs):
    '''Create `n` independent figures without touching pyplot.

    All extra arguments are forwarded to `matplotlib.figure.Figure`.
    pyplot is stateful and not thread safe; since tf-matplotlib drawing
    routines run inside py-funcs on their own threads, figures are
    constructed directly instead.
    '''
    figures = []
    for _ in range(n):
        figures.append(create_figure(*fig_args, **fig_kwargs))
    return figures
def vararg_decorator(f):
    '''Allow a decorator to be used both with and without arguments.'''
    @wraps(f)
    def decorator(*args, **kwargs):
        bare_call = (
            len(args) == 1
            and not kwargs
            and callable(args[0]))
        if bare_call:
            # @deco form: args[0] is the function being decorated.
            return f(args[0])
        # @deco(...) form: capture the arguments, wait for the function.
        return lambda realf: f(realf, *args, **kwargs)
    return decorator
def as_list(x):
    '''Ensure `x` is of list type.

    None becomes the empty list; sequences (including strings) are
    converted element-wise; anything else is wrapped in a one-item list.
    '''
    if x is None:
        return []
    if isinstance(x, Sequence):
        return list(x)
    return [x]
def all(self, page=1, per_page=10, order_by="latest"):
    """Fetch one page of the full photo listing.

    :param page: page number to retrieve (default 1).
    :param per_page: number of items per page (default 10).
    :param order_by: sort order: latest, oldest or popular.
    :return: a single page of the Photo list.
    """
    return self._all(
        "/photos", page=page, per_page=per_page, order_by=order_by)
def curated(self, page=1, per_page=10, order_by="latest"):
    """Fetch one page of the curated (front-page) photo listing.

    :param page: page number to retrieve (default 1).
    :param per_page: number of items per page (default 10).
    :param order_by: sort order: latest, oldest or popular.
    :return: a single page of the curated Photo list.
    """
    return self._all(
        "/photos/curated",
        page=page, per_page=per_page, order_by=order_by)
def get(self, photo_id, width=None, height=None, rect=None):
    """Retrieve a single photo.

    Supplying width/height adds a custom photo URL to the 'urls' object.

    :param photo_id: the photo's ID.
    :param width: image width in pixels.
    :param height: image height in pixels.
    :param rect: 'x,y,width,height' of the cropped rectangle.
    :return: the Unsplash Photo model.
    """
    size_params = {
        "w": width,
        "h": height,
        "rect": rect
    }
    response = self._get("/photos/%s" % photo_id, params=size_params)
    return PhotoModel.parse(response)
def search(self, query, category=None, orientation=None, page=1, per_page=10):
    """Get a single page of photo search results.

    :param query: search terms.
    :param category: category ID(s) to filter search; comma-separated
        if multiple (deprecated).
    :param orientation: landscape, portrait or squarish.
    :param page: page number to retrieve (default 1).
    :param per_page: number of items per page (default 10).
    :return: a single page of matching Photo models.
    :raise Exception: if `orientation` is not a valid value.
    """
    if orientation and orientation not in self.orientation_values:
        # Provide context instead of a bare, message-less Exception().
        raise Exception(
            "Invalid orientation %r; expected one of %s"
            % (orientation, self.orientation_values))
    params = {
        "query": query,
        "category": category,
        "orientation": orientation,
        "page": page,
        "per_page": per_page
    }
    url = "/photos/search"
    result = self._get(url, params=params)
    return PhotoModel.parse_list(result)
def random(self, count=1, **kwargs):
    """Retrieve random photo(s), given optional filters.

    When a count parameter is supplied - and only then - the response
    is an array of photos, even when count is 1. The collections and
    query filters cannot be combined in the same request.

    :param count: number of photos to return (default 1; max 30).
    :param category: category ID(s) to filter selection (deprecated).
    :param collections: public collection ID(s) to filter selection.
    :param featured: limit selection to featured photos.
    :param username: limit selection to a single user.
    :param query: limit selection to photos matching a search term.
    :param w: image width in pixels.
    :param h: image height in pixels.
    :param orientation: landscape, portrait or squarish.
    :return: list of Photo models.
    :raise Exception: if `orientation` is not a valid value.
    """
    kwargs.update({"count": count})
    orientation = kwargs.get("orientation", None)
    if orientation and orientation not in self.orientation_values:
        # Provide context instead of a bare, message-less Exception().
        raise Exception(
            "Invalid orientation %r; expected one of %s"
            % (orientation, self.orientation_values))
    url = "/photos/random"
    result = self._get(url, params=kwargs)
    return PhotoModel.parse_list(result)
def stats(self, photo_id):
    """Retrieve a single photo's stats.

    :param photo_id: the photo's ID.
    :return: the Unsplash Stat model.
    """
    endpoint = "/photos/%s/stats" % photo_id
    response = self._get(endpoint)
    return StatModel.parse(response)
def like(self, photo_id):
    """Like a photo on behalf of the logged-in user.

    Requires the 'write_likes' scope. Idempotent: sending the request
    multiple times for one photo has no additional effect.

    :param photo_id: the photo's ID.
    :return: the Unsplash Photo model.
    """
    endpoint = "/photos/%s/like" % photo_id
    response = self._post(endpoint)
    return PhotoModel.parse(response)
def unlike(self, photo_id):
    """Remove the logged-in user's like of a photo.

    Idempotent: sending the request multiple times for one photo has
    no additional effect.

    :param photo_id: the photo's ID.
    :return: the Unsplash Photo model.
    """
    endpoint = "/photos/%s/like" % photo_id
    response = self._delete(endpoint)
    return PhotoModel.parse(response)
def photos(self, query, page=1, per_page=10):
    """Get a single page of photo results for a query.

    :param query: search terms.
    :param page: page number to retrieve (default 1).
    :param per_page: number of items per page (default 10).
    :return: dict with 'total', 'total_pages' and parsed 'results'.
    """
    payload = self._search(
        "/search/photos", query, page=page, per_page=per_page)
    payload["results"] = PhotoModel.parse_list(payload.get("results"))
    return payload
def collections(self, query, page=1, per_page=10):
    """Get a single page of collection results for a query.

    :param query: search terms.
    :param page: page number to retrieve (default 1).
    :param per_page: number of items per page (default 10).
    :return: dict with 'total', 'total_pages' and parsed 'results'.
    """
    payload = self._search(
        "/search/collections", query, page=page, per_page=per_page)
    payload["results"] = CollectionModel.parse_list(payload.get("results"))
    return payload
def users(self, query, page=1, per_page=10):
    """Get a single page of user results for a query.

    :param query: search terms.
    :param page: page number to retrieve (default 1).
    :param per_page: number of items per page (default 10).
    :return: dict with 'total', 'total_pages' and parsed 'results'.
    """
    payload = self._search(
        "/search/users", query, page=page, per_page=per_page)
    payload["results"] = UserModel.parse_list(payload.get("results"))
    return payload
def all(self, page=1, per_page=10):
    """Get a single page from the list of all collections.

    :param page: page number to retrieve (default 1).
    :param per_page: number of items per page (default 10).
    :return: a single page of the Collection list.
    """
    response = self._all("/collections", page=page, per_page=per_page)
    return CollectionModel.parse_list(response)
def get(self, collection_id):
    """Retrieve a single collection.

    Viewing a user's private collections requires the
    'read_collections' scope.

    :param collection_id: the collection's ID.
    :return: the Unsplash Collection model.
    """
    endpoint = "/collections/%s" % collection_id
    return CollectionModel.parse(self._get(endpoint))
def get_curated(self, collection_id):
    """Retrieve a single curated collection.

    Viewing a user's private collections requires the
    'read_collections' scope.

    :param collection_id: the collection's ID.
    :return: the Unsplash Collection model.
    """
    endpoint = "/collections/curated/%s" % collection_id
    return CollectionModel.parse(self._get(endpoint))
def photos(self, collection_id, page=1, per_page=10):
    """Retrieve a page of a collection's photos.

    :param collection_id: the collection's ID.
    :param page: page number to retrieve (default 1).
    :param per_page: number of items per page (default 10).
    :return: a single page of the Photo list.
    """
    endpoint = "/collections/%s/photos" % collection_id
    response = self._all(endpoint, page=page, per_page=per_page)
    return PhotoModel.parse_list(response)
def related(self, collection_id):
    """Retrieve the collections related to this one.

    :param collection_id: the collection's ID.
    :return: a single page of the Collection list.
    """
    endpoint = "/collections/%s/related" % collection_id
    return CollectionModel.parse_list(self._get(endpoint))
def create(self, title, description=None, private=False):
    """Create a new collection.

    Requires the 'write_collections' scope.

    :param title: title of the collection.
    :param description: optional description.
    :param private: whether to make the collection private.
    :return: the Unsplash Collection model.
    """
    payload = {
        "title": title,
        "description": description,
        "private": private
    }
    response = self._post("/collections", data=payload)
    return CollectionModel.parse(response)
def update(self, collection_id, title=None, description=None, private=False):
    """Update an existing collection belonging to the logged-in user.

    Requires the 'write_collections' scope.
    NOTE(review): `private` is always sent with its default False, so
    omitting it appears to make the collection public — confirm this is
    the intended API behavior.

    :param collection_id: the collection's ID.
    :param title: new title of the collection.
    :param description: new description.
    :param private: whether the collection is private.
    :return: the Unsplash Collection model.
    """
    payload = {
        "title": title,
        "description": description,
        "private": private
    }
    endpoint = "/collections/%s" % collection_id
    response = self._put(endpoint, data=payload)
    return CollectionModel.parse(response)
def add_photo(self, collection_id, photo_id):
    """Add a photo to one of the logged-in user's collections.

    Requires the 'write_collections' scope. Adding a photo that is
    already in the collection has no effect.

    :param collection_id: the collection's ID.
    :param photo_id: the photo's ID.
    :return: (Collection, Photo) tuple.
    """
    endpoint = "/collections/%s/add" % collection_id
    payload = {
        "collection_id": collection_id,
        "photo_id": photo_id
    }
    response = self._post(endpoint, data=payload) or {}
    return (CollectionModel.parse(response.get("collection")),
            PhotoModel.parse(response.get("photo")))
def remove_photo(self, collection_id, photo_id):
    """Remove a photo from one of the logged-in user's collections.

    Requires the 'write_collections' scope.

    :param collection_id: the collection's ID.
    :param photo_id: the photo's ID.
    :return: (Collection, Photo) tuple.
    """
    endpoint = "/collections/%s/remove" % collection_id
    payload = {
        "collection_id": collection_id,
        "photo_id": photo_id
    }
    response = self._delete(endpoint, data=payload) or {}
    return (CollectionModel.parse(response.get("collection")),
            PhotoModel.parse(response.get("photo")))
def total(self):
    """Get a list of counts for all of Unsplash.

    :return: the Unsplash Stat model.
    """
    response = self._get("/stats/total")
    return StatModel.parse(response)
def month(self):
    """Get the overall Unsplash stats for the past 30 days.

    :return: the Unsplash Stat model.
    """
    response = self._get("/stats/month")
    return StatModel.parse(response)
def get_access_token(self, code):
    """Exchange an authorization code for an access token.

    :param code: the authorization code supplied to the callback by
        Unsplash.
    :return: the access token string.
    """
    self.token = self.oauth.fetch_token(
        token_url=self.access_token_url,
        client_id=self.client_id,
        client_secret=self.client_secret,
        scope=self.scope,
        code=code)
    return self.token.get("access_token")
def refresh_token(self):
    """Refresh the current (expired) access token in place."""
    self.token = self.oauth.refresh_token(
        self.access_token_url,
        refresh_token=self.get_refresh_token())
    self.access_token = self.token.get("access_token")
def parse_list(cls, data):
    """Parse a list of JSON objects into a ResultSet of model instances.

    Falsy entries (and a falsy `data`) are skipped entirely.
    """
    parsed = ResultSet()
    for item in (data or []):
        if item:
            parsed.append(cls.parse(item))
    return parsed
def get_auth_header(self):
    """Build the Authorization header for the current auth mode.

    Bearer token when a user is authenticated, Client-ID otherwise.

    :return: dict with a single 'Authorization' entry.
    """
    api = self.api
    if api.is_authenticated:
        header_value = "Bearer %s" % api.access_token
    else:
        header_value = "Client-ID %s" % api.client_id
    return {"Authorization": header_value}
def me(self):
    """Get the currently-logged in user.

    Accessing private data requires the 'read_user' scope (otherwise a
    403 Forbidden); a Client-ID-only token yields 401 Unauthorized.

    :return: the Unsplash User model.
    """
    response = self._get("/me")
    return UserModel.parse(response)
def update(self, **kwargs):
    """Update the currently-logged in user.

    Requires the 'write_user' scope (403 Forbidden otherwise). All
    parameters are optional keyword arguments: username, first_name,
    last_name, email, url, location, bio, instagram_username.

    :return: the Unsplash User model.
    """
    response = self._put("/me", data=kwargs)
    return UserModel.parse(response)
def get(self, username, width=None, height=None):
    """Retrieve public details on a given user.

    Supplying width/height adds a 'custom' URL to the 'profile_image'
    object.

    :param username: the user's username.
    :param width: profile image width in pixels.
    :param height: profile image height in pixels.
    :return: the Unsplash User model.
    """
    size_params = {
        "w": width,
        "h": height
    }
    endpoint = "/users/{username}".format(username=username)
    return UserModel.parse(self._get(endpoint, params=size_params))
def portfolio(self, username):
    """Retrieve a single user's portfolio link.

    :param username: the user's username.
    :return: the Unsplash Link model.
    """
    endpoint = "/users/{username}/portfolio".format(username=username)
    return LinkModel.parse(self._get(endpoint))
def photos(self, username, page=1, per_page=10, order_by="latest"):
    """Get a page of photos uploaded by a user.

    :param username: the user's username.
    :param page: page number to retrieve (default 1).
    :param per_page: number of items per page (default 10).
    :param order_by: sort order: latest, oldest or popular.
    :return: a single page of the Photo list.
    """
    endpoint = "/users/{username}/photos".format(username=username)
    response = self._photos(
        endpoint, username,
        page=page, per_page=per_page, order_by=order_by)
    return PhotoModel.parse_list(response)
def collections(self, username, page=1, per_page=10):
    """Get a page of collections created by the user.

    :param username: the user's username.
    :param page: page number to retrieve (default 1).
    :param per_page: number of items per page (default 10).
    :return: a single page of the Collection list.
    """
    endpoint = "/users/{username}/collections".format(username=username)
    paging = {
        "page": page,
        "per_page": per_page
    }
    response = self._get(endpoint, params=paging)
    return CollectionModel.parse_list(response)
def wrap(self, width):
    """
    Return the string partitioned into chunks of at most `width`
    terminal columns, closing and re-opening ANSI escape state at
    chunk boundaries so each chunk renders correctly on its own.
    """
    chunks = []
    buffer = []
    active = set()
    consumed = 0
    for char, char_width, style in zip(self._string, self._width,
                                       self._state):
        if consumed + char_width > width:
            # Current chunk is full: close any open ANSI state and
            # start a fresh chunk.
            if active:
                buffer.append(self.ANSI_RESET)
            chunks.append("".join(buffer))
            buffer = []
            active = set()
            consumed = 0
        consumed += char_width
        if active == style:
            pass
        elif active <= style:
            # Strict superset: only emit the newly-added codes.
            buffer.extend(style - active)
        else:
            # Some codes were dropped: reset and re-emit the new state.
            buffer.append(self.ANSI_RESET)
            buffer.extend(style)
        active = style
        buffer.append(char)
    if active:
        buffer.append(self.ANSI_RESET)
    if buffer:
        chunks.append("".join(buffer))
    return chunks
2.506304
2.435035
1.029268
def max_table_width(self):
    """
    get/set the maximum width of the table.

    The width of the table is guaranteed to not exceed this value. If
    it is not possible to print a given table with the width provided,
    this value will automatically adjust.
    """
    # Smallest printable width: both borders, all separators, plus at
    # least one character of content per column.  NOTE(review): this
    # getter mutates self._max_table_width to enforce that floor.
    floor = (termwidth(self.left_border_char)
             + termwidth(self.right_border_char)
             + (self._column_count - 1)
             * termwidth(self.column_separator_char))
    self._max_table_width = max(self._max_table_width,
                                floor + self._column_count)
    return self._max_table_width
3.389705
3.485853
0.972417
def _initialize_table(self, column_count):
    """
    Set the column count of the table.

    This method is called to set the number of columns for the
    first time.

    Parameters
    ----------
    column_count : int
        number of columns in the table
    """
    self._column_count = column_count
    self._column_headers = HeaderData(self, [''] * column_count)
    self._column_alignments = AlignmentMetaData(
        self, [self.default_alignment] * column_count)
    self._column_widths = PositiveIntegerMetaData(
        self, [0] * column_count)
    # Both padding containers are seeded from the same default list,
    # exactly as the original initialization did.
    padding = [self.default_padding] * column_count
    self._left_padding_widths = PositiveIntegerMetaData(self, padding)
    self._right_padding_widths = PositiveIntegerMetaData(self, padding)
2.684822
2.811851
0.954824
def set_style(self, style):
    """
    Set the style of the table from a predefined set of styles.

    Parameters
    ----------
    style: Style
        It can be one of the following:

        * beautifulTable.STYLE_DEFAULT
        * beautifultable.STYLE_NONE
        * beautifulTable.STYLE_DOTTED
        * beautifulTable.STYLE_MYSQL
        * beautifulTable.STYLE_SEPARATED
        * beautifulTable.STYLE_COMPACT
        * beautifulTable.STYLE_MARKDOWN
        * beautifulTable.STYLE_RESTRUCTURED_TEXT
        * beautifultable.STYLE_BOX
        * beautifultable.STYLE_BOX_DOUBLED
        * beautifultable.STYLE_BOX_ROUNDED
        * beautifultable.STYLE_GRID
    """
    if not isinstance(style, enums.Style):
        allowed = ("{}.{}".format(type(self).__name__, i.name)
                   for i in enums.Style)
        error_msg = ("allowed values for style are: "
                     + ', '.join(allowed))
        raise ValueError(error_msg)
    template = style.value
    # Copy every drawing character from the style template onto the
    # table, one attribute at a time.
    for attr in ('left_border_char', 'right_border_char',
                 'top_border_char', 'bottom_border_char',
                 'header_separator_char', 'column_separator_char',
                 'row_separator_char',
                 'intersect_top_left', 'intersect_top_mid',
                 'intersect_top_right',
                 'intersect_header_left', 'intersect_header_mid',
                 'intersect_header_right',
                 'intersect_row_left', 'intersect_row_mid',
                 'intersect_row_right',
                 'intersect_bottom_left', 'intersect_bottom_mid',
                 'intersect_bottom_right'):
        setattr(self, attr, getattr(template, attr))
1.62127
1.650234
0.982448
def _calculate_column_widths(self):
    """
    Calculate width of column automatically based on data.

    Columns whose natural width fits within their fair share of
    `_max_table_width` keep it; wider columns are shrunk
    proportionally to the remaining space, and any rounding leftover
    is redistributed among the shrunk columns.
    """
    table_width = self.get_table_width()
    lpw, rpw = self._left_padding_widths, self._right_padding_widths
    pad_widths = [(lpw[i] + rpw[i]) for i in range(self._column_count)]
    max_widths = [0 for index in range(self._column_count)]
    # `offset` is everything that is not cell content: borders,
    # separators and padding.
    offset = table_width - sum(self._column_widths) + sum(pad_widths)
    self._max_table_width = max(self._max_table_width,
                                offset + self._column_count)

    # Natural width per column: the widest rendered line found in any
    # cell or in the header.
    for index, column in enumerate(zip(*self._table)):
        max_length = 0
        for i in column:
            for j in to_unicode(i).split('\n'):
                output_str = get_output_str(j, self.detect_numerics,
                                            self.numeric_precision,
                                            self.sign_mode.value)
                max_length = max(max_length, termwidth(output_str))
        for i in to_unicode(self._column_headers[index]).split('\n'):
            output_str = get_output_str(i, self.detect_numerics,
                                        self.numeric_precision,
                                        self.sign_mode.value)
            max_length = max(max_length, termwidth(output_str))
        max_widths[index] += max_length

    sum_ = sum(max_widths)
    desired_sum = self._max_table_width - offset

    # Set flag for columns who are within their fair share
    temp_sum = 0
    flag = [0] * len(max_widths)
    for i, width in enumerate(max_widths):
        if width <= int(desired_sum / self._column_count):
            temp_sum += width
            flag[i] = 1
        else:
            # Allocate atleast 1 character width to the column
            temp_sum += 1

    avail_space = desired_sum - temp_sum
    actual_space = sum_ - temp_sum
    shrinked_columns = {}

    # Columns which exceed their fair share should be shrinked based on
    # how much space is left for the table
    for i, width in enumerate(max_widths):
        self.column_widths[i] = width
        if not flag[i]:
            new_width = 1 + int((width - 1) * avail_space / actual_space)
            if new_width < width:
                self.column_widths[i] = new_width
                shrinked_columns[new_width] = i

    # Divide any remaining space among shrinked columns
    if shrinked_columns:
        extra = (self._max_table_width
                 - offset - sum(self.column_widths))
        actual_space = sum(shrinked_columns)
        if extra > 0:
            for i, width in enumerate(sorted(shrinked_columns)):
                index = shrinked_columns[width]
                extra_width = int(width * extra / actual_space)
                # FIX: the extra width belongs to the shrunk column
                # itself (`index`); the original indexed by `i`, which
                # is only the position in the sorted iteration order.
                self.column_widths[index] += extra_width
                if i == (len(shrinked_columns) - 1):
                    # Last shrunk column absorbs the rounding leftover
                    # so the table exactly fills _max_table_width.
                    extra = (self._max_table_width
                             - offset - sum(self.column_widths))
                    self.column_widths[index] += extra

    for i in range(self.column_count):
        self.column_widths[i] += pad_widths[i]
2.996078
2.976904
1.006441
def sort(self, key, reverse=False):
    """
    Stable sort of the table *IN-PLACE* with respect to a column.

    Parameters
    ----------
    key: int, str
        index or header of the column. Normal list rules apply.
    reverse : bool
        If `True` then table is sorted as if each comparison was
        reversed.
    """
    if isinstance(key, int):
        column_index = key
    elif isinstance(key, basestring):
        column_index = self.get_column_index(key)
    else:
        raise TypeError("'key' must either be 'int' or 'str'")
    self._table.sort(key=lambda row: row[column_index], reverse=reverse)
2.766126
2.770911
0.998273
def get_column_index(self, header):
    """
    Get index of a column from it's header.

    Parameters
    ----------
    header: str
        header of the column.

    Raises
    ------
    KeyError:
        If no column could be found corresponding to `header`.
    """
    try:
        return self._column_headers.index(header)
    except ValueError:
        # Re-raise as a KeyError with the original cause suppressed.
        raise_suppressed(KeyError(("'{}' is not a header for any "
                                   "column").format(header)))
5.565202
5.879187
0.946594
def get_column(self, key):
    """
    Return an iterator to a column.

    Parameters
    ----------
    key : int, str
        index of the column, or the header of the column.
        If index is specified, then normal list rules apply.

    Raises
    ------
    TypeError:
        If key is not of type `int`, or `str`.

    Returns
    -------
    iter:
        Iterator to the specified column.
    """
    if isinstance(key, int):
        column_index = key
    elif isinstance(key, basestring):
        column_index = self.get_column_index(key)
    else:
        raise TypeError(("key must be an int or str, "
                         "not {}").format(type(key).__name__))
    # Lazily yield the selected cell of each row.
    return (row[column_index] for row in self._table)
2.91275
2.879068
1.011699
if isinstance(index, int): pass elif isinstance(index, basestring): index = self.get_column_index(index) else: raise TypeError(("column index must be an integer or a string, " "not {}").format(type(index).__name__)) if self._column_count == 0: raise IndexError("pop from empty table") if self._column_count == 1: # This is the last column. So we should clear the table to avoid # empty rows self.clear(clear_metadata=True) else: # Not the last column. safe to pop from row self._column_count -= 1 self._column_alignments._pop(index) self._column_widths._pop(index) self._left_padding_widths._pop(index) self._right_padding_widths._pop(index) self._column_headers._pop(index) for row in self._table: row._pop(index)
def pop_column(self, index=-1)
Remove and return row at index (default last). Parameters ---------- index : int, str index of the column, or the header of the column. If index is specified, then normal list rules apply. Raises ------ TypeError: If index is not an instance of `int`, or `str`. IndexError: If Table is empty.
3.043366
3.136164
0.97041
def insert_row(self, index, row):
    """
    Insert a row before index in the table.

    Parameters
    ----------
    index : int
        List index rules apply

    row : iterable
        Any iterable of appropriate length.

    Raises
    ------
    TypeError:
        If `row` is not an iterable.

    ValueError:
        If size of `row` is inconsistent with the current number
        of columns.
    """
    validated = self._validate_row(row)
    self._table.insert(index, RowData(self, validated))
4.917278
5.927687
0.829544
def update_row(self, key, value):
    """
    Update one or more rows of the table.

    Parameters
    ----------
    key : int or slice
        index of the row, or a slice object.

    value : iterable
        If an index is specified, `value` should be an iterable
        of appropriate length. Instead if a slice object is
        passed as key, value should be an iterable of rows.

    Raises
    ------
    IndexError:
        If index specified is out of range.

    TypeError:
        If `value` is of incorrect type.

    ValueError:
        If length of row does not matches number of columns.
    """
    if isinstance(key, slice):
        # Replace a range of rows; each one is validated (the table is
        # initialized from the first row if needed).
        replacement = []
        for raw in value:
            checked = self._validate_row(raw, init_table_if_required=True)
            replacement.append(RowData(self, checked))
        self._table[key] = replacement
    elif isinstance(key, int):
        checked = self._validate_row(value, init_table_if_required=False)
        self._table[key] = RowData(self, checked)
    else:
        raise TypeError("key must be an integer or a slice object")
2.871011
2.793744
1.027657
def update_column(self, header, column):
    """
    Update a column named `header` in the table.

    If length of column is smaller than number of rows, lets say `k`,
    only the first `k` values in the column is updated.

    Parameters
    ----------
    header : str
        Header of the column

    column : iterable
        Any iterable of appropriate length.

    Raises
    ------
    TypeError:
        If `header` is not of type `str`.

    KeyError:
        If no column exists with title `header`.
    """
    # FIX: validate the type of `header` BEFORE the index lookup, so a
    # non-string header raises the documented TypeError instead of
    # whatever get_column_index happens to raise for it.
    if not isinstance(header, basestring):
        raise TypeError("header must be of type str")
    index = self.get_column_index(header)
    # zip stops at the shorter of (rows, column), so a short column
    # updates only the leading rows, as documented.
    for row, new_item in zip(self._table, column):
        row[index] = new_item
3.798776
4.248832
0.894075
def insert_column(self, index, header, column):
    """
    Insert a column before `index` in the table.

    If length of column is bigger than number of rows, lets say `k`,
    only the first `k` values of `column` is considered.
    If column is shorter than 'k', ValueError is raised.

    Note that Table remains in consistent state even if column
    is too short. Any changes made by this method is rolled back
    before raising the exception.

    Parameters
    ----------
    index : int
        List index rules apply.

    header : str
        Title of the column.

    column : iterable
        Any iterable of appropriate length.

    Raises
    ------
    TypeError:
        If `header` is not of type `str`.

    ValueError:
        If length of `column` is shorter than number of rows.
    """
    if self._column_count == 0:
        # Table has no columns yet: this column defines the table
        # shape, one row per element of `column`.
        self.column_headers = HeaderData(self, [header])
        self._table = [RowData(self, [i]) for i in column]
    else:
        if not isinstance(header, basestring):
            raise TypeError("header must be of type str")
        column_length = 0
        # zip stops at the shorter of (rows, column); column_length
        # records the index of the last row actually modified so a
        # partial insert can be rolled back below.
        for i, (row, new_item) in enumerate(zip(self._table, column)):
            row._insert(index, new_item)
            column_length = i
        if column_length == len(self._table) - 1:
            # Every row received a value: commit the per-column
            # metadata for the new column.
            self._column_count += 1
            self._column_headers._insert(index, header)
            self._column_alignments._insert(index, self.default_alignment)
            self._column_widths._insert(index, 0)
            self._left_padding_widths._insert(index, self.default_padding)
            self._right_padding_widths._insert(index, self.default_padding)
        else:
            # Roll back changes so that table remains in consistent state
            for j in range(column_length, -1, -1):
                self._table[j]._pop(index)
            raise ValueError(("length of 'column' should be atleast {}, "
                              "got {}").format(len(self._table),
                                               column_length + 1))
3.031164
2.906493
1.042894
self.insert_column(self._column_count, header, column)
def append_column(self, header, column)
Append a column to end of the table. Parameters ---------- header : str Title of the column column : iterable Any iterable of appropriate length.
5.962851
7.82809
0.761725
def _get_horizontal_line(self, char, intersect_left,
                         intersect_mid, intersect_right):
    """
    Get a horizontal line for the table.

    Internal method used to actually get all horizontal lines in the
    table. Column width should be set prior to calling this method.
    This method detects intersection and handles it according to the
    values of `intersect_*_*` attributes.

    Parameters
    ----------
    char : str
        Character used to draw the line.

    intersect_left : str
        Junction drawn where the line meets the left border.

    intersect_mid : str
        Junction drawn where the line crosses a column separator.

    intersect_right : str
        Junction drawn where the line meets the right border.

    Returns
    -------
    str
        String which will be printed as a horizontal border of
        the table.
    """
    width = self.get_table_width()
    # Repeat `char` enough times to cover the table width, then
    # truncate to exactly `width` cells.
    try:
        line = list(char * (int(width/termwidth(char)) + 1))[:width]
    except ZeroDivisionError:
        # `char` has zero terminal width; fall back to spaces.
        line = [' '] * width
    if len(line) == 0:
        return ''
    # Only if Special Intersection is enabled and horizontal line is
    # visible
    if not char.isspace():
        # If left border is enabled and it is visible
        visible_junc = not intersect_left.isspace()
        if termwidth(self.left_border_char) > 0:
            if not (self.left_border_char.isspace() and visible_junc):
                # Overwrite the leading cells with the left junction.
                length = min(termwidth(self.left_border_char),
                             termwidth(intersect_left))
                for i in range(length):
                    line[i] = intersect_left[i]
        visible_junc = not intersect_right.isspace()
        # If right border is enabled and it is visible
        if termwidth(self.right_border_char) > 0:
            if not (self.right_border_char.isspace() and visible_junc):
                # Overwrite the trailing cells with the right junction.
                length = min(termwidth(self.right_border_char),
                             termwidth(intersect_right))
                for i in range(length):
                    line[-i-1] = intersect_right[-i-1]
        visible_junc = not intersect_mid.isspace()
        # If column separator is enabled and it is visible
        if termwidth(self.column_separator_char):
            if not (self.column_separator_char.isspace() and visible_junc):
                # Walk across the columns, splicing the mid junction in
                # at every separator position.  NOTE(review): the inner
                # loop reuses `i`, shadowing the column counter -- this
                # mirrors the original code; confirm against upstream
                # before refactoring.
                index = termwidth(self.left_border_char)
                for i in range(self._column_count-1):
                    index += (self._column_widths[i])
                    length = min(termwidth(self.column_separator_char),
                                 termwidth(intersect_mid))
                    for i in range(length):
                        line[index+i] = intersect_mid[i]
                    index += termwidth(self.column_separator_char)
    return ''.join(line)
2.489995
2.505764
0.993707
def get_table_width(self):
    """
    Get the width of the table as number of characters.

    Column width should be set prior to calling this method.

    Returns
    -------
    int
        Width of the table as number of characters.
    """
    if self.column_count == 0:
        return 0
    # Content plus every separator between columns plus both borders.
    separators = ((self._column_count - 1)
                  * termwidth(self.column_separator_char))
    borders = (termwidth(self.left_border_char)
               + termwidth(self.right_border_char))
    return sum(self._column_widths) + separators + borders
3.324996
3.415832
0.973408
def get_string(self, recalculate_width=True):
    """
    Get the table as a String.

    Parameters
    ----------
    recalculate_width : bool, optional
        If width for each column should be recalculated(default True).
        Note that width is always calculated if it wasn't set
        explicitly when this method is called for the first time,
        regardless of the value of `recalculate_width`.

    Returns
    -------
    str:
        Table as a string.
    """
    # An empty table renders as an empty string.
    if len(self._table) == 0:
        return ''

    show_serialno = self.serialno and self.column_count > 0
    if show_serialno:
        # Temporarily prepend a serial-number column; it is popped
        # again before returning.
        self.insert_column(0, self.serialno_header,
                           range(1, len(self) + 1))

    # Widths are recomputed on request, and always on the first call
    # (when they are still all zero).
    if recalculate_width or sum(self._column_widths) == 0:
        self._calculate_column_widths()

    rendered = []

    if self.top_border_char:
        rendered.append(self._get_top_border())

    # Print headers if not empty or only spaces
    if ''.join(self._column_headers).strip():
        rendered.append(to_unicode(self._column_headers))
        if self.header_separator_char:
            rendered.append(self._get_header_separator())

    # Rows, with a separator line between consecutive rows.
    for position, row in enumerate(self._table):
        if position > 0 and self.row_separator_char:
            rendered.append(self._get_row_separator())
        rendered.append(to_unicode(row))

    if self.bottom_border_char:
        rendered.append(self._get_bottom_border())

    if show_serialno:
        self.pop_column(0)

    return '\n'.join(rendered)
3.138922
3.050878
1.028858
def _convert_to_numeric(item):
    """
    Helper method to convert a string to float or int if possible.

    If the conversion is not possible, it simply returns the string.
    """
    if PY3:
        num_types = (int, float)
    else:  # pragma: no cover
        num_types = (int, long, float)  # noqa: F821
    # Already numeric: no conversion needed.
    if isinstance(item, num_types):
        return item
    # Try int first so strings like "5" become 5 instead of 5.0; a
    # non-integer string raises ValueError, which sends us to float.
    # TypeError (non-string input) is only caught for the int attempt,
    # matching the original routing.
    try:
        return int(to_unicode(item))
    except TypeError:
        return item
    except ValueError:
        try:
            return float(to_unicode(item))
        except ValueError:
            return item
4.26362
4.263642
0.999995
def get_output_str(item, detect_numerics, precision, sign_value):
    """Return the final string representation used for display."""
    value = _convert_to_numeric(item) if detect_numerics else item
    if isinstance(value, float):
        value = round(value, precision)
    try:
        value = '{:{sign}}'.format(value, sign=sign_value)
    except (ValueError, TypeError):
        # Non-numeric values may not accept a sign format spec; keep
        # the value as-is.
        pass
    return to_unicode(value)
3.001793
2.919426
1.028213