Columns: code (string), signature (string), docstring (string), loss_without_docstring (float64), loss_with_docstring (float64), factor (float64)
keys = [] for dic, _ in self.resolve(): try: cur_keys = dic.keys() except AttributeError: raise ConfigTypeError( u'{0} must be a dict, not {1}'.format( self.name, type(dic).__name__ ) ) for key in cur_keys: if key not in keys: keys.append(key) return keys
def keys(self)
Returns a list containing all the keys available as subviews of the current view. This enumerates all the keys in *all* dictionaries matching the current view, in contrast to ``view.get(dict).keys()``, which gets all the keys for the *first* dict matching the view. If the object for this view in any source is not a dict, then a ConfigTypeError is raised. The keys are ordered according to how they appear in each source.
3.83149
3.202097
1.196556
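A standalone sketch of the key-merging semantics described above, using plain dicts in place of the view's resolved sources (hypothetical data):

sources = [{"a": 1, "b": 2}, {"b": 3, "c": 4}]
keys = []
for dic in sources:
    for key in dic.keys():
        if key not in keys:
            keys.append(key)
print(keys)  # ['a', 'b', 'c'] -- the ordered union across all sources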
for collection, _ in self.resolve(): try: it = iter(collection) except TypeError: raise ConfigTypeError( u'{0} must be an iterable, not {1}'.format( self.name, type(collection).__name__ ) ) for value in it: yield value
def all_contents(self)
Iterates over all subviews from collections at this view from *all* sources. If the object for this view in any source is not iterable, then a ConfigTypeError is raised. This method is intended to be used when the view indicates a list; this method will concatenate the contents of the list from all sources.
4.76103
3.389392
1.404686
od = OrderedDict() for key, view in self.items(): if redact and view.redact: od[key] = REDACTED_TOMBSTONE else: try: od[key] = view.flatten(redact=redact) except ConfigTypeError: od[key] = view.get() return od
def flatten(self, redact=False)
Create a hierarchy of OrderedDicts containing the data from this view, recursively reifying all views to get their represented values. If `redact` is set, then sensitive values are replaced with the string "REDACTED".
3.639234
3.577961
1.017125
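A standalone sketch of the recursive reification that flatten performs, with a plain nested dict standing in for the view hierarchy and redaction ignored:

from collections import OrderedDict

def flatten(d):
    # copy mappings into OrderedDicts recursively; leave leaf values untouched
    od = OrderedDict()
    for key, value in d.items():
        od[key] = flatten(value) if isinstance(value, dict) else value
    return od

print(flatten({"a": {"b": 1}, "c": 2}))
# OrderedDict([('a', OrderedDict([('b', 1)])), ('c', 2)])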
node = super(Dumper, self).represent_list(data) length = len(data) if self.default_flow_style is None and length < 4: node.flow_style = True elif self.default_flow_style is None: node.flow_style = False return node
def represent_list(self, data)
If a list has fewer than 4 items, represent it in inline style (i.e. comma-separated, within square brackets).
2.971108
2.708693
1.096879
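A hedged, self-contained sketch of the flow-style rule above against PyYAML; the add_representer call is an assumption about how the Dumper subclass gets wired up:

import yaml

class Dumper(yaml.Dumper):
    def represent_list(self, data):
        node = super(Dumper, self).represent_list(data)
        if self.default_flow_style is None:
            node.flow_style = len(data) < 4  # inline only for short lists
        return node

Dumper.add_representer(list, Dumper.represent_list)
print(yaml.dump({"short": [1, 2, 3], "long": [1, 2, 3, 4]},
                Dumper=Dumper, default_flow_style=None))
# "short" comes out as [1, 2, 3]; "long" falls back to block style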
if data: value = u'yes' else: value = u'no' return self.represent_scalar('tag:yaml.org,2002:bool', value)
def represent_bool(self, data)
Represent bool as 'yes' or 'no' instead of 'true' or 'false'.
2.407817
2.130749
1.130033
filename = self.user_config_path() if os.path.isfile(filename): self.add(ConfigSource(load_yaml(filename) or {}, filename))
def _add_user_source(self)
Add the configuration options from the YAML file in the user's configuration directory (given by `config_dir`) if it exists.
7.232559
6.194966
1.16749
if self.modname: if self._package_path: filename = os.path.join(self._package_path, DEFAULT_FILENAME) if os.path.isfile(filename): self.add(ConfigSource(load_yaml(filename), filename, True))
def _add_default_source(self)
Add the package's default configuration settings. This looks for a YAML file located inside the package for the module `modname` if it was given.
5.676373
4.004391
1.417537
if user: self._add_user_source() if defaults: self._add_default_source()
def read(self, user=True, defaults=True)
Find and read the files for this configuration and set them as the sources for this configuration. To disable either discovered user configuration files or the in-package defaults, set `user` or `defaults` to `False`.
4.651022
3.62672
1.282432
# If environment variable is set, use it. if self._env_var in os.environ: appdir = os.environ[self._env_var] appdir = os.path.abspath(os.path.expanduser(appdir)) if os.path.isfile(appdir): raise ConfigError(u'{0} must be a directory'.format( self._env_var )) else: # Search platform-specific locations. If no config file is # found, fall back to the first directory in the list. configdirs = config_dirs() for confdir in configdirs: appdir = os.path.join(confdir, self.appname) if os.path.isfile(os.path.join(appdir, CONFIG_FILENAME)): break else: appdir = os.path.join(configdirs[0], self.appname) # Ensure that the directory exists. if not os.path.isdir(appdir): os.makedirs(appdir) return appdir
def config_dir(self)
Get the path to the user configuration directory. The directory is guaranteed to exist as a postcondition (one may be created if none exist). If the application's ``...DIR`` environment variable is set, it is used as the configuration directory. Otherwise, platform-specific standard configuration locations are searched for a ``config.yaml`` file. If no configuration file is found, a fallback path is used.
2.494806
2.427572
1.027696
filename = os.path.abspath(filename) self.set(ConfigSource(load_yaml(filename), filename))
def set_file(self, filename)
Parses the file as YAML and inserts it into the configuration sources with highest priority.
9.356257
6.495203
1.440487
if full: out_dict = self.flatten(redact=redact) else: # Exclude defaults when flattening. sources = [s for s in self.sources if not s.default] temp_root = RootView(sources) temp_root.redactions = self.redactions out_dict = temp_root.flatten(redact=redact) yaml_out = yaml.dump(out_dict, Dumper=Dumper, default_flow_style=None, indent=4, width=1000) # Restore comments to the YAML text. default_source = None for source in self.sources: if source.default: default_source = source break if default_source and default_source.filename: with open(default_source.filename, 'rb') as fp: default_data = fp.read() yaml_out = restore_yaml_comments(yaml_out, default_data.decode('utf-8')) return yaml_out
def dump(self, full=True, redact=False)
Dump the Configuration object to a YAML string. The order of the keys is determined from the default configuration file. All keys not in the default configuration will be appended to the end of the output. :param full: Dump settings that don't differ from the defaults as well :param redact: Remove sensitive information (views with the `redact` flag set) from the output :returns: The YAML string
3.184204
3.28624
0.96895
super(LazyConfig, self).clear() self._lazy_suffix = [] self._lazy_prefix = []
def clear(self)
Remove all sources from this configuration.
9.562399
7.787993
1.227839
if view.exists(): value, _ = view.first() return self.convert(value, view) elif self.default is REQUIRED: # Missing required value. This is an error. raise NotFoundError(u"{0} not found".format(view.name)) else: # Missing value, but not required. return self.default
def value(self, view, template=None)
Get the value for a `ConfigView`. May raise a `NotFoundError` if the value is missing (and the template requires it) or a `ConfigValueError` for invalid values.
5.551481
5.360363
1.035654
exc_class = ConfigTypeError if type_error else ConfigValueError raise exc_class( u'{0}: {1}'.format(view.name, message) )
def fail(self, message, view, type_error=False)
Raise an exception indicating that a value cannot be accepted. `type_error` indicates whether the error is due to a type mismatch rather than a malformed value. In this case, a more specific exception is raised.
5.134985
5.922107
0.867088
if isinstance(value, int): return value elif isinstance(value, float): return int(value) else: self.fail(u'must be a number', view, True)
def convert(self, value, view)
Check that the value is an integer. Floats are truncated toward zero with ``int()``.
3.728527
3.008184
1.239461
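A standalone sketch of the conversion rule above; note that int() truncates toward zero rather than rounding:

def convert(value):
    if isinstance(value, int):
        return value
    elif isinstance(value, float):
        return int(value)
    raise TypeError('must be a number')

print(convert(3), convert(3.9), convert(-3.9))  # 3 3 -3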
if isinstance(value, NUMERIC_TYPES): return value else: self.fail( u'must be numeric, not {0}'.format(type(value).__name__), view, True )
def convert(self, value, view)
Check that the value is an int or a float.
5.057458
3.998283
1.264907
out = AttrDict() for key, typ in self.subtemplates.items(): out[key] = typ.value(view[key], self) return out
def value(self, view, template=None)
Get a dict with the same keys as the template and values validated according to the value types.
7.217735
5.93225
1.216694
out = [] for item in view: out.append(self.subtemplate.value(item, self)) return out
def value(self, view, template=None)
Get a list of items validated against the template.
6.501703
5.480637
1.186304
if isinstance(value, BASESTRING): if self.pattern and not self.regex.match(value): self.fail( u"must match the pattern {0}".format(self.pattern), view ) return value else: self.fail(u'must be a string', view, True)
def convert(self, value, view)
Check that the value is a string and matches the pattern.
4.191995
3.533126
1.186483
if (SUPPORTS_ENUM and isinstance(self.choices, type) and issubclass(self.choices, enum.Enum)): try: return self.choices(value) except ValueError: self.fail( u'must be one of {0!r}, not {1!r}'.format( [c.value for c in self.choices], value ), view ) if value not in self.choices: self.fail( u'must be one of {0!r}, not {1!r}'.format( list(self.choices), value ), view ) if isinstance(self.choices, abc.Mapping): return self.choices[value] else: return value
def convert(self, value, view)
Ensure that the value is among the choices (and remap if the choices are a mapping).
2.445972
2.320604
1.054024
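A standalone sketch of the enum branch of the choice check above, using a hypothetical Color enum to show both the remapping and the failure mode:

import enum

class Color(enum.Enum):
    RED = 'red'
    BLUE = 'blue'

print(Color('red'))  # Color.RED -- a valid raw value maps to the member
try:
    Color('green')
except ValueError:
    print([c.value for c in Color])  # ['red', 'blue'] -- the allowed choices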
is_mapping = isinstance(self.template, MappingTemplate) for candidate in self.allowed: try: if is_mapping: if isinstance(candidate, Filename) and \ candidate.relative_to: next_template = candidate.template_with_relatives( view, self.template ) next_template.subtemplates[view.key] = as_template( candidate ) else: next_template = MappingTemplate({view.key: candidate}) return view.parent.get(next_template)[view.key] else: return view.get(candidate) except ConfigTemplateError: raise except ConfigError: pass except ValueError as exc: raise ConfigTemplateError(exc) self.fail( u'must be one of {0}, not {1}'.format( repr(self.allowed), repr(value) ), view )
def convert(self, value, view)
Ensure that the value follows at least one template.
4.954877
4.783543
1.035818
path_eggs = [p for p in sys.path if p.endswith('.egg')] command = self.get_finalized_command("egg_info") egg_base = path.abspath(command.egg_base) unique_path_eggs = set(path_eggs + [egg_base]) os.environ['PYTHONPATH'] = ':'.join(unique_path_eggs)
def export_live_eggs(self, env=False)
Add all of the eggs in the current environment to PYTHONPATH.
3.810724
3.285683
1.159797
''' Return the JavaScript runtime that is specified in EXECJS_RUNTIME environment variable. If EXECJS_RUNTIME environment variable is empty or invalid, return None. ''' name = os.environ.get("EXECJS_RUNTIME", "") if not name: return None try: return _find_runtime_by_name(name) except exceptions.RuntimeUnavailableError: return None
def get_from_environment()
Return the JavaScript runtime that is specified in EXECJS_RUNTIME environment variable. If EXECJS_RUNTIME environment variable is empty or invalid, return None.
5.440661
2.667482
2.039624
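A standalone sketch of the environment-variable lookup pattern above, with a hypothetical registry standing in for _find_runtime_by_name:

import os

RUNTIMES = {'Node': 'node'}  # hypothetical name -> runtime mapping

def get_from_environment():
    name = os.environ.get('EXECJS_RUNTIME', '')
    if not name:
        return None
    return RUNTIMES.get(name)  # None for unknown names, like the real lookup

os.environ['EXECJS_RUNTIME'] = 'Node'
print(get_from_environment())  # 'node'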
codepoint_format = '\\u{0:04x}'.format def codepoint(m): return codepoint_format(ord(m.group(0))) return re.sub('[^\x00-\x7f]', codepoint, str)
def encode_unicode_codepoints(str)
>>> encode_unicode_codepoints("a") == 'a' True >>> ascii = ''.join(chr(i) for i in range(0x80)) >>> encode_unicode_codepoints(ascii) == ascii True >>> encode_unicode_codepoints('\u4e16\u754c') == '\\u4e16\\u754c' True
4.486597
6.003788
0.747294
if isinstance(s, six.text_type): return s return s.decode(sys.getfilesystemencoding())
def _decode_if_not_text(s)
protected
3.395787
3.49091
0.972751
pathlist = _decode_if_not_text(os.environ.get('PATH', '')).split(os.pathsep) for dir in pathlist: for ext in pathext: filename = os.path.join(dir, prog + ext) try: st = os.stat(filename) except os.error: continue if stat.S_ISREG(st.st_mode) and (stat.S_IMODE(st.st_mode) & 0o111): return filename return None
def _find_executable(prog, pathext=("",))
protected
2.256981
2.20449
1.023811
if isinstance(command, str): command = [command] command = list(command) name = command[0] args = command[1:] if _is_windows(): pathext = _decode_if_not_text(os.environ.get("PATHEXT", "")) path = _find_executable(name, pathext.split(os.pathsep)) else: path = _find_executable(name) if not path: return None return [path] + args
def _which(command)
protected
2.942784
2.992347
0.983437
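For comparison, the standard library's shutil.which performs the same search over PATH (and, on Windows, PATHEXT) as _find_executable and _which above:

import shutil

# returns the full path to the executable, or None if it is not on PATH
print(shutil.which('python'))  # e.g. '/usr/bin/python'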
row = iter(row) col = 0 start_col = col + 1 cell = '' first_cell = True while True: char = next(row, None) col += 1 if char == '|': if first_cell: # First cell (content before the first |) is skipped first_cell = False else: yield (cell, start_col) cell = '' start_col = col + 1 elif char == '\\': char = next(row) col += 1 if char == 'n': cell += '\n' else: if char not in ['|', '\\']: cell += '\\' cell += char elif char: cell += char else: break
def split_table_cells(self, row)
An iterator returning all the table cells in a row with their positions, accounting for escaping.
3.065722
2.852888
1.074603
# TODO(philngo): make sure this complies with CalTRACK 2.2.2.1 if not isinstance(data_series, pd.Series): raise ValueError( "expected series, got object with class {}".format(data_series.__class__) ) if data_series.empty: return data_series series = remove_duplicates(data_series) target_freq = pd.Timedelta(atomic_freq) timedeltas = (series.index[1:] - series.index[:-1]).append( pd.TimedeltaIndex([pd.NaT]) ) if series_type == "cumulative": spread_factor = target_freq.total_seconds() / timedeltas.total_seconds() series_spread = series * spread_factor atomic_series = series_spread.asfreq(atomic_freq, method="ffill") resampled = atomic_series.resample(freq).sum() resampled_with_nans = atomic_series.resample(freq).mean() resampled = resampled[resampled_with_nans.notnull()].reindex(resampled.index) elif series_type == "instantaneous": atomic_series = series.asfreq(atomic_freq, method="ffill") resampled = atomic_series.resample(freq).mean() if resampled.index[-1] < series.index[-1]: # this adds a null at the end using the target frequency last_index = pd.date_range(resampled.index[-1], freq=freq, periods=2)[1:] resampled = ( pd.concat([resampled, pd.Series(np.nan, index=last_index)]) .resample(freq) .mean() ) return resampled
def as_freq(data_series, freq, atomic_freq="1 Min", series_type="cumulative")
Resample data to a different frequency. This method can be used to upsample or downsample meter data. The assumption it makes to do so is that meter data is constant and averaged over the given periods. For instance, to convert billing-period data to daily data, this method first upsamples to the atomic frequency (1 minute frequency, by default), "spreading" usage evenly across all minutes in each period. Then it downsamples to the target frequency (daily, in this example) and returns that result. With instantaneous series, the data is copied to all contiguous time intervals and the mean over `freq` is returned. **Caveats**: - This method gives a fair amount of flexibility in resampling as long as you are OK with the assumption that usage is constant over the period (this assumption is generally broken in observed data at large enough frequencies, so this caveat should not be taken lightly). Parameters ---------- data_series : :any:`pandas.Series` Data to resample. Should have a :any:`pandas.DatetimeIndex`. freq : :any:`str` The frequency to resample to. This should be given in a form recognized by the :any:`pandas.Series.resample` method. atomic_freq : :any:`str`, optional The "atomic" frequency of the intermediate data form. This can be adjusted to a higher atomic frequency to increase speed or memory performance. series_type : :any:`str`, {'cumulative', 'instantaneous'}, default 'cumulative' Type of data sampling. 'cumulative' data can be spread over smaller time intervals and is aggregated using addition (e.g. meter data). 'instantaneous' data is copied (not spread) over smaller time intervals and is aggregated by averaging (e.g. weather data). Returns ------- resampled_data : :any:`pandas.Series` Data resampled to the given frequency.
2.906635
2.898958
1.002648
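A standalone sketch of the "spread evenly, then resample" idea behind as_freq, coarsening the atomic step to hours for brevity:

import pandas as pd

daily = pd.Series([24.0, 48.0],
                  index=pd.date_range('2017-01-01', periods=2, freq='D'))
hourly = daily.resample('H').ffill() / 24.0  # spread each day's total over its hours
print(hourly.head(3))  # 1.0 per hour on the first day (24.0 / 24)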
# dont affect the original data index = index.copy() if len(index) == 0: return pd.Series([], index=index) timedeltas = (index[1:] - index[:-1]).append(pd.TimedeltaIndex([pd.NaT])) timedelta_days = timedeltas.total_seconds() / (60 * 60 * 24) return pd.Series(timedelta_days, index=index)
def day_counts(index)
Days between DatetimeIndex values as a :any:`pandas.Series`. Parameters ---------- index : :any:`pandas.DatetimeIndex` The index for which to get day counts. Returns ------- day_counts : :any:`pandas.Series` A :any:`pandas.Series` with counts of days between periods. Counts are given on start dates of periods.
3.415965
3.449648
0.990236
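A standalone sketch of the computation inside day_counts: consecutive index differences in days, NaT-padded so the result aligns with period start dates:

import pandas as pd

index = pd.DatetimeIndex(['2017-01-01', '2017-02-01', '2017-03-01'])
deltas = (index[1:] - index[:-1]).append(pd.TimedeltaIndex([pd.NaT]))
print(pd.Series(deltas.total_seconds() / 86400, index=index))  # 31.0, 28.0, NaN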
data = super(CalTRACKHourlyModel, self).json() data.update( { "occupancy_lookup": self.occupancy_lookup.to_json(orient="split"), "temperature_bins": self.temperature_bins.to_json(orient="split"), } ) return data
def json(self)
Return a JSON-serializable representation of this result. The output of this function can be converted to a serialized string with :any:`json.dumps`.
5.48333
6.23704
0.879156
warnings = [] n_non_zero = int((degree_days > 0).sum()) if n_non_zero < minimum_non_zero: warnings.append( EEMeterWarning( qualified_name=( "eemeter.caltrack_daily.{model_type}.too_few_non_zero_{degree_day_type}".format( model_type=model_type, degree_day_type=degree_day_type ) ), description=( "Number of non-zero daily {degree_day_type} values below accepted minimum." " Candidate fit not attempted.".format( degree_day_type=degree_day_type.upper() ) ), data={ "n_non_zero_{degree_day_type}".format( degree_day_type=degree_day_type ): n_non_zero, "minimum_non_zero_{degree_day_type}".format( degree_day_type=degree_day_type ): minimum_non_zero, "{degree_day_type}_balance_point".format( degree_day_type=degree_day_type ): balance_point, }, ) ) return warnings
def get_too_few_non_zero_degree_day_warning( model_type, balance_point, degree_day_type, degree_days, minimum_non_zero )
Return an empty list or a single warning wrapped in a list regarding non-zero degree days for a set of degree days. Parameters ---------- model_type : :any:`str` Model type (e.g., ``'cdd_hdd'``). balance_point : :any:`float` The balance point in question. degree_day_type : :any:`str` The type of degree days (``'cdd'`` or ``'hdd'``). degree_days : :any:`pandas.Series` A series of degree day values. minimum_non_zero : :any:`int` Minimum allowable number of non-zero degree day values. Returns ------- warnings : :any:`list` of :any:`eemeter.EEMeterWarning` Empty list or list of single warning.
2.36867
2.289764
1.03446
warnings = [] total_degree_days = (avg_degree_days * period_days).sum() if total_degree_days < minimum_total: warnings.append( EEMeterWarning( qualified_name=( "eemeter.caltrack_daily.{model_type}.total_{degree_day_type}_too_low".format( model_type=model_type, degree_day_type=degree_day_type ) ), description=( "Total {degree_day_type} below accepted minimum." " Candidate fit not attempted.".format( degree_day_type=degree_day_type.upper() ) ), data={ "total_{degree_day_type}".format( degree_day_type=degree_day_type ): total_degree_days, "total_{degree_day_type}_minimum".format( degree_day_type=degree_day_type ): minimum_total, "{degree_day_type}_balance_point".format( degree_day_type=degree_day_type ): balance_point, }, ) ) return warnings
def get_total_degree_day_too_low_warning( model_type, balance_point, degree_day_type, avg_degree_days, period_days, minimum_total, )
Return an empty list or a single warning wrapped in a list regarding the total summed degree day values. Parameters ---------- model_type : :any:`str` Model type (e.g., ``'cdd_hdd'``). balance_point : :any:`float` The balance point in question. degree_day_type : :any:`str` The type of degree days (``'cdd'`` or ``'hdd'``). avg_degree_days : :any:`pandas.Series` A series of degree day values. period_days : :any:`pandas.Series` A series containing day counts. minimum_total : :any:`float` Minimum allowable total sum of degree day values. Returns ------- warnings : :any:`list` of :any:`eemeter.EEMeterWarning` Empty list or list of single warning.
2.444956
2.311809
1.057594
warnings = [] if model_params.get(parameter, 0) < 0: warnings.append( EEMeterWarning( qualified_name=( "eemeter.caltrack_daily.{model_type}.{parameter}_negative".format( model_type=model_type, parameter=parameter ) ), description=( "Model fit {parameter} parameter is negative. Candidate model rejected.".format( parameter=parameter ) ), data=model_params, ) ) return warnings
def get_parameter_negative_warning(model_type, model_params, parameter)
Return an empty list or a single warning wrapped in a list indicating whether model parameter is negative. Parameters ---------- model_type : :any:`str` Model type (e.g., ``'cdd_hdd'``). model_params : :any:`dict` Parameters as stored in :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`. parameter : :any:`str` The name of the parameter, e.g., ``'intercept'``. Returns ------- warnings : :any:`list` of :any:`eemeter.EEMeterWarning` Empty list or list of single warning.
4.710324
3.467681
1.35835
warnings = [] if p_value > maximum_p_value: data = { "{}_p_value".format(parameter): p_value, "{}_maximum_p_value".format(parameter): maximum_p_value, } data.update(model_params) warnings.append( EEMeterWarning( qualified_name=( "eemeter.caltrack_daily.{model_type}.{parameter}_p_value_too_high".format( model_type=model_type, parameter=parameter ) ), description=( "Model fit {parameter} p-value is too high. Candidate model rejected.".format( parameter=parameter ) ), data=data, ) ) return warnings
def get_parameter_p_value_too_high_warning( model_type, model_params, parameter, p_value, maximum_p_value )
Return an empty list or a single warning wrapped in a list indicating whether model parameter p-value is too high. Parameters ---------- model_type : :any:`str` Model type (e.g., ``'cdd_hdd'``). model_params : :any:`dict` Parameters as stored in :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`. parameter : :any:`str` The name of the parameter, e.g., ``'intercept'``. p_value : :any:`float` The p-value of the parameter. maximum_p_value : :any:`float` The maximum allowable p-value of the parameter. Returns ------- warnings : :any:`list` of :any:`eemeter.EEMeterWarning` Empty list or list of single warning.
3.121343
2.608588
1.196564
warnings = [ EEMeterWarning( qualified_name="eemeter.caltrack_daily.{}.model_results".format(model_type), description=( "Error encountered in statsmodels.formula.api.ols method. (Empty data?)" ), data={"traceback": traceback.format_exc()}, ) ] return CalTRACKUsagePerDayCandidateModel( model_type=model_type, formula=formula, status="ERROR", warnings=warnings )
def get_fit_failed_candidate_model(model_type, formula)
Return a Candidate model that indicates the fitting routine failed. Parameters ---------- model_type : :any:`str` Model type (e.g., ``'cdd_hdd'``). formula : :any:`str` The candidate model formula. Returns ------- candidate_model : :any:`eemeter.CalTRACKUsagePerDayCandidateModel` Candidate model instance with status ``'ERROR'``, and warning with traceback.
10.757633
6.028485
1.784467
model_type = "intercept_only" formula = "meter_value ~ 1" if weights_col is None: weights = 1 else: weights = data[weights_col] try: model = smf.wls(formula=formula, data=data, weights=weights) except Exception as e: return [get_fit_failed_candidate_model(model_type, formula)] result = model.fit() # CalTrack 3.3.1.3 model_params = {"intercept": result.params["Intercept"]} model_warnings = [] # CalTrack 3.4.3.2 for parameter in ["intercept"]: model_warnings.extend( get_parameter_negative_warning(model_type, model_params, parameter) ) if len(model_warnings) > 0: status = "DISQUALIFIED" else: status = "QUALIFIED" return [ CalTRACKUsagePerDayCandidateModel( model_type=model_type, formula=formula, status=status, warnings=model_warnings, model_params=model_params, model=model, result=result, r_squared_adj=0, ) ]
def get_intercept_only_candidate_models(data, weights_col)
Return a list of a single candidate intercept-only model. Parameters ---------- data : :any:`pandas.DataFrame` A DataFrame containing at least the column ``meter_value``. DataFrames of this form can be made using the :any:`eemeter.create_caltrack_daily_design_matrix` or :any:`eemeter.create_caltrack_billing_design_matrix` methods. weights_col : :any:`str` or None The name of the column (if any) in ``data`` to use as weights. Returns ------- candidate_models : :any:`list` of :any:`CalTRACKUsagePerDayCandidateModel` List containing a single intercept-only candidate model.
3.806055
3.057506
1.244823
model_type = "cdd_only" cdd_column = "cdd_%s" % balance_point formula = "meter_value ~ %s" % cdd_column if weights_col is None: weights = 1 else: weights = data[weights_col] period_days = weights degree_day_warnings = [] degree_day_warnings.extend( get_total_degree_day_too_low_warning( model_type, balance_point, "cdd", data[cdd_column], period_days, minimum_total_cdd, ) ) degree_day_warnings.extend( get_too_few_non_zero_degree_day_warning( model_type, balance_point, "cdd", data[cdd_column], minimum_non_zero_cdd ) ) if len(degree_day_warnings) > 0: return CalTRACKUsagePerDayCandidateModel( model_type=model_type, formula=formula, status="NOT ATTEMPTED", warnings=degree_day_warnings, ) try: model = smf.wls(formula=formula, data=data, weights=weights) except Exception as e: return get_fit_failed_candidate_model(model_type, formula) result = model.fit() r_squared_adj = result.rsquared_adj beta_cdd_p_value = result.pvalues[cdd_column] # CalTrack 3.3.1.3 model_params = { "intercept": result.params["Intercept"], "beta_cdd": result.params[cdd_column], "cooling_balance_point": balance_point, } model_warnings = [] # CalTrack 3.4.3.2 for parameter in ["intercept", "beta_cdd"]: model_warnings.extend( get_parameter_negative_warning(model_type, model_params, parameter) ) model_warnings.extend( get_parameter_p_value_too_high_warning( model_type, model_params, parameter, beta_cdd_p_value, beta_cdd_maximum_p_value, ) ) if len(model_warnings) > 0: status = "DISQUALIFIED" else: status = "QUALIFIED" return CalTRACKUsagePerDayCandidateModel( model_type=model_type, formula=formula, status=status, warnings=model_warnings, model_params=model_params, model=model, result=result, r_squared_adj=r_squared_adj, )
def get_single_cdd_only_candidate_model( data, minimum_non_zero_cdd, minimum_total_cdd, beta_cdd_maximum_p_value, weights_col, balance_point, )
Return a single candidate cdd-only model for a particular balance point. Parameters ---------- data : :any:`pandas.DataFrame` A DataFrame containing at least the column ``meter_value`` and ``cdd_<balance_point>`` DataFrames of this form can be made using the :any:`eemeter.create_caltrack_daily_design_matrix` or :any:`eemeter.create_caltrack_billing_design_matrix` methods. minimum_non_zero_cdd : :any:`int` Minimum allowable number of non-zero cooling degree day values. minimum_total_cdd : :any:`float` Minimum allowable total sum of cooling degree day values. beta_cdd_maximum_p_value : :any:`float` The maximum allowable p-value of the beta cdd parameter. weights_col : :any:`str` or None The name of the column (if any) in ``data`` to use as weights. balance_point : :any:`float` The cooling balance point for this model. Returns ------- candidate_model : :any:`CalTRACKUsagePerDayCandidateModel` A single cdd-only candidate model, with any associated warnings.
2.504314
2.178843
1.149378
balance_points = [int(col[4:]) for col in data.columns if col.startswith("cdd")] candidate_models = [ get_single_cdd_only_candidate_model( data, minimum_non_zero_cdd, minimum_total_cdd, beta_cdd_maximum_p_value, weights_col, balance_point, ) for balance_point in balance_points ] return candidate_models
def get_cdd_only_candidate_models( data, minimum_non_zero_cdd, minimum_total_cdd, beta_cdd_maximum_p_value, weights_col )
Return a list of all possible candidate cdd-only models. Parameters ---------- data : :any:`pandas.DataFrame` A DataFrame containing at least the column ``meter_value`` and 1 to n columns with names of the form ``cdd_<balance_point>``. All columns with names of this form will be used to fit a candidate model. DataFrames of this form can be made using the :any:`eemeter.create_caltrack_daily_design_matrix` or :any:`eemeter.create_caltrack_billing_design_matrix` methods. minimum_non_zero_cdd : :any:`int` Minimum allowable number of non-zero cooling degree day values. minimum_total_cdd : :any:`float` Minimum allowable total sum of cooling degree day values. beta_cdd_maximum_p_value : :any:`float` The maximum allowable p-value of the beta cdd parameter. weights_col : :any:`str` or None The name of the column (if any) in ``data`` to use as weights. Returns ------- candidate_models : :any:`list` of :any:`CalTRACKUsagePerDayCandidateModel` A list of cdd-only candidate models, with any associated warnings.
2.504205
2.381155
1.051677
model_type = "hdd_only" hdd_column = "hdd_%s" % balance_point formula = "meter_value ~ %s" % hdd_column if weights_col is None: weights = 1 else: weights = data[weights_col] period_days = weights degree_day_warnings = [] degree_day_warnings.extend( get_total_degree_day_too_low_warning( model_type, balance_point, "hdd", data[hdd_column], period_days, minimum_total_hdd, ) ) degree_day_warnings.extend( get_too_few_non_zero_degree_day_warning( model_type, balance_point, "hdd", data[hdd_column], minimum_non_zero_hdd ) ) if len(degree_day_warnings) > 0: return CalTRACKUsagePerDayCandidateModel( model_type=model_type, formula=formula, status="NOT ATTEMPTED", warnings=degree_day_warnings, ) try: model = smf.wls(formula=formula, data=data, weights=weights) except Exception as e: return get_fit_failed_candidate_model(model_type, formula) result = model.fit() r_squared_adj = result.rsquared_adj beta_hdd_p_value = result.pvalues[hdd_column] # CalTrack 3.3.1.3 model_params = { "intercept": result.params["Intercept"], "beta_hdd": result.params[hdd_column], "heating_balance_point": balance_point, } model_warnings = [] # CalTrack 3.4.3.2 for parameter in ["intercept", "beta_hdd"]: model_warnings.extend( get_parameter_negative_warning(model_type, model_params, parameter) ) model_warnings.extend( get_parameter_p_value_too_high_warning( model_type, model_params, parameter, beta_hdd_p_value, beta_hdd_maximum_p_value, ) ) if len(model_warnings) > 0: status = "DISQUALIFIED" else: status = "QUALIFIED" return CalTRACKUsagePerDayCandidateModel( model_type=model_type, formula=formula, status=status, warnings=model_warnings, model_params=model_params, model=model, result=result, r_squared_adj=r_squared_adj, )
def get_single_hdd_only_candidate_model( data, minimum_non_zero_hdd, minimum_total_hdd, beta_hdd_maximum_p_value, weights_col, balance_point, )
Return a single candidate hdd-only model for a particular balance point. Parameters ---------- data : :any:`pandas.DataFrame` A DataFrame containing at least the column ``meter_value`` and ``hdd_<balance_point>`` DataFrames of this form can be made using the :any:`eemeter.create_caltrack_daily_design_matrix` or :any:`eemeter.create_caltrack_billing_design_matrix` methods. minimum_non_zero_hdd : :any:`int` Minimum allowable number of non-zero heating degree day values. minimum_total_hdd : :any:`float` Minimum allowable total sum of heating degree day values. beta_hdd_maximum_p_value : :any:`float` The maximum allowable p-value of the beta hdd parameter. weights_col : :any:`str` or None The name of the column (if any) in ``data`` to use as weights. balance_point : :any:`float` The heating balance point for this model. Returns ------- candidate_model : :any:`CalTRACKUsagePerDayCandidateModel` A single hdd-only candidate model, with any associated warnings.
2.399863
2.131843
1.125722
balance_points = [int(col[4:]) for col in data.columns if col.startswith("hdd")] candidate_models = [ get_single_hdd_only_candidate_model( data, minimum_non_zero_hdd, minimum_total_hdd, beta_hdd_maximum_p_value, weights_col, balance_point, ) for balance_point in balance_points ] return candidate_models
def get_hdd_only_candidate_models( data, minimum_non_zero_hdd, minimum_total_hdd, beta_hdd_maximum_p_value, weights_col )
Return a list of all possible candidate hdd-only models. Parameters ---------- data : :any:`pandas.DataFrame` A DataFrame containing at least the column ``meter_value`` and 1 to n columns with names of the form ``hdd_<balance_point>``. All columns with names of this form will be used to fit a candidate model. DataFrames of this form can be made using the :any:`eemeter.create_caltrack_daily_design_matrix` or :any:`eemeter.create_caltrack_billing_design_matrix` methods. minimum_non_zero_hdd : :any:`int` Minimum allowable number of non-zero heating degree day values. minimum_total_hdd : :any:`float` Minimum allowable total sum of heating degree day values. beta_hdd_maximum_p_value : :any:`float` The maximum allowable p-value of the beta hdd parameter. weights_col : :any:`str` or None The name of the column (if any) in ``data`` to use as weights. Returns ------- candidate_models : :any:`list` of :any:`CalTRACKUsagePerDayCandidateModel` A list of hdd-only candidate models, with any associated warnings.
2.439857
2.318272
1.052446
cooling_balance_points = [ int(col[4:]) for col in data.columns if col.startswith("cdd") ] heating_balance_points = [ int(col[4:]) for col in data.columns if col.startswith("hdd") ] # CalTrack 3.2.2.1 candidate_models = [ get_single_cdd_hdd_candidate_model( data, minimum_non_zero_cdd, minimum_non_zero_hdd, minimum_total_cdd, minimum_total_hdd, beta_cdd_maximum_p_value, beta_hdd_maximum_p_value, weights_col, cooling_balance_point, heating_balance_point, ) for cooling_balance_point in cooling_balance_points for heating_balance_point in heating_balance_points if heating_balance_point <= cooling_balance_point ] return candidate_models
def get_cdd_hdd_candidate_models( data, minimum_non_zero_cdd, minimum_non_zero_hdd, minimum_total_cdd, minimum_total_hdd, beta_cdd_maximum_p_value, beta_hdd_maximum_p_value, weights_col, )
Return a list of candidate cdd_hdd models for a particular selection of cooling balance point and heating balance point Parameters ---------- data : :any:`pandas.DataFrame` A DataFrame containing at least the column ``meter_value`` and 1 to n columns each of the form ``hdd_<heating_balance_point>`` and ``cdd_<cooling_balance_point>``. DataFrames of this form can be made using the :any:`eemeter.create_caltrack_daily_design_matrix` or :any:`eemeter.create_caltrack_billing_design_matrix` methods. minimum_non_zero_cdd : :any:`int` Minimum allowable number of non-zero cooling degree day values. minimum_non_zero_hdd : :any:`int` Minimum allowable number of non-zero heating degree day values. minimum_total_cdd : :any:`float` Minimum allowable total sum of cooling degree day values. minimum_total_hdd : :any:`float` Minimum allowable total sum of heating degree day values. beta_cdd_maximum_p_value : :any:`float` The maximum allowable p-value of the beta cdd parameter. beta_hdd_maximum_p_value : :any:`float` The maximum allowable p-value of the beta hdd parameter. weights_col : :any:`str` or None The name of the column (if any) in ``data`` to use as weights. Returns ------- candidate_models : :any:`list` of :any:`CalTRACKUsagePerDayCandidateModel` A list of cdd_hdd candidate models, with any associated warnings.
1.941631
1.74842
1.110506
best_r_squared_adj = -np.inf best_candidate = None # CalTrack 3.4.3.3 for candidate in candidate_models: if ( candidate.status == "QUALIFIED" and candidate.r_squared_adj > best_r_squared_adj ): best_candidate = candidate best_r_squared_adj = candidate.r_squared_adj if best_candidate is None: warnings = [ EEMeterWarning( qualified_name="eemeter.caltrack_daily.select_best_candidate.no_candidates", description="No qualified model candidates available.", data={ "status_count:{}".format(status): count for status, count in Counter( [c.status for c in candidate_models] ).items() }, ) ] return None, warnings return best_candidate, []
def select_best_candidate(candidate_models)
Select and return the best candidate model based on r-squared and qualification. Parameters ---------- candidate_models : :any:`list` of :any:`eemeter.CalTRACKUsagePerDayCandidateModel` Candidate models to select from. Returns ------- (best_candidate, warnings) : :any:`tuple` of :any:`eemeter.CalTRACKUsagePerDayCandidateModel` or :any:`None` and :any:`list` of `eemeter.EEMeterWarning` Return the candidate model with highest r-squared or None if none meet the requirements, and a list of warnings about this selection (or lack of selection).
4.175902
3.365652
1.240741
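A standalone sketch of the selection rule above, with a lightweight namedtuple standing in for CalTRACKUsagePerDayCandidateModel:

from collections import namedtuple

Candidate = namedtuple('Candidate', ['status', 'r_squared_adj'])
candidates = [Candidate('QUALIFIED', 0.81),
              Candidate('DISQUALIFIED', 0.95),  # skipped despite the best fit
              Candidate('QUALIFIED', 0.88)]
best = max((c for c in candidates if c.status == 'QUALIFIED'),
           key=lambda c: c.r_squared_adj, default=None)
print(best)  # Candidate(status='QUALIFIED', r_squared_adj=0.88)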
try: import matplotlib.pyplot as plt except ImportError: # pragma: no cover raise ImportError("matplotlib is required for plotting.") if figsize is None: figsize = (10, 4) if ax is None: fig, ax = plt.subplots(figsize=figsize) if candidate.status == "QUALIFIED": color = "C2" elif candidate.status == "DISQUALIFIED": color = "C3" else: return if best: color = "C1" alpha = 1 temp_min, temp_max = (30, 90) if temp_range is None else temp_range temps = np.arange(temp_min, temp_max) data = {"n_days": np.ones(temps.shape)} prediction_index = pd.date_range( "2017-01-01T00:00:00Z", periods=len(temps), freq="D" ) temps_hourly = pd.Series(temps, index=prediction_index).resample("H").ffill() prediction = candidate.predict( prediction_index, temps_hourly, "daily" ).result.predicted_usage plot_kwargs = {"color": color, "alpha": alpha or 0.3} plot_kwargs.update(kwargs) ax.plot(temps, prediction, **plot_kwargs) if title is not None: ax.set_title(title) return ax
def plot_caltrack_candidate( candidate, best=False, ax=None, title=None, figsize=None, temp_range=None, alpha=None, **kwargs )
Plot a CalTRACK candidate model. Parameters ---------- candidate : :any:`eemeter.CalTRACKUsagePerDayCandidateModel` A candidate model with a predict function. best : :any:`bool`, optional Whether this is the best candidate or not. ax : :any:`matplotlib.axes.Axes`, optional Existing axes to plot on. title : :any:`str`, optional Chart title. figsize : :any:`tuple`, optional (width, height) of chart. temp_range : :any:`tuple`, optional (min, max) temperatures to plot model. alpha : :any:`float` between 0 and 1, optional Transparency, 0 fully transparent, 1 fully opaque. **kwargs Keyword arguments for :any:`matplotlib.axes.Axes.plot` Returns ------- ax : :any:`matplotlib.axes.Axes` Matplotlib axes.
2.578038
2.53655
1.016356
def _json_or_none(obj): return None if obj is None else obj.json() data = { "status": self.status, "method_name": self.method_name, "interval": self.interval, "model": _json_or_none(self.model), "r_squared_adj": _noneify(self.r_squared_adj), "warnings": [w.json() for w in self.warnings], "metadata": self.metadata, "settings": self.settings, "totals_metrics": _json_or_none(self.totals_metrics), "avgs_metrics": _json_or_none(self.avgs_metrics), "candidates": None, } if with_candidates: data["candidates"] = [candidate.json() for candidate in self.candidates] return data
def json(self, with_candidates=False)
Return a JSON-serializable representation of this result. The output of this function can be converted to a serialized string with :any:`json.dumps`.
2.917022
2.886856
1.010449
try: import matplotlib.pyplot as plt except ImportError: # pragma: no cover raise ImportError("matplotlib is required for plotting.") if figsize is None: figsize = (10, 4) if ax is None: fig, ax = plt.subplots(figsize=figsize) if temp_range is None: temp_range = (20, 90) if with_candidates: for candidate in self.candidates: candidate.plot(ax=ax, temp_range=temp_range, alpha=candidate_alpha) self.model.plot(ax=ax, best=True, temp_range=temp_range) if title is not None: ax.set_title(title) return ax
def plot( self, ax=None, title=None, figsize=None, with_candidates=False, candidate_alpha=None, temp_range=None, )
Plot a model fit. Parameters ---------- ax : :any:`matplotlib.axes.Axes`, optional Existing axes to plot on. title : :any:`str`, optional Chart title. figsize : :any:`tuple`, optional (width, height) of chart. with_candidates : :any:`bool` If True, also plot candidate models. candidate_alpha : :any:`float` between 0 and 1 Transparency at which to plot candidate models. 0 fully transparent, 1 fully opaque. temp_range : :any:`tuple`, optional (min, max) temperatures to plot model. Returns ------- ax : :any:`matplotlib.axes.Axes` Matplotlib axes.
1.970264
1.982805
0.993675
return { "model_type": self.model_type, "formula": self.formula, "status": self.status, "model_params": self.model_params, "r_squared_adj": _noneify(self.r_squared_adj), "warnings": [w.json() for w in self.warnings], }
def json(self)
Return a JSON-serializable representation of this result. The output of this function can be converted to a serialized string with :any:`json.dumps`.
3.809267
3.938663
0.967147
return caltrack_usage_per_day_predict( self.model_type, self.model_params, prediction_index, temperature_data, with_disaggregated=with_disaggregated, with_design_matrix=with_design_matrix, **kwargs )
def predict( self, prediction_index, temperature_data, with_disaggregated=False, with_design_matrix=False, **kwargs )
Predict usage values for the given prediction index and temperature data, delegating to ``caltrack_usage_per_day_predict`` with this candidate model's type and parameters.
3.187635
2.959544
1.07707
return plot_caltrack_candidate( self, best=best, ax=ax, title=title, figsize=figsize, temp_range=temp_range, alpha=alpha, **kwargs )
def plot( self, best=False, ax=None, title=None, figsize=None, temp_range=None, alpha=None, **kwargs )
Plot this candidate model by delegating to ``plot_caltrack_candidate``.
2.730075
2.551156
1.070133
return { "status": self.status, "criteria_name": self.criteria_name, "warnings": [w.json() for w in self.warnings], "settings": self.settings, }
def json(self)
Return a JSON-serializable representation of this result. The output of this function can be converted to a serialized string with :any:`json.dumps`.
4.07408
4.173876
0.97609
sample_metadata = _load_sample_metadata() metadata = sample_metadata.get(sample) if metadata is None: raise ValueError( "Sample not found: {}. Try one of these?\n{}".format( sample, "\n".join( [" - {}".format(key) for key in sorted(sample_metadata.keys())] ), ) ) freq = metadata.get("freq") if freq not in ("hourly", "daily"): freq = None meter_data_filename = metadata["meter_data_filename"] with resource_stream("eemeter.samples", meter_data_filename) as f: meter_data = meter_data_from_csv(f, gzipped=True, freq=freq) temperature_filename = metadata["temperature_filename"] with resource_stream("eemeter.samples", temperature_filename) as f: temperature_data = temperature_data_from_csv(f, gzipped=True, freq="hourly") metadata["blackout_start_date"] = pytz.UTC.localize( parse_date(metadata["blackout_start_date"]) ) metadata["blackout_end_date"] = pytz.UTC.localize( parse_date(metadata["blackout_end_date"]) ) return meter_data, temperature_data, metadata
def load_sample(sample)
Load meter data, temperature data, and metadata associated with a particular sample identifier. Note: samples are simulated, not real, data. Parameters ---------- sample : :any:`str` Identifier of sample. Complete list can be obtained with :any:`eemeter.samples`. Returns ------- meter_data, temperature_data, metadata : :any:`tuple` of :any:`pandas.DataFrame`, :any:`pandas.Series`, and :any:`dict` Meter data, temperature data, and metadata for this sample identifier.
2.519717
2.2396
1.125074
# TODO(philngo): include image in docs. try: import matplotlib.pyplot as plt except ImportError: # pragma: no cover raise ImportError("matplotlib is required for plotting.") default_kwargs = {"figsize": (16, 4)} default_kwargs.update(kwargs) fig, ax1 = plt.subplots(**default_kwargs) ax1.plot( meter_data.index, meter_data.value, color="C0", label="Energy Use", drawstyle="steps-post", ) ax1.set_ylabel("Energy Use") ax2 = ax1.twinx() ax2.plot( temperature_data.index, temperature_data, color="C1", label="Temperature", alpha=0.8, ) ax2.set_ylabel("Temperature") fig.legend() return ax1, ax2
def plot_time_series(meter_data, temperature_data, **kwargs)
Plot meter and temperature data in dual-axes time series. Parameters ---------- meter_data : :any:`pandas.DataFrame` A :any:`pandas.DatetimeIndex`-indexed DataFrame of meter data with the column ``value``. temperature_data : :any:`pandas.Series` A :any:`pandas.DatetimeIndex`-indexed Series of temperature data. **kwargs Arbitrary keyword arguments to pass to :any:`plt.subplots <matplotlib.pyplot.subplots>` Returns ------- axes : :any:`tuple` of :any:`matplotlib.axes.Axes` Tuple of ``(ax_meter_data, ax_temperature_data)``.
2.472598
2.433663
1.015999
try: import matplotlib.pyplot as plt except ImportError: # pragma: no cover raise ImportError("matplotlib is required for plotting.") # format data temperature_mean = compute_temperature_features(meter_data.index, temperature_data) usage_per_day = compute_usage_per_day_feature(meter_data, series_name="meter_value") df = merge_features([usage_per_day, temperature_mean.temperature_mean]) if figsize is None: figsize = (10, 4) if ax is None: fig, ax = plt.subplots(figsize=figsize) if temp_col is None: temp_col = "temperature_mean" ax.scatter(df[temp_col], df.meter_value, **kwargs) ax.set_xlabel("Temperature") ax.set_ylabel("Energy Use per Day") if title is not None: ax.set_title(title) return ax
def plot_energy_signature( meter_data, temperature_data, temp_col=None, ax=None, title=None, figsize=None, **kwargs )
Plot meter and temperature data in energy signature. Parameters ---------- meter_data : :any:`pandas.DataFrame` A :any:`pandas.DatetimeIndex`-indexed DataFrame of meter data with the column ``value``. temperature_data : :any:`pandas.Series` A :any:`pandas.DatetimeIndex`-indexed Series of temperature data. temp_col : :any:`str`, default ``'temperature_mean'`` The name of the temperature column. ax : :any:`matplotlib.axes.Axes` The axis on which to plot. title : :any:`str`, optional Chart title. figsize : :any:`tuple`, optional (width, height) of chart. **kwargs Arbitrary keyword arguments to pass to :any:`matplotlib.axes.Axes.scatter`. Returns ------- ax : :any:`matplotlib.axes.Axes` Matplotlib axes.
2.765999
2.836827
0.975033
if number is None: return None if isinstance(number, float): return None if np.isinf(number) or np.isnan(number) else number # errors if number is not float compatible return float(number)
def _json_safe_float(number)
JSON serialization for infinity can be problematic. See https://docs.python.org/2/library/json.html#basic-usage This function returns None if `number` is infinity, negative infinity, or NaN. If the `number` cannot be converted to float, this will raise an exception.
4.528553
4.494144
1.007656
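A standalone sketch of the problem _json_safe_float guards against: by default json.dumps emits Infinity/NaN tokens that are not valid JSON:

import json
import math

print(json.dumps(float('inf')))  # Infinity -- not standard JSON

def json_safe_float(number):
    if number is None or math.isinf(number) or math.isnan(number):
        return None
    return float(number)

print(json.dumps(json_safe_float(float('inf'))))  # null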
return { "observed_length": _json_safe_float(self.observed_length), "predicted_length": _json_safe_float(self.predicted_length), "merged_length": _json_safe_float(self.merged_length), "num_parameters": _json_safe_float(self.num_parameters), "observed_mean": _json_safe_float(self.observed_mean), "predicted_mean": _json_safe_float(self.predicted_mean), "observed_variance": _json_safe_float(self.observed_variance), "predicted_variance": _json_safe_float(self.predicted_variance), "observed_skew": _json_safe_float(self.observed_skew), "predicted_skew": _json_safe_float(self.predicted_skew), "observed_kurtosis": _json_safe_float(self.observed_kurtosis), "predicted_kurtosis": _json_safe_float(self.predicted_kurtosis), "observed_cvstd": _json_safe_float(self.observed_cvstd), "predicted_cvstd": _json_safe_float(self.predicted_cvstd), "r_squared": _json_safe_float(self.r_squared), "r_squared_adj": _json_safe_float(self.r_squared_adj), "rmse": _json_safe_float(self.rmse), "rmse_adj": _json_safe_float(self.rmse_adj), "cvrmse": _json_safe_float(self.cvrmse), "cvrmse_adj": _json_safe_float(self.cvrmse_adj), "mape": _json_safe_float(self.mape), "mape_no_zeros": _json_safe_float(self.mape_no_zeros), "num_meter_zeros": _json_safe_float(self.num_meter_zeros), "nmae": _json_safe_float(self.nmae), "nmbe": _json_safe_float(self.nmbe), "autocorr_resid": _json_safe_float(self.autocorr_resid), }
def json(self)
Return a JSON-serializable representation of this result. The output of this function can be converted to a serialized string with :any:`json.dumps`.
1.550382
1.555625
0.99663
data = { "segment_name": self.segment_name, "formula": self.formula, "warnings": [w.json() for w in self.warnings], "model_params": self.model_params, } return data
def json(self)
Return a JSON-serializable representation of this result. The output of this function can be converted to a serialized string with :any:`json.dumps`.
4.717398
4.520936
1.043456
def _json_or_none(obj): return None if obj is None else obj.json() data = { "segment_models": [_json_or_none(m) for m in self.segment_models], "model_lookup": { key: _json_or_none(val) for key, val in self.model_lookup.items() }, "prediction_segment_type": self.prediction_segment_type, "prediction_segment_name_mapping": self.prediction_segment_name_mapping, "prediction_feature_processor": self.prediction_feature_processor.__name__, } return data
def json(self)
Return a JSON-serializable representation of this result. The output of this function can be converted to a serialized string with :any:`json.dumps`.
3.131844
2.972778
1.053508
read_csv_kwargs = { "usecols": [start_col, value_col], "dtype": {value_col: np.float64}, "parse_dates": [start_col], "index_col": start_col, } if gzipped: read_csv_kwargs.update({"compression": "gzip"}) # allow passing extra kwargs read_csv_kwargs.update(kwargs) df = pd.read_csv(filepath_or_buffer, **read_csv_kwargs).tz_localize("UTC") if tz is not None: df = df.tz_convert(tz) if freq == "hourly": df = df.resample("H").sum() elif freq == "daily": df = df.resample("D").sum() return df
def meter_data_from_csv( filepath_or_buffer, tz=None, start_col="start", value_col="value", gzipped=False, freq=None, **kwargs )
Load meter data from a CSV file. Default format:: start,value 2017-01-01T00:00:00+00:00,0.31 2017-01-02T00:00:00+00:00,0.4 2017-01-03T00:00:00+00:00,0.58 Parameters ---------- filepath_or_buffer : :any:`str` or file-handle File path or object. tz : :any:`str`, optional E.g., ``'UTC'`` or ``'US/Pacific'`` start_col : :any:`str`, optional, default ``'start'`` Date period start column. value_col : :any:`str`, optional, default ``'value'`` Value column, can be in any unit. gzipped : :any:`bool`, optional Whether file is gzipped. freq : :any:`str`, optional If given, apply frequency to data using :any:`pandas.DataFrame.resample`. **kwargs Extra keyword arguments to pass to :any:`pandas.read_csv`, such as ``sep='|'``.
1.875266
2.216534
0.846035
read_csv_kwargs = { "usecols": [date_col, temp_col], "dtype": {temp_col: np.float64}, "parse_dates": [date_col], "index_col": date_col, } if gzipped: read_csv_kwargs.update({"compression": "gzip"}) # allow passing extra kwargs read_csv_kwargs.update(kwargs) if tz is None: tz = "UTC" df = pd.read_csv(filepath_or_buffer, **read_csv_kwargs).tz_localize(tz) if freq == "hourly": df = df.resample("H").sum() return df[temp_col]
def temperature_data_from_csv( filepath_or_buffer, tz=None, date_col="dt", temp_col="tempF", gzipped=False, freq=None, **kwargs )
Load temperature data from a CSV file. Default format:: dt,tempF 2017-01-01T00:00:00+00:00,21 2017-01-01T01:00:00+00:00,22.5 2017-01-01T02:00:00+00:00,23.5 Parameters ---------- filepath_or_buffer : :any:`str` or file-handle File path or object. tz : :any:`str`, optional E.g., ``'UTC'`` or ``'US/Pacific'`` date_col : :any:`str`, optional, default ``'dt'`` Date period start column. temp_col : :any:`str`, optional, default ``'tempF'`` Temperature column. gzipped : :any:`bool`, optional Whether file is gzipped. freq : :any:`str`, optional If given, apply frequency to data using :any:`pandas.Series.resample`. **kwargs Extra keyword arguments to pass to :any:`pandas.read_csv`, such as ``sep='|'``.
2.052781
2.547348
0.80585
if orient == "list": df = pd.DataFrame(data, columns=["start", "value"]) df["start"] = pd.DatetimeIndex(df.start).tz_localize("UTC") df = df.set_index("start") return df else: raise ValueError("orientation not recognized.")
def meter_data_from_json(data, orient="list")
Load meter data from json. Default format:: [ ['2017-01-01T00:00:00+00:00', 3.5], ['2017-02-01T00:00:00+00:00', 0.4], ['2017-03-01T00:00:00+00:00', 0.46], ] Parameters ---------- data : :any:`list` List elements are each a row of data. Returns ------- df : :any:`pandas.DataFrame` DataFrame with a single column (``'value'``) and a :any:`pandas.DatetimeIndex`.
2.876949
3.009973
0.955806
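A standalone sketch of the "list" orientation handled above; pd.to_datetime with utc=True is used here for timezone-aware parsing:

import pandas as pd

data = [['2017-01-01T00:00:00+00:00', 3.5],
        ['2017-02-01T00:00:00+00:00', 0.4]]
df = pd.DataFrame(data, columns=['start', 'value'])
df['start'] = pd.to_datetime(df.start, utc=True)
print(df.set_index('start'))  # single 'value' column, UTC DatetimeIndex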
if orient == "list": df = pd.DataFrame(data, columns=["dt", "tempF"]) series = df.tempF series.index = pd.DatetimeIndex(df.dt).tz_localize("UTC") return series else: raise ValueError("orientation not recognized.")
def temperature_data_from_json(data, orient="list")
Load temperature data from json. (Must be given in degrees Fahrenheit). Default format:: [ ['2017-01-01T00:00:00+00:00', 3.5], ['2017-01-01T01:00:00+00:00', 5.4], ['2017-01-01T02:00:00+00:00', 7.4], ] Parameters ---------- data : :any:`list` List elements are each a row of data. Returns ------- series : :any:`pandas.Series` Series of temperature values (``'tempF'``) with a :any:`pandas.DatetimeIndex`.
3.65928
3.445787
1.061958
if meter_data.index.name is None: meter_data.index.name = "start" return meter_data.to_csv(path_or_buf, index=True)
def meter_data_to_csv(meter_data, path_or_buf)
Write meter data to CSV. See also :any:`pandas.DataFrame.to_csv`. Parameters ---------- meter_data : :any:`pandas.DataFrame` Meter data DataFrame with ``'value'`` column and :any:`pandas.DatetimeIndex`. path_or_buf : :any:`str` or file handle, default None File path or object, if None is provided the result is returned as a string.
2.881786
3.859136
0.746744
if temperature_data.index.name is None: temperature_data.index.name = "dt" if temperature_data.name is None: temperature_data.name = "temperature" return temperature_data.to_frame().to_csv(path_or_buf, index=True)
def temperature_data_to_csv(temperature_data, path_or_buf)
Write temperature data to CSV. See also :any:`pandas.DataFrame.to_csv`. Parameters ---------- temperature_data : :any:`pandas.Series` Temperature data series with :any:`pandas.DatetimeIndex`. path_or_buf : :any:`str` or file handle, default None File path or object, if None is provided the result is returned as a string.
2.363256
2.414881
0.978622
if flask.has_request_context(): emit(_NAME + str(self._uuid)) else: sio = flask.current_app.extensions['socketio'] sio.emit(_NAME + str(self._uuid)) eventlet.sleep()
def notify(self)
Notify the client. The function passed to ``App.respond`` will get called.
6.102296
6.218477
0.981317
if not isinstance(key, (str, bytes)): raise KeyError('Key must be of type str or bytes, found type {}'.format(type(key)))
def validate(key)
Check that the key is a string or bytestring. That's the only valid type of key.
3.894226
3.630767
1.072563
try: # numpy isn't an explicit dependency of bowtie # so we can't assume it's available import numpy as np if isinstance(obj, (np.ndarray, np.generic)): return obj.tolist() except ImportError: pass try: # pandas isn't an explicit dependency of bowtie # so we can't assume it's available import pandas as pd if isinstance(obj, pd.DatetimeIndex): return [x.isoformat() for x in obj.to_pydatetime()] if isinstance(obj, pd.Index): return obj.tolist() if isinstance(obj, pd.Series): try: return [x.isoformat() for x in obj.dt.to_pydatetime()] except AttributeError: return obj.tolist() except ImportError: pass if isinstance(obj, (datetime, time, date)): return obj.isoformat() raise TypeError('Not sure how to serialize {} of type {}'.format(obj, type(obj)))
def json_conversion(obj: Any) -> JSON
Encode additional objects to JSON.
2.297595
2.242654
1.024498
try: return msgpack.packb(x, default=encoders) except TypeError as exc: message = ('Serialization error, check the data passed to a do_ command. ' 'Cannot serialize this object:\n') + str(exc)[16:] raise SerializationError(message)
def pack(x: Any) -> bytes
Encode ``x`` into msgpack with additional encoders.
9.675681
8.372732
1.155618
@property # type: ignore @wraps(event) def actualevent(self): # pylint: disable=missing-docstring name = event.__name__[3:] try: # the getter post processing function # is preserved with an underscore getter = event(self).__name__ except AttributeError: getter = None return Event(name, self._uuid, getter) # pylint: disable=protected-access return actualevent
def make_event(event: Callable) -> Callable
Create an event from a method signature.
6.526772
6.175224
1.056929
@wraps(command) def actualcommand(self, *args, **kwds): # pylint: disable=missing-docstring data = command(self, *args, **kwds) name = command.__name__[3:] signal = '{uuid}{sep}{event}'.format( uuid=self._uuid, # pylint: disable=protected-access sep=SEPARATOR, event=name ) if flask.has_request_context(): emit(signal, {'data': pack(data)}) else: sio = flask.current_app.extensions['socketio'] sio.emit(signal, {'data': pack(data)}) eventlet.sleep() return actualcommand
def make_command(command: Callable) -> Callable
Create a command from a method signature.
3.785424
3.582294
1.056704
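Both factories derive the wire-level name by slicing off the first three characters of the method name (the `on_` / `do_` prefix); a tiny check of that convention:

def do_options(self, labels, values):  # stand-in method, not the real widget
    pass

print(do_options.__name__[3:])  # -> 'options'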
def get(self, timeout=10):  # pylint: disable=missing-docstring
    name = getter.__name__
    signal = '{uuid}{sep}{event}'.format(
        uuid=self._uuid,  # pylint: disable=protected-access
        sep=SEPARATOR,
        event=name
    )
    event = LightQueue(1)
    if flask.has_request_context():
        emit(signal, callback=lambda x: event.put(unpack(x)))
    else:
        sio = flask.current_app.extensions['socketio']
        sio.emit(signal, callback=lambda x: event.put(unpack(x)))
    data = event.get(timeout=timeout)
    return getter(self, data)

# don't want to copy the signature in this case
get.__doc__ = getter.__doc__
return get
def make_getter(getter: Callable) -> Callable
Create a getter from a method signature.
4.205529
4.092021
1.027739
if tag is None:
    raise ValueError('tag cannot be None')
formatter = string.Formatter()
mapping = FormatDict(component=tag)
return formatter.vformat(wrap, (), mapping)
def _insert(wrap: str, tag: Optional[str]) -> str
Insert the component tag into the wrapper html. This ignores other tags already created like ``{socket}``. https://stackoverflow.com/a/11284026/744520
7.681881
6.111829
1.256887
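_insert depends on a mapping that leaves unknown placeholders untouched, which is the trick from the linked Stack Overflow answer. A minimal sketch of such a FormatDict (the real class may differ in detail):

import string

class FormatDict(dict):
    def __missing__(self, key):
        # leave unresolved fields like {socket} for a later formatting pass
        return '{' + key + '}'

wrap = '<div>{component}{socket}</div>'
print(string.Formatter().vformat(wrap, (), FormatDict(component='<App/>')))
# -> <div><App/>{socket}</div>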
return [dict(label=l, value=v) for l, v in zip(labels, values)]
def do_options(self, labels, values)
Replace the drop down fields.

Parameters
----------
labels : array-like
    List of strings which will be visible to the user.
values : array-like
    List of values associated with the labels that are hidden from the user.

Returns
-------
None
3.509394
5.75952
0.609321
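This and the two do_options variants that follow differ only in the widget they target; the payload is always the labels and values zipped into a list of dicts:

labels = ['Low', 'High']
values = [0, 1]
print([dict(label=l, value=v) for l, v in zip(labels, values)])
# -> [{'label': 'Low', 'value': 0}, {'label': 'High', 'value': 1}]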
return [{'label': label, 'value': value} for label, value in zip(labels, values)]
def do_options(self, labels: Sequence[str], values: Sequence[Union[str, int]]) -> Sequence[Dict]
Replace the checkbox options.

Parameters
----------
labels : array-like
    List of strings which will be visible to the user.
values : array-like
    List of values associated with the labels that are hidden from the user.

Returns
-------
None
2.698864
4.658907
0.579291
return [{'label': label, 'value': value} for label, value in zip(labels, values)]
def do_options(self, labels, values)
Replace the radio button options.

Parameters
----------
labels : Sequence
    List of strings which will be visible to the user.
values : Sequence
    List of values associated with the labels that are hidden from the user.

Returns
-------
None
3.392535
5.880138
0.576948
version = check_output(('node', '--version'))
return tuple(int(x) for x in version.strip()[1:].split(b'.'))
def node_version()
Get node version.
4.67823
4.799686
0.974695
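node --version prints something like v10.15.1, so the helper strips the leading byte and converts each field to int, giving a tuple that compares naturally against a minimum version:

version = b'v10.15.1\n'  # sample check_output result
parsed = tuple(int(x) for x in version.strip()[1:].split(b'.'))
print(parsed)               # -> (10, 15, 1)
print(parsed >= (8, 0, 0))  # tuple comparison, as used against _MIN_NODE_VERSION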
ret = eventlet.spawn(self.context(self.func))
eventlet.sleep(self.seconds)
try:
    ret.wait()
except Exception:  # pylint: disable=broad-except
    traceback.print_exc()
self.thread = eventlet.spawn(self.run)
def run(self)
Invoke the function repeatedly on a timer.
4.819125
4.402593
1.094611
return not (
    # if one rectangle is left of other
    other.column_end <= self.column_start
    or self.column_end <= other.column_start
    # if one rectangle is above other
    or other.row_end <= self.row_start
    or self.row_end <= other.row_start
)
def overlap(self, other: 'Span')
Detect if two spans overlap.
3.063683
2.65827
1.15251
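The overlap test is the standard axis-aligned rectangle check: two spans are disjoint exactly when one ends at or before the other starts on either axis. A self-contained sketch with a stand-in Span:

from typing import NamedTuple

class Span(NamedTuple):
    row_start: int
    row_end: int
    column_start: int
    column_end: int

    def overlap(self, other: 'Span') -> bool:
        return not (
            other.column_end <= self.column_start
            or self.column_end <= other.column_start
            or other.row_end <= self.row_start
            or self.row_end <= other.row_start
        )

a = Span(0, 2, 0, 2)
print(a.overlap(Span(1, 3, 1, 3)))  # True: both contain cell (1, 1)
print(a.overlap(Span(0, 2, 2, 4)))  # False: spans that only touch don't overlap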
yield from itertools.product(
    range(self.row_start, self.row_end),
    range(self.column_start, self.column_end)
)
def cells(self) -> Generator[Tuple[int, int], None, None]
Generate cells in span.
2.731258
2.322806
1.175844
raise_not_number(value)
self.maximum = '{}px'.format(value)
return self
def pixels(self, value: float) -> 'Size'
Set the size in pixels.
17.255142
15.203782
1.134924
raise_not_number(value)
self.minimum = '{}px'.format(value)
return self
def min_pixels(self, value: float) -> 'Size'
Set the minimum size in pixels.
10.95634
9.966423
1.099325
raise_not_number(value)
self.maximum = '{}em'.format(value)
return self
def ems(self, value: float) -> 'Size'
Set the size in ems.
17.580931
13.711984
1.282158
raise_not_number(value)
self.minimum = '{}em'.format(value)
return self
def min_ems(self, value: float) -> 'Size'
Set the minimum size in ems.
12.752867
9.581593
1.330976
raise_not_number(value)
self.maximum = '{}fr'.format(value)
return self
def fraction(self, value: float) -> 'Size'
Set the fraction of free space to use.
16.502224
15.522873
1.063091
raise_not_number(value)
self.maximum = '{}%'.format(value)
return self
def percent(self, value: float) -> 'Size'
Set the percentage of free space to use.
13.404422
12.121926
1.1058
raise_not_number(value)
self.minimum = '{}%'.format(value)
return self
def min_percent(self, value: float) -> 'Size'
Set the minimum percentage of free space to use.
10.974638
9.752877
1.125272
raise_not_number(value)
self.gap = '{}px'.format(value)
return self
def pixels(self, value: int) -> 'Gap'
Set the margin in pixels.
11.014072
10.262473
1.073238
raise_not_number(value)
self.gap = '{}em'.format(value)
return self
def ems(self, value: int) -> 'Gap'
Set the margin in ems.
12.427358
9.544157
1.302091
raise_not_number(value)
self.gap = '{}%'.format(value)
return self
def percent(self, value) -> 'Gap'
Set the margin as a percentage.
8.886721
8.690647
1.022561
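All of the Size and Gap setters above share one fluent pattern: validate the number, format a CSS length string, and return self so calls chain. A condensed sketch (this raise_not_number is an assumption about the helper's behavior, not its actual source):

from numbers import Number

def raise_not_number(value):
    if not isinstance(value, Number):
        raise ValueError('{!r} is not a number'.format(value))

class Size:
    def __init__(self):
        self.minimum = None
        self.maximum = 'auto'

    def pixels(self, value):
        raise_not_number(value)
        self.maximum = '{}px'.format(value)
        return self

    def min_pixels(self, value):
        raise_not_number(value)
        self.minimum = '{}px'.format(value)
        return self

size = Size().min_pixels(100).pixels(300)
print(size.minimum, size.maximum)  # -> 100px 300px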
try:
    self[Span(*self._available_cell())] = component
except NoUnusedCellsError:
    span = list(self._spans.keys())[-1]
    self._spans[span] += component
def add(self, component: Union[Component, Sequence[Component]]) -> None
Add a widget to the grid in the next available cell.

Searches across columns within each row for available cells.

Parameters
----------
component : bowtie._Component
    A Bowtie widget instance.
13.806783
11.77074
1.172975
cells = set(itertools.product(range(len(self.rows)), range(len(self.columns))))
for span in self._spans:
    for cell in span.cells:
        cells.remove(cell)
if not cells:
    raise NoUnusedCellsError('No available cells')
return min(cells)
def _available_cell(self) -> Tuple[int, int]
Find the next available cell, searching first by row then by column.

First, construct a set containing all cells. Then iterate over the spans and
remove occupied cells; the minimum remaining (row, column) pair is the next
available cell.
3.930205
3.125126
1.257615
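The search is a set difference followed by min(): build every (row, column) pair, remove occupied cells, and the smallest remaining tuple (ordered by row, then column) is the next free cell. On a hypothetical 2x3 grid:

import itertools

all_cells = set(itertools.product(range(2), range(3)))
occupied = {(0, 0), (0, 1), (0, 2), (1, 0)}
print(min(all_cells - occupied))  # -> (1, 1)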
if not self.sidebar:
    raise NoSidebarError('Set `sidebar=True` if you want to use the sidebar.')

if not isinstance(component, Component):
    raise ValueError('component must be Component type, found {}'.format(component))

# self._track_widget(widget)
self._controllers.append(component)
def add_sidebar(self, component: Component) -> None
Add a widget to the sidebar.

Parameters
----------
component : bowtie._Component
    Add this component to the sidebar; it will be appended to the end.
7.010544
6.418726
1.092202
if path[0] != '/':
    path = '/' + path
for route in self._routes:
    assert path != route.path, 'Cannot use the same path twice'
self._routes.append(Route(view=view, path=path, exact=exact))
self.app.add_url_rule(
    path, path[1:],
    lambda: render_template('bowtie.html', title=self.title)
)
def add_route(self, view: View, path: str, exact: bool = True) -> None
Add a view to the app.

Parameters
----------
view : View
    The view to render at this route.
path : str
    URL path for the route; a leading '/' is added if missing.
exact : bool, optional
    Whether the path must match the URL exactly.
4.036201
4.084198
0.988248
try:
    first_event = events[0]
except IndexError:
    raise IndexError('Must subscribe to at least one event.')

if len(events) != len(set(events)):
    raise ValueError(
        'Subscribed to the same event multiple times. All events must be unique.'
    )

if len(events) > 1:
    # check if we are using any non stateful events
    for event in events:
        if isinstance(event, Pager):
            raise NotStatefulEvent('A Pager must be subscribed by itself.')
        if event.getter is None:
            raise NotStatefulEvent(
                f'{event.uuid}.on_{event.name} is not a stateful event. '
                'It must be used alone.'
            )

def decorator(func: Callable) -> Callable:
    if isinstance(first_event, Pager):
        self._pages[first_event] = func
    elif first_event.name == 'upload':
        if first_event.uuid in self._uploads:
            warnings.warn(
                ('Overwriting function "{func1}" with function '
                 '"{func2}" for upload object "{obj}".').format(
                    func1=self._uploads[first_event.uuid],
                    func2=func.__name__,
                    obj=COMPONENT_REGISTRY[first_event.uuid]
                ), Warning)
        self._uploads[first_event.uuid] = func
    else:
        for event in events:
            # need to have `events` here to maintain order of arguments
            # not sure how to deal with mypy typing errors on events so ignoring
            self._subscriptions[event].append((events, func))  # type: ignore
    return func

return decorator
def subscribe(self, *events: Union[Event, Pager]) -> Callable
Call a function in response to an event.

If more than one event is given, `func` will be given as many arguments
as there are events. If the pager calls notify, the decorated function
will be called.

Parameters
----------
*events : Event or Pager
    Bowtie events; there must be at least one.

Examples
--------
Subscribing a function to multiple events.

>>> from bowtie.control import Dropdown, Slider
>>> app = App()
>>> dd = Dropdown()
>>> slide = Slider()
>>> @app.subscribe(dd.on_change, slide.on_change)
... def callback(dd_item, slide_value):
...     pass
>>> @app.subscribe(dd.on_change)
... @app.subscribe(slide.on_change)
... def callback2(value):
...     pass

Using the pager to run a callback function.

>>> from bowtie.pager import Pager
>>> app = App()
>>> pager = Pager()
>>> @app.subscribe(pager)
... def callback():
...     pass
>>> def scheduledtask():
...     pager.notify()
4.18885
4.736882
0.884305
def wrap(func: Callable):
    self._schedules.append(Scheduler(self.app, seconds, func))
    # return the function unchanged so the decorated name remains callable
    return func

return wrap
def schedule(self, seconds: float)
Call a function periodically.

Parameters
----------
seconds : float
    Minimum interval of function calls.
func : callable
    Function to be called.
9.719892
10.981676
0.885101
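A usage sketch for schedule, following the App conventions from the subscribe examples above (the decorated function arrives as func):

app = App()

@app.schedule(2.5)
def heartbeat():
    print('tick')  # runs roughly every 2.5 seconds once the app is serving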
if node_version() < _MIN_NODE_VERSION:
    raise WebpackError(
        f'Webpack requires at least version {_MIN_NODE_VERSION} of Node, '
        f'found version {node_version()}.'
    )

packages = self._write_templates()

for filename in ['package.json', 'webpack.prod.js', 'webpack.dev.js']:
    if not (self._build_dir / filename).is_file():
        sourcefile = self._package_dir / 'src' / filename
        shutil.copy(sourcefile, self._build_dir)

if self._run(['yarn', '--ignore-engines', 'install'], notebook=notebook) > 1:
    raise YarnError('Error installing node packages')

if packages:
    # materialize the generator so repeated membership tests work
    installed = set(self._installed_packages())
    new_packages = [x for x in packages if x.split('@')[0] not in installed]
    if new_packages:
        retval = self._run(
            ['yarn', '--ignore-engines', 'add'] + new_packages,
            notebook=notebook
        )
        if retval > 1:
            raise YarnError('Error installing node packages')
        elif retval == 1:
            print('Yarn error but trying to continue build')

retval = self._run([_WEBPACK, '--config', 'webpack.dev.js'], notebook=notebook)
if retval != 0:
    raise WebpackError('Error building with webpack')
def _build(self, notebook: Optional[str] = None) -> None
Compile the Bowtie application.
3.586026
3.491732
1.027005
with (self._build_dir / 'package.json').open('r') as f:
    packages = json.load(f)
yield from packages['dependencies'].keys()
def _installed_packages(self) -> Generator[str, None, None]
Yield installed package names from `package.json`.
3.591408
3.034149
1.183662
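_installed_packages just walks the dependencies block of package.json; a standalone illustration with inline JSON standing in for the file:

import json

pkg = json.loads('{"dependencies": {"react": "^16.8.0", "antd": "^3.0.0"}}')
print(list(pkg['dependencies'].keys()))  # -> ['react', 'antd']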
src = self._build_dir / 'bowtiejs'
os.makedirs(src, exist_ok=True)
return src
def _create_jspath(self) -> Path
Create the source directory for the build.
12.429322
7.211916
1.723442
if notebook is None:
    return Popen(command, cwd=self._build_dir).wait()

cmd = Popen(command, cwd=self._build_dir, stdout=PIPE, stderr=STDOUT)
while True:
    line = cmd.stdout.readline()
    if line == b'' and cmd.poll() is not None:
        return cmd.poll()
    print(line.decode('utf-8'), end='')
raise Exception()
def _run(self, command: List[str], notebook: Optional[str] = None) -> int
Run a command in a subprocess, streaming its output when invoked from a notebook.
2.499939
2.392695
1.044821
try:
    return self.credentials[username] == password
except KeyError:
    return False
def _check_auth(self, username: str, password: str) -> bool
Check if a username/password combination is valid.
6.695693
3.982414
1.681315
auth = request.authorization
if not auth or not self._check_auth(auth.username, auth.password):
    return Response(
        'Could not verify your access level for that URL.\n'
        'You have to login with proper credentials', 401,
        {'WWW-Authenticate': 'Basic realm="Login Required"'}
    )
session['logged_in'] = auth.username
# pylint wants this return statement
return None
def before_request(self) -> Optional[Response]
Determine if a user is allowed to view this route.
2.64777
2.5488
1.03883
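Together, _check_auth and before_request implement plain HTTP basic auth: Flask parses the Authorization header into request.authorization, the stored credentials are consulted, and a 401 with a WWW-Authenticate header prompts the browser to retry. A minimal standalone Flask reproduction (the names and CREDENTIALS dict are illustrative, not bowtie's API):

from flask import Flask, Response, request, session

flask_app = Flask(__name__)
flask_app.secret_key = 'dev'  # sessions require a secret key
CREDENTIALS = {'alice': 's3cret'}

@flask_app.before_request
def gate():
    auth = request.authorization
    if not auth or CREDENTIALS.get(auth.username) != auth.password:
        return Response(
            'Could not verify your access level for that URL.\n'
            'You have to login with proper credentials', 401,
            {'WWW-Authenticate': 'Basic realm="Login Required"'}
        )
    session['logged_in'] = auth.username
    return None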