prompt (stringlengths 19–1.03M) | completion (stringlengths 4–2.12k) | api (stringlengths 8–90) |
---|---|---|
import copy
import importlib
import itertools
import os
import sys
import warnings
import numpy as np
import pandas as pd
try:
import ixmp
has_ix = True
except ImportError:
has_ix = False
from pyam import plotting
from pyam.logger import logger
from pyam.run_control import run_control
from pyam.utils import (
write_sheet,
read_ix,
read_files,
read_pandas,
format_data,
pattern_match,
years_match,
isstr,
islistable,
cast_years_to_int,
META_IDX,
YEAR_IDX,
REGION_IDX,
IAMC_IDX,
SORT_IDX,
LONG_IDX,
)
from pyam.timeseries import fill_series
class IamDataFrame(object):
"""This class is a wrapper for dataframes following the IAMC format.
It provides a number of diagnostic features (including validation of data,
completeness of variables provided) as well as a number of visualization
and plotting tools.
"""
def __init__(self, data, **kwargs):
"""Initialize an instance of an IamDataFrame
Parameters
----------
data: ixmp.TimeSeries, ixmp.Scenario, pd.DataFrame or data file
an instance of an TimeSeries or Scenario (requires `ixmp`),
or pd.DataFrame or data file with IAMC-format data columns.
A pd.DataFrame can have the required data as columns or index.
Special support is provided for data files downloaded directly from
IIASA SSP and RCP databases. If you run into any problems loading
data, please make an issue at:
https://github.com/IAMconsortium/pyam/issues
"""
# import data from pd.DataFrame or read from source
if isinstance(data, pd.DataFrame):
self.data = format_data(data.copy())
elif has_ix and isinstance(data, ixmp.TimeSeries):
self.data = read_ix(data, **kwargs)
else:
self.data = read_files(data, **kwargs)
# cast year column to `int` if necessary
if not self.data.year.dtype == 'int64':
self.data.year = cast_years_to_int(self.data.year)
# define a dataframe for categorization and other metadata indicators
self.meta = self.data[META_IDX].drop_duplicates().set_index(META_IDX)
self.reset_exclude()
# execute user-defined code
if 'exec' in run_control():
self._execute_run_control()
def __getitem__(self, key):
_key_check = [key] if isstr(key) else key
if set(_key_check).issubset(self.meta.columns):
return self.meta.__getitem__(key)
else:
return self.data.__getitem__(key)
def __setitem__(self, key, value):
_key_check = [key] if isstr(key) else key
if set(_key_check).issubset(self.meta.columns):
return self.meta.__setitem__(key, value)
else:
return self.data.__setitem__(key, value)
def __len__(self):
return self.data.__len__()
def _execute_run_control(self):
for module_block in run_control()['exec']:
fname = module_block['file']
functions = module_block['functions']
dirname = os.path.dirname(fname)
if dirname:
sys.path.append(dirname)
module = os.path.basename(fname).split('.')[0]
mod = importlib.import_module(module)
for func in functions:
f = getattr(mod, func)
f(self)
def head(self, *args, **kwargs):
"""Identical to pd.DataFrame.head() operating on data"""
return self.data.head(*args, **kwargs)
def tail(self, *args, **kwargs):
"""Identical to pd.DataFrame.tail() operating on data"""
return self.data.tail(*args, **kwargs)
def models(self):
"""Get a list of models"""
return pd.Series(self.meta.index.levels[0])
def scenarios(self):
"""Get a list of scenarios"""
return pd.Series(self.meta.index.levels[1])
def regions(self):
"""Get a list of regions"""
return pd.Series(self.data['region'].unique(), name='region')
def variables(self, include_units=False):
"""Get a list of variables
Parameters
----------
include_units: boolean, default False
include the units
"""
if include_units:
return self.data[['variable', 'unit']].drop_duplicates()\
.reset_index(drop=True).sort_values('variable')
else:
return pd.Series(self.data.variable.unique(), name='variable')
def append(self, other, ignore_meta_conflict=False, inplace=False,
**kwargs):
"""Append any castable object to this IamDataFrame.
Columns in `other.meta` that are not in `self.meta` are always merged;
duplicate model-scenario-region-variable-unit-year rows raise a ValueError.
Parameters
----------
other: pyam.IamDataFrame, ixmp.TimeSeries, ixmp.Scenario,
pd.DataFrame or data file
An IamDataFrame, TimeSeries or Scenario (requires `ixmp`),
pandas.DataFrame or data file with IAMC-format data columns
ignore_meta_conflict : bool, default False
If False and `other` is an IamDataFrame, raise an error if
any meta columns present in `self` and `other` are not identical.
inplace : bool, default False
If True, do operation inplace and return None
"""
ret = copy.deepcopy(self) if not inplace else self
if not isinstance(other, IamDataFrame):
other = IamDataFrame(other, **kwargs)
ignore_meta_conflict = True
diff = other.meta.index.difference(ret.meta.index)
intersect = other.meta.index.intersection(ret.meta.index)
# merge other.meta columns not in self.meta for existing scenarios
if not intersect.empty:
# if not ignored, check that overlapping meta dataframes are equal
if not ignore_meta_conflict:
cols = [i for i in other.meta.columns if i in ret.meta.columns]
if not ret.meta.loc[intersect, cols].equals(
other.meta.loc[intersect, cols]):
conflict_idx = (
pd.concat([ret.meta.loc[intersect, cols],
other.meta.loc[intersect, cols]]
).drop_duplicates()
.index.drop_duplicates()
)
msg = 'conflict in `meta` for scenarios {}'.format(
[i for i in pd.DataFrame(index=conflict_idx).index])
raise ValueError(msg)
cols = [i for i in other.meta.columns if i not in ret.meta.columns]
_meta = other.meta.loc[intersect, cols]
ret.meta = ret.meta.merge(_meta, how='outer',
left_index=True, right_index=True)
# join other.meta for new scenarios
if not diff.empty:
# the `sort` keyword of `pd.DataFrame.append()` is not supported prior to pandas 0.23
sort_kwarg = {} if int(pd.__version__.split('.')[1]) < 23 \
else dict(sort=False)
ret.meta = ret.meta.append(other.meta.loc[diff, :], **sort_kwarg)
# append other.data (verify integrity for no duplicates)
ret.data.set_index(LONG_IDX, inplace=True)
other.data.set_index(LONG_IDX, inplace=True)
ret.data = ret.data.append(other.data, verify_integrity=True)\
.reset_index(drop=False)
if not inplace:
return ret
def pivot_table(self, index, columns, values='value',
aggfunc='count', fill_value=None, style=None):
"""Returns a pivot table
Parameters
----------
index: str or list of strings
rows for Pivot table
columns: str or list of strings
columns for Pivot table
values: str, default 'value'
dataframe column to aggregate or count
aggfunc: str or function, default 'count'
function used for aggregation,
accepts 'count', 'mean', and 'sum'
fill_value: scalar, default None
value to replace missing values with
style: str, default None
output style for pivot table formatting
accepts 'highlight_not_max', 'heatmap'
"""
index = [index] if isstr(index) else index
columns = [columns] if isstr(columns) else columns
df = self.data
# allow 'aggfunc' to be passed as string for easier user interface
if isstr(aggfunc):
if aggfunc == 'count':
df = self.data.groupby(index + columns, as_index=False).count()
fill_value = 0
elif aggfunc == 'mean':
df = self.data.groupby(index + columns, as_index=False).mean()\
.round(2)
aggfunc = np.sum
fill_value = 0 if style == 'heatmap' else ""
elif aggfunc == 'sum':
aggfunc = np.sum
fill_value = 0 if style == 'heatmap' else ""
df = df.pivot_table(values=values, index=index, columns=columns,
aggfunc=aggfunc, fill_value=fill_value)
return df
def interpolate(self, year):
"""Interpolate missing values in timeseries (linear interpolation)
Parameters
----------
year: int
year to be interpolated
"""
df = self.pivot_table(index=IAMC_IDX, columns=['year'],
values='value', aggfunc=np.sum)
# drop year-rows where values are already defined
if year in df.columns:
df = df[np.isnan(df[year])]
fill_values = df.apply(fill_series,
raw=False, axis=1, year=year)
fill_values = fill_values.dropna().reset_index()
fill_values = fill_values.rename(columns={0: "value"})
fill_values['year'] = year
self.data = self.data.append(fill_values, ignore_index=True)
def as_pandas(self, with_metadata=False):
"""Return this as a pd.DataFrame
Parameters
----------
with_metadata : bool, default False
if True, join data with existing metadata
"""
df = self.data
if with_metadata:
df = (df
.set_index(META_IDX)
.join(self.meta)
.reset_index()
)
return df
def timeseries(self):
"""Returns a dataframe in the standard IAMC format
"""
return (
self.data
.pivot_table(index=IAMC_IDX, columns='year')
.value # column name
.rename_axis(None, axis=1)
)
def reset_exclude(self):
"""Reset exclusion assignment for all scenarios to `exclude: False`"""
self.meta['exclude'] = False
def set_meta(self, meta, name=None, index=None):
"""Add metadata columns as pd.Series, list or value (int/float/str)
Parameters
----------
meta: pd.Series, list, int, float or str
column to be added to metadata
(by `['model', 'scenario']` index if possible)
name: str, optional
meta column name (defaults to meta pd.Series.name);
either a meta.name or the name kwarg must be defined
index: pyam.IamDataFrame, pd.DataFrame or pd.MultiIndex, optional
index to be used for setting meta column (`['model', 'scenario']`)
"""
if (name or (hasattr(meta, 'name') and meta.name)) in [None, False]:
raise ValueError('Must pass a name or use a named pd.Series')
# check if meta has a valid index and use it for further workflow
if hasattr(meta, 'index') and hasattr(meta.index, 'names') \
and set(META_IDX).issubset(meta.index.names):
index = meta.index
# if no valid index is provided, add meta as new column `name` and exit
if index is None:
self.meta[name] = list(meta) if islistable(meta) else meta
return # EXIT FUNCTION
# use meta.index if index arg is an IamDataFrame
if isinstance(index, IamDataFrame):
index = index.meta.index
# turn dataframe to index if index arg is a DataFrame
if isinstance(index, pd.DataFrame):
index = index.set_index(META_IDX).index
if not isinstance(index, pd.MultiIndex):
raise ValueError('index cannot be coerced to pd.MultiIndex')
# raise error if index is not unique
if index.duplicated().any():
raise ValueError("non-unique ['model', 'scenario'] index!")
# create pd.Series from meta, index and name if provided
meta = pd.Series(data=meta, index=index, name=name)
meta.name = name = name or meta.name
# reduce index dimensions to model-scenario only
meta = (
meta
.reset_index()
.reindex(columns=META_IDX + [name])
.set_index(META_IDX)
)
# check if trying to add model-scenario index not existing in self
diff = meta.index.difference(self.meta.index)
if not diff.empty:
error = "adding metadata for non-existing scenarios '{}'!"
raise ValueError(error.format(diff))
self._new_meta_column(name)
self.meta[name] = meta[name].combine_first(self.meta[name])
def categorize(self, name, value, criteria,
color=None, marker=None, linestyle=None):
"""Assign scenarios to a category according to specific criteria
or display the category assignment
Parameters
----------
name: str
category column name
value: str
category identifier
criteria: dict
dictionary with variables mapped to applicable checks
('up' and 'lo' for respective bounds, 'year' for years - optional)
color: str
assign a color to this category for plotting
marker: str
assign a marker to this category for plotting
linestyle: str
assign a linestyle to this category for plotting
"""
# add plotting run control
for kind, arg in [('color', color), ('marker', marker),
('linestyle', linestyle)]:
if arg:
run_control().update({kind: {name: {value: arg}}})
# find all data that matches categorization
rows = _apply_criteria(self.data, criteria,
in_range=True, return_test='all')
idx = _meta_idx(rows)
if len(idx) == 0:
logger().info("No scenarios satisfy the criteria")
return # EXIT FUNCTION
# update metadata dataframe
self._new_meta_column(name)
self.meta.loc[idx, name] = value
msg = '{} scenario{} categorized as `{}: {}`'
logger().info(msg.format(len(idx), '' if len(idx) == 1 else 's',
name, value))
def _new_meta_column(self, name):
"""Add a column to meta if it doesn't exist, set to value `np.nan`"""
if name is None:
raise ValueError('cannot add a meta column `{}`'.format(name))
if name not in self.meta:
self.meta[name] = np.nan
def require_variable(self, variable, unit=None, year=None,
exclude_on_fail=False):
"""Check whether all scenarios have a required variable
Parameters
----------
variable: str
required variable
unit: str, default None
name of unit (optional)
year: int or list of int, default None
year(s) (optional)
exclude_on_fail: bool, default False
flag scenarios missing the required variable as `exclude: True`
"""
criteria = {'variable': variable}
if unit:
criteria.update({'unit': unit})
if year:
criteria.update({'year': year})
keep = _apply_filters(self.data, self.meta, criteria)
idx = self.meta.index.difference(_meta_idx(self.data[keep]))
n = len(idx)
if n == 0:
logger().info('All scenarios have the required variable `{}`'
.format(variable))
return
msg = '{} scenario does not include required variable `{}`' if n == 1 \
else '{} scenarios do not include required variable `{}`'
if exclude_on_fail:
self.meta.loc[idx, 'exclude'] = True
msg += ', marked as `exclude: True` in metadata'
logger().info(msg.format(n, variable))
return pd.DataFrame(index=idx).reset_index()
def validate(self, criteria={}, exclude_on_fail=False):
"""Validate scenarios using criteria on timeseries values
Parameters
----------
criteria: dict
dictionary with variable keys and check values
('up' and 'lo' for respective bounds, 'year' for years)
exclude_on_fail: bool, default False
flag scenarios failing validation as `exclude: True`
"""
df = _apply_criteria(self.data, criteria, in_range=False)
if not df.empty:
msg = '{} of {} data points do not satisfy the criteria'
logger().info(msg.format(len(df), len(self.data)))
if exclude_on_fail and len(df) > 0:
self._exclude_on_fail(df)
return df
def rename(self, mapping, inplace=False):
"""Rename and aggregate column entries using `groupby.sum()` on values.
When renaming models or scenarios, the uniqueness of the index must be
maintained, and the function will raise an error otherwise.
Parameters
----------
mapping: dict
for each column where entries should be renamed, provide current
name and target name
{<column name>: {<current_name_1>: <target_name_1>,
<current_name_2>: <target_name_2>}}
inplace: bool, default False
if True, do operation inplace and return None
"""
ret = copy.deepcopy(self) if not inplace else self
for col, _mapping in mapping.items():
if col in ['model', 'scenario']:
index = pd.DataFrame(index=ret.meta.index).reset_index()
index.loc[:, col] = index.loc[:, col].replace(_mapping)
if index.duplicated().any():
raise ValueError('Renaming to non-unique {} index!'
.format(col))
ret.meta.index = index.set_index(META_IDX).index
elif col not in ['region', 'variable', 'unit']:
raise ValueError('Renaming by {} not supported!'.format(col))
ret.data.loc[:, col] = ret.data.loc[:, col].replace(_mapping)
ret.data = ret.data.groupby(LONG_IDX).sum().reset_index()
if not inplace:
return ret
def convert_unit(self, conversion_mapping, inplace=False):
"""Converts units based on provided unit conversion factors
Parameters
----------
conversion_mapping: dict
for each unit for which a conversion should be carried out,
provide current unit and target unit and conversion factor
{<current unit>: [<target unit>, <conversion factor>]}
inplace: bool, default False
if True, do operation inplace and return None
"""
ret = copy.deepcopy(self) if not inplace else self
for current_unit, (new_unit, factor) in conversion_mapping.items():
factor = pd.to_numeric(factor)
where = ret.data['unit'] == current_unit
ret.data.loc[where, 'value'] *= factor
ret.data.loc[where, 'unit'] = new_unit
if not inplace:
return ret
def check_aggregate(self, variable, components=None, units=None,
exclude_on_fail=False, multiplier=1, **kwargs):
"""Check whether the timeseries data match the aggregation
of components or sub-categories
Parameters
----------
variable: str
variable to be checked for matching aggregation of sub-categories
components: list of str, default None
list of variables, defaults to all sub-categories of `variable`
units: str or list of str, default None
filter variable and components for given unit(s)
exclude_on_fail: boolean, default False
flag scenarios failing validation as `exclude: True`
multiplier: number, default 1
factor when comparing variable and sum of components
kwargs: passed to `np.isclose()`
"""
# default components to all variables one level below `variable`
if components is None:
components = self.filter(variable='{}|*'.format(variable),
level=0).variables()
if not len(components):
msg = '{} - cannot check aggregate because it has no components'
logger().info(msg.format(variable))
return
# filter and groupby data, use `pd.Series.align` for matching index
df_variable, df_components = (
_aggregate_by_variables(self.data, variable, units)
.align(_aggregate_by_variables(self.data, components, units))
)
# use `np.isclose` for checking match
diff = df_variable[~np.isclose(df_variable, multiplier * df_components,
**kwargs)]
if len(diff):
msg = '{} - {} of {} data points are not aggregates of components'
logger().info(msg.format(variable, len(diff), len(df_variable)))
if exclude_on_fail:
self._exclude_on_fail(diff.index.droplevel([2, 3]))
diff = pd.concat([diff], keys=[variable], names=['variable'])
return diff.unstack().rename_axis(None, axis=1)
def check_aggregate_regions(self, variable, region='World',
components=None, units=None,
exclude_on_fail=False, **kwargs):
"""Check whether the region timeseries data match the aggregation
of components
Parameters
----------
variable: str
variable to be checked for matching aggregation of components data
region: str
region to be checked for matching aggregation of components data
components: list of str, default None
list of regions, defaults to all regions except region
units: str or list of str, default None
filter variable and components for given unit(s)
exclude_on_fail: boolean, default False
flag scenarios failing validation as `exclude: True`
kwargs: passed to `np.isclose()`
"""
var_df = self.filter(variable=variable, level=0)
if components is None:
components = var_df.filter(region=region, keep=False).regions()
if not len(components):
msg = (
'{} - cannot check regional aggregate because it has no '
'regional components'
)
logger().info(msg.format(variable))
return None
# filter and groupby data, use `pd.Series.align` for matching index
df_region, df_components = (
_aggregate_by_regions(var_df.data, region, units)
.align(_aggregate_by_regions(var_df.data, components, units))
)
df_components.index = df_components.index.droplevel(
"variable"
)
# Add in variables that are included in region totals but which
# aren't included in the regional components.
# For example, if we are looking at World and Emissions|BC, we need
# to add aviation and shipping to the sum of Emissions|BC for each
# of World's regional components to do a valid check.
different_region = components[0]
variable_components = self.filter(
variable="{}|*".format(variable)
).variables()
for var_to_add in variable_components:
var_rows = self.data.variable == var_to_add
region_rows = self.data.region == different_region
var_has_regional_info = (var_rows & region_rows).any()
if not var_has_regional_info:
df_var_to_add = self.filter(
region=region, variable=var_to_add
).data.groupby(REGION_IDX).sum()['value']
df_var_to_add.index = df_var_to_add.index.droplevel("variable")
if len(df_var_to_add):
df_components = df_components.add(df_var_to_add,
fill_value=0)
df_components = pd.concat([df_components], keys=[variable],
names=['variable'])
# use `np.isclose` for checking match
diff = df_region[~np.isclose(df_region, df_components, **kwargs)]
if len(diff):
msg = (
'{} - {} of {} data points are not aggregates of regional '
'components'
)
logger().info(msg.format(variable, len(diff), len(df_region)))
if exclude_on_fail:
self._exclude_on_fail(diff.index.droplevel([2, 3]))
diff = pd.concat([diff], keys=[region], names=['region'])
return diff.unstack().rename_axis(None, axis=1)
def check_internal_consistency(self, **kwargs):
"""Check whether the database is internally consistent
We check that all variables are equal to the sum of their sectoral
components and that all the regions add up to the World total. If
the check is passed, None is returned, otherwise a dictionary of
inconsistent variables is returned.
Note: at the moment, this method's regional checking is limited to
checking that all the regions sum to the World region. We cannot
make this more automatic unless we start to store how the regions
relate, see
[this issue](https://github.com/IAMconsortium/pyam/issues/106).
Parameters
----------
kwargs: passed to `np.isclose()`
"""
inconsistent_vars = {}
for variable in self.variables():
diff_agg = self.check_aggregate(variable, **kwargs)
if diff_agg is not None:
inconsistent_vars[variable + "-aggregate"] = diff_agg
diff_regional = self.check_aggregate_regions(variable, **kwargs)
if diff_regional is not None:
inconsistent_vars[variable + "-regional"] = diff_regional
return inconsistent_vars if inconsistent_vars else None
def _exclude_on_fail(self, df):
"""Assign a selection of scenarios as `exclude: True` in meta"""
idx = df if isinstance(df, pd.MultiIndex) else _meta_idx(df)
self.meta.loc[idx, 'exclude'] = True
logger().info('{} non-valid scenario{} will be excluded'
.format(len(idx), '' if len(idx) == 1 else 's'))
def filter(self, filters=None, keep=True, inplace=False, **kwargs):
"""Return a filtered IamDataFrame (i.e., a subset of current data)
Parameters
----------
keep: bool, default True
keep all scenarios satisfying the filters (if True) or the inverse
inplace: bool, default False
if True, do operation inplace and return None
filters by kwargs or dict (deprecated):
The following columns are available for filtering:
- metadata columns: filter by category assignment in metadata
- 'model', 'scenario', 'region', 'variable', 'unit':
string or list of strings, where ``*`` can be used as a wildcard
- 'level': the maximum "depth" of IAM variables (number of '|')
(excluding the strings given in the 'variable' argument)
- 'year': takes an integer, a list of integers or a range
note that the last year of a range is not included,
so ``range(2010,2015)`` is interpreted as ``[2010, ..., 2014]``
- 'regexp=True' overrides pseudo-regexp syntax in `pattern_match()`
"""
if filters is not None:
warnings.warn(
'`filters` keyword argument in filter() is deprecated and will be removed in the next release')
kwargs.update(filters)
_keep = _apply_filters(self.data, self.meta, kwargs)
_keep = _keep if keep else ~_keep
ret = copy.deepcopy(self) if not inplace else self
ret.data = ret.data[_keep]
idx = pd.MultiIndex.from_tuples(
pd.unique(list(zip(ret.data['model'], ret.data['scenario']))),
names=('model', 'scenario')
)
if len(idx) == 0:
logger().warning('Filtered IamDataFrame is empty!')
ret.meta = ret.meta.loc[idx]
if not inplace:
return ret
def col_apply(self, col, func, *args, **kwargs):
"""Apply a function to a column
Parameters
----------
col: string
column in either data or metadata
func: functional
function to apply
"""
if col in self.data:
self.data[col] = self.data[col].apply(func, *args, **kwargs)
else:
self.meta[col] = self.meta[col].apply(func, *args, **kwargs)
def _to_file_format(self):
"""Return a dataframe suitable for writing to a file"""
df = self.timeseries().reset_index()
df = df.rename(columns={c: str(c).title() for c in df.columns})
return df
def to_csv(self, path, index=False, **kwargs):
"""Write data to a csv file
Parameters
----------
index: boolean, default False
write row names (index)
"""
self._to_file_format().to_csv(path, index=False, **kwargs)
def to_excel(self, path=None, writer=None, sheet_name='data', index=False,
**kwargs):
"""Write timeseries data to Excel using the IAMC template convention
(wrapper for `pd.DataFrame.to_excel()`)
Parameters
----------
path: string, optional
file path for a new Excel file (use either `path` or `writer`)
writer: ExcelWriter, optional
existing ExcelWriter object (use either `path` or `writer`)
sheet_name: string, default 'data'
name of the sheet that will contain the (filtered) IamDataFrame
index: boolean, default False
write row names (index)
"""
if (path is None and writer is None) or \
(path is not None and writer is not None):
raise ValueError('Exactly one of path or writer must be provided')
if writer is None:
writer = pd.ExcelWriter(path)
self._to_file_format().to_excel(writer, sheet_name=sheet_name,
index=index, **kwargs)
def export_metadata(self, path):
"""Export metadata to Excel
Parameters
----------
path: string
path/filename for xlsx file of metadata export
"""
writer = | pd.ExcelWriter(path) | pandas.ExcelWriter |
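# A minimal usage sketch of the IamDataFrame class defined above (synthetic
# two-row example; the model/scenario names are made up). It assumes the
# standard IAMC long-format columns model/scenario/region/variable/unit/year/
# value and the method signatures shown here; newer pyam releases may differ.
import pandas as pd
import pyam
_demo = pd.DataFrame(
    [['MODEL_A', 'SCEN_1', 'World', 'Primary Energy', 'EJ/y', 2010, 500.0],
     ['MODEL_A', 'SCEN_1', 'World', 'Primary Energy', 'EJ/y', 2020, 550.0]],
    columns=['model', 'scenario', 'region', 'variable', 'unit', 'year', 'value'])
idf = pyam.IamDataFrame(_demo)
idf.interpolate(2015)                                    # linear fill for 2015
idf.validate(criteria={'Primary Energy': {'up': 600}})   # no rows should fail
idf.convert_unit({'EJ/y': ['EJ/yr', 1.0]}, inplace=True)
print(idf.timeseries())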
# PopulationSim
# See full license in LICENSE.txt.
import logging
import os
import pandas as pd
import numpy as np
from activitysim.core import inject
from .helper import get_control_table
from .helper import get_weight_table
from activitysim.core.config import setting
logger = logging.getLogger(__name__)
AS_CSV = False
def out_table(table_name, df):
table_name = "summary_%s" % table_name
if AS_CSV:
file_name = "%s.csv" % table_name
output_dir = inject.get_injectable('output_dir')
file_path = os.path.join(output_dir, file_name)
logger.info("writing output file %s" % file_path)
write_index = df.index.name is not None
df.to_csv(file_path, index=write_index)
else:
logger.info("saving summary table %s" % table_name)
repop = inject.get_step_arg('repop', default=False)
inject.add_table(table_name, df, replace=repop)
def summarize_geography(geography, weight_col, hh_id_col,
crosswalk_df, results_df, incidence_df):
# controls_table for current geography level
controls_df = get_control_table(geography)
control_names = controls_df.columns.tolist()
# only want zones from crosswalk for which non-zero control rows exist
zone_ids = crosswalk_df[geography].unique()
zone_ids = controls_df.index.intersection(zone_ids)
results = []
controls = []
for zone_id in zone_ids:
zone_controls = controls_df.loc[zone_id].tolist()
controls.append(zone_controls)
zone_row_map = results_df[geography] == zone_id
zone_weights = results_df[zone_row_map]
incidence = incidence_df.loc[zone_weights[hh_id_col]]
weights = zone_weights[weight_col].tolist()
x = [(incidence[c] * weights).sum() for c in control_names]
results.append(x)
controls_df = pd.DataFrame(
data=np.asanyarray(controls),
columns=['%s_control' % c for c in control_names],
index=zone_ids
)
summary_df = pd.DataFrame(
data=np.asanyarray(results),
columns=['%s_result' % c for c in control_names],
index=zone_ids
)
dif_df = pd.DataFrame(
data=np.asanyarray(results) - np.asanyarray(controls),
columns=['%s_diff' % c for c in control_names],
index=zone_ids
)
summary_df = pd.concat([controls_df, summary_df, dif_df], axis=1)
summary_cols = summary_df.columns.tolist()
summary_df['geography'] = geography
summary_df['id'] = summary_df.index
summary_df.index = summary_df['geography'] + '_' + summary_df['id'].astype(str)
summary_df = summary_df[['geography', 'id'] + summary_cols]
return summary_df
def meta_summary(incidence_df, control_spec, top_geography, top_id, sub_geographies, hh_id_col):
if setting('NO_INTEGERIZATION_EVER', False):
seed_weight_cols = ['preliminary_balanced_weight', 'balanced_weight']
sub_weight_cols = ['balanced_weight']
else:
seed_weight_cols = ['preliminary_balanced_weight', 'balanced_weight', 'integer_weight']
sub_weight_cols = ['balanced_weight', 'integer_weight']
incidence_df = incidence_df[incidence_df[top_geography] == top_id]
control_cols = control_spec.target.values
controls_df = get_control_table(top_geography)
# controls for this geography as series
controls = controls_df[control_cols].loc[top_id]
incidence = incidence_df[control_cols]
summary = | pd.DataFrame(index=control_cols) | pandas.DataFrame |
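# A hypothetical mini-example (synthetic numbers, not the original pipeline
# inputs) illustrating the core aggregation in summarize_geography() above:
# weighted incidence counts per control column compared against the controls.
import numpy as np
import pandas as pd
incidence = pd.DataFrame({'hh_size_1': [1, 0, 0], 'hh_size_2': [0, 1, 1]},
                         index=[101, 102, 103])   # household ids
weights = [2.0, 1.5, 0.5]                         # balanced weights
result = [(incidence[c] * weights).sum() for c in incidence.columns]
controls = [2.0, 2.0]
diff = np.asanyarray(result) - np.asanyarray(controls)
print(pd.DataFrame([controls, result, diff],
                   index=['control', 'result', 'diff'],
                   columns=incidence.columns))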
import numpy as np
import pandas as pd
import xarray as xr
import sys
from paths import results_path
rp_usa = results_path + '/USA'
rp_usa2 = rp_usa + '/results_GWA2'
rp_bra = results_path + '/BRA'
rp_bra2 = rp_bra + '/results_GWA2'
rp_nz = results_path + '/NZ'
rp_zaf = results_path + '/ZAF'
## Load USA data
# load size indicator
nums_usa = pd.read_csv(rp_usa + '/number_grid_points.csv',index_col=0).drop([2,59]) # drop 2,59 - what is it? new england?
nums_usa['scale'] = 'state'
nums_usa.loc[nums_usa.region=='USA','scale'] = 'country'
nums_usa['country'] = 'USA'
# read results
results_USA_GWA2 = pd.read_csv(rp_usa2 + '/stats_GWA2.csv',index_col=0)
results_USA_GWA3 = pd.read_csv(rp_usa + '/stats_GWA3.csv',index_col=0)
results_USA_tidy = pd.concat([results_USA_GWA2,results_USA_GWA3],axis=0).rename({'location':'region'},axis=1)
results_USA_tidy['country'] = 'USA'
results_USA_tidy.scale = results_USA_tidy.scale.replace({'subsystem':'state'})
# filter regions with bad observed time series (filtered by visual inspection)
bad_states = ['CT','MA','IL','RI','VT','OH','NJ','DE','NC']
bad_states = ['CT','MA','IL','RI','VT','OH','NJ','DE','NC','NE','MI','WI','TN','ND','SD','AK']
bad_states = ['AK','CT','DE','IL','NC','SD','TN']
bad_regions = ['NewEng','ESC','PacNon']
# filter bad regions
results_USA_tidy = results_USA_tidy.set_index(['country','region']).drop(list(zip(['USA']*len(bad_regions),bad_regions)),axis=0).reset_index()
# filter bad states
results_USA_tidy = results_USA_tidy.set_index(['country','region']).drop(list(zip(['USA']*len(bad_states),bad_states)),axis=0).reset_index()
## Load BRA data
# load size indicator
nums_bra = pd.read_csv(rp_bra + '/number_grid_points.csv',index_col=0)
nums_bra['country'] = 'BRA'
# read results
results_BRA_GWA2 = | pd.read_csv(rp_bra2 + '/stats_GWA2.csv',index_col=0) | pandas.read_csv |
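# A small illustration (synthetic data, not the actual stats files) of the
# MultiIndex drop pattern used above to filter out flagged states/regions.
import pandas as pd
stats = pd.DataFrame({'country': ['USA', 'USA', 'USA'],
                      'region': ['CA', 'CT', 'TX'],
                      'cor': [0.9, 0.4, 0.85]})
bad = ['CT']
stats = (stats.set_index(['country', 'region'])
         .drop(list(zip(['USA'] * len(bad), bad)), axis=0)
         .reset_index())
print(stats)   # the CT row is gone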
import logging
import pickle
import pandas as pd
import numpy as np
from pytest import mark
try:
import implicit
have_implicit = True
except ImportError:
have_implicit = False
import lenskit.util.test as lktu
from lenskit.algorithms.implicit import ALS, BPR
from lenskit import util
_log = logging.getLogger(__name__)
simple_df = pd.DataFrame({'item': [1, 1, 2, 3],
'user': [10, 12, 10, 13],
'rating': [4.0, 3.0, 5.0, 2.0]})
@mark.slow
@mark.skipif(not have_implicit, reason='implicit not installed')
def test_implicit_als_train_rec():
algo = ALS(25)
assert algo.factors == 25
ratings = lktu.ml_test.ratings
ret = algo.fit(ratings)
assert ret is algo
recs = algo.recommend(100, n=20)
assert len(recs) == 20
_log.info('serializing implicit model')
mod = pickle.dumps(algo)
_log.info('serialized to %d bytes', len(mod))
a2 = pickle.loads(mod)
r2 = a2.recommend(100, n=20)
assert len(r2) == 20
assert all(r2 == recs)
@mark.slow
@mark.eval
@mark.skipif(not have_implicit, reason='implicit not installed')
@mark.skipif(not lktu.ml100k.available, reason='ML100K data not present')
def test_implicit_als_batch_accuracy():
import lenskit.crossfold as xf
from lenskit import batch, topn
ratings = lktu.ml100k.ratings
algo_t = ALS(25)
def eval(train, test):
_log.info('running training')
train['rating'] = train.rating.astype(np.float_)
algo = util.clone(algo_t)
algo.fit(train)
users = test.user.unique()
_log.info('testing %d users', len(users))
candidates = topn.UnratedCandidates(train)
recs = batch.recommend(algo, users, 100, candidates)
return recs
folds = list(xf.partition_users(ratings, 5, xf.SampleFrac(0.2)))
test = | pd.concat(f.test for f in folds) | pandas.concat |
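# A quick interactive check (outside the pytest suite above) of the implicit
# ALS wrapper on a tiny simple_df-style frame; assumes the `implicit` package
# is installed, as in the skipif guards above.
import pandas as pd
from lenskit.algorithms.implicit import ALS
ratings = pd.DataFrame({'item': [1, 1, 2, 3],
                        'user': [10, 12, 10, 13],
                        'rating': [4.0, 3.0, 5.0, 2.0]})
algo = ALS(10)
algo.fit(ratings)
print(algo.recommend(10, n=2))   # user 10 has only item 3 unrated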
#---------------------------------------------------------------------
# File Name : LogisticRegression2.py
# Author : <NAME>.
# Description : Implementing Logistic Regression
# Date: : 12 Nov. 2020
# Version : V1.0
# Ref No : DS_Code_P_K07
#---------------------------------------------------------------------
import pandas as pd
import numpy as np
import seaborn as sb
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split # train and test split (sklearn.cross_validation has been removed)
from sklearn import metrics
from sklearn import preprocessing
from sklearn.metrics import classification_report
# loading claimants data
claimants = pd.read_csv("claimants.csv")
claimants.head(10)
# Droping first column
claimants.drop(["CASENUM"],inplace=True,axis = 1)
#cat_cols = ["ATTORNEY","CLMSEX","SEATBELT","CLMINSUR"]
#cont_cols = ["CLMAGE","LOSS"]
# Getting the barplot for the categorical columns
sb.countplot(x="ATTORNEY",data=claimants,palette="hls")
pd.crosstab(claimants.ATTORNEY,claimants.CLMINSUR).plot(kind="bar")
sb.countplot(x="CLMSEX",data=claimants,palette="hls")
pd.crosstab(claimants.CLMSEX,claimants.CLMINSUR).plot(kind="bar")
sb.countplot(x="SEATBELT",data=claimants,palette="hls")
pd.crosstab(claimants.SEATBELT,claimants.CLMINSUR).plot(kind="bar")
sb.countplot(x="CLMINSUR",data=claimants,palette="hls")
# Data Distribution - Boxplot of continuous variables wrt to each category of categorical columns
sb.boxplot(x="ATTORNEY",y="CLMAGE",data=claimants,palette="hls")
sb.boxplot(x="ATTORNEY",y="LOSS",data=claimants,palette="hls")
sb.boxplot(x="CLMSEX",y="CLMAGE",data=claimants,palette="hls")
sb.boxplot(x="CLMSEX",y="LOSS",data=claimants,palette="hls")
sb.boxplot(x="SEATBELT",y="CLMAGE",data=claimants,palette="hls")
sb.boxplot(x="SEATBELT",y="LOSS",data=claimants,palette="hls")
sb.boxplot(x="CLMINSUR",y="CLMAGE",data=claimants,palette="hls")
sb.boxplot(x="CLMINSUR",y="LOSS",data=claimants,palette="hls")
# To get the count of null values in the data
claimants.isnull().sum()
claimants.shape # 1340 6 => Before dropping null values
# To drop null values ( dropping rows)
claimants.dropna().shape # 1096 6 => After dropping null values
# Fill nan values with mode of the categorical column
claimants["CLMSEX"].fillna(1,inplace=True) # claimants.CLMSEX.mode() = 1
claimants["CLMINSUR"].fillna(1,inplace=True) # claimants.CLMINSUR.mode() = 1
claimants["SEATBELT"].fillna(0,inplace=True) # claimants.SEATBELT.mode() = 0
claimants["CLMSEX"].fillna(1,inplace=True) # claimants.CLMSEX.mode() = 1
claimants.CLMAGE.fillna(28.4144,inplace=True) # claimants.CLMAGE.mean() = 28.4
# Model building
from sklearn.linear_model import LogisticRegression
claimants.shape
X = claimants.iloc[:,[1,2,3,4,5]]
Y = claimants.iloc[:,0]
classifier = LogisticRegression()
classifier.fit(X,Y)
classifier.coef_ # coefficients of features
classifier.predict_proba (X) # Probability values
y_pred = classifier.predict(X)
claimants["y_pred"] = y_pred
y_prob = pd.DataFrame(classifier.predict_proba(X.iloc[:,:]))
new_df = pd.concat([claimants,y_prob],axis=1)
from sklearn.metrics import confusion_matrix
confusion_matrix = confusion_matrix(Y,y_pred)
print (confusion_matrix)
type(y_pred)
accuracy = sum(Y==y_pred)/claimants.shape[0]
pd.crosstab(y_pred,Y)
##########################################################################
# Loading data which contains categorical data to demonstrate how to
# create dummy columns
salary = pd.read_csv("sal.csv")
# creating dummy columns for the categorical columns
salary.columns
sal_dummies = | pd.get_dummies(salary[["workclass","occupation","education","maritalstatus","relationship","race","sex","native"]]) | pandas.get_dummies |
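# An illustrative continuation (synthetic features, not the claimants/salary
# data) showing the train/test split and classification report the script
# imports but does not reach in the excerpt above.
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
rng = np.random.RandomState(0)
X_demo = pd.DataFrame(rng.normal(size=(200, 3)), columns=['a', 'b', 'c'])
y_demo = (X_demo['a'] + X_demo['b'] > 0).astype(int)
X_tr, X_te, y_tr, y_te = train_test_split(X_demo, y_demo, test_size=0.3,
                                          random_state=0)
clf = LogisticRegression().fit(X_tr, y_tr)
print(classification_report(y_te, clf.predict(X_te)))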
#!/usr/bin/env python
import sys
import os
import json
import operator
import functools
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from ivadomed import config_manager as imed_config_manager
def plot_histogram(data, layer_no, fname_out):
"""
Save the histograms showing the frequency of the values inside gammas or betas tensors for one layer.
:param data: input data, which are gammas or betas for one layer
:param layer_no: number of the layer to consider
:param fname_out: directory to save the figure
"""
bins = np.linspace(0, 1, 100)
fig = plt.figure(figsize=(12, 12))
plt.title(f'Histogram: Layer {layer_no}')
plt.xlabel('Value')
plt.ylabel('Frequency')
# Flatten data
data = np.array(functools.reduce(operator.iconcat, data, [])).ravel()
plt.hist(data, bins)
fig.savefig(fname_out)
def visualize_pca(data, metadata_values, layer_no, fname_out):
"""
Save the PCA graphs showing gammas or betas tensors for one layer.
:param data: input data, which are gammas or betas for one layer
:param metadata_values: numpy array with the metadata values of all the images
:param layer_no: number of the layer to consider
:param fname_out: directory to save the figure
"""
pca_df = pd.DataFrame()
pca = PCA(n_components=2)
# Dim 0 will be the number of examples
data = np.concatenate(list(data), axis=0)
metadata_values = np.array(functools.reduce(operator.iconcat, metadata_values, [])).ravel()
pca_result = pca.fit_transform(data)
pca_df2 = pd.DataFrame()
pca_df2['pca1'] = pca_result[:, 0]
pca_df2['pca2'] = pca_result[:, 1]
pca_df2['contrast'] = metadata_values
pca_df = | pd.concat([pca_df, pca_df2]) | pandas.concat |
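# A hypothetical standalone call (random FiLM-like parameters, not real model
# output) exercising plot_histogram() defined above; assumes it runs in the
# same module so the function is in scope.
import numpy as np
fake_gammas = [np.random.uniform(0, 1, size=(8, 16)) for _ in range(5)]
plot_histogram(fake_gammas, layer_no=1, fname_out='hist_layer1.png')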
## Jobs data import (<NAME>, 10/11/2017)
import numpy as np
from scipy.special import expit
import argparse
import pandas as pd
import initpath_alg
initpath_alg.init_sys_path()
import utilmlab
'''
Input: train_rate: 0.8
Outputs:
- Train_X, Test_X: Train and Test features
- Train_Y: Observable outcomes
- Train_T: Assigned treatment
- Opt_Train_Y, Test_Y: Potential outcomes.
'''
def Data_Twins(fn_csv, train_rate = 0.8):
#%% Data Input (11400 patients, 30 features, 2 potential outcomes)
Data = np.loadtxt(fn_csv, delimiter=",", skiprows=1)
# Features
X = Data[:,:30]
# Feature dimensions and patient numbers
Dim = len(X[0])
No = len(X)
# Labels
Opt_Y = Data[:,30:]
for i in range(2):
idx = np.where(Opt_Y[:,i] > 365)
Opt_Y[idx,i] = 365
Opt_Y = 1-(Opt_Y / 365.0)
#%% Patient Treatment Assignment
coef = 0* np.random.uniform(-0.01, 0.01, size = [Dim,1])
Temp = expit(np.matmul(X,coef) + np.random.normal(0,0.01, size = [No,1]) )
Temp = Temp/(2*np.mean(Temp))
Temp[Temp>1] = 1
T = np.random.binomial(1,Temp,[No,1])
T = T.reshape([No,])
#%% Observable outcomes
Y = np.zeros([No,1])
# Output
Y = np.transpose(T) * Opt_Y[:,1] + np.transpose(1-T) * Opt_Y[:,0]
Y = np.transpose(Y)
Y = np.reshape(Y,[No,])
#%% Train / Test Division
temp = np.random.permutation(No)
Train_No = int(train_rate*No)
train_idx = temp[:Train_No]
test_idx = temp[Train_No:No]
Train_X = X[train_idx,:]
Train_T = T[train_idx]
Train_Y = Y[train_idx]
Opt_Train_Y = Opt_Y[train_idx,:]
Test_X = X[test_idx,:]
Test_Y = Opt_Y[test_idx,:]
return [Train_X, Train_T, Train_Y, Opt_Train_Y, Test_X, Test_Y]
def Data_Jobs(fn_csv):
# Data Input
Data = np.loadtxt(fn_csv, delimiter=",", skiprows=1)
# Feature, Treatment and Output
X = Data[:,:7]
T = Data[:,7]
Raw_Y = Data[:,8]
Y = np.array(Raw_Y>0,dtype=float)
# Parameters
No = len(X)
RCT_No = 723
Train_Rate = 0.8
# RCT / NoRCT
RCT_X = X[:RCT_No,:]
RCT_T = T[:RCT_No]
RCT_Y = Y[:RCT_No]
NoRCT_X = X[RCT_No:,:]
NoRCT_T = T[RCT_No:]
NoRCT_Y = Y[RCT_No:]
# Train / Test Division
temp = np.random.permutation(RCT_No)
Train_No = int(Train_Rate*RCT_No)
Test_No = RCT_No - Train_No
train_idx = temp[:Train_No]
test_idx = temp[Train_No:]
Train_X_Test = RCT_X[train_idx,:]
Train_T_Test = RCT_T[train_idx]
Train_Y_Test = RCT_Y[train_idx]
Train_Test_No = len(train_idx)
Test_X = RCT_X[test_idx,:]
Test_T = RCT_T[test_idx]
Test_Y = 1- (1*(RCT_Y[test_idx] == 0))
Train_X = np.concatenate([Train_X_Test,NoRCT_X])
Train_T = np.concatenate([Train_T_Test,NoRCT_T])
Train_Y = np.concatenate([Train_Y_Test,NoRCT_Y])
Train_No = No - Test_No
return [Train_X, Train_T, Train_Y, Test_X, Test_T, Test_Y, Train_X_Test, Train_T_Test, Train_Y_Test, Train_No, Test_No, Train_Test_No]
def init_arg():
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", default="twins")
parser.add_argument("--trainx", default="trainx.csv")
parser.add_argument("--trainy", default="trainy.csv")
parser.add_argument("--traint", default="traint.csv")
parser.add_argument("--testx", default="testx.csv")
parser.add_argument("--testy", default="testy.csv")
parser.add_argument("--testt", default="testt.csv")
return parser.parse_args()
if __name__ == '__main__':
args = init_arg()
dataset = args.dataset
fn_trainx, fn_trainy, fn_traint = args.trainx, args.trainy, args.traint
fn_testx, fn_testy, fn_testt = args.testx, args.testy, args.testt
Test_T = None
if dataset == 'twins':
train_rate = 0.8
fn_twins_csv = utilmlab.get_data_dir() + "/twins/Twin_Data.csv.gz"
[Train_X, Train_T, Train_Y, Opt_Train_Y, Test_X, Test_Y] \
= Data_Twins(fn_twins_csv, train_rate)
elif dataset == 'jobs':
fn_jobs_csv = utilmlab.get_data_dir() + "/jobs/Jobs_Lalonde_Data.csv.gz"
[Train_X, Train_T, Train_Y, Test_X, Test_T, Test_Y, Train_X_Test,
Train_T_Test, Train_Y_Test, Train_No, Test_No, Train_Test_No] \
= Data_Jobs(fn_jobs_csv)
else:
assert 0
pd.DataFrame(Train_X).to_csv(fn_trainx, index=False)
| pd.DataFrame(Train_Y) | pandas.DataFrame |
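# A small synthetic illustration (not the Twins data) of the treatment
# assignment step inside Data_Twins() above: a near-uniform propensity built
# with expit, then Bernoulli sampling of the treatment indicator.
import numpy as np
from scipy.special import expit
no, dim = 100, 5
X_demo = np.random.uniform(size=(no, dim))
coef = 0 * np.random.uniform(-0.01, 0.01, size=(dim, 1))
prop = expit(X_demo @ coef + np.random.normal(0, 0.01, size=(no, 1)))
prop = np.clip(prop / (2 * np.mean(prop)), None, 1)
T_demo = np.random.binomial(1, prop).reshape(no)
print(T_demo.mean())   # roughly 0.5 by construction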
from __future__ import annotations
from typing import Dict, List, Iterable, NewType, Optional, Union
from pgmpy.models import NaiveBayes
from pgmpy.factors.discrete import TabularCPD
from pgmpy.inference import VariableElimination
from data.DataSet import Data, DataSet
from .TextClassifier import TextClassifier
import pandas as pd
import numpy as np
class PGMNaiveBayes(TextClassifier):
def __add_category(self, categories: Union[str, List[str], List[(str, str)], Dict[str, str]]) -> PGMNaiveBayes:
'''setup the bayes network with a new category entry'''
if type(categories) is str: categories = [categories]
if type(categories) is dict: categories = categories.items()
to_create = False
for category in categories:
if type(category) is str: category = (category, category)
category, index = category
if category not in self.categories:
self.categories[category] = index
self.cardinality = len(self.categories) or 1
to_create = True
if to_create: self.__create_class_cpd()
return self
def __add_token(self, tokens: Union[str, List[str]]) -> PGMNaiveBayes:
if type(tokens) is str: tokens = [tokens]
to_create = []
for token in tokens:
if token not in self.tokens:
to_create.append(token)
self.total_tokens += 1
self.__create_word_cpd(to_create)
return self
def __create_word_cpd(self, tokens: Union[str, List[str]], check: bool=True) -> PGMNaiveBayes:
'''Generate the table for the given token node'''
if type(tokens) is str: tokens = [tokens]
cpds = []
for token in tokens:
if token in self.tokens:
self.model.remove_cpds(self.tokens[token])
cpd_word = TabularCPD(
variable=token,
variable_card=2,
evidence=[Data.CATEGORY_NAME],
evidence_card=[self.cardinality],
values=[ [0.5 for _ in range(self.cardinality) ] ] * 2
)
self.tokens[token] = cpd_word
cpds.append(cpd_word)
self.model.add_nodes_from(tokens)
self.model.add_edges_from([ (Data.CATEGORY_NAME, token) for token in tokens ])
self.model.add_cpds(*cpds)
# if check: self.model.check_model()
return self
def __create_class_cpd(self, check: bool=True) -> PGMNaiveBayes:
'''Generate the table for the category node'''
if self.cpd_class:
self.model.remove_cpds(self.cpd_class)
self.cpd_class = TabularCPD(
variable=Data.CATEGORY_NAME,
variable_card=self.cardinality,
values=[ [1 / self.cardinality] for _ in range(self.cardinality) ]
)
self.model.add_cpds(self.cpd_class)
# if check: self.model.check_model()
return self
def __cpd_to_json(self, cpd: TabularCPD) -> Dict:
return {
'variable': cpd.variable,
'variables': cpd.variables,
'variable_card': cpd.variable_card.tolist(),
'values': cpd.values.tolist()
}
def __cpd_from_json(self, cpd: Dict) -> TabularCPD:
return TabularCPD(**cpd)
def reset(self) -> PGMNaiveBayes:
'''Totally reset the Classifier'''
self.categories = {}
self.tokens = {}
self.cardinality = 1
self.total_documents = 0
self.total_tokens = 0
self.cpd_class = None
self.model = NaiveBayes()
self.model.add_node(Data.CATEGORY_NAME)
return self
def token_probability(self, token: str, category: str) -> float:
'''return the probability that a given token belongs to a given category'''
probability = self.model.predict_probability(pd.DataFrame([[1]], columns=[token]))
column = '{}_{}'.format(Data.CATEGORY_NAME, self.categories.get(category, 0))
return probability[column][0] if column in probability else 0
def category_probability(self, category: str) -> float:
'''return the probability of the given category'''
score = Data.CATEGORY_VALUES.get(category, 0)
elimination = VariableElimination(self.model)
probability = elimination.query(variables=[Data.CATEGORY_NAME])
state = probability.get_state_no(Data.CATEGORY_NAME, self.categories.get(category, 0))
return probability.values[state]
def word_probability(self, text: str) -> pd.DataFrame:
'''retrieve the probability table of the given text without knowing the probability of the category (no evidence): P(C | w1,...,wn)'''
data = Data(text)
elimination = VariableElimination(self.model)
values = [ [] for _ in range(self.cardinality) ]
for token in data.tokens:
if token not in self.tokens:
for v in values: v.append(1 / (self.cardinality or 1))
else:
probability = elimination.query(variables=[Data.CATEGORY_NAME], evidence={ token: 1 }).values
for i in range(len(probability)): values[i].append(probability[i])
return pd.DataFrame(np.array(values).T, columns=list(self.categories), index=data.tokens)
def probability(self, text: str) -> pd.DataFrame:
'''retrieve the probability table of the given text knowing the probability of categories: P(C) * P(C | w1,...,wn)'''
data = Data(text)
values = pd.DataFrame([[ 1 if t in data.table else 0 for t in self.tokens ]], columns=self.tokens)
probabilities = self.model.predict_probability(values)
return probabilities.rename(columns={ '{}_{}'.format(Data.CATEGORY_NAME, v): k for k, v in self.categories.items() })
def fit(self, text: Union[str, Iterable[str], Iterable[Data], pd.DataFrame], category: Union[str, Iterable[str]]=None) -> TextClassifier:
'''learn probabilities for tokens extracted by the given text'''
data = DataSet.FromAny(text, category)
categories = []
tokens = {}
values = []
for d in data:
categories.append((d.category, d.score))
for token in d.tokens: tokens[token] = 1
values.append((d.table, d.score))
self.total_documents += 1
tokens = list(tokens)
self.__add_category(categories)
self.__add_token(tokens)
data_values = [ [1 if t in v[0] else 0 for t in tokens ] + [v[1]] for v in values ]
tokens.append(Data.CATEGORY_NAME)
data_values = | pd.DataFrame(data_values, columns=tokens) | pandas.DataFrame |
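# A standalone sketch (hypothetical two-word vocabulary, not the classes
# above) of the pgmpy pattern this classifier wraps: a NaiveBayes network
# with one category node, per-token CPDs and variable elimination. The CPD
# numbers are made up; the exact API may vary across pgmpy versions.
from pgmpy.models import NaiveBayes
from pgmpy.factors.discrete import TabularCPD
from pgmpy.inference import VariableElimination
model = NaiveBayes()
model.add_nodes_from(['category', 'w_spam', 'w_ham'])
model.add_edges_from([('category', 'w_spam'), ('category', 'w_ham')])
cpd_cat = TabularCPD('category', 2, [[0.5], [0.5]])
cpd_spam = TabularCPD('w_spam', 2, [[0.9, 0.2], [0.1, 0.8]],
                      evidence=['category'], evidence_card=[2])
cpd_ham = TabularCPD('w_ham', 2, [[0.3, 0.8], [0.7, 0.2]],
                     evidence=['category'], evidence_card=[2])
model.add_cpds(cpd_cat, cpd_spam, cpd_ham)
posterior = VariableElimination(model).query(['category'],
                                             evidence={'w_spam': 1})
print(posterior.values)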
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright (c) 2019 - 2021, ETH Zurich, Computer Engineering Group (TEC)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
@author: romantrueb
@brief: Analyse dpp2linktest measurements
"""
import sys
import os
import numpy as np
import pandas as pd
import json
from collections import OrderedDict
import pickle
# construct html with python
import dominate
from dominate.tags import *
from dominate.util import raw
from flocklab import Flocklab
from flocklab import *
fl = Flocklab()
################################################################################
assertionOverride = False
outputDir = './data'
################################################################################
# Helper Functions
################################################################################
def getJson(text):
'''Find and convert json in a single line from serial output. Returns None if no valid json could be found.
'''
ret = None
# find index
idx = 0
if '{' not in text:
return ret
for i in range(len(text)):
if text[i] == '{':
idx = i
break
try:
ret = json.loads(text[idx:], strict=False)
except json.JSONDecodeError:
print('WARNING: json could not be parsed: {}'.format(text[idx:]))
return ret
def getRows(nodeOfRound, df):
'''Extract rows for requested round from df
Args:
nodeOfRound: nodeID which identifies the round
df: dataframe containing all rows to search through
'''
inRange = False
ret = []
for d in df.data.to_list():
if d['type'] == 'StartOfRound':
if d['node'] == nodeOfRound:
inRange = True
elif d['type'] == 'EndOfRound':
if d['node'] == nodeOfRound:
break
elif inRange:
ret.append(d)
return ret
def styleDf(df, cmap='inferno', format='{:.1f}', replaceNan=True, applymap=None):
ret = ( df.style
.background_gradient(cmap=cmap, axis=None)
.format(format) )
if applymap is not None:
ret = ret.applymap(applymap)
ret = ret.render()
if replaceNan:
ret = ret.replace('nan','')
return ret
htmlStyleBlock = '''
table, th, td {font-size:10pt; border:1px solid lightgrey; border-collapse:collapse; text-align:left; font-family:arial;}
th, td {padding: 5px; text-align:center; width:22px;}
table.outer, th.outer, td.outer {font-size:10pt; border:0px solid lightgrey; border-collapse:collapse; text-align:left; font-family:arial;}
th.outer, td.outer {padding: 5px; text-align:center;}
'''
################################################################################
def extractData(testNo, testDir):
serialPath = os.path.join(testDir, "{}/serial.csv".format(testNo))
# download test results if directory does not exist
if not os.path.isfile(serialPath):
fl.getResults(testNo)
df = fl.serial2Df(serialPath, error='ignore')
df.sort_values(by=['timestamp', 'observer_id'], inplace=True, ignore_index=True)
# convert output with valid json to dict and remove other rows
keepMask = []
resList = []
for idx, row in df.iterrows():
jsonDict = getJson(row['output'])
keepMask.append(1 if jsonDict else 0)
if jsonDict:
resList.append(jsonDict)
dfd = df[np.asarray(keepMask).astype(bool)].copy()
dfd['data'] = resList
nodeList = sorted(dfd.observer_id.unique())
groups = dfd.groupby('observer_id')
# Get TestConfig and RadioConfig & check for consistency
testConfigDict = OrderedDict()
radioConfigDict = OrderedDict()
floodConfigDict = OrderedDict()
for node in nodeList:
testConfigFound = False
radioConfigFound = False
floodConfigFound = False
testConfigDict[node] = None
radioConfigDict[node] = None
floodConfigDict[node] = None
gDf = groups.get_group(node)
for d in gDf.data.to_list():
if d['type'] == 'TestConfig':
testConfigDict[node] = d
testConfigFound = True
if d['type'] == 'RadioConfig':
radioConfigDict[node] = d
radioConfigFound = True
if d['type'] == 'FloodConfig':
floodConfigDict[node] = d
floodConfigFound = True
if testConfigFound and (radioConfigFound or floodConfigFound):
break
testConfig = testConfigDict[nodeList[0]]
if not('p2pMode' in testConfig) and (len(radioConfigDict) == len(nodeList)):
# backwards compatibility for test results without linktest mode indication
testConfig['p2pMode'] = 1
testConfig['floodMode'] = 0
if testConfig['p2pMode'] and not testConfig['floodMode']:
radioConfig = radioConfigDict[nodeList[0]]
floodConfig = None
elif testConfig['floodMode'] and not testConfig['p2pMode']:
radioConfig = None
floodConfig = floodConfigDict[nodeList[0]]
else:
raise Exception('TestConfig seems invalid!')
# check configs for consistency
for node in nodeList:
assert testConfigDict[nodeList[0]] == testConfigDict[node]
if len(radioConfigDict) == len(nodeList):
assert radioConfigDict[nodeList[0]] == radioConfigDict[node]
if len(floodConfigDict) == len(nodeList):
assert floodConfigDict[nodeList[0]] == floodConfigDict[node]
# Make sure that round boundaries do not overlap
if not assertionOverride:
stack = []
currentNode = -1
for d in dfd.data.to_list():
if d['type'] == 'StartOfRound':
if len(stack)==0:
currentNode = d['node']
else:
assert d['node'] == stack[-1]
stack.append(d['node'])
elif d['type'] == 'EndOfRound':
assert d['node'] == stack.pop()
# collect extracted data (including nodeList to resolve idx <-> node ID relations)
d = {
'testConfig': testConfig,
'nodeList': nodeList,
}
if testConfig['p2pMode'] and (not testConfig['floodMode']):
pathlossMatrix, prrMatrix, crcErrorMatrix = extractP2pStats(dfd, testConfig, radioConfig)
d['radioConfig'] = radioConfig
d['prrMatrix'] = prrMatrix
d['crcErrorMatrix'] = crcErrorMatrix
d['pathlossMatrix'] = pathlossMatrix
elif testConfig['floodMode'] and (not testConfig['p2pMode']):
if floodConfig['delayTx'] == 0:
numFloodsRxMatrix, hopDistanceMatrix, hopDistanceStdMatrix = extractFloodNormal(dfd, testConfig, floodConfig)
else: # floods with delayed Tx
numFloodsRxMatrix, hopDistanceMatrix, hopDistanceStdMatrix = extractFloodDelayedTx(dfd, testConfig, floodConfig)
d['floodConfig'] = floodConfig
d['numFloodsRxMatrix'] = numFloodsRxMatrix
d['hopDistanceMatrix'] = hopDistanceMatrix
d['hopDistanceStdMatrix'] = hopDistanceStdMatrix
# save obtained data to file
pklPath = os.path.join(outputDir, 'linktest_data_{}.pkl'.format(testNo))
os.makedirs(os.path.split(pklPath)[0], exist_ok=True)
with open(pklPath, 'wb' ) as f:
pickle.dump(d, f)
return d
def extractP2pStats(dfd, testConfig, radioConfig):
groups = dfd.groupby('observer_id')
nodeList = sorted(dfd.observer_id.unique())
numNodes = len(nodeList)
# prepare
pathlossMatrix = np.empty( (numNodes, numNodes,) ) * np.nan # path loss
prrMatrix = np.empty( (numNodes, numNodes,) ) * np.nan # packet reception ratio (PRR)
crcErrorMatrix = np.empty( (numNodes, numNodes,) ) * np.nan # ratio of packets with CRC error
# iterate over rounds
for nodeOfRound in nodeList:
txNode = nodeOfRound
txNodeIdx = nodeList.index(txNode)
numTx = 0
numRxDict = OrderedDict()
numCrcErrorDict = OrderedDict()
rssiAvgDict = OrderedDict()
# iterate over nodes
for node in nodeList:
rows = getRows(nodeOfRound, groups.get_group(node))
if node == txNode:
txDoneList = [elem for elem in rows if (elem['type']=='TxDone')]
numTx = len(txDoneList)
assert numTx == testConfig['numTx']
else:
rxDoneList = [elem for elem in rows if (elem['type']=='RxDone' and elem['key']==testConfig['key'] and elem['crc_error']==0)]
crcErrorList = [elem for elem in rows if (elem['type']=='RxDone' and elem['crc_error']==1)]
numRxDict[node] = len(rxDoneList)
numCrcErrorDict[node] = len(crcErrorList)
rssiAvgDict[node] = np.mean([elem['rssi'] for elem in rxDoneList]) if len(rxDoneList) else np.nan
# fill path loss matrix
for rxNode, rssi in rssiAvgDict.items():
rxNodeIdx = nodeList.index(rxNode)
pathlossMatrix[txNodeIdx][rxNodeIdx] = -(rssi - radioConfig['txPower'])
# fill PRR matrix
for rxNode, numRx in numRxDict.items():
rxNodeIdx = nodeList.index(rxNode)
prrMatrix[txNodeIdx][rxNodeIdx] = numRx/numTx
# fill CRC error matrix
for rxNode, numCrcError in numCrcErrorDict.items():
rxNodeIdx = nodeList.index(rxNode)
crcErrorMatrix[txNodeIdx][rxNodeIdx] = numCrcError/numTx
# NOTE: some CRC error cases are ignored while getting the rows (getRows()) because the json parser cannot parse the RxDone output
return pathlossMatrix, prrMatrix, crcErrorMatrix
def extractFloodNormal(dfd, testConfig, floodConfig):
groups = dfd.groupby('observer_id')
nodeList = sorted(dfd.observer_id.unique())
numNodes = len(nodeList)
numFloodsRxMatrix = np.empty( (numNodes, numNodes,) ) * np.nan
hopDistanceMatrix = np.empty( (numNodes, numNodes,) ) * np.nan
hopDistanceStdMatrix = np.empty( (numNodes, numNodes,) ) * np.nan
# iterate over rounds
for nodeOfRound in nodeList:
txNode = nodeOfRound
txNodeIdx = nodeList.index(txNode)
# iterate over nodes
for rxNode in nodeList:
rxNodeIdx = nodeList.index(rxNode)
rows = getRows(txNode, groups.get_group(rxNode))
floodRxList = [elem for elem in rows if (elem['type']=='FloodDone' and elem['rx_cnt']>0 and elem['is_initiator']==0)]
# fill matrix
numFloodsRxMatrix[txNodeIdx][rxNodeIdx] = len(floodRxList)
hopDistanceMatrix[txNodeIdx][rxNodeIdx] = np.mean([elem['rx_idx']+1 for elem in floodRxList]) if len(floodRxList) else np.nan
hopDistanceStdMatrix[txNodeIdx][rxNodeIdx] = np.std([elem['rx_idx']+1 for elem in floodRxList]) if len(floodRxList) else np.nan
return numFloodsRxMatrix, hopDistanceMatrix, hopDistanceStdMatrix
def extractFloodDelayedTx(dfd, testConfig, floodConfig):
groups = dfd.groupby('observer_id')
nodeList = sorted(dfd.observer_id.unique())
numNodes = len(nodeList)
    numFloodsRxMatrix = np.full((numNodes, numNodes), np.nan)
    hopDistanceMatrix = np.full((numNodes, numNodes), np.nan)
    hopDistanceStdMatrix = np.full((numNodes, numNodes), np.nan)
# iterate over rounds
for nodeOfRound in nodeList:
delayedNode = nodeOfRound
delayedNodeIdx = nodeList.index(delayedNode)
# iterate over nodes
for rxNode in nodeList:
rxNodeIdx = nodeList.index(rxNode)
rows = getRows(delayedNode, groups.get_group(rxNode))
floodRxList = [elem for elem in rows if (elem['type']=='FloodDone' and elem['rx_cnt']>0 and elem['is_initiator']==0)]
# fill matrix
# hop distance = rx_idx + 1
numFloodsRxMatrix[delayedNodeIdx][rxNodeIdx] = len(floodRxList)
hopDistanceMatrix[delayedNodeIdx][rxNodeIdx] = np.mean([elem['rx_idx']+1 for elem in floodRxList]) if len(floodRxList) else np.nan
hopDistanceStdMatrix[delayedNodeIdx][rxNodeIdx] = np.std([elem['rx_idx']+1 for elem in floodRxList]) if len(floodRxList) else np.nan
return numFloodsRxMatrix, hopDistanceMatrix, hopDistanceStdMatrix
def saveP2pMatricesToHtml(extractionDict):
nodeList = extractionDict['nodeList']
prrMatrixDf = pd.DataFrame(data=extractionDict['prrMatrix'], index=nodeList, columns=nodeList)
    crcErrorMatrixDf = pd.DataFrame(data=extractionDict['crcErrorMatrix'], index=nodeList, columns=nodeList)
import collections
import errno
import logging
import os
import re
import shutil
import uuid
import time
import traceback
import sys
import pandas as pd
import numpy as np
from openpyxl import load_workbook
from xlrd.biffh import XLRDError
from sklearn import preprocessing
from skbio.stats.composition import ilr, clr
from skbio import DistanceMatrix
from skbio.stats.distance import anosim, permanova, permdisp, pwmantel
import scipy.spatial.distance as dist
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.spatial.distance import pdist
import rpy2.robjects.packages as rpackages
import rpy2.robjects as ro
from rpy2.robjects import pandas2ri, numpy2ri
from rpy2.robjects.conversion import localconverter
import plotly.graph_objects as go
from plotly.offline import plot
import plotly.express as px
from installed_clients.DataFileUtilClient import DataFileUtil
from GenericsAPI.Utils.AttributeUtils import AttributesUtil
from GenericsAPI.Utils.SampleServiceUtil import SampleServiceUtil
from GenericsAPI.Utils.DataUtil import DataUtil
import GenericsAPI.Utils.MatrixValidation as vd
from installed_clients.KBaseReportClient import KBaseReport
from installed_clients.fba_toolsClient import fba_tools
from installed_clients.kb_GenericsReportClient import kb_GenericsReport
from installed_clients.SampleServiceClient import SampleService
TYPE_ATTRIBUTES = {'description', 'scale', 'row_normalization', 'col_normalization'}
SCALE_TYPES = {'raw', 'ln', 'log2', 'log10'}
class MatrixUtil:
def _validate_import_matrix_from_excel_params(self, params):
"""
_validate_import_matrix_from_excel_params:
validates params passed to import_matrix_from_excel method
"""
logging.info('start validating import_matrix_from_excel params')
# check for required parameters
for p in ['obj_type', 'matrix_name', 'workspace_name', 'scale']:
if p not in params:
raise ValueError('"{}" parameter is required, but missing'.format(p))
obj_type = params.get('obj_type')
if obj_type not in self.matrix_types:
raise ValueError('Unknown matrix object type: {}'.format(obj_type))
scale = params.get('scale')
if scale not in SCALE_TYPES:
raise ValueError('Unknown scale type: {}'.format(scale))
if params.get('input_file_path'):
file_path = params.get('input_file_path')
elif params.get('input_shock_id'):
file_path = self.dfu.shock_to_file(
{'shock_id': params['input_shock_id'],
'file_path': self.scratch}).get('file_path')
elif params.get('input_staging_file_path'):
file_path = self.dfu.download_staging_file(
{'staging_file_subdir_path': params.get('input_staging_file_path')}
).get('copy_file_path')
else:
error_msg = "Must supply either a input_shock_id or input_file_path "
error_msg += "or input_staging_file_path"
raise ValueError(error_msg)
refs = {k: v for k, v in params.items() if "_ref" in k}
return (obj_type, file_path, params.get('workspace_name'),
params.get('matrix_name'), refs, scale)
def _upload_to_shock(self, file_path):
"""
_upload_to_shock: upload target file to shock using DataFileUtil
"""
logging.info('Start uploading file to shock: {}'.format(file_path))
file_to_shock_params = {
'file_path': file_path,
'pack': 'zip'
}
shock_id = self.dfu.file_to_shock(file_to_shock_params).get('shock_id')
return shock_id
@staticmethod
def _mkdir_p(path):
"""
_mkdir_p: make directory for given path
"""
if not path:
return
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
@staticmethod
def _find_between(s, start, end):
"""
_find_between: find string in between start and end
"""
return re.search('{}(.*){}'.format(start, end), s).group(1)
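    # Hedged example (illustrative arguments only):
    #     MatrixUtil._find_between('abc123xyz', 'abc', 'xyz')  # returns '123'
    # Note that re.search returns None when the markers are absent, so the
    # .group(1) call above raises AttributeError for non-matching input.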
@staticmethod
def _write_mapping_sheet(file_path, sheet_name, mapping, index):
"""
_write_mapping_sheet: write mapping to sheet
"""
df_dict = collections.OrderedDict()
df_dict[index[0]] = []
df_dict[index[1]] = []
for key, value in mapping.items():
df_dict.get(index[0]).append(key)
df_dict.get(index[1]).append(value)
df = pd.DataFrame.from_dict(df_dict)
        with pd.ExcelWriter(file_path, engine='openpyxl', mode='a') as writer:
df.to_excel(writer, sheet_name=sheet_name)
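    # Sketch of the sheet written above (column names come from the 'index'
    # argument, e.g. index=['Attribute', 'Value']): every key/value pair of
    # 'mapping' becomes one row, so {'scale': 'raw'} appears as a row with
    # 'scale' under the first column name and 'raw' under the second, in a new
    # sheet called 'sheet_name'.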
def _generate_tab_content(self, index_page, viewer_name):
tab_content = ''
if index_page:
tab_content += '''\n<div id="{}" class="tabcontent">'''.format(viewer_name)
tab_content += '\n<iframe height="1300px" width="100%" '
tab_content += 'src="{}" '.format(index_page)
tab_content += 'style="border:none;"></iframe>'
tab_content += '\n</div>\n'
else:
tab_content += '''\n<div id="{}" class="tabcontent">'''.format(viewer_name)
tab_content += '''\n<p style="color:red;" >'''
tab_content += '''Matrix is too large to be displayed.</p>\n'''
tab_content += '\n</div>\n'
return tab_content
def _generate_simper_tab_content(self, res, viewer_name):
tab_content = ''
tab_content += '''\n<div id="{}" class="tabcontent">\n'''.format(viewer_name)
html = '''<pre class="tab">''' + str(res).replace("\n", "<br>") + "</pre>"
tab_content += html
tab_content += '\n</div>\n'
return tab_content
def _generate_variable_stat_tab_content(self, res, viewer_name):
tab_content = ''
tab_content += '''\n<div id="{}" class="tabcontent">\n'''.format(viewer_name)
tab_content += '''<table>\n'''
for key, value in res.items():
tab_content += '''<tr>\n'''
tab_content += '''<td>{}</td>\n'''.format(key)
tab_content += '''<td>{}</td>\n'''.format(value)
tab_content += '''</tr>\n'''
tab_content += '''</table>\n'''
tab_content += '\n</div>\n'
return tab_content
def _generate_mantel_test_visualization_content(self, pwmantel_res):
tab_def_content = ''
tab_content = ''
viewer_name = 'pwmantel_res'
tab_def_content += '''\n<div class="tab">\n'''
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += ''' id="defaultOpen"'''
tab_def_content += '''>Mantel Test</button>\n'''
tab_def_content += '\n</div>\n'
tab_content += '''\n<div id="{}" class="tabcontent">\n'''.format(viewer_name)
tab_content += '''<table>\n'''
# add table headers
tab_content += '''<tr>\n'''
tab_content += '''<th>Distance Matrix 1</th>\n'''
tab_content += '''<th>Distance Matrix 2</th>\n'''
for col in pwmantel_res.columns:
tab_content += '''<th>{}</th>\n'''.format(col)
tab_content += '''</tr>\n'''
# add table contents
for idx, values in enumerate(pwmantel_res.values):
tab_content += '''<tr>\n'''
tab_content += '''<td>{}</td>\n'''.format(pwmantel_res.index[idx][0])
tab_content += '''<td>{}</td>\n'''.format(pwmantel_res.index[idx][1])
values[0] = round(values[0], 4)
for value in values:
tab_content += '''<td>{}</td>\n'''.format(value)
tab_content += '''</tr>\n'''
tab_content += '''</table>\n'''
tab_content += '\n</div>\n'
return tab_def_content + tab_content
def _generate_simper_plot(self, species_stats, grouping_names):
output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
logging.info('Start generating plotly simper plot in {}'.format(output_directory))
self._mkdir_p(output_directory)
simper_plot_path = os.path.join(output_directory, 'SimperPlot.html')
species = list(species_stats.keys())
plot_data = list()
for grouping_name in set(grouping_names):
y_values = list()
y_error = list()
for species_name in species:
species_data = species_stats[species_name]
y_values.append(species_data[grouping_name][0])
y_error.append(species_data[grouping_name][1])
plot_data.append(go.Bar(name=str(grouping_name), x=species, y=y_values,
error_y=dict(type='data', array=y_error)))
fig = go.Figure(data=plot_data)
fig.update_layout(barmode='group',
xaxis=dict(title='species'),
yaxis=dict(title='average abundance count'))
plot(fig, filename=simper_plot_path)
return simper_plot_path
def _generate_simper_plot_content(self, viewer_name, species_stats, grouping_names,
output_directory):
simper_plot_path = self._generate_simper_plot(species_stats, grouping_names)
simper_plot_name = 'SimperPlot.html'
shutil.copy2(simper_plot_path,
os.path.join(output_directory, simper_plot_name))
tab_content = ''
tab_content += '''\n<div id="{}" class="tabcontent">\n'''.format(viewer_name)
tab_content += '<iframe height="500px" width="100%" '
tab_content += 'src="{}" '.format(simper_plot_name)
tab_content += 'style="border:none;"></iframe>\n<p></p>\n'
tab_content += '\n</div>\n'
return tab_content
def _generate_simper_visualization_content(self, simper_ret, simper_sum,
species_stats, grouping_names, output_directory):
tab_def_content = ''
tab_content = ''
viewer_name = 'simper_plot'
tab_def_content += '''\n<div class="tab">\n'''
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += ''' id="defaultOpen"'''
tab_def_content += '''>Most Influential Species Bar Plot</button>\n'''
tab_content += self._generate_simper_plot_content(viewer_name, species_stats,
grouping_names, output_directory)
viewer_name = 'simper_ret'
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += '''>Most Influential Species Info</button>\n'''
tab_content += self._generate_simper_tab_content(simper_ret, viewer_name)
viewer_name = 'simper_sum'
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += '''>Similarity Percentage Summary</button>\n'''
tab_content += self._generate_simper_tab_content(simper_sum, viewer_name)
tab_def_content += '\n</div>\n'
return tab_def_content + tab_content
def _generate_variable_stats_visualization_content(self, anosim_res,
permanova_res, permdisp_res):
tab_def_content = ''
tab_content = ''
first_tab_token = False
if anosim_res is not None:
viewer_name = 'anosim_res'
first_tab_token = True
tab_def_content += '''\n<div class="tab">\n'''
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += ''' id="defaultOpen"'''
tab_def_content += '''>Analysis of Similarities</button>\n'''
tab_content += self._generate_variable_stat_tab_content(anosim_res, viewer_name)
if permanova_res is not None:
viewer_name = 'permanova_res'
if first_tab_token:
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += '''>Permutational Multivariate Analysis of Variance</button>\n'''
else:
first_tab_token = True
tab_def_content += '''\n<div class="tab">\n'''
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += ''' id="defaultOpen"'''
tab_def_content += '''>Permutational Multivariate Analysis of Variance</button>\n'''
tab_content += self._generate_variable_stat_tab_content(permanova_res, viewer_name)
if permdisp_res is not None:
viewer_name = 'permdisp_res'
if first_tab_token:
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += '''>Homogeneity Multivariate Analysis of Variance</button>\n'''
else:
# first_tab_token = True
tab_def_content += '''\n<div class="tab">\n'''
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += ''' id="defaultOpen"'''
tab_def_content += '''>Homogeneity Multivariate Analysis of Variance</button>\n'''
tab_content += self._generate_variable_stat_tab_content(permdisp_res, viewer_name)
tab_def_content += '\n</div>\n'
return tab_def_content + tab_content
def _generate_rarefy_visualization_content(self, output_directory,
rarefied_matrix_dir, rarecurve_image,
obs_vs_rare_image, random_rare_df):
        row_data_summary = random_rare_df.T.describe().round(2).to_string()
        col_data_summary = random_rare_df.describe().round(2).to_string()
        tab_def_content = ''
        tab_content = ''
viewer_name = 'data_summary'
tab_def_content += '''\n<div class="tab">\n'''
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += ''' id="defaultOpen"'''
tab_def_content += '''>Rarefied Matrix Statistics</button>\n'''
tab_content += '''\n<div id="{}" class="tabcontent" style="overflow:auto">'''.format(
viewer_name)
tab_content += '''\n<h5>Rarefied Matrix Size: {} x {}</h5>'''.format(
len(random_rare_df.index),
len(random_rare_df.columns))
tab_content += '''\n<h5>Row Aggregating Statistics</h5>'''
html = '''\n<pre class="tab">''' + str(row_data_summary).replace("\n", "<br>") + "</pre>"
tab_content += html
tab_content += '''\n<br>'''
tab_content += '''\n<hr style="height:2px;border-width:0;color:gray;background-color:gray">'''
tab_content += '''\n<br>'''
tab_content += '''\n<h5>Column Aggregating Statistics</h5>'''
html = '''\n<pre class="tab">''' + str(col_data_summary).replace("\n", "<br>") + "</pre>"
tab_content += html
tab_content += '\n</div>\n'
if False and len(random_rare_df.columns) <= 200:
viewer_name = 'MatrixLinearPlotViewer'
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += '''>Matrix Linear Plot</button>\n'''
linear_plot_page = self._generate_linear_plot(random_rare_df, output_directory)
tab_content += '''\n<div id="{}" class="tabcontent">'''.format(viewer_name)
tab_content += '\n<iframe height="1300px" width="100%" '
tab_content += 'src="{}" '.format(linear_plot_page)
tab_content += 'style="border:none;"></iframe>'
tab_content += '\n</div>\n'
viewer_name = 'RarefiedMatrixViewer'
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += '''>Rarefied Matrix Heatmap</button>\n'''
rarefied_matrix_report_files = os.listdir(rarefied_matrix_dir)
rarefied_matrix_index_page = None
for rarefied_matrix_report_file in rarefied_matrix_report_files:
if rarefied_matrix_report_file.endswith('.html'):
rarefied_matrix_index_page = rarefied_matrix_report_file
shutil.copy2(os.path.join(rarefied_matrix_dir, rarefied_matrix_report_file),
output_directory)
tab_content += self._generate_tab_content(rarefied_matrix_index_page, viewer_name)
rarecurve_image_name = os.path.basename(rarecurve_image)
shutil.copy2(rarecurve_image,
os.path.join(output_directory, rarecurve_image_name))
obs_vs_rare_image_name = os.path.basename(obs_vs_rare_image)
shutil.copy2(obs_vs_rare_image,
os.path.join(output_directory, obs_vs_rare_image_name))
viewer_name = 'RarecurvePlot'
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += '''>Rarecurve Plot</button>\n'''
tab_content += '''\n<div id="{}" class="tabcontent">'''.format(viewer_name)
tab_content += '''\n<img src="{}" '''.format(rarecurve_image_name)
tab_content += '''alt="rarecurve" width="600" height="600">\n'''
tab_content += '''<br>\n<br>\n'''
tab_content += '''\n<img src="{}" '''.format(obs_vs_rare_image_name)
tab_content += '''alt="rarecurve" width="600" height="600">\n'''
tab_content += '\n</div>\n'
tab_def_content += '\n</div>\n'
return tab_def_content + tab_content
def _generate_trans_visualization_content(self, output_directory,
operations, heatmap_html_dir_l,
transformed_matrix_df, variable_specific):
row_data_summary = transformed_matrix_df.T.describe().round(2).to_string()
col_data_summary = transformed_matrix_df.describe().round(2).to_string()
tab_def_content = ''
tab_content = ''
op_2_name = {
'abundance_filtering': 'Filtered',
'standardization': 'Standardized',
'ratio_transformation': 'Log Ratio Transformed',
'relative_abundance': 'Relative Abundance',
'logit': 'Logit',
'sqrt': 'Square Root',
'log': 'Log',
}
## Start tabs ##
tab_def_content += '''\n<div class="tab">\n'''
## Operations tabs ##
for i, (op, heatmap_html_dir) in enumerate(zip(operations, heatmap_html_dir_l)):
viewer_name = 'op%s_%s' % (i, op)
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '%s')"''' % viewer_name
tab_def_content += '''>%d. %s</button>\n''' % (i+1, op_2_name[op])
flnms = os.listdir(heatmap_html_dir)
heatmap_html_flnm = None
for flnm in flnms:
if flnm.endswith('.html'):
heatmap_html_flnm = flnm
shutil.copy2(os.path.join(heatmap_html_dir, flnm), output_directory)
tab_content += self._generate_tab_content(heatmap_html_flnm, viewer_name)
## Transformed matrix statistics tab ##
viewer_name = 'data_summary'
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += ''' id="defaultOpen"'''
if variable_specific:
tab_def_content += '''>Transformed Selected Variables Statistics</button>\n'''
else:
tab_def_content += '''>Transformed Matrix Statistics</button>\n'''
tab_content += '''\n<div id="{}" class="tabcontent" style="overflow:auto">'''.format(
viewer_name)
if variable_specific:
tab_content += '''\n<h5>Transformed Selected Variables Size: {} x {}</h5>'''.format(
len(transformed_matrix_df.index),
len(transformed_matrix_df.columns))
else:
tab_content += '''\n<h5>Transformed Matrix Size: {} x {}</h5>'''.format(
len(transformed_matrix_df.index),
len(transformed_matrix_df.columns))
tab_content += '''\n<h5>Row Aggregating Statistics</h5>'''
html = '''\n<pre class="tab">''' + str(row_data_summary).replace("\n", "<br>") + "</pre>"
tab_content += html
tab_content += '''\n<br>'''
tab_content += '''\n<hr style="height:2px;border-width:0;color:gray;background-color:gray">'''
tab_content += '''\n<br>'''
tab_content += '''\n<h5>Column Aggregating Statistics</h5>'''
html = '''\n<pre class="tab">''' + str(col_data_summary).replace("\n", "<br>") + "</pre>"
tab_content += html
tab_content += '\n</div>\n'
tab_def_content += '\n</div>\n'
return tab_def_content + tab_content
def _generate_linear_plot(self, data_df, output_directory, row_name='abundance',
top_percent=100):
linear_plot_path = 'linear_plot.html'
sum_order = data_df.sum(axis=1).sort_values(ascending=False).index
data_df = data_df.reindex(sum_order)
top_index = data_df.index[:int(data_df.index.size * top_percent / 100)]
data_df = data_df.loc[top_index]
links = data_df.stack().reset_index()
col_names = links.columns
links.rename(columns={col_names[0]: row_name,
col_names[1]: 'samples',
col_names[2]: 'value'},
inplace=True)
fig = px.line(links, x=row_name, y='value', color='samples')
plot(fig, filename=os.path.join(output_directory, linear_plot_path))
return linear_plot_path
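    # Note on the reshaping above: data_df.stack() converts the wide matrix to
    # long format, so a cell data_df.loc['OTU_1', 'sample_A'] == 5 becomes one
    # row (row_name='OTU_1', samples='sample_A', value=5) and px.line then
    # draws one colored line per sample across the row labels. 'OTU_1' and
    # 'sample_A' are made-up labels for illustration.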
def _create_chem_abun_heatmap(self, output_directory, data_groups):
data_df = pd.concat(data_groups.values())
col_ordered_label = self._compute_cluster_label_order(data_df.T.values.tolist(),
data_df.T.index.tolist())
data_df = data_df.reindex(columns=col_ordered_label)
data_label_groups_pos = dict()
for group_name, data_group_df in data_groups.items():
if pd.isna(group_name[1]):
label_name = group_name[0]
else:
label_name = '{} ({})'.format(group_name[0], group_name[1])
data_label_groups_pos[label_name] = [
data_df.index.to_list().index(data_id) for data_id in data_group_df.index]
heatmap_file_name = 'chem_abun_heatmap_{}.html'.format(str(uuid.uuid4()))
heatmap_path = os.path.join(output_directory, heatmap_file_name)
colors = px.colors.sequential.OrRd
colorscale = [[0, colors[1]], # 0
[1./10000, colors[2]], # 10
[1./1000, colors[3]], # 100
[1./100, colors[4]], # 1000
[1./10, colors[5]], # 10000
[1., colors[6]]]
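        # The breakpoints above place the OrRd palette on a roughly
        # logarithmic scale: positions 0, 1e-4, 1e-3, 1e-2, 1e-1 and 1 of the
        # normalized color range each step one shade darker, so small non-zero
        # abundances stay visually distinct from true zeros.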
layout = go.Layout(xaxis={'type': 'category'},
yaxis={'type': 'category'})
fig = go.Figure(data=go.Heatmap(
z=data_df.values,
x=data_df.columns,
y=data_df.index,
hoverongaps=False,
coloraxis='coloraxis'), layout=layout)
width = max(15 * data_df.columns.size, 1400)
height = max(10 * data_df.index.size, 1000)
fig.update_layout(coloraxis=dict(colorscale=colorscale),
plot_bgcolor='rgba(0,0,0,0)',
autosize=True,
width=width,
height=height,
xaxis=dict(tickangle=45,
automargin=True,
tickfont=dict(color='black', size=8)),
yaxis=dict(automargin=True,
tickfont=dict(color='black', size=8)))
colors = px.colors.qualitative.Bold
chemical_types = ['aggregate', 'exometabolite', 'specific']
text_height = 0
col_size = width / data_df.columns.size
label_pos = 70 / col_size
if len(data_label_groups_pos) > 1:
for i, label_name in enumerate(data_label_groups_pos):
data_label_idx = data_label_groups_pos[label_name]
chemical_type = label_name.split(' ')[0]
if i == 0:
fig.update_layout(yaxis=dict(range=[0, data_df.index.size-1],
tickvals=data_label_idx,
automargin=True,
tickfont=dict(
color=colors[chemical_types.index(chemical_type)],
size=8)))
text_height += len(data_label_idx) - 1
fig.add_annotation(x=label_pos, y=0.5,
ax=label_pos, ay=text_height,
text=label_name,
showarrow=True,
xref="x", yref="y",
axref="x", ayref="y",
arrowside='start',
# arrowwidth=1.5,
font=dict(color=colors[chemical_types.index(chemical_type)],
size=8))
else:
fig.add_trace(dict(yaxis='y{}'.format(i + 1)))
fig.update_layout({'yaxis{}'.format(i + 1): dict(
range=[0, data_df.index.size-1],
tickvals=data_label_idx,
ticktext=[data_df.index[i] for i in data_label_idx],
tickfont=dict(color=colors[chemical_types.index(chemical_type)], size=8),
automargin=True,
overlaying='y')})
text_height += len(data_label_idx)
fig.add_annotation(x=label_pos, y=text_height - len(data_label_idx) + 1,
ax=label_pos, ay=text_height,
text=label_name,
showarrow=True,
xref="x", yref="y",
axref="x", ayref="y",
arrowside='start',
# arrowwidth=1.5,
font=dict(color=colors[chemical_types.index(chemical_type)],
size=8))
plot(fig, filename=heatmap_path)
return heatmap_file_name
def _generate_chem_visualization_content(self, output_directory, data_groups):
tab_def_content = ''
tab_content = ''
viewer_name = 'data_summary'
tab_def_content += '''\n<div class="tab">\n'''
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += ''' id="defaultOpen"'''
tab_def_content += '''>Matrix Statistics</button>\n'''
tab_content += '''\n<div id="{}" class="tabcontent" style="overflow:auto">'''.format(
viewer_name)
chemical_types = list(data_groups.keys())
chemical_types = ['{} ({})'.format(item[0], item[1]) for item in chemical_types]
type_text = 'Chemical Type' if len(chemical_types) == 1 else 'Chemical Types'
tab_content += '''\n<h5>{}: {}</h5>'''.format(type_text,
', '.join(chemical_types))
for chemical_type, data_df in data_groups.items():
chemical_type = '{} ({})'.format(chemical_type[0], chemical_type[1])
tab_content += '''\n<br>'''
tab_content += '''\n<hr style="height:2px;border-width:0;color:gray;background-color:gray">'''
tab_content += '''\n<br>'''
row_data_summary = data_df.T.describe().round(2).to_string()
col_data_summary = data_df.describe().round(2).to_string()
tab_content += '''\n<h5>{} Chemical Matrix Size: {} x {}</h5>'''.format(
chemical_type[0].upper() + chemical_type[1:],
len(data_df.index),
len(data_df.columns))
tab_content += '''\n<h5>{} Row Aggregating Statistics</h5>'''.format(
chemical_type[0].upper() + chemical_type[1:])
html = '''\n<pre class="tab">''' + \
str(row_data_summary).replace("\n", "<br>") + "</pre>"
tab_content += html
tab_content += '''\n<h5>{} Column Aggregating Statistics</h5>'''.format(
chemical_type[0].upper() + chemical_type[1:])
html = '''\n<pre class="tab">''' + \
str(col_data_summary).replace("\n", "<br>") + "</pre>"
tab_content += html
tab_content += '\n</div>\n'
heatmap_index_page = self._create_chem_abun_heatmap(output_directory, data_groups)
viewer_name = 'MatrixHeatmapViewer'
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += '''>Matrix Heatmap</button>\n'''
tab_content += '''\n<div id="{}" class="tabcontent">'''.format(viewer_name)
tab_content += '\n<iframe height="1300px" width="100%" '
tab_content += 'src="{}" '.format(heatmap_index_page)
tab_content += 'style="border:none;"></iframe>'
tab_content += '\n</div>\n'
tab_def_content += '\n</div>\n'
return tab_def_content + tab_content
def _generate_visualization_content(self, output_directory, heatmap_dir, data_df,
top_heatmap_dir, top_percent):
row_data_summary = data_df.T.describe().round(2).to_string()
col_data_summary = data_df.describe().round(2).to_string()
tab_def_content = ''
tab_content = ''
viewer_name = 'data_summary'
tab_def_content += '''\n<div class="tab">\n'''
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += ''' id="defaultOpen"'''
tab_def_content += '''>Matrix Statistics</button>\n'''
tab_content += '''\n<div id="{}" class="tabcontent" style="overflow:auto">'''.format(
viewer_name)
tab_content += '''\n<h5>Matrix Size: {} x {}</h5>'''.format(len(data_df.index),
len(data_df.columns))
tab_content += '''\n<h5>Row Aggregating Statistics</h5>'''
html = '''\n<pre class="tab">''' + str(row_data_summary).replace("\n", "<br>") + "</pre>"
tab_content += html
tab_content += '''\n<br>'''
tab_content += '''\n<hr style="height:2px;border-width:0;color:gray;background-color:gray">'''
tab_content += '''\n<br>'''
tab_content += '''\n<h5>Column Aggregating Statistics</h5>'''
html = '''\n<pre class="tab">''' + str(col_data_summary).replace("\n", "<br>") + "</pre>"
tab_content += html
tab_content += '\n</div>\n'
if top_heatmap_dir:
viewer_name = 'TopHeatmapViewer'
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += '''>Top {} Percent Heatmap</button>\n'''.format(top_percent)
heatmap_report_files = os.listdir(top_heatmap_dir)
heatmap_index_page = None
for heatmap_report_file in heatmap_report_files:
if heatmap_report_file.endswith('.html'):
heatmap_index_page = heatmap_report_file
shutil.copy2(os.path.join(top_heatmap_dir, heatmap_report_file),
output_directory)
if heatmap_index_page:
tab_content += '''\n<div id="{}" class="tabcontent">'''.format(viewer_name)
msg = 'Top {} percent of matrix sorted by sum of abundance values.'.format(
top_percent)
tab_content += '''<p style="color:red;" >{}</p>'''.format(msg)
tab_content += '\n<iframe height="1300px" width="100%" '
tab_content += 'src="{}" '.format(heatmap_index_page)
tab_content += 'style="border:none;"></iframe>'
tab_content += '\n</div>\n'
else:
tab_content += '''\n<div id="{}" class="tabcontent">'''.format(viewer_name)
tab_content += '''\n<p style="color:red;" >'''
tab_content += '''Heatmap is too large to be displayed.</p>\n'''
tab_content += '\n</div>\n'
if False and len(data_df.columns) <= 200:
if top_heatmap_dir:
viewer_name = 'MatrixLinearPlotViewer'
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += '''>Top {} Percent Linear Plot</button>\n'''.format(top_percent)
linear_plot_page = self._generate_linear_plot(data_df, output_directory,
top_percent=top_percent)
tab_content += '''\n<div id="{}" class="tabcontent">'''.format(viewer_name)
msg = 'Top {} percent of matrix sorted by sum of abundance values.'.format(
top_percent)
tab_content += '''<p style="color:red;" >{}</p>'''.format(msg)
tab_content += '\n<iframe height="1300px" width="100%" '
tab_content += 'src="{}" '.format(linear_plot_page)
tab_content += 'style="border:none;"></iframe>'
tab_content += '\n</div>\n'
else:
viewer_name = 'MatrixLinearPlotViewer'
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += '''>Matrix Linear Plot</button>\n'''
linear_plot_page = self._generate_linear_plot(data_df, output_directory)
tab_content += '''\n<div id="{}" class="tabcontent">'''.format(viewer_name)
tab_content += '\n<iframe height="1300px" width="100%" '
tab_content += 'src="{}" '.format(linear_plot_page)
tab_content += 'style="border:none;"></iframe>'
tab_content += '\n</div>\n'
viewer_name = 'MatrixHeatmapViewer'
tab_def_content += '''\n<button class="tablinks" '''
tab_def_content += '''onclick="openTab(event, '{}')"'''.format(viewer_name)
tab_def_content += '''>Matrix Heatmap</button>\n'''
heatmap_report_files = os.listdir(heatmap_dir)
heatmap_index_page = None
for heatmap_report_file in heatmap_report_files:
if heatmap_report_file.endswith('.html'):
heatmap_index_page = heatmap_report_file
shutil.copy2(os.path.join(heatmap_dir, heatmap_report_file),
output_directory)
if heatmap_index_page:
tab_content += '''\n<div id="{}" class="tabcontent">'''.format(viewer_name)
tab_content += '\n<iframe height="1300px" width="100%" '
tab_content += 'src="{}" '.format(heatmap_index_page)
tab_content += 'style="border:none;"></iframe>'
tab_content += '\n</div>\n'
else:
tab_content += '''\n<div id="{}" class="tabcontent">'''.format(viewer_name)
tab_content += '''\n<p style="color:red;" >'''
tab_content += '''Heatmap is too large to be displayed.</p>\n'''
tab_content += '\n</div>\n'
tab_def_content += '\n</div>\n'
return tab_def_content + tab_content
def _generate_mantel_test_html_report(self, pwmantel_res):
output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
logging.info('Start generating html report in {}'.format(output_directory))
html_report = list()
self._mkdir_p(output_directory)
result_file_path = os.path.join(output_directory, 'mantel_test_viewer_report.html')
visualization_content = self._generate_mantel_test_visualization_content(pwmantel_res)
table_style_content = '''
table {
font-family: arial, sans-serif;
border-collapse: collapse;
width: 100%;
}
td, th {
border: 1px solid #dddddd;
text-align: left;
padding: 8px;
}
tr:nth-child(even) {
background-color: #dddddd;
}
</style>'''
with open(result_file_path, 'w') as result_file:
with open(os.path.join(os.path.dirname(__file__), 'templates', 'matrix_template.html'),
'r') as report_template_file:
report_template = report_template_file.read()
report_template = report_template.replace('<p>Visualization_Content</p>',
visualization_content)
report_template = report_template.replace('</style>',
table_style_content)
result_file.write(report_template)
report_shock_id = self.dfu.file_to_shock({'file_path': output_directory,
'pack': 'zip'})['shock_id']
html_report.append({'shock_id': report_shock_id,
'name': os.path.basename(result_file_path),
'label': os.path.basename(result_file_path),
'description': 'HTML summary report for Mantel Test App'
})
return html_report
def _generate_simper_html_report(self, simper_ret, simper_sum, species_stats, grouping_names):
output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
logging.info('Start generating html report in {}'.format(output_directory))
html_report = list()
self._mkdir_p(output_directory)
result_file_path = os.path.join(output_directory, 'simper_viewer_report.html')
visualization_content = self._generate_simper_visualization_content(simper_ret,
simper_sum,
species_stats,
grouping_names,
output_directory)
table_style_content = '''
table {
font-family: arial, sans-serif;
border-collapse: collapse;
width: 66%;
}
td, th {
border: 1px solid #dddddd;
text-align: left;
padding: 8px;
}
tr:nth-child(even) {
background-color: #dddddd;
}
</style>'''
with open(result_file_path, 'w') as result_file:
with open(os.path.join(os.path.dirname(__file__), 'templates', 'matrix_template.html'),
'r') as report_template_file:
report_template = report_template_file.read()
report_template = report_template.replace('<p>Visualization_Content</p>',
visualization_content)
report_template = report_template.replace('</style>',
table_style_content)
result_file.write(report_template)
report_shock_id = self.dfu.file_to_shock({'file_path': output_directory,
'pack': 'zip'})['shock_id']
html_report.append({'shock_id': report_shock_id,
'name': os.path.basename(result_file_path),
'label': os.path.basename(result_file_path),
'description': 'HTML summary report for Simper App'
})
return html_report
def _generate_variable_stats_html_report(self, anosim_res, permanova_res, permdisp_res):
output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
logging.info('Start generating html report in {}'.format(output_directory))
html_report = list()
self._mkdir_p(output_directory)
result_file_path = os.path.join(output_directory, 'variable_stats_viewer_report.html')
visualization_content = self._generate_variable_stats_visualization_content(anosim_res,
permanova_res,
permdisp_res)
table_style_content = '''
table {
font-family: arial, sans-serif;
border-collapse: collapse;
width: 66%;
}
td, th {
border: 1px solid #dddddd;
text-align: left;
padding: 8px;
}
tr:nth-child(even) {
background-color: #dddddd;
}
</style>'''
with open(result_file_path, 'w') as result_file:
with open(os.path.join(os.path.dirname(__file__), 'templates', 'matrix_template.html'),
'r') as report_template_file:
report_template = report_template_file.read()
report_template = report_template.replace('<p>Visualization_Content</p>',
visualization_content)
report_template = report_template.replace('</style>',
table_style_content)
result_file.write(report_template)
report_shock_id = self.dfu.file_to_shock({'file_path': output_directory,
'pack': 'zip'})['shock_id']
html_report.append({'shock_id': report_shock_id,
'name': os.path.basename(result_file_path),
'label': os.path.basename(result_file_path),
'description': 'HTML summary report for Variable Stats App'
})
return html_report
def _generate_rarefy_html_report(self, rarefied_matrix_dir,
rarecurve_image, obs_vs_rare_image, random_rare_df):
output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
logging.info('Start generating html report in {}'.format(output_directory))
html_report = list()
self._mkdir_p(output_directory)
result_file_path = os.path.join(output_directory, 'rarefy_matrix_viewer_report.html')
visualization_content = self._generate_rarefy_visualization_content(
output_directory,
rarefied_matrix_dir,
rarecurve_image,
obs_vs_rare_image,
random_rare_df)
with open(result_file_path, 'w') as result_file:
with open(os.path.join(os.path.dirname(__file__), 'templates', 'matrix_template.html'),
'r') as report_template_file:
report_template = report_template_file.read()
report_template = report_template.replace('<p>Visualization_Content</p>',
visualization_content)
result_file.write(report_template)
report_shock_id = self.dfu.file_to_shock({'file_path': output_directory,
'pack': 'zip'})['shock_id']
html_report.append({'shock_id': report_shock_id,
'name': os.path.basename(result_file_path),
'label': os.path.basename(result_file_path),
                            'description': 'HTML summary report for Rarefy Matrix App'
})
return html_report
def _generate_transform_html_report(self, operations, heatmap_html_dir_l,
transformed_matrix_df, variable_specific):
output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
logging.info('Start generating html report in {}'.format(output_directory))
html_report = list()
self._mkdir_p(output_directory)
result_file_path = os.path.join(output_directory, 'transform_matrix_viewer_report.html')
visualization_content = self._generate_trans_visualization_content(
output_directory,
operations,
heatmap_html_dir_l,
transformed_matrix_df,
variable_specific)
with open(result_file_path, 'w') as result_file:
with open(os.path.join(os.path.dirname(__file__), 'templates', 'matrix_template.html'),
'r') as report_template_file:
report_template = report_template_file.read()
report_template = report_template.replace('<p>Visualization_Content</p>',
visualization_content)
result_file.write(report_template)
report_shock_id = self.dfu.file_to_shock({'file_path': output_directory,
'pack': 'zip'})['shock_id']
html_report.append({'shock_id': report_shock_id,
'name': os.path.basename(result_file_path),
'label': os.path.basename(result_file_path),
'description': 'HTML summary report for Transform Matrix App'
})
return html_report
def _compute_cluster_label_order(self, values, labels):
# values = [[1, 0, 21, 50, 1], [20, 0, 60, 80, 30], [30, 60, 1, -10, 20]]
# labels = ['model_1', 'model_2', 'model_3']
if len(labels) == 1:
return labels
dist_matrix = pdist(values)
linkage_matrix = linkage(dist_matrix, 'ward')
dn = dendrogram(linkage_matrix, labels=labels, distance_sort='ascending')
ordered_label = dn['ivl']
return ordered_label
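    # Hedged usage sketch with the toy inputs from the comment above: calling
    # self._compute_cluster_label_order(values, labels) computes euclidean
    # pairwise distances, applies Ward linkage and returns the labels in
    # dendrogram leaf order, i.e. some permutation of
    # ['model_1', 'model_2', 'model_3'] determined by the clustering result.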
def _generate_chem_abund_heatmap_html_report(self, data, metadata_df):
logging.info('Start generating chemical abundance heatmap report page')
data_df = pd.DataFrame(data['values'], index=data['row_ids'], columns=data['col_ids'])
result_directory = os.path.join(self.scratch, str(uuid.uuid4()))
self._mkdir_p(result_directory)
group_by = ['chemical_type', 'units']
metadata_groups = metadata_df.groupby(by=group_by).groups
data_groups = dict()
for group_name, ids in metadata_groups.items():
chem_type_data = data_df.loc[ids]
idx_ordered_label = self._compute_cluster_label_order(chem_type_data.values.tolist(),
chem_type_data.index.tolist())
data_groups[group_name] = chem_type_data.reindex(index=idx_ordered_label)
output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
logging.info('Start generating html report in {}'.format(output_directory))
html_report = list()
self._mkdir_p(output_directory)
result_file_path = os.path.join(output_directory, 'matrix_viewer_report.html')
visualization_content = self._generate_chem_visualization_content(output_directory,
data_groups)
with open(result_file_path, 'w') as result_file:
with open(os.path.join(os.path.dirname(__file__), 'templates', 'matrix_template.html'),
'r') as report_template_file:
report_template = report_template_file.read()
report_template = report_template.replace('<p>Visualization_Content</p>',
visualization_content)
result_file.write(report_template)
report_shock_id = self.dfu.file_to_shock({'file_path': output_directory,
'pack': 'zip'})['shock_id']
html_report.append({'shock_id': report_shock_id,
'name': os.path.basename(result_file_path),
'label': os.path.basename(result_file_path),
'description': 'HTML summary report for Import Matrix App'
})
return html_report
def _generate_heatmap_html_report(self, data):
logging.info('Start generating heatmap report page')
data_df = pd.DataFrame(data['values'], index=data['row_ids'], columns=data['col_ids'])
result_directory = os.path.join(self.scratch, str(uuid.uuid4()))
self._mkdir_p(result_directory)
tsv_file_path = os.path.join(result_directory, 'heatmap_data_{}.tsv'.format(
str(uuid.uuid4())))
data_df.to_csv(tsv_file_path)
heatmap_dir = self.report_util.build_heatmap_html({
'tsv_file_path': tsv_file_path,
'cluster_data': True})['html_dir']
top_heatmap_dir = None
top_percent = 100
if len(data_df.index) > 500:
display_count = 200 # roughly count for display items
top_percent = min(int(display_count / len(data_df.index) * 100), 100)
top_percent = max(top_percent, 1)
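            # Worked example (illustrative): for a matrix with 1000 rows,
            # top_percent = max(min(int(200 / 1000 * 100), 100), 1) = 20, so
            # the extra heatmap below renders only the top 20 percent of rows
            # after the report utility sorts them by abundance sum.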
top_heatmap_dir = self.report_util.build_heatmap_html({
'tsv_file_path': tsv_file_path,
'sort_by_sum': True,
'top_percent': top_percent})['html_dir']
output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
logging.info('Start generating html report in {}'.format(output_directory))
html_report = list()
self._mkdir_p(output_directory)
result_file_path = os.path.join(output_directory, 'matrix_viewer_report.html')
visualization_content = self._generate_visualization_content(output_directory,
heatmap_dir,
data_df,
top_heatmap_dir,
top_percent)
with open(result_file_path, 'w') as result_file:
with open(os.path.join(os.path.dirname(__file__), 'templates', 'matrix_template.html'),
'r') as report_template_file:
report_template = report_template_file.read()
report_template = report_template.replace('<p>Visualization_Content</p>',
visualization_content)
result_file.write(report_template)
report_shock_id = self.dfu.file_to_shock({'file_path': output_directory,
'pack': 'zip'})['shock_id']
html_report.append({'shock_id': report_shock_id,
'name': os.path.basename(result_file_path),
'label': os.path.basename(result_file_path),
'description': 'HTML summary report for Import Matrix App'
})
return html_report
def _generate_rarefy_report(self, new_matrix_obj_ref, workspace_id,
random_rare_df, rarecurve_image, obs_vs_rare_image,
warnings):
objects_created = [{'ref': new_matrix_obj_ref, 'description': 'Randomly Rarefied Matrix'}]
data_tsv_directory = os.path.join(self.scratch, str(uuid.uuid4()))
self._mkdir_p(data_tsv_directory)
logging.info('Start generating matrix tsv files in {}'.format(data_tsv_directory))
rarefied_matrix_tsv_path = os.path.join(data_tsv_directory,
'rarefied_matrix_{}.tsv'.format(
str(uuid.uuid4())))
random_rare_df.to_csv(rarefied_matrix_tsv_path)
rarefied_matrix_dir = self.report_util.build_heatmap_html({
'tsv_file_path': rarefied_matrix_tsv_path,
'cluster_data': True})['html_dir']
output_html_files = self._generate_rarefy_html_report(rarefied_matrix_dir,
rarecurve_image,
obs_vs_rare_image,
random_rare_df)
report_params = {'message': '',
'objects_created': objects_created,
'workspace_id': workspace_id,
'html_links': output_html_files,
'direct_html_link_index': 0,
'html_window_height': 1400,
'report_object_name': 'rarefy_matrix_' + str(uuid.uuid4()),
'warnings': warnings}
kbase_report_client = KBaseReport(self.callback_url, token=self.token)
output = kbase_report_client.create_extended_report(report_params)
report_output = {'report_name': output['name'], 'report_ref': output['ref']}
return report_output
def _generate_transform_report(self, new_matrix_obj_ref, workspace_id,
operations, df_results, variable_specific=False):
objects_created = [{'ref': new_matrix_obj_ref, 'description': 'Transformed Matrix'}]
data_tsv_directory = os.path.join(self.scratch, str(uuid.uuid4()))
self._mkdir_p(data_tsv_directory)
heatmap_html_dir_l = []
for i, (op, df) in enumerate(zip(operations, df_results)):
tsv_path = os.path.join(data_tsv_directory, 'op%d_%s.tsv' % (i, op))
df.to_csv(tsv_path)
heatmap_html_dir = self.report_util.build_heatmap_html({
'tsv_file_path': tsv_path,
'cluster_data': True
})['html_dir']
heatmap_html_dir_l.append(heatmap_html_dir)
output_html_files = self._generate_transform_html_report(operations, heatmap_html_dir_l,
df_results[-1],
variable_specific)
report_params = {'message': '',
'objects_created': objects_created,
'workspace_id': workspace_id,
'html_links': output_html_files,
'direct_html_link_index': 0,
'html_window_height': 1400,
'report_object_name': 'transform_matrix_' + str(uuid.uuid4())}
kbase_report_client = KBaseReport(self.callback_url, token=self.token)
output = kbase_report_client.create_extended_report(report_params)
report_output = {'report_name': output['name'], 'report_ref': output['ref']}
return report_output
def _generate_mantel_test_report(self, workspace_id, pwmantel_res):
output_html_files = self._generate_mantel_test_html_report(pwmantel_res)
report_params = {'message': '',
'workspace_id': workspace_id,
'html_links': output_html_files,
'direct_html_link_index': 0,
'html_window_height': 300,
'report_object_name': 'mantel_test_' + str(uuid.uuid4())}
kbase_report_client = KBaseReport(self.callback_url, token=self.token)
output = kbase_report_client.create_extended_report(report_params)
report_output = {'report_name': output['name'], 'report_ref': output['ref']}
return report_output
def _generate_simper_report(self, workspace_id, simper_ret, simper_sum,
species_stats, grouping_names):
output_html_files = self._generate_simper_html_report(simper_ret, simper_sum,
species_stats, grouping_names)
report_params = {'message': '',
'workspace_id': workspace_id,
'html_links': output_html_files,
'direct_html_link_index': 0,
'html_window_height': 450,
'report_object_name': 'simper_' + str(uuid.uuid4())}
kbase_report_client = KBaseReport(self.callback_url, token=self.token)
output = kbase_report_client.create_extended_report(report_params)
report_output = {'report_name': output['name'], 'report_ref': output['ref']}
return report_output
def _generate_variable_stats_report(self, workspace_id,
anosim_res, permanova_res, permdisp_res):
output_html_files = self._generate_variable_stats_html_report(anosim_res,
permanova_res,
permdisp_res)
report_params = {'message': '',
'workspace_id': workspace_id,
'html_links': output_html_files,
'direct_html_link_index': 0,
'html_window_height': 450,
'report_object_name': 'variable_stats_' + str(uuid.uuid4())}
kbase_report_client = KBaseReport(self.callback_url, token=self.token)
output = kbase_report_client.create_extended_report(report_params)
report_output = {'report_name': output['name'], 'report_ref': output['ref']}
return report_output
def _generate_report(self, matrix_obj_ref, workspace_name, new_row_attr_ref=None,
new_col_attr_ref=None, data=None, metadata_df=None):
"""
_generate_report: generate summary report
"""
objects_created = [{'ref': matrix_obj_ref, 'description': 'Imported Matrix'}]
if new_row_attr_ref:
objects_created.append({'ref': new_row_attr_ref,
'description': 'Imported Row Attribute Mapping'})
if new_col_attr_ref:
objects_created.append({'ref': new_col_attr_ref,
'description': 'Imported Column Attribute Mapping'})
if data:
if metadata_df is not None:
output_html_files = self._generate_chem_abund_heatmap_html_report(data,
metadata_df)
else:
output_html_files = self._generate_heatmap_html_report(data)
report_params = {'message': '',
'objects_created': objects_created,
'workspace_name': workspace_name,
'html_links': output_html_files,
'direct_html_link_index': 0,
'html_window_height': 1400,
'report_object_name': 'import_matrix_from_excel_' + str(uuid.uuid4())}
else:
report_params = {'message': '',
'objects_created': objects_created,
'workspace_name': workspace_name,
'report_object_name': 'import_matrix_from_excel_' + str(uuid.uuid4())}
kbase_report_client = KBaseReport(self.callback_url, token=self.token)
output = kbase_report_client.create_extended_report(report_params)
report_output = {'report_name': output['name'], 'report_ref': output['ref']}
return report_output
@staticmethod
def _process_mapping_sheet(file_path, sheet_name):
"""
_process_mapping: process mapping sheet
"""
try:
df = pd.read_excel(file_path, sheet_name=sheet_name, dtype='str')
except XLRDError:
return dict()
else:
mapping = {value[0]: value[1] for value in df.values.tolist()}
return mapping
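    # Sketch of the returned structure: pd.read_excel consumes the first sheet
    # row as the header, and every following row contributes one key/value
    # pair, so rows ('scale', 'raw') and ('description', 'test run') become
    # {'scale': 'raw', 'description': 'test run'}; a missing sheet yields {}.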
def _process_attribute_mapping_sheet(self, file_path, sheet_name, matrix_name, workspace_id):
"""
_process_attribute_mapping_sheet: process attribute_mapping sheet
"""
try:
df = pd.read_excel(file_path, sheet_name=sheet_name, index_col=0)
except XLRDError:
return ''
else:
obj_name = f'{matrix_name}_{sheet_name}'
result_directory = os.path.join(self.scratch, str(uuid.uuid4()))
self._mkdir_p(result_directory)
file_path = os.path.join(result_directory, '{}.xlsx'.format(obj_name))
df.to_excel(file_path)
import_attribute_mapping_params = {
'output_obj_name': obj_name,
'output_ws_id': workspace_id,
'input_file_path': file_path
}
ref = self.attr_util.file_to_attribute_mapping(import_attribute_mapping_params)
return ref.get('attribute_mapping_ref')
@staticmethod
def _file_to_df(file_path):
logging.info('start parsing file content to data frame')
try:
df = pd.read_excel(file_path, sheet_name='data', index_col=0)
except XLRDError:
try:
df = pd.read_excel(file_path, index_col=0)
logging.warning('WARNING: A sheet named "data" was not found in the attached file,'
' proceeding with the first sheet as the data sheet.')
except XLRDError:
try:
reader = pd.read_csv(file_path, sep=None, iterator=True)
inferred_sep = reader._engine.data.dialect.delimiter
df = pd.read_csv(file_path, sep=inferred_sep, index_col=0)
except Exception:
raise ValueError(
                        'Cannot parse file. Please provide a valid TSV, Excel or CSV file')
# remove NaN indexed rows
df = df[df.index.notnull()]
df.index = df.index.astype('str')
df.columns = df.columns.astype('str')
# fill NA with "None" so that they are properly represented as nulls in the KBase Object
df = df.where((pd.notnull(df)), None)
return df
@staticmethod
def _check_df_col_inclusive(df, col_name, valid_values):
# check if given column contains all values in valid_values
if col_name not in df:
raise ValueError('Please provide {} column'.format(col_name))
unmatched_type = set(df[col_name]) - valid_values
if unmatched_type:
err_msg = 'Found unsupported {}: {}\n'.format(' '.join(col_name.split('_')),
unmatched_type)
err_msg += 'Please use one of {} as {}'.format(valid_values,
' '.join(col_name.split('_')))
raise ValueError(err_msg)
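    # Example grounded in the chemical-abundance checks below:
    # _check_df_col_inclusive(metadata_df, 'chemical_type',
    # {'specific', 'aggregate', 'exometabolite'}) passes when every
    # chemical_type entry is one of those values and raises ValueError listing
    # any unsupported entries otherwise.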
@staticmethod
def _check_df_col_non_empty(df, col_name):
if col_name not in df:
raise ValueError('Please provide {} column'.format(col_name))
# check if any column cell is empty(nan)
if df[col_name].isna().any():
empty_idx = list(df.loc[df[col_name].isna()].index)
raise ValueError('Missing [{}] value for index: {}'.format(col_name, empty_idx))
def _cal_identification_level(self, df, ids_df):
logging.info('Start calculating measured identification level')
identification_level = list()
high_level_keys = {'kegg', 'chebi', 'modelseed', 'inchikey', 'inchi', 'smiles'}
medium_level_keys = {'formula', 'compound_name'}
low_level_keys = {'mass'}
for idx in df.index:
db_ids = ids_df.loc[idx]
db_ids.dropna(inplace=True)
non_empty_ids_keys = set(db_ids.index)
if non_empty_ids_keys & high_level_keys:
identification_level.append('high')
elif non_empty_ids_keys & medium_level_keys:
identification_level.append('medium')
elif non_empty_ids_keys & low_level_keys:
identification_level.append('low')
else:
logging.info('Cannot calculate measured identification level for {}'.format(idx))
identification_level.append(None)
df['measured_identification_level'] = identification_level
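    # Classification sketch: a row whose only populated identifier is 'kegg'
    # (or any other high_level_keys member) is labeled 'high', one with only
    # 'formula' or 'compound_name' is 'medium', one with only 'mass' is 'low',
    # and a row with no usable identifiers gets None.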
def _check_chem_ids(self, df):
# check chemical abundance has at least one of database id
id_fields = {'mass', 'formula', 'inchikey', 'inchi', 'smiles', 'compound_name',
'kegg', 'chebi', 'modelseed'}
common_ids = list(df.columns & id_fields)
if not common_ids:
            raise ValueError('Missing compound identification columns')
ids_df = df.loc[:, common_ids]
missing_ids_idx = list(ids_df.loc[ids_df.isnull().all(axis=1)].index)
if missing_ids_idx:
err_msg = 'Missing compound identification for {}\n'.format(missing_ids_idx)
err_msg += 'Please provide at least one of {}'.format(id_fields)
raise ValueError(err_msg)
self._cal_identification_level(df, ids_df)
def _check_chem_abun_metadata(self, metadata_df):
logging.info('Start checking metadata fields for Chemical Abundance Matrix')
metadata_df.replace(r'^\s+$', np.nan, regex=True, inplace=True)
self._check_chem_ids(metadata_df)
# convert string field to lower case
str_cols = ['chemical_type', 'measurement_type', 'units', 'unit_medium']
for str_col in str_cols:
if str_col in metadata_df:
metadata_df[str_col] = metadata_df[str_col].apply(lambda s: s.lower()
if type(s) == str else s)
valid_chem_types = {'specific', 'aggregate', 'exometabolite'}
self._check_df_col_inclusive(metadata_df, 'chemical_type', valid_chem_types)
specific_abun = metadata_df.loc[metadata_df['chemical_type'] == 'specific']
aggregate_abun = metadata_df.loc[metadata_df['chemical_type'] == 'aggregate']
exometabolite_abun = metadata_df.loc[metadata_df['chemical_type'] == 'exometabolite']
if not specific_abun.index.empty:
            logging.info('Start examining specific chemical abundances')
valid_measurement_types = {'unknown', 'fticr', 'orbitrap', 'quadrapole'}
self._check_df_col_inclusive(
specific_abun, 'measurement_type', valid_measurement_types)
valid_unit_medium = {'soil', 'solvent', 'water'}
self._check_df_col_inclusive(specific_abun, 'unit_medium', valid_unit_medium)
valid_chromatography_type = {'unknown', 'HPLC', 'MS/MS', 'LCMS', 'GS'}
self._check_df_col_inclusive(
specific_abun, 'chromatography_type', valid_chromatography_type)
# valid_units = {'mg/kg', 'g/kg', 'mg/l', 'mg/g dw',
# 'mm (millimolar)', 'm (molar)', '% (percentage)',
# 'total weight %', 'unknown'}
# self._check_df_col_inclusive(
# specific_abun, 'units', valid_units)
non_empty_fields = ['units', 'chromatography_type']
for field in non_empty_fields:
self._check_df_col_non_empty(specific_abun, field)
if not exometabolite_abun.index.empty:
            logging.info('Start examining exometabolite chemical abundances')
valid_measurement_types = {'unknown', 'fticr', 'orbitrap', 'quadrapole'}
self._check_df_col_inclusive(
exometabolite_abun, 'measurement_type', valid_measurement_types)
valid_unit_medium = {'soil', 'solvent', 'water'}
self._check_df_col_inclusive(exometabolite_abun, 'unit_medium', valid_unit_medium)
valid_chromatography_type = {'unknown', 'HPLC', 'MS/MS', 'LCMS', 'GS'}
self._check_df_col_inclusive(
exometabolite_abun, 'chromatography_type', valid_chromatography_type)
# valid_units = {'mg/kg', 'g/kg', 'mg/l', 'mg/g dw',
# 'mm (millimolar)', 'm (molar)', '% (percentage)'}
# self._check_df_col_inclusive(
# exometabolite_abun, 'units', valid_units)
non_empty_fields = ['units', 'chromatography_type']
for field in non_empty_fields:
self._check_df_col_non_empty(exometabolite_abun, field)
if not aggregate_abun.index.empty:
            logging.info('Start examining aggregate chemical abundances')
non_empty_fields = ['protocol']
for field in non_empty_fields:
self._check_df_col_non_empty(aggregate_abun, field)
def _file_to_chem_abun_data(self, file_path, refs, matrix_name, workspace_id):
logging.info('Start reading and converting excel file data')
data = refs
df = self._file_to_df(file_path)
metadata_df = None
rename_map = {'Aggregate M/Z': 'aggregate_mz',
'Compound Name': 'compound_name',
'Predicted Formula': 'formula',
'Predicted Structure (smiles)': 'smiles',
'Predicted Structure (inchi)': 'inchi',
'Predicted Structure (inchi-key)': 'inchikey',
'Theoretical Mass': 'mass',
'Retention Time': 'retention_time',
'Polarity': 'polarity',
'KEGG': 'kegg',
'ChEBI': 'chebi',
'ModelSEED': 'modelseed',
# 'Theoretical M/Z': 'theoretical_mz',
# 'Reference Standard RT (seconds)': 'reference_rt',
'Chemical Type': 'chemical_type',
'Measurement Type': 'measurement_type',
'Units': 'units',
'Unit Medium': 'unit_medium',
'Chemical Ontology Class': 'chemical_ontology_class',
'Measured Identification Level': 'measured_identification_level',
'Chromatography Type': 'chromatography_type',
'Chemical Class': 'chemical_class',
'Protocol': 'protocol',
'Identifier': 'identifier'
}
df.rename(columns=rename_map, inplace=True)
metadata_keys = rename_map.values()
shared_metadata_keys = list(set(metadata_keys) & set(df.columns))
if shared_metadata_keys:
metadata_df = df[shared_metadata_keys]
if set(metadata_df.all(skipna=False).tolist()) == {None}:
raise ValueError('All of metadata fields are None')
df.drop(columns=shared_metadata_keys, inplace=True)
self._check_chem_abun_metadata(metadata_df)
else:
err_msg = 'Please provide at least one of below metadata fields:\n{}'.format(
list(rename_map.keys()))
raise ValueError(err_msg)
try:
df = df.astype(float)
except ValueError:
            err_msg = 'Found some non-float values. The matrix may only contain numeric values.\n'
            err_msg += 'Please list any non-numeric column names in the Metadata Keys field.'
raise ValueError(err_msg)
df.fillna(0, inplace=True)
df.index = df.index.astype('str')
df.columns = df.columns.astype('str')
matrix_data = {'row_ids': df.index.tolist(),
'col_ids': df.columns.tolist(),
'values': df.values.tolist()}
data.update({'data': matrix_data})
data.update(self._get_axis_attributes('col', matrix_data, refs, file_path, matrix_name,
workspace_id))
data.update(self._get_axis_attributes('row', matrix_data, refs, file_path, matrix_name,
workspace_id, metadata_df=metadata_df))
return data, metadata_df
def _file_to_data(self, file_path, refs, matrix_name, workspace_id):
logging.info('Start reading and converting excel file data')
data = refs
df = self._file_to_df(file_path)
df.index = df.index.astype('str')
df.columns = df.columns.astype('str')
matrix_data = {'row_ids': df.index.tolist(),
'col_ids': df.columns.tolist(),
'values': df.values.tolist()}
data.update({'data': matrix_data})
data.update(self._get_axis_attributes('col', matrix_data, refs, file_path, matrix_name,
workspace_id))
data.update(self._get_axis_attributes('row', matrix_data, refs, file_path, matrix_name,
workspace_id))
# processing metadata
metadata = self._process_mapping_sheet(file_path, 'metadata')
data['attributes'] = {}
data['search_attributes'] = []
for k, v in metadata.items():
k = k.strip()
v = v.strip()
if k in TYPE_ATTRIBUTES:
data[k] = v
else:
data['attributes'][k] = v
data['search_attributes'].append(" | ".join((k, v)))
return data
def _sample_set_to_attribute_mapping(self, axis_ids, sample_set_ref, obj_name, ws_id):
am_data = self.sampleservice_util.sample_set_to_attribute_mapping(sample_set_ref)
unmatched_ids = set(axis_ids) - set(am_data['instances'].keys())
if unmatched_ids:
name = "Column"
            raise ValueError(f"The following {name} IDs from the uploaded matrix do not match "
                             f"the supplied {name} attribute mapping: {', '.join(unmatched_ids)}"
                             f"\nPlease verify the input data or upload an excel file with a "
                             f"{name} mapping tab.")
logging.info('start saving AttributeMapping object: {}'.format(obj_name))
info = self.dfu.save_objects({
"id": ws_id,
"objects": [{
"type": "KBaseExperiments.AttributeMapping",
"data": am_data,
"name": obj_name
}]
})[0]
return f'{info[6]}/{info[0]}/{info[4]}'
def _meta_df_to_attribute_mapping(self, axis_ids, metadata_df, obj_name, ws_id):
data = {'ontology_mapping_method': "TSV file", 'instances': {}}
attribute_keys = metadata_df.columns.tolist()
data['attributes'] = [{'attribute': key, 'source': 'upload'} for key in attribute_keys]
for axis_id in axis_ids:
data['instances'][axis_id] = [str(i) for i in metadata_df.loc[axis_id].tolist()]
logging.info('start saving AttributeMapping object: {}'.format(obj_name))
info = self.dfu.save_objects({
"id": ws_id,
"objects": [{
"type": "KBaseExperiments.AttributeMapping",
"data": data,
"name": obj_name
}]
})[0]
return f'{info[6]}/{info[0]}/{info[4]}'
def _get_axis_attributes(self, axis, matrix_data, refs, file_path, matrix_name, workspace_id,
metadata_df=None):
"""Get the row/col_attributemapping and mapping of ids, validating as needed"""
# Parameter specified mappings should take precedence over tabs in excel so only process
# if attributemapping_ref is missing:
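        # Resolution order used below (only one source is used to build the attribute mapping):
        #   1) a supplied sample_set_ref (columns only), 2) an explicit {axis}_attributemapping_ref,
        #   3) a metadata dataframe parsed from the file, 4) an '{axis}_attribute_mapping' excel sheet.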
attr_data = {}
axis_ids = matrix_data[f'{axis}_ids']
attributemapping_ref = None
if refs.get('sample_set_ref') and axis == 'col':
name = matrix_name + "_{}_attributes".format(axis)
attributemapping_ref = self._sample_set_to_attribute_mapping(
axis_ids, refs.get('sample_set_ref'), name, workspace_id)
elif refs.get(f'{axis}_attributemapping_ref'):
attributemapping_ref = refs[f'{axis}_attributemapping_ref']
elif metadata_df is not None:
name = matrix_name + "_{}_attributes".format(axis)
attributemapping_ref = self._meta_df_to_attribute_mapping(
axis_ids, metadata_df, name, workspace_id)
else:
attributemapping_ref = self._process_attribute_mapping_sheet(
file_path, f'{axis}_attribute_mapping', matrix_name, workspace_id)
if attributemapping_ref:
attr_data[f'{axis}_attributemapping_ref'] = attributemapping_ref
# col/row_mappings may not be supplied
id_mapping = self._process_mapping_sheet(file_path, f'{axis}_mapping')
if id_mapping:
attr_data[f'{axis}_mapping'] = id_mapping
# if no mapping, axis ids must match the attribute mapping
elif attributemapping_ref:
am_data = self.dfu.get_objects(
{'object_refs': [attributemapping_ref]}
)['data'][0]['data']
unmatched_ids = set(axis_ids) - set(am_data['instances'].keys())
if unmatched_ids:
name = "Column" if axis == 'col' else "Row"
                raise ValueError(f"The following {name} IDs from the uploaded matrix do not match "
                                 f"the supplied {name} attribute mapping: {', '.join(unmatched_ids)}"
                                 f"\nPlease verify the input data or upload an excel file with a "
                                 f"{name} mapping tab.")
else:
            # no mapping sheet and no attribute mapping: default to an identity mapping of the axis ids
attr_data[f'{axis}_mapping'] = {x: x for x in axis_ids}
return attr_data
@staticmethod
def _build_header_str(attribute_names):
header_str = ''
        width = 100.0/(len(attribute_names) + 1)  # +1 accounts for the leading Feature ID column
header_str += '<tr class="header">'
header_str += '<th style="width:{0:.2f}%;">Feature ID</th>'.format(width)
for attribute_name in attribute_names:
header_str += '<th style="width:{0:.2f}%;"'.format(width)
header_str += '>{}</th>'.format(attribute_name)
header_str += '</tr>'
return header_str
def _build_html_str(self, row_mapping, attributemapping_data, row_ids):
logging.info('Start building html replacement')
attribute_names = [attributes.get('attribute')
for attributes in attributemapping_data.get('attributes')]
header_str = self._build_header_str(attribute_names)
table_str = ''
instances = attributemapping_data.get('instances')
for feature_id, attribute_id in row_mapping.items():
if feature_id in row_ids:
feature_instances = instances.get(attribute_id)
table_str += '<tr>'
table_str += '<td>{}</td>'.format(feature_id)
for feature_instance in feature_instances:
table_str += '<td>{}</td>'.format(feature_instance)
table_str += '</tr>'
return header_str, table_str
def _generate_search_html_report(self, header_str, table_str):
html_report = list()
output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
self._mkdir_p(output_directory)
result_file_path = os.path.join(output_directory, 'search.html')
shutil.copy2(os.path.join(os.path.dirname(__file__), 'templates', 'kbase_icon.png'),
output_directory)
shutil.copy2(os.path.join(os.path.dirname(__file__), 'templates', 'search_icon.png'),
output_directory)
with open(result_file_path, 'w') as result_file:
with open(os.path.join(os.path.dirname(__file__), 'templates', 'search_template.html'),
'r') as report_template_file:
report_template = report_template_file.read()
report_template = report_template.replace('//HEADER_STR', header_str)
report_template = report_template.replace('//TABLE_STR', table_str)
result_file.write(report_template)
report_shock_id = self.dfu.file_to_shock({'file_path': output_directory,
'pack': 'zip'})['shock_id']
html_report.append({'shock_id': report_shock_id,
'name': os.path.basename(result_file_path),
'label': os.path.basename(result_file_path),
'description': 'HTML summary report for Search Matrix App'})
return html_report
def _generate_search_report(self, header_str, table_str, workspace_name):
logging.info('Start creating report')
output_html_files = self._generate_search_html_report(header_str, table_str)
report_params = {'message': '',
'workspace_name': workspace_name,
'html_links': output_html_files,
'direct_html_link_index': 0,
'html_window_height': 366,
'report_object_name': 'kb_matrix_filter_report_' + str(uuid.uuid4())}
kbase_report_client = KBaseReport(self.callback_url, token=self.token)
output = kbase_report_client.create_extended_report(report_params)
report_output = {'report_name': output['name'], 'report_ref': output['ref']}
return report_output
@staticmethod
def _filter_value_data(value_data, remove_ids, dimension):
"""Filters a value matrix based on column or row ids"""
def _norm_id(_id):
return _id.replace(" ", "_")
val_df = pd.DataFrame(value_data['values'], index=value_data['row_ids'],
columns=value_data['col_ids'], dtype='object')
if dimension == 'row':
filtered_df = val_df.drop(remove_ids, axis=0, errors='ignore')
filtered_df = filtered_df.drop([_norm_id(x)
for x in remove_ids], axis=0, errors='ignore')
elif dimension == 'col':
filtered_df = val_df.drop(remove_ids, axis=1, errors='ignore')
filtered_df = filtered_df.drop([_norm_id(x)
for x in remove_ids], axis=1, errors='ignore')
else:
raise ValueError('Unexpected dimension: {}'.format(dimension))
filtered_value_data = {
"values": filtered_df.values.tolist(),
"col_ids": list(filtered_df.columns),
"row_ids": list(filtered_df.index),
}
return filtered_value_data
def _standardize_df(self, df, dimension='col', with_mean=True, with_std=True):
logging.info("Standardizing matrix data")
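        # Per-feature z-score: z = (x - mean) / std, computed by sklearn's StandardScaler below;
        # dimension='row' standardizes rows instead by working on the transpose.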
if dimension == 'row':
df = df.T
df.fillna(0, inplace=True)
x_train = df.values
scaler = preprocessing.StandardScaler(with_mean=with_mean, with_std=with_std).fit(x_train)
standardized_values = scaler.transform(x_train)
standardize_df = pd.DataFrame(index=df.index, columns=df.columns, data=standardized_values)
if dimension == 'row':
standardize_df = standardize_df.T
standardize_df.fillna(0, inplace=True)
standardize_df.replace(np.inf, 2 ** 32, inplace=True)
standardize_df.replace(-np.inf, -2 ** 32, inplace=True)
return standardize_df
def _ratio_trans_df(self, df, method='clr', dimension='col'):
        logging.info("Performing log ratio transformation on matrix data")
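        # Centered log-ratio: clr(x)_i = ln(x_i / g(x)), where g(x) is the geometric mean of the
        # composition x; ilr instead maps each composition onto an orthonormal basis. Both are
        # defined for strictly positive parts, so zeros filled below can yield +/-inf values,
        # which are capped afterwards.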
if dimension == 'col':
df = df.T
df.fillna(0, inplace=True)
if method == 'clr':
ratio_trans = clr(df)
elif method == 'ilr':
ratio_trans = ilr(df)
else:
raise ValueError('Unexpected ratio transformation method')
ratio_transformed_df = pd.DataFrame(index=df.index, columns=df.columns, data=ratio_trans)
if dimension == 'col':
ratio_transformed_df = ratio_transformed_df.T
ratio_transformed_df.fillna(0, inplace=True)
ratio_transformed_df.replace(np.inf, 2 ** 32, inplace=True)
ratio_transformed_df.replace(-np.inf, -2 ** 32, inplace=True)
return ratio_transformed_df
def _remove_all_zero(self, df):
logging.info("Removing all zero rows")
row_check = (df != 0).any(axis=1)
removed_row_ids = list(row_check[row_check == False].index)
df = df.loc[row_check]
logging.info("Removing all zero columns")
col_check = (df != 0).any(axis=0)
removed_col_ids = list(col_check[col_check == False].index)
df = df.loc[:, col_check]
return df, removed_row_ids, removed_col_ids
def _filtering_matrix(self, df, row_threshold=0, columns_threshold=0,
row_sum_threshold=10000, columns_sum_threshold=10000):
logging.info("Removing rows with values all below {}".format(row_threshold))
row_check = (df > row_threshold).any(axis=1)
removed_row_ids = list(row_check[row_check == False].index)
logging.info("Removed rows: {}".format(removed_row_ids))
df = df.loc[row_check]
logging.info("Removing columns with values all below {}".format(columns_threshold))
col_check = (df > columns_threshold).any(axis=0)
removed_col_ids = list(col_check[col_check == False].index)
logging.info("Removed columns: {}".format(removed_col_ids))
df = df.loc[:, col_check]
logging.info("Removing rows with sum below {}".format(row_sum_threshold))
row_check = df.sum(axis=1) > row_sum_threshold
additional_removed_row_ids = list(row_check[row_check == False].index)
removed_row_ids += additional_removed_row_ids
logging.info("Removed rows: {}".format(additional_removed_row_ids))
df = df.loc[row_check]
logging.info("Removing columns with sum below {}".format(columns_sum_threshold))
col_check = df.sum(axis=0) > columns_sum_threshold
additional_removed_col_ids = list(col_check[col_check == False].index)
removed_col_ids += additional_removed_col_ids
logging.info("Removed columns: {}".format(additional_removed_col_ids))
df = df.loc[:, col_check]
return df
def _relative_abundance(self, df, dimension='col'):
logging.info("Creating relative abundance matrix")
if dimension == 'col':
df = df.T
df.fillna(0, inplace=True)
values = df.values
rel_values = list()
for value in values:
total = value.sum()
rel_values.append([v/float(total) for v in value])
relative_abundance_df = pd.DataFrame(index=df.index, columns=df.columns, data=rel_values)
if dimension == 'col':
relative_abundance_df = relative_abundance_df.T
relative_abundance_df.fillna(0, inplace=True)
relative_abundance_df.replace(np.inf, 2 ** 32, inplace=True)
relative_abundance_df.replace(-np.inf, -2 ** 32, inplace=True)
return relative_abundance_df
@staticmethod
def _logit(df: pd.DataFrame):
# entries are all in range (0,1), exclusively
vd.assert_in_range(df, rng=(0, 1), inclusive=(False, False), opname='logit')
f = np.vectorize(lambda p: np.log(p/(1-p)))
df = pd.DataFrame(f(df.values), index=df.index, columns=df.columns)
return df
@staticmethod
def _sqrt(df):
# entries are nonnegative
vd.assert_is_nonnegative(df, opname='sqrt')
return pd.DataFrame(np.sqrt(df.values), index=df.index, columns=df.columns)
@staticmethod
def _log(df, base, a):
'''
log(a+x)
'''
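        # Illustrative example: _log(df, base=10, a=1) returns log10(1 + x) element-wise,
        # a common variance-stabilizing transform for count-like data.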
m = df.values + a
# entries are nonnegative
# TODO allow 0? gives -np.inf
vd.assert_is_nonnegative(m, opname='log')
m = np.log(m) / np.log(base)
return pd.DataFrame(m, index=df.index, columns=df.columns)
def _create_distance_matrix(self, df, dist_metric='euclidean', dimension='col'):
'''
dist_metric: The distance metric to use. Default set to 'euclidean'.
The distance function can be
["braycurtis", "canberra", "chebyshev", "cityblock", "correlation", "cosine",
"dice", "euclidean", "hamming", "jaccard", "kulsinski", "matching",
"rogerstanimoto", "russellrao", "sokalmichener", "sokalsneath",
"sqeuclidean", "yule"]
'''
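        # Illustrative usage (assuming scikit-bio's DistanceMatrix, as the return value suggests):
        #   dm = self._create_distance_matrix(df, dist_metric='braycurtis', dimension='col')
        # builds a square distance matrix keyed by the column ids.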
# calculate distance matrix
logging.info('start calculating distance matrix')
if dimension == 'col':
df = df.T
df.fillna(0, inplace=True)
values = df.values
labels = df.index.tolist()
Y = dist.pdist(values, metric=dist_metric)
dist_matrix = dist.squareform(Y)
dm = DistanceMatrix(dist_matrix, labels)
return dm
def _run_anosim(self, dm, grouping, permutations):
logging.info('start performing anosim')
anosim_res = anosim(dm, grouping, permutations=permutations)
return dict(anosim_res)
def _run_permanova(self, dm, grouping, permutations):
logging.info('start performing permanova')
permanova_res = permanova(dm, grouping, permutations=permutations)
return dict(permanova_res)
def _run_permdisp(self, dm, grouping, permutations):
logging.info('start performing permdisp')
permdisp_res = permdisp(dm, grouping, permutations=permutations)
return dict(permdisp_res)
def _run_mantel_tests(self, dms, labels, permutations=0, correlation_method='pearson',
alternative_hypothesis='two-sided'):
logging.info('start performing mantel test')
pwmantel_res = pwmantel(dms, labels=labels, permutations=permutations,
method=correlation_method, alternative=alternative_hypothesis)
return pwmantel_res
def _compute_target_cols(self, df, simper_ret, grouping_names):
target_cols = [col for col in df.columns if col in str(simper_ret)]
target_cols = list(set(target_cols))
try:
max_target_col_len = 18
if len(target_cols) > max_target_col_len:
# choose first few most influential species from each condition pair
comp_group_len = len(simper_ret)
                num_chosen_col = max(max_target_col_len//comp_group_len, 1)
target_cols = list()
for comp_group in simper_ret:
species_pos = list(comp_group.names).index('species')
ord_pos = list(comp_group.names).index('ord')
species = list(comp_group[species_pos])
ord_list = list(comp_group[ord_pos])
                    target_species_pos = [i - 1 for i in ord_list[:num_chosen_col]]
for p in target_species_pos:
target_cols.append(species[p])
target_cols = list(set(target_cols))
except Exception:
warning_msg = 'got unexpected error fetching most influential species'
logging.warning(warning_msg)
return target_cols
def _generate_species_stats(self, df, simper_ret, grouping_names):
logging.info('start calculating species stats')
target_cols = self._compute_target_cols(df, simper_ret, grouping_names)
species_stats = dict()
for target_col in target_cols:
logging.info('start calculating {} stats'.format(target_col))
dist_grouping_names = set(grouping_names)
average_abun = dict()
abun_values = df.loc[:, target_col]
for dist_grouping_name in dist_grouping_names:
grouping_name_pos = [index for index, value in enumerate(grouping_names)
if value == dist_grouping_name]
filtered_abun_values = []
for pos in grouping_name_pos:
filtered_abun_values.append(abun_values[pos])
mean = 0
std = 0
try:
mean = round(np.mean(filtered_abun_values), 2)
std = round(np.std(filtered_abun_values), 2)
except Exception:
warning_msg = 'got unexpected error calculating mean/std abundance value\n'
warning_msg += 'grouping_name_pos: {}\n'.format(grouping_name_pos)
warning_msg += 'abundance_values: {}\n'.format(abun_values)
warning_msg += 'returning 0 as mean/std abundance value\n'
logging.warning(warning_msg)
average_abun[dist_grouping_name] = [mean, std]
species_stats[target_col] = average_abun
return species_stats
def _sync_attribute_mapping(self, matrix_data, removed_ids, new_attri_mapping_name, dimension,
workspace_id):
attri_mapping_ref = matrix_data.get('{}_attributemapping_ref'.format(dimension))
if attri_mapping_ref:
logging.info('Start removing {} from {} attribute mapping object'.format(removed_ids,
dimension))
am_data = self.dfu.get_objects({"object_refs": [attri_mapping_ref]})['data'][0]['data']
instances = am_data.get('instances', {})
for removed_id in removed_ids:
instances.pop(removed_id, None)
# save new attribute mapping
info = self.dfu.save_objects({"id": workspace_id,
"objects": [{
"type": 'KBaseExperiments.AttributeMapping',
"data": am_data,
"name": new_attri_mapping_name
}]})[0]
new_attri_mapping_ref = "%s/%s/%s" % (info[6], info[0], info[4])
matrix_data['{}_attributemapping_ref'.format(dimension)] = new_attri_mapping_ref
mapping = matrix_data.get('{}_mapping'.format(dimension))
if mapping:
for remove_id in removed_ids:
mapping.pop(remove_id, None)
return matrix_data
def _link_matrix_to_samples(self, matrix_ref, matrix_obj, sample_set_ref):
sample_set_obj = self.dfu.get_objects({'object_refs': [sample_set_ref]})['data'][0]['data']
name_2_sample = {d['name']: d for d in sample_set_obj['samples']}
links = []
for name in matrix_obj['data']['col_ids']:
if name not in name_2_sample:
continue
sample = self.sample_ser.get_sample({
'id': name_2_sample[name]['id']
})
link = self.sample_ser.create_data_link({
'upa': matrix_ref,
'dataid': name,
'id': name_2_sample[name]['id'],
'version': name_2_sample[name]['version'],
'node': sample['node_tree'][0]['id'],
'update': 1,
})
links.append(link)
return links
def __init__(self, config):
self.callback_url = config['SDK_CALLBACK_URL']
self.scratch = config['scratch']
self.token = config['KB_AUTH_TOKEN']
self.dfu = DataFileUtil(self.callback_url)
self.sample_ser = SampleService(config['srv-wiz-url'], service_ver='dev')
self.fba_tools = fba_tools(self.callback_url)
self.report_util = kb_GenericsReport(self.callback_url)
self.data_util = DataUtil(config)
self.attr_util = AttributesUtil(config)
self.sampleservice_util = SampleServiceUtil(config)
self.matrix_types = [x.split(".")[1].split('-')[0]
for x in self.data_util.list_generic_types()]
def standardize_matrix(self, params):
"""
standardize a matrix
"""
input_matrix_ref = params.get('input_matrix_ref')
workspace_name = params.get('workspace_name')
new_matrix_name = params.get('new_matrix_name')
with_mean = params.get('with_mean', 1)
with_std = params.get('with_std', 1)
dimension = params.get('dimension', 'col')
if not isinstance(workspace_name, int):
workspace_id = self.dfu.ws_name_to_id(workspace_name)
else:
workspace_id = workspace_name
input_matrix_obj = self.dfu.get_objects({'object_refs': [input_matrix_ref]})['data'][0]
input_matrix_info = input_matrix_obj['info']
input_matrix_name = input_matrix_info[1]
input_matrix_data = input_matrix_obj['data']
if not new_matrix_name:
current_time = time.localtime()
new_matrix_name = input_matrix_name + time.strftime('_%H_%M_%S_%Y_%m_%d', current_time)
data_matrix = self.data_util.fetch_data({'obj_ref': input_matrix_ref}).get('data_matrix')
df = pd.read_json(data_matrix)
standardize_df = self._standardize_df(df, dimension=dimension,
with_mean=with_mean, with_std=with_std)
df.index = df.index.astype('str')
df.columns = df.columns.astype('str')
new_matrix_data = {'row_ids': df.index.tolist(),
'col_ids': df.columns.tolist(),
'values': standardize_df.values.tolist()}
input_matrix_data['data'] = new_matrix_data
logging.info("Saving new standardized matrix object")
info = self.dfu.save_objects({
"id": workspace_id,
"objects": [{
"type": input_matrix_info[2],
"data": input_matrix_data,
"name": new_matrix_name
}]
})[0]
new_matrix_obj_ref = "%s/%s/%s" % (info[6], info[0], info[4])
objects_created = [{'ref': new_matrix_obj_ref, 'description': 'Standardized Matrix'}]
report_params = {'message': '',
'objects_created': objects_created,
'workspace_name': workspace_name,
'report_object_name': 'standardize_matrix_' + str(uuid.uuid4())}
kbase_report_client = KBaseReport(self.callback_url, token=self.token)
output = kbase_report_client.create_extended_report(report_params)
return {'new_matrix_obj_ref': new_matrix_obj_ref,
'report_name': output['name'], 'report_ref': output['ref']}
def perform_simper(self, params):
logging.info('Start performing SIMPER with {}'.format(params))
input_matrix_ref = params.get('input_matrix_ref')
workspace_id = params.get('workspace_id')
grouping = params.get('grouping')
dimension = params.get('dimension', 'col')
permutations = int(params.get('permutations', 0))
if dimension not in ['col', 'row']:
raise ValueError('Please use "col" or "row" for input dimension')
input_matrix_obj = self.dfu.get_objects({'object_refs': [input_matrix_ref]})['data'][0]
input_matrix_info = input_matrix_obj['info']
input_matrix_data = input_matrix_obj['data']
matrix_type = input_matrix_info[2]
if 'KBaseMatrices' in matrix_type:
am_ref = input_matrix_data.get('{}_attributemapping_ref'.format(dimension))
if not am_ref:
raise ValueError(
'Missing {} attribute mapping from original matrix'.format(dimension))
elif 'KBaseProfile' in matrix_type:
profile_category = input_matrix_data.get('profile_category')
if profile_category == 'community' and dimension == 'row':
raise ValueError('Please choose column dimension for community profile')
if profile_category == 'organism' and dimension == 'col':
raise ValueError('Please choose row dimension for organism profile')
am_ref = input_matrix_data.get('{}_attributemapping_ref'.format(dimension))
if not am_ref:
raise ValueError(
'Missing {} attribute mapping from functional profile'.format(dimension))
else:
raise ValueError('Unsupported data type: {}'.format(matrix_type))
data_matrix = self.data_util.fetch_data({'obj_ref': input_matrix_ref}).get('data_matrix')
df = pd.read_json(data_matrix)
am_ref = '{};{}'.format(input_matrix_ref, am_ref)
am_data = self.dfu.get_objects({'object_refs': [am_ref]})['data'][0]['data']
attribute_names = [am.get('attribute') for am in am_data.get('attributes')]
if grouping not in attribute_names:
raise ValueError('Cannot find {} in {} attribute mapping'.format(grouping, dimension))
attri_pos = attribute_names.index(grouping)
instances = am_data.get('instances')
if dimension == 'col':
items = df.columns
else:
items = df.index
grouping_names = list()
for item in items:
instance = instances.get(item)
if not instance:
raise ValueError('Cannot find instance for {} in attribute mapping'.format(item))
attri = instance[attri_pos]
grouping_names.append(attri)
logging.info('Fetched {} for {} from attributes'.format(grouping_names, grouping))
if dimension == 'col':
df = df.T
df.fillna(0, inplace=True)
vegan = rpackages.importr('vegan')
numpy2ri.activate()
with localconverter(ro.default_converter + pandas2ri.converter):
simper_ret = vegan.simper(df, grouping_names, permutations=permutations)
simper_sum = vegan.summary_simper(simper_ret)
species_stats = self._generate_species_stats(df, simper_ret, grouping_names)
report_output = self._generate_simper_report(workspace_id, simper_ret, simper_sum,
species_stats, grouping_names)
return report_output
def perform_rarefy(self, params):
logging.info('Start performing rarefying matrix with {}'.format(params))
warnings = []
input_matrix_ref = params.get('input_matrix_ref')
workspace_id = params.get('workspace_id')
new_matrix_name = params.get('new_matrix_name')
seed_number = params.get('seed_number', 'do_not_seed')
subsample_size = params.get('subsample_size')
dimension = params.get('dimension', 'col')
bootstrap = params.get('bootstrap')
if bootstrap is not None:
num_rare_reps = bootstrap['num_rare_reps']
central_tendency = bootstrap['central_tendency']
input_matrix_obj = self.dfu.get_objects({'object_refs': [input_matrix_ref]})['data'][0]
input_matrix_info = input_matrix_obj['info']
input_matrix_name = input_matrix_info[1]
input_matrix_data = input_matrix_obj['data']
for key, obj_data in input_matrix_data.items():
if key.endswith('_ref'):
subobj_ref = input_matrix_data[key]
input_matrix_data[key] = '{};{}'.format(input_matrix_ref, subobj_ref)
logging.info('updated {} to {}'.format(key, input_matrix_data[key]))
for dim in ['row', 'col']:
attribute_mapping = input_matrix_data.get('{}_mapping'.format(dim))
attributemapping_ref = input_matrix_data.get('{}_attributemapping_ref'.format(dim))
if not attribute_mapping and attributemapping_ref:
am_data = self.dfu.get_objects({'object_refs': [attributemapping_ref]})[
'data'][0]['data']
attribute_mapping = {x: x for x in am_data['instances'].keys()}
input_matrix_data['{}_mapping'.format(dim)] = attribute_mapping
if not new_matrix_name:
current_time = time.localtime()
new_matrix_name = input_matrix_name + time.strftime('_%H_%M_%S_%Y_%m_%d', current_time)
data_matrix = self.data_util.fetch_data({'obj_ref': input_matrix_ref}).get('data_matrix')
df = pd.read_json(data_matrix)
# original_matrix_df = df.copy(deep=True)
if dimension == 'col':
df = df.T
df.fillna(0, inplace=True)
run_seed = (not seed_number == 'do_not_seed')
# determining subsample size
raremax = int(min(df.sum(axis=1))) # least sample size
if subsample_size is None: # default behavior: use least sample size
subsample_size = raremax
else: # user-specified behavior, find any samples too small
unrarefied = df.index[df.sum(axis=1) < subsample_size].tolist()
if len(unrarefied) > 0:
msg = (
'At subsampling size %d, samples %s are too small and will not be rarefied. '
'Smallest sample size is %d'
% (subsample_size, str(unrarefied), raremax)
)
warnings.append(msg)
logging.info(msg)
logging.info('Using subsample size %d' % subsample_size)
vegan = rpackages.importr('vegan')
numpy2ri.activate()
# generating rarefied matrix
logging.info('Start executing rrarefy(s)')
if run_seed:
ro.r('set.seed({})'.format(seed_number))
if bootstrap is None:
with localconverter(ro.default_converter + pandas2ri.converter):
random_rare = vegan.rrarefy(df, subsample_size)
else:
random_rare_l = []
for rep in range(num_rare_reps):
with localconverter(ro.default_converter + pandas2ri.converter):
random_rare = vegan.rrarefy(df, subsample_size) # returns np.ndarray
random_rare_l.append(random_rare)
if central_tendency == 'mean':
random_rare = sum(random_rare_l) / num_rare_reps
elif central_tendency == 'median':
random_rare = np.median(random_rare_l, axis=0)
else:
raise NotImplementedError('Unknown value for `central_tendency`')
random_rare_df = pd.DataFrame(random_rare, index=df.index, columns=df.columns)
if dimension == 'col':
random_rare_df = random_rare_df.T
# generating plots
result_directory = os.path.join(self.scratch, str(uuid.uuid4()))
self._mkdir_p(result_directory)
logging.info('Start generating rarecurve plot')
rarecurve_image = os.path.join(result_directory, 'rarecurve.jpg')
ro.r("jpeg('{}')".format(rarecurve_image))
if run_seed:
ro.r('set.seed({})'.format(seed_number))
with localconverter(ro.default_converter + pandas2ri.converter):
vegan.rarecurve(df, sample=subsample_size, step=20, col="blue", cex=0.6)
ro.r('dev.off()')
logging.info('Start generating expected species richness vs raw abundance plot')
with localconverter(ro.default_converter + pandas2ri.converter):
Srare = vegan.rarefy(df, subsample_size)
specnumber = ro.r['specnumber']
with localconverter(ro.default_converter + pandas2ri.converter):
S = specnumber(df)
obs_vs_rare_image = os.path.join(result_directory, 'obs_vs_rare.jpg')
ro.r("jpeg('{}')".format(obs_vs_rare_image))
plot = ro.r['plot']
plot(S, Srare, xlab="Observed No. of Species", ylab="Rarefied No. of Species")
ro.r('dev.off()')
new_matrix_data = {'row_ids': random_rare_df.index.tolist(),
'col_ids': random_rare_df.columns.tolist(),
'values': random_rare_df.values.tolist()}
input_matrix_data['data'] = new_matrix_data
logging.info("Saving new rarefy matrix object")
new_matrix_obj_ref = self.data_util.save_object({
'obj_type': input_matrix_info[2],
'obj_name': new_matrix_name,
'data': input_matrix_data,
'workspace_id': workspace_id})['obj_ref']
returnVal = {'new_matrix_obj_ref': new_matrix_obj_ref}
report_output = self._generate_rarefy_report(new_matrix_obj_ref, workspace_id,
random_rare_df,
rarecurve_image, obs_vs_rare_image,
warnings)
returnVal.update(report_output)
return returnVal
def perform_mantel_test(self, params):
logging.info('Start performing mantel test with {}'.format(params))
input_matrix_refs = params.get('input_matrix_refs')
workspace_id = params.get('workspace_id')
dimension = params.get('dimension', 'col')
dist_metric = params.get('dist_metric', 'euclidean')
correlation_method = params.get('correlation_method', 'pearson')
permutations = params.get('permutations', 0)
alternative_hypothesis = params.get('alternative_hypothesis', 'two-sided')
if dimension not in ['col', 'row']:
raise ValueError('Please use "col" or "row" for input dimension')
if len(input_matrix_refs) < 2:
            raise ValueError('Please provide at least 2 matrices to perform the Mantel test')
dms = list()
labels = list()
for input_matrix_ref in input_matrix_refs:
input_matrix_obj = self.dfu.get_objects({'object_refs': [input_matrix_ref]})['data'][0]
input_matrix_info = input_matrix_obj['info']
input_matrix_name = input_matrix_info[1]
labels.append(input_matrix_name)
data_matrix = self.data_util.fetch_data(
{'obj_ref': input_matrix_ref}).get('data_matrix')
df = | pd.read_json(data_matrix) | pandas.read_json |
import tensorflow as tf
from bert import optimization, modeling
from os.path import join
import pandas as pd
import math
from preprocess import InputExample, ZaloDatasetProcessor
class BertClassifierModel(object):
def __init__(self, max_sequence_len, label_list,
learning_rate, batch_size, epochs, dropout_rate,
warmup_proportion, use_pooled_output, loss_type, loss_label_smooth,
model_dir, save_checkpoint_steps, save_summary_steps, keep_checkpoint_max, bert_model_path, tokenizer,
train_file=None, evaluation_file=None, encoding='utf-8'):
""" Constructor for BERT model for classification
:parameter max_sequence_len (int): Maximum length of input sequence
:parameter label_list (list): List of labels to classify
:parameter learning_rate (float): Initial learning rate
:parameter batch_size (int): Batch size
:parameter epochs (int): Train for how many epochs?
:parameter dropout_rate (float): The dropout rate of the fully connected layer input
:parameter warmup_proportion (float): The amount of training steps is used for warmup
:parameter use_pooled_output (bool): Use pooled output as pretrained-BERT output (or FC input) (True) or
                                             using the mean of the sequence output (False)
:parameter loss_type (string): The default loss function used during training
:parameter loss_label_smooth (float): Perform label smoothing when calculate loss.
(0 <= loss_label_smooth <= 1)
When 0, no smoothing occurs. When positive, the binary
ground truth labels `y_true` are squeezed toward 0.5, with larger values
of `label_smoothing` leading to label values closer to 0.5.
:parameter model_dir (string): Folder path to store the model
:parameter save_checkpoint_steps (int): The number of steps to save checkpoints
:parameter save_summary_steps (int): The number of steps to save summary
:parameter keep_checkpoint_max (int): The maximum number of checkpoints to keep
:parameter bert_model_path (string): The path to BERT pretrained model
        :parameter tokenizer (FullTokenizer): BERT tokenizer for data processing
:parameter train_file (string): The path to the tfrecords file that is used for training
:parameter evaluation_file (string): The path to the tfrecords file that is used for evaluation
:parameter encoding (string): The encoding used in the dataset
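
        Example (illustrative only; the paths, files and hyperparameters shown here are
        placeholders, not this project's actual configuration):
            model = BertClassifierModel(
                max_sequence_len=256, label_list=['False', 'True'], learning_rate=2e-5,
                batch_size=16, epochs=3, dropout_rate=0.1, warmup_proportion=0.1,
                use_pooled_output=True, loss_type='cross_entropy', loss_label_smooth=0.0,
                model_dir='output/', save_checkpoint_steps=1000, save_summary_steps=100,
                keep_checkpoint_max=5, bert_model_path='multi_cased_L-12_H-768_A-12/',
                tokenizer=tokenizer, train_file='train.tfrecords', evaluation_file='dev.tfrecords')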
"""
# Variable initialization
self.max_sequence_len = max_sequence_len
self.labels_list = label_list
self.num_labels = len(self.labels_list)
self.learning_rate = learning_rate
self.batch_size = batch_size
self.epochs = epochs
self.dropout_rate = dropout_rate
self.use_pooled_output = use_pooled_output
self.loss_type = loss_type.lower()
self.loss_label_smooth = loss_label_smooth
self.train_file = train_file
self.evaluation_file = evaluation_file
self.bert_configfile = join(bert_model_path, 'bert_config.json')
self.init_checkpoint = join(bert_model_path, 'bert_model.ckpt')
self.tokenizer = tokenizer
self.encoding = encoding
        # Specify output directory and number of checkpoint steps to save
self.run_config = tf.estimator.RunConfig(
model_dir=model_dir,
save_summary_steps=save_summary_steps,
save_checkpoints_steps=save_checkpoint_steps,
keep_checkpoint_max=keep_checkpoint_max)
# Specify training steps
if self.train_file:
self.num_train_steps = int(sum(1 for _ in tf.python_io.tf_record_iterator(train_file))
/ self.batch_size * self.epochs)
# self.num_train_steps = int(sum(1 for _ in tf.data.TFRecordDataset(train_file)) /
# self.batch_size * self.epochs)
self.num_warmup_steps = int(self.num_train_steps * warmup_proportion)
if self.evaluation_file:
self.num_eval_steps = int(sum(1 for _ in tf.python_io.tf_record_iterator(evaluation_file))
/ self.batch_size)
# Create the Estimator
self.classifier = tf.estimator.Estimator(model_fn=self.model_fn_builder(),
config=self.run_config,
params={"batch_size": self.batch_size})
def create_model(self, is_training, input_ids, input_mask, segment_ids, labels):
""" Create a classification model based on BERT """
bert_module = modeling.BertModel(
config=modeling.BertConfig.from_json_file(self.bert_configfile),
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
            use_one_hot_embeddings=False,  # set to True when running on TPU
)
# Use model.get_pooled_output() for classification tasks on an entire sentence.
# Use model.get_sequence_output() for token-level output.
if self.use_pooled_output:
output_layer = bert_module.get_pooled_output()
else:
output_layer = tf.reduce_mean(bert_module.get_sequence_output(), axis=1)
hidden_size = output_layer.shape[-1].value
# Create a fully connected layer on top of BERT for classification
        # These classification weights are trained from scratch on top of the pretrained encoder.
with tf.compat.v1.variable_scope("fully_connected"):
# Dropout helps prevent overfitting
if is_training:
output_layer = tf.nn.dropout(output_layer, rate=self.dropout_rate)
fc_weights = tf.compat.v1.get_variable("fc_weights", [self.num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02,
seed=0))
# Bias with initialized value = -log((1-r)/r) with r = 0.01 for focal loss trick
fc_bias = tf.compat.v1.get_variable("fc_bias", [self.num_labels],
initializer=tf.constant_initializer(value=math.log((1 - 0.01) / 0.01))
if self.loss_type == 'focal_loss' else tf.zeros_initializer())
logits = tf.matmul(output_layer, fc_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, fc_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
predicted_labels = tf.argmax(probabilities, axis=-1, output_type=tf.int32)
# If we're train/eval, compute loss between predicted and actual label
with tf.compat.v1.variable_scope("fully_connected_loss"):
one_hot_labels = tf.one_hot(labels, depth=self.num_labels,
dtype=tf.float32) # Convert labels into one-hot encoding
one_hot_labels_smooth = one_hot_labels * (1.0 - self.loss_label_smooth) + \
(self.loss_label_smooth / self.num_labels)
if self.loss_type == 'focal_loss':
# Focal loss (Set default focal loss gamma to 2)
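                # i.e. FL(p_t) = -(1 - p_t)^gamma * log(p_t) with gamma fixed at 2;
                # the (smoothed) one-hot labels select p_t below.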
per_example_loss = -one_hot_labels_smooth * ((1 - probabilities) ** 2) * log_probs
per_example_loss = tf.reduce_sum(per_example_loss, axis=1)
elif self.loss_type == 'cross_entropy':
per_example_loss = tf.compat.v2.metrics.categorical_crossentropy(y_true=one_hot_labels,
y_pred=probabilities,
label_smoothing=self.loss_label_smooth)
elif self.loss_type == 'kld':
per_example_loss = tf.compat.v2.metrics.kld(y_true=one_hot_labels_smooth,
y_pred=probabilities)
elif self.loss_type == 'squared_hinge':
per_example_loss = tf.compat.v2.metrics.squared_hinge(y_true=one_hot_labels_smooth,
y_pred=probabilities)
elif self.loss_type == 'hinge':
per_example_loss = tf.compat.v2.metrics.hinge(y_true=one_hot_labels_smooth,
y_pred=probabilities)
else: # Fallback to cross-entropy
per_example_loss = -tf.reduce_sum(one_hot_labels_smooth * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return loss, predicted_labels, probabilities
def model_fn_builder(self):
""" Returns `model_fn` closure for Estimator. """
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for Estimator."""
# Get input features
guid = features["guid"]
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32) if "is_real_example" in features \
else tf.ones(tf.shape(label_ids), dtype=tf.float32)
# Pass through model
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(total_loss, predicted_labels, probabilities) = self.create_model(
is_training, input_ids, input_mask, segment_ids, label_ids)
(assignment_map, initialized_variable_names) \
= modeling.get_assignment_map_from_checkpoint(tf.trainable_variables(), self.init_checkpoint)
tf.train.init_from_checkpoint(self.init_checkpoint, assignment_map)
# Optimize/Predict
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, self.learning_rate, self.num_train_steps, self.num_warmup_steps, use_tpu=False)
return tf.estimator.EstimatorSpec(mode=mode,
loss=total_loss,
train_op=train_op)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(label_ids, predicted_labels, is_real_example):
""" Calculate evaluation metrics """
accuracy = tf.compat.v1.metrics.accuracy(labels=label_ids,
predictions=predicted_labels,
weights=is_real_example)
f1_score = tf.contrib.metrics.f1_score(labels=label_ids,
predictions=predicted_labels,
weights=is_real_example)
recall = tf.compat.v1.metrics.recall(labels=label_ids,
predictions=predicted_labels,
weights=is_real_example)
precision = tf.compat.v1.metrics.precision(labels=label_ids,
predictions=predicted_labels,
weights=is_real_example)
true_pos = tf.compat.v1.metrics.true_positives(labels=label_ids,
predictions=predicted_labels,
weights=is_real_example)
true_neg = tf.compat.v1.metrics.true_negatives(labels=label_ids,
predictions=predicted_labels,
weights=is_real_example)
false_pos = tf.compat.v1.metrics.false_positives(labels=label_ids,
predictions=predicted_labels,
weights=is_real_example)
false_neg = tf.compat.v1.metrics.false_negatives(labels=label_ids,
predictions=predicted_labels,
weights=is_real_example)
return {
"accuracy": accuracy,
"f1_score": f1_score,
"recall": recall,
"precision": precision,
"true_positives": true_pos,
"true_negatives": true_neg,
"false_positives": false_pos,
"false_negatives": false_neg,
}
eval_metrics = metric_fn(label_ids, predicted_labels, is_real_example)
return tf.estimator.EstimatorSpec(mode=mode,
loss=total_loss,
eval_metric_ops=eval_metrics)
elif mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
"guid": guid,
'input_texts': input_ids,
'prediction': predicted_labels,
'probabilities': probabilities,
'labels': label_ids
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
else:
raise ValueError(
"Only TRAIN, EVAL and PREDICT modes are supported: %s" % mode)
# Return the actual model function in the closure
return model_fn
def _file_based_input_fn_builder(self, input_file, is_training, drop_remainder=False):
""" Creates an `input_fn` closure to be passed to Estimator - Used for tfrecord files
:parameter input_file: The path to a TFRecord file (preprocessed file)
:parameter is_training: Is the input_file used for training?
:parameter drop_remainder: Should drop the last batch where there is not enough data to form a batch
:returns A function to generate input data to the model
"""
name_to_features = {
"guid": tf.io.FixedLenFeature([], tf.string),
"input_ids": tf.io.FixedLenFeature([self.max_sequence_len], tf.int64),
"input_mask": tf.io.FixedLenFeature([self.max_sequence_len], tf.int64),
"segment_ids": tf.io.FixedLenFeature([self.max_sequence_len], tf.int64),
"label_ids": tf.io.FixedLenFeature([], tf.int64),
"is_real_example": tf.io.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.io.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.cast(t, dtype=tf.int32)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.map(map_func=lambda record: _decode_record(record, name_to_features)) \
.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
def _input_fn_builder(self, input_features, is_training, drop_remainder=False):
""" Creates an `input_fn` closure to be passed to Estimator - Used for predicting
:parameter input_features: List of processed input data (InputFeatures)
:parameter is_training: Is the input_features used for training?
:parameter drop_remainder: Should drop the last batch where there is not enough data to form a batch
:returns A function to generate input data to the model
"""
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_label_ids = []
for feature in input_features:
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_label_ids.append(feature.label_id)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
num_examples = len(input_features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, self.max_sequence_len],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, self.max_sequence_len],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, self.max_sequence_len],
dtype=tf.int32),
"label_ids":
tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
})
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
def train(self):
""" Training model based on predefined training record (train set) """
if not self.train_file:
return
train_input_fn = self._file_based_input_fn_builder(
input_file=self.train_file,
is_training=True,
drop_remainder=True
)
self.classifier.train(input_fn=train_input_fn, max_steps=self.num_train_steps)
def train_and_eval(self):
""" Training & evaluate model
:returns eval_results (dictionary): Evaluation results (accuracy, f1, precision & recall)
"""
if not self.train_file or not self.evaluation_file:
return
train_input_fn = self._file_based_input_fn_builder(
input_file=self.train_file,
is_training=True,
drop_remainder=True
)
eval_input_fn = self._file_based_input_fn_builder(
input_file=self.evaluation_file,
is_training=False,
drop_remainder=False
)
train_spec = tf.estimator.TrainSpec(
input_fn=train_input_fn,
max_steps=self.num_train_steps,
)
eval_spec = tf.estimator.EvalSpec(
input_fn=eval_input_fn,
steps=self.num_eval_steps,
throttle_secs=60,
)
tf.estimator.train_and_evaluate(self.classifier, train_spec, eval_spec)
return self.eval()
def eval(self):
""" Evaluate model based on predefined evaluation record (development set)
:returns eval_results (dictionary): Evaluation results (accuracy, f1, precision & recall)
"""
if not self.evaluation_file:
return
eval_input_fn = self._file_based_input_fn_builder(
input_file=self.evaluation_file,
is_training=False,
drop_remainder=False
)
eval_results = self.classifier.evaluate(input_fn=eval_input_fn)
return eval_results
def predict(self, qas):
""" Get a prediction for each input qa pairs
:parameter qas: (list of tuple) A list of question-paragraph pairs
:returns is_answers: (list) Corresponding to each qa pairs,
is the paragraph contains the answer for the question
"""
sentences_formatted = [InputExample(guid="",
question=qa[0],
text=qa[1],
label=None) for qa in qas]
sentences_features = ZaloDatasetProcessor().convert_examples_to_features(examples=sentences_formatted,
label_list=self.labels_list,
max_seq_length=self.max_sequence_len,
tokenizer=self.tokenizer)
predict_input_fn = self._input_fn_builder(
input_features=sentences_features,
is_training=False,
drop_remainder=False
)
predict_results = self.classifier.predict(input_fn=predict_input_fn, yield_single_examples=False)
results = []
for index, prediction in enumerate(predict_results):
results.append({
"input_question": qas[index][0],
"input_paragraph": qas[index][1],
"prediction": self.labels_list[prediction["prediction"][index]],
"probabilities": prediction["probabilities"][index][prediction["prediction"][index]]
})
return results
def predict_from_eval_file(self, test_file, output_file=None, file_output_mode="zalo"):
""" Get prediction from predefined evaluation record (test set)
:parameter test_file: The path to the tfrecords (preprocessed) file that need predicting
:parameter output_file: Desired path to store the result
        :parameter file_output_mode: Can be 'full' for full information on a csv file, or 'zalo' for the Zalo-defined submission format
:returns results (Dataframe): Prediction results
"""
file_output_mode = file_output_mode.lower()
assert file_output_mode in ['full', 'zalo'], "[Predict] File output mode can only be 'full' or 'zalo'"
if not test_file:
return
predict_input_fn = self._file_based_input_fn_builder(
input_file=test_file,
is_training=False,
drop_remainder=False
)
predict_results = self.classifier.predict(input_fn=predict_input_fn)
results = []
for i, prediction in enumerate(predict_results):
_dict = {
"guid": prediction["guid"].decode(self.encoding),
"input_text": self.tokenizer.convert_ids_to_tokens(prediction["input_texts"]),
"prediction": self.labels_list[prediction["prediction"]],
"label": self.labels_list[prediction["labels"]],
"probabilities": prediction["probabilities"][prediction["prediction"]]
}
results.append(_dict)
if output_file:
if file_output_mode == 'zalo':
trueonly_results = []
for result in results:
if result['prediction'] == 'True':
result_test_id = result['guid'].split('$')[0]
result_answer = result['guid'].split('$')[1]
trueonly_results.append({
"test_id": result_test_id,
"answer": result_answer
})
trueonly_results_dataframe = pd.DataFrame.from_records(trueonly_results)
trueonly_results_dataframe.to_csv(path_or_buf=output_file, encoding=self.encoding, index=False)
elif file_output_mode == 'full':
results_dataframe = pd.DataFrame.from_records(results)
results_dataframe.to_csv(path_or_buf=output_file, encoding=self.encoding, index=False)
return | pd.DataFrame.from_records(results) | pandas.DataFrame.from_records |
import streamlit as st
# Essentials
import numpy as np
import pandas as pd
import datetime
import random
# Plots
import matplotlib.pyplot as plt
import seaborn as sns
# Models
from sklearn.kernel_ridge import KernelRidge
from sklearn.linear_model import Lasso
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge, RidgeCV
from sklearn.linear_model import ElasticNet, ElasticNetCV
import xgboost
from xgboost.sklearn import XGBRegressor
import lightgbm
from lightgbm import LGBMRegressor
# Misc
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold, cross_val_score
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
from sklearn.decomposition import PCA
import pickle
import joblib
import folium
import branca.colormap as cm
from streamlit_folium import folium_static
import bs4
from bs4 import BeautifulSoup as bs
import requests
import json
import re
import base64
def main():
pd.set_option('display.max_colwidth', None)
### Dataset Import and Dashboard DataFrame Preparation (renaming columns for aesthetic purposes)
cleaned = pd.read_csv(r'Cleaned Apartment Data.csv')
UnitType = list()
for _ in cleaned.No_Rooms:
if _ == 0:
UnitType.append('Studio')
elif _ == 1:
UnitType.append('1BR')
elif _ == 2:
UnitType.append('2BR')
elif _ == 3:
UnitType.append('3BR')
elif _ == 4:
UnitType.append('4+BR')
FStatus = list()
for f in cleaned.Furnished:
if f == 0:
FStatus.append('Non Furnished')
else:
FStatus.append('Fully Furnished')
cleaned = cleaned.rename({'Locality':'District', 'Water_Heater':'Water Heater', 'Dining_Set':'Dining Set',
'Access_Card':'Access Card', 'Kitchen':'Kitchen Set', 'Fridge':'Refrigerator',
'Washing_Machine':'Washing Machine', 'TV_Cable':'TV Cable', 'Grocery':'Grocery Shop',
'Internet':'Internet Services', 'Swim_Pool':'Swimming Pool', 'Basketball':'Basketball Field',
'Multipurpose_Room':'Multipurpose Room', 'Jogging':'Jogging Track', 'Tennis':'Tennis Field',
'Playground':'Kids Playground', 'Total_Facilities':'Total Number of Facilities',
'AnnualPrice':'Annual Price'}, axis='columns')
ML_Ready = cleaned #dataset to be used on the ML page
dash = cleaned #dataset to be used on the visualization page
dash['Unit Type'] = UnitType
dash['Furnished Status'] = FStatus
dash = dash.drop(['Unnamed: 0', 'No_Rooms', 'Furnished', 'FurnishedNew'], axis = 'columns')
dash = dash[['Unit Type', 'Bathroom', 'Region', 'District', 'Longitude', 'Latitude', 'Furnished Status',
'Area', 'AC', 'Water Heater', 'Dining Set', 'Bed', 'Kitchen Set', 'Refrigerator', 'Washing Machine',
'TV', 'ATM', 'TV Cable', 'Grocery Shop', 'Internet Services', 'Swimming Pool', 'Basketball Field',
'Multipurpose Room', 'Jogging Track', 'Tennis Field', 'Kids Playground',
'Total Number of Facilities', 'Annual Price']]
ML_Ready = ML_Ready.drop(['FurnishedNew'], axis='columns')
ML_Ready = ML_Ready.rename({'No_Rooms':'Number of Bedrooms'}, axis = 'columns')
InJakCheck = list()
for reg in ML_Ready.Region:
if 'Jakarta' in reg:
InJakCheck.append(1)
else:
InJakCheck.append(0)
ML_Ready['In-Jakarta Check'] = InJakCheck
###
st.sidebar.title('Navigation')
pages = st.sidebar.radio("Pages", ("Home Page", "Apartment Rent Price Calculator", "Web Scraping Demo",
"Data Visualization", "Play with Machine Learning Models", "Summary", "FAQs", "About the Author"), index = 0)
if pages == "Home Page":
st.title('Welcome to the Jakpartment Project!')
st.image('apartment.jpg', width = 650)
st.write("Open the navigation sidebar and select any pages to proceed. Happy exploring!")
elif pages == "Apartment Rent Price Calculator":
st.title("Jabodetabek Apartment Annual Rent Price Predictor")
st.markdown("Enter your desired apartment unit and we'll estimate the annual rent price.")
#Load Location Dictionary
load_dict = open('location_dict.pkl', 'rb')
location = pickle.load(load_dict)
#Define functions to find longitude/latitude
def lon_finder(dict, region, locality):
return float(dict[region][locality].split(',')[0])
def lat_finder(dict, region, locality):
return float(dict[region][locality].split(',')[1])
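        # Assumed structure of the pickled location_dict (coordinates below are purely
        # illustrative, not actual values):
        #   location['Jakarta Utara']['Ancol'] == '106.83,-6.12'
        #   lon_finder(location, 'Jakarta Utara', 'Ancol')  # -> 106.83
        #   lat_finder(location, 'Jakarta Utara', 'Ancol')  # -> -6.12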
#Make empty lists of features
No_Rooms = list()
Bathroom = list()
Longitude = list()
Latitude= list()
Furnished = list()
Area= list()
AC= list()
Water_Heater= list()
Dining_Set= list()
Bed= list()
Kitchen= list()
Fridge= list()
Washing_Machine= list()
TV= list()
ATM= list()
TV_Cable= list()
Grocery= list()
Internet= list()
Swim_Pool= list()
Laundry= list()
Security= list()
Basketball= list()
Multipurpose_Room= list()
Gym= list()
Jogging= list()
Tennis= list()
Restaurant= list()
Playground= list()
Jakcheck= list()
Total_Facilities=list()
st.subheader("What's your apartment unit type?")
unit_type = st.selectbox("Unit Type",("Studio", "1 Bedroom(s)", "2 Bedroom(s)", "3 Bedroom(s)",
"4 (or more) Bedroom(s)"))
st.subheader('Is your apartment unit fully furnished?')
furnished = st.radio("",("Yes", "No"))
if furnished == "Yes":
Furnished.append(1) #Fully furnished
else:
Furnished.append(0) #Non furnished
st.subheader("How much is your apartment unit's area?")
if unit_type == "Studio":
No_Rooms.append(0)
st.write('Studio apartment units have area ranging from 20 - 100 m\u00b2 with an average of 27 m\u00b2.')
area = st.slider('Area', 20, 100)
Area.append(area)
elif unit_type == '1 Bedroom(s)':
No_Rooms.append(1)
st.write('1 Bedroom(s) apartment units have area ranging from 21 - 129 m\u00b2 with an average of 45 m\u00b2.')
area = st.slider('Area', 21, 129)
Area.append(area)
elif unit_type == '2 Bedroom(s)':
No_Rooms.append(2)
st.write('2 Bedroom(s) apartment units have area ranging from 28 - 232 m\u00b2 with an average of 59 m\u00b2.')
area = st.slider('Area', 28, 232)
Area.append(area)
elif unit_type == '3 Bedroom(s)':
No_Rooms.append(3)
st.write('3 Bedroom(s) apartment units have area ranging from 38 - 250 m\u00b2 with an average of 121 m\u00b2.')
area = st.slider('Area', 38, 250)
Area.append(area)
elif unit_type == '4 (or more) Bedroom(s)':
No_Rooms.append(4)
st.write('4 Bedroom(s) apartment units have area ranging from 92 - 250 m\u00b2 with an average of 211 m\u00b2.')
area = st.slider('Area', 92, 250)
Area.append(area)
st.subheader('How many bathrooms are there?')
bathroom = st.selectbox("Number of bathroom(s)", (1,2,3,4,5))
st.subheader("Where is your apartment unit? (Region)")
region = st.selectbox("Region", ("Jakarta Utara", 'Jakarta Barat', "Jakarta Pusat", "Jakarta Selatan", "Jakarta Timur",
"Bogor", "Depok", "Tangerang", "Bekasi"))
if region == "Jakarta Utara":
Jakcheck.append(1)
locality = st.selectbox("District", ("Ancol", "Kelapa Gading", "Pantai Indah Kapuk", "Pluit", "Sunter"))
elif region == "Jakarta Barat":
Jakcheck.append(1)
locality = st.selectbox("District", ("Cengkareng", "Daan Mogot", "Duri Kosambi", "Gajah Mada", "Grogol",
"Kalideres", "Kebon Jeruk", "Kedoya", "Kembangan", "Palmerah", "Pos Pengumben", "Puri Indah",
"Slipi", "Taman Sari", "Tanjung Duren"))
elif region == "Jakarta Selatan":
Jakcheck.append(1)
locality = st.selectbox("District", ("Bintaro", "Casablanca", "Cilandak", "Dharmawangsa", "Epicentrum",
"Fatmawati", "Gandaria", "Gatot Subroto", "Kalibata", "Kebagusan", "Kebayoran Baru",
"Kebayoran Lama", "Kemang", "Kuningan", "Lebak Bulus", "Mega Kuningan", "Pakubuwono",
"Pancoran", "Pasar Minggu", "Pejaten", "Permata Hijau", "Pesanggrahan", "Pondok Indah",
"Radio Dalam", "Rasuna Said", "SCBD", "Semanggi", "Senayan", "Senopati", "Setiabudi", "Simprug",
"Sudirman", "TB Simatupang", "Tebet"))
elif region == "Jakarta Pusat":
Jakcheck.append(1)
locality = st.selectbox("District", ("Bendungan Hilir", "Cempaka Putih", "Gatot Subroto", "Gunung Sahari",
"Kemayoran", "Mangga Besar", "Mangga Dua", "Menteng", "Pasar Baru", "Pecenongan", "Salemba",
"Senayan", "Senen", "Sudirman", "Tanah Abang", "Thamrin"))
elif region == "Jakarta Timur":
Jakcheck.append(1)
locality = st.selectbox("District", ("Cakung", "Cawang", "Cibubur", "Cipinang", "Jatinegara", "Kampung Melayu",
"Kelapa Gading", "MT Haryono", "Pasar Rebo", "Pondok Bambu", "Pulomas"))
elif region == "Bogor":
Jakcheck.append(0)
locality = st.selectbox("District", ("Sentul", "Tanah Sareal"))
elif region == "Depok":
Jakcheck.append(0)
locality = st.selectbox("District", ("Cimanggis", "Cinere", "Margonda"))
elif region == "Tangerang":
Jakcheck.append(0)
locality = st.selectbox("District", ("Alam Sutera", "BSD City", "Bintaro", "Cengkareng", "Cikokol", "Cipondoh",
"Ciputat", "Daan Mogot", "Gading Serpong", "Karang Tengah", "Karawaci", "Kelapa Dua - Tanggerang",
"Serpong"))
elif region == "Bekasi":
Jakcheck.append(0)
locality = st.selectbox("District", ("Bekasi", "Bekasi Timur", "Cikarang", "Kalimalang", "Lippo Cikarang",
"Pekayon", "Summarecon Bekasi"))
#Map region and district (locality) input to numerical coordinate
Longitude.append(lon_finder(location, region, locality))
Latitude.append(lat_finder(location, region, locality))
st.subheader("Which in-room facilities that your unit have?")
def facil(displayname, featurename):
if st.checkbox(displayname):
featurename.append(1)
else:
featurename.append(0)
facil('Air Conditioner', AC)
facil('Water Heater', Water_Heater)
facil('Dining Set', Dining_Set)
facil('Bed', Bed)
facil('Kitchen Set', Kitchen)
facil('Refrigerator', Fridge)
facil('Washing Machine', Washing_Machine)
facil('TV', TV)
st.subheader('Which apartment facilities are present?')
facil('ATM', ATM)
facil('TV Cable Services', TV_Cable)
facil('Grocery Shops', Grocery)
facil('Internet Services', Internet)
facil('Clothing Laundry', Laundry)
facil('Security (CCTV, etc)', Security)
facil('Restaurant', Restaurant)
facil('Multipurpose Room', Multipurpose_Room)
facil('Kids Playground', Playground)
st.subheader('Which sports facilities are present?')
facil('Swimming Pool', Swim_Pool)
facil('Basketball Field', Basketball)
facil('Gym', Gym)
facil('Jogging Track', Jogging)
facil('Tennis Field', Tennis)
#total facilities
total = AC[0] + Water_Heater[0] + Dining_Set[0] + Bed[0] + Kitchen[0] + Fridge[0] + Washing_Machine[0] + \
TV[0] + ATM[0] + TV_Cable[0] + Grocery[0] + Internet[0] + Laundry[0] + Security[0] + Multipurpose_Room[0] + \
Restaurant[0] + Playground[0] + Swim_Pool[0] + Basketball[0] + Gym[0] + Jogging[0] + Tennis[0]
Total_Facilities.append(total)
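#Assemble a single-row dataframe whose columns are assumed to match the feature set
#(names and order) that the saved LightGBM models were trained on.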
df = pd.DataFrame({'No_Rooms':No_Rooms, 'Bathroom': bathroom, 'Longitude':Longitude, 'Latitude':Latitude, 'Furnished':Furnished, 'Area':Area, 'AC':AC,
'Water_Heater':Water_Heater, 'Dining_Set':Dining_Set, 'Bed':Bed, 'Kitchen':Kitchen, 'Fridge':Fridge,
'Washing_Machine':Washing_Machine, 'TV':TV, 'ATM':ATM, 'TV_Cable':TV_Cable, 'Grocery':Grocery, 'Internet':Internet,
'Swim_Pool':Swim_Pool, 'Laundry':Laundry, 'Security':Security, 'Basketball':Basketball, 'Multipurpose_Room':Multipurpose_Room,
'Gym':Gym, 'Jogging':Jogging, 'Tennis':Tennis, 'Restaurant':Restaurant, 'Playground':Playground, 'Jakcheck':Jakcheck, 'Total_Facilities':Total_Facilities
})
if st.button("Calculate Price", key='calculate'):
lgb = joblib.load('lgb_final.joblib.dat')
price = int(lgb.predict(df)[0])
lgb_001 = joblib.load('lgb_001.joblib.dat')
lower_price1 = int(lgb_001.predict(df)[0])
lgb_099 = joblib.load('lgb_099.joblib.dat')
upper_price99 = int(lgb_099.predict(df)[0])
lgb_005 = joblib.load('lgb_005.joblib.dat')
lower_price5 = int(lgb_005.predict(df)[0])
lgb_095 = joblib.load('lgb_095.joblib.dat')
upper_price95 = int(lgb_095.predict(df)[0])
str_price = format(price, ',')
str_lowprice1 = format(lower_price1, ',')
str_upprice99 = format(upper_price99, ',')
str_lowprice5 = format(lower_price5, ',')
str_upprice95 = format(upper_price95, ',')
st.subheader("Your apartment unit's annual rent price is predicted at IDR {}".format(str_price))
st.subheader("A 99% prediction interval of your unit's price is IDR {} until IDR {}".format(str_lowprice1, str_upprice99))
st.subheader("A 95% prediction interval of your unit's price is IDR {} until IDR {}".format(str_lowprice5, str_upprice95))
st.subheader("Author's note")
st.write("If your apartment unit is located on top of a mall (or within walking distance to a mall), it is reasonable that the price might be \
higher than what is predicted here. If your apartment unit is marketed in a special promotion event, the price might be lower than what is predicted here")
st.subheader("Learn more about how the price is calculated")
st.write("The predicted price as well as the prediction interval is calculated by inputing the apartment unit details above into a saved Light GBM Regressor model. \
The model, when fitted to the scraped dataset, achieved around 0.9 R-squared score.")
elif pages == "Web Scraping Demo":
st.title('Simple Web Scraping Demo')
st.subheader("Welcome to the web scraping demo page!")
st.write("In this page, we'll simulate how an apartment unit's page is scraped during my data acquisition phase of this project.")
st.markdown("To use this scraper, insert a link of an apartment unit from [Jendela 360](https://www.jendela360.com) website.")
st.markdown("Here's a short GIF on how you can find an apartment unit page in Jendela 360.")
gif_view = st.radio("Toggle GIF viewing option:", ("View GIF", "Hide GIF"), index = 0)
if gif_view == "View GIF":
file_ = open("scrape_example.gif", "rb")
contents = file_.read()
data_url = base64.b64encode(contents).decode("utf-8")
file_.close()
st.markdown(
f'<img src="data:image/gif;base64,{data_url}" alt="scrape_example_gif">',
unsafe_allow_html=True,
)
url = st.text_input('Enter the link here:')
def get_ld_json(url: str) -> dict:
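# Fetch the page and parse its JSON-LD block (<script type="application/ld+json">),
# which holds the structured apartment metadata (name, rooms, address, coordinates).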
parser = "html.parser"
req = requests.get(url)
soup = bs(req.text, parser)
return json.loads("".join(soup.find("script", {"type":"application/ld+json"}).contents))
def main_feature_extractor(feature):
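# Read a labelled value (e.g. 'Bathroom', 'Area') from the feature grid by locating the icon
# whose alt text matches `feature` and taking the text of its parent list item.
# Note: relies on the module-level `soup` parsed after the button is pressed.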
feature_soup = soup.find("ul",{"class":'gridded--list bordered--grid'})
if feature_soup is None:
feature_soup = soup.find("ul",{"class":'gridded--list'})
feature = feature_soup.findChild("img",{"alt":feature}).find_parent("li").get_text().strip()
return(feature)
def facility_checker(facility):
if facility in facilities:
return(1)
else:
return(0)
if st.button('Scrape it!'):
try:
r = requests.get(url)
soup = bs(r.content, "html.parser")
##Apartment Metadata
apartment_metadata = get_ld_json(url)
Apt_Name = apartment_metadata['name']
No_Rooms = apartment_metadata['numberOfRooms']
Street = apartment_metadata['address']['streetAddress']
Locality = apartment_metadata['address']['addressLocality']
Region = apartment_metadata['address']['addressRegion']
Longitude = apartment_metadata['geo']['longitude']
Latitude = apartment_metadata['geo']['latitude']
h1_title = soup.find("div", {"id": "units"})
h1_title = h1_title.find('h1')
unit_name = h1_title.get_text().strip()
UnitName = unit_name
unit_id = url[-7:]
Unit_ID = unit_id
#Apartment Features
bathroom = main_feature_extractor('Bathroom')
number_of_bathrooms = int(bathroom[0])
Bathroom = number_of_bathrooms
Furnished = main_feature_extractor('Furnish')
area_text = main_feature_extractor('Area')
area = float(area_text[:-2].strip())
Area = area
Floor = main_feature_extractor('Floor')
Tower = main_feature_extractor('Tower')
##Apartment Facilities
facilities_soup = soup.find_all('span', {"class":"facility-text"})
facilities = str()
facilities_list = []
for _ in facilities_soup:
facility = _.get_text().strip()
facilities = facilities + facility+' '
facilities_list.append(facility)
AC = facility_checker('AC')
Water_Heater = facility_checker('Water Heater')
Dining_Set = facility_checker('Dining Set')
Electricity = facility_checker('Electricity')
Bed = facility_checker('Bed')
Access_Card = facility_checker('Access Card')
Kitchen = facility_checker('Kitchen')
Fridge = facility_checker('Refrigerator')
Washing_Machine = facility_checker('Washing Machine')
TV = facility_checker('TV')
ATM = facility_checker('ATM')
TV_Cable = facility_checker('TV Kabel')
Grocery = facility_checker('Grocery Store')
Internet = facility_checker('Internet')
Swim_Pool = facility_checker('Kolam Renang')
Laundry = facility_checker('Laundry')
Security = facility_checker('Security')
Basketball = facility_checker('Lapangan Basket')
Multipurpose_Room = facility_checker('Ruang Serbaguna')
Gym = facility_checker('Gym')
Jogging = facility_checker('Jogging Track')
Tennis = facility_checker('Lapangan Tenis')
Restaurant = facility_checker('Restoran')
Playground = facility_checker('Taman Bermain')
Total_Facilities = AC + Water_Heater + Dining_Set + Electricity + Bed + Access_Card + \
Kitchen + Fridge + Washing_Machine + TV + ATM + TV_Cable + Grocery + \
Internet + Swim_Pool + Laundry + Security + Basketball + Multipurpose_Room + \
Gym + Jogging + Tennis + Restaurant + Playground
#Apartment Price
price = soup.find('div', {'class':'price-content'})
if price.find('span',{'class':'text-strikethrough'}) is not None:
price.find('span',{'class':'text-strikethrough'}).decompose()
price_raw = price.get_text().replace('\n','').replace(' ','').replace('Rp0','').replace('$0','').replace(',','').replace('$','USD').replace('Rp','IDR').strip()
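#Note: the bracketed patterns below are regex character classes, so each matches a single character
#from the quoted set, followed by digits, a slash and one more character; the currency is later
#inferred from that single matched character ('D' -> USD, 'R' -> IDR).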
pattern_year = "['USD|IDR']\d+/['tahun'|'thn']"
pattern_month = "['USD'|'IDR']\d+/['bulan'|'bln']"
search_year_regex = re.search(pattern_year, price_raw)
if search_year_regex is not None:
search_year = search_year_regex[0]
if search_year[0] == 'D':
currency = 'USD'
elif search_year[0] == 'R':
currency = 'IDR'
else:
currency = 'unknown'
annual_price = search_year[1:].replace('/t','').strip()
Currency = currency
Annual_Price = annual_price
items = [unit_name, Unit_ID, Apt_Name, No_Rooms, Bathroom, Street, Locality, Region, \
Longitude, Latitude, Furnished, Area, Floor, Tower, AC, Water_Heater, Dining_Set, Electricity, Bed, \
Access_Card, Kitchen, Fridge, Washing_Machine, TV, ATM, TV_Cable, Grocery, Internet, Swim_Pool, Laundry, \
Security, Basketball, Multipurpose_Room, Gym, Jogging, Tennis, Restaurant, Playground, Total_Facilities, \
Currency, Annual_Price]
names = ['Unit_Name', 'Unit_ID', 'Apt_Name', 'No_Rooms', 'Bathroom', 'Street', 'Locality', 'Region', \
'Longitude', 'Latitude', 'Furnished', 'Area', 'Floor', 'Tower', 'AC', 'Water_Heater', 'Dining_Set', \
'Electricity', 'Bed', 'Access_Card', 'Kitchen', 'Fridge', 'Washing_Machine', 'TV', 'ATM', 'TV_Cable', \
'Grocery', 'Internet', 'Swim_Pool', 'Laundry', 'Security', 'Basketball', 'Multipurpose_Room', 'Gym', \
'Jogging', 'Tennis', 'Restaurant', 'Playground', 'Total_Facilities', 'Currency', 'Annual_Price']
for i in range(len(items)):
st.write(names[i]+': {}'.format(items[i]))
except Exception:
st.write("Unknown error occurred. Please insert another apartment unit link and try again.")
elif pages == "Data Visualization":
st.title('Jabodetabek Apartment Data Visualization')
if st.checkbox("Display Data", False):
st.write(dash)
st.subheader('Unit Type Visualization')
ut_chart = st.selectbox("Unit Type Plot", ("Boxplot", "Histogram"))
if ut_chart == "Boxplot":
fig, ax = plt.subplots()
ax = sns.boxplot(x = "Unit Type", y = 'Annual Price', data = dash, order = ['Studio', '1BR', '2BR', '3BR', '4+BR'])
ax.set(ylabel = "Annual Rent Price (in tens of millions IDR)")
st.pyplot(fig)
elif ut_chart == "Histogram":
fig, ax = plt.subplots()
ax = sns.countplot(x="Unit Type", data=dash, order = ['Studio', '1BR', '2BR', '3BR', '4+BR'])
ax.set(ylabel = "Count")
st.pyplot(fig)
st.subheader('Area Visualization')
area_chart = st.selectbox("Area Plot", ("Distribution", "Boxplot", "Scatterplot"))
if area_chart == "Distribution":
fig, ax = plt.subplots()
ax = sns.kdeplot(dash.Area)
st.pyplot(fig)
elif area_chart == "Boxplot":
second_choice = st.selectbox("Categorized By", ("Unit Type", "Region", "Furnished Status"))
if second_choice == "Unit Type":
fig, ax = plt.subplots()
ax = sns.boxplot(x="Unit Type", y="Area", data = dash, order = ['Studio', '1BR', '2BR', '3BR', '4+BR'])
st.pyplot(fig)
elif second_choice == "Region":
fig, ax = plt.subplots()
ax = sns.boxplot(x="Region", y="Area", data = dash)
plt.xticks(rotation=45)
st.pyplot(fig)
elif second_choice == "Furnished Status":
fig, ax = plt.subplots()
ax = sns.boxplot(x="Furnished Status", y="Area", data = dash)
st.pyplot(fig)
elif area_chart == "Scatterplot":
fig, ax = plt.subplots()
ax = sns.scatterplot(x = "Area", y = 'Annual Price', data = dash, hue = 'Unit Type')
st.pyplot(fig)
st.subheader('Region Visualization')
region_chart = st.selectbox("Region Plot", ("Boxplot", "Histogram"))
if region_chart == "Boxplot":
fig, ax = plt.subplots()
ax = sns.boxplot(x = "Region", y = "Annual Price", data = dash)
plt.xticks(rotation=45)
st.pyplot(fig)
elif region_chart == "Histogram":
fig, ax = plt.subplots()
ax = sns.countplot(x = "Region", data = dash)
plt.xticks(rotation = 45)
st.pyplot(fig)
st.subheader('Jakarta Map Visualization')
lonlat = dash[['Longitude', 'Latitude', 'Annual Price']].rename({'Longitude':'lon', 'Latitude':'lat'}, axis='columns')
def get_center_latlong(df):
# get the center of my map for plotting
centerlat = (df['lat'].max() + df['lat'].min()) / 2
centerlon = (df['lon'].max() + df['lon'].min()) / 2
return centerlat, centerlon
center_map = get_center_latlong(lonlat)
# create a LinearColorMap and assign colors, vmin, and vmax
# light green marks the cheapest units and dark green the most expensive ones, scaled between the min and max annual price
colormap = cm.LinearColormap(colors=['lightgreen', 'green', 'darkgreen'], vmin=min(lonlat['Annual Price']), vmax=max(lonlat['Annual Price']))
# create the folium map centered on the data, using the OpenStreetMap tileset
m = folium.Map(location=center_map, zoom_start=10, tiles='OpenStreetMap')
st.write("The dots represent apartment units from our dataset. The darker the color, the higher the annual rent price.")
st.write("We can see that most of the darker dots are in Jakarta Selatan region.")
# Go through each apartment unit in the dataset, draw a circle at its coordinates, and add it to the map.
# The circle color is taken from the annual price via the colormap object
for i in range(len(lonlat.lon)):
folium.Circle(
location=[lonlat.iloc[i]['lat'], lonlat.iloc[i]['lon']],
radius=100,
fill=True,
color=colormap(lonlat.iloc[i]['Annual Price']),
fill_opacity=0.2,
weight = 5
).add_to(m)
# the following line adds the scale directly to our map
m.add_child(colormap)
folium_static(m)
st.subheader('Facilities Visualization')
facility = st.selectbox("Select Facility", ('AC', 'Water Heater','Dining Set', 'Electricity', 'Bed', 'Access Card',
'Kitchen Set', 'Refrigerator', 'Washing Machine', 'TV', 'ATM', 'TV Cable',
'Grocery Shop', 'Internet Services', 'Swimming Pool', 'Laundry', 'Security',
'Basketball Field', 'Multipurpose Room', 'Gym', 'Jogging Track', 'Tennis Field',
'Restaurant', 'Kids Playground'))
fig, (ax1, ax2) = plt.subplots(ncols = 2)
sns.boxplot(x=facility, y="Annual Price", data = dash, ax=ax1)
sns.countplot(x=facility, data=dash, ax=ax2)
plt.tight_layout()
st.pyplot(fig)
st.write("On the X-Axis, '0' represents units without the selected facility, and '1' represents units with the selected facility")
elif pages == "Play with Machine Learning Models":
st.title('Train and evaluate your own Machine Learning models')
st.subheader('Do you need an introduction to machine learning models and to what this page does?')
explanation = st.radio('Choose', ("Yes, provide me with some explanation, please.", "No, I'm familiar with the subject matter and would like to \
train the model right away."), index=1)
if explanation == "Yes, provide me with some explanation, please.":
st.subheader('Part 1: What is a Machine Learning Regression model?')
st.write("In this project, we use data which contain apartment unit details (location, area, facilities, etc) to predict \
its annual rent price. The data about our units are called 'predictors' or 'independent variables', while the \
annual rent price is called the 'target' or 'dependent' variable.")
st.write("Simply put, a machine learning regression algorithm trains a model to be able to predict the 'target variable' as \
'accurate' as possible by learning from existing data (which has predictors with their matching target variables'). ")
st.write("Our model learns from the data we collect and try to come up with a set of rule / calculation of its own, so when \
we enter a new unit's details, it can give a good prediction, on what its rent price might be.")
st.subheader("Part 2: How to measure a model's performance?")
st.write("We have over 5000 rows of data, and we split it randomly into two sets: \
the training set (usually around 80-90 percent of all data), and the testing set. Our model is trained on the training \
set, and we'll ask them to guess the annual rent price of the testing set without providing it with the 'target' variable. \
Since we have the 'correct' answer, we can compare the model's prediction (its guess of rent price) with the actual rent price \
of those units.")
st.write("A good model should perform well however we split the data. In scikit-learn, the Python framework used for Machine Learning \
in this project, the 'randomness' of the data splitting is determined by a 'random seed'. Any 'random seed' should do the job just fine, \
but if you want to compare multiple models' performance, it's best to use the same 'random seed' across all your models. This means that \
each model is trained on the same training set and tested on the same testing set.")
st.subheader('Part 3: What metrics can be used to measure model performance?')
st.write("Our model performance is scored by a few metrics. MAE is an abbrv. of 'Mean Absolute Error', \
showing how much our model's prediction differ from the actual data. RMSE is an abbrv of 'Root Mean Squared Error', in which \
differences between our prediction and the actual data is squared, averaged out and then square rooted. This metric 'punishes' \
huge errors more than the first one.")
st.write("In a simpler term, 'if you think having an error of 4 unit is twice as bad as having \
an error of 2, use the MAE. If you think having an error of 4 is more than twice as bad as having an error of 2, because you want to \
punish larger mistakes, use the RMSE.' We aim for our RMSE and MAE to be as low as possible, but since we're talking about \
apartment rent prices in the millions, it made sense for our RMSE and MAE to be in the millions too.")
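# For reference, with y the actual prices and y_hat the predictions:
# MAE = mean(|y - y_hat|) and RMSE = sqrt(mean((y - y_hat)**2))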
st.write('The final metric, R2 (R-squared), is on a scale of 0 to 1 - and in simple terms, it explains how much of the variance (you could say movement) \
in our actual data can be explained by our model. The higher the score, the better our model is at capturing the actual relationship \
between the features and the target variable(s). This is one of the metrics that I really like - as regardless of the value of our target variable, \
an R-squared score always ranges from 0 to 1.')
st.write('For example, a model predicting the price of fruits at the supermarket will naturally tend to have lower RMSE and MAE scores, as the \
target variable is in thousands of rupiah, not millions. However, the R-squared score always ranges from 0 to 1.')
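# For reference: R-squared = 1 - SS_res / SS_tot,
# where SS_res = sum((y - y_hat)**2) and SS_tot = sum((y - mean(y))**2)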
st.subheader("Part 4: The Student Analogy - Underfitting and Overfitting")
st.write('A machine learning model is like a student who wishes to learn some materials for an upcoming exam. The student has to \
figure out how to understand the available materials, so he/she could give an accurate answer when given a new question. \
The study materials are our training data, and the set of new questions on the test is the test data.')
st.write('To really test if a student understands the lesson, we need to look at his/her performance on the test, as it is something \
our student has not encountered yet. If our student gets a good grade on the test, it means he/she understands the material well enough.')
st.write("What does this analogy has to do with underfitting and overfitting? Suppose we measure our student's performance twice. First, by asking \
him/her to answer exercise questions in the book (predicting the train set), and answering completely new questions on the test (predicting test data).")
st.write("If our student has a lower score on the exercise than the real test (having a better accuracy on data it has never seen) - it means \
that our student is lazy. He could've performed better. This is called underfitting. An underfit model is a model that's too general, and does not \
study well enough on the train set.")
st.write("If our student has a high score in answering exercise questions, but gets a low test score, it means our student does not really understand the materials - \
he/she just memorizes the exercise questions' answer key. That's why our student performed very well on the exercise questions. However, \
when presented with new unseen questions, he/she fails to answer correctly. This is called overfitting.")
st.write("When overfitting happens, our model only works well on 'training' set, but does not have a good score on the test set. It means, \
if it is given new data, it cannot predict well enough")
st.write("We want a model with good accuracy, but does not underfit nor overfit. This is the delicate part of tuning our model.")
st.subheader('Epilogue')
st.write('Last but not least, although having an interactive dashboard like this may make it seem easy to train and evaluate machine \
learning models, it has some limitations. It is much better to train and evaluate models using Jupyter notebooks/Google Colab, as \
we can do more with lines of code than with a point-and-click interface. Furthermore, complex models can be \
trained faster by using a GPU (Graphics Processing Unit), which is very difficult to implement in web apps like this one.')
st.subheader('What score should I aim for?')
st.write("When cross validated, the final model's RMSE score usually averages at around 30 000 000, and has an R-squared score around 0.9. \
Try to see if you can find combinations of columns and parameters that yields a model with RMSE score around the final model's, \
but watch out for overfitting!")
st.subheader('Which features would you like to include?')
cols = st.multiselect("Choose your feature columns", ('Number of Bedrooms', 'Bathroom', 'District', 'Region',
'Longitude', 'Latitude', 'Furnished', 'Area', 'AC', 'Water Heater','Dining Set', 'In-Jakarta Check',
'Electricity', 'Bed', 'Access Card', 'Kitchen Set','Refrigerator', 'Washing Machine',
'TV', 'ATM', 'TV Cable','Grocery Shop', 'Internet Services', 'Swimming Pool', 'Laundry',
'Security', 'Basketball Field', 'Multipurpose Room', 'Gym', 'Jogging Track', 'Tennis Field',
'Restaurant', 'Kids Playground', 'Total Number of Facilities'))
st.subheader('Select a random seed')
st.write('Any seed number will do just fine, but if you wish to compare multiple models consecutively, select the same random seed \
so those models will have the same train and test sets.')
seed = st.slider('Seed', 0, 10000)
st.subheader('Select the test data proportion')
st.write('This represents the proportion of rows that will be held out as the test set.')
test_size = st.slider('Test Proportion', 0.1, 0.3)
X_Custom = ML_Ready[cols].copy() # copy so the label encoding below does not trigger SettingWithCopyWarning
y = ML_Ready['Annual Price']
labelencoder=LabelEncoder()
for col in X_Custom.columns:
if col == 'Region':
X_Custom[col] = labelencoder.fit_transform(X_Custom[col])
elif col == 'District':
X_Custom[col] = labelencoder.fit_transform(X_Custom[col])
X_train, X_test, y_train, y_test = train_test_split(X_Custom, y, test_size = test_size, random_state = seed)
test_val = y_test.to_numpy()
train_val = y_train.to_numpy()
#function for giving prediction results
def predict_model(model, logtr = False):
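#When logtr is True the model was fitted on np.log1p(y_train), so predictions are mapped back
#to the original price scale with np.expm1 (the inverse of log1p) before scoring.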
if logtr == False:
predict_test = model.predict(X_test)
predict_train = model.predict(X_train)
else:
predict_test = np.expm1(model.predict(X_test))
predict_train = np.expm1(model.predict(X_train))
return predict_test, predict_train
#function to draw scatterplots of predicted vs actual train/test values
def plot_result():
fig, (ax1, ax2) = plt.subplots(ncols = 2) #scatterplot for both train and test values
sns.scatterplot(x=test_val, y=predict_test, ax=ax1)
sns.scatterplot(x=train_val, y=predict_train, ax=ax2)
ax1.set(xlabel = "Actual Test Price Values")
ax1.set(ylabel = "Predicted Test Price Values")
ax2.set(xlabel = "Actual Train Price Values")
ax2.set(ylabel = "Predicted Train Price Values")
plt.tight_layout()
st.pyplot(fig)
def evaluate():
from sklearn import metrics
RMSE_test = np.sqrt(metrics.mean_squared_error(test_val, predict_test))
R2_test = metrics.r2_score(test_val, predict_test)
RMSE_train = np.sqrt(metrics.mean_squared_error(train_val, predict_train))
R2_train = metrics.r2_score(train_val, predict_train)
st.write('RMSE of Test Set Prediction:', RMSE_test)
st.write('R2 of Test Set Prediction:', R2_test)
st.write('RMSE of Train Set Prediction:', RMSE_train)
st.write('R2 of Train Set Prediction:', R2_train)
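#Rough fit diagnostics: a train R2 below the test R2 is flagged as possible underfitting,
#while a train-test R2 gap of more than ~6 percentage points is flagged as possible overfitting.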
if R2_train < R2_test:
st.write('Your R-squared train score is less than R-squared test score. Your model might be underfit. \
Try to change your feature columns to see if you can have a better fit.')
elif R2_train > R2_test:
if (R2_train - R2_test)*100 > 5.9:
st.write('Your R-squared train score is higher than your R-squared test score by 6 percent or more. \
Your model might be overfit. Try to remove one or two feature columns, do a different range of \
hyperparameter tuning, or choose a different combination of columns')
else:
if R2_test < 0.75:
st.write('Your model does not underfit nor overfit, but its R-squared score is lower than 0.75.')
st.write("You can still do better! Try to find another combination of columns and/or parameters. \
A linear regression can achieve up to 0.78 R-squared score, \
while XGBoost and Light GBM can reach up to 0.9 R-squared score.")
elif R2_test < 0.88:
st.write("Congratulations! You have made a regression model that is not underfit nor overfit, and with a relatively good \
R-squared score. You haven't beat the model deployed on 'Calculator' page, though :)")
else:
st.write("Congratulations! You have made a regression model that is not underfit nor overfit, and your model's performance \
is as good as my final model. Well done!")
st.subheader('Specify your machine learning model')
modeltype = st.selectbox('Model Type', ('Linear Regression', 'XGBoost', 'Light GBM Regressor'))
if modeltype == "Linear Regression":
if st.button("Fit and Evaluate Model"):
lm = LinearRegression()
lm.fit(X_train, y_train)
predict_test, predict_train = predict_model(lm)
plot_result()
evaluate()
elif modeltype == 'XGBoost':
if explanation == "Yes, provide me with some explanation, please.":
st.write("What is hyperparameter tuning? A hyperparameter is a parameter whose value is used to control the learning process \
of our machine learning model. As an analogy, picture a machine learning model as a student who needs to study the 'training' \
set in order to prepare for the exam, which is the 'testing' set. Hyperparameters are things that affect how this student learns.")
st.write("For example, how much he/she learns on a single day? Or how many days before the exam that he/she studies? Regardless of these \
'hyperparameters' our student still learns the same data - just in slightly different ways. Finding the optimal 'hyperparameter' \
is like finding our student's best study routine - so he can achieve better result during the exam.")
st.subheader('Do you want to do hyperparameter tuning?')
tune_check = st.radio('Answer', ('Yes', 'No, use the baseline model.'))
if tune_check == "Yes":
st.markdown('Note: In this simulation, we will only be doing tuning on four parameters. Refer to the [XGBoost documentation](https://xgboost.readthedocs.io/en/latest/parameter.html) \
to see the list of all parameters and their definition.')
new_XGB = XGBRegressor()
st.subheader("Search range for 'min_child_weight' parameter:")
min_ch_w_range = st.slider('Select range', 1, 50, (3, 20))
st.subheader("Search range for 'max_depth' parameter:")
max_depth_range = st.slider('Select range', 1, 50, (2, 20))
st.subheader("Search range for 'subsample' parameter:")
subsample_range = st.slider('Select range', 0.1, 1.0, (0.2, 0.8))
st.subheader("Search range for 'colsample_bytree' parameter:")
colsample_range = st.slider('Select range', 0.1, 1.0, (0.3, 0.8))
params = {
'min_child_weight': min_ch_w_range,
'subsample': subsample_range,
'colsample_bytree': colsample_range,
'max_depth': max_depth_range
}
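#Note: each value above is the (low, high) tuple returned by st.slider, so RandomizedSearchCV
#samples from those two endpoint values rather than the whole range in between.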
from sklearn.model_selection import KFold # plain KFold: StratifiedKFold cannot stratify a continuous regression target
skf = KFold(n_splits=10, shuffle = True, random_state = seed)
random_search = RandomizedSearchCV(new_XGB, param_distributions=params, n_iter=50,
scoring='neg_root_mean_squared_error', n_jobs=-1, cv=skf.split(X_Custom,y), verbose=2,
random_state=seed)
st.subheader('Do you want to perform log transformation on the target variable?')
st.markdown('If you choose yes, then the trained target variable will be log-transformed first. Then, the prediction \
result from X_test will be exponentially transformed to match the original scale of Annual Price - before \
comparing it against the test actual data.')
log_trf = st.radio("Log Transformation", ("Yes", "No"))
if st.button("Find best parameter and evaluate the tuned model"):
with st.spinner('Searching for best parameter(s)...'):
random_search.fit(X_Custom, y)
st.success('Done!')
min_child_weight = random_search.best_params_['min_child_weight']
max_depth = random_search.best_params_['max_depth']
subsample = random_search.best_params_['subsample']
colsample_bytree = random_search.best_params_['colsample_bytree']
st.write('min_child_weight:', min_child_weight)
st.write('max_depth:', max_depth)
st.write('subsample:', subsample)
st.write('colsample_bytree:', colsample_bytree)
tuned_newxgb = XGBRegressor(min_child_weight = min_child_weight,
max_depth = max_depth,
subsample = subsample,
colsample_bytree = colsample_bytree)
if log_trf == "No":
tuned_newxgb.fit(X_train, y_train)
predict_test, predict_train = predict_model(tuned_newxgb)
plot_result()
evaluate()
else:
tuned_newxgb.fit(X_train, np.log1p(y_train))
predict_test, predict_train = predict_model(tuned_newxgb, logtr=True)
plot_result()
evaluate()
else:
st.subheader('Do you want to perform log transformation on the target variable?')
st.markdown('If you choose yes, then the trained target variable will be log-transformed first. Then, the prediction \
result from X_test will be exponentially transformed to match the original scale of Annual Price - before \
comparing it against the test actual data.')
log_trf = st.radio("Log Transformation", ("Yes", "No"))
if st.button("Train and evaluate the model"):
if log_trf == "No":
standard_xgb = XGBRegressor()
standard_xgb.fit(X_train, y_train)
predict_test, predict_train = predict_model(standard_xgb)
plot_result()
evaluate()
if log_trf == "Yes":
standard_xgb = XGBRegressor()
standard_xgb.fit(X_train, np.log1p(y_train))
predict_test, predict_train = predict_model(standard_xgb, logtr=True)
plot_result()
evaluate()
elif modeltype == 'Light GBM Regressor':
if explanation == "Yes, provide me with some explanation, please.":
st.write("What is hyperparameter tuning? A hyperparameter is a parameter whose value is used to control the learning process \
of our machine learning model. As an analogy, picture a machine learning model as a student who needs to study the 'training' \
set in order to prepare for the exam, which is the 'testing' set. Hyperparameters are things that affect how this student learns.")
st.write("For example, how much he/she learns on a single day? Or how many days before the exam that he/she studies? Regardless of these \
'hyperparameters' our student still learns the same data - just in slightly different ways. Finding the optimal 'hyperparameter' \
is like finding our student's best study routine - so he can achieve better result during the exam.")
st.subheader('Do you want to do hyperparameter tuning?')
tune_check = st.radio('Answer', ('Yes', 'No, use the baseline model.'))
if tune_check == "Yes":
st.markdown("Note: In this simulation, we will only be doing tuning on four parameters. Refer to the [Light GBM documentation](https://lightgbm.readthedocs.io/en/latest/Parameters-Tuning.html) \
to see the list of all parameters and their definition.")
new_LGB = LGBMRegressor()
st.subheader("Search range for 'num_leaves' parameter:")
num_leaves = st.slider('Select range', 1, 50, (20, 40))
st.subheader("Search range for 'min_data_in_leaf' parameter:")
min_data_in_leaf = st.slider('Select range', 1, 30, (2, 15))
st.subheader("Search range for 'learning_rate' parameter:")
learning_rate = st.slider('Select range', 0.1, 1.0, (0.2, 0.8))
st.subheader("Search range for 'max_bin' parameter:")
max_bin = st.slider('Select range', 100, 300, (200, 260))
params = {
'num_leaves': num_leaves,
'min_data_in_leaf': min_data_in_leaf,
'learning_rate': learning_rate,
'max_bin': max_bin
}
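#As above, each value is the (low, high) slider tuple, so RandomizedSearchCV samples only those two endpoints.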
from sklearn.model_selection import KFold # plain KFold: StratifiedKFold cannot stratify a continuous regression target
skf = KFold(n_splits=10, shuffle = True, random_state = seed)
random_search = RandomizedSearchCV(new_LGB, param_distributions=params, n_iter=50,
scoring='neg_root_mean_squared_error', n_jobs=-1, cv=skf.split(X_Custom,y), verbose=2,
random_state=seed)
st.subheader('Do you want to perform log transformation on the target variable?')
st.markdown('If you choose yes, then the trained target variable will be log-transformed first. Then, the prediction \
result from X_test will be exponentially transformed to match the original scale of Annual Price - before \
comparing it against the test actual data.')
log_trf = st.radio("Log Transformation", ("Yes", "No"))
if st.button("Find best parameter and evaluate the tuned model"):
with st.spinner('Searching for best parameter(s)...'):
random_search.fit(X_Custom, y)
st.success('Done!')
num_leaves = random_search.best_params_['num_leaves']
min_data_in_leaf = random_search.best_params_['min_data_in_leaf']
learning_rate = random_search.best_params_['learning_rate']
max_bin = random_search.best_params_['max_bin']
st.write('num_leaves:', num_leaves)
st.write('min_data_in_leaf:', min_data_in_leaf)
st.write('learning_rate:', learning_rate)
st.write('max_bin:', max_bin)
tuned_newlgb = LGBMRegressor(num_leaves = num_leaves,
min_data_in_leaf = min_data_in_leaf,
learning_rate = learning_rate,
max_bin = max_bin)
if log_trf == "No":
tuned_newlgb.fit(X_train, y_train)
predict_test, predict_train = predict_model(tuned_newlgb)
plot_result()
evaluate()
else:
tuned_newlgb.fit(X_train, np.log1p(y_train))
predict_test, predict_train = predict_model(tuned_newlgb, logtr=True)
plot_result()
evaluate()
else:
st.subheader('Do you want to perform log transformation on the target variable?')
st.markdown('If you choose yes, then the trained target variable will be log-transformed first. Then, the prediction \
result from X_test will be exponentially transformed to match the original scale of Annual Price - before \
comparing it against the test actual data.')
log_trf = st.radio("Log Transformation", ("Yes", "No"))
if st.button("Train and evaluate the model"):
if log_trf == "No":
standard_lgb = LGBMRegressor()
standard_lgb.fit(X_train, y_train)
predict_test, predict_train = predict_model(standard_lgb)
plot_result()
evaluate()
if log_trf == "Yes":
standard_lgb = LGBMRegressor()
standard_lgb.fit(X_train, np.log1p(y_train))
predict_test, predict_train = predict_model(standard_lgb, logtr=True)
plot_result()
evaluate()
elif pages == "Summary":
st.title('Summary')
st.subheader('Dataset Description')
st.markdown('The data consists of 5340 apartment units scraped from Jendela 360 website on December 3rd 2020.')
st.markdown('There are 6 numerical features: number of bedrooms, number of bathrooms, longitude, latitude, area, and total number of facilities.')
st.markdown("Other than these numerical features, there are 23 categorical features which are labeled '1' if the facility is \
present in the apartment, or '0' if the facility is not present.")
st.markdown("The area of apartments in the dataset ranges from 20 - 250 meter squared.")
st.markdown("The price of the apartments in the dataset ranges from 12 - 705.6 million rupiah")
st.subheader('Regression Model Used')
models = {'Model Name': ['Linear Regression', 'KNN Regressor', 'Random Forest Regressor', 'XGB Regressor (baseline)', 'XGB Regressor (tuned)', 'Light GBM Regressor (baseline)', 'Light GBM Regressor (tuned)'],
'Rounded R2 Score*': [0.8, 0.84, 0.9, 0.9, 0.9, 0.9, 0.9],
'Further Description': ['Suffers from multicollinearity issues', 'Data has to be scaled', 'Hyperparameter tuning does not improve the model', 'Overfits', 'Accuracy slightly below baseline Light GBM', 'Model of choice', 'Overfits']
}
model_report = pd.DataFrame(models, columns = ['Model Name', 'Rounded R2 Score*', 'Further Description'])
st.write(model_report)
st.write('*: Exact R-squared scores depend on how we split the training and testing data. Overall, the Random Forest, XGB, and Light GBM Regressors \
have similar R-squared scores, but we provide the reasons why we ultimately chose the baseline Light GBM model as our final model.')
st.markdown('XGB vs Light GBM: it seems that in this dataset, the baseline XGB model tends to have higher accuracy on the test set, but it overfits on the training set - while \
the baseline Light GBM model does not overfit on the training set. The opposite happens when we tune the models. After hyperparameter tuning, \
the XGB model does not overfit, but the Light GBM model does.')
st.markdown('Based on the bias-variance tradeoff, ultimately the author chooses a model that does not overfit. The baseline XGB model, although \
performing slightly better (by 0.5-1 percent) than the baseline LGB model, achieves a 0.98 R2-score on the training set, and is thus not chosen due to this overfitting issue. \
By comparison, the baseline LGB model has an R2-score of 0.95 on the training set, and 0.9 on testing set.')
st.markdown("Other than single prediction value, we also generate a '99%' and '95%' prediction interval from our Light GBM model.")
st.subheader('Rank of Feature based on Importance')
st.markdown('Feature importance is ranked based on how many times each feature is used to make splits in the decision trees (in the Light GBM algorithm)')
feature_imp = | pd.read_csv('Temp Files\Feature Importance.csv') | pandas.read_csv |
# Word Embedding Models: Preprocessing and InferSent Model Training
# Project title:
# Creator: <NAME>
# Institution: Department of Sociology, University of California, Berkeley
# Date created: June 9, 2019
# Date last edited: June 10, 2019
# Import general packages
import imp, importlib # For working with modules
import nltk # for natural language processing tools
import pandas as pd # for working with dataframes
#from pandas.core.groupby.groupby import PanelGroupBy # For debugging
import numpy as np # for working with numbers
import pickle # For working with .pkl files
from tqdm import tqdm # Shows progress over iterations, including in pandas via "progress_apply"
import sys # For terminal tricks
import _pickle as cPickle # Optimized version of pickle
import gc # For managing garbage collector
import timeit # For counting time taken for a process
import datetime # For working with dates & times
import spacy
# Import packages for cleaning, tokenizing, and stemming text
import re # For parsing text
from unicodedata import normalize # for cleaning text by converting unicode character encodings into readable format
from nltk import word_tokenize, sent_tokenize # widely used text tokenizer
from nltk.stem.porter import PorterStemmer # approximate but effective (and common) method of normalizing words: stems words by transforming or cutting off word endings
stem = PorterStemmer().stem # Makes stemming more accessible
from nltk.corpus import stopwords # for eliminating stop words
import gensim # For word embedding models
from gensim.models.phrases import Phrases # Makes word2vec more robust: looks for multi-word phrases, not just single words
# Import packages for multiprocessing
import os # For navigation
numcpus = len(os.sched_getaffinity(0)) # Detect and assign number of available CPUs
from multiprocessing import Pool # key function for multiprocessing, to increase processing speed
pool = Pool(processes=numcpus) # Pre-load number of CPUs into pool function
import Cython # For parallelizing word2vec
mpdo = False # Set to 'True' if using multiprocessing--faster for creating words by sentence file, but more complicated
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('words')
# imports
from random import randint
import numpy as np
import torch
from numpy import dot, absolute
from numpy.linalg import norm
# Load model
from models import InferSent
model_version = 1
MODEL_PATH = "../InferSent/encoder/infersent%s.pkl" % model_version
params_model = {'bsize': 64, 'word_emb_dim': 300, 'enc_lstm_dim': 2048,
'pool_type': 'max', 'dpout_model': 0.0, 'version': model_version}
model = InferSent(params_model)
model.load_state_dict(torch.load(MODEL_PATH))
# Keep it on CPU or put it on GPU
use_cuda = False
model = model.cuda() if use_cuda else model
# If infersent1 -> use GloVe embeddings. If infersent2 -> use InferSent embeddings.
W2V_PATH = '../InferSent/encoder/glove.840B.300d.txt'
model.set_w2v_path(W2V_PATH)
print("Word Embeddings Loaded!")
cwd = os.getcwd()
from os import listdir
from os.path import isfile, join
import re
import sys; sys.path.insert(0, "../../../data_management/tools/")
from clean_text import stopwords_make, punctstr_make, unicode_make, get_common_words
# Import packages
import re, datetime
import string # for one method of eliminating punctuation
from nltk.corpus import stopwords # for eliminating stop words
from sklearn.feature_extraction import text
from nltk.stem.porter import PorterStemmer; ps = PorterStemmer() # approximate but effective (and common) method of stemming words
import os # for working with file trees
# Prep dictionaries of English words
from nltk.corpus import words # Dictionary of 236K English words from NLTK
english_nltk = set(words.words()) # Make callable
english_long = set() # Dictionary of 467K English words from https://github.com/dwyl/english-words
fname = "../../../models_storage/word_embeddings_data/english_words.txt" # Set file path to long english dictionary
with open(fname, "r") as f:
for word in f:
english_long.add(word.strip())
df = pd.read_csv("../../../models_storage/word_embeddings_data/ocr_text_with_tags_10000.csv")
df = df[df.text.isna()==False] #filtering out rows with NA's for text
#df = df[:50] #take this line out if it works
#df.text = df.text.apply(lambda x: x[:10000] if len(x) > 10000 else x)
# Create useful lists using above functions:
stop_words_list = stopwords_make()
punctstr = punctstr_make()
unicode_list = unicode_make()
model.build_vocab(df.text)
print("Vocabulary loading complete!")
#writing function for common cosine similarity
def doc_words_cosine(i, t):
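# i indexes a row of the precomputed document `embeddings` matrix (built elsewhere with model.encode);
# t selects which term list's averaged InferSent embedding to compare against.
# Returns the absolute cosine similarity between the document and that averaged term vector.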
emb = embeddings[i]
if t == 'culture':
word_vec_avg = np.sum(culture_embeddings, axis=0)/len(culture)
elif t == 'demographic':
word_vec_avg = np.sum(demographic_embeddings, axis=0)/len(demographic)
elif t == 'relational':
word_vec_avg = np.sum(relational_embeddings, axis=0)/len(relational)
return absolute(dot(emb, word_vec_avg)/(norm(emb)*norm(word_vec_avg)))
######
#defining the vocabulary - change this for the current project after the meeting
######
######
culture = pd.read_csv("../../../models_storage/word_embeddings_data/Culture.csv", sep='\n', header=None)
culture.columns = ["vocab"]
demographic = pd.read_csv("../../../models_storage/word_embeddings_data/Demographic.csv", sep='\n', header=None)
demographic.columns = ["vocab"]
relational = pd.read_csv("../../../models_storage/word_embeddings_data/Relational.csv", sep='\n', header=None)
relational.columns = ["vocab"]
culture.vocab = culture.vocab.apply(lambda x: re.sub(',', '_', x))
demographic.vocab = demographic.vocab.apply(lambda x: re.sub(',', '_', x))
relational.vocab = relational.vocab.apply(lambda x: re.sub(',', '_', x))
##################################################
#generating semantic embeddings for the inq terms
d = {'terms': culture.vocab}
culture_df = pd.DataFrame(d)
culture_embeddings = model.encode(culture_df['terms'], verbose=True)
d = {'terms': demographic.vocab}
demographic_df = pd.DataFrame(d)
demographic_embeddings = model.encode(demographic_df['terms'], verbose=True)
d = {'terms': relational.vocab}
relational_df = | pd.DataFrame(d) | pandas.DataFrame |
#!python3
"""
Download gene expression data from the GDC (TCGA) database.
"""
import os
import errno
import logging
import re
import glob
import gzip
import shutil
import requests
import pandas as pd
logging.basicConfig(filename='./annotation/download.log', level=logging.INFO)
try:
os.chdir("/home/yizhou/dockers/RStudio/data/expression_count")
except BaseException:
os.chdir("C:/users/jzhou/Desktop/expression_count")
def downloadData(df, directory='./sep'):
"""Use manifest file to download data using GDC data API.
Arguments
df: [pandas data frame] of the manifest file downloaded from GDC website.
directory: a [str] showing the directory to store the downloaded data
"""
homeDir = os.getcwd()
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
os.chdir(directory)
fileNum = df.filename.count()
logging.info(f"Manifest file contains {fileNum} files.")
# exclude existing files
# change counts to FPKM if downloading FPKM data
existFile = glob.glob("./**/*.counts.*", recursive=True)
existFile = [
re.sub(r".*\/(.*\.txt)(\.gz)?$", r"\1.gz", x) for x in existFile
] # include unzipped files
fileNum = len(existFile)
logging.info(f"{fileNum} files already exist, downloading the rest...")
url = 'https://api.gdc.cancer.gov/data/'
df = df[~df.filename.isin(existFile)]
# download files
uuid = df.id.tolist()
uuid = [url + x for x in uuid]
fileNum = len(uuid)
for id in uuid:
os.system(f"curl --remote-name --remote-header-name {id}")
logging.info(f"Downloaded {fileNum} files to {directory}")
os.chdir(homeDir)
def uuidToBarcode(df, directory='./annotation'):
"""Use manifest file to retrieve barcode information using GDC API.
Arguments
df: a [pandas dataframe] of the manifest file used to download TCGA files.
directory: a [str] showing the directory to store annotation.tsv and annot.tsv
Return
annot: a [pandas dataframe] of more information, and
annotDict: a dict of {filename: barcode}.
"""
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
annotFile = glob.glob(f"{directory}/annotation.tsv", recursive=True)
if not annotFile:
uuid = df.id.tolist()
params = {
"filters": {
"op": "in",
"content": {
"field": "files.file_id",
"value": uuid
}
},
"format": "TSV",
# There must be no space after comma
"fields": "file_id,file_name,cases.samples.submitter_id,cases.samples.sample_type,cases.project.project_id,cases.diagnoses.tumor_stage,cases.case_id",
"size": len(uuid)
}
url = "https://api.gdc.cancer.gov/files"
r = requests.post(url, json=params) # API requires using POST method
with open(f"{directory}/annotation.tsv", "w") as f:
f.write(r.text) # save raw annotation file
annot = pd.read_table(f"{directory}/annotation.tsv")
annot = annot[[
'file_name', 'cases.0.project.project_id',
'cases.0.samples.0.submitter_id', 'cases.0.samples.0.sample_type',
'cases.0.diagnoses.0.tumor_stage'
]]
annot = annot.rename(columns={
'cases.0.project.project_id': 'project',
'cases.0.samples.0.submitter_id': 'barcode',
'cases.0.samples.0.sample_type': 'sample_type',
'cases.0.diagnoses.0.tumor_stage': 'tumor_stage'
})
annot.file_name = annot.file_name.str.replace(
'.gz', '') # regex in pandas dataframe
annot.project = annot.project.str.replace('TCGA.', '')
# sample type is taken from the digit at position 14 of the TCGA barcode: 0x codes are tumor, 1x codes are normal
annot.sample_type = pd.Series([int(x[-3]) for x in annot.barcode])
annot.loc[annot.sample_type == 0, 'sample_type'] = 'tumor'
annot.loc[annot.sample_type == 1, 'sample_type'] = 'normal'
annot.to_csv(f"{directory}/annot.tsv", index=False)
# efficiently transform to dict
annotDict = dict(zip(annot.file_name, annot.barcode))
return (annot, annotDict)
def unzipAll():
"""Unzip all txt.gz files downloaded by the GDC file transfer tool.
WARNING: will remove all zipfiles!
"""
for zipfile in glob.iglob('./**/*.gz', recursive=True):
newfile = re.sub('.gz$', '', zipfile)
with gzip.open(zipfile, 'rb') as f_in, open(newfile, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(zipfile)
def mergeData(annot, annotDict, directory="./results", filedir='./sep'):
"""Merge all the downloaded data by column
Arguments
annot: [pandas dataframe] annot from function `uuidToBarcode`
annotDict: [dict] from function `uuidToBarcode`
"""
# db = sqlite3.connect('./results/results.sql')
projects = annot.project.unique().tolist()
for project in projects:
# Normal
annotation = annot[(annot.project == project) &
(annot.sample_type == "normal")]
cases = annotation.file_name.tolist()
if len(cases) != 0:
df = pd.read_csv(
f'{filedir}/{cases[0]}',
sep='\t',
names=['ensembl', annotDict[cases[0]]])
cases.pop(0) # Get first case (ensembls) and remove it from list
for case in cases:
try:
dfSingle = pd.read_csv(
f'{filedir}/{case}',
sep="\t",
names=['ensembl', annotDict[case]])
df = | pd.merge(df, dfSingle, how='outer', on='ensembl') | pandas.merge |
import torch
import pandas as pd
import warnings
import os
import time
from .Classifier import Classifier
from .utils.general import rank_preds, merge_answers_by_rankings, merge_answers_by_probabilities
class Ensemble:
"""
Klasa reprezentująca komitet sieci neuronowych
"""
def __init__(self,
id: str,
labels: list,
ensemble_structure: dict,
save_dir: str = os.getcwd(),
device: str = "cpu"):
"""Konstuktor klasy
:param ensemble: Konfiguracja każdego modelu komitetu
:type ensemble: dict
"""
self.id = id
if not isinstance(labels, list) and not isinstance(labels, tuple):
raise ValueError("Labels should be list/tuple")
if not labels:
raise ValueError("Labels list cannot be empty")
if not isinstance(ensemble_structure, dict):
raise ValueError("Ensemble structure should be dictionary")
if ensemble_structure == {}:
raise ValueError("Ensemble structure cannot be empty")
if device not in ["cpu", "gpu"]:
raise ValueError("Device should be either cpu or gpu")
self.device = device
self.labels = labels
# self.classifiers = {}
# for subensemble_name, subensemble_items in self.ensemble.items():
# for model in subensemble_items["classifiers"]:
# self.classifiers[model.id] = model
# self.answers_dict = {c.id: None for c in classifiers}
self.ensemble = ensemble_structure
# self.ensemble_stats = pd.DataFrame(columns=self.labels)
# self.ensemble = {} if ensemble is None else ensemble
self.save_dir = os.path.join(save_dir, self.id) + '/' # os.path.join avoids requiring a trailing separator on save_dir
if not os.path.exists(self.save_dir):
os.makedirs(self.save_dir)
print(f"Outputs will be saved in {self.save_dir}")
def __repr__(self):
return self.ensemble
def __str__(self):
x = "Ensemble:\n"
if self.ensemble is None:
x = x + "empty"
else:
for sub in self.ensemble:
x = x + " " + sub + "\n\t\tlabels: \n"
for l in self.ensemble[sub]["labels"]:
x = x + "\t\t\t" + l + "\n"
x = x + "\t\tclassifiers: \n"
for m in self.ensemble[sub]["classifiers"]:
x = x + "\t\t\t" + m + "\n"
return x
def train(self,
train_df: pd.DataFrame,
val_df: pd.DataFrame,
data_dir: str,
score_function,
silent_mode=False):
"""
:return:
:rtype:
"""
if train_df.empty:
raise ValueError("DataFrame cannot be empty")
if not os.path.exists(data_dir):
raise ValueError("Data dir doesn't exist")
if not callable(score_function):
raise ValueError("Score should be function")
c_dict = {}
print("Training...")
for subensemble_name, subensemble_items in self.ensemble.items():
if not silent_mode:
print("├╴" + subensemble_name)
save_sub_dir = self.save_dir + subensemble_name + '/'
if not os.path.exists(save_sub_dir):
os.makedirs(save_sub_dir)
# subensemble models detecting --------------------------------------------
for model in subensemble_items["classifiers"]:
if not silent_mode:
print("│ └╴" + model.id)
save_dir = save_sub_dir + model.id + '/'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
if model.id in c_dict.keys():
continue
(_, train_stats, val_stats, _) = model.train(data_dir=data_dir,
save_dir=save_dir,
train_df=train_df,
val_df=val_df,
score_function=score_function)
c_dict[model.id] = True
# end subensemble models detecting --------------------------------------
def __call__(self,
test_df: pd.DataFrame,
data_dir: str,
silent_mode=False) -> (pd.DataFrame, pd.DataFrame, pd.DataFrame):
"""
:return:
:rtype:
"""
if test_df.empty:
raise ValueError("DataFrame cannot be empty")
if not os.path.exists(data_dir):
raise ValueError("Data dir doesn't exist")
answer_probabilities = | pd.DataFrame(columns=self.labels) | pandas.DataFrame |
import pytest
import pandas as pd
from sklearn import preprocessing
from primrose.transformers.categoricals import ImplicitCategoricalTransform
@pytest.fixture()
def data():
return | pd.DataFrame({"one": ["a", "b"], "two": ["c", "c"]}) | pandas.DataFrame |
import pandas as pd
import click
import requests
import lxml
from bs4 import BeautifulSoup
SLUG_HELP_TEXT = """Slug is <state><year> for assembly election or ls<year> for lok sabha election.
Examples
--------
1. For Tamil Nadu 2021 assembly election the slug is Tamilnadu2021.
2. For Lok Sabha Election 2019 the slug is ls2019.
3. You can also pass a URL for a particular page like https://myneta.info/Puducherry2021/index.php?action=show_candidates&constituency_id=4
"""
def get_url(slug):
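# Returns the argument unchanged when it is already a URL; plain slugs fall through (return None)
# and are expanded into myneta.info URLs by perform_for_group below.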
if slug.startswith(('http', 'www')):
return slug
def show_tables(dfs):
for df in dfs:
click.echo(df)
click.secho('--' * 10)
def format_table(df, output_file):
df.to_csv(output_file, index=False)
click.echo(f'Write the table to file {output_file}')
def get_details(body):
soup = BeautifulSoup(body)
try:
return soup.find_all('div', class_='title')[0].h3.contents[0].replace('(', '').split('-')[-1].strip().split(':')
except:
return '', ''
class SkipURL(Exception):
"""SkipURL when the no details found
"""
def get_constituency_details(url):
resp = requests.get(url)
if resp.ok:
constituency, district = get_details(resp.content)
if constituency and district:
return constituency.title(), district.title()
raise SkipURL(url)
def insert_candidate_info(picked_df, constituency, district):
picked_df['Constituency'] = [constituency] * len(picked_df)
picked_df['District'] = [district] * len(picked_df)
return picked_df
def perform_for_one_url(url, output_file=None, table_type='', print_tables=False, write_to_file=True):
try:
dfs = pd.read_html(url)
except (lxml.etree.XMLSyntaxError, ValueError):
click.secho(f'{url} is empty')
return None
table_type = table_type.title()
picked_df = None
if print_tables:
show_tables(dfs)
exit()
for df in dfs:
if table_type in df.columns:
picked_df = df
break
if isinstance(picked_df, pd.DataFrame):
try:
cons, district = get_constituency_details(url)
picked_df = insert_candidate_info(picked_df, cons, district)
except SkipURL as exc:
click.secho(exc)
return None
if write_to_file:
format_table(picked_df, output_file)
else:
return picked_df
else:
click.echo('Could find the table type')
return None
def perform_for_group(slug, output_file, table_type,
print_tables, total):
url = 'https://myneta.info/{slug}/index.php?action=show_candidates&constituency_id={idx}'
idx, completed = 1, 0
df = None
while completed != total:
full_url = url.format(slug=slug, idx=idx)
returned_df = perform_for_one_url(url=full_url, output_file=None, write_to_file=False,
table_type=table_type, print_tables=print_tables)
if isinstance(returned_df, pd.DataFrame):
completed += 1
if isinstance(df, pd.DataFrame):
df = | pd.concat([df, returned_df]) | pandas.concat |
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
from past.utils import old_div
import logging
import datetime as pydt
import pandas as pd
import time
import pymongo
import attrdict as ad
from enum import Enum
import emission.core.get_database as edb
def get_uuid_list():
return edb.get_usercache_db().distinct('user_id')
def get_plottable_df(user_id, loc_filter, start_dt, end_dt):
tempSection = ad.AttrDict()
tempSection.user_id = user_id
tempSection.loc_filter = loc_filter
if (start_dt is not None and end_dt is not None):
tempSection.start_ts = time.mktime(start_dt.timetuple()) * 1000
tempSection.end_ts = time.mktime(end_dt.timetuple()) * 1000
return get_points_for_section(tempSection)
from_micros = lambda x: pydt.datetime.fromtimestamp(old_div(x,1000))
def get_activities_for_section(section):
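# Query the usercache for 'background/activity' entries of this user/filter within the section's
# time window, merge each entry's metadata and data dicts, and return a dataframe with a readable
# timestamp column and an activity enum column.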
query = {"user_id": section.user_id,
"metadata.filter": section.loc_filter,
"metadata.key": "background/activity"}
start_ts = section.start_ts
end_ts = section.end_ts
query.update({'$and': [{'metadata.write_ts': {'$gt': start_ts}},
{'metadata.write_ts': {'$lt': end_ts}}]})
full_entries = list(edb.get_usercache_db().find(query))
merged_entries = [dict(list(entry["metadata"].items()) + list(entry["data"].items())) for entry in full_entries]
entries_df = pd.DataFrame(merged_entries)
entries_df['formatted_time'] = entries_df.write_ts.apply(from_micros)
entries_df['activity'] = entries_df.agb.apply(to_activity_enum)
return entries_df
def get_transitions_df(user_id, loc_filter, start_dt, end_dt):
query = {"user_id": user_id,
"metadata.filter": loc_filter,
"metadata.key": "statemachine/transition"}
if (start_dt is not None and end_dt is not None):
start_ts = time.mktime(start_dt.timetuple()) * 1000
end_ts = time.mktime(end_dt.timetuple()) * 1000
query.update({'$and': [{'metadata.write_ts': {'$gt': start_ts}},
{'metadata.write_ts': {'$lt': end_ts}}]})
full_entries = list(edb.get_usercache_db().find(query))
merged_entries = [dict(list(entry["metadata"].items()) + list(entry["data"].items())) for entry in full_entries]
entries_df = pd.DataFrame(merged_entries)
entries_df['formatted_time'] = entries_df.write_ts.apply(from_micros)
return entries_df
def get_points_for_transitions(user_id, transitions_df):
get_section = lambda transition: ad.AttrDict({'user_id': user_id,
'loc_filter': transition["filter"],
'start_ts': transition["write_ts"] - 10 * 60 * 1000,
'end_ts': transition["write_ts"] + 10})
get_last_point = lambda transition: get_points_for_section(get_section(transition)).iloc[-1]
return transitions_df.apply(get_last_point, axis=1)
def get_points_for_section(section):
query = {"user_id": section.user_id,
"metadata.filter": section.loc_filter,
"metadata.key": "background/location"}
try:
query.update({'$and': [{'data.mTime': {'$gt': section.start_ts}},
{'data.mTime': {'$lt': section.end_ts}}]})
except AttributeError:
logging.debug("Start and end times not defined, no time query")
print("final query = %s " % query)
# full_entries = list(edb.get_usercache_db().find(query).sort("data.mTime", pymongo.ASCENDING))
full_entries = list(edb.get_usercache_db().find(query))
merged_entries = [dict(list(entry["metadata"].items()) + list(entry["data"].items())) for entry in full_entries]
entries_df = | pd.DataFrame(merged_entries) | pandas.DataFrame |
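# --- editor's sketch (not part of any dataset row): the completion above builds a DataFrame
# --- from a list of merged metadata/data dicts. pandas.DataFrame accepts such a list directly,
# --- taking the union of keys as columns and filling missing keys with NaN. Field names below
# --- are illustrative only.
import pandas as pd
merged = [{"write_ts": 1000, "mLatitude": 37.7}, {"write_ts": 2000, "mLongitude": -122.4}]
points = pd.DataFrame(merged)
assert set(points.columns) == {"write_ts", "mLatitude", "mLongitude"}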
#!/usr/bin/env python
import os
import sys
import pandas as pd
import argparse
import configparser
import multiprocessing
import time
import datetime
from sqlalchemy import create_engine
from sqlalchemy.pool import NullPool
import tqdm
import statsmodels.stats.multitest as multitest
import snps
import genes
import interactions
import summary
import eqtls
import aFC
def parse_tissues(user_tissues, match_tissues, eqtl_project, db):
if eqtl_project:
sql = '''SELECT * FROM meta_eqtls WHERE project = '{}' '''.format(
eqtl_project)
else:
sql = '''SELECT * FROM meta_eqtls'''
df = pd.DataFrame()
with db.connect() as con:
df = pd.read_sql(sql, con=con)
db.dispose()
tissues = []
if match_tissues:
user_tissues = match_tissues[0]
if user_tissues:
matched_df = []
matched_tissues = []
to_omit = []
not_tissues = []
for u_tissue in user_tissues:
u_df = df[
(df['name'] == u_tissue) |
(df['tags'].str.contains(
r'\b{}\b'.format(u_tissue), case=False))
]
if u_df.empty:
if u_tissue.startswith('-'):
to_omit.append(u_tissue)
else:
not_tissues.append(u_tissue)
else:
matched_df.append(u_df)
matched_tissues.append(u_tissue)
error_msg = 'Program aborting:\n\t{}\nnot found in database.'
if (len(matched_df) == 0 or len(not_tissues) > 0) and len(to_omit) == 0:
print(error_msg.format('\n\t'.join(not_tissues)))
print('\nPlease use one of the following. ' +
'Tissue names are case sensitive:')
list_eqtl_tissues(db)
sys.exit()
user_df = pd.DataFrame()
if len(to_omit) > 0 and len(matched_tissues) == 0:
user_df = df
else:
user_df = pd.concat(matched_df)
if match_tissues:
for i in range(len(matched_tissues)):
user_df = user_df[
user_df['tags'].str.contains(
r'\b{}\b'.format(matched_tissues[i]), case=False)]
user_df = user_df.drop_duplicates()
for i in range(len(to_omit)):
user_df = user_df[
~user_df['tags'].str.contains(
r'\b{}\b'.format(to_omit[i][1:]), case=False)]
if len(user_df['project'].drop_duplicates()) > 1 and not eqtl_project:
# Ensure tissues are from same eQTL project
print('FATAL: eQTL tissues are from different projects. ',
'Add another tag to fine-tune match',
'or use \'--eqtl-project\' to specify project.')
print(user_df[['name', 'project']].to_string(index=False))
sys.exit()
tissues = user_df[['name', 'project']]
else: # Use GTEx database as default
tissues = df[df['project'] == 'GTEx'][[
'name', 'project']]
return tissues
def parse_hic(
match_tissues,
include_cell_line,
exclude_cell_line,
restriction_enzymes,
db):
''' Parse user parameters -r, -n and -x.
Args:
restriction_enzymes: space-delimited list of restriction enzymes from
the user. Limits the program to Hi-C libraries digested with the specified enzymes.
include_cell_line: space-delimited list of cell lines from -n.
exclude_cell_line: space-delimited list of cell lines from -x.
Returns:
hic_df: a dataframe with columns (library, enzyme, rep_count)
'''
sql = '''SELECT library, tags, enzyme, rep_count FROM meta_hic'''
df = pd.DataFrame()
with db.connect() as con:
df = pd.read_sql(sql, con=con)
db.dispose()
hic_df = pd.DataFrame()
if match_tissues:
matched_df = []
matched_tissues = []
to_omit = []
not_tissues = []
for u_tissue in match_tissues[0]:
u_df = df[
(df['library'] == u_tissue) |
(df['tags'].str.contains(
r'\b{}\b'.format(u_tissue), case=False))
]
if u_df.empty:
if u_tissue.startswith('-'):
to_omit.append(u_tissue)
else:
not_tissues.append(u_tissue)
else:
matched_df.append(u_df)
matched_tissues.append(u_tissue)
error_msg = 'Program aborting:\n\t{}\ndid not match any Hi-C library.'
if (len(matched_df) == 0 or len(not_tissues) > 0) and len(to_omit) == 0:
print(error_msg.format('\n\t'.join(not_tissues)))
print(('Use -t and -n to include specific eQTL tissues'
' and Hi-C libraries. Library names are case sensitive:'))
sys.exit('\n\t{}'.format('\n\t'.join(df['library'].tolist())))
if len(matched_df) == 0 and len(to_omit) > 0:
hic_df = df
else:
hic_df = | pd.concat(matched_df) | pandas.concat |
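# --- editor's sketch (not part of any dataset row): both parse_tissues and parse_hic above
# --- match user tags with Series.str.contains using a word-boundary regex (case-insensitive),
# --- then combine the matched frames with pandas.concat. A self-contained miniature of that
# --- matching step with invented library names:
import pandas as pd
meta = pd.DataFrame({"library": ["GM12878", "IMR90"],
                     "tags": ["blood lymphoblastoid", "lung fibroblast"]})
hit = meta[meta["tags"].str.contains(r"\bLung\b", case=False)]
assert hit["library"].tolist() == ["IMR90"]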
import pandas as pd
import matplotlib.pyplot as plt
import scipy.stats
from sklearn import linear_model
import statsmodels.api as sm
from scipy import stats
###################
yaara="723"
daniel = "957"
hilla="355"
generic_path = "/tmp/pycharm_project_"+hilla+"/"
#Full data
dfOp = pd.read_csv("/mnt/nadavrap-students/STS/data/Shapira_1st-Op_6_9_20_.csv")
groupOp = dfOp.groupby("SiteID")["SiteID"].count().reset_index(name='countFirst')
#draw a plot
x = groupOp["countFirst"]
plt.hist(x, bins=40)
plt.title("Histogram of count Operation")
plt.xlabel('number of Operations')
plt.ylabel('count of SiteId')
plt.show()
plt.savefig('Histogram of count Operation.png')
#ReOp data
dfReOp = pd.read_csv("/mnt/nadavrap-students/STS/data/Shapira_reOp_6_9_20_.csv")
groupReOp = dfReOp.groupby("SiteID")["SiteID"].count().reset_index(name='countReOp')
#draw a plot
y = groupReOp['countReOp']
plt.hist(y, bins=20)
plt.title("Histogram of count ReOperation")
plt.xlabel('number of ReOperations')
plt.ylabel('count of SiteId')
plt.show()
plt.savefig('Histogram of count ReOperation.png')
##merge two dataframes into one and gets the ratio between them
result = pd.merge(groupOp, groupReOp, on='SiteID', how='left')
result['countReOp'].fillna(0, inplace=True)
result["countReOp/countFirst+countReOp"] = (result["countReOp"] /(result["countReOp"]+ result["countFirst"])) *100
result['countReOp/countFirst+countReOp'].fillna(0, inplace=True)
result.to_csv(generic_path+"result.csv")
#draw a plot
z = result['countReOp/countFirst+countReOp']
plt.hist(z, bins=40)
plt.title("Histogram of ReOperation vs Operation")
plt.xlabel('% of ReOperation of Operation')
plt.ylabel('count of SiteId')
plt.show()
plt.savefig('Histogram of ReOperation vs Operation.png')
########### nadav recomend ###############
# import feather
# feather.write_dataframe(dfOp, "/tmp/pycharm_project_723/dfOp.feather")
# feather.write_dataframe(dfReOp, "/tmp/pycharm_project_723/dfReOp.feather")
# dfop1 = feather.read_dataframe("/tmp/pycharm_project_723/dfOp.feather")
# dfReOp1 = feather.read_dataframe("/tmp/pycharm_project_723/dfReOp.feather")
######mortality
MortaltyOp = dfOp.groupby('SiteID')['Mortalty'].apply(lambda x: (x== 1 ).sum()).reset_index(name='Mortalty_SiteID_op')
MortaltyReOp = dfReOp.groupby('SiteID')['Mortalty'].apply(lambda x: (x== 1 ).sum()).reset_index(name='Mortalty_SiteID_reOp')
result2 = pd.merge(MortaltyOp, MortaltyReOp, on='SiteID', how='left')
# result.merge(result2, on='SiteID')
df=pd.merge(result, result2, on='SiteID')
df["countOpr"] = result["countReOp"]+ result["countFirst"]
countOpr=df['countOpr']
df.to_csv(generic_path+"mortalty.csv")
####AGE
ageOp = dfOp.groupby("SiteID")["Age"].mean().reset_index(name='Mean_Age_op')
ageReOp = dfReOp.groupby("SiteID")["Age"].mean().reset_index(name='Mean_Age_reOp')
resultAge = pd.merge(ageOp, ageReOp, on='SiteID', how='left')
dfAge=pd.merge(result, resultAge, on='SiteID')
genderOp = pd.get_dummies(dfOp["Gender"]).rename(columns=lambda x: 'opGender_' + str(x))
dfOp=dfOp.join(genderOp)
genderReOp = pd.get_dummies(dfReOp["Gender"]).rename(columns=lambda x: 'reOpGender_' + str(x))
dfReOp=dfReOp.join(genderReOp)
genderOp_grouped_male = (dfOp.groupby("SiteID")["opGender_1.0"]).sum().reset_index(name='male_Op')
genderOp_grouped_female = (dfOp.groupby("SiteID")["opGender_2.0"]).sum().reset_index(name='female_Op')
dfMale=pd.merge(genderOp_grouped_male, genderOp_grouped_female, on='SiteID')
genderReOp_grouped_male = (dfReOp.groupby("SiteID")["reOpGender_1.0"]).sum().reset_index(name='male_reOp')
genderReOp_grouped_female = (dfReOp.groupby("SiteID")["reOpGender_2.0"]).sum().reset_index(name='female_reOp')
dfFemale=pd.merge(genderReOp_grouped_male, genderReOp_grouped_female, on='SiteID')
dfGender=pd.merge(dfMale, dfFemale, on='SiteID')
dfMerge=pd.merge(dfAge,dfGender, on='SiteID')
##FHCAD - family history of disease
FHCADOp = dfOp.groupby('SiteID')['FHCAD'].apply(lambda x: (x== 1 ).sum()).reset_index(name='FHCAD_op')
FHCADReOp =dfReOp.groupby('SiteID')['FHCAD'].apply(lambda x: (x== 1 ).sum()).reset_index(name='FHCAD_reOp')
resultFHCAD = pd.merge(FHCADOp, FHCADReOp, on='SiteID', how='left')
dfFHCAD =pd.merge(dfMerge, resultFHCAD, on='SiteID')
##Hypertn - blood preasure
HypertnOp = dfOp.groupby("SiteID")["Hypertn"].apply(lambda x: (x== 1 ).sum()).reset_index(name='Hypertn_op')
HypertnReOp = dfReOp.groupby("SiteID")["Hypertn"].apply(lambda x: (x== 1 ).sum()).reset_index(name='Hypertn_reOp')
resultHypertn = pd.merge(HypertnOp, HypertnReOp, on='SiteID', how='left')
dfHypertn =pd.merge(dfFHCAD, resultHypertn, on='SiteID')
##Diabetes
DiabetesOp = dfOp.groupby('SiteID')['Diabetes'].apply(lambda x: (x== 1 ).sum()).reset_index(name='Diabetes_op')
DiabetesReOp = dfReOp.groupby('SiteID')['Diabetes'].apply(lambda x: (x== 1 ).sum()).reset_index(name='Diabetes_reOp')
resultDiabetes = pd.merge(DiabetesOp, DiabetesReOp, on='SiteID', how='left')
dfDiabetes =pd.merge(dfHypertn, resultDiabetes, on='SiteID')
##Dyslip
DyslipOp = dfOp.groupby("SiteID")["Dyslip"].apply(lambda x: (x== 1 ).sum()).reset_index(name='Dyslip_op')
DyslipReOp = dfReOp.groupby("SiteID")["Dyslip"].apply(lambda x: (x== 1 ).sum()).reset_index(name='Dyslip_reOp')
resultDyslip = pd.merge(DyslipOp, DyslipReOp, on='SiteID', how='left')
dfDyslip =pd.merge(dfDiabetes, resultDyslip, on='SiteID')
##TobaccoUse
smokeEveryDayOp = dfOp.groupby("SiteID")["TobaccoUse"].apply(lambda x: ((x>= 2) & (x<6) ).sum()).reset_index(name='smoke_op')
smokeEveryDayReOp = dfReOp.groupby("SiteID")["TobaccoUse"].apply(lambda x: ((x>= 2) & (x<6) ).sum()).reset_index(name='smoke_reOp')
resultSmoke = pd.merge(smokeEveryDayOp, smokeEveryDayReOp, on='SiteID', how='left')
dfTobaccoUseResult =pd.merge(dfDyslip, resultSmoke, on='SiteID')
##Cancer
CancerOp = dfOp.groupby("SiteID")["Cancer"].apply(lambda x: (x== 1 ).sum()).reset_index(name='Cancer_op')
CancerReOp = dfReOp.groupby("SiteID")["Cancer"].apply(lambda x: (x== 1 ).sum()).reset_index(name='Cancer_reOp')
resultCancer = pd.merge(CancerOp, CancerReOp, on='SiteID', how='left')
dfCancer =pd.merge(dfTobaccoUseResult, resultCancer, on='SiteID')
##PVD
PVDOp = dfOp.groupby("SiteID")["PVD"].apply(lambda x: (x== 1 ).sum()).reset_index(name='PVD_op')
PVDReOp = dfReOp.groupby("SiteID")["PVD"].apply(lambda x: (x== 1 ).sum()).reset_index(name='PVD_reOp')
resultPVD = pd.merge(PVDOp, PVDReOp, on='SiteID', how='left')
dfPVD =pd.merge(dfCancer, resultPVD, on='SiteID')
dfPVD.to_csv(generic_path+"riskFactors.csv")
# df=pd.read_csv("mortalty.csv")
#reOp
# df['mortalPerReOp']=(df['Mortalty_SiteID_reOp']/df['countReOp'])*100
#df['prop']=df['countReOp/countFirst+countReOp']
# mortalPerReOp=df['mortalPerReOp']
# mortalPerOp=df['mortalPerOp']
df_mortality=pd.read_csv("mortalty.csv")
df_mortality['Mortalty_SiteID_reOp']=df_mortality['Mortalty_SiteID_reOp'].fillna(0)
df_mortality['prop']=df_mortality['countReOp']/df_mortality['countOpr']
#1
df_mortality.plot(kind='scatter', x='countOpr', y='Mortalty_SiteID_reOp', title="Mortality of reOp - total Ops")
plt.show()
plt.savefig('Mortality of reOp - total Ops.png')
#2
df_mortality.plot(kind='scatter', x='countReOp', y='Mortalty_SiteID_reOp', title="Mortality of reOp - reOps")
plt.show()
plt.savefig('Mortality of reOp - reOps.png')
#3
df_mortality.plot(kind='scatter', x='prop', y='Mortalty_SiteID_reOp', title="Mortality of reOp - reOp/(reOp+Ops)")
plt.show()
plt.savefig('Mortality of reOp - reOp_reOp+Ops.png')
###oP
#1
# df['mortalPerOp']=(df['Mortalty_SiteID_op']/df['countFirst'])*100
df_mortality.plot(kind='scatter', x='countOpr', y='Mortalty_SiteID_op', title="Mortality of op - total Ops")
plt.show()
plt.savefig('Mortality of op - total Ops.png')
#2
df_mortality.plot(kind='scatter', x='countFirst', y='Mortalty_SiteID_op', title="Mortality of op - ops")
plt.show()
plt.savefig('Mortality of op - ops.png')
#3
df_mortality.plot(kind='scatter', x='prop', y='Mortalty_SiteID_op', title="Mortality of op - reOp/(reOp+Ops)")
plt.show()
plt.savefig('Mortality of op - reOp_reOp+Ops.png')
#spearman
print("spearman")
print("Reop:")
print("pvalue ReOp all ops(include reOps) ",stats.spearmanr(a=df_mortality['Mortalty_SiteID_reOp'],b=df_mortality['countOpr'],nan_policy='omit')[1])
print("pvalue ReOp only reOps ",stats.spearmanr(a=df_mortality['countFirst'],b=df_mortality['countOpr'],nan_policy='omit')[1])
print("pvalue ReOp prop re/total " ,stats.spearmanr(a=df_mortality['Mortalty_SiteID_reOp'],b=df_mortality['prop'],nan_policy='omit')[1])
print()
#op
print("First op:")
print("pvalue FirstOp all ops(include reOps) ",stats.spearmanr(a=df_mortality['Mortalty_SiteID_op'],b=df_mortality['countOpr'],nan_policy='omit')[1])
print("pvalue FirstOp only Ops ",stats.spearmanr(a=df_mortality['Mortalty_SiteID_op'],b=df_mortality['countFirst'],nan_policy='omit')[1])
print("pvalue FirstOp prop re/total " , stats.spearmanr(a=df_mortality['Mortalty_SiteID_op'],b=df_mortality['prop'],nan_policy='omit')[1])
print("")
#pearson
print("pearson")
#reOp
print("Reop:")
print("pvalue ReOp all ops(include reOps) ",stats.pearsonr(x=df_mortality['Mortalty_SiteID_reOp'],y=df_mortality['countOpr'])[1])
print("pvalue ReOp only reOps ",stats.pearsonr(df_mortality['Mortalty_SiteID_reOp'],df_mortality['countFirst'])[1])
print("pvalue ReOp prop re/total " ,stats.pearsonr(df_mortality['Mortalty_SiteID_reOp'],df_mortality['prop'])[1])
#op
print("First op:")
print("pvalue FirstOp all ops(include reOps) ",stats.pearsonr(x=df_mortality['Mortalty_SiteID_op'],y=df_mortality['countOpr'])[1])
print("pvalue FirstOp only reOps ",stats.pearsonr(df_mortality['Mortalty_SiteID_op'],df_mortality['countFirst'])[1])
print("pvalue FirstOp prop re/total ",stats.pearsonr(df_mortality['Mortalty_SiteID_op'],df_mortality['prop'])[1])
df_risk= | pd.read_csv("riskFactors.csv") | pandas.read_csv |
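# --- editor's sketch (not part of any dataset row): the script above repeats one recipe many
# --- times: a per-site conditional count via groupby().apply(), then a left merge on SiteID.
# --- A compact, self-contained version of that recipe with synthetic data:
import pandas as pd
ops = pd.DataFrame({"SiteID": [1, 1, 2], "Mortalty": [1, 0, 1]})
mort = ops.groupby("SiteID")["Mortalty"].apply(lambda x: (x == 1).sum()).reset_index(name="mort_count")
counts = ops.groupby("SiteID")["SiteID"].count().reset_index(name="n_ops")
summary = pd.merge(counts, mort, on="SiteID", how="left")
assert summary.loc[summary.SiteID == 1, "mort_count"].item() == 1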
from django.shortcuts import render
from plotly.offline import plot
import plotly.graph_objects as go
import plotly.express as px
import pandas as pd
import os
def home(chart):
return render(chart, "index.html")
def engage(chart):
directory = os.getcwd() + "/simulation/engage_sector.xlsx"
data = pd.read_excel(directory)
df = pd.DataFrame(data)
fig = px.sunburst(df, path=['Changes', 'Sector'],
values='Count', width=600, height=500)
eng_sector = plot(fig, output_type='div')
directory = os.getcwd() + "/simulation/engage_location.xlsx"
data = pd.read_excel(directory)
df = pd.DataFrame(data)
fig = px.sunburst(df, path=['Changes', 'Location'],
values='Count', width=600, height=500)
eng_loc = plot(fig, output_type='div')
directory = os.getcwd() + "/simulation/engage_owner.xlsx"
data = pd.read_excel(directory)
df = pd.DataFrame(data)
fig = px.sunburst(df, path=['Changes', 'Owner'],
values='Count', width=600, height=500)
eng_own = plot(fig, output_type='div')
return render(chart, "engage.html", context={'eng_sector': eng_sector, 'eng_loc': eng_loc, 'eng_own': eng_own})
def qualify(chart):
directory = os.getcwd() + "/simulation/qualify_sector.xlsx"
data = pd.read_excel(directory)
df = pd.DataFrame(data)
fig = px.sunburst(df, path=['Changes', 'Sector'],
values='Count', width=600, height=500)
qual_sector = plot(fig, output_type='div')
directory = os.getcwd() + "/simulation/qualify_location.xlsx"
data = pd.read_excel(directory)
df = pd.DataFrame(data)
fig = px.sunburst(df, path=['Changes', 'Location'],
values='Count', width=600, height=500)
qual_loc = plot(fig, output_type='div')
directory = os.getcwd() + "/simulation/qualify_owner.xlsx"
data = pd.read_excel(directory)
df = pd.DataFrame(data)
fig = px.sunburst(df, path=['Changes', 'Owner'],
values='Count', width=600, height=500)
qual_own = plot(fig, output_type='div')
return render(chart, "qualify.html", context={'qual_sector': qual_sector, 'qual_loc': qual_loc, 'qual_own': qual_own})
def design(chart):
directory = os.getcwd() + "/simulation/design_sector.xlsx"
data = pd.read_excel(directory)
df = pd.DataFrame(data)
fig = px.sunburst(df, path=['Changes', 'Sector'],
values='Count', width=600, height=500)
des_sector = plot(fig, output_type='div')
directory = os.getcwd() + "/simulation/design_location.xlsx"
data = pd.read_excel(directory)
df = pd.DataFrame(data)
fig = px.sunburst(df, path=['Changes', 'Location'],
values='Count', width=600, height=500)
des_loc = plot(fig, output_type='div')
directory = os.getcwd() + "/simulation/design_owner.xlsx"
data = pd.read_excel(directory)
df = pd.DataFrame(data)
fig = px.sunburst(df, path=['Changes', 'Owner'],
values='Count', width=600, height=500)
des_own = plot(fig, output_type='div')
return render(chart, "design.html", context={'des_sector': des_sector, 'des_loc': des_loc, 'des_own': des_own})
def propose(chart):
directory = os.getcwd() + "/simulation/propose_sector.xlsx"
data = pd.read_excel(directory)
df = pd.DataFrame(data)
fig = px.sunburst(df, path=['Changes', 'Sector'],
values='Count', width=600, height=500)
prop_sector = plot(fig, output_type='div')
directory = os.getcwd() + "/simulation/propose_location.xlsx"
data = pd.read_excel(directory)
df = pd.DataFrame(data)
fig = px.sunburst(df, path=['Changes', 'Location'],
values='Count', width=600, height=500)
prop_loc = plot(fig, output_type='div')
directory = os.getcwd() + "/simulation/propose_owner.xlsx"
data = pd.read_excel(directory)
df = pd.DataFrame(data)
fig = px.sunburst(df, path=['Changes', 'Owner'],
values='Count', width=600, height=500)
prop_own = plot(fig, output_type='div')
return render(chart, "propose.html", context={'prop_sector': prop_sector, 'prop_loc': prop_loc, 'prop_own': prop_own})
def negotiate(chart):
directory = os.getcwd() + "/simulation/negotiate_sector.xlsx"
data = pd.read_excel(directory)
df = pd.DataFrame(data)
fig = px.sunburst(df, path=['Changes', 'Sector'],
values='Count', width=600, height=500)
neg_sector = plot(fig, output_type='div')
directory = os.getcwd() + "/simulation/negotiate_location.xlsx"
data = pd.read_excel(directory)
df = pd.DataFrame(data)
fig = px.sunburst(df, path=['Changes', 'Location'],
values='Count', width=600, height=500)
neg_loc = plot(fig, output_type='div')
directory = os.getcwd() + "/simulation/negotiate_owner.xlsx"
data = pd.read_excel(directory)
df = pd.DataFrame(data)
fig = px.sunburst(df, path=['Changes', 'Owner'],
values='Count', width=600, height=500)
neg_own = plot(fig, output_type='div')
return render(chart, "negotiate.html", context={'neg_sector': neg_sector, 'neg_loc': neg_loc, 'neg_own': neg_own})
def closing(chart):
directory = os.getcwd() + "/simulation/closing_sector.xlsx"
data = pd.read_excel(directory)
df = pd.DataFrame(data)
fig = px.sunburst(df, path=['Changes', 'Sector'],
values='Count', width=600, height=500)
close_sector = plot(fig, output_type='div')
directory = os.getcwd() + "/simulation/closing_location.xlsx"
data = pd.read_excel(directory)
df = | pd.DataFrame(data) | pandas.DataFrame |
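# --- editor's sketch (not part of any dataset row): every view above repeats the same steps:
# --- pandas.read_excel -> plotly.express.sunburst -> plotly.offline.plot(..., output_type='div').
# --- A reusable helper capturing that pattern (read_excel already returns a DataFrame, so the
# --- extra pd.DataFrame(data) wrapper in the views is redundant). Argument names are the
# --- editor's placeholders, not from the source.
import pandas as pd
import plotly.express as px
from plotly.offline import plot
def sunburst_div(xlsx_path, inner_col, outer_col):
    df = pd.read_excel(xlsx_path)
    fig = px.sunburst(df, path=[inner_col, outer_col], values='Count', width=600, height=500)
    return plot(fig, output_type='div')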
import inspect
import os
from unittest.mock import MagicMock, patch
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from evalml.model_understanding.graphs import visualize_decision_tree
from evalml.pipelines.components import ComponentBase
from evalml.utils.gen_utils import (
SEED_BOUNDS,
_convert_to_woodwork_structure,
_convert_woodwork_types_wrapper,
_rename_column_names_to_numeric,
classproperty,
convert_to_seconds,
drop_rows_with_nans,
get_importable_subclasses,
get_random_seed,
import_or_raise,
infer_feature_types,
jupyter_check,
pad_with_nans,
save_plot
)
@patch('importlib.import_module')
def test_import_or_raise_errors(dummy_importlib):
def _mock_import_function(library_str):
if library_str == "_evalml":
raise ImportError("Mock ImportError executed!")
if library_str == "attr_error_lib":
raise Exception("Mock Exception executed!")
dummy_importlib.side_effect = _mock_import_function
with pytest.raises(ImportError, match="Missing optional dependency '_evalml'"):
import_or_raise("_evalml")
with pytest.raises(ImportError, match="Missing optional dependency '_evalml'. Please use pip to install _evalml. Additional error message"):
import_or_raise("_evalml", "Additional error message")
with pytest.raises(Exception, match="An exception occurred while trying to import `attr_error_lib`: Mock Exception executed!"):
import_or_raise("attr_error_lib")
def test_import_or_raise_imports():
math = import_or_raise("math", "error message")
assert math.ceil(0.1) == 1
def test_convert_to_seconds():
assert convert_to_seconds("10 s") == 10
assert convert_to_seconds("10 sec") == 10
assert convert_to_seconds("10 second") == 10
assert convert_to_seconds("10 seconds") == 10
assert convert_to_seconds("10 m") == 600
assert convert_to_seconds("10 min") == 600
assert convert_to_seconds("10 minute") == 600
assert convert_to_seconds("10 minutes") == 600
assert convert_to_seconds("10 h") == 36000
assert convert_to_seconds("10 hr") == 36000
assert convert_to_seconds("10 hour") == 36000
assert convert_to_seconds("10 hours") == 36000
with pytest.raises(AssertionError, match="Invalid unit."):
convert_to_seconds("10 years")
def test_get_random_seed_rng():
def make_mock_random_state(return_value):
class MockRandomState(np.random.RandomState):
def __init__(self):
self.min_bound = None
self.max_bound = None
super().__init__()
def randint(self, min_bound, max_bound):
self.min_bound = min_bound
self.max_bound = max_bound
return return_value
return MockRandomState()
rng = make_mock_random_state(42)
assert get_random_seed(rng) == 42
assert rng.min_bound == SEED_BOUNDS.min_bound
assert rng.max_bound == SEED_BOUNDS.max_bound
def test_get_random_seed_int():
# ensure the invariant "min_bound < max_bound" is enforced
with pytest.raises(ValueError):
get_random_seed(0, min_bound=0, max_bound=0)
with pytest.raises(ValueError):
get_random_seed(0, min_bound=0, max_bound=-1)
# test default boundaries to show the provided value should modulate within the default range
assert get_random_seed(SEED_BOUNDS.max_bound - 2) == SEED_BOUNDS.max_bound - 2
assert get_random_seed(SEED_BOUNDS.max_bound - 1) == SEED_BOUNDS.max_bound - 1
assert get_random_seed(SEED_BOUNDS.max_bound) == SEED_BOUNDS.min_bound
assert get_random_seed(SEED_BOUNDS.max_bound + 1) == SEED_BOUNDS.min_bound + 1
assert get_random_seed(SEED_BOUNDS.max_bound + 2) == SEED_BOUNDS.min_bound + 2
assert get_random_seed(SEED_BOUNDS.min_bound - 2) == SEED_BOUNDS.max_bound - 2
assert get_random_seed(SEED_BOUNDS.min_bound - 1) == SEED_BOUNDS.max_bound - 1
assert get_random_seed(SEED_BOUNDS.min_bound) == SEED_BOUNDS.min_bound
assert get_random_seed(SEED_BOUNDS.min_bound + 1) == SEED_BOUNDS.min_bound + 1
assert get_random_seed(SEED_BOUNDS.min_bound + 2) == SEED_BOUNDS.min_bound + 2
# vectorize get_random_seed via a wrapper for easy evaluation
default_min_bound = inspect.signature(get_random_seed).parameters['min_bound'].default
default_max_bound = inspect.signature(get_random_seed).parameters['max_bound'].default
assert default_min_bound == SEED_BOUNDS.min_bound
assert default_max_bound == SEED_BOUNDS.max_bound
def get_random_seed_vec(min_bound=None, max_bound=None): # passing None for either means no value is provided to get_random_seed
def get_random_seed_wrapper(random_seed):
return get_random_seed(random_seed,
min_bound=min_bound if min_bound is not None else default_min_bound,
max_bound=max_bound if max_bound is not None else default_max_bound)
return np.vectorize(get_random_seed_wrapper)
# ensure that regardless of the setting of min_bound and max_bound, the output of get_random_seed always stays
# between the min_bound (inclusive) and max_bound (exclusive), and wraps neatly around that range using modular arithmetic.
vals = np.arange(-100, 100)
def make_expected_values(vals, min_bound, max_bound):
return np.array([i if (min_bound <= i and i < max_bound) else ((i - min_bound) % (max_bound - min_bound)) + min_bound
for i in vals])
np.testing.assert_equal(get_random_seed_vec(min_bound=None, max_bound=None)(vals),
make_expected_values(vals, min_bound=SEED_BOUNDS.min_bound, max_bound=SEED_BOUNDS.max_bound))
np.testing.assert_equal(get_random_seed_vec(min_bound=None, max_bound=10)(vals),
make_expected_values(vals, min_bound=SEED_BOUNDS.min_bound, max_bound=10))
np.testing.assert_equal(get_random_seed_vec(min_bound=-10, max_bound=None)(vals),
make_expected_values(vals, min_bound=-10, max_bound=SEED_BOUNDS.max_bound))
np.testing.assert_equal(get_random_seed_vec(min_bound=0, max_bound=5)(vals),
make_expected_values(vals, min_bound=0, max_bound=5))
np.testing.assert_equal(get_random_seed_vec(min_bound=-5, max_bound=0)(vals),
make_expected_values(vals, min_bound=-5, max_bound=0))
np.testing.assert_equal(get_random_seed_vec(min_bound=-5, max_bound=5)(vals),
make_expected_values(vals, min_bound=-5, max_bound=5))
np.testing.assert_equal(get_random_seed_vec(min_bound=5, max_bound=10)(vals),
make_expected_values(vals, min_bound=5, max_bound=10))
np.testing.assert_equal(get_random_seed_vec(min_bound=-10, max_bound=-5)(vals),
make_expected_values(vals, min_bound=-10, max_bound=-5))
def test_class_property():
class MockClass:
name = "MockClass"
@classproperty
def caps_name(cls):
return cls.name.upper()
assert MockClass.caps_name == "MOCKCLASS"
def test_get_importable_subclasses_wont_get_custom_classes():
class ChildClass(ComponentBase):
pass
assert ChildClass not in get_importable_subclasses(ComponentBase)
@patch('importlib.import_module')
def test_import_or_warn_errors(dummy_importlib):
def _mock_import_function(library_str):
if library_str == "_evalml":
raise ImportError("Mock ImportError executed!")
if library_str == "attr_error_lib":
raise Exception("Mock Exception executed!")
dummy_importlib.side_effect = _mock_import_function
with pytest.warns(UserWarning, match="Missing optional dependency '_evalml'"):
import_or_raise("_evalml", warning=True)
with pytest.warns(UserWarning, match="Missing optional dependency '_evalml'. Please use pip to install _evalml. Additional error message"):
import_or_raise("_evalml", "Additional error message", warning=True)
with pytest.warns(UserWarning, match="An exception occurred while trying to import `attr_error_lib`: Mock Exception executed!"):
import_or_raise("attr_error_lib", warning=True)
@patch('evalml.utils.gen_utils.import_or_raise')
def test_jupyter_check_errors(mock_import_or_raise):
mock_import_or_raise.side_effect = ImportError
assert not jupyter_check()
mock_import_or_raise.side_effect = Exception
assert not jupyter_check()
@patch('evalml.utils.gen_utils.import_or_raise')
def test_jupyter_check(mock_import_or_raise):
mock_import_or_raise.return_value = MagicMock()
mock_import_or_raise().core.getipython.get_ipython.return_value = True
assert jupyter_check()
mock_import_or_raise().core.getipython.get_ipython.return_value = False
assert not jupyter_check()
mock_import_or_raise().core.getipython.get_ipython.return_value = None
assert not jupyter_check()
def _check_equality(data, expected, check_index_type=True):
if isinstance(data, pd.Series):
pd.testing.assert_series_equal(data, expected, check_index_type)
else:
pd.testing.assert_frame_equal(data, expected, check_index_type)
@pytest.mark.parametrize("data,num_to_pad,expected",
[(pd.Series([1, 2, 3]), 1, pd.Series([np.nan, 1, 2, 3])),
(pd.Series([1, 2, 3]), 0, pd.Series([1, 2, 3])),
(pd.Series([1, 2, 3, 4], index=pd.date_range("2020-10-01", "2020-10-04")),
2, pd.Series([np.nan, np.nan, 1, 2, 3, 4])),
(pd.DataFrame({"a": [1., 2., 3.], "b": [4., 5., 6.]}), 0,
pd.DataFrame({"a": [1., 2., 3.], "b": [4., 5., 6.]})),
(pd.DataFrame({"a": [4, 5, 6], "b": ["a", "b", "c"]}), 1,
pd.DataFrame({"a": [np.nan, 4, 5, 6], "b": [np.nan, "a", "b", "c"]})),
(pd.DataFrame({"a": [1, 0, 1]}), 2,
pd.DataFrame({"a": [np.nan, np.nan, 1, 0, 1]}))])
def test_pad_with_nans(data, num_to_pad, expected):
padded = pad_with_nans(data, num_to_pad)
_check_equality(padded, expected)
def test_pad_with_nans_with_series_name():
name = "data to pad"
data = pd.Series([1, 2, 3], name=name)
padded = pad_with_nans(data, 1)
_check_equality(padded, pd.Series([np.nan, 1, 2, 3], name=name))
@pytest.mark.parametrize("data, expected",
[([pd.Series([None, 1., 2., 3]), pd.DataFrame({"a": [1., 2., 3, None]})],
[pd.Series([1., 2.], index=pd.Int64Index([1, 2])),
pd.DataFrame({"a": [2., 3.]}, index=pd.Int64Index([1, 2]))]),
([pd.Series([None, 1., 2., 3]), pd.DataFrame({"a": [3., 4., None, None]})],
[pd.Series([1.], index=pd.Int64Index([1])),
pd.DataFrame({"a": [4.]}, index=pd.Int64Index([1]))]),
([pd.DataFrame(), pd.Series([None, 1., 2., 3.])],
[pd.DataFrame(), pd.Series([1., 2., 3.], index=pd.Int64Index([1, 2, 3]))]),
([pd.DataFrame({"a": [1., 2., None]}), pd.Series([])],
[pd.DataFrame({"a": [1., 2.]}), pd.Series([])])
])
def test_drop_nan(data, expected):
no_nan_1, no_nan_2 = drop_rows_with_nans(*data)
_check_equality(no_nan_1, expected[0], check_index_type=False)
_check_equality(no_nan_2, expected[1], check_index_type=False)
def test_rename_column_names_to_numeric():
X = np.array([[1, 2], [3, 4]])
pd.testing.assert_frame_equal(_rename_column_names_to_numeric(X), pd.DataFrame(X))
X = | pd.DataFrame({"<>": [1, 2], ">>": [2, 4]}) | pandas.DataFrame |
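# --- editor's sketch (not part of any dataset row): the tests above lean on pandas' own testing
# --- helpers; a minimal, self-contained reminder of how assert_frame_equal / assert_series_equal
# --- compare expected and actual results (they raise AssertionError on mismatch):
import pandas as pd
expected = pd.DataFrame({"a": [1.0, 2.0]})
actual = pd.DataFrame({"a": [1.0, 2.0]})
pd.testing.assert_frame_equal(actual, expected)
pd.testing.assert_series_equal(actual["a"], expected["a"])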
import pandas as pd;
from datasets import load_dataset
dataset = load_dataset(
'winogrande', 'winogrande_l', split='train')
smaller_dataset = dataset.filter(lambda e, i: i<5000, with_indices=True)
n = 5000
total_phrases = pd.Series(smaller_dataset[:n]['sentence'])
all_option1 = pd.Series(smaller_dataset[:n]['option1'])
all_option2 = pd.Series(smaller_dataset[:n]['option2'])
all_answers = | pd.Series(smaller_dataset[:n]['answer']) | pandas.Series |
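# --- editor's sketch (not part of any dataset row): the four pandas.Series built above are
# --- naturally combined into one DataFrame for downstream use; a self-contained illustration
# --- with a single hand-written Winogrande-style example (column names are the editor's choice):
import pandas as pd
df = pd.DataFrame({
    "sentence": pd.Series(["The trophy doesn't fit in the suitcase because _ is too big."]),
    "option1": pd.Series(["trophy"]),
    "option2": pd.Series(["suitcase"]),
    "answer": pd.Series(["1"]),
})
assert df.shape == (1, 4)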
# coding=utf-8
# Author: <NAME>
# Date: Jul 05, 2019
#
# Description: Maps DE genes to String-DB. Keeps only those genes that we want.
#
# NOTE: For some reason, "dmelanogaster_gene_ensembl" did not retrieve all gene names. Some were manually added at the end.
#
import math
import pandas as pd
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
from utils import open_undefined_last_column_files, ensurePathExists
from pybiomart import Dataset
def combine_id_string_x_with_id_string_y(r):
x = r['id_string_x']
y = r['id_string_y']
if isinstance(x, list):
return x
elif not pd.isna(x):
return x
else:
return y
if __name__ == '__main__':
#
# [H]omo [S]apiens (9606) - [A]liases
#
print('Mapping HS')
# Query bioMart for Gene Name/Description
ds_HS = Dataset(name='hsapiens_gene_ensembl', host='http://www.ensembl.org')
df_HS_G = ds_HS.query(attributes=['ensembl_gene_id', 'external_gene_name', 'gene_biotype', 'description']).set_index('Gene stable ID')
rCSVFileCG = "../01-diff-gene-exp/results/HS/HS-DGE_Cyte_vs_Gonia.csv"
rCSVFileCT = "../01-diff-gene-exp/results/HS/HS-DGE_Tid_vs_Cyte.csv"
df_HS_CG = pd.read_csv(rCSVFileCG, index_col=0).loc[:, ['logFC', 'logCPM', 'FDR']]
df_HS_CG.index.name = 'id_gene'
df_HS_CG.index = df_HS_CG.index.map(lambda x: x.split('.')[0])
df_HS_CG.columns = [x + '_CyteGonia' for x in df_HS_CG.columns]
df_HS_CT = | pd.read_csv(rCSVFileCT, index_col=0) | pandas.read_csv |
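# --- editor's sketch (not part of any dataset row): the script above repeatedly reads DE tables
# --- with pandas.read_csv(..., index_col=0), strips the Ensembl version suffix, keeps three
# --- columns and suffixes them per contrast. A compact, self-contained version of that recipe:
import io
import pandas as pd
csv = io.StringIO("id_gene,logFC,logCPM,FDR\nENSG000001.5,1.2,3.4,0.01\n")
df = pd.read_csv(csv, index_col=0).loc[:, ['logFC', 'logCPM', 'FDR']]
df.index = df.index.map(lambda x: x.split('.')[0])
df.columns = [c + '_CyteGonia' for c in df.columns]
assert df.index[0] == 'ENSG000001'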
# from sklearn.cluster import KMeans
from sklearn.cross_decomposition import CCA
from sklearn.cross_decomposition import PLSCanonical
from sklearn.cross_decomposition import PLSRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
# from sklearn.decomposition import FactorAnalysis
# from sklearn.decomposition import PCA
from sklearn.dummy import DummyClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.linear_model import ARDRegression
from sklearn.linear_model import BayesianRidge
from sklearn.linear_model import ElasticNet
from sklearn.linear_model import HuberRegressor
from sklearn.linear_model import Lars
from sklearn.linear_model import Lasso
from sklearn.linear_model import LassoLars
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import MultiTaskElasticNet
from sklearn.linear_model import MultiTaskLasso
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import RANSACRegressor
from sklearn.linear_model import RidgeClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import TheilSenRegressor
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
# from sklearn.metrics import accuracy_score
# from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
# from sklearn.multioutput import MultiOutputClassifier
# from sklearn.multioutput import MultiOutputRegressor
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import ComplementNB
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
# from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors import RadiusNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import LabelEncoder
# from sklearn.semi_supervised import LabelPropagation
# from sklearn.semi_supervised import LabelSpreading
from sklearn.svm import LinearSVC
# from sklearn.svm import NuSVC
# from sklearn.svm import SVC
# from sklearn.svm import SVR
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
import codecs as cd
import numpy as np
import pandas as pd
import sys
'''
x_train = dataset[:,0:10]
y_train = dataset[:,10:]
seed = 1
x_train, x_test, y_train, y_test = train_test_split(x_train, y_train, test_size=0.33, random_state=seed)
# identify feature and response variables and values must be numeric and numpy arrays
# assumed have X (predictor) and Y (target) for training dataset and x_test (predictor) of test dataset
x_train = input_variables_values_training_datasets
y_train = target_variables_values_training_datasets
x_test = input_variables_values_test_datasets
'''
# def supervised_learning(x_train, y_train, x_test, method):
# return predicted
'''
# assumed have X (attributes) for training dataset and x_test (attributes) of test dataset
def unsupervised_learning(x_train, x_test):
# create model object
model = KMeans(n_clusters=3, random_state=0)
# train the model using the training sets and check score
model.fit(X)
# predict output
predicted = model.predict(x_test)
def dimensionality_reduction(x_train, x_test):
# create model object
k = min(n_sample, n_features)
model = PCA(n_components=k)
model = FactorAnalysis()
# reduced the dimension of training dataset
train_reduced = model.fit_transform(x_train)
# reduced the dimension of test dataset
test_reduced = model.transform(x_test)
'''
#'../data/small-csv/train-most10.csv'
#'../data/small-csv/dev-most10.csv'
# train_dataset = '../data/medium-csv/train-most50.csv'
# test_dataset = '../data/medium-csv/dev-most50.csv'
train_dataset = '../data/large-csv/train-best200.csv'
test_dataset = '../data/large-csv/dev-best200.csv'
methods = {
1 : 'classification',
2 : 'regression'
}
classifiers = {
1: 'decision tree',
2: 'extra tree',
3: 'extra trees',
4: 'k nearest neighbor',
5: 'naive bayes',
6: 'radius neighbors',
7: 'random forest',
8: 'support vector machine',
9: 'gradient boosting',
10: 'gaussian process',
11: 'stochastic gradient descent',
12: 'passive aggressive',
13: 'nearest centroid',
14: 'perceptron',
15: 'multi-layer perceptron',
16: 'ada boost',
17: 'dummy'
}
regressors = {
1: 'linear discriminant analysis',
2: 'logistic regression',
3: 'ridge regression',
4: 'quadratic discriminant analysis',
5: 'linear regression',
6: 'decision tree regression',
7: 'lasso',
8: 'multi-task lasso',
9: 'elastic net',
10: 'multi-task elastic net',
11: 'least angle regression',
12: 'least angle regression lasso',
13: 'orthogonal matching pursuit',
14: 'bayesian ridge',
15: 'automatic relevence determination',
16: 'theil sen regression',
17: 'huber regressor',
18: 'random sample consensus'
}
def main():
# Checks for correct number of arguments
if len(sys.argv) != 3:
print('usage: ./troll_identifier.py [TRAIN DATASET] [TEST/DEV DATASET]')
sys.exit()
# set up dataset
data_train = pd.read_csv(sys.argv[1])
data_test = | pd.read_csv(sys.argv[2]) | pandas.read_csv |
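# --- editor's sketch (not part of any dataset row): the script above imports dozens of
# --- estimators, but the core flow for each is the same: encode labels, fit on the training
# --- frame, predict on the dev frame, report accuracy. A self-contained miniature of that flow
# --- (synthetic data; DecisionTreeClassifier chosen arbitrarily):
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
train = pd.DataFrame({"f1": [0, 1, 0, 1], "f2": [1, 1, 0, 0],
                      "label": ["troll", "real", "troll", "real"]})
y = LabelEncoder().fit_transform(train["label"])
clf = DecisionTreeClassifier(random_state=0).fit(train[["f1", "f2"]], y)
print(accuracy_score(y, clf.predict(train[["f1", "f2"]])))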
"""
Routines for casting.
"""
from contextlib import suppress
from datetime import date, datetime, timedelta
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Sequence,
Set,
Sized,
Tuple,
Type,
Union,
)
import numpy as np
from pandas._libs import lib, tslib, tslibs
from pandas._libs.tslibs import (
NaT,
OutOfBoundsDatetime,
Period,
Timedelta,
Timestamp,
conversion,
iNaT,
ints_to_pydatetime,
ints_to_pytimedelta,
)
from pandas._libs.tslibs.timezones import tz_compare
from pandas._typing import AnyArrayLike, ArrayLike, Dtype, DtypeObj, Scalar, Shape
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
INT64_DTYPE,
POSSIBLY_CAST_DTYPES,
TD64NS_DTYPE,
ensure_int8,
ensure_int16,
ensure_int32,
ensure_int64,
ensure_object,
ensure_str,
is_bool,
is_bool_dtype,
is_categorical_dtype,
is_complex,
is_complex_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_integer,
is_integer_dtype,
is_numeric_dtype,
is_object_dtype,
is_scalar,
is_sparse,
is_string_dtype,
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_unsigned_integer_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import (
DatetimeTZDtype,
ExtensionDtype,
IntervalDtype,
PeriodDtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeArray,
ABCDatetimeIndex,
ABCExtensionArray,
ABCPeriodArray,
ABCPeriodIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import is_list_like
from pandas.core.dtypes.missing import (
is_valid_nat_for_dtype,
isna,
na_value_for_dtype,
notna,
)
if TYPE_CHECKING:
from pandas import Series
from pandas.core.arrays import ExtensionArray
from pandas.core.indexes.base import Index
_int8_max = np.iinfo(np.int8).max
_int16_max = np.iinfo(np.int16).max
_int32_max = np.iinfo(np.int32).max
_int64_max = np.iinfo(np.int64).max
def maybe_convert_platform(values):
""" try to do platform conversion, allow ndarray or list here """
if isinstance(values, (list, tuple, range)):
values = construct_1d_object_array_from_listlike(values)
if getattr(values, "dtype", None) == np.object_:
if hasattr(values, "_values"):
values = values._values
values = lib.maybe_convert_objects(values)
return values
def is_nested_object(obj) -> bool:
"""
return a boolean if we have a nested object, e.g. a Series with 1 or
more Series elements
This may not be necessarily be performant.
"""
if isinstance(obj, ABCSeries) and is_object_dtype(obj.dtype):
if any(isinstance(v, ABCSeries) for v in obj._values):
return True
return False
def maybe_box_datetimelike(value: Scalar, dtype: Optional[Dtype] = None) -> Scalar:
"""
Cast scalar to Timestamp or Timedelta if scalar is datetime-like
and dtype is not object.
Parameters
----------
value : scalar
dtype : Dtype, optional
Returns
-------
scalar
"""
if dtype == object:
pass
elif isinstance(value, (np.datetime64, datetime)):
value = tslibs.Timestamp(value)
elif isinstance(value, (np.timedelta64, timedelta)):
value = tslibs.Timedelta(value)
return value
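# editor's note (not part of the original pandas source): the behaviour implemented above,
# in brief --
#   maybe_box_datetimelike(np.datetime64("2020-01-01"))               -> wrapped in Timestamp
#   maybe_box_datetimelike(np.timedelta64(1, "D"))                    -> wrapped in Timedelta
#   maybe_box_datetimelike(np.datetime64("2020-01-01"), dtype=object) -> returned unchanged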
def maybe_downcast_to_dtype(result, dtype: Union[str, np.dtype]):
"""
try to cast to the specified dtype (e.g. convert back to bool/int
or could be an astype of float64->float32
"""
do_round = False
if is_scalar(result):
return result
elif isinstance(result, ABCDataFrame):
# occurs in pivot_table doctest
return result
if isinstance(dtype, str):
if dtype == "infer":
inferred_type = lib.infer_dtype(ensure_object(result), skipna=False)
if inferred_type == "boolean":
dtype = "bool"
elif inferred_type == "integer":
dtype = "int64"
elif inferred_type == "datetime64":
dtype = "datetime64[ns]"
elif inferred_type == "timedelta64":
dtype = "timedelta64[ns]"
# try to upcast here
elif inferred_type == "floating":
dtype = "int64"
if issubclass(result.dtype.type, np.number):
do_round = True
else:
dtype = "object"
dtype = np.dtype(dtype)
elif dtype.type is Period:
from pandas.core.arrays import PeriodArray
with suppress(TypeError):
# e.g. TypeError: int() argument must be a string, a
# bytes-like object or a number, not 'Period
return PeriodArray(result, freq=dtype.freq)
converted = maybe_downcast_numeric(result, dtype, do_round)
if converted is not result:
return converted
# a datetimelike
# GH12821, iNaT is cast to float
if dtype.kind in ["M", "m"] and result.dtype.kind in ["i", "f"]:
if hasattr(dtype, "tz"):
# not a numpy dtype
if dtype.tz:
# convert to datetime and change timezone
from pandas import to_datetime
result = to_datetime(result).tz_localize("utc")
result = result.tz_convert(dtype.tz)
else:
result = result.astype(dtype)
return result
def maybe_downcast_numeric(result, dtype: DtypeObj, do_round: bool = False):
"""
Subset of maybe_downcast_to_dtype restricted to numeric dtypes.
Parameters
----------
result : ndarray or ExtensionArray
dtype : np.dtype or ExtensionDtype
do_round : bool
Returns
-------
ndarray or ExtensionArray
"""
if not isinstance(dtype, np.dtype):
# e.g. SparseDtype has no itemsize attr
return result
if isinstance(result, list):
# reached via groupby.agg._ohlc; really this should be handled earlier
result = np.array(result)
def trans(x):
if do_round:
return x.round()
return x
if dtype.kind == result.dtype.kind:
# don't allow upcasts here (except if empty)
if result.dtype.itemsize <= dtype.itemsize and result.size:
return result
if is_bool_dtype(dtype) or is_integer_dtype(dtype):
if not result.size:
# if we don't have any elements, just astype it
return trans(result).astype(dtype)
# do a test on the first element, if it fails then we are done
r = result.ravel()
arr = np.array([r[0]])
if isna(arr).any():
# if we have any nulls, then we are done
return result
elif not isinstance(r[0], (np.integer, np.floating, int, float, bool)):
# a comparable, e.g. a Decimal may slip in here
return result
if (
issubclass(result.dtype.type, (np.object_, np.number))
and notna(result).all()
):
new_result = trans(result).astype(dtype)
if new_result.dtype.kind == "O" or result.dtype.kind == "O":
# np.allclose may raise TypeError on object-dtype
if (new_result == result).all():
return new_result
else:
if np.allclose(new_result, result, rtol=0):
return new_result
elif (
issubclass(dtype.type, np.floating)
and not is_bool_dtype(result.dtype)
and not is_string_dtype(result.dtype)
):
return result.astype(dtype)
return result
def maybe_cast_result(
result: ArrayLike, obj: "Series", numeric_only: bool = False, how: str = ""
) -> ArrayLike:
"""
Try casting result to a different type if appropriate
Parameters
----------
result : array-like
Result to cast.
obj : Series
Input Series from which result was calculated.
numeric_only : bool, default False
Whether to cast only numerics or datetimes as well.
how : str, default ""
How the result was computed.
Returns
-------
result : array-like
result maybe casted to the dtype.
"""
dtype = obj.dtype
dtype = maybe_cast_result_dtype(dtype, how)
assert not is_scalar(result)
if (
is_extension_array_dtype(dtype)
and not is_categorical_dtype(dtype)
and dtype.kind != "M"
):
# We have to special case categorical so as not to upcast
# things like counts back to categorical
cls = dtype.construct_array_type()
result = maybe_cast_to_extension_array(cls, result, dtype=dtype)
elif numeric_only and is_numeric_dtype(dtype) or not numeric_only:
result = maybe_downcast_to_dtype(result, dtype)
return result
def maybe_cast_result_dtype(dtype: DtypeObj, how: str) -> DtypeObj:
"""
Get the desired dtype of a result based on the
input dtype and how it was computed.
Parameters
----------
dtype : DtypeObj
Input dtype.
how : str
How the result was computed.
Returns
-------
DtypeObj
The desired dtype of the result.
"""
from pandas.core.arrays.boolean import BooleanDtype
from pandas.core.arrays.integer import Int64Dtype
if how in ["add", "cumsum", "sum"] and (dtype == np.dtype(bool)):
return np.dtype(np.int64)
elif how in ["add", "cumsum", "sum"] and isinstance(dtype, BooleanDtype):
return Int64Dtype()
return dtype
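# editor's note (not part of the original pandas source): the mapping above, in examples --
#   maybe_cast_result_dtype(np.dtype(bool), "sum")      -> np.dtype("int64")
#   maybe_cast_result_dtype(BooleanDtype(), "cumsum")   -> Int64Dtype()
#   maybe_cast_result_dtype(np.dtype("float64"), "sum") -> np.dtype("float64")  (unchanged)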
def maybe_cast_to_extension_array(
cls: Type["ExtensionArray"], obj: ArrayLike, dtype: Optional[ExtensionDtype] = None
) -> ArrayLike:
"""
Call to `_from_sequence` that returns the object unchanged on Exception.
Parameters
----------
cls : class, subclass of ExtensionArray
obj : arraylike
Values to pass to cls._from_sequence
dtype : ExtensionDtype, optional
Returns
-------
ExtensionArray or obj
"""
from pandas.core.arrays.string_ import StringArray
from pandas.core.arrays.string_arrow import ArrowStringArray
assert isinstance(cls, type), f"must pass a type: {cls}"
assertion_msg = f"must pass a subclass of ExtensionArray: {cls}"
assert issubclass(cls, ABCExtensionArray), assertion_msg
# Everything can be converted to StringArrays, but we may not want to convert
if (
issubclass(cls, (StringArray, ArrowStringArray))
and lib.infer_dtype(obj) != "string"
):
return obj
try:
result = cls._from_sequence(obj, dtype=dtype)
except Exception:
# We can't predict what downstream EA constructors may raise
result = obj
return result
def maybe_upcast_putmask(
result: np.ndarray, mask: np.ndarray, other: Scalar
) -> Tuple[np.ndarray, bool]:
"""
A safe version of putmask that potentially upcasts the result.
The result is replaced with the first N elements of other,
where N is the number of True values in mask.
If the length of other is shorter than N, other will be repeated.
Parameters
----------
result : ndarray
The destination array. This will be mutated in-place if no upcasting is
necessary.
mask : boolean ndarray
other : scalar
The source value.
Returns
-------
result : ndarray
changed : bool
Set to true if the result array was upcasted.
Examples
--------
>>> arr = np.arange(1, 6)
>>> mask = np.array([False, True, False, True, True])
>>> result, _ = maybe_upcast_putmask(arr, mask, False)
>>> result
array([1, 0, 3, 0, 0])
"""
if not isinstance(result, np.ndarray):
raise ValueError("The result input must be a ndarray.")
if not is_scalar(other):
# We _could_ support non-scalar other, but until we have a compelling
# use case, we assume away the possibility.
raise ValueError("other must be a scalar")
if mask.any():
# Two conversions for date-like dtypes that can't be done automatically
# in np.place:
# NaN -> NaT
# integer or integer array -> date-like array
if result.dtype.kind in ["m", "M"]:
if isna(other):
other = result.dtype.type("nat")
elif is_integer(other):
other = np.array(other, dtype=result.dtype)
def changeit():
# we are forced to change the dtype of the result as the input
# isn't compatible
r, _ = maybe_upcast(result, fill_value=other, copy=True)
np.place(r, mask, other)
return r, True
# we want to decide whether place will work
# if we have nans in the False portion of our mask then we need to
# upcast (possibly), otherwise we DON't want to upcast (e.g. if we
# have values, say integers, in the success portion then it's ok to not
# upcast)
new_dtype, _ = maybe_promote(result.dtype, other)
if new_dtype != result.dtype:
# we have a scalar or len 0 ndarray
# and its nan and we are changing some values
if isna(other):
return changeit()
try:
np.place(result, mask, other)
except TypeError:
# e.g. int-dtype result and float-dtype other
return changeit()
return result, False
def maybe_casted_values(
index: "Index", codes: Optional[np.ndarray] = None
) -> ArrayLike:
"""
Convert an index, given directly or as a pair (level, code), to a 1D array.
Parameters
----------
index : Index
codes : np.ndarray[intp] or None, default None
Returns
-------
ExtensionArray or ndarray
If codes is `None`, the values of `index`.
If codes is passed, an array obtained by taking from `index` the indices
contained in `codes`.
"""
values = index._values
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the codes, extract the values with a mask
if codes is not None:
mask: np.ndarray = codes == -1
if mask.size > 0 and mask.all():
# we can have situations where the whole mask is -1,
# meaning there is nothing found in codes, so make all nan's
dtype = index.dtype
fill_value = na_value_for_dtype(dtype)
values = construct_1d_arraylike_from_scalar(fill_value, len(mask), dtype)
else:
values = values.take(codes)
if mask.any():
if isinstance(values, np.ndarray):
values, _ = maybe_upcast_putmask(values, mask, np.nan)
else:
values[mask] = np.nan
return values
def maybe_promote(dtype, fill_value=np.nan):
"""
Find the minimal dtype that can hold both the given dtype and fill_value.
Parameters
----------
dtype : np.dtype or ExtensionDtype
fill_value : scalar, default np.nan
Returns
-------
dtype
Upcasted from dtype argument if necessary.
fill_value
Upcasted from fill_value argument if necessary.
"""
if not is_scalar(fill_value) and not is_object_dtype(dtype):
# with object dtype there is nothing to promote, and the user can
# pass pretty much any weird fill_value they like
raise ValueError("fill_value must be a scalar")
# if we passed an array here, determine the fill value by dtype
if isinstance(fill_value, np.ndarray):
if issubclass(fill_value.dtype.type, (np.datetime64, np.timedelta64)):
fill_value = fill_value.dtype.type("NaT", "ns")
else:
# we need to change to object type as our
# fill_value is of object type
if fill_value.dtype == np.object_:
dtype = np.dtype(np.object_)
fill_value = np.nan
if dtype == np.object_ or dtype.kind in ["U", "S"]:
# We treat string-like dtypes as object, and _always_ fill
# with np.nan
fill_value = np.nan
dtype = np.dtype(np.object_)
# returns tuple of (dtype, fill_value)
if issubclass(dtype.type, np.datetime64):
if isinstance(fill_value, datetime) and fill_value.tzinfo is not None:
# Trying to insert tzaware into tznaive, have to cast to object
dtype = np.dtype(np.object_)
elif is_integer(fill_value) or (is_float(fill_value) and not isna(fill_value)):
dtype = np.dtype(np.object_)
else:
try:
fill_value = Timestamp(fill_value).to_datetime64()
except (TypeError, ValueError):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, np.timedelta64):
if (
is_integer(fill_value)
or (is_float(fill_value) and not np.isnan(fill_value))
or isinstance(fill_value, str)
):
# TODO: What about str that can be a timedelta?
dtype = np.dtype(np.object_)
else:
try:
fv = Timedelta(fill_value)
except ValueError:
dtype = np.dtype(np.object_)
else:
if fv is NaT:
# NaT has no `to_timedelta64` method
fill_value = np.timedelta64("NaT", "ns")
else:
fill_value = fv.to_timedelta64()
elif is_datetime64tz_dtype(dtype):
if isna(fill_value):
fill_value = NaT
elif not isinstance(fill_value, datetime):
dtype = np.dtype(np.object_)
elif fill_value.tzinfo is None:
dtype = np.dtype(np.object_)
elif not tz_compare(fill_value.tzinfo, dtype.tz):
# TODO: sure we want to cast here?
dtype = np.dtype(np.object_)
elif is_extension_array_dtype(dtype) and isna(fill_value):
fill_value = dtype.na_value
elif is_float(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, np.integer):
dtype = np.dtype(np.float64)
elif dtype.kind == "f":
mst = np.min_scalar_type(fill_value)
if mst > dtype:
# e.g. mst is np.float64 and dtype is np.float32
dtype = mst
elif dtype.kind == "c":
mst = np.min_scalar_type(fill_value)
dtype = np.promote_types(dtype, mst)
elif is_bool(fill_value):
if not issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif is_integer(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, np.integer):
if not np.can_cast(fill_value, dtype):
# upcast to prevent overflow
mst = np.min_scalar_type(fill_value)
dtype = np.promote_types(dtype, mst)
if dtype.kind == "f":
# Case where we disagree with numpy
dtype = np.dtype(np.object_)
elif is_complex(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, (np.integer, np.floating)):
mst = np.min_scalar_type(fill_value)
dtype = np.promote_types(dtype, mst)
elif dtype.kind == "c":
mst = np.min_scalar_type(fill_value)
if mst > dtype:
# e.g. mst is np.complex128 and dtype is np.complex64
dtype = mst
elif fill_value is None:
if is_float_dtype(dtype) or is_complex_dtype(dtype):
fill_value = np.nan
elif is_integer_dtype(dtype):
dtype = np.float64
fill_value = np.nan
elif is_datetime_or_timedelta_dtype(dtype):
fill_value = dtype.type("NaT", "ns")
else:
dtype = np.dtype(np.object_)
fill_value = np.nan
else:
dtype = np.dtype(np.object_)
# in case we have a string that looked like a number
if is_extension_array_dtype(dtype):
pass
elif issubclass(np.dtype(dtype).type, (bytes, str)):
dtype = np.dtype(np.object_)
fill_value = _ensure_dtype_type(fill_value, dtype)
return dtype, fill_value
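# editor's note (not part of the original pandas source): a few concrete promotions implied by
# the rules above --
#   maybe_promote(np.dtype("int64"), fill_value=np.nan)  -> (float64, nan)
#   maybe_promote(np.dtype("int64"), fill_value=3)       -> (int64, 3)
#   maybe_promote(np.dtype("bool"), fill_value=3)        -> (object, 3)
#   maybe_promote(np.dtype("M8[ns]"), fill_value=np.nan) -> (M8[ns], np.datetime64("NaT"))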
def _ensure_dtype_type(value, dtype: DtypeObj):
"""
Ensure that the given value is an instance of the given dtype.
e.g. if out dtype is np.complex64_, we should have an instance of that
as opposed to a python complex object.
Parameters
----------
value : object
dtype : np.dtype or ExtensionDtype
Returns
-------
object
"""
# Start with exceptions in which we do _not_ cast to numpy types
if is_extension_array_dtype(dtype):
return value
elif dtype == np.object_:
return value
elif isna(value):
# e.g. keep np.nan rather than try to cast to np.float32(np.nan)
return value
return dtype.type(value)
def infer_dtype_from(val, pandas_dtype: bool = False) -> Tuple[DtypeObj, Any]:
"""
Interpret the dtype from a scalar or array.
Parameters
----------
val : object
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, a scalar or array that belongs to pandas extension types is
inferred as object
"""
if is_scalar(val):
return infer_dtype_from_scalar(val, pandas_dtype=pandas_dtype)
return infer_dtype_from_array(val, pandas_dtype=pandas_dtype)
def infer_dtype_from_scalar(val, pandas_dtype: bool = False) -> Tuple[DtypeObj, Any]:
"""
Interpret the dtype from a scalar.
Parameters
----------
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, a scalar that belongs to pandas extension types is inferred as
object
"""
dtype: DtypeObj = np.dtype(object)
# a 1-element ndarray
if isinstance(val, np.ndarray):
msg = "invalid ndarray passed to infer_dtype_from_scalar"
if val.ndim != 0:
raise ValueError(msg)
dtype = val.dtype
val = val.item()
elif isinstance(val, str):
# If we create an empty array using a string to infer
# the dtype, NumPy will only allocate one character per entry
# so this is kind of bad. Alternately we could use np.repeat
# instead of np.empty (but then you still don't want things
# coming out as np.str_)!
dtype = np.dtype(object)
elif isinstance(val, (np.datetime64, datetime)):
val = Timestamp(val)
if val is NaT or val.tz is None:
dtype = np.dtype("M8[ns]")
else:
if pandas_dtype:
dtype = DatetimeTZDtype(unit="ns", tz=val.tz)
else:
# return datetimetz as object
return np.dtype(object), val
val = val.value
elif isinstance(val, (np.timedelta64, timedelta)):
val = Timedelta(val).value
dtype = np.dtype("m8[ns]")
elif is_bool(val):
dtype = np.dtype(np.bool_)
elif is_integer(val):
if isinstance(val, np.integer):
dtype = np.dtype(type(val))
else:
dtype = np.dtype(np.int64)
try:
np.array(val, dtype=dtype)
except OverflowError:
dtype = np.array(val).dtype
elif is_float(val):
if isinstance(val, np.floating):
dtype = np.dtype(type(val))
else:
dtype = np.dtype(np.float64)
elif is_complex(val):
dtype = np.dtype(np.complex_)
elif pandas_dtype:
if lib.is_period(val):
dtype = PeriodDtype(freq=val.freq)
elif lib.is_interval(val):
subtype = infer_dtype_from_scalar(val.left, pandas_dtype=True)[0]
dtype = IntervalDtype(subtype=subtype)
return dtype, val
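# editor's note (not part of the original pandas source): examples of the inference above --
#   infer_dtype_from_scalar(1)       -> (int64, 1)
#   infer_dtype_from_scalar(1.5)     -> (float64, 1.5)
#   infer_dtype_from_scalar("text")  -> (object, "text")
#   infer_dtype_from_scalar(Timestamp("2020", tz="UTC"))                    -> (object, the Timestamp)
#   infer_dtype_from_scalar(Timestamp("2020", tz="UTC"), pandas_dtype=True) -> (DatetimeTZDtype, the i8 value)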
def dict_compat(d: Dict[Scalar, Scalar]) -> Dict[Scalar, Scalar]:
"""
Convert datetimelike-keyed dicts to a Timestamp-keyed dict.
Parameters
----------
d: dict-like object
Returns
-------
dict
"""
return {maybe_box_datetimelike(key): value for key, value in d.items()}
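# Illustrative usage sketch (not part of the original module; the helper name
# below is made up): datetime-like keys are boxed to Timestamp keys so that
# lookups behave consistently downstream; other keys are left untouched.
def _example_dict_compat():
    d = {np.datetime64("2021-01-01"): 1, "other": 2}
    out = dict_compat(d)
    assert Timestamp("2021-01-01") in out and out["other"] == 2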
def infer_dtype_from_array(
arr, pandas_dtype: bool = False
) -> Tuple[DtypeObj, ArrayLike]:
"""
Infer the dtype from an array.
Parameters
----------
arr : array
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, an array belonging to a pandas extension type
is inferred as object
Returns
-------
tuple (numpy-compat/pandas-compat dtype, array)
Notes
-----
If pandas_dtype=False, these infer to numpy dtypes
exactly, with the exception that mixed / object dtypes
are not coerced by stringifying or conversion.
If pandas_dtype=True, datetime64tz-aware / categorical
types will retain their character.
Examples
--------
>>> np.asarray([1, '1'])
array(['1', '1'], dtype='<U21')
>>> infer_dtype_from_array([1, '1'])
(dtype('O'), [1, '1'])
"""
if isinstance(arr, np.ndarray):
return arr.dtype, arr
if not is_list_like(arr):
arr = [arr]
if pandas_dtype and is_extension_array_dtype(arr):
return arr.dtype, arr
elif isinstance(arr, ABCSeries):
return arr.dtype, np.asarray(arr)
# don't force numpy coerce with nan's
inferred = lib.infer_dtype(arr, skipna=False)
if inferred in ["string", "bytes", "mixed", "mixed-integer"]:
return (np.dtype(np.object_), arr)
arr = np.asarray(arr)
return arr.dtype, arr
def maybe_infer_dtype_type(element):
"""
Try to infer an object's dtype, for use in arithmetic ops.
Uses `element.dtype` if that's available.
Objects implementing the iterator protocol are cast to a NumPy array,
and from there the array's type is used.
Parameters
----------
element : object
Possibly has a `.dtype` attribute, and possibly the iterator
protocol.
Returns
-------
tipo : type
Examples
--------
>>> from collections import namedtuple
>>> Foo = namedtuple("Foo", "dtype")
>>> maybe_infer_dtype_type(Foo(np.dtype("i8")))
dtype('int64')
"""
tipo = None
if hasattr(element, "dtype"):
tipo = element.dtype
elif is_list_like(element):
element = np.asarray(element)
tipo = element.dtype
return tipo
def maybe_upcast(
values: ArrayLike,
fill_value: Scalar = np.nan,
dtype: Dtype = None,
copy: bool = False,
) -> Tuple[ArrayLike, Scalar]:
"""
Provide explicit type promotion and coercion.
Parameters
----------
values : ndarray or ExtensionArray
The array that we want to maybe upcast.
fill_value : what we want to fill with
dtype : if None, then use the dtype of the values, else coerce to this type
copy : bool, default False
If True always make a copy even if no upcast is required.
Returns
-------
values: ndarray or ExtensionArray
the original array, possibly upcast
fill_value:
the fill value, possibly upcast
"""
if not is_scalar(fill_value) and not is_object_dtype(values.dtype):
# We allow arbitrary fill values for object dtype
raise ValueError("fill_value must be a scalar")
if is_extension_array_dtype(values):
if copy:
values = values.copy()
else:
if dtype is None:
dtype = values.dtype
new_dtype, fill_value = maybe_promote(dtype, fill_value)
if new_dtype != values.dtype:
values = values.astype(new_dtype)
elif copy:
values = values.copy()
return values, fill_value
def invalidate_string_dtypes(dtype_set: Set[DtypeObj]):
"""
Change string like dtypes to object for
``DataFrame.select_dtypes()``.
"""
non_string_dtypes = dtype_set - {np.dtype("S").type, np.dtype("<U").type}
if non_string_dtypes != dtype_set:
raise TypeError("string dtypes are not allowed, use 'object' instead")
def coerce_indexer_dtype(indexer, categories):
""" coerce the indexer input array to the smallest dtype possible """
length = len(categories)
if length < _int8_max:
return ensure_int8(indexer)
elif length < _int16_max:
return ensure_int16(indexer)
elif length < _int32_max:
return ensure_int32(indexer)
return ensure_int64(indexer)
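# Illustrative usage sketch (not part of the original module; the helper name
# below is made up): with only a handful of categories, the codes are squeezed
# into the smallest integer type that can address them.
def _example_coerce_indexer_dtype():
    codes = np.array([0, 1, 2, 1], dtype=np.int64)
    small = coerce_indexer_dtype(codes, categories=["a", "b", "c"])
    assert small.dtype == np.dtype(np.int8)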
def astype_nansafe(
arr, dtype: DtypeObj, copy: bool = True, skipna: bool = False
) -> ArrayLike:
"""
Cast the elements of an array to a given dtype in a nan-safe manner.
Parameters
----------
arr : ndarray
dtype : np.dtype
copy : bool, default True
If False, a view will be attempted but may fail, if
e.g. the item sizes don't align.
skipna: bool, default False
Whether or not we should skip NaN when casting as a string-type.
Raises
------
ValueError
The dtype was a datetime64/timedelta64 dtype, but it had no unit.
"""
# dispatch on extension dtype if needed
if is_extension_array_dtype(dtype):
return dtype.construct_array_type()._from_sequence(arr, dtype=dtype, copy=copy)
if not isinstance(dtype, np.dtype):
dtype = pandas_dtype(dtype)
if issubclass(dtype.type, str):
return lib.ensure_string_array(
arr.ravel(), skipna=skipna, convert_na_value=False
).reshape(arr.shape)
elif is_datetime64_dtype(arr):
if is_object_dtype(dtype):
return ints_to_pydatetime(arr.view(np.int64))
elif dtype == np.int64:
if isna(arr).any():
raise ValueError("Cannot convert NaT values to integer")
return arr.view(dtype)
# allow frequency conversions
if dtype.kind == "M":
return arr.astype(dtype)
raise TypeError(f"cannot astype a datetimelike from [{arr.dtype}] to [{dtype}]")
elif is_timedelta64_dtype(arr):
if is_object_dtype(dtype):
return ints_to_pytimedelta(arr.view(np.int64))
elif dtype == np.int64:
if isna(arr).any():
raise ValueError("Cannot convert NaT values to integer")
return arr.view(dtype)
if dtype not in [INT64_DTYPE, TD64NS_DTYPE]:
# allow frequency conversions
# we return a float here!
if dtype.kind == "m":
mask = isna(arr)
result = arr.astype(dtype).astype(np.float64)
result[mask] = np.nan
return result
elif dtype == TD64NS_DTYPE:
return arr.astype(TD64NS_DTYPE, copy=copy)
raise TypeError(f"cannot astype a timedelta from [{arr.dtype}] to [{dtype}]")
elif np.issubdtype(arr.dtype, np.floating) and np.issubdtype(dtype, np.integer):
if not np.isfinite(arr).all():
raise ValueError("Cannot convert non-finite values (NA or inf) to integer")
elif is_object_dtype(arr):
# work around NumPy brokenness, #1987
if np.issubdtype(dtype.type, np.integer):
return lib.astype_intsafe(arr.ravel(), dtype).reshape(arr.shape)
# if we have a datetime/timedelta array of objects
# then coerce to a proper dtype and recall astype_nansafe
elif is_datetime64_dtype(dtype):
from pandas import to_datetime
return astype_nansafe(to_datetime(arr).values, dtype, copy=copy)
elif is_timedelta64_dtype(dtype):
from pandas import to_timedelta
return astype_nansafe(to_timedelta(arr)._values, dtype, copy=copy)
if dtype.name in ("datetime64", "timedelta64"):
msg = (
f"The '{dtype.name}' dtype has no unit. Please pass in "
f"'{dtype.name}[ns]' instead."
)
raise ValueError(msg)
if copy or is_object_dtype(arr) or is_object_dtype(dtype):
# Explicit copy, or required since NumPy can't view from / to object.
return arr.astype(dtype, copy=True)
return arr.view(dtype)
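# Illustrative usage sketch (not part of the original module; the helper name
# below is made up): astype_nansafe guards the casts a plain ndarray.astype
# would get wrong, e.g. refusing to turn NaT into an integer while still
# allowing datetime64 -> object boxing.
def _example_astype_nansafe():
    dt = np.array(["2021-01-01", "NaT"], dtype="M8[ns]")
    as_objects = astype_nansafe(dt, np.dtype(object))
    assert isna(as_objects[1])
    try:
        astype_nansafe(dt, np.dtype(np.int64))
    except ValueError:
        pass  # "Cannot convert NaT values to integer"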
def soft_convert_objects(
values: np.ndarray,
datetime: bool = True,
numeric: bool = True,
timedelta: bool = True,
copy: bool = True,
):
"""
Try to coerce datetime, timedelta, and numeric object-dtype columns
to inferred dtype.
Parameters
----------
values : np.ndarray[object]
datetime : bool, default True
numeric: bool, default True
timedelta : bool, default True
copy : bool, default True
Returns
-------
np.ndarray
"""
validate_bool_kwarg(datetime, "datetime")
validate_bool_kwarg(numeric, "numeric")
validate_bool_kwarg(timedelta, "timedelta")
validate_bool_kwarg(copy, "copy")
conversion_count = sum((datetime, numeric, timedelta))
if conversion_count == 0:
raise ValueError("At least one of datetime, numeric or timedelta must be True.")
# Soft conversions
if datetime:
# GH 20380, when datetime is beyond year 2262, hence outside
# bound of nanosecond-resolution 64-bit integers.
try:
values = lib.maybe_convert_objects(values, convert_datetime=True)
except OutOfBoundsDatetime:
pass
if timedelta and is_object_dtype(values.dtype):
# Object check to ensure only run if previous did not convert
values = lib.maybe_convert_objects(values, convert_timedelta=True)
if numeric and is_object_dtype(values.dtype):
import numpy as np
import pandas as pd
import vigra
def contingency_table(left_vol, right_vol):
"""
Return a pd.DataFrame with columns 'left', 'right' and 'overlap_size',
indicating the count of overlapping pixels for each segment in ``left_vol`` with segments in ``right_vol``.
Note: Internally, copies both volumes multiple times.
This function seems to require an extra ~5x RAM relative to the inputs.
"""
assert left_vol.dtype == right_vol.dtype
dtype = left_vol.dtype
vols_combined = np.empty((left_vol.size,2), dtype)
vols_combined[:,0]= left_vol.flat
vols_combined[:,1]= right_vol.flat
vols_combined = vols_combined.reshape(-1).view([('left', dtype), ('right', dtype)])
pairs, counts = np.unique(vols_combined, return_counts=True)
table = pd.DataFrame({'left': pairs['left'], 'right': pairs['right'], 'overlap_size': counts})
return table
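# Illustrative usage sketch (not part of the original module; the helper name
# below is made up): counting overlaps between two tiny 2x2 label volumes.
def _example_contingency_table():
    left = np.array([[1, 1], [2, 2]], dtype=np.uint32)
    right = np.array([[3, 4], [4, 4]], dtype=np.uint32)
    table = contingency_table(left, right)
    # expected rows: (1,3)->1 voxel, (1,4)->1 voxel, (2,4)->2 voxels
    row = table[(table.left == 2) & (table.right == 4)]
    assert row['overlap_size'].iloc[0] == 2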
def label_vol_mapping(vol_from, vol_to):
"""
Determine how to remap voxel IDs in ``vol_from`` into corresponding
IDs in ``vol_to``, according to maximum overlap.
(Note that this is not a commutative operation.)
Returns
-------
A 1D index array such that ``mapping[i] = j``, where ``i``
is a voxel ID in ``vol_from``, and ``j`` is the corresponding
ID in ``vol_to``.
"""
table = contingency_table(vol_from, vol_to)
table.index = table['right']
mapping = table.groupby('left').agg({'overlap_size': 'idxmax'})
mapping.columns = ['right']
mapping_array = np.zeros((int(mapping.index.max())+1,), dtype=np.uint32)
mapping_array[(mapping.index.values,)] = mapping['right'].values
return mapping_array
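# Illustrative usage sketch (not part of the original module; the helper name
# below is made up): label 1 fully overlaps label 3 and label 2 fully overlaps
# label 4, so the mapping array sends 1 -> 3 and 2 -> 4.
def _example_label_vol_mapping():
    left = np.array([[1, 1], [2, 2]], dtype=np.uint32)
    right = np.array([[3, 3], [4, 4]], dtype=np.uint32)
    mapping = label_vol_mapping(left, right)
    assert mapping[1] == 3 and mapping[2] == 4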
def edge_mask_for_axis( label_img, axis ):
"""
Find all supervoxel edges along the given axis and return
a 'left-hand' mask indicating where the edges are located
(i.e. a boolean array indicating voxels that are just to the left of an edge).
Note that this mask is less wide (by 1 pixel) than ``label_img`` along the chosen axis.
"""
if axis < 0:
axis += label_img.ndim
assert label_img.ndim > axis
if label_img.shape[axis] == 1:
return np.zeros_like(label_img)
left_slicing = ((slice(None),) * axis) + (np.s_[:-1],)
right_slicing = ((slice(None),) * axis) + (np.s_[1:],)
edge_mask = (label_img[left_slicing] != label_img[right_slicing])
return edge_mask
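# Illustrative usage sketch (not part of the original module; the helper name
# below is made up): for a single row of labels [1, 1, 2, 2], the left-hand
# mask along axis 1 flags the pixel just left of the 1|2 boundary.
def _example_edge_mask_for_axis():
    labels = np.array([[1, 1, 2, 2]], dtype=np.uint32)
    mask = edge_mask_for_axis(labels, axis=1)
    assert mask.tolist() == [[False, True, False]]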
def edge_ids_for_axis(label_img, edge_mask, axis):
"""
Given a 'left-hand' edge_mask indicating where edges are located along the given axis,
return an array of edge ids (u,v) corresponding to the voxel ids of every voxel under the mask,
in the same order as ``edge_mask.nonzero()``.
The edge ids are returned in scan-order (i.e. like ``.nonzero()``), but are *not* sorted such that u < v.
Instead, each edge id (u,v) is ordered from 'left' to 'right'.
Parameters
----------
label_img
ndarray
edge_mask
A 'left-hand' mask indicating where the image edges are.
Should be same shape as label_img, except in the dimension of the given axis,
where it is 1 pixel narrower.
You may also provide edge_mask=None, which implies that *all* pixel locations
contain an edge along the requested axis.
(Useful if you're dealing with flat superpixels.)
axis
An int, < label_img.ndim
Indicates the axis along which edges will be extracted.
Returns
-------
``ndarray`` of ``edge_ids``, ``shape=(N,2)``
To sort each pair, call ``edge_ids.sort(axis=1)``
"""
if axis < 0:
axis += label_img.ndim
assert label_img.ndim > axis
if label_img.shape[axis] == 1:
return np.ndarray( (0, 2), dtype=label_img.dtype )
left_slicing = ((slice(None),) * axis) + (np.s_[:-1],)
right_slicing = ((slice(None),) * axis) + (np.s_[1:],)
if edge_mask is None:
num_edges = label_img[left_slicing].size
edge_ids = np.ndarray(shape=(num_edges, 2), dtype=np.uint32 )
edge_ids[:, 0] = label_img[left_slicing].reshape(-1)
edge_ids[:, 1] = label_img[right_slicing].reshape(-1)
else:
num_edges = np.count_nonzero(edge_mask)
edge_ids = np.ndarray(shape=(num_edges, 2), dtype=np.uint32 )
edge_ids[:, 0] = label_img[left_slicing][edge_mask]
edge_ids[:, 1] = label_img[right_slicing][edge_mask]
# Do NOT sort. Edges are returned in left-to-right order.
# edge_ids.sort(axis=1)
return edge_ids
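# Illustrative usage sketch (not part of the original module; the helper name
# below is made up): continuing the mask example above, the single flagged
# boundary yields one (left, right) edge id, ordered left-to-right.
def _example_edge_ids_for_axis():
    labels = np.array([[1, 1, 2, 2]], dtype=np.uint32)
    mask = edge_mask_for_axis(labels, axis=1)
    ids = edge_ids_for_axis(labels, mask, axis=1)
    assert ids.tolist() == [[1, 2]]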
def unique_edge_labels( all_edge_ids ):
"""
Given a *list* of ``edge_id`` arrays (each of which has shape ``(N,2)``),
merge all ``edge_id`` arrays into a single ``pandas.DataFrame`` with
columns ``['sp1', 'sp2', and 'edge_label']``, where ``edge_label``
is a unique ID number for each ``edge_id`` pair.
(The DataFrame will have no duplicate entries.)
"""
all_dfs = []
for edge_ids in all_edge_ids:
assert edge_ids.shape[1] == 2
num_edges = len(edge_ids)
index_u32 = pd.Index(np.arange(num_edges), dtype=np.uint32)
df = pd.DataFrame(edge_ids, columns=['sp1', 'sp2'], index=index_u32)
df.drop_duplicates(inplace=True)
all_dfs.append( df )
if len(all_dfs) == 1:
combined_df = all_dfs[0]
else:
combined_df = pd.concat(all_dfs).reindex()
combined_df.drop_duplicates(inplace=True)
# This sort isn't necessary for most use-cases,
# but it's convenient for debugging.
combined_df.sort_values(['sp1', 'sp2'], inplace=True)
# TODO: Instead of adding a new column here, we might save some RAM
# if we re-index and then add the index as a column
combined_df['edge_label'] = np.arange(0, len(combined_df), dtype=np.uint32)
return combined_df
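# Illustrative usage sketch (not part of the original module; the helper name
# below is made up): merging two edge_id arrays; the duplicate (2, 3) pair
# collapses to a single row and each unique pair gets one edge_label.
def _example_unique_edge_labels():
    ids_a = np.array([[1, 2], [2, 3]], dtype=np.uint32)
    ids_b = np.array([[2, 3], [3, 4]], dtype=np.uint32)
    lookup = unique_edge_labels([ids_a, ids_b])
    assert len(lookup) == 3
    assert list(lookup.columns) == ['sp1', 'sp2', 'edge_label']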
def extract_edge_values_for_axis( axis, edge_mask, value_img, aspandas=False ):
"""
Returns 1D ``ndarray``, in the same order as ``edge_mask.nonzero()``.
Result is ``float32``, regardless of ``value_img.dtype``.
"""
left_slicing = ((slice(None),) * axis) + (np.s_[:-1],)
right_slicing = ((slice(None),) * axis) + (np.s_[1:],)
# Here, we extract the voxel values *first* and then compute features on the 1D list of values (with associated labels)
# This saves RAM (and should therefore be fast), but can't be used with coordinate-based features or shape features.
# We could, instead, change the lines below to not extract the mask values, and pass the full image into vigra...
edge_values_left = value_img[left_slicing][edge_mask]
edge_values_right = value_img[right_slicing][edge_mask]
# Vigra region features require float32
edge_values_left = edge_values_left.astype(np.float32, copy=False)
edge_values_right = edge_values_right.astype(np.float32, copy=False)
# We average the left and right-hand voxel values 'manually' here and just compute features on the average
# In theory, we could compute the full set of features separately for left and right-hand voxel sets and
# then merge the two, but that seems like overkill, and only some features would be slightly different (e.g. histogram features)
edge_values = edge_values_left
edge_values += edge_values_right
edge_values /= 2
if aspandas:
# If you add a float32 array to a pd.DataFrame, it is automatically cast to float64!
# But if you add it as a Series, the dtype is untouched.
return pd.Series( edge_values, dtype=np.float32 )
return edge_values
def get_edge_ids( label_img ):
"""
Convenience function.
Returns a DataFrame with columns ``['sp1', 'sp2', 'edge_label']``, sorted by ``('sp1', 'sp2')``.
"""
all_edge_ids = []
for axis in range(label_img.ndim):
edge_mask = edge_mask_for_axis(label_img, axis)
edge_ids = edge_ids_for_axis(label_img, edge_mask, axis)
edge_ids.sort(axis=1)
lookup = unique_edge_labels( [edge_ids] )
all_edge_ids.append(lookup[['sp1', 'sp2']].values)
final_edge_label_lookup_df = unique_edge_labels( all_edge_ids )
return final_edge_label_lookup_df
def nonzero_coord_array(a):
"""
Equivalent to ``np.transpose(a.nonzero())``, but much
faster for large arrays, thanks to a little trick:
The elements of the tuple returned by ``a.nonzero()`` share a common ``base``,
so we can avoid the copy that would normally be incurred when
calling ``transpose()`` on the tuple.
"""
base_array = a.nonzero()[0].base
# This is necessary because VigraArrays have their own version
# of nonzero(), which adds an extra base in the view chain.
while base_array.base is not None:
base_array = base_array.base
return base_array
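# Illustrative usage sketch (not part of the original module; the helper name
# below is made up): for a plain ndarray the result should match
# np.transpose(a.nonzero()), just without the extra copy.
def _example_nonzero_coord_array():
    a = np.zeros((4, 5), dtype=np.uint8)
    a[1, 2] = a[3, 0] = 1
    assert (nonzero_coord_array(a) == np.transpose(a.nonzero())).all()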
def generate_random_voronoi(shape, num_sp):
"""
Generate a superpixel image for testing.
A set of N seed points (N=``num_sp``) will be chosen randomly, and the superpixels
will just be a voronoi diagram for those seeds.
Note: The first superpixel ID is 1.
"""
assert len(shape) in (2,3), "Only 2D and 3D supported."
seed_coords = []
for dim in shape:
# Generate more than we need, so we can toss duplicates
seed_coords.append( np.random.randint( dim, size=(2*num_sp,) ) )
seed_coords = np.transpose(seed_coords)
seed_coords = list(set(map(tuple, seed_coords))) # toss duplicates
seed_coords = seed_coords[:num_sp]
seed_coords = tuple(np.transpose(seed_coords))
superpixels = np.zeros( shape, dtype=np.uint32 )
superpixels[seed_coords] = np.arange( num_sp )+1
vigra.analysis.watersheds( np.zeros(shape, dtype=np.float32),
seeds=superpixels,
out=superpixels )
superpixels = vigra.taggedView(superpixels, 'zyx'[3-len(shape):])
return superpixels
def colorize_labels(label_img):
label_img = label_img.withAxes('yx')
random_colors = np.random.randint(0,255,size=(label_img.max()+1, 3) ).astype(np.uint8)
colorized = np.zeros(label_img.shape + (3,), dtype=np.uint8)
colorized = vigra.taggedView(colorized, 'yxc')
for c in range(3):
colorized[...,c] = random_colors[...,c][label_img]
return colorized
def dataframe_to_hdf5(h5py_group, df):
"""
Helper function to serialize a pandas.DataFrame to an h5py.Group.
Note: This function uses a custom storage format,
not the same format as pandas.DataFrame.to_hdf().
Known to work for the DataFrames used in the Rag datastructure,
including the MultiIndex columns in the dense_edge_tables.
Not tested with more complicated DataFrame structures.
"""
h5py_group['row_index'] = df.index.values
h5py_group['column_index'] = repr(df.columns.values)
# The deserialization function below requires this.
assert len(set(df.columns.values)) == len(df.columns.values), \
"DataFrame column names must be unique to be serialized!"
columns_group = h5py_group.create_group('columns')
for col_index, _col_name in enumerate(df.columns.values):
columns_group['{:03}'.format(col_index)] = df.iloc[:, col_index].values
def dataframe_from_hdf5(h5py_group):
"""
Helper function to deserialize a pandas.DataFrame from an h5py.Group,
as written by ``dataframe_to_hdf5()``.
Note: This function uses a custom storage format,
not the same format as pandas.read_hdf().
Known to work for the DataFrames used in the Rag datastructure,
including the MultiIndex columns in the dense_edge_tables.
Not tested with more complicated DataFrame structures.
"""
from numpy import array # We use eval() for the column index, which uses 'array'
array # Avoid linter usage errors
row_index_values = h5py_group['row_index'][:]
column_index_names = list(eval(h5py_group['column_index'][()]))
if isinstance(column_index_names[0], np.ndarray):
column_index_names = list(map(tuple, column_index_names))
column_index = pd.MultiIndex.from_tuples(column_index_names)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: Ampel-contrib-HU/ampel/contrib/hu/t3/HealpixCorrPlotter.py
# License: BSD-3-Clause
# Author: jn <<EMAIL>>
# Date: 16.12.2012
# Last Modified Date: 04.01.2022
# Last Modified By: jn <<EMAIL>>
import logging
from typing import Any, Union, Generator, Sequence, Literal
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib.lines import Line2D
from matplotlib.patches import Patch
from matplotlib import cm
import pandas as pd
import numpy as np
import seaborn as sns
from adjustText import adjust_text
from ampel.types import UBson, T3Send
from ampel.struct.UnitResult import UnitResult
from ampel.view.T3Store import T3Store
from ampel.view.TransientView import TransientView
from ampel.abstract.AbsPhotoT3Unit import AbsPhotoT3Unit
from ampel.util.pretty import prettyjson
from ampel.ztf.util.ZTFIdMapper import to_ztf_id
class HealpixCorrPlotter(AbsPhotoT3Unit):
"""
Compare healpix coordinate P-value with output from T2RunSncosmo.
"""
sncosmo_unit: str = 'T2RunSncosmo'
model_name: str | None # Only use this model
time_parameter: str = 't0' # Name of the model parameter determining explosion / peak time
# What do we study
target_property: Literal['Abs fit peak mag', r'$\chi^2$ / d.o.f.'] = 'Abs fit peak mag'
target_range: list[float] = [-13.5,-17.5]
max_pvalue: float = 0.9
# Plot params
plotsize: Sequence[float] = [6,4]
# List of inclusive lower limit, non-inc upper limit, marker type, label
# marker_colors: list[str] = ["#E69F00", "#56B4E9", "#009E73", "#0072B2", "#D55E00", "#CC79A7", "#F0E442"]
# marker_colors: list[str] = ["#332288", "#88CCEE", "#44AA99", "#117733", "#999933", "#DDCC77", "#CC6677", "#882255", "#AA4499"]
marker_colors = cm.get_cmap('plasma', 5).colors
# marker_colors = [cm.get_cmap('summer', 3)(i) for i in range(3)]
background_color: str = "tab:green"
ndof_marker: list[Any] = [ [0,0.5,'o', marker_colors[0], '0 dof'], [1,1.5,'^', marker_colors[1], '1 dof'], [2,np.inf,'s', marker_colors[2], '>1 dof'], ]
def process(self, gen: Generator[TransientView, T3Send, None], t3s: T3Store | None = None) -> Union[UBson, UnitResult]:
self.logger.info("Printing transients info")
self.logger.info("=" * 80)
count = 0
table_rows: list[dict[str, Any]] = []
for tran_view in gen:
count += 1
self.logger.info(count)
# Stock info
tinfo = self._get_stock_info(tran_view)
# t2_info
t2docs = tran_view.get_raw_t2_body(unit=self.sncosmo_unit)
if t2docs is None:
continue
for t2info in t2docs:
if self.model_name and not t2info['model_name']==self.model_name:
continue
tinfo['z'] = t2info['z']
if t2info['z_source'] in ['AMPELz_group0', 'AMPELz_group1','AMPELz_group2','AMPELz_group3']:
tinfo['z_sharp'] = True
else:
tinfo['z_sharp'] = False
tinfo['zsource'] = t2info['z_source']
tinfo['model'] = t2info['model_name']
tinfo['model_peak_abs'] = t2info['fit_metrics']['restpeak_model_absmag_B']
tinfo['model_peak_obs'] = t2info['fit_metrics']['obspeak_model_B']
tinfo['ndof'] = t2info['sncosmo_result']['ndof']
tinfo['chisq'] = t2info['sncosmo_result']['chisq']
if t2info['sncosmo_result']['ndof']>0:
tinfo['chisqndof'] = t2info['sncosmo_result']['chisq'] / t2info['sncosmo_result']['ndof']
else:
tinfo['chisqndof'] = -1.
tinfo['time'] = t2info['sncosmo_result']['paramdict'][self.time_parameter]
self.logger.info(tinfo)
table_rows.append(tinfo)
self.logger.info("=" * 80)
self.logger.info(f"Printed info for {count} transients")
df = pd.DataFrame.from_dict(table_rows)
import pandas as pd
from functools import reduce
from fooltrader.contract.files_contract import *
import re
import json
class agg_future_dayk(object):
funcs={}
def __init__(self):
self.funcs['shfeh']=self.getShfeHisData
self.funcs['shfec']=self.getShfeCurrentYearData
self.funcs['ineh']=self.getIneHisData
self.funcs['inec']=self.getIneCurrentYearData
self.funcs['dceh']=self.getDceHisData
self.funcs['dcec']=self.getDceCurrentYearData
self.funcs['czceh']=self.getCzceHisData
self.funcs['czcec']=self.getCzceCurrentYearData
self.funcs['cffexh']=self.getCffexHisData
self.funcs['cffexc']=self.getCffexCurrentYearData
def getCurrentYearAllData(self,exchange=None):
if exchange is None:
exchanges=['cffex','dce','czce','shfe',"ine"]
pds = list(map(lambda x:self.getCurrentYearData(x),exchanges))
finalpd = pd.concat(pds)
else:
finalpd= pd.concat([self.getCurrentYearData(exchange)])
for i in ['volume','inventory']:
finalpd[i]=finalpd[i].apply(lambda x:pd.to_numeric(str(x).replace(",", "")))
finalpd.set_index(['date','fproduct','symbol'], inplace=True)
finalpd.sort_index(inplace=True)
return finalpd
def getAllData(self,exchange=None):
if exchange is None:
exchanges=['cffex','dce','czce','shfe',"ine"]
pds = list(map(lambda x:self.getHisData(x),exchanges))+list(map(lambda x:self.getCurrentYearData(x),exchanges))
finalpd = pd.concat(pds)
else:
finalpd= pd.concat([self.getHisData(exchange),self.getCurrentYearData(exchange)])
for i in ['volume','inventory']:
finalpd[i]=finalpd[i].apply(lambda x:pd.to_numeric(str(x).replace(",", "")))
finalpd.set_index(['date','fproduct','symbol'], inplace=True)
finalpd.sort_index(inplace=True)
return finalpd
def getHisData(self,exchange):
return self.funcs[exchange+'h']()
def getCurrentYearData(self,exchange):
return self.funcs[exchange+'c']()
def getShfeHisData(self):
pattern = re.compile(r'(\D{1,3})(\d{3,4}).*')
dfs=[]
dir = get_exchange_cache_dir(security_type='future',exchange='shfe')+"/his/"
for j in os.listdir(dir):
a = pd.read_excel(dir+j, header=2, skipfooter=5,
usecols=list(range(0, 14))).fillna(method='ffill')
dfs.append(a)
totaldf = reduce(lambda x,y:x.append(y),dfs)
totaldf['日期']=pd.to_datetime(totaldf['日期'],format='%Y%m%d')
totaldf=totaldf[pd.isnull(totaldf['合约'])==False]
totaldf['fproduct'] = totaldf['合约'].apply(lambda x:pattern.match(x).groups()[0])
totaldf['settleDate'] = totaldf['合约'].apply(lambda x:pd.to_datetime('20'+pattern.match(x).groups()[1],format='%Y%m'))
renameMap={
'合约':'symbol',
'日期':'date',
'前收盘':'preClose',
'前结算':'preSettle',
'开盘价':'open',
'最高价':'high',
'最低价':'low',
'收盘价':'close',
'结算价':'settle',
'涨跌1':'range',
'涨跌2':'range2',
'成交量':'volume',
'成交金额':'amount',
'持仓量':'inventory'
}
totaldf.rename(index=str,columns=renameMap,inplace=True)
totaldf=totaldf[['symbol','date','open','high','low','close','settle','range','range2','volume','inventory','fproduct','settleDate']]
print("done")
# totaldf.to_pickle('testdf.pickle')
return totaldf
def getShfeCurrentYearData(self):
dir = os.path.join(get_exchange_cache_dir(security_type='future',exchange='shfe'),"2020_day_kdata")
file_list=os.listdir(dir)
tempdfs=[]
for file in file_list:
if len(file)==8:
with open(os.path.join(dir,file)) as f:
load_dict = json.load(f)
temp_df = pd.DataFrame(data=load_dict['o_curinstrument'])
temp_df['date'] = file
temp_df['date'] = pd.to_datetime(temp_df['date'],format="%Y%m%d")
tempdfs.append(temp_df)
aggdf = pd.concat(tempdfs)
"""Code adapted from: https://github.com/SLIPO-EU/loci.git"""
import pandas as pd
import geopandas as gpd
import numpy as np
from shapely.ops import cascaded_union
from shapely.geometry import MultiPoint
from hdbscan import HDBSCAN
from sklearn.cluster import DBSCAN, OPTICS
from .plots import map_choropleth
class Clustering(object):
"""
Computes clusters using DBSCAN or OPTICS algorithm as implemented in sklearn or HDBSCAN.
"""
def __init__(self, pois, alg="hdbscan", min_samples=None, eps=None, n_jobs=-1, **kwargs):
"""Computes clusters using the sklearn algorithms or HDBSCAN.
Parameters:
pois (GeoDataFrame): A POI GeoDataFrame.
alg (string): The clustering algorithm to use (hdbscan, dbscan or optics; default: hdbscan).
min_samples (float|integer): The number of samples in a neighborhood for a point
to be considered as a core point. Expressed as an absolute number (int > 1) or
a fraction of the number of samples (float between 0 and 1).
eps (float): The neighborhood radius (used only in dbscan).
n_jobs (integer): Number of parallel jobs to run in the algorithm (default: -1)
**kwargs: Optional arguments depending on the algorithm.
"""
if min_samples is None:
min_samples = int(round(np.log(len(pois))))
if alg == 'dbscan':
assert eps is not None
self.pois = pois
self.alg = alg
self.min_samples = min_samples
self.eps = eps
self.n_jobs = n_jobs
# Prepare list of coordinates
data_arr = pois.geometry.get_coordinates().values  # (N, 2) array of x/y coordinates
# Compute the clusters
if alg == 'hdbscan':
min_cluster_size = kwargs.pop('min_cluster_size', 50)
core_dist_n_jobs = kwargs.pop('core_dist_n_jobs', n_jobs)
clusterer = HDBSCAN(min_cluster_size=min_cluster_size, min_samples=min_samples, core_dist_n_jobs=core_dist_n_jobs, **kwargs)
labels = clusterer.fit_predict(data_arr)
tree = clusterer.condensed_tree_.to_pandas()
cluster_tree = tree[tree.child_size > 1]
chosen_clusters = clusterer.condensed_tree_._select_clusters()
eps_per_cluster = cluster_tree[cluster_tree.child.isin(chosen_clusters)].\
drop("parent", axis=1).drop("child", axis=1).reset_index().drop("index", axis=1)
eps_per_cluster['lambda_val'] = eps_per_cluster['lambda_val'].apply(lambda x: 1 / x)
eps_per_cluster.rename(columns={'lambda_val': 'eps', 'child_size': 'cluster_size'}, inplace=True)
else:
if alg == 'dbscan':
clusterer = DBSCAN(eps=eps, min_samples=min_samples, n_jobs=n_jobs, **kwargs).fit(data_arr)
elif alg == 'optics':
clusterer = OPTICS(min_samples=min_samples, eps=eps, n_jobs=n_jobs, **kwargs).fit(data_arr)
else:
raise Exception('Implemented algoriths are hdbscan, dbscan and optics.')
labels = clusterer.labels_
num_of_clusters_no_noise = set(labels)
num_of_clusters_no_noise.discard(-1)
num_of_clusters_no_noise = len(num_of_clusters_no_noise)
eps_per_cluster = pd.DataFrame({'eps': [eps] * num_of_clusters_no_noise})
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import streamlit as st
# To make things easier later, we're also importing numpy and pandas for
# working with sample data.
import numpy as np
import pandas as pd
import altair as alt
st.title('Bet Goals')
st.write('Bet goals is an app designed to help you make informed bets on soccer matches')
st.subheader('Suggested Betting Strategy:')
st.write('We suggest betting on games predicted by our model to end in a draw')
st.subheader('Rationale:')
st.write('In the past 4 years, the bookmaker\'s odds have always favoured either the home team or the away team. Not a single game has been backed by the bookmaker to end in a draw in this time frame. This systematic underestimation of the chances of a game ending in a draw lets the bookmaker overestimate the chances of a home win. To exploit this inefficiency, we built a model that can identify draws with 36% precision. Even though this means we will be wrong 2 out of 3 times, the odds on draws have historically been high enough to give us around 20% return on investment.')
st.subheader('Matchday 28:')
st.subheader('Bookmaker Odds:')
df=pd.read_csv('Betdata27.csv')
st.write(df[['Fixture:','Home win odds','Draw odds','Away win odds','Predicted Result']])
#option = st.selectbox(
# 'Which match would you like to bet on?',
# df['Fixture:'])
#df2
options_multi = st.multiselect('What fixtures would you like to bet on? (We suggest betting on games predicted to end in draws)', df['Fixture:'])
#st.write('You selected:', options_multi)
option_team=pd.DataFrame(columns=['Teamselected'])
option_amount=pd.DataFrame(columns=['Moneybet'])
option_poss_win=pd.DataFrame(columns=['Moneywon'])
option_prob_win=pd.DataFrame(columns=['Probwin'])
for i in range(len(options_multi)):
df2=df[['Home Team','Away Team','Draw option']].loc[df['Fixture:']==options_multi[i]]
option_temp = st.selectbox(
'Which team would you like to bet on in '+options_multi[i]+'?',
(df2.iloc[0,0],df2.iloc[0,1],df2.iloc[0,2]))
option_team=option_team.append({'Teamselected':option_temp}, ignore_index=True)
# option_team[['Teamselected']].iloc[i]=option_temp
d = {'Money': [10, 20, 50, 100]}
Betopt= pd.DataFrame(data=d)
widkey='slider'+str(i)
option_mtemp = st.slider('How much would you like to bet?', 0, 200, 0, key=widkey)
option_amount=option_amount.append({'Moneybet':option_mtemp}, ignore_index=True)
if df2.iloc[0,0]==option_temp:
a1=df[['Home win odds']].loc[df['Fixture:']==options_multi[i]]
t1=a1.iloc[0,0]
b1=df[['Home win']].loc[df['Fixture:']==options_multi[i]]*100
t2=b1.iloc[0,0]
money=(t1-1)*option_mtemp
option_poss_win=option_poss_win.append({'Moneywon': round(money,2)}, ignore_index=True)
option_prob_win=option_prob_win.append({'Probwin': round(t2,2)/100}, ignore_index=True)
if option_mtemp != 0:
'You have a '+str(round(t2,2))+'% chance of winning '+str(round(money,2))+' dollars by betting on '+option_temp+' in '+options_multi[i]+'.'
elif df2.iloc[0,1]==option_temp:
a1=df[['Away win odds']].loc[df['Fixture:']==options_multi[i]]
t1=a1.iloc[0,0]
b1=df[['Away Win']].loc[df['Fixture:']==options_multi[i]]*100
t2=b1.iloc[0,0]
money=(t1-1)*option_mtemp
option_poss_win=option_poss_win.append({'Moneywon': round(money,2)}, ignore_index=True)
option_prob_win=option_prob_win.append({'Probwin': round(t2,2)/100}, ignore_index=True)
if option_mtemp != 0:
'You have a '+str(round(t2,2))+'% chance of winning '+str(round(money,2))+' dollars by betting on '+option_temp+' in '+options_multi[i]+'.'
else:
a1=df[['Draw odds']].loc[df['Fixture:']==options_multi[i]]
t1=a1.iloc[0,0]
b1=df[['Draw']].loc[df['Fixture:']==options_multi[i]]*100
t2=b1.iloc[0,0]
money=(t1-1)*option_mtemp
option_poss_win=option_poss_win.append({'Moneywon': round(money,2)}, ignore_index=True)
option_prob_win=option_prob_win.append({'Probwin': round(t2,2)/100}, ignore_index=True)
if option_mtemp != 0:
'You have a '+str(round(t2,2))+'% chance of winning '+str(round(money,2))+' dollars by betting on a draw in '+options_multi[i]+'.'
combinations=np.zeros((2**len(options_multi),len(options_multi)))
for i in range(2**len(options_multi)):
temp=i
for j in range(len(options_multi)):
q=temp//2
mod=temp%2
combinations[i,j]=mod
temp=q
prob_dist = pd.DataFrame(columns=['Winning','Probability'])
import copy
import utils
import torch
import torch.nn as nn
from models.classifiers import RandomSamplerParallel, MultiLinear
import numpy as np
import random
import pandas as pd
import seaborn as sns
import os
import matplotlib.pyplot as plt
import argparse
class Path(object):
def __init__(self, model):
"""
Init the path from a given model (simply copy the architecture)
"""
self.model = copy.deepcopy(model)
self.points = []
self.reverse = False
@property
def step(self):
return 1 if not self.reverse else -1
def append(self, solution):
"""
Sparsify the solution found by the subnetwork procedure.
The solution is a FCN classifier.
"""
if type(solution) is not tuple:
solution = (solution,)
self.points.append(tuple(torch.nn.utils.parameters_to_vector(s.parameters()) for s in solution[::self.step]))
pass
def extend(self, other):
assert (not self.reverse)
self.points.extend(other.points[::other.step])
def connect(self):
"""Connect the last two sparsified solutions"""
pass
def eval_path(path, sizes, dirname):
global device
os.makedirs(dirname, exist_ok=True)
param_B = path.points[0][0]
A = copy.deepcopy(path.model).requires_grad_(False)
AB = copy.deepcopy(path.model).requires_grad_(False)
B = copy.deepcopy(path.model).requires_grad_(False)
# nn.utils.vector_to_parameters(param_B, B.parameters())
A.to(device)
AB.to(device)
B.to(device)
K = 11
lbdas = np.arange(1, K) / (K-1)
index = pd.MultiIndex.from_product([range(len(path.points)), range(0, K-1)], names=["point", "t"])
stats = ['loss', 'error']
sets = ["train", "test"]
names=['set', 'stat', 'try']
tries = [1]
columns=pd.MultiIndex.from_product([sets, stats, tries], names=names)
stats = pd.DataFrame(index=index, columns=columns)
reverse = False
# A = None
# for
for idx, pt in enumerate(path.points):
# pt is a tuple
if len(pt) == 1:
param_A = pt[0].to(device)
nn.utils.vector_to_parameters(param_A, model.parameters()) # param_A from previous iteration
# print(f"error: {err}, loss: {loss}")
# if idx == 0:
loss, err = eval_epoch(model, train_loader)
loss_test, err_test = eval_epoch(model, test_loader)
stats.loc[(idx), 0] = loss, err, loss_test, err_test # consider the index of the path at K-1 as the new point
continue
elif len(pt) == 2:
# len(pt) == 2
# the point is (B,C)
# have to create intermediate point AB and walk from AB to B
if not reverse:
param_B = pt[0].to(device)
# param_AB = copy.copy(A) # from the previous iterations
# param_A = pt[1]
nn.utils.vector_to_parameters(param_A, A.parameters()) # param_A from previous iteration
nn.utils.vector_to_parameters(param_B, AB.parameters()) # param_B from this iteration
nn.utils.vector_to_parameters(param_B, B.parameters()) # param_B from this iteration
# nn.
AB.main[-1].weight.data = A.main[-1].weight.data
AB.main[-1].bias.data = A.main[-1].bias.data
param_AB = nn.utils.parameters_to_vector(AB.parameters()).to(device)
param_A = pt[1].to(device) # next starting point
else:
# reverse mode, fetch the next point
# reverse role of AB and B, load A with next point
param_AB = pt[1].to(device)
param_A = path.points[idx+1][0].to(device) # the next point
nn.utils.vector_to_parameters(param_A, A.parameters()) # param_A from previous iteration
nn.utils.vector_to_parameters(param_AB, AB.parameters()) # param_B from this iteration
nn.utils.vector_to_parameters(param_AB, B.parameters()) # param_A from previous iteration
B.main[-1].weight.data = A.main[-1].weight.data
B.main[-1].bias.data = A.main[-1].bias.data
# else: # first point
param_B = nn.utils.parameters_to_vector(B.parameters()).to(device)
# B = pt
elif len(pt) == 3:
# at thispoint the last status for A is the model with
# joint between the two paths
reverse = True
param_A = pt[0].to(device)
param_B = pt[1].to(device)
param_C = pt[2].to(device)
nn.utils.vector_to_parameters(param_A, A.parameters())
nn.utils.vector_to_parameters(param_B, AB.parameters())
# nn.utils.vector_to_parameters(param_C, C.parameters())
AB.main[-1].weight.data = A.main[-1].weight.data
AB.main[-1].bias.data = A.main[-1].bias.data
param_AB = nn.utils.parameters_to_vector(AB.parameters()).to(device)
param_A = param_C
# BC.main[-1].weight = C.main[-1].weight
# BC.main[-1].bias = C.main[-1].bias
for tidx, t in enumerate(lbdas, 1):
pt = (1-t) * param_AB + t * param_B
nn.utils.vector_to_parameters(pt, model.parameters())
loss, err = eval_epoch(model, train_loader)
loss_test, err_test = eval_epoch(model, test_loader)
# print(f"error: {err}, loss: {loss}")
stats.loc[(idx-1+tidx//(K-1), tidx%(K-1))] = loss, err, loss_test, err_test # consider the index of the path at K-1 as the new point
# stats.loc[(idx-1+tidx//(K-1), tidx%(K-1))] = loss_test, err_test # consider the index of the path at K-1 as the new point
# model.to(torch.device('cpu'))
return stats
def plot_path(stats, quant_ds, quant_ref, dirname):
# df_plot = pd.melt(stats.reset_index(), id_vars=["point", "t"], ignore_index=False)
Idx = pd.IndexSlice
# df_plot.index.name = "index"
for setn in ["train", "test"]:
for stat in ["loss", "error"]:
# df_plot = stats.loc[:, Idx[stat, :]].reset_index()
ax = stats.plot(kind="line",
# sns.lineplot(
# data=df_plot,
y=(setn,stat)
)
# )
ax.axline((0,quant_ref[stat, setn]), (1, quant_ref[stat, setn]), ls=":", zorder=2, c='g')
ax.axline((0,quant_ds[stat, setn]), (1, quant_ds[stat, setn]), ls=":", zorder=2, c='r')
plt.savefig(fname=os.path.join(dirname, f'path_{setn}_{stat}.pdf'), bbox_inches="tight")
stats.to_csv(os.path.join(dirname, f'path.csv'))
plt.close("all")
def read_csv(fname):
stats = pd.read_csv(fname, header=[0,1,2], index_col=[0,1])
stat_idx = stats.columns.names.index("stat")
nlevels = stats.columns.nlevels
if "err" in stats.columns.get_level_values("stat"):
new_stat_lvl = [s.replace("err", "error") for s in stats.columns.get_level_values(stat_idx)]
# new_stat.sort()
levels = [stats.columns.get_level_values(i) if i != stat_idx else new_stat_lvl for i in range(nlevels)]
cols = pd.MultiIndex.from_arrays(levels, names=stats.columns.names)
stats.columns = cols
stats.to_csv(fname)
return stats
"""
Complement of a permutation with total number of elements
"""
def complement_perm(perm, total):
idx = 0
cperm= []
i = 0
while idx < total:
while i<len(perm) and idx == perm[i]:
idx += 1
i+=1
upb = perm[i] if i < len(perm) else total
cperm.extend(list(range(idx, upb)))
idx = upb
return np.array(cperm)
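# Illustrative usage sketch (not part of the original script; the helper name
# below is made up): the complement of the (sorted) indices [1, 3] within
# range(5) is [0, 2, 4].
def _example_complement_perm():
    assert list(complement_perm([1, 3], 5)) == [0, 2, 4]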
"""
Construct the point with the new weights from the solution and with unchanged middle input layer
"""
def point_B(model, solution, ntry, idx_layer, perm, cperm):
for idx, l in enumerate(solution.network):
if isinstance(l, nn.ReLU):
continue
elif isinstance(l, nn.Linear):
# simply copy the weights to the target model
model.main[idx] = copy.deepcopy(l)
# if idx == idx_layer - 2:
# ) # also work on the bias (shift the order of the values)
elif isinstance(l, RandomSamplerParallel): # the random selection of features
# the random sampling, only copy the try one
continue
# selection = copy.deepcopy(l.random_perms[ntry, :])
elif isinstance(l, MultiLinear): # after the selection of features
# weights and bias of the original model
# different case if at the layer of starting point
weight = (model.main[idx-1].weight) # the previous weight, should be block diagonal
bias = (model.main[idx-1].bias)
wl = l.weight[ntry, :, :].transpose(0, 1) # the (transposed) weight of the solution
sz_wl = wl.size()
sz_w = weight.size()
m = sz_wl[0] # number of the dropped units
p = sz_wl[1]
# n = sz_w[0]
# d = sz_w[1]
# if idx == idx_layer+1: # modification of the previous layer
# at the selection layer
#construct the new weight with first rows fully connected and then solution | 0
# pass
if idx == idx_layer+1: # the actual weight to modify
# will copy wl to the bottom rows of weight
weight[-m:, perm] = wl # no block 0? only fill in the weights in the permutation
weight[-m:, cperm] = 0 # block 0, only on the correct weights
# copy the bias values at the correct location, keep the
# original on top
bias[-m:] = l.bias[ntry, 0, :]
# weight[:, :-p].zero_() # output weight to 0
else: # for indices after the first layer
weight[-m:,-p:] = wl
bias[-m:] = l.bias[ntry, 0, :]
if idx == len(solution.network) - 1:
weight[:, :-p].zero_() # set the output weight to zero
# bias[:, d-p].zero_() # output weight to 0
# model.main[idx-1].weight = nn.Parameter(weight)
# model.main[idx-1].biais = nn.Parameter(bias)
else:
pass
return model
"""
construct the model modifying the input layer so that it can be the starting point for the next model
"""
def point_C(model, solution, ntry, idx_layer, perm, cperm):
for idx, l in enumerate(solution.network):
if isinstance(l, nn.ReLU):
continue
elif isinstance(l, nn.Linear):
if idx == idx_layer - 2:
weight = model.main[idx].weight
bias = model.main[idx].bias
weight[:len(perm), : ] = weight[perm]
weight[len(perm):, :] = 0
bias[:len(perm) ] = bias[perm]
bias[len(perm):] = 0
# model.main[idx] = copy.deepcopy(l)
# model.main[idx].weight = nn.Parameter(weight)
# model.main[idx].bias = nn.Parameter(bias)
else:
continue
# if idx == idx_layer - 2:
# l.bias[cperm]],
# dim=0)
# ) # also work on the bias (shift the order of the values)
elif isinstance(l, RandomSamplerParallel): # the random selection of features
# the random sampling, only copy the try one
continue
# selection = copy.deepcopy(l.random_perms[ntry, :])
elif isinstance(l, MultiLinear): # after the selection of features
# weights and bias of the original model
# different case if at the layer of starting point
weight = (model.main[idx-1].weight) # the previous weight, should be block diagonal
bias = (model.main[idx-1].bias)
wl = l.weight[ntry, :, :].transpose(0, 1) # the (transposed) weight of the solution
sz_wl = wl.size()
sz_w = weight.size()
m = sz_wl[0]
p = sz_wl[1]
n = sz_w[0]
d = sz_w[1] # total dimension
# p = sz_w[1] - sz_wl[0] # size of the permutation
if idx == idx_layer+1: # modification of the previous layer
# have to reorder the output units so that the removed ones are
# at the bottom
# put the xi parameters to the top
# wl = torch.cat([wl, torch.zeros(m, d - p)], dim=1)
weight.zero_()
weight[:m, :p] = wl
bias.zero_()
bias[:m] = l.bias[ntry, 0, :]
# at the selection layer
#construct the new weight with first rows fully connected and then solution | 0
# pass
# elif idx == idx_layer+3: # the actual weight to modify
# have to permute the weights
# weight[:m, :len(perm)] = xi[perm, :] #
# weight = (model.main[idx-1].weight) # the previous weight, should be block diagonal
# bias = (model.main[idx-1].bias)
# will copy wl to the bottom rows of weight
# bias[m:, :] = l.bias[ntry, 1, :]
else: # for indices after the first layer
# xi = copy.copy(weight[-m:, -p:])
# save previous value
xi = weight[-m:, -p:].clone()
weight.zero_()
# put it on "top"
weight[:m, :p] = xi
# save previous value
b = bias[-m:].clone()
bias.zero_()
# put it on "top"
bias[:m] = b
return model
def sparsify(solution, model, path, ntry=1):
# requires a model and to copy the different weights into it ?
# record two new points in the path, cf scheme number 3 and 6
# assume the model being in the correct previous configuration, i.e. the
# I_p neurons are non zero and the others are zero for p \in {l+1, ...,
# L-1}
selection = None
idx_layer = [isinstance(l, RandomSamplerParallel) for l in solution.network].index(True)
perm = solution.network[idx_layer].random_perms[ntry, :].view(-1).numpy()
total = solution.network[idx_layer].N
cperm = complement_perm(perm, total)
B = copy.deepcopy(point_B(model, solution, ntry, idx_layer, perm, cperm))
B.requires_grad_(False)
# path.append(model)
C = copy.deepcopy(point_C(model, solution, ntry, idx_layer, perm, cperm))
C.requires_grad_(False)
path.append((B,C))
return path
"""
Connects the last points for a path
"""
def last_layer(solution, model, path):
sizes = []
for idx, l in enumerate(solution.network):
if isinstance(l, nn.Linear):
m, p = l.weight.size()
sizes.append(p)
model.main[idx].weight[-m:, -p:] = l.weight
model.main[idx].bias[-m:] = l.bias
if idx == len(solution.network) - 1:
sizes.append(m)
model.main[idx].weight[:m, :p].zero_()
model.main[idx].bias[:m].zero_()
B = copy.deepcopy(model)
C = copy.deepcopy(downsideup(model, sizes))
path.append((B, C))
return path
"""
m1 and m2 are two models (sparsified)
"""
def connect_two_models(path, model, target, sizes):
global device
A = copy.deepcopy(model)
# upsidedown(target, sizes)
B = copy.deepcopy(flip_copy_incoming(model, target, sizes))
# path.append(model)
C = copy.deepcopy(downsideup(model, sizes))
path.append((A,B,C))
# model.to(device)
# loss, err = eval_epoch(model, train_loader)
# print(f"error: {err}, loss: {loss}")
# model.to(torch.device('cpu'))
return path
"""
swap top and bottom neurons in the model, set the bottom to 0
"""
def downsideup(model, sz):
idx = 1
for l in model.main:
if isinstance(l, nn.Linear):
nin, nout = sz[idx-1], sz[idx]
l.weight[:nout, :nin] = l.weight[-nout:, -nin:]
l.weight[-nout:, -nin:].zero_()
l.bias[:nout] = l.bias[-nout:]
l.bias[-nout:].zero_()
idx += 1
else:
continue
return model
"""
copy incoming connections for the layers from target to model
assume the weights of target are on the up side and do not intersect with the weights of the model
"""
def flip_copy_incoming(model, target, sz):
sidx = 1
for lidx, layer in enumerate(model.main):
if isinstance(layer, nn.Linear):
nin, nout = sz[sidx-1], sz[sidx]
layer.weight[-nout:, -nin:] = target.main[lidx].weight[:nout, :nin]
layer.bias[-nout:] = target.main[lidx].bias[:nout]
# layer.bias[:nout].zero_()
sidx += 1
if lidx == len(model.main) -1 :
layer.weight[:nout, :nin].zero_()
else:
continue
return model
#
ce_loss = nn.CrossEntropyLoss(reduction='none')
def zero_one_loss(x, targets):
''' x: TxBxC
targets: Bx1
returns: err of size T
'''
return (x.argmax(dim=-1)!=targets).float().mean(dim=-1)
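# Illustrative usage sketch (not part of the original script; the helper name
# below is made up): zero_one_loss takes the argmax over the class dimension
# and averages the mismatches with the targets.
def _example_zero_one_loss():
    logits = torch.tensor([[2.0, 0.1], [0.2, 1.5], [3.0, 0.0], [0.0, 3.0]])
    targets = torch.tensor([0, 1, 1, 1])
    assert abs(zero_one_loss(logits, targets).item() - 0.25) < 1e-6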
def select_try(model, solution, ntry):
idx_layer = [isinstance(l, RandomSamplerParallel) for l in solution.network].index(True)
perm = solution.network[idx_layer].random_perms[ntry, :].view(-1).numpy()
total = solution.network[idx_layer].N
cperm = complement_perm(perm, total)
for idx, l in enumerate(solution.network):
if isinstance(l, RandomSamplerParallel):
pass
elif isinstance(l, MultiLinear):
m, p = l.weight.size()
model.main[idx-1].weight.zero_()
model.main[idx-1].weight.data[:m, :p] = l.weight.data
pass
return model
def eval_epoch(model, dataloader, ntry=None):
global device
model.eval()
model.to(device)
#loss_hidden_tot = np.zeros(classifier.L) # for the
loss_mean = 0
err_mean = 0
#ones_hidden = torch.ones(classifier.L, device=device, dtype=dtype)
with torch.no_grad():
for idx, (x, y) in enumerate(dataloader):
x = x.to(device)
y = y.to(device)
out_class = model(x) # BxC, # each output for each layer
if ntry is not None and out_class.dim() == 3:
out_class = out_class[ntry, :, :]
loss = ce_loss(out_class, y) # LxTxB
err = zero_one_loss(out_class, y) # T
err_mean = (idx * err_mean + err.detach().cpu().numpy()) / (idx+1) # mean error
loss_mean = (idx * loss_mean + loss.mean(dim=-1).detach().cpu().numpy()) / (idx+1) # mean loss
# loss_hidden_tot = (idx * loss_hidden_tot + loss_hidden.mean(dim=1).detach().cpu().numpy()) / (idx+1)
#break
model.to(torch.device('cpu'))
return loss_mean, err_mean
if __name__ == "__main__":
parser = argparse.ArgumentParser('Creation and evaluation of a path connecting two solutions')
parser_device = parser.add_mutually_exclusive_group()
parser_device.add_argument('--cpu', action='store_true', dest='cpu', help='force the cpu model')
parser_device.add_argument('--cuda', action='store_false', dest='cpu')
parser.add_argument('--nameA', default='A', help = "name of the experiment A folder")
parser.add_argument('--nameB', default='B', help = "name of the experiment B folder")
parser.add_argument('--M1', help="the first model to connect (checkpoint)")
parser.add_argument('--M2', help="the second model to connect (checkpoint)")
parser.add_argument('--output', help="directory for outputs (if None will be where the original models were)")
parser.set_defaults(cpu=False)
args = parser.parse_args()
use_cuda = torch.cuda.is_available() and not args.cpu
num_gpus = torch.cuda.device_count() # random index for the GPU
gpu_index = random.choice(range(num_gpus)) if num_gpus > 0 else 0
device = torch.device('cuda' if use_cuda else 'cpu', gpu_index)
fn_log_model = os.path.join(os.path.dirname(args.M1), 'logs.txt')
archi_model = utils.parse_archi(fn_log_model)
fn_model = args.M1
chkpt_model = torch.load(fn_model, map_location=lambda storage, location: storage)
model = copy.deepcopy(utils.construct_FCN(archi_model))
path = Path(model)
args_model = chkpt_model["args"]
# args_model = chkpt_model["args"]
# model.requires_grad_(False)
# selsol =model
# path.extend(selsol)
n_layer = utils.count_hidden_layers(model)
ntry = 1
imresize=None
train_dataset, test_dataset, num_chs = utils.get_dataset(dataset=args_model.dataset,
dataroot=args_model.dataroot,
imresize =imresize,
normalize= args_model.normalize if hasattr(args_model, 'normalize') else False,
)
# print('Transform: {}'.format(train_dataset.transform), file=logs, flush=True)
train_loader, size_train,\
test_loader, size_test = utils.get_dataloader( train_dataset,
test_dataset, batch_size
=args_model.batch_size,
size_max=100, #args_model.size_max,
collate_fn=None,
pin_memory=True)
paths = dict()
models = dict()
quant_ref = pd.DataFrame()
import pandas as pd
import pytest
import featuretools as ft
from featuretools.entityset import EntitySet, Relationship
from featuretools.utils.cudf_utils import pd_to_cudf_clean
from featuretools.utils.gen_utils import import_or_none
cudf = import_or_none('cudf')
# TODO: Fix vjawa
@pytest.mark.skipif('not cudf')
def test_create_entity_from_cudf_df(pd_es):
cleaned_df = pd_to_cudf_clean(pd_es["log"].df)
log_cudf = cudf.from_pandas(cleaned_df)
print(pd_es["log"].variable_types)
cudf_es = EntitySet(id="cudf_es")
cudf_es = cudf_es.entity_from_dataframe(
entity_id="log_cudf",
dataframe=log_cudf,
index="id",
time_index="datetime",
variable_types=pd_es["log"].variable_types
)
pd.testing.assert_frame_equal(cleaned_df, cudf_es["log_cudf"].df.to_pandas(), check_like=True)
@pytest.mark.skipif('not cudf')
def test_create_entity_with_non_numeric_index(pd_es, cudf_es):
df = pd.DataFrame({"id": ["A_1", "A_2", "C", "D"],
"values": [1, 12, -34, 27]})
cudf_df = cudf.from_pandas(df)
pd_es.entity_from_dataframe(
entity_id="new_entity",
dataframe=df,
index="id")
cudf_es.entity_from_dataframe(
entity_id="new_entity",
dataframe=cudf_df,
index="id",
variable_types={"id": ft.variable_types.Id, "values": ft.variable_types.Numeric})
pd.testing.assert_frame_equal(pd_es['new_entity'].df.reset_index(drop=True), cudf_es['new_entity'].df.to_pandas())
@pytest.mark.skipif('not cudf')
def test_create_entityset_with_mixed_dataframe_types(pd_es, cudf_es):
df = pd.DataFrame({"id": [0, 1, 2, 3],
"values": [1, 12, -34, 27]})
cudf_df = cudf.from_pandas(df)
# Test error is raised when trying to add cudf entity to entityset with existing pandas entities
err_msg = "All entity dataframes must be of the same type. " \
"Cannot add entity of type {} to an entityset with existing entities " \
"of type {}".format(type(cudf_df), type(pd_es.entities[0].df))
with pytest.raises(ValueError, match=err_msg):
pd_es.entity_from_dataframe(
entity_id="new_entity",
dataframe=cudf_df,
index="id")
# Test error is raised when trying to add pandas entity to entityset with existing cudf entities
err_msg = "All entity dataframes must be of the same type. " \
"Cannot add entity of type {} to an entityset with existing entities " \
"of type {}".format(type(df), type(cudf_es.entities[0].df))
with pytest.raises(ValueError, match=err_msg):
cudf_es.entity_from_dataframe(
entity_id="new_entity",
dataframe=df,
index="id")
@pytest.mark.skipif('not cudf')
def test_add_last_time_indexes():
pd_es = EntitySet(id="pd_es")
cudf_es = EntitySet(id="cudf_es")
sessions = pd.DataFrame({"id": [0, 1, 2, 3],
"user": [1, 2, 1, 3],
"time": [pd.to_datetime('2019-01-10'),
pd.to_datetime('2019-02-03'),
pd.to_datetime('2019-01-01'),
pd.to_datetime('2017-08-25')],
"strings": ["I am a string",
"23",
"abcdef ghijk",
""]})
sessions_cudf = cudf.from_pandas(sessions)
sessions_vtypes = {
"id": ft.variable_types.Id,
"user": ft.variable_types.Id,
"time": ft.variable_types.DatetimeTimeIndex,
"strings": ft.variable_types.NaturalLanguage
}
transactions = pd.DataFrame({"id": [0, 1, 2, 3, 4, 5],
"session_id": [0, 0, 1, 2, 2, 3],
"amount": [1.23, 5.24, 123.52, 67.93, 40.34, 50.13],
"time": [pd.to_datetime('2019-01-10 03:53'),
pd.to_datetime('2019-01-10 04:12'),
pd.to_datetime('2019-02-03 10:34'),
pd.to_datetime('2019-01-01 12:35'),
pd.to_datetime('2019-01-01 12:49'),
pd.to_datetime('2017-08-25 04:53')]})
transactions_cudf = cudf.from_pandas(transactions)
transactions_vtypes = {
"id": ft.variable_types.Id,
"session_id": ft.variable_types.Id,
"amount": ft.variable_types.Numeric,
"time": ft.variable_types.DatetimeTimeIndex,
}
pd_es.entity_from_dataframe(entity_id="sessions", dataframe=sessions, index="id", time_index="time")
cudf_es.entity_from_dataframe(entity_id="sessions", dataframe=sessions_cudf, index="id", time_index="time", variable_types=sessions_vtypes)
pd_es.entity_from_dataframe(entity_id="transactions", dataframe=transactions, index="id", time_index="time")
cudf_es.entity_from_dataframe(entity_id="transactions", dataframe=transactions_cudf, index="id", time_index="time", variable_types=transactions_vtypes)
new_rel = Relationship(pd_es["sessions"]["id"], pd_es["transactions"]["session_id"])
cudf_rel = Relationship(cudf_es["sessions"]["id"], cudf_es["transactions"]["session_id"])
pd_es = pd_es.add_relationship(new_rel)
cudf_es = cudf_es.add_relationship(cudf_rel)
assert pd_es['sessions'].last_time_index is None
assert cudf_es['sessions'].last_time_index is None
pd_es.add_last_time_indexes()
cudf_es.add_last_time_indexes()
pd.testing.assert_series_equal(pd_es['sessions'].last_time_index.sort_index(),
cudf_es['sessions'].last_time_index.to_pandas().sort_index(), check_names=False)
@pytest.mark.skipif('not cudf')
def test_create_entity_with_make_index():
values = [1, 12, -23, 27]
df = pd.DataFrame({"values": values})
cudf_df = cudf.from_pandas(df)
cudf_es = EntitySet(id="cudf_es")
vtypes = {"values": ft.variable_types.Numeric}
cudf_es.entity_from_dataframe(entity_id="new_entity", dataframe=cudf_df, make_index=True, index="new_index", variable_types=vtypes)
expected_df = pd.DataFrame({"new_index": range(len(values)), "values": values})
pd.testing.assert_frame_equal(expected_df, cudf_es['new_entity'].df.to_pandas().sort_index())
@pytest.mark.skipif('not cudf')
def test_single_table_cudf_entityset():
primitives_list = ['absolute', 'is_weekend', 'year', 'day', 'num_characters', 'num_words']
cudf_es = EntitySet(id="cudf_es")
df = pd.DataFrame({"id": [0, 1, 2, 3],
"values": [1, 12, -34, 27],
"dates": [pd.to_datetime('2019-01-10'),
pd.to_datetime('2019-02-03'),
pd.to_datetime('2019-01-01'),
pd.to_datetime('2017-08-25')],
"strings": ["I am a string",
"23",
"abcdef ghijk",
""]})
values_dd = cudf.from_pandas(df)
vtypes = {
"id": ft.variable_types.Id,
"values": ft.variable_types.Numeric,
"dates": ft.variable_types.Datetime,
"strings": ft.variable_types.NaturalLanguage
}
cudf_es.entity_from_dataframe(entity_id="data",
dataframe=values_dd,
index="id",
variable_types=vtypes)
cudf_fm, _ = ft.dfs(entityset=cudf_es,
target_entity="data",
trans_primitives=primitives_list)
pd_es = ft.EntitySet(id="pd_es")
pd_es.entity_from_dataframe(entity_id="data",
dataframe=df,
index="id",
variable_types={"strings": ft.variable_types.NaturalLanguage})
fm, _ = ft.dfs(entityset=pd_es,
target_entity="data",
trans_primitives=primitives_list)
cudf_computed_fm = cudf_fm.to_pandas().set_index('id').loc[fm.index][fm.columns]
# NUM_WORDS(strings) is int32 in koalas for some reason
pd.testing.assert_frame_equal(fm, cudf_computed_fm, check_dtype=False)
@pytest.mark.skipif('not cudf')
def test_single_table_cudf_entityset_ids_not_sorted():
primitives_list = ['absolute', 'is_weekend', 'year', 'day', 'num_characters', 'num_words']
cudf_es = EntitySet(id="cudf_es")
df = pd.DataFrame({"id": [2, 0, 1, 3],
"values": [1, 12, -34, 27],
"dates": [pd.to_datetime('2019-01-10'),
pd.to_datetime('2019-02-03'),
pd.to_datetime('2019-01-01'),
pd.to_datetime('2017-08-25')],
"strings": ["I am a string",
"23",
"abcdef ghijk",
""]})
values_dd = cudf.from_pandas(df)
vtypes = {
"id": ft.variable_types.Id,
"values": ft.variable_types.Numeric,
"dates": ft.variable_types.Datetime,
"strings": ft.variable_types.NaturalLanguage
}
cudf_es.entity_from_dataframe(entity_id="data",
dataframe=values_dd,
index="id",
variable_types=vtypes)
cudf_fm, _ = ft.dfs(entityset=cudf_es,
target_entity="data",
trans_primitives=primitives_list)
pd_es = ft.EntitySet(id="pd_es")
pd_es.entity_from_dataframe(entity_id="data",
dataframe=df,
index="id",
variable_types={"strings": ft.variable_types.NaturalLanguage})
fm, _ = ft.dfs(entityset=pd_es,
target_entity="data",
trans_primitives=primitives_list)
# Make sure both indexes are sorted the same
pd.testing.assert_frame_equal(fm, cudf_fm.to_pandas().set_index('id').loc[fm.index], check_dtype=False)
@pytest.mark.skipif('not cudf')
def test_single_table_cudf_entityset_with_instance_ids():
primitives_list = ['absolute', 'is_weekend', 'year', 'day', 'num_characters', 'num_words']
instance_ids = [0, 1, 3]
cudf_es = EntitySet(id="cudf_es")
df = pd.DataFrame({"id": [0, 1, 2, 3],
"values": [1, 12, -34, 27],
"dates": [pd.to_datetime('2019-01-10'),
pd.to_datetime('2019-02-03'),
pd.to_datetime('2019-01-01'),
pd.to_datetime('2017-08-25')],
"strings": ["I am a string",
"23",
"abcdef ghijk",
""]})
values_dd = cudf.from_pandas(df)
vtypes = {
"id": ft.variable_types.Id,
"values": ft.variable_types.Numeric,
"dates": ft.variable_types.Datetime,
"strings": ft.variable_types.NaturalLanguage
}
cudf_es.entity_from_dataframe(entity_id="data",
dataframe=values_dd,
index="id",
variable_types=vtypes)
cudf_fm, _ = ft.dfs(entityset=cudf_es,
target_entity="data",
trans_primitives=primitives_list,
instance_ids=instance_ids)
pd_es = ft.EntitySet(id="pd_es")
pd_es.entity_from_dataframe(entity_id="data",
dataframe=df,
index="id",
variable_types={"strings": ft.variable_types.NaturalLanguage})
fm, _ = ft.dfs(entityset=pd_es,
target_entity="data",
trans_primitives=primitives_list,
instance_ids=instance_ids)
# Make sure both indexes are sorted the same
pd.testing.assert_frame_equal(fm, cudf_fm.to_pandas().set_index('id').loc[fm.index], check_dtype=False)
@pytest.mark.skipif('not cudf')
def test_single_table_cudf_entityset_single_cutoff_time():
primitives_list = ['absolute', 'is_weekend', 'year', 'day', 'num_characters', 'num_words']
cudf_es = EntitySet(id="cudf_es")
df = pd.DataFrame({"id": [0, 1, 2, 3],
"values": [1, 12, -34, 27],
"dates": [pd.to_datetime('2019-01-10'),
pd.to_datetime('2019-02-03'),
pd.to_datetime('2019-01-01'),
pd.to_datetime('2017-08-25')],
"strings": ["I am a string",
"23",
"abcdef ghijk",
""]})
values_dd = cudf.from_pandas(df)
vtypes = {
"id": ft.variable_types.Id,
"values": ft.variable_types.Numeric,
"dates": ft.variable_types.Datetime,
"strings": ft.variable_types.NaturalLanguage
}
cudf_es.entity_from_dataframe(entity_id="data",
dataframe=values_dd,
index="id",
variable_types=vtypes)
cudf_fm, _ = ft.dfs(entityset=cudf_es,
target_entity="data",
trans_primitives=primitives_list,
cutoff_time=pd.Timestamp("2019-01-05 04:00"))
pd_es = ft.EntitySet(id="pd_es")
pd_es.entity_from_dataframe(entity_id="data",
dataframe=df,
index="id",
variable_types={"strings": ft.variable_types.NaturalLanguage})
fm, _ = ft.dfs(entityset=pd_es,
target_entity="data",
trans_primitives=primitives_list,
cutoff_time=pd.Timestamp("2019-01-05 04:00"))
# Make sure both indexes are sorted the same
pd.testing.assert_frame_equal(fm, cudf_fm.to_pandas().set_index('id').loc[fm.index], check_dtype=False)
@pytest.mark.skipif('not cudf')
def test_single_table_cudf_entityset_cutoff_time_df():
primitives_list = ['absolute', 'is_weekend', 'year', 'day', 'num_characters', 'num_words']
cudf_es = EntitySet(id="cudf_es")
df = pd.DataFrame({"id": [0, 1, 2],
"values": [1, 12, -34],
"dates": [pd.to_datetime('2019-01-10'),
pd.to_datetime('2019-02-03'),
pd.to_datetime('2019-01-01')]})
# This file is part of me-types-mapper.
#
#
# Copyright © 2021 Blue Brain Project/EPFL
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the Apache License, Version 2.0.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License, Version 2.0 for more details.
#
# You should have received a copy of the Apache License, Version 2.0
# along with this program. If not, see <https://www.apache.org/licenses/LICENSE-2.0>.
import os
import numpy as np
import networkx as nx
import pandas as pd
import matplotlib.pyplot as plt
def unique_elements(array):
"""Return a list of unique elements of an array."""
unique = []
for x in array:
if x not in unique:
unique.append(x)
return unique
def count_elements(array):
"""Return as a pandas DataFrame unique elements and the associated counts of an array."""
unq = unique_elements(array)
return pd.DataFrame([len(array[[y==x for y in array]]) for x in unq], index=unq, columns=['counts'])
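# Illustrative usage sketch (not part of the original module; the helper name
# _example_count_elements and the label values are hypothetical).
def _example_count_elements():
    labels = np.array(["L5_TPC", "L5_TPC", "L23_BPC", "L5_TPC"])
    # unique_elements keeps the order of first appearance: ['L5_TPC', 'L23_BPC']
    print(unique_elements(labels))
    # count_elements returns a one-column 'counts' DataFrame indexed by label
    print(count_elements(labels))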
def convert_edict_to_dataframe(edict):
pd_dfs = []
for cell in edict:
values_vec = []
column_names = []
for protocol in edict[cell]:
for dict_tmp in edict[cell][protocol]["soma"]:
column_names.append(dict_tmp["feature"] + "|" + protocol)
values_vec.append(dict_tmp["val"][0])
pd_dfs.append(pd.DataFrame(values_vec, index=column_names, columns=[cell]))
return pd.concat(pd_dfs, axis=1)
"""Helper scintific module
Module serves for custom methods to support Customer Journey Analytics Project
"""
# IMPORTS
# -------
# Standard libraries
import re
import ipdb
import string
import math
# 3rd party libraries
from google.cloud import bigquery
import numpy as np
import pandas as pd
import nltk
nltk.download(['wordnet', 'stopwords'])
STOPWORDS = nltk.corpus.stopwords.words('english')
from scipy import stats
import statsmodels.api as sm
from statsmodels.formula.api import ols
import scikit_posthocs as sp
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import GridSearchCV
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import ComplementNB
from sklearn.metrics import f1_score
from sklearn.decomposition import PCA
import rpy2
import rpy2.rlike.container as rlc
from rpy2 import robjects
from rpy2.robjects.vectors import FloatVector
from rpy2.robjects.vectors import ListVector
from rpy2.robjects.vectors import StrVector
from rpy2.robjects import pandas2ri
from matplotlib import pyplot as plt
import seaborn as sns
# MODULE FUNCTIONS
# ----------------
def get_dissimilarity(df, normalize=True):
'''Calculates dissimilarity of observations from average
observation.
Args:
df: Data as dataframe of shape (# observations, # variables)
Returns:
diss: Calculated dissimilarity as series of size (# observations)
'''
# normalize data
if normalize:
df_scaled = StandardScaler().fit_transform(df)
df = pd.DataFrame(df_scaled, columns=df.columns, index=df.index)
else:
raise Exception('Not implemented')
# calculate multivariate dissimilarity
diss = ((df - df.mean())**2).sum(axis=1)**(1/2)
return diss
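# Illustrative usage sketch (not part of the original module; data and the helper
# name _example_get_dissimilarity are hypothetical). After standardization, the
# dissimilarity of observation i is diss_i = sqrt(sum_j (z_ij - mean_j)**2),
# i.e. its Euclidean distance from the average observation.
def _example_get_dissimilarity():
    rng = np.random.RandomState(0)
    df = pd.DataFrame(rng.normal(size=(100, 3)),
                      columns=['recency', 'frequency', 'monetary'])
    diss = get_dissimilarity(df)
    # diss is a Series aligned with df.index; large values flag multivariate outliers
    print(diss.sort_values(ascending=False).head())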
def split_data(df, diss_var, dataset_names, threshold, dis_kws={}, **split_kws):
'''Function randomly splits data into two sets, calculates multivariate
dissimilarity and keeps all outliers determined by the dissimilarity
threshold in each set.
Args:
df: Data as dataframe of shape (# samples, # features)
diss_var: Names of variables to calculate dissimilarity measure
as list of strings
dataset_names: Names of datasets as list of strings
threshold: Threshold for dissimilarity measure
to determine outliers as float
dis_kws: Key word arguments of dissimilarity function as dictionary
split_kws: Key word arguments of train_test_split function
Returns:
datasets: Dictionary of splitted datasets as dataframe
'''
# calculate dissimilarity series
dis_kws['normalize'] = (True if 'normalize' not in dis_kws
else dis_kws['normalize'])
dissimilarity = get_dissimilarity(df[diss_var], dis_kws['normalize'])
# Pop outlier customers
ext_mask = (dissimilarity > threshold)
X_ext = df.loc[ext_mask]
X = df.loc[~ext_mask]
# drop one random sample to keep even samples in dataset
# for purpose of having same number of samples after splitting
if X.shape[0] % 2 != 0:
split_kws['random_state'] = (1 if 'random_state' not in split_kws
else split_kws['random_state'])
remove_n = 1
drop_indices = (X.sample(remove_n,
random_state=split_kws['random_state'])
.index)
X = X.drop(drop_indices)
# Random split of sample in two groups
Xa, Xb = train_test_split(X, **split_kws)
datasets = [Xa, Xb]
# add outliers to each group
datasets = {dataset_name: dataset
for dataset_name, dataset in zip(dataset_names, datasets)}
for name, dataset in datasets.items():
datasets[name] = dataset.append(X_ext)
return datasets
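# Illustrative usage sketch (not part of the original module; data, threshold and
# the helper name are hypothetical). Assumes a pandas version where
# DataFrame.append is available, as used inside split_data.
def _example_split_data():
    rng = np.random.RandomState(1)
    df = pd.DataFrame(rng.lognormal(size=(200, 3)),
                      columns=['recency', 'frequency', 'monetary'])
    datasets = split_data(df,
                          diss_var=['recency', 'frequency', 'monetary'],
                          dataset_names=['calibration', 'validation'],
                          threshold=4.0,
                          test_size=0.5,
                          random_state=1)
    # outliers above the threshold end up in both sets
    print({name: data.shape for name, data in datasets.items()})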
def analyze_cluster_solution(df, vars_, labels, **kws):
'''Analyzes cluster solution. Following analyses are done:
1) Hypothesis testing of clusters averages difference
a) One way ANOVA
b) ANOVA assumptions
- residuals normality test: Shapiro-Wilk test
- equal variances test: Levene's test
c) Kruskal-Wallis non-parametric test
d) All-pairs non-parametric test, Conover test by default
2) Cluster profile visualization
3) Cluster scatterplot visualization
Args:
df: Dataset as pandas dataframe
of shape(# observations, # variables)
vars_: Clustering variables as list of strings
labels: Variable holding cluster labels as string
kws: Keyword arguments of post-hoc test
Returns:
summary: Dataframe of hypothesis tests
post_hoc: Dict of post-hoc tests for each clustering variable
prof_ax: Axes of profile visualization
clst_pg: PairGrid of cluster visualization
'''
def color_not_significant_red(val, signf=0.05):
'''Takes a scalar and returns a string with the css property
`'color: red'` for non significant p_value
'''
color = 'red' if val > signf else 'black'
return 'color: %s' % color
# get number of seeds
num_seeds = len(df.groupby(labels).groups)
# run tests
kws['post_hoc_fnc'] = (sp.posthoc_conover if 'post_hoc_fnc' not in kws
else kws['post_hoc_fnc'])
summary, post_hoc = profile_cluster_labels(
df, labels, vars_, **kws)
# print hypothesis tests
str_ = 'PROFILE SUMMARY FOR {}'.format(labels.upper())
print(str_ + '\n' + '-' * len(str_) + '\n')
str_ = 'Hypothesis testing of clusters averages difference'
print(str_ + '\n' + '-' * len(str_))
display(summary.round(2))
# print post-hoc tests
str_ = '\nPost-hoc test: {}'.format(kws['post_hoc_fnc'].__name__)
print(str_ + '\n' + '-' * len(str_) + '\n')
for var in post_hoc:
print('\nclustering variable:', var)
display(post_hoc[var].round(2)
.style.applymap(color_not_significant_red))
# print profiles
str_ = '\nProfile visualization'
print(str_ + '\n' + '-' * len(str_))
prof_ax = (df
.groupby(labels)
[vars_]
.mean()
.transpose()
.plot(title='Cluster Profile')
)
plt.ylabel('Standardized scale')
plt.xlabel('Clustering variables')
plt.show()
# print scatterplots
str_ = '\nClusters visualization'
print(str_ + '\n' + '-' * len(str_))
clst_pg = sns.pairplot(x_vars=['recency', 'monetary'],
y_vars=['frequency', 'monetary'],
hue=labels, data=df, height=3.5)
clst_pg.set(yscale='log')
clst_pg.axes[0, 1].set_xscale('log')
clst_pg.fig.suptitle('Candidate Solution: {} seeds'
.format(num_seeds), y=1.01)
plt.show()
return summary, post_hoc, prof_ax, clst_pg
def profile_cluster_labels(df, group, outputs, post_hoc_fnc=sp.posthoc_conover):
'''Test distinctiveness of cluster (group) labels across clustering (output)
variables using one-way ANOVA, Shapiro-Wilk normality test,
Levene's test of equal variances, Kruskal-Wallis non-parametric test and
selected all-pairs post-hoc test for each output variable.
Args:
df: Data with clustering variables and candidate solutions
as dataframe of shape (# samples, # of variables +
candidate solutions)
group: group variables for hypothesis testing as string
outputs: output variables for hypothesis testing as list of strings
Returns:
summary: Dataframe of hypothesis tests for each output
post_hoc: Dict of post-hoc test results for each output
'''
# initiate summmary dataframe
summary = (df.groupby(group)[outputs]
.agg(['mean', 'median'])
.T.unstack(level=-1)
.swaplevel(axis=1)
.sort_index(level=0, axis=1))
# initiate posthoc dictionary
post_hoc = {}
# cycle over outputs
for i, output in enumerate(outputs):
# split group levels
levels = [df[output][df[group] == level]
for level in df[group].unique()]
# calculate F statistics and p-value
_, summary.loc[output, 'anova_p'] = stats.f_oneway(*levels)
# calculate leven's test for equal variances
_, summary.loc[output, 'levene_p'] = stats.levene(*levels)
# check if residuals are normally distributed by shapiro wilk test
model = ols('{} ~ C({})'.format(output, group), data=df).fit()
_, summary.loc[output, 'shapiro_wilk_p'] = stats.shapiro(model.resid)
# calculate H statistics and p-value for Kruskal Wallis test
_, summary.loc[output, 'kruskal_wallis_p'] = stats.kruskal(*levels)
# multiple comparison Conover's test
post_hoc[output] = post_hoc_fnc(
df, val_col=output, group_col=group) #, p_adjust ='holm')
return summary, post_hoc
def get_missmatch(**kws):
'''
Cross-tabulates a dataframe on 2 selected columns and
calculates the mismatch proportion by rows and in total
Args:
kws: Key word arguments to pd.crosstab function
Returns:
cross_tab: result of cross tabulation as dataframe
missmatch_rows: mismatch proportion by rows as series
total_missmatch: total mismatch proportion as float
'''
cross_tab = pd.crosstab(**kws)
missmatch_rows = (cross_tab.sum(axis=1) - cross_tab.max(axis=1))
total_missmatch = missmatch_rows.sum() / cross_tab.sum().sum()
missmatch_rows = missmatch_rows / cross_tab.sum(axis=1)
missmatch_rows.name = 'missmatch_proportion'
return cross_tab, missmatch_rows, total_missmatch
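# Illustrative usage sketch (not part of the original module; labels and the
# helper name are hypothetical). get_missmatch forwards its keyword arguments
# to pd.crosstab and quantifies how consistently two candidate cluster
# solutions group the same observations.
def _example_get_missmatch():
    labels_a = pd.Series([0, 0, 1, 1, 2, 2, 2], name='solution_a')
    labels_b = pd.Series([0, 0, 1, 2, 2, 2, 1], name='solution_b')
    cross_tab, missmatch_rows, total_missmatch = get_missmatch(
        index=labels_a, columns=labels_b)
    print(cross_tab)
    print(missmatch_rows)
    print('total mismatch proportion: {:.2f}'.format(total_missmatch))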
def query_product_info(client, query_params):
'''Query product information from bigquery database.
Distinct records of product_sku, product_name,
product_brand, product_brand_grp,
product_category, product_category_grp,
Args:
client: Instantiated bigquery.Client to query distinct product
description(product_sku, product_name, product_category,
product_category_grp)
query_params: Query parameters for client
Returns:
product_df: product information as distict records
as pandas dataframe (# records, # variables)
'''
# Check arguments
# ----------------
assert isinstance(client, bigquery.Client)
assert isinstance(query_params, list)
# Query distinct products descriptions
# ------------------------------------
query='''
SELECT DISTINCT
hits_product.productSku AS product_sku,
hits_product.v2productName AS product_name,
hits_product.productBrand AS product_brand,
hits.contentGroup.contentGroup1 AS product_brand_grp,
hits_product.v2productCategory AS product_category,
hits.contentGroup.contentGroup2 AS product_category_grp
FROM
`bigquery-public-data.google_analytics_sample.ga_sessions_*`
LEFT JOIN UNNEST(hits) AS hits
LEFT JOIN UNNEST(hits.product) AS hits_product
WHERE
_TABLE_SUFFIX BETWEEN @start_date AND @end_date
AND hits_product.productSku IS NOT NULL
ORDER BY
product_sku
'''
job_config = bigquery.QueryJobConfig()
job_config.query_parameters = query_params
df = client.query(query, job_config=job_config).to_dataframe()
return df
def reconstruct_brand(product_sku, df):
'''Reconstructs brand from product name and brand variables
Args:
product_sku: product_sku of transaction records on product level
of size # transactions on product level
df: Product information as output of
helper.query_product_info in form of dataframe
of shape (# of distinct records, # of variables)
Returns:
recon_brand: reconstructed brand column as pandas series
of size # of transactions
'''
# predict brand name from product name for each sku
# -------------------------------------------------
# valid brands
brands = ['Android',
'Chrome',
r'\bGo\b',
'Google',
'Google Now',
'YouTube',
'Waze']
# concatenate different product names for each sku
brand_df = (df[['product_sku', 'product_name']]
.drop_duplicates()
.groupby('product_sku')
['product_name']
.apply(lambda product_name: ' '.join(product_name))
.reset_index()
)
# drop (not set) sku's
brand_df = brand_df.drop(
index=brand_df.index[brand_df['product_sku'] == '(not set)'])
# predict brand name from product name for each sku
brand_df['recon_brand'] = (
brand_df['product_name']
.str.extract(r'({})'.format('|'.join(set(brands)),
flags=re.IGNORECASE))
)
# adjust brand taking into account spelling errors in product names
brand_df.loc[
brand_df['product_name'].str.contains('You Tube', case=False),
'recon_brand'
] = 'YouTube'
# predict brand name from brand variables for sku's where
# brand couldn't be predicted from product name
# --------------------------------------------------------
# get distinct product_sku and brand variables associations
brand_vars = ['product_brand', 'product_brand_grp']
brand_var = dict()
for brand in brand_vars:
brand_var[brand] = (df[['product_sku', brand]]
.drop(index=df.index[(df['product_sku'] == '(not set)')
| df['product_sku'].isna()
| (df[brand] == '(not set)')
| df[brand].isna()])
.drop_duplicates()
.drop_duplicates(subset='product_sku', keep=False))
# check for brand ambiguity at sku level
old_brand = brand_var['product_brand'].set_index('product_sku')
new_brand = brand_var['product_brand_grp'].set_index('product_sku')
shared_sku = old_brand.index.intersection(new_brand.index)
if not shared_sku.empty:
# delete sku's with ambiguous brands
ambigious_sku = shared_sku[
old_brand[shared_sku].squeeze().values
!= new_brand[shared_sku].squeeze().values
]
old_brand = old_brand.drop(index=ambigious_sku, errors='ignore')
new_brand = new_brand.drop(index=ambigious_sku, errors='ignore')
# delete sku's with multiple brands in new_brand
multiple_sku = shared_sku[
old_brand[shared_sku].squeeze().values
== new_brand[shared_sku].squeeze().values
]
new_brand = new_brand.drop(index=multiple_sku, errors='ignore')
# concatenate all associations of brand variables and product sku's
brand_var = pd.concat([old_brand.rename(columns={'product_brand':
'recon_brand_var'}),
new_brand.rename(columns={'product_brand_grp':
'recon_brand_var'})])
# predict brand name from brand variables
brand_df.loc[brand_df['recon_brand'].isna(), 'recon_brand'] = (
pd.merge(brand_df['product_sku'], brand_var, on='product_sku', how='left')
['recon_brand_var']
)
# recode remaining missing (not set) brands by Google brand
# ---------------------------------------------------------
brand_df['recon_brand'] = brand_df['recon_brand'].fillna('Google')
# predict brand from brand names and variables on transaction data
# ----------------------------------------------------------------
recon_brand = (pd.merge(product_sku.to_frame(),
brand_df[['product_sku', 'recon_brand']],
on='product_sku',
how='left')
.reindex(product_sku.index)
['recon_brand'])
return recon_brand
def reconstruct_category(product_sku, df, category_spec):
'''Reconstructs category from category variables and product names.
Args:
product_sku: product_sku from transaction records on product level
of size # transactions on product level
df: Product information as output of
helper.query_product_info in form of dataframe
of shape (# of distinct records, # of variables)
category_spec: Dictionary with keys as category variable names
and values as mappings between category variable levels
to category labels in form of dataframe
Returns:
recon_category: reconstructed category column as pandas series
of size # of transactions on product level
category_df: mappings of unique sku to category labels
'''
# Check arguments
# ----------------
assert isinstance(product_sku, pd.Series)
assert isinstance(df, pd.DataFrame)
assert isinstance(category_spec, dict)
# reconstruct category name from product name for each sku
# --------------------------------------------------------
def get_category_representation(category_label, valid_categories):
'''Handle multiple categories assigned to one sku.
For ambiguous categories returns a missing value.
Args:
category_label: Series of category labels for
particular sku
valid_categories: Index of valid unique categories
Returns:
label: valid category label or missing value
'''
label = valid_categories[valid_categories.isin(category_label)]
if label.empty or label.size > 1:
return np.nan
else:
return label[0]
def label_category_variable(df, category_var, label_spec):
'''reconstruct category labels from category variable.
Args:
df: Product information dataframe.
category_var: Name of category variable to reconstruct labels
label_spec: Label mapping between category variable levels
and labels.
Returns:
var_label: Label mapping to sku as dataframe
'''
valid_categories = pd.Index(label_spec
.groupby(['category_label'])
.groups
.keys())
var_label = (pd.merge(df[['product_name', category_var]]
.drop_duplicates(),
label_spec,
how='left',
on=category_var)
[['product_name', 'category_label']]
.groupby('product_name')
['category_label']
.apply(get_category_representation,
valid_categories=valid_categories)
.reset_index())
return var_label
def screen_fit_model(data):
'''Screens Naive Bayes Classifiers and selects best model
based on f1 weighted score. Returns fitted model and score.
Args:
data: Text and respective class labels as dataframe
of shape (# samples, [text, labels])
Returns:
model: Best fitted sklearn model
f1_weighted_score: Test f1 weighted score
Note: Following hyperparameters are tested
Algorithm: MultinomialNB, ComplementNB
ngrams range: (1, 1), (1, 2), (1, 3)
binarization: False, True
'''
# vectorize text information in product_name
def preprocessor(text):
# not relevant words
not_relevant_words = ['google',
'youtube',
'waze',
'android']
# transform text to lower case and remove punctuation
text = ''.join([word.lower() for word in text
if word not in string.punctuation])
# tokenize words
tokens = re.split('\W+', text)
# Drop not relevant words and lemmatize words
wn = nltk.WordNetLemmatizer()
text = ' '.join([wn.lemmatize(word) for word in tokens
if word not in not_relevant_words + STOPWORDS])
return text
# define pipeline
pipe = Pipeline([('vectorizer', CountVectorizer()),
('classifier', None)])
# define hyperparameters
param_grid = dict(vectorizer__ngram_range=[(1, 1), (1, 2), (1, 3)],
vectorizer__binary=[False, True],
classifier=[MultinomialNB(),
ComplementNB()])
# screen naive Bayes models
grid_search = GridSearchCV(pipe, param_grid=param_grid, cv=5,
scoring='f1_weighted', n_jobs=-1)
# divide dataset into train and test set using stratification
# due to high imbalance of label frequencies
x_train, x_test, y_train, y_test = train_test_split(
data['product_name'],
data['recon_category'],
test_size=0.25,
stratify=data['recon_category'],
random_state=1)
# execute screening and select best model
grid_search.fit(x_train, y_train)
# calculate f1 weighted test score
y_pred = grid_search.predict(x_test)
f1_weighted_score = f1_score(y_test, y_pred, average='weighted')
return grid_search.best_estimator_, f1_weighted_score
# reconstruct category label from cateogry variables
recon_labels = dict()
for var, label_spec in category_spec.items():
recon_labels[var] = (label_category_variable(df, var, label_spec)
.set_index('product_name'))
recon_labels['product_category'][
recon_labels['product_category'].isna()
] = recon_labels['product_category_grp'][
recon_labels['product_category'].isna()
]
# reconstruct category label from product names
valid_categories = pd.Index(category_spec['product_category_grp']
.groupby(['category_label'])
.groups
.keys())
category_df = (pd.merge(df[['product_sku', 'product_name']]
.drop_duplicates(),
recon_labels['product_category'],
how='left',
on = 'product_name')
[['product_sku', 'product_name', 'category_label']]
.groupby('product_sku')
.agg({'product_name': lambda name: name.str.cat(sep=' '),
'category_label': lambda label:
get_category_representation(label, valid_categories)})
.reset_index())
category_df.rename(columns={'category_label': 'recon_category'},
inplace=True)
# associate category from category names and variables on transaction data
# ------------------------------------------------------------------------
recon_category = (pd.merge(product_sku.to_frame(),
category_df[['product_sku', 'recon_category']],
on='product_sku',
how='left')
)
# predict category of transactions where category is unknown
# Multinomial and Complement Naive Bayes model is screened
# and finetuned using 1-grams, 2-grams and 3-grams
# as well as binarization (True or False)
# best model is selected based on maximizing test f1 weighted score
# ----------------------------------------------------------------
# screen best model and fit it on training data
model, f1_weighted_score = screen_fit_model(
category_df[['product_name', 'recon_category']]
.dropna()
)
# predict category labels if model has f1_weighted_score > threshold
f1_weighted_score_threshold = 0.8
if f1_weighted_score < f1_weighted_score_threshold:
raise Exception(
'Accuracy of category prediction below threshold {:.2f}'
.format(f1_weighted_score_threshold))
else:
product_name = (pd.merge(recon_category
.loc[recon_category['recon_category'].isna(),
['product_sku']],
category_df[['product_sku', 'product_name']],
how='left',
on='product_sku')
['product_name'])
category_label = model.predict(product_name)
recon_category.loc[recon_category['recon_category'].isna(),
'recon_category'] = category_label
return recon_category['recon_category']
def reconstruct_sales_region(subcontinent):
'''Reconstruct sales region from subcontinent'''
if (pd.isna(subcontinent)
or subcontinent.lower() == '(not set)'):
sales_region = np.nan
elif ('africa' in subcontinent.lower()
or 'europe' in subcontinent.lower()):
sales_region = 'EMEA'
elif ('caribbean' in subcontinent.lower()
or subcontinent.lower() == 'central america'):
sales_region = 'Central America'
elif subcontinent.lower() == 'northern america':
sales_region = 'North America'
elif subcontinent.lower() == 'south america':
sales_region = 'South America'
elif ('asia' in subcontinent.lower()
or subcontinent.lower() == 'australasia'):
sales_region = 'APAC'
else:
raise Exception(
'Can not assign sales region to {} subcontinent'
.format(subcontinent))
return sales_region
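# Illustrative usage sketch (not part of the original module; the subcontinent
# values and helper name are hypothetical). The mapping is typically applied
# element-wise to a subcontinent column.
def _example_reconstruct_sales_region():
    subcontinents = pd.Series(['Northern America', 'Western Europe',
                               'Southeast Asia', '(not set)'])
    # -> North America, EMEA, APAC, NaN
    print(subcontinents.apply(reconstruct_sales_region))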
def reconstruct_traffic_keyword(text):
'''Reconstructs traffic keywords to a simpler representation'''
# if empty rename to not applicable
if pd.isna(text):
text = '(not applicable)'
# if one word with mixed numbers & letters rename to (not relevant)
elif re.search(r'(?=.*\d)(?=.*[A-Z=\-])(?=.*[a-z])([\w=-]+)', text):
text = '(not relevant)'
elif ((text != '(not provided)')
and (re.search('(\s+)', text) is not None)):
# transform text to lower case and remove punctuation
text = ''.join([word.lower() for word in text
if word not in string.punctuation.replace('/', '')])
# tokenize words
tokens = re.split('\W+|/', text)
# Drop not relevant words and lemmatize words
wn = nltk.WordNetLemmatizer()
text = ' '.join([wn.lemmatize(word) for word in tokens
if word not in STOPWORDS])
return text
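# Illustrative usage sketch (not part of the original module; keyword strings and
# helper name are hypothetical). Shows the three branches: missing keywords,
# mixed alphanumeric tokens and regular multi-word keywords.
def _example_reconstruct_traffic_keyword():
    for keyword in [None, 'xYz123=-ab', '(not provided)', 'google water bottles']:
        print(repr(keyword), '->', repr(reconstruct_traffic_keyword(keyword)))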
def aggregate_data(df):
'''Encode and aggregate engineered and missing-value-free data
on client level
Args:
df: engineered and missing-value-free data as
pandas dataframe of shape (# transaction items, # variables)
Returns:
agg_df: encoded and aggregated dataframe
of shape (# clients, # encoded & engineered variables)
with client_id index
'''
# identifiers
id_vars = pd.Index(
['client_id',
'session_id',
'transaction_id',
'product_sku']
)
# session variables
session_vars = pd.Index(
['visit_number', # avg_visits
'date', # month, week, week_day + one hot encode + sum
'pageviews', # avg_pageviews
'time_on_site', # avg_time_on_site
'ad_campaign', # sum
'source', # one hot encode + sum
'browser', # one hot encode + sum
'operating_system', # one hot encode + sum
'device_category', # one hot encode + sum
'continent', # one hot encode + sum
'subcontinent', # one hot encode + sum
'country', # one hot encode + sum
'sales_region', # one hot encode + sum
'social_referral', # sum
'social_network', # one hot encode + sum
'channel_group'] # one hot encode + sum
)
# group session variables from item to session level
session_df = (df[['client_id',
'session_id',
*session_vars.to_list()]]
.drop_duplicates()
# drop ambiguous region 1 case
.drop_duplicates(subset='session_id'))
# reconstruct month, week and week day variables
# session_df['month'] = session_df['date'].dt.month
# session_df['week'] = session_df['date'].dt.week
session_df['week_day'] = session_df['date'].dt.weekday + 1
session_df = session_df.drop(columns='date')
# encode variables on session level
keep_vars = [
'client_id',
'session_id',
'visit_number',
'pageviews',
'time_on_site',
'social_referral',
'ad_campaign'
]
encode_vars = session_df.columns.drop(keep_vars)
enc_session_df = pd.get_dummies(session_df,
columns=encode_vars.to_list(),
prefix_sep='*')
# remove not relevant encoded variables
enc_session_df = enc_session_df.drop(
columns=enc_session_df.columns[
enc_session_df.columns.str.contains('not set|other')
]
)
# summarize session level variables on customer level
sum_vars = (pd.Index(['social_referral', 'ad_campaign'])
.append(enc_session_df
.columns
.drop(keep_vars)))
client_session_sum_df = (enc_session_df
.groupby('client_id')
[sum_vars]
.sum())
client_session_avg_df = (
enc_session_df
.groupby('client_id')
.agg(avg_visits=('visit_number', 'mean'),
avg_pageviews=('pageviews', 'mean'),
avg_time_on_site=('time_on_site', 'mean'))
)
client_session_df = pd.concat([client_session_avg_df,
client_session_sum_df],
axis=1)
# product level variables
product_vars = pd.Index([
# 'product_name', # one hot encode + sum
'product_category', # one hot encode + sum
'product_price', # avg_product_revenue
'product_quantity', # avg_product_revenue
'hour'] # one hot encoded + sum
)
avg_vars = pd.Index([
'product_price',
'product_quantity'
])
sum_vars = pd.Index([
# 'product_name',
'product_category',
'hour'
])
enc_product_df = pd.get_dummies(df[id_vars.union(product_vars)],
columns=sum_vars,
prefix_sep='*')
# summarize product level variables on customer level
client_product_sum_df = (enc_product_df
.groupby('client_id')
[enc_product_df.columns.drop(avg_vars)]
.sum())
def average_product_vars(client):
d = {}
d['avg_product_revenue'] = ((client['product_price']
* client['product_quantity'])
.sum()
/ client['product_quantity'].sum())
# ipdb.set_trace(context=15)
d['avg_unique_products'] = (client
.groupby('transaction_id')
['product_sku']
.apply(lambda sku: len(sku.unique()))
.mean())
return pd.Series(d, index=['avg_product_revenue',
'avg_unique_products'])
client_product_avg_df = (enc_product_df
.groupby('client_id')
.apply(average_product_vars))
client_product_df = pd.concat([client_product_avg_df,
client_product_sum_df]
, axis=1)
agg_df = pd.concat([client_session_df,
client_product_df],
axis=1)
return agg_df
def do_pca(X_std, **kwargs):
'''Apply PCA to the data.'''
pca = PCA(**kwargs)
model = pca.fit(X_std)
X_pca = model.transform(X_std)
return pca, X_pca
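# Illustrative usage sketch (not part of the original module; data and helper
# name are hypothetical). do_pca is a thin wrapper around sklearn's PCA that
# returns both the fitted estimator and the transformed scores.
def _example_do_pca():
    rng = np.random.RandomState(0)
    X_std = StandardScaler().fit_transform(rng.normal(size=(50, 5)))
    pca, X_pca = do_pca(X_std, n_components=3)
    print(pca.explained_variance_ratio_, X_pca.shape)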
def scree_pca(pca, plot=False, **kwargs):
'''Investigate the variance accounted for by each principal component.'''
# PCA components
n_pcs = len(pca.components_)
pcs = pd.Index(range(1, n_pcs+1), name='principal component')
# Eigen Values
eig = pca.explained_variance_.reshape(n_pcs, 1)
eig_df = pd.DataFrame(np.round(eig, 2), columns=['eigen_value'], index=pcs)
eig_df['cum_eigen_value'] = np.round(eig_df['eigen_value'].cumsum(), 2)
# Explained Variance %
var = pca.explained_variance_ratio_.reshape(n_pcs, 1)
var_df = pd.DataFrame(np.round(var, 4),
columns=['explained_var'],
index=pcs)
var_df['cum_explained_var'] = (np.round(var_df['explained_var'].cumsum()
/ var_df['explained_var'].sum(), 4))
df = pd.concat([eig_df, var_df], axis=1)
if plot:
# scree plot limit
limit = pd.DataFrame(np.ones((n_pcs, 1)),
columns=['scree_plot_limit'], index=pcs)
ax = (pd.concat([df, limit], axis=1)
.plot(y=['eigen_value', 'explained_var', 'scree_plot_limit'],
title='PCA: Scree test & Variance Analysis', **kwargs)
)
df.plot(y=['cum_explained_var'], secondary_y=True, ax=ax)
return df
def get_pc_num(scree_df, pc_num = None, exp_var_threshold=None,
eig_val_threshold=1):
'''
Selects optimum number of principal components according to specified objectives,
whether % of explained variance or eigenvalue criterion
Args:
scree_df: Dataframe as output of scree_pca function
pc_num: Requested number of principal components, None by default
exp_var_threshold: threshold for cumulative % of explained variance
eig_val_threshold: min eigen value, 1 by default
Returns:
pc_num: Number of selected principal components
exp_var: Explained variance by selected components
sum_eig: Sum of eigen values of selected components
'''
# check arguments
assert pc_num is None or pc_num <= scree_df.index.size
assert exp_var_threshold is None or (0 < exp_var_threshold <= 1)
assert 0 < eig_val_threshold < scree_df.index.size
assert (pc_num is None or exp_var_threshold is not None) or \
(pc_num is not None or exp_var_threshold is None), \
('''Either number of principal components or minimum variance
explained should be selected''')
if exp_var_threshold:
pcs = scree_df.index[scree_df['cum_explained_var'] <= exp_var_threshold]
elif pc_num:
pcs = scree_df.index[range(1, pc_num+1)]
elif exp_var_threshold is None:
pcs = scree_df.index[scree_df['eigen_value'] > eig_val_threshold]
pc_num = len(pcs)
exp_var = scree_df.loc[pc_num, 'cum_explained_var']
sum_eig = scree_df.loc[[*pcs], 'eigen_value'].sum()
return pc_num, exp_var, sum_eig
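# Illustrative usage sketch (not part of the original module; data and helper
# name are hypothetical). A typical flow: fit the PCA, build the scree table,
# then pick the number of components, here via the default eigenvalue > 1 rule.
def _example_scree_and_pc_num():
    rng = np.random.RandomState(0)
    X_std = StandardScaler().fit_transform(rng.normal(size=(100, 6)))
    pca, _ = do_pca(X_std)
    scree_df = scree_pca(pca)
    pc_num, exp_var, sum_eig = get_pc_num(scree_df)
    print(pc_num, exp_var, sum_eig)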
def varimax(factor_df, **kwargs):
'''
varimax rotation of factor matrix
Args:
factor_df: factor matrix as pd.DataFrame with shape
(# features, # principal components)
Return:
rot_factor_df: rotated factor matrix as pd.DataFrame
'''
factor_mtr = df2mtr(factor_df)
varimax = robjects.r['varimax']
rot_factor_mtr = varimax(factor_mtr)
return pandas2ri.ri2py(rot_factor_mtr.rx2('loadings'))
def get_components(df, pca, rotation=None, sort_by='sig_ld',
feat_details=None, plot='None', **kwargs):
'''
Show significant factor loadings depending on sample size
Args:
df: data used for pca as pd.DataFrame
pca: fitted pca object
rotation: whether to apply factor matrix rotation, by default None.
sort_by: sort sequence of components, by default according
to the number of significant loadings 'sig_ld'
feat_details: Dictionary of mapped feature details, by default None
plot: 'discrete' plots heatmap highlighting significant loadings
'continuous' plots continuous heatmap,
by default None
Returns:
factor_df: factor matrix as pd.DataFrame
of shape (# features, # components)
sig_ld: number of significant loadings across components as
pd.Series of size # components
cross_ld: number of significant loadings across features
(cross loadings) as pd.Series of size # features
'''
# constants
# ---------
maxstr = 100 # number of characters to print
# guidelines for identifying significant factor loadings
# based on sample size. Source: Multivariate Data Analysis. 7th Edition.
factor_ld = np.linspace(0.3, 0.75, 10)
signif_sz = np.array([350, 250, 200, 150, 120, 100, 85, 70, 60, 50])
# loadings significance threshold
ld_sig = factor_ld[len(factor_ld) - (signif_sz <= df.index.size).sum()]
if rotation == 'varimax':
components = varimax(pd.DataFrame(pca.components_.T))
else:
components = pca.components_.T
# annotate factor matrix
index = pd.Index([])
for feat in df.columns:
try:
index = index.append(
pd.Index([feat]) if feat_details is None else \
pd.Index([feat_details[feat]['long_name'][:maxstr]]))
except KeyError:
index = index.append(pd.Index([feat]))
import string
from copy import deepcopy
from shutil import copyfile
from typing import List, Tuple, Dict, Optional
import warnings
import re
import matplotlib
import numpy as np
import pandas as pd
import seaborn as sns
from adjustText import adjust_text
from matplotlib import pyplot as plt
from tqdm.auto import tqdm, trange
from matplotlib.ticker import MaxNLocator, MultipleLocator
from matplotlib.font_manager import FontProperties
import adjustText
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
from titrato import closest_pka, hungarian_pka, align_pka, TitrationCurve
from titrato import fit_titration_curves_3d
from titrato.stats import (
absolute_loss,
squared_loss,
array_mae,
array_rmse,
wrap_pearsonr,
bootstrapped_func,
array_median_error,
)
from .sampl import (
TitrationCurveType,
SAMPL6DataProvider,
bootstrap_rmse_r,
bootstrap_pKa_dataframe,
HaspKaType,
TypeIPrediction,
)
from .stats import (
area_between_curves,
area_curve_vectorized,
rmsd_curve_vectorized,
BootstrapDistribution,
)
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
import logging
from typing import List
from uncertainties import ufloat
import networkx as nx
from networkx.drawing.nx_pydot import pydot_layout
from collections import deque
log = logging.getLogger()
# Default styling
sns.set_style("ticks")
glob_font = {"size": 8}
matplotlib.rc("font", **glob_font)
matplotlib.rc("lines", **{"markersize": 4})
# Default colors per charge
charge_colors = {
-4: "#470911",
-3: "#b2182b",
-2: "#d6604d",
-1: "#f4a582",
0: "#333333",
1: "#92c5de",
2: "#4393c3",
3: "#2166ac",
4: "#0d2844",
}
import os
def to_str(num: ufloat):
"""Formats ufloat with one precision digit on the uncertainty and latex syntax"""
return "{:.1uL}".format(num)
class TexBlock:
"""Basic class for latex syntax block, to be added to report."""
tex_src = ""
def __init__(self, **kwargs):
self._variables = kwargs
return
def render_source(self):
"""Fill in all variable fields and return complete latex source."""
return self.tex_src.format(**self._variables)
class ReportHeader(TexBlock):
"""Represents the preamble and document start."""
tex_src = (
"\\documentclass[9pt]{{standalone}}\n"
"\\renewcommand{{\\familydefault}}{{\\sfdefault}}\n"
"\\usepackage[utf8]{{inputenc}}\n"
"\\usepackage{{graphicx}}\n"
"\n\\begin{{document}}\n"
)
# syntax for adding a new variable
tex_var = "\\newcommand{{\\{}}}{{{}}}\n"
def __init__(self, mol_id, method_names, img_ext="pdf"):
"""Initialize the header of the file by setting all appropriate variables"""
variables = dict()
variables["molid"] = mol_id
variables["imgext"] = img_ext
self.ids = list()
# need to assign ascii name to method for use as tex variable
for method, name in enumerate(method_names):
id = string.ascii_lowercase[method]
variables[f"method{id}"] = name
# Keep track of ids defined in header
self.ids.append(id)
self._variables = variables
def render_source(self):
src = self.tex_src.format()
for name, value in self._variables.items():
src += self.tex_var.format(name, value)
return src
class ReportFooter(TexBlock):
tex_src = "\\end{{document}}\n"
class OverviewRow(TexBlock):
"""Latex syntax provider for the overview section."""
tex_src = (
"\\section{{\\molid}}"
"\n\\noindent \n"
"\\begin{{minipage}}[s]{{0.35\\textwidth}}\\centering\n"
"\\includegraphics[width=\\textwidth]{{Reports/\\molid-molecule.\\imgext}}\n"
"\\end{{minipage}}\n"
"\\begin{{minipage}}[s]{{0.35\\textwidth}}\n"
"\\includegraphics[width=\\textwidth]{{Reports/overview-virtual-titration-\\molid.\\imgext}}\n"
"\\end{{minipage}}\n"
"\\begin{{minipage}}[s]{{0.23\\textwidth}}\n"
"\\includegraphics[width=\\textwidth]{{Reports/overview-legend-\\molid.\\imgext}}\n"
"\\end{{minipage}}\n"
)
class MethodResultRow(TexBlock):
"""A row of figures for a single method"""
tex_src = (
"\n\\begin{{minipage}}[s]{{\\textwidth}}\\centering\n"
"{{\\textbf \\method{id}}}\n"
"\\end{{minipage}}\n"
"\n\\noindent\n"
"\\begin{{minipage}}[s]{{0.33\\textwidth}}\\centering\n"
"\\includegraphics[width=\\textwidth]{{Reports/\\method{id}-virtual-titration-\\molid.\\imgext}}\n"
"\\end{{minipage}}\n"
"\\begin{{minipage}}[s]{{0.33\\textwidth}}\n"
"\\includegraphics[\\textwidth]{{Reports/\\method{id}-free-energy-\\molid.\\imgext}}\n"
"\\end{{minipage}}\n"
"\\begin{{minipage}}[s]{{0.33\\textwidth}}\n"
"\\includegraphics[\\textwidth]{{Reports/\\method{id}-populations-\\molid.\\imgext}}\n"
"\\end{{minipage}}\n"
)
def __init__(self, id):
"""A row of figures for a single method.
Parameters
id - the 1-letter identifier for the method (a-z).
"""
self._variables = dict(id=id)
class SAMPL6ReportGenerator:
"""This class provides an interface for generating analysis plots between experiment, and a prediction for a single molecule."""
# Assume pH values are spaced apart by 0.1 for any integration purposes.
_dpH = 0.1
# Plotting defaults
_figprops = {
"dpi": 150,
"figsize": (2.0, 2.0), # 3 figures fitting between 3 cm margins on letter paper
"line_styles": ["-", "--", "-.", ":"],
"line_widths": [0.75, 1.25, 1.25, 1.25],
"colors_per_charge": charge_colors,
# Use consistent colors for each method
"extra_colors": sns.color_palette("dark"),
}
# Default number of bootstrap samples used to estimate titration curve confidence intervals
num_bootstrap_curves = 10000
def __init__(
self,
mol_id: str,
exp_provider: SAMPL6DataProvider,
data_providers: List[SAMPL6DataProvider],
mol_img_loc: str,
) -> None:
"""Instantiate the analysis from the identifier of the molecule, and providers of the data.
Parameters
----------
mol_id - molecule associated with this report
exp_provider - provider for the experimental data source
data_providers - list of providers for all the predictions
mol_img_loc - location where an image of the molecule can be found
"""
self._exp_provider = exp_provider
self._prediction_providers = data_providers
self._figures: Dict[str, Dict[str, matplotlib.figure.Figure]] = {
pred.method_desc: dict() for pred in data_providers
}
# Add dict for overview figures
self._figures["overview"] = dict()
self._figures[exp_provider.method_desc] = dict()
# Data tables by description, and latex format
self._tables: Dict[str, str] = dict()
# Latex Report document
self._tex_source = ""
self._num_predictions = len(data_providers)
self._mol_id = mol_id
self._mol_img = mol_img_loc
return
def _plot_charge_legend(self):
"""Generate a legend for all charges."""
fig, ax = self._newfig()
for charge in range(-4, 5):
color = self._figprops["colors_per_charge"][charge]
ax.plot([0, 1], [0, 1], color=color, label=f"{charge:+d}")
# Separate legend figure
figlegend, axlegend = plt.subplots(
1, 1, figsize=[8, 0.5], dpi=self._figprops["dpi"]
)
handles, labels = ax.get_legend_handles_labels()
# handles = np.concatenate((handles[::2],handles[1::2]),axis=0)
# labels = np.concatenate((labels[::2],labels[1::2]),axis=0)
leg = figlegend.legend(handles, labels, loc="center", ncol=9)
axlegend.get_xaxis().set_visible(False)
axlegend.get_yaxis().set_visible(False)
for spine in ["top", "left", "bottom", "right"]:
axlegend.spines[spine].set_visible(False)
self._figures["overview"]["charge-legend"] = figlegend
plt.close(fig)
def make_all_plots(self):
"""Make all available plots for each prediction and the experiment.."""
# self._plot_virtual_titration_overview()
self._plot_charge_legend()
# overview plot
self._plot_virtual_titration_overview()
# Experiment gets its own plots
# Virtual titration plot
figtype = "virtual-titration"
newfig = self.plot_virtual_titration(self._exp_provider)
self._figures["Experiment"][figtype] = newfig
# Free enery values
figtype = "free-energy"
newfig = self.plot_predicted_free_energy(self._exp_provider)
self._figures["Experiment"][figtype] = newfig
# Populations
figtype = "populations"
newfig = self.plot_predicted_population(self._exp_provider)
self._figures["Experiment"][figtype] = newfig
# Each method gets its own plots
for p, pred_loader in enumerate(self._prediction_providers):
desc = pred_loader.method_desc
# Virtual titration plot
figtype = "virtual-titration"
newfig = self.plot_virtual_titration(
self._exp_provider, pred_loader=pred_loader, index=p
)
self._figures[desc][figtype] = newfig
# Free energy values
figtype = "free-energy"
newfig = self.plot_predicted_free_energy(pred_loader)
self._figures[desc][figtype] = newfig
# Populations
figtype = "populations"
newfig = self.plot_predicted_population(pred_loader)
self._figures[desc][figtype] = newfig
def _plot_virtual_titration_overview(self):
"""Plot an overview of all methods using the virtual charge titration.
Also stores a legend with color codes for each method that can be used with other overview figures.
"""
# TODO fill in the new structure for experimental plots
# Overview charge titration
titration_fig_ax = self._newfig()
for idx, pred in enumerate(self._prediction_providers, start=0):
desc = pred.method_desc
if pred.can_bootstrap:
exp_data, exp_curves, exp_bootstrap_data = (
self._load_experiment_with_bootstrap()
)
pred_data, bootstrap_data = pred.bootstrap(
self._mol_id, self.num_bootstrap_curves
)
pred_data.align_mean_charge(exp_data, area_between_curves, self._dpH)
# Align all to experiment curve (note this is a joint bootstrap of experiment and prediction)
curves = list()
for dat, exp_dat in zip(bootstrap_data, exp_bootstrap_data):
dat.align_mean_charge(exp_dat, area_between_curves, self._dpH)
curves.append(deepcopy(dat.mean_charge))
curves = np.asarray(curves)
self._add_virtual_titration_bootstrap_sd(
titration_fig_ax, desc, pred_data, curves, idx, linestyle="-"
)
# experiment plotted as dashed line
self._add_virtual_titration_bootstrap_sd(
titration_fig_ax,
f"{desc}-exp",
exp_data,
exp_curves,
idx,
linestyle="--",
alpha=0.5,
)
else:
exp_data = self._exp_provider.load(self._mol_id)
pred_data = pred.load(self._mol_id)
pred_data.align_mean_charge(exp_data, area_between_curves, self._dpH)
curve = pred_data.mean_charge
self._add_virtual_titration_bootstrap_sd(
titration_fig_ax, desc, pred_data, np.asarray([curve]), idx
)
# Unpack tuple.
fig, ax = titration_fig_ax
ax.set_title(f"{self._mol_id}", fontsize=9)
# Integer labels for y axis
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
# No labels on y axis, but indicate the integer values with ticks
# labels = [item.get_text() for item in ax.get_yticklabels()]
# empty_string_labels = [""] * len(labels)
# ax.set_yticklabels(empty_string_labels)
ax.set_ylabel(r"$Q_\mathsf{avg}$")
ax.set_xlabel("pH")
# x-tick every 2 pH units
ax.set_xticks(np.arange(2.0, 14.0, 2.0))
# remove top and right spines
sns.despine()
# fit everything within bounds
fig.tight_layout()
# Separate legend figure
figlegend, axlegend = self._newfig()
leg = figlegend.legend(*ax.get_legend_handles_labels(), loc="center")
axlegend.get_xaxis().set_visible(False)
axlegend.get_yaxis().set_visible(False)
for spine in ["top", "left", "bottom", "right"]:
axlegend.spines[spine].set_visible(False)
self._figures["overview"]["virtual-titration"] = fig
self._figures["overview"]["legend"] = figlegend
def _load_experiment_with_bootstrap(self):
# All methods tested against the same experimental values.
exp_data = self._exp_provider.load(self._mol_id)
if self._exp_provider.can_bootstrap:
exp_data, exp_bootstrap_data = self._exp_provider.bootstrap(
self._mol_id, self.num_bootstrap_curves
)
# Virtual titration curves
exp_curves = np.asarray([curve.mean_charge for curve in exp_bootstrap_data])
return exp_data, exp_curves, exp_bootstrap_data
def save_all(self, dir: str, ext="pdf"):
"""Save all figures.
Parameters
----------
dir - output directory for all files
ext - Extension of the images.
"""
if not os.path.isdir(dir):
os.makedirs(dir)
for desc, method in self._figures.items():
for figtype, figure in method.items():
figure.savefig(
os.path.join(dir, f"{desc}-{figtype}-{self._mol_id}.{ext}")
)
copyfile(self._mol_img, os.path.join(dir, f"{self._mol_id}-molecule.{ext}"))
with open(os.path.join(dir, f"report-{self._mol_id}.tex"), "w") as latexfile:
latexfile.write(self._tex_source)
def generate_latex(self, img_ext="pdf") -> None:
"""Make a minipage latex document layout containing figures"""
blocks: List[TexBlock] = list()
header = ReportHeader(
self._mol_id,
[
meth.method_desc
for meth in [self._exp_provider] + self._prediction_providers
],
img_ext=img_ext,
)
blocks.append(header)
blocks.append(OverviewRow())
for id in header.ids:
blocks.append(MethodResultRow(id))
blocks.append(ReportFooter())
for block in blocks:
self._tex_source += block.render_source()
def close(self) -> None:
"""Close all figures contained within this reporter to save memory."""
for desc, method in self._figures.items():
for figtype, figure in method.items():
plt.close(figure)
return
@classmethod
def _newfig(cls) -> Tuple[matplotlib.figure.Figure, matplotlib.axes.Axes]:
# Ensure style before starting figure
sns.set_style("ticks")
font = {"size": 11}
matplotlib.rc("font", **font)
return plt.subplots(
1, 1, figsize=cls._figprops["figsize"], dpi=cls._figprops["dpi"]
)
@classmethod
def _add_virtual_titration_bootstrap_sd(
cls,
fig_ax: Tuple[matplotlib.figure.Figure, matplotlib.axes.Axes],
label: str,
curve: TitrationCurveType,
bootstrap_curves: np.ndarray,
color_idx: int,
perc: float = 5,
fill=False,
linestyle="-",
alpha=1.0,
) -> None:
"""Plot the estimate and 2 standard deviations from a bootstrap set in existing fig and axes.
Parameters
----------
fig_ax - figure and corresponding axes to add lines to
label - label for plot, used for legend
curve - TitrationCurve object containing the mean, and the pH values
bootstrap_curves - 2D array of floats, bootstrap titration curves, with the 0 axis being the different curves, and the 1 axis the pH values.
ph_values - 1d array of the pH values that each point corresponds to.
color_idx - integer index for picking color from class array `extra_colors`
perc - percentile, and 100-percentile to plot
default 5, so 5th and 95th are plotted.
fill - fill the area between percentiles with color.
"""
color = cls._figprops["extra_colors"][color_idx]
std = np.std(bootstrap_curves, axis=0)
ph_values = curve.ph_values
mean = curve.mean_charge
# Unpack tuple
fig, ax = fig_ax
ax.plot(
ph_values,
mean,
linestyle,
linewidth=0.75,
color=color,
alpha=alpha,
label=label,
)
ax.plot(
ph_values,
mean + (2 * std),
":",
linewidth=0.75,
color=color,
alpha=0.5 * alpha,
)
ax.plot(
ph_values,
mean - (2 * std),
":",
linewidth=0.75,
color=color,
alpha=0.5 * alpha,
)
if fill:
ax.fill_between(
ph_values,
mean - (2 * std),
mean + (2 * std),
facecolor=color,
alpha=0.1,
)
return
def plot_virtual_titration(
self,
exp_loader: SAMPL6DataProvider,
pred_loader: Optional[SAMPL6DataProvider] = None,
fig_ax: Optional[Tuple[matplotlib.figure.Figure, matplotlib.axes.Axes]] = None,
index: int = None,
):
"""Plot titration curve using the mean charge."""
if fig_ax is None:
fig, ax = self._newfig()
else:
fig, ax = fig_ax
# Experiment is a black dotted curve, prediction is black solid
exp_data = deepcopy(exp_loader.load(self._mol_id))
if pred_loader is None:
ls = 0
else:
pred_data = deepcopy(pred_loader.load(self._mol_id))
ls = 1
exp_data.align_mean_charge(pred_data, area_between_curves, self._dpH)
area = area_between_curves(
pred_data.mean_charge, exp_data.mean_charge, self._dpH
)
ax.plot(
exp_data.ph_values,
exp_data.mean_charge,
color="#333333",
ls=self._figprops["line_styles"][3],
)
if pred_loader is not None:
ax.plot(
pred_data.ph_values,
pred_data.mean_charge,
color="#333333",
ls=self._figprops["line_styles"][0],
)
# Area between curves is shaded with the method's color
ax.fill_between(
pred_data.ph_values,
exp_data.mean_charge,
pred_data.mean_charge,
facecolor=self._figprops["extra_colors"][index],
interpolate=True,
alpha=0.7,
)
ax.set_title(r"$\Delta$ area : {:.2f}".format(area))
# Integer labels for y axis
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
# ensure at least one integer unit of charge on axis + .1 for spacing
ymin, ymax = ax.get_ylim()
round_min = round(ymin) - 0.05
round_max = round(ymax) + 0.05
if ymax < round_max:
ymax = round_max
if ymin > round_min:
ymin = round_min
ax.set_ylim([ymin, ymax])
# WITH labels on y axis, but indicate the integer values with ticks
labels = [item.get_text() for item in ax.get_yticklabels()]
# empty_string_labels = [""] * len(labels)
# ax.set_yticklabels(empty_string_labels)
ax.set_ylabel(r"$Q_\mathsf{avg}$")
ax.set_xlabel("pH")
# x-tick every 2 pH units
ax.set_xticks(np.arange(2.0, 14.0, 2.0))
# remove top and right spines
sns.despine()
# fit everything within bounds
fig.tight_layout()
return fig
def plot_predicted_free_energy(
self, pred_loader: SAMPL6DataProvider
) -> matplotlib.figure.Figure:
"""Plot titration curve using free energies."""
# colored by number of protons bound
fig, ax = self._newfig()
pred_data = pred_loader.load(self._mol_id)
for i, state_id in enumerate(pred_data.state_ids):
charge = pred_data.charges[i]
color = self._figprops["colors_per_charge"][charge]
# neutral on top
zorder = 10 - abs(charge)
ls = 0
ax.plot(
pred_data.ph_values,
pred_data.free_energies[i],
ls=self._figprops["line_styles"][ls],
color=color,
label="n={}".format(charge),
zorder=zorder,
)
ax.set_ylabel(r"Free energy ($k_B T$)")
ax.set_xlabel("pH")
ax.set_xticks(np.arange(2.0, 14.0, 2.0))
# remove top and right spines
sns.despine(ax=ax)
# fit everything within bounds
fig.tight_layout()
return fig
def plot_predicted_population(
self, pred_loader: TitrationCurveType
) -> matplotlib.figure.Figure:
"""Plot titration TitrationCurve using free energies."""
# colored by number of protons bound
pred_data = pred_loader.load(self._mol_id)
fig, ax = self._newfig()
for i, state_id in enumerate(pred_data.state_ids):
charge = pred_data.charges[i]
color = self._figprops["colors_per_charge"][charge]
linestyle = 0
# Neutral on top
zorder = 10 - abs(charge)
ax.plot(
pred_data.ph_values,
pred_data.populations[i],
ls=self._figprops["line_styles"][linestyle],
color=color,
label="n={}".format(charge),
zorder=zorder,
)
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
ax.set_ylim([-0.05, 1.05])
labels = [item.get_text() for item in ax.get_yticklabels()]
empty_string_labels = [""] * len(labels)
ax.set_yticklabels(empty_string_labels)
ax.set_ylabel("Population")
ax.set_xlabel("pH")
ax.set_xticks(np.arange(2.0, 14.0, 2.0))
# remove top and right spines
sns.despine(ax=ax)
# fit everything within bounds
fig.tight_layout()
return fig
def plot_experimental_free_energy(
self, exp_loader: SAMPL6DataProvider
) -> matplotlib.figure.Figure:
# colored by number of protons bound
fig, ax = self._newfig()
exp_data = exp_loader.load(self._mol_id)
for i, state_id in enumerate(exp_data.state_ids):
nbound = exp_data.charges[i]
color = self._figprops["colors_per_charge"][nbound]
if nbound == 0:
zorder = 10
else:
zorder = 2
ax.plot(
exp_data.ph_values,
exp_data.free_energies[i],
ls=self._figprops["line_styles"][0],
color=color,
label="n={}".format(nbound),
)
ax.set_ylabel(r"Free energy ($k_B T$)")
ax.set_xlabel("pH")
ax.set_xticks(np.arange(2.0, 14.0, 2.0))
# remove top and right spines
sns.despine(ax=ax)
# fit everything within bounds
fig.tight_layout()
return fig
def get_percentiles(array, percentiles):
nums = list()
for q in percentiles:
nums.append(np.percentile(array, q, axis=0))
return nums
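# Illustrative usage sketch (not part of the original module; array shapes and
# helper name are hypothetical). get_percentiles works column-wise (axis=0),
# returning one array per requested percentile over the pH grid.
def _example_get_percentiles():
    rng = np.random.RandomState(0)
    curves = rng.normal(size=(1000, 141))  # 1000 bootstrap curves on a pH grid
    median, low, high = get_percentiles(curves, [50.0, 5.0, 95.0])
    print(median.shape, low.shape, high.shape)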
def plot_quantiles(
curves: np.ndarray, ph_range: np.ndarray, color: str, perc: float = 5, fill=True
):
"""Plot the median, and outer percentiles.
Parameters
----------
curves - 2D array of bootstrap titration curves, with the 0 axis being the different curves, and the 1 axis the pH values.
ph_range - the ph values that each point corresponds to.
color - a matplotlib color for the elements in the plot
perc - percentile, and 100-percentile to plot
default 5, so 5th and 95th are plotted.
fill - fill the area between percentiles with color.
"""
quantiles = get_percentiles(curves, [50.0, perc, 100.0 - perc])
plt.plot(ph_range, quantiles[0], "-", color=color, alpha=1.0, label="median")
plt.plot(
ph_range,
quantiles[1],
":",
color=color,
alpha=1.0,
label="{:.0f}th/{:.0f}th percentile".format(perc, 100 - perc),
)
plt.plot(ph_range, quantiles[2], ":", color=color, alpha=1.0)
if fill:
plt.fill_between(
ph_range, quantiles[2], quantiles[1], facecolor=color, alpha=0.1
)
def plot_mean_twosigma(curves: np.ndarray, ph_range: np.ndarray, color: str, fill=True):
"""Plot the mean, plus/minus 2 sigma.
Parameters
----------
curves - 2D array of bootstrap titration curves, with the 0 axis being the different curves, and the 1 axis the pH values.
ph_range - the ph values that each point corresponds to.
color - a matplotlib color for the elements in the plot
fill - fill the area between +/- 2 sigma with color.
"""
mean = np.mean(curves, axis=0)
std = np.std(curves, axis=0)
plt.plot(ph_range, mean, "-", color=color, label="mean")
plt.plot(
ph_range, mean + 2 * std, ":", alpha=1.0, color=color, label=r"$\pm$2$\sigma$"
)
plt.plot(ph_range, mean - 2 * std, ":", alpha=1.0, color=color)
if fill:
plt.fill_between(
ph_range, mean + 2 * std, mean - 2 * std, facecolor=color, alpha=0.1
)
def plot_subset(curves, ph_range, n_choices: int, color="gray", alpha=0.1):
"""Plot a subset of bootstrap samples.
Parameters
----------
    curves - 2D array of bootstrap titration curves, with axis 0 indexing the different curves and axis 1 the pH values.
    ph_range - the pH values that each point corresponds to.
n_choices - number of samples to plot
color - a matplotlib color for the elements in the plot
alpha - transparency of the curves.
"""
choices = np.random.choice(curves.shape[0], n_choices, replace=False)
for i in choices:
plt.plot(ph_range, curves[i], "-", color=color, zorder=0, alpha=alpha)
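# --- Hedged usage sketch for the three helpers above -------------------------
# Everything below is illustrative only: the synthetic bootstrap curves, the pH
# grid and the figure settings are invented here, not taken from the original
# analysis. It simply shows how plot_subset, plot_quantiles and
# plot_mean_twosigma can be layered on the same axes.
def _demo_bootstrap_plots():
    rng = np.random.default_rng(0)
    ph_range = np.linspace(2.0, 12.0, 50)
    # fake sigmoidal titration curves with a little bootstrap noise
    curves = 1.0 / (1.0 + 10 ** (ph_range - 7.0 + rng.normal(0.0, 0.2, (500, 1))))
    plt.figure(figsize=(4, 3), dpi=150)
    plot_subset(curves, ph_range, n_choices=25)
    plot_quantiles(curves, ph_range, color="C0", perc=5)
    plot_mean_twosigma(curves, ph_range, color="C1")
    plt.xlabel("pH")
    plt.ylabel("Population")
    plt.legend(fontsize=6)
    plt.show()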
def plot_correlation_analysis(
dataframe: pd.DataFrame,
xlabel: str,
ylabel: str,
title: str,
color: str,
marker: str,
error_color="black",
facecolor="none",
shaded=True,
insets=True,
):
"""Plot correlation between experiment and prediction.
Parameters
----------
dataframe - a typeI/typeIII pKa dataframe
Has columns "Experimental" , "Experimental SEM" ,"Predicted", and "Predicted SEM"
title - to put above plot. use '' (empty string) for no title.
color - edge color of the markers. This plot uses open markers.
error_color - color of the error bars
facecolor - color of the face of markers
"""
# plt.clf()
fig = plt.figure(figsize=[2.5, 2.5], dpi=150)
ax = plt.gca()
ax.set_title(title, fontsize=9)
    # Limit the axes to the 0-16 range unless any pKa values fall outside it
limit_axes = True
if (
np.any(0 > dataframe["pKa Method1"])
or np.any(16.0 < dataframe["pKa Method1"])
or np.any(0 > dataframe["pKa Method2"])
or np.any(16.0 < dataframe["pKa Method2"])
):
limit_axes = False
ax.errorbar(
dataframe["pKa Method1"],
dataframe["pKa Method2"],
xerr=dataframe["pKa SEM Method1"],
yerr=dataframe["pKa SEM Method2"],
fmt="none",
color=error_color,
alpha=0.8,
linewidth=0.5,
zorder=1,
)
ax.scatter(
dataframe["pKa Method1"],
dataframe["pKa Method2"],
marker=marker,
color=color,
facecolors=facecolor,
edgecolors=color,
alpha=0.8,
linewidth=0.7,
zorder=0,
)
texts = []
for r, row in dataframe.iterrows():
if abs(row.Delta) > 2:
texts.append(
ax.text(
row["pKa Method1"],
row["pKa Method2"],
row.Molecule,
va="center",
ha="center",
fontsize=8,
zorder=2,
)
)
adjust_text(texts, arrowprops=dict(arrowstyle="->", color="black", zorder=2))
ax.set_ylabel(ylabel, fontsize=8)
ax.set_xlabel(xlabel, fontsize=8)
    # enforce limits before the linear guides are plotted
xlim = ax.get_xlim()
ylim = ax.get_ylim()
lims = [min([xlim[0], ylim[0]]), max([xlim[1], ylim[1]])]
ax.set_xlim(lims)
ax.set_ylim(lims)
ax.xaxis.set_major_locator(MultipleLocator(2.0))
ax.yaxis.set_major_locator(MultipleLocator(2.0))
if limit_axes:
ax.set_xlim([0, 16])
ax.set_ylim([0, 16])
plt.tight_layout()
sns.despine(fig)
# Add linear guides for 1 and 2 pK unit deviation
ax.plot((-50.0, 50.0), (-50.0, 50.0), "k", zorder=-1, linewidth=0.5, alpha=0.5)
ax.plot(
(-52.0, 48.0),
(-50.0, 50.0),
"gray",
linestyle="--",
zorder=-1,
linewidth=0.5,
alpha=0.5,
)
ax.plot(
(-48.0, 52.0),
(-50.0, 50.0),
"gray",
linestyle="--",
zorder=-1,
linewidth=0.5,
alpha=0.5,
)
if shaded:
ax.fill_between(
[-50.0, 50.0], [-51.0, 49.0], [-49.0, 51.0], color="gray", alpha=0.1
)
return fig, ax
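# --- Hedged usage sketch for plot_correlation_analysis -----------------------
# The toy values below are invented purely for illustration; only the column
# names ("pKa Method1", "pKa SEM Method1", "pKa Method2", "pKa SEM Method2",
# "Delta", "Molecule") are taken from what the function actually reads.
def _demo_correlation_plot():
    import pandas as pd
    toy = pd.DataFrame(
        {
            "Molecule": ["SM01", "SM02", "SM03"],
            "pKa Method1": [4.5, 7.2, 9.8],
            "pKa SEM Method1": [0.1, 0.2, 0.1],
            "pKa Method2": [4.9, 6.8, 12.3],
            "pKa SEM Method2": [0.2, 0.1, 0.3],
        }
    )
    toy["Delta"] = toy["pKa Method2"] - toy["pKa Method1"]
    return plot_correlation_analysis(
        toy,
        xlabel="pKa method 1",
        ylabel="pKa method 2",
        title="Toy correlation example",
        color="C0",
        marker="o",
    )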
class FullpKaComparison:
"""Compile a full report of pKa mapping analysis across all of the SAMPL6 pKa molecules."""
_loss_functions = {"square": squared_loss}
_correlation_metrics = {
"RMSE": array_rmse,
"Mean abs. error": array_mae,
r"pearson $\rho$": wrap_pearsonr,
"Median abs. error": array_median_error,
}
# algorithms per data type
_mapping_algorithms = dict(
typeiii={
"closest": closest_pka,
"hungarian": hungarian_pka,
"align": align_pka,
},
typei={"closest": closest_pka, "hungarian": hungarian_pka},
exp={"closest": closest_pka, "hungarian": hungarian_pka, "align": align_pka},
typeimacro={
"closest": closest_pka,
"hungarian": hungarian_pka,
"align": align_pka,
},
)
def __init__(
self,
exp_provider: SAMPL6DataProvider,
data_providers: List[SAMPL6DataProvider],
included_molecules: Optional[List[str]] = None,
n_bootstrap_correlation=5000,
):
"""Compile a full report of pKa mapping analysis across all of the SAMPL6 pKa molecules."""
# TODO this is commented out for debugging, please put check back in in final version.
# if "exp" != exp_provider.data_type:
# raise TypeError("Need an experimental provider as data type")
self._exp_provider = exp_provider
self._providers = data_providers
# Take all the sampl6 molecules by default if no names provided
self.included_molecules = (
["SM{:02d}".format(molecule + 1) for molecule in range(24)]
if included_molecules is None
else included_molecules
)
self._pka_data = pd.DataFrame()
self._correlation_df = pd.DataFrame()
for provider in self._providers:
if provider.data_type == "exp":
warnings.warn(
"An experiment was provided as a prediction.", UserWarning
)
# number of samples for correlation bootstrap analysis
self._n_bootstrap_correlation = n_bootstrap_correlation
def analyze_all(self):
"""Calculate all possible pKa mappings es for all molecules and methods"""
all_providers: List[SAMPL6DataProvider] = [self._exp_provider] + self._providers
pbar1 = tqdm(all_providers, desc="Dataset", unit="data set")
for provider1 in pbar1:
pbar2 = tqdm(all_providers, desc="Dataset2", unit="data set", leave=False)
for provider2 in pbar2:
if provider1 == provider2:
continue
pkamap = self._perform_pka_maps(provider1, provider2)
self._pka_data = self._pka_data.append(
pkamap, ignore_index=True, sort=False
)
self._correlation_df = self._calculate_correlations()
@staticmethod
def _extract_pka_df(titrationcurve: HaspKaType) -> pd.DataFrame:
"""Extract pKa values and standard errors from a TitrationCurve class that has pKa values."""
return pd.DataFrame({"pKa": titrationcurve.pkas, "SEM": titrationcurve.sems})
def _perform_pka_maps(
self, provider1: SAMPL6DataProvider, provider2: SAMPL6DataProvider
):
full_df = pd.DataFrame()
for mol in tqdm(
self.included_molecules, desc="pKa maps", unit="molecules", leave=False
):
exp = provider1.load(mol)
comp = provider2.load(mol)
exp_pka = self._extract_pka_df(exp)
comp_pka = self._extract_pka_df(comp)
# name, function
for alg, f in self._mapping_algorithms[provider2.data_type].items():
# name, function
if alg not in self._mapping_algorithms[provider1.data_type]:
continue
for loss, l in self._loss_functions.items():
row_df = f(exp_pka, comp_pka, l)
row_df["Algorithm"] = alg
row_df["Loss function"] = loss
row_df["Molecule"] = mol
row_df["Type1"] = provider1.data_type
row_df["Method1"] = provider1.label
row_df["Method2"] = provider2.label
row_df["Type2"] = provider2.data_type
full_df = full_df.append(row_df, ignore_index=True, sort=False)
        # Patch dataframe column names:
        # the mapping functions label the first method "Experimental" and the second "Predicted" by default
full_df = full_df.rename(
columns={
"Experimental": "pKa Method1",
"Experimental SEM": "pKa SEM Method1",
"Predicted": "pKa Method2",
"Predicted SEM": "pKa SEM Method2",
}
)
full_df["Delta"] = full_df.apply(
lambda row: (
ufloat(row["pKa Method2"], row["pKa SEM Method2"])
- ufloat(row["pKa Method1"], row["pKa SEM Method1"])
),
axis=1,
)
return full_df
def _calculate_correlations(self):
"""Calculate correlation metrics from pKa mapping dataframes using bootstrap analysis."""
# name
correlation_df = | pd.DataFrame() | pandas.DataFrame |
#-- -- -- -- Python Data Science Toolbox (Part 2):
# Used for the Data Scientist Training Path.
# FYI: it's a compilation of how to work with different commands.
### --------------------------------------------------------
# # ------>>>>>Iterators vs Iterables
# Let's do a quick recall of what you've learned
# about iterables and iterators. Recall from the video
# that an iterable is an object that can return an iterator,
# while an iterator is an object that keeps state and produces
# the next value when you call next() on it. In this exercise,
# you will identify which object is an iterable and which is an iterator.
# The environment has been pre-loaded with the variables flash1 and flash2.
# Try printing out their values with print() and next() to figure out which
# is an iterable and which is an iterator.
# R/ flash1 is an iterable and flash2 is an iterator.
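# Hedged illustration (the actual flash1/flash2 are only pre-loaded in the
# exercise environment, so stand-in values are used here): an iterable only
# needs __iter__, while an iterator also has __next__ and keeps its position.
flash1 = ['hero one', 'hero two', 'hero three']   # an iterable (a list)
flash2 = iter(flash1)                             # an iterator over that list
print(hasattr(flash1, '__iter__'), hasattr(flash1, '__next__'))  # True False
print(hasattr(flash2, '__iter__'), hasattr(flash2, '__next__'))  # True True
print(next(flash2))  # 'hero one' -- next() only works on the iterator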
### --------------------------------------------------------
# # ------>>>>> Iterating over iterables - ex#0
# Create a list of strings: flash
flash = ['<NAME>', '<NAME>', 'wally west', 'bart allen']
# Print each list item in flash using a for loop
for name in flash:
print(name)
# Create an iterator for flash: superhero
superhero = iter(flash)
# Print each item from the iterator
print(next(superhero))
print(next(superhero))
print(next(superhero))
print(next(superhero))
### --------------------------------------------------------
# # ------>>>>> Iterating over iterables - ex#1
# Create an iterator for range(3): small_value
small_value = iter(range(3))
# Print the values in small_value
print(next(small_value))
print(next(small_value))
print(next(small_value))
# Loop over range(3) and print the values
for num in range(3):
print(num)
# Create an iterator for range(10 ** 100): googol
googol = iter(range(10 ** 100))
# Print the first 5 values from googol
print(next(googol))
print(next(googol))
print(next(googol))
print(next(googol))
print(next(googol))
### --------------------------------------------------------
# # ------>>>>> Iterators as function arguments
# Create a range object: values
values = range(10, 21)
# Print the range object
print(values)
# Create a list of integers: values_list
values_list = list(values)
# Print values_list
print(values_list)
# Get the sum of values: values_sum
values_sum = sum(values)
# Print values_sum
print(values_sum)
### --------------------------------------------------------
# # ------>>>>> Using enumerate
# Create a list of strings: mutants
mutants = ['<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>']
# Create a list of tuples: mutant_list
mutant_list = list(enumerate(mutants))
# Print the list of tuples
print(mutant_list)
# Unpack and print the tuple pairs
for index1, value1 in enumerate(mutants):
print(index1, value1)
# Change the start index
for index2, value2 in enumerate(mutants, start=1):
print(index2, value2)
### --------------------------------------------------------
# # ------>>>>> Using zip
# Create a list of tuples: mutant_data
mutant_data = list(zip(mutants, aliases, powers))
# Print the list of tuples
print(mutant_data)
# Create a zip object using the three lists: mutant_zip
mutant_zip = zip(mutants, aliases, powers)
# Print the zip object
print(mutant_zip)
# Unpack the zip object and print the tuple values
for value1, value2, value3 in mutant_zip:
print(value1, value2, value3)
### --------------------------------------------------------
# # ------>>>>> Using * and zip to 'unzip'
# Create a zip object from mutants and powers: z1
z1 = zip(mutants, powers)
# Print the tuples in z1 by unpacking with *
print(*z1)
# Re-create a zip object from mutants and powers: z1
z1 = zip(mutants, powers)
# 'Unzip' the tuples in z1 by unpacking with * and zip(): result1, result2
result1, result2 = zip(*z1)
# Check if unpacked tuples are equivalent to original tuples
print(result1 == mutants)
print(result2 == powers)
### --------------------------------------------------------
# # ------>>>>> Processing large amounts of Twitter data
# Initialize an empty dictionary: counts_dict
counts_dict = {}
# Iterate over the file chunk by chunk
for chunk in pd.read_csv('./tweets.csv', chunksize=10):
# Iterate over the column in DataFrame
for entry in chunk['lang']:
if entry in counts_dict.keys():
counts_dict[entry] += 1
else:
counts_dict[entry] = 1
# Print the populated dictionary
print(counts_dict)
### --------------------------------------------------------
# # ------>>>>> Extracting information for large amounts of Twitter data
# Define count_entries()
def count_entries(csv_file, c_size, colname):
"""Return a dictionary with counts of
occurrences as value for each key."""
# Initialize an empty dictionary: counts_dict
counts_dict = {}
# Iterate over the file chunk by chunk
for chunk in pd.read_csv(csv_file, chunksize=c_size):
# Iterate over the column in DataFrame
for entry in chunk[colname]:
if entry in counts_dict.keys():
counts_dict[entry] += 1
else:
counts_dict[entry] = 1
# Return counts_dict
return counts_dict
# Call count_entries(): result_counts
result_counts = count_entries('./tweets.csv', 10, 'lang')
# Print result_counts
print(result_counts)
### --------------------------------------------------------
# # ------>>>>> Write a basic list comprehension
# In this exercise, you will practice what you've learned from
# the video about writing list comprehensions. You will write a
# list comprehension and identify the output that will be produced.
# The following list has been pre-loaded in the environment.
# doctor = ['house', 'cuddy', 'chase', 'thirteen', 'wilson']
# How would a list comprehension that produces a list of the first
# character of each string in doctor look like? Note that the list
# comprehension uses doc as the iterator variable. What will the output be?
# R/ The list comprehension is [doc[0] for doc in doctor] and produces
# the list ['h', 'c', 'c', 't', 'w'].
### --------------------------------------------------------
# # ------>>>>> List comprehension over iterables
# You know that list comprehensions can be built over iterables.
# Given the following objects below, which of these can we build list comprehensions over?
# doctor = ['house', 'cuddy', 'chase', 'thirteen', 'wilson']
# range(50)
# underwood = 'After all, we are nothing more or less than what we choose to reveal.'
# jean = '24601'
# flash = ['<NAME>', '<NAME>', 'w<NAME>', '<NAME>']
# valjean = 24601
# R/ You can build list comprehensions over all the objects except the integer object valjean.
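# Hedged illustration of the answer above, using the same example objects:
# strings, lists and range objects are iterable, so comprehensions work over
# them, while the bare integer valjean is not.
jean = '24601'
valjean = 24601
print([int(ch) for ch in jean])    # [2, 4, 6, 0, 1] -- a str is iterable
print([n ** 2 for n in range(5)])  # [0, 1, 4, 9, 16] -- a range is iterable
try:
    [x for x in valjean]
except TypeError as err:
    print(err)                     # 'int' object is not iterable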
### --------------------------------------------------------
# # ------>>>>> Writing list comprehensions
# Create list comprehension: squares
squares = [i ** 2 for i in range(10)]
print(squares)
### --------------------------------------------------------
# # ------>>>>> Nested list comprehensions
# Create a 5 x 5 matrix using a list of lists: matrix
matrix = [[col for col in range(5)] for row in range(5)]
# Print the matrix
for row in matrix:
print(row)
### --------------------------------------------------------
# # ------>>>>> Using conditionals in comprehensions - ex#0
# Create a list of strings: fellowship
fellowship = ['frodo', 'samwise', 'merry', 'aragorn', 'legolas', 'boromir', 'gimli']
# Create list comprehension: new_fellowship
new_fellowship = [member for member in fellowship if len(member) >= 7]
# Print the new list
print(new_fellowship)
### --------------------------------------------------------
# # ------>>>>> Using conditionals in comprehensions - ex#1
# Create a list of strings: fellowship
fellowship = ['frodo', 'samwise', 'merry', 'aragorn', 'legolas', 'boromir', 'gimli']
# Create list comprehension: new_fellowship
new_fellowship = [member if len(member) >= 7 else member.replace(
member, '') for member in fellowship]
# Print the new list
print(new_fellowship)
### --------------------------------------------------------
# # ------>>>>> Dict comprehensions
# Create a list of strings: fellowship
fellowship = ['frodo', 'samwise', 'merry', 'aragorn', 'legolas', 'boromir', 'gimli']
# Create dict comprehension: new_fellowship
new_fellowship = {member: len(member) for member in fellowship}
# Print the new dictionary
print(new_fellowship)
### --------------------------------------------------------
# # ------>>>>> List comprehensions vs generators
# You've seen from the videos that list comprehensions and generator
# expressions look very similar in their syntax, except for the use of
# parentheses () in generator expressions and brackets [] in list comprehensions.
# In this exercise, you will recall the difference between
# list comprehensions and generators. To help with that task,
# the following code has been pre-loaded in the environment:
# # List of strings
# fellowship = ['frodo', 'samwise', 'merry', 'aragorn', 'legolas', 'boromir', 'gimli']
# # List comprehension
# fellow1 = [member for member in fellowship if len(member) >= 7]
# # Generator expression
# fellow2 = (member for member in fellowship if len(member) >= 7)
# Try to play around with fellow1 and fellow2 by figuring out their types and
# printing out their values. Based on your observations and what you can recall
# from the video, select from the options below the best description for the
# difference between list comprehensions and generators.
# R/ A list comprehension produces a list as output, a generator produces a generator object.
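# Hedged illustration of the difference, re-creating the pre-loaded objects:
# the list comprehension is evaluated eagerly and returns a list, while the
# generator expression is lazy and returns a generator object.
fellowship = ['frodo', 'samwise', 'merry', 'aragorn', 'legolas', 'boromir', 'gimli']
fellow1 = [member for member in fellowship if len(member) >= 7]
fellow2 = (member for member in fellowship if len(member) >= 7)
print(type(fellow1))  # <class 'list'>
print(type(fellow2))  # <class 'generator'>
print(fellow1)        # ['samwise', 'aragorn', 'legolas', 'boromir']
print(next(fellow2))  # 'samwise' -- values are produced one at a time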
### --------------------------------------------------------
# # ------>>>>> Write your own generator expressions
# Create generator object: result
result = (num for num in range(31))
# Print the first 5 values
print(next(result))
print(next(result))
print(next(result))
print(next(result))
print(next(result))
# Print the rest of the values
for value in result:
print(value)
### --------------------------------------------------------
# # ------>>>>> Changing the output in generator expressions
# Create a list of strings: lannister
lannister = ['cersei', 'jaime', 'tywin', 'tyrion', 'joffrey']
# Create a generator object: lengths
lengths = (len(person) for person in lannister)
# Iterate over and print the values in lengths
for value in lengths:
print(value)
### --------------------------------------------------------
# # ------>>>>> Build a generator
# Create a list of strings
lannister = ['cersei', 'jaime', 'tywin', 'tyrion', 'joffrey']
# Define generator function get_lengths
def get_lengths(input_list):
"""Generator function that yields the
length of the strings in input_list."""
# Yield the length of a string
for person in input_list:
yield len(person)
# Print the values generated by get_lengths()
for value in get_lengths(lannister):
print(value)
### --------------------------------------------------------
# # ------>>>>> List comprehensions for time-stamped data
# Extract the created_at column from df: tweet_time
tweet_time = df['created_at']
# Extract the clock time: tweet_clock_time
tweet_clock_time = [entry[11:19] for entry in tweet_time]
# Print the extracted times
print(tweet_clock_time)
### --------------------------------------------------------
# # ------>>>>> Conditional list comprehensions for time-stamped data
# Extract the created_at column from df: tweet_time
tweet_time = df['created_at']
# Extract the clock time: tweet_clock_time
tweet_clock_time = [entry[11:19] for entry in tweet_time if entry[17:19] == '19']
# Print the extracted times
print(tweet_clock_time)
### --------------------------------------------------------
# # ------>>>>> Dictionaries for data science
# Zip lists: zipped_lists
zipped_lists = zip(feature_names, row_vals)
# Create a dictionary: rs_dict
rs_dict = dict(zipped_lists)
# Print the dictionary
print(rs_dict)
### --------------------------------------------------------
# # ------>>>>> Writing a function to help you
# Define lists2dict()
def lists2dict(list1, list2):
"""Return a dictionary where list1 provides
the keys and list2 provides the values."""
# Zip lists: zipped_lists
zipped_lists = zip(list1, list2)
# Create a dictionary: rs_dict
rs_dict = dict(zipped_lists)
# Return the dictionary
return rs_dict
# Call lists2dict: rs_fxn
rs_fxn = lists2dict(feature_names, row_vals)
# Print rs_fxn
print(rs_fxn)
### --------------------------------------------------------
# # ------>>>>> Using a list comprehension
# Print the first two lists in row_lists
print(row_lists[0])
print(row_lists[1])
# Turn list of lists into list of dicts: list_of_dicts
list_of_dicts = [lists2dict(feature_names, sublist) for sublist in row_lists]
# Print the first two dictionaries in list_of_dicts
print(list_of_dicts[0])
print(list_of_dicts[1])
### --------------------------------------------------------
# # ------>>>>> Turning this all into a DataFrame
# Import the pandas package
import pandas as pd
# Turn list of lists into list of dicts: list_of_dicts
list_of_dicts = [lists2dict(feature_names, sublist) for sublist in row_lists]
# Turn list of dicts into a DataFrame: df
df = pd.DataFrame(list_of_dicts)
# Print the head of the DataFrame
print(df.head())
### --------------------------------------------------------
# # ------>>>>> Processing data in chunks - ex#0
with open('world_dev_ind.csv') as file:
# Skip the column names
file.readline()
# Initialize an empty dictionary: counts_dict
counts_dict = {}
# Process only the first 1000 rows
for j in range(0, 1000):
# Split the current line into a list: line
line = file.readline().split(',')
# Get the value for the first column: first_col
first_col = line[0]
# If the column value is in the dict, increment its value
if first_col in counts_dict.keys():
counts_dict[first_col] += 1
# Else, add to the dict and set value to 1
else:
counts_dict[first_col] = 1
# Print the resulting dictionary
print(counts_dict)
### --------------------------------------------------------
# # ------>>>>> Writing a generator to load data in chunks (2)
# Define read_large_file()
def read_large_file(file_object):
"""A generator function to read a large file lazily."""
# Loop indefinitely until the end of the file
while True:
# Read a line from the file: data
data = file_object.readline()
# Break if this is the end of the file
if not data:
break
# Yield the line of data
yield data
# Open a connection to the file
with open('world_dev_ind.csv') as file:
# Create a generator object for the file: gen_file
gen_file = read_large_file(file)
# Print the first three lines of the file
print(next(gen_file))
print(next(gen_file))
print(next(gen_file))
### --------------------------------------------------------
# # ------>>>>> Writing a generator to load data in chunks (3)
# Initialize an empty dictionary: counts_dict
counts_dict = {}
# Open a connection to the file
with open('world_dev_ind.csv') as file:
# Iterate over the generator from read_large_file()
for line in read_large_file(file):
row = line.split(',')
first_col = row[0]
if first_col in counts_dict.keys():
counts_dict[first_col] += 1
else:
counts_dict[first_col] = 1
# Print
print(counts_dict)
### --------------------------------------------------------
# # ------>>>>> Writing an iterator to load data in chunks - ex#0
# Import the pandas package
import pandas as pd
# Initialize reader object: df_reader
df_reader = pd.read_csv('ind_pop.csv', chunksize=10)
# Print two chunks
print(next(df_reader))
print(next(df_reader))
### --------------------------------------------------------
# # ------>>>>> Writing an iterator to load data in chunks - ex#1
# Initialize reader object: urb_pop_reader
urb_pop_reader = pd.read_csv('ind_pop_data.csv', chunksize=1000)
# Get the first DataFrame chunk: df_urb_pop
df_urb_pop = next(urb_pop_reader)
# Check out the head of the DataFrame
print(df_urb_pop.head())
# Check out specific country: df_pop_ceb
df_pop_ceb = df_urb_pop[df_urb_pop['CountryCode'] == 'CEB']
# Zip DataFrame columns of interest: pops
pops = zip(df_pop_ceb['Total Population'],
df_pop_ceb['Urban population (% of total)'])
# Turn zip object into list: pops_list
pops_list = list(pops)
# Print pops_list
print(pops_list)
### --------------------------------------------------------
# # ------>>>>> Writing an iterator to load data in chunks - ex#2
# Code from previous exercise
urb_pop_reader = pd.read_csv('ind_pop_data.csv', chunksize=1000)
df_urb_pop = next(urb_pop_reader)
df_pop_ceb = df_urb_pop[df_urb_pop['CountryCode'] == 'CEB']
pops = zip(df_pop_ceb['Total Population'],
df_pop_ceb['Urban population (% of total)'])
pops_list = list(pops)
# Use list comprehension to create new DataFrame column 'Total Urban Population'
df_pop_ceb['Total Urban Population'] = [int(tup[0] * tup[1] * 0.01) for tup in pops_list]
# Plot urban population data
df_pop_ceb.plot(kind='scatter', x='Year', y='Total Urban Population')
plt.show()
### --------------------------------------------------------
# # ------>>>>> Writing an iterator to load data in chunks - ex#3
# Initialize reader object: urb_pop_reader
urb_pop_reader = pd.read_csv('ind_pop_data.csv', chunksize=1000)
# Initialize empty DataFrame: data
data = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
data = | pd.read_csv('data/citibike_tripdata.csv', sep=',') | pandas.read_csv |
"""
Datasets extracted from R packages in CRAN (https://cran.r-project.org/).
@author: <NAME>
@license: MIT
"""
import os
import pathlib
import re
import urllib
import warnings
from distutils.version import LooseVersion
from html.parser import HTMLParser
import pandas as pd
from sklearn.datasets import get_data_home
from sklearn.utils import Bunch
import rdata
from .base import fetch_tgz as _fetch_tgz
class _LatestVersionHTMLParser(HTMLParser):
"""
Class for parsing the version in the CRAN package information page.
"""
def __init__(self, *, convert_charrefs=True):
HTMLParser.__init__(self, convert_charrefs=convert_charrefs)
self.last_is_version = False
self.version = None
self.version_regex = re.compile('(?i).*version.*')
self.handling_td = False
def handle_starttag(self, tag, attrs):
if tag == "td":
self.handling_td = True
def handle_endtag(self, tag):
self.handling_td = False
def handle_data(self, data):
if self.handling_td:
if self.last_is_version:
self.version = data
self.last_is_version = False
elif self.version_regex.match(data):
self.last_is_version = True
def _get_latest_version_online(package_name):
"""
Get the latest version of the package from CRAN.
"""
parser = _LatestVersionHTMLParser()
url_request = urllib.request.Request(
url="https://CRAN.R-project.org/package=" + package_name)
try:
url_file = urllib.request.urlopen(url_request)
except urllib.request.HTTPError as e:
if e.code == 404:
e.msg = f"Package '{package_name}' not found."
raise
url_content = url_file.read().decode('utf-8')
parser.feed(url_content)
return parser.version
def _get_latest_version_offline(package_name):
"""
Get the latest downloaded version of the package.
Returns None if not found.
"""
home = pathlib.Path(get_data_home()) # Should allow providing data home?
downloaded_packages = tuple(home.glob(package_name + "_*.tar.gz"))
if downloaded_packages:
versions = [
LooseVersion(p.name[(len(package_name) + 1):-len(".tar.gz")])
for p in downloaded_packages]
versions.sort()
latest_version = versions[-1]
return str(latest_version)
else:
return None
def _get_version(package_name, *, version=None):
"""
Get the version of the package.
If the version is specified, return it.
Otherwise, try to find the last version online.
If offline, try to find the downloaded version, if any.
"""
if version is None:
try:
version = _get_latest_version_online(package_name)
except urllib.request.URLError:
version = _get_latest_version_offline(package_name)
if version is None:
raise
return version
def _get_urls(package_name, *, version=None):
version = _get_version(package_name, version=version)
latest_url = ("https://cran.r-project.org/src/contrib/" + package_name +
"_" + version + ".tar.gz")
archive_url = ("https://cran.r-project.org/src/contrib/Archive/" +
package_name + "/" + package_name +
"_" + version + ".tar.gz")
return (latest_url, archive_url)
def _download_package_data(package_name, *, package_url=None, version=None,
folder_name=None,
subdir=None):
if package_url is None:
url_list = _get_urls(package_name, version=version)
else:
url_list = (package_url,)
if folder_name is None:
folder_name = os.path.basename(url_list[0])
if subdir is None:
subdir = "data"
for i, url in enumerate(url_list):
try:
directory = _fetch_tgz(folder_name, url, subfolder='cran')
break
except Exception:
# If it is the last url, reraise
if i >= len(url_list) - 1:
raise
data_path = directory / package_name / subdir
return data_path
def fetch_dataset(dataset_name, package_name, *, package_url=None,
version=None, folder_name=None, subdir=None,
converter=None):
"""Fetch an R dataset.
Only .rda datasets in community packages can be downloaded for now.
R datasets do not have a fixed structure, so this function does not
attempt to force one.
Parameters
----------
dataset_name: string
Name of the dataset, including extension if any.
package_name: string
Name of the R package where this dataset resides.
package_url: string
Package url. If `None` it tries to obtain it from the package name.
version: string
If `package_url` is not specified, the version of the package to
download. By default is the latest one.
folder_name: string
Name of the folder where the downloaded package is stored. By default,
is the last component of `package_url`.
subdir: string
Subdirectory of the package containing the datasets. By default is
'data'.
converter: rdata.conversion.Converter
Object used to translate R objects into Python objects.
Returns
-------
data: dict
Dictionary-like object with all the data and metadata.
"""
if converter is None:
converter = rdata.conversion.SimpleConverter()
data_path = _download_package_data(package_name, package_url=package_url,
version=version,
folder_name=folder_name,
subdir=subdir)
file_path = data_path / dataset_name
if not file_path.suffix:
possible_names = list(data_path.glob(dataset_name + ".*"))
if len(possible_names) != 1:
raise FileNotFoundError(f"Dataset {dataset_name} not found in "
f"package {package_name}")
dataset_name = possible_names[0]
file_path = data_path / dataset_name
parsed = rdata.parser.parse_file(file_path)
converted = converter.convert(parsed)
return converted
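def _demo_fetch_geyser():  # pragma: no cover
    """Hedged usage sketch, not part of the public API.

    Fetches the 'geyser' dataset from the CRAN package 'MASS' (the same
    dataset listed in the `datasets` registry further below). It needs
    network access and the rdata dependency; the assumption that the
    converted result is keyed by the R object name 'geyser' follows the
    single-key behaviour expected by `_to_sklearn` below.
    """
    data = fetch_dataset('geyser.rda', 'MASS')
    geyser_df = data['geyser']
    print(geyser_df.head())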
def fetch_package(package_name, *, package_url=None,
version=None,
folder_name=None, subdir=None,
converter=None, ignore_errors=False):
"""Fetch all datasets from a R package.
Only .rda datasets in community packages can be downloaded for now.
R datasets do not have a fixed structure, so this function does not
attempt to force one.
Parameters
----------
package_name: string
Name of the R package.
package_url: string
Package url. If `None` it tries to obtain it from the package name.
version: string
If `package_url` is not specified, the version of the package to
download. By default is the latest one.
folder_name: string
Name of the folder where the downloaded package is stored. By default,
is the last component of `package_url`.
subdir: string
Subdirectory of the package containing the datasets. By default is
'data'.
converter: rdata.conversion.Converter
Object used to translate R objects into Python objects.
ignore_errors: boolean
If True, ignore the datasets producing errors and return the
remaining ones.
Returns
-------
data: dict
Dictionary-like object with all the data and metadata.
"""
if converter is None:
converter = rdata.conversion.SimpleConverter()
data_path = _download_package_data(package_name, package_url=package_url,
version=version,
folder_name=folder_name,
subdir=subdir)
if not data_path.exists():
return {}
all_datasets = {}
for dataset in data_path.iterdir():
if dataset.suffix.lower() in ['.rda', '.rdata']:
try:
parsed = rdata.parser.parse_file(dataset)
converted = converter.convert(parsed)
all_datasets.update(converted)
except Exception:
if not ignore_errors:
raise
else:
warnings.warn(f"Error loading dataset {dataset.name}",
stacklevel=2)
return all_datasets
datasets = {
'geyser': {
'load_args': (['geyser.rda', 'MASS'], {}),
'sklearn_args': ([], {'target_name': 'waiting'})
}
}
def _to_sklearn(dataset, *, target_name):
"""Transforms R datasets to Sklearn format, if possible"""
assert len(dataset.keys()) == 1
name = tuple(dataset.keys())[0]
obj = dataset[name]
if isinstance(obj, pd.DataFrame):
feature_names = list(obj.keys())
feature_names.remove(target_name)
X = | pd.get_dummies(obj[feature_names]) | pandas.get_dummies |
from pathlib import Path
import click
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pyspark.sql.functions as F
import seaborn as sns
import torch
from pyspark.sql import SparkSession
plt.style.use("seaborn")
plt.rcParams.update({
"figure.titlesize": 30,
"axes.titlesize": 24,
"axes.labelsize": 20,
"xtick.labelsize": 16,
"ytick.labelsize": 16,
"legend.title_fontsize": 20,
"legend.fontsize": 16
})
@click.command()
@click.option("--model-id", type=str)
def make_charts(model_id: str):
spark = SparkSession \
.builder \
.appName("ASBA") \
.config("spark.driver.memory", "15g") \
.config("spark.sql.shuffle.partitions", "300") \
.getOrCreate()
businesses = spark.read.json("data/yelp/business.json")
reviews = spark.read.json("data/yelp/review.json")
users = spark.read.json("data/yelp/user.json")
tips = spark.read.json("data/yelp/tip.json")
model_name = model_id.replace('/', '-')
path = Path(f"charts/{model_name}")
path.mkdir(parents=True, exist_ok=True)
# Confusion Heatmap
confusion = pd.read_csv(f"data/{model_name}/confusion.csv")
labels = list(confusion.columns)
confusion = confusion.values
fig, ax = plt.subplots(figsize=(11, 9))
cmap = sns.diverging_palette(230, 20, as_cmap=True)
X = confusion / confusion.sum(1, keepdims=True)
sns.heatmap(
pd.DataFrame(X, index=labels, columns=labels),
cmap=cmap,
center=0,
vmin=0,
vmax=1,
annot=True,
fmt=".3f",
linewidths=.5,
cbar_kws={
"shrink": .5
}
)
ax.set_title(f"Confusion Matrix for {model_id} on Test Set")
ax.set_xlabel("Truth")
ax.set_ylabel("Predicted")
plt.tight_layout()
fig.savefig(path.joinpath("confusion.png"))
# Review vs Tip Word Length
review_wc = reviews \
.withColumn("word_count", F.size(F.split(F.col("text"), " "))) \
.select("word_count") \
.toPandas()
review_wc["type"] = "review"
tip_wc = tips \
.withColumn("word_count", F.size(F.split(F.col("text"), " "))) \
.select("word_count") \
.toPandas()
tip_wc["type"] = "tip"
word_count = pd.concat([review_wc, tip_wc], axis=0, ignore_index=True)
fig, ax = plt.subplots(figsize=(15, 10))
sns.kdeplot(
data=word_count,
x="word_count",
hue="type",
log_scale=True,
cumulative=True,
common_norm=False,
common_grid=True,
ax=ax
)
ax.set_title("Cumulative Dist. Comparison for Tips vs. Reviews")
ax.set_xlabel("Number of Words")
ax.set_ylabel("$F(x)$")
plt.tight_layout()
fig.savefig(path.joinpath("comparison.png"))
del word_count, review_wc, tip_wc
# Bias Correction Chart
adjusted = reviews \
.join(users, reviews.user_id == users.user_id) \
.withColumn("adjusted_stars", (1 / 2)*(F.col("stars") - F.col("average_stars")) + 3) \
.select("business_id", "stars", "adjusted_stars") \
.groupBy("business_id") \
.mean()
ratings = adjusted.toPandas()
ratings["bias"] = ratings["avg(stars)"] - ratings["avg(adjusted_stars)"]
df1 = ratings.get(["business_id", "avg(stars)"])
df1 = df1.rename({"avg(stars)": "stars"}, axis=1)
df1["adjusted"] = False
df2 = ratings.get(["business_id", "avg(adjusted_stars)"])
df2 = df2.rename({"avg(adjusted_stars)": "stars"}, axis=1)
df2["adjusted"] = True
combined = | pd.concat([df1, df2], axis=0, ignore_index=True) | pandas.concat |
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, gneiss development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import numpy as np
import pandas as pd
from gneiss.composition._composition import ilr_transform
from gneiss.cluster import gradient_linkage
import pandas.util.testing as pdt
class TestILRTransform(unittest.TestCase):
def test_ilr(self):
np.random.seed(0)
table = pd.DataFrame([[1, 1, 2, 2],
[1, 2, 2, 1],
[2, 2, 1, 1]],
index=[1, 2, 3],
columns=['a', 'b', 'c', 'd'])
columns = np.random.permutation(list(table.columns))
table = table.reindex(columns=columns)
ph = pd.Series([1, 2, 3], index=table.index)
tree = gradient_linkage(table, ph)
res_balances = ilr_transform(table, tree)
exp_balances = pd.DataFrame(
[[0.693147, -5.551115e-17, 2.775558e-17],
[0.000000, -4.901291e-01, -4.901291e-01],
[-0.693147, 5.551115e-17, -2.775558e-17]],
columns=['y0', 'y1', 'y2'],
index=[1, 2, 3])
exp_balances = exp_balances.reindex(columns=res_balances.columns)
| pdt.assert_frame_equal(res_balances, exp_balances) | pandas.util.testing.assert_frame_equal |
# Coded from PP 156-157 of Zoback 2010
# Python version = 3.6
# conda env = tank
# Normalising to effective stress throughout
from matplotlib import pyplot as plt
import numpy as np
import math
import pandas as pd
import mplstereonet
import functions as fun
| pd.set_option('display.width', 1000) | pandas.set_option |
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex,
InvalidIndexError, NumericIndex)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.series import Series
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
assert_copy)
from pandas import compat
from pandas.compat import long
import pandas.util.testing as tm
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.lib import Timestamp
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
def verify_pickle(self,index):
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
self.assertRaises(TypeError, self._holder)
def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : idx * 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : 1 * idx)
div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : idx / 1)
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : 1 / idx)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : idx // 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : 1 // idx)
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_ndarray_compat_properties(self):
idx = self.create_index()
self.assertTrue(idx.T.equals(idx))
self.assertTrue(idx.transpose().equals(idx))
values = idx.values
for prop in self._compat_props:
self.assertEqual(getattr(idx, prop), getattr(values, prop))
# test for validity
idx.nbytes
idx.values.nbytes
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(
unicodeIndex = tm.makeUnicodeIndex(100),
strIndex = tm.makeStringIndex(100),
dateIndex = tm.makeDateIndex(100),
intIndex = tm.makeIntIndex(100),
floatIndex = tm.makeFloatIndex(100),
boolIndex = Index([True,False]),
empty = Index([]),
tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
[1, 2, 3]))
)
for name, ind in self.indices.items():
setattr(self, name, ind)
def create_index(self):
return Index(list('abcde'))
def test_wrong_number_names(self):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
for ind in self.indices.values():
| assertRaisesRegexp(ValueError, "^Length", testit, ind) | pandas.util.testing.assertRaisesRegexp |
from __future__ import print_function, division, absolute_import
from datetime import timedelta
import sys
import pytest
pytest.importorskip('numpy')
pytest.importorskip('pandas')
import dask
import dask.dataframe as dd
import dask.bag as db
from distributed import Executor
from distributed.executor import _wait
from distributed.utils_test import cluster, loop, gen_cluster
from distributed.collections import (_futures_to_dask_dataframe,
futures_to_dask_dataframe, _futures_to_dask_array,
futures_to_dask_array, _futures_to_collection,
_futures_to_dask_bag, futures_to_dask_bag, _future_to_dask_array,
_futures_to_dask_arrays, futures_to_collection, future_to_dask_array,
futures_to_dask_arrays)
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from toolz import identity
from tornado import gen
from tornado.ioloop import IOLoop
dfs = [pd.DataFrame({'x': [1, 2, 3]}, index=[0, 10, 20]),
pd.DataFrame({'x': [4, 5, 6]}, index=[30, 40, 50]),
pd.DataFrame({'x': [7, 8, 9]}, index=[60, 70, 80])]
def assert_equal(a, b):
assert type(a) == type(b)
if isinstance(a, pd.DataFrame):
tm.assert_frame_equal(a, b)
elif isinstance(a, pd.Series):
tm.assert_series_equal(a, b)
elif isinstance(a, pd.Index):
| tm.assert_index_equal(a, b) | pandas.util.testing.assert_index_equal |
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
class DS:
"""
/**
    When creating an instance, the following parameters are stored as attributes.
    data : Overall data
    predictor_names : List of names of the predictor features
    target_name : Name of the target column
    scalers : A list of various scalers
    encoders : Dictionary; key is the name of the feature to encode, value is the encoder
    fill_nan : Dictionary; the keys are "replace" and "fill_nan".
               The value for each key can also be a dictionary of fill values for each column.
    outliers : Dictionary; the key is "categorical" or "numerical", the value holds the upper/lower bounds.
               "categorical"'s value is a dictionary, "numerical"'s value is also a dictionary,
               with the feature name as key and a list of [upper, lower] bounds as value.
    models : A list of various models.
    params : A list of model hyperparameters.
k_fold : A list of k values.
**/
"""
def __init__(self, data, predictor_names, target_name, test_size, scalers=None, encoders=None, fill_nan=None,
outliers=None, models=None, params=None, k_fold=None):
self.data = data
self.predictor_names = predictor_names
self.target_name = target_name
self.test_size = test_size
self.scalers = scalers
self.encoders = encoders
self.fill_nan = fill_nan
self.outliers = outliers
self.models = models
self.params = params
self.k_fold = k_fold
def run(self, random):
self.preprocessing()
self.build_test_models(random)
self.find_best()
def preprocessing(self):
# handling missing values
if self.fill_nan != None:
if "replace" in self.fill_nan:
self.data.replace(self.fill_nan["replace"], inplace=True)
if "fill_nan" in self.fill_nan:
for key, value in self.fill_nan["fill_nan"].items():
if value == "mean":
self.data[key] = self.data[key].apply(pd.to_numeric)
self.data[key].fillna(self.data[key].mean(), inplace=True)
elif value == "max":
self.data[key] = self.data[key].apply(pd.to_numeric)
self.data[key].fillna(self.data[key].max(), inplace=True)
elif value == "min":
self.data[key] = self.data[key].apply(pd.to_numeric)
self.data[key].fillna(self.data[key].min(), inplace=True)
else:
self.data[key].fillna(value, inplace=True)
# handling outliers
if self.outliers != None:
if "categorical" in self.outliers:
for key, value in self.outliers["categorical"].items():
for outlier in value:
self.data.drop(self.data[self.data[key] == outlier].index, inplace=True)
if "numerical" in self.outliers:
for key, value in self.outliers["numerical"].items():
self.data.drop(self.data[self.data[key] > value[0]].index, inplace=True)
self.data.drop(self.data[self.data[key] < value[1]].index, inplace=True)
# encoding categorical values
if self.encoders != None:
for name, encoder in self.encoders.items():
encoded_data = encoder.fit_transform(self.data[name].values.reshape(-1, 1)).toarray()
encoded_data = | pd.DataFrame(encoded_data) | pandas.DataFrame |
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_datasets as tfds
import matplotlib.pyplot as plt
import pprint
import nltk
import seaborn as sns
import matplotlib.pyplot as plt
import collections
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.metrics import classification_report
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
print(tf.__version__)
#Dataset download (info holds the dataset metadata)
imdb, info = tfds.load("imdb_reviews", with_info=True, as_supervised=True)
#Check the overview of the dataset
print(imdb)
print(len(imdb))
df = pd.DataFrame(imdb["train"])
df.head()
df.info()
#Split into training and test data
train, test = imdb["train"], imdb["test"]
train_sentences = []
train_labels = []
test_sentences = []
test_labels = []
#Convert the Tensors to NumPy objects to get their values
for s, l in train:
train_sentences.append(str(s.numpy()))
train_labels.append(l.numpy())
for s, l in test:
test_sentences.append(str(s.numpy()))
test_labels.append(l.numpy())
train_labels = np.array(train_labels)
test_labels = np.array(test_labels)
#padding
vocab_size = 10000
embedding_dim = 16
max_length = 120
padding_type="post"
oov_tok = "<OOV>"
tokenizer = Tokenizer(num_words = vocab_size, oov_token=oov_tok)
tokenizer.fit_on_texts(train_sentences)
word_index = tokenizer.word_index
train_sequences = tokenizer.texts_to_sequences(train_sentences)
train_padded = pad_sequences(train_sequences,maxlen=max_length, padding=padding_type)
test_sequences = tokenizer.texts_to_sequences(test_sentences)
test_padded = pad_sequences(test_sequences,maxlen=max_length, padding=padding_type)
#Check the text before and after padding
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
def decode_review(text):
return " ".join([reverse_word_index.get(i, "?") for i in text])
print(f"元のテキスト:{train_sentences[1]}\n")
print(f"padding後のテキスト:{train_padded[1]}\n")
print(f"復元後のテキスト:{decode_review(train_padded[1])}")
#学習、テストデータの型とラベルを確認する
print(f"学習データSize:{train_padded.shape}")
print(f"学習データlabel_Size:{train_labels.shape}")
print(f"テストデータSize:{test_padded.shape}")
print(f"テストデータlabel_Size:{test_labels.shape}")
print(type(train_padded))
print(type(train_labels), "\n")
print(type(test_padded))
print(type(test_labels))
#Conv1D_GlobalAveragePooling1D_Model (base model)
model = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
tf.keras.layers.Conv1D(128, 5, activation="relu"),
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(6, activation="relu"),
tf.keras.layers.Dense(1, activation="sigmoid")
])
#compile
model.compile(loss="binary_crossentropy",optimizer="adam",metrics=["accuracy"])
model.summary()
#fit
num_epochs = 5
history = model.fit(train_padded, train_labels, epochs=num_epochs, validation_data=(test_padded, test_labels))
#evaluate
model.evaluate(test_padded, test_labels)
#plot
acc=history.history["accuracy"]
val_acc=history.history["val_accuracy"]
loss=history.history["loss"]
val_loss=history.history["val_loss"]
epochs=range(len(acc))
plt.plot(epochs, acc, "r")
plt.plot(epochs, val_acc, "b")
plt.title("Training and validation accuracy")
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.legend(["Accuracy", "Validation Accuracy"])
plt.show()
plt.plot(epochs, loss, "r")
plt.plot(epochs, val_loss, "b")
plt.title("Training and validation loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend(["Loss", "Validation Loss"])
plt.show()
#Bidirectional_GRU_Dropout_Model (model with overfitting countermeasures)
model2 = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
tf.keras.layers.Bidirectional(tf.keras.layers.GRU(32)),
tf.keras.layers.Dense(6, activation="relu"),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(1, activation="sigmoid")
])
#compile
model2.compile(loss="binary_crossentropy",optimizer="adam",metrics=["accuracy"])
model2.summary()
#fit
num_epochs = 5
history2 = model2.fit(train_padded, train_labels, epochs=num_epochs, validation_data=(test_padded, test_labels))
#evaluate
model2.evaluate(test_padded, test_labels)
#plot
acc=history2.history["accuracy"]
val_acc=history2.history["val_accuracy"]
loss=history2.history["loss"]
val_loss=history2.history["val_loss"]
epochs=range(len(acc))
plt.plot(epochs, acc, "r")
plt.plot(epochs, val_acc, "b")
plt.title("Training and validation accuracy")
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.legend(["Accuracy", "Validation Accuracy"])
plt.show()
plt.plot(epochs, loss, "r")
plt.plot(epochs, val_loss, "b")
plt.title("Training and validation loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend(["Loss", "Validation Loss"])
plt.show()
#comparison
acc=history.history["accuracy"]
val_acc=history.history["val_accuracy"]
loss=history.history["loss"]
val_loss=history.history["val_loss"]
d_acc=history2.history["accuracy"]
d_val_acc=history2.history["val_accuracy"]
d_loss=history2.history["loss"]
d_val_loss=history2.history["val_loss"]
epochs=range(len(acc))
epochs2=range(len(d_acc))
def acc_plot(epochs, epochs2, acc, val_acc, d_acc, d_val_acc):
plt.plot(epochs, acc, "r")
plt.plot(epochs, val_acc, "b")
plt.plot(epochs2, d_acc, "m")
plt.plot(epochs2, d_val_acc, "c")
plt.title("Training and validation accuracy")
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.legend(["Acc", "Valid_Acc", "Dropout_Acc", "Dropout_Val_Acc"])
plt.show()
def loss_plot(epochs, epochs2, loss, val_loss, d_loss, d_val_loss):
plt.plot(epochs, loss, "r")
plt.plot(epochs, val_loss, "b")
plt.plot(epochs2, d_loss, "m")
plt.plot(epochs2, d_val_loss, "c")
plt.title("Training and validation loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend(["Loss", "Validation Loss", "Dropout_Loss", "Dropout_Val_Loss"])
plt.show()
acc_plot(epochs, epochs2, acc, val_acc, d_acc, d_val_acc)
loss_plot(epochs, epochs2, loss, val_loss, d_loss, d_val_loss)
#Check the distribution of the test labels
test_label_num = pd.DataFrame(test_labels)
test_label_num.value_counts()
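#NOTE: y_pred, y_pred2 and c_matrix2 are used below but are not defined in this
#excerpt. A minimal sketch, assuming the usual 0.5 threshold on the sigmoid
#outputs of the two models trained above:
from sklearn.metrics import confusion_matrix
y_pred = (model.predict(test_padded) > 0.5).astype(int).flatten()
y_pred2 = (model2.predict(test_padded) > 0.5).astype(int).flatten()
c_matrix2 = confusion_matrix(test_labels, y_pred2)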
#Display the confusion matrix
def make_cm(matrix, columns):
n = len(columns)
act = ["正解データ"] * n
pred = ["予測結果"] * n
cm = pd.DataFrame(matrix,
columns=[pred, columns], index=[act, columns])
return cm
cm = make_cm(c_matrix2, ["NEGATIVE", "POSITIVE"])
cm
#Check the evaluation metrics (base model first, then the overfitting-countermeasure model)
print(classification_report(test_labels, y_pred, target_names=["0_pos", "1_pos"]), "\n")
print(classification_report(test_labels, y_pred2, target_names=["0_pos", "1_pos"]))
#Sampling
#Get the texts whose true label is positive but which the model predicted as negative
samp_index = []
index = 0
for label, pred in zip(test_labels, y_pred2):
if label == 1 and pred ==0:
samp_index.append(index)
index += 1
else:
index +=1
#Specify multiple indices and fetch the corresponding elements at once
x_test_numpy = np.array(test_sentences)
x_test_samples = x_test_numpy[samp_index]
#Show the positive texts that were predicted as negative in a DataFrame
sample_df = | pd.DataFrame(x_test_samples, columns=["sample_text"]) | pandas.DataFrame |
import helper_functions as hf
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import pandas as pd
def plot_seasonal_cl(ds, xlimits = None):
'''
plots velocity profiles by season and year (with their uncertainty), for a given glacier.
inputs:
ds: seasonally averaged centreline dataset
xlimits: limits of plot
returns:
figure. one axes per season. lines coloured by year
'''
## plot velocity along Fcentreline per season per year
# # for getting color bar nonsense because
cNorm = mpl.colors.Normalize(vmin = np.min(ds.time.dt.year.values),# can use `astype('float64')` instead of `.dt.year` to vary colors between seasons
vmax = np.max(ds.time.dt.year.values))
fig, axs = plt.subplots(ncols = 4, nrows = 1, sharex = True, sharey = True, figsize = (18,6))
# need to loop through each season/year to have full control over colormap
for i, s in enumerate(['DJF', 'MAM', 'JJA', 'SON']):
for t in ds.isel(time = ds.time.dt.season.values == s).time.dt.year.values:
idx = ((ds.time.dt.season.values == s) & (ds.time.dt.year.values == t)) # get index
if np.count_nonzero(idx) == 0: # skip years/seasons with no vals
continue
# plot average line
ds.isel(time = idx).v.plot(ax = axs.flat[i],
c = mpl.cm.viridis(cNorm(ds.isel(time = idx).time.dt.year)))
# to plot the fill between uncertainty
axs.flat[i].fill_between(x = ds.isel(time = idx).distance,
y1 = (ds.isel(time = idx).v + ds.isel(time = idx).verr).values.flatten(),
y2 = (ds.isel(time = idx).v - ds.isel(time = idx).verr).values.flatten(),
alpha = 0.3,
color = mpl.cm.viridis(cNorm(t)))
axs.flat[i].set_title(s)
ylimits = [-10, np.nanmax(ds.v.where(ds.distance < xlimits[1]).values)]
for ax in axs.flat:
ax.set_xlim(xlimits)
ax.set_ylim(ylimits)
# ax.set_xticks([0,4000,8000,12000])
ax.set_xticklabels((ax.get_xticks() / 1000).astype(int))
ax.set_xlabel('Distance (km)')
ax.set_ylabel('')
axs[0].set_ylabel('Velocity (m/yr)')
# add axes labels - do this before adding colorbar axis
hf.labelFig(fig);
################# for color bar
# [left, bottom, width, height]
cbax = fig.add_axes([0.92,0.2,0.15,0.5])
cbax.axis('off')
# cNorm = mpl.colors.Normalize(vmin = minyear,
# vmax = maxyear)
cmap = mpl.cm.get_cmap('viridis')
sc = mpl.cm.ScalarMappable(norm = cNorm, cmap = cmap)
# for correctly labelling colorbar
tickys = pd.date_range(np.min(ds.time.values) - pd.offsets.DateOffset(years=1) ,
np.max(ds.time.values) + pd.offsets.DateOffset(years=1) ,
freq = 'AS')
cb = plt.colorbar(sc, ax=cbax, orientation = 'vertical', fraction = 1, ticks = tickys.year, label = 'Year')
cb.set_ticklabels(tickys.year)
plt.suptitle(ds.attrs['glacier'] +': Seasonal velocity profiles. Image pair separation' +ds.attrs['flab'] + str(ds.attrs['filter-days'])+'days');
fig.subplots_adjust(wspace=0.06, hspace=0)
return fig
###############################################################################################################################################################################################################################
def plot_seasonal_cl_with_counts(ds, xlimits = None):
'''
plots velocity profiles by season and year (with their uncertainty), for a given glacier.
inputs:
ds: seasonally averaged centreline dataset
xlimits: limits of plot
returns:
figure. one axes per season. lines coloured by year
bottom panel shows the number of observations that were used in constructing the seasonal average above. to help make more sense of the uncertainty
BOTTOM PANEL IS LOG SCALE
'''
## plot velocity along centreline per season per year
    # set up colorbar normalization so that line colours map to years
ylimits = [-10, np.nanmax(ds.v.where(ds.distance < xlimits[1]).values)]
cNorm = mpl.colors.Normalize(vmin = np.min(ds.time.dt.year.values),# can use `astype('float64')` instead of `.dt.year` to vary colors between seasons
vmax = np.max(ds.time.dt.year.values))
fig, axs = plt.subplots(ncols = 4, nrows = 2, sharex = True, figsize = (18,6), gridspec_kw={'height_ratios': [3, 1]})
# loop through each season/year to have full control over colormap
for i, s in enumerate(['DJF', 'MAM', 'JJA', 'SON']):
for t in ds.isel(time = ds.time.dt.season.values == s).time.dt.year.values:
# get index
idx = ((ds.time.dt.season.values == s) & (ds.time.dt.year.values == t))
if np.count_nonzero(idx) == 0:
continue
# plot average line
ds.isel(time = idx).v.plot(ax = axs[0,i],
c = mpl.cm.viridis(cNorm(ds.isel(time = idx).time.dt.year))) # can use `astype('float64')` instead of `.dt.year` to vary colors between seasons
# to plot the fill between uncertainty
axs[0,i].fill_between(x = ds.isel(time = idx).distance,
y1 = (ds.isel(time = idx).v + ds.isel(time = idx).verr).values.flatten(),
y2 = (ds.isel(time = idx).v - ds.isel(time = idx).verr).values.flatten(),
alpha = 0.3,
color = mpl.cm.viridis(cNorm(t)))
ds.isel(time = idx)['n'].plot(c = mpl.cm.viridis(cNorm(ds.isel(time = idx).time.dt.year)), ax = axs[1,i])
axs[1,i].set_yscale('log')
# titles and labels
axs[0,i].set_title(s)
axs[1,i].set_title('')
axs[0,i].set_xlabel('')
axs[1,i].set_xlabel('Distance (km)')
for row in [0,1]:
axs[row,i].set_xlim(xlimits)
# axs[row,i].set_xticks([0,4000,8000,12000])
axs[0,i].set_xticklabels('')
axs[1,i].set_xticklabels((axs[1,i].get_xticks() / 1000).astype(int))
axs[0,i].set_ylim(ylimits)
axs[1,i].set_ylim([1,np.nanmax(ds.n.values)])
if i > 0:
axs[0,i].set_ylabel('')
axs[1,i].set_ylabel('')
axs[0,i].set_yticklabels('')
axs[1,i].set_yticklabels('')
else:
axs[0,i].set_ylabel('Velocity (m/yr)')
axs[1,i].set_ylabel('Count')
for ax in axs.flat:
ax.spines.right.set_visible(False)
ax.spines.left.set_visible(False)
ax.spines.top.set_visible(False)
ax.spines.bottom.set_visible(False)
fig.subplots_adjust(wspace=0.06, hspace=0.15)
hf.labelFig(fig, pos = 'custom', xy = (0.05,0.8))
# for color bar
# [left, bottom, width, height]
cbax = fig.add_axes([0.92,0.15,0.15,0.7])
cbax.axis('off')
cmap = mpl.cm.get_cmap('viridis')
sc = mpl.cm.ScalarMappable(norm = cNorm, cmap = cmap)
# for correctly labelling colorbar
tickys = pd.date_range(np.min(ds.time.values) - pd.offsets.DateOffset(years=1) ,
np.max(ds.time.values) + | pd.offsets.DateOffset(years=1) | pandas.offsets.DateOffset |
import csv
import os
from itertools import product
from tempfile import TemporaryDirectory, NamedTemporaryFile
from unittest.mock import patch
import numpy as np
import pandas as pd
import pytest
from Bio import SeqIO
from Bio.Alphabet import generic_alphabet
from Bio.Alphabet import generic_dna, generic_rna
from Bio.Seq import Seq, reverse_complement
from Bio.SeqRecord import SeqRecord
from pandas.util.testing import assert_series_equal, assert_frame_equal
from crseek import exceptions
from crseek import utils
from test.tools import make_random_seq
formats = ['SeqRecord', 'Seq', 'str', 'tuple']
fmts = list(product(formats, formats)) + [('array', 'str')]
@pytest.mark.parametrize("iofmts", fmts)
def test_smrt_seq_write(iofmts):
orig_seqs = [SeqRecord(Seq('AAAAAA', alphabet = generic_alphabet),
id = 'A'),
SeqRecord(Seq('TTTTT', alphabet = generic_alphabet),
id = 'B'),
SeqRecord(Seq('GGGGGG', alphabet = generic_alphabet),
id = 'C')]
infmt, outfmt = iofmts
if infmt == 'SeqRecord':
seqs = [f for f in orig_seqs]
elif infmt == 'Seq':
seqs = [f.seq for f in orig_seqs]
elif infmt == 'str':
seqs = [str(f.seq) for f in orig_seqs]
elif infmt == 'array':
seqs = np.array([str(f.seq) for f in orig_seqs])
elif infmt == 'tuple':
seqs = []
for num, f in enumerate(orig_seqs):
seqs.append(('S-%i' % num, str(f.seq)))
else:
raise AssertionError('Unknown input format %s' % infmt)
rec_out = list(utils.smrt_seq_convert(outfmt, seqs))
rec_inp = orig_seqs
assert len(rec_out) == 3
for num, (out, inp) in enumerate(zip(rec_out, rec_inp)):
if infmt == 'SeqRecord':
inname = inp.id
elif infmt == 'tuple':
inname = 'S-%i' % num
else:
inname = 'Seq-%i' % num
if outfmt == 'SeqRecord':
outname = out.id
outseq = str(out.seq)
elif outfmt == 'Seq':
outname = None
outseq = str(out)
elif outfmt == 'str':
outname = None
outseq = out
elif outfmt == 'tuple':
outname, outseq = out
else:
raise AssertionError('Unknown outformat: %s' % outfmt)
if outname:
assert outname == inname
assert outseq == str(inp.seq)
class TestExtract(object):
def test_basic(self):
seq = Seq('A' * 20 + 'T' * 20 + 'CCGG' + 'T' * 25 + 'GG', alphabet = generic_dna)
cor = sorted([Seq('U' * 19 + 'C', alphabet = generic_rna),
Seq('U' * 20, alphabet = generic_rna),
Seq('A' * 19 + 'C', alphabet = generic_rna),
])
with pytest.warns(None) as warns:
res = utils.extract_possible_targets(SeqRecord(seq))
assert cor == res
assert len(warns) == 0, 'Still emits BioPython warning!'
def test_single_strand(self):
seq = Seq('A' * 20 + 'T' * 20 + 'CCGG' + 'T' * 25 + 'GG', alphabet = generic_dna)
cor = sorted([Seq('U' * 19 + 'C', alphabet = generic_rna),
Seq('U' * 20, alphabet = generic_rna)
])
res = utils.extract_possible_targets(SeqRecord(seq), both_strands = False)
assert cor == res
def test_starts_with_PAM(self):
seq = Seq('CGG' + 'A' * 20 + 'T' * 20 + 'CCGG' + 'T' * 25 + 'GG', alphabet = generic_dna)
cor = sorted([Seq('U' * 19 + 'C', alphabet = generic_rna),
Seq('U' * 20, alphabet = generic_rna),
Seq('A' * 19 + 'C', alphabet = generic_rna)
])
res = utils.extract_possible_targets(SeqRecord(seq))
assert cor == res
class TestTiling(object):
def test_basic(self):
grna = Seq('A' * 20, alphabet = generic_rna)
bseq = 'ACTG' * 20
seqR = SeqRecord(Seq(bseq, alphabet = generic_dna),
id = 'checking')
res = utils.tile_seqrecord(grna, seqR)
assert len(res) > 1
assert (res['spacer'] == grna).all()
for (name, strand, start), row in res.iterrows():
assert name == utils._make_record_key(seqR)
if strand == 1:
assert str(row['target']) == bseq[start:start + 23]
else:
assert str(row['target']) == reverse_complement(bseq[start:start + 23])
def test_str_spacer(self):
grna = 'A' * 20
bseq = 'ACTG' * 20
seqR = SeqRecord(Seq(bseq, alphabet = generic_dna),
id = 'checking')
with pytest.raises(ValueError):
utils.tile_seqrecord(grna, seqR)
def test_dna_spacer(self):
grna = Seq('A' * 20, alphabet = generic_dna)
bseq = 'ACTG' * 20
seqR = SeqRecord(Seq(bseq, alphabet = generic_dna),
id = 'checking')
with pytest.raises(exceptions.WrongAlphabetException):
utils.tile_seqrecord(grna, seqR)
def test_rna_locus(self):
grna = Seq('A' * 20, alphabet = generic_rna)
bseq = 'ACTG' * 20
seqR = SeqRecord(Seq(bseq, alphabet = generic_rna),
id = 'checking')
with pytest.raises(exceptions.WrongAlphabetException):
utils.tile_seqrecord(grna, seqR)
class CasOffABC(object):
def make_basic(self, pam = 'CGG'):
np.random.seed(0)
targets = ['T' * 20,
'T' * 19 + 'A',
'T' * 18 + 'AA',
'T' * 17 + 'AAA',
'T' * 14 + 'A' * 6]
seq_recs = []
for num, tg in enumerate(targets):
seq = make_random_seq(50, restricted = 'AT') + tg + pam + make_random_seq(50, restricted = 'AT')
seq_recs.append(SeqRecord(Seq(seq, alphabet = generic_dna),
id = 'Num-%i' % num, description = ''))
spacer = Seq('U' * 20, alphabet = generic_rna)
cor_index = pd.MultiIndex.from_tuples([('Num-0', 1, 50),
('Num-1', 1, 50),
('Num-2', 1, 50),
('Num-3', 1, 50), ],
names = ['name', 'strand', 'left'])
cor = pd.DataFrame([{'spacer': spacer, 'target': 'T' * 20 + pam},
{'spacer': spacer, 'target': 'T' * 19 + 'A' + pam},
{'spacer': spacer, 'target': 'T' * 18 + 'AA' + pam},
{'spacer': spacer, 'target': 'T' * 17 + 'AAA' + pam}, ],
index = cor_index)[['spacer', 'target']]
return spacer, seq_recs, cor
class TestCasOffSupport(CasOffABC):
_prefix = """Cas-OFFinder v2.4 (Aug 17 2016)
Copyright (c) 2013 <NAME> and <NAME>
Website: http://github.com/snugel/cas-offinder
Usage: cas-offinder {input_file} {C|G|A}[device_id(s)] {output_file}
(C: using CPUs, G: using GPUs, A: using accelerators)
Example input file:
/var/chromosomes/human_hg19
NNNNNNNNNNNNNNNNNNNNNRG
GGCCGACCTGTCGCTGACGCNNN 5
CGCCAGCGTCAGCGACAGGTNNN 5
ACGGCGCCAGCGTCAGCGACNNN 5
GTCGCTGACGCTGGCGCCGTNNN 5
Available device list:
"""
@patch('subprocess.check_output')
def test_guess_openci_devices_gpu(self, mock):
extra = '\n'.join(["Type: GPU, ID: 0, <Tesla C2075> on <NVIDIA CUDA>",
"Type: GPU, ID: 1, <NVS 510> on <NVIDIA CUDA>"])
mock.return_value = (self._prefix + extra).encode('ascii')
res = utils._guess_openci_devices()
assert res == 'G0'
@patch('subprocess.check_output')
def test_guess_openci_devices_cpu(self, mock):
extra = '\n'.join(["Type: CPU, ID: 0, Local CPU",
"Type: GPU, ID: 0, <NVS 510> on <NVIDIA CUDA>"])
mock.return_value = (self._prefix + extra).encode('ascii')
res = utils._guess_openci_devices()
assert res == 'C0'
@pytest.mark.skipif(utils._missing_casoffinder(), reason = "Need CasOff installed")
class TestCasOff(CasOffABC):
def test_basic_seqs(self):
spacer, seq_recs, cor = self.make_basic()
res = utils.cas_offinder([spacer], 3, locus = seq_recs)
assert_frame_equal(res, cor)
def test_smart_error_for_str_spacers(self):
_, seq_recs, cor = self.make_basic()
with pytest.raises(ValueError):
utils.cas_offinder(['U' * 20], 3, locus = seq_recs)
def test_smart_error_for_bad_alphabet_spacers(self):
spacer, seq_recs, cor = self.make_basic()
spacer.alphabet = generic_dna
with pytest.raises(exceptions.WrongAlphabetException):
utils.cas_offinder([spacer], 3, locus = seq_recs)
def test_smart_error_for_str_locus(self):
spacer, seq_recs, cor = self.make_basic()
with pytest.raises(ValueError):
utils.cas_offinder([spacer], 3, locus = ['A' * 500])
def test_no_hits(self):
_, seq_recs, cor = self.make_basic()
np.random.seed(20)
spacer = Seq(''.join(np.random.choice(list('AUCG'), size = 20)),
alphabet = generic_rna)
res = utils.cas_offinder([spacer], 0, locus = seq_recs)
assert len(res.index) == 0
assert res.index.names == ['name', 'strand', 'left']
np.testing.assert_array_equal(res.columns,
['spacer', 'target'])
def test_change_pam_long(self):
NmCas9_pam = 'NNNNGATT'
spacer, seq_recs, cor = self.make_basic(pam = 'CGCGGATT')
res = utils.cas_offinder([spacer], 3, locus = seq_recs, pam = NmCas9_pam)
| assert_frame_equal(res, cor) | pandas.util.testing.assert_frame_equal |
from connection import cer_connection
import pandas as pd
import numpy as np
import os
from errors import IdError
from datetime import date
import io
def execute_sql(path, query_name, db='tsql23cap'):
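    """Read the SQL file `query_name` from `path` (trying UTF-16 first, then the
    default encoding), execute it against the CER database connection and return
    the result as a DataFrame."""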
query_path = os.path.join(path, query_name)
conn, engine = cer_connection(db=db)
def utf16open(query_path):
file = io.open(query_path, mode='r', encoding="utf-16", errors='ignore')
query = file.read()
file.close()
return query
def no_encoding_open(query_path):
file = io.open(query_path, mode='r', errors='ignore')
query = file.read()
file.close()
return query
try:
query = utf16open(query_path)
except:
query = no_encoding_open(query_path)
df = pd.read_sql_query(query, con=conn)
conn.close()
return df
def most_common(df,
meta,
col_name,
meta_key,
top=1,
dtype="dict",
lower=True,
joinTies=True):
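    """Count the (comma-separated) entries in df[col_name], ignoring placeholder
    values, and store the `top` most frequent entries in meta[meta_key] -- as a
    dict of counts, or as a list/single value when dtype is not 'dict'. Ties are
    joined with ' & ' when joinTies is True."""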
what_list = []
for what in df[col_name]:
what = str(what)
if ',' in what:
what_list.extend(what.split(','))
else:
what_list.append(what)
what_list = [x.strip() for x in what_list]
what_list = [x for x in what_list if x not in ['To be determined', '', "Other", "Not Specified", "Sans objet", "Autre"]]
dft = pd.DataFrame(what_list, columns=["entries"])
dft['records'] = 1
dft = dft.groupby(by="entries").sum().reset_index()
dft = dft.sort_values(by=['records', 'entries'], ascending=[False, True])
if joinTies:
dft = dft.groupby(by="records").agg({"entries": " & ".join}).reset_index()
dft = dft.sort_values(by=['records'], ascending=False)
dft = dft.head(top)
counter = {}
for name, count in zip(dft['entries'], dft['records']):
counter[name] = count
if lower:
counter = {k.lower(): counter[k] for k in list(counter)}
if dtype != "dict":
counter = list(counter.keys())
if top == 1:
counter = list(counter.keys())[0]
meta[meta_key] = counter
return meta
def normalizeBool(df, cols, normType="Y/N"):
for col in cols:
df[col] = [str(x).strip() for x in df[col]]
if normType == "T/F":
df[col] = df[col].replace({"True": "T",
"False": "F"})
elif normType == "Y/N":
df[col] = df[col].replace({"True": "Yes",
"False": "No"})
return df
def normalize_dates(df, date_list, short_date=False):
for date_col in date_list:
df[date_col] = pd.to_datetime(df[date_col], errors='raise')
if short_date:
df[date_col] = df[date_col].dt.date
return df
def normalize_text(df, text_list):
for text_col in text_list:
df[text_col] = df[text_col].astype(object)
df[text_col] = [str(x).strip() for x in df[text_col]]
return df
def normalize_numeric(df, num_list, decimals):
for num_col in num_list:
df[num_col] = pd.to_numeric(df[num_col], errors='coerce')
df[num_col] = df[num_col].round(decimals)
return df
def pipeline_names():
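    """Read the pipeline naming-convention spreadsheet and return a dict mapping
    each old company name to its suggested new pipeline name."""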
read_path = os.path.join(os.getcwd(), 'raw_data/','NEB_DM_PROD - 1271412 - Pipeline Naming Conventions.XLSX')
df = pd.read_excel(read_path, sheet_name='Pipeline Naming Conventions')
df = df.rename(columns={x: x.strip() for x in df.columns})
df['old name'] = [x.strip() for x in df['Company List maintained by <NAME> https://www.cer-rec.gc.ca/bts/whwr/cmpnsrgltdbnb-eng.html']]
df['new name'] = [x.strip() for x in df['Suggested Pipeline Name for ALL Future External Publications']]
return {old_name: new_name for old_name, new_name in zip(df['old name'],
df['new name'])}
def daysInYear(year):
d1 = date(year, 1, 1)
d2 = date(year + 1, 1, 1)
return (d2 - d1).days
def saveJson(df, write_path, precision=2):
df.to_json(write_path,
orient='records',
double_precision=precision,
compression='infer')
def get_company_names(col):
return sorted(list(set(col)))
def company_rename():
names = {'Westcoast Energy Inc., carrying on business as Spectra Energy Transmission': 'Westcoast Energy Inc.',
'Kingston Midstream Limited': 'Kingston Midstream Westspur Limited',
'Trans Québec and Maritimes Pipeline Inc.': 'Trans Quebec and Maritimes Pipeline Inc.',
'Enbridge Southern Lights GP Inc. on behalf of Enbridge Southern Lights LP': 'Southern Lights Pipeline',
'Alliance Pipeline Ltd as General Partner of Alliance Pipeline Limited Partnership': 'Alliance Pipeline Ltd.',
'Trans Mountain Pipeline Inc.': 'Trans Mountain Pipeline ULC',
'Kinder Morgan Cochin ULC': 'PKM Cochin ULC',
'Enbridge Bakken Pipeline Company Inc., on behalf of Enbridge Bakken Pipeline Limited Partnership': 'Enbridge Bakken Pipeline Company Inc.',
'TEML Westspur Pipelines Limited': 'Kingston Midstream Westspur Limited',
'Plains Marketing Canada, L.P.': 'Plains Midstream Canada ULC'}
return names
def conversion(df, commodity, dataCols, rounding=False, fillna=False):
if commodity == 'gas':
conv = 28316.85
elif commodity == "oil":
conv = 6.2898
for col in dataCols:
if fillna:
df[col] = df[col].fillna(fillna)
if commodity == "oil":
df[col] = [x*conv if not | pd.isnull(x) | pandas.isnull |
import numpy as np
import pandas as pd
import rqdatac as rq
rq.init()
#backtest period
startdate= | pd.to_datetime('20100104') | pandas.to_datetime |
import pandas as pd
import time
import utils
from datetime import datetime
class DataLoader:
""" Utils to load Yelp dataset data files and merge these to one data
frame, with one row for each review. Each row holds business and user data
and a label 1.0 for a 5 star rating and 0.0 for ratings of 1 to 4 stars.
Args:
path (string) : Path to directory containing yelp data files.
"""
def __init__(self, path="../data/"):
self.path = path
def load_review(self):
""" Load review data and form a data frame.
"""
start = time.time()
print("Loading review ...")
df = pd.read_json(self.path + "review.json", lines=True)
df.dropna(inplace=True)
# Map 5 star rating -> label = 1.0 and 1-4 star ratings -> label = 0.0
df['label'] = list(map(lambda x: int(int(x) == 5), df['stars'].values))
# Keep desired columns
df = df[['review_id', 'business_id', 'user_id', 'label', 'date']]
t = time.time() - start
print("Loaded review in {:.1f} seconds".format(t))
return df
def load_user(self):
""" Load user data, count number of friends and add seniority feature.
Returns:
df (pd.DataFrame) : Data for each user
"""
start = time.time()
print("Loading user ...")
df = pd.read_json(self.path + "user.json", lines=True)
df = df[df['review_count'] > 0] # Remove users with no reviews
        # Parse the account-creation date; note that 'yelping_since' is dropped
        # below together with the other unused columns.
date_to_day = lambda d: datetime.strptime(d, '%Y-%m-%d %H:%M:%S')
df['yelping_since'] = list(map(date_to_day, df['yelping_since']))
# Count number of friends and add this as a feature
count_friends = lambda d: len(d.split(','))
df['nb_friends'] = list(map(count_friends, df['friends']))
# Remove elite and generic columns
df.drop(["elite", "name", "yelping_since", "friends"],
axis=1,
inplace=True)
# Get and remove heavily skewed columns which add little information
columns_to_remove = utils.get_skewed_columns(df)
df.drop(columns_to_remove, axis=1, inplace=True)
t = time.time() - start
print("Loaded user in {:.1f} seconds".format(t))
return df
def load_business(self):
""" Load business data and form a dataframe.
"""
start = time.time()
print("Loading business ...")
df = | pd.read_json("../data/business.json", lines=True) | pandas.read_json |
#:!/usr/bin/env python
#: -*- coding: utf-8 -*-
__author__ = 'mayanqiong'
from collections import namedtuple
from datetime import datetime
from typing import Callable, Tuple
import aiohttp
from pandas import DataFrame, Series
from sgqlc.operation import Operation
from tqsdk.backtest import TqBacktest
from tqsdk.datetime import _get_expire_rest_days, _str_to_timestamp_nano
from tqsdk.ins_schema import ins_schema, _add_all_frags
from tqsdk.objs import Quote
from tqsdk.diff import _get_obj
from tqsdk.utils import _query_for_quote, _generate_uuid
from tqsdk.tafunc import _get_t_series, get_impv, _get_d1, get_delta, get_theta, get_gamma, get_vega, get_rho
"""
These two classes are only used inside the api; they mainly exist so that users can await them in async code.
They do not inherit from the Entity class.
"""
async def ensure_quote(api, quote):
if quote.price_tick > 0 and quote.datetime != "":
return quote
async with api.register_update_notify(quote) as update_chan:
async for _ in update_chan:
if quote.price_tick > 0 and quote.datetime != "":
return quote
async def ensure_quote_with_underlying(api, quote):
await ensure_quote(api, quote)
if quote.underlying_symbol:
await ensure_quote(api, quote.underlying_quote)
return quote
class QuoteList(list):
"""
    Request instrument and quote information; when self._task completes, every instrument has received both its instrument information and its quote data.
"""
def __init__(self, api, quotes):
self._api = api
list.__init__(self, quotes)
self._task = api.create_task(self._ensure_quotes(), _caller_api=True)
for quote in quotes:
            # create a _task for each quote object
if not hasattr(quote, '_task'):
quote._task = api.create_task(ensure_quote_with_underlying(api, quote), _caller_api=True)
async def _ensure_symbols(self):
if all([q.price_tick > 0 for q in self]):
return
query_symbols = [q._path[-1] for q in self if not q.price_tick > 0]
query_pack = _query_for_quote(query_symbols)
self._api._send_pack(query_pack)
async with self._api.register_update_notify(self) as update_chan:
async for _ in update_chan:
if all([q.price_tick > 0 for q in self]):
return
async def _ensure_quotes(self):
await self._ensure_symbols()
        self._api._auth._has_md_grants([q._path[-1] for q in self])  # permission check
        # the request asks for all fields; for options it also requests the underlying instrument's information
underlying_symbols = set([q.underlying_symbol for q in self if q.underlying_symbol])
need_quotes = set([q._path[-1] for q in self]).union(underlying_symbols)
if need_quotes - self._api._requests["quotes"] != set():
self._api._requests["quotes"] = self._api._requests["quotes"].union(need_quotes)
self._api._send_pack({
"aid": "subscribe_quote",
"ins_list": ",".join(self._api._requests["quotes"]),
})
if all([q.datetime != "" for q in self]):
return self
all_quotes = self + [_get_obj(self._api._data, ["quotes", s], self._api._prototype["quotes"]["#"]) for s in underlying_symbols]
async with self._api.register_update_notify(self) as update_chan:
async for _ in update_chan:
if all([q.datetime != "" for q in all_quotes]):
return self
def __await__(self):
return self._task.__await__()
async def _query_graphql_async(api, query_id, query):
api._send_pack({
"aid": "ins_query",
"query_id": query_id,
"query": query
})
symbols = _get_obj(api._data, ["symbols"])
async with api.register_update_notify(symbols) as update_chan:
async for _ in update_chan:
s = symbols.get(query_id, {})
if s.get("query") == query:
break
class SymbolList(list):
"""
    Return object for the query family of functions.
"""
def __init__(self, api, query_id: str, query: str, filter: Callable[[dict], list]):
self._api = api
self._query_id = query_id
self._query = query
self._filter = filter
list.__init__(self, [])
self._task = api.create_task(self._query_graphql(), _caller_api=True)
async def _query_graphql(self):
pack = {"query": self._query}
symbols = _get_obj(self._api._data, ["symbols"])
query_result = None
for symbol in symbols.values():
            if symbol.items() >= pack.items():  # check whether an identical request has already been sent
query_result = symbol
if query_result is None:
await _query_graphql_async(self._api, self._query_id, self._query)
query_result = symbols.get(self._query_id)
self += self._filter(query_result)
        if isinstance(self._api._backtest, TqBacktest):  # in backtest mode, clear the cached request
self._api._send_pack({
"aid": "ins_query",
"query_id": self._query_id,
"query": ""
})
return self
def __await__(self):
return self._task.__await__()
class SymbolLevelList(namedtuple('SymbolLevel', ['in_money_options', 'at_money_options', 'out_of_money_options'])):
"""
    Return object for the query family of functions.
"""
def __new__(cls, *args, **kwargs):
return super(SymbolLevelList, cls).__new__(cls, in_money_options=[], at_money_options=[], out_of_money_options=[])
def __init__(self, api, query_id: str, query: str, filter: Callable[[dict], Tuple[list, list, list]]):
self._api = api
self._query_id = query_id
self._query = query
self._filter = filter
self._task = api.create_task(self._query_graphql(), _caller_api=True)
async def _query_graphql(self):
pack = {"query": self._query}
symbols = _get_obj(self._api._data, ["symbols"])
query_result = None
for symbol in symbols.values():
            if symbol.items() >= pack.items():  # check whether an identical request has already been sent
query_result = symbol
if query_result is None:
await _query_graphql_async(self._api, self._query_id, self._query)
query_result = symbols.get(self._query_id)
l0, l1, l2 = self._filter(query_result)
self[0].extend(l0)
self[1].extend(l1)
self[2].extend(l2)
        if isinstance(self._api._backtest, TqBacktest):  # in backtest mode, clear the cached request
self._api._send_pack({
"aid": "ins_query",
"query_id": self._query_id,
"query": ""
})
return self
def __await__(self):
return self._task.__await__()
class TqDataFrame(DataFrame):
def __init__(self, api, *args, **kwargs):
super(TqDataFrame, self).__init__(*args, **kwargs)
self.__dict__["_api"] = api
self.__dict__["_task"] = api.create_task(self.async_update(), _caller_api=True)
async def async_update(self):
async with self._api.register_update_notify(self) as update_chan:
async for _ in update_chan:
if self._api._serials.get(id(self))["init"]:
return self
def __await__(self):
return self.__dict__["_task"].__await__()
class TqSymbolDataFrame(DataFrame):
def __init__(self, api, symbol_list, backtest_timestamp, *args, **kwargs):
self.__dict__["_api"] = api
self.__dict__["_symbol_list"] = symbol_list
self.__dict__["_backtest_timestamp"] = backtest_timestamp
self.__dict__["_columns"] = [
"ins_class",
"instrument_id",
"instrument_name",
"price_tick",
"volume_multiple",
"max_limit_order_volume",
"max_market_order_volume",
"underlying_symbol",
"strike_price",
"exchange_id",
"product_id",
"expired",
"expire_datetime",
"expire_rest_days",
"delivery_year",
"delivery_month",
"last_exercise_datetime",
"exercise_year",
"exercise_month",
"option_class",
"upper_limit",
"lower_limit",
"pre_settlement",
"pre_open_interest",
"pre_close",
"trading_time_day",
"trading_time_night"
]
default_quote = Quote(None)
data = [{k: (s if k == "instrument_id" else default_quote.get(k, None)) for k in self.__dict__["_columns"]} for s in symbol_list]
super(TqSymbolDataFrame, self).__init__(data=data, columns=self.__dict__["_columns"], *args, **kwargs)
self.__dict__["_task"] = api.create_task(self.async_update(), _caller_api=True)
async def async_update(self):
query_id = _generate_uuid("PYSDK_api")
op = Operation(ins_schema.rootQuery)
variables = {"instrument_id": self.__dict__["_symbol_list"]}
if self.__dict__["_backtest_timestamp"]:
variables["timestamp"] = self.__dict__["_backtest_timestamp"]
query = op.multi_symbol_info(**variables)
_add_all_frags(query)
self.__dict__["_api"]._send_pack({
"aid": "ins_query",
"query_id": query_id,
"query": op.__to_graphql__()
})
symbols = _get_obj(self.__dict__["_api"]._data, ["symbols"])
async with self.__dict__["_api"].register_update_notify(symbols) as update_chan:
async for _ in update_chan:
query_result = symbols.get(query_id, {})
if query_result:
all_keys = set(self.__dict__["_columns"])
all_keys.add('trading_time')
quotes = self.__dict__["_api"]._symbols_to_quotes(query_result, keys=all_keys)
self._quotes_to_dataframe(quotes)
if self.__dict__["_backtest_timestamp"]:
                        # in backtest mode these fields should be nan
self.loc[:, ["upper_limit", "lower_limit", "pre_settlement", "pre_open_interest", "pre_close"]] = float('nan')
                        # in backtest mode, clear the request so its content is not cached
self.__dict__["_api"]._send_pack({
"aid": "ins_query",
"query_id": query_id,
"query": ""
})
return self
def _get_trading_time(self, quotes, symbol, key):
v = quotes[symbol].get('trading_time', {'day': [], 'night': []}).get(key, [])
return v if v else None
def _quotes_to_dataframe(self, quotes):
default_quote = Quote(None)
for col in self.__dict__["_columns"]:
if col == "expire_rest_days":
current_dt = self._api._get_current_datetime().timestamp()
self.loc[:, col] = [_get_expire_rest_days(quotes[s]['expire_datetime'], current_dt)
if quotes[s].get('expire_datetime') else float('nan')
for s in self.__dict__["_symbol_list"]]
elif col == "trading_time_day" or col == "trading_time_night":
k = 'day' if col == "trading_time_day" else 'night'
self.loc[:, col] = Series([self._get_trading_time(quotes, s, k) for s in self.__dict__["_symbol_list"]])
else:
self.loc[:, col] = Series([quotes[s].get(col, default_quote[col]) for s in self.__dict__["_symbol_list"]])
def __await__(self):
return self.__dict__["_task"].__await__()
class TqSymbolRankingDataFrame(DataFrame):
def __init__(self, api, symbol, ranking_type, days, start_dt, broker):
self.__dict__["_api"] = api
params = {'symbol': symbol}
if days is not None:
params['days'] = days
if start_dt is not None:
params['start_date'] = start_dt.strftime("%Y%m%d")
if broker is not None:
params['broker'] = broker
self.__dict__["_params"] = params
self.__dict__["_symbol"] = symbol
self.__dict__["_ranking_type"] = f"{ranking_type.lower()}_ranking"
self.__dict__["_columns"] = [
"datetime",
"symbol",
"exchange_id",
"instrument_id",
"broker",
"volume",
"volume_change",
"volume_ranking",
"long_oi",
"long_change",
"long_ranking",
"short_oi",
"short_change",
"short_ranking"
]
super(TqSymbolRankingDataFrame, self).__init__(data=[], columns=self.__dict__["_columns"])
self.__dict__["_task"] = api.create_task(self.async_update(), _caller_api=True)
async def _get_ranking_data(self, ranking_id):
        # download the position-ranking data and send it back to api.recv_chan
async with aiohttp.ClientSession(headers=self.__dict__["_api"]._base_headers) as session:
url = "https://symbol-ranking-system-fc-api.shinnytech.com/srs"
async with session.get(url, params=self.__dict__["_params"]) as response:
response.raise_for_status()
content = await response.json()
await self.__dict__["_api"]._ws_md_recv_chan.send({
"aid": "rtn_data",
"data": [{
"_symbol_rankings": {
ranking_id: content
}
}]
})
async def async_update(self):
await self.__dict__["_api"]._ensure_symbol_async(self.__dict__["_symbol"])
ranking_id = _generate_uuid("PYSDK_rank")
self.__dict__["_api"].create_task(self._get_ranking_data(ranking_id), _caller_api=True) # 错误会抛给 api 处理
symbol_rankings = _get_obj(self.__dict__["_api"]._data, ["_symbol_rankings"])
async with self.__dict__["_api"].register_update_notify(symbol_rankings) as update_chan:
async for _ in update_chan:
content = symbol_rankings.get(ranking_id, None)
if content is None:
continue
data = self._content_to_list(content)
for i, d in enumerate(data):
self.loc[i] = d
self.dropna(subset=[self.__dict__["_ranking_type"]], inplace=True)
self.sort_values(by=['datetime', self.__dict__["_ranking_type"]], inplace=True, ignore_index=True)
                # after the data has been read, clear it
await self.__dict__["_api"]._ws_md_recv_chan.send({
"aid": "rtn_data",
"data": [{
"_symbol_rankings": {
ranking_id: None
}
}]
})
return self
def _content_to_list(self, content):
data = {}
for dt in content.keys():
for symbol in content[dt].keys():
if content[dt][symbol] is None:
continue
for data_type, rankings in content[dt][symbol].items():
for broker, rank_item in rankings.items():
item = data.setdefault((dt, symbol, broker), self._get_default_item(dt, symbol, broker))
if data_type == 'volume_ranking':
item['volume'] = rank_item['volume']
item['volume_change'] = rank_item['varvolume']
item['volume_ranking'] = rank_item['ranking']
elif data_type == 'long_ranking':
item['long_oi'] = rank_item['volume']
item['long_change'] = rank_item['varvolume']
item['long_ranking'] = rank_item['ranking']
elif data_type == 'short_ranking':
item['short_oi'] = rank_item['volume']
item['short_change'] = rank_item['varvolume']
item['short_ranking'] = rank_item['ranking']
return data.values()
def _get_default_item(self, dt, symbol, broker):
return {
"datetime": dt,
"symbol": symbol,
"exchange_id": symbol.split(".", maxsplit=1)[0],
"instrument_id": symbol.split(".", maxsplit=1)[1],
"broker": broker,
"volume": float('nan'),
"volume_change": float('nan'),
"volume_ranking": float('nan'),
"long_oi": float('nan'),
"long_change": float('nan'),
"long_ranking": float('nan'),
"short_oi": float('nan'),
"short_change": float('nan'),
"short_ranking": float('nan')
}
def __await__(self):
return self.__dict__["_task"].__await__()
class TqOptionGreeksDataFrame(DataFrame):
def __init__(self, api, symbol_list, v_list, r):
self.__dict__["_api"] = api
self.__dict__["_symbol_list"] = symbol_list
self.__dict__["_v_list"] = v_list
self.__dict__["_r"] = r
self.__dict__["_columns"] = [
"instrument_id",
"instrument_name",
"option_class",
"expire_rest_days",
"expire_datetime",
"underlying_symbol",
"strike_price",
"delta",
"gamma",
"theta",
"vega",
"rho"
]
super(TqOptionGreeksDataFrame, self).__init__(data=[], columns=self.__dict__["_columns"])
self.__dict__["_task"] = api.create_task(self.async_update(), _caller_api=True)
async def async_update(self):
symbol_list = self.__dict__["_symbol_list"]
quotes = await self.__dict__["_api"].get_quote_list(symbol_list)
if not all([q.ins_class.endswith("OPTION") for q in quotes]):
raise Exception("quote 参数列表中元素必须是期权类型")
for i, q in enumerate(quotes):
self.loc[i] = {k: q.get(k, float('nan')) for k in self.__dict__["_columns"]}
self._get_greeks(quotes)
return self
def _get_greeks(self, quotes):
        series_close = Series(data=[q.last_price for q in quotes])  # option last price
series_close1 = | Series(data=[q.underlying_quote.last_price for q in quotes]) | pandas.Series |
from pandas.util.py3compat import StringIO
import unittest
import sqlite3
import sys
import numpy as np
import pandas.io.sql as sql
import pandas.util.testing as tm
from pandas import Series, Index
class TestSQLite(unittest.TestCase):
def setUp(self):
self.db = sqlite3.connect(':memory:')
def test_basic(self):
frame = tm.makeTimeDataFrame()
self._check_roundtrip(frame)
def test_write_row_by_row(self):
frame = tm.makeTimeDataFrame()
frame.ix[0, 0] = np.nan
create_sql = sql.get_sqlite_schema(frame, 'test')
self.db.execute(create_sql)
cur = self.db.cursor()
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
for idx, row in frame.iterrows():
fmt_sql = sql.format_query(ins, *row)
sql.tquery(fmt_sql, cur=cur)
self.db.commit()
result = sql.read_frame("select * from test", con=self.db)
result.index = frame.index
tm.assert_frame_equal(result, frame)
def test_execute(self):
frame = tm.makeTimeDataFrame()
create_sql = | sql.get_sqlite_schema(frame, 'test') | pandas.io.sql.get_sqlite_schema |
import pandas as pd
data_path = '../data/'
store_id_map = pd.read_csv(data_path + 'store_id_relation.csv').set_index('hpg_store_id',drop=False)
air_reserve = pd.read_csv(data_path + 'air_reserve.csv').rename(columns={'air_store_id':'store_id'})
hpg_reserve = pd.read_csv(data_path + 'hpg_reserve.csv').rename(columns={'hpg_store_id':'store_id'})
air_reserve['visit_date'] = pd.to_datetime(air_reserve['visit_datetime'].str[:10])
air_reserve['reserve_date'] = | pd.to_datetime(air_reserve['reserve_datetime'].str[:10]) | pandas.to_datetime |
import pandas as pd
import pytest
from pandera import errors
from pandera import (
Column, DataFrameSchema, Index, MultiIndex, Check, DateTime, Float, Int,
String)
def test_column():
schema = DataFrameSchema({
"a": Column(Int, Check(lambda x: x > 0, element_wise=True))
})
data = pd.DataFrame({"a": [1, 2, 3]})
assert isinstance(schema.validate(data), pd.DataFrame)
def test_index_schema():
schema = DataFrameSchema(
columns={},
index=Index(
Int, [
Check(lambda x: 1 <= x <= 11, element_wise=True),
Check(lambda index: index.mean() > 1)]
))
df = pd.DataFrame(index=range(1, 11), dtype="int64")
assert isinstance(schema.validate(df), pd.DataFrame)
with pytest.raises(errors.SchemaError):
schema.validate(pd.DataFrame(index=range(1, 20)))
def test_multi_index_columns():
schema = DataFrameSchema({
("zero", "foo"): Column(Float, Check(lambda s: (s > 0) & (s < 1))),
("zero", "bar"): Column(
String, Check(lambda s: s.isin(["a", "b", "c", "d"]))),
("one", "foo"): Column(Int, Check(lambda s: (s > 0) & (s < 10))),
("one", "bar"): Column(
DateTime, Check(lambda s: s == pd.datetime(2019, 1, 1)))
})
validated_df = schema.validate(
pd.DataFrame({
("zero", "foo"): [0.1, 0.2, 0.7, 0.3],
("zero", "bar"): ["a", "b", "c", "d"],
("one", "foo"): [1, 6, 4, 7],
("one", "bar"): | pd.to_datetime(["2019/01/01"] * 4) | pandas.to_datetime |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
| tm.assert_frame_equal(df, expected) | pandas.util.testing.assert_frame_equal |
"""
The pyvcf submodule is designed for working with VCF files. It implements
``pyvcf.VcfFrame`` which stores VCF data as ``pandas.DataFrame`` to allow
fast computation and easy manipulation. The ``pyvcf.VcfFrame`` class also
contains many useful plotting methods such as ``VcfFrame.plot_comparison``
and ``VcfFrame.plot_tmb``. The submodule strictly adheres to the
standard `VCF specification
<https://samtools.github.io/hts-specs/VCFv4.3.pdf>`_.
A typical VCF file contains metadata lines (prefixed with '##'), a header
line (prefixed with '#'), and genotype lines that begin with a chromosome
identifier (e.g. 'chr1'). See the VCF specification above for an example
VCF file.
Genotype lines have nine required fields for storing variant information
and variable-length fields for storing sample genotype data. For some
fields, missing values are tolerated and can be specified with a dot ('.').
The nine required fields are:
+-----+--------+------------------------------------+------------------------+
| No. | Name | Description | Examples |
+=====+========+====================================+========================+
| 1 | CHROM | Chromosome or contig identifier | 'chr2', '2', 'chrM' |
+-----+--------+------------------------------------+------------------------+
| 2 | POS | 1-based reference position | 10041, 23042 |
+-----+--------+------------------------------------+------------------------+
| 3 | ID | ';'-separated variant identifiers | '.', 'rs35', 'rs9;rs53'|
+-----+--------+------------------------------------+------------------------+
| 4 | REF | Reference allele | 'A', 'GT' |
+-----+--------+------------------------------------+------------------------+
| 5 | ALT | ','-separated alternate alleles | 'T', 'ACT', 'C,T' |
+-----+--------+------------------------------------+------------------------+
| 6 | QUAL | Phred-scaled quality score for ALT | '.', 67, 12 |
+-----+--------+------------------------------------+------------------------+
| 7 | FILTER | ';'-separated filters that failed | '.', 'PASS', 'q10;s50' |
+-----+--------+------------------------------------+------------------------+
| 8 | INFO | ';'-separated information fields | '.', 'DP=14;AF=0.5;DB' |
+-----+--------+------------------------------------+------------------------+
| 9 | FORMAT | ':'-separated genotype fields | 'GT', 'GT:AD:DP' |
+-----+--------+------------------------------------+------------------------+
You will sometimes come across VCFs that have only eight columns, and contain
no FORMAT or sample-specific information. These are called "sites-only" VCFs,
and represent variation that has been observed in a population. Generally,
information about the population of origin should be included in the header.
There are several common, reserved genotype keywords that are standards
across the community. Currently, the pyvcf submodule is aware of the
following:
* AD - Total read depth for each allele (R, Integer)
* AF - Allele fraction of the event in the tumor (1, Float)
* DP - Read depth (1, Integer)
If sample annotation data are available for a given VCF file, use
the :class:`common.AnnFrame` class to import the data.
"""
import os
import re
import gzip
from copy import deepcopy
import warnings
from . import pybed, common, pymaf
import numpy as np
import pandas as pd
import scipy.stats as stats
import statsmodels.formula.api as smf
from Bio import bgzf
import matplotlib.pyplot as plt
from matplotlib_venn import venn2, venn3
import seaborn as sns
from pysam import VariantFile
from io import StringIO
HEADERS = {
'CHROM': str,
'POS': int,
'ID': str,
'REF': str,
'ALT': str,
'QUAL': str,
'FILTER': str,
'INFO': str,
'FORMAT': str,
}
CONTIGS = [
'1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13',
'14', '15', '16', '17', '18', '19', '20', '21', '22', 'X', 'Y', 'M',
'chr1', 'chr2', 'chr3', 'chr4', 'chr5', 'chr6', 'chr7', 'chr8',
'chr9', 'chr10', 'chr11', 'chr12', 'chr13', 'chr14', 'chr15',
'chr16', 'chr17', 'chr18', 'chr19', 'chr20', 'chr21', 'chr22',
'chrX', 'chrY', 'chrM'
]
# Below are reserved genotype keys copied from Table 2 of the VCF
# specification: https://samtools.github.io/hts-specs/VCFv4.3.pdf
RESERVED_GENOTYPE_KEYS = {
'AD': {'number': 'R', 'type': int}, # Read depth for each allele
'ADF': {'number': 'R', 'type': int}, # Read depth for each allele on the forward strand
'ADR': {'number': 'R', 'type': int}, # Read depth for each allele on the reverse strand
'DP': {'number': 1, 'type': int}, # Read depth
'EC': {'number': 'A', 'type': int}, # Expected alternate allele counts
'FT': {'number': 1, 'type': str}, # Filter indicating if this genotype was “called”
'GL': {'number': 'G', 'type': float}, # Genotype likelihoods
'GP': {'number': 'G', 'type': float}, # Genotype posterior probabilities
'GQ': {'number': 1, 'type': int}, # Conditional genotype quality
'GT': {'number': 1, 'type': str}, # Genotype
'HQ': {'number': 2, 'type': int}, # Haplotype quality
'MQ': {'number': 1, 'type': int}, # RMS mapping quality
'PL': {'number': 'G', 'type': int}, # Phred-scaled genotype likelihoods rounded to the closest integer
'PP': {'number': 'G', 'type': int}, # Phred-scaled genotype posterior probabilities rounded to the closest integer
'PQ': {'number': 1, 'type': int}, # Phasing quality
'PS': {'number': 1, 'type': int}, # Phase set
}
CUSTOM_GENOTYPE_KEYS = {
'AF': {'number': 1, 'type': float}, # Allele fraction of the event in the tumor
}
INFO_SPECIAL_KEYS = {
'#AC': ['AC', lambda x: sum([int(x) for x in x.split(',')]), True],
'#AF': ['AF', lambda x: sum([float(x) for x in x.split(',')]), True],
}
FORMAT_SPECIAL_KEYS = {
'#DP': ['DP', lambda x: int(x), True],
'#AD_REF': ['AD', lambda x: float(x.split(',')[0]), True],
'#AD_ALT': ['AD', lambda x: sum([int(y) for y in x.split(',')[1:]]), True],
'#AD_FRAC_REF': ['AD', lambda x: np.nan if sum([int(y) for y in x.split(',')]) == 0 else int(x.split(',')[0]) / sum([int(y) for y in x.split(',')]), True],
'#AD_FRAC_ALT': ['AD', lambda x: np.nan if sum([int(y) for y in x.split(',')]) == 0 else sum([int(y) for y in x.split(',')[1:]]) / sum([int(y) for y in x.split(',')]), True],
}
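# For example, the '#AD_FRAC_ALT' extractor applied to an AD value of '12,3'
# returns 3 / (12 + 3) = 0.2, i.e. the fraction of reads supporting the ALT allele(s).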
def rescue_filtered_variants(vfs, format='GT'):
"""
Rescue filtered variants if they are PASS in at least one of the input
VCF files.
Parameters
----------
vfs : list
List of VcfFrame objects.
Returns
-------
VcfFrame
VcfFrame object.
Examples
--------
>>> from fuc import pyvcf
>>> data1 = {
... 'CHROM': ['chr1', 'chr1', 'chr1'],
... 'POS': [100, 101, 102],
... 'ID': ['.', '.', '.'],
... 'REF': ['G', 'T', 'C'],
... 'ALT': ['A', 'C', 'T'],
... 'QUAL': ['.', '.', '.'],
... 'FILTER': ['PASS', 'weak_evidence', 'PASS'],
... 'INFO': ['.', '.', '.'],
... 'FORMAT': ['GT', 'GT', 'GT'],
... 'A': ['0/1', '0/1', '0/1']
... }
>>> data2 = {
... 'CHROM': ['chr1', 'chr1', 'chr1'],
... 'POS': [100, 101, 102],
... 'ID': ['.', '.', '.'],
... 'REF': ['G', 'T', 'C'],
... 'ALT': ['A', 'C', 'T'],
... 'QUAL': ['.', '.', '.'],
... 'FILTER': ['orientation', 'weak_evidence', 'PASS'],
... 'INFO': ['.', '.', '.'],
... 'FORMAT': ['GT', 'GT', 'GT'],
... 'B': ['0/1', '0/1', '0/1']
... }
>>> data3 = {
... 'CHROM': ['chr1', 'chr1', 'chr1'],
... 'POS': [102, 103, 104],
... 'ID': ['.', '.', '.'],
... 'REF': ['C', 'T', 'A'],
... 'ALT': ['T', 'C', 'T'],
... 'QUAL': ['.', '.', '.'],
... 'FILTER': ['PASS', 'weak_evidence', 'PASS'],
... 'INFO': ['.', '.', '.'],
... 'FORMAT': ['GT', 'GT', 'GT'],
... 'C': ['0/1', '0/1', '0/1']
... }
>>> vf1 = pyvcf.VcfFrame.from_dict([], data1)
>>> vf2 = pyvcf.VcfFrame.from_dict([], data2)
>>> vf3 = pyvcf.VcfFrame.from_dict([], data3)
>>> vf1.df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT A
0 chr1 100 . G A . PASS . GT 0/1
1 chr1 101 . T C . weak_evidence . GT 0/1
2 chr1 102 . C T . PASS . GT 0/1
>>> vf2.df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT B
0 chr1 100 . G A . orientation . GT 0/1
1 chr1 101 . T C . weak_evidence . GT 0/1
2 chr1 102 . C T . PASS . GT 0/1
>>> vf3.df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT C
0 chr1 102 . C T . PASS . GT 0/1
1 chr1 103 . T C . weak_evidence . GT 0/1
2 chr1 104 . A T . PASS . GT 0/1
>>> rescued_vf = pyvcf.rescue_filtered_variants([vf1, vf2, vf3])
>>> rescued_vf.df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT A B C
0 chr1 100 . G A . . . GT 0/1 0/1 ./.
1 chr1 102 . C T . . . GT 0/1 0/1 0/1
2 chr1 104 . A T . . . GT ./. ./. 0/1
"""
# Check for duplicate samples.
samples = []
for vf in vfs:
samples += vf.samples
s = pd.Series(samples)
duplicates = s[s.duplicated()].values
    if len(duplicates) > 0:
raise ValueError(f'Duplicate samples found: {duplicates}.')
dfs = []
for vf in vfs:
df = vf.df[vf.df.FILTER == 'PASS']
dfs.append(df[['CHROM', 'POS', 'REF', 'ALT']])
df = pd.concat(dfs).drop_duplicates()
s = df.apply(lambda r: common.Variant(r.CHROM, r.POS, r.REF, r.ALT), axis=1)
filtered_vfs = []
for vf in vfs:
i = vf.df.apply(lambda r: common.Variant(r.CHROM, r.POS, r.REF, r.ALT) in s.values, axis=1)
filtered_vf = vf.copy()
filtered_vf.df = vf.df[i]
filtered_vfs.append(filtered_vf)
merged_vf = merge(filtered_vfs, how='outer', format=format)
return merged_vf
def gt_miss(g):
"""
Return True if sample genotype is missing.
Parameters
----------
g : str
Sample genotype.
Returns
-------
bool
True if sample genotype is missing.
Examples
--------
>>> from fuc import pyvcf
>>> pyvcf.gt_miss('0')
False
>>> pyvcf.gt_miss('0/0')
False
>>> pyvcf.gt_miss('0/1')
False
>>> pyvcf.gt_miss('0|0:48:1:51,51')
False
>>> pyvcf.gt_miss('./.:.:.')
True
>>> pyvcf.gt_miss('.:.')
True
>>> pyvcf.gt_miss('.')
True
>>> pyvcf.gt_miss('./.:13,3:16:41:41,0,402')
True
"""
return '.' in g.split(':')[0]
def gt_polyp(g):
"""Return True if sample genotype has a polyploid call.
Parameters
----------
g : str
Sample genotype.
Returns
-------
bool
True if sample genotype has a polyploid call.
Examples
--------
>>> from fuc import pyvcf
>>> pyvcf.gt_polyp('1')
False
>>> pyvcf.gt_polyp('0/1')
False
>>> pyvcf.gt_polyp('0/1/1')
True
>>> pyvcf.gt_polyp('1|0|1')
True
>>> pyvcf.gt_polyp('0/./1/1')
True
"""
gt = g.split(':')[0]
if '/' in gt:
return gt.count('/') > 1
else:
return gt.count('|') > 1
def gt_hasvar(g):
"""
Return True if sample genotype has at least one variant call.
Parameters
----------
g : str
Sample genotype.
Returns
-------
bool
True if sample genotype has a variant call.
Examples
--------
>>> from fuc import pyvcf
>>> pyvcf.gt_hasvar('0')
False
>>> pyvcf.gt_hasvar('0/0')
False
>>> pyvcf.gt_hasvar('./.')
False
>>> pyvcf.gt_hasvar('1')
True
>>> pyvcf.gt_hasvar('0/1')
True
>>> pyvcf.gt_hasvar('1/2')
True
>>> pyvcf.gt_hasvar('1|0')
True
>>> pyvcf.gt_hasvar('1|2:21:6:23,27')
True
"""
if g.split(':')[0].replace('/', '').replace(
'|', '').replace('.', '').replace('0', ''):
return True
else:
return False
def gt_unphase(g):
"""
Return unphased sample genotype.
Parameters
----------
g : str
Sample genotype.
Returns
-------
str
Unphased genotype.
Examples
--------
>>> from fuc import pyvcf
>>> pyvcf.gt_unphase('1')
'1'
>>> pyvcf.gt_unphase('0/0')
'0/0'
>>> pyvcf.gt_unphase('0/1')
'0/1'
>>> pyvcf.gt_unphase('0/1:35:4')
'0/1:35:4'
>>> pyvcf.gt_unphase('0|1')
'0/1'
>>> pyvcf.gt_unphase('1|0')
'0/1'
>>> pyvcf.gt_unphase('2|1:2:0:18,2')
'1/2:2:0:18,2'
>>> pyvcf.gt_unphase('.')
'.'
>>> pyvcf.gt_unphase('./.')
'./.'
>>> pyvcf.gt_unphase('.|.')
'./.'
"""
l = g.split(':')
gt = l[0]
if '|' not in gt:
return g
if '.' in gt:
return g.replace('|', '/')
l[0] = '/'.join([str(b) for b in sorted([int(a) for a in gt.split('|')])])
return ':'.join(l)
def gt_het(g):
"""
Return True if genotype call is heterozygous.
Parameters
----------
g : str
Genotype call.
Returns
-------
bool
True if genotype call is heterozygous.
Examples
--------
>>> from fuc import pyvcf
>>> pyvcf.gt_het('0/1')
True
>>> pyvcf.gt_het('0/0')
False
>>> pyvcf.gt_het('0|0')
False
>>> pyvcf.gt_het('1|0')
True
"""
l = g.split(':')
gt = l[0]
if '/' in gt:
gt = gt.split('/')
elif '|' in gt:
gt = gt.split('|')
else:
return False
return gt[0] != gt[1]
def gt_pseudophase(g):
"""
Return pseudophased genotype call.
Parameters
----------
g : str
Genotype call.
Returns
-------
str
Pseudophased genotype call.
Examples
--------
>>> from fuc import pyvcf
    >>> pyvcf.gt_pseudophase('0/1')
    '0|1'
    >>> pyvcf.gt_pseudophase('0/0:34:10,24')
    '0|0:34:10,24'
"""
l = g.split(':')
l[0] = l[0].replace('/', '|')
return ':'.join(l)
def has_chr_prefix(file, size=1000):
"""
Return True if all of the sampled contigs from a VCF file have the
(annoying) 'chr' string.
Parameters
----------
file : str
VCF file (compressed or uncompressed).
size : int, default: 1000
Sampling size.
Returns
-------
bool
True if the 'chr' string is found.
"""
n = 0
vcf = VariantFile(file)
for record in vcf.fetch():
n += 1
if 'chr' not in record.chrom:
return False
if n > size:
break
vcf.close()
return True
def merge(
vfs, how='inner', format='GT', sort=True, collapse=False
):
"""
Merge VcfFrame objects.
Parameters
----------
vfs : list
List of VcfFrames to be merged.
how : str, default: 'inner'
Type of merge as defined in pandas.DataFrame.merge.
format : str, default: 'GT'
FORMAT subfields to be retained (e.g. 'GT:AD:DP').
sort : bool, default: True
If True, sort the VcfFrame before returning.
collapse : bool, default: False
If True, collapse duplicate records.
Returns
-------
VcfFrame
Merged VcfFrame.
See Also
--------
VcfFrame.merge
Merge self with another VcfFrame.
Examples
--------
Assume we have the following data:
>>> from fuc import pyvcf
>>> data1 = {
... 'CHROM': ['chr1', 'chr1'],
... 'POS': [100, 101],
... 'ID': ['.', '.'],
... 'REF': ['G', 'T'],
... 'ALT': ['A', 'C'],
... 'QUAL': ['.', '.'],
... 'FILTER': ['.', '.'],
... 'INFO': ['.', '.'],
... 'FORMAT': ['GT:DP', 'GT:DP'],
... 'Steven': ['0/0:32', '0/1:29'],
... 'Sara': ['0/1:24', '1/1:30'],
... }
>>> data2 = {
... 'CHROM': ['chr1', 'chr1', 'chr2'],
... 'POS': [100, 101, 200],
... 'ID': ['.', '.', '.'],
... 'REF': ['G', 'T', 'A'],
... 'ALT': ['A', 'C', 'T'],
... 'QUAL': ['.', '.', '.'],
... 'FILTER': ['.', '.', '.'],
... 'INFO': ['.', '.', '.'],
... 'FORMAT': ['GT:DP', 'GT:DP', 'GT:DP'],
... 'Dona': ['./.:.', '0/0:24', '0/0:26'],
... 'Michel': ['0/1:24', '0/1:31', '0/1:26'],
... }
>>> vf1 = pyvcf.VcfFrame.from_dict([], data1)
>>> vf2 = pyvcf.VcfFrame.from_dict([], data2)
>>> vf1.df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Steven Sara
0 chr1 100 . G A . . . GT:DP 0/0:32 0/1:24
1 chr1 101 . T C . . . GT:DP 0/1:29 1/1:30
>>> vf2.df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Dona Michel
0 chr1 100 . G A . . . GT:DP ./.:. 0/1:24
1 chr1 101 . T C . . . GT:DP 0/0:24 0/1:31
2 chr2 200 . A T . . . GT:DP 0/0:26 0/1:26
We can merge the two VcfFrames with ``how='inner'`` (default):
>>> pyvcf.merge([vf1, vf2]).df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT <NAME>
0 chr1 100 . G A . . . GT 0/0 0/1 ./. 0/1
1 chr1 101 . T C . . . GT 0/1 1/1 0/0 0/1
We can also merge with ``how='outer'``:
>>> pyvcf.merge([vf1, vf2], how='outer').df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT <NAME>
0 chr1 100 . G A . . . GT 0/0 0/1 ./. 0/1
1 chr1 101 . T C . . . GT 0/1 1/1 0/0 0/1
2 chr2 200 . A T . . . GT ./. ./. 0/0 0/1
Since both VcfFrames have the DP subfield, we can use ``format='GT:DP'``:
>>> pyvcf.merge([vf1, vf2], how='outer', format='GT:DP').df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Steven Sara Dona Michel
0 chr1 100 . G A . . . GT:DP 0/0:32 0/1:24 ./.:. 0/1:24
1 chr1 101 . T C . . . GT:DP 0/1:29 1/1:30 0/0:24 0/1:31
2 chr2 200 . A T . . . GT:DP ./.:. ./.:. 0/0:26 0/1:26
"""
merged_vf = vfs[0]
for vf in vfs[1:]:
merged_vf = merged_vf.merge(vf, how=how, format=format, sort=sort,
collapse=collapse)
return merged_vf
def row_hasindel(r):
"""
Return True if the row has an indel.
Parameters
----------
r : pandas.Series
VCF row.
Returns
-------
bool
True if the row has an indel.
Examples
--------
>>> from fuc import pyvcf
>>> data = {
... 'CHROM': ['chr1', 'chr1', 'chr1', 'chr1'],
... 'POS': [100, 101, 102, 103],
... 'ID': ['.', '.', '.', '.'],
... 'REF': ['G', 'CT', 'A', 'C'],
... 'ALT': ['A', 'C', 'C,AT', 'A'],
... 'QUAL': ['.', '.', '.', '.'],
... 'FILTER': ['.', '.', '.', '.'],
... 'INFO': ['.', '.', '.', '.'],
... 'FORMAT': ['GT', 'GT', 'GT', 'GT'],
... 'Steven': ['0/1', '0/1', '1/2', '0/1'],
... }
>>> vf = pyvcf.VcfFrame.from_dict([], data)
>>> vf.df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Steven
0 chr1 100 . G A . . . GT 0/1
1 chr1 101 . CT C . . . GT 0/1
2 chr1 102 . A C,AT . . . GT 1/2
3 chr1 103 . C A . . . GT 0/1
>>> vf.df.apply(pyvcf.row_hasindel, axis=1)
0 False
1 True
2 True
3 False
dtype: bool
"""
ref_has = len(r['REF']) > 1
alt_has = max([len(x) for x in r['ALT'].split(',')]) > 1
return ref_has or alt_has
def row_parseinfo(r, key):
"""Return INFO data in the row that match the given key.
Parameters
----------
r : pandas.Series
VCF row.
key : str
INFO key.
Returns
-------
str
Requested INFO data. Empty string if the key is not found.
Examples
--------
>>> from fuc import pyvcf
>>> data = {
... 'CHROM': ['chr1', 'chr1', 'chr1', 'chr1'],
... 'POS': [100, 101, 102, 103],
... 'ID': ['.', '.', '.', '.'],
... 'REF': ['G', 'T', 'A', 'C'],
... 'ALT': ['A', 'C', 'T', 'A'],
... 'QUAL': ['.', '.', '.', '.'],
... 'FILTER': ['.', '.', '.', '.'],
... 'INFO': ['DB;AC=0', 'DB;H2;AC=1', 'DB;H2;AC=1', '.'],
... 'FORMAT': ['GT', 'GT', 'GT', 'GT'],
... 'Steven': ['0/0', '0/1', '0/1', '0/0'],
... }
>>> vf = pyvcf.VcfFrame.from_dict([], data)
>>> vf.df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Steven
0 chr1 100 . G A . . DB;AC=0 GT 0/0
1 chr1 101 . T C . . DB;H2;AC=1 GT 0/1
2 chr1 102 . A T . . DB;H2;AC=1 GT 0/1
3 chr1 103 . C A . . . GT 0/0
>>> vf.df.apply(pyvcf.row_parseinfo, args=('AC',), axis=1)
0 0
1 1
2 1
3
dtype: object
"""
result = ''
for field in r.INFO.split(';'):
if field.startswith(f'{key}='):
result = field[len(key)+1:]
return result
def row_updateinfo(r, key, value):
"""Update INFO data in the row that match the given key.
Parameters
----------
r : pandas.Series
VCF row.
key : str
INFO key.
value : str
New value to be assigned.
Returns
-------
str
New INFO field.
Examples
--------
>>> from fuc import pyvcf
>>> data = {
... 'CHROM': ['chr1', 'chr1', 'chr1', 'chr1'],
... 'POS': [100, 101, 102, 103],
... 'ID': ['.', '.', '.', '.'],
... 'REF': ['G', 'T', 'A', 'C'],
... 'ALT': ['A', 'C', 'T', 'A'],
... 'QUAL': ['.', '.', '.', '.'],
... 'FILTER': ['.', '.', '.', '.'],
... 'INFO': ['DB;AC=0', 'DB;H2;AC=1', 'DB;H2;AC=1', '.'],
... 'FORMAT': ['GT', 'GT', 'GT', 'GT'],
... 'Steven': ['0/0', '0/1', '0/1', '0/0'],
... }
>>> vf = pyvcf.VcfFrame.from_dict([], data)
>>> vf.df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Steven
0 chr1 100 . G A . . DB;AC=0 GT 0/0
1 chr1 101 . T C . . DB;H2;AC=1 GT 0/1
2 chr1 102 . A T . . DB;H2;AC=1 GT 0/1
3 chr1 103 . C A . . . GT 0/0
>>> vf.df.apply(pyvcf.row_updateinfo, args=('AC', '4'), axis=1)
0 DB;AC=4
1 DB;H2;AC=4
2 DB;H2;AC=4
3 .
dtype: object
"""
fields = r.INFO.split(';')
for i, field in enumerate(fields):
if field.startswith(f'{key}='):
fields[i] = field[:len(key)+1] + value
break
return ';'.join(fields)
def row_missval(r):
"""Return the correctly formatted missing value for the row.
Parameters
----------
r : pandas.Series
VCF row.
Returns
-------
str
Missing value.
Examples
--------
>>> from fuc import pyvcf
>>> data = {
... 'CHROM': ['chr1', 'chr1', 'chr1', 'chrX'],
... 'POS': [100, 101, 102, 100],
... 'ID': ['.', '.', '.', '.'],
... 'REF': ['G', 'T', 'A', 'C'],
... 'ALT': ['A', 'C', 'T', 'A'],
... 'QUAL': ['.', '.', '.', '.'],
... 'FILTER': ['.', '.', '.', '.'],
... 'INFO': ['.', '.', '.', '.'],
... 'FORMAT': ['GT', 'GT:AD', 'GT:AD:DP', 'GT'],
... 'Steven': ['0/1', '0/1:14,15', '0/1:13,19:32', '0/1'],
... }
>>> vf = pyvcf.VcfFrame.from_dict([], data)
>>> vf.df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Steven
0 chr1 100 . G A . . . GT 0/1
1 chr1 101 . T C . . . GT:AD 0/1:14,15
2 chr1 102 . A T . . . GT:AD:DP 0/1:13,19:32
3 chrX 100 . C A . . . GT 0/1
>>> vf.df.apply(pyvcf.row_missval, axis=1)
0 ./.
1 ./.:.
2 ./.:.:.
3 .
dtype: object
"""
if 'X' in r.CHROM or 'Y' in r.CHROM:
m = '.'
else:
m = './.'
for i in range(1, len(r.FORMAT.split(':'))):
m += ':.'
return m
def simulate_genotype(
p=0.5, noise_scale=0.05, dp_show=True, dp_loc=30, dp_scale=10,
ad_show=True, ad_loc=0.5, ad_scale=0.05, af_show=True
):
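# Simulate a single genotype string such as '0/1:30:14,16:0.533'. Variant
# status is drawn from a Bernoulli trial whose probability is sampled from a
# truncated normal centred on p with spread noise_scale; DP, AD, and AF
# subfields are appended according to the *_show flags.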
lower, upper = 0, 1
mu, sigma = p, noise_scale
X = stats.truncnorm((lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma)
has_var = np.random.binomial(1, X.rvs(1)) == 1
if has_var:
result = '0/1'
else:
result = '0/0'
dp = np.random.normal(loc=dp_loc, scale=dp_scale)
dp = round(abs(dp))
if has_var:
alt = round(abs(np.random.normal(loc=ad_loc, scale=ad_scale)) * dp)
ref = dp - alt
ad = f'{ref},{alt}'
else:
ad = f'{dp},0'
if has_var:
af = round(alt / (ref + alt), 3)
else:
af = 0
if dp_show:
result += f':{dp}'
if ad_show:
result += f':{ad}'
if af_show:
result += f':{af}'
return result
def simulate_sample(
n, p=0.5, noise_scale=0.1, dp_show=True, dp_loc=30, dp_scale=10,
ad_show=True, ad_loc=0.5, ad_scale=0.05, af_show=True
):
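# Return a list of n simulated genotype strings (one per variant site),
# generated by repeated calls to simulate_genotype with the given options.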
l = []
for i in range(n):
genotype = simulate_genotype(
p=p, noise_scale=noise_scale, dp_show=dp_show,
dp_loc=dp_loc, dp_scale=dp_scale, ad_show=ad_show,
ad_loc=ad_loc, ad_scale=ad_scale
)
l.append(genotype)
return l
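# Example sketch (values are random; shown for illustration only):
# simulate_sample(3, p=0.9) could yield something like
# ['0/1:31:14,17:0.548', '0/0:27:27,0:0', '0/1:25:12,13:0.52'].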
def slice(file, regions, path=None):
"""
Slice a VCF file for specified regions.
Parameters
----------
file : str
Input VCF file. It must already be BGZF compressed (.gz) and indexed
(.tbi) to allow random access.
regions : str, list, or pybed.BedFrame
One or more regions to be sliced. Each region must have the format
chrom:start-end and be a half-open interval with (start, end]. This
means, for example, chr1:100-103 will extract positions 101, 102, and
103. Alternatively, you can provide a BED file (compressed or
uncompressed) to specify regions. Note that the 'chr' prefix in
contig names (e.g. 'chr1' vs. '1') will be automatically added or
removed as necessary to match the input VCF's contig names.
path : str, optional
Output VCF file. Writes to stdout when ``path='-'``. If None is
provided the result is returned as a string.
Returns
-------
None or str
If path is None, returns the resulting VCF format as a string.
Otherwise returns None.
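Examples
--------
A minimal illustrative call (a sketch; it assumes 'in.vcf.gz' is BGZF
compressed and indexed as 'in.vcf.gz.tbi'):
>>> from fuc import pyvcf
>>> pyvcf.slice('in.vcf.gz', ['chr1:100-200'], path='out.vcf')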
"""
if isinstance(regions, str):
regions = [regions]
if isinstance(regions, pybed.BedFrame):
regions = regions.to_regions()
elif isinstance(regions, list):
if '.bed' in regions[0]:
regions = pybed.BedFrame.from_file(regions[0]).to_regions()
else:
regions = common.sort_regions(regions)
else:
raise TypeError('Incorrect regions type')
if has_chr_prefix(file):
regions = common.update_chr_prefix(regions, mode='add')
else:
regions = common.update_chr_prefix(regions, mode='remove')
vcf = VariantFile(file)
if path is None:
data = ''
data += str(vcf.header)
for region in regions:
chrom, start, end = common.parse_region(region)
if np.isnan(start):
start = None
if np.isnan(end):
end = None
for record in vcf.fetch(chrom, start, end):
data += str(record)
else:
data = None
output = VariantFile(path, 'w', header=vcf.header)
for region in regions:
chrom, start, end = common.parse_region(region)
for record in vcf.fetch(chrom, start, end):
output.write(record)
output.close()
vcf.close()
return data
def plot_af_correlation(vf1, vf2, ax=None, figsize=None):
"""
Create a scatter plot showing the correlation of allele frequency between
two VCF files.
This method will exclude the following sites:
- non-overlapping sites
- multiallelic sites
- sites with one or more missing genotypes
Parameters
----------
vf1, vf2 : VcfFrame
VcfFrame objects to be compared.
ax : matplotlib.axes.Axes, optional
Pre-existing axes for the plot. Otherwise, create a new one.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
Returns
-------
matplotlib.axes.Axes
The matplotlib axes containing the plot.
Examples
--------
.. plot::
:context: close-figs
>>> from fuc import pyvcf, common
>>> import matplotlib.pyplot as plt
>>> data1 = {
... 'CHROM': ['chr1', 'chr1', 'chr1', 'chr1', 'chr1', 'chr1'],
... 'POS': [100, 101, 102, 103, 104, 105],
... 'ID': ['.', '.', '.', '.', '.', '.'],
... 'REF': ['G', 'T', 'G', 'T', 'A', 'C'],
... 'ALT': ['A', 'C', 'C', 'G,A', 'C', 'T'],
... 'QUAL': ['.', '.', '.', '.', '.', '.'],
... 'FILTER': ['.', '.', '.', '.', '.', '.'],
... 'INFO': ['.', '.', '.', '.', '.', '.'],
... 'FORMAT': ['GT:DP', 'GT', 'GT', 'GT', 'GT', 'GT'],
... 'A': ['0/1:30', '0/0', '1/1', '0/1', '1/1', '0/1'],
... 'B': ['0/0:30', '0/0', '0/1', '0/1', '1/1', '0/1'],
... 'C': ['1/1:30', '0/0', '1/1', '0/1', '1/1', '0/1'],
... 'D': ['0/0:30', '0/0', '0/0', '0/0', '1/1', '0/1'],
... 'E': ['0/0:30', '0/0', '0/0', '1/2', '1/1', '0/1'],
... }
>>> vf1 = pyvcf.VcfFrame.from_dict([], data1)
>>> data2 = {
... 'CHROM': ['chr1', 'chr1', 'chr1', 'chr1', 'chr1'],
... 'POS': [101, 102, 103, 104, 105],
... 'ID': ['.', '.', '.', '.', '.'],
... 'REF': ['T', 'G', 'T', 'A', 'C'],
... 'ALT': ['C', 'C', 'G,A', 'C', 'T'],
... 'QUAL': ['.', '.', '.', '.', '.'],
... 'FILTER': ['.', '.', '.', '.', '.'],
... 'INFO': ['.', '.', '.', '.', '.'],
... 'FORMAT': ['GT', 'GT', 'GT', 'GT', 'GT'],
... 'F': ['0/0', '0/1', '0/1', '1/1', '0/0'],
... 'G': ['0/0', '0/1', '0/1', '1/1', './.'],
... 'H': ['0/0', '0/1', '0/1', '1/1', '1/1'],
... 'I': ['0/0', '0/1', '0/0', '1/1', '1/1'],
... 'J': ['0/0', '0/1', '1/2', '1/1', '0/1'],
... }
>>> vf2 = pyvcf.VcfFrame.from_dict([], data2)
>>> pyvcf.plot_af_correlation(vf1, vf2)
>>> plt.tight_layout()
"""
def one_gt(g):
alleles = g.split(':')[0].split('/')
alleles = [x for x in alleles if x != '0']
return len(alleles)
def one_row(r):
locus = f'{r.CHROM}-{r.POS}-{r.REF}-{r.ALT}'
ac = r[9:].apply(one_gt).sum()
if 'X' in r.CHROM or 'Y' in r.CHROM:
total = len(r[9:])
else:
total = len(r[9:]) * 2
af = ac / total
return pd.Series([locus, af])
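# As stated in the docstring above: drop multiallelic sites and sites with
# one or more missing genotypes, compute per-locus AF in each VcfFrame, and
# keep only overlapping loci via the dropna() after the concat below.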
s1 = vf1.filter_multialt().filter_empty(threshold=1).df.apply(one_row, axis=1)
s2 = vf2.filter_multialt().filter_empty(threshold=1).df.apply(one_row, axis=1)
s1.columns = ['Locus', 'First']
s2.columns = ['Locus', 'Second']
s1 = s1.set_index('Locus')
s2 = s2.set_index('Locus')
df = pd.concat([s1, s2], axis=1).dropna()
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
sns.scatterplot(data=df, x='First', y='Second', ax=ax)
return ax
class VcfFrame:
"""
Class for storing VCF data.
Parameters
----------
meta : list
List of metadata lines.
df : pandas.DataFrame
DataFrame containing VCF data.
See Also
--------
VcfFrame.from_dict
Construct VcfFrame from a dict of array-like or dicts.
VcfFrame.from_file
Construct VcfFrame from a VCF file.
VcfFrame.from_string
Construct VcfFrame from a string.
Examples
--------
Constructing VcfFrame from pandas DataFrame:
>>> from fuc import pyvcf
>>> import pandas as pd
>>> data = {
... 'CHROM': ['chr1', 'chr1', 'chr1'],
... 'POS': [100, 101, 102],
... 'ID': ['.', '.', '.',],
... 'REF': ['G', 'T', 'A'],
... 'ALT': ['A', 'C', 'T'],
... 'QUAL': ['.', '.', '.'],
... 'FILTER': ['.', '.', '.'],
... 'INFO': ['.', '.', '.'],
... 'FORMAT': ['GT', 'GT', 'GT'],
... 'Steven': ['0/1', '0/1', '0/1'],
... }
>>> df = pd.DataFrame(data)
>>> vf = pyvcf.VcfFrame(['##fileformat=VCFv4.3'], df)
>>> vf.meta
['##fileformat=VCFv4.3']
>>> vf.df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Steven
0 chr1 100 . G A . . . GT 0/1
1 chr1 101 . T C . . . GT 0/1
2 chr1 102 . A T . . . GT 0/1
"""
def _check_df(self, df):
df = df.reset_index(drop=True)
df = df.astype(HEADERS)
return df
def __init__(self, meta, df):
self._meta = meta
self._df = self._check_df(df)
@property
def meta(self):
"""list : List of metadata lines."""
return self._meta
@meta.setter
def meta(self, value):
self._meta = value
@property
def df(self):
"""pandas.DataFrame : DataFrame containing VCF data."""
return self._df
@df.setter
def df(self, value):
self._df = self._check_df(value)
@property
def samples(self):
"""list : List of sample names."""
return self.df.columns[9:].to_list()
@property
def shape(self):
"""tuple : Dimensionality of VcfFrame (variants, samples)."""
return (self.df.shape[0], len(self.samples))
@property
def contigs(self):
"""list : List of contig names."""
return list(self.df.CHROM.unique())
@property
def has_chr_prefix(self):
"""bool : Whether the (annoying) 'chr' string is found."""
for contig in self.contigs:
if 'chr' in contig:
return True
return False
@property
def sites_only(self):
"""bool : Whether the VCF is sites-only."""
return not self.samples or 'FORMAT' not in self.df.columns
@property
def phased(self):
"""
Return True if every genotype in VcfFrame is haplotype phased.
Returns
-------
bool
If VcfFrame is fully phased, return True; otherwise return False.
Also return False if VcfFrame is empty.
Examples
--------
>>> from fuc import pyvcf
>>> data1 = {
... 'CHROM': ['chr1', 'chr1', 'chr1'],
... 'POS': [100, 101, 102],
... 'ID': ['.', '.', '.'],
... 'REF': ['G', 'T', 'A'],
... 'ALT': ['A', 'C', 'T'],
... 'QUAL': ['.', '.', '.'],
... 'FILTER': ['.', '.', '.'],
... 'INFO': ['.', '.', '.'],
... 'FORMAT': ['GT', 'GT', 'GT'],
... 'A': ['1|1', '0|0', '1|0'],
... 'B': ['1|0', '0|1', '1|0'],
... }
>>> vf1 = pyvcf.VcfFrame.from_dict([], data1)
>>> vf1.df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT A B
0 chr1 100 . G A . . . GT 1|1 1|0
1 chr1 101 . T C . . . GT 0|0 0|1
2 chr1 102 . A T . . . GT 1|0 1|0
>>> vf1.phased
True
>>> data2 = {
... 'CHROM': ['chr1', 'chr1', 'chr1'],
... 'POS': [100, 101, 102],
... 'ID': ['.', '.', '.'],
... 'REF': ['G', 'T', 'A'],
... 'ALT': ['A', 'C', 'T'],
... 'QUAL': ['.', '.', '.'],
... 'FILTER': ['.', '.', '.'],
... 'INFO': ['.', '.', '.'],
... 'FORMAT': ['GT', 'GT', 'GT'],
... 'C': ['1|1', '0/0', '1|0'],
... 'D': ['1|0', '0/1', '1|0'],
... }
>>> vf2 = pyvcf.VcfFrame.from_dict([], data2)
>>> vf2.df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT C D
0 chr1 100 . G A . . . GT 1|1 1|0
1 chr1 101 . T C . . . GT 0/0 0/1
2 chr1 102 . A T . . . GT 1|0 1|0
>>> vf2.phased
False
"""
if self.empty:
return False
def one_row(r):
def one_gt(g):
return '|' in g.split(':')[0]
return r[9:].apply(one_gt).all()
s = self.df.apply(one_row, axis=1)
return s.all()
@property
def empty(self):
"""
Indicator whether VcfFrame is empty.
Returns
-------
bool
If VcfFrame is empty, return True; otherwise return False.
Examples
--------
>>> from fuc import pyvcf
>>> data = {
... 'CHROM': ['chr1', 'chr2'],
... 'POS': [100, 101],
... 'ID': ['.', '.'],
... 'REF': ['G', 'T'],
... 'ALT': ['A', 'C'],
... 'QUAL': ['.', '.'],
... 'FILTER': ['.', '.'],
... 'INFO': ['.', '.'],
... 'FORMAT': ['GT', 'GT'],
... 'A': ['0/1', '1/1']
... }
>>> vf = pyvcf.VcfFrame.from_dict([], data)
>>> vf.df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT A
0 chr1 100 . G A . . . GT 0/1
1 chr2 101 . T C . . . GT 1/1
>>> vf.df = vf.df[0:0]
>>> vf.df
Empty DataFrame
Columns: [CHROM, POS, ID, REF, ALT, QUAL, FILTER, INFO, FORMAT, A]
Index: []
>>> vf.empty
True
"""
return self.df.empty
def add_af(self, decimals=3):
"""
Compute AF from AD and then add it to the FORMAT field.
This method will compute allele fraction for each ALT allele in the
same order as listed.
Parameters
----------
decimals : int, default: 3
Number of decimals to display.
Returns
-------
VcfFrame
Updated VcfFrame object.
Examples
--------
>>> from fuc import pyvcf
>>> data = {
... 'CHROM': ['chr1', 'chr1', 'chr1', 'chr1'],
... 'POS': [100, 101, 102, 103],
... 'ID': ['.', '.', '.', '.'],
... 'REF': ['A', 'G', 'A', 'C'],
... 'ALT': ['C', 'T', 'G', 'G,A'],
... 'QUAL': ['.', '.', '.', '.'],
... 'FILTER': ['.', '.', '.', '.'],
... 'INFO': ['.', '.', '.', '.'],
... 'FORMAT': ['GT:AD', 'GT:AD', 'GT', 'GT:AD'],
... 'A': ['0/1:12,15', '0/0:32,1', '0/1', './.:.'],
... 'B': ['0/1:13,17', '0/1:14,15', './.', '1/2:0,11,17'],
... }
>>> vf = pyvcf.VcfFrame.from_dict([], data)
>>> vf.df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT A B
0 chr1 100 . A C . . . GT:AD 0/1:12,15 0/1:13,17
1 chr1 101 . G T . . . GT:AD 0/0:32,1 0/1:14,15
2 chr1 102 . A G . . . GT 0/1 ./.
3 chr1 103 . C G,A . . . GT:AD ./.:. 1/2:0,11,17
>>> vf.add_af().df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT A B
0 chr1 100 . A C . . . GT:AD:AF 0/1:12,15:0.444,0.556 0/1:13,17:0.433,0.567
1 chr1 101 . G T . . . GT:AD:AF 0/0:32,1:0.970,0.030 0/1:14,15:0.483,0.517
2 chr1 102 . A G . . . GT:AF 0/1:. ./.:.
3 chr1 103 . C G,A . . . GT:AD:AF ./.:.:. 1/2:0,11,17:0.000,0.393,0.607
"""
def one_row(r):
try:
i = r.FORMAT.split(':').index('AD')
except ValueError:
i = None
def one_gt(g):
if i is None:
ad = None
else:
ad = g.split(':')[i]
if ad is None or ad == '.':
af = '.'
else:
depths = [int(x) for x in ad.split(',')]
total = sum(depths)
if total == 0:
af = '.'
else:
af = ','.join([f'{x/total:.{decimals}f}' for x in depths])
return f'{g}:{af}'
r.iloc[9:] = r.iloc[9:].apply(one_gt)
r.FORMAT += ':AF'
return r
df = self.df.apply(one_row, axis=1)
return self.__class__(self.copy_meta(), df)
def add_dp(self):
"""Compute DP using AD and add it to the FORMAT field.
Returns
-------
VcfFrame
Updated VcfFrame.
Examples
--------
Assume we have the following data:
>>> from fuc import pyvcf
>>> data = {
... 'CHROM': ['chr1', 'chr1', 'chr2', 'chr2'],
... 'POS': [100, 100, 200, 200],
... 'ID': ['.', '.', '.', '.'],
... 'REF': ['A', 'A', 'C', 'C'],
... 'ALT': ['C', 'T', 'G', 'G,A'],
... 'QUAL': ['.', '.', '.', '.'],
... 'FILTER': ['.', '.', '.', '.'],
... 'INFO': ['.', '.', '.', '.'],
... 'FORMAT': ['GT:AD', 'GT:AD', 'GT:AD', 'GT:AD'],
... 'Steven': ['0/1:12,15', '0/0:32,1', '0/1:16,12', './.:.'],
... 'Sara': ['0/1:13,17', '0/1:14,15', './.:.', '1/2:0,11,17'],
... }
>>> vf = pyvcf.VcfFrame.from_dict([], data)
>>> vf.df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Steven Sara
0 chr1 100 . A C . . . GT:AD 0/1:12,15 0/1:13,17
1 chr1 100 . A T . . . GT:AD 0/0:32,1 0/1:14,15
2 chr2 200 . C G . . . GT:AD 0/1:16,12 ./.:.
3 chr2 200 . C G,A . . . GT:AD ./.:. 1/2:0,11,17
We can add the DP subfield to our genotype data:
>>> vf.add_dp().df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Steven Sara
0 chr1 100 . A C . . . GT:AD:DP 0/1:12,15:27 0/1:13,17:30
1 chr1 100 . A T . . . GT:AD:DP 0/0:32,1:33 0/1:14,15:29
2 chr2 200 . C G . . . GT:AD:DP 0/1:16,12:28 ./.:.:.
3 chr2 200 . C G,A . . . GT:AD:DP ./.:.:. 1/2:0,11,17:28
"""
def outfunc(r):
i = r.FORMAT.split(':').index('AD')
def infunc(x):
ad = x.split(':')[i].split(',')
dp = 0
for depth in ad:
if depth == '.':
return f'{x}:.'
dp += int(depth)
return f'{x}:{dp}'
r.iloc[9:] = r.iloc[9:].apply(infunc)
r.FORMAT += ':DP'
return r
df = self.df.apply(outfunc, axis=1)
vf = self.__class__(self.copy_meta(), df)
return vf
def add_flag(self, flag, order='last', index=None):
"""Add the given flag to the INFO field.
The default behavior is to add the flag to all rows in the VcfFrame.
Parameters
----------
flag : str
INFO flag.
order : {'last', 'first', False}, default: 'last'
Determines the order in which the flag will be added.
- ``last`` : Add to the end of the list.
- ``first`` : Add to the beginning of the list.
- ``False`` : Overwrite the existing field.
index : list or pandas.Series, optional
Boolean index array indicating which rows should be updated.
Returns
-------
VcfFrame
Updated VcfFrame.
Examples
--------
Assume we have the following data:
>>> from fuc import pyvcf
>>> data = {
... 'CHROM': ['chr1', 'chr1', 'chr1', 'chr1'],
... 'POS': [100, 101, 102, 103],
... 'ID': ['.', '.', '.', '.'],
... 'REF': ['G', 'T', 'A', 'C'],
... 'ALT': ['A', 'C', 'T', 'A'],
... 'QUAL': ['.', '.', '.', '.'],
... 'FILTER': ['.', '.', '.', '.'],
... 'INFO': ['.', 'DB', 'DB', '.'],
... 'FORMAT': ['GT', 'GT', 'GT', 'GT'],
... 'Steven': ['0/0', '0/1', '0/1', '1/1'],
... }
>>> vf = pyvcf.VcfFrame.from_dict([], data)
>>> vf.df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Steven
0 chr1 100 . G A . . . GT 0/0
1 chr1 101 . T C . . DB GT 0/1
2 chr1 102 . A T . . DB GT 0/1
3 chr1 103 . C A . . . GT 1/1
We can add the SOMATIC flag to the INFO field:
>>> vf.add_flag('SOMATIC').df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Steven
0 chr1 100 . G A . . SOMATIC GT 0/0
1 chr1 101 . T C . . DB;SOMATIC GT 0/1
2 chr1 102 . A T . . DB;SOMATIC GT 0/1
3 chr1 103 . C A . . SOMATIC GT 1/1
Setting ``order='first'`` will place the flag at the beginning:
>>> vf.add_flag('SOMATIC', order='first').df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Steven
0 chr1 100 . G A . . SOMATIC GT 0/0
1 chr1 101 . T C . . SOMATIC;DB GT 0/1
2 chr1 102 . A T . . SOMATIC;DB GT 0/1
3 chr1 103 . C A . . SOMATIC GT 1/1
Setting ``order=False`` will overwrite the INFO field:
>>> vf.add_flag('SOMATIC', order=False).df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Steven
0 chr1 100 . G A . . SOMATIC GT 0/0
1 chr1 101 . T C . . SOMATIC GT 0/1
2 chr1 102 . A T . . SOMATIC GT 0/1
3 chr1 103 . C A . . SOMATIC GT 1/1
We can also specify which rows should be updated:
>>> vf.add_flag('SOMATIC', index=[True, True, False, False]).df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Steven
0 chr1 100 . G A . . SOMATIC GT 0/0
1 chr1 101 . T C . . DB;SOMATIC GT 0/1
2 chr1 102 . A T . . DB GT 0/1
3 chr1 103 . C A . . . GT 1/1
"""
if index is None:
index = [True for i in range(self.shape[0])]
def f(r):
if not index[r.name]:
return r
if r.INFO == '.':
r.INFO = flag
elif not order:
r.INFO = flag
elif order == 'first':
r.INFO = f'{flag};{r.INFO}'
else:
r.INFO += f';{flag}'
return r
df = self.df.apply(f, axis=1)
vf = self.__class__(self.copy_meta(), df)
return vf
def cfilter_empty(self, opposite=False, as_list=False):
"""Remove samples whose genotype calls are all missing.
Parameters
----------
opposite : bool, default: False
If True, return samples that don't meet the said criteria.
as_list : bool, default: False
If True, return a list of sample names instead of a VcfFrame.
Returns
-------
VcfFrame
Filtered VcfFrame.
Examples
--------
Assume we have the following data:
>>> data = {
... 'CHROM': ['chr1', 'chr1', 'chr1', 'chr1'],
... 'POS': [100, 101, 102, 103],
... 'ID': ['.', '.', '.', '.'],
... 'REF': ['G', 'T', 'G', 'T'],
... 'ALT': ['A', 'C', 'C', 'C'],
... 'QUAL': ['.', '.', '.', '.'],
... 'FILTER': ['.', '.', '.', '.'],
... 'INFO': ['.', '.', '.', '.'],
... 'FORMAT': ['GT', 'GT', 'GT', 'GT'],
... 'Steven': ['0/1', '1/1', '1/1', '1/1'],
... 'Rachel': ['./.', './.', './.', './.'],
... 'John': ['0/0', './.', '0/0', '0/0'],
... 'Sara': ['./.', './.', './.', './.'],
... }
>>> vf = pyvcf.VcfFrame.from_dict([], data)
>>> vf.df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Steven Rachel John Sara
0 chr1 100 . G A . . . GT 0/1 ./. 0/0 ./.
1 chr1 101 . T C . . . GT 1/1 ./. ./. ./.
2 chr1 102 . G C . . . GT 1/1 ./. 0/0 ./.
3 chr1 103 . T C . . . GT 1/1 ./. 0/0 ./.
We can remove samples whose genotypes are all missing:
>>> vf.cfilter_empty().df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Steven John
0 chr1 100 . G A . . . GT 0/1 0/0
1 chr1 101 . T C . . . GT 1/1 ./.
2 chr1 102 . G C . . . GT 1/1 0/0
3 chr1 103 . T C . . . GT 1/1 0/0
We can also select those samples:
>>> vf.cfilter_empty(opposite=True).df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Rachel Sara
0 chr1 100 . G A . . . GT ./. ./.
1 chr1 101 . T C . . . GT ./. ./.
2 chr1 102 . G C . . . GT ./. ./.
3 chr1 103 . T C . . . GT ./. ./.
Finally, we can return a list of sample names from the filtering:
>>> vf.cfilter_empty(as_list=True)
['Steven', 'John']
"""
f = lambda r: r[9:].apply(gt_miss)
s = self.df.apply(f, axis=1).all()
if opposite:
s = s[s == True]
else:
s = s[s == False]
l = s.index.to_list()
if as_list:
return l
return self.subset(l)
def collapse(self):
"""Collapse duplicate records in the VcfFrame.
Duplicate records have identical values for CHROM, POS, and REF.
They can result from merging two VCF files.
.. note::
The method will sort the order of ALT alleles.
Returns
-------
VcfFrame
Collapsed VcfFrame.
Examples
--------
Assume we have the following data:
>>> from fuc import pyvcf
>>> data = {
... 'CHROM': ['chr1', 'chr1', 'chr2', 'chr2'],
... 'POS': [100, 100, 200, 200],
... 'ID': ['.', '.', '.', '.'],
... 'REF': ['A', 'A', 'C', 'C'],
... 'ALT': ['C', 'T', 'G', 'G,A'],
... 'QUAL': ['.', '.', '.', '.'],
... 'FILTER': ['.', '.', '.', '.'],
... 'INFO': ['.', '.', '.', '.'],
... 'FORMAT': ['GT:AD', 'GT:AD', 'GT:AD', 'GT:AD'],
... 'Steven': ['0/1:12,15', './.:.', '0/1:16,12', './.:.'],
... 'Sara': ['./.:.', '0/1:14,15', './.:.', '1/2:0,11,17'],
... }
>>> vf = pyvcf.VcfFrame.from_dict([], data)
>>> vf.df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Steven Sara
0 chr1 100 . A C . . . GT:AD 0/1:12,15 ./.:.
1 chr1 100 . A T . . . GT:AD ./.:. 0/1:14,15
2 chr2 200 . C G . . . GT:AD 0/1:16,12 ./.:.
3 chr2 200 . C G,A . . . GT:AD ./.:. 1/2:0,11,17
We collapse the VcfFrame:
>>> vf.collapse().df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Steven Sara
0 chr1 100 . A C,T . . . GT:AD 0/1:12,15,0 0/2:14,0,15
2 chr2 200 . C A,G . . . GT:AD 0/2:16,0,12 1/2:0,17,11
"""
df = self.df.copy()
dup_idx = df.duplicated(['CHROM', 'POS', 'REF'], keep=False)
dups = {}
for i, r in df[dup_idx].iterrows():
name = f'{r.CHROM}:{r.POS}:{r.REF}'
if name not in dups:
dups[name] = []
dups[name].append(i)
def collapse_one(df):
ref_allele = df.REF.unique()[0]
alt_alleles = []
for i, r in df.iterrows():
alt_alleles += r.ALT.split(',')
alt_alleles = sorted(list(set(alt_alleles)),
key=lambda x: (len(x), x))
all_alleles = [ref_allele] + alt_alleles
def infunc(x, r_all_alleles, index_map):
if gt_miss(x):
return ''
old_fields = x.split(':')
old_gt = old_fields[0]
new_gt = '/'.join([str(x) for x in
sorted([index_map[int(i)] for i in old_gt.split('/')])
])
new_fields = [new_gt]
for old_field in old_fields[1:]:
old_subfields = old_field.split(',')
new_subfields = ['0' for x in all_alleles]
if len(old_subfields) == len(r_all_alleles):
for i, old_subfield in enumerate(old_subfields):
new_subfields[index_map[i]] = old_subfield
new_fields.append(','.join(new_subfields))
else:
new_fields.append(old_field)
return ':'.join(new_fields)
def outfunc(r):
r_alt_alleles = r.ALT.split(',')
r_all_alleles = [r.REF] + r_alt_alleles
old_indicies = [i for i in range(len(r_all_alleles))]
new_indicies = [all_alleles.index(x) for x in r_all_alleles]
index_map = dict(zip(old_indicies, new_indicies))
r[9:] = r[9:].apply(infunc, args=(r_all_alleles, index_map))
return r
df2 = df.apply(outfunc, axis=1)
def raise_error(c):
if sum(c.values != '') > 1:
message = ('cannot collapse following '
f'records:\n{df.loc[c.index]}')
raise ValueError(message)
df2.iloc[:, 9:].apply(raise_error)
df2 = df2.groupby(['CHROM', 'POS', 'REF']).agg(''.join)
df2 = df2.reset_index()
cols = list(df2)
cols[2], cols[3] = cols[3], cols[2]
df2 = df2[cols]
df2.ID = df.ID.unique()[0]
df2.ALT = ','.join(alt_alleles)
df2.QUAL = df.QUAL.unique()[0]
df2.FILTER = df.FILTER.unique()[0]
df2.INFO = df.INFO.unique()[0]
df2.FORMAT = df.FORMAT.unique()[0]
s = df2.squeeze()
s = s.replace('', row_missval(s))
return s
for name, i in dups.items():
df.iloc[i] = collapse_one(df.iloc[i])
df.drop_duplicates(subset=['CHROM', 'POS', 'REF'], inplace=True)
vf = self.__class__(self.copy_meta(), df)
return vf
@classmethod
def from_dict(cls, meta, data):
"""
Construct VcfFrame from a dict of array-like or dicts.
Parameters
----------
meta : list
List of the metadata lines.
data : dict
Of the form {field : array-like} or {field : dict}.
Returns
-------
VcfFrame
VcfFrame.
See Also
--------
VcfFrame
VcfFrame object creation using constructor.
VcfFrame.from_file
Construct VcfFrame from a VCF file.
VcfFrame.from_string
Construct VcfFrame from a string.
Examples
--------
Below is a simple example:
>>> from fuc import pyvcf
>>> data = {
... 'CHROM': ['chr1', 'chr2'],
... 'POS': [100, 101],
... 'ID': ['.', '.'],
... 'REF': ['G', 'T'],
... 'ALT': ['A', 'C'],
... 'QUAL': ['.', '.'],
... 'FILTER': ['.', '.'],
... 'INFO': ['.', '.'],
... 'FORMAT': ['GT', 'GT'],
... 'A': ['0/1', '1/1']
... }
>>> vf = pyvcf.VcfFrame.from_dict([], data)
>>> vf.df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT A
0 chr1 100 . G A . . . GT 0/1
1 chr2 101 . T C . . . GT 1/1
"""
return cls(meta, pd.DataFrame(data))
@classmethod
def from_file(
cls, fn, compression=False, meta_only=False, regions=None
):
"""
Construct VcfFrame from a VCF file.
The method will automatically use BGZF decompression if the filename
ends with '.gz'.
If the file is large, you can specify regions of interest to speed up
data processing. Note that this requires the file to be BGZF compressed
and indexed (.tbi) for random access. Each region to be sliced must
have the format chrom:start-end and be a half-open interval with
(start, end]. This means, for example, 'chr1:100-103' will extract
positions 101, 102, and 103. Alternatively, you can provide BED data
to specify regions.
Parameters
----------
fn : str or file-like object
VCF file (compressed or uncompressed). By file-like object, we
refer to objects with a :meth:`read()` method, such as a file
handle.
compression : bool, default: False
If True, use BGZF decompression regardless of the filename.
meta_only : bool, default: False
If True, only read metadata and header lines.
regions : str, list, or pybed.BedFrame, optional
Region or list of regions to be sliced. Also accepts a BED file
or a BedFrame.
Returns
-------
VcfFrame
VcfFrame object.
See Also
--------
VcfFrame
VcfFrame object creation using constructor.
VcfFrame.from_dict
Construct VcfFrame from a dict of array-like or dicts.
VcfFrame.from_string
Construct VcfFrame from a string.
Examples
--------
>>> from fuc import pyvcf
>>> vf = pyvcf.VcfFrame.from_file('unzipped.vcf')
>>> vf = pyvcf.VcfFrame.from_file('zipped.vcf.gz')
>>> vf = pyvcf.VcfFrame.from_file('zipped.vcf', compression=True)
"""
if isinstance(fn, str):
if regions is None:
s = ''
if fn.startswith('~'):
fn = os.path.expanduser(fn)
if fn.endswith('.gz') or compression:
f = bgzf.open(fn, 'rt')
else:
f = open(fn)
for line in f:
s += line
f.close()
else:
s = slice(fn, regions)
elif hasattr(fn, 'read'):
s = fn.read()
try:
s = s.decode('utf-8')
except AttributeError:
pass
else:
raise TypeError(f'Incorrect input type: {type(fn).__name__}')
vf = cls.from_string(s)
return vf
@classmethod
def from_string(cls, s, meta_only=False):
"""
Construct VcfFrame from a string.
Parameters
----------
s : str
String representation of a VCF file.
Returns
-------
VcfFrame
VcfFrame object.
See Also
--------
VcfFrame
VcfFrame object creation using constructor.
VcfFrame.from_file
Construct VcfFrame from a VCF file.
VcfFrame.from_dict
Construct VcfFrame from a dict of array-like or dicts.
Examples
--------
>>> from fuc import pyvcf
>>> data = {
... 'CHROM': ['chr1', 'chr1'],
... 'POS': [100, 101],
... 'ID': ['.', '.'],
... 'REF': ['G', 'T'],
... 'ALT': ['A', 'C'],
... 'QUAL': ['.', '.'],
... 'FILTER': ['.', '.'],
... 'INFO': ['.', '.'],
... 'FORMAT': ['GT', 'GT'],
... 'A': ['0/1', '0/1']
... }
>>> vf = pyvcf.VcfFrame.from_dict(['##fileformat=VCFv4.3'], data)
>>> s = vf.to_string()
>>> print(s[:20])
##fileformat=VCFv4.3
>>> vf = pyvcf.VcfFrame.from_string(s)
>>> vf.df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT A
0 chr1 100 . G A . . . GT 0/1
1 chr1 101 . T C . . . GT 0/1
"""
skiprows = 0
meta = []
for line in s.split('\n'):
if line.startswith('##'):
meta.append(line.strip())
skiprows += 1
elif line.startswith('#CHROM'):
columns = line.strip().split('\t')
skiprows += 1
else:
break
columns[0] = 'CHROM'
for header in HEADERS:
if header not in columns and header != 'FORMAT':
raise ValueError(f"Required VCF column missing: '{header}'")
if meta_only:
df = pd.DataFrame(columns=columns)
else:
dtype = {**HEADERS, **{x: str for x in columns[9:]}}
df = pd.read_table(StringIO(s), skiprows=skiprows,
names=columns, dtype=dtype)
return cls(meta, df)
def calculate_concordance(self, a, b, c=None, mode='all'):
"""
Calculate genotype concordance between two (A, B) or three (A, B, C)
samples.
This method will return (Ab, aB, AB, ab) for comparison between two
samples and (Abc, aBc, ABc, abC, AbC, aBC, ABC, abc) for three
samples. Note that the former is equivalent to (FP, FN, TP, TN) if
we assume A is the test sample and B is the truth sample.
Only biallelic sites will be used for calculation. Additionally, the
method will ignore zygosity and only consider presence or absence of
variant calls (e.g. ``0/1`` and ``1/1`` will be treated the same).
Parameters
----------
a, b : str or int
Name or index of Samples A and B.
c : str or int, optional
Name or index of Sample C.
mode : {'all', 'snv', 'indel'}, default: 'all'
Determines which variant types should be analyzed:
- 'all': Include both SNVs and INDELs.
- 'snv': Include SNVs only.
- 'indel': Include INDELs only.
Returns
-------
tuple
Four- or eight-element tuple depending on the number of samples.
See Also
--------
fuc.api.common.sumstat
Return various summary statistics from (FP, FN, TP, TN).
Examples
--------
Assume we have the following data:
>>> from fuc import pyvcf
>>> data = {
... 'CHROM': ['chr1', 'chr1', 'chr1', 'chr1', 'chr1'],
... 'POS': [100, 101, 102, 103, 104],
... 'ID': ['.', '.', '.', '.', '.'],
... 'REF': ['G', 'CT', 'T', 'C', 'A'],
... 'ALT': ['A', 'C', 'A', 'T', 'G,C'],
... 'QUAL': ['.', '.', '.', '.', '.'],
... 'FILTER': ['.', '.', '.', '.', '.'],
... 'INFO': ['.', '.', '.', '.', '.'],
... 'FORMAT': ['GT', 'GT', 'GT', 'GT', 'GT'],
... 'A': ['0/1', '0/0', '0/0', '0/1', '0/0'],
... 'B': ['1/1', '0/1', './.', '0/1', '0/0'],
... 'C': ['0/1', '0/1', '1/1', './.', '1/2'],
... }
>>> vf = pyvcf.VcfFrame.from_dict([], data)
>>> vf.df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT A B C
0 chr1 100 . G A . . . GT 0/1 1/1 0/1
1 chr1 101 . CT C . . . GT 0/0 0/1 0/1
2 chr1 102 . T A . . . GT 0/0 ./. 1/1
3 chr1 103 . C T . . . GT 0/1 0/1 ./.
4 chr1 104 . A G,C . . . GT 0/0 0/0 1/2
We can first compare the samples A and B:
>>> vf.calculate_concordance('A', 'B', mode='all')
(0, 1, 2, 1)
>>> vf.calculate_concordance('A', 'B', mode='snv')
(0, 0, 2, 1)
>>> vf.calculate_concordance('A', 'B', mode='indel')
(0, 1, 0, 0)
We can also compare all three samples at once:
>>> vf.calculate_concordance('A', 'B', 'C')
(0, 0, 1, 1, 0, 1, 1, 0)
"""
vf = self.filter_multialt()
if mode == 'all':
pass
elif mode == 'snv':
vf = vf.filter_indel()
elif mode == 'indel':
vf = vf.filter_indel(opposite=True)
else:
raise ValueError(f'Incorrect mode: {mode}.')
if c is None:
result = self._compare_two(vf, a, b)
else:
result = self._compare_three(vf, a, b, c)
return result
def _compare_two(self, vf, a, b):
a = a if isinstance(a, str) else vf.samples[a]
b = b if isinstance(b, str) else vf.samples[b]
def func(r):
a_has = gt_hasvar(r[a])
b_has = gt_hasvar(r[b])
if a_has and not b_has:
return 'Ab'
elif not a_has and b_has:
return 'aB'
elif a_has and b_has:
return 'AB'
else:
return 'ab'
d = vf.df.apply(func, axis=1).value_counts().to_dict()
Ab = d['Ab'] if 'Ab' in d else 0
aB = d['aB'] if 'aB' in d else 0
AB = d['AB'] if 'AB' in d else 0
ab = d['ab'] if 'ab' in d else 0
return (Ab, aB, AB, ab)
def _compare_three(self, vf, a, b, c):
a = a if isinstance(a, str) else vf.samples[a]
b = b if isinstance(b, str) else vf.samples[b]
c = c if isinstance(c, str) else vf.samples[c]
def func(r):
a_has = gt_hasvar(r[a])
b_has = gt_hasvar(r[b])
c_has = gt_hasvar(r[c])
if a_has and not b_has and not c_has:
return 'Abc'
elif not a_has and b_has and not c_has:
return 'aBc'
elif a_has and b_has and not c_has:
return 'ABc'
elif not a_has and not b_has and c_has:
return 'abC'
elif a_has and not b_has and c_has:
return 'AbC'
elif not a_has and b_has and c_has:
return 'aBC'
elif a_has and b_has and c_has:
return 'ABC'
else:
return 'abc'
d = vf.df.apply(func, axis=1).value_counts().to_dict()
Abc = d['Abc'] if 'Abc' in d else 0
aBc = d['aBc'] if 'aBc' in d else 0
ABc = d['ABc'] if 'ABc' in d else 0
abC = d['abC'] if 'abC' in d else 0
AbC = d['AbC'] if 'AbC' in d else 0
aBC = d['aBC'] if 'aBC' in d else 0
ABC = d['ABC'] if 'ABC' in d else 0
abc = d['abc'] if 'abc' in d else 0
return (Abc, aBc, ABc, abC, AbC, aBC, ABC, abc)
def combine(self, a, b):
"""
Combine genotype data from two samples (A, B).
This method can be especially useful when you want to consolidate
genotype data from replicate samples. See examples below for more
details.
Parameters
----------
a, b : str or int
Name or index of Samples A and B.
Returns
-------
pandas.Series
Resulting VCF column.
See Also
--------
VcfFrame.subtract
Subtract genotype data between two samples (A, B).
Examples
--------
Assume we have the following data, where a cancer patient's tissue sample
has been sequenced twice:
>>> from fuc import pyvcf
>>> data = {
... 'CHROM': ['chr1', 'chr1', 'chr1', 'chr1', 'chr1'],
... 'POS': [100, 101, 102, 103, 104],
... 'ID': ['.', '.', '.', '.', '.'],
... 'REF': ['G', 'T', 'T', 'A', 'C'],
... 'ALT': ['A', 'C', 'A', 'C', 'G'],
... 'QUAL': ['.', '.', '.', '.', '.'],
... 'FILTER': ['.', '.', '.', '.', '.'],
... 'INFO': ['.', '.', '.', '.', '.'],
... 'FORMAT': ['GT:DP', 'GT:DP', 'GT:DP', 'GT:DP', 'GT:DP'],
... 'Tissue1': ['./.:.', '0/0:7', '0/1:28', '0/1:4', '0/1:32'],
... 'Tissue2': ['0/1:24', '0/1:42', './.:.', './.:.', '0/1:19'],
... }
>>> vf = pyvcf.VcfFrame.from_dict([], data)
>>> vf.df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Tissue1 Tissue2
0 chr1 100 . G A . . . GT:DP ./.:. 0/1:24
1 chr1 101 . T C . . . GT:DP 0/0:7 0/1:42
2 chr1 102 . T A . . . GT:DP 0/1:28 ./.:.
3 chr1 103 . A C . . . GT:DP 0/1:4 ./.:.
4 chr1 104 . C G . . . GT:DP 0/1:32 0/1:19
We can combine genotype data from 'Tissue1' and 'Tissue2' to get a
more comprehensive variant profile:
>>> vf.df['Combined'] = vf.combine('Tissue1', 'Tissue2')
>>> vf.df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Tissue1 Tissue2 Combined
0 chr1 100 . G A . . . GT:DP ./.:. 0/1:24 0/1:24
1 chr1 101 . T C . . . GT:DP 0/0:7 0/1:42 0/1:42
2 chr1 102 . T A . . . GT:DP 0/1:28 ./.:. 0/1:28
3 chr1 103 . A C . . . GT:DP 0/1:4 ./.:. 0/1:4
4 chr1 104 . C G . . . GT:DP 0/1:32 0/1:19 0/1:32
"""
a = a if isinstance(a, str) else self.samples[a]
b = b if isinstance(b, str) else self.samples[b]
def func(r):
a_has = gt_hasvar(r[a])
b_has = gt_hasvar(r[b])
if a_has and b_has:
return r[a]
elif a_has and not b_has:
return r[a]
elif not a_has and b_has:
return r[b]
else:
return r[a]
s = self.df.apply(func, axis=1)
return s
def copy_meta(self):
"""Return a copy of the metadata."""
return deepcopy(self.meta)
def copy_df(self):
"""Return a copy of the dataframe."""
return self.df.copy()
def copy(self):
"""Return a copy of the VcfFrame."""
return self.__class__(self.copy_meta(), self.copy_df())
def to_file(self, fn, compression=False):
"""
Write VcfFrame to a VCF file.
If the filename ends with '.gz', the method will automatically
use the BGZF compression when writing the file.
Parameters
----------
fn : str
VCF file path.
compression : bool, default: False
If True, use the BGZF compression.
Examples
--------
>>> from fuc import pyvcf
>>> data = {
... 'CHROM': ['chr1', 'chr2'],
... 'POS': [100, 101],
... 'ID': ['.', '.'],
... 'REF': ['G', 'T'],
... 'ALT': ['A', 'C'],
... 'QUAL': ['.', '.'],
... 'FILTER': ['.', '.'],
... 'INFO': ['.', '.'],
... 'FORMAT': ['GT', 'GT'],
... 'Steven': ['0/1', '1/1']
... }
>>> vf = pyvcf.VcfFrame.from_dict(['##fileformat=VCFv4.3'], data)
>>> vf.to_file('unzipped.vcf')
>>> vf.to_file('zipped.vcf.gz')
>>> vf.to_file('zipped.vcf.gz', compression=True)
"""
if fn.endswith('.gz') or compression:
f = bgzf.open(fn, 'w')
else:
f = open(fn, 'w')
f.write(self.to_string())
f.close()
def to_string(self):
"""
Render the VcfFrame to a console-friendly tabular output.
Returns
-------
str
String representation of the VcfFrame.
Examples
--------
>>> from fuc import pyvcf
>>> data = {
... 'CHROM': ['chr1', 'chr1'],
... 'POS': [100, 101],
... 'ID': ['.', '.'],
... 'REF': ['G', 'T'],
... 'ALT': ['A', 'C'],
... 'QUAL': ['.', '.'],
... 'FILTER': ['.', '.'],
... 'INFO': ['.', '.'],
... 'FORMAT': ['GT', 'GT'],
... 'A': ['0/1', '0/1']
... }
>>> vf = pyvcf.VcfFrame.from_dict(['##fileformat=VCFv4.3'], data)
>>> print(vf.to_string())
##fileformat=VCFv4.3
#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT A
chr1 100 . G A . . . GT 0/1
chr1 101 . T C . . . GT 0/1
"""
s = ''
if self.meta:
s += '\n'.join(self.meta) + '\n'
s += self.df.rename(columns={'CHROM': '#CHROM'}
).to_csv(index=False, sep='\t')
return s
def strip(self, format='GT', metadata=False):
"""
Remove any unnecessary data.
Parameters
----------
format : str, default: 'GT'
FORMAT keys to retain (e.g. 'GT:AD:DP').
metadata : bool, default: False
If True, keep the metadata.
Returns
-------
VcfFrame
Stripped VcfFrame.
Examples
--------
>>> from fuc import pyvcf
>>> data = {
... 'CHROM': ['chr1', 'chr1', 'chr1'],
... 'POS': [100, 101, 102],
... 'ID': ['.', '.', '.'],
... 'REF': ['G', 'T', 'A'],
... 'ALT': ['A', 'C', 'T'],
... 'QUAL': ['.', '.', '.'],
... 'FILTER': ['.', '.', '.'],
... 'INFO': ['.', '.', '.'],
... 'FORMAT': ['GT:DP:AD', 'GT:DP:AD', 'GT'],
... 'A': ['0/1:30:15,15', '1/1:28:0,28', '0/1']
... }
>>> vf = pyvcf.VcfFrame.from_dict([], data)
>>> vf.df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT A
0 chr1 100 . G A . . . GT:DP:AD 0/1:30:15,15
1 chr1 101 . T C . . . GT:DP:AD 1/1:28:0,28
2 chr1 102 . A T . . . GT 0/1
>>> vf.strip('GT:DP').df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT A
0 chr1 100 . G A . . . GT:DP 0/1:30
1 chr1 101 . T C . . . GT:DP 1/1:28
2 chr1 102 . A T . . . GT:DP 0/1:.
"""
new_keys = format.split(':')
def one_row(r):
old_keys = r.FORMAT.split(':')
indicies = [
old_keys.index(x) if x in old_keys else None for x in new_keys
]
def one_gt(g):
old_fields = g.split(':')
new_fields = [
'.' if x is None
else '.' if x >= len(old_fields)
else old_fields[x]
for x in indicies
]
return ':'.join(new_fields)
r.iloc[9:] = r.iloc[9:].apply(one_gt)
return r
df = self.df.copy()
df[['ID', 'QUAL', 'FILTER', 'INFO']] = '.'
df = df.apply(one_row, axis=1)
df.FORMAT = format
if metadata:
meta = self.copy_meta()
else:
meta = []
vf = self.__class__(meta, df)
return vf
def merge(
self, other, how='inner', format='GT', sort=True, collapse=False
):
"""
Merge with the other VcfFrame.
Parameters
----------
other : VcfFrame
Other VcfFrame.
how : str, default: 'inner'
Type of merge as defined in `pandas.DataFrame.merge`.
format : str, default: 'GT'
FORMAT subfields to be retained (e.g. 'GT:AD:DP').
sort : bool, default: True
If True, sort the VcfFrame before returning.
collapse : bool, default: False
If True, collapse duplicate records.
Returns
-------
VcfFrame
Merged VcfFrame.
Examples
--------
Assume we have the following data:
>>> from fuc import pyvcf
>>> data1 = {
... 'CHROM': ['chr1', 'chr1'],
... 'POS': [100, 101],
... 'ID': ['.', '.'],
... 'REF': ['G', 'T'],
... 'ALT': ['A', 'C'],
... 'QUAL': ['.', '.'],
... 'FILTER': ['.', '.'],
... 'INFO': ['.', '.'],
... 'FORMAT': ['GT:DP', 'GT:DP'],
... 'A': ['0/0:32', '0/1:29'],
... 'B': ['0/1:24', '1/1:30'],
... }
>>> data2 = {
... 'CHROM': ['chr1', 'chr1', 'chr2'],
... 'POS': [100, 101, 200],
... 'ID': ['.', '.', '.'],
... 'REF': ['G', 'T', 'A'],
... 'ALT': ['A', 'C', 'T'],
... 'QUAL': ['.', '.', '.'],
... 'FILTER': ['.', '.', '.'],
... 'INFO': ['.', '.', '.'],
... 'FORMAT': ['GT:DP', 'GT:DP', 'GT:DP'],
... 'C': ['./.:.', '0/0:24', '0/0:26'],
... 'D': ['0/1:24', '0/1:31', '0/1:26'],
... }
>>> vf1 = pyvcf.VcfFrame.from_dict([], data1)
>>> vf2 = pyvcf.VcfFrame.from_dict([], data2)
>>> vf1.df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT A B
0 chr1 100 . G A . . . GT:DP 0/0:32 0/1:24
1 chr1 101 . T C . . . GT:DP 0/1:29 1/1:30
>>> vf2.df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT C D
0 chr1 100 . G A . . . GT:DP ./.:. 0/1:24
1 chr1 101 . T C . . . GT:DP 0/0:24 0/1:31
2 chr2 200 . A T . . . GT:DP 0/0:26 0/1:26
We can merge the two VcfFrames with ``how='inner'`` (default):
>>> vf1.merge(vf2).df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT A B C D
0 chr1 100 . G A . . . GT 0/0 0/1 ./. 0/1
1 chr1 101 . T C . . . GT 0/1 1/1 0/0 0/1
We can also merge with ``how='outer'``:
>>> vf1.merge(vf2, how='outer').df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT A B C D
0 chr1 100 . G A . . . GT 0/0 0/1 ./. 0/1
1 chr1 101 . T C . . . GT 0/1 1/1 0/0 0/1
2 chr2 200 . A T . . . GT ./. ./. 0/0 0/1
Since both VcfFrames have the DP subfield, we can use ``format='GT:DP'``:
>>> vf1.merge(vf2, how='outer', format='GT:DP').df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT A B C D
0 chr1 100 . G A . . . GT:DP 0/0:32 0/1:24 ./.:. 0/1:24
1 chr1 101 . T C . . . GT:DP 0/1:29 1/1:30 0/0:24 0/1:31
2 chr2 200 . A T . . . GT:DP ./.:. ./.:. 0/0:26 0/1:26
"""
if self.sites_only and other.sites_only:
df = pd.concat([self.df, other.df])
merged = self.__class__([], df)
merged = merged.drop_duplicates()
else:
vf1 = self.strip(format=format)
vf2 = other.strip(format=format)
dropped = ['ID', 'QUAL', 'FILTER', 'INFO', 'FORMAT']
shared = ['CHROM', 'POS', 'REF', 'ALT']
df = vf1.df.merge(vf2.df.drop(columns=dropped), on=shared, how=how)
# This ensures that the column order is intact when either of the
# dataframes is empty.
cols = vf1.df.columns.to_list() + vf2.df.columns[9:].to_list()
df = df[cols]
df[dropped] = df[dropped].fillna('.')
df.FORMAT = format
def func(r):
n = len(r.FORMAT.split(':'))
x = './.'
for i in range(1, n):
x += ':.'
r = r.fillna(x)
return r
df = df.apply(func, axis=1)
merged = self.__class__([], df)
if collapse:
merged = merged.collapse()
if sort:
merged = merged.sort()
return merged
def meta_keys(self):
"""Print metadata lines with a key."""
for line in self.meta:
if '=<ID=' in line:
print(line)
def miss2ref(self):
"""
Convert missing genotype (./.) to homozygous REF (0/0).
Returns
-------
VcfFrame
VcfFrame object.
Examples
--------
>>> from fuc import pyvcf
>>> data = {
... 'CHROM': ['chr1', 'chr2'],
... 'POS': [100, 101],
... 'ID': ['.', '.'],
... 'REF': ['G', 'T'],
... 'ALT': ['A', 'C'],
... 'QUAL': ['.', '.'],
... 'FILTER': ['.', '.'],
... 'INFO': ['.', '.'],
... 'FORMAT': ['GT', 'GT'],
... 'A': ['./.', '1/1'],
... 'B': ['./.', './.']
... }
>>> vf = pyvcf.VcfFrame.from_dict([], data)
>>> vf.df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT A B
0 chr1 100 . G A . . . GT ./. ./.
1 chr2 101 . T C . . . GT 1/1 ./.
>>> new_vf = vf.miss2ref()
>>> new_vf.df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT A B
0 chr1 100 . G A . . . GT 0/0 0/0
1 chr2 101 . T C . . . GT 1/1 0/0
"""
df = self.copy_df()
def one_gt(g):
l = [g.split(':')[0].replace('.', '0')] + g.split(':')[1:]
return ':'.join(l)
df.iloc[:, 9:] = df.iloc[:, 9:].applymap(one_gt)
return self.__class__(self.copy_meta(), df)
def plot_region(
self, sample, k='#DP', color=None, region=None, label=None, ax=None,
figsize=None, **kwargs
):
"""
Create a scatter plot showing the read depth profile of a sample for
the specified region.
Parameters
----------
sample : str or int
Name or index of target sample.
k : str, default: '#DP'
Genotype key to use for extracting data:
- '#DP': Return read depth.
- '#AD_REF': Return REF allele depth.
- '#AD_ALT': Return ALT allele depth.
- '#AD_FRAC_REF': Return REF allele fraction.
- '#AD_FRAC_ALT': Return ALT allele fraction.
color : str, optional
Marker color.
region : str, optional
Target region ('chrom:start-end').
label : str, optional
Label to use for the data points.
ax : matplotlib.axes.Axes, optional
Pre-existing axes for the plot. Otherwise, create a new one.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
kwargs
Other keyword arguments will be passed down to
:meth:`matplotlib.axes.Axes.scatter`.
Returns
-------
matplotlib.axes.Axes
The matplotlib axes containing the plot.
Examples
--------
Below is a simple example:
.. plot::
:context: close-figs
>>> from fuc import pyvcf, common
>>> import matplotlib.pyplot as plt
>>> common.load_dataset('pyvcf')
>>> vcf_file = '~/fuc-data/pyvcf/getrm-cyp2d6-vdr.vcf'
>>> vf = pyvcf.VcfFrame.from_file(vcf_file)
>>> vf.plot_region('NA18973')
>>> plt.tight_layout()
We can display allele fraction of REF and ALT instead of DP:
.. plot::
:context: close-figs
>>> ax = vf.plot_region('NA18973', k='#AD_FRAC_REF', label='REF')
>>> vf.plot_region('NA18973', k='#AD_FRAC_ALT', label='ALT', ax=ax)
>>> plt.tight_layout()
"""
if self.df.empty:
raise ValueError('VcfFrame is empty')
sample = sample if isinstance(sample, str) else self.samples[sample]
if region is None:
if len(self.contigs) == 1:
vf = self.copy()
else:
raise ValueError('Multiple contigs found.')
else:
vf = self.slice(region)
df = vf.extract_format(k)
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
ax.scatter(
x=vf.df.POS, y=df[sample], c=color, label=label, **kwargs
)
ax.set_xlabel('Position')
ax.set_ylabel('Depth')
return ax
def plot_comparison(
self, a, b, c=None, labels=None, ax=None, figsize=None
):
"""
Create a Venn diagram showing genotype concordance between groups.
This method supports comparison between two groups (Groups A & B)
as well as three groups (Groups A, B, & C).
Parameters
----------
a, b : list
Sample names. The lists must have the same shape.
c : list, optional
Same as above.
labels : list, optional
List of labels to be displayed.
ax : matplotlib.axes.Axes, optional
Pre-existing axes for the plot. Otherwise, create a new one.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
Returns
-------
matplotlib.axes.Axes
The matplotlib axes containing the plot.
matplotlib_venn._common.VennDiagram
VennDiagram object.
Examples
--------
.. plot::
:context: close-figs
>>> from fuc import pyvcf, common
>>> common.load_dataset('pyvcf')
>>> f = '~/fuc-data/pyvcf/plot_comparison.vcf'
>>> vf = pyvcf.VcfFrame.from_file(f)
>>> a = ['Steven_A', 'John_A', 'Sara_A']
>>> b = ['Steven_B', 'John_B', 'Sara_B']
>>> c = ['Steven_C', 'John_C', 'Sara_C']
>>> vf.plot_comparison(a, b)
.. plot::
:context: close-figs
>>> vf.plot_comparison(a, b, c)
"""
if len(a) != len(b):
raise ValueError('Groups A and B have different length.')
if c is not None and len(a) != len(c):
raise ValueError('Group C has unmatched length.')
if labels is None:
if c is None:
labels = ('A', 'B')
else:
labels = ('A', 'B', 'C')
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
venn_kws = dict(ax=ax, alpha=0.5, set_labels=labels)
if c is None:
out = self._plot_comparison_two(a, b, venn_kws)
else:
out = self._plot_comparison_three(a, b, c, venn_kws)
return ax, out
def _plot_comparison_two(self, a, b, venn_kws):
n = [0, 0, 0, 0]
for i in range(len(a)):
n = [x + y for x, y in zip(n, self.calculate_concordance(a[i], b[i]))]
out = venn2(subsets=n[:-1], **venn_kws)
return out
def _plot_comparison_three(self, a, b, c, venn_kws):
n = [0, 0, 0, 0, 0, 0, 0, 0]
for i in range(len(a)):
n = [x + y for x, y in zip(n, self.calculate_concordance(a[i], b[i], c[i]))]
out = venn3(subsets=n[:-1], **venn_kws)
return out
def plot_hist_format(
self, k, af=None, group_col=None, group_order=None, kde=True,
ax=None, figsize=None, **kwargs
):
"""
Create a histogram showing the distribution of data for the
specified FORMAT key.
Parameters
----------
k : str
One of the special FORMAT keys as defined in
:meth:`VcfFrame.extract_format`.
af : common.AnnFrame
AnnFrame containing sample annotation data.
group_col : list, optional
AnnFrame column containing sample group information.
group_order : list, optional
List of sample group names.
kde : bool, default: True
Compute a kernel density estimate to smooth the distribution.
ax : matplotlib.axes.Axes, optional
Pre-existing axes for the plot. Otherwise, create a new one.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
kwargs
Other keyword arguments will be passed down to
:meth:`seaborn.histplot`.
Returns
-------
matplotlib.axes.Axes
The matplotlib axes containing the plot.
Examples
--------
Below is a simple example:
.. plot::
:context: close-figs
>>> from fuc import common, pyvcf
>>> common.load_dataset('pyvcf')
>>> vcf_file = '~/fuc-data/pyvcf/normal-tumor.vcf'
>>> vf = pyvcf.VcfFrame.from_file(vcf_file)
>>> vf.plot_hist_format('#DP')
We can draw multiple histograms with hue mapping:
.. plot::
:context: close-figs
>>> annot_file = '~/fuc-data/pyvcf/normal-tumor-annot.tsv'
>>> af = common.AnnFrame.from_file(annot_file, sample_col='Sample')
>>> vf.plot_hist_format('#DP', af=af, group_col='Tissue')
We can show AF instead of DP:
.. plot::
:context: close-figs
>>> vf.plot_hist_format('#AD_FRAC_REF')
"""
if k not in FORMAT_SPECIAL_KEYS:
raise ValueError('Incorrect FORMAT key.')
df = self.extract_format(k)
df = df.T
id_vars = ['index']
if group_col is not None:
df = | pd.concat([df, af.df[group_col]], axis=1, join='inner') | pandas.concat |
"""
A warehouse for constant values required to initialize the PUDL Database.
This constants module stores and organizes a bunch of constant values which are
used throughout PUDL to populate static lists within the data packages or for
data cleaning purposes.
"""
import pandas as pd
import sqlalchemy as sa
######################################################################
# Constants used within the init.py module.
######################################################################
prime_movers = [
'steam_turbine',
'gas_turbine',
'hydro',
'internal_combustion',
'solar_pv',
'wind_turbine'
]
"""list: A list of the types of prime movers"""
rto_iso = {
'CAISO': 'California ISO',
'ERCOT': 'Electric Reliability Council of Texas',
'MISO': 'Midcontinent ISO',
'ISO-NE': 'ISO New England',
'NYISO': 'New York ISO',
'PJM': 'PJM Interconnection',
'SPP': 'Southwest Power Pool'
}
"""dict: A dictionary containing ISO/RTO abbreviations (keys) and names (values)
"""
us_states = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values)
"""
canada_prov_terr = {
'AB': 'Alberta',
'BC': 'British Columbia',
'CN': 'Canada',
'MB': 'Manitoba',
'NB': 'New Brunswick',
'NS': 'Nova Scotia',
'NL': 'Newfoundland and Labrador',
'NT': 'Northwest Territories',
'NU': 'Nunavut',
'ON': 'Ontario',
'PE': 'Prince Edward Island',
'QC': 'Quebec',
'SK': 'Saskatchewan',
'YT': 'Yukon Territory',
}
"""dict: A dictionary containing Canadian provinces' and territories'
abbreviations (keys) and names (values)
"""
cems_states = {k: v for k, v in us_states.items() if v not in
{'Alaska',
'American Samoa',
'Guam',
'Hawaii',
'Northern Mariana Islands',
'National',
'Puerto Rico',
'Virgin Islands'}
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values) that are present in the CEMS dataset
"""
# This is imperfect for states that have split timezones. See:
# https://en.wikipedia.org/wiki/List_of_time_offsets_by_U.S._state_and_territory
# For states that are split, I went with where there seem to be more people
# List of timezones in pytz.common_timezones
# Canada: https://en.wikipedia.org/wiki/Time_in_Canada#IANA_time_zone_database
state_tz_approx = {
"AK": "US/Alaska", # Alaska; Not in CEMS
"AL": "US/Central", # Alabama
"AR": "US/Central", # Arkansas
"AS": "Pacific/Pago_Pago", # American Samoa; Not in CEMS
"AZ": "US/Arizona", # Arizona
"CA": "US/Pacific", # California
"CO": "US/Mountain", # Colorado
"CT": "US/Eastern", # Connecticut
"DC": "US/Eastern", # District of Columbia
"DE": "US/Eastern", # Delaware
"FL": "US/Eastern", # Florida (split state)
"GA": "US/Eastern", # Georgia
"GU": "Pacific/Guam", # Guam; Not in CEMS
"HI": "US/Hawaii", # Hawaii; Not in CEMS
"IA": "US/Central", # Iowa
"ID": "US/Mountain", # Idaho (split state)
"IL": "US/Central", # Illinois
"IN": "US/Eastern", # Indiana (split state)
"KS": "US/Central", # Kansas (split state)
"KY": "US/Eastern", # Kentucky (split state)
"LA": "US/Central", # Louisiana
"MA": "US/Eastern", # Massachusetts
"MD": "US/Eastern", # Maryland
"ME": "US/Eastern", # Maine
"MI": "America/Detroit", # Michigan (split state)
"MN": "US/Central", # Minnesota
"MO": "US/Central", # Missouri
"MP": "Pacific/Saipan", # Northern Mariana Islands; Not in CEMS
"MS": "US/Central", # Mississippi
"MT": "US/Mountain", # Montana
"NC": "US/Eastern", # North Carolina
"ND": "US/Central", # North Dakota (split state)
"NE": "US/Central", # Nebraska (split state)
"NH": "US/Eastern", # New Hampshire
"NJ": "US/Eastern", # New Jersey
"NM": "US/Mountain", # New Mexico
"NV": "US/Pacific", # Nevada
"NY": "US/Eastern", # New York
"OH": "US/Eastern", # Ohio
"OK": "US/Central", # Oklahoma
"OR": "US/Pacific", # Oregon (split state)
"PA": "US/Eastern", # Pennsylvania
"PR": "America/Puerto_Rico", # Puerto Rico; Not in CEMS
"RI": "US/Eastern", # Rhode Island
"SC": "US/Eastern", # South Carolina
"SD": "US/Central", # South Dakota (split state)
"TN": "US/Central", # Tennessee
"TX": "US/Central", # Texas
"UT": "US/Mountain", # Utah
"VA": "US/Eastern", # Virginia
"VI": "America/Puerto_Rico", # Virgin Islands; Not in CEMS
"VT": "US/Eastern", # Vermont
"WA": "US/Pacific", # Washington
"WI": "US/Central", # Wisconsin
"WV": "US/Eastern", # West Virginia
"WY": "US/Mountain", # Wyoming
# Canada (none of these are in CEMS)
"AB": "America/Edmonton", # Alberta
"BC": "America/Vancouver", # British Columbia (split province)
"MB": "America/Winnipeg", # Manitoba
"NB": "America/Moncton", # New Brunswick
"NS": "America/Halifax", # Nova Scotia
"NL": "America/St_Johns", # Newfoundland and Labrador (split province)
"NT": "America/Yellowknife", # Northwest Territories (split province)
"NU": "America/Iqaluit", # Nunavut (split province)
"ON": "America/Toronto", # Ontario (split province)
"PE": "America/Halifax", # Prince Edwards Island
"QC": "America/Montreal", # Quebec (split province)
"SK": "America/Regina", # Saskatchewan (split province)
"YT": "America/Whitehorse", # Yukon Territory
}
"""dict: A dictionary containing US and Canadian state/territory abbreviations
(keys) and timezones (values)
"""
ferc1_power_purchase_type = {
'RQ': 'requirement',
'LF': 'long_firm',
'IF': 'intermediate_firm',
'SF': 'short_firm',
'LU': 'long_unit',
'IU': 'intermediate_unit',
'EX': 'electricity_exchange',
'OS': 'other_service',
'AD': 'adjustment'
}
"""dict: A dictionary of abbreviations (keys) and types (values) for power
purchase agreements from FERC Form 1.
"""
# Dictionary mapping DBF files (w/o .DBF file extension) to DB table names
ferc1_dbf2tbl = {
'F1_1': 'f1_respondent_id',
'F1_2': 'f1_acb_epda',
'F1_3': 'f1_accumdepr_prvsn',
'F1_4': 'f1_accumdfrrdtaxcr',
'F1_5': 'f1_adit_190_detail',
'F1_6': 'f1_adit_190_notes',
'F1_7': 'f1_adit_amrt_prop',
'F1_8': 'f1_adit_other',
'F1_9': 'f1_adit_other_prop',
'F1_10': 'f1_allowances',
'F1_11': 'f1_bal_sheet_cr',
'F1_12': 'f1_capital_stock',
'F1_13': 'f1_cash_flow',
'F1_14': 'f1_cmmn_utlty_p_e',
'F1_15': 'f1_comp_balance_db',
'F1_16': 'f1_construction',
'F1_17': 'f1_control_respdnt',
'F1_18': 'f1_co_directors',
'F1_19': 'f1_cptl_stk_expns',
'F1_20': 'f1_csscslc_pcsircs',
'F1_21': 'f1_dacs_epda',
'F1_22': 'f1_dscnt_cptl_stk',
'F1_23': 'f1_edcfu_epda',
'F1_24': 'f1_elctrc_erg_acct',
'F1_25': 'f1_elctrc_oper_rev',
'F1_26': 'f1_elc_oper_rev_nb',
'F1_27': 'f1_elc_op_mnt_expn',
'F1_28': 'f1_electric',
'F1_29': 'f1_envrnmntl_expns',
'F1_30': 'f1_envrnmntl_fclty',
'F1_31': 'f1_fuel',
'F1_32': 'f1_general_info',
'F1_33': 'f1_gnrt_plant',
'F1_34': 'f1_important_chg',
'F1_35': 'f1_incm_stmnt_2',
'F1_36': 'f1_income_stmnt',
'F1_37': 'f1_miscgen_expnelc',
'F1_38': 'f1_misc_dfrrd_dr',
'F1_39': 'f1_mthly_peak_otpt',
'F1_40': 'f1_mtrl_spply',
'F1_41': 'f1_nbr_elc_deptemp',
'F1_42': 'f1_nonutility_prop',
'F1_43': 'f1_note_fin_stmnt', # 37% of DB
'F1_44': 'f1_nuclear_fuel',
'F1_45': 'f1_officers_co',
'F1_46': 'f1_othr_dfrrd_cr',
'F1_47': 'f1_othr_pd_in_cptl',
'F1_48': 'f1_othr_reg_assets',
'F1_49': 'f1_othr_reg_liab',
'F1_50': 'f1_overhead',
'F1_51': 'f1_pccidica',
'F1_52': 'f1_plant_in_srvce',
'F1_53': 'f1_pumped_storage',
'F1_54': 'f1_purchased_pwr',
'F1_55': 'f1_reconrpt_netinc',
'F1_56': 'f1_reg_comm_expn',
'F1_57': 'f1_respdnt_control',
'F1_58': 'f1_retained_erng',
'F1_59': 'f1_r_d_demo_actvty',
'F1_60': 'f1_sales_by_sched',
'F1_61': 'f1_sale_for_resale',
'F1_62': 'f1_sbsdry_totals',
'F1_63': 'f1_schedules_list',
'F1_64': 'f1_security_holder',
'F1_65': 'f1_slry_wg_dstrbtn',
'F1_66': 'f1_substations',
'F1_67': 'f1_taxacc_ppchrgyr',
'F1_68': 'f1_unrcvrd_cost',
'F1_69': 'f1_utltyplnt_smmry',
'F1_70': 'f1_work',
'F1_71': 'f1_xmssn_adds',
'F1_72': 'f1_xmssn_elc_bothr',
'F1_73': 'f1_xmssn_elc_fothr',
'F1_74': 'f1_xmssn_line',
'F1_75': 'f1_xtraordnry_loss',
'F1_76': 'f1_codes_val',
'F1_77': 'f1_sched_lit_tbl',
'F1_78': 'f1_audit_log',
'F1_79': 'f1_col_lit_tbl',
'F1_80': 'f1_load_file_names',
'F1_81': 'f1_privilege',
'F1_82': 'f1_sys_error_log',
'F1_83': 'f1_unique_num_val',
'F1_84': 'f1_row_lit_tbl',
'F1_85': 'f1_footnote_data',
'F1_86': 'f1_hydro',
'F1_87': 'f1_footnote_tbl', # 52% of DB
'F1_88': 'f1_ident_attsttn',
'F1_89': 'f1_steam',
'F1_90': 'f1_leased',
'F1_91': 'f1_sbsdry_detail',
'F1_92': 'f1_plant',
'F1_93': 'f1_long_term_debt',
'F1_106_2009': 'f1_106_2009',
'F1_106A_2009': 'f1_106a_2009',
'F1_106B_2009': 'f1_106b_2009',
'F1_208_ELC_DEP': 'f1_208_elc_dep',
'F1_231_TRN_STDYCST': 'f1_231_trn_stdycst',
'F1_324_ELC_EXPNS': 'f1_324_elc_expns',
'F1_325_ELC_CUST': 'f1_325_elc_cust',
'F1_331_TRANSISO': 'f1_331_transiso',
'F1_338_DEP_DEPL': 'f1_338_dep_depl',
'F1_397_ISORTO_STL': 'f1_397_isorto_stl',
'F1_398_ANCL_PS': 'f1_398_ancl_ps',
'F1_399_MTH_PEAK': 'f1_399_mth_peak',
'F1_400_SYS_PEAK': 'f1_400_sys_peak',
'F1_400A_ISO_PEAK': 'f1_400a_iso_peak',
'F1_429_TRANS_AFF': 'f1_429_trans_aff',
'F1_ALLOWANCES_NOX': 'f1_allowances_nox',
'F1_CMPINC_HEDGE_A': 'f1_cmpinc_hedge_a',
'F1_CMPINC_HEDGE': 'f1_cmpinc_hedge',
'F1_EMAIL': 'f1_email',
'F1_RG_TRN_SRV_REV': 'f1_rg_trn_srv_rev',
'F1_S0_CHECKS': 'f1_s0_checks',
'F1_S0_FILING_LOG': 'f1_s0_filing_log',
'F1_SECURITY': 'f1_security'
# 'F1_PINS': 'f1_pins', # private data, not publicized.
# 'F1_FREEZE': 'f1_freeze', # private data, not publicized
}
"""dict: A dictionary mapping FERC Form 1 DBF files(w / o .DBF file extension)
(keys) to database table names (values).
"""
ferc1_huge_tables = {
'f1_footnote_tbl',
'f1_footnote_data',
'f1_note_fin_stmnt',
}
"""set: A set containing large FERC Form 1 tables.
"""
# Invert the map above so we can go either way as needed
ferc1_tbl2dbf = {v: k for k, v in ferc1_dbf2tbl.items()}
"""dict: A dictionary mapping database table names (keys) to FERC Form 1 DBF
files(w / o .DBF file extension) (values).
"""
# This dictionary maps the strings which are used to denote field types in the
# DBF objects to the corresponding generic SQLAlchemy Column types:
# These definitions come from a combination of the dbfread example program
# dbf2sqlite and this DBF file format documentation page:
# http://www.dbase.com/KnowledgeBase/int/db7_file_fmt.htm
# Un-mapped types are left as 'XXX', which will result in an error if they are
# ever encountered.
dbf_typemap = {
'C': sa.String,
'D': sa.Date,
'F': sa.Float,
'I': sa.Integer,
'L': sa.Boolean,
'M': sa.Text, # 10 digit .DBT block number, stored as a string...
'N': sa.Float,
'T': sa.DateTime,
'0': sa.Integer, # based on dbf2sqlite mapping
'B': 'XXX', # .DBT block number, binary string
'@': 'XXX', # Timestamp... Date = Julian Day, Time is in milliseconds?
'+': 'XXX', # Autoincrement (e.g. for IDs)
'O': 'XXX', # Double, 8 bytes
'G': 'XXX', # OLE 10 digit/byte number of a .DBT block, stored as string
}
"""dict: A dictionary mapping field types in the DBF objects (keys) to the
corresponding generic SQLAlchemy Column types.
"""
# This is the set of tables which have been successfully integrated into PUDL:
ferc1_pudl_tables = (
'fuel_ferc1', # Plant-level data, linked to plants_steam_ferc1
'plants_steam_ferc1', # Plant-level data
'plants_small_ferc1', # Plant-level data
'plants_hydro_ferc1', # Plant-level data
'plants_pumped_storage_ferc1', # Plant-level data
'purchased_power_ferc1', # Inter-utility electricity transactions
'plant_in_service_ferc1', # Row-mapped plant accounting data.
# 'accumulated_depreciation_ferc1' # Requires row-mapping to be useful.
)
"""tuple: A tuple containing the FERC Form 1 tables that can be successfully
integrated into PUDL.
"""
table_map_ferc1_pudl = {
'fuel_ferc1': 'f1_fuel',
'plants_steam_ferc1': 'f1_steam',
'plants_small_ferc1': 'f1_gnrt_plant',
'plants_hydro_ferc1': 'f1_hydro',
'plants_pumped_storage_ferc1': 'f1_pumped_storage',
'plant_in_service_ferc1': 'f1_plant_in_srvce',
'purchased_power_ferc1': 'f1_purchased_pwr',
# 'accumulated_depreciation_ferc1': 'f1_accumdepr_prvsn'
}
"""dict: A dictionary mapping PUDL table names (keys) to the corresponding FERC
Form 1 DBF table names.
"""
# This is the list of EIA923 tables that can be successfully pulled into PUDL
eia923_pudl_tables = ('generation_fuel_eia923',
'boiler_fuel_eia923',
'generation_eia923',
'coalmine_eia923',
'fuel_receipts_costs_eia923')
"""tuple: A tuple containing the EIA923 tables that can be successfully
integrated into PUDL.
"""
epaipm_pudl_tables = (
'transmission_single_epaipm',
'transmission_joint_epaipm',
'load_curves_epaipm',
'plant_region_map_epaipm',
)
"""tuple: A tuple containing the EPA IPM tables that can be successfully
integrated into PUDL.
"""
# List of entity tables
entity_tables = ['utilities_entity_eia',
'plants_entity_eia',
'generators_entity_eia',
'boilers_entity_eia',
'regions_entity_epaipm', ]
"""list: A list of PUDL entity tables.
"""
xlsx_maps_pkg = 'pudl.package_data.meta.xlsx_maps'
"""string: The location of the xlsx maps within the PUDL package data."""
##############################################################################
# EIA 923 Spreadsheet Metadata
##############################################################################
##############################################################################
# EIA 860 Spreadsheet Metadata
##############################################################################
# This is the list of EIA860 tables that can be successfully pulled into PUDL
eia860_pudl_tables = (
'boiler_generator_assn_eia860',
'utilities_eia860',
'plants_eia860',
'generators_eia860',
'ownership_eia860'
)
"""tuple: A tuple enumerating EIA 860 tables for which PUDL's ETL works."""
# The set of FERC Form 1 tables that have the same composite primary keys: [
# respondent_id, report_year, report_prd, row_number, spplmnt_num ].
# TODO: THIS ONLY PERTAINS TO 2015 AND MAY NEED TO BE ADJUSTED BY YEAR...
ferc1_data_tables = (
'f1_acb_epda', 'f1_accumdepr_prvsn', 'f1_accumdfrrdtaxcr',
'f1_adit_190_detail', 'f1_adit_190_notes', 'f1_adit_amrt_prop',
'f1_adit_other', 'f1_adit_other_prop', 'f1_allowances', 'f1_bal_sheet_cr',
'f1_capital_stock', 'f1_cash_flow', 'f1_cmmn_utlty_p_e',
'f1_comp_balance_db', 'f1_construction', 'f1_control_respdnt',
'f1_co_directors', 'f1_cptl_stk_expns', 'f1_csscslc_pcsircs',
'f1_dacs_epda', 'f1_dscnt_cptl_stk', 'f1_edcfu_epda', 'f1_elctrc_erg_acct',
'f1_elctrc_oper_rev', 'f1_elc_oper_rev_nb', 'f1_elc_op_mnt_expn',
'f1_electric', 'f1_envrnmntl_expns', 'f1_envrnmntl_fclty', 'f1_fuel',
'f1_general_info', 'f1_gnrt_plant', 'f1_important_chg', 'f1_incm_stmnt_2',
'f1_income_stmnt', 'f1_miscgen_expnelc', 'f1_misc_dfrrd_dr',
'f1_mthly_peak_otpt', 'f1_mtrl_spply', 'f1_nbr_elc_deptemp',
'f1_nonutility_prop', 'f1_note_fin_stmnt', 'f1_nuclear_fuel',
'f1_officers_co', 'f1_othr_dfrrd_cr', 'f1_othr_pd_in_cptl',
'f1_othr_reg_assets', 'f1_othr_reg_liab', 'f1_overhead', 'f1_pccidica',
'f1_plant_in_srvce', 'f1_pumped_storage', 'f1_purchased_pwr',
'f1_reconrpt_netinc', 'f1_reg_comm_expn', 'f1_respdnt_control',
'f1_retained_erng', 'f1_r_d_demo_actvty', 'f1_sales_by_sched',
'f1_sale_for_resale', 'f1_sbsdry_totals', 'f1_schedules_list',
'f1_security_holder', 'f1_slry_wg_dstrbtn', 'f1_substations',
'f1_taxacc_ppchrgyr', 'f1_unrcvrd_cost', 'f1_utltyplnt_smmry', 'f1_work',
'f1_xmssn_adds', 'f1_xmssn_elc_bothr', 'f1_xmssn_elc_fothr',
'f1_xmssn_line', 'f1_xtraordnry_loss',
'f1_hydro', 'f1_steam', 'f1_leased', 'f1_sbsdry_detail',
'f1_plant', 'f1_long_term_debt', 'f1_106_2009', 'f1_106a_2009',
'f1_106b_2009', 'f1_208_elc_dep', 'f1_231_trn_stdycst', 'f1_324_elc_expns',
'f1_325_elc_cust', 'f1_331_transiso', 'f1_338_dep_depl',
'f1_397_isorto_stl', 'f1_398_ancl_ps', 'f1_399_mth_peak',
'f1_400_sys_peak', 'f1_400a_iso_peak', 'f1_429_trans_aff',
'f1_allowances_nox', 'f1_cmpinc_hedge_a', 'f1_cmpinc_hedge',
'f1_rg_trn_srv_rev')
"""tuple: A tuple containing the FERC Form 1 tables that have the same composite
primary keys: [respondent_id, report_year, report_prd, row_number,
spplmnt_num].
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 pages 204-207, Electric Plant in Service.
# Descriptions from: https://www.law.cornell.edu/cfr/text/18/part-101
ferc_electric_plant_accounts = pd.DataFrame.from_records([
# 1. Intangible Plant
(2, '301', 'Intangible: Organization'),
(3, '302', 'Intangible: Franchises and consents'),
(4, '303', 'Intangible: Miscellaneous intangible plant'),
(5, 'subtotal_intangible', 'Subtotal: Intangible Plant'),
# 2. Production Plant
# A. steam production
(8, '310', 'Steam production: Land and land rights'),
(9, '311', 'Steam production: Structures and improvements'),
(10, '312', 'Steam production: Boiler plant equipment'),
(11, '313', 'Steam production: Engines and engine-driven generators'),
(12, '314', 'Steam production: Turbogenerator units'),
(13, '315', 'Steam production: Accessory electric equipment'),
(14, '316', 'Steam production: Miscellaneous power plant equipment'),
(15, '317', 'Steam production: Asset retirement costs for steam production\
plant'),
(16, 'subtotal_steam_production', 'Subtotal: Steam Production Plant'),
# B. nuclear production
(18, '320', 'Nuclear production: Land and land rights (Major only)'),
(19, '321', 'Nuclear production: Structures and improvements (Major\
only)'),
(20, '322', 'Nuclear production: Reactor plant equipment (Major only)'),
(21, '323', 'Nuclear production: Turbogenerator units (Major only)'),
(22, '324', 'Nuclear production: Accessory electric equipment (Major\
only)'),
(23, '325', 'Nuclear production: Miscellaneous power plant equipment\
(Major only)'),
(24, '326', 'Nuclear production: Asset retirement costs for nuclear\
production plant (Major only)'),
(25, 'subtotal_nuclear_produciton', 'Subtotal: Nuclear Production Plant'),
# C. hydraulic production
(27, '330', 'Hydraulic production: Land and land rights'),
(28, '331', 'Hydraulic production: Structures and improvements'),
(29, '332', 'Hydraulic production: Reservoirs, dams, and waterways'),
(30, '333', 'Hydraulic production: Water wheels, turbines and generators'),
(31, '334', 'Hydraulic production: Accessory electric equipment'),
(32, '335', 'Hydraulic production: Miscellaneous power plant equipment'),
(33, '336', 'Hydraulic production: Roads, railroads and bridges'),
(34, '337', 'Hydraulic production: Asset retirement costs for hydraulic\
production plant'),
(35, 'subtotal_hydraulic_production', 'Subtotal: Hydraulic Production\
Plant'),
# D. other production
(37, '340', 'Other production: Land and land rights'),
(38, '341', 'Other production: Structures and improvements'),
(39, '342', 'Other production: Fuel holders, producers, and accessories'),
(40, '343', 'Other production: Prime movers'),
(41, '344', 'Other production: Generators'),
(42, '345', 'Other production: Accessory electric equipment'),
(43, '346', 'Other production: Miscellaneous power plant equipment'),
(44, '347', 'Other production: Asset retirement costs for other production\
plant'),
(None, '348', 'Other production: Energy Storage Equipment'),
(45, 'subtotal_other_production', 'Subtotal: Other Production Plant'),
(46, 'subtotal_production', 'Subtotal: Production Plant'),
# 3. Transmission Plant,
(48, '350', 'Transmission: Land and land rights'),
(None, '351', 'Transmission: Energy Storage Equipment'),
(49, '352', 'Transmission: Structures and improvements'),
(50, '353', 'Transmission: Station equipment'),
(51, '354', 'Transmission: Towers and fixtures'),
(52, '355', 'Transmission: Poles and fixtures'),
(53, '356', 'Transmission: Overhead conductors and devices'),
(54, '357', 'Transmission: Underground conduit'),
(55, '358', 'Transmission: Underground conductors and devices'),
(56, '359', 'Transmission: Roads and trails'),
(57, '359.1', 'Transmission: Asset retirement costs for transmission\
plant'),
(58, 'subtotal_transmission', 'Subtotal: Transmission Plant'),
# 4. Distribution Plant
(60, '360', 'Distribution: Land and land rights'),
(61, '361', 'Distribution: Structures and improvements'),
(62, '362', 'Distribution: Station equipment'),
(63, '363', 'Distribution: Storage battery equipment'),
(64, '364', 'Distribution: Poles, towers and fixtures'),
(65, '365', 'Distribution: Overhead conductors and devices'),
(66, '366', 'Distribution: Underground conduit'),
(67, '367', 'Distribution: Underground conductors and devices'),
(68, '368', 'Distribution: Line transformers'),
(69, '369', 'Distribution: Services'),
(70, '370', 'Distribution: Meters'),
(71, '371', 'Distribution: Installations on customers\' premises'),
(72, '372', 'Distribution: Leased property on customers\' premises'),
(73, '373', 'Distribution: Street lighting and signal systems'),
(74, '374', 'Distribution: Asset retirement costs for distribution plant'),
(75, 'subtotal_distribution', 'Subtotal: Distribution Plant'),
# 5. Regional Transmission and Market Operation Plant
(77, '380', 'Regional transmission: Land and land rights'),
(78, '381', 'Regional transmission: Structures and improvements'),
(79, '382', 'Regional transmission: Computer hardware'),
(80, '383', 'Regional transmission: Computer software'),
(81, '384', 'Regional transmission: Communication Equipment'),
(82, '385', 'Regional transmission: Miscellaneous Regional Transmission\
and Market Operation Plant'),
(83, '386', 'Regional transmission: Asset Retirement Costs for Regional\
Transmission and Market Operation\
Plant'),
(84, 'subtotal_regional_transmission', 'Subtotal: Transmission and Market\
Operation Plant'),
(None, '387', 'Regional transmission: [Reserved]'),
# 6. General Plant
(86, '389', 'General: Land and land rights'),
(87, '390', 'General: Structures and improvements'),
(88, '391', 'General: Office furniture and equipment'),
(89, '392', 'General: Transportation equipment'),
(90, '393', 'General: Stores equipment'),
(91, '394', 'General: Tools, shop and garage equipment'),
(92, '395', 'General: Laboratory equipment'),
(93, '396', 'General: Power operated equipment'),
(94, '397', 'General: Communication equipment'),
(95, '398', 'General: Miscellaneous equipment'),
(96, 'subtotal_general', 'Subtotal: General Plant'),
(97, '399', 'General: Other tangible property'),
(98, '399.1', 'General: Asset retirement costs for general plant'),
(99, 'total_general', 'TOTAL General Plant'),
(100, '101_and_106', 'Electric plant in service (Major only)'),
(101, '102_purchased', 'Electric plant purchased'),
(102, '102_sold', 'Electric plant sold'),
(103, '103', 'Experimental plant unclassified'),
(104, 'total_electric_plant', 'TOTAL Electric Plant in Service')],
columns=['row_number', 'ferc_account_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 pages 204 - 207, Electric Plant in
Service.
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 page 219, ACCUMULATED PROVISION FOR DEPRECIATION
# OF ELECTRIC UTILITY PLANT (Account 108).
ferc_accumulated_depreciation = pd.DataFrame.from_records([
# Section A. Balances and Changes During Year
(1, 'balance_beginning_of_year', 'Balance Beginning of Year'),
(3, 'depreciation_expense', '(403) Depreciation Expense'),
(4, 'depreciation_expense_asset_retirement', \
'(403.1) Depreciation Expense for Asset Retirement Costs'),
(5, 'expense_electric_plant_leased_to_others', \
'(413) Exp. of Elec. Plt. Leas. to Others'),
(6, 'transportation_expenses_clearing',\
'Transportation Expenses-Clearing'),
(7, 'other_clearing_accounts', 'Other Clearing Accounts'),
(8, 'other_accounts_specified',\
'Other Accounts (Specify, details in footnote):'),
# blank: might also be other charges like line 17.
(9, 'other_charges', 'Other Charges:'),
(10, 'total_depreciation_provision_for_year',\
'TOTAL Deprec. Prov for Year (Enter Total of lines 3 thru 9)'),
(11, 'net_charges_for_plant_retired', 'Net Charges for Plant Retired:'),
(12, 'book_cost_of_plant_retired', 'Book Cost of Plant Retired'),
(13, 'cost_of_removal', 'Cost of Removal'),
(14, 'salvage_credit', 'Salvage (Credit)'),
(15, 'total_net_charges_for_plant_retired',\
'TOTAL Net Chrgs. for Plant Ret. (Enter Total of lines 12 thru 14)'),
(16, 'other_debit_or_credit_items',\
'Other Debit or Cr. Items (Describe, details in footnote):'),
# blank: can be "Other Charges", e.g. in 2012 for PSCo.
(17, 'other_charges_2', 'Other Charges 2'),
(18, 'book_cost_or_asset_retirement_costs_retired',\
'Book Cost or Asset Retirement Costs Retired'),
(19, 'balance_end_of_year', \
'Balance End of Year (Enter Totals of lines 1, 10, 15, 16, and 18)'),
# Section B. Balances at End of Year According to Functional Classification
(20, 'steam_production_end_of_year', 'Steam Production'),
(21, 'nuclear_production_end_of_year', 'Nuclear Production'),
(22, 'hydraulic_production_end_of_year',\
'Hydraulic Production-Conventional'),
(23, 'pumped_storage_end_of_year', 'Hydraulic Production-Pumped Storage'),
(24, 'other_production', 'Other Production'),
(25, 'transmission', 'Transmission'),
(26, 'distribution', 'Distribution'),
(27, 'regional_transmission_and_market_operation',
'Regional Transmission and Market Operation'),
(28, 'general', 'General'),
(29, 'total', 'TOTAL (Enter Total of lines 20 thru 28)')],
columns=['row_number', 'line_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 page 219, Accumulated Provision for
Depreciation of electric utility plant(Account 108).
"""
######################################################################
# Constants from EIA Form 923 used within the init.py module
######################################################################
# From Page 7 of EIA Form 923: the Census Region in which a US state is located
census_region = {
'NEW': 'New England',
'MAT': 'Middle Atlantic',
'SAT': 'South Atlantic',
'ESC': 'East South Central',
'WSC': 'West South Central',
'ENC': 'East North Central',
'WNC': 'West North Central',
'MTN': 'Mountain',
'PACC': 'Pacific Contiguous (OR, WA, CA)',
'PACN': 'Pacific Non-Contiguous (AK, HI)',
}
"""dict: A dictionary mapping Census Region abbreviations (keys) to Census
Region names (values).
"""
# From Page 7 of EIA Form923
# Static list of NERC (North American Electric Reliability Corporation)
# regions, used for where plant is located
nerc_region = {
'NPCC': 'Northeast Power Coordinating Council',
'ASCC': 'Alaska Systems Coordinating Council',
'HICC': 'Hawaiian Islands Coordinating Council',
'MRO': 'Midwest Reliability Organization',
'SERC': 'SERC Reliability Corporation',
'RFC': 'Reliability First Corporation',
'SPP': 'Southwest Power Pool',
'TRE': 'Texas Regional Entity',
'FRCC': 'Florida Reliability Coordinating Council',
'WECC': 'Western Electricity Coordinating Council'
}
"""dict: A dictionary mapping NERC Region abbreviations (keys) to NERC
Region names (values).
"""
# From Page 7 of EIA Form 923: EIA's internal consolidated NAICS sectors.
# For internal purposes, EIA consolidates NAICS categories into seven groups.
sector_eia = {
# traditional regulated electric utilities
'1': 'Electric Utility',
# Independent power producers which are not cogenerators
'2': 'NAICS-22 Non-Cogen',
# Independent power producers which are cogenerators, but whose
# primary business purpose is the sale of electricity to the public
'3': 'NAICS-22 Cogen',
# Commercial non-cogeneration facilities that produce electric power,
# are connected to the grid, and can sell power to the public
'4': 'Commercial NAICS Non-Cogen',
# Commercial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'5': 'Commercial NAICS Cogen',
# Industrial non-cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'6': 'Industrial NAICS Non-Cogen',
# Industrial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'7': 'Industrial NAICS Cogen'
}
"""dict: A dictionary mapping EIA numeric codes (keys) to EIA’s internal
consolidated NAICS sectors (values).
"""
# EIA 923: EIA Type of prime mover:
prime_movers_eia923 = {
'BA': 'Energy Storage, Battery',
'BT': 'Turbines Used in a Binary Cycle. Including those used for geothermal applications',
'CA': 'Combined-Cycle -- Steam Part',
'CC': 'Combined-Cycle, Total Unit',
'CE': 'Energy Storage, Compressed Air',
'CP': 'Energy Storage, Concentrated Solar Power',
'CS': 'Combined-Cycle Single-Shaft Combustion Turbine and Steam Turbine share of single',
'CT': 'Combined-Cycle Combustion Turbine Part',
'ES': 'Energy Storage, Other (Specify on Schedule 9, Comments)',
'FC': 'Fuel Cell',
'FW': 'Energy Storage, Flywheel',
'GT': 'Combustion (Gas) Turbine. Including Jet Engine design',
'HA': 'Hydrokinetic, Axial Flow Turbine',
'HB': 'Hydrokinetic, Wave Buoy',
'HK': 'Hydrokinetic, Other',
'HY': 'Hydraulic Turbine. Including turbines associated with delivery of water by pipeline.',
'IC': 'Internal Combustion (diesel, piston, reciprocating) Engine',
'PS': 'Energy Storage, Reversible Hydraulic Turbine (Pumped Storage)',
'OT': 'Other',
'ST': 'Steam Turbine. Including Nuclear, Geothermal, and Solar Steam (does not include Combined Cycle).',
'PV': 'Photovoltaic',
'WT': 'Wind Turbine, Onshore',
'WS': 'Wind Turbine, Offshore'
}
"""dict: A dictionary mapping EIA 923 prime mover codes (keys) and prime mover
names / descriptions (values).
"""
# EIA 923: The fuel code reported to EIA. Two or three letter alphanumeric:
fuel_type_eia923 = {
'AB': 'Agricultural By-Products',
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BIT': 'Bituminous Coal',
'BLQ': 'Black Liquor',
'CBL': 'Coal, Blended',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'GEO': 'Geothermal',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LFG': 'Landfill Gas',
'LIG': 'Lignite Coal',
'MSB': 'Biogenic Municipal Solid Waste',
'MSN': 'Non-biogenic Municipal Solid Waste',
'MSW': 'Municipal Solid Waste',
'MWH': 'Electricity used for energy storage',
'NG': 'Natural Gas',
'NUC': 'Nuclear. Including Uranium, Plutonium, and Thorium.',
'OBG': 'Other Biomass Gas. Including digester gas, methane, and other biomass gases.',
'OBL': 'Other Biomass Liquids',
'OBS': 'Other Biomass Solids',
'OG': 'Other Gas',
'OTH': 'Other Fuel',
'PC': 'Petroleum Coke',
'PG': 'Gaseous Propane',
'PUR': 'Purchased Steam',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SGC': 'Coal-Derived Synthesis Gas',
'SGP': 'Synthesis Gas from Petroleum Coke',
'SLW': 'Sludge Waste',
'SUB': 'Subbituminous Coal',
'SUN': 'Solar',
'TDF': 'Tire-derived Fuels',
'WAT': 'Water at a Conventional Hydroelectric Turbine and water used in Wave Buoy Hydrokinetic Technology, current Hydrokinetic Technology, Tidal Hydrokinetic Technology, and Pumping Energy for Reversible (Pumped Storage) Hydroelectric Turbines.',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
'WDL': 'Wood Waste Liquids, excluding Black Liquor. Including red liquor, sludge wood, spent sulfite liquor, and other wood-based liquids.',
'WDS': 'Wood/Wood Waste Solids. Including paper pellets, railroad ties, utility poles, wood chips, bark, and other wood waste solids.',
'WH': 'Waste Heat not directly attributed to a fuel source',
'WND': 'Wind',
'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.'
}
"""dict: A dictionary mapping EIA 923 fuel type codes (keys) and fuel type
names / descriptions (values).
"""
# Fuel type strings for EIA 923 generator fuel table
fuel_type_eia923_gen_fuel_coal_strings = [
'ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with coal fuel.
"""
fuel_type_eia923_gen_fuel_oil_strings = [
'dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: The list of EIA 923 Generation Fuel strings associated with oil fuel.
"""
fuel_type_eia923_gen_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: The list of EIA 923 Generation Fuel strings associated with gas fuel.
"""
fuel_type_eia923_gen_fuel_solar_strings = ['sun', ]
"""list: The list of EIA 923 Generation Fuel strings associated with solar
power.
"""
fuel_type_eia923_gen_fuel_wind_strings = ['wnd', ]
"""list: The list of EIA 923 Generation Fuel strings associated with wind
power.
"""
fuel_type_eia923_gen_fuel_hydro_strings = ['wat', ]
"""list: The list of EIA 923 Generation Fuel strings associated with hydro
power.
"""
fuel_type_eia923_gen_fuel_nuclear_strings = ['nuc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with nuclear
power.
"""
fuel_type_eia923_gen_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'msw', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds']
"""list: The list of EIA 923 Generation Fuel strings associated with solid waste
fuel.
"""
fuel_type_eia923_gen_fuel_other_strings = ['geo', 'mwh', 'oth', 'pur', 'wh', ]
"""list: The list of EIA 923 Generation Fuel strings associated with geothermal
power.
"""
fuel_type_eia923_gen_fuel_simple_map = {
'coal': fuel_type_eia923_gen_fuel_coal_strings,
'oil': fuel_type_eia923_gen_fuel_oil_strings,
'gas': fuel_type_eia923_gen_fuel_gas_strings,
'solar': fuel_type_eia923_gen_fuel_solar_strings,
'wind': fuel_type_eia923_gen_fuel_wind_strings,
'hydro': fuel_type_eia923_gen_fuel_hydro_strings,
'nuclear': fuel_type_eia923_gen_fuel_nuclear_strings,
'waste': fuel_type_eia923_gen_fuel_waste_strings,
'other': fuel_type_eia923_gen_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Generation Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# Fuel type strings for EIA 923 boiler fuel table
fuel_type_eia923_boiler_fuel_coal_strings = [
'ant', 'bit', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
coal.
"""
fuel_type_eia923_boiler_fuel_oil_strings = ['dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
oil.
"""
fuel_type_eia923_boiler_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
gas.
"""
fuel_type_eia923_boiler_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
waste.
"""
fuel_type_eia923_boiler_fuel_other_strings = ['oth', 'pur', 'wh', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
other.
"""
fuel_type_eia923_boiler_fuel_simple_map = {
'coal': fuel_type_eia923_boiler_fuel_coal_strings,
'oil': fuel_type_eia923_boiler_fuel_oil_strings,
'gas': fuel_type_eia923_boiler_fuel_gas_strings,
'waste': fuel_type_eia923_boiler_fuel_waste_strings,
'other': fuel_type_eia923_boiler_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Boiler Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# PUDL consolidation of EIA923 AER fuel type strings into same categories as
# 'energy_source_eia923' plus additional renewable and nuclear categories.
# These classifications are not currently used, as the EIA fuel type and energy
# source designations provide more detailed information.
aer_coal_strings = ['col', 'woc', 'pc']
"""list: A list of EIA 923 AER fuel type strings associated with coal.
"""
aer_gas_strings = ['mlg', 'ng', 'oog']
"""list: A list of EIA 923 AER fuel type strings associated with gas.
"""
aer_oil_strings = ['dfo', 'rfo', 'woo']
"""list: A list of EIA 923 AER fuel type strings associated with oil.
"""
aer_solar_strings = ['sun']
"""list: A list of EIA 923 AER fuel type strings associated with solar power.
"""
aer_wind_strings = ['wnd']
"""list: A list of EIA 923 AER fuel type strings associated with wind power.
"""
aer_hydro_strings = ['hps', 'hyc']
"""list: A list of EIA 923 AER fuel type strings associated with hydro power.
"""
aer_nuclear_strings = ['nuc']
"""list: A list of EIA 923 AER fuel type strings associated with nuclear power.
"""
aer_waste_strings = ['www']
"""list: A list of EIA 923 AER fuel type strings associated with waste.
"""
aer_other_strings = ['geo', 'orw', 'oth']
"""list: A list of EIA 923 AER fuel type strings associated with other fuel.
"""
aer_fuel_type_strings = {
'coal': aer_coal_strings,
'gas': aer_gas_strings,
'oil': aer_oil_strings,
'solar': aer_solar_strings,
'wind': aer_wind_strings,
'hydro': aer_hydro_strings,
'nuclear': aer_nuclear_strings,
'waste': aer_waste_strings,
'other': aer_other_strings
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923: A partial aggregation of the reported fuel type codes into
# larger categories used by EIA in, for example,
# the Annual Energy Review (AER). Two or three letter alphanumeric.
# See the Fuel Code table (Table 5), below:
fuel_type_aer_eia923 = {
'SUN': 'Solar PV and thermal',
'COL': 'Coal',
'DFO': 'Distillate Petroleum',
'GEO': 'Geothermal',
'HPS': 'Hydroelectric Pumped Storage',
'HYC': 'Hydroelectric Conventional',
'MLG': 'Biogenic Municipal Solid Waste and Landfill Gas',
'NG': 'Natural Gas',
'NUC': 'Nuclear',
'OOG': 'Other Gases',
'ORW': 'Other Renewables',
'OTH': 'Other (including nonbiogenic MSW)',
'PC': 'Petroleum Coke',
'RFO': 'Residual Petroleum',
'WND': 'Wind',
'WOC': 'Waste Coal',
'WOO': 'Waste Oil',
'WWW': 'Wood and Wood Waste'
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
fuel_type_eia860_coal_strings = ['ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc',
'sub', 'wc', 'coal', 'petroleum coke', 'col',
'woc']
"""list: A list of strings from EIA 860 associated with fuel type coal.
"""
fuel_type_eia860_oil_strings = ['dfo', 'jf', 'ker', 'rfo', 'wo', 'woo',
'petroleum']
"""list: A list of strings from EIA 860 associated with fuel type oil.
"""
fuel_type_eia860_gas_strings = ['bfg', 'lfg', 'mlg', 'ng', 'obg', 'og', 'pg',
'sgc', 'sgp', 'natural gas', 'other gas',
'oog', 'sg']
"""list: A list of strings from EIA 860 associated with fuel type gas.
"""
fuel_type_eia860_solar_strings = ['sun', 'solar']
"""list: A list of strings from EIA 860 associated with solar power.
"""
fuel_type_eia860_wind_strings = ['wnd', 'wind', 'wt']
"""list: A list of strings from EIA 860 associated with wind power.
"""
fuel_type_eia860_hydro_strings = ['wat', 'hyc', 'hps', 'hydro']
"""list: A list of strings from EIA 860 associated with hydro power.
"""
fuel_type_eia860_nuclear_strings = ['nuc', 'nuclear']
"""list: A list of strings from EIA 860 associated with nuclear power.
"""
fuel_type_eia860_waste_strings = ['ab', 'blq', 'bm', 'msb', 'msn', 'obl',
'obs', 'slw', 'tdf', 'wdl', 'wds', 'biomass',
'msw', 'www']
"""list: A list of strings from EIA 860 associated with fuel type waste.
"""
fuel_type_eia860_other_strings = ['mwh', 'oth', 'pur', 'wh', 'geo', 'none',
'orw', 'other']
"""list: A list of strings from EIA 860 associated with fuel type other.
"""
fuel_type_eia860_simple_map = {
'coal': fuel_type_eia860_coal_strings,
'oil': fuel_type_eia860_oil_strings,
'gas': fuel_type_eia860_gas_strings,
'solar': fuel_type_eia860_solar_strings,
'wind': fuel_type_eia860_wind_strings,
'hydro': fuel_type_eia860_hydro_strings,
'nuclear': fuel_type_eia860_nuclear_strings,
'waste': fuel_type_eia860_waste_strings,
'other': fuel_type_eia860_other_strings,
}
"""dict: A dictionary mapping EIA 860 fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923/860: Lumping of energy source categories.
energy_source_eia_simple_map = {
'coal': ['ANT', 'BIT', 'LIG', 'PC', 'SUB', 'WC', 'RC'],
'oil': ['DFO', 'JF', 'KER', 'RFO', 'WO'],
'gas': ['BFG', 'LFG', 'NG', 'OBG', 'OG', 'PG', 'SG', 'SGC', 'SGP'],
'solar': ['SUN'],
'wind': ['WND'],
'hydro': ['WAT'],
'nuclear': ['NUC'],
'waste': ['AB', 'BLQ', 'MSW', 'OBL', 'OBS', 'SLW', 'TDF', 'WDL', 'WDS'],
'other': ['GEO', 'MWH', 'OTH', 'PUR', 'WH']
}
"""dict: A dictionary mapping EIA fuel types (keys) to fuel codes (values).
"""
fuel_group_eia923_simple_map = {
'coal': ['coal', 'petroleum coke'],
'oil': ['petroleum'],
'gas': ['natural gas', 'other gas']
}
"""dict: A dictionary mapping EIA 923 simple fuel types("oil", "coal", "gas")
(keys) to fuel types (values).
"""
# EIA 923: The type of physical units fuel consumption is reported in.
# All consumption is reported in short tons for solids, thousands of cubic
# feet for gases, or barrels for liquids.
fuel_units_eia923 = {
'mcf': 'Thousands of cubic feet (for gases)',
'short_tons': 'Short tons (for solids)',
'barrels': 'Barrels (for liquids)'
}
"""dict: A dictionary mapping EIA 923 fuel units (keys) to fuel unit
descriptions (values).
"""
# EIA 923: Designates the purchase type under which receipts occurred
# in the reporting month. One or two character alphanumeric:
contract_type_eia923 = {
'C': 'Contract - Fuel received under a purchase order or contract with a term of one year or longer. Contracts with a shorter term are considered spot purchases ',
'NC': 'New Contract - Fuel received under a purchase order or contract with duration of one year or longer, under which deliveries were first made during the reporting month',
'N': 'New Contract - see NC code. This abbreviation existed only in 2008 before being replaced by NC.',
'S': 'Spot Purchase',
'T': 'Tolling Agreement – Fuel received under a tolling agreement (bartering arrangement of fuel for generation)'
}
"""dict: A dictionary mapping EIA 923 contract codes (keys) to contract
descriptions (values) for each month in the Fuel Receipts and Costs table.
"""
# EIA 923: The fuel code associated with the fuel receipt.
# Defined on Page 7 of EIA Form 923
# Two or three character alphanumeric:
energy_source_eia923 = {
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BM': 'Biomass',
'BIT': 'Bituminous Coal',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LIG': 'Lignite Coal',
'NG': 'Natural Gas',
'PC': 'Petroleum Coke',
'PG': 'Gaseous Propane',
'OG': 'Other Gas',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SG': 'Synthesis Gas from Petroleum Coke',
'SGP': 'Petroleum Coke Derived Synthesis Gas',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SUB': 'Subbituminous Coal',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.',
}
"""dict: A dictionary mapping fuel codes (keys) to fuel descriptions (values)
for each fuel receipt from the EIA 923 Fuel Receipts and Costs table.
"""
# EIA 923 Fuel Group, from Page 7 EIA Form 923
# Groups fossil fuel energy sources into fuel groups that are located in the
# Electric Power Monthly: Coal, Natural Gas, Petroleum, Petroleum Coke.
fuel_group_eia923 = (
'coal',
'natural_gas',
'petroleum',
'petroleum_coke',
'other_gas'
)
"""tuple: A tuple containing EIA 923 fuel groups.
"""
# EIA 923: Type of Coal Mine as defined on Page 7 of EIA Form 923
coalmine_type_eia923 = {
'P': 'Preparation Plant',
'S': 'Surface',
'U': 'Underground',
'US': 'Both an underground and surface mine with most coal extracted from underground',
'SU': 'Both an underground and surface mine with most coal extracted from surface',
}
"""dict: A dictionary mapping EIA 923 coal mine type codes (keys) to
descriptions (values).
"""
# EIA 923: State abbreviation related to coal mine location.
# Country abbreviations are also used in this category, but they are
# non-standard because of collisions with US state names. Instead of using
# the provided non-standard names, we convert to ISO-3166-1 three letter
# country codes https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3
coalmine_country_eia923 = {
'AU': 'AUS', # Australia
'CL': 'COL', # Colombia
'CN': 'CAN', # Canada
'IS': 'IDN', # Indonesia
'PL': 'POL', # Poland
'RS': 'RUS', # Russia
'UK': 'GBR', # United Kingdom of Great Britain
'VZ': 'VEN', # Venezuela
'OT': 'other_country',
'IM': 'unknown'
}
"""dict: A dictionary mapping coal mine country codes (keys) to ISO-3166-1 three
letter country codes (values).
"""
# EIA 923: Mode for the longest / second longest distance.
transport_modes_eia923 = {
'RR': 'Rail: Shipments of fuel moved to consumers by rail \
(private or public/commercial). Included is coal hauled to or \
away from a railroad siding by truck if the truck did not use public\
roads.',
'RV': 'River: Shipments of fuel moved to consumers via river by barge. \
Not included are shipments to Great Lakes coal loading docks, \
tidewater piers, or coastal ports.',
'GL': 'Great Lakes: Shipments of coal moved to consumers via \
the Great Lakes. These shipments are moved via the Great Lakes \
coal loading docks, which are identified by name and location as \
follows: Conneaut Coal Storage & Transfer, Conneaut, Ohio; \
NS Coal Dock (Ashtabula Coal Dock), Ashtabula, Ohio; \
Sandusky Coal Pier, Sandusky, Ohio; Toledo Docks, Toledo, Ohio; \
KCBX Terminals Inc., Chicago, Illinois; \
Superior Midwest Energy Terminal, Superior, Wisconsin',
'TP': 'Tidewater Piers and Coastal Ports: Shipments of coal moved to \
Tidewater Piers and Coastal Ports for further shipments to consumers \
via coastal water or ocean. The Tidewater Piers and Coastal Ports \
are identified by name and location as follows: Dominion Terminal \
Associates, Newport News, Virginia; McDuffie Coal Terminal, Mobile, \
Alabama; IC Railmarine Terminal, Convent, Louisiana; \
International Marine Terminals, Myrtle Grove, Louisiana; \
Cooper/T. Smith Stevedoring Co. Inc., Darrow, Louisiana; \
Seward Terminal Inc., Seward, Alaska; Los Angeles Export Terminal, \
Inc., Los Angeles, California; Levin-Richmond Terminal Corp., \
Richmond, California; Baltimore Terminal, Baltimore, Maryland; \
Norfolk Southern Lamberts Point P-6, Norfolk, Virginia; \
Chesapeake Bay Piers, Baltimore, Maryland; Pier IX Terminal Company, \
Newport News, Virginia; Electro-Coal Transport Corp., Davant, \
Louisiana',
'WT': 'Water: Shipments of fuel moved to consumers by other waterways.',
'TR': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'tr': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'TC': 'Tramway/Conveyor: Shipments of fuel moved to consumers \
by tramway or conveyor.',
'SP': 'Slurry Pipeline: Shipments of coal moved to consumers \
by slurry pipeline.',
'PL': 'Pipeline: Shipments of fuel moved to consumers by pipeline'
}
"""dict: A dictionary mapping primary and secondary transportation mode codes
(keys) to descriptions (values).
"""
# We need to include all of the columns which we want to keep for either the
# entity or annual tables. The order here matters: we need to harvest the plant
# location before harvesting the location of the utilities, for example.
entities = {
'plants': [
# base cols
['plant_id_eia'],
# static cols
['balancing_authority_code_eia', 'balancing_authority_name_eia',
'city', 'county', 'ferc_cogen_status',
'ferc_exempt_wholesale_generator', 'ferc_small_power_producer',
'grid_voltage_2_kv', 'grid_voltage_3_kv', 'grid_voltage_kv',
'iso_rto_code', 'latitude', 'longitude', 'service_area',
'plant_name_eia', 'primary_purpose_naics_id',
'sector_id', 'sector_name', 'state', 'street_address', 'zip_code'],
# annual cols
['ash_impoundment', 'ash_impoundment_lined', 'ash_impoundment_status',
'datum', 'energy_storage', 'ferc_cogen_docket_no', 'water_source',
'ferc_exempt_wholesale_generator_docket_no',
'ferc_small_power_producer_docket_no',
'liquefied_natural_gas_storage',
'natural_gas_local_distribution_company', 'natural_gas_storage',
'natural_gas_pipeline_name_1', 'natural_gas_pipeline_name_2',
'natural_gas_pipeline_name_3', 'nerc_region', 'net_metering',
'pipeline_notes', 'regulatory_status_code',
'transmission_distribution_owner_id',
'transmission_distribution_owner_name',
'transmission_distribution_owner_state', 'utility_id_eia'],
# need type fixing
{},
],
'generators': [
# base cols
['plant_id_eia', 'generator_id'],
# static cols
['prime_mover_code', 'duct_burners', 'operating_date',
'topping_bottoming_code', 'solid_fuel_gasification',
'pulverized_coal_tech', 'fluidized_bed_tech', 'subcritical_tech',
'supercritical_tech', 'ultrasupercritical_tech', 'stoker_tech',
'other_combustion_tech', 'bypass_heat_recovery',
'rto_iso_lmp_node_id', 'rto_iso_location_wholesale_reporting_id',
'associated_combined_heat_power', 'original_planned_operating_date',
'operating_switch', 'previously_canceled'],
# annual cols
['capacity_mw', 'fuel_type_code_pudl', 'multiple_fuels',
'ownership_code', 'owned_by_non_utility', 'deliver_power_transgrid',
'summer_capacity_mw', 'winter_capacity_mw', 'summer_capacity_estimate',
'winter_capacity_estimate', 'minimum_load_mw', 'distributed_generation',
'technology_description', 'reactive_power_output_mvar',
'energy_source_code_1', 'energy_source_code_2',
'energy_source_code_3', 'energy_source_code_4',
'energy_source_code_5', 'energy_source_code_6',
'energy_source_1_transport_1', 'energy_source_1_transport_2',
'energy_source_1_transport_3', 'energy_source_2_transport_1',
'energy_source_2_transport_2', 'energy_source_2_transport_3',
'startup_source_code_1', 'startup_source_code_2',
'startup_source_code_3', 'startup_source_code_4',
'time_cold_shutdown_full_load_code', 'syncronized_transmission_grid',
'turbines_num', 'operational_status_code', 'operational_status',
'planned_modifications', 'planned_net_summer_capacity_uprate_mw',
'planned_net_winter_capacity_uprate_mw', 'planned_new_capacity_mw',
'planned_uprate_date', 'planned_net_summer_capacity_derate_mw',
'planned_net_winter_capacity_derate_mw', 'planned_derate_date',
'planned_new_prime_mover_code', 'planned_energy_source_code_1',
'planned_repower_date', 'other_planned_modifications',
'other_modifications_date', 'planned_retirement_date',
'carbon_capture', 'cofire_fuels', 'switch_oil_gas',
'turbines_inverters_hydrokinetics', 'nameplate_power_factor',
'uprate_derate_during_year', 'uprate_derate_completed_date',
'current_planned_operating_date', 'summer_estimated_capability_mw',
'winter_estimated_capability_mw', 'retirement_date',
'utility_id_eia', 'data_source'],
# need type fixing
{}
],
# Utilities must come after plants: plant location needs to be
# removed before the utility locations are compiled.
'utilities': [
# base cols
['utility_id_eia'],
# static cols
['utility_name_eia'],
# annual cols
['street_address', 'city', 'state', 'zip_code', 'entity_type',
'plants_reported_owner', 'plants_reported_operator',
'plants_reported_asset_manager', 'plants_reported_other_relationship',
'attention_line', 'address_2', 'zip_code_4',
'contact_firstname', 'contact_lastname', 'contact_title',
'contact_firstname_2', 'contact_lastname_2', 'contact_title_2',
'phone_extension_1', 'phone_extension_2', 'phone_number_1',
'phone_number_2'],
# need type fixing
{'utility_id_eia': 'int64', }, ],
'boilers': [
# base cols
['plant_id_eia', 'boiler_id'],
# static cols
['prime_mover_code'],
# annual cols
[],
# need type fixing
{},
]
}
"""dict: A dictionary containing table name strings (keys) and lists of columns
to keep for those tables (values).
"""
epacems_tables = ("hourly_emissions_epacems")
"""tuple: A tuple containing tables of EPA CEMS data to pull into PUDL.
"""
files_dict_epaipm = {
'transmission_single_epaipm': '*table_3-21*',
'transmission_joint_epaipm': '*transmission_joint_ipm*',
'load_curves_epaipm': '*table_2-2_*',
'plant_region_map_epaipm': '*needs_v6*',
}
"""dict: A dictionary of EPA IPM tables and strings that files of those tables
contain.
"""
epaipm_url_ext = {
'transmission_single_epaipm': 'table_3-21_annual_transmission_capabilities_of_u.s._model_regions_in_epa_platform_v6_-_2021.xlsx',
'load_curves_epaipm': 'table_2-2_load_duration_curves_used_in_epa_platform_v6.xlsx',
'plant_region_map_epaipm': 'needs_v6_november_2018_reference_case_0.xlsx',
}
"""dict: A dictionary of EPA IPM tables and associated URLs extensions for
downloading that table's data.
"""
epaipm_region_names = [
'ERC_PHDL', 'ERC_REST', 'ERC_FRNT', 'ERC_GWAY', 'ERC_WEST',
'FRCC', 'NENG_CT', 'NENGREST', 'NENG_ME', 'MIS_AR', 'MIS_IL',
'MIS_INKY', 'MIS_IA', 'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI',
'MIS_D_MS', 'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA',
'MIS_WUMS', 'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D', 'NY_Z_F',
'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K', 'PJM_West', 'PJM_AP', 'PJM_ATSI',
'PJM_COMD', 'PJM_Dom', 'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC',
'PJM_WMAC', 'S_C_KY', 'S_C_TVA', 'S_D_AECI', 'S_SOU', 'S_VACA',
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE',
'WECC_AZ', 'WEC_BANC', 'WECC_CO', 'WECC_ID', 'WECC_IID',
'WEC_LADW', 'WECC_MT', 'WECC_NM', 'WEC_CALN', 'WECC_NNV',
'WECC_PNW', 'WEC_SDGE', 'WECC_SCE', 'WECC_SNV', 'WECC_UT',
'WECC_WY', 'CN_AB', 'CN_BC', 'CN_NL', 'CN_MB', 'CN_NB', 'CN_NF',
'CN_NS', 'CN_ON', 'CN_PE', 'CN_PQ', 'CN_SK',
]
"""list: A list of EPA IPM region names."""
epaipm_region_aggregations = {
'PJM': [
'PJM_AP', 'PJM_ATSI', 'PJM_COMD', 'PJM_Dom',
'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC', 'PJM_WMAC'
],
'NYISO': [
'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D',
'NY_Z_F', 'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K'
],
'ISONE': ['NENG_CT', 'NENGREST', 'NENG_ME'],
'MISO': [
'MIS_AR', 'MIS_IL', 'MIS_INKY', 'MIS_IA',
'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI', 'MIS_D_MS',
'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA', 'MIS_WUMS'
],
'SPP': [
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE'
],
'WECC_NW': [
'WECC_CO', 'WECC_ID', 'WECC_MT', 'WECC_NNV',
'WECC_PNW', 'WECC_UT', 'WECC_WY'
]
}
"""
dict: A dictionary containing EPA IPM regions (keys) and lists of their
associated abbreviations (values).
"""
epaipm_rename_dict = {
'transmission_single_epaipm': {
'From': 'region_from',
'To': 'region_to',
'Capacity TTC (MW)': 'firm_ttc_mw',
'Energy TTC (MW)': 'nonfirm_ttc_mw',
'Transmission Tariff (2016 mills/kWh)': 'tariff_mills_kwh',
},
'load_curves_epaipm': {
'day': 'day_of_year',
'region': 'region_id_epaipm',
},
'plant_region_map_epaipm': {
'ORIS Plant Code': 'plant_id_eia',
'Region Name': 'region',
},
}
"""
dict: A dictionary of dictionaries containing EPA IPM tables (keys) and items
for each table to be renamed along with the replacement name (values).
"""
glue_pudl_tables = ('plants_eia', 'plants_ferc', 'plants', 'utilities_eia',
                    'utilities_ferc', 'utilities', 'utility_plant_assn')
"""tuple: A tuple containing the glue tables that link EIA and FERC Form 1
plants and utilities, which can be pulled into PUDL.
"""
data_sources = (
'eia860',
'eia861',
'eia923',
'epacems',
'epaipm',
'ferc1',
'ferc714',
# 'pudl'
)
"""tuple: A tuple containing the data sources we are able to pull into PUDL."""
# All the years for which we ought to be able to download these data sources
data_years = {
'eia860': tuple(range(2001, 2020)),
'eia861': tuple(range(1990, 2020)),
'eia923': tuple(range(2001, 2020)),
'epacems': tuple(range(1995, 2021)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2020)),
'ferc714': (None, ),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years
that we expect to be able to download for each data source (values).
"""
# The full set of years we currently expect to be able to ingest, per source:
working_partitions = {
'eia860': {
'years': tuple(range(2004, 2020))
},
'eia860m': {
'year_month': '2020-11'
},
'eia861': {
'years': tuple(range(2001, 2020))
},
'eia923': {
'years': tuple(range(2001, 2020))
},
'epacems': {
'years': tuple(range(1995, 2021)),
'states': tuple(cems_states.keys())},
'ferc1': {
'years': tuple(range(1994, 2020))
},
'ferc714': {},
}
"""
dict: A dictionary of data sources (keys) and dictionaries (values) of names of
partition type (sub-key) and paritions (sub-value) containing the paritions
such as tuples of years for each data source that are able to be ingested
into PUDL.
"""
pudl_tables = {
'eia860': eia860_pudl_tables,
'eia861': (
"service_territory_eia861",
"balancing_authority_eia861",
"sales_eia861",
"advanced_metering_infrastructure_eia861",
"demand_response_eia861",
"demand_side_management_eia861",
"distributed_generation_eia861",
"distribution_systems_eia861",
"dynamic_pricing_eia861",
"energy_efficiency_eia861",
"green_pricing_eia861",
"mergers_eia861",
"net_metering_eia861",
"non_net_metering_eia861",
"operational_data_eia861",
"reliability_eia861",
"utility_data_eia861",
),
'eia923': eia923_pudl_tables,
'epacems': epacems_tables,
'epaipm': epaipm_pudl_tables,
'ferc1': ferc1_pudl_tables,
'ferc714': (
"respondent_id_ferc714",
"id_certification_ferc714",
"gen_plants_ba_ferc714",
"demand_monthly_ba_ferc714",
"net_energy_load_ba_ferc714",
"adjacency_ba_ferc714",
"interchange_ba_ferc714",
"lambda_hourly_ba_ferc714",
"lambda_description_ferc714",
"description_pa_ferc714",
"demand_forecast_pa_ferc714",
"demand_hourly_pa_ferc714",
),
'glue': glue_pudl_tables,
}
"""
dict: A dictionary containing data sources (keys) and the list of associated
tables from that datasource that can be pulled into PUDL (values).
"""
base_data_urls = {
'eia860': 'https://www.eia.gov/electricity/data/eia860',
'eia861': 'https://www.eia.gov/electricity/data/eia861/zip',
'eia923': 'https://www.eia.gov/electricity/data/eia923',
'epacems': 'ftp://newftp.epa.gov/dmdnload/emissions/hourly/monthly',
'ferc1': 'ftp://eforms1.ferc.gov/f1allyears',
'ferc714': 'https://www.ferc.gov/docs-filing/forms/form-714/data',
'ferceqr': 'ftp://eqrdownload.ferc.gov/DownloadRepositoryProd/BulkNew/CSV',
'msha': 'https://arlweb.msha.gov/OpenGovernmentData/DataSets',
'epaipm': 'https://www.epa.gov/sites/production/files/2019-03',
'pudl': 'https://catalyst.coop/pudl/'
}
"""
dict: A dictionary containing data sources (keys) and their base data URLs
(values).
"""
need_fix_inting = {
'plants_steam_ferc1': ('construction_year', 'installation_year'),
'plants_small_ferc1': ('construction_year', 'ferc_license_id'),
'plants_hydro_ferc1': ('construction_year', 'installation_year',),
'plants_pumped_storage_ferc1': ('construction_year', 'installation_year',),
'hourly_emissions_epacems': ('facility_id', 'unit_id_epa',),
}
"""
dict: A dictionary containing tables (keys) and column names (values)
containing integer - type columns whose null values need fixing.
"""
contributors = {
"catalyst-cooperative": {
"title": "Catalyst Cooperative",
"path": "https://catalyst.coop/",
"role": "publisher",
"email": "<EMAIL>",
"organization": "Catalyst Cooperative",
},
"zane-selvans": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://amateurearthling.org/",
"role": "wrangler",
"organization": "Catalyst Cooperative"
},
"christina-gosnell": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"steven-winter": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"alana-wilson": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"karl-dunkle-werner": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://karldw.org/",
"role": "contributor",
"organization": "UC Berkeley",
},
'greg-schivley': {
"title": "<NAME>",
"role": "contributor",
},
}
"""
dict: A dictionary of dictionaries containing organization names (keys) and
their attributes (values).
"""
data_source_info = {
"eia860": {
"title": "EIA Form 860",
"path": "https://www.eia.gov/electricity/data/eia860/",
},
"eia861": {
"title": "EIA Form 861",
"path": "https://www.eia.gov/electricity/data/eia861/",
},
"eia923": {
"title": "EIA Form 923",
"path": "https://www.eia.gov/electricity/data/eia923/",
},
"eiawater": {
"title": "EIA Water Use for Power",
"path": "https://www.eia.gov/electricity/data/water/",
},
"epacems": {
"title": "EPA Air Markets Program Data",
"path": "https://ampd.epa.gov/ampd/",
},
"epaipm": {
"title": "EPA Integrated Planning Model",
"path": "https://www.epa.gov/airmarkets/national-electric-energy-data-system-needs-v6",
},
"ferc1": {
"title": "FERC Form 1",
"path": "https://www.ferc.gov/docs-filing/forms/form-1/data.asp",
},
"ferc714": {
"title": "FERC Form 714",
"path": "https://www.ferc.gov/docs-filing/forms/form-714/data.asp",
},
"ferceqr": {
"title": "FERC Electric Quarterly Report",
"path": "https://www.ferc.gov/docs-filing/eqr.asp",
},
"msha": {
"title": "Mining Safety and Health Administration",
"path": "https://www.msha.gov/mine-data-retrieval-system",
},
"phmsa": {
"title": "Pipelines and Hazardous Materials Safety Administration",
"path": "https://www.phmsa.dot.gov/data-and-statistics/pipeline/data-and-statistics-overview",
},
"pudl": {
"title": "The Public Utility Data Liberation Project (PUDL)",
"path": "https://catalyst.coop/pudl/",
"email": "<EMAIL>",
},
}
"""
dict: A dictionary of dictionaries containing datasources (keys) and
associated attributes (values)
"""
contributors_by_source = {
"pudl": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
"karl-dunkle-werner",
],
"eia923": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
],
"eia860": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"ferc1": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"epacems": [
"catalyst-cooperative",
"karl-dunkle-werner",
"zane-selvans",
],
"epaipm": [
"greg-schivley",
],
}
"""
dict: A dictionary of data sources (keys) and lists of contributors (values).
"""
licenses = {
"cc-by-4.0": {
"name": "CC-BY-4.0",
"title": "Creative Commons Attribution 4.0",
"path": "https://creativecommons.org/licenses/by/4.0/"
},
"us-govt": {
"name": "other-pd",
"title": "U.S. Government Work",
"path": "http://www.usa.gov/publicdomain/label/1.0/",
}
}
"""
dict: A dictionary of dictionaries containing license types and their
attributes.
"""
output_formats = [
'sqlite',
'parquet',
'datapkg',
]
"""list: A list of types of PUDL output formats."""
keywords_by_data_source = {
'pudl': [
'us', 'electricity',
],
'eia860': [
'electricity', 'electric', 'boiler', 'generator', 'plant', 'utility',
'fuel', 'coal', 'natural gas', 'prime mover', 'eia860', 'retirement',
'capacity', 'planned', 'proposed', 'energy', 'hydro', 'solar', 'wind',
'nuclear', 'form 860', 'eia', 'annual', 'gas', 'ownership', 'steam',
'turbine', 'combustion', 'combined cycle', 'eia',
'energy information administration'
],
'eia923': [
'fuel', 'boiler', 'generator', 'plant', 'utility', 'cost', 'price',
'natural gas', 'coal', 'eia923', 'energy', 'electricity', 'form 923',
'receipts', 'generation', 'net generation', 'monthly', 'annual', 'gas',
'fuel consumption', 'MWh', 'energy information administration', 'eia',
'mercury', 'sulfur', 'ash', 'lignite', 'bituminous', 'subbituminous',
'heat content'
],
'epacems': [
'epa', 'us', 'emissions', 'pollution', 'ghg', 'so2', 'co2', 'sox',
'nox', 'load', 'utility', 'electricity', 'plant', 'generator', 'unit',
'generation', 'capacity', 'output', 'power', 'heat content', 'mmbtu',
'steam', 'cems', 'continuous emissions monitoring system', 'hourly',
'environmental protection agency', 'ampd', 'air markets program data',
],
'ferc1': [
'electricity', 'electric', 'utility', 'plant', 'steam', 'generation',
'cost', 'expense', 'price', 'heat content', 'ferc', 'form 1',
'federal energy regulatory commission', 'capital', 'accounting',
'depreciation', 'finance', 'plant in service', 'hydro', 'coal',
'natural gas', 'gas', 'opex', 'capex', 'accounts', 'investment',
'capacity'
],
'ferc714': [
'electricity', 'electric', 'utility', 'planning area', 'form 714',
'balancing authority', 'demand', 'system lambda', 'ferc',
'federal energy regulatory commission', "hourly", "generation",
"interchange", "forecast", "load", "adjacency", "plants",
],
'epaipm': [
'epaipm', 'integrated planning',
]
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
ENTITY_TYPE_DICT = {
'M': 'Municipal',
'C': 'Cooperative',
'R': 'Retail Power Marketer',
'I': 'Investor Owned',
'P': 'Political Subdivision',
'T': 'Transmission',
'S': 'State',
'W': 'Wholesale Power Marketer',
'F': 'Federal',
'A': 'Municipal Mktg Authority',
'G': 'Community Choice Aggregator',
'D': 'Nonutility DSM Administrator',
'B': 'Behind the Meter',
'Q': 'Independent Power Producer',
'IND': 'Industrial',
'COM': 'Commercial',
'PR': 'Private', # Added by AES for OD table (Arbitrary moniker)
'PO': 'Power Marketer', # Added by AES for OD table
'U': 'Unknown', # Added by AES for OD table
'O': 'Other' # Added by AES for OD table
}
# Confirm these designations -- educated guess based on the form instructions
MOMENTARY_INTERRUPTION_DEF = { # Added by AES for R table
'L': 'Less than 1 minute',
'F': 'Less than or equal to 5 minutes',
'O': 'Other',
}
# https://www.eia.gov/electricity/data/eia411/#tabs_NERC-3
RECOGNIZED_NERC_REGIONS = [
'BASN', # ASSESSMENT AREA Basin (WECC)
'CALN', # ASSESSMENT AREA California (WECC)
'CALS', # ASSESSMENT AREA California (WECC)
'DSW', # ASSESSMENT AREA Desert Southwest (WECC)
'ASCC', # Alaska
'ISONE', # ISO New England (NPCC)
'ERCOT', # lumped under TRE in 2017 Form instructions
'NORW', # ASSESSMENT AREA Northwest (WECC)
'NYISO', # ISO (NPCC)
'PJM', # RTO
'ROCK', # ASSESSMENT AREA Rockies (WECC)
'ECAR', # OLD RE Now part of RFC and SERC
'FRCC', # included in 2017 Form instructions, recently joined with SERC
'HICC', # Hawaii
'MAAC', # OLD RE Now part of RFC
'MAIN', # OLD RE Now part of SERC, RFC, MRO
'MAPP', # OLD/NEW RE Became part of MRO, resurfaced in 2010
'MRO', # RE included in 2017 Form instructions
'NPCC', # RE included in 2017 Form instructions
'RFC', # RE included in 2017 Form instructions
'SERC', # RE included in 2017 Form instructions
'SPP', # RE included in 2017 Form instructions
'TRE', # RE included in 2017 Form instructions (included ERCOT)
'WECC', # RE included in 2017 Form instructions
'WSCC', # OLD RE pre-2002 version of WECC
'MISO', # ISO unclear whether technically a regional entity, but lots of entries
'ECAR_MAAC',
'MAPP_WECC',
'RFC_SERC',
'SPP_WECC',
'MRO_WECC',
'ERCOT_SPP',
'SPP_TRE',
'ERCOT_TRE',
'MISO_TRE',
'VI', # Virgin Islands
'GU', # Guam
'PR', # Puerto Rico
'AS', # American Samoa
'UNK',
]
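# Illustrative sketch (assumed sample codes, not part of the original constants): a
# recognized-values list like this is typically used to flag unexpected NERC region
# codes during validation, after normalizing whitespace and case.
_example_nerc_known = [code.strip().upper() in RECOGNIZED_NERC_REGIONS
                       for code in ("wecc", "MISO ", "FOO")]
# -> [True, True, False]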
CUSTOMER_CLASSES = [
"commercial",
"industrial",
"direct_connection",
"other",
"residential",
"total",
"transportation"
]
TECH_CLASSES = [
    'backup',  # Where is this used? It was removed from the DG table because it is not a real component
'chp_cogen',
'combustion_turbine',
'fuel_cell',
'hydro',
'internal_combustion',
'other',
'pv',
'steam',
'storage_pv',
'all_storage', # need 'all' as prefix so as not to confuse with other storage category
'total',
'virtual_pv',
'wind',
]
REVENUE_CLASSES = [
'retail_sales',
'unbundled',
'delivery_customers',
'sales_for_resale',
'credits_or_adjustments',
'other',
'transmission',
'total',
]
RELIABILITY_STANDARDS = [
'ieee_standard',
'other_standard'
]
FUEL_CLASSES = [
'gas',
'oil',
'other',
'renewable',
'water',
'wind',
'wood',
]
RTO_CLASSES = [
'caiso',
'ercot',
'pjm',
'nyiso',
'spp',
'miso',
'isone',
'other'
]
ESTIMATED_OR_ACTUAL = {'E': 'estimated', 'A': 'actual'}
TRANSIT_TYPE_DICT = {
'CV': 'conveyer',
'PL': 'pipeline',
'RR': 'railroad',
'TK': 'truck',
'WA': 'water',
'UN': 'unknown',
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
column_dtypes = {
"ferc1": { # Obviously this is not yet a complete list...
"construction_year": pd.Int64Dtype(),
"installation_year": pd.Int64Dtype(),
"plant_id_ferc1": pd.Int64Dtype(),
"plant_id_pudl": pd.Int64Dtype(),
"report_date": "datetime64[ns]",
"report_year": pd.Int64Dtype(),
"utility_id_ferc1": pd.Int64Dtype(),
"utility_id_pudl": pd.Int64Dtype(),
},
"ferc714": { # INCOMPLETE
"demand_mwh": float,
"demand_annual_mwh": float,
"eia_code": pd.Int64Dtype(),
"peak_demand_summer_mw": float,
"peak_demand_winter_mw": float,
"report_date": "datetime64[ns]",
"respondent_id_ferc714": pd.Int64Dtype(),
"respondent_name_ferc714": pd.StringDtype(),
"respondent_type": pd.CategoricalDtype(categories=[
"utility", "balancing_authority",
]),
"timezone": pd.CategoricalDtype(categories=[
"America/New_York", "America/Chicago", "America/Denver",
"America/Los_Angeles", "America/Anchorage", "Pacific/Honolulu"]),
"utc_datetime": "datetime64[ns]",
},
"epacems": {
'state': pd.StringDtype(),
'plant_id_eia': pd.Int64Dtype(), # Nullable Integer
'unitid': pd.StringDtype(),
'operating_datetime_utc': "datetime64[ns]",
'operating_time_hours': float,
'gross_load_mw': float,
'steam_load_1000_lbs': float,
'so2_mass_lbs': float,
'so2_mass_measurement_code': pd.StringDtype(),
'nox_rate_lbs_mmbtu': float,
'nox_rate_measurement_code': pd.StringDtype(),
'nox_mass_lbs': float,
'nox_mass_measurement_code': pd.StringDtype(),
'co2_mass_tons': float,
'co2_mass_measurement_code': pd.StringDtype(),
'heat_content_mmbtu': float,
'facility_id': pd.Int64Dtype(), # Nullable Integer
'unit_id_epa': pd.Int64Dtype(), # Nullable Integer
},
"eia": {
'actual_peak_demand_savings_mw': float, # Added by AES for DR table
'address_2': pd.StringDtype(), # Added by AES for 860 utilities table
'advanced_metering_infrastructure': pd.Int64Dtype(), # Added by AES for AMI table
# Added by AES for UD misc table
'alternative_fuel_vehicle_2_activity': pd.BooleanDtype(),
'alternative_fuel_vehicle_activity': pd.BooleanDtype(),
'annual_indirect_program_cost': float,
'annual_total_cost': float,
'ash_content_pct': float,
'ash_impoundment': pd.BooleanDtype(),
'ash_impoundment_lined': pd.BooleanDtype(),
# TODO: convert this field to more descriptive words
'ash_impoundment_status': pd.StringDtype(),
'associated_combined_heat_power': pd.BooleanDtype(),
'attention_line': pd.StringDtype(),
'automated_meter_reading': pd.Int64Dtype(), # Added by AES for AMI table
'backup_capacity_mw': float, # Added by AES for NNM & DG misc table
'balancing_authority_code_eia': pd.CategoricalDtype(),
'balancing_authority_id_eia': pd.Int64Dtype(),
'balancing_authority_name_eia': pd.StringDtype(),
'bga_source': pd.StringDtype(),
'boiler_id': pd.StringDtype(),
'bunded_activity': pd.BooleanDtype(),
'business_model': pd.CategoricalDtype(categories=[
"retail", "energy_services"]),
'buy_distribution_activity': pd.BooleanDtype(),
'buying_transmission_activity': pd.BooleanDtype(),
'bypass_heat_recovery': pd.BooleanDtype(),
'caidi_w_major_event_days_minus_loss_of_service_minutes': float,
'caidi_w_major_event_dats_minutes': float,
'caidi_wo_major_event_days_minutes': float,
'capacity_mw': float,
'carbon_capture': pd.BooleanDtype(),
'chlorine_content_ppm': float,
'circuits_with_voltage_optimization': pd.Int64Dtype(),
'city': pd.StringDtype(),
'cofire_fuels': pd.BooleanDtype(),
'consumed_by_facility_mwh': float,
'consumed_by_respondent_without_charge_mwh': float,
'contact_firstname': pd.StringDtype(),
'contact_firstname_2': pd.StringDtype(),
'contact_lastname': pd.StringDtype(),
'contact_lastname_2': pd.StringDtype(),
'contact_title': pd.StringDtype(),
        'contact_title_2': pd.StringDtype(),  # assumed dtype, matching the other contact_* fields above
import vectorbt as vbt
import numpy as np
import pandas as pd
from numba import njit
from datetime import datetime
import pytest
from vectorbt.signals import nb
seed = 42
day_dt = np.timedelta64(86400000000000)
sig = pd.DataFrame([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]), columns=['a', 'b', 'c'])
ts = pd.Series([1., 2., 3., 2., 1.], index=sig.index)
price = pd.DataFrame({
'open': [10, 11, 12, 11, 10],
'high': [11, 12, 13, 12, 11],
'low': [9, 10, 11, 10, 9],
'close': [10, 11, 12, 11, 10]
})
# ############# accessors.py ############# #
class TestAccessors:
def test_freq(self):
assert sig.vbt.signals.wrapper.freq == day_dt
assert sig['a'].vbt.signals.wrapper.freq == day_dt
assert sig.vbt.signals(freq='2D').wrapper.freq == day_dt * 2
assert sig['a'].vbt.signals(freq='2D').wrapper.freq == day_dt * 2
assert pd.Series([False, True]).vbt.signals.wrapper.freq is None
assert pd.Series([False, True]).vbt.signals(freq='3D').wrapper.freq == day_dt * 3
assert pd.Series([False, True]).vbt.signals(freq=np.timedelta64(4, 'D')).wrapper.freq == day_dt * 4
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_fshift(self, test_n):
pd.testing.assert_series_equal(sig['a'].vbt.signals.fshift(test_n), sig['a'].shift(test_n, fill_value=False))
np.testing.assert_array_equal(
sig['a'].vbt.signals.fshift(test_n).values,
nb.fshift_1d_nb(sig['a'].values, test_n)
)
pd.testing.assert_frame_equal(sig.vbt.signals.fshift(test_n), sig.shift(test_n, fill_value=False))
def test_empty(self):
pd.testing.assert_series_equal(
pd.Series.vbt.signals.empty(5, index=np.arange(10, 15), name='a'),
pd.Series(np.full(5, False), index=np.arange(10, 15), name='a')
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.empty((5, 3), index=np.arange(10, 15), columns=['a', 'b', 'c']),
pd.DataFrame(np.full((5, 3), False), index=np.arange(10, 15), columns=['a', 'b', 'c'])
)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.empty_like(sig['a']),
pd.Series(np.full(sig['a'].shape, False), index=sig['a'].index, name=sig['a'].name)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.empty_like(sig),
pd.DataFrame(np.full(sig.shape, False), index=sig.index, columns=sig.columns)
)
def test_generate(self):
@njit
def choice_func_nb(from_i, to_i, col, n):
if col == 0:
return np.arange(from_i, to_i)
elif col == 1:
return np.full(1, from_i)
else:
return np.full(1, to_i - n)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate(5, choice_func_nb, 1, index=sig['a'].index, name=sig['a'].name),
pd.Series(
np.array([True, True, True, True, True]),
index=sig['a'].index,
name=sig['a'].name
)
)
with pytest.raises(Exception) as e_info:
_ = pd.Series.vbt.signals.generate((5, 2), choice_func_nb, 1)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate((5, 3), choice_func_nb, 1, index=sig.index, columns=sig.columns),
pd.DataFrame(
np.array([
[True, True, False],
[True, False, False],
[True, False, False],
[True, False, False],
[True, False, True]
]),
index=sig.index,
columns=sig.columns
)
)
def test_generate_both(self):
@njit
def entry_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
@njit
def exit_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
temp_int = np.empty((sig.shape[0],), dtype=np.int_)
en, ex = pd.Series.vbt.signals.generate_both(
5, entry_func_nb, exit_func_nb, (temp_int,), (temp_int,),
index=sig['a'].index, name=sig['a'].name)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, False, True, False, True]),
index=sig['a'].index,
name=sig['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, False, True, False]),
index=sig['a'].index,
name=sig['a'].name
)
)
en, ex = pd.DataFrame.vbt.signals.generate_both(
(5, 3), entry_func_nb, exit_func_nb, (temp_int,), (temp_int,),
index=sig.index, columns=sig.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[True, True, True],
[False, False, False],
[True, True, True]
]),
index=sig.index,
columns=sig.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[False, False, False],
[True, True, True],
[False, False, False]
]),
index=sig.index,
columns=sig.columns
)
)
en, ex = pd.Series.vbt.signals.generate_both(
(5,), entry_func_nb, exit_func_nb, (temp_int,), (temp_int,),
index=sig['a'].index, name=sig['a'].name, entry_wait=1, exit_wait=0)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, True, True, True, True]),
index=sig['a'].index,
name=sig['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([True, True, True, True, True]),
index=sig['a'].index,
name=sig['a'].name
)
)
en, ex = pd.Series.vbt.signals.generate_both(
(5,), entry_func_nb, exit_func_nb, (temp_int,), (temp_int,),
index=sig['a'].index, name=sig['a'].name, entry_wait=0, exit_wait=1)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, True, True, True, True]),
index=sig['a'].index,
name=sig['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, True, True, True]),
index=sig['a'].index,
name=sig['a'].name
)
)
def test_generate_exits(self):
@njit
def choice_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
temp_int = np.empty((sig.shape[0],), dtype=np.int_)
pd.testing.assert_series_equal(
sig['a'].vbt.signals.generate_exits(choice_func_nb, temp_int),
pd.Series(
np.array([False, True, False, False, True]),
index=sig['a'].index,
name=sig['a'].name
)
)
pd.testing.assert_frame_equal(
sig.vbt.signals.generate_exits(choice_func_nb, temp_int),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=sig.index,
columns=sig.columns
)
)
pd.testing.assert_frame_equal(
sig.vbt.signals.generate_exits(choice_func_nb, temp_int, wait=0),
pd.DataFrame(
np.array([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
]),
index=sig.index,
columns=sig.columns
)
)
def test_clean(self):
entries = pd.DataFrame([
[True, False, True],
[True, False, False],
[True, True, True],
[False, True, False],
[False, True, True]
], index=sig.index, columns=sig.columns)
exits = pd.Series([True, False, True, False, True], index=sig.index)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(),
pd.DataFrame(
np.array([
[True, False, True],
[False, False, False],
[False, True, True],
[False, False, False],
[False, False, True]
]),
index=sig.index,
columns=sig.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.clean(entries),
pd.DataFrame(
np.array([
[True, False, True],
[False, False, False],
[False, True, True],
[False, False, False],
[False, False, True]
]),
index=sig.index,
columns=sig.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits)[0],
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=sig.index,
columns=sig.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits)[1],
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[True, False, False]
]),
index=sig.index,
columns=sig.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits, entry_first=False)[0],
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=sig.index,
columns=sig.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits, entry_first=False)[1],
pd.DataFrame(
np.array([
[False, True, False],
[False, False, False],
[False, False, False],
[False, False, False],
[True, False, False]
]),
index=sig.index,
columns=sig.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.clean(entries, exits)[0],
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=sig.index,
columns=sig.columns
)
)
pd.testing.assert_frame_equal(
| pd.DataFrame.vbt.signals.clean(entries, exits) | pandas.DataFrame.vbt.signals.clean |
"""
Calculate MQA scores only for the resolved region from local score.
MQA methods:
- DeepAccNet
- P3CMQA
- ProQ3D
- VoroCNN
"""
import argparse
import os
import subprocess
import tarfile
from pathlib import Path
from typing import Any, List, Union
import numpy as np
import pandas as pd
from prody import parsePDB, writePDB
from tqdm import tqdm
data_dir = Path('../../data')
interim_path = data_dir / 'interim'
score_dir = data_dir / 'out/dataset/score/mqa'
def open_tar(tar_file: Union[str, Path]) -> tarfile.TarFile:
return tarfile.open(tar_file, 'r:gz')
def get_resolved_pdb(target: str, resolved_indices: List[int]) -> Path:
target_pdb_dir = data_dir / 'out/dataset/alphafold_output' / target
pdb_resolved_dir = data_dir / 'out/dataset/pdb/pdb_resolved'
pdb_resolved_target_dir = pdb_resolved_dir / target
pdb_resolved_target_dir.mkdir(parents=True, exist_ok=True)
for pdb in target_pdb_dir.glob('*.pdb'):
pdb_name = pdb.stem
output_pdb_path = pdb_resolved_target_dir / f'{pdb_name}.pdb'
if output_pdb_path.exists():
continue
mol = parsePDB(pdb)
resindices = mol.getResnums() - 1
resolved_atom_indices = np.where(np.isin(resindices, resolved_indices))[0]
mol_resolved = mol[resolved_atom_indices]
writePDB(str(output_pdb_path), mol_resolved)
return pdb_resolved_target_dir
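# Illustrative sketch (assumed toy values): the "resolved" residue indices used above
# are simply all 0-based positions minus the missing ones, e.g. a 10-residue chain
# with residues 3 and 7 unresolved keeps the remaining eight indices.
_example_resolved_indices = np.setdiff1d(np.arange(10), [3, 7])
# -> array([0, 1, 2, 4, 5, 6, 8, 9])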
class CalcResolvedConfidence:
missing_dict = np.load(interim_path / 'missing_residues.npy', allow_pickle=True).item()
def __init__(self, method: str, target_csv: Union[str, Path]):
self.method = method
self.target_df = pd.read_csv(target_csv, index_col=0)
def __call__(self, *args: Any, **kwds: Any) -> Any:
results = []
with tqdm(self.target_df.iterrows(), total=len(self.target_df)) as pbar:
for _, row in pbar:
target = row['id']
pbar.set_description(f'Target = {target}')
length = row['length']
result = self.for_target(target, length)
results.append(result)
if sum([1 if result is None else 0 for result in results]) > 0:
print(f'{self.method} calculation not yet finished')
exit()
return pd.concat(results)
def for_target(self, target: str, length: int) -> Union[pd.DataFrame, None]:
resolved_indices = self.get_resolved_indices(target, length)
if self.method == 'DeepAccNet' or self.method == 'DeepAccNet-Bert':
result = self.DeepAccNet(target, length)
elif self.method == 'P3CMQA' or self.method == 'Sato-3DCNN':
result = self.P3CMQA(target, resolved_indices)
elif self.method == 'ProQ3D':
result = self.ProQ3D(target, resolved_indices)
elif self.method == 'VoroCNN':
result = self.VoroCNN(target, resolved_indices)
elif self.method == 'DOPE':
result = self.DOPE(target, resolved_indices)
elif self.method == 'SBROD':
result = self.SBROD(target, resolved_indices)
else:
raise ValueError(f'Unknown method: {self.method}')
return result
@classmethod
def get_resolved_indices(cls, target: str, length: int) -> List[int]:
return np.setdiff1d(np.arange(length), cls.missing_dict[target])
def DeepAccNet(self, target: str, length: int) -> Union[pd.DataFrame, None]:
deepaccnet_path = score_dir / 'DeepAccNet'
result_path = deepaccnet_path / f'{target}_resolved.csv'
# if calculation already finished
if result_path.exists():
            result_df = pd.read_csv(result_path, index_col=0)  # assumed: load the cached per-model scores
            return result_df
import pandas as pd
import argparse
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../')
from common.time_utils import get_local_datetime, get_local_unixtime
from common.geoutils import great_circle_distance
from config.settings import BOUNDING_BOX
def load_trip_data(path, cols, new_cols):
df = pd.read_csv(path, usecols=cols, nrows=None)
df.rename(columns=dict(zip(cols, new_cols)), inplace=True)
return df
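# Illustrative sketch (assumed raw column names): the rename step inside load_trip_data
# maps source headers onto the canonical names used downstream.
_example_raw = pd.DataFrame({'tpep_pickup_datetime': ['2016-06-01 00:00:00'],
                             'tpep_dropoff_datetime': ['2016-06-01 00:12:30']})
_example_renamed = _example_raw.rename(columns={'tpep_pickup_datetime': 'pickup_datetime',
                                                'tpep_dropoff_datetime': 'dropoff_datetime'})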
def convert_datetime(df):
df['request_datetime'] = pd.to_datetime(df.pickup_datetime).apply(lambda x: int(get_local_unixtime(x)))
    # assumed: trip duration in seconds, derived from the dropoff and pickup timestamps
    df['trip_time'] = (pd.to_datetime(df.dropoff_datetime) - pd.to_datetime(df.pickup_datetime)).dt.total_seconds()
    return df
from flask import Flask, render_template, jsonify, request
from flask_pymongo import PyMongo
from flask_cors import CORS, cross_origin
import json
import requests  # needed by the catch_all() debug mirror below
import collections
import numpy as np
import re
from numpy import array
from statistics import mode
import pandas as pd
import warnings
import copy
from joblib import Memory
from itertools import chain
import ast
import timeit
from sklearn.neighbors import KNeighborsClassifier # 1 neighbors
from sklearn.svm import SVC # 1 svm
from sklearn.naive_bayes import GaussianNB # 1 naive bayes
from sklearn.neural_network import MLPClassifier # 1 neural network
from sklearn.linear_model import LogisticRegression # 1 linear model
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis # 2 discriminant analysis
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, AdaBoostClassifier, GradientBoostingClassifier # 4 ensemble models
from joblib import Parallel, delayed
import multiprocessing
from sklearn.pipeline import make_pipeline
from sklearn import model_selection
from sklearn.manifold import MDS
from sklearn.manifold import TSNE
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import log_loss
from sklearn.metrics import fbeta_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from imblearn.metrics import geometric_mean_score
import umap
from sklearn.metrics import classification_report
from sklearn.preprocessing import scale
import eli5
from eli5.sklearn import PermutationImportance
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import RFE
from sklearn.decomposition import PCA
from mlxtend.classifier import StackingCVClassifier
from mlxtend.feature_selection import ColumnSelector
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import ShuffleSplit
from scipy.spatial import procrustes
# This block of code is for the connection between the server, the database, and the client (plus routing).
# Access MongoDB
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mydb"
mongo = PyMongo(app)
cors = CORS(app, resources={r"/data/*": {"origins": "*"}})
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/Reset', methods=["GET", "POST"])
def Reset():
global DataRawLength
global DataResultsRaw
global previousState
previousState = []
global filterActionFinal
filterActionFinal = ''
global keySpecInternal
keySpecInternal = 1
global dataSpacePointsIDs
dataSpacePointsIDs = []
global previousStateActive
previousStateActive = []
global StanceTest
StanceTest = False
global status
status = True
global factors
factors = [1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,1,1,1]
global KNNModelsCount
global SVCModelsCount
global GausNBModelsCount
global MLPModelsCount
global LRModelsCount
global LDAModelsCount
global QDAModelsCount
global RFModelsCount
global ExtraTModelsCount
global AdaBModelsCount
global GradBModelsCount
global keyData
keyData = 0
KNNModelsCount = 0
SVCModelsCount = 576
GausNBModelsCount = 736
MLPModelsCount = 1236
LRModelsCount = 1356
LDAModelsCount = 1996
QDAModelsCount = 2196
RFModelsCount = 2446
ExtraTModelsCount = 2606
AdaBModelsCount = 2766
GradBModelsCount = 2926
global XData
XData = []
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global detailsParams
detailsParams = []
global algorithmList
algorithmList = []
global ClassifierIDsList
ClassifierIDsList = ''
# Initializing models
global resultsList
resultsList = []
global RetrieveModelsList
RetrieveModelsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global crossValidation
crossValidation = 5
# models
global KNNModels
KNNModels = []
global RFModels
RFModels = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
return 'The reset was done!'
# Retrieve data from client and select the correct data set
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequest', methods=["GET", "POST"])
def RetrieveFileName():
global DataRawLength
global DataResultsRaw
global DataResultsRawTest
global DataRawLengthTest
fileName = request.get_data().decode('utf8').replace("'", '"')
global keySpecInternal
keySpecInternal = 1
global filterActionFinal
filterActionFinal = ''
global dataSpacePointsIDs
dataSpacePointsIDs = []
global RANDOM_SEED
RANDOM_SEED = 42
global keyData
keyData = 0
global XData
XData = []
global previousState
previousState = []
global previousStateActive
previousStateActive = []
global status
status = True
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global filterDataFinal
filterDataFinal = 'mean'
global ClassifierIDsList
ClassifierIDsList = ''
global algorithmList
algorithmList = []
global detailsParams
detailsParams = []
# Initializing models
global RetrieveModelsList
RetrieveModelsList = []
global resultsList
resultsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
# models
global KNNModels
global SVCModels
global GausNBModels
global MLPModels
global LRModels
global LDAModels
global QDAModels
global RFModels
global ExtraTModels
global AdaBModels
global GradBModels
KNNModels = []
SVCModels = []
GausNBModels = []
MLPModels = []
LRModels = []
LDAModels = []
QDAModels = []
RFModels = []
ExtraTModels = []
AdaBModels = []
GradBModels = []
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global StanceTest
StanceTest = False
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
DataRawLength = -1
DataRawLengthTest = -1
data = json.loads(fileName)
if data['fileName'] == 'HeartC':
CollectionDB = mongo.db.HeartC.find()
elif data['fileName'] == 'StanceC':
StanceTest = True
CollectionDB = mongo.db.StanceC.find()
CollectionDBTest = mongo.db.StanceCTest.find()
elif data['fileName'] == 'DiabetesC':
CollectionDB = mongo.db.diabetesC.find()
elif data['fileName'] == 'BreastC':
CollectionDB = mongo.db.breastC.find()
elif data['fileName'] == 'WineC':
CollectionDB = mongo.db.WineC.find()
elif data['fileName'] == 'ContraceptiveC':
CollectionDB = mongo.db.ContraceptiveC.find()
elif data['fileName'] == 'VehicleC':
CollectionDB = mongo.db.VehicleC.find()
elif data['fileName'] == 'BiodegC':
StanceTest = True
CollectionDB = mongo.db.biodegC.find()
CollectionDBTest = mongo.db.biodegCTest.find()
else:
CollectionDB = mongo.db.IrisC.find()
DataResultsRaw = []
for index, item in enumerate(CollectionDB):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRaw.append(item)
DataRawLength = len(DataResultsRaw)
DataResultsRawTest = []
if (StanceTest):
for index, item in enumerate(CollectionDBTest):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRawTest.append(item)
DataRawLengthTest = len(DataResultsRawTest)
DataSetSelection()
return 'Everything is okay'
def Convert(lst):
it = iter(lst)
res_dct = dict(zip(it, it))
return res_dct
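# Illustrative sketch (assumed input): Convert pairs consecutive items of a flat list,
# e.g. ['C', 1.0, 'kernel', 'rbf'] -> {'C': 1.0, 'kernel': 'rbf'}.
_example_param_dict = Convert(['C', 1.0, 'kernel', 'rbf'])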
# Retrieve data set from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendtoSeverDataSet', methods=["GET", "POST"])
def SendToServerData():
uploadedData = request.get_data().decode('utf8').replace("'", '"')
uploadedDataParsed = json.loads(uploadedData)
DataResultsRaw = uploadedDataParsed['uploadedData']
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary[target]
global AllTargets
global target_names
global target_namesLoc
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
target_names.append(value)
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
target_names.append(value)
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
return 'Processed uploaded data set'
# Send data to the client
@app.route('/data/ClientRequest', methods=["GET", "POST"])
def CollectionData():
json.dumps(DataResultsRaw)
response = {
'Collection': DataResultsRaw
}
return jsonify(response)
def DataSetSelection():
global XDataTest, yDataTest
XDataTest = pd.DataFrame()
global StanceTest
global AllTargets
global target_names
target_namesLoc = []
if (StanceTest):
DataResultsTest = copy.deepcopy(DataResultsRawTest)
for dictionary in DataResultsRawTest:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRawTest.sort(key=lambda x: x[target], reverse=True)
DataResultsTest.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResultsTest:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargetsTest = [o[target] for o in DataResultsRawTest]
AllTargetsFloatValuesTest = []
previous = None
Class = 0
for i, value in enumerate(AllTargetsTest):
if (i == 0):
previous = value
target_namesLoc.append(value)
if (value == previous):
AllTargetsFloatValuesTest.append(Class)
else:
Class = Class + 1
target_namesLoc.append(value)
AllTargetsFloatValuesTest.append(Class)
previous = value
ArrayDataResultsTest = pd.DataFrame.from_dict(DataResultsTest)
XDataTest, yDataTest = ArrayDataResultsTest, AllTargetsFloatValuesTest
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
target_names.append(value)
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
target_names.append(value)
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
warnings.simplefilter('ignore')
return 'Everything is okay'
def callPreResults():
global XData
global yData
global target_names
global impDataInst
DataSpaceResMDS = FunMDS(XData)
DataSpaceResTSNE = FunTsne(XData)
DataSpaceResTSNE = DataSpaceResTSNE.tolist()
DataSpaceUMAP = FunUMAP(XData)
XDataJSONEntireSetRes = XData.to_json(orient='records')
global preResults
preResults = []
preResults.append(json.dumps(target_names)) # Position: 0
preResults.append(json.dumps(DataSpaceResMDS)) # Position: 1
preResults.append(json.dumps(XDataJSONEntireSetRes)) # Position: 2
preResults.append(json.dumps(yData)) # Position: 3
preResults.append(json.dumps(AllTargets)) # Position: 4
preResults.append(json.dumps(DataSpaceResTSNE)) # Position: 5
preResults.append(json.dumps(DataSpaceUMAP)) # Position: 6
preResults.append(json.dumps(impDataInst)) # Position: 7
# Sending each model's results to frontend
@app.route('/data/requestDataSpaceResults', methods=["GET", "POST"])
def SendDataSpaceResults():
global preResults
callPreResults()
response = {
'preDataResults': preResults,
}
return jsonify(response)
# Main function
if __name__ == '__main__':
app.run()
# Debugging and mirroring client
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path):
if app.debug:
return requests.get('http://localhost:8080/{}'.format(path)).text
return render_template("index.html")
# This block of code is for server computations
def column_index(df, query_cols):
cols = df.columns.values
sidx = np.argsort(cols)
return sidx[np.searchsorted(cols,query_cols,sorter=sidx)].tolist()
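# Illustrative sketch (assumed frame): column_index returns the positional indices of
# the requested columns, e.g. ['b', 'a'] -> [1, 0] for columns ['a', 'b', 'c'].
_example_positions = column_index(pd.DataFrame(columns=['a', 'b', 'c']), ['b', 'a'])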
def class_feature_importance(X, Y, feature_importances):
N, M = X.shape
X = scale(X)
out = {}
for c in set(Y):
out[c] = dict(
            zip(range(M), np.mean(X[Y==c, :], axis=0)*feature_importances)
)
return out
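# Illustrative sketch (assumed toy data): the helper above weights the scaled,
# class-wise feature means by the model's global feature importances, giving one
# importance value per feature for every class label.
_example_cfi = class_feature_importance(
    np.array([[0., 1.], [1., 0.], [1., 1.], [0., 0.]]),
    np.array([0, 0, 1, 1]),
    np.array([0.7, 0.3]))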
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/EnsembleMode', methods=["GET", "POST"])
def EnsembleMethod():
global crossValidation
global RANDOM_SEED
global XData
RANDOM_SEED = 42
RetrievedStatus = request.get_data().decode('utf8').replace("'", '"')
RetrievedStatus = json.loads(RetrievedStatus)
modeMethod = RetrievedStatus['defaultModeMain']
if (modeMethod == 'blend'):
crossValidation = ShuffleSplit(n_splits=1, test_size=.20, random_state=RANDOM_SEED)
else:
crossValidation = 5
return 'Okay'
# Initialize every model for each algorithm
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequestSelParameters', methods=["GET", "POST"])
def RetrieveModel():
# get the models from the frontend
RetrievedModel = request.get_data().decode('utf8').replace("'", '"')
RetrievedModel = json.loads(RetrievedModel)
global algorithms
algorithms = RetrievedModel['Algorithms']
toggle = RetrievedModel['Toggle']
global crossValidation
global XData
global yData
global SVCModelsCount
global GausNBModelsCount
global MLPModelsCount
global LRModelsCount
global LDAModelsCount
global QDAModelsCount
global RFModelsCount
global ExtraTModelsCount
global AdaBModelsCount
global GradBModelsCount
# loop through the algorithms
global allParametersPerformancePerModel
start = timeit.default_timer()
print('CVorTT', crossValidation)
for eachAlgor in algorithms:
if (eachAlgor) == 'KNN':
clf = KNeighborsClassifier()
params = {'n_neighbors': list(range(1, 25)), 'metric': ['chebyshev', 'manhattan', 'euclidean', 'minkowski'], 'algorithm': ['brute', 'kd_tree', 'ball_tree'], 'weights': ['uniform', 'distance']}
AlgorithmsIDsEnd = 0
elif (eachAlgor) == 'SVC':
clf = SVC(probability=True,random_state=RANDOM_SEED)
params = {'C': list(np.arange(0.1,4.43,0.11)), 'kernel': ['rbf','linear', 'poly', 'sigmoid']}
AlgorithmsIDsEnd = SVCModelsCount
elif (eachAlgor) == 'GauNB':
clf = GaussianNB()
params = {'var_smoothing': list(np.arange(0.00000000001,0.0000001,0.0000000002))}
AlgorithmsIDsEnd = GausNBModelsCount
elif (eachAlgor) == 'MLP':
clf = MLPClassifier(random_state=RANDOM_SEED)
params = {'alpha': list(np.arange(0.00001,0.001,0.0002)), 'tol': list(np.arange(0.00001,0.001,0.0004)), 'max_iter': list(np.arange(100,200,100)), 'activation': ['relu', 'identity', 'logistic', 'tanh'], 'solver' : ['adam', 'sgd']}
AlgorithmsIDsEnd = MLPModelsCount
elif (eachAlgor) == 'LR':
clf = LogisticRegression(random_state=RANDOM_SEED)
params = {'C': list(np.arange(0.5,2,0.075)), 'max_iter': list(np.arange(50,250,50)), 'solver': ['lbfgs', 'newton-cg', 'sag', 'saga'], 'penalty': ['l2', 'none']}
AlgorithmsIDsEnd = LRModelsCount
elif (eachAlgor) == 'LDA':
clf = LinearDiscriminantAnalysis()
params = {'shrinkage': list(np.arange(0,1,0.01)), 'solver': ['lsqr', 'eigen']}
AlgorithmsIDsEnd = LDAModelsCount
elif (eachAlgor) == 'QDA':
clf = QuadraticDiscriminantAnalysis()
params = {'reg_param': list(np.arange(0,1,0.02)), 'tol': list(np.arange(0.00001,0.001,0.0002))}
AlgorithmsIDsEnd = QDAModelsCount
elif (eachAlgor) == 'RF':
clf = RandomForestClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = RFModelsCount
elif (eachAlgor) == 'ExtraT':
clf = ExtraTreesClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = ExtraTModelsCount
elif (eachAlgor) == 'AdaB':
clf = AdaBoostClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(40, 80)), 'learning_rate': list(np.arange(0.1,2.3,1.1)), 'algorithm': ['SAMME.R', 'SAMME']}
AlgorithmsIDsEnd = AdaBModelsCount
else:
clf = GradientBoostingClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(85, 115)), 'learning_rate': list(np.arange(0.01,0.23,0.11)), 'criterion': ['friedman_mse', 'mse', 'mae']}
AlgorithmsIDsEnd = GradBModelsCount
allParametersPerformancePerModel = GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd, toggle, crossValidation)
# New visualization - model space
# header = "model_id,algorithm_id,mean_test_accuracy,mean_test_precision_micro,mean_test_precision_macro,mean_test_precision_weighted,mean_test_recall_micro,mean_test_recall_macro,mean_test_recall_weighted,mean_test_roc_auc_ovo_weighted,geometric_mean_score_micro,geometric_mean_score_macro,geometric_mean_score_weighted,matthews_corrcoef,f5_micro,f5_macro,f5_weighted,f1_micro,f1_macro,f1_weighted,f2_micro,f2_macro,f2_weighted,log_loss\n"
# dataReceived = []
# counter = 0
# for indx, el in enumerate(allParametersPerformancePerModel):
# dictFR = json.loads(el)
# frame = pd.DataFrame.from_dict(dictFR)
# for ind, elInside in frame.iterrows():
# counter = counter + 1
# dataReceived.append(str(counter))
# dataReceived.append(',')
# dataReceived.append(str(indx+1))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_accuracy']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_roc_auc_ovo_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['matthews_corrcoef']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['log_loss']))
# dataReceived.append("\n")
# dataReceivedItems = ''.join(dataReceived)
# csvString = header + dataReceivedItems
# fw = open ("modelSpace.csv","w+",encoding="utf-8")
# fw.write(csvString)
# fw.close()
# call the function that sends the results to the frontend
stop = timeit.default_timer()
print('Time GridSearch: ', stop - start)
SendEachClassifiersPerformanceToVisualize()
return 'Everything Okay'
location = './cachedir'
memory = Memory(location, verbose=0)
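# Illustrative sketch (assumed toy function): joblib's Memory caches return values on
# disk under ./cachedir, so repeated grid searches with identical arguments are reused.
@memory.cache
def _example_cached_square(x):
    return x * x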
# calculate the performance and related results for every model of each algorithm
@memory.cache
def GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd, toggle, crossVal):
print('loop')
# this is the grid we use to train the models
grid = GridSearchCV(
estimator=clf, param_grid=params,
cv=crossVal, refit='accuracy', scoring=scoring,
verbose=0, n_jobs=-1)
# fit and extract the probabilities
grid.fit(XData, yData)
# process the results
cv_results = []
cv_results.append(grid.cv_results_)
df_cv_results = pd.DataFrame.from_dict(cv_results)
# number of models stored
number_of_models = len(df_cv_results.iloc[0][0])
# initialize results per row
df_cv_results_per_row = []
# loop through number of models
modelsIDs = []
for i in range(number_of_models):
modelsIDs.append(AlgorithmsIDsEnd+i)
# initialize results per item
df_cv_results_per_item = []
for column in df_cv_results.iloc[0]:
df_cv_results_per_item.append(column[i])
df_cv_results_per_row.append(df_cv_results_per_item)
# store the results into a pandas dataframe
df_cv_results_classifiers = pd.DataFrame(data = df_cv_results_per_row, columns= df_cv_results.columns)
# copy and filter in order to get only the metrics
metrics = df_cv_results_classifiers.copy()
metrics = metrics.filter(['mean_test_accuracy','mean_test_precision_micro','mean_test_precision_macro','mean_test_precision_weighted','mean_test_recall_micro','mean_test_recall_macro','mean_test_recall_weighted','mean_test_roc_auc_ovo_weighted'])
# concat parameters and performance
parametersPerformancePerModel = pd.DataFrame(df_cv_results_classifiers['params'])
parametersPerformancePerModel = parametersPerformancePerModel.to_json()
parametersLocal = json.loads(parametersPerformancePerModel)['params'].copy()
Models = []
for index, items in enumerate(parametersLocal):
Models.append(str(index))
parametersLocalNew = [ parametersLocal[your_key] for your_key in Models ]
permList = []
PerFeatureAccuracy = []
PerFeatureAccuracyAll = []
PerClassMetric = []
perModelProb = []
perModelPrediction = []
resultsMicro = []
resultsMacro = []
resultsWeighted = []
resultsCorrCoef = []
resultsMicroBeta5 = []
resultsMacroBeta5 = []
resultsWeightedBeta5 = []
resultsMicroBeta1 = []
resultsMacroBeta1 = []
resultsWeightedBeta1 = []
resultsMicroBeta2 = []
resultsMacroBeta2 = []
resultsWeightedBeta2 = []
resultsLogLoss = []
resultsLogLossFinal = []
loop = 8
# influence calculation for all the instances
inputs = range(len(XData))
num_cores = multiprocessing.cpu_count()
#impDataInst = Parallel(n_jobs=num_cores)(delayed(processInput)(i,XData,yData,crossValidation,clf) for i in inputs)
for eachModelParameters in parametersLocalNew:
clf.set_params(**eachModelParameters)
if (toggle == 1):
perm = PermutationImportance(clf, cv = None, refit = True, n_iter = 25).fit(XData, yData)
permList.append(perm.feature_importances_)
n_feats = XData.shape[1]
PerFeatureAccuracy = []
for i in range(n_feats):
scores = model_selection.cross_val_score(clf, XData.values[:, i].reshape(-1, 1), yData, cv=5)
PerFeatureAccuracy.append(scores.mean())
PerFeatureAccuracyAll.append(PerFeatureAccuracy)
else:
permList.append(0)
PerFeatureAccuracyAll.append(0)
clf.fit(XData, yData)
yPredict = clf.predict(XData)
yPredict = np.nan_to_num(yPredict)
perModelPrediction.append(yPredict)
# retrieve target names (class names)
PerClassMetric.append(classification_report(yData, yPredict, target_names=target_names, digits=2, output_dict=True))
yPredictProb = clf.predict_proba(XData)
yPredictProb = np.nan_to_num(yPredictProb)
perModelProb.append(yPredictProb.tolist())
resultsMicro.append(geometric_mean_score(yData, yPredict, average='micro'))
resultsMacro.append(geometric_mean_score(yData, yPredict, average='macro'))
resultsWeighted.append(geometric_mean_score(yData, yPredict, average='weighted'))
resultsCorrCoef.append(matthews_corrcoef(yData, yPredict))
resultsMicroBeta5.append(fbeta_score(yData, yPredict, average='micro', beta=0.5))
resultsMacroBeta5.append(fbeta_score(yData, yPredict, average='macro', beta=0.5))
resultsWeightedBeta5.append(fbeta_score(yData, yPredict, average='weighted', beta=0.5))
resultsMicroBeta1.append(fbeta_score(yData, yPredict, average='micro', beta=1))
resultsMacroBeta1.append(fbeta_score(yData, yPredict, average='macro', beta=1))
resultsWeightedBeta1.append(fbeta_score(yData, yPredict, average='weighted', beta=1))
resultsMicroBeta2.append(fbeta_score(yData, yPredict, average='micro', beta=2))
resultsMacroBeta2.append(fbeta_score(yData, yPredict, average='macro', beta=2))
resultsWeightedBeta2.append(fbeta_score(yData, yPredict, average='weighted', beta=2))
resultsLogLoss.append(log_loss(yData, yPredictProb, normalize=True))
maxLog = max(resultsLogLoss)
minLog = min(resultsLogLoss)
for each in resultsLogLoss:
resultsLogLossFinal.append((each-minLog)/(maxLog-minLog))
metrics.insert(loop,'geometric_mean_score_micro',resultsMicro)
metrics.insert(loop+1,'geometric_mean_score_macro',resultsMacro)
metrics.insert(loop+2,'geometric_mean_score_weighted',resultsWeighted)
metrics.insert(loop+3,'matthews_corrcoef',resultsCorrCoef)
metrics.insert(loop+4,'f5_micro',resultsMicroBeta5)
metrics.insert(loop+5,'f5_macro',resultsMacroBeta5)
metrics.insert(loop+6,'f5_weighted',resultsWeightedBeta5)
metrics.insert(loop+7,'f1_micro',resultsMicroBeta1)
metrics.insert(loop+8,'f1_macro',resultsMacroBeta1)
metrics.insert(loop+9,'f1_weighted',resultsWeightedBeta1)
metrics.insert(loop+10,'f2_micro',resultsMicroBeta2)
metrics.insert(loop+11,'f2_macro',resultsMacroBeta2)
metrics.insert(loop+12,'f2_weighted',resultsWeightedBeta2)
metrics.insert(loop+13,'log_loss',resultsLogLossFinal)
perModelPredPandas = pd.DataFrame(perModelPrediction)
perModelPredPandas = perModelPredPandas.to_json()
perModelProbPandas = pd.DataFrame(perModelProb)
perModelProbPandas = perModelProbPandas.to_json()
PerClassMetricPandas = pd.DataFrame(PerClassMetric)
del PerClassMetricPandas['accuracy']
del PerClassMetricPandas['macro avg']
del PerClassMetricPandas['weighted avg']
PerClassMetricPandas = PerClassMetricPandas.to_json()
perm_imp_eli5PD = pd.DataFrame(permList)
perm_imp_eli5PD = perm_imp_eli5PD.to_json()
PerFeatureAccuracyPandas = pd.DataFrame(PerFeatureAccuracyAll)
PerFeatureAccuracyPandas = PerFeatureAccuracyPandas.to_json()
bestfeatures = SelectKBest(score_func=chi2, k='all')
fit = bestfeatures.fit(XData,yData)
dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(XData.columns)
featureScores = pd.concat([dfcolumns,dfscores],axis=1)
featureScores.columns = ['Specs','Score'] #naming the dataframe columns
featureScores = featureScores.to_json()
# gather the results and send them back
results.append(modelsIDs) # Position: 0 and so on
results.append(parametersPerformancePerModel) # Position: 1 and so on
results.append(PerClassMetricPandas) # Position: 2 and so on
results.append(PerFeatureAccuracyPandas) # Position: 3 and so on
results.append(perm_imp_eli5PD) # Position: 4 and so on
results.append(featureScores) # Position: 5 and so on
metrics = metrics.to_json()
results.append(metrics) # Position: 6 and so on
results.append(perModelProbPandas) # Position: 7 and so on
results.append(json.dumps(perModelPredPandas)) # Position: 8 and so on
return results
# Sending each model's results to frontend
@app.route('/data/PerformanceForEachModel', methods=["GET", "POST"])
def SendEachClassifiersPerformanceToVisualize():
response = {
'PerformancePerModel': allParametersPerformancePerModel,
}
return jsonify(response)
def Remove(duplicate):
final_list = []
for num in duplicate:
if num not in final_list:
if (isinstance(num, float)):
if np.isnan(num):
pass
else:
final_list.append(float(num))
else:
final_list.append(num)
return final_list
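# Illustrative sketch (assumed input): Remove de-duplicates while dropping NaNs and
# preserving first-occurrence order, e.g. [1.0, np.nan, 1.0, 'a'] -> [1.0, 'a'].
_example_deduped = Remove([1.0, np.nan, 1.0, 'a'])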
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendBrushedParam', methods=["GET", "POST"])
def RetrieveModelsParam():
RetrieveModelsPar = request.get_data().decode('utf8').replace("'", '"')
RetrieveModelsPar = json.loads(RetrieveModelsPar)
counterKNN = 0
counterSVC = 0
counterGausNB = 0
counterMLP = 0
counterLR = 0
counterLDA = 0
counterQDA = 0
counterRF = 0
counterExtraT = 0
counterAdaB = 0
counterGradB = 0
global KNNModels
global SVCModels
global GausNBModels
global MLPModels
global LRModels
global LDAModels
global QDAModels
global RFModels
global ExtraTModels
global AdaBModels
global GradBModels
global algorithmsList
algorithmsList = RetrieveModelsPar['algorithms']
for index, items in enumerate(algorithmsList):
if (items == 'KNN'):
counterKNN += 1
KNNModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'SVC'):
counterSVC += 1
SVCModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'GauNB'):
counterGausNB += 1
GausNBModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'MLP'):
counterMLP += 1
MLPModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'LR'):
counterLR += 1
LRModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'LDA'):
counterLDA += 1
LDAModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'QDA'):
counterQDA += 1
QDAModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'RF'):
counterRF += 1
RFModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'ExtraT'):
counterExtraT += 1
ExtraTModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'AdaB'):
counterAdaB += 1
AdaBModels.append(int(RetrieveModelsPar['models'][index]))
else:
counterGradB += 1
GradBModels.append(int(RetrieveModelsPar['models'][index]))
return 'Everything Okay'
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/factors', methods=["GET", "POST"])
def RetrieveFactors():
global factors
global allParametersPerformancePerModel
Factors = request.get_data().decode('utf8').replace("'", '"')
FactorsInt = json.loads(Factors)
factors = FactorsInt['Factors']
# this is if we want to change the factors before running the search
#if (len(allParametersPerformancePerModel) == 0):
# pass
#else:
global sumPerClassifierSel
global ModelSpaceMDSNew
global ModelSpaceTSNENew
global metricsPerModel
sumPerClassifierSel = []
sumPerClassifierSel = preProcsumPerMetric(factors)
ModelSpaceMDSNew = []
ModelSpaceTSNENew = []
loopThroughMetrics = PreprocessingMetrics()
loopThroughMetrics = loopThroughMetrics.fillna(0)
metricsPerModel = preProcMetricsAllAndSel()
flagLocal = 0
countRemovals = 0
for l,el in enumerate(factors):
if el == 0:
loopThroughMetrics.drop(loopThroughMetrics.columns[[l-countRemovals]], axis=1, inplace=True)
countRemovals = countRemovals + 1
flagLocal = 1
if flagLocal == 1:
ModelSpaceMDSNew = FunMDS(loopThroughMetrics)
ModelSpaceTSNENew = FunTsne(loopThroughMetrics)
ModelSpaceTSNENew = ModelSpaceTSNENew.tolist()
return 'Everything Okay'
@app.route('/data/UpdateOverv', methods=["GET", "POST"])
def UpdateOverview():
ResultsUpdateOverview = []
ResultsUpdateOverview.append(sumPerClassifierSel)
ResultsUpdateOverview.append(ModelSpaceMDSNew)
ResultsUpdateOverview.append(ModelSpaceTSNENew)
ResultsUpdateOverview.append(metricsPerModel)
response = {
'Results': ResultsUpdateOverview
}
return jsonify(response)
def PreprocessingMetrics():
dicKNN = json.loads(allParametersPerformancePerModel[6])
dicSVC = json.loads(allParametersPerformancePerModel[15])
dicGausNB = json.loads(allParametersPerformancePerModel[24])
dicMLP = json.loads(allParametersPerformancePerModel[33])
dicLR = json.loads(allParametersPerformancePerModel[42])
dicLDA = json.loads(allParametersPerformancePerModel[51])
dicQDA = json.loads(allParametersPerformancePerModel[60])
dicRF = json.loads(allParametersPerformancePerModel[69])
dicExtraT = json.loads(allParametersPerformancePerModel[78])
dicAdaB = json.loads(allParametersPerformancePerModel[87])
dicGradB = json.loads(allParametersPerformancePerModel[96])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatMetrics = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_concatMetrics
def PreprocessingPred():
dicKNN = json.loads(allParametersPerformancePerModel[7])
dicSVC = json.loads(allParametersPerformancePerModel[16])
dicGausNB = json.loads(allParametersPerformancePerModel[25])
dicMLP = json.loads(allParametersPerformancePerModel[34])
dicLR = json.loads(allParametersPerformancePerModel[43])
dicLDA = json.loads(allParametersPerformancePerModel[52])
dicQDA = json.loads(allParametersPerformancePerModel[61])
dicRF = json.loads(allParametersPerformancePerModel[70])
dicExtraT = json.loads(allParametersPerformancePerModel[79])
dicAdaB = json.loads(allParametersPerformancePerModel[88])
dicGradB = json.loads(allParametersPerformancePerModel[97])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatProbs = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
predictions = []
for column, content in df_concatProbs.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictions.append(el)
return predictions
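# Illustrative sketch (assumed probabilities): the soft-voting step above averages the
# per-class probability vectors of the selected models for each instance.
_example_member_probs = [[0.2, 0.8], [0.4, 0.6]]
_example_soft_vote = [sum(x) / len(x) for x in zip(*_example_member_probs)]
# -> [0.3, 0.7] (up to floating-point rounding)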
def PreprocessingPredUpdate(Models):
Models = json.loads(Models)
ModelsList= []
for loop in Models['ClassifiersList']:
ModelsList.append(loop)
dicKNN = json.loads(allParametersPerformancePerModel[7])
dicSVC = json.loads(allParametersPerformancePerModel[16])
dicGausNB = json.loads(allParametersPerformancePerModel[25])
dicMLP = json.loads(allParametersPerformancePerModel[34])
dicLR = json.loads(allParametersPerformancePerModel[43])
dicLDA = json.loads(allParametersPerformancePerModel[52])
dicQDA = json.loads(allParametersPerformancePerModel[61])
dicRF = json.loads(allParametersPerformancePerModel[70])
dicExtraT = json.loads(allParametersPerformancePerModel[79])
dicAdaB = json.loads(allParametersPerformancePerModel[88])
dicGradB = json.loads(allParametersPerformancePerModel[97])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatProbs = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
listProbs = df_concatProbs.index.values.tolist()
deletedElements = 0
for index, element in enumerate(listProbs):
if element in ModelsList:
index = index - deletedElements
df_concatProbs = df_concatProbs.drop(df_concatProbs.index[index])
deletedElements = deletedElements + 1
df_concatProbsCleared = df_concatProbs
listIDsRemoved = df_concatProbsCleared.index.values.tolist()
predictionsAll = PreprocessingPred()
PredictionSpaceAll = FunMDS(predictionsAll)
PredictionSpaceAllComb = [list(a) for a in zip(PredictionSpaceAll[0], PredictionSpaceAll[1])]
predictionsSel = []
for column, content in df_concatProbsCleared.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictionsSel.append(el)
PredictionSpaceSel = FunMDS(predictionsSel)
PredictionSpaceSelComb = [list(a) for a in zip(PredictionSpaceSel[0], PredictionSpaceSel[1])]
mtx2PredFinal = []
mtx2Pred, mtx2Pred, disparityPred = procrustes(PredictionSpaceAllComb, PredictionSpaceSelComb)
a1, b1 = zip(*mtx2Pred)
mtx2PredFinal.append(a1)
mtx2PredFinal.append(b1)
return [mtx2PredFinal,listIDsRemoved]
def PreprocessingParam():
dicKNN = json.loads(allParametersPerformancePerModel[1])
dicSVC = json.loads(allParametersPerformancePerModel[10])
dicGausNB = json.loads(allParametersPerformancePerModel[19])
dicMLP = json.loads(allParametersPerformancePerModel[28])
dicLR = json.loads(allParametersPerformancePerModel[37])
dicLDA = json.loads(allParametersPerformancePerModel[46])
dicQDA = json.loads(allParametersPerformancePerModel[55])
dicRF = json.loads(allParametersPerformancePerModel[64])
dicExtraT = json.loads(allParametersPerformancePerModel[73])
dicAdaB = json.loads(allParametersPerformancePerModel[82])
dicGradB = json.loads(allParametersPerformancePerModel[91])
dicKNN = dicKNN['params']
dicSVC = dicSVC['params']
dicGausNB = dicGausNB['params']
dicMLP = dicMLP['params']
dicLR = dicLR['params']
dicLDA = dicLDA['params']
dicQDA = dicQDA['params']
dicRF = dicRF['params']
dicExtraT = dicExtraT['params']
dicAdaB = dicAdaB['params']
dicGradB = dicGradB['params']
dicKNN = {int(k):v for k,v in dicKNN.items()}
dicSVC = {int(k):v for k,v in dicSVC.items()}
dicGausNB = {int(k):v for k,v in dicGausNB.items()}
dicMLP = {int(k):v for k,v in dicMLP.items()}
dicLR = {int(k):v for k,v in dicLR.items()}
dicLDA = {int(k):v for k,v in dicLDA.items()}
dicQDA = {int(k):v for k,v in dicQDA.items()}
dicRF = {int(k):v for k,v in dicRF.items()}
dicExtraT = {int(k):v for k,v in dicExtraT.items()}
dicAdaB = {int(k):v for k,v in dicAdaB.items()}
dicGradB = {int(k):v for k,v in dicGradB.items()}
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN = dfKNN.T
dfSVC = dfSVC.T
dfGausNB = dfGausNB.T
dfMLP = dfMLP.T
dfLR = dfLR.T
dfLDA = dfLDA.T
dfQDA = dfQDA.T
dfRF = dfRF.T
dfExtraT = dfExtraT.T
dfAdaB = dfAdaB.T
dfGradB = dfGradB.T
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_params = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_params
def PreprocessingParamSep():
dicKNN = json.loads(allParametersPerformancePerModel[1])
dicSVC = json.loads(allParametersPerformancePerModel[10])
dicGausNB = json.loads(allParametersPerformancePerModel[19])
dicMLP = json.loads(allParametersPerformancePerModel[28])
dicLR = json.loads(allParametersPerformancePerModel[37])
dicLDA = json.loads(allParametersPerformancePerModel[46])
dicQDA = json.loads(allParametersPerformancePerModel[55])
dicRF = json.loads(allParametersPerformancePerModel[64])
dicExtraT = json.loads(allParametersPerformancePerModel[73])
dicAdaB = json.loads(allParametersPerformancePerModel[82])
dicGradB = json.loads(allParametersPerformancePerModel[91])
dicKNN = dicKNN['params']
dicSVC = dicSVC['params']
dicGausNB = dicGausNB['params']
dicMLP = dicMLP['params']
dicLR = dicLR['params']
dicLDA = dicLDA['params']
dicQDA = dicQDA['params']
dicRF = dicRF['params']
dicExtraT = dicExtraT['params']
dicAdaB = dicAdaB['params']
dicGradB = dicGradB['params']
dicKNN = {int(k):v for k,v in dicKNN.items()}
dicSVC = {int(k):v for k,v in dicSVC.items()}
dicGausNB = {int(k):v for k,v in dicGausNB.items()}
dicMLP = {int(k):v for k,v in dicMLP.items()}
dicLR = {int(k):v for k,v in dicLR.items()}
dicLDA = {int(k):v for k,v in dicLDA.items()}
dicQDA = {int(k):v for k,v in dicQDA.items()}
dicRF = {int(k):v for k,v in dicRF.items()}
dicExtraT = {int(k):v for k,v in dicExtraT.items()}
dicAdaB = {int(k):v for k,v in dicAdaB.items()}
dicGradB = {int(k):v for k,v in dicGradB.items()}
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN = dfKNN.T
dfSVC = dfSVC.T
dfGausNB = dfGausNB.T
dfMLP = dfMLP.T
dfLR = dfLR.T
dfLDA = dfLDA.T
dfQDA = dfQDA.T
dfRF = dfRF.T
dfExtraT = dfExtraT.T
dfAdaB = dfAdaB.T
dfGradB = dfGradB.T
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
return [dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered]
def preProcessPerClassM():
dicKNN = json.loads(allParametersPerformancePerModel[2])
dicSVC = json.loads(allParametersPerformancePerModel[11])
dicGausNB = json.loads(allParametersPerformancePerModel[20])
dicMLP = json.loads(allParametersPerformancePerModel[29])
dicLR = json.loads(allParametersPerformancePerModel[38])
dicLDA = json.loads(allParametersPerformancePerModel[47])
dicQDA = json.loads(allParametersPerformancePerModel[56])
dicRF = json.loads(allParametersPerformancePerModel[65])
dicExtraT = json.loads(allParametersPerformancePerModel[74])
dicAdaB = json.loads(allParametersPerformancePerModel[83])
dicGradB = json.loads(allParametersPerformancePerModel[92])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatParams = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_concatParams
def preProcessFeatAcc():
dicKNN = json.loads(allParametersPerformancePerModel[3])
dicSVC = json.loads(allParametersPerformancePerModel[12])
dicGausNB = json.loads(allParametersPerformancePerModel[21])
dicMLP = json.loads(allParametersPerformancePerModel[30])
dicLR = json.loads(allParametersPerformancePerModel[39])
dicLDA = json.loads(allParametersPerformancePerModel[48])
dicQDA = json.loads(allParametersPerformancePerModel[57])
dicRF = json.loads(allParametersPerformancePerModel[66])
dicExtraT = json.loads(allParametersPerformancePerModel[75])
dicAdaB = json.loads(allParametersPerformancePerModel[84])
dicGradB = json.loads(allParametersPerformancePerModel[93])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_featAcc = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_featAcc
def preProcessPerm():
dicKNN = json.loads(allParametersPerformancePerModel[4])
dicSVC = json.loads(allParametersPerformancePerModel[13])
dicGausNB = json.loads(allParametersPerformancePerModel[22])
dicMLP = json.loads(allParametersPerformancePerModel[31])
dicLR = json.loads(allParametersPerformancePerModel[40])
dicLDA = json.loads(allParametersPerformancePerModel[49])
dicQDA = json.loads(allParametersPerformancePerModel[58])
dicRF = json.loads(allParametersPerformancePerModel[67])
dicExtraT = json.loads(allParametersPerformancePerModel[76])
dicAdaB = json.loads(allParametersPerformancePerModel[85])
dicGradB = json.loads(allParametersPerformancePerModel[94])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_perm = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_perm
def preProcessFeatSc():
dicKNN = json.loads(allParametersPerformancePerModel[5])
dfKNN = pd.DataFrame.from_dict(dicKNN)
return dfKNN
# remove that maybe!
def preProcsumPerMetric(factors):
sumPerClassifier = []
loopThroughMetrics = PreprocessingMetrics()
loopThroughMetrics = loopThroughMetrics.fillna(0)
loopThroughMetrics.loc[:, 'log_loss'] = 1 - loopThroughMetrics.loc[:, 'log_loss']
for row in loopThroughMetrics.iterrows():
rowSum = 0
name, values = row
for loop, elements in enumerate(values):
rowSum = elements*factors[loop] + rowSum
if sum(factors) == 0:
sumPerClassifier = 0
else:
sumPerClassifier.append(rowSum/sum(factors) * 100)
return sumPerClassifier
def preProcMetricsAllAndSel():
loopThroughMetrics = PreprocessingMetrics()
loopThroughMetrics = loopThroughMetrics.fillna(0)
global factors
metricsPerModelColl = []
metricsPerModelColl.append(loopThroughMetrics['mean_test_accuracy'])
metricsPerModelColl.append(loopThroughMetrics['geometric_mean_score_micro'])
metricsPerModelColl.append(loopThroughMetrics['geometric_mean_score_macro'])
metricsPerModelColl.append(loopThroughMetrics['geometric_mean_score_weighted'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_precision_micro'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_precision_macro'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_precision_weighted'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_recall_micro'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_recall_macro'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_recall_weighted'])
metricsPerModelColl.append(loopThroughMetrics['f5_micro'])
metricsPerModelColl.append(loopThroughMetrics['f5_macro'])
metricsPerModelColl.append(loopThroughMetrics['f5_weighted'])
metricsPerModelColl.append(loopThroughMetrics['f1_micro'])
metricsPerModelColl.append(loopThroughMetrics['f1_macro'])
metricsPerModelColl.append(loopThroughMetrics['f1_weighted'])
metricsPerModelColl.append(loopThroughMetrics['f2_micro'])
metricsPerModelColl.append(loopThroughMetrics['f2_macro'])
metricsPerModelColl.append(loopThroughMetrics['f2_weighted'])
metricsPerModelColl.append(loopThroughMetrics['matthews_corrcoef'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_roc_auc_ovo_weighted'])
metricsPerModelColl.append(loopThroughMetrics['log_loss'])
f=lambda a: (abs(a)+a)/2
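    # Note (hedged reading of the list built above): index 19 is matthews_corrcoef,
    # clipped to non-negative values by f = (|a|+a)/2, and index 21 is log_loss,
    # flipped to (1 - value) so that, like the other metrics, larger weighted scores are better.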
for index, metric in enumerate(metricsPerModelColl):
if (index == 19):
metricsPerModelColl[index] = ((f(metric))*factors[index]) * 100
elif (index == 21):
metricsPerModelColl[index] = ((1 - metric)*factors[index] ) * 100
else:
metricsPerModelColl[index] = (metric*factors[index]) * 100
metricsPerModelColl[index] = metricsPerModelColl[index].to_json()
return metricsPerModelColl
def preProceModels():
models = KNNModels + SVCModels + GausNBModels + MLPModels + LRModels + LDAModels + QDAModels + RFModels + ExtraTModels + AdaBModels + GradBModels
return models
def FunMDS (data):
mds = MDS(n_components=2, random_state=RANDOM_SEED)
XTransformed = mds.fit_transform(data).T
XTransformed = XTransformed.tolist()
return XTransformed
def FunTsne (data):
tsne = TSNE(n_components=2, random_state=RANDOM_SEED).fit_transform(data)
tsne.shape
return tsne
def FunUMAP (data):
trans = umap.UMAP(n_neighbors=15, random_state=RANDOM_SEED).fit(data)
Xpos = trans.embedding_[:, 0].tolist()
Ypos = trans.embedding_[:, 1].tolist()
return [Xpos,Ypos]
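# Illustrative usage of the three projection helpers above (not part of the pipeline):
# FunMDS(X)  -> [[x0, x1, ...], [y0, y1, ...]]  (transposed 2-D MDS embedding as lists)
# FunTsne(X) -> ndarray of shape (n_samples, 2)
# FunUMAP(X) -> [Xpos, Ypos] lists taken from the fitted UMAP embedding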
def InitializeEnsemble():
XModels = PreprocessingMetrics()
global ModelSpaceMDS
global ModelSpaceTSNE
global allParametersPerformancePerModel
global impDataInst
XModels = XModels.fillna(0)
ModelSpaceMDS = FunMDS(XModels)
ModelSpaceTSNE = FunTsne(XModels)
ModelSpaceTSNE = ModelSpaceTSNE.tolist()
ModelSpaceUMAP = FunUMAP(XModels)
PredictionProbSel = PreprocessingPred()
PredictionSpaceMDS = FunMDS(PredictionProbSel)
PredictionSpaceTSNE = FunTsne(PredictionProbSel)
PredictionSpaceTSNE = PredictionSpaceTSNE.tolist()
PredictionSpaceUMAP = FunUMAP(PredictionProbSel)
ModelsIDs = preProceModels()
impDataInst = processDataInstance(ModelsIDs,allParametersPerformancePerModel)
callPreResults()
key = 0
EnsembleModel(ModelsIDs, key)
ReturnResults(ModelSpaceMDS,ModelSpaceTSNE,ModelSpaceUMAP,PredictionSpaceMDS,PredictionSpaceTSNE,PredictionSpaceUMAP)
def processDataInstance(ModelsIDs, allParametersPerformancePerModel):
dicKNN = json.loads(allParametersPerformancePerModel[8])
dicKNN = json.loads(dicKNN)
dicSVC = json.loads(allParametersPerformancePerModel[17])
dicSVC = json.loads(dicSVC)
dicGausNB = json.loads(allParametersPerformancePerModel[26])
dicGausNB = json.loads(dicGausNB)
dicMLP = json.loads(allParametersPerformancePerModel[35])
dicMLP = json.loads(dicMLP)
dicLR = json.loads(allParametersPerformancePerModel[44])
dicLR = json.loads(dicLR)
dicLDA = json.loads(allParametersPerformancePerModel[53])
dicLDA = json.loads(dicLDA)
dicQDA = json.loads(allParametersPerformancePerModel[62])
dicQDA = json.loads(dicQDA)
dicRF = json.loads(allParametersPerformancePerModel[71])
dicRF = json.loads(dicRF)
dicExtraT = json.loads(allParametersPerformancePerModel[80])
dicExtraT = json.loads(dicExtraT)
dicAdaB = json.loads(allParametersPerformancePerModel[89])
dicAdaB = json.loads(dicAdaB)
dicGradB = json.loads(allParametersPerformancePerModel[98])
dicGradB = json.loads(dicGradB)
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_connect = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
global yData
global filterActionFinal
global dataSpacePointsIDs
lengthDF = len(df_connect.columns)
if (filterActionFinal == 'compose'):
getList = []
for index, row in df_connect.iterrows():
yDataSelected = []
for column in row[dataSpacePointsIDs]:
yDataSelected.append(column)
storeMode = mode(yDataSelected)
getList.append(storeMode)
df_connect[str(lengthDF)] = getList
countCorrect = []
length = len(df_connect.index)
for index, element in enumerate(yData):
countTemp = 0
dfPart = df_connect[[str(index)]]
for indexdf, row in dfPart.iterrows():
if (int(row.values[0]) == int(element)):
countTemp += 1
countCorrect.append(1 - (countTemp/length))
return countCorrect
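# Note (hedged): the list returned above holds, for each training instance, the fraction
# of the currently selected models that misclassify it (0 = every model correct,
# 1 = every model wrong), which is later exposed as the per-instance importance data.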
def ReturnResults(ModelSpaceMDS,ModelSpaceTSNE,ModelSpaceUMAP,PredictionSpaceMDS,PredictionSpaceTSNE,PredictionSpaceUMAP):
global Results
global AllTargets
Results = []
parametersGen = PreprocessingParam()
PerClassMetrics = preProcessPerClassM()
FeatureAccuracy = preProcessFeatAcc()
perm_imp_eli5PDCon = preProcessPerm()
featureScoresCon = preProcessFeatSc()
metricsPerModel = preProcMetricsAllAndSel()
sumPerClassifier = preProcsumPerMetric(factors)
ModelsIDs = preProceModels()
parametersGenPD = parametersGen.to_json(orient='records')
PerClassMetrics = PerClassMetrics.to_json(orient='records')
FeatureAccuracy = FeatureAccuracy.to_json(orient='records')
perm_imp_eli5PDCon = perm_imp_eli5PDCon.to_json(orient='records')
featureScoresCon = featureScoresCon.to_json(orient='records')
XDataJSONEntireSet = XData.to_json(orient='records')
XDataJSON = XData.columns.tolist()
Results.append(json.dumps(sumPerClassifier)) # Position: 0
Results.append(json.dumps(ModelSpaceMDS)) # Position: 1
Results.append(json.dumps(parametersGenPD)) # Position: 2
Results.append(PerClassMetrics) # Position: 3
Results.append(json.dumps(target_names)) # Position: 4
Results.append(FeatureAccuracy) # Position: 5
Results.append(json.dumps(XDataJSON)) # Position: 6
Results.append(0) # Position: 7
Results.append(json.dumps(PredictionSpaceMDS)) # Position: 8
Results.append(json.dumps(metricsPerModel)) # Position: 9
Results.append(perm_imp_eli5PDCon) # Position: 10
Results.append(featureScoresCon) # Position: 11
Results.append(json.dumps(ModelSpaceTSNE)) # Position: 12
Results.append(json.dumps(ModelsIDs)) # Position: 13
Results.append(json.dumps(XDataJSONEntireSet)) # Position: 14
Results.append(json.dumps(yData)) # Position: 15
Results.append(json.dumps(AllTargets)) # Position: 16
Results.append(json.dumps(ModelSpaceUMAP)) # Position: 17
Results.append(json.dumps(PredictionSpaceTSNE)) # Position: 18
Results.append(json.dumps(PredictionSpaceUMAP)) # Position: 19
return Results
# Sending the overview classifiers' results to be visualized as a scatterplot
@app.route('/data/PlotClassifiers', methods=["GET", "POST"])
def SendToPlot():
while (len(DataResultsRaw) != DataRawLength):
pass
InitializeEnsemble()
response = {
'OverviewResults': Results
}
return jsonify(response)
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRemoveFromStack', methods=["GET", "POST"])
def RetrieveSelClassifiersIDandRemoveFromStack():
ClassifierIDsList = request.get_data().decode('utf8').replace("'", '"')
PredictionProbSelUpdate = PreprocessingPredUpdate(ClassifierIDsList)
global resultsUpdatePredictionSpace
resultsUpdatePredictionSpace = []
resultsUpdatePredictionSpace.append(json.dumps(PredictionProbSelUpdate[0])) # Position: 0
resultsUpdatePredictionSpace.append(json.dumps(PredictionProbSelUpdate[1]))
key = 3
EnsembleModel(ClassifierIDsList, key)
return 'Everything Okay'
# Sending the overview classifiers' results to be visualized as a scatterplot
@app.route('/data/UpdatePredictionsSpace', methods=["GET", "POST"])
def SendPredBacktobeUpdated():
response = {
'UpdatePredictions': resultsUpdatePredictionSpace
}
return jsonify(response)
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequestSelPoin', methods=["GET", "POST"])
def RetrieveSelClassifiersID():
ClassifierIDsList = request.get_data().decode('utf8').replace("'", '"')
#ComputeMetricsForSel(ClassifierIDsList)
ClassifierIDCleaned = json.loads(ClassifierIDsList)
global keySpecInternal
keySpecInternal = 1
keySpecInternal = ClassifierIDCleaned['keyNow']
EnsembleModel(ClassifierIDsList, 1)
return 'Everything Okay'
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequestSelPoinLocally', methods=["GET", "POST"])
def RetrieveSelClassifiersIDLocally():
ClassifierIDsList = request.get_data().decode('utf8').replace("'", '"')
ComputeMetricsForSel(ClassifierIDsList)
return 'Everything Okay'
def ComputeMetricsForSel(Models):
Models = json.loads(Models)
MetricsAlltoSel = PreprocessingMetrics()
listofModels = []
for loop in Models['ClassifiersList']:
listofModels.append(loop)
MetricsAlltoSel = MetricsAlltoSel.loc[listofModels,:]
global metricsPerModelCollSel
global factors
metricsPerModelCollSel = []
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_accuracy'])
metricsPerModelCollSel.append(MetricsAlltoSel['geometric_mean_score_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['geometric_mean_score_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['geometric_mean_score_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_precision_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_precision_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_precision_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_recall_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_recall_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_recall_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['f5_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f5_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f5_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['f1_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f1_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f1_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['f2_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f2_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f2_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['matthews_corrcoef'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_roc_auc_ovo_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['log_loss'])
f=lambda a: (abs(a)+a)/2
for index, metric in enumerate(metricsPerModelCollSel):
if (index == 19):
metricsPerModelCollSel[index] = ((f(metric))*factors[index]) * 100
elif (index == 21):
metricsPerModelCollSel[index] = (1 - metric)*factors[index] * 100
else:
metricsPerModelCollSel[index] = metric*factors[index] * 100
metricsPerModelCollSel[index] = metricsPerModelCollSel[index].to_json()
return 'okay'
# function to get unique values
def unique(list1):
    # initialize an empty list
unique_list = []
# traverse for all elements
for x in list1:
# check if exists in unique_list or not
if x not in unique_list:
unique_list.append(x)
return unique_list
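# Usage sketch (illustrative): unique([3, 1, 3, 2, 1]) -> [3, 1, 2]; it keeps first-seen
# order, equivalent to list(dict.fromkeys(list1)) on Python 3.7+.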
# Sending the overview classifiers' results to be visualized as a scatterplot
@app.route('/data/BarChartSelectedModels', methods=["GET", "POST"])
def SendToUpdateBarChart():
response = {
'SelectedMetricsForModels': metricsPerModelCollSel
}
return jsonify(response)
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequestDataPoint', methods=["GET", "POST"])
def RetrieveSelDataPoints():
DataPointsSel = request.get_data().decode('utf8').replace("'", '"')
DataPointsSelClear = json.loads(DataPointsSel)
listofDataPoints = []
for loop in DataPointsSelClear['DataPointsSel']:
temp = [int(s) for s in re.findall(r'\b\d+\b', loop)]
listofDataPoints.append(temp[0])
global algorithmsList
global resultsMetrics
resultsMetrics = []
df_concatMetrics = []
metricsSelList = []
paramsListSepPD = []
paramsListSepPD = PreprocessingParamSep()
paramsListSeptoDicKNN = paramsListSepPD[0].to_dict(orient='list')
paramsListSeptoDicSVC = paramsListSepPD[1].to_dict(orient='list')
paramsListSeptoDicGausNB = paramsListSepPD[2].to_dict(orient='list')
paramsListSeptoDicMLP = paramsListSepPD[3].to_dict(orient='list')
paramsListSeptoDicLR = paramsListSepPD[4].to_dict(orient='list')
paramsListSeptoDicLDA = paramsListSepPD[5].to_dict(orient='list')
paramsListSeptoDicQDA = paramsListSepPD[6].to_dict(orient='list')
paramsListSeptoDicRF = paramsListSepPD[7].to_dict(orient='list')
paramsListSeptoDicExtraT = paramsListSepPD[8].to_dict(orient='list')
paramsListSeptoDicAdaB = paramsListSepPD[9].to_dict(orient='list')
paramsListSeptoDicGradB = paramsListSepPD[10].to_dict(orient='list')
RetrieveParamsCleared = {}
RetrieveParamsClearedListKNN = []
for key, value in paramsListSeptoDicKNN.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListKNN.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListSVC = []
for key, value in paramsListSeptoDicSVC.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListSVC.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListGausNB = []
for key, value in paramsListSeptoDicGausNB.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListGausNB.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListMLP = []
for key, value in paramsListSeptoDicMLP.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListMLP.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListLR = []
for key, value in paramsListSeptoDicLR.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListLR.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListLDA = []
for key, value in paramsListSeptoDicLDA.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListLDA.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListQDA = []
for key, value in paramsListSeptoDicQDA.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListQDA.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListRF = []
for key, value in paramsListSeptoDicRF.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListRF.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListExtraT = []
for key, value in paramsListSeptoDicExtraT.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListExtraT.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListAdaB = []
for key, value in paramsListSeptoDicAdaB.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListAdaB.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListGradB = []
for key, value in paramsListSeptoDicGradB.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListGradB.append(RetrieveParamsCleared)
if (len(paramsListSeptoDicKNN['n_neighbors']) == 0):
RetrieveParamsClearedListKNN = []
if (len(paramsListSeptoDicSVC['C']) == 0):
RetrieveParamsClearedListSVC = []
if (len(paramsListSeptoDicGausNB['var_smoothing']) == 0):
RetrieveParamsClearedListGausNB = []
if (len(paramsListSeptoDicMLP['alpha']) == 0):
RetrieveParamsClearedListMLP = []
if (len(paramsListSeptoDicLR['C']) == 0):
RetrieveParamsClearedListLR = []
if (len(paramsListSeptoDicLDA['shrinkage']) == 0):
RetrieveParamsClearedListLDA = []
if (len(paramsListSeptoDicQDA['reg_param']) == 0):
RetrieveParamsClearedListQDA = []
if (len(paramsListSeptoDicRF['n_estimators']) == 0):
RetrieveParamsClearedListRF = []
if (len(paramsListSeptoDicExtraT['n_estimators']) == 0):
RetrieveParamsClearedListExtraT = []
if (len(paramsListSeptoDicAdaB['n_estimators']) == 0):
RetrieveParamsClearedListAdaB = []
if (len(paramsListSeptoDicGradB['n_estimators']) == 0):
RetrieveParamsClearedListGradB = []
for eachAlgor in algorithms:
if (eachAlgor) == 'KNN':
clf = KNeighborsClassifier()
params = RetrieveParamsClearedListKNN
AlgorithmsIDsEnd = 0
elif (eachAlgor) == 'SVC':
clf = SVC(probability=True,random_state=RANDOM_SEED)
params = RetrieveParamsClearedListSVC
AlgorithmsIDsEnd = SVCModelsCount
elif (eachAlgor) == 'GauNB':
clf = GaussianNB()
params = RetrieveParamsClearedListGausNB
AlgorithmsIDsEnd = GausNBModelsCount
elif (eachAlgor) == 'MLP':
clf = MLPClassifier(random_state=RANDOM_SEED)
params = RetrieveParamsClearedListMLP
AlgorithmsIDsEnd = MLPModelsCount
elif (eachAlgor) == 'LR':
clf = LogisticRegression(random_state=RANDOM_SEED)
params = RetrieveParamsClearedListLR
AlgorithmsIDsEnd = LRModelsCount
elif (eachAlgor) == 'LDA':
clf = LinearDiscriminantAnalysis()
params = RetrieveParamsClearedListLDA
AlgorithmsIDsEnd = LDAModelsCount
elif (eachAlgor) == 'QDA':
clf = QuadraticDiscriminantAnalysis()
params = RetrieveParamsClearedListQDA
AlgorithmsIDsEnd = QDAModelsCount
elif (eachAlgor) == 'RF':
clf = RandomForestClassifier(random_state=RANDOM_SEED)
params = RetrieveParamsClearedListRF
AlgorithmsIDsEnd = RFModelsCount
elif (eachAlgor) == 'ExtraT':
clf = ExtraTreesClassifier(random_state=RANDOM_SEED)
params = RetrieveParamsClearedListExtraT
AlgorithmsIDsEnd = ExtraTModelsCount
elif (eachAlgor) == 'AdaB':
clf = AdaBoostClassifier(random_state=RANDOM_SEED)
params = RetrieveParamsClearedListAdaB
AlgorithmsIDsEnd = AdaBModelsCount
else:
clf = GradientBoostingClassifier(random_state=RANDOM_SEED)
params = RetrieveParamsClearedListGradB
AlgorithmsIDsEnd = GradBModelsCount
metricsSelList = GridSearchSel(clf, params, factors, AlgorithmsIDsEnd, listofDataPoints, crossValidation)
if (len(metricsSelList[0]) != 0 and len(metricsSelList[1]) != 0 and len(metricsSelList[2]) != 0 and len(metricsSelList[3]) != 0 and len(metricsSelList[4]) != 0 and len(metricsSelList[5]) != 0 and len(metricsSelList[6]) != 0 and len(metricsSelList[7]) != 0 and len(metricsSelList[8]) != 0 and len(metricsSelList[9]) != 0 and len(metricsSelList[10]) != 0):
dicKNN = json.loads(metricsSelList[0])
dfKNN = pd.DataFrame.from_dict(dicKNN)
parametersSelDataPD = parametersSelData[0].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[0], paramsListSepPD[0]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfKNNCleared = dfKNN
else:
dfKNNCleared = dfKNN.drop(dfKNN.index[set_diff_df])
dicSVC = json.loads(metricsSelList[1])
dfSVC = pd.DataFrame.from_dict(dicSVC)
parametersSelDataPD = parametersSelData[1].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[1], paramsListSepPD[1]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfSVCCleared = dfSVC
else:
dfSVCCleared = dfSVC.drop(dfSVC.index[set_diff_df])
dicGausNB = json.loads(metricsSelList[2])
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
parametersSelDataPD = parametersSelData[2].apply(pd.Series)
        set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[2], paramsListSepPD[2]]).drop_duplicates(keep=False)
# -*- coding: utf-8 -*-
"""550fp_logistic_regression_baseline.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1StVfRN6HxYJ8x3UQ0kYBpkSL7hPAbGH5
"""
import pandas as pd
import nltk
import numpy as np
from nltk.stem import WordNetLemmatizer
from nltk.stem.porter import PorterStemmer
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
wordnet_lemmatizer = WordNetLemmatizer()
porter_stemmer = PorterStemmer()
import preprocessor as p
#from preprocessor.api import clean, tokenize, parse,set_options
import re
from string import punctuation
from nltk.tokenize.toktok import ToktokTokenizer
from nltk.tokenize import word_tokenize
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import KFold, cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report,confusion_matrix,accuracy_score
p.set_options(p.OPT.URL, p.OPT.EMOJI, p.OPT.MENTION, p.OPT.HASHTAG)
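# Illustrative effect of the options set above (the example input is made up):
# p.clean("Vaccine works! https://t.co/x #covid @user") -> "Vaccine works!"
# i.e. URLs, emojis, mentions and hashtags are stripped before further tokenization.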
df = pd.read_csv("data/Constraint_Train.csv")
val_df = pd.read_csv("data/Constraint_Val.csv")
test_df = pd.read_csv("data/Constraint_Test.csv")
"""Create tables of followup visits"""
import os
import sys
import argparse
import logging
from collections import namedtuple
from logging import debug, info, warning, error, critical
import contextlib
import sqlite3
import datetime
import pandas as pd
# constants
# exception classes
# interface functions
def overwrite_schedule(reference_schedule, replacement_sequences, gap=120):
"""Create a new schedule, replacing visits in a reference schedule.
Args:
- reference_schedule :: a pandas.DataFrame with the original visits
- replacements :: pandas.GroupBy of replacement visit sequences
- gap :: gap duration (in seconds) between reference
and overwrite visits
"""
prior_clock = 0
subsequences = []
for _, replacement_sequence in replacement_sequences:
seq_start_clock = replacement_sequence.observationStartTime.min()
info("Overwriting with replacement sequence starting at "
+ datetime.datetime.fromtimestamp(seq_start_clock).isoformat())
# Find the start time of the next replacement sequence
replacement_start_clock = (replacement_sequence
.observationStartTime
.min())
# Find visits in the reference schedule between the end of the previous
# replacement sequence and the start of the next one, and add them
# to the list of sequences if there are any.
ref_subset = reference_schedule.query(
f'(observationStartTime > {prior_clock+gap}) and ((observationStartTime+visitTime+{gap})<{replacement_start_clock})')
if len(ref_subset) > 0:
subsequences.append(ref_subset)
# Actually add the replacement sequence
subsequences.append(replacement_sequence)
# Record the end for use in determining the next window.
prior_clock = (subsequences[-1]
.eval('observationStartTime+visitTime')
.max())
ref_subset = reference_schedule.query(
f'(observationStartTime > {prior_clock+gap})')
if len(ref_subset) > 0:
subsequences.append(ref_subset)
info("Combining %d subsequences" % len(subsequences))
if len(subsequences) >= 48:
group_size = int(len(subsequences)/48)
else:
group_size = 10
if len(subsequences) > group_size:
all_visits = subsequences[0]
subsequences = subsequences[1:]
while len(subsequences) > group_size:
new_subseqs = subsequences[:group_size]
subsequences = subsequences[group_size:]
all_visits = pd.concat([all_visits] + new_subseqs)
info("all_visits now %d long", len(all_visits))
if len(subsequences) > 0:
all_visits = pd.concat([all_visits] + subsequences)
    else:
        all_visits = pd.concat(subsequences, sort=True)
    return all_visits
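# Minimal usage sketch (names are illustrative, not from this module): given a reference
# schedule DataFrame and replacement visits grouped by sequence, the combined schedule
# could be built as
#   combined = overwrite_schedule(reference, replacements.groupby('sequence_id'), gap=120)
# assuming both frames carry observationStartTime and visitTime columns.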
import os
import numpy as np
import pandas as pd
import logging
import logging.config
import yaml
import psycopg2
from faker.providers.person.en import Provider
from sqlalchemy import create_engine
from dotenv import load_dotenv, find_dotenv
load_dotenv()
if __name__ == "__main__":
username = os.environ.get("USERNAME")
password = os.environ.get("PASSWORD")
host = os.environ.get("HOST")
port = os.environ.get("PORT")
database = os.environ.get("DATABASE")
path_yaml = os.environ.get("PATH_YAML")
with open(path_yaml, 'r') as f:
config = yaml.safe_load(f.read())
logging.config.dictConfig(config)
logger = logging.getLogger(__name__)
table = "tb_user"
conn_string=f'postgresql://{username}:{password}@{host}:{port}/{database}'
def random_names(name_type, size):
"""
Generate n-length ndarray of person names.
name_type: a string, either first_names or last_names
"""
names = getattr(Provider, name_type)
return np.random.choice(names, size=size)
def random_genders(size, p=None):
"""Generate n-length ndarray of genders."""
if not p:
# default probabilities
p = (0.49, 0.49, 0.01, 0.01)
gender = ("M", "F", "O", "")
return np.random.choice(gender, size=size, p=p)
def random_dates(start, end, size):
"""
Generate random dates within range between start and end.
Adapted from: https://stackoverflow.com/a/50668285
"""
# Unix timestamp is in nanoseconds by default, so divide it by
# 24*60*60*10**9 to convert to days.
divide_by = 24 * 60 * 60 * 10**9
start_u = start.value // divide_by
end_u = end.value // divide_by
return pd.to_datetime(np.random.randint(start_u, end_u, size), unit="D")
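    # Worked example of the ns -> days conversion used above:
    # pd.Timestamp('1970-01-02').value == 86_400_000_000_000 ns, and dividing by
    # 24*60*60*10**9 gives 1, i.e. one day after the Unix epoch.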
size = 100
df = pd.DataFrame(columns=['first', 'last', 'gender', 'birthdate'])
df['first'] = random_names('first_names', size)
df['last'] = random_names('last_names', size)
df['gender'] = random_genders(size)
    df['birthdate'] = random_dates(start=pd.to_datetime('1940-01-01'), end=pd.to_datetime('2010-01-01'), size=size)  # end date is an assumed placeholder
import pandas as pd
from rdflib import URIRef, BNode, Literal, Graph
from rdflib.namespace import RDF, RDFS, FOAF, XSD
from rdflib import Namespace
import numpy as np
import math
import sys
import argparse
import json
import html
def read_excel(path):
df = pd.read_excel(path, sheet_name=0, header=None, index_col=None)
r_count = len(df.index)
c_count = len(df.columns)
map = {}
for i in range(0, c_count):
label = df.iloc[1, i]
map[i] = label
print(map)
data = []
search = []
uri_context = "https://webpark5032.sakura.ne.jp/tmp/sat/context.json"
context = {
"@context" : [
{
"ex" : "http://example.org/",
"data" : "https://nakamura196.github.io/sat/data",
"keiten" : "https://nakamura196.github.io/sat/経典番号/"
}
]
}
with open("../static/context.json", 'w') as f:
json.dump(context, f, ensure_ascii=False, indent=4,
sort_keys=True, separators=(',', ': '))
for j in range(2, r_count):
id = df.iloc[j, 0]
id = str(id).zfill(5)
print(id)
if id == "00000":
continue
経典番号 = df.iloc[j, 1]
uri = "data:"+id+".json"
uri_経典番号 = "keiten:"+経典番号+".json"
枝番 = df.iloc[j, 9] if not pd.isnull(df.iloc[j, 9]) else ""
# print(df.iloc[j, 8], df.iloc[j, 9], df.iloc[j, 10], df.iloc[j, 11], df.iloc[j, 12], df.iloc[j, 13])
uri_sat = "https://21dzk.l.u-tokyo.ac.jp/SAT2018/"+df.iloc[j, 8]+枝番+"_."+str(df.iloc[j, 10]).zfill(2)+"."+str(df.iloc[j, 11]).zfill(4)+df.iloc[j, 12]+str(df.iloc[j, 13]).zfill(2)+".html"
# --------
texts_k = []
for c in range(0, 5):
start = c * 10
if not pd.isnull(df.iloc[j, 22+start]):
obj = {
"@id": uri+"#テキスト"+str(c+1)+"(勘同目録)",
"ex:標準名称" : df.iloc[j, 22+start],
}
if not pd.isnull(df.iloc[j, 23+start]):
obj["ex:巻"] = df.iloc[j, 23+start]
if not pd.isnull(df.iloc[j, 24+start]):
obj["ex:国"] = df.iloc[j, 24+start]
                if not pd.isnull(df.iloc[j, 25+start]):
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
import matplotlib.pyplot as plt
num = np.array(['3.14','-2.7','30'], dtype=np.string_) # dtype=np.string_ written explicitly to make the code easier to follow
# num=num.astype(int)
# print(num)
# ValueError: invalid literal for int() with base 10: '3.14'
num=num.astype(float).astype(int)
print(num)
# [ 3 -2 30] : if the strings cannot be cast to int directly, cast to float first and then to int.
num=num.astype(float)
print(num)
# [ 3.14 -2.7 30. ]
arr=np.arange(32).reshape((8,4))
print(arr)
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]
# [12 13 14 15]
# [16 17 18 19]
# [20 21 22 23]
# [24 25 26 27]
# [28 29 30 31]]
print(arr[[1,5,7,2],[0,3,1,2]]) # fancy indexing [[row indices],[column indices]] ==> picks the element at each (row, col) pair
# [ 4 23 29 10]
print(arr[[1,5,7,2]][:,[0,3,1,2]]) # [[rows]][:,[cols]] : chained indexing ==> select rows 1,5,7,2 first, then reorder the columns
# [[ 4 7 5 6]
# [20 23 21 22]
# [28 31 29 30]
# [ 8 11 9 10]]
print(arr[[1,5,7,2]][:,[3,1]]) # [[rows]][:,[cols]] : columns 3 and 1 of the selected rows
# [[ 7 5]
# [23 21]
# [31 29]
# [11 9]]
import random
walk = []
position =0
steps=1000
for i in range(steps):
step = 1 if random.randint(0,1) else -1 #randint,randn,rannormal
position+=step
walk.append(position)
print("position : ",position)
# position : 18
print("walk : ",walk)
# walk : [-1, 0, 1, 0, -1, -2, -1, -....]
print(min(walk))
# -7
print(max(walk))
# 28
# print(abs(walk)) # abs : absolute value
obj = Series([1,2,-3,4])
print(obj)
# 0 1
# 1 2
# 2 -3
# 3 4
# dtype: int64
print(obj.values) # values : extracts only the values (attribute)
# [ 1 2 -3 4]
print(obj.index) # index : extracts the index
# RangeIndex(start=0, stop=4, step=1)
# specifying the index explicitly
obj = Series([1,2,-3,4],index=['x','y','z','k']) # assign index labels directly
print(obj)
# printed with the specified index
# x 1
# y 2
# z -3
# k 4
# dtype: int64
print(obj['y'])
# 2
obj['x']=10
print(obj)
# x 10
# y 2
# z -3
# k 4
# dtype: int64
# how to reference multiple labels
# print(obj['x','y'])
# # KeyError: ('x', 'y')
print(obj[['x','y','z']]) # one label: [], two or more labels: [[]]
# x 10
# y 2
# z -3
# dtype: int64
print('='*50)
print(obj>0) # conditional expressions can be used
# x True
# y True
# z False
# k True
# dtype: bool
print(obj[obj>0]) # can filter with a boolean condition
# x 10
# y 2
# k 4
# dtype: int64
print(obj*2) # arithmetic operations are supported
# x 20
# y 4
# z -6
# k 8
# dtype: int64
print(np.exp(obj)) # exponentiation
# x 22026.465795
# y 7.389056
# z 0.049787
# k 54.598150
# dtype: float64
# null (uninitialized value), na (missing value)
print(obj)
print('a' in obj) # in : checks whether a given label exists
print('x' in obj) # columns: features, rows: observations
print('='*50)
# key & value -> Series -> index & value conversion (key => index, value => value)
sdata = {'Ohio': 35000, 'Texas': 71000, "Oregon":16000, "Utah":5000}
obj3=Series(sdata) # a dictionary can also be converted to a Series
print(obj3)
# Ohio 35000
# Texas 71000
# Oregon 16000
# Utah 5000
# dtype: int64
print(type(obj3))
# <class 'pandas.core.series.Series'>
states = ['California','Ohio','Oregon','Texas']
obj99 = Series(states) # convert a list to a Series
# print(obj99)
# # 0 California
# # 1 Ohio
# # 2 Oregon
# # 3 Texas
# # dtype: object
obj4 = Series(sdata, index=states) # build a Series from sdata, using states as the index
print(obj4)
# California NaN
# Ohio 35000.0
# Oregon 16000.0
# Texas 71000.0
# dtype: float64
print(pd.isnull(obj4))
# California True
# Ohio False
# Oregon False
# Texas False
# dtype: bool
# in general: nan means "not a number" (e.g. text where a number is expected)
# na : a missing value, null : a value that was never initialized
# in pandas these terms are used interchangeably
# isnull : checks whether each value is na (null/nan)
print(obj4+obj3) # only indices present in both Series produce values; the rest become NaN
obj4.name = 'population'
obj.index.name = 'state'
print(obj4)
# California NaN
# Ohio 35000.0
# Oregon 16000.0
# Texas 71000.0
# Name: population, dtype: float64
obj4.index=['w','x','y','z'] # replace the index directly
print(obj4)
# w NaN
# x 35000.0
# y 16000.0
# z 71000.0
# Name: population, dtype: float64
data = {
'state' : ['Ohio','Ohio','Ohio','Nevada','Nevada'],
'year': [2000,2001,2002,2001,2002],
'pop': [1.5,1.7,3.6,2.4,2.9]}
frame = DataFrame(data) # essentially a bundle of Series sharing one index
print(frame)
# state year pop
# 0 Ohio 2000 1.5
# 1 Ohio 2001 1.7
# 2 Ohio 2002 3.6
# 3 Nevada 2001 2.4
# 4 Nevada 2002 2.9
print(DataFrame(data, columns=['year','state','pop'])) # change the column order (temporarily)
# year state pop
# 0 2000 Ohio 1.5
# 1 2001 Ohio 1.7
# 2 2002 Ohio 3.6
# 3 2001 Nevada 2.4
# 4 2002 Nevada 2.9
frame = DataFrame(data, columns=['year','state','pop']) # reassigning frame makes the new column order permanent
frame2= DataFrame(data, columns=['year','state','pop','debt'], index=['one','two','three','four','five'])
print(frame2)
# year state pop debt
# one 2000 Ohio 1.5 NaN
# two 2001 Ohio 1.7 NaN
# three 2002 Ohio 3.6 NaN
# four 2001 Nevada 2.4 NaN
# five 2002 Nevada 2.9 NaN
print(frame2['state']) # print only the desired column
# one Ohio
# two Ohio
# three Ohio
# four Nevada
# five Nevada
# Name: state, dtype: object
print(frame2['year'])
# one 2000
# two 2001
# three 2002
# four 2001
# five 2002
# Name: year, dtype: int64
print(frame2.loc['three']) # loc : select a single row by its index label (the old .ix accessor has been removed from pandas)
# to extract two or more columns or rows => use [[]]
# print(frame2[['year','state']])
#
# print(frame2.loc[['three','five']])
print(frame2)
frame2['debt']=16.5
print(frame2)
# year state pop debt
# one 2000 Ohio 1.5 16.5
# two 2001 Ohio 1.7 16.5
# three 2002 Ohio 3.6 16.5
# four 2001 Nevada 2.4 16.5
# five 2002 Nevada 2.9 16.5
# frame2['debt']=np.arange(3)
# print(frame2)
# # ValueError: Length of values does not match length of index
frame2['debt']=np.arange(5)
print(frame2)
# year state pop debt
# one 2000 Ohio 1.5 0
# two 2001 Ohio 1.7 1
# three 2002 Ohio 3.6 2
# four 2001 Nevada 2.4 3
# five 2002 Nevada 2.9 4
print('='*50)
val = Series([-1.2,-1.5,-1.7],index=['two','three','five'])
print(val)
# two -1.2
# three -1.5
# five -1.7
# dtype: float64
# to add a column of a different length -> create a Series and assign it
frame2['debt']=val # values are aligned by index label, so the lengths do not have to match
print(frame2)
# add a new column: True where state is Ohio (eastern), False otherwise (condition-based)
frame2['eastern']=frame2.state=='Ohio'
print(frame2)
# year state pop debt eastern
# one 2000 Ohio 1.5 NaN True
# two 2001 Ohio 1.7 -1.2 True
# three 2002 Ohio 3.6 -1.5 True
# four 2001 Nevada 2.4 NaN False
# five 2002 Nevada 2.9 -1.7 False
# remove a column
del frame2['eastern']
print(frame2)
# year state pop debt
# one 2000 Ohio 1.5 NaN
# two 2001 Ohio 1.7 -1.2
# three 2002 Ohio 3.6 -1.5
# four 2001 Nevada 2.4 NaN
# five 2002 Nevada 2.9 -1.7
print(frame2.columns)
# Index(['year', 'state', 'pop', 'debt'], dtype='object')
print(frame2.index)
# Index(['one', 'two', 'three', 'four', 'five'], dtype='object')
pop = {'Nevada' : {2001 : 2.4,2002:2.9},'Ohio' : {2000 : 1.5,2001:1.7,2002:3.6}}
frame3 = DataFrame(pop)
print(frame3)
# Nevada Ohio
# 2000 NaN 1.5
# 2001 2.4 1.7
# 2002 2.9 3.6
# swap rows and columns (transpose)
print(frame3.T)
# 2000 2001 2002
# Nevada NaN 2.4 2.9
# Ohio 1.5 1.7 3.6
# frame4 = DataFrame(pop,index=[2001,2002,2003]) # to specify an index here, use DataFrame (a plain dict has no index)
# print(frame4)
# # AttributeError: 'list' object has no attribute 'astype'
frame4 = DataFrame(frame3, index=[2001,2002,2003])
import re
import pandas as pd
import numpy as np
import scipy.stats, scipy.io
CHR_WILDCARD = 'CHRNUM'
def replace_str(pat, to_, from_=CHR_WILDCARD):
return re.sub(from_, str(to_), pat)
def _rs2int(ll):
oo = - np.ones(len(ll))
for i, l in enumerate(ll):
try:
oo[i] = int(re.sub('rs', '', l))
except:
continue
return oo
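# e.g. _rs2int(['rs123', 'chr1:555']) -> array([123., -1.]); ids that are not plain
# rs numbers keep the -1 placeholder.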
def _to_dict(ll):
odict = {}
has_chisq = False
for l in ll:
v, k = l.split(':')
if v == 'chisq':
has_chisq = True
odict[k] = v
if has_chisq is True:
        for k, v in list(odict.items()):
if v in ['b', 'b_se']:
del odict[k]
return odict
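# e.g. _to_dict(['b:BETA', 'b_se:SE']) -> {'BETA': 'b', 'SE': 'b_se'}, i.e. a rename
# mapping from the GWAS file's column names to the standard ones; when a chisq column
# is declared, the b/b_se entries are dropped from the mapping.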
def _load_gwas(gwas_file, extra=None):
if extra is None:
dd = pd.read_parquet(gwas_file)
else:
dd = pd.read_csv(gwas_file, compression='gzip', sep='\s+')
dd.rename(columns=_to_dict(extra), inplace=True)
if 'chisq' not in dd.columns:
dd['chisq'] = (dd.b / dd.b_se) ** 2
dd['rs_int'] = _rs2int(list(dd.variant_id))
return dd[['rs_int', 'chisq']]
def load_gwas(gwas_pattern, logging, extra=None):
if CHR_WILDCARD in gwas_pattern:
out = []
for i in range(1, 23):
logging.info('load_gwas: chr = {}'.format(i))
dd = _load_gwas(replace_str(gwas_pattern, i), extra=extra)
out.append(dd)
        out = pd.concat(out, axis=0)
    else:
        # no chromosome wildcard: load the single file directly (assumed branch)
        out = _load_gwas(gwas_pattern, extra=extra)
    return out
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : tql-Python.
# @File : OOF
# @Time : 2019-06-24 10:18
# @Author : yuanjie
# @Email : <EMAIL>
# @Software : PyCharm
# @Description :
# !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = '__init__.py'
__author__ = 'JieYuan'
__mtime__ = '19-1-2'
"""
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from catboost import CatBoostClassifier
from lightgbm import LGBMClassifier
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold, RepeatedStratifiedKFold
from statsmodels.api import GLM, families
from xgboost import XGBClassifier
"""
# TODO: cats
https://lightgbm.readthedocs.io/en/latest/Advanced-Topics.html
"""
class OOF(object):
"""Out of flod prediction
# TODO 支持回归
lightGBM一个一个地建立节点; XGboost一层一层地建立节点
https://blog.csdn.net/friyal/article/details/82758532
Catboost总是使用完全二叉树。它的节点是镜像的(对称树)。Catboost称对称树有利于避免overfit,增加可靠性,并且能大大加速预测等等。
计算某个category出现的频率,加上超参数,生成新的numerical features
# https://blog.csdn.net/linxid/article/details/80723811
"""
_params = {'metric': 'auc',
'learning_rate': 0.01,
'n_estimators': 30000,
'subsample': 0.8,
'colsample_bytree': 0.8,
'class_weight': 'balanced',
'scale_pos_weight': 1,
'random_state': 2019,
'verbosity': -1}
lgb = LGBMClassifier(n_jobs=16, **_params) # TODO: 常用模型另存为其他模块
xgb = XGBClassifier()
cat = CatBoostClassifier(
n_estimators=20000,
learning_rate=0.05,
loss_function='Logloss',
eval_metric='AUC',
random_state=2019)
def __init__(
self,
estimator=None,
folds=None,
early_stopping_rounds=300,
verbose=100):
# 指定lgb: metric xgb: eval_metric
self.estimator = self.lgb if estimator is None else estimator
self.folds = folds if folds else StratifiedKFold(
5, True, 2019) # 支持 RepeatedStratifiedKFold
self.model_type = self.estimator.__repr__()
self.early_stopping_rounds = early_stopping_rounds
self.verbose = verbose
# self.estimator_agrs = self.getfullargspec(self.estimator.fit).args if hasattr(self.estimator, 'fit') else None
def fit(
self,
X,
y,
X_test,
feval=None,
cat_feats=None,
exclude_columns=None,
epochs=16,
batch_size=128,
oof2csv=False,
plot=False):
"""
# TODO: Rank 融合
:param X: 保证索引唯一
:param y:
:param X_test:
:param feval: roc_auc_score(y_true, y_score)
:param cat_feats: 类别特征索引
:param exclude_columns:
仅针对 nn
:param epochs:
:param batch_size:
:return:
"""
# 判断输入数据转数据框
if isinstance(y, pd.Series):
y.reset_index(drop=True, inplace=True)
if not isinstance(X, pd.DataFrame):
X = pd.DataFrame(X)
X_test = pd.DataFrame(X)
else:
X.reset_index(drop=True, inplace=True)
X_test.reset_index(drop=True, inplace=True)
        # OOF evaluation function (defaults to ROC AUC)
feval = feval if feval else roc_auc_score
        # drop excluded columns
if exclude_columns:
feats = X.columns.difference(exclude_columns)
X, X_test = X[feats], X_test[feats]
# Score
if hasattr(feval, '__repr__'):
score_name = feval.__repr__().split()[1]
else:
score_name = None
# cv num
if hasattr(self.folds, 'n_splits'):
num_cv = self.folds.n_splits
else:
num_cv = self.folds.cvargs['n_splits'] * self.folds.n_repeats
# Cross validation model
# Create arrays and dataframes to store results
oof_preds = np.zeros(X.shape[0])
sub_preds = np.zeros((X_test.shape[0], num_cv))
self.feature_importance_df = | pd.DataFrame() | pandas.DataFrame |
#Import pandas
import pandas as pd
# Create file path: file_path
file_path = 'Summer Olympic medallists 1896 to 2008 - EDITIONS.tsv'
# Load DataFrame from file_path: editions
editions = pd.read_csv(file_path, sep="\t")
# Extract the relevant columns: editions
editions = editions[["Edition","Grand Total","City","Country"]]
# Print editions DataFrame
print(editions)
# Import pandas
import pandas as pd
# Create the file path: file_path
file_path = 'Summer Olympic medallists 1896 to 2008 - IOC COUNTRY CODES.csv'
# Load DataFrame from file_path: ioc_codes
ioc_codes = pd.read_csv(file_path)
# Extract the relevant columns: ioc_codes
ioc_codes = ioc_codes[["Country","NOC"]]
# Print first and last 5 rows of ioc_codes
print(ioc_codes.head(5))
print(ioc_codes.tail(5))
# Import pandas
import pandas as pd
# Create empty dictionary: medals_dict
medals_dict = {}
for year in editions['Edition']:
# Create the file path: file_path
file_path = 'summer_{:d}.csv'.format(year)
# Load file_path into a DataFrame: medals_dict[year]
medals_dict[year] = pd.read_csv(file_path)
# Extract relevant columns: medals_dict[year]
medals_dict[year] = medals_dict[year][["Athlete","NOC","Medal"]]
# Assign year to column 'Edition' of medals_dict
medals_dict[year]['Edition'] = year
# Concatenate medals_dict: medals
medals = pd.concat(medals_dict,ignore_index=True)
# Print first and last 5 rows of medals
print(medals.head())
print(medals.tail())
# Construct the pivot_table: medal_counts
medal_counts = medals.pivot_table(index="Edition", values="Athlete",columns="NOC", aggfunc="count")
# Print the first & last 5 rows of medal_counts
print(medal_counts.head())
print(medal_counts.tail())
# Set Index of editions: totals
totals = editions.set_index("Edition")
# Reassign totals['Grand Total']: totals
totals = totals["Grand Total"]
# Divide medal_counts by totals: fractions
fractions = medal_counts.divide(totals,axis="rows")
# Print first & last 5 rows of fractions
print(fractions.head(5))
print(fractions.tail(5))
# Apply the expanding mean: mean_fractions
mean_fractions = fractions.expanding().mean()
# Compute the percentage change: fractions_change
fractions_change = mean_fractions.pct_change() * 100
# Reset the index of fractions_change: fractions_change
fractions_change = fractions_change.reset_index()
# Print first & last 5 rows of fractions_change
print(fractions_change.head(5))
print(fractions_change.tail(5))
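# Worked example: for per-edition fractions [0.10, 0.30, 0.20] the expanding mean
# is [0.10, 0.20, 0.20], so pct_change() * 100 yields [NaN, 100.0, 0.0] -- the
# percentage change in a country's running-average medal share between editions.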
# Import pandas
import pandas as pd
# Left join editions and ioc_codes: hosts
hosts = pd.merge(editions, ioc_codes, how="left")
# Extract relevant columns and set index: hosts
hosts = hosts[["Edition","NOC"]].set_index("Edition")
# Fix missing 'NOC' values of hosts
print(hosts.loc[hosts.NOC.isnull()])
hosts.loc[1972, 'NOC'] = 'FRG'
hosts.loc[1980, 'NOC'] = 'URS'
hosts.loc[1988, 'NOC'] = 'KOR'
# Reset Index of hosts: hosts
hosts = hosts.reset_index()
# Print hosts
print(hosts)
# Import pandas
import pandas as pd
# Reshape fractions_change: reshaped
reshaped = | pd.melt(fractions_change, id_vars="Edition", value_name="Change") | pandas.melt |
"""Tools for generating and forecasting with ensembles of models."""
import datetime
import numpy as np
import pandas as pd
import json
from autots.models.base import PredictionObject
from autots.models.model_list import no_shared
from autots.tools.impute import fill_median
horizontal_aliases = ['horizontal', 'probabilistic']
def summarize_series(df):
"""Summarize time series data. For now just df.describe()."""
df_sum = df.describe(percentiles=[0.1, 0.25, 0.5, 0.75, 0.9])
return df_sum
def mosaic_or_horizontal(all_series: dict):
"""Take a mosaic or horizontal model and return series or models.
Args:
all_series (dict): dict of series: model (or list of models)
"""
first_value = all_series[next(iter(all_series))]
if isinstance(first_value, dict):
return "mosaic"
else:
return "horizontal"
def parse_horizontal(all_series: dict, model_id: str = None, series_id: str = None):
"""Take a mosaic or horizontal model and return series or models.
Args:
all_series (dict): dict of series: model (or list of models)
model_id (str): name of model to find series for
series_id (str): name of series to find models for
Returns:
list
"""
if model_id is None and series_id is None:
raise ValueError(
"either series_id or model_id must be specified in parse_horizontal."
)
if mosaic_or_horizontal(all_series) == 'mosaic':
if model_id is not None:
return [ser for ser, mod in all_series.items() if model_id in mod.values()]
else:
return list(set(all_series[series_id].values()))
else:
if model_id is not None:
return [ser for ser, mod in all_series.items() if mod == model_id]
else:
# list(set([mod for ser, mod in all_series.items() if ser == series_id]))
return [all_series[series_id]]
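# Example shapes (hypothetical IDs):
#   horizontal: {'series_a': 'model_1', 'series_b': 'model_2'}
#     parse_horizontal(..., model_id='model_1') -> ['series_a']
#   mosaic: {'series_a': {'0': 'model_1', '1': 'model_2'}}
#     parse_horizontal(..., series_id='series_a') -> ['model_1', 'model_2'] (order not guaranteed)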
def BestNEnsemble(
ensemble_params,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime: dict,
prediction_interval: float = 0.9,
):
"""Generate mean forecast for ensemble of models.
Args:
ensemble_params (dict): BestN ensemble param dict
should have "model_weights": {model_id: weight} where 1 is default weight per model
forecasts (dict): {forecast_id: forecast dataframe} for all models
same for lower_forecasts, upper_forecasts
forecast_runtime (dict): dictionary of {forecast_id: timedelta of runtime}
prediction_interval (float): metadata on interval
"""
startTime = datetime.datetime.now()
forecast_keys = list(forecasts.keys())
model_weights = dict(ensemble_params.get("model_weights", {}))
ensemble_params['model_weights'] = model_weights
ensemble_params['models'] = {
k: v
for k, v in dict(ensemble_params.get('models')).items()
if k in forecast_keys
}
model_count = len(forecast_keys)
if model_count < 1:
raise ValueError("BestN failed, no component models available.")
sample_df = next(iter(forecasts.values()))
columnz = sample_df.columns
indices = sample_df.index
model_divisor = 0
ens_df = pd.DataFrame(0, index=indices, columns=columnz)
ens_df_lower = pd.DataFrame(0, index=indices, columns=columnz)
ens_df_upper = pd.DataFrame(0, index=indices, columns=columnz)
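    # accumulate weight * forecast for every component model, then divide by the
    # summed weights below -- a per-cell weighted average across models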
for idx, x in forecasts.items():
current_weight = float(model_weights.get(idx, 1))
ens_df = ens_df + (x * current_weight)
# also .get(idx, 0)
ens_df_lower = ens_df_lower + (lower_forecasts[idx] * current_weight)
ens_df_upper = ens_df_upper + (upper_forecasts[idx] * current_weight)
model_divisor = model_divisor + current_weight
ens_df = ens_df / model_divisor
ens_df_lower = ens_df_lower / model_divisor
ens_df_upper = ens_df_upper / model_divisor
ens_runtime = datetime.timedelta(0)
for x in forecasts_runtime.values():
ens_runtime = ens_runtime + x
ens_result = PredictionObject(
model_name="Ensemble",
forecast_length=len(ens_df.index),
forecast_index=ens_df.index,
forecast_columns=ens_df.columns,
lower_forecast=ens_df_lower,
forecast=ens_df,
upper_forecast=ens_df_upper,
prediction_interval=prediction_interval,
predict_runtime=datetime.datetime.now() - startTime,
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result
def DistEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
):
"""Generate forecast for distance ensemble."""
# handle that the inputs are now dictionaries
forecasts = list(forecasts.values())
lower_forecasts = list(lower_forecasts.values())
upper_forecasts = list(upper_forecasts.values())
forecasts_runtime = list(forecasts_runtime.values())
first_model_index = forecasts_list.index(ensemble_params['FirstModel'])
second_model_index = forecasts_list.index(ensemble_params['SecondModel'])
forecast_length = forecasts[0].shape[0]
dis_frac = ensemble_params['dis_frac']
first_bit = int(np.ceil(forecast_length * dis_frac))
second_bit = int(np.floor(forecast_length * (1 - dis_frac)))
ens_df = (
forecasts[first_model_index]
.head(first_bit)
.append(forecasts[second_model_index].tail(second_bit))
)
ens_df_lower = (
lower_forecasts[first_model_index]
.head(first_bit)
.append(lower_forecasts[second_model_index].tail(second_bit))
)
ens_df_upper = (
upper_forecasts[first_model_index]
.head(first_bit)
.append(upper_forecasts[second_model_index].tail(second_bit))
)
id_list = list(ensemble_params['models'].keys())
model_indexes = [idx for idx, x in enumerate(forecasts_list) if x in id_list]
ens_runtime = datetime.timedelta(0)
for idx, x in enumerate(forecasts_runtime):
if idx in model_indexes:
ens_runtime = ens_runtime + forecasts_runtime[idx]
ens_result_obj = PredictionObject(
model_name="Ensemble",
forecast_length=len(ens_df.index),
forecast_index=ens_df.index,
forecast_columns=ens_df.columns,
lower_forecast=ens_df_lower,
forecast=ens_df,
upper_forecast=ens_df_upper,
prediction_interval=prediction_interval,
predict_runtime=datetime.timedelta(0),
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result_obj
def horizontal_classifier(df_train, known: dict, method: str = "whatever"):
"""
    Classify unknown series with the appropriate model for horizontal ensembling.
Args:
df_train (pandas.DataFrame): historical data about the series. Columns = series_ids.
known (dict): dict of series_id: classifier outcome including some but not all series in df_train.
Returns:
dict.
"""
# known = {'EXUSEU': 'xx1', 'MCOILWTICO': 'xx2', 'CSUSHPISA': 'xx3'}
columnz = df_train.columns.tolist()
X = summarize_series(df_train).transpose()
X = fill_median(X)
known_l = list(known.keys())
unknown = list(set(columnz) - set(known_l))
Xt = X.loc[known_l]
Xf = X.loc[unknown]
Y = np.array(list(known.values()))
from sklearn.naive_bayes import GaussianNB
clf = GaussianNB()
clf.fit(Xt, Y)
result = clf.predict(Xf)
result_d = dict(zip(Xf.index.tolist(), result))
# since this only has estimates, overwrite with known that includes more
final = {**result_d, **known}
# temp = pd.DataFrame({'series': list(final.keys()), 'model': list(final.values())})
# temp2 = temp.merge(X, left_on='series', right_index=True)
return final
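# Example (hypothetical IDs): with known = {'seriesA': 'model_1', 'seriesB': 'model_2'}
# and df_train also containing 'seriesC', the classifier is fit on the describe()
# summaries of seriesA/seriesB and predicts a model for seriesC; known assignments
# are kept verbatim because `known` overwrites the predictions in the final merge.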
def mosaic_classifier(df_train, known):
"""CLassify unknown series with the appropriate model for mosaic ensembles."""
known.index.name = "forecast_period"
upload = pd.melt(
known,
var_name="series_id",
value_name="model_id",
ignore_index=False,
).reset_index(drop=False)
upload['forecast_period'] = upload['forecast_period'].astype(int)
missing_cols = df_train.columns[
~df_train.columns.isin(upload['series_id'].unique())
]
if not missing_cols.empty:
forecast_p = np.arange(upload['forecast_period'].max() + 1)
p_full = np.tile(forecast_p, len(missing_cols))
missing_rows = pd.DataFrame(
{
'forecast_period': p_full,
'series_id': np.repeat(missing_cols.values, len(forecast_p)),
'model_id': np.nan,
},
index=None if len(p_full) > 1 else [0],
)
upload = pd.concat([upload, missing_rows])
X = fill_median(
(summarize_series(df_train).transpose()).merge(
upload, left_index=True, right_on="series_id"
)
)
X.set_index("series_id", inplace=True) # .drop(columns=['series_id'], inplace=True)
to_predict = X[X['model_id'].isna()].drop(columns=['model_id'])
X = X[~X['model_id'].isna()]
Y = X['model_id']
Xf = X.drop(columns=['model_id'])
# from sklearn.linear_model import RidgeClassifier
# from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
clf.fit(Xf, Y)
predicted = clf.predict(to_predict)
result = pd.concat(
[to_predict.reset_index(drop=False), pd.Series(predicted, name="model_id")],
axis=1,
)
cols_needed = ['model_id', 'series_id', 'forecast_period']
final = pd.concat(
[X.reset_index(drop=False)[cols_needed], result[cols_needed]], sort=True, axis=0
)
final['forecast_period'] = final['forecast_period'].astype(str)
final = final.pivot(values="model_id", columns="series_id", index="forecast_period")
try:
final = final[df_train.columns]
if final.isna().to_numpy().sum() > 0:
raise KeyError("NaN in mosaic generalization")
except KeyError as e:
raise ValueError(
f"mosaic_classifier failed to generalize for all columns: {repr(e)}"
)
return final
def generalize_horizontal(
df_train, known_matches: dict, available_models: list, full_models: list = None
):
"""generalize a horizontal model trained on a subset of all series
Args:
df_train (pd.DataFrame): time series data
known_matches (dict): series:model dictionary for some to all series
available_models (dict): list of models actually available
full_models (dict): models that are available for every single series
"""
org_idx = df_train.columns
org_list = org_idx.tolist()
# remove any unnecessary series
known_matches = {ser: mod for ser, mod in known_matches.items() if ser in org_list}
# here split for mosaic or horizontal
if mosaic_or_horizontal(known_matches) == "mosaic":
# make it a dataframe
mosaicy = pd.DataFrame.from_dict(known_matches)
# remove unavailable models
mosaicy = pd.DataFrame(mosaicy[mosaicy.isin(available_models)])
# so we can fill some missing by just using a forward fill, should be good enough
mosaicy.fillna(method='ffill', limit=5, inplace=True)
mosaicy.fillna(method='bfill', limit=5, inplace=True)
if mosaicy.isna().any().any() or mosaicy.shape[1] != df_train.shape[1]:
if full_models is not None:
k2 = pd.DataFrame(mosaicy[mosaicy.isin(full_models)])
else:
k2 = mosaicy.copy()
final = mosaic_classifier(df_train, known=k2)
return final.to_dict()
else:
return mosaicy.to_dict()
else:
# remove any unavailable models
k = {ser: mod for ser, mod in known_matches.items() if mod in available_models}
# check if any series are missing from model list
if not k:
raise ValueError("Horizontal template has no models matching this data!")
# test if generalization is needed
if len(set(org_list) - set(list(k.keys()))) > 0:
# filter down to only models available for all
# print(f"Models not available: {[ser for ser, mod in known_matches.items() if mod not in available_models]}")
# print(f"Series not available: {[ser for ser in df_train.columns if ser not in list(known_matches.keys())]}")
if full_models is not None:
k2 = {ser: mod for ser, mod in k.items() if mod in full_models}
else:
k2 = k.copy()
all_series_part = horizontal_classifier(df_train, k2)
# since this only has "full", overwrite with known that includes more
all_series = {**all_series_part, **k}
else:
all_series = known_matches
return all_series
def HorizontalEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
df_train=None,
prematched_series: dict = None,
):
"""Generate forecast for per_series ensembling."""
startTime = datetime.datetime.now()
# this is meant to fill in any failures
available_models = [mod for mod, fcs in forecasts.items() if fcs.shape[0] > 0]
train_size = df_train.shape
# print(f"running inner generalization with training size: {train_size}")
full_models = [
mod for mod, fcs in forecasts.items() if fcs.shape[1] == train_size[1]
]
if not full_models:
print("No full models available for horizontal generalization!")
full_models = available_models # hope it doesn't need to fill
# print(f"FULLMODEL {len(full_models)}: {full_models}")
if prematched_series is None:
prematched_series = ensemble_params['series']
all_series = generalize_horizontal(
df_train, prematched_series, available_models, full_models
)
# print(f"ALLSERIES {len(all_series.keys())}: {all_series}")
org_idx = df_train.columns
forecast_df, u_forecast_df, l_forecast_df = (
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame(),
)
for series, mod_id in all_series.items():
try:
c_fore = forecasts[mod_id][series]
forecast_df = pd.concat([forecast_df, c_fore], axis=1)
except Exception as e:
print(f"Horizontal ensemble unable to add model {mod_id} {repr(e)}")
# upper
c_fore = upper_forecasts[mod_id][series]
u_forecast_df = pd.concat([u_forecast_df, c_fore], axis=1)
# lower
c_fore = lower_forecasts[mod_id][series]
l_forecast_df = pd.concat([l_forecast_df, c_fore], axis=1)
# make sure columns align to original
forecast_df = forecast_df.reindex(columns=org_idx)
u_forecast_df = u_forecast_df.reindex(columns=org_idx)
l_forecast_df = l_forecast_df.reindex(columns=org_idx)
# combine runtimes
try:
ens_runtime = sum(list(forecasts_runtime.values()), datetime.timedelta())
except Exception:
ens_runtime = datetime.timedelta(0)
ens_result = PredictionObject(
model_name="Ensemble",
forecast_length=len(forecast_df.index),
forecast_index=forecast_df.index,
forecast_columns=forecast_df.columns,
lower_forecast=l_forecast_df,
forecast=forecast_df,
upper_forecast=u_forecast_df,
prediction_interval=prediction_interval,
predict_runtime=datetime.datetime.now() - startTime,
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result
def HDistEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
):
"""Generate forecast for per_series per distance ensembling."""
# handle that the inputs are now dictionaries
forecasts = list(forecasts.values())
lower_forecasts = list(lower_forecasts.values())
upper_forecasts = list(upper_forecasts.values())
forecasts_runtime = list(forecasts_runtime.values())
id_list = list(ensemble_params['models'].keys())
mod_dic = {x: idx for idx, x in enumerate(forecasts_list) if x in id_list}
forecast_length = forecasts[0].shape[0]
dist_n = int(np.ceil(ensemble_params['dis_frac'] * forecast_length))
dist_last = forecast_length - dist_n
forecast_df, u_forecast_df, l_forecast_df = (
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame(),
)
for series, mod_id in ensemble_params['series1'].items():
l_idx = mod_dic[mod_id]
try:
c_fore = forecasts[l_idx][series]
forecast_df = pd.concat([forecast_df, c_fore], axis=1)
except Exception as e:
repr(e)
print(forecasts[l_idx].columns)
print(forecasts[l_idx].head())
# upper
c_fore = upper_forecasts[l_idx][series]
u_forecast_df = pd.concat([u_forecast_df, c_fore], axis=1)
# lower
c_fore = lower_forecasts[l_idx][series]
l_forecast_df = pd.concat([l_forecast_df, c_fore], axis=1)
forecast_df2, u_forecast_df2, l_forecast_df2 = (
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame(),
)
for series, mod_id in ensemble_params['series2'].items():
l_idx = mod_dic[mod_id]
try:
c_fore = forecasts[l_idx][series]
forecast_df2 = pd.concat([forecast_df2, c_fore], axis=1)
except Exception as e:
repr(e)
print(forecasts[l_idx].columns)
print(forecasts[l_idx].head())
# upper
c_fore = upper_forecasts[l_idx][series]
u_forecast_df2 = pd.concat([u_forecast_df2, c_fore], axis=1)
# lower
c_fore = lower_forecasts[l_idx][series]
l_forecast_df2 = pd.concat([l_forecast_df2, c_fore], axis=1)
forecast_df = pd.concat(
[forecast_df.head(dist_n), forecast_df2.tail(dist_last)], axis=0
)
u_forecast_df = pd.concat(
[u_forecast_df.head(dist_n), u_forecast_df2.tail(dist_last)], axis=0
)
l_forecast_df = pd.concat(
[l_forecast_df.head(dist_n), l_forecast_df2.tail(dist_last)], axis=0
)
ens_runtime = datetime.timedelta(0)
for idx, x in enumerate(forecasts_runtime):
if idx in list(mod_dic.values()):
ens_runtime = ens_runtime + forecasts_runtime[idx]
ens_result = PredictionObject(
model_name="Ensemble",
forecast_length=len(forecast_df.index),
forecast_index=forecast_df.index,
forecast_columns=forecast_df.columns,
lower_forecast=l_forecast_df,
forecast=forecast_df,
upper_forecast=u_forecast_df,
prediction_interval=prediction_interval,
predict_runtime=datetime.timedelta(0),
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result
def EnsembleForecast(
ensemble_str,
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
df_train=None,
prematched_series: dict = None,
):
"""Return PredictionObject for given ensemble method."""
ens_model_name = ensemble_params['model_name'].lower().strip()
s3list = ['best3', 'best3horizontal', 'bestn']
if ens_model_name in s3list:
ens_forecast = BestNEnsemble(
ensemble_params,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
)
return ens_forecast
elif ens_model_name == 'dist':
ens_forecast = DistEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
)
return ens_forecast
elif ens_model_name in horizontal_aliases:
ens_forecast = HorizontalEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
df_train=df_train,
prematched_series=prematched_series,
)
return ens_forecast
elif ens_model_name == "mosaic":
ens_forecast = MosaicEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
df_train=df_train,
prematched_series=prematched_series,
)
return ens_forecast
elif ens_model_name == 'hdist':
ens_forecast = HDistEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
)
return ens_forecast
else:
raise ValueError("Ensemble model type not recognized.")
def _generate_distance_ensemble(dis_frac, forecast_length, initial_results):
"""Constructs a distance ensemble dictionary."""
dis_frac = 0.5
first_bit = int(np.ceil(forecast_length * dis_frac))
last_bit = int(np.floor(forecast_length * (1 - dis_frac)))
not_ens_list = initial_results.model_results[
initial_results.model_results['Ensemble'] == 0
]['ID'].tolist()
ens_per_ts = initial_results.per_timestamp_smape[
initial_results.per_timestamp_smape.index.isin(not_ens_list)
]
first_model = ens_per_ts.iloc[:, 0:first_bit].mean(axis=1).idxmin()
last_model = (
ens_per_ts.iloc[:, first_bit : (last_bit + first_bit)].mean(axis=1).idxmin()
)
ensemble_models = {}
best3 = (
initial_results.model_results[
initial_results.model_results['ID'].isin([first_model, last_model])
]
.drop_duplicates(
subset=['Model', 'ModelParameters', 'TransformationParameters']
)
.set_index("ID")[['Model', 'ModelParameters', 'TransformationParameters']]
)
ensemble_models = best3.to_dict(orient='index')
return {
'Model': 'Ensemble',
'ModelParameters': json.dumps(
{
'model_name': 'Dist',
'model_count': 2,
'model_metric': 'smape',
'models': ensemble_models,
'dis_frac': dis_frac,
'FirstModel': first_model,
'SecondModel': last_model,
}
),
'TransformationParameters': '{}',
'Ensemble': 1,
}
def _generate_bestn_dict(
best,
model_name: str = 'BestN',
model_metric: str = "best_score",
model_weights: dict = None,
):
ensemble_models = best.to_dict(orient='index')
model_parms = {
'model_name': model_name,
'model_count': best.shape[0],
'model_metric': model_metric,
'models': ensemble_models,
}
if model_weights is not None:
model_parms['model_weights'] = model_weights
return {
'Model': 'Ensemble',
'ModelParameters': json.dumps(model_parms),
'TransformationParameters': '{}',
'Ensemble': 1,
}
def EnsembleTemplateGenerator(
initial_results,
forecast_length: int = 14,
ensemble: str = "simple",
score_per_series=None,
):
"""Generate class 1 (non-horizontal) ensemble templates given a table of results."""
ensemble_templates = pd.DataFrame()
ens_temp = initial_results.model_results.drop_duplicates(subset='ID')
# filter out horizontal ensembles
ens_temp = ens_temp[ens_temp['Ensemble'] <= 1]
if 'simple' in ensemble:
# best 3, all can be of same model type
best3nonunique = ens_temp.nsmallest(3, columns=['Score']).set_index("ID")[
['Model', 'ModelParameters', 'TransformationParameters']
]
n_models = best3nonunique.shape[0]
if n_models == 3:
best3nu_params = pd.DataFrame(
_generate_bestn_dict(
best3nonunique, model_name='BestN', model_metric="best_score"
),
index=[0],
)
ensemble_templates = pd.concat([ensemble_templates, best3nu_params], axis=0)
# best 3, by SMAPE, RMSE, SPL
bestsmape = ens_temp.nsmallest(1, columns=['smape_weighted'])
bestrmse = ens_temp.nsmallest(2, columns=['rmse_weighted'])
bestmae = ens_temp.nsmallest(3, columns=['spl_weighted'])
best3metric = pd.concat([bestsmape, bestrmse, bestmae], axis=0)
best3metric = (
best3metric.drop_duplicates()
.head(3)
.set_index("ID")[['Model', 'ModelParameters', 'TransformationParameters']]
)
n_models = best3metric.shape[0]
if n_models == 3:
best3m_params = pd.DataFrame(
_generate_bestn_dict(
best3metric, model_name='BestN', model_metric="mixed_metric"
),
index=[0],
)
ensemble_templates = pd.concat([ensemble_templates, best3m_params], axis=0)
# best 3, all must be of different model types
ens_temp = (
ens_temp.sort_values('Score', ascending=True, na_position='last')
.groupby('Model')
.head(1)
.reset_index(drop=True)
)
best3unique = ens_temp.nsmallest(3, columns=['Score']).set_index("ID")[
['Model', 'ModelParameters', 'TransformationParameters']
]
n_models = best3unique.shape[0]
if n_models == 3:
best3u_params = pd.DataFrame(
_generate_bestn_dict(
best3unique, model_name='BestN', model_metric="best_score_unique"
),
index=[0],
)
ensemble_templates = pd.concat(
[ensemble_templates, best3u_params], axis=0, ignore_index=True
)
if 'distance' in ensemble:
dis_frac = 0.2
distance_params = pd.DataFrame(
_generate_distance_ensemble(dis_frac, forecast_length, initial_results),
index=[0],
)
ensemble_templates = pd.concat(
[ensemble_templates, distance_params], axis=0, ignore_index=True
)
dis_frac = 0.5
distance_params2 = pd.DataFrame(
_generate_distance_ensemble(dis_frac, forecast_length, initial_results),
index=[0],
)
ensemble_templates = pd.concat(
[ensemble_templates, distance_params2], axis=0, ignore_index=True
)
# in previous versions per_series metrics were only captured if 'horizontal' was passed
if 'simple' in ensemble:
if score_per_series is None:
per_series = initial_results.per_series_mae
else:
per_series = score_per_series
per_series = per_series[per_series.index.isin(ens_temp['ID'].tolist())]
# make it ranking based! Need bigger=better for weighting
per_series_ranked = per_series.rank(ascending=False)
# choose best n based on score per series
n = 3
chosen_ones = per_series_ranked.sum(axis=1).nlargest(n)
bestn = ens_temp[ens_temp['ID'].isin(chosen_ones.index.tolist())].set_index(
"ID"
)[['Model', 'ModelParameters', 'TransformationParameters']]
n_models = bestn.shape[0]
if n_models == n:
best3u_params = pd.DataFrame(
_generate_bestn_dict(
bestn,
model_name='BestN',
model_metric="bestn_horizontal",
model_weights=chosen_ones.to_dict(),
),
index=[0],
)
ensemble_templates = pd.concat(
[ensemble_templates, best3u_params], axis=0, ignore_index=True
)
# cluster and then make best model per cluster
if per_series.shape[1] > 4:
try:
from sklearn.cluster import AgglomerativeClustering
max_clusters = 8
n_clusters = round(per_series.shape[1] / 3)
n_clusters = max_clusters if n_clusters > max_clusters else n_clusters
X = per_series_ranked.transpose()
clstr = AgglomerativeClustering(n_clusters=n_clusters).fit(X)
series_labels = clstr.labels_
for cluster in np.unique(series_labels).tolist():
current_ps = per_series_ranked[
per_series_ranked.columns[series_labels == cluster]
]
n = 3
chosen_ones = current_ps.sum(axis=1).nlargest(n)
bestn = ens_temp[
ens_temp['ID'].isin(chosen_ones.index.tolist())
].set_index("ID")[
['Model', 'ModelParameters', 'TransformationParameters']
]
n_models = bestn.shape[0]
if n_models == n:
best3u_params = pd.DataFrame(
_generate_bestn_dict(
bestn,
model_name='BestN',
model_metric=f"cluster_{cluster}",
model_weights=chosen_ones.to_dict(),
),
index=[0],
)
ensemble_templates = pd.concat(
[ensemble_templates, best3u_params],
axis=0,
ignore_index=True,
)
except Exception as e:
print(f"cluster-based simple ensemble failed with {repr(e)}")
mods = pd.Series()
per_series_des = per_series.copy()
n_models = 3
# choose best per series, remove those series, then choose next best
for x in range(n_models):
n_dep = 5 if x < 2 else 10
n_dep = (
n_dep if per_series_des.shape[0] > n_dep else per_series_des.shape[0]
)
models_pos = []
tr_df = pd.DataFrame()
for _ in range(n_dep):
cr_df = pd.DataFrame(per_series_des.idxmin()).transpose()
tr_df = pd.concat([tr_df, cr_df], axis=0)
models_pos.extend(per_series_des.idxmin().tolist())
per_series_des[per_series_des == per_series_des.min()] = np.nan
cur_mods = pd.Series(models_pos).value_counts()
cur_mods = cur_mods.sort_values(ascending=False).head(1)
mods = mods.combine(cur_mods, max, fill_value=0)
rm_cols = tr_df[tr_df.isin(mods.index.tolist())]
rm_cols = rm_cols.dropna(how='all', axis=1).columns
per_series_des = per_series.copy().drop(mods.index, axis=0)
per_series_des = per_series_des.drop(rm_cols, axis=1)
if per_series_des.shape[1] == 0:
per_series_des = per_series.copy().drop(mods.index, axis=0)
best3 = (
initial_results.model_results[
initial_results.model_results['ID'].isin(mods.index.tolist())
]
.drop_duplicates(
subset=['Model', 'ModelParameters', 'TransformationParameters']
)
.set_index("ID")[['Model', 'ModelParameters', 'TransformationParameters']]
)
best3_params = pd.DataFrame(
_generate_bestn_dict(best3, model_name='BestN', model_metric="horizontal"),
index=[0],
)
ensemble_templates = pd.concat(
[ensemble_templates, best3_params], axis=0, ignore_index=True
)
if 'subsample' in ensemble:
try:
import random
if score_per_series is None:
per_series = initial_results.per_series_mae
else:
per_series = score_per_series
per_series = per_series[per_series.index.isin(ens_temp['ID'].tolist())]
# make it ranking based! Need bigger=better for weighting
per_series_ranked = per_series.rank(ascending=False)
# subsample and then make best model per group
num_series = per_series.shape[1]
n_samples = num_series * 2
max_deep_ensembles = 100
n_samples = (
n_samples if n_samples < max_deep_ensembles else max_deep_ensembles
)
col_min = 1 if num_series < 3 else 2
col_max = round(num_series / 2)
col_max = num_series if col_max > num_series else col_max
for samp in range(n_samples):
n_cols = random.randint(col_min, col_max)
current_ps = per_series_ranked.sample(n=n_cols, axis=1)
n_largest = random.randint(9, 16)
n_sample = random.randint(2, 5)
# randomly choose one of best models
chosen_ones = current_ps.sum(axis=1).nlargest(n_largest)
n_sample = (
n_sample
if n_sample < chosen_ones.shape[0]
else chosen_ones.shape[0]
)
chosen_ones = chosen_ones.sample(n_sample).sort_values(ascending=False)
bestn = ens_temp[
ens_temp['ID'].isin(chosen_ones.index.tolist())
].set_index("ID")[
['Model', 'ModelParameters', 'TransformationParameters']
]
model_weights = random.choice([chosen_ones.to_dict(), None])
best3u_params = pd.DataFrame(
_generate_bestn_dict(
bestn,
model_name='BestN',
model_metric=f"subsample_{samp}",
model_weights=model_weights,
),
index=[0],
)
ensemble_templates = pd.concat(
[ensemble_templates, best3u_params], axis=0, ignore_index=True
)
except Exception as e:
print(f"subsample ensembling failed with error: {repr(e)}")
return ensemble_templates
def HorizontalTemplateGenerator(
per_series,
model_results,
forecast_length: int = 14,
ensemble: str = "horizontal",
subset_flag: bool = True,
per_series2=None,
):
"""Generate horizontal ensemble templates given a table of results."""
ensemble_templates = pd.DataFrame()
ensy = ['horizontal', 'probabilistic', 'hdist']
if any(x in ensemble for x in ensy):
if ('horizontal-max' in ensemble) or ('probabilistic-max' in ensemble):
mods_per_series = per_series.idxmin()
mods = mods_per_series.unique()
best5 = (
model_results[model_results['ID'].isin(mods.tolist())]
.drop_duplicates(
subset=['Model', 'ModelParameters', 'TransformationParameters']
)
.set_index("ID")[
['Model', 'ModelParameters', 'TransformationParameters']
]
)
nomen = 'Horizontal' if 'horizontal' in ensemble else 'Probabilistic'
metric = 'Score-max' if 'horizontal' in ensemble else 'SPL'
best5_params = {
'Model': 'Ensemble',
'ModelParameters': json.dumps(
{
'model_name': nomen,
'model_count': mods.shape[0],
'model_metric': metric,
'models': best5.to_dict(orient='index'),
'series': mods_per_series.to_dict(),
}
),
'TransformationParameters': '{}',
'Ensemble': 2,
}
best5_params = pd.DataFrame(best5_params, index=[0])
ensemble_templates = pd.concat(
[ensemble_templates, best5_params], axis=0, ignore_index=True
)
if 'hdist' in ensemble and not subset_flag:
mods_per_series = per_series.idxmin()
mods_per_series2 = per_series2.idxmin()
mods = pd.concat([mods_per_series, mods_per_series2]).unique()
best5 = (
model_results[model_results['ID'].isin(mods.tolist())]
.drop_duplicates(
subset=['Model', 'ModelParameters', 'TransformationParameters']
)
.set_index("ID")[
['Model', 'ModelParameters', 'TransformationParameters']
]
)
nomen = 'hdist'
best5_params = {
'Model': 'Ensemble',
'ModelParameters': json.dumps(
{
'model_name': nomen,
'model_count': mods.shape[0],
'models': best5.to_dict(orient='index'),
'dis_frac': 0.3,
'series1': mods_per_series.to_dict(),
'series2': mods_per_series2.to_dict(),
}
),
'TransformationParameters': '{}',
'Ensemble': 2,
}
best5_params = pd.DataFrame(best5_params, index=[0])
ensemble_templates = pd.concat(
[ensemble_templates, best5_params], axis=0, ignore_index=True
)
if ('horizontal' in ensemble) or ('probabilistic' in ensemble):
# first generate lists of models by ID that are in shared and no_shared
no_shared_select = model_results['Model'].isin(no_shared)
shared_mod_lst = model_results[~no_shared_select]['ID'].tolist()
no_shared_mod_lst = model_results[no_shared_select]['ID'].tolist()
lowest_score_mod = [
model_results.iloc[model_results['Score'].idxmin()]['ID']
]
per_series[per_series.index.isin(shared_mod_lst)]
# remove those where idxmin is in no_shared
shared_maxes = per_series.idxmin().isin(shared_mod_lst)
shr_mx_cols = shared_maxes[shared_maxes].index
per_series_shareds = per_series.filter(shr_mx_cols, axis=1)
# select best n shared models (NEEDS improvement)
n_md = 5
use_shared_lst = (
per_series_shareds.median(axis=1).nsmallest(n_md).index.tolist()
)
# combine all of the above as allowed mods
allowed_list = no_shared_mod_lst + lowest_score_mod + use_shared_lst
per_series_filter = per_series[per_series.index.isin(allowed_list)]
# first select a few of the best shared models
# Option A: Best overall per model type (by different metrics?)
# Option B: Best per different clusters...
# Rank position in score for EACH series
# Lowest median ranking
# Lowest Quartile 1 of rankings
# Normalize and then take Min, Median, or IQ1
# then choose min from series of these + no_shared
# make sure no models are included that don't match to any series
# ENSEMBLE and NO_SHARED (it could be or it could not be)
# need to TEST cases where all columns are either shared or no_shared!
# concern: choose lots of models, slower to run initial
mods_per_series = per_series_filter.idxmin()
mods = mods_per_series.unique()
best5 = (
model_results[model_results['ID'].isin(mods.tolist())]
.drop_duplicates(
subset=['Model', 'ModelParameters', 'TransformationParameters']
)
.set_index("ID")[
['Model', 'ModelParameters', 'TransformationParameters']
]
)
nomen = 'Horizontal' if 'horizontal' in ensemble else 'Probabilistic'
metric = 'Score' if 'horizontal' in ensemble else 'SPL'
best5_params = {
'Model': 'Ensemble',
'ModelParameters': json.dumps(
{
'model_name': nomen,
'model_count': mods.shape[0],
'model_metric': metric,
'models': best5.to_dict(orient='index'),
'series': mods_per_series.to_dict(),
}
),
'TransformationParameters': '{}',
'Ensemble': 2,
}
best5_params = pd.DataFrame(best5_params, index=[0])
ensemble_templates = pd.concat(
[ensemble_templates, best5_params], axis=0, ignore_index=True
)
if ('horizontal-min' in ensemble) or ('probabilistic-min' in ensemble):
mods = pd.Series()
per_series_des = per_series.copy()
n_models = 15
# choose best per series, remove those series, then choose next best
for x in range(n_models):
n_dep = x + 1
n_dep = (
n_dep
if per_series_des.shape[0] > n_dep
else per_series_des.shape[0]
)
models_pos = []
tr_df = pd.DataFrame()
for _ in range(n_dep):
cr_df = pd.DataFrame(per_series_des.idxmin()).transpose()
tr_df = pd.concat([tr_df, cr_df], axis=0)
models_pos.extend(per_series_des.idxmin().tolist())
per_series_des[per_series_des == per_series_des.min()] = np.nan
cur_mods = pd.Series(models_pos).value_counts()
cur_mods = cur_mods.sort_values(ascending=False).head(1)
mods = mods.combine(cur_mods, max, fill_value=0)
rm_cols = tr_df[tr_df.isin(mods.index.tolist())]
rm_cols = rm_cols.dropna(how='all', axis=1).columns
per_series_des = per_series.copy().drop(mods.index, axis=0)
per_series_des = per_series_des.drop(rm_cols, axis=1)
if per_series_des.shape[1] == 0:
per_series_des = per_series.copy().drop(mods.index, axis=0)
mods_per_series = per_series.loc[mods.index].idxmin()
best5 = (
model_results[
model_results['ID'].isin(mods_per_series.unique().tolist())
]
.drop_duplicates(
subset=['Model', 'ModelParameters', 'TransformationParameters']
)
.set_index("ID")[
['Model', 'ModelParameters', 'TransformationParameters']
]
)
nomen = 'Horizontal' if 'horizontal' in ensemble else 'Probabilistic'
metric = 'Score-min' if 'horizontal' in ensemble else 'SPL'
best5_params = {
'Model': 'Ensemble',
'ModelParameters': json.dumps(
{
'model_name': nomen,
'model_count': mods_per_series.unique().shape[0],
'model_metric': metric,
'models': best5.to_dict(orient='index'),
'series': mods_per_series.to_dict(),
}
),
'TransformationParameters': '{}',
'Ensemble': 2,
}
best5_params = pd.DataFrame(best5_params, index=[0])
ensemble_templates = pd.concat(
[ensemble_templates, best5_params], axis=0, ignore_index=True
)
return ensemble_templates
def generate_mosaic_template(
initial_results, full_mae_ids, num_validations, col_names, full_mae_errors, smoothing_window=None, **kwargs
):
"""Generate an ensemble template from results."""
total_vals = num_validations + 1
local_results = initial_results.copy()
# sort by runtime then drop duplicates on metric results
local_results = local_results.sort_values(by="TotalRuntimeSeconds", ascending=True)
local_results.drop_duplicates(
subset=['ValidationRound', 'smape', 'mae', 'spl'], inplace=True
)
# remove slow models... tbd
# select only models run through all validations
run_count = local_results[['Model', 'ID']].groupby("ID").count()
models_to_use = run_count[run_count['Model'] == total_vals].index.tolist()
# begin figuring out which are the min models for each point
id_array = np.array([y for y in sorted(full_mae_ids) if y in models_to_use])
errors_array = np.array(
[
x
for y, x in sorted(
zip(full_mae_ids, full_mae_errors), key=lambda pair: pair[0]
)
if y in models_to_use
]
)
# window across multiple time steps to smooth the result
name = "Mosaic"
if smoothing_window is not None:
from scipy.ndimage import uniform_filter1d
errors_array = uniform_filter1d(np.nan_to_num(errors_array), size=smoothing_window, axis=1)
# name = "Mosaic-window"
slice_points = np.arange(0, errors_array.shape[0], step=total_vals)
id_sliced = id_array[slice_points]
best_points = np.add.reduceat(errors_array, slice_points, axis=0).argmin(axis=0)
model_id_array = pd.DataFrame(np.take(id_sliced, best_points), columns=col_names)
used_models = pd.unique(model_id_array.values.flatten())
used_models_results = local_results[
["ID", "Model", "ModelParameters", "TransformationParameters"]
].drop_duplicates(subset='ID')
used_models_results = used_models_results[
used_models_results['ID'].isin(used_models)
].set_index("ID")
ensemble_params = {
'Model': 'Ensemble',
'ModelParameters': json.dumps(
{
'model_name': name,
'model_count': used_models_results.shape[0],
'model_metric': "MAE",
'models': used_models_results.to_dict(orient='index'),
'series': model_id_array.to_dict(orient='dict'),
}
),
'TransformationParameters': '{}',
'Ensemble': 2,
}
ensemble_template = pd.DataFrame(ensemble_params, index=[0])
return ensemble_template
def mosaic_to_horizontal(ModelParameters, forecast_period: int = 0):
"""Take a mosaic template and pull a single forecast step as a horizontal model.
Args:
ModelParameters (dict): the json.loads() of the ModelParameters of a mosaic ensemble template
forecast_period (int): when to choose the model, starting with 0
where 0 would be the first forecast datestamp, 1 would be the second, and so on
must be less than forecast_length that the model was trained on.
    Returns:
ModelParameters (dict)
"""
if str(ModelParameters['model_name']).lower() != "mosaic":
raise ValueError("Input parameters are not recognized as a mosaic ensemble.")
all_models = ModelParameters['series']
result = {k: v[str(forecast_period)] for k, v in all_models.items()}
model_result = {
k: v for k, v in ModelParameters['models'].items() if k in result.values()
}
return {
'model_name': "horizontal",
'model_count': len(model_result),
"model_metric": "mosaic_conversion",
'models': model_result,
'series': result,
}
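# Example (hypothetical template): for ModelParameters with
# series = {'seriesA': {'0': 'm1', '1': 'm2'}} and forecast_period=1, the result is a
# horizontal-style dict with series = {'seriesA': 'm2'} and models trimmed to 'm2' only.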
def MosaicEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
df_train=None,
prematched_series: dict = None,
):
"""Generate forecast for mosaic ensembling.
Args:
prematched_series (dict): from outer horizontal generalization, possibly different than params
"""
# work with forecast_lengths longer or shorter than provided by template
# this is meant to fill in any failures
startTime = datetime.datetime.now()
sample_idx = next(iter(forecasts.values())).index
available_models = [mod for mod, fcs in forecasts.items() if fcs.shape[0] > 0]
train_size = df_train.shape
full_models = [
mod for mod, fcs in forecasts.items() if fcs.shape[1] == train_size[1]
]
if not full_models:
print("No full models available for mosaic generalization.")
full_models = available_models # hope it doesn't need to fill
if prematched_series is None:
prematched_series = ensemble_params['series']
all_series = generalize_horizontal(
df_train,
prematched_series,
available_models=available_models,
full_models=full_models,
)
org_idx = df_train.columns
final = pd.DataFrame.from_dict(all_series)
final.index.name = "forecast_period"
melted = pd.melt(
final,
var_name="series_id",
value_name="model_id",
ignore_index=False,
).reset_index(drop=False)
melted["forecast_period"] = melted["forecast_period"].astype(int)
max_forecast_period = melted["forecast_period"].max()
# handle forecast length being longer than template
len_sample_index = len(sample_idx)
if len_sample_index > (max_forecast_period + 1):
print("Mosaic forecast length longer than template provided.")
base_df = melted[melted['forecast_period'] == max_forecast_period]
needed_stamps = len_sample_index - (max_forecast_period + 1)
newdf = pd.DataFrame(np.repeat(base_df.to_numpy(), needed_stamps, axis=0))
newdf.columns = base_df.columns
newdf['forecast_period'] = np.tile(
np.arange(max_forecast_period + 1, needed_stamps + 1 + max_forecast_period),
base_df.shape[0],
)
melted = | pd.concat([melted, newdf]) | pandas.concat |
from unittest.mock import patch
import featuretools as ft
import pandas as pd
import pytest
import woodwork as ww
from pandas.testing import assert_frame_equal
from woodwork.logical_types import (
Boolean,
Categorical,
Datetime,
Double,
Integer,
)
from blocktorch.pipelines.components import DFSTransformer
def test_index_errors(X_y_binary):
with pytest.raises(TypeError, match="Index provided must be string"):
DFSTransformer(index=0)
with pytest.raises(TypeError, match="Index provided must be string"):
DFSTransformer(index=None)
def test_numeric_columns(X_y_multi):
X, y = X_y_multi
X_pd = pd.DataFrame(X)
feature = DFSTransformer()
feature.fit(X_pd, y)
feature.transform(X_pd)
@patch("blocktorch.pipelines.components.transformers.preprocessing.featuretools.dfs")
@patch(
"blocktorch.pipelines.components.transformers.preprocessing.featuretools.calculate_feature_matrix"
)
def test_featuretools_index(mock_calculate_feature_matrix, mock_dfs, X_y_multi):
X, y = X_y_multi
X_pd = pd.DataFrame(X)
X_new_index = X_pd.copy()
index = [i for i in range(len(X))]
new_index = [i * 2 for i in index]
X_new_index["index"] = new_index
mock_calculate_feature_matrix.return_value = | pd.DataFrame({}) | pandas.DataFrame |
import re
import os
import pandas as pd
import numpy as np
def readGas(DataPath, building, building_num, write_data, datafile, floor_area):
dateparse = lambda x: pd.datetime.strptime(x, '%d-%b-%y')
    print('importing gas data from:', DataPath + building + '/Data/' + datafile + '_GasData.csv')
if building_num == 1: # Central House
df = pd.read_csv(DataPath + building + '/Data/' + datafile + '_GasData.csv', date_parser=dateparse,
header=0, index_col=0)
df = df.loc['2013-01-01':'2016-10-01'] # ['2015-09-31':'2016-10-01'] ['2012-01-24':'2016-10-01']
df = df.groupby(df.index.month).mean() # get the monthly mean over multiple years
df = pd.concat([df[9:], df[:9]]) # reorder the months to align with the submetered data...
rng = pd.date_range(start='09/2016', end='09/2017', freq='M')
df = df.set_index(rng) # set new index to align mean monthly gas data with metered electricity
df.rename(columns={df.columns[0]: 'Gas'}, inplace=True)
return df
def readSTM(DataPathSTM, building, building_num, write_data, datafile, floor_area):
""" Short Term Monitoring """
if building_num in {0}:
dateparseSTM = lambda x: pd.datetime.strptime(x, '%d-%m-%y %H:%M')
elif building_num in {1}:
dateparseSTM = lambda x: pd.datetime.strptime(x, '%d/%m/%Y %H:%M')
if building_num in {0,1}:
df_stm = | pd.read_csv(DataPathSTM + datafile + '/' + datafile + '_combined.csv', date_parser=dateparseSTM, header=0,index_col=0) | pandas.read_csv |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
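        # split the rows into (start_row, nrows) chunks, one chunk per worker thread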
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when on chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows should skip the first 4 lines (including comments), while
# header should start from the second non-commented line starting
# with line 5
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
import os as os
from lib import ReadCsv
from lib import ReadConfig
from lib import ReadData
from lib import NetworkModel
from lib import ModelMetrics
from lib import SeriesPlot
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from lib import modwt
import keras
from datetime import date,datetime,time
from datetime import datetime
config = ReadConfig.ReadConfig()
config_data = config.read_config(os.path.join("config", "config.json"))
reader = ReadData.ReadData()
all_data = reader.readClimateFiles(config_data)
subset = all_data[['date','site_x', 'Hs','Hmax','Tz','Tp','DirTpTRUE','SST']]
subset.describe()
def make_date(series):
for dt in series:
yield datetime.strptime(dt, '%d/%m/%Y')
dates = list(make_date(subset['date']))
subset.index = range(0, subset.shape[0])
datesDf = pd.DataFrame({'dates': pd.Series(dates)}, index=range(0,len(dates)))
subset2 = pd.concat([subset, datesDf], axis=1)
subset2 = subset2.sort_values('dates')
idx1 = subset2.reindex(columns=['dates','site_x']).index
subset2.index = idx1
sitenames = subset2['site_x'].unique()
def getsite(data, col, site):
return data[(data[col] == site)]
# 7 day lag.
def make_lags(data, fromN, maxN):
for i in range(fromN,maxN):
nextData = data.shift(i).dropna()
colnames = list(map(lambda col: col+'_t-'+str(i), nextData.columns))
nextData.columns = colnames
yield nextData
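# Illustration (not part of the pipeline): make_lags yields one shifted copy of the frame per
# lag, with columns renamed to mark the offset. A minimal sketch on a toy frame, using only
# pandas which is already imported above:
#
#     toy = pd.DataFrame({'Hs': [1.0, 2.0, 3.0, 4.0]})
#     [lag.columns.tolist() for lag in make_lags(toy, 1, 3)]
#     # -> [['Hs_t-1'], ['Hs_t-2']]
#
# Each yielded frame is shorter than the input because shift(i).dropna() removes the first i
# rows, which is why the lags are re-aligned against minrows in the loop below.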
target_set = None
for site in sitenames:
data = getsite(subset2, 'site_x', site)
data.index = range(0,data.shape[0])
lags = list(make_lags(data, 1,8))
minrows = lags[6].shape[0]
target = data[6:minrows]
for i in range(0,len(lags)):
lags[i] = lags[i][i:minrows]
lags.append(target)
if target_set is None:
target_set = pd.concat(lags, axis=1)
else:
temp = pd.concat(lags, axis=1)
target_set = pd.concat([target_set, temp], axis=0)
target_set = target_set.dropna()
target_set[['Hs_t-7','Hs_t-6','Hs_t-5','Hs_t-4','Hs_t-3','Hs_t-2','Hs_t-1','Hs']].head(10)
# Now that we have timeseries data we now need to calculate wavelet decompositions for each
# window of time. Note that we are lagging only up to a period of 7 days.
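# Note on the decomposition below: modwt.modwt(x, 'db3', 3) comes from the local lib package,
# and the unpacking at its call site implies it returns three detail-coefficient arrays
# (C1, C2, C3) plus one approximation array (A). Assuming it follows the usual MODWT
# convention, the transform is undecimated, so each returned array has the same length as the
# input series and can be attached back onto the frame as ordinary columns, which is what the
# loop below relies on.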
norm_data = None
numeric_cols = ['Hs', 'Hmax','Tz','Tp','DirTpTRUE','SST']
temp = []
for col in numeric_cols:
for i in range(1,8):
temp.append(col+'_t-'+str(i))
numeric_cols.extend(temp)
wavelet='db3'
wavelet_cols = []
wavelet_data=None
for site in sitenames:
data = getsite(target_set, 'site_x', site)
data = data[numeric_cols]
for col in numeric_cols:
C1, C2, C3, A = modwt.modwt(data[col].values, wavelet, 3)
nameA = col+"_A1"
name1 = col+"_C1"
name2 = col+"_C2"
name3 = col+"_C3"
wavelet_cols.append([nameA,name1,name2,name3])
data[nameA] = pd.Series(A)
data[name1] = pd.Series(C1)
data[name2] = pd.Series(C2)
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
from sklearn.model_selection import train_test_split
# Data normalization
def normalization_radius(num):
return (num - 3.5) // 0.5
def normalization_height(num):
return (num - 4.5) // 0.5
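# Despite the name, these helpers bucket rather than rescale: floor-dividing by 0.5 maps a
# continuous value to an integer bin index of width 0.5, counted from the assumed minimum
# (3.5 for radius, 4.5 for height). For example, normalization_radius(4.6) -> (4.6-3.5)//0.5 = 2.0.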
if __name__ == '__main__':
freq = pd.read_excel("Mode1.xlsx")
dataset_x = pd.DataFrame(freq[["Electric_Abs_3D", "Magnetic_Abs_3D", "Mode 1"]])
# dataset_y = pd.DataFrame(freq[["cH", "cR"]])
dataset_y = pd.DataFrame(freq[["cR"]])
from pathlib import Path
import click
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pyspark.sql.functions as F
import seaborn as sns
import torch
from pyspark.sql import SparkSession
plt.style.use("seaborn")
plt.rcParams.update({
"figure.titlesize": 30,
"axes.titlesize": 24,
"axes.labelsize": 20,
"xtick.labelsize": 16,
"ytick.labelsize": 16,
"legend.title_fontsize": 20,
"legend.fontsize": 16
})
@click.command()
@click.option("--model-id", type=str)
def make_charts(model_id: str):
spark = SparkSession \
.builder \
.appName("ASBA") \
.config("spark.driver.memory", "15g") \
.config("spark.sql.shuffle.partitions", "300") \
.getOrCreate()
businesses = spark.read.json("data/yelp/business.json")
reviews = spark.read.json("data/yelp/review.json")
users = spark.read.json("data/yelp/user.json")
tips = spark.read.json("data/yelp/tip.json")
model_name = model_id.replace('/', '-')
path = Path(f"charts/{model_name}")
path.mkdir(parents=True, exist_ok=True)
# Confusion Heatmap
confusion = pd.read_csv(f"data/{model_name}/confusion.csv")
labels = list(confusion.columns)
confusion = confusion.values
fig, ax = plt.subplots(figsize=(11, 9))
cmap = sns.diverging_palette(230, 20, as_cmap=True)
X = confusion / confusion.sum(1, keepdims=True)
sns.heatmap(
pd.DataFrame(X, index=labels, columns=labels),
cmap=cmap,
center=0,
vmin=0,
vmax=1,
annot=True,
fmt=".3f",
linewidths=.5,
cbar_kws={
"shrink": .5
}
)
ax.set_title(f"Confusion Matrix for {model_id} on Test Set")
ax.set_xlabel("Truth")
ax.set_ylabel("Predicted")
plt.tight_layout()
fig.savefig(path.joinpath("confusion.png"))
# Review vs Tip Word Length
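# Word counts below are approximate: F.split(col, " ") tokenises on single spaces and
# F.size() counts the resulting array, so punctuation and repeated whitespace are not
# handled specially.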
review_wc = reviews \
.withColumn("word_count", F.size(F.split(F.col("text"), " "))) \
.select("word_count") \
.toPandas()
review_wc["type"] = "review"
tip_wc = tips \
.withColumn("word_count", F.size(F.split(F.col("text"), " "))) \
.select("word_count") \
.toPandas()
tip_wc["type"] = "tip"
word_count = pd.concat([review_wc, tip_wc], axis=0, ignore_index=True)
fig, ax = plt.subplots(figsize=(15, 10))
sns.kdeplot(
data=word_count,
x="word_count",
hue="type",
log_scale=True,
cumulative=True,
common_norm=False,
common_grid=True,
ax=ax
)
ax.set_title("Cumulative Dist. Comparison for Tips vs. Reviews")
ax.set_xlabel("Number of Words")
ax.set_ylabel("$F(x)$")
plt.tight_layout()
fig.savefig(path.joinpath("comparison.png"))
del word_count, review_wc, tip_wc
# Bias Correction Chart
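# The adjustment below removes per-user rating bias: each review's stars are centred on the
# reviewer's average_stars, the deviation is halved, and the result is re-centred on 3 (the
# midpoint of the 1-5 scale), i.e. adjusted_stars = 0.5 * (stars - average_stars) + 3.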
adjusted = reviews \
.join(users, reviews.user_id == users.user_id) \
.withColumn("adjusted_stars", (1 / 2)*(F.col("stars") - F.col("average_stars")) + 3) \
.select("business_id", "stars", "adjusted_stars") \
.groupBy("business_id") \
.mean()
ratings = adjusted.toPandas()
ratings["bias"] = ratings["avg(stars)"] - ratings["avg(adjusted_stars)"]
df1 = ratings.get(["business_id", "avg(stars)"])
df1 = df1.rename({"avg(stars)": "stars"}, axis=1)
df1["adjusted"] = False
df2 = ratings.get(["business_id", "avg(adjusted_stars)"])
df2 = df2.rename({"avg(adjusted_stars)": "stars"}, axis=1)
df2["adjusted"] = True
combined = pd.concat([df1, df2], axis=0, ignore_index=True)
fig, ax = plt.subplots(1, 2, figsize=(25, 10))
fig.suptitle("Review Star Ratings Bias Correction")
sns.kdeplot(data=combined, x="stars", hue="adjusted", multiple="stack", bw_adjust=4, clip=(1, 5), ax=ax[0])
ax[0].set_title("Distribution of Stars Comparison")
ax[0].set_xlabel("Stars")
ax[0].set_xlim(1, 5)
sns.kdeplot(data=ratings, x="bias", fill=True, color="purple", ax=ax[1], clip=(-4, 4))
ax[1].set_title("Distribution of Bias Between Stars and Adjusted Stars")
ax[1].set_xlabel("Bias")
ax[1].set_xlim(-4, 4)
plt.tight_layout()
fig.savefig(path.joinpath("bias_correction.png"))
# Aspect Pair Plots
aspects = spark.read.json(f"data/{model_name}/aspects.json")
aspects = aspects.select("business_id", "user_id", "aspect", "polarity").groupBy(["business_id"]).pivot("aspect").mean()
aspects = aspects.toPandas()
aspects = aspects.set_index("business_id")
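# Aspect polarities are assumed to lie in [-1, 1]; the affine map 2*p + 3 on the next line
# rescales them onto the familiar 1-5 star range so the pair plots are comparable with star
# ratings.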
aspects = 2*aspects + 3
g = sns.pairplot(aspects, kind="hist")
g.fig.suptitle("Aspect Relationship Pair Plots")
g.fig.set_size_inches(15, 15)
g.fig.savefig(path.joinpath("aspect_pairs.png"))
# Answer Variations Chart
bdf = businesses.alias("a") \
.join(adjusted.alias("b"), businesses.business_id == adjusted.business_id) \
.select("a.business_id", "name", "city", "state", "stars", "review_count", F.col("avg(adjusted_stars)").alias("adjusted_stars", )) \
.toPandas()
bdf = bdf.set_index("business_id")
results = pd.concat([bdf, aspects], join="inner", axis=1)
#!/usr/bin/env python
# coding: utf-8
# ## CIND820 Project Course Code
# #### <NAME> - 501072988
# ### Assignment 3
# ### Initial Results and Code
# In[1]:
#import libraries
import laspy as lp, sklearn as skl, numpy as np, matplotlib as mp, pandas as pd
# In[2]:
from sklearn import cluster
from sklearn import preprocessing as prep
from sklearn.neighbors import NearestNeighbors
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import accuracy_score, confusion_matrix, silhouette_score, calinski_harabasz_score
from sklearn.cluster import OPTICS
# In[3]:
from scipy.spatial import ConvexHull, Voronoi
# In[4]:
import matplotlib.pyplot as plt
# In[5]:
import PIL
from PIL import ImageStat as istat
from PIL import ImageOps
# #### Data is loaded and prepared
# In[6]:
#original dataset https://nrs.objectstore.gov.bc.ca/gdwuts/092/092g/2016/dsm/bc_092g025_3_4_2_xyes_8_utm10_20170601_dsm.laz
#renamed here for clarity
path_to_data = "F:/Data/Lidar/dtvan/dtvan.laz"
with lp.open(path_to_data) as las_file:
las_data = las_file.read()
# In[7]:
# data loaded into a dataframe
df = pd.DataFrame({"X":las_data.x,"Y":las_data.y,"Z":las_data.z,"Intensity":las_data.intensity,"return_num":las_data.return_number,"totalreturns":las_data.num_returns,"classification":las_data.classification})
# In[8]:
#full dataset displayed on a scatter plot
fig,ax = plt.subplots(figsize = (15,15))
ax.scatter(df['X'],df['Y'],zorder=1,alpha=0.25,c='black',s=0.001)
# In[9]:
print("Total points:" + str(las_data.header.point_count))
# In[10]:
#print("Classes: " + str(set(list(df['classification']))))
# In[11]:
#data summary
df.describe()
# #### The full original dataset is too large to work with, so it is clipped to a smaller study area. The dataframe is queried using the study area's coordinate extent. The pre-classified ground points (class 2) are removed since we are not concerned with the ground right now; class 1 points are unclassified, and these are the ones we want to work with.
# In[12]:
# Define the area of interest, these values in meters are in the "NAD83 UTM 10" coordinate system of the provided dataset
# These are the upper and lower limits in meters to be used, these can be found using google maps or other free sites/software
# These were selected somewhat arbitrarily
aoi_extent = {'xmax':492349.0731766,'xmin':492043.6935073,'ymax':5458645.8660691,'ymin':5458340.4864470}
# In[285]:
#query the dataframe to return only the points within the extent above; very high intensity returns (>= 200) are dropped here as well, while the ground points are filtered out further below
df_clip = df.query("X>{0}&X<{1}&Y>{2}&Y<{3}&Intensity<200".format(aoi_extent['xmin'],aoi_extent['xmax'],aoi_extent['ymin'],aoi_extent['ymax']))
# In[385]:
df_clip.describe()
# #### Dataset statistics and information - exploratory
# In[386]:
#renaming the data frame for clarity
data = df_clip
# In[387]:
mp.pyplot.hist(data['totalreturns'])
# In[388]:
mp.pyplot.hist(data['Y'])
# In[389]:
mp.pyplot.hist(data['Z'])
# In[390]:
mp.pyplot.hist(data['Intensity'])
#the distribution is very heavy on low values and light on high ones, so it is examined below with a cutoff of 300
# In[391]:
i_cutoff = 300
# In[392]:
len(data[data['Intensity']<i_cutoff])
# In[393]:
len(data[data['Intensity']>i_cutoff])
# In[394]:
mp.pyplot.hist(data['Intensity'][data['Intensity']<i_cutoff])
# In[395]:
len(data[data['Intensity']>i_cutoff])/len(data[data['Intensity']<i_cutoff])
# In[396]:
#summarize the clipped data
data.describe()
# In[397]:
#study area points displayed on a scatter plot
fig,ax = plt.subplots(figsize = (15,15))
ax.scatter(data['X'],data['Y'],zorder=1,alpha=0.5,c='black',s=0.01)
# In[398]:
#the Z value (elevation) is used to visualize in 3d; since the values are in meters for all 3 axes, it plots nicely as is
fig = plt.figure(figsize = (15,15))
ax = plt.axes(projection='3d')
ax.scatter3D(data['X'],data['Y'],data['Z'],c='black',s=0.01,alpha=0.5)
#from matplotlib import cm
#ax.plot_surface(df_clip['X'],df_clip['Y'],df_clip['Z'],cmap=cm.coolwarm,linewidth=0,antialiased=False)
# # DEM Height Values
# In[399]:
# The Z values are an absolute elevation which is not as useful as height relative to the ground
# I need to create a ground image used to subtract from the elevation(Z) in order to get the height of the points relative to the ground
# The DEM was provided with the lidar data; I can clip this and extract the elevation of the ground for the entire area. Where there is something on the ground such as a building, the value is estimated using the nearest ground points available. I can then subtract the ground value from the laser return value to get the relative height of the object instead of the absolute height above sea level. This gives a more accurate height value to use with the algorithms.
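# In short, for every lidar return the relative height is computed as
#     height = Z - DEM(x, y)
# where DEM(x, y) is the ground elevation sampled from the raster at the point's location;
# this is what the dem_value and height columns added below hold.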
# In[400]:
dem_path = "F:/Data/Lidar/images/BCVAN_DEM1m_Clip.tif"
img0 = PIL.Image.open(dem_path)
# In[401]:
dem_array = np.asarray(img0)
# In[415]:
dem_img = img0.convert("F")
# In[416]:
#dem_img = img0.convert("RGB")
# In[417]:
np.asarray(dem_img)
# In[418]:
x_dem_img = (data['X'] - min(data['X']))/(max(data['X']-min(data['X'])))*306
y_dem_img = (data['Y'] - min(data['Y']))/(max(data['Y']-min(data['Y'])))*306
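# The factor 306 maps the normalised coordinates onto pixel indices 0..306, which assumes the
# clipped DEM raster is 307x307 pixels; getpixel() below then samples the nearest ground cell
# for each point.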
# In[419]:
x_dem_img
# In[420]:
coord_array_dem = np.array(pd.DataFrame({"X":x_dem_img,"Y":y_dem_img}))
# In[421]:
coord_array_dem
# In[422]:
dem_value = []
for coord in coord_array_dem:
val = dem_img.getpixel((coord[0],coord[1]))
dem_value.append(val)
# In[423]:
len(dem_value)
# In[424]:
data['dem_value'] = dem_value
# In[425]:
data
# In[426]:
data['height'] = data['Z'] - data['dem_value']
# In[427]:
data['height'].describe()
# In[428]:
data
# In[429]:
df_unclassified = data.query("classification==1")
df_ground = data.query("classification==2")
# In[430]:
# Plotting the pre classified/labelled ground points for reference
fig,ax = plt.subplots(figsize = (15,15))
ax.scatter(df_ground['X'],df_ground['Y'],c='black',s=0.01,alpha=0.5)
# In[431]:
# Plotting the pre classified/labelled unclassified points for reference
#it appears that alot of ground points are still labelled as unclassified.
fig,ax = plt.subplots(figsize = (15,15))
ax.scatter(df_unclassified['X'],df_unclassified['Y'],c='black',s=0.01,alpha=0.5)
# # Normalization
# #### Data normalized and preprocessed for analysis
# In[432]:
x_normal = (data['X'] - min(data['X']))/(max(data['X']-min(data['X'])))
# In[433]:
y_normal = (data['Y'] - min(data['Y']))/(max(data['Y']-min(data['Y'])))
# In[434]:
z_normal = (data['Z'] - min(data['Z']))/(max(data['Z']-min(data['Z'])))
# In[435]:
height_normal = (data['height'] - min(data['height']))/(max(data['height']-min(data['height'])))
# In[436]:
i_normal = (data['Intensity'] - min(data['Intensity']))/(max(data['Intensity']-min(data['Intensity'])))
# In[437]:
# new dataframe containing all the normalized values is created
df_normal = pd.DataFrame({'X':x_normal,'Y':y_normal,'Z':z_normal,'height':height_normal,'Intensity':i_normal,'return_num':df_clip['return_num'],'totalreturns':df_clip['totalreturns'],'classification':df_clip['classification']})
# In[438]:
df_normal
# In[439]:
# Plotting normalized looks the same but with the new scale
fig,ax = plt.subplots(figsize = (10,10))
ax.scatter(df_normal['X'],df_normal['Y'],c='black',s=0.01,alpha=0.5)
# In[440]:
df_normal.dtypes
# # Supervised Classification
# ## Ground
# In[441]:
# Classify the ground with a supervised classifier, using the provided ground points as labels:
# In[442]:
df_normal
# In[443]:
train,test = train_test_split(df_normal)
# In[444]:
train_features = pd.DataFrame({"Intensity":train['Intensity'],"return_num":train['return_num'],"totalreturns":train['totalreturns'],"height":train['height']})
# In[445]:
train_labels = np.ravel(pd.DataFrame({"classification":train['classification']}))
# In[446]:
test_features = pd.DataFrame({"Intensity":test['Intensity'],"return_num":test['return_num'],"totalreturns":test['totalreturns'],"height":test['height']})
# In[447]:
test_labels = np.ravel(pd.DataFrame({"classification":test['classification']}))
# In[448]:
#creates the model
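# criterion="entropy" grows the trees on information gain, and class_weight="balanced"
# reweights samples inversely to class frequency, which helps if the ground (class 2) and
# unclassified (class 1) points are unevenly represented in the training data.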
model = RandomForestClassifier(max_depth=5,random_state=0,n_estimators=50,criterion="entropy",verbose=0,class_weight="balanced")
# In[449]:
# trains the model - fit the train data to the model
model_fit = model.fit(train_features,train_labels)
# In[450]:
#predict the test data
test_predictions = model_fit.predict(test_features)
# In[451]:
len([i for i in test_predictions if i == 1])
# In[452]:
len([i for i in test_predictions if i != 1])
# In[453]:
model_fit.score(test_features,test_labels)
# In[454]:
confusion_matrix(test_labels,test_predictions)
# In[455]:
table = pd.DataFrame({"Intensity":df_normal['Intensity'],"return_num":df_normal['return_num'],"totalreturns":df_normal['totalreturns'],"height":df_normal['height']})
# In[456]:
table_labels = df_normal['classification']
# In[457]:
table_predictions = model_fit.predict(table)
# In[458]:
len([i for i in table_predictions if i == 1])
# In[459]:
len([i for i in table_predictions if i != 1])
# In[460]:
model_fit.score(table,table_labels)
# In[461]:
confusion_matrix(table_labels,table_predictions)
# In[462]:
df_normal['prediction'] = table_predictions.tolist()
# In[463]:
df_normal
# In[464]:
df_normal.query("classification != prediction")
# In[465]:
predicted_ground = df_normal.query("prediction == 2")
# In[466]:
predicted_ground
# In[467]:
fig,ax = plt.subplots(figsize = (15,15))
ax.scatter(predicted_ground['X'],predicted_ground['Y'],zorder=1,alpha=0.5,c='black',s=0.01)
# In[468]:
last_ground = predicted_ground.query("return_num==totalreturns")
# In[469]:
fig,ax = plt.subplots(figsize = (15,15))
ax.scatter(last_ground['X'],last_ground['Y'],zorder=1,alpha=0.5,c='black',s=0.01)
# In[470]:
last_ground
# In[471]:
predicted_non_ground = df_normal.query("prediction == 1")
# In[472]:
fig,ax = plt.subplots(figsize = (10,10))
ax.scatter(predicted_non_ground['X'],predicted_non_ground['Y'],zorder=1,alpha=0.5,c='black',s=0.01)
# In[473]:
predicted_non_ground
# In[474]:
data = predicted_non_ground
# In[475]:
data
# ## Add Imagery Data
# #### 2015 Imagery data was obtained from the City of Vancouver to extract the RGB values
# #### The image was clipped using external software (QGIS, open-source mapping program) to the same area of interest as above
# #### The selected image size is 4084x4084; the lidar coordinates are rescaled onto this pixel grid to extract the nearest pixel value (r,g,b) from the image for each point
# In[476]:
image_path = "F:/Data/Lidar/images/BCVANC15_P9_aoiclip.tif"
# In[477]:
img = PIL.Image.open(image_path)
# In[478]:
rgb_img = img.convert("RGB")
# In[479]:
rgb_img
# In[480]:
#this can be used to crop the imagery if we knew the exact coordinates, but I used QGIS to clip the imagery instead
#left,top,right,bottom = 0,0,4084,4084
#rgb_img = img.crop((left,top,right,bottom))
# In[481]:
#import math
#math.sqrt(istat.Stat(rgb_img).count[0])
# In[482]:
#this size aligns the pixels to the lidar points to extract the rgb values for each point
img.size
# In[483]:
#The image origin (top left) is different than the coordinate system of the lidar so the image needs to be flipped for the calculation to align them
rgb_img_flip = PIL.ImageOps.flip(rgb_img)
# In[484]:
#rgb_img.getpixel((2070,2070))
# In[485]:
# rescales the point values to line up with the pixels in the imagery - same idea as normalization
# this is basically reprojecting the coordinates of the lidar points to the coordinates of the image
x_img = (data['X'] - min(data['X']))/(max(data['X']-min(data['X'])))*4083
y_img = (data['Y'] - min(data['Y']))/(max(data['Y']-min(data['Y'])))*4083
# In[486]:
y_img
# In[487]:
coord_array = np.array(pd.DataFrame({"X":x_img,"Y":y_img}))
# In[488]:
coord_array
# coord_array holds the pixel locations on the image at which to read the rgb values
# #### The nearest R,G,B pixel value from the image is extracted for each lidar point and the results are saved as a field in the data frame
# In[489]:
rgb_data = []
rgb_data_r = []
rgb_data_g = []
rgb_data_b = []
for coord in coord_array:
rgb=rgb_img_flip.getpixel((coord[0],coord[1]))
r=rgb[0]
g=rgb[1]
b=rgb[2]
rgb_data.append(rgb)
rgb_data_r.append(r)
rgb_data_g.append(g)
rgb_data_b.append(b)
data['rgb'] = rgb_data
data['r'] = rgb_data_r
data['g'] = rgb_data_g
data['b'] = rgb_data_b
# In[490]:
data
# ## Vegetation - abandoned
# In[491]:
#trying to extract the vegetation using combination of fields for supervised classifier, to varied success
#vegetation = df_all.query("return_num < totalreturns & totalreturns > 1")
#fig,ax = plt.subplots(figsize = (15,15))
#ax.scatter(vegetation['X'],vegetation['Y'],zorder=1,alpha=0.5,c='black',s=0.01)
# In[492]:
vegetation = data.query("return_num < totalreturns & totalreturns > 1")
#vegetation = data.query("r<0.9 & g>0.1")
# In[493]:
fig,ax = plt.subplots(figsize = (15,15))
ax.scatter(vegetation['X'],vegetation['Y'],zorder=1,alpha=0.5,c='black',s=0.01)
# In[494]:
data_normal = data
# #### The R,G,B values are normalized like the rest:
# In[495]:
r_normal = (data['r'] - min(data['r']))/(max(data['r']-min(data['r'])))
g_normal = (data['g'] - min(data['g']))/(max(data['g']-min(data['g'])))
b_normal = (data['b'] - min(data['b']))/(max(data['b']-min(data['b'])))
data_normal['r'] = r_normal
data_normal['g'] = g_normal
data_normal['b'] = b_normal
# In[496]:
data_normal
# #### Additional testing and improvements
# In[497]:
data = data_normal
# In[498]:
data
# #### Initial Classification (unsupervised) using kmeans clustering
# #### Attempt to classify points into undetermined classes based on the data
# ##### Variables: Height, Intensity, R, G, B
# In[499]:
#features = pd.DataFrame({"R":data['r'],"G":data['g'],"B":data['b'],"H":data['height'],"I":data['Intensity']})
#features = pd.DataFrame({"R":data['r'],"G":data['g'],"B":data['b']})
features = pd.DataFrame({"H":data['height'],"I":data['Intensity']})
# In[500]:
features
# In[501]:
X1 = np.array(features)
# In[502]:
X1
# In[503]:
# initialize model
kmeancluster = cluster.KMeans(3,init='random',n_init=10)
# In[504]:
# fit to data
k_clusters = kmeancluster.fit(X1)
# In[505]:
len(k_clusters.labels_)
# In[506]:
print("Number of clusters:" + str(len(np.unique(k_clusters.labels_))))
print("Points clustered: " + str(len([i for i in k_clusters.labels_ if i != -1])))
# In[507]:
calinski_harabasz_score(X1,k_clusters.labels_)
# In[547]:
silhouette_score(X1,k_clusters.labels_,sample_size=10000)
# In[508]:
# add results to data frame
data['k_cluster'] = k_clusters.labels_
# In[509]:
#rename for clarity again
results = data
# In[510]:
# visualize the classes
fig,ax = plt.subplots(figsize = (15,15))
ax.scatter(results['X'],results['Y'],c=results['k_cluster'],s=0.1,alpha=0.5)
# In[511]:
fig = plt.figure(figsize = (15,15))
ax = plt.axes(projection='3d')
ax.scatter3D(results['X'],results['Y'],results['Z'],c=results['k_cluster'],s=0.01,alpha=1)
# In[512]:
#try to remove shadow
colour_manip = pd.DataFrame()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 14:41:06 2019
@author: erhan
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data_path = "../collected_data/2020 08 26_data_erhan.txt"
#data = pd.read_csv(data_path,sep=':',skiprows=520, nrows=50)
data = pd.read_csv(data_path, sep=':')
from sklearn import metrics
import pandas as pd
import numpy as np
from copy import deepcopy
import os
import re
from io import StringIO
from subprocess import Popen, PIPE
from data.download import DatasetDownloader
from data.preprocessing import Preprocessor
import time
from sklearn.cluster import KMeans
import plotly.offline as plotly_offline
import plotly.graph_objs as go
import matplotlib.pyplot as plt
class ElkiPipe:
"""
This class provides a pipe to the java implementation of elki:
https://elki-project.github.io/
"""
def __init__(self, elki_path_to_jar=None):
if elki_path_to_jar is None:
src_dir = os.path.join(os.getcwd(), os.pardir, 'src')
self.elki_path_to_jar = os.path.join(src_dir, "models","elki-bundle-0.7.1.jar")
def run_elki(self, df:pd.DataFrame, parameters:list, plot_path=None):
"""
Wrapper function to elki java implementation.
Based on https://stackoverflow.com/questions/15326505/running-clustering-algorithms-in-elki and
the command line builder from https://elki-project.github.io/
e.g. For predecon we have the following command:
KDDCLIApplication -dbc.in filepath -algorithm clustering.subspace.PreDeCon -dbscan.epsilon 10.0 -dbscan.minpts 5 \
-predecon.delta 0.1 -predecon.lambda 2
"""
if plot_path is None:
elki_data = self.__remove_unused_columns(df)
else:
elki_data = df
temp_path = "elki_temp.csv"
elki_data.to_csv(temp_path, sep=";", index=False)
constant_args = ["java", "-jar", self.elki_path_to_jar,
"KDDCLIApplication",
"-dbc.in", temp_path,
"-parser.colsep", ";"]
args = constant_args + parameters
if plot_path is not None:
args += ["-resulthandler", "ExportVisualizations",
"-vis.output", plot_path]
process = Popen(args,
stdout=PIPE)
(console_output, error) = process.communicate()
exit_code = process.wait()
if exit_code != 0:
raise IOError ("ELKI failed to run:\n{}".format(console_output.decode("utf-8", errors='ignore')))
#clean up file
os.remove(temp_path)
clustering_results = console_output.decode("utf-8")
#process console output
cleaned_results = self.__get_elki_cluster_result(clustering_results)
return cleaned_results
@staticmethod
def __get_elki_cluster_result(results):
""" Constructs a dataframe with the cluster labels
"""
cleaned_results = pd.DataFrame(columns=["ID","label"])
for cluster_id, points_in_cluster in enumerate(results.split("Cluster: Cluster")[1:]):
#print("points_in_cluster: ", points_in_cluster)
helper_df = pd.DataFrame(columns=["ID","label"])
# Important for sorting the points to its original order
point_ids = re.findall(r"ID=(\d+)", points_in_cluster)
helper_df["ID"] = point_ids
helper_df["label"] = cluster_id
cleaned_results = pd.concat([cleaned_results, helper_df])
# Prepare labels
cleaned_results = cleaned_results.sort_values("ID").drop("ID",axis=1).reset_index(drop=True)
return cleaned_results
@staticmethod
def get_parameters_for_predecon(param_eps = 10.0, param_minpts = 2, param_delta = 0.1, param_lambda = 1, param_kappa = 20.0):
"""
This method returns the parameters for the PreDeCon algorithm in format for the Elki Pipe
# Hyperparameter param_minpts: Minimal number of points in epsilon-neighbourhood.
# Hyperparameter param_eps: Distance for neighbourhood.
# Hyperparameter param_delta : Variance threshold for subspace preference clusters.
# Hyperparameter param_lambda: Dimensionality threshold.
# Hyperparameter param_kappa: Weight for subspace preference vectors.
"""
return [ "-algorithm", "clustering.subspace.PreDeCon",
"-dbscan.epsilon", str(float(param_eps)),
"-dbscan.minpts", str(param_minpts),
"-predecon.delta", str(float(param_delta)),
"-predecon.lambda", str(param_lambda),
"-predecon.kappa", str(float(param_kappa))
]
@staticmethod
def get_parameters_for_hdbscan(param_minpts=100):
return ["-algorithm", "clustering.hierarchical.HDBSCANLinearMemory",
"-hdbscan.minPts", str(param_minpts)]
@staticmethod
def __remove_unused_columns(df:pd.DataFrame, remove_cols = None):
if remove_cols is None:
remove_cols = ["mode", "notes", "scripted", "token", "trip_id"]
colnames = list(df.columns)
df_copy = deepcopy(df)
for col_i in remove_cols:
if col_i in colnames:
df_copy = df_copy.drop(col_i,axis=1)
return df_copy
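# Illustrative usage sketch (not part of the original pipeline): how ElkiPipe and the
# PreDeCon parameter builder above might be combined. The dataframe argument and the
# parameter values are assumptions chosen purely for demonstration.
def _example_elki_predecon_run(feature_df: pd.DataFrame) -> pd.DataFrame:
    pipe = ElkiPipe()
    predecon_params = ElkiPipe.get_parameters_for_predecon(
        param_eps=10.0, param_minpts=5, param_delta=0.1, param_lambda=2)
    # run_elki returns a dataframe with a single "label" column, one cluster id per input row
    return pipe.run_elki(feature_df, predecon_params)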
class Clustering:
"""
Class for routines related to clustering.
"""
# All distance metrics for numerical data natively supported by scipy.
# See https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html.
SCIPY_DISTANCE_METRICS = [
"braycurtis", "canberra", "chebyshev", "cityblock", "correlation", "cosine",
"euclidean", "hamming", "mahalanobis", "matching", "minkowski", "seuclidean",
"sqeuclidean"
]
@staticmethod
def evaluate_distance_metrics(filename: str = "preprocessed_data.dat"):
"""
Evaluates the following distance metrics...
...for column "total":
- All of scipy's built-in distance metrics
(https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html),
- DTW.
...for individual columns (as described in task assignment):
- Euclidean distance.
See Preprocessor.calculate_distance_for_individual_columns() on why only L2 is currently supported for indiv.
columns.
:param filename:
:return:
"""
# Auxiliary variable holding names of columns with categorical data.
categorical_columns = ["mode", "notes", "scripted", "token", "trip_id"]
# 1. Load preprocessed data from disk.
data_dir = os.path.join(os.path.abspath(DatasetDownloader.get_data_dir()))
file_path = os.path.join(data_dir, "preprocessed", filename)
dfs = Preprocessor.restore_preprocessed_data_from_disk(file_path)
# 2. Get cut trip snippets.
trips_cut_per_30_sec = Preprocessor.get_cut_trip_snippets_for_targets(
dfs=dfs,
snippet_length=30,
sensor_type="acceleration",
target_column_names=["total", "x", "y", "z"]
)
##############################################################################
# 3. Calculate distance matrices for all metrics with column "total".
##############################################################################
all_dist_metrics_for_total = deepcopy(Clustering.SCIPY_DISTANCE_METRICS)
all_dist_metrics_for_total.append("dtw")
performance_data = []
for index, metric in enumerate(all_dist_metrics_for_total):
if metric not in ("metric_name_to_exclude",):
print("Calculating ", metric)
start_time = time.time()
distance_matrix_with_categorical_data = Preprocessor.calculate_distance_for_n2(
trips_cut_per_30_sec[0],
metric=metric
)
                runtime_in_sec = time.time() - start_time  # time.time() already measures seconds
# Drop categorical data.
distance_matrix = distance_matrix_with_categorical_data.drop(categorical_columns, axis=1)
                # Naive assumption of 3 clusters due to the 3 labels Walk, Metro and Tram
kmeans = KMeans(n_clusters=3, random_state=0).fit(distance_matrix)
cluster_labels = kmeans.labels_
distance_matrix_with_categorical_data["cluster_labels"] = cluster_labels
                # Validate performance against the assumed ground truth of transport modes (i.e. three clusters), ignoring
# tokens, scripted/unscripted and other potential subclasses.
performance = Clustering.get_clustering_performance_as_dict(
features=distance_matrix,
cluster_assignments=cluster_labels,
true_labels=distance_matrix_with_categorical_data["mode"]
)
performance["runtime_for_distance_calculation"] = float(runtime_in_sec)
performance["distance_metric"] = metric
performance["distance_metric_index"] = index + 1
performance_data.append(performance)
# Transform performance data to data frame.
        performance_df = pd.DataFrame(performance_data)
import pandas as pd
import sys
import timeit
from optimize.check_vaex_func_efficiency import sizeof_fmt
t_in_ms = 101242323432.5 # arbitrary value to be used as timestamp, for 2000 sample rate
t_in_s = t_in_ms / 1000
def test_pd_to_timestamp():
setup = "import ecgrecord; ecg_record, seg, lead = ecgrecord.ECGRecord.example()"
stmt = "seg.get_time_axis()"
return timeit.timeit(stmt, setup, number=2)
if __name__ == "__main__":
print(sys.getsizeof(t_in_ms))
sz_int = sys.getsizeof(t_in_s)
print(sz_int)
    tstamp_s = pd.to_datetime(t_in_s, unit='s')
import re
from datetime import datetime
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import squarify
from sklearn_pandas import CategoricalImputer
'''
Project started on 20th of May 2019 - by sophie789hu
In the era of #metoo, the question of gender equity arises (again), to the great benefit of women.
Recent concerns have been raised about our skewed perception of gender.
Media, society, family... each of these factors, and more, shapes our judgment of women, and together they
sustain gender stereotypes and prejudices inherited from our Judeo-Christian culture.
Isn't it interesting to notice that gender inequality starts with the omnipresence of male characters around us,
even in comics literature?
The brief analysis below does not aim to blame DC and Marvel, but rather to encourage them to create more female characters in their stories.
'''
# IMPORT OF DC AND MARVEL DATASETS
df_dc = pd.read_csv("dc-wikia-data.csv")
df_marvel = pd.read_csv("marvel-wikia-data.csv")
# DESCRIPTION OF DC and MARVEL DATASETS
def DescribeDataframe(df_dc, df_marvel):
""" Describe dataframe """
for df in [df_dc, df_marvel]:
print(df.info())
print(df.head())
print(df.describe())
DescribeDataframe(df_dc, df_marvel)
"""
The DC dataset contains 6896 entries such as each entry represents one character.
Each character is characterized by 13 variables or columns, such as:
1 - "page_id" of type integer, corresponds to the unique identifier of the character's page
2 - "name" of type object, corresponds to the unique name of the character. Most of the time,
- if the character is a super-hero: "nickname" + "civil first and last name" in brackets (ex: "Batman (B<NAME>)")
- if the character is not a super-hero: "civil first and last name" + "universe" in brackets (ex: "<NAME> (New Earth)")
The universes are "Earth-Two", "Earth-One" or "New Earth".
3 - "urlslug" of type object, corresponds to the unique url of the character within the wiki in "dc.fandom.com".
4 - "ID" of type object, indicates the identity status of the character: "Secret Identity", "Public Identity" or "Identity Unknown".
5 - "ALIGN" of type object, indicates if one character is good ("Good Characters"), bad ("Bad Characters"), neutral ("Neutral Characters") or "Reformed Criminals".
6 - "EYE" of type object, indicates the eye color of the character. There are 17 options from "Blue Eyes" to "Orange Eyes". The option "Auburn Hair" may be an error.
7 - "HAIR" of type object, indicates the hair color of the character. There are 17 options from "Black Hair" to "Platinum Blond Hair".
8 - "SEX" of type object, indicates the gender of the character: "Male Characters", "Female Characters", "Genderless Characters"
9 - "GSM" of type object, indicates if the character is a gender or sexual minority: "Bisexual Characters" or "Homosexual Characters".
10 - "ALIVE" of type object, indicates if the character is alive ("Living Characters") or deceased ("Deceased Characters")
11 - "APPEARANCES" of type object, indicates the number of appareances of the character in comic books (as of Sep. 2, 2014)
12 - "FIRST APPEARANCE" of type object, indicates the year and the month of the character's first appearance in a comic book, if available
13 - "YEAR" of type float, indicates the year of the character's first appearance in a comic book, if available. The year in the column "YEAR" is equal
The Marvel dataset contains 16376 entries such as each entry represents one character.
Each character is characterized by 13 variables or columns, same as in the dataset df_dc.
"""
# DATA CLEANING
def AlignColumn(df_dc, df_marvel):
""" Align column names """
list_df = [df_dc, df_marvel]
for df in list_df:
df.columns = df.columns.str.upper()
df.rename(columns={'ID': 'IDENTITY TYPE', 'SEX': 'GENDER', 'APPEARANCES': 'NUMBER OF APPEARANCES',
'FIRST APPEARANCE': 'DATE OF FIRST APPEARANCE', 'YEAR': 'YEAR OF FIRST APPEARANCE', 'ALIGN': 'TEAM'}, inplace=True)
return df_dc, df_marvel
def CompareDataType(df_dc, df_marvel):
""" Compare column data type """
for col in df_dc.columns:
if df_dc[col].dtype != df_marvel[col].dtype:
print(col)
else:
print(col, ":", df_dc[col].dtype, "vs.", df_marvel[col].dtype)
def AlignData(df_dc, df_marvel):
""" Align data name and value """
list_df = [df_dc, df_marvel]
for df in list_df:
# Align data type
df["NUMBER OF APPEARANCES"] = df["NUMBER OF APPEARANCES"].apply(lambda line: int(line) if pd.isnull(line) is False else None)
df["YEAR OF FIRST APPEARANCE"] = df["YEAR OF FIRST APPEARANCE"].apply(lambda line: str(int(line)) if pd.isnull(line) is False else None)
# Align data value in column
df.GENDER = df.GENDER.str.replace(" Characters", "")
print(df.GENDER)
# print(df.GENDER)
df.GENDER.replace(r"Agender|Genderless|Genderfluid|Transgender", "Other", regex=True, inplace=True)
df.TEAM = df.TEAM.str.replace(" Characters", "")
df.TEAM = df.TEAM.str.replace("Reformed Criminals", "Neutral")
df["IDENTITY TYPE"].replace(r"Identity Unknown|No Dual Identity|Known to Authorities Identity", "Other", regex=True, inplace=True)
df["IDENTITY TYPE"] = df["IDENTITY TYPE"].str.replace(" Identity", "")
df.ALIVE = df.ALIVE.str.replace(" Characters", "")
df.ALIVE.replace("Living", "Alive", regex=True, inplace=True)
df.ALIVE.replace("Deceased", "Dead", regex=True, inplace=True)
return df_dc, df_marvel
def CompareYears_dc(df_dc):
""" Compare year in "FIRST APPEARANCE" and "YEAR OF FIRST APPEARANCE" columns - in df_dc"""
year_in_date_dc = df_dc["DATE OF FIRST APPEARANCE"].apply(lambda line: line.split(",")[0] if pd.isnull(line) is False else False)
year_in_year_dc = df_dc["YEAR OF FIRST APPEARANCE"].apply(lambda line: line if pd.isnull(line) is False else False)
print(year_in_date_dc[year_in_date_dc != year_in_year_dc])
def CompareYears_marvel(df_marvel):
""" Compare year in "FIRST APPEARANCE" and "YEAR OF FIRST APPEARANCE" columns - in df_marvel"""
year_in_date_marvel = df_marvel["DATE OF FIRST APPEARANCE"].apply(lambda line: line[-2:] if pd.isnull(line) is False else True)
year_in_year_marvel = df_marvel["YEAR OF FIRST APPEARANCE"].apply(lambda line: str(int(line))[2:] if pd.isnull(line) is False else True)
print(year_in_date_marvel[year_in_date_marvel != year_in_year_marvel])
def VerifyUnicity(df_dc, df_marvel):
""" Verify if the unique values are not shared by Marvel and DC """
sharedPageIds = df_dc[df_dc["PAGE_ID"].isin(df_marvel["PAGE_ID"]) == True]
print(sharedPageIds)
namesOfSharedPageIds = sharedPageIds[sharedPageIds["NAME"].isin(df_marvel["NAME"]) == True]
print(namesOfSharedPageIds)
sharedNames = df_dc[df_dc["NAME"].isin(df_marvel["NAME"]) == True]
print(sharedNames)
sharedUrls = df_dc[df_dc["URLSLUG"].isin(df_marvel["URLSLUG"]) == True]
print(sharedUrls)
def MaintainUniqueness(df_dc, df_marvel):
""" Maintain uniqueness by creating new ids and source column """
df_dc["NEW_PAGEID"] = df_dc["PAGE_ID"].apply(lambda id: str(id)+"_dc")
df_marvel["NEW_PAGEID"] = df_marvel["PAGE_ID"].apply(lambda id: str(id)+"_marvel")
df_dc["COMICS"] = "DC"
df_marvel["COMICS"] = "Marvel"
return df_dc, df_marvel
def AppendDf(df_dc, df_marvel):
""" Append df_dc and df_marvel """
df_Comics = df_dc.append(df_marvel, sort=False).reset_index(drop=True)
df_Comics.drop(["PAGE_ID", "URLSLUG", "DATE OF FIRST APPEARANCE"], axis=1, inplace=True)
return df_Comics
def MissingValue(df_Comics):
""" Count the missing values """
list_missingValue = list()
list_ratio = list()
for col in df_Comics.columns:
missingValue = df_Comics[col].isna().sum()
ratio = "{:.2%}".format(missingValue/len(df_Comics))
list_missingValue.append(missingValue)
list_ratio.append(ratio)
    df_missingValue = pd.DataFrame({"column": df_Comics.columns, "number of missing values": list_missingValue, "ratio": list_ratio})
import os
# note: matplotlib.finance was removed in matplotlib 2.2+; this import needs an old matplotlib or the separate mpl_finance package
from matplotlib.finance import candlestick2_ohlc as plt_candle
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import datetime
from oandapyV20 import API
import oandapyV20.endpoints.instruments as oandapy
now = datetime.datetime.now() - datetime.timedelta(hours=9) # convert local JST to standard time (UTC)
minutes = 61 # fetch roughly the last 60 minutes of candles
time_min = now - datetime.timedelta(minutes=120) # start fetching data from 2 hours ago
# start = datetime.datetime(year=2018,month=5,day=1)
time_min = time_min.strftime("%Y-%m-%dT%H:%M:00.000000Z")
# OANDA_API ACCESS-TOKEN
access_token = os.environ.get("ACCESS_TOKEN")
api = API(access_token = access_token, environment="practice")
request = oandapy.InstrumentsCandles(instrument = "USD_JPY",
params = { "alignmentTimezone": "Japan", "from": start, "count": minutes, "granularity": "M1" })
api.request(request)
#filename = "candle.csv"
candle = pd.DataFrame.from_dict(
[row['mid'] for row in request.response['candles']])
candle['time'] = [row['time'] for row in request.response['candles']]
#candle.to_csv(filename)
#candle = pd.read_csv('candle.csv')
candle[['o', 'h', 'l', 'c']] = candle[['o', 'h', 'l', 'c']].astype(float)  # OANDA returns prices as strings; convert for rolling means and plotting
sns.set_style("whitegrid")
#compute the moving averages
small_window = 5 # 5-minute moving average
big_window = 20 # 20-minute moving average
sma5 = candle.c.rolling(window=small_window).mean()
sma20 = candle.c.rolling(window=big_window).mean()
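# Illustrative continuation (an assumption, not in the original script): plot the candles
# with the imported candlestick helper and overlay both moving averages.
fig, ax = plt.subplots(figsize=(12, 6))
plt_candle(ax, candle.o.astype(float), candle.h.astype(float),
           candle.l.astype(float), candle.c.astype(float),
           width=0.9, colorup='g', colordown='r')
ax.plot(sma5.values, label='SMA 5')
ax.plot(sma20.values, label='SMA 20')
ax.legend()
plt.show()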
# -*- coding: utf-8 -*-
"""
Copyright 2022 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pandas as pd
from tqdm import tqdm
from ..functions.coord import load_coord, calc_atom_dist, wmhb_name, no_hb_name
from ..functions.lst import type_lst, lst_nums, lst_to_str, str_to_lst
from ..functions.col import (
get_dist_col,
core_path_col,
modelid_col,
chainid_col,
vect_1_col,
vect_2_col,
hb_status_col,
hb_angle_1_col,
hb_angle_2_col,
wmhb_angle_col,
outlier_col,
)
from ..functions.table import get_df_at_index
from ..functions.path import modify_coord_path, save_table
def get_index_dist(
df,
index,
x_resids,
y_resids,
x_atomids=None,
y_atomids=None,
shared_resids=None,
shared_atomids=None,
atom_dist_col_lst=None,
vect_1_col_lst=None,
vect_2_col_lst=None,
hb_status_col_lst=None,
hb_angle_1_col_lst=None,
hb_angle_2_col_lst=None,
wmhb_angle_col_lst=None,
outlier_col_lst=None,
check_hb=False,
use_h=False,
hb_sc=True,
hb_bb=True,
min_hb_dist=2.0,
max_hb_dist=3.2,
min_wmhb_dist=2.0,
max_wmhb_dist=3.0,
min_hb_angle=90,
max_hb_angle=180,
min_wmhb_angle=80,
max_wmhb_angle=140,
coord_path_col=None,
):
if coord_path_col is None:
coord_path_col = core_path_col
index_df = get_df_at_index(df, index)
coord_path = index_df.at[index, coord_path_col]
modelid = index_df.at[index, modelid_col]
chainid = index_df.at[index, chainid_col]
val_lst = [x_resids, y_resids]
if x_atomids is not None:
val_lst.append(x_atomids)
if y_atomids is not None:
val_lst.append(y_atomids)
if shared_resids is not None:
val_lst.append(shared_resids)
if shared_atomids is not None:
val_lst.append(shared_atomids)
for i, val in enumerate(val_lst):
val_lst[i] = type_lst(val)
x_lst = lst_nums(0, len(val_lst[0]) - 1)
if use_h:
coord_path = modify_coord_path(coord_path, return_pdb=True, add_h=True)
structure = load_coord(coord_path)
for x in x_lst:
resid_1 = val_lst[0][x]
resid_2 = val_lst[1][x]
if x_atomids is not None:
atomid_1 = val_lst[2][x]
else:
atomid_1 = None
if y_atomids is not None:
atomid_2 = val_lst[3][x]
else:
atomid_2 = None
if atom_dist_col_lst is None:
cont_atom_dist_col = get_dist_col(
resid_1,
resid_2,
x_atomid=lst_to_str(atomid_1, join_txt="+"),
y_atomid=lst_to_str(atomid_2, join_txt="+"),
)
else:
cont_atom_dist_col = atom_dist_col_lst[x]
if vect_1_col_lst is None:
cont_vect_1_col = get_dist_col(
resid_1,
resid_2,
x_atomid=lst_to_str(atomid_1, join_txt="+"),
y_atomid=lst_to_str(atomid_2, join_txt="+"),
ext=vect_1_col,
)
else:
cont_vect_1_col = vect_1_col_lst[x]
if vect_2_col_lst is None:
cont_vect_2_col = get_dist_col(
resid_1,
resid_2,
x_atomid=lst_to_str(atomid_1, join_txt="+"),
y_atomid=lst_to_str(atomid_2, join_txt="+"),
ext=vect_2_col,
)
else:
cont_vect_2_col = vect_2_col_lst[x]
if hb_status_col_lst is None:
cont_hb_status_col = get_dist_col(
resid_1,
resid_2,
x_atomid=lst_to_str(atomid_1, join_txt="+"),
y_atomid=lst_to_str(atomid_2, join_txt="+"),
ext=hb_status_col,
)
else:
cont_hb_status_col = hb_status_col_lst[x]
if hb_angle_1_col_lst is None:
cont_hb_angle_1_col = get_dist_col(
resid_1,
resid_2,
x_atomid=lst_to_str(atomid_1, join_txt="+"),
y_atomid=lst_to_str(atomid_2, join_txt="+"),
ext=hb_angle_1_col,
)
else:
cont_hb_angle_1_col = hb_angle_1_col_lst[x]
if hb_angle_2_col_lst is None:
cont_hb_angle_2_col = get_dist_col(
resid_1,
resid_2,
x_atomid=lst_to_str(atomid_1, join_txt="+"),
y_atomid=lst_to_str(atomid_2, join_txt="+"),
ext=hb_angle_2_col,
)
else:
cont_hb_angle_2_col = hb_angle_2_col_lst[x]
if wmhb_angle_col_lst is None:
cont_wmhb_angle_col = get_dist_col(
resid_1,
resid_2,
x_atomid=lst_to_str(atomid_1, join_txt="+"),
y_atomid=lst_to_str(atomid_2, join_txt="+"),
ext=wmhb_angle_col,
)
else:
cont_wmhb_angle_col = wmhb_angle_col_lst[x]
if outlier_col_lst is None:
cont_outlier_col = get_dist_col(
resid_1,
resid_2,
x_atomid=lst_to_str(atomid_1, join_txt="+"),
y_atomid=lst_to_str(atomid_2, join_txt="+"),
ext=outlier_col,
)
else:
cont_outlier_col = outlier_col_lst[x]
resid_lst = [resid_1, resid_2]
for i, resid in enumerate(resid_lst):
if type(resid) == str:
resid = str_to_lst(df.at[index, resid])[0]
resid_lst[i] = resid
result = calc_atom_dist(
structure=structure,
chainid_1=chainid,
modelid_1=modelid,
resid_1=resid_lst[0],
atomid_1=atomid_1,
chainid_2=chainid,
modelid_2=modelid,
resid_2=resid_lst[1],
atomid_2=atomid_2,
check_hb=check_hb,
use_h=use_h,
hb_sc=hb_sc,
hb_bb=hb_bb,
min_hb_dist=min_hb_dist,
max_hb_dist=max_hb_dist,
min_wmhb_dist=min_wmhb_dist,
max_wmhb_dist=max_wmhb_dist,
min_hb_angle=min_hb_angle,
max_hb_angle=max_hb_angle,
min_wmhb_angle=min_wmhb_angle,
max_wmhb_angle=max_wmhb_angle,
return_vect=True,
)
atom_dist = result[0]
vect_1 = result[1]
vect_2 = result[2]
if check_hb:
hb_status = result[3]
hb_angle_1 = result[4]
hb_angle_2 = result[5]
wmhb_angle = result[6]
outlier_status = result[7]
if hb_status == wmhb_name:
if shared_resids is not None:
shared_resid = val_lst[4][x]
if shared_atomids is not None:
shared_atomid = val_lst[5][x]
else:
shared_atomid = None
shared_result_1 = calc_atom_dist(
structure=structure,
chainid_1=chainid,
modelid_1=modelid,
resid_1=resid_lst[0],
atomid_1=atomid_1,
chainid_2=chainid,
modelid_2=modelid,
resid_2=shared_resid,
atomid_2=shared_atomid,
check_hb=check_hb,
use_h=use_h,
hb_sc=hb_sc,
hb_bb=hb_bb,
min_hb_dist=min_hb_dist,
max_hb_dist=max_hb_dist,
min_wmhb_dist=min_wmhb_dist,
max_wmhb_dist=max_wmhb_dist,
min_hb_angle=min_hb_angle,
max_hb_angle=max_hb_angle,
min_wmhb_angle=min_wmhb_angle,
max_wmhb_angle=max_wmhb_angle,
return_vect=True,
)
shared_result_2 = calc_atom_dist(
structure=structure,
chainid_1=chainid,
modelid_1=modelid,
resid_1=shared_resid,
atomid_1=shared_atomid,
chainid_2=chainid,
modelid_2=modelid,
resid_2=resid_lst[1],
atomid_2=atomid_2,
check_hb=check_hb,
use_h=use_h,
hb_sc=hb_sc,
hb_bb=hb_bb,
min_hb_dist=min_hb_dist,
max_hb_dist=max_hb_dist,
min_wmhb_dist=min_wmhb_dist,
max_wmhb_dist=max_wmhb_dist,
min_hb_angle=min_hb_angle,
max_hb_angle=max_hb_angle,
min_wmhb_angle=min_wmhb_angle,
max_wmhb_angle=max_wmhb_angle,
return_vect=True,
)
if (shared_result_1[3] == no_hb_name) or (
shared_result_2[3] == no_hb_name
):
hb_status = no_hb_name
outlier_status = True
index_df.at[
index,
cont_atom_dist_col,
] = atom_dist
index_df.at[
index,
cont_vect_1_col,
] = vect_1
index_df.at[
index,
cont_vect_2_col,
] = vect_2
if check_hb:
index_df.at[
index,
cont_hb_status_col,
] = hb_status
index_df.at[
index,
cont_hb_angle_1_col,
] = hb_angle_1
index_df.at[
index,
cont_hb_angle_2_col,
] = hb_angle_2
index_df.at[
index,
cont_wmhb_angle_col,
] = wmhb_angle
index_df.at[
index,
cont_outlier_col,
] = outlier_status
return index_df
def build_dist_table(
df,
x_resids,
y_resids,
x_atomids=None,
y_atomids=None,
shared_resids=None,
shared_atomids=None,
dist_table_path=None,
atom_dist_col_lst=None,
hb_status_col_lst=None,
hb_angle_1_col_lst=None,
hb_angle_2_col_lst=None,
wmhb_angle_col_lst=None,
outlier_col_lst=None,
check_hb=False,
use_h=False,
hb_sc=True,
hb_bb=True,
min_hb_dist=2.0,
max_hb_dist=3.2,
min_wmhb_dist=2.0,
max_wmhb_dist=3.0,
min_hb_angle=90,
max_hb_angle=180,
min_wmhb_angle=80,
max_wmhb_angle=140,
coord_path_col=None,
):
df = df.reset_index(drop=True)
    dist_df = pd.DataFrame()
#!/usr/bin/env python
# coding: utf-8
# **About the Competition:**
#
# [Avito](https://www.avito.ru/), Russia’s largest classified advertisements website, is hosting its fourth Kaggle competition. The challenge is to predict demand for an online advertisement based on its full description (title, description, images, etc.), its context (geographically where it was posted, similar ads already posted) and historical demand for similar ads in similar contexts.
#
# **About the Notebook:**
#
# One more exciting competition ahead and this involves both NLP (text data in Russian) and Image data along with numerical . In this notebook, let us get into the basic data exploration using python.
#
# Thanks to [Yandex Translate](https://translate.yandex.com/), I was able to get english names for the russian names and used them whenever possible. Most of the plots are in plotly and so please hover over them to see more details.
# In[26]:
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn import preprocessing, model_selection, metrics
import lightgbm as lgb
color = sns.color_palette()
get_ipython().run_line_magic('matplotlib', 'inline')
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.tools as tls
pd.options.mode.chained_assignment = None
pd.options.display.max_columns = 999
# Now let us look at the input files present in the dataset.
# In[2]:
from subprocess import check_output
print(check_output(["ls", "../input/"]).decode("utf8"))
# The description of the data files from the data page:
#
# * train.csv - Train data.
# * test.csv - Test data. Same schema as the train data, minus deal_probability.
# * train_active.csv - Supplemental data from ads that were displayed during the same period as train.csv. Same schema as the train data, minus deal_probability.
# * test_active.csv - Supplemental data from ads that were displayed during the same period as test.csv. Same schema as the train data, minus deal_probability.
# * periods_train.csv - Supplemental data showing the dates when the ads from train_active.csv were activated and when they where displayed.
# * periods_test.csv - Supplemental data showing the dates when the ads from test_active.csv were activated and when they where displayed. Same schema as periods_train.csv, except that the item ids map to an ad in test_active.csv.
# * train_jpg.zip - Images from the ads in train.csv.
# * test_jpg.zip - Images from the ads in test.csv.
# * sample_submission.csv - A sample submission in the correct format.
#
# Let us start with the train file.
# In[3]:
train_df = pd.read_csv("../input/train.csv", parse_dates=["activation_date"])
test_df = pd.read_csv("../input/test.csv", parse_dates=["activation_date"])
print("Train file rows and columns are : ", train_df.shape)
print("Test file rows and columns are : ", test_df.shape)
# In[4]:
train_df.head()
# The train dataset description is as follows:
#
# * item_id - Ad id.
# * user_id - User id.
# * region - Ad region.
# * city - Ad city.
# * parent_category_name - Top level ad category as classified by Avito's ad model.
# * category_name - Fine grain ad category as classified by Avito's ad model.
# * param_1 - Optional parameter from Avito's ad model.
# * param_2 - Optional parameter from Avito's ad model.
# * param_3 - Optional parameter from Avito's ad model.
# * title - Ad title.
# * description - Ad description.
# * price - Ad price.
# * item_seq_number - Ad sequential number for user.
# * activation_date- Date ad was placed.
# * user_type - User type.
# * image - Id code of image. Ties to a jpg file in train_jpg. Not every ad has an image.
# * image_top_1 - Avito's classification code for the image.
# * deal_probability - The target variable. This is the likelihood that an ad actually sold something. It's not possible to verify every transaction with certainty, so this column's value can be any float from zero to one.
#
# So deal probability is our target variable and is a float value between 0 and 1 as per the data page. Let us have a look at it.
# In[5]:
plt.figure(figsize=(12,8))
sns.distplot(train_df["deal_probability"].values, bins=100, kde=False)
plt.xlabel('Deal Probility', fontsize=12)
plt.title("Deal Probability Histogram", fontsize=14)
plt.show()
plt.figure(figsize=(8,6))
plt.scatter(range(train_df.shape[0]), np.sort(train_df['deal_probability'].values))
plt.xlabel('index', fontsize=12)
plt.ylabel('deal probability', fontsize=12)
plt.title("Deal Probability Distribution", fontsize=14)
plt.show()
# So almost 100K Ads has 0 probaility (which means it did not sell anything) and few ads have a probability of 1. Rest of the deal probabilities have values in between.
#
# **Region wise distribution of Ads:**
#
# Let us look at the region wise distribution of ads.
# In[6]:
from io import StringIO
temp_data = StringIO("""
region,region_en
Свердловская область, Sverdlovsk oblast
Самарская область, Samara oblast
Ростовская область, Rostov oblast
Татарстан, Tatarstan
Волгоградская область, Volgograd oblast
Нижегородская область, Nizhny Novgorod oblast
Пермский край, Perm Krai
Оренбургская область, Orenburg oblast
Ханты-Мансийский АО, Khanty-Mansi Autonomous Okrug
Тюменская область, Tyumen oblast
Башкортостан, Bashkortostan
Краснодарский край, Krasnodar Krai
Новосибирская область, Novosibirsk oblast
Омская область, Omsk oblast
Белгородская область, Belgorod oblast
Челябинская область, Chelyabinsk oblast
Воронежская область, Voronezh oblast
Кемеровская область, Kemerovo oblast
Саратовская область, Saratov oblast
Владимирская область, Vladimir oblast
Калининградская область, Kaliningrad oblast
Красноярский край, Krasnoyarsk Krai
Ярославская область, Yaroslavl oblast
Удмуртия, Udmurtia
Алтайский край, Altai Krai
Иркутская область, Irkutsk oblast
Ставропольский край, Stavropol Krai
Тульская область, Tula oblast
""")
region_df = pd.read_csv(temp_data)
train_df = pd.merge(train_df, region_df, how="left", on="region")
# In[7]:
temp_series = train_df['region_en'].value_counts()
labels = (np.array(temp_series.index))
sizes = (np.array((temp_series / temp_series.sum())*100))
trace = go.Pie(labels=labels, values=sizes)
layout = go.Layout(
title='Region distribution',
width=900,
height=900,
)
data = [trace]
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename="region")
# The regions' share of ads ranges from 1.71% to 9.41%. The top regions are:
# 1. Krasnodar region - 9.41%
# 2. Sverdlovsk region - 6.28%
# 3. Rostov region - 5.99%
#
# In[8]:
plt.figure(figsize=(12,8))
sns.boxplot(y="region_en", x="deal_probability", data=train_df)
plt.xlabel('Deal probability', fontsize=12)
plt.ylabel('Region', fontsize=12)
plt.title("Deal probability by region")
plt.xticks(rotation='vertical')
plt.show()
# **City wise distribution of Ads:**
#
# Now let us have a look at the top 20 cities present in the dataset.
# In[9]:
cnt_srs = train_df['city'].value_counts().head(20)
trace = go.Bar(
y=cnt_srs.index[::-1],
x=cnt_srs.values[::-1],
orientation = 'h',
marker=dict(
color=cnt_srs.values[::-1],
colorscale = 'Blues',
reversescale = True
),
)
layout = dict(
title='City distribution of Ads',
)
data = [trace]
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename="CityAds")
# So the top cities where the ads are shown are
# 1. Krasnodar
# 2. Ekaterinburg
# 3. Novosibirsk
# **Parent Category Name:**
#
# Now let us look at the distribution of parent cateory names.
# In[10]:
temp_data = StringIO("""
parent_category_name,parent_category_name_en
Личные вещи,Personal belongings
Для дома и дачи,For the home and garden
Бытовая электроника,Consumer electronics
Недвижимость,Real estate
Хобби и отдых,Hobbies & leisure
Транспорт,Transport
Услуги,Services
Животные,Animals
Для бизнеса,For business
""")
temp_df = pd.read_csv(temp_data)
train_df = pd.merge(train_df, temp_df, on="parent_category_name", how="left")
# In[11]:
temp_series = train_df['parent_category_name_en'].value_counts()
labels = (np.array(temp_series.index))
sizes = (np.array((temp_series / temp_series.sum())*100))
trace = go.Pie(labels=labels, values=sizes)
layout = go.Layout(
title='Parent Category distribution',
width=900,
height=900,
)
data = [trace]
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename="parentcategory")
# So 46.4% of the ads are for Personal belongings, 11.9% are for home and garden and 11.5% for consumer electronics.
# In[12]:
plt.figure(figsize=(12,8))
sns.boxplot(x="parent_category_name_en", y="deal_probability", data=train_df)
plt.ylabel('Deal probability', fontsize=12)
plt.xlabel('Parent Category', fontsize=12)
plt.title("Deal probability by parent category", fontsize=14)
plt.xticks(rotation='vertical')
plt.show()
# Services category seems to have slightly higher deal probability compared to others.
# ** Category of Ads:**
#
# Now let us look at the category of ads.
# In[13]:
temp_data = StringIO("""
category_name,category_name_en
"Одежда, обувь, аксессуары","Clothing, shoes, accessories"
Детская одежда и обувь,Children's clothing and shoes
Товары для детей и игрушки,Children's products and toys
Квартиры,Apartments
Телефоны,Phones
Мебель и интерьер,Furniture and interior
Предложение услуг,Offer services
Автомобили,Cars
Ремонт и строительство,Repair and construction
Бытовая техника,Appliances
Товары для компьютера,Products for computer
"Дома, дачи, коттеджи","Houses, villas, cottages"
Красота и здоровье,Health and beauty
Аудио и видео,Audio and video
Спорт и отдых,Sports and recreation
Коллекционирование,Collecting
Оборудование для бизнеса,Equipment for business
Земельные участки,Land
Часы и украшения,Watches and jewelry
Книги и журналы,Books and magazines
Собаки,Dogs
"Игры, приставки и программы","Games, consoles and software"
Другие животные,Other animals
Велосипеды,Bikes
Ноутбуки,Laptops
Кошки,Cats
Грузовики и спецтехника,Trucks and buses
Посуда и товары для кухни,Tableware and goods for kitchen
Растения,Plants
Планшеты и электронные книги,Tablets and e-books
Товары для животных,Pet products
Комнаты,Room
Фототехника,Photo
Коммерческая недвижимость,Commercial property
Гаражи и машиноместа,Garages and Parking spaces
Музыкальные инструменты,Musical instruments
Оргтехника и расходники,Office equipment and consumables
Птицы,Birds
Продукты питания,Food
Мотоциклы и мототехника,Motorcycles and bikes
Настольные компьютеры,Desktop computers
Аквариум,Aquarium
Охота и рыбалка,Hunting and fishing
Билеты и путешествия,Tickets and travel
Водный транспорт,Water transport
Готовый бизнес,Ready business
Недвижимость за рубежом,Property abroad
""")
temp_df = pd.read_csv(temp_data)
train_df = pd.merge(train_df, temp_df, on="category_name", how="left")
# In[14]:
cnt_srs = train_df['category_name_en'].value_counts()
trace = go.Bar(
y=cnt_srs.index[::-1],
x=cnt_srs.values[::-1],
orientation = 'h',
marker=dict(
color=cnt_srs.values[::-1],
colorscale = 'Blues',
reversescale = True
),
)
layout = dict(
title='Category Name of Ads - Count',
height=900
)
data = [trace]
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename="category name")
# So the top 3 categories are:
# 1. Clothes, shoes, accessories
# 2. Children's clothing and footwear
# 3. Goods for children and toys
#
# ** User Type:**
#
# Now let us look at the user type.
# In[15]:
temp_series = train_df['user_type'].value_counts()
labels = (np.array(temp_series.index))
sizes = (np.array((temp_series / temp_series.sum())*100))
trace = go.Pie(labels=labels, values=sizes)
layout = go.Layout(
title='User Type distribution',
width=600,
height=600,
)
data = [trace]
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename="usertype")
# Private users constitute 72% of the data followed by company and shop.
#
# **Price:**
#
# This is the price shown in the Ad.
# In[16]:
train_df["price_new"] = train_df["price"].values
train_df["price_new"].fillna(np.nanmean(train_df["price"].values), inplace=True)
plt.figure(figsize=(12,8))
sns.distplot(np.log1p(train_df["price_new"].values), bins=100, kde=False)
plt.xlabel('Log of price', fontsize=12)
plt.title("Log of Price Histogram", fontsize=14)
plt.show()
# **Activation Date:**
# In[17]:
cnt_srs = train_df['activation_date'].value_counts()
trace = go.Bar(
x=cnt_srs.index,
y=cnt_srs.values,
marker=dict(
color=cnt_srs.values,
colorscale = 'Picnic',
reversescale = True
),
)
layout = go.Layout(
title='Activation Dates in Train'
)
data = [trace]
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename="ActivationDate")
# Activation dates in test
cnt_srs = test_df['activation_date'].value_counts()
trace = go.Bar(
x=cnt_srs.index,
y=cnt_srs.values,
marker=dict(
color=cnt_srs.values,
colorscale = 'Picnic',
reversescale = True
),
)
layout = go.Layout(
title='Activation Dates in Test'
)
data = [trace]
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename="ActivationDate")
# Inferences:
#
# 1. The activation date ranges do not overlap between the train and test sets, so we need to be careful while doing our validation. Maybe time-based validation is a good option.
# 2. We are given two weeks data for training (March 15 to March 28) and one week data for testing (April 12 to April 18, 2017).
# 3. There is a gap of two weeks in between training and testing data.
# 4. We can probably use weekday as a feature since all the days are present in both train and test sets.
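# A minimal sketch of inference 4 above (not from the original notebook): derive the
# day of week from the activation date in both train and test.
train_df["activation_weekday"] = train_df["activation_date"].dt.weekday
test_df["activation_weekday"] = test_df["activation_date"].dt.weekday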
#
#
# **User id:**
#
# Now we can have a look at the number of unique users in train & test and also the number of common users if any.
# In[18]:
from matplotlib_venn import venn2
plt.figure(figsize=(10,7))
venn2([set(train_df.user_id.unique()), set(test_df.user_id.unique())], set_labels = ('Train set', 'Test set') )
plt.title("Number of users in train and test", fontsize=15)
plt.show()
# So out of the 306K users in test, about 68K users are there in train and the rest are new.
#
# ** Title: **
#
# First let us look at the number of common titles between train and test set
# In[19]:
from matplotlib_venn import venn2
plt.figure(figsize=(10,7))
venn2([set(train_df.title.unique()), set(test_df.title.unique())], set_labels = ('Train set', 'Test set') )
plt.title("Number of titles in train and test", fontsize=15)
plt.show()
# We have around 64K common titles between train and test set. Now let us look at the number of words present in the title column.
# In[20]:
train_df["title_nwords"] = train_df["title"].apply(lambda x: len(x.split()))
test_df["title_nwords"] = test_df["title"].apply(lambda x: len(x.split()))
cnt_srs = train_df['title_nwords'].value_counts()
trace = go.Bar(
x=cnt_srs.index,
y=cnt_srs.values,
marker=dict(
color="blue",
#colorscale = 'Blues',
reversescale = True
),
)
layout = go.Layout(
title='Number of words in title column'
)
data = [trace]
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename="title_nwords")
# Majority of the tiles have 1, 2 or 3 words and has a long tail.
#
# Now we will do the following:
# 1. Take the TF-IDF of the title column and this will be a sparse matrix with huge dimesnions.
# 2. Get the top SVD components fof this TF-IDF
# 3. Plot the distribution of SVD components with Deal probability to see if these variables help.
# In[21]:
### TFIDF Vectorizer ###
tfidf_vec = TfidfVectorizer(ngram_range=(1,1))
full_tfidf = tfidf_vec.fit_transform(train_df['title'].values.tolist() + test_df['title'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['title'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['title'].values.tolist())
### SVD Components ###
n_comp = 3
svd_obj = TruncatedSVD(n_components=n_comp, algorithm='arpack')
svd_obj.fit(full_tfidf)
train_svd = pd.DataFrame(svd_obj.transform(train_tfidf))
test_svd = pd.DataFrame(svd_obj.transform(test_tfidf))
train_svd.columns = ['svd_title_'+str(i+1) for i in range(n_comp)]
test_svd.columns = ['svd_title_'+str(i+1) for i in range(n_comp)]
train_df = pd.concat([train_df, train_svd], axis=1)
test_df = pd.concat([test_df, test_svd], axis=1)
from utils import *
import pandas as pd
import datetime
import time
from math import exp
import multiprocessing as mp
#Function for splitting the dataframe processing across multiple workers
def divide_and_conquer(df):
n_workers=16
N_ROWS = round(len(df)/n_workers+1)
with mp.Pool(n_workers) as pool:
cnt = len(df.index)
n, remainder = divmod(cnt, N_ROWS)
results = []
start_index = 0
for i in range(n):
results.append(pool.apply_async(process_frame,(df.loc[start_index:start_index+N_ROWS-1, :],)))
start_index += N_ROWS
if remainder:
results.append(pool.apply_async(process_frame,(df.loc[start_index:start_index+remainder-1, :],)))
new_dfs = [result.get() for result in results]
df = pd.concat(new_dfs, ignore_index=True)
return df
#Function to process the dataframe -> Assign values
def process_frame(base_df):
column_names=base_df.columns.tolist()+['I1','I2','I3','I4','I5','I6','I7','I8','I9','I10','I11','I12','I13','I14','I15','I16','I17','I18','I19','I20','I21','I22','Y']
op2 = pd.DataFrame(columns = column_names)
days_lag=365
for index,row in base_df.iterrows():
gap=datetime.timedelta(days=days_lag)
temp_row=(row.to_dict())
start_date=row['Procedure_time']-datetime.timedelta(days=2)#row['Admit Timestamp']
end_date=row['Procedure_time']
EMPI_val=row['EMPI']
df=ICD_df[ICD_df['EMPI']==EMPI_val].copy()
if df.empty==False:
while end_date-start_date>=datetime.timedelta(days=1):
df1=df[(df['Service Day']<=start_date)&(df['Service Day']>=start_date-gap)].copy()
df1['diff']=(start_date-df1['Service Day'])
df1['diff']=df1['diff'].dt.days
df1['weight']=np.exp(-np.power(df1['diff'],2))
for i in range(1,23):
df2=df1[df1['ICD_group']==i].copy()
temp_row['I'+str(i)]=df2['weight'].sum()
df2=df2[0:0].copy()
if(end_date-start_date==datetime.timedelta(days=1)):
temp_row['Y']=1
else:
temp_row['Y']=0
op2=op2.append(temp_row,ignore_index=True)
start_date=start_date+datetime.timedelta(days=1)
df1=df1[0:0].copy()
df=df[0:0].copy()
return op2
if __name__ == '__main__':
#List of columns to drop
encounters_drop_list=["Unnamed: 2","# Encounters"]
patient_drop_list=['Patient Name','D/C Disp','Unnamed: 9','Discharge Day','Discharge Timestamp','Reason for Visit','Metrics', '# Encs', '# Encs.1', '# Encs.2','# Encs.3', '# Encs.4']
ICD_drop_list=['ICD-10 Diagnosis DESC','Patient Name','Patient ID']
medication_drop_list=['Unnamed: 1','Metrics']
ICD_file_list=['ICD10s_2018_2019.csv','ICD10s_2016_2018.csv','ICD10s_2013_2016.csv']
#Loading encounters file -> Contains the specific encounter stamps
encounters_df=pd.read_csv("/labs/banerjeelab/Central_line/Data/Encounters.csv")
encounters_df=drop_columns(encounters_drop_list,encounters_df)
encounters_df=encounters_df.dropna()
#Loading patients details -> to merge patient information
patients_df=pd.read_csv("/labs/banerjeelab/Central_line/Data/Patient List.csv")
patients_df=drop_columns(patient_drop_list,patients_df)
patients_df=patients_df.dropna()
base_df=concatenate_dfs(encounters_df,patients_df,'Encounter','Encounter')
#Merging patients info with the encounters with matching encounter IDs
base_df['Date of Birth'] = pd.to_datetime(base_df['Date of Birth'], format="%m/%d/%Y")
base_df['Service Day']=pd.to_datetime(base_df['Service Day'], format="%m/%d/%Y")
base_df['Admit Timestamp']= pd.to_datetime(base_df['Admit Timestamp'],errors='coerce', format="%m/%d/%Y %H:%M:%S")
base_df['Age']=abs(base_df['Date of Birth']-base_df['Admit Timestamp'])
base_df['Age']=round(base_df['Age'].dt.days / 365,1)
base_df['EMPI']=base_df['EMPI'].astype(int)
base_df['EMPI']=base_df['EMPI'].astype(str)
base_df=drop_columns(['Date of Birth'],base_df)
base_df=base_df.rename(columns={'Service Day':'Procedure_time','Patient ID':'Patient'})
#Loading ICD codes data
    ICD_df = pd.DataFrame()
import cloudscraper
from bs4 import BeautifulSoup as bs
import pandas as pd
import urllib.parse
import re
class GlobalFirePowerScraper:
def __init__(self):
self.ranking = pd.DataFrame()
        self.country_details = pd.DataFrame()
import os
from pathlib import Path
import random
import time
import math
import json
import json
import shutil
import inspect
import warnings
import logging
import functools
from concurrent.futures import ThreadPoolExecutor
import dask
from dask.diagnostics import ProgressBar
from copy import deepcopy
from tqdm import tqdm
import numpy as np
import pandas as pd
from PIL import Image
import torch
import rasterio
import rtree
import pyproj
import cv2
import scipy.ndimage
import skimage.morphology
import shapely
import geopandas as gpd
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
from torchvision.ops import nms
from deepforest import main, preprocess, predict, visualize
from urbantree.setting import Setting
def distance(points):
"""
calculate the distance of two points
Parameters
----------
points : list
a list of two points. E.g. `[[x1, y1], [x2, y2]]`
Returns
-------
float
the distance of two points
"""
p1, p2 = points
return math.sqrt(math.pow(p1[0]-p2[0],2) + math.pow(p1[1]-p2[1],2))
def calc_rectangle_bbox(points, img_h, img_w):
"""
calculate bbox from a rectangle.
Parameters
----------
points : list
a list of two points. E.g. `[[x1, y1], [x2, y2]]`
img_h : int
maximal image height
img_w : int
maximal image width
Returns
-------
dict
corresponding bbox. I.e. `{ 'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax }`
"""
lt, rb = points
xmin, ymin = lt
xmax, ymax = rb
xmin = min(max(0, xmin), img_w)
xmax = min(max(0, xmax), img_w)
ymin = min(max(0, ymin), img_h)
ymax = min(max(0, ymax), img_h)
return { 'xmin':xmin, 'ymin':ymin, 'xmax':xmax, 'ymax':ymax }
def calc_circle_bbox(points, img_h, img_w):
"""
calculate bbox from a circle.
Parameters
----------
points : list
a list of two points. E.g. `[[x1, y1], [x2, y2]]`
img_h : int
maximal image height
img_w : int
maximal image width
Returns
-------
dict
corresponding bbox. I.e. `{ 'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax }`
"""
center = points[0]
dist = distance(points)
xmin = center[0] - dist
xmax = center[0] + dist
ymin = center[1] - dist
ymax = center[1] + dist
xmin = min(max(0, xmin), img_w)
xmax = min(max(0, xmax), img_w)
ymin = min(max(0, ymin), img_h)
ymax = min(max(0, ymax), img_h)
return { 'xmin':xmin, 'ymin':ymin, 'xmax':xmax, 'ymax':ymax }
def generate_response(dataset_train_dir, dataset_response_dir,
model_trainer_min_bbox_size,
model_trainer_min_bbox_ratio,
model_trainer_validation_ratio,
model_trainer_patch_sizes=[],
model_trainer_patch_overlap_size=32,
**ignored):
"""
generate response of training dataset with labelme annotation results.
Parameters
----------
dataset_train_dir : str
the path to input training dataset folder with labelme annotation results in json
dataset_response_dir : str
the path to output training response folder for further training with torch vision
model_trainer_min_bbox_size : int
minimal size of object bbox which should be trained
model_trainer_min_bbox_ratio : float
minimal ratio (short_side/long_side) of object bbox
model_trainer_validation_ratio : float
train/validation split ratio
model_trainer_patch_sizes : list of int
a list of patch sizes for training images
model_trainer_patch_overlap_size : int
overlapping size of cropped training images with the given patch size
"""
TRAINING_IMG_DIR = Path(dataset_train_dir)
def glob_json(path):
return list(path.glob('*.json'))
# TRAINING_IMG_PATH can be a list of directories
LABELME_RESULTS = [glob_json(TRAINING_IMG_DIR)] \
if not isinstance(TRAINING_IMG_DIR, list) \
else list(map(glob_json, TRAINING_IMG_DIR))
# output folder of response (annotation csv fot torch vision)
RESPONSE_DEEPFOREST_DIR = Path(dataset_response_dir)
PATCH_SIZES = model_trainer_patch_sizes
PATCH_OVERLAP_SIZE = model_trainer_patch_overlap_size
VALIDATION_RATIO = model_trainer_validation_ratio
TARGET_MIN_SIZE = model_trainer_min_bbox_size
TARGET_MIN_RATIO = model_trainer_min_bbox_ratio
assert TARGET_MIN_RATIO <= 1 and TARGET_MIN_RATIO > 0, "0 < model_trainer_min_bbox_ratio <= 1"
#############################################################
# need to start from scratch
if RESPONSE_DEEPFOREST_DIR.exists() and os.listdir(RESPONSE_DEEPFOREST_DIR):
raise RuntimeError("Directory is not empty: " + str(RESPONSE_DEEPFOREST_DIR))
os.makedirs(RESPONSE_DEEPFOREST_DIR, exist_ok=True)
def bbox_size(bbox):
return (bbox['xmax'] - bbox['xmin']) * (bbox['ymax'] - bbox['ymin'])
def bbox_ratio(bbox):
return (bbox['xmax'] - bbox['xmin']) / (bbox['ymax'] - bbox['ymin'])
print("Processing source labelme annotation:", TRAINING_IMG_DIR)
all_df = []
for dir_id, label_jsons in enumerate(LABELME_RESULTS):
for lbme in label_jsons:
# labelme json file
lbme_path = Path(lbme)
image_json = None
with open(lbme) as f:
image_json = json.load(f)
img_h = image_json['imageHeight']
img_w = image_json['imageWidth']
img_dir = lbme.parent
src_img_name = image_json['imagePath']
dest_img_name = str(dir_id) + '-' + src_img_name
# copy training data to response folder
shutil.copy(img_dir.joinpath(src_img_name),
RESPONSE_DEEPFOREST_DIR.joinpath(dest_img_name))
rows_list = []
# process geometries
for shape in image_json['shapes']:
shape_type = shape['shape_type']
shape_label = shape['label']
shape_points = shape['points']
bbox = None
if shape_type == 'circle':
bbox = calc_circle_bbox(points=shape_points, img_h=img_h, img_w=img_w)
elif shape_type == 'rectangle':
bbox = calc_rectangle_bbox(points=shape_points, img_h=img_h, img_w=img_w)
else:
raise "FIXME"
if (bbox_size(bbox) >= TARGET_MIN_SIZE) and \
((bbox_ratio(bbox) >= TARGET_MIN_RATIO) and (bbox_ratio(bbox) <= 1.0/TARGET_MIN_RATIO)):
row = { 'image_path': dest_img_name, **bbox, 'label': shape_label }
rows_list.append(row)
        df = pd.DataFrame(rows_list)
import unittest
from setup.settings import *
from numpy.testing import *
from pandas.util.testing import *
import numpy as np
import dolphindb_numpy as dnp
import pandas as pd
import orca
class FunctionBitwiseOrTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# connect to a DolphinDB server
orca.connect(HOST, PORT, "admin", "123456")
def test_function_math_binary_bitwise_or_scalar(self):
self.assertEqual(dnp.bitwise_or(1, 4), 5)
self.assertEqual(np.bitwise_or(1, 4), 5)
# self.assertEqual(dnp.bitwise_or(1, 4), np.bitwise_or(1, 4))
self.assertEqual(dnp.bitwise_or(1, -5), -5)
self.assertEqual(np.bitwise_or(1, -5), -5)
# self.assertEqual(dnp.bitwise_or(1, -5), np.bitwise_or(1, -5))
self.assertEqual(dnp.bitwise_or(0, 9), 9)
self.assertEqual(np.bitwise_or(0, 9), 9)
# self.assertEqual(dnp.bitwise_or(0, 9), np.bitwise_or(0, 9))
def test_function_math_binary_bitwise_or_list(self):
lst1 = [1, 2, 3]
lst2 = [4, 6, 9]
assert_array_equal(dnp.bitwise_or(lst1, lst2), np.bitwise_or(lst1, lst2))
def test_function_math_binary_bitwise_or_array_with_scalar(self):
npa = np.array([1, 2, 3])
dnpa = dnp.array([1, 2, 3])
assert_array_equal(dnp.bitwise_or(dnpa, 1), np.bitwise_or(npa, 1))
# TODO: bitwise_or bug
# assert_array_equal(dnp.bitwise_or(1, dnpa), np.bitwise_or(1, npa))
def test_function_math_binary_bitwise_or_array_with_array(self):
npa1 = np.array([1, 2, 3])
npa2 = np.array([4, 6, 9])
dnpa1 = dnp.array([1, 2, 3])
dnpa2 = dnp.array([4, 6, 9])
assert_array_equal(dnp.bitwise_or(dnpa1, dnpa2), np.bitwise_or(npa1, npa2))
def test_function_math_binary_bitwise_or_array_with_array_param_out(self):
npa1 = np.array([1, 2, 3])
npa2 = np.array([4, 6, 9])
npa = np.zeros(shape=(1, 3))
dnpa1 = dnp.array([1, 2, 3])
dnpa2 = dnp.array([4, 6, 9])
dnpa = dnp.zeros(shape=(1, 3))
np.bitwise_or(npa1, npa2, out=npa)
dnp.bitwise_or(dnpa1, dnpa2, out=dnpa)
# TODO: dolphindb numpy bitwise_or bug
# assert_array_equal(dnpa.to_numpy(), npa)
def test_function_math_binary_bitwise_or_array_with_series(self):
npa = np.array([1, 2, 3])
dnpa = dnp.array([1, 2, 3])
ps = pd.Series([4, 6, 9])
os = orca.Series([4, 6, 9])
assert_series_equal(dnp.bitwise_or(dnpa, os).to_pandas(), np.bitwise_or(npa, ps))
assert_series_equal(dnp.bitwise_or(os, dnpa).to_pandas(), np.bitwise_or(ps, npa))
        pser = pd.Series([1, 2, 3])
### This script will create a trained scVI model and write the data that is used by wormcells-viz
### https://github.com/WormBase/wormcells-viz
### please check that the default arguments match your annotations
'''
It assumes that data has been wrangled into the WormBase standard anndata format:
https://github.com/WormBase/anndata-wrangling
Three separate anndata files (.h5ad) will be created:
## For the expression heatmap
This data is a 2D matrix of shape:
$ n_{celltypes} \times n_{genes} = x_{obs} \times y_{var} $
```
adata.obs = cell_types
adata.var = gene_id
adata.X = log10 scvi expression frequency values in the X field
```
## For the gene histogram
This data is a 3D tensor of shape:
$ n_{celltypes} \times n_{bins} \times n_{genes} = x_{obs} \times y_{var} \times z_{layers} $
The anndata obs contains the cell types and var contains the histogram bins,
the genes are stored in layers with the keys being the gene ID.
We store the genes in the layers because each view in the wormcells-viz app shows the histograms for a single gene,
so this makes accessing the data simpler
The histogram bin counts are computed from the scvi normalized expression values, binned in 100 bins from 10^-9 to 10^0
```
adata.obs = cell_types
adata.var = bins with counts
these should be 100 evenly spaced bins, with the counts of cells containing
values between (-10, 0), representing the data 10^-9 to 10^0 expression rates log10 transformed
adata.X = NOTHING (filled with vector of zeroes)
adata.layers[gene_id] = the key is the corresponding gene ID
                        each layer contains the per-cell-type bin counts for that gene
adata.uns['about'] = information about the dataset
```
## For the swarm plots
This data is a 3D tensor of shape:
$ n_{celltypes} \times n_{genes} \times n_{celltypes} = x_{obs} \times y_{var} \times z_{layers} $
Notice that the cell types are repeated along two dimensions, because this data contains the results of pairwise DE
comparisons among each cell type in the data.
Plus $n_{celltypes}$ matrices of shape:
$ n_{celltypes} \times n_{genes} = x_{obs} \times y_{var} $
Because each `anndata.uns[celltype]` contains a dataframe with global differential expression results for that celltype.
Finally, `anndata.uns['heatmap']` contains the 2D matrix with log10 scvi expression rates heatmap data, with genes in
the index and cell types in the columns. This can be used to display the expression of each tissue upon mouseover.
```
anndata.obs = cell_types
anndata.var = gene_id
anndata.X = NOTHING (filled with vector of zeroes)
anndata.layers[cell_type] = mean log fold change for a given cell type for all genes
anndata.uns[cell_type] = contain the DE result of the corresponding cell type vs all other cells
this can be used for ordering the genes by p-value, expression,
and by log fold change lfc max/min/median/mean/std
anndata.uns['heatmap']= dataframe with genes in index and cell types in columns containing the log10 of the
scvi expression frequency for each cell type
anndata.uns['about'] = information about the dataset
```
'''
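# Hedged usage sketch (commented out): reading the finished artifacts back in.
# Only the "<model_name>+..._anndata.h5ad" filename pattern comes from this script;
# the gene ID used as a layer key below is purely illustrative.
# import anndata
# heatmap = anndata.read_h5ad(model_name + '+heatmap_anndata.h5ad')
# heatmap_df = heatmap.to_df()                    # cell types x genes, log10 scvi rates
# hist = anndata.read_h5ad(model_name + '+histogram_anndata.h5ad')
# one_gene_hist = hist.layers['WBGene00000001']   # hypothetical gene ID key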
### USER DEFINED ARGUMENTS
### PLEASE MAKE SURE THESE ARGUMENTS MATCH YOUR DATA
# path to anndata file on which to train the model
anndata_path = 'cengen.h5ad'
# this should be the label on which you'd like to stratify groups
# typically it is cell_type or cell_subtype
stratification_label = 'cell_subtype'
# minimum number of UMIs seen for each gene to kept
min_gene_counts = 100
### the adata.obs key on which scvi should perform batch correction
batch_key = 'sample_batch'
# model_name is the name of the folder where scvi will look for the trained model, or
# save the trained model if it doesn't find anything
model_name = 'cengen_scvi_2021-06-13'
### these multiline strings will be added to the adata.uns['about'] property, it can be anything
about_heatmap = """
Heatmap data for model {string_model_name}.
This h5ad file is made to be used with the WormBase wormcells-viz app
https://github.com/WormBase/wormcells-viz
""".format(string_model_name=model_name)
about_histograms = """
Histogram data for model {string_model_name}.
This h5ad file is made to be used with the WormBase wormcells-viz app
https://github.com/WormBase/wormcells-viz
""".format(string_model_name=model_name)
about_swarmplots = """
Swarm plot data for model {string_model_name}
this h5ad file is made to be used with the WormBase wormcells-viz app
https://github.com/WormBase/wormcells-viz
""".format(string_model_name=model_name)
### END OF USER ARGUMENTS SECTION --- YOU SHOULDN'T NEED TO CHANGE ANYTHING BELOW HERE ####
### IMPORTS ###
print('Starting imports...')
import anndata
import scvi
import numpy as np
from tqdm import tqdm
import pandas as pd
import os
import scanpy
import warnings
from scipy import sparse
warnings.filterwarnings("ignore")
print('Using scvi-tools version:', scvi.__version__)
def load_or_train_scvi_model(model_name=model_name, anndata_path=anndata_path):
# Try loading model, if it doesn't exist train from scratch
print('Trying to load or train model...')
try:
model = scvi.model.SCVI.load(model_name)
print('Loaded model:', model_name)
except:
### DEFINE AND TRAIN MODEL
# these hyperparameters are fine for a small dataset, with a few batches
# if integration is a problem then you can try increasing the layers to 3
# and hidden units to 256
print('Creating and training model:', model_name)
adata = anndata.read_h5ad(anndata_path)
print(adata)
print('Restricting to genes with minimum counts of ', min_gene_counts)
adata.var['gene_counts'] = np.squeeze(np.asarray(adata.X.sum(0)))
adata = adata[:, adata.var.gene_counts > min_gene_counts]
print(adata)
## register adata with SCVI, for more information see
## https://docs.scvi-tools.org/en/stable/api/reference/scvi.data.setup_anndata.html
adata.layers["counts"] = adata.X.copy().tocsr() # converts to CSR format, preserve counts
scvi.data.setup_anndata(adata,
layer="counts",
batch_key=batch_key)
# typically you don't need to go tweak these parameters for training a model
model = scvi.model.SCVI(adata,
n_hidden=256,
n_layers=2,
gene_likelihood='nb',
dispersion='gene-batch'
)
# MODEL TRAINING
        # this model will train quickly even without a GPU; the configured epoch budget
        # may not be quite enough to finish training, but this script is meant to run
        # quickly just to show the entire data generation pipeline
model.train(check_val_every_n_epoch=1,
use_gpu=True,
max_epochs=125,
plan_kwargs={'lr': 1e-3})
train_test_results = model.history['elbo_train']
train_test_results['elbo_validation'] = model.history['elbo_validation']
### MAKE SURE THE MODEL FINISHED TRAINING FOR BEST RESULTS
print(train_test_results)
model.save(model_name, save_anndata=True)
# save the training results to a csv for inspection if needed
train_test_results.to_csv(model_name + '+train_test_results.csv')
return model
def make_de_global(model, stratification_label=stratification_label, model_name=model_name):
    # perform DE on each cell type vs the rest of cells, this computes the expression (scale1)
# in each celltype, used for the heatmap anndata, plus scale1, the p-values and lfc_median
# for each cell type which are used for ranking the swarmplot
# saves in a csv to avoid recomputing
# checks if the CSV exists prior to running the DE
de_global_filename = model_name + '+de_global.csv'
try:
de_global = pd.read_csv(de_global_filename, index_col=0)
print('Loaded global DE:', de_global_filename)
except:
print('Performing global DE...')
de_global = model.differential_expression(
groupby=stratification_label,
all_stats=False
)
# scvi currently puts the groups in a column named "comparison", eg
# an entry would be "Neurons vs Instestine" but we need to split that into
# one column for group1 and group2. Submitted a PR to change that:
# https://github.com/YosefLab/scvi-tools/pull/1074
de_global['group1'] = de_global['comparison'].str.split(' vs ', expand=True)[0]
de_global['group2'] = de_global['comparison'].str.split(' vs ', expand=True)[1]
de_global.to_csv(de_global_filename)
return de_global
def make_heatmap_anndata(de_global,
about=about_heatmap,
model_name=model_name,
stratification_label=stratification_label):
heatmap_anndata_filename = model_name + '+heatmap_anndata.h5ad'
if os.path.isfile(heatmap_anndata_filename):
        print('Skipping heatmap creation, anndata already exists at file: ', heatmap_anndata_filename)
return None
else:
print('Creating heatmap anndata... ')
# pivot the DE result dataframe to create a dataframe for the heatmap
# with gene ids in the index and cell type name in the columns and
# scale1 in the entries, then take the log10 of scale1
heatmap_df = de_global[['scale1', 'group1']]
heatmap_df['log10scale1'] = np.log10(heatmap_df['scale1']).values
heatmap_df = heatmap_df[['log10scale1', 'group1']]
heatmap_df = heatmap_df.pivot(columns='group1', values='log10scale1')
heatmap_df.to_csv(model_name + '+heatmap_df.csv')
# put the heatmap data in anndata object
heatmap_adata = anndata.AnnData(X=heatmap_df.values.T,
obs=pd.DataFrame(index=heatmap_df.columns.values),
var=pd.DataFrame(index=heatmap_df.index.values),
)
# rename obs and var to make clear what they hold
heatmap_adata.var.index.rename('gene_id', inplace=True)
heatmap_adata.obs.index.rename(stratification_label, inplace=True)
        # add some metadata explaining what the data is
        heatmap_adata.uns['about'] = about
heatmap_adata.write_h5ad(heatmap_anndata_filename)
print('Heatmap anndata saved: ', heatmap_anndata_filename)
return None
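# Hedged orchestration sketch (the actual call sequence presumably lives further down
# in the full script; shown here only to make the intended chaining of helpers explicit):
# model = load_or_train_scvi_model()
# de_global = make_de_global(model)
# make_heatmap_anndata(de_global)
# make_histogram_anndata(model)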
def make_histogram_anndata(model,
stratification_label=stratification_label,
about_histograms=about_histograms):
histogram_anndata_filename = model_name + '+histogram_anndata.h5ad'
if os.path.isfile(histogram_anndata_filename):
print('Skipping histogram creation, anndata already exists at file: ', histogram_anndata_filename)
return None
else:
adata = model.adata
bins_intervals = np.histogram([0], bins=100, range=(-10, 0), density=False)[1][:-1]
### get the scvi normalized expression then log10 that
adata.layers['normalized'] = model.get_normalized_expression()
adata.layers['log10normalized'] = np.log10(adata.layers['normalized'])
###loops through each cell type and then each gene to compute the histogram of expression
# first get dimensions to initialize adata object
obs_stratification_label_unique_values = adata.obs[stratification_label].unique()
# gets the bin intervals from the np histogram function
nbins = 100
histogram_range = (-10, 0)
bin_intervals = np.histogram([0], bins=nbins, range=histogram_range, density=False)[1][:-1]
# converts list of bins to string for anndata var index
bin_intervals = np.round(list(bins_intervals), 1).astype(str)
gene_histogram_adata = anndata.AnnData(X=np.zeros((len(obs_stratification_label_unique_values),
len(bins_intervals))),
                                               obs=pd.DataFrame(index=obs_stratification_label_unique_values),
                                               var=pd.DataFrame(index=bin_intervals))
# coding: utf-8
# # Coverage of eADAGE LV
#
# The goal of this notebook is to examine why genes were found to be generic. Specifically, this notebook is trying to answer the question: Are generic genes found in more eADAGE latent variables compared to specific genes?
#
# The eADAGE model uses a DAE to extract patterns of gene expression activity in the latent variables (referred to as nodes in the paper). Here we are examining the coverage of generic genes within these latent variables.
#
# **Definitions:**
# * Generic genes: Are genes that are consistently differentially expressed across multiple simulated experiments.
#
# * Other genes: These are all other non-generic genes. These genes include those that are not consistently differentially expressed across simulated experiments - i.e. the genes are specifically changed in an experiment. It could also indicate genes that are consistently unchanged (i.e. housekeeping genes)
# In[1]:
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import os
import random
import textwrap
import scipy
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
import rpy2.robjects as ro
from rpy2.robjects import pandas2ri
from rpy2.robjects.conversion import localconverter
from ponyo import utils
from generic_expression_patterns_modules import lv
# In[2]:
# Get data directory containing gene summary data
base_dir = os.path.abspath(os.path.join(os.getcwd(), "../"))
data_dir = os.path.join(base_dir, "pseudomonas_analysis")
# Read in config variables
config_filename = os.path.abspath(
os.path.join(base_dir, "configs", "config_pseudomonas_33245.tsv")
)
params = utils.read_config(config_filename)
local_dir = params["local_dir"]
project_id = params["project_id"]
# In[3]:
# Output file
nonzero_figure_filename = "nonzero_LV_coverage_pa.svg"
highweight_figure_filename = "highweight_LV_coverage_pa.svg"
# ## Load data
# In[4]:
# Get gene summary file
summary_data_filename = os.path.join(
data_dir,
f"generic_gene_summary_{project_id}_crcZ_v_WT.tsv"
)
# In[5]:
# Load gene summary data
data = pd.read_csv(summary_data_filename, sep="\t", index_col=0, header=0)
# Check that genes are unique since we will be using them as dictionary keys below
assert(data.shape[0] == len(data["Gene ID"].unique()))
# **Manual steps to process eADAGE data**
# 1. Data downloaded from https://zenodo.org/record/580093
# 2. Get Weight matrix (`eADAGE weight matrix.xlsx`) file
# 3. Save tab (`weight matrix`) as .csv file in `local_dir`
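# Hedged sketch of automating manual step 3 above (assumes pandas + openpyxl are
# installed and that the sheet really is named "weight matrix"; kept commented out):
# pd.read_excel(
#     "eADAGE weight matrix.xlsx", sheet_name="weight matrix", index_col=0
# ).to_csv(os.path.join(local_dir, "eADAGE weight matrix.csv"))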
# In[6]:
# Load eADAGE weight matrix
eADAGE_weight_filename = os.path.join(local_dir, "eADAGE weight matrix.csv")
eADAGE_weight = pd.read_csv(eADAGE_weight_filename, sep=",", index_col=0, header=0)
# In[7]:
eADAGE_weight.shape
# In[8]:
# Get a rough sense for how many genes contribute to a given LV
# (i.e. how many genes have a value > 0 for a given LV)
# Notice that eADAGE is NOT sparse
(eADAGE_weight != 0).sum().sort_values(ascending=True)
# ## Get gene data
#
# Define generic genes based on simulated gene ranking. Refer to [figure](https://github.com/greenelab/generic-expression-patterns/blob/master/pseudomonas_analysis/gene_ranking_logFC.svg) as a guide.
#
# **Definitions:**
# * Generic genes: `Rank (simulated) >= 4500`
#
# (Having a high rank indicates that these genes are consistently changed across simulated experiments.)
#
# * Other genes: `Rank (simulated) < 4500`
#
# (Having a lower rank indicates that these genes are not consistently changed across simulated experiments - i.e. the genes are specifically changed in an experiment. It could also indicate genes that are consistently unchanged.)
# In[9]:
generic_threshold = 4500
dict_genes = lv.get_generic_specific_genes(data, generic_threshold)
# In[10]:
# Check overlap between eADAGE genes and our genes
eADAGE_genes = list(eADAGE_weight.index)
our_genes = list(data.index)
shared_genes = set(our_genes).intersection(eADAGE_genes)
print(len(our_genes))
print(len(shared_genes))
# In[11]:
# Drop gene ids not used in eADAGE analysis
processed_dict_genes = lv.process_generic_specific_gene_lists(dict_genes, eADAGE_weight)
# In[12]:
# Check numbers add up
assert len(shared_genes) == len(processed_dict_genes["generic"]) + len(processed_dict_genes["other"])
# ## Get coverage of LVs
#
# For each gene (generic or other) we want to find:
# 1. The number of LVs that gene is present
# 2. The number of LVs that the gene contributes a lot to (i.e. the gene is highly weighted within that LV)
# ### Nonzero LV coverage
# In[13]:
dict_nonzero_coverage = lv.get_nonzero_LV_coverage(processed_dict_genes, eADAGE_weight)
# In[14]:
# Check genes mapped correctly
assert processed_dict_genes["generic"][0] in dict_nonzero_coverage["generic"].index
assert len(dict_nonzero_coverage["generic"]) == len(processed_dict_genes["generic"])
assert len(dict_nonzero_coverage["other"]) == len(processed_dict_genes["other"])
# ### High weight LV coverage
# In[15]:
dict_highweight_coverage = lv.get_highweight_LV_coverage_pseudomonas(processed_dict_genes, eADAGE_weight)
# In[16]:
# Check genes mapped correctly
assert processed_dict_genes["generic"][0] in dict_highweight_coverage["generic"].index
assert len(dict_highweight_coverage["generic"]) == len(processed_dict_genes["generic"])
assert len(dict_highweight_coverage["other"]) == len(processed_dict_genes["other"])
# In[17]:
# Check high weight genes obtained are in fact at the extremes of the distribution
# Quick look at the distribution of gene weights per LV
sns.distplot(eADAGE_weight["Node2"], kde=False)
plt.yscale("log")
# ### Assemble LV coverage and plot
# In[18]:
all_coverage = []
for gene_label in dict_genes.keys():
merged_df = pd.DataFrame(
dict_nonzero_coverage[gene_label],
columns= ["nonzero LV coverage"]
).merge(
pd.DataFrame(
dict_highweight_coverage[gene_label],
columns= ["highweight LV coverage"]
),
left_index=True,
right_index=True
)
merged_df['gene type'] = gene_label
all_coverage.append(merged_df)
all_coverage_df = pd.concat(all_coverage)
"""Track and analyze grocery spending at an item level.
This app allows a user to input a grocery item.
"""
# app.py
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
import plotly.express as px
import pandas as pd
import dash_table
from dash_table import FormatTemplate
from datetime import date
df = pd.read_csv('data/items.csv') # read items.csv file into df
df_category = pd.read_csv('data/category.csv') # read category.csv file into df
pd.options.display.float_format = '{:.2f}'.format # set pandas format to 2 decimals
df['total'] = df['price'] * df['quantity'] # add 'total' column to df
df['month_year'] = pd.to_datetime(df['date'])
import numpy as np
import pandas as pd
class DataTransform:
def __init__(self, data):
self.data = data
def columns_pattern(self):
# get columns name from dataframe
columns = list(self.data.columns)
# remove spaces, specified characters and format to snake case
columns = list(map( lambda x: x.strip().lower().replace(' ', '_').replace(':', '').replace('.', ''), columns))
self.data.columns = columns
# for col in columns:
# self.data[col] = self.data[col].apply(
# lambda x: x.strip().lower().replace(' ', '_').replace(':', '')
# )
return self.data
def price_format(self):
# format price columns to float
self.data['product_price'] = self.data['product_price'].apply(
lambda x: float(x.replace('$', '').strip()) if pd.notnull(x) else x
)
return self.data
def str_values_pattern(self):
# select string subset of the dataframe
#data_cat = self.data.select_dtypes(include='str')
# get string columns name from dataframe
columns = list(self.data.select_dtypes(include='object').columns)
# remove spaces, specified characters and format to snake case
for col in columns:
self.data[col] = self.data[col].apply(
                lambda x: x.strip().lower().replace(' ', '_') if pd.notnull(x) else x
            )
        return self.data
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Apache License, version 2.0.
# If a copy of the Apache License, version 2.0 was not distributed with this file, you can obtain one at http://www.apache.org/licenses/LICENSE-2.0.
# SPDX-License-Identifier: Apache-2.0
# This file is part of hadar-simulator, a python adequacy library for everyone.
import unittest
import numpy as np
import pandas as pd
from pandas import MultiIndex
from hadar.workflow.pipeline import (
Stage,
FreePlug,
RestrictedPlug,
FocusStage,
Clip,
Rename,
Drop,
Fault,
RepeatScenario,
Pipeline,
)
class Double(Stage):
def __init__(self):
Stage.__init__(self, FreePlug())
def _process_timeline(self, timelines: pd.DataFrame) -> pd.DataFrame:
return timelines * 2
class Max9(Stage):
def __init__(self):
Stage.__init__(self, FreePlug())
def _process_timeline(self, timelines: pd.DataFrame) -> pd.DataFrame:
return timelines.clip(None, 9)
class Divide(FocusStage):
def __init__(self):
Stage.__init__(self, RestrictedPlug(inputs=["a", "b"], outputs=["d", "r"]))
def _process_scenarios(self, n_scn: int, scenario: pd.DataFrame) -> pd.DataFrame:
scenario.loc[:, "d"] = (scenario["a"] / scenario["b"]).apply(np.floor)
scenario.loc[:, "r"] = scenario["a"] - scenario["b"] * scenario["d"]
return scenario.drop(["a", "b"], axis=1)
class Inverse(FocusStage):
def __init__(self):
FocusStage.__init__(self, RestrictedPlug(inputs=["d"], outputs=["d", "-d"]))
def _process_scenarios(self, n_scn: int, scenario: pd.DataFrame) -> pd.DataFrame:
scenario.loc[:, "-d"] = -scenario["d"]
return scenario.copy()
class Wrong(Stage):
def __init__(self):
Stage.__init__(self, plug=RestrictedPlug(inputs=["e"], outputs=["e"]))
def _process_timeline(self, timeline: pd.DataFrame) -> pd.DataFrame:
return timeline
class TestFreePlug(unittest.TestCase):
def test_linkable_to(self):
self.assertTrue(FreePlug().linkable_to(FreePlug()))
def test_join_to_fre(self):
# Input
a = FreePlug()
b = FreePlug()
# Test
c = a + b
self.assertEqual(a, c)
def test_join_to_restricted(self):
# Input
a = FreePlug()
b = RestrictedPlug(inputs=["a", "b"], outputs=["c", "d"])
# Test
c = a + b
self.assertEqual(b, c)
class TestRestrictedPlug(unittest.TestCase):
def test_linkable_to_free(self):
# Input
a = RestrictedPlug(inputs=["a"], outputs=["b"])
# Test
self.assertTrue(a.linkable_to(FreePlug()))
def test_linkable_to_restricted_ok(self):
# Input
a = RestrictedPlug(inputs=["a"], outputs=["b", "c", "d"])
b = RestrictedPlug(inputs=["b", "c"], outputs=["e"])
# Test
self.assertTrue(a.linkable_to(b))
def test_linkable_to_restricted_wrong(self):
# Input
a = RestrictedPlug(inputs=["a"], outputs=["b", "c", "d"])
b = RestrictedPlug(inputs=["b", "c", "f"], outputs=["e"])
# Test
self.assertFalse(a.linkable_to(b))
def test_join_to_free(self):
# Input
a = RestrictedPlug(inputs=["a"], outputs=["b"])
# Test
b = a + FreePlug()
self.assertEqual(a, b)
def test_join_to_restricted(self):
# Input
a = RestrictedPlug(inputs=["a"], outputs=["b", "c", "d"])
b = RestrictedPlug(inputs=["b", "c"], outputs=["e"])
# Expected
exp = RestrictedPlug(inputs=["a"], outputs=["e", "d"])
# Test
c = a + b
self.assertEqual(exp, c)
class TestPipeline(unittest.TestCase):
def test_compute(self):
# Input
i = pd.DataFrame({"a": [1, 2, 3]})
pipe = Pipeline(stages=[Double(), Double()])
# Expected
exp = pd.DataFrame({(0, "a"): [4, 8, 12]})
# Test & Verify
o = pipe(i)
pd.testing.assert_frame_equal(exp, o)
def test_add(self):
# Input
i = pd.DataFrame({"a": [1, 2, 3], "b": [1, 2, 3]})
pipe = Pipeline(stages=[Double(), Double()])
pipe += Divide()
# Expected
exp = pd.DataFrame({(0, "d"): [1, 1, 1], (0, "r"): [0, 0, 0]}, dtype=float)
# Test & Verify
o = pipe(i)
self.assertEqual(3, len(pipe.stages))
self.assertIsInstance(pipe.plug, RestrictedPlug)
pd.testing.assert_frame_equal(exp, o)
def test_link_pipeline_free_to_free(self):
# Input
i = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
pipe = Double() + Max9()
# Expected
exp = pd.DataFrame({(0, "a"): [2, 4, 6], (0, "b"): [8, 9, 9]})
# Test & Verify
o = pipe(i)
pd.testing.assert_frame_equal(exp, o)
self.assertEqual([], pipe.plug.inputs)
self.assertEqual([], pipe.plug.outputs)
def test_link_pipeline_free_to_restricted(self):
# Input
i = pd.DataFrame({"a": [10, 20, 32], "b": [4, 5, 6]})
pipe = Double() + Divide()
# Expected
exp = pd.DataFrame({(0, "d"): [2, 4, 5], (0, "r"): [4, 0, 4]}, dtype="float")
# Test & Verify
o = pipe(i)
pd.testing.assert_frame_equal(exp, o)
self.assertEqual(["a", "b"], pipe.plug.inputs)
self.assertEqual(["d", "r"], pipe.plug.outputs)
def test_link_pipeline_restricted_to_free(self):
# Input
i = pd.DataFrame({"a": [10, 20, 32], "b": [4, 5, 6]})
pipe = Divide() + Double()
# Expected
        exp = pd.DataFrame({(0, "d"): [4, 8, 10], (0, "r"): [4, 0, 4]}, dtype="float")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 29 21:06:55 2018
@author: paulhuynh
"""
###############################################################################
### packages required to run code. Make sure to install all required packages.
###############################################################################
import re,string
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.manifold import MDS
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score, accuracy_score, confusion_matrix
from sklearn.model_selection import cross_val_score, StratifiedKFold
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, KFold
import pandas as pd
import os
from gensim.models import Word2Vec,LdaMulticore, TfidfModel
from gensim import corpora
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
import numpy as np
###############################################################################
### Function to process documents
###############################################################################
def clean_doc(doc):
#split document into individual words
tokens=doc.split()
re_punc = re.compile('[%s]' % re.escape(string.punctuation))
# remove punctuation from each word
tokens = [re_punc.sub('', w) for w in tokens]
# remove remaining tokens that are not alphabetic
tokens = [word for word in tokens if word.isalpha()]
# filter out short tokens
tokens = [word for word in tokens if len(word) > 4]
#lowercase all words
tokens = [word.lower() for word in tokens]
# filter out stop words
stop_words = set(stopwords.words('english'))
tokens = [w for w in tokens if not w in stop_words]
# word stemming
# ps=PorterStemmer()
# tokens=[ps.stem(word) for word in tokens]
return tokens
###############################################################################
# Functions to label encoding
###############################################################################
def One_Hot(variable):
LE=LabelEncoder()
LE.fit(variable)
Label1=LE.transform(variable)
OHE=OneHotEncoder()
labels=OHE.fit_transform(Label1.reshape(-1,1)).toarray()
return labels, LE, OHE
###############################################################################
### Processing text into lists
###############################################################################
#set working Directory to where class corpus is saved.
os.chdir('/Users/paulhuynh/Documents/School/2019 Winter/453 - Maren/2019 Winter - MSDS 453 - Class Corpus/')
#read in class corpus csv into python
data=pd.read_csv('Class Corpus.csv')
#create empty list to store text documents titles
titles=[]
#for loop which appends the DSI title to the titles list
for i in range(0,len(data)):
temp_text=data['DSI_Title'].iloc[i]
titles.append(temp_text)
#create empty list to store text documents
text_body=[]
#for loop which appends the text to the text_body list
for i in range(0,len(data)):
temp_text=data['Text'].iloc[i]
text_body.append(temp_text)
#Note: the text_body is the unprocessed list of documents read directly from
#the csv.
#empty list to store processed documents
processed_text=[]
#for loop to process the text to the processed_text list
for i in text_body:
text=clean_doc(i)
processed_text.append(text)
#Note: the processed_text is the PROCESSED list of documents read directly from
#the csv. Note the list of words is separated by commas.
#stitch back together individual words to reform body of text
final_processed_text=[]
for i in processed_text:
temp_DSI=i[0]
for k in range(1,len(i)):
temp_DSI=temp_DSI+' '+i[k]
final_processed_text.append(temp_DSI)
#Note: We stitched the processed text together so the TFIDF vectorizer can work.
#Final section of code has 4 lists. 3 of which are used for further processing.
#(1) text_body - unused, (2) processed_text (used in W2V),
#(3) final_processed_text (used in TFIDF), and (4) DSI titles (used in TFIDF Matrix)
###############################################################################
### Sklearn TFIDF
###############################################################################
#note the ngram_range will allow you to include multiple words within the TFIDF matrix
#Call Tfidf Vectorizer
Tfidf=TfidfVectorizer(ngram_range=(1,3))
#fit the vectorizer using final processed documents. The vectorizer requires the
#stitched back together document.
TFIDF_matrix=Tfidf.fit_transform(final_processed_text)
#creating dataframe from TFIDF Matrix
matrix=pd.DataFrame(TFIDF_matrix.toarray(), columns=Tfidf.get_feature_names(), index=titles)
###############################################################################
### Explore TFIDF Values
###############################################################################
average_TFIDF={}
for i in matrix.columns:
average_TFIDF[i]=np.mean(matrix[i])
average_TFIDF_DF=pd.DataFrame(average_TFIDF,index=[0]).transpose()
average_TFIDF_DF.columns=['TFIDF']
#calculate Q1 and Q3 range
Q1=np.percentile(average_TFIDF_DF, 25)
Q3=np.percentile(average_TFIDF_DF, 75)
IQR = Q3 - Q1
outlier=Q3+(1.5*IQR)
#words that exceed the Q3+IQR*1.5
outlier_list=average_TFIDF_DF[average_TFIDF_DF['TFIDF']>=outlier]
#can export matrix to csv and explore further if necessary
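# Hedged example of that export (filename is illustrative):
# matrix.to_csv('TFIDF_matrix.csv')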
###############################################################################
### Doc2Vec
###############################################################################
documents = [TaggedDocument(doc, [i]) for i, doc in enumerate(final_processed_text)]
model = Doc2Vec(documents, vector_size=100, window=2, min_count=1, workers=4)
doc2vec_df=pd.DataFrame()
for i in range(0,len(processed_text)):
vector=pd.DataFrame(model.infer_vector(processed_text[i])).transpose()
doc2vec_df=pd.concat([doc2vec_df,vector], axis=0)
doc2vec_df=doc2vec_df.reset_index()
doc_titles={'title': titles}
t=pd.DataFrame(doc_titles)
doc2vec_df=pd.concat([doc2vec_df,t], axis=1)
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 5 09:28:56 2018
@author: nce3xin
"""
from __future__ import print_function
import os
import sys
current_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(current_dir)
sys.path.append('..')
import torch
from scipy.io import arff
import pandas as pd
import hyperparams
import torch.utils.data
from sklearn.preprocessing import MinMaxScaler
from sklearn import preprocessing
import extract_2D_features
#from CNN_res import extract_dense_representation
import numpy as np
from models import model_CNN
import hyperparams
batch_size = hyperparams.batch_size
use_cuda = not hyperparams.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# CNN_res
def load_model(pt):
model=model_CNN.CNNModel()
model.load_state_dict(torch.load(pt))
return model
# load CNN model
cnn_out_dims=hyperparams.cnn_out_dims
CNN_model_pt='models_saved/CNN_cuda_epoch=90_outdims='+str(cnn_out_dims)+'.pth'
#CNN_model_pt='models_saved/CNN_cpu_epoch=6_outdims=5.pth'
if hyperparams.MODEL!='CNN' and hyperparams.CNN_mapping:
model=load_model(CNN_model_pt)
model=model.to(device)
# load merged_train_df and merged_test_df
merged_train_df=pd.read_csv('data/gen/train.csv')
merged_test_df=pd.read_csv('data/gen/test.csv')
def extract_dense_ftrs_by_CNN(model,data_loader,device):
with torch.no_grad():
for i, (data, _) in enumerate(data_loader):
data = data.to(device)
data = data.float()
output,dense=model(data)
if i==0:
prev=dense
else:
prev=torch.cat([prev,dense],0)
return prev
def extract_dense_representation():
train_loader,test_loader,_,_=load_2d_data_for_CNN(shuffle=False)
train_dense=extract_dense_ftrs_by_CNN(model,train_loader,device)
test_dense=extract_dense_ftrs_by_CNN(model,test_loader,device)
return train_dense,test_dense
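# Hedged usage note: when a non-CNN model is selected with CNN_mapping enabled,
# the dense CNN features for the train/test splits can be pulled with:
# train_dense, test_dense = extract_dense_representation()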
# CNN_res
class WeiboDenseDataset(torch.utils.data.Dataset):
def __init__(self,data,normalization):
self.normalization=normalization
self.ftrs=data[:,1:-1] # remove "Instance number" column
#self.ftrs=self.ftrs.float()
if self.normalization:
if hyperparams.standard_scale:
self.ftrs=preprocessing.scale(self.ftrs)
elif hyperparams.min_max_scaler:
min_max_scaler=MinMaxScaler()
self.ftrs=min_max_scaler.fit_transform(self.ftrs)
self.label=data[:,-1].astype(int)
def __len__(self):
return len(self.ftrs)
def __getitem__(self,idx):
ftrs=self.ftrs[idx,:]
label=self.label[idx]
return (ftrs,label)
def get_n_class(self):
return 3
def get_in_ftrs(self):
return self.ftrs.shape[1]
class Weibo2DForCNN(torch.utils.data.Dataset):
def __init__(self,ftrs2D,labels_for_2D_ftrs):
self.ftrs=ftrs2D
self.ftrs=self.ftrs.unsqueeze(1)
self.ftrs=self.ftrs.float()
self.label=labels_for_2D_ftrs
def __len__(self):
return len(self.ftrs)
def __getitem__(self,idx):
ftrs=self.ftrs[idx,:]
label=self.label[:,idx] # Because label is a horizontal vector
return (ftrs,label)
def get_n_class(self):
return 3
def get_in_ftrs(self):
return self.ftrs.size()[1]
class WeiboTemporalDataset(torch.utils.data.Dataset):
def __init__(self,file_path,normalization):
self.df=pd.read_csv(file_path)
self.normalization=normalization
# convert object labels to numeric
self.df['Class']=self.df['Class'].astype('category')
self.df['Class']=self.df['Class'].cat.rename_categories([0,1,2]).astype(int)
self.df=self.df.iloc[:,1:]
self.ftrs=self.df.iloc[:,:-1] # features
self.ftrs=self.ftrs.values
if self.normalization:
if hyperparams.standard_scale:
self.ftrs=preprocessing.scale(self.ftrs)
elif hyperparams.min_max_scaler:
min_max_scaler=MinMaxScaler()
self.ftrs=min_max_scaler.fit_transform(self.ftrs)
self.ftrs=torch.from_numpy(self.ftrs)
self.ftrs=self.ftrs.float()
self.label=torch.LongTensor([self.df.iloc[:,-1]]) # labels
def __len__(self):
return len(self.df)
def __getitem__(self,idx):
ftrs=self.ftrs[idx,:]
label=self.label[:,idx] # Because label is a horizontal vector
return (ftrs,label)
def get_n_class(self):
return 3
def get_in_ftrs(self):
return self.ftrs.size()[1]
# dataset
class WeiboDataset(torch.utils.data.Dataset):
def __init__(self,file_path,temporal,normalization):
# read .arff file
data = arff.loadarff(file_path)
        self.df = pd.DataFrame(data[0])
import pysam
import pandas as pd
import numpy as np
import re
import os
import sys
import collections
import scipy
from scipy import stats
import statsmodels
from statsmodels.stats.multitest import fdrcorrection
try:
from . import global_para
except ImportError:
import global_para
try:
from .consensus_seq import *
except ImportError:
from consensus_seq import *
try:
from .math_stat import *
except ImportError:
from math_stat import *
def f_0cdna():
# if 0 cdna detected, report messages to users
global_para.logger.info("Program finished successfully")
global_para.logger.info("No cDNA detected. Exiting.")
exit(0)
def f_if_0cdna(obj):
if len(obj) == 0:
f_0cdna()
def f_warning_merge_region(df_region):
# df_region = df_region_stat_bed_merge.copy()
df_region['diff'] =abs( df_region.start - df_region.end)
df_region_diff = df_region[df_region['diff']>10000]
del df_region_diff['diff']
if len(df_region_diff)>0:
global_para.logger.warning("%d extreme long regions are detected (>10 kb), please check results carefully"%len(df_region_diff))
global_para.logger.info(df_region_diff)
def read_gene_model(gtf_gene_unique_file):
# load gene model into a dataframe
print('Loading gene model table')
dict_type = {
"seqname":"str",
"start":"int64",
"end":"int64",
"gene_id":"str",
"gene_name":"str",
"transcript_id":"str",
"exon_flank_start20":"str",
"exon_flank_end20":"str",
"is_exon_boundary_start":"str",
"is_exon_boundary_end":"str",
"exon_boundary_start_nearseq20":"str",
"exon_boundary_end_nearseq20":"str"}
df_gene_exon_unique = pd.read_csv(gtf_gene_unique_file, sep = '\t',header = 0)
df_gene_exon_unique = df_gene_exon_unique.astype(dict_type)
# convert all sequences to uppercase
df_gene_exon_unique['exon_flank_start20'] = df_gene_exon_unique['exon_flank_start20'].str.upper()
df_gene_exon_unique['exon_flank_end20'] = df_gene_exon_unique['exon_flank_end20'].str.upper()
df_gene_exon_unique['exon_boundary_start_nearseq20'] = df_gene_exon_unique['exon_boundary_start_nearseq20'].str.upper()
df_gene_exon_unique['exon_boundary_end_nearseq20'] = df_gene_exon_unique['exon_boundary_end_nearseq20'].str.upper()
df_gene_exon_unique = df_gene_exon_unique.fillna('')
print('Loaded %d exons\n'%(len(df_gene_exon_unique)))
return(df_gene_exon_unique)
def check_bam_index(genome_bam_file):
## check index of bam file; if no, generate one.
print('Checking index of input bam file')
if os.path.exists(genome_bam_file + '.bai') or os.path.exists(re.sub('bam$','bai',genome_bam_file)):
print('Index file exists')
else:
print('file is not indexed, now generating index')
pysam.index(genome_bam_file)
print('Index file created\n')
return
def f_overlap_reference(genome_bam_file,df_gene_exon):
# overlap reference for input bam and gene model
bam_genome = pysam.AlignmentFile(genome_bam_file,'rb')
reference_bam = bam_genome.references
bam_genome.close()
reference_exon = df_gene_exon.seqname.unique().tolist()
overlap_reference = [x for x in reference_bam if x in reference_exon]
if len(overlap_reference)==0: global_para.logger.error('chromosome names are not matched between gene model and bam file'); exit(1)
df_gene_exon = df_gene_exon.query('seqname in @overlap_reference')
return df_gene_exon
def f_close_exon_merge(df_transcript_exon):
df_transcript_exon = df_transcript_exon.sort_values(['transcript_id','start'])
df_transcript_exon = df_transcript_exon.reset_index(drop = True)
df_transcript_exon['start_next'] = df_transcript_exon.groupby(['transcript_id'])['start'].shift(-1)
df_transcript_exon['dis_exon'] = abs(df_transcript_exon['end'] - df_transcript_exon['start_next'])
df_transcript_exon_close = df_transcript_exon.query('dis_exon<@global_para.exon_distance')
list_transcript = df_transcript_exon_close.transcript_id.unique().tolist()
if len(list_transcript) >0:
list_df_transcript_merge = []
for transcript_id in list_transcript:
sub_df = df_transcript_exon.query('transcript_id==@transcript_id')
sub_df_new = f_df_1transcript_merge(sub_df)
list_df_transcript_merge.append(sub_df_new)
df_transcript_exon_close_new = pd.concat(list_df_transcript_merge)
df_transcript_exon_noclose = df_transcript_exon.query('transcript_id not in @list_transcript')
df_transcript_exon_new = pd.concat([df_transcript_exon_close_new, df_transcript_exon_noclose])
else:
df_transcript_exon_new = df_transcript_exon.copy()
del df_transcript_exon_new['start_next']
del df_transcript_exon_new['dis_exon']
return df_transcript_exon_new
def f_df_1transcript_merge(sub_df):
list_new = []
list_iskeep = [True]*len(sub_df)
for i in range(len(sub_df)):
# print("line %s"%i)
if list_iskeep[i] and sub_df.iloc[i].dis_exon <global_para.exon_distance:
j = 1
up_list = sub_df.iloc[[i]]
while up_list.iloc[0].dis_exon < global_para.exon_distance:
down_list = sub_df.iloc[[i+j]]
up_list['end'] = down_list.iloc[0]['end']
up_list['exon_flank_end20'] = down_list.iloc[0]['exon_flank_end20']
up_list['is_exon_boundary_end'] = down_list.iloc[0]['is_exon_boundary_end']
up_list['exon_boundary_end_nearseq20'] = down_list.iloc[0]['exon_boundary_end_nearseq20']
up_list['start_next'] = down_list.iloc[0]['start_next']
up_list['dis_exon'] = down_list.iloc[0]['dis_exon']
list_iskeep[i+j] = False
j = j+1
list_new.append(up_list)
elif list_iskeep[i] == False:
continue
else:
list_new.append(sub_df.iloc[[i]])
    sub_df_new = pd.concat(list_new)
    return sub_df_new
# -*- coding: utf-8 -*-
"""Datareader for cell testers and potentiostats.
This module is used for loading data and databases created by different cell
testers. Currently it only accepts arbin-type res-files (access) data as
raw data files, but we intend to implement more types soon. It also creates
processed files in the hdf5-format.
Example:
>>> d = CellpyData()
    >>> d.loadcell(raw_files=["file1.res", "file2.res"])  # loads and merges the runs
>>> voltage_curves = d.get_cap()
>>> d.save("mytest.hdf")
Todo:
* Remove mass dependency in summary data
        * use df.loc[row, column] e.g. df.loc[:, "charge_cap"] for a column or
          df.loc[(df["step"] == 1), "x"] for a filtered selection
"""
import os
from pathlib import Path
import logging
import sys
import collections
import warnings
import csv
import itertools
import time
from scipy import interpolate
import numpy as np
import pandas as pd
from pandas.errors import PerformanceWarning
from cellpy.parameters import prms
from cellpy.exceptions import WrongFileVersion, DeprecatedFeature, NullData
from cellpy.parameters.internal_settings import (
get_headers_summary,
get_cellpy_units,
get_headers_normal,
get_headers_step_table,
ATTRS_CELLPYFILE,
)
from cellpy.readers.core import (
FileID,
Cell,
CELLPY_FILE_VERSION,
MINIMUM_CELLPY_FILE_VERSION,
xldate_as_datetime,
)
HEADERS_NORMAL = get_headers_normal()
HEADERS_SUMMARY = get_headers_summary()
HEADERS_STEP_TABLE = get_headers_step_table()
# TODO: @jepe - performance warnings - mixed types within cols (pytables)
performance_warning_level = "ignore" # "ignore", "error"
warnings.filterwarnings(
    performance_warning_level, category=PerformanceWarning
)
pd.set_option("mode.chained_assignment", None) # "raise", "warn", None
module_logger = logging.getLogger(__name__)
class CellpyData(object):
"""Main class for working and storing data.
This class is the main work-horse for cellpy where all the functions for
reading, selecting, and tweaking your data is located. It also contains the
header definitions, both for the cellpy hdf5 format, and for the various
cell-tester file-formats that can be read. The class can contain
several tests and each test is stored in a list. If you see what I mean...
Attributes:
cells (list): list of DataSet objects.
"""
def __str__(self):
txt = "<CellpyData>\n"
if self.name:
txt += f"name: {self.name}\n"
if self.table_names:
txt += f"table_names: {self.table_names}\n"
if self.tester:
txt += f"tester: {self.tester}\n"
if self.cells:
txt += "datasets: [ ->\n"
for i, d in enumerate(self.cells):
txt += f" ({i})\n"
for t in str(d).split("\n"):
txt += " "
txt += t
txt += "\n"
txt += "\n"
txt += "]"
else:
txt += "datasets: []"
txt += "\n"
return txt
def __bool__(self):
if self.cells:
return True
else:
return False
def __init__(
self,
filenames=None,
selected_scans=None,
profile=False,
filestatuschecker=None, # "modified"
fetch_one_liners=False,
tester=None,
initialize=False,
):
"""CellpyData object
Args:
filenames: list of files to load.
selected_scans:
profile: experimental feature.
filestatuschecker: property to compare cellpy and raw-files;
default read from prms-file.
fetch_one_liners: experimental feature.
tester: instrument used (e.g. "arbin") (checks prms-file as
default).
initialize: create a dummy (empty) dataset; defaults to False.
"""
if tester is None:
self.tester = prms.Instruments.tester
else:
self.tester = tester
self.loader = None # this will be set in the function set_instrument
self.logger = logging.getLogger(__name__)
self.logger.debug("created CellpyData instance")
self.name = None
self.profile = profile
self.minimum_selection = {}
if filestatuschecker is None:
self.filestatuschecker = prms.Reader.filestatuschecker
else:
self.filestatuschecker = filestatuschecker
self.forced_errors = 0
self.summary_exists = False
if not filenames:
self.file_names = []
else:
self.file_names = filenames
if not self._is_listtype(self.file_names):
self.file_names = [self.file_names]
if not selected_scans:
self.selected_scans = []
else:
self.selected_scans = selected_scans
if not self._is_listtype(self.selected_scans):
self.selected_scans = [self.selected_scans]
self.cells = []
self.status_datasets = []
self.selected_cell_number = 0
self.number_of_datasets = 0
self.capacity_modifiers = ["reset"]
self.list_of_step_types = [
"charge",
"discharge",
"cv_charge",
"cv_discharge",
"taper_charge",
"taper_discharge",
"charge_cv",
"discharge_cv",
"ocvrlx_up",
"ocvrlx_down",
"ir",
"rest",
"not_known",
]
# - options
self.force_step_table_creation = prms.Reader.force_step_table_creation
self.force_all = prms.Reader.force_all
self.sep = prms.Reader.sep
self._cycle_mode = prms.Reader.cycle_mode
# self.max_res_filesize = prms.Reader.max_res_filesize
self.load_only_summary = prms.Reader.load_only_summary
self.select_minimal = prms.Reader.select_minimal
# self.chunk_size = prms.Reader.chunk_size # 100000
# self.max_chunks = prms.Reader.max_chunks
# self.last_chunk = prms.Reader.last_chunk
self.limit_loaded_cycles = prms.Reader.limit_loaded_cycles
# self.load_until_error = prms.Reader.load_until_error
self.ensure_step_table = prms.Reader.ensure_step_table
self.daniel_number = prms.Reader.daniel_number
# self.raw_datadir = prms.Reader.raw_datadir
self.raw_datadir = prms.Paths.rawdatadir
# self.cellpy_datadir = prms.Reader.cellpy_datadir
self.cellpy_datadir = prms.Paths.cellpydatadir
# search in prm-file for res and hdf5 dirs in loadcell:
self.auto_dirs = prms.Reader.auto_dirs
# - headers and instruments
self.headers_normal = get_headers_normal()
self.headers_summary = get_headers_summary()
self.headers_step_table = get_headers_step_table()
self.table_names = None # dictionary defined in set_instruments
self.set_instrument()
# - units used by cellpy
self.cellpy_units = get_cellpy_units()
if initialize:
self.initialize()
def initialize(self):
self.logger.debug("Initializing...")
self.cells.append(Cell())
@property
def cell(self):
"""returns the DataSet instance"""
# could insert a try-except thingy here...
cell = self.cells[self.selected_cell_number]
return cell
@property
def dataset(self):
"""returns the DataSet instance"""
# could insert a try-except thingy here...
warnings.warn("The .dataset property is deprecated, please use .cell instead.")
cell = self.cells[self.selected_cell_number]
return cell
@property
def empty(self):
"""gives False if the CellpyData object is empty (or un-functional)"""
return not self.check()
# TODO: @jepe - merge the _set_xxinstrument methods into one method
def set_instrument(self, instrument=None):
"""Set the instrument (i.e. tell cellpy the file-type you use).
Args:
instrument: (str) in ["arbin", "bio-logic-csv", "bio-logic-bin",...]
Sets the instrument used for obtaining the data (i.e. sets fileformat)
"""
self.logger.debug(f"Setting instrument: {instrument}")
if instrument is None:
instrument = self.tester
if instrument in ["arbin", "arbin_res"]:
self._set_arbin()
self.tester = "arbin"
elif instrument == "arbin_sql":
self._set_arbin_sql()
self.tester = "arbin"
elif instrument == "arbin_experimental":
self._set_arbin_experimental()
self.tester = "arbin"
elif instrument in ["pec", "pec_csv"]:
self._set_pec()
self.tester = "pec"
elif instrument in ["biologics", "biologics_mpr"]:
self._set_biologic()
self.tester = "biologic"
elif instrument == "custom":
self._set_custom()
self.tester = "custom"
else:
raise Exception(f"option does not exist: '{instrument}'")
def _set_biologic(self):
warnings.warn("Experimental! Not ready for production!")
from cellpy.readers.instruments import biologics_mpr as instr
self.loader_class = instr.MprLoader()
# ----- get information --------------------------
self.raw_units = self.loader_class.get_raw_units()
self.raw_limits = self.loader_class.get_raw_limits()
# ----- create the loader ------------------------
self.loader = self.loader_class.loader
def _set_pec(self):
warnings.warn("Experimental! Not ready for production!")
from cellpy.readers.instruments import pec as instr
self.loader_class = instr.PECLoader()
# ----- get information --------------------------
self.raw_units = self.loader_class.get_raw_units()
self.raw_limits = self.loader_class.get_raw_limits()
# ----- create the loader ------------------------
self.loader = self.loader_class.loader
def _set_maccor(self):
warnings.warn("not implemented")
def _set_custom(self):
# use a custom format (csv with information lines on top)
from cellpy.readers.instruments import custom as instr
self.loader_class = instr.CustomLoader()
# ----- get information --------------------------
self.raw_units = self.loader_class.get_raw_units()
self.raw_limits = self.loader_class.get_raw_limits()
# ----- create the loader ------------------------
logging.debug("setting custom file-type (will be used when loading raw")
self.loader = self.loader_class.loader
def _set_arbin_sql(self):
warnings.warn("not implemented")
def _set_arbin(self):
from cellpy.readers.instruments import arbin as instr
self.loader_class = instr.ArbinLoader()
# ----- get information --------------------------
self.raw_units = self.loader_class.get_raw_units()
self.raw_limits = self.loader_class.get_raw_limits()
# ----- create the loader ------------------------
self.loader = self.loader_class.loader
# def _set_arbin_experimental(self):
# # Note! All these _set_instrument methods can be generalized to one
# # method. At the moment, I find it
# # more transparent to separate them into respective methods pr
# # instrument.
# from .instruments import arbin_experimental as instr
# self.loader_class = instr.ArbinLoader()
# # get information
# self.raw_units = self.loader_class.get_raw_units()
# self.raw_limits = self.loader_class.get_raw_limits()
# # send information (should improve this later)
# # loader_class.load_only_summary = self.load_only_summary
# # loader_class.select_minimal = self.select_minimal
# # loader_class.max_res_filesize = self.max_res_filesize
# # loader_class.chunk_size = self.chunk_size
# # loader_class.max_chunks = self.max_chunks
# # loader_class.last_chunk = self.last_chunk
# # loader_class.limit_loaded_cycles = self.limit_loaded_cycles
# # loader_class.load_until_error = self.load_until_error
#
# # create loader
# self.loader = self.loader_class.loader
def _create_logger(self):
from cellpy import log
self.logger = logging.getLogger(__name__)
log.setup_logging(default_level="DEBUG")
def set_cycle_mode(self, cycle_mode):
"""set the cycle mode"""
self._cycle_mode = cycle_mode
@property
def cycle_mode(self):
return self._cycle_mode
@cycle_mode.setter
def cycle_mode(self, cycle_mode):
self.logger.debug(f"-> cycle_mode: {cycle_mode}")
self._cycle_mode = cycle_mode
def set_raw_datadir(self, directory=None):
"""Set the directory containing .res-files.
        Used for setting the directory in which to look for res-files.
A valid directory name is required.
Args:
directory (str): path to res-directory
Example:
>>> d = CellpyData()
>>> directory = "MyData/Arbindata"
>>> d.set_raw_datadir(directory)
"""
if directory is None:
self.logger.info("No directory name given")
return
if not os.path.isdir(directory):
self.logger.info(directory)
self.logger.info("Directory does not exist")
return
self.raw_datadir = directory
def set_cellpy_datadir(self, directory=None):
"""Set the directory containing .hdf5-files.
Used for setting directory for looking for hdf5-files.
A valid directory name is required.
Args:
directory (str): path to hdf5-directory
Example:
>>> d = CellpyData()
>>> directory = "MyData/HDF5"
            >>> d.set_cellpy_datadir(directory)
"""
if directory is None:
self.logger.info("No directory name given")
return
if not os.path.isdir(directory):
self.logger.info("Directory does not exist")
return
self.cellpy_datadir = directory
def check_file_ids(self, rawfiles, cellpyfile):
"""Check the stats for the files (raw-data and cellpy hdf5).
This function checks if the hdf5 file and the res-files have the same
timestamps etc to find out if we need to bother to load .res -files.
Args:
cellpyfile (str): filename of the cellpy hdf5-file.
rawfiles (list of str): name(s) of raw-data file(s).
        Returns:
            False if the raw files are newer than the cellpy hdf5-file
            (an update is needed), True otherwise.
"""
txt = "Checking file ids - using '%s'" % self.filestatuschecker
self.logger.info(txt)
ids_cellpy_file = self._check_cellpy_file(cellpyfile)
self.logger.debug(f"cellpyfile ids: {ids_cellpy_file}")
if not ids_cellpy_file:
# self.logger.debug("hdf5 file does not exist - needs updating")
return False
ids_raw = self._check_raw(rawfiles)
similar = self._compare_ids(ids_raw, ids_cellpy_file)
if not similar:
# self.logger.debug("hdf5 file needs updating")
return False
else:
# self.logger.debug("hdf5 file is updated")
return True
def _check_raw(self, file_names, abort_on_missing=False):
"""Get the file-ids for the res_files."""
strip_file_names = True
check_on = self.filestatuschecker
if not self._is_listtype(file_names):
file_names = [file_names]
ids = dict()
for f in file_names:
self.logger.debug(f"checking res file {f}")
fid = FileID(f)
# self.logger.debug(fid)
if fid.name is None:
warnings.warn(f"file does not exist: {f}")
if abort_on_missing:
sys.exit(-1)
else:
if strip_file_names:
name = os.path.basename(f)
else:
name = f
if check_on == "size":
ids[name] = int(fid.size)
elif check_on == "modified":
ids[name] = int(fid.last_modified)
else:
ids[name] = int(fid.last_accessed)
return ids
def _check_cellpy_file(self, filename):
"""Get the file-ids for the cellpy_file."""
strip_filenames = True
check_on = self.filestatuschecker
self.logger.debug("checking cellpy-file")
self.logger.debug(filename)
if not os.path.isfile(filename):
self.logger.debug("cellpy-file does not exist")
return None
try:
store = pd.HDFStore(filename)
except Exception as e:
self.logger.debug(f"could not open cellpy-file ({e})")
return None
try:
fidtable = store.select("CellpyData/fidtable")
except KeyError:
self.logger.warning("no fidtable -" " you should update your hdf5-file")
fidtable = None
finally:
store.close()
if fidtable is not None:
raw_data_files, raw_data_files_length = self._convert2fid_list(fidtable)
txt = "contains %i res-files" % (len(raw_data_files))
self.logger.debug(txt)
ids = dict()
for fid in raw_data_files:
full_name = fid.full_name
size = fid.size
mod = fid.last_modified
self.logger.debug(f"fileID information for: {full_name}")
self.logger.debug(f" modified: {mod}")
self.logger.debug(f" size: {size}")
if strip_filenames:
name = os.path.basename(full_name)
else:
name = full_name
if check_on == "size":
ids[name] = int(fid.size)
elif check_on == "modified":
ids[name] = int(fid.last_modified)
else:
ids[name] = int(fid.last_accessed)
return ids
else:
return None
@staticmethod
def _compare_ids(ids_res, ids_cellpy_file):
similar = True
l_res = len(ids_res)
l_cellpy = len(ids_cellpy_file)
if l_res == l_cellpy and l_cellpy > 0:
for name, value in list(ids_res.items()):
if ids_cellpy_file[name] != value:
similar = False
else:
similar = False
return similar
def loadcell(
self,
raw_files,
cellpy_file=None,
mass=None,
summary_on_raw=False,
summary_ir=True,
summary_ocv=False,
summary_end_v=True,
only_summary=False,
only_first=False,
force_raw=False,
use_cellpy_stat_file=None,
):
"""Loads data for given cells.
Args:
raw_files (list): name of res-files
cellpy_file (path): name of cellpy-file
mass (float): mass of electrode or active material
summary_on_raw (bool): use raw-file for summary
summary_ir (bool): summarize ir
summary_ocv (bool): summarize ocv steps
summary_end_v (bool): summarize end voltage
only_summary (bool): get only the summary of the runs
only_first (bool): only use the first file fitting search criteria
force_raw (bool): only use raw-files
use_cellpy_stat_file (bool): use stat file if creating summary
from raw
Example:
>>> srnos = my_dbreader.select_batch("testing_new_solvent")
>>> cell_datas = []
>>> for srno in srnos:
>>> ... my_run_name = my_dbreader.get_cell_name(srno)
>>> ... mass = my_dbreader.get_mass(srno)
>>> ... rawfiles, cellpyfiles = \
>>> ... filefinder.search_for_files(my_run_name)
>>> ... cell_data = cellreader.CellpyData()
>>> ... cell_data.loadcell(raw_files=rawfiles,
>>> ... cellpy_file=cellpyfiles)
>>> ... cell_data.set_mass(mass)
>>> ... if not cell_data.summary_exists:
>>> ... cell_data.make_summary() # etc. etc.
>>> ... cell_datas.append(cell_data)
>>>
"""
# This is a part of a dramatic API change. It will not be possible to
# load more than one set of datasets (i.e. one single cellpy-file or
# several raw-files that will be automatically merged)
self.logger.info("Started cellpy.cellreader.loadcell")
if cellpy_file is None:
similar = False
elif force_raw:
similar = False
else:
similar = self.check_file_ids(raw_files, cellpy_file)
self.logger.debug("checked if the files were similar")
if only_summary:
self.load_only_summary = True
else:
self.load_only_summary = False
if not similar:
self.logger.debug("cellpy file(s) needs updating - loading raw")
self.logger.info("Loading raw-file")
self.logger.debug(raw_files)
self.from_raw(raw_files)
self.logger.debug("loaded files")
# Check if the run was loaded ([] if empty)
if self.status_datasets:
if mass:
self.set_mass(mass)
if summary_on_raw:
self.make_summary(
all_tests=False,
find_ocv=summary_ocv,
find_ir=summary_ir,
find_end_voltage=summary_end_v,
use_cellpy_stat_file=use_cellpy_stat_file,
)
else:
self.logger.warning("Empty run!")
else:
self.load(cellpy_file)
if mass:
self.set_mass(mass)
return self
def from_raw(self, file_names=None, **kwargs):
"""Load a raw data-file.
Args:
file_names (list of raw-file names): uses CellpyData.file_names if
None. If the list contains more than one file name, then the
runs will be merged together.
"""
# This function only loads one test at a time (but could contain several
# files). The function from_res() also implements loading several
# datasets (using list of lists as input).
if file_names:
self.file_names = file_names
if not isinstance(file_names, (list, tuple)):
self.file_names = [file_names]
# file_type = self.tester
raw_file_loader = self.loader
set_number = 0
test = None
counter = 0
self.logger.debug("start iterating through file(s)")
for f in self.file_names:
self.logger.debug("loading raw file:")
self.logger.debug(f"{f}")
new_tests = raw_file_loader(f, **kwargs)
if new_tests:
if test is not None:
self.logger.debug("continuing reading files...")
_test = self._append(test[set_number], new_tests[set_number])
if not _test:
self.logger.warning(f"EMPTY TEST: {f}")
continue
test[set_number] = _test
self.logger.debug("added this test - started merging")
for j in range(len(new_tests[set_number].raw_data_files)):
raw_data_file = new_tests[set_number].raw_data_files[j]
file_size = new_tests[set_number].raw_data_files_length[j]
test[set_number].raw_data_files.append(raw_data_file)
test[set_number].raw_data_files_length.append(file_size)
counter += 1
if counter > 10:
self.logger.debug("ERROR? Too many files to merge")
raise ValueError(
"Too many files to merge - "
"could be a p2-p3 zip thing"
)
else:
self.logger.debug("getting data from first file")
if new_tests[set_number].no_data:
self.logger.debug("NO DATA")
else:
test = new_tests
else:
self.logger.debug("NOTHING LOADED")
self.logger.debug("finished loading the raw-files")
test_exists = False
if test:
if test[0].no_data:
self.logger.debug(
"the first dataset (or only dataset) loaded from the raw data file is empty"
)
else:
test_exists = True
if test_exists:
if not prms.Reader.sorted_data:
self.logger.debug("sorting data")
test[set_number] = self._sort_data(test[set_number])
self.cells.append(test[set_number])
else:
self.logger.warning("No new datasets added!")
self.number_of_datasets = len(self.cells)
self.status_datasets = self._validate_datasets()
self._invent_a_name()
return self
def from_res(self, filenames=None, check_file_type=True):
"""Convenience function for loading arbin-type data into the
datastructure.
Args:
filenames: ((lists of) list of raw-file names): uses
cellpy.file_names if None.
If list-of-list, it loads each list into separate datasets.
The files in the inner list will be merged.
check_file_type (bool): check file type if True
(res-, or cellpy-format)
"""
raise DeprecatedFeature
def _validate_datasets(self, level=0):
self.logger.debug("validating test")
level = 0
# simple validation for finding empty datasets - should be expanded to
# find not-complete datasets, datasets with missing prms etc
v = []
if level == 0:
for test in self.cells:
# check that it contains all the necessary headers
# (and add missing ones)
# test = self._clean_up_normal_table(test)
# check that the test is not empty
v.append(self._is_not_empty_dataset(test))
self.logger.debug(f"validation array: {v}")
return v
def check(self):
"""Returns False if no datasets exists or if one or more of the datasets
are empty"""
if len(self.status_datasets) == 0:
return False
if all(self.status_datasets):
return True
return False
def _is_not_empty_dataset(self, dataset):
if dataset is self._empty_dataset():
return False
else:
return True
def _clean_up_normal_table(self, test=None, dataset_number=None):
# check that test contains all the necessary headers
# (and add missing ones)
raise NotImplementedError
def _report_empty_dataset(self):
self.logger.info("Empty set")
@staticmethod
def _empty_dataset():
return None
def _invent_a_name(self, filename=None, override=False):
if filename is None:
self.name = "nameless"
return
if self.name and not override:
return
path = Path(filename)
self.name = path.with_suffix("").name
def load(self, cellpy_file, parent_level=None, return_cls=True):
"""Loads a cellpy file.
Args:
cellpy_file (path, str): Full path to the cellpy file.
parent_level (str, optional): Parent level.
return_cls (bool): Return the class.
"""
try:
self.logger.debug("loading cellpy-file (hdf5):")
self.logger.debug(cellpy_file)
new_datasets = self._load_hdf5(cellpy_file, parent_level)
self.logger.debug("cellpy-file loaded")
except AttributeError:
new_datasets = []
self.logger.warning(
"This cellpy-file version is not supported by"
"current reader (try to update cellpy)."
)
if new_datasets:
for dataset in new_datasets:
self.cells.append(dataset)
else:
# raise LoadError
self.logger.warning("Could not load")
self.logger.warning(str(cellpy_file))
self.number_of_datasets = len(self.cells)
self.status_datasets = self._validate_datasets()
self._invent_a_name(cellpy_file)
if return_cls:
return self
def _load_hdf5(self, filename, parent_level=None):
"""Load a cellpy-file.
Args:
filename (str): Name of the cellpy file.
parent_level (str) (optional): name of the parent level
(defaults to "CellpyData")
Returns:
loaded datasets (DataSet-object)
"""
# TODO: option for reading version and relabelling dfsummary etc
# if the version is older
data = None
if parent_level is None:
parent_level = prms._cellpyfile_root
if parent_level != prms._cellpyfile_root:
self.logger.debug(
"Using non-default parent label for the "
"hdf-store: {}".format(parent_level)
)
if CELLPY_FILE_VERSION > 4:
raw_dir = prms._cellpyfile_raw
step_dir = prms._cellpyfile_step
summary_dir = prms._cellpyfile_summary
meta_dir = "/info" # hard-coded
fid_dir = prms._cellpyfile_fid
else:
raw_dir = "/raw"
step_dir = "/step_table"
summary_dir = "/dfsummary"
meta_dir = "/info"
fid_dir = "/fidtable"
if not os.path.isfile(filename):
self.logger.info(f"File does not exist: {filename}")
raise IOError
with pd.HDFStore(filename) as store:
data, meta_table = self._create_initial_data_set_from_cellpy_file(
meta_dir, parent_level, store
)
if data.cellpy_file_version < MINIMUM_CELLPY_FILE_VERSION:
raise WrongFileVersion
if data.cellpy_file_version > CELLPY_FILE_VERSION:
raise WrongFileVersion
if data.cellpy_file_version < CELLPY_FILE_VERSION:
if data.cellpy_file_version < 5:
self.logger.debug(f"version: {data.cellpy_file_version}")
_raw_dir = "/dfdata"
_step_dir = "/step_table"
_summary_dir = "/dfsummary"
_fid_dir = "/fidtable"
self._check_keys_in_cellpy_file(
meta_dir, parent_level, _raw_dir, store, _summary_dir
)
self._extract_summary_from_cellpy_file(
data, parent_level, store, _summary_dir
)
self._extract_raw_from_cellpy_file(
data, parent_level, _raw_dir, store
)
self._extract_steps_from_cellpy_file(
data, parent_level, _step_dir, store
)
fid_table, fid_table_selected = self._extract_fids_from_cellpy_file(
_fid_dir, parent_level, store
)
self._extract_meta_from_cellpy_file(data, meta_table, filename)
warnings.warn(
"Loaded old cellpy-file version (<5). "
"Please update and save again."
)
else:
self._check_keys_in_cellpy_file(
meta_dir, parent_level, raw_dir, store, summary_dir
)
self._extract_summary_from_cellpy_file(
data, parent_level, store, summary_dir
)
self._extract_raw_from_cellpy_file(data, parent_level, raw_dir, store)
self._extract_steps_from_cellpy_file(
data, parent_level, step_dir, store
)
fid_table, fid_table_selected = self._extract_fids_from_cellpy_file(
fid_dir, parent_level, store
)
self._extract_meta_from_cellpy_file(data, meta_table, filename)
if fid_table_selected:
data.raw_data_files, data.raw_data_files_length = self._convert2fid_list(
fid_table
)
else:
data.raw_data_files = None
data.raw_data_files_length = None
# this does not yet allow multiple sets
new_tests = [
data
] # but cellpy is ready when that time comes (if it ever happens)
return new_tests
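# Illustrative note (not part of the class, file name is hypothetical): the
# cellpy HDF5 layout read above can be inspected directly with pandas; the
# pre-v5 key names are the ones hard-coded in this method, newer files take
# them from prms instead.
#   with pd.HDFStore("some_cell.h5") as store:
#       print(store.keys())
#       # e.g. ['/CellpyData/dfdata', '/CellpyData/dfsummary',
#       #       '/CellpyData/step_table', '/CellpyData/info', '/CellpyData/fidtable']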
def _create_initial_data_set_from_cellpy_file(self, meta_dir, parent_level, store):
# Remark that this function is run before selecting loading method
# based on version. If you change the meta_dir prm to something else than
# "/info" it will most likely fail.
data = Cell()
meta_table = None
try:
meta_table = store.select(parent_level + meta_dir)
except KeyError as e:
self.logger.info("This file is VERY old - no info given here")
self.logger.info("You should convert the files to a newer version!")
self.logger.debug(e)
try:
data.cellpy_file_version = self._extract_from_dict(
meta_table, "cellpy_file_version"
)
except Exception as e:
data.cellpy_file_version = 0
warnings.warn(f"Unhandled exception raised: {e}")
self.logger.debug(f"cellpy file version. {data.cellpy_file_version}")
return data, meta_table
def _check_keys_in_cellpy_file(
self, meta_dir, parent_level, raw_dir, store, summary_dir
):
required_keys = [raw_dir, summary_dir, meta_dir]
required_keys = ["/" + parent_level + _ for _ in required_keys]
for key in required_keys:
if key not in store.keys():
self.logger.info(
f"This cellpy-file is not good enough - "
f"at least one key is missing: {key}"
)
raise Exception(
f"OH MY GOD! At least one crucial key" f"is missing {key}!"
)
self.logger.debug(f"Keys in current cellpy-file: {store.keys()}")
def _extract_raw_from_cellpy_file(self, data, parent_level, raw_dir, store):
data.raw = store.select(parent_level + raw_dir)
def _extract_summary_from_cellpy_file(self, data, parent_level, store, summary_dir):
data.summary = store.select(parent_level + summary_dir)
def _extract_fids_from_cellpy_file(self, fid_dir, parent_level, store):
try:
fid_table = store.select(
parent_level + fid_dir
) # remark! changed spelling from
# lower letter to camel-case!
fid_table_selected = True
except Exception as e:
self.logger.debug(e)
self.logger.debug("could not get fid from cellpy-file")
fid_table = []
warnings.warn("no fid_table - you should update your cellpy-file")
fid_table_selected = False
return fid_table, fid_table_selected
def _extract_steps_from_cellpy_file(self, data, parent_level, step_dir, store):
try:
data.steps = store.select(parent_level + step_dir)
except Exception as e:
self.logging.debug("could not get steps from cellpy-file")
data.steps = pd.DataFrame()
warnings.warn(f"Unhandled exception raised: {e}")
def _extract_meta_from_cellpy_file(self, data, meta_table, filename):
# get attributes from meta table
for attribute in ATTRS_CELLPYFILE:
value = self._extract_from_dict(meta_table, attribute)
# some fixes due to errors propagated into the cellpy-files
if attribute == "creator":
if not isinstance(value, str):
value = "no_name"
if attribute == "test_no":
if not isinstance(value, (int, float)):
value = 0
setattr(data, attribute, value)
if data.mass is None:
data.mass = 1.0
else:
data.mass_given = True
data.loaded_from = str(filename)
# hack to allow the renaming of tests to datasets
try:
name = self._extract_from_dict_hard(meta_table, "name")
if not isinstance(name, str):
name = "no_name"
data.name = name
except KeyError:
self.logger.debug(f"missing key in meta table: name")
print(meta_table)
warnings.warn("OLD-TYPE: Recommend to save in new format!")
try:
name = self._extract_from_dict(meta_table, "test_name")
except Exception as e:
name = "no_name"
self.logger.debug("name set to 'no_name")
warnings.warn(f"Unhandled exception raised: {e}")
data.name = name
# unpacking the raw data limits
for key in data.raw_limits:
try:
data.raw_limits[key] = self._extract_from_dict_hard(meta_table, key)
except KeyError:
self.logger.debug(f"missing key in meta_table: {key}")
warnings.warn("OLD-TYPE: Recommend to save in new format!")
@staticmethod
def _extract_from_dict(t, x, default_value=None):
try:
value = t[x].values
if value:
value = value[0]
except KeyError:
value = default_value
return value
@staticmethod
def _extract_from_dict_hard(t, x):
value = t[x].values
if value:
value = value[0]
return value
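# Illustrative note (not part of the class, hypothetical values): both helpers
# pull a single value out of a one-row meta table; the "hard" variant raises
# KeyError for missing columns while the soft variant returns a default.
#   meta = pd.DataFrame({"mass": [0.58]})
#   CellpyData._extract_from_dict(meta, "mass")            # -> 0.58
#   CellpyData._extract_from_dict(meta, "creator", "n/a")  # -> "n/a"
#   CellpyData._extract_from_dict_hard(meta, "creator")    # raises KeyError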
def _create_infotable(self, dataset_number=None):
# needed for saving class/DataSet to hdf5
dataset_number = self._validate_dataset_number(dataset_number)
if dataset_number is None:
self._report_empty_dataset()
return
test = self.get_cell(dataset_number)
infotable = collections.OrderedDict()
for attribute in ATTRS_CELLPYFILE:
value = getattr(test, attribute)
infotable[attribute] = [value]
infotable["cellpy_file_version"] = [CELLPY_FILE_VERSION]
limits = test.raw_limits
for key in limits:
infotable[key] = limits[key]
infotable = pd.DataFrame(infotable)
self.logger.debug("_create_infotable: fid")
fidtable = collections.OrderedDict()
fidtable["raw_data_name"] = []
fidtable["raw_data_full_name"] = []
fidtable["raw_data_size"] = []
fidtable["raw_data_last_modified"] = []
fidtable["raw_data_last_accessed"] = []
fidtable["raw_data_last_info_changed"] = []
fidtable["raw_data_location"] = []
fidtable["raw_data_files_length"] = []
fids = test.raw_data_files
fidtable["raw_data_fid"] = fids
if fids:
for fid, length in zip(fids, test.raw_data_files_length):
fidtable["raw_data_name"].append(fid.name)
fidtable["raw_data_full_name"].append(fid.full_name)
fidtable["raw_data_size"].append(fid.size)
fidtable["raw_data_last_modified"].append(fid.last_modified)
fidtable["raw_data_last_accessed"].append(fid.last_accessed)
fidtable["raw_data_last_info_changed"].append(fid.last_info_changed)
fidtable["raw_data_location"].append(fid.location)
fidtable["raw_data_files_length"].append(length)
else:
warnings.warn("seems you lost info about your raw-data")
fidtable = pd.DataFrame(fidtable)
return infotable, fidtable
def _convert2fid_list(self, tbl):
self.logger.debug("converting loaded fidtable to FileID object")
fids = []
lengths = []
counter = 0
for item in tbl["raw_data_name"]:
fid = FileID()
fid.name = item
fid.full_name = tbl["raw_data_full_name"][counter]
fid.size = tbl["raw_data_size"][counter]
fid.last_modified = tbl["raw_data_last_modified"][counter]
fid.last_accessed = tbl["raw_data_last_accessed"][counter]
fid.last_info_changed = tbl["raw_data_last_info_changed"][counter]
fid.location = tbl["raw_data_location"][counter]
length = tbl["raw_data_files_length"][counter]
counter += 1
fids.append(fid)
lengths.append(length)
if counter < 1:
self.logger.debug("info about raw files missing")
return fids, lengths
def merge(self, datasets=None, separate_datasets=False):
"""This function merges datasets into one set."""
self.logger.info("Merging")
if separate_datasets:
warnings.warn(
"The option seperate_datasets=True is"
"not implemented yet. Performing merging, but"
"neglecting the option."
)
else:
if datasets is None:
datasets = list(range(len(self.cells)))
first = True
for dataset_number in datasets:
if first:
dataset = self.cells[dataset_number]
first = False
else:
dataset = self._append(dataset, self.cells[dataset_number])
for raw_data_file, file_size in zip(
self.cells[dataset_number].raw_data_files,
self.cells[dataset_number].raw_data_files_length,
):
dataset.raw_data_files.append(raw_data_file)
dataset.raw_data_files_length.append(file_size)
self.cells = [dataset]
self.number_of_datasets = 1
return self
def _append(self, t1, t2, merge_summary=True, merge_step_table=True):
self.logger.debug(
f"merging two datasets (merge summary = {merge_summary}) "
f"(merge step table = {merge_step_table})"
)
if t1.raw.empty:
self.logger.debug("OBS! the first dataset is empty")
if t2.raw.empty:
t1.merged = True
self.logger.debug("the second dataset was empty")
self.logger.debug(" -> merged contains only first")
return t1
test = t1
# finding diff of time
start_time_1 = t1.start_datetime
start_time_2 = t2.start_datetime
diff_time = xldate_as_datetime(start_time_2) - xldate_as_datetime(start_time_1)
diff_time = diff_time.total_seconds()
if diff_time < 0:
self.logger.warning("Wow! your new dataset is older than the old!")
self.logger.debug(f"diff time: {diff_time}")
sort_key = self.headers_normal.datetime_txt # DateTime
# mod data points for set 2
data_point_header = self.headers_normal.data_point_txt
try:
last_data_point = max(t1.raw[data_point_header])
except ValueError:
last_data_point = 0
t2.raw[data_point_header] = t2.raw[data_point_header] + last_data_point
# mod cycle index for set 2
cycle_index_header = self.headers_normal.cycle_index_txt
try:
last_cycle = max(t1.raw[cycle_index_header])
except ValueError:
last_cycle = 0
t2.raw[cycle_index_header] = t2.raw[cycle_index_header] + last_cycle
# mod test time for set 2
test_time_header = self.headers_normal.test_time_txt
t2.raw[test_time_header] = t2.raw[test_time_header] + diff_time
# merging
if not t1.raw.empty:
raw2 = pd.concat([t1.raw, t2.raw], ignore_index=True)
# checking if we already have made a summary file of these datasets
# (to be used if merging summaries (but not properly implemented yet))
if t1.summary_made and t2.summary_made:
dfsummary_made = True
else:
dfsummary_made = False
# checking if we already have made step tables for these datasets
if t1.steps_made and t2.steps_made:
step_table_made = True
else:
step_table_made = False
if merge_summary:
# check if (self-made) summary exists.
self_made_summary = True
try:
test_it = t1.summary[cycle_index_header]
except KeyError as e:
self_made_summary = False
try:
test_it = t2.summary[cycle_index_header]
except KeyError as e:
self_made_summary = False
if self_made_summary:
# mod cycle index for set 2
last_cycle = max(t1.summary[cycle_index_header])
t2.summary[cycle_index_header] = (
t2.summary[cycle_index_header] + last_cycle
)
# mod test time for set 2
t2.summary[test_time_header] = (
t2.summary[test_time_header] + diff_time
)
# to-do: mod all the cumsum stuff in the summary (best to make
# summary after merging)
else:
t2.summary[data_point_header] = (
t2.summary[data_point_header] + last_data_point
)
summary2 = pd.concat([t1.summary, t2.summary], ignore_index=True)
test.summary = summary2
if merge_step_table:
if step_table_made:
cycle_index_header = self.headers_normal.cycle_index_txt
t2.steps[self.headers_step_table.cycle] = (
t2.raw[self.headers_step_table.cycle] + last_cycle
)
steps2 = pd.concat([t1.steps, t2.steps], ignore_index=True)
test.steps = steps2
else:
self.logger.debug(
"could not merge step tables "
"(non-existing) -"
"create them first!"
)
test.no_cycles = max(raw2[cycle_index_header])
test.raw = raw2
else:
test.no_cycles = max(t2.raw[cycle_index_header])
test = t2
test.merged = True
self.logger.debug(" -> merged with new dataset")
# TODO: @jepe - update merging for more variables
return test
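# Illustrative note (not part of the class, hypothetical numbers): when run 2
# is appended onto run 1, its counters are shifted by the last values of run 1
# so the merged raw frame keeps increasing monotonically, e.g. with
# last data point 1500, last cycle 12 and a start-time difference of 3600 s:
#   -> run 2 data points become 1501, 1502, ...
#   -> run 2 cycle indices become 13, 14, ...
#   -> run 2 test time is shifted by +3600 s before pd.concat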
# --------------iterate-and-find-in-data-----------------------------------
def _validate_dataset_number(self, n, check_for_empty=True):
# Returns dataset_number (or None if empty)
# Remark! _is_not_empty_dataset returns True or False
if not len(self.cells):
self.logger.info(
"Can't see any datasets! Are you sure you have " "loaded anything?"
)
return
if n is not None:
v = n
else:
if self.selected_cell_number is None:
v = 0
else:
v = self.selected_cell_number
if check_for_empty:
not_empty = self._is_not_empty_dataset(self.cells[v])
if not_empty:
return v
else:
return None
else:
return v
def _validate_step_table(self, dataset_number=None, simple=False):
dataset_number = self._validate_dataset_number(dataset_number)
if dataset_number is None:
self._report_empty_dataset()
return
step_index_header = self.headers_normal.step_index_txt
self.logger.debug("-validating step table")
d = self.cells[dataset_number].raw
s = self.cells[dataset_number].steps
if not self.cells[dataset_number].steps_made:
return False
no_cycles_raw = np.amax(d[self.headers_normal.cycle_index_txt])
headers_step_table = self.headers_step_table
no_cycles_step_table = np.amax(s[headers_step_table.cycle])
if simple:
self.logger.debug(" (simple)")
if no_cycles_raw == no_cycles_step_table:
return True
else:
return False
else:
validated = True
if no_cycles_raw != no_cycles_step_table:
self.logger.debug(" differ in no. of cycles")
validated = False
else:
for j in range(1, no_cycles_raw + 1):
cycle_number = j
no_steps_raw = len(
np.unique(
d.loc[
d[self.headers_normal.cycle_index_txt] == cycle_number,
self.headers_normal.step_index_txt,
]
)
)
no_steps_step_table = len(
s.loc[
s[headers_step_table.cycle] == cycle_number,
headers_step_table.step,
]
)
if no_steps_raw != no_steps_step_table:
validated = False
# txt = ("Error in step table "
# "(cycle: %i) d: %i, s:%i)" % (
# cycle_number,
# no_steps_raw,
# no_steps_steps
# )
# )
#
# self.logger.debug(txt)
return validated
def print_steps(self, dataset_number=None):
"""Print the step table."""
dataset_number = self._validate_dataset_number(dataset_number)
if dataset_number is None:
self._report_empty_dataset()
return
st = self.cells[dataset_number].steps
print(st)
def get_step_numbers(
self,
steptype="charge",
allctypes=True,
pdtype=False,
cycle_number=None,
dataset_number=None,
trim_taper_steps=None,
steps_to_skip=None,
steptable=None,
):
# TODO: @jepe - include sub_steps here
# TODO: @jepe - include option for not selecting taper steps here
"""Get the step numbers of selected type.
Returns the selected step_numbers for the selected type of step(s).
Args:
steptype (string): string identifying type of step.
allctypes (bool): get all types of charge (or discharge).
pdtype (bool): return results as pandas.DataFrame
cycle_number (int): selected cycle, selects all if not set.
dataset_number (int): test number (default first)
(usually not used).
trim_taper_steps (integer): number of taper steps to skip (counted
from the end, i.e. 1 means skip last step in each cycle).
steps_to_skip (list): step numbers that should not be included.
steptable (pandas.DataFrame): optional steptable
Returns:
A dictionary containing a list of step numbers corresponding
to the selected steptype for the cycle(s).
Returns a pandas.DataFrame instead of a dict of lists if pdtype is
set to True. The frame is a sub-set of the step-table frame
(i.e. all the same columns, only filtered by rows).
Example:
>>> my_charge_steps = CellpyData.get_step_numbers(
>>> "charge",
>>> cycle_number = 3
>>> )
>>> print(my_charge_steps)
{3: [5,8]}
"""
t0 = time.time()
self.logger.debug("Trying to get step-types")
if steps_to_skip is None:
steps_to_skip = []
if steptable is None:
self.logger.debug("steptable=None")
dataset_number = self._validate_dataset_number(dataset_number)
self.logger.debug(f"dt 1: {time.time() - t0}")
if dataset_number is None:
self._report_empty_dataset()
return
if not self.cells[dataset_number].steps_made:
self.logger.debug("steps is not made")
if self.force_step_table_creation or self.force_all:
self.logger.debug("creating step_table for")
self.logger.debug(self.cells[dataset_number].loaded_from)
# print "CREAING STEP-TABLE"
self.make_step_table(dataset_number=dataset_number)
else:
self.logger.info(
"ERROR! Cannot use get_steps: create step_table first"
)
self.logger.info("You could use find_step_numbers method instead")
self.logger.info("(but I don't recommend it)")
return None
# check if steptype is valid
steptype = steptype.lower()
steptypes = []
helper_step_types = ["ocv", "charge_discharge"]
valid_step_type = True
self.logger.debug(f"dt 2: {time.time() - t0}")
if steptype in self.list_of_step_types:
steptypes.append(steptype)
else:
txt = "%s is not a valid core steptype" % steptype
if steptype in helper_step_types:
txt = "but a helper steptype"
if steptype == "ocv":
steptypes.append("ocvrlx_up")
steptypes.append("ocvrlx_down")
elif steptype == "charge_discharge":
steptypes.append("charge")
steptypes.append("discharge")
else:
valid_step_type = False
self.logger.debug(txt)
if not valid_step_type:
return None
# in case of selection allctypes, then modify charge, discharge
if allctypes:
add_these = []
for st in steptypes:
if st in ["charge", "discharge"]:
st1 = st + "_cv"
add_these.append(st1)
st1 = "cv_" + st
add_these.append(st1)
for st in add_these:
steptypes.append(st)
# self.logger.debug("Your steptypes:")
# self.logger.debug(steptypes)
if steptable is None:
st = self.cells[dataset_number].steps
else:
st = steptable
shdr = self.headers_step_table
# retrieving cycle numbers
self.logger.debug(f"dt 3: {time.time() - t0}")
if cycle_number is None:
cycle_numbers = self.get_cycle_numbers(dataset_number, steptable=steptable)
else:
if isinstance(cycle_number, (list, tuple)):
cycle_numbers = cycle_number
else:
cycle_numbers = [cycle_number]
if trim_taper_steps is not None:
trim_taper_steps = -trim_taper_steps
self.logger.debug("taper steps to trim given")
if pdtype:
self.logger.debug("Return pandas dataframe.")
if trim_taper_steps:
self.logger.info(
"Trimming taper steps is currently not"
"possible when returning pd.DataFrame. "
"Do it manually insteaD."
)
out = st[st[shdr.type].isin(steptypes) & st[shdr.cycle].isin(cycle_numbers)]
return out
# if not pdtype, return a dict instead
# self.logger.debug("out as dict; out[cycle] = [s1,s2,...]")
# self.logger.debug("(same behaviour as find_step_numbers)")
# self.logger.debug("return dict of lists")
# self.logger.warning(
# "returning dict will be deprecated",
# )
out = dict()
self.logger.debug(f"return a dict")
self.logger.debug(f"dt 4: {time.time() - t0}")
for cycle in cycle_numbers:
steplist = []
for s in steptypes:
step = st[(st[shdr.type] == s) & (st[shdr.cycle] == cycle)][
shdr.step
].tolist()
for newstep in step[:trim_taper_steps]:
if newstep in steps_to_skip:
self.logger.debug(f"skipping step {newstep}")
else:
steplist.append(int(newstep))
if not steplist:
steplist = [0]
out[cycle] = steplist
self.logger.debug(f"dt tot: {time.time() - t0}")
return out
def load_step_specifications(self, file_name, short=False, dataset_number=None):
""" Load a table that contains step-type definitions.
This function loads a file containing a specification for each step or
for each (cycle_number, step_number) combinations if short==False. The
step_cycle specifications that are allowed are stored in the variable
cellreader.list_of_step_types.
"""
dataset_number = self._validate_dataset_number(dataset_number)
if dataset_number is None:
self._report_empty_dataset()
return
# if short:
# # the table only consists of steps (not cycle,step pairs) assuming
# # that the step numbers uniquely defines step type (this is true
# # for arbin at least).
# raise NotImplementedError
step_specs = pd.read_csv(file_name, sep=prms.Reader.sep)
import logging
import re
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scanpy as sc
from captum.attr import IntegratedGradients
from pandas import read_excel
from scipy.stats import mannwhitneyu
from sklearn.metrics import precision_recall_curve, roc_curve
import scanpypip.utils as ut
def highly_variable_genes(data,
layer=None, n_top_genes=None,
min_disp=0.5, max_disp=np.inf, min_mean=0.0125, max_mean=3,
span=0.3, n_bins=20, flavor='seurat', subset=False, inplace=True, batch_key=None, PCA_graph=False, PCA_dim = 50, k = 10, n_pcs=40):
adata = sc.AnnData(data)
adata.var_names_make_unique() # this is unnecessary if using `var_names='gene_ids'` in `sc.read_10x_mtx`
adata.obs_names_make_unique()
if n_top_genes!=None:
sc.pp.highly_variable_genes(adata,layer=layer,n_top_genes=n_top_genes,
span=span, n_bins=n_bins, flavor='seurat_v3', subset=subset, inplace=inplace, batch_key=batch_key)
else:
sc.pp.log1p(adata)
sc.pp.highly_variable_genes(adata,
layer=layer,n_top_genes=n_top_genes,
min_disp=min_disp, max_disp=max_disp, min_mean=min_mean, max_mean=max_mean,
span=span, n_bins=n_bins, flavor=flavor, subset=subset, inplace=inplace, batch_key=batch_key)
if PCA_graph == True:
sc.tl.pca(adata,n_comps=PCA_dim)
X_pca = adata.obsm["X_pca"]
sc.pp.neighbors(adata, n_neighbors=k, n_pcs=n_pcs)
return adata.var.highly_variable,adata,X_pca
return adata.var.highly_variable,adata
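# Hedged usage sketch (not part of the original pipeline): shows how the
# helper above might be called on a plain cells-x-genes count matrix. The
# toy matrix and parameter values are placeholders, and flavor='seurat_v3'
# (used when n_top_genes is given) assumes the optional scikit-misc package
# is installed.
def _example_highly_variable_genes():
    counts = np.random.poisson(1.0, size=(200, 500)).astype(float)  # toy count matrix
    hvg_mask, adata = highly_variable_genes(counts, n_top_genes=100)
    print(int(hvg_mask.sum()), "genes flagged as highly variable")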
def save_arguments(args,now):
args_strings = re.sub("\'|\"|Namespace|\(|\)", "", str(args)).split(sep=', ')
args_dict = dict()
for item in args_strings:
items = item.split(sep='=')
args_dict[items[0]] = items[1]
args_df = pd.DataFrame(args_dict,index=[now]).T
args_df.to_csv("saved/logs/arguments_" +now + '.csv')
return args_df
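# Hedged usage sketch (not part of the original pipeline): save_arguments
# expects an argparse-style Namespace plus a timestamp string and writes to
# "saved/logs/", which must already exist. The argument names are illustrative.
def _example_save_arguments():
    import argparse
    import time
    parser = argparse.ArgumentParser()
    parser.add_argument("--lr", type=float, default=0.001)
    parser.add_argument("--epochs", type=int, default=100)
    args = parser.parse_args([])
    now = time.strftime("%Y-%m-%d-%H-%M-%S")
    print(save_arguments(args, now))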
def plot_label_hist(Y,save=None):
# the histogram of the data
n, bins, patches = plt.hist(Y, 50, density=True, facecolor='g', alpha=0.75)
plt.xlabel('Y values')
plt.ylabel('Probability')
plt.title('Histogram of target')
# plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
# plt.xlim(40, 160)
# plt.ylim(0, 0.03)
# plt.grid(True)
if save == None:
plt.show()
else:
plt.savefig(save)
# plot no skill and model roc curves
def plot_roc_curve(test_y,naive_probs,model_probs,title="",path="figures/roc_curve.pdf"):
# plot naive skill roc curve
fpr, tpr, _ = roc_curve(test_y, naive_probs)
plt.plot(fpr, tpr, linestyle='--', label='Random')
# plot model roc curve
fpr, tpr, _ = roc_curve(test_y, model_probs)
plt.plot(fpr, tpr, marker='.', label='Prediction')
# axis labels
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
# show the legend
plt.legend()
plt.title(title)
# show the plot
if path == None:
plt.show()
else:
plt.savefig(path)
plt.close()
# plot no skill and model precision-recall curves
def plot_pr_curve(test_y,model_probs,selected_label = 1,title="",path="figures/prc_curve.pdf"):
# calculate the no skill line as the proportion of the positive class
no_skill = len(test_y[test_y==selected_label]) / len(test_y)
# plot the no skill precision-recall curve
plt.plot([0, 1], [no_skill, no_skill], linestyle='--', label='Random')
# plot model precision-recall curve
precision, recall, _ = precision_recall_curve(test_y, model_probs)
plt.plot(recall, precision, marker='.', label='Prediction')
# axis labels
plt.xlabel('Recall')
plt.ylabel('Precision')
# show the legend
plt.legend()
plt.title(title)
# show the plot
if path == None:
plt.show()
else:
plt.savefig(path)
plt.close()
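# Hedged usage sketch (not part of the original pipeline): both plotting
# helpers take binary ground-truth labels and probability scores. The
# synthetic data is a placeholder and the output paths assume a "figures/"
# directory exists.
def _example_roc_pr_plots():
    rng = np.random.RandomState(0)
    test_y = rng.randint(0, 2, size=200)
    naive_probs = np.full(200, test_y.mean())  # constant, no-skill baseline
    model_probs = np.clip(test_y * 0.6 + rng.rand(200) * 0.4, 0, 1)  # loosely informative
    plot_roc_curve(test_y, naive_probs, model_probs, title="demo", path="figures/demo_roc.pdf")
    plot_pr_curve(test_y, model_probs, selected_label=1, title="demo", path="figures/demo_prc.pdf")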
def specific_process(adata,dataname="",**kargs):
if dataname =="GSE117872":
select_origin = kargs['select_origin']
adata = process_117872(adata,select_origin=select_origin)
elif dataname == "GSE122843":
adata = process_122843(adata)
elif dataname == "GSE110894":
adata = process_110894(adata)
elif dataname == "GSE112274":
adata = process_112274(adata)
elif dataname == "GSE108383":
adata = process_108383(adata)
elif dataname == "GSE140440":
adata = process_140440(adata)
elif dataname == "GSE129730":
adata = process_129730(adata)
elif dataname == "GSE149383":
adata = process_149383(adata)
return adata
def process_108383(adata,**kargs):
obs_names = adata.obs.index
annotation_dict = {}
for section in [0,1,2,3,4]:
svals = [index.split("_")[section] for index in obs_names]
annotation_dict["name_section_"+str(section+1)] = svals
df_annotation=pd.DataFrame(annotation_dict,index=obs_names)
adata.obs=df_annotation
# adata.obs['name_section_3'].replace("par", "sensitive", inplace=True)
# adata.obs['name_section_3'].replace("br", "resistant", inplace=True)
# adata.obs['sensitive']=adata.obs['name_section_3']
sensitive = [int(row.find("br")==-1) for row in adata.obs.loc[:,"name_section_3"]]
sens_ = ['Resistant' if (row.find("br")!=-1) else 'Sensitive' for row in adata.obs.loc[:,"name_section_3"]]
#adata.obs.loc[adata.obs.cluster=="Holiday","cluster"] = "Sensitive"
adata.obs['sensitive'] = sensitive
adata.obs['sensitivity'] = sens_
# Cluster de score
pval = 0.05
n_genes = 50
if "pval_thres" in kargs:
pval=kargs['pval_thres']
if "num_de" in kargs:
n_genes = kargs['num_de']
adata = de_score(adata=adata,clustername="sensitivity",pval=pval,n=n_genes)
return adata
def process_117872(adata,**kargs):
annotation = pd.read_csv('data/GSE117872/GSE117872_good_Data_cellinfo.txt',sep="\t",index_col="groups")
for item in annotation.columns:
#adata.obs[str(item)] = annotation.loc[:,item].convert_dtypes('category').values
adata.obs[str(item)] = annotation.loc[:,item].astype("category")
if "select_origin" in kargs:
origin = kargs['select_origin']
if origin!="all":
selected=adata.obs['origin']==origin
selected=selected.to_numpy('bool')
adata = adata[selected, :]
sensitive = [int(row.find("Resistant")==-1) for row in adata.obs.loc[:,"cluster"]]
sens_ = ['Resistant' if (row.find("Resistant")!=-1) else 'Sensitive' for row in adata.obs.loc[:,"cluster"]]
#adata.obs.loc[adata.obs.cluster=="Holiday","cluster"] = "Sensitive"
adata.obs['sensitive'] = sensitive
adata.obs['sensitivity'] = sens_
# Cluster de score
pval = 0.05
n_genes = 50
if "pval_thres" in kargs:
pval=kargs['pval_thres']
if "num_de" in kargs:
n_genes = kargs['num_de']
adata = de_score(adata=adata,clustername="sensitivity",pval=pval,n=n_genes)
return adata
def process_122843(adata,**kargs):
# Data specific preprocessing of cell info
file_name = 'data/GSE122843/GSE122843_CellInfo.xlsx' # change it to the name of your excel file
df_cellinfo = read_excel(file_name,header=2)
df_cellinfo = df_cellinfo.fillna(method='pad')
# Dictionary mapping sample names between the cell info sheet and the expression matrix
match_dict={'DMSO':'DMSO (D7)',
"DMSOw8":'DMSO (D56)',
"IBET400":"400nM IBET",
"IBET600":"600nM IBET",
"IBET800":"800nM IBET",
"IBETI1000":"1000nM IBET",
"IBET1000w8":"1000nM IBET (D56)"}
inv_match_dict = {v: k for k, v in match_dict.items()}
index = [inv_match_dict[sn]+'_' for sn in df_cellinfo.loc[:,'Sample Name']]
# Create an index in the count-matrix style
inversindex = index+df_cellinfo.loc[:,'Well Position']
inversindex.name = 'Index'
df_cellinfo.index = inversindex
# Left join the cell info onto the adata obs
obs_merge = pd.merge(adata.obs,df_cellinfo,left_index=True,right_index=True,how='left')
# Replace obs
adata.obs = obs_merge
return adata
def process_110894(adata,**kargs):
# Data specific preprocessing of cell info
file_name = 'data/GSE110894/GSE110894_CellInfo.xlsx' # change it to the name of your excel file
df_cellinfo = read_excel(file_name,header=3)
df_cellinfo=df_cellinfo.dropna(how="all")
df_cellinfo = df_cellinfo.fillna(method='pad')
well_post = ["_"+wp.split("=")[0] for wp in df_cellinfo.loc[:,'Well position']]
inversindex = df_cellinfo.loc[:,'Plate#']+well_post
inversindex.name = 'Index'
df_cellinfo.index = inversindex
obs_merge = pd.merge(adata.obs,df_cellinfo,left_index=True,right_index=True,how='left')
adata.obs = obs_merge
sensitive = [int(row.find("RESISTANT")==-1) for row in obs_merge.loc[:,"Sample name"]]
adata.obs['sensitive'] = sensitive
sens_ = ['Resistant' if (row.find("RESISTANT")!=-1) else 'Sensitive' for row in obs_merge.loc[:,"Sample name"]]
adata.obs['sensitivity'] = sens_
pval = 0.05
n_genes = 50
if "pval_thres" in kargs:
pval=kargs['pval_thres']
if "num_de" in kargs:
n_genes = kargs['num_de']
adata = de_score(adata=adata,clustername="sensitivity",pval=pval,n=n_genes)
print(adata)
return adata
def process_112274(adata,**kargs):
obs_names = adata.obs.index
annotation_dict = {}
for section in [0,1,2,3]:
svals = [index.split("_")[section] for index in obs_names]
annotation_dict["name_section_"+str(section+1)] = svals
df_annotation=pd.DataFrame(annotation_dict,index=obs_names)
adata.obs=df_annotation
sensitive = [int(row.find("parental")!=-1) for row in df_annotation.loc[:,"name_section_2"]]
adata.obs['sensitive'] = sensitive
sens_ = ['Resistant' if (row.find("parental")==-1) else 'Sensitive' for row in df_annotation.loc[:,"name_section_2"]]
adata.obs['sensitivity'] = sens_
pval = 0.05
n_genes = 50
if "pval_thres" in kargs:
pval=kargs['pval_thres']
if "num_de" in kargs:
n_genes = kargs['num_de']
adata = de_score(adata=adata,clustername="sensitivity",pval=pval,n=n_genes)
return adata
def process_116237(adata,**kargs):
obs_names = adata.obs.index
annotation_dict = {}
for section in [0,1,2]:
svals = [re.split('_|\.',index)[section] for index in obs_names]
annotation_dict["name_section_"+str(section+1)] = svals
return adata
def process_140440(adata,**kargs):
# Data specific preprocessing of cell info
file_name = 'data/GSE140440/Annotation.txt' # change it to the name of your excel file
df_cellinfo = pd.read_csv(file_name,header=None,index_col=0,sep="\t")
sensitive = [int(row.find("Res")==-1) for row in df_cellinfo.iloc[:,0]]
adata.obs['sensitive'] = sensitive
sens_ = ['Resistant' if (row.find("Res")!=-1) else 'Sensitive' for row in df_cellinfo.iloc[:,0]]
adata.obs['sensitivity'] = sens_
pval = 0.05
n_genes = 50
if "pval_thres" in kargs:
pval=kargs['pval_thres']
if "num_de" in kargs:
n_genes = kargs['num_de']
adata = de_score(adata=adata,clustername="sensitivity",pval=pval,n=n_genes)
return adata
def process_129730(adata,**kargs):
#Data specific preprocessing of cell info
# sensitive = [ 1 if row in [''] \
# for row in adata.obs['sample']]
sensitive = [ 1 if (row <=9) else 0 for row in adata.obs['sample'].astype(int)]
adata.obs['sensitive'] = sensitive
sens_ = ['Resistant' if (row >9) else 'Sensitive' for row in adata.obs['sample'].astype(int)]
adata.obs['sensitivity'] = sens_
pval = 0.05
n_genes = 50
if "pval_thres" in kargs:
pval=kargs['pval_thres']
if "num_de" in kargs:
n_genes = kargs['num_de']
adata = de_score(adata=adata,clustername="sensitivity",pval=pval,n=n_genes)
return adata
def process_149383(adata,**kargs):
# Data specific preprocessing of cell info
file_name = '../data/GSE149383/erl_total_2K_meta.csv' # change it to the name of your csv file
df_cellinfo = pd.read_csv(file_name, header=None, index_col=0)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pendulum
from bowtie import App, command
from bowtie.visual import Markdown, Plotly
from bowtie.control import Dropdown
import plotly.graph_objs as go
import pandas as pd
kickstarter_df = pd.read_csv('kickstarter-cleaned.csv', parse_dates=False)
kickstarter_df['broader_category'] = kickstarter_df['category_slug'].str.split('/').str.get(0)
kickstarter_df['created_at'] = pd.to_datetime(kickstarter_df['created_at'])
kickstarter_df_sub = kickstarter_df.sample(10000)
CATEGORIES = kickstarter_df.broader_category.unique()
COLUMNS = ['launched_at', 'deadline', 'blurb', 'usd_pledged', 'state',
'spotlight', 'staff_pick', 'category_slug', 'backers_count', 'country']
# Picked with http://tristen.ca/hcl-picker/#/hlc/6/1.05/251C2A/E98F55
COLORS = ['#7DFB6D', '#C7B815', '#D4752E', '#C7583F']
STATES = ['successful', 'suspended', 'failed', 'canceled']
header = Markdown('# Kickstarter Dashboard')
select = Dropdown(labels=CATEGORIES, values=CATEGORIES, multi=True)
pledged = Plotly()
counts = Plotly()
def init():
z = select.get()
if z is None:
update_pledged()
update_counts()
def get_categories(categories=None):
if categories:
return [x['value'] for x in categories]
return CATEGORIES
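# The multi-select Dropdown reports its state as a list of {'value', 'label'}
# dicts (or None when nothing is selected), which is why get_categories
# unpacks x['value'] and falls back to the full CATEGORIES list.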
def update_pledged(categories=None):
categories = get_categories(categories)
sub_df = kickstarter_df_sub[kickstarter_df_sub.broader_category.isin(categories)]
pdict = {
'data': [
go.Scatter(
x=sub_df[kickstarter_df_sub.state == state].created_at,
y=sub_df[kickstarter_df_sub.state == state].usd_pledged,
text=sub_df[kickstarter_df_sub.state == state].name,
mode='markers',
opacity=0.7,
marker={
'size': 15,
'color': color,
'line': {'width': 0.5, 'color': 'white'}
},
name=state,
) for (state, color) in zip(STATES, COLORS)
],
'layout': go.Layout(
xaxis={'title': 'Date'},
yaxis={'title': 'USD pledged', 'type': 'log'},
margin={'l': 40, 'b': 40, 't': 10, 'r': 10},
legend={'x': 0, 'y': 1},
hovermode='closest'
)
}
pledged.do_all(pdict)
def update_counts(categories=None, layout=None):
categories = get_categories(categories)
if layout is not None and 'xaxis.autorange' not in layout:
x0 = pendulum.parse(layout['xaxis.range[0]'])
x1 = pendulum.parse(layout['xaxis.range[1]'])
y0 = 10 ** layout['yaxis.range[0]']
y1 = 10 ** layout['yaxis.range[1]']
sub_df = kickstarter_df[kickstarter_df.created_at.between(x0, x1) & kickstarter_df.usd_pledged.between(y0, y1)]
else:
sub_df = kickstarter_df
sub_df = sub_df[sub_df.broader_category.isin(categories)]
stacked_barchart_df = (
sub_df.groupby('broader_category').state
.value_counts()
)
stacked_barchart_df = stacked_barchart_df.reindex(
pd.MultiIndex.from_product([categories, STATES], names=stacked_barchart_df.index.names)